This article collects typical usage examples of the C++ method Layer::Backward. If you have been wondering what Layer::Backward does, how to call it, or what it looks like in real code, the curated examples below may help; you can also read further into its containing class, Layer, for more context.
Two code examples of Layer::Backward are shown below.
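For orientation: in recent BVLC/caffe releases the method is declared roughly as shown below (older releases, which Example 2 is written against, instead exposed Dtype Backward(const vector<Blob<Dtype>*>& top, const bool propagate_down, vector<Blob<Dtype>*>* bottom), returning any extra loss computed by the layer itself). The comments are a paraphrase, not the original header text:

// Approximate declaration from include/caffe/layer.hpp (recent Caffe):
// given the diffs stored in the top blobs, compute the gradients with
// respect to each bottom blob whose propagate_down flag is set.
void Backward(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom);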
Example 1: caffe_layer_Backward
EXPORT void caffe_layer_Backward(void *layerAnon, void *topAnon, void *propagate_downAnon, void *bottomAnon)
{
    // Recover the typed objects from the opaque handles: each handle is a
    // pointer to the corresponding C++ object, so cast and dereference.
    // (The original cast to a reference type bound the reference to the
    // pointer variable itself rather than to the pointed-to vector.)
    Layer<float> *layer = (Layer<float> *)layerAnon;
    vector<Blob<float>*> &top = *(vector<Blob<float>*> *)topAnon;
    vector<bool> &propagate_down = *(vector<bool> *)propagate_downAnon;
    vector<Blob<float>*> &bottom = *(vector<Blob<float>*> *)bottomAnon;
    layer->Backward(top, propagate_down, bottom);
}
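On the caller's side, each void* handle is simply the address of a live C++ object. A minimal usage sketch, not part of the original example (run_backward is an illustrative helper; the layer and blob vectors are assumed to have been set up elsewhere):

// Sketch: a caller passes the addresses of live objects as the handles.
void run_backward(Layer<float>* layer,            // already constructed
                  vector<Blob<float>*>& top,      // populated during setup
                  vector<Blob<float>*>& bottom) { // populated during setup
    // One flag per bottom blob; here we back-propagate to all of them.
    vector<bool> propagate_down(bottom.size(), true);
    caffe_layer_Backward(layer, &top, &propagate_down, &bottom);
}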
Example 2: GetObjAndGradient
template <typename Dtype>
void GradientChecker<Dtype>::CheckGradientSingle(Layer<Dtype>& layer,
    vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>& top,
    int check_bottom, int top_id, int top_data_id) {
  // First, figure out what blobs we need to check against: all the
  // layer's parameter blobs, plus the requested bottom blob(s).
  vector<Blob<Dtype>*> blobs_to_check;
  for (int i = 0; i < layer.blobs().size(); ++i) {
    blobs_to_check.push_back(layer.blobs()[i].get());
  }
  if (check_bottom < 0) {
    for (int i = 0; i < bottom.size(); ++i) {
      blobs_to_check.push_back(bottom[i]);
    }
  } else {
    CHECK(check_bottom < bottom.size());
    blobs_to_check.push_back(bottom[check_bottom]);
  }
  // Go through the bottom and parameter blobs.
  // LOG(ERROR) << "Checking " << blobs_to_check.size() << " blobs.";
  for (int blobid = 0; blobid < blobs_to_check.size(); ++blobid) {
    Blob<Dtype>* current_blob = blobs_to_check[blobid];
    // LOG(ERROR) << "Blob " << blobid << ": checking " << current_blob->count()
    //     << " parameters.";
    // Go through the values.
    for (int feat_id = 0; feat_id < current_blob->count(); ++feat_id) {
      // First, obtain the original objective and analytic gradient.
      Caffe::set_random_seed(seed_);
      layer.Forward(bottom, &top);
      Dtype computed_objective = GetObjAndGradient(top, top_id, top_data_id);
      // Get any additional loss from the layer.
      computed_objective += layer.Backward(top, true, &bottom);
      Dtype computed_gradient = current_blob->cpu_diff()[feat_id];
      // Compute the objective after adding stepsize to this value.
      current_blob->mutable_cpu_data()[feat_id] += stepsize_;
      Caffe::set_random_seed(seed_);
      layer.Forward(bottom, &top);
      Dtype positive_objective = GetObjAndGradient(top, top_id, top_data_id);
      positive_objective += layer.Backward(top, true, &bottom);
      // Compute the objective after subtracting stepsize from this value.
      current_blob->mutable_cpu_data()[feat_id] -= stepsize_ * 2;
      Caffe::set_random_seed(seed_);
      layer.Forward(bottom, &top);
      Dtype negative_objective = GetObjAndGradient(top, top_id, top_data_id);
      negative_objective += layer.Backward(top, true, &bottom);
      // Restore the original value.
      current_blob->mutable_cpu_data()[feat_id] += stepsize_;
      // Central difference estimate of the gradient.
      Dtype estimated_gradient = (positive_objective - negative_objective)
          / stepsize_ / 2.;
      Dtype feature = current_blob->cpu_data()[feat_id];
      // LOG(ERROR) << "debug: " << current_blob->cpu_data()[feat_id] << " "
      //     << current_blob->cpu_diff()[feat_id];
      if (kink_ - kink_range_ > feature || feature > kink_ + kink_range_) {
        // We check relative accuracy, but for too small values, we threshold
        // the scale factor by 1.
        Dtype scale = max(
            max(fabs(computed_gradient), fabs(estimated_gradient)), 1.);
        EXPECT_GT(computed_gradient, estimated_gradient - threshold_ * scale)
            << "debug: (top_id, top_data_id, blob_id, feat_id)="
            << top_id << "," << top_data_id << "," << blobid << "," << feat_id;
        EXPECT_LT(computed_gradient, estimated_gradient + threshold_ * scale)
            << "debug: (top_id, top_data_id, blob_id, feat_id)="
            << top_id << "," << top_data_id << "," << blobid << "," << feat_id;
      }
      // LOG(ERROR) << "Feature: " << current_blob->cpu_data()[feat_id];
      // LOG(ERROR) << "computed gradient: " << computed_gradient
      //     << " estimated_gradient: " << estimated_gradient;
    }
  }
}
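Example 2 is a finite-difference gradient check: for each checked value it perturbs the data by ±stepsize_, re-runs Forward (and Backward, to pick up any loss the layer itself contributes), and compares the central difference quotient (positive_objective - negative_objective) / (2 * stepsize_) against the analytic gradient read from cpu_diff(), skipping values that fall within kink_range_ of a kink, i.e. a non-differentiable point of the objective such as ReLU's corner at zero. The core idea, stripped of the Caffe machinery, in a standalone sketch (not from the original source):

#include <cstdio>

// Central-difference gradient estimate of a scalar function f at x,
// mirroring (positive_objective - negative_objective) / stepsize_ / 2.
template <typename F>
double estimate_gradient(F f, double x, double h) {
  return (f(x + h) - f(x - h)) / (2.0 * h);
}

int main() {
  // d/dx of x^2 at x = 3 is exactly 6; the estimate agrees to roughly
  // O(h^2) error, so h = 1e-4 already gives ~8 correct digits.
  auto square = [](double x) { return x * x; };
  std::printf("%.10f\n", estimate_gradient(square, 3.0, 1e-4));
  return 0;
}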