This article collects typical usage examples of the C++ method LayerParameter::add_top. If you are unsure what LayerParameter::add_top does, how to call it, or what idiomatic usage looks like, the curated examples below should help. You may also want to look at the enclosing class, LayerParameter, for further context.
Four code examples of LayerParameter::add_top are shown below, sorted by popularity by default.
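Before the full examples, here is a minimal sketch of the method itself (a hypothetical standalone snippet, assuming Caffe's protobuf-generated headers are available): add_top appends a blob name to the repeated "top" field of a LayerParameter, declaring one more output blob for the layer.

#include <iostream>
#include "caffe/proto/caffe.pb.h"

int main() {
  caffe::LayerParameter param;
  param.set_name("my_data");   // hypothetical layer name
  param.set_type("HDF5Data");
  param.add_top("data");       // first output blob
  param.add_top("label");      // second output blob
  // Protobuf generates the usual repeated-field accessors alongside add_top:
  std::cout << param.top_size() << std::endl;  // 2
  std::cout << param.top(0) << std::endl;      // "data"
  return 0;
}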
Example 1: layer
TYPED_TEST(HDF5DataLayerTest, TestSkip) {
  typedef typename TypeParam::Dtype Dtype;
  LayerParameter param;
  param.add_top("data");
  param.add_top("label");
  HDF5DataParameter* hdf5_data_param = param.mutable_hdf5_data_param();
  int batch_size = 5;
  hdf5_data_param->set_batch_size(batch_size);
  hdf5_data_param->set_source(*(this->filename));
  Caffe::set_solver_count(8);
  for (int dev = 0; dev < Caffe::solver_count(); ++dev) {
    Caffe::set_solver_rank(dev);
    HDF5DataLayer<Dtype> layer(param);
    layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
    int label = dev;
    for (int iter = 0; iter < 1; ++iter) {
      layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
      for (int i = 0; i < batch_size; ++i) {
        EXPECT_EQ(1 + label, this->blob_top_label_->cpu_data()[i]);
        label = (label + Caffe::solver_count()) % (batch_size * 2);
      }
    }
  }
  Caffe::set_solver_count(1);
  Caffe::set_solver_rank(0);
}
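The expected values in this test follow from the skip logic: with solver_count == 8, each rank reads every eighth row of the 10-row dataset (labels 1 through 10), wrapping modulo batch_size * 2 == 10. Rank 0 therefore expects the labels 1, 9, 7, 5, 3 in its first batch, and each other rank sees the same cycle shifted by its rank.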
Example 2: CreateLayer
TYPED_TEST(HDF5DataLayerTest, TestSkip) {
  typedef typename TypeParam::Dtype Dtype;
  LayerParameter param;
  param.set_type("HDF5Data");
  param.add_top("data");
  param.add_top("label");
  if (std::is_same<Dtype, half_fp>::value) {
    param.set_bottom_data_type(CAFFE_FLOAT);
    param.set_compute_data_type(CAFFE_FLOAT);
    param.set_top_data_type(proto_data_type<Dtype>());
  }
  HDF5DataParameter* hdf5_data_param = param.mutable_hdf5_data_param();
  int batch_size = 5;
  hdf5_data_param->set_batch_size(batch_size);
  hdf5_data_param->set_source(*(this->filename));
  int_tp num_cols = 8;
  int_tp height = 6;
  int_tp width = 5;
  Caffe::set_solver_count(8);
  for (int dev = 0; dev < Caffe::solver_count(); ++dev) {
    Caffe::set_solver_rank(dev);
    shared_ptr<LayerBase> layer = CreateLayer(param);
    layer->SetUp(this->blob_bottom_base_vec_, this->blob_top_base_vec_);
    EXPECT_EQ(this->blob_top_data_->num(), batch_size);
    EXPECT_EQ(this->blob_top_data_->channels(), num_cols);
    EXPECT_EQ(this->blob_top_data_->height(), height);
    EXPECT_EQ(this->blob_top_data_->width(), width);
    EXPECT_EQ(this->blob_top_label_->num_axes(), 2);
    EXPECT_EQ(this->blob_top_label_->shape(0), batch_size);
    EXPECT_EQ(this->blob_top_label_->shape(1), 1);
    int label = dev;
    for (int iter = 0; iter < 1; ++iter) {
      layer->Forward(this->blob_bottom_base_vec_, this->blob_top_base_vec_,
                     nullptr);
      for (int i = 0; i < batch_size; ++i) {
        EXPECT_EQ(1 + label, this->blob_top_label_->cpu_data()[i]);
        label = (label + Caffe::solver_count()) % (batch_size * 2);
      }
    }
  }
  Caffe::set_solver_count(1);
  Caffe::set_solver_rank(0);
}
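Example 2 exercises the same skip logic as Example 1, but through a layer factory: CreateLayer returns a type-erased shared_ptr<LayerBase> instead of a concrete HDF5DataLayer<Dtype>. For the half-precision instantiation (half_fp), the extra data-type overrides force the layer to load and compute in CAFFE_FLOAT while emitting its top blobs in the test's Dtype. Note also that this fork's Forward takes an additional argument (here nullptr) compared with Example 1.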
Example 3: LOG
template<typename Dtype, typename MItype, typename MOtype>
void RecurrentLayer<Dtype, MItype, MOtype>::LayerSetUp(
    const vector<Blob<MItype>*>& bottom,
    const vector<Blob<MOtype>*>& top) {
  CHECK_GE(bottom[0]->num_axes(), 2)
      << "bottom[0] must have at least 2 axes -- (#timesteps, #streams, ...)";
  T_ = bottom[0]->shape(0);
  N_ = bottom[0]->shape(1);
  LOG(INFO) << "Initializing recurrent layer: assuming input batch contains "
            << T_ << " timesteps of " << N_ << " independent streams.";
  CHECK_EQ(bottom[1]->num_axes(), 2)
      << "bottom[1] must have exactly 2 axes -- (#timesteps, #streams)";
  CHECK_EQ(T_, bottom[1]->shape(0));
  CHECK_EQ(N_, bottom[1]->shape(1));
  // If expose_hidden is set, we take as input and produce as output
  // the hidden state blobs at the first and last timesteps.
  expose_hidden_ = this->layer_param_.recurrent_param().expose_hidden();
  // Get (recurrent) input/output names.
  vector<string> output_names;
  OutputBlobNames(&output_names);
  vector<string> recur_input_names;
  RecurrentInputBlobNames(&recur_input_names);
  vector<string> recur_output_names;
  RecurrentOutputBlobNames(&recur_output_names);
  const int num_recur_blobs = recur_input_names.size();
  CHECK_EQ(num_recur_blobs, recur_output_names.size());
  // If provided, bottom[2] is a static input to the recurrent net.
  const int num_hidden_exposed = expose_hidden_ * num_recur_blobs;
  static_input_ = (bottom.size() > 2 + num_hidden_exposed);
  if (static_input_) {
    CHECK_GE(bottom[2]->num_axes(), 1);
    CHECK_EQ(N_, bottom[2]->shape(0));
  }
  // Create a NetParameter; setup the inputs that aren't unique to particular
  // recurrent architectures.
  NetParameter net_param;
  LayerParameter* input_layer_param = net_param.add_layer();
  input_layer_param->set_type("Input");
  InputParameter* input_param = input_layer_param->mutable_input_param();
  input_layer_param->add_top("X");
  BlobShape input_shape;
  for (int i = 0; i < bottom[0]->num_axes(); ++i) {
    input_shape.add_dim(bottom[0]->shape(i));
  }
  input_param->add_shape()->CopyFrom(input_shape);
  input_shape.Clear();
  for (int i = 0; i < bottom[1]->num_axes(); ++i) {
    input_shape.add_dim(bottom[1]->shape(i));
  }
  input_layer_param->add_top("cont");
  input_param->add_shape()->CopyFrom(input_shape);
  if (static_input_) {
    input_shape.Clear();
    for (int i = 0; i < bottom[2]->num_axes(); ++i) {
      input_shape.add_dim(bottom[2]->shape(i));
    }
    input_layer_param->add_top("x_static");
    input_param->add_shape()->CopyFrom(input_shape);
  }
  // Call the child's FillUnrolledNet implementation to specify the unrolled
  // recurrent architecture.
  this->FillUnrolledNet(&net_param);
  // Prepend this layer's name to the names of each layer in the unrolled net.
  const string& layer_name = this->layer_param_.name();
  if (layer_name.size()) {
    for (int i = 0; i < net_param.layer_size(); ++i) {
      LayerParameter* layer = net_param.mutable_layer(i);
      layer->set_name(layer_name + "_" + layer->name());
    }
  }
  // Add "pseudo-losses" to all outputs to force backpropagation.
  // (Setting force_backward is too aggressive as we may not need to backprop
  // to all inputs, e.g., the sequence continuation indicators.)
  vector<string> pseudo_losses(output_names.size());
  for (int i = 0; i < output_names.size(); ++i) {
    LayerParameter* layer = net_param.add_layer();
    pseudo_losses[i] = output_names[i] + "_pseudoloss";
    layer->set_name(pseudo_losses[i]);
    layer->set_type("Reduction");
    layer->add_bottom(output_names[i]);
    layer->add_top(pseudo_losses[i]);
    layer->add_loss_weight(1);
  }
  // Create the unrolled net.
  unrolled_net_.reset(new Net<Dtype>(net_param, this->device_));
  unrolled_net_->set_debug_info(
      this->layer_param_.recurrent_param().debug_info());
  // Setup pointers to the inputs.
  // ... (remainder of the function omitted in the original listing) ...
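To see what the add_top calls above contribute to the generated net, here is a minimal sketch (a hypothetical standalone snippet with made-up shape values, assuming Caffe's protobuf-generated headers) that builds the same Input layer and prints its text-format prototxt:

#include <iostream>
#include "caffe/proto/caffe.pb.h"

int main() {
  caffe::NetParameter net_param;
  caffe::LayerParameter* input_layer_param = net_param.add_layer();
  input_layer_param->set_type("Input");
  caffe::InputParameter* input_param =
      input_layer_param->mutable_input_param();
  input_layer_param->add_top("X");  // each add_top becomes a top: entry
  caffe::BlobShape shape;
  shape.add_dim(16);  // hypothetical T_ (timesteps)
  shape.add_dim(4);   // hypothetical N_ (streams)
  input_param->add_shape()->CopyFrom(shape);
  // Prints roughly:
  //   layer { type: "Input" top: "X" input_param { shape { dim: 16 dim: 4 } } }
  std::cout << net_param.DebugString();
  return 0;
}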
Example 4: layer
TYPED_TEST(HDF5DataLayerTest, TestRead) {
  typedef typename TypeParam::Dtype Dtype;
  // Create a LayerParameter with the known parameters.
  // The data files we are reading each have 10 rows of 8 x 6 x 5 values,
  // filled with consecutive values in row-major order
  // (see generate_sample_data).
  LayerParameter param;
  param.add_top("data");
  param.add_top("label");
  param.add_top("label2");
  HDF5DataParameter* hdf5_data_param = param.mutable_hdf5_data_param();
  int batch_size = 5;
  hdf5_data_param->set_batch_size(batch_size);
  hdf5_data_param->set_source(*(this->filename));
  int num_cols = 8;
  int height = 6;
  int width = 5;
  // Test that the layer setup got the correct parameters.
  HDF5DataLayer<Dtype> layer(param);
  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
  EXPECT_EQ(this->blob_top_data_->num(), batch_size);
  EXPECT_EQ(this->blob_top_data_->channels(), num_cols);
  EXPECT_EQ(this->blob_top_data_->height(), height);
  EXPECT_EQ(this->blob_top_data_->width(), width);
  EXPECT_EQ(this->blob_top_label_->num_axes(), 2);
  EXPECT_EQ(this->blob_top_label_->shape(0), batch_size);
  EXPECT_EQ(this->blob_top_label_->shape(1), 1);
  EXPECT_EQ(this->blob_top_label2_->num_axes(), 2);
  EXPECT_EQ(this->blob_top_label2_->shape(0), batch_size);
  EXPECT_EQ(this->blob_top_label2_->shape(1), 1);
  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
  // Run 10 forward passes, each producing a batch of 5 rows.
  const int data_size = num_cols * height * width;
  for (int iter = 0; iter < 10; ++iter) {
    layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
    // On even iterations, we're reading the first half of the data.
    // On odd iterations, we're reading the second half of the data.
    // NB: label is 1-indexed.
    int label_offset = 1 + ((iter % 2 == 0) ? 0 : batch_size);
    int label2_offset = 1 + label_offset;
    int data_offset = (iter % 2 == 0) ? 0 : batch_size * data_size;
    // Every two iterations we are reading the second file,
    // which has the same labels, but data is offset by total data size,
    // which is 2400 (see generate_sample_data).
    int file_offset = (iter % 4 < 2) ? 0 : 2400;
    for (int i = 0; i < batch_size; ++i) {
      EXPECT_EQ(label_offset + i,
                this->blob_top_label_->cpu_data()[i]);
      EXPECT_EQ(label2_offset + i,
                this->blob_top_label2_->cpu_data()[i]);
    }
    for (int i = 0; i < batch_size; ++i) {
      for (int j = 0; j < num_cols; ++j) {
        for (int h = 0; h < height; ++h) {
          for (int w = 0; w < width; ++w) {
            int idx = (i * num_cols * height * width +
                       j * height * width +
                       h * width + w);
            EXPECT_EQ(file_offset + data_offset + idx,
                      this->blob_top_data_->cpu_data()[idx])
                << "debug: i " << i << " j " << j << " iter " << iter;
          }
        }
      }
    }
  }
}
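The offsets in Example 4 are consistent with the shape assertions: each row holds num_cols * height * width = 240 values, so one 10-row file holds 2400 values, which is exactly the file_offset applied on iterations where the second file is being read (iter % 4 >= 2). The three add_top calls are what wire the HDF5 datasets "data", "label", and "label2" to the three top blobs the assertions read back.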