本文整理汇总了C++中LayerParameter类的典型用法代码示例。如果您正苦于以下问题:C++ LayerParameter类的具体用法?C++ LayerParameter怎么用?C++ LayerParameter使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了LayerParameter类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: TYPED_TEST
// Verifies that a single top blob produced by DummyDataLayer gets the
// requested 5x3x2x4 shape and is filled with the default constant (zero),
// both right after SetUp and again after Forward.
TYPED_TEST(DummyDataLayerTest, TestOneTopConstant) {
  Caffe::set_mode(Caffe::CPU);
  LayerParameter param;
  DummyDataParameter* dummy_param = param.mutable_dummy_data_param();
  dummy_param->add_num(5);
  dummy_param->add_channels(3);
  dummy_param->add_height(2);
  dummy_param->add_width(4);
  this->blob_top_vec_.resize(1);
  DummyDataLayer<TypeParam> layer(param);
  layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
  EXPECT_EQ(this->blob_top_a_->num(), 5);
  EXPECT_EQ(this->blob_top_a_->channels(), 3);
  EXPECT_EQ(this->blob_top_a_->height(), 2);
  EXPECT_EQ(this->blob_top_a_->width(), 4);
  // The unused tops must stay empty.
  EXPECT_EQ(this->blob_top_b_->count(), 0);
  EXPECT_EQ(this->blob_top_c_->count(), 0);
  // Check every element of every top blob is zero, once before Forward
  // (pass 0) and once after (pass 1).
  for (int pass = 0; pass < 2; ++pass) {
    for (int t = 0; t < this->blob_top_vec_.size(); ++t) {
      const TypeParam* top_data = this->blob_top_vec_[t]->cpu_data();
      for (int k = 0; k < this->blob_top_vec_[t]->count(); ++k) {
        EXPECT_EQ(0, top_data[k]);
      }
    }
    if (pass == 0) {
      layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
    }
  }
}
示例2: GetPoolingLayer
// Creates a pooling layer for `param`, choosing between Caffe's native
// implementation and the cuDNN one (when built with USE_CUDNN).
// DEFAULT resolves to cuDNN if available, otherwise CAFFE.
shared_ptr<Layer<Dtype> > GetPoolingLayer(const LayerParameter& param) {
  PoolingParameter_Engine engine = param.pooling_param().engine();
  if (engine == PoolingParameter_Engine_DEFAULT) {
    engine = PoolingParameter_Engine_CAFFE;
#ifdef USE_CUDNN
    engine = PoolingParameter_Engine_CUDNN;
#endif
  }
  if (engine == PoolingParameter_Engine_CAFFE) {
    return shared_ptr<Layer<Dtype> >(new PoolingLayer<Dtype>(param));
#ifdef USE_CUDNN
  } else if (engine == PoolingParameter_Engine_CUDNN) {
    PoolingParameter p_param = param.pooling_param();
    // cuDNN pooling cannot handle padding or multiple top blobs; fall back
    // to the native implementation in those cases.
    if (p_param.pad() || p_param.pad_h() || p_param.pad_w() ||
        param.top_size() > 1) {
      LOG(INFO) << "CUDNN does not support padding or multiple tops. "
          << "Using Caffe's own pooling layer.";
      return shared_ptr<Layer<Dtype> >(new PoolingLayer<Dtype>(param));
    }
    return shared_ptr<Layer<Dtype> >(new CuDNNPoolingLayer<Dtype>(param));
#endif
  } else {
    LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
  }
  // Unreachable: LOG(FATAL) aborts. Present to silence missing-return
  // warnings, matching GetTanHLayer in this file.
  return shared_ptr<Layer<Dtype> >();
}
示例3: TYPED_TEST
// Exercises HDF5DataLayer's rank-based row skipping: with solver_count == 8,
// the layer instantiated for rank `dev` should deliver every 8th row,
// starting at row `dev`.
TYPED_TEST(HDF5DataLayerTest, TestSkip) {
typedef typename TypeParam::Dtype Dtype;
LayerParameter param;
param.add_top("data");
param.add_top("label");
HDF5DataParameter* hdf5_data_param = param.mutable_hdf5_data_param();
int batch_size = 5;
hdf5_data_param->set_batch_size(batch_size);
hdf5_data_param->set_source(*(this->filename));
// Simulate a multi-GPU run: 8 solvers, each identified by its rank.
Caffe::set_solver_count(8);
for (int dev = 0; dev < Caffe::solver_count(); ++dev) {
Caffe::set_solver_rank(dev);
HDF5DataLayer<Dtype> layer(param);
layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
// Expected label index for this rank's first row.
int label = dev;
for (int iter = 0; iter < 1; ++iter) {
layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
for (int i = 0; i < batch_size; ++i) {
// NOTE(review): assumes the fixture file stores labels as (row index + 1)
// over batch_size * 2 rows, wrapping around — confirm against the
// test HDF5 file referenced by this->filename.
EXPECT_EQ(1 + label, this->blob_top_label_->cpu_data()[i]);
label = (label + Caffe::solver_count()) % (batch_size * 2);
}
}
}
// Restore single-solver defaults so later tests are unaffected.
Caffe::set_solver_count(1);
Caffe::set_solver_rank(0);
}
示例4: GetTanHLayer
// Creates a TanH layer. The engine is chosen in priority order:
// 1) an explicit tanh_param().engine(),
// 2) the string-valued param.engine() parsed via EngineParser,
// 3) the compile-time default (cuDNN if built with USE_CUDNN, else native).
shared_ptr<Layer<Dtype> > GetTanHLayer(const LayerParameter& param) {
TanHParameter_Engine engine = param.tanh_param().engine();
// New, more flexible way of providing engine
if (engine == TanHParameter_Engine_DEFAULT && param.engine() != "") {
EngineParser ep(param.engine());
if (ep.isEngine("CAFFE"))
engine = TanHParameter_Engine_CAFFE;
#ifdef USE_CUDNN
if (ep.isEngine("CUDNN"))
engine = TanHParameter_Engine_CUDNN;
#endif
}
// Still DEFAULT: fall back to the compile-time preference.
if (engine == TanHParameter_Engine_DEFAULT) {
engine = TanHParameter_Engine_CAFFE;
#ifdef USE_CUDNN
engine = TanHParameter_Engine_CUDNN;
#endif
}
if (engine == TanHParameter_Engine_CAFFE) {
return shared_ptr<Layer<Dtype> >(new TanHLayer<Dtype>(param));
#ifdef USE_CUDNN
} else if (engine == TanHParameter_Engine_CUDNN) {
return shared_ptr<Layer<Dtype> >(new CuDNNTanHLayer<Dtype>(param));
#endif
} else {
LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
}
// Unreachable (LOG(FATAL) aborts); silences missing-return warnings.
return shared_ptr<Layer<Dtype> >();
}
示例5: GetLRNLayer
// Creates an LRN layer, choosing between the native implementation and the
// cuDNN LRN/LCN layers (when built with USE_CUDNN). DEFAULT resolves to
// cuDNN if available, otherwise CAFFE.
shared_ptr<Layer<Dtype> > GetLRNLayer(const LayerParameter& param) {
  LRNParameter_Engine engine = param.lrn_param().engine();
  if (engine == LRNParameter_Engine_DEFAULT) {
    engine = LRNParameter_Engine_CAFFE;
#ifdef USE_CUDNN
    engine = LRNParameter_Engine_CUDNN;
#endif
  }
  // OpenCL devices always use the native layer, regardless of engine.
  if (engine == LRNParameter_Engine_CAFFE
      || Caffe::GetDevice(param.device(), true)->backend() == BACKEND_OpenCL) {
    return shared_ptr<Layer<Dtype> >(new LRNLayer<Dtype>(param));
#ifdef USE_CUDNN
  } else if (engine == LRNParameter_Engine_CUDNN) {
    LRNParameter lrn_param = param.lrn_param();
    // Within-channel normalization maps to cuDNN's LCN; across-channel
    // maps to cuDNN's LRN.
    if (lrn_param.norm_region() == LRNParameter_NormRegion_WITHIN_CHANNEL) {
      return shared_ptr<Layer<Dtype> >(new CuDNNLCNLayer<Dtype>(param));
    } else {
      // local size is too big to be handled through cuDNN
      if (param.lrn_param().local_size() > CUDNN_LRN_MAX_N) {
        return shared_ptr<Layer<Dtype> >(new LRNLayer<Dtype>(param));
      } else {
        return shared_ptr<Layer<Dtype> >(new CuDNNLRNLayer<Dtype>(param));
      }
    }
#endif
  } else {
    LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
  }
  // Unreachable: LOG(FATAL) aborts. Present to silence missing-return
  // warnings, matching GetTanHLayer in this file.
  return shared_ptr<Layer<Dtype> >();
}
示例6: switch
// A LayerParameter is acceptable for this producer only when it explicitly
// declares the DATA layer type; anything else (including a missing type)
// is rejected.
bool BaseProducer::is_valid(const LayerParameter& param) {
  return param.has_type() && param.type() == LayerParameter_LayerType_DATA;
}
示例7: CreateLayer
// Get a layer using a LayerParameter: look up the creator registered for
// param.type() and invoke it. CHECK-fails if the type is not registered.
static shared_ptr<Layer<Dtype> > CreateLayer(const LayerParameter& param) {
  LOG(INFO) << "Creating layer " << param.name();
  const string& layer_type = param.type();
  CreatorRegistry& creators = Registry();
  // Exactly one creator must exist for this type.
  CHECK_EQ(creators.count(layer_type), 1) << "Unknown layer type: "
      << layer_type << " (known types: " << LayerTypeList() << ")";
  return creators[layer_type](param);
}
示例8:
// Constructs the annotated-data layer: opens the database named by
// data_param (backend + source) in read mode and positions a fresh cursor
// at its start. Prefetching setup is delegated to the base class.
AnnotatedDataLayer<Dtype>::AnnotatedDataLayer(const LayerParameter& param)
: BasePrefetchingDataLayer<Dtype>(param),
//reader_(param) {
offset_() {
db_.reset(db::GetDB(param.data_param().backend()));
db_->Open(param.data_param().source(), db::READ);
cursor_.reset(db_->NewCursor());
}
示例9: TransformingFastHDF5InputLayer
// Composes a FastHDF5InputLayer with a TransformationLayer. The input
// layer's batch size is forced to 1 before either sub-layer is built.
template<typename Dtype> TransformingFastHDF5InputLayer<Dtype>::
TransformingFastHDF5InputLayer(const LayerParameter& param)
    : Layer<Dtype>(param) {
  // Work on a copy so the caller's parameter is left untouched.
  LayerParameter local_param = param;
  local_param.mutable_fast_hdf5_input_param()->set_batch_size(1);
  input_layer_.reset(new FastHDF5InputLayer<Dtype>(local_param));
  transformation_layer_.reset(new TransformationLayer<Dtype>(local_param));
}
示例10: return
// Decides how many parser threads this layer should run. Auto mode and the
// TEST phase always get exactly one thread; otherwise the prototxt value is
// honored, with 0 corrected to 1.
size_t BasePrefetchingDataLayer<Ftype, Btype>::parser_threads(const LayerParameter& param) {
  // Check user's override in prototxt file
  size_t thread_count = param.data_param().parser_threads();
  if (!auto_mode(param) && thread_count == 0U) {
    thread_count = 1U;  // input error fix
  }
  // 1 thread for test net
  if (auto_mode(param) || param.phase() == TEST || thread_count == 0U) {
    return 1U;
  }
  return thread_count;
}
示例11: TestReadCropTrainSequenceUnseeded
// Checks that random cropping/mirroring during TRAIN produces a DIFFERENT
// sequence when the Caffe RNG is NOT reseeded between two layer instances:
// the first layer consumes RNG state, so the second layer's crops should
// diverge from the recorded sequence even though srand is reseeded.
void TestReadCropTrainSequenceUnseeded() {
LayerParameter param;
param.set_phase(TRAIN);
DataParameter* data_param = param.mutable_data_param();
data_param->set_batch_size(5);
data_param->set_source(filename_->c_str());
data_param->set_backend(backend_);
TransformationParameter* transform_param =
param.mutable_transform_param();
transform_param->set_crop_size(1);
transform_param->set_mirror(true);
// Get crop sequence with Caffe seed 1701, srand seed 1701.
Caffe::set_random_seed(seed_);
srand(seed_);
vector<vector<Dtype> > crop_sequence;
{
DataLayer<Dtype> layer1(param);
layer1.SetUp(blob_bottom_vec_, blob_top_vec_);
for (int iter = 0; iter < 2; ++iter) {
layer1.Forward(blob_bottom_vec_, blob_top_vec_);
// Labels are deterministic (0..4) regardless of cropping.
for (int i = 0; i < 5; ++i) {
EXPECT_EQ(i, blob_top_label_->cpu_data()[i]);
}
// Record the cropped data values (5 items x 2 values each).
vector<Dtype> iter_crop_sequence;
for (int i = 0; i < 5; ++i) {
for (int j = 0; j < 2; ++j) {
iter_crop_sequence.push_back(
blob_top_data_->cpu_data()[i * 2 + j]);
}
}
crop_sequence.push_back(iter_crop_sequence);
}
} // destroy 1st data layer and unlock the db
// Get crop sequence continuing from previous Caffe RNG state; reseed
// srand with 1701. Check that the sequence differs from the original.
srand(seed_);
DataLayer<Dtype> layer2(param);
layer2.SetUp(blob_bottom_vec_, blob_top_vec_);
for (int iter = 0; iter < 2; ++iter) {
layer2.Forward(blob_bottom_vec_, blob_top_vec_);
for (int i = 0; i < 5; ++i) {
EXPECT_EQ(i, blob_top_label_->cpu_data()[i]);
}
// Count matching positions; a full 10/10 match would mean the RNG state
// was (incorrectly) identical to the first run.
int num_sequence_matches = 0;
for (int i = 0; i < 5; ++i) {
for (int j = 0; j < 2; ++j) {
num_sequence_matches += (crop_sequence[iter][i * 2 + j] ==
blob_top_data_->cpu_data()[i * 2 + j]);
}
}
EXPECT_LT(num_sequence_matches, 10);
}
}
示例12: GetBNLayer
// Creates a batch-normalization layer of the flavor requested in bn_param:
// channel-wise or element-wise statistics.
shared_ptr<Layer<Dtype> > GetBNLayer(const LayerParameter& param) {
  BNParameter_Type BN_type = param.bn_param().bn_type();
  if (BN_type == BNParameter_Type_CHANNEL_WISE) {
    return shared_ptr<Layer<Dtype> >(new ChannlWiseBNLayer<Dtype>(param));
  } else if (BN_type == BNParameter_Type_ELEMENT_WISE) {
    return shared_ptr<Layer<Dtype> >(new EltWiseBNLayer<Dtype>(param));
  } else {
    LOG(FATAL) << "Layer " << param.name() << " has unknown type.";
  }
  // Unreachable: LOG(FATAL) aborts. Present to silence missing-return
  // warnings, matching GetTanHLayer in this file.
  return shared_ptr<Layer<Dtype> >();
}
示例13: TestRead2
// Reads the sparse test dataset with batch_size 3 (half the dataset's 6
// rows), so successive Forward calls alternate between the first and second
// half of the data. Verifies shapes, labels, and the CSR-style sparse
// layout (cpu_ptr = row offsets, cpu_data = values, cpu_indices = columns).
void TestRead2() {
std::cerr << "\ntestRead2\n";
LayerParameter param;
DataParameter* data_param =
param.mutable_data_param();
// half the previous batch size to alternate between 2 different dataset
data_param->set_batch_size(3);
data_param->set_backend(backend_);
data_param->set_source(filename_->c_str());
SparseDataLayer<Dtype> layer(param);
layer.SetUp(blob_bottom_vec_, blob_top_vec_);
// Data blob: 3 rows of 6 features; label blob: one label per row.
EXPECT_EQ(blob_top_data_->num(), 3);
EXPECT_EQ(blob_top_data_->channels(), 6);
EXPECT_EQ(blob_top_data_->height(), 1);
EXPECT_EQ(blob_top_data_->width(), 1);
EXPECT_EQ(blob_top_label_->num(), 3);
EXPECT_EQ(blob_top_label_->channels(), 1);
EXPECT_EQ(blob_top_label_->height(), 1);
EXPECT_EQ(blob_top_label_->width(), 1);
// delta selects which half of the dataset this iteration should contain:
// even iterations rows 0-2, odd iterations rows 3-5.
int delta = 0;
for (int iter = 0; iter < 100; ++iter) {
layer.Forward(blob_bottom_vec_, blob_top_vec_);
if (iter % 2) {
delta = 3;
} else {
delta = 0;
}
for (int i = 0; i < 3; ++i) {
EXPECT_EQ(i + delta, blob_top_label_->cpu_data()[i]);
}
// NOTE(review): the expected row offsets below assume the fixture gives
// row r exactly (r + 1) non-zeros — confirm against the test dataset.
EXPECT_EQ(0, blob_top_data_->cpu_ptr()[0]);
if (delta == 0) {
EXPECT_EQ(1, blob_top_data_->cpu_ptr()[1]);
EXPECT_EQ(3, blob_top_data_->cpu_ptr()[2]);
EXPECT_EQ(6, blob_top_data_->cpu_ptr()[3]);
} else {
EXPECT_EQ(4, blob_top_data_->cpu_ptr()[1]);
EXPECT_EQ(9, blob_top_data_->cpu_ptr()[2]);
EXPECT_EQ(15, blob_top_data_->cpu_ptr()[3]);
}
// Within each row, value j is expected to be j+1 at column index j.
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < i + delta; ++j) {
EXPECT_EQ(j+1,
blob_top_data_->cpu_data()[blob_top_data_->cpu_ptr()[i]+j])
<< "debug data: iter " << iter << " i " << i << " j " << j;
EXPECT_EQ(j,
blob_top_data_->cpu_indices()[blob_top_data_->cpu_ptr()[i]+j])
<< "debug indices: iter " << iter << " i " << i << " j " << j;
}
}
}
}
示例14: TestReadCropTrainSequenceSeeded
// Checks that random cropping/mirroring during TRAIN is reproducible:
// reseeding the Caffe RNG with the same seed before a second layer instance
// must regenerate exactly the same crop sequence as the first.
void TestReadCropTrainSequenceSeeded() {
LayerParameter param;
param.set_phase(TRAIN);
DataParameter* data_param = param.mutable_data_param();
data_param->set_batch_size(5);
data_param->set_source(filename_->c_str());
data_param->set_backend(backend_);
TransformationParameter* transform_param =
param.mutable_transform_param();
transform_param->set_crop_size(1);
transform_param->set_mirror(true);
// Get crop sequence with Caffe seed 1701.
Caffe::set_random_seed(seed_);
vector<vector<Dtype> > crop_sequence;
{
DataLayer<Dtype> layer1(param);
layer1.SetUp(blob_bottom_vec_, blob_top_vec_);
for (int iter = 0; iter < 2; ++iter) {
layer1.Forward(blob_bottom_vec_, blob_top_vec_);
// Labels are deterministic (0..4) regardless of cropping.
for (int i = 0; i < 5; ++i) {
EXPECT_EQ(i, blob_top_label_->cpu_data()[i]);
}
// Record the cropped data values (5 items x 2 values each).
vector<Dtype> iter_crop_sequence;
for (int i = 0; i < 5; ++i) {
for (int j = 0; j < 2; ++j) {
iter_crop_sequence.push_back(
blob_top_data_->cpu_data()[i * 2 + j]);
}
}
crop_sequence.push_back(iter_crop_sequence);
}
} // destroy 1st data layer and unlock the db
// Get crop sequence after reseeding Caffe with 1701.
// Check that the sequence is the same as the original.
Caffe::set_random_seed(seed_);
DataLayer<Dtype> layer2(param);
layer2.SetUp(blob_bottom_vec_, blob_top_vec_);
for (int iter = 0; iter < 2; ++iter) {
layer2.Forward(blob_bottom_vec_, blob_top_vec_);
for (int i = 0; i < 5; ++i) {
EXPECT_EQ(i, blob_top_label_->cpu_data()[i]);
}
// Every recorded value must match exactly this time.
for (int i = 0; i < 5; ++i) {
for (int j = 0; j < 2; ++j) {
EXPECT_EQ(crop_sequence[iter][i * 2 + j],
blob_top_data_->cpu_data()[i * 2 + j])
<< "debug: iter " << iter << " i " << i << " j " << j;
}
}
}
}
示例15: GetInvPoolingLayer
// Creates an inverse-pooling (unpooling) layer. Only the CAFFE engine is
// supported here; DEFAULT resolves to CAFFE.
shared_ptr<Layer<Dtype> > GetInvPoolingLayer(const LayerParameter& param) {
  PoolingParameter_Engine engine = param.pooling_param().engine();
  if (engine == PoolingParameter_Engine_DEFAULT) {
    engine = PoolingParameter_Engine_CAFFE;
  }
  if (engine == PoolingParameter_Engine_CAFFE) {
    return shared_ptr<Layer<Dtype> >(new InvPoolingLayer<Dtype>(param));
  } else {
    LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
  }
  // Unreachable: LOG(FATAL) aborts. Present to silence missing-return
  // warnings, matching GetTanHLayer in this file.
  return shared_ptr<Layer<Dtype> >();
}