This article collects representative C++ usage examples of the NetParameter::layer method. If you are wondering what NetParameter::layer does or how to call it, the curated examples below should help; you can also explore further usage examples of the enclosing class NetParameter.
Nine code examples of NetParameter::layer are shown below, ordered by popularity by default.
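Before diving into the examples, here is a minimal sketch of the basic pattern they all share: parse a prototxt into a NetParameter and iterate its layers with layer_size()/layer(). The helper name ListLayers and the file path are illustrative assumptions; ReadProtoFromTextFile comes from caffe/util/io.hpp.

#include <string>
#include "caffe/proto/caffe.pb.h"   // generated NetParameter / LayerParameter
#include "caffe/util/io.hpp"        // ReadProtoFromTextFile

// Sketch: walk every layer of a parsed prototxt via NetParameter::layer().
void ListLayers(const std::string& model_file) {
  caffe::NetParameter param;
  CHECK(caffe::ReadProtoFromTextFile(model_file, &param))
      << "Failed to parse NetParameter file: " << model_file;
  for (int i = 0; i < param.layer_size(); ++i) {
    const caffe::LayerParameter& layer = param.layer(i);  // read-only accessor
    LOG(INFO) << i << ": " << layer.name() << " (" << layer.type() << ")";
  }
}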
Example 1: NetNeedsBatchNormUpgrade
bool NetNeedsBatchNormUpgrade(const NetParameter& net_param) {
for (int i = 0; i < net_param.layer_size(); ++i) {
// Check if BatchNorm layers declare three parameters, as required by
// the previous BatchNorm layer definition.
if (net_param.layer(i).type() == "BatchNorm"
&& net_param.layer(i).param_size() == 3) {
return true;
}
}
return false;
}
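A hedged usage sketch for Example 1: the check is typically run right after parsing a legacy prototxt, and the companion routine UpgradeNetBatchNorm (also declared in Caffe's upgrade_proto.hpp) rewrites the offending layers. The wrapper function and file path below are illustrative.

#include <string>
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/io.hpp"
#include "caffe/util/upgrade_proto.hpp"   // NetNeedsBatchNormUpgrade / UpgradeNetBatchNorm

// Sketch: detect and upgrade legacy three-param BatchNorm definitions.
void MaybeUpgradeBatchNorm(const std::string& path, caffe::NetParameter* param) {
  CHECK(caffe::ReadProtoFromTextFile(path, param))
      << "Failed to parse NetParameter file: " << path;
  if (caffe::NetNeedsBatchNormUpgrade(*param)) {
    LOG(INFO) << "Legacy BatchNorm layers found; upgrading.";
    caffe::UpgradeNetBatchNorm(param);  // drops the obsolete param fields
  }
}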
Example 2: filterNet
void Net<Dtype>::filterNet(const NetParameter& param, NetParameter* filtered_param){
NetState state(param.state());
filtered_param->CopyFrom(param);
// remove all layer params and then filter
filtered_param->clear_layer();
for (int i = 0; i < param.layer_size(); i++){
const LayerParameter& layer_param = param.layer(i);
const string& layer_name = layer_param.name();
  // a layer usually has no include/exclude rules
CHECK(layer_param.include_size() == 0 || layer_param.exclude_size() == 0)
<< "Specify either include or exclude rules.";
bool layer_included = (layer_param.include_size() == 0);
  // start as 'included' and cancel it if any exclude rule is met
  for (int j = 0; layer_included && j < layer_param.exclude_size(); j++){
if (stateMeetRule(state, layer_param.exclude(j), layer_name)){
// cancel 'included'
layer_included = false;
}
}
  // if currently excluded, re-include the layer when any include rule is met
  for (int j = 0; !layer_included && j < layer_param.include_size(); j++){
if (stateMeetRule(state, layer_param.include(j), layer_name)){
// cancel 'excluded'
layer_included = true;
}
}
// copy the included layer to filtered_param
if (layer_included) filtered_param->add_layer()->CopyFrom(layer_param);
}
}
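To make the include/exclude rules that filterNet evaluates concrete, here is a hedged sketch that marks one layer as TEST-only; when the NetState phase is TRAIN, a filterNet-style pass would drop it. The field names come from caffe.proto (NetParameter::state, LayerParameter::include, NetStateRule::phase); the helper itself is illustrative.

#include "caffe/proto/caffe.pb.h"

// Sketch: an Accuracy layer kept only when the net state is TEST.
caffe::NetParameter MakeFilterDemo() {
  caffe::NetParameter param;
  param.mutable_state()->set_phase(caffe::TRAIN);  // the state filterNet reads

  caffe::LayerParameter* acc = param.add_layer();
  acc->set_name("accuracy");
  acc->set_type("Accuracy");
  acc->add_include()->set_phase(caffe::TEST);      // include rule: TEST only
  return param;
}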
Example 3: NGNet::Init
void NGNet::Init( ) {
input_layer_top_idx_ = 0;
output_layer_top_idx_ = 0;
/* Load the network. */
net_.reset(new Net<float>(model_file_, TEST));
NetParameter param;
  CHECK(ReadProtoFromTextFile(model_file_, &param))
<< "Failed to parse NetParameter file: " << model_file_;
for (int ip = 0; ip < param.layer_size(); ip++) {
    LayerParameter* layer_param = param.mutable_layer(ip);  // mutate param in place; a modified copy would be discarded
    if (layer_param->has_inner_product_param()) {
      InnerProductParameter* inner_product_param = layer_param->mutable_inner_product_param();
int num_output = inner_product_param->num_output();
if (num_output > 0) {
inner_product_param->set_num_output(num_output * 2);
}
}
}
  // param.mutable_state()->set_phase(phase);
Net<float> * new_net = new Net<float>(param);
net_->CopyTrainedLayersFrom(trained_file_);
int input_layer_idx = -1;
for (size_t layer_id = 0; layer_id < net_->layer_names().size(); ++layer_id) {
if (net_->layer_names()[layer_id] == input_layer_name_) {
input_layer_idx = layer_id;
break;
}
}
if (input_layer_idx == -1) {
LOG(FATAL) << "Unknown layer name " << input_layer_name_;
}
input_layer_idx_ = input_layer_idx;
input_layer_top_idx_ = 0;
Blob<float>* input_layer = net_->top_vecs()[input_layer_idx_][input_layer_top_idx_];
input_layer_dim_ = input_layer->shape(1);
int output_layer_idx = -1;
for (size_t layer_id = 0; layer_id < net_->layer_names().size(); ++layer_id) {
if (net_->layer_names()[layer_id] == output_layer_name_) {
output_layer_idx = layer_id;
break;
}
}
if (output_layer_idx == -1) {
LOG(FATAL) << "Unknown layer name " << output_layer_name_;
}
output_layer_idx_ = output_layer_idx;
}
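Because Example 3 edits layer fields, note that in-place modification must go through mutable_layer(): layer() returns a const reference, so assigning it to a local LayerParameter makes a copy whose changes never reach the NetParameter. A hedged standalone sketch of the same num_output doubling:

#include "caffe/proto/caffe.pb.h"

// Sketch: double every InnerProduct layer's num_output inside a NetParameter.
void DoubleInnerProductOutputs(caffe::NetParameter* param) {
  for (int i = 0; i < param->layer_size(); ++i) {
    caffe::LayerParameter* layer = param->mutable_layer(i);  // mutable, not a copy
    if (!layer->has_inner_product_param()) continue;
    caffe::InnerProductParameter* ip = layer->mutable_inner_product_param();
    if (ip->num_output() > 0) {
      ip->set_num_output(ip->num_output() * 2);
    }
  }
}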
Example 4: appendBottom
int Net<Dtype>::appendBottom(const NetParameter& param, const int layer_id, const int bottom_id,
  set<string>* available_blobs, map<string, int>* blob_name_to_idx){
const LayerParameter& layer_param = param.layer(layer_id);
const string& blob_name = layer_param.bottom(bottom_id);
if (!available_blobs->count(blob_name))
LOG(FATAL) << "Unknown bottom blob: " << blob_name<< " at layer: " << layer_param.name() << ".";
// a bottom blob must share a top blob
const int blob_id = (*blob_name_to_idx)[blob_name];
LOG_IF(INFO, Dragon::get_root_solver())
<< layer_param.name() << "[Layer-Accept] <- " << blob_name << " [Blob-Name]";
bottom_vecs[layer_id].push_back(blobs[blob_id].get());
bottom_id_vecs[layer_id].push_back(blob_id);
  // a top blob may be consumed as a bottom only once;
  // SplitLayer is used to duplicate a top blob into several copies when more consumers exist
available_blobs->erase(blob_name);
  bool need_bp = true;
  // blobs_need_backward[blob_id] defaults to false (set true later if backward is needed)
  bottoms_need_backward[layer_id].push_back(need_bp && blobs_need_backward[blob_id]);
return blob_id;
}
Example 5: CopyTrainedLayersFrom
void ApolloNet<Dtype>::CopyTrainedLayersFrom(const NetParameter& param) {
int num_source_layers = param.layer_size();
for (int i = 0; i < num_source_layers; ++i) {
const LayerParameter& source_layer = param.layer(i);
const string& source_layer_name = source_layer.name();
if (layers_map_.find(source_layer_name) == layers_map_.end()) {
LOG(INFO) << "Ignoring source layer " << source_layer_name;
continue;
}
LOG(INFO) << "Copying source layer " << source_layer_name;
vector<shared_ptr<Blob<Dtype> > >& target_blobs =
layers_map_[source_layer_name]->blobs();
ASSERT(target_blobs.size() == source_layer.blobs_size(),
"Incompatible number of blobs for layer " << source_layer_name);
for (int j = 0; j < target_blobs.size(); ++j) {
const bool kReshape = false;
target_blobs[j]->FromProto(source_layer.blobs(j), kReshape);
}
}
}
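A hedged usage sketch for Example 5: deserialize a trained model into a NetParameter with ReadProtoFromBinaryFile (from caffe/util/io.hpp) and hand it to the CopyTrainedLayersFrom overload shown above. The wrapper and the weights path are illustrative, and ApolloNet is used only with the interface visible here.

#include <string>
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/io.hpp"   // ReadProtoFromBinaryFile

// Sketch: load serialized weights and copy them layer-by-layer into the net.
template <typename Dtype>
void LoadWeights(ApolloNet<Dtype>* net, const std::string& weights_file) {
  caffe::NetParameter param;
  CHECK(caffe::ReadProtoFromBinaryFile(weights_file, &param))
      << "Failed to parse weights file: " << weights_file;
  net->CopyTrainedLayersFrom(param);  // layers absent from the net are ignored
}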
Example 6: InsertSplits
void InsertSplits(const NetParameter& param, NetParameter* param_split) {
// Initialize by copying from the input NetParameter.
param_split->CopyFrom(param);
param_split->clear_layer();
map<string, pair<int, int> > blob_name_to_last_top_idx;
map<pair<int, int>, pair<int, int> > bottom_idx_to_source_top_idx;
map<pair<int, int>, int> top_idx_to_bottom_count;
map<pair<int, int>, float> top_idx_to_loss_weight;
map<pair<int, int>, int> top_idx_to_bottom_split_idx;
map<int, string> layer_idx_to_layer_name;
for (int i = 0; i < param.layer_size(); ++i) {
const LayerParameter& layer_param = param.layer(i);
layer_idx_to_layer_name[i] = layer_param.name();
for (int j = 0; j < layer_param.bottom_size(); ++j) {
const string& blob_name = layer_param.bottom(j);
if (blob_name_to_last_top_idx.find(blob_name) ==
blob_name_to_last_top_idx.end()) {
LOG(FATAL) << "Unknown bottom blob '" << blob_name << "' (layer '"
<< layer_param.name() << "', bottom index " << j << ")";
}
const pair<int, int>& bottom_idx = make_pair(i, j);
const pair<int, int>& top_idx = blob_name_to_last_top_idx[blob_name];
bottom_idx_to_source_top_idx[bottom_idx] = top_idx;
++top_idx_to_bottom_count[top_idx];
}
for (int j = 0; j < layer_param.top_size(); ++j) {
const string& blob_name = layer_param.top(j);
blob_name_to_last_top_idx[blob_name] = make_pair(i, j);
}
// A use of a top blob as a loss should be handled similarly to the use of
// a top blob as a bottom blob to another layer.
const int last_loss =
std::min(layer_param.loss_weight_size(), layer_param.top_size());
for (int j = 0; j < last_loss; ++j) {
const string& blob_name = layer_param.top(j);
const pair<int, int>& top_idx = blob_name_to_last_top_idx[blob_name];
top_idx_to_loss_weight[top_idx] = layer_param.loss_weight(j);
if (top_idx_to_loss_weight[top_idx]) {
++top_idx_to_bottom_count[top_idx];
}
}
}
for (int i = 0; i < param.layer_size(); ++i) {
LayerParameter* layer_param = param_split->add_layer();
layer_param->CopyFrom(param.layer(i));
// Replace any shared bottom blobs with split layer outputs.
for (int j = 0; j < layer_param->bottom_size(); ++j) {
const pair<int, int>& top_idx =
bottom_idx_to_source_top_idx[make_pair(i, j)];
const int split_count = top_idx_to_bottom_count[top_idx];
if (split_count > 1) {
const string& layer_name = layer_idx_to_layer_name[top_idx.first];
const string& blob_name = layer_param->bottom(j);
layer_param->set_bottom(j, SplitBlobName(layer_name,
blob_name, top_idx.second, top_idx_to_bottom_split_idx[top_idx]++));
}
}
    // Create a split layer for any top blob that is used as a bottom blob
    // by other layers more than once.
for (int j = 0; j < layer_param->top_size(); ++j) {
const pair<int, int>& top_idx = make_pair(i, j);
const int split_count = top_idx_to_bottom_count[top_idx];
if (split_count > 1) {
const string& layer_name = layer_idx_to_layer_name[i];
const string& blob_name = layer_param->top(j);
LayerParameter* split_layer_param = param_split->add_layer();
const float loss_weight = top_idx_to_loss_weight[top_idx];
ConfigureSplitLayer(layer_name, blob_name, j, split_count,
loss_weight, split_layer_param);
if (loss_weight) {
layer_param->clear_loss_weight();
top_idx_to_bottom_split_idx[top_idx]++;
}
}
}
}
}
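A hedged sketch of Example 6 in action: when one top blob feeds two layers, InsertSplits (declared in caffe/util/insert_splits.hpp) adds a Split layer for it. The layer names and types below are illustrative.

#include <iostream>
#include <string>
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/insert_splits.hpp"

// Sketch: the top blob "data" is consumed by two layers, so a split is inserted.
void SplitDemo() {
  caffe::NetParameter net;
  caffe::LayerParameter* data = net.add_layer();
  data->set_name("data");
  data->set_type("DummyData");
  data->add_top("data");
  for (int k = 0; k < 2; ++k) {
    caffe::LayerParameter* ip = net.add_layer();
    ip->set_name("ip" + std::to_string(k));
    ip->set_type("InnerProduct");
    ip->add_bottom("data");                 // both layers consume the same top
    ip->add_top("ip" + std::to_string(k));
  }
  caffe::NetParameter split;
  caffe::InsertSplits(net, &split);
  std::cout << "layers before: " << net.layer_size()
            << ", after: " << split.layer_size() << std::endl;  // one extra Split layer
}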
Example 7: appendTop
void Net<Dtype>::appendTop(const NetParameter& param, const int layer_id, const int top_id,
  set<string>* available_blobs, map<string, int>* blob_name_to_idx){
boost::shared_ptr<LayerParameter> layer_param(
layer_id >= 0 ? new LayerParameter(param.layer(layer_id)) : NULL);
  // use (layer_id, top_id) or (-1, top_id) to resolve the blob name
  const string& blob_name = layer_param ?
    (top_id < layer_param->top_size() ? layer_param->top(top_id) : "(automatic)") : param.input(top_id);
  // in-place case, e.g.:
  //   I0721 10:38:16.722070  4692 net.cpp:84] relu1 <- conv1
  //   I0721 10:38:16.722082  4692 net.cpp:98] relu1 -> conv1 (in-place)
  // check whether the blob appears at the same position in both bottom and top
if (blob_name_to_idx && layer_param && top_id < layer_param->bottom_size()
&& blob_name == layer_param->bottom(top_id)){
LOG_IF(INFO, Dragon::get_root_solver())
<< layer_param->name() << "[Layer-Produce]->" << blob_name << " [Blob-Name] (in-place)";
// add into this layer's top blob using blob_name
top_vecs[layer_id].push_back(blobs[(*blob_name_to_idx)[blob_name]].get());
// log the id
top_id_vecs[layer_id].push_back((*blob_name_to_idx)[blob_name]);
}
else if (blob_name_to_idx && (*blob_name_to_idx).count(blob_name) ){
LOG(FATAL) << "Top blob:" << blob_name << " propogate from multiple sources.";
}
// normal top blob stuffing
else{
// debug info
if (Dragon::get_root_solver()){
if (layer_param) LOG(INFO) << layer_param->name() << "[Layer-Produce] ->" << blob_name << " [Blob-Name]";
      // special case: net-level input blobs, used mainly when inspecting a Net's structure;
      // they specify no data source, cannot be trained or tested,
      // and do not belong to any layer
      // (see insert_splits.cpp / InsertSplits() for details)
      else LOG(INFO) << "Input " << top_id << " [Blob-Code] -> " << blob_name << " [Blob-Name]";
}
// allocate a null blob at first
boost::shared_ptr<Blob<Dtype>> ptr_blob(new Blob<Dtype>());
// store global blob infos
const int blob_id = blobs.size();
blobs.push_back(ptr_blob);
blobs_name.push_back(blob_name);
blobs_need_backward.push_back(false);
      // map the blob name to its index;
      // this records that the top blob can later be bound to a bottom by name,
      // and the duplicate check above uses this map to forbid a top blob with multiple producers
if (blob_name_to_idx) (*blob_name_to_idx)[blob_name] = blob_id;
      // reshape virtual input blobs here only,
      // because they are not owned by a DataLayer (which normally provides reshape/transform)
if (layer_id == -1){
ptr_blob->reshape(param.input_shape(top_id));
// store solely for virtual input blobs
net_input_blobs.push_back(ptr_blob.get());
net_input_blob_indices.push_back(blob_id);
}
else{
top_vecs[layer_id].push_back(ptr_blob.get());
top_id_vecs[layer_id].push_back(blob_id);
}
}
  // a set listing all existing top blobs
if (available_blobs) available_blobs->insert(blob_name);
}
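The in-place branch at the top of appendTop fires when a layer names the same blob as both bottom and top (the relu1 -> conv1 case in the log excerpt). A hedged sketch of building such a layer on the NetParameter side:

#include "caffe/proto/caffe.pb.h"

// Sketch: an in-place ReLU declares the same blob as bottom and top.
caffe::LayerParameter MakeInPlaceRelu() {
  caffe::LayerParameter relu;
  relu.set_name("relu1");
  relu.set_type("ReLU");
  relu.add_bottom("conv1");
  relu.add_top("conv1");   // same name as the bottom -> handled as in-place
  return relu;
}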
Example 8: appendParam
void Net<Dtype>::appendParam(const NetParameter& param, const int layer_id, const int param_id){
const LayerParameter& layer_param = param.layer(layer_id);
Layer<Dtype>* layer = layers[layer_id].get();
const int param_size = layer_param.param_size();
// default name="" (not set)
string param_name = param_id<param_size? layer_param.param(param_id).name() : "";
// has name
if (param_name.size()) param_display_names.push_back(param_name);
// set param_id as name
else{
ostringstream display_name;
display_name << param_id;
param_display_names.push_back(display_name.str());
}
// each param blob has a net id(both weight and bias)
const int net_param_id = param_blobs.size();
// add param blob which can be used by a net id
param_blobs.push_back(layer->getBlobs()[param_id]);
// store a net id
// param_id_vecs[layer_id][param_id] can get the net_param_id
param_id_vecs[layer_id].push_back(net_param_id);
  // store the original id (x-th layer / y-th param);
  // param_layer_indices[net_param_id] gives back (layer_id, param_id)
param_layer_indices.push_back(make_pair(layer_id, param_id));
ParamSpec default_hyperparameter;
const ParamSpec* hyperparameter = param_id < param_size ?
&layer_param.param(param_id) : &default_hyperparameter;
  // the param has no name, or its name has not been registered before
if (!param_size || !param_name.size() ||
(param_name.size() && !param_names_index.count(param_name))){
param_owners.push_back(-1);
// has a name(non-empty) but has not logged before
if (param_name.size()) param_names_index[param_name] = net_param_id;
const int learnable_param_id = learnable_params.size();
learnable_params.push_back(param_blobs[net_param_id].get());
learnable_param_ids.push_back(learnable_param_id);
has_params_lr.push_back(hyperparameter->has_lr_mult());
has_params_decay.push_back(hyperparameter->has_decay_mult());
params_lr.push_back(hyperparameter->lr_mult());
params_decay.push_back(hyperparameter->decay_mult());
}
else{
// has a name(non-empty) and has logged before
// it means to share this param and we need get the owner id
const int owner_net_param_id = param_names_index[param_name];
param_owners.push_back(owner_net_param_id);
const pair<int, int>& owner_index = param_layer_indices[owner_net_param_id];
const int owner_layer_id = owner_index.first;
const int owner_param_id = owner_index.second;
    LOG_IF(INFO, Dragon::get_root_solver())
      << "Share parameter: " << param_name << " owned by layer: "
      << layer_names[owner_layer_id] << " param index: " << owner_param_id;
Blob<Dtype>* this_blob = param_blobs[net_param_id].get();
Blob<Dtype>* owner_blob = param_blobs[owner_net_param_id].get();
CHECK(this_blob);CHECK(owner_blob);
// check before sharing
if (layer_param.param(param_id).share_mode() == ParamSpec_DimCheckMode_PERMISSIVE_MODE)
CHECK_EQ(this_blob->count(), owner_blob->count());
else CHECK(this_blob->shape() == owner_blob->shape());
    // reuse the owner's learnable_param_id (shared params map to a single learnable blob)
const int learnable_param_id = learnable_param_ids[owner_net_param_id];
// store parent id
learnable_param_ids.push_back(learnable_param_id);
// check lr_mult
if (hyperparameter->has_lr_mult()){
if (has_params_lr[learnable_param_id])
CHECK_EQ(hyperparameter->lr_mult(), params_lr[learnable_param_id])
<< "Shared param: " << param_name << " has mismatched lr_mult.";
else{
has_params_lr[learnable_param_id] = true;
params_lr[learnable_param_id] = hyperparameter->lr_mult();
}
}
// check decay_mult
if (hyperparameter->has_decay_mult()){
if (has_params_decay[learnable_param_id])
CHECK_EQ(hyperparameter->decay_mult(), params_decay[learnable_param_id])
<< "Shared param: " << param_name << " has mismatched decay_mult.";
else{
has_params_decay[learnable_param_id] = true;
params_decay[learnable_param_id] = hyperparameter->decay_mult();
}
}
}
}
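The sharing branch of appendParam triggers when two layers give their ParamSpec the same non-empty name. A hedged sketch of setting that up on the NetParameter side; the layer names and the 128-output size are placeholders.

#include <string>
#include "caffe/proto/caffe.pb.h"

// Sketch: two InnerProduct layers sharing one weight blob via a named ParamSpec.
void AddSharedFcLayers(caffe::NetParameter* net) {
  for (int k = 0; k < 2; ++k) {
    caffe::LayerParameter* fc = net->add_layer();
    fc->set_name("fc_shared_" + std::to_string(k));
    fc->set_type("InnerProduct");
    fc->mutable_inner_product_param()->set_num_output(128);
    caffe::ParamSpec* w = fc->add_param();
    w->set_name("fc_weights");   // same name => appendParam resolves to one owner
    w->set_lr_mult(1.0f);        // must match across sharers (checked above)
    fc->add_param();             // bias spec left unnamed, so it stays layer-local
  }
}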
Example 9: insertSplits
void insertSplits(const NetParameter& param, NetParameter* splitted_param){
splitted_param->CopyFrom(param);
splitted_param->clear_layer();
// pair<layer_idx,blob_idx>
map<string, pair<int, int> > blob_name_to_last_top_idx;
map<pair<int, int>, pair<int, int> > bottom_idx_to_source_top_idx;
map<pair<int, int>, int> top_idx_to_bottom_count;
map<pair<int, int>, float> top_idx_to_loss_weight;
map<pair<int, int>, int> top_idx_to_bottom_split_idx;
map<int, string> layer_idx_to_layer_name;
layer_idx_to_layer_name[-1] = "input";
  // scan all net-level input blobs and stuff them into a virtual layer named "input" at index -1
  // input blobs do not belong to any layer, so they are collected in this virtual layer
  // they are usually used only for inspecting a Net (e.g. examples\cifar10\cifar10_full.prototxt):
  //   input: "data"        <- declares a temporary data blob
  //   input_shape {        <- declares its shape
  //     dim: 1
  //     dim: 3
  //     dim: 32
  //     dim: 32
  //   }
  // note: input blobs should not be used in train/test prototxt files,
  // because they specify no valid data source; treat them as viewing-only placeholders
for (int i = 0; i < param.input_size(); i++){
const string& blob_name = param.input(i);
blob_name_to_last_top_idx[blob_name] = make_pair(-1, i);
}
for (int i = 0; i < param.layer_size(); i++){
const LayerParameter& layer_param = param.layer(i);
// bind layer idx to layer name
layer_idx_to_layer_name[i] = layer_param.name();
    // a layer may have several bottom blobs
for (int j = 0; j < layer_param.bottom_size(); j++){
const string& blob_name = layer_param.bottom(j);
      // every bottom blob must share its name with a previously declared top blob
if (!blob_name_to_last_top_idx.count(blob_name)){
LOG(FATAL) << "Unknown bottom blob: " << blob_name
<< " at layer: " << layer_param.name() << ".";
}
const pair<int, int>& bottom_idx = make_pair(i, j);
const pair<int, int>& top_idx = blob_name_to_last_top_idx[blob_name];
      // a bottom resolves to the most recent top with the same name
      // (walk backward from the bottom to its source top);
      // note that the top must be declared before the bottom,
      // otherwise the bottom binds to the virtual input layer at index -1
bottom_idx_to_source_top_idx[bottom_idx] = top_idx;
top_idx_to_bottom_count[top_idx]++;
}
// update top name's position for following bottom names
for (int j = 0; j < layer_param.top_size(); j++){
const string& blob_name = layer_param.top(j);
blob_name_to_last_top_idx[blob_name] = make_pair(i, j);
}
const int last_loss = min(layer_param.loss_weight_size(), layer_param.top_size());
    // only relevant for layers that set loss_weight (i.e. loss layers)
for (int j = 0; j < last_loss; j++){
const string& blob_name = layer_param.top(j);
      // blob_name_to_last_top_idx was already updated with this layer's tops
const pair<int, int>& top_idx = blob_name_to_last_top_idx[blob_name];
top_idx_to_loss_weight[top_idx] = layer_param.loss_weight(j);
      // a top with a non-zero loss weight counts as one more consumer
      if (top_idx_to_loss_weight[top_idx]) top_idx_to_bottom_count[top_idx]++;
}
}
  // special case: an input blob in the virtual layer consumed by more than one bottom
  // must be split as well
for (int i = 0; i < param.input_size(); i++){
const int split_count = top_idx_to_bottom_count[make_pair(-1, i)];
if (split_count > 1){
// "input"
const string& layer_name = layer_idx_to_layer_name[-1];
const string& blob_name = param.input(i);
// push_back a new param
LayerParameter* split_layer_param = splitted_param->add_layer();
const float kZeroLossWeight = 0;
configureSplitLayer(layer_name, blob_name, i, split_count, kZeroLossWeight, split_layer_param);
}
}
for (int i = 0; i < param.layer_size(); i++){
// push_back a new param
LayerParameter* layer_param = splitted_param->add_layer();
layer_param->CopyFrom(param.layer(i));
for (int j = 0; j < layer_param->bottom_size(); j++){
      // look up the source top for this bottom
      const pair<int, int>& top_idx = bottom_idx_to_source_top_idx[make_pair(i, j)];
      // how many consumers (bottoms and losses) that top has
      const int split_count = top_idx_to_bottom_count[top_idx];
if (split_count > 1){
        // name of the layer that produced the source top
const string& layer_name = layer_idx_to_layer_name[top_idx.first];
const string& blob_name = layer_param->bottom(j);
        // e.g. conv1 => conv1_conv1_0_split_0;
        // increment the split index so the next consumer gets _split_1, _split_2, ...
layer_param->set_bottom(j, splitBlobName(layer_name, blob_name, top_idx.second,
top_idx_to_bottom_split_idx[top_idx]++));
}
}
for (int j = 0; j < layer_param->top_size(); j++){
const pair<int, int>& top_idx = make_pair(i, j);
      //......... part of the code is omitted here .........