This article collects typical usage examples of the C++ method NetParameter::layer_size. If you are wondering how NetParameter::layer_size is used in C++, or are looking for concrete examples of calling it, the curated code samples below may help. You can also explore further usage examples of the enclosing class, NetParameter.
The following shows 10 code examples of the NetParameter::layer_size method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
Example 1: state
void Net<Dtype>::filterNet(const NetParameter& param, NetParameter* filtered_param) {
  NetState state(param.state());
  filtered_param->CopyFrom(param);
  // remove all layer params and then filter
  filtered_param->clear_layer();
  for (int i = 0; i < param.layer_size(); i++) {
    const LayerParameter& layer_param = param.layer(i);
    const string& layer_name = layer_param.name();
    // usually a layer does not have any include/exclude rules
    CHECK(layer_param.include_size() == 0 || layer_param.exclude_size() == 0)
        << "Specify either include or exclude rules.";
    bool layer_included = (layer_param.include_size() == 0);
    // assume 'included' and check whether any exclude rule is met
    for (int j = 0; layer_included && j < layer_param.exclude_size(); j++) {
      if (stateMeetRule(state, layer_param.exclude(j), layer_name)) {
        // cancel 'included'
        layer_included = false;
      }
    }
    // assume 'excluded' and check whether any include rule is met
    for (int j = 0; !layer_included && j < layer_param.include_size(); j++) {
      if (stateMeetRule(state, layer_param.include(j), layer_name)) {
        // cancel 'excluded'
        layer_included = true;
      }
    }
    // copy the included layer to filtered_param
    if (layer_included) filtered_param->add_layer()->CopyFrom(layer_param);
  }
}
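The helper stateMeetRule is not shown in the snippet above; in stock Caffe the equivalent is Net<Dtype>::StateMeetsRule, which compares the NetState against a NetStateRule. Below is a minimal sketch of that idea; the rule fields checked (phase, min_level, max_level, stage, not_stage) follow Caffe's NetStateRule message, and logging is omitted, so treat the details as an assumption rather than the exact implementation used here.

// Simplified sketch: returns true when `state` satisfies `rule`.
// `layer_name` is only used for log messages in the real implementation.
bool stateMeetRule(const NetState& state, const NetStateRule& rule,
                   const std::string& layer_name) {
  if (rule.has_phase() && rule.phase() != state.phase()) return false;
  if (rule.has_min_level() && state.level() < rule.min_level()) return false;
  if (rule.has_max_level() && state.level() > rule.max_level()) return false;
  // every required stage must be present in the NetState
  for (int i = 0; i < rule.stage_size(); ++i) {
    bool has_stage = false;
    for (int j = 0; j < state.stage_size(); ++j) {
      if (rule.stage(i) == state.stage(j)) { has_stage = true; break; }
    }
    if (!has_stage) return false;
  }
  // every excluded stage must be absent from the NetState
  for (int i = 0; i < rule.not_stage_size(); ++i) {
    for (int j = 0; j < state.stage_size(); ++j) {
      if (rule.not_stage(i) == state.stage(j)) return false;
    }
  }
  return true;
}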
Example 2:
SolverParameter ModelServer<Dtype>::prepare_model() {
  NetParameter net;
  solver->net()->ToProto(&net);
  for (int i = 0; i < net.layer_size(); ++i) {
    LayerParameter& layer = *net.mutable_layer(i);
    layer.clear_blobs();
    if ((layer.type().find("Data") != std::string::npos)
        && (layer.has_remote_data_param())) {
      layer.set_type("RemoteData");
      for (int j = 0; j < layer.top_size(); ++j) {
        *layer.mutable_remote_data_param()->add_shape()
            = blob_shape_by_name(layer.top(j));
      }
    }
  }
  SolverParameter ret = solver->param();
  ret.clear_net();
  ret.clear_net_param();
  ret.clear_test_net();
  ret.clear_test_net_param();
  ret.clear_train_net();
  *ret.mutable_train_net_param() = net;
  return ret;
}
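The returned SolverParameter carries the full (blob-free) network definition inline in train_net_param, so it can be shipped to a remote worker as a single protobuf message. A minimal usage sketch follows; the `server` object and the serialization step are assumptions for illustration, not part of the snippet above.

// Hypothetical usage: serialize the prepared model for transmission.
SolverParameter model = server.prepare_model();  // `server` is assumed to exist
std::string buffer;
model.SerializeToString(&buffer);  // standard protobuf serialization
// `buffer` can now be sent over the wire; the worker side would call
// SolverParameter received; received.ParseFromString(buffer);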
Example 3: Init
void NGNet::Init() {
  input_layer_top_idx_ = 0;
  output_layer_top_idx_ = 0;
  /* Load the network. */
  net_.reset(new Net<float>(model_file_, TEST));
  NetParameter param;
  CHECK(ReadProtoFromTextFile(model_file_, &param))
      << "Failed to parse NetParameter file: " << model_file_;
  for (int ip = 0; ip < param.layer_size(); ip++) {
    // use a mutable pointer so the change below is actually applied to `param`
    LayerParameter* layer_param = param.mutable_layer(ip);
    if (layer_param->has_inner_product_param()) {
      InnerProductParameter* inner_product_param = layer_param->mutable_inner_product_param();
      int num_output = inner_product_param->num_output();
      if (num_output > 0) {
        inner_product_param->set_num_output(num_output * 2);
      }
    }
  }
  // param.mutable_state()->set_phase(phase);
  Net<float>* new_net = new Net<float>(param);
  net_->CopyTrainedLayersFrom(trained_file_);
  int input_layer_idx = -1;
  for (size_t layer_id = 0; layer_id < net_->layer_names().size(); ++layer_id) {
    if (net_->layer_names()[layer_id] == input_layer_name_) {
      input_layer_idx = layer_id;
      break;
    }
  }
  if (input_layer_idx == -1) {
    LOG(FATAL) << "Unknown layer name " << input_layer_name_;
  }
  input_layer_idx_ = input_layer_idx;
  input_layer_top_idx_ = 0;
  Blob<float>* input_layer = net_->top_vecs()[input_layer_idx_][input_layer_top_idx_];
  input_layer_dim_ = input_layer->shape(1);
  int output_layer_idx = -1;
  for (size_t layer_id = 0; layer_id < net_->layer_names().size(); ++layer_id) {
    if (net_->layer_names()[layer_id] == output_layer_name_) {
      output_layer_idx = layer_id;
      break;
    }
  }
  if (output_layer_idx == -1) {
    LOG(FATAL) << "Unknown layer name " << output_layer_name_;
  }
  output_layer_idx_ = output_layer_idx;
}
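After the loop doubles num_output for every InnerProduct layer, the modified NetParameter can also be written back to disk for inspection or reuse. A small sketch, assuming Caffe's WriteProtoToTextFile helper from caffe/util/io.hpp and a hypothetical output path:

#include "caffe/util/io.hpp"  // ReadProtoFromTextFile / WriteProtoToTextFile

// Hypothetical: dump the widened network definition so the doubled
// num_output values can be inspected or reused later.
const std::string widened_model_file = "widened_model.prototxt";  // assumed path
WriteProtoToTextFile(param, widened_model_file);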
Example 4: NetNeedsBatchNormUpgrade
bool NetNeedsBatchNormUpgrade(const NetParameter& net_param) {
  for (int i = 0; i < net_param.layer_size(); ++i) {
    // Check if BatchNorm layers declare three parameters, as required by
    // the previous BatchNorm layer definition.
    if (net_param.layer(i).type() == "BatchNorm"
        && net_param.layer(i).param_size() == 3) {
      return true;
    }
  }
  return false;
}
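When this check returns true, the companion upgrade step drops the three explicit param specs from each BatchNorm layer, since the current layer definition manages its statistics blobs itself. A sketch of what that step might look like, modeled on Caffe's UpgradeNetBatchNorm; treat the exact behavior as an assumption:

// Sketch: remove the legacy three-parameter specs from BatchNorm layers.
void UpgradeNetBatchNorm(NetParameter* net_param) {
  for (int i = 0; i < net_param->layer_size(); ++i) {
    if (net_param->layer(i).type() == "BatchNorm"
        && net_param->layer(i).param_size() == 3) {
      // The old definition required three param fields (running mean,
      // variance, and bias-correction factor); the new one does not.
      net_param->mutable_layer(i)->clear_param();
    }
  }
}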
Example 5: UpgradeV1Net
bool UpgradeV1Net(const NetParameter& v1_net_param, NetParameter* net_param) {
  bool is_fully_compatible = true;
  if (v1_net_param.layer_size() > 0) {
    LOG(ERROR) << "Input NetParameter to be upgraded already specifies 'layer' "
               << "fields; these will be ignored for the upgrade.";
    is_fully_compatible = false;
  }
  net_param->CopyFrom(v1_net_param);
  net_param->clear_layers();
  net_param->clear_layer();
  for (int i = 0; i < v1_net_param.layers_size(); ++i) {
    if (!UpgradeV1LayerParameter(v1_net_param.layers(i),
                                 net_param->add_layer())) {
      LOG(ERROR) << "Upgrade of input layer " << i << " failed.";
      is_fully_compatible = false;
    }
  }
  return is_fully_compatible;
}
Example 6: UpgradeV1Net
bool UpgradeV1Net(const NetParameter& v1_net_param, NetParameter* net_param) {
  if (v1_net_param.layer_size() > 0) {
    LOG(FATAL) << "Refusing to upgrade inconsistent NetParameter input; "
               << "the definition includes both 'layer' and 'layers' fields. "
               << "The current format defines 'layer' fields with string type like "
               << "layer { type: 'Layer' ... } and not layers { type: LAYER ... }. "
               << "Manually switch the definition to 'layer' format to continue.";
  }
  bool is_fully_compatible = true;
  net_param->CopyFrom(v1_net_param);
  net_param->clear_layers();
  net_param->clear_layer();
  for (int i = 0; i < v1_net_param.layers_size(); ++i) {
    if (!UpgradeV1LayerParameter(v1_net_param.layers(i),
                                 net_param->add_layer())) {
      LOG(ERROR) << "Upgrade of input layer " << i << " failed.";
      is_fully_compatible = false;
    }
  }
  return is_fully_compatible;
}
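Both variants are normally driven by a wrapper that first detects whether a definition still uses the legacy 'layers' field. Below is a minimal sketch of such a driver; the detection helper mirrors Caffe's NetNeedsV1ToV2Upgrade, but the wrapper name and exact wiring are assumptions for illustration.

// Sketch: detect and upgrade a legacy (V1) network definition in place.
bool NetNeedsV1ToV2Upgrade(const NetParameter& net_param) {
  // Legacy definitions populate the repeated 'layers' field.
  return net_param.layers_size() > 0;
}

void UpgradeNetIfNeeded(NetParameter* param) {  // hypothetical wrapper name
  if (NetNeedsV1ToV2Upgrade(*param)) {
    NetParameter original = *param;  // keep the V1 input intact
    if (!UpgradeV1Net(original, param)) {
      LOG(ERROR) << "Net was only partially upgraded; check the log above.";
    }
  }
}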
Example 7:
void Net<Dtype>::copyTrainedLayerFrom(const NetParameter& param) {
  int num_layers = param.layer_size();
  for (int i = 0; i < num_layers; i++) {
    const LayerParameter& source_layer = param.layer(i);
    const string& source_layer_name = source_layer.name();
    int target_layer_id = 0;
    while (target_layer_id != layer_names.size() &&
           layer_names[target_layer_id] != source_layer_name) {
      target_layer_id++;
    }
    if (target_layer_id == layer_names.size()) continue;
    const vector<boost::shared_ptr<Blob<Dtype>>>& target_blobs = layers[target_layer_id]->getBlobs();
    for (int j = 0; j < target_blobs.size(); j++) {
      Blob<Dtype> source_blob;
      source_blob.FromProto(source_layer.blobs(j));
      Blob<Dtype>* target_blob = target_blobs[j].get();
      CHECK(source_blob.shape() == target_blob->shape())
          << "Incompatible shape when sharing trained params.";
      target_blob->FromProto(source_layer.blobs(j), false);
    }
  }
}
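A typical caller loads the trained weights from a binary .caffemodel file into a NetParameter first and then hands it to this function. A small usage sketch, assuming the ReadProtoFromBinaryFile helper (as declared in caffe/util/io.hpp) and a hypothetical weights path:

#include "caffe/util/io.hpp"  // ReadProtoFromBinaryFile

// Hypothetical usage: copy pretrained weights into an already-built net.
NetParameter trained;
CHECK(ReadProtoFromBinaryFile("pretrained.caffemodel", &trained))  // assumed path
    << "Failed to parse the trained NetParameter file.";
net.copyTrainedLayerFrom(trained);  // `net` is a Net<float> constructed elsewhere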
Example 8: LOG
void ApolloNet<Dtype>::CopyTrainedLayersFrom(const NetParameter& param) {
  int num_source_layers = param.layer_size();
  for (int i = 0; i < num_source_layers; ++i) {
    const LayerParameter& source_layer = param.layer(i);
    const string& source_layer_name = source_layer.name();
    if (layers_map_.find(source_layer_name) == layers_map_.end()) {
      LOG(INFO) << "Ignoring source layer " << source_layer_name;
      continue;
    }
    LOG(INFO) << "Copying source layer " << source_layer_name;
    vector<shared_ptr<Blob<Dtype> > >& target_blobs =
        layers_map_[source_layer_name]->blobs();
    ASSERT(target_blobs.size() == source_layer.blobs_size(),
           "Incompatible number of blobs for layer " << source_layer_name);
    for (int j = 0; j < target_blobs.size(); ++j) {
      const bool kReshape = false;
      target_blobs[j]->FromProto(source_layer.blobs(j), kReshape);
    }
  }
}
Example 9: InsertSplits
void InsertSplits(const NetParameter& param, NetParameter* param_split) {
  // Initialize by copying from the input NetParameter.
  param_split->CopyFrom(param);
  param_split->clear_layer();
  map<string, pair<int, int> > blob_name_to_last_top_idx;
  map<pair<int, int>, pair<int, int> > bottom_idx_to_source_top_idx;
  map<pair<int, int>, int> top_idx_to_bottom_count;
  map<pair<int, int>, float> top_idx_to_loss_weight;
  map<pair<int, int>, int> top_idx_to_bottom_split_idx;
  map<int, string> layer_idx_to_layer_name;
  for (int i = 0; i < param.layer_size(); ++i) {
    const LayerParameter& layer_param = param.layer(i);
    layer_idx_to_layer_name[i] = layer_param.name();
    for (int j = 0; j < layer_param.bottom_size(); ++j) {
      const string& blob_name = layer_param.bottom(j);
      if (blob_name_to_last_top_idx.find(blob_name) ==
          blob_name_to_last_top_idx.end()) {
        LOG(FATAL) << "Unknown bottom blob '" << blob_name << "' (layer '"
                   << layer_param.name() << "', bottom index " << j << ")";
      }
      const pair<int, int>& bottom_idx = make_pair(i, j);
      const pair<int, int>& top_idx = blob_name_to_last_top_idx[blob_name];
      bottom_idx_to_source_top_idx[bottom_idx] = top_idx;
      ++top_idx_to_bottom_count[top_idx];
    }
    for (int j = 0; j < layer_param.top_size(); ++j) {
      const string& blob_name = layer_param.top(j);
      blob_name_to_last_top_idx[blob_name] = make_pair(i, j);
    }
    // A use of a top blob as a loss should be handled similarly to the use of
    // a top blob as a bottom blob to another layer.
    const int last_loss =
        std::min(layer_param.loss_weight_size(), layer_param.top_size());
    for (int j = 0; j < last_loss; ++j) {
      const string& blob_name = layer_param.top(j);
      const pair<int, int>& top_idx = blob_name_to_last_top_idx[blob_name];
      top_idx_to_loss_weight[top_idx] = layer_param.loss_weight(j);
      if (top_idx_to_loss_weight[top_idx]) {
        ++top_idx_to_bottom_count[top_idx];
      }
    }
  }
  for (int i = 0; i < param.layer_size(); ++i) {
    LayerParameter* layer_param = param_split->add_layer();
    layer_param->CopyFrom(param.layer(i));
    // Replace any shared bottom blobs with split layer outputs.
    for (int j = 0; j < layer_param->bottom_size(); ++j) {
      const pair<int, int>& top_idx =
          bottom_idx_to_source_top_idx[make_pair(i, j)];
      const int split_count = top_idx_to_bottom_count[top_idx];
      if (split_count > 1) {
        const string& layer_name = layer_idx_to_layer_name[top_idx.first];
        const string& blob_name = layer_param->bottom(j);
        layer_param->set_bottom(j, SplitBlobName(layer_name,
            blob_name, top_idx.second, top_idx_to_bottom_split_idx[top_idx]++));
      }
    }
    // Create a split layer for any top blob that is used by other layers as a
    // bottom blob more than once.
    for (int j = 0; j < layer_param->top_size(); ++j) {
      const pair<int, int>& top_idx = make_pair(i, j);
      const int split_count = top_idx_to_bottom_count[top_idx];
      if (split_count > 1) {
        const string& layer_name = layer_idx_to_layer_name[i];
        const string& blob_name = layer_param->top(j);
        LayerParameter* split_layer_param = param_split->add_layer();
        const float loss_weight = top_idx_to_loss_weight[top_idx];
        ConfigureSplitLayer(layer_name, blob_name, j, split_count,
            loss_weight, split_layer_param);
        if (loss_weight) {
          layer_param->clear_loss_weight();
          top_idx_to_bottom_split_idx[top_idx]++;
        }
      }
    }
  }
}
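The helpers SplitBlobName and ConfigureSplitLayer are not shown above. The sketch below illustrates how they plausibly work, consistent with the conv1 => conv1_conv1_0_split_0 naming mentioned in Example 10 below; the exact layer-naming string is an assumption.

#include <sstream>  // std::ostringstream

// Sketch: name of the k-th split copy of a top blob, e.g. layer "conv1",
// blob "conv1", blob_idx 0, split_idx 0 -> "conv1_conv1_0_split_0".
std::string SplitBlobName(const std::string& layer_name, const std::string& blob_name,
                          const int blob_idx, const int split_idx) {
  std::ostringstream name;
  name << blob_name << "_" << layer_name << "_" << blob_idx
       << "_split_" << split_idx;
  return name.str();
}

// Sketch: build a Split layer with one bottom and `split_count` tops; only the
// first top keeps the original loss weight, the remaining tops get 0.
void ConfigureSplitLayer(const std::string& layer_name, const std::string& blob_name,
                         const int blob_idx, const int split_count,
                         const float loss_weight,
                         LayerParameter* split_layer_param) {
  split_layer_param->Clear();
  split_layer_param->add_bottom(blob_name);
  // Layer name for the split op (assumed, mirroring the blob naming above).
  std::ostringstream split_layer_name;
  split_layer_name << blob_name << "_" << layer_name << "_" << blob_idx << "_split";
  split_layer_param->set_name(split_layer_name.str());
  split_layer_param->set_type("Split");
  for (int k = 0; k < split_count; ++k) {
    split_layer_param->add_top(
        SplitBlobName(layer_name, blob_name, blob_idx, k));
    if (loss_weight) {
      split_layer_param->add_loss_weight(k == 0 ? loss_weight : 0);
    }
  }
}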
Example 10: insertSplits
void insertSplits(const NetParameter& param, NetParameter* splitted_param) {
  splitted_param->CopyFrom(param);
  splitted_param->clear_layer();
  // pair<layer_idx, blob_idx>
  map<string, pair<int, int> > blob_name_to_last_top_idx;
  map<pair<int, int>, pair<int, int> > bottom_idx_to_source_top_idx;
  map<pair<int, int>, int> top_idx_to_bottom_count;
  map<pair<int, int>, float> top_idx_to_loss_weight;
  map<pair<int, int>, int> top_idx_to_bottom_split_idx;
  map<int, string> layer_idx_to_layer_name;
  layer_idx_to_layer_name[-1] = "input";
  // scan all input blobs and stuff them into a virtual layer named "input" at index -1
  // input blobs do not belong to any layer, so they are collected in this virtual layer
  // they are usually used for inspecting a Net (e.g. examples\cifar10\cifar10_full.prototxt):
  //   input: "data"      <- declares a temporary data blob
  //   input_shape {      <- declares its shape
  //     dim: 1
  //     dim: 3
  //     dim: 32
  //     dim: 32
  //   }
  // note: input blobs should not be used in train/test prototxt files,
  // because they do not specify valid data sources;
  // regard them as a convenience for visualization only
  for (int i = 0; i < param.input_size(); i++) {
    const string& blob_name = param.input(i);
    blob_name_to_last_top_idx[blob_name] = make_pair(-1, i);
  }
  for (int i = 0; i < param.layer_size(); i++) {
    const LayerParameter& layer_param = param.layer(i);
    // bind layer idx to layer name
    layer_idx_to_layer_name[i] = layer_param.name();
    // a layer may have several bottom blobs
    for (int j = 0; j < layer_param.bottom_size(); j++) {
      const string& blob_name = layer_param.bottom(j);
      // every bottom blob must share its name with a previously declared top blob
      if (!blob_name_to_last_top_idx.count(blob_name)) {
        LOG(FATAL) << "Unknown bottom blob: " << blob_name
                   << " at layer: " << layer_param.name() << ".";
      }
      const pair<int, int>& bottom_idx = make_pair(i, j);
      const pair<int, int>& top_idx = blob_name_to_last_top_idx[blob_name];
      // a bottom's name must match some top's name;
      // find the bottom's source top (walking backward)
      // note that the top name must be declared before the bottom name,
      // otherwise the bottom would bind to the virtual layer at index -1
      bottom_idx_to_source_top_idx[bottom_idx] = top_idx;
      top_idx_to_bottom_count[top_idx]++;
    }
    // update each top name's position for the bottom names that follow
    for (int j = 0; j < layer_param.top_size(); j++) {
      const string& blob_name = layer_param.top(j);
      blob_name_to_last_top_idx[blob_name] = make_pair(i, j);
    }
    const int last_loss = min(layer_param.loss_weight_size(), layer_param.top_size());
    // only relevant for loss layers
    for (int j = 0; j < last_loss; j++) {
      const string& blob_name = layer_param.top(j);
      // updated above
      const pair<int, int>& top_idx = blob_name_to_last_top_idx[blob_name];
      top_idx_to_loss_weight[top_idx] = layer_param.loss_weight(j);
      // a top used as a loss counts as one more consumer of that blob
      if (top_idx_to_loss_weight[top_idx]) top_idx_to_bottom_count[top_idx]++;
    }
  }
  // special case: an input blob in the virtual layer may also be shared
  // by several consumers, so split it as well
  for (int i = 0; i < param.input_size(); i++) {
    const int split_count = top_idx_to_bottom_count[make_pair(-1, i)];
    if (split_count > 1) {
      // "input"
      const string& layer_name = layer_idx_to_layer_name[-1];
      const string& blob_name = param.input(i);
      // append a new layer param
      LayerParameter* split_layer_param = splitted_param->add_layer();
      const float kZeroLossWeight = 0;
      configureSplitLayer(layer_name, blob_name, i, split_count, kZeroLossWeight, split_layer_param);
    }
  }
  for (int i = 0; i < param.layer_size(); i++) {
    // append a new layer param
    LayerParameter* layer_param = splitted_param->add_layer();
    layer_param->CopyFrom(param.layer(i));
    for (int j = 0; j < layer_param->bottom_size(); j++) {
      // look up the source top for this bottom
      const pair<int, int>& top_idx = bottom_idx_to_source_top_idx[make_pair(i, j)];
      // check how many consumers this top has
      const int split_count = top_idx_to_bottom_count[top_idx];
      if (split_count > 1) {
        // name of the source layer
        const string& layer_name = layer_idx_to_layer_name[top_idx.first];
        const string& blob_name = layer_param->bottom(j);
        // e.g. conv1 => conv1_conv1_0_split_0
        // increment the split index after each use
        layer_param->set_bottom(j, splitBlobName(layer_name, blob_name, top_idx.second,
            top_idx_to_bottom_split_idx[top_idx]++));
      }
    }
    for (int j = 0; j < layer_param->top_size(); j++) {
      const pair<int, int>& top_idx = make_pair(i, j);
//......... the rest of the code is omitted here .........