This article collects typical usage examples of the C++ method NetParameter::input_size. If you are wondering what NetParameter::input_size does, how it is called, or what real code using it looks like, the curated examples below may help. You can also explore further usage examples of the containing class, NetParameter.
Seven code examples of the NetParameter::input_size method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
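Before the examples, here is a minimal sketch of the typical access pattern: input_size() returns the number of net-level input blobs declared in the prototxt, input(i) gives each blob's name, and (in the legacy format used by Examples 4 and 6) input_dim(...) stores four dimensions per input. The helper name PrintNetInputs is made up for illustration.

#include <glog/logging.h>
#include "caffe/proto/caffe.pb.h"  // generated NetParameter
using caffe::NetParameter;

// Hypothetical helper: list each declared net input together with its
// four legacy dimensions (num, channels, height, width).
void PrintNetInputs(const NetParameter& param) {
  for (int i = 0; i < param.input_size(); ++i) {
    LOG(INFO) << "Input blob '" << param.input(i) << "': "
              << param.input_dim(i * 4) << " "
              << param.input_dim(i * 4 + 1) << " "
              << param.input_dim(i * 4 + 2) << " "
              << param.input_dim(i * 4 + 3);
  }
}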
Example 1: UpgradeV0PaddingLayers
void UpgradeV0PaddingLayers(const NetParameter& param,
                            NetParameter* param_upgraded_pad) {
  // Copy everything other than the layers from the original param.
  param_upgraded_pad->Clear();
  param_upgraded_pad->CopyFrom(param);
  param_upgraded_pad->clear_layers();
  // Figure out which layer each bottom blob comes from.
  map<string, int> blob_name_to_last_top_idx;
  for (int i = 0; i < param.input_size(); ++i) {
    const string& blob_name = param.input(i);
    blob_name_to_last_top_idx[blob_name] = -1;
  }
  for (int i = 0; i < param.layers_size(); ++i) {
    const V1LayerParameter& layer_connection = param.layers(i);
    const V0LayerParameter& layer_param = layer_connection.layer();
    // Add the layer to the new net, unless it's a padding layer.
    if (layer_param.type() != "padding") {
      param_upgraded_pad->add_layers()->CopyFrom(layer_connection);
    }
    for (int j = 0; j < layer_connection.bottom_size(); ++j) {
      const string& blob_name = layer_connection.bottom(j);
      if (blob_name_to_last_top_idx.find(blob_name) ==
          blob_name_to_last_top_idx.end()) {
        LOG(FATAL) << "Unknown blob input " << blob_name << " to layer " << j;
      }
      const int top_idx = blob_name_to_last_top_idx[blob_name];
      if (top_idx == -1) {
        continue;
      }
      const V1LayerParameter& source_layer = param.layers(top_idx);
      if (source_layer.layer().type() == "padding") {
        // This layer has a padding layer as input -- check that it is a conv
        // layer or a pooling layer and takes only one input. Also check that
        // the padding layer input has only one input and one output. Other
        // cases have undefined behavior in Caffe.
        CHECK((layer_param.type() == "conv") || (layer_param.type() == "pool"))
            << "Padding layer input to "
               "non-convolutional / non-pooling layer type "
            << layer_param.type();
        CHECK_EQ(layer_connection.bottom_size(), 1)
            << "Conv Layer takes a single blob as input.";
        CHECK_EQ(source_layer.bottom_size(), 1)
            << "Padding Layer takes a single blob as input.";
        CHECK_EQ(source_layer.top_size(), 1)
            << "Padding Layer produces a single blob as output.";
        int layer_index = param_upgraded_pad->layers_size() - 1;
        param_upgraded_pad->mutable_layers(layer_index)->mutable_layer()
            ->set_pad(source_layer.layer().pad());
        param_upgraded_pad->mutable_layers(layer_index)
            ->set_bottom(j, source_layer.bottom(0));
      }
    }
    for (int j = 0; j < layer_connection.top_size(); ++j) {
      const string& blob_name = layer_connection.top(j);
      blob_name_to_last_top_idx[blob_name] = i;
    }
  }
}
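A hedged usage sketch for Example 1: the wrapper name UpgradeV0Net is made up; only UpgradeV0PaddingLayers itself comes from the example above.

// Hypothetical wrapper: after the call, the upgraded net contains no
// "padding" layers; each affected conv/pool layer carries the pad
// value itself and its bottom is rewired to the padding layer's input.
NetParameter UpgradeV0Net(const NetParameter& v0_net) {
  NetParameter upgraded;
  UpgradeV0PaddingLayers(v0_net, &upgraded);
  return upgraded;
}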
Example 2: NetParameterToPrettyPrint
void NetParameterToPrettyPrint(const NetParameter& param,
                               NetParameterPrettyPrint* pretty_param) {
  pretty_param->Clear();
  if (param.has_name()) {
    pretty_param->set_name(param.name());
  }
  if (param.has_force_backward()) {
    pretty_param->set_force_backward(param.force_backward());
  }
  for (int i = 0; i < param.input_size(); ++i) {
    pretty_param->add_input(param.input(i));
  }
  for (int i = 0; i < param.input_dim_size(); ++i) {
    pretty_param->add_input_dim(param.input_dim(i));
  }
  for (int i = 0; i < param.layers_size(); ++i) {
    pretty_param->add_layers()->CopyFrom(param.layers(i));
  }
}
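Example 2 simply mirrors the name, force_backward, input, input_dim, and layers fields into the pretty-print message. A small usage sketch, assuming NetParameterPrettyPrint is an ordinary protobuf message (the helper name DumpPretty is made up):

#include <google/protobuf/text_format.h>

// Hypothetical helper: convert, then dump the result as protobuf text.
void DumpPretty(const NetParameter& param) {
  NetParameterPrettyPrint pretty;
  NetParameterToPrettyPrint(param, &pretty);
  std::string text;
  google::protobuf::TextFormat::PrintToString(pretty, &text);
  LOG(INFO) << text;
}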
Example 3: InsertSplits
void InsertSplits(const NetParameter& param, NetParameter* param_split) {
  // Initialize by copying from the input NetParameter.
  param_split->CopyFrom(param);
  param_split->clear_layer();
  map<string, pair<int, int> > blob_name_to_last_top_idx;
  map<pair<int, int>, pair<int, int> > bottom_idx_to_source_top_idx;
  map<pair<int, int>, int> top_idx_to_bottom_count;
  map<pair<int, int>, float> top_idx_to_loss_weight;
  map<pair<int, int>, int> top_idx_to_bottom_split_idx;
  map<int, string> layer_idx_to_layer_name;
  layer_idx_to_layer_name[-1] = "input";
  // Determine the number of times each blob is used as an input (bottom) blob.
  for (int i = 0; i < param.input_size(); ++i) {
    const string& blob_name = param.input(i);
    blob_name_to_last_top_idx[blob_name] = make_pair(-1, i);
  }
  for (int i = 0; i < param.layer_size(); ++i) {
    const LayerParameter& layer_param = param.layer(i);
    layer_idx_to_layer_name[i] = layer_param.name();
    for (int j = 0; j < layer_param.bottom_size(); ++j) {
      const string& blob_name = layer_param.bottom(j);
      if (blob_name_to_last_top_idx.find(blob_name) ==
          blob_name_to_last_top_idx.end()) {
        LOG(FATAL) << "Unknown blob input " << blob_name << " to layer " << j;
      }
      const pair<int, int>& bottom_idx = make_pair(i, j);
      const pair<int, int>& top_idx = blob_name_to_last_top_idx[blob_name];
      bottom_idx_to_source_top_idx[bottom_idx] = top_idx;
      ++top_idx_to_bottom_count[top_idx];
    }
    for (int j = 0; j < layer_param.top_size(); ++j) {
      const string& blob_name = layer_param.top(j);
      blob_name_to_last_top_idx[blob_name] = make_pair(i, j);
    }
    // A use of a top blob as a loss should be handled similarly to the use of
    // a top blob as an input (bottom) blob to another layer.
    const int last_loss =
        std::min(layer_param.loss_weight_size(), layer_param.top_size());
    for (int j = 0; j < last_loss; ++j) {
      const string& blob_name = layer_param.top(j);
      const pair<int, int>& top_idx = blob_name_to_last_top_idx[blob_name];
      top_idx_to_loss_weight[top_idx] = layer_param.loss_weight(j);
      if (top_idx_to_loss_weight[top_idx]) {
        ++top_idx_to_bottom_count[top_idx];
      }
    }
  }
  // Create split layer for any input blobs used by other layers as bottom
  // blobs more than once.
  for (int i = 0; i < param.input_size(); ++i) {
    const int split_count = top_idx_to_bottom_count[make_pair(-1, i)];
    if (split_count > 1) {
      const string& layer_name = layer_idx_to_layer_name[-1];
      const string& blob_name = param.input(i);
      LayerParameter* split_layer_param = param_split->add_layer();
      const float kZeroLossWeight = 0;
      ConfigureSplitLayer(layer_name, blob_name, i, split_count,
          kZeroLossWeight, split_layer_param);
    }
  }
  for (int i = 0; i < param.layer_size(); ++i) {
    LayerParameter* layer_param = param_split->add_layer();
    layer_param->CopyFrom(param.layer(i));
    // Replace any shared bottom blobs with split layer outputs.
    for (int j = 0; j < layer_param->bottom_size(); ++j) {
      const pair<int, int>& top_idx =
          bottom_idx_to_source_top_idx[make_pair(i, j)];
      const int split_count = top_idx_to_bottom_count[top_idx];
      if (split_count > 1) {
        const string& layer_name = layer_idx_to_layer_name[top_idx.first];
        const string& blob_name = layer_param->bottom(j);
        layer_param->set_bottom(j, SplitBlobName(layer_name,
            blob_name, top_idx.second, top_idx_to_bottom_split_idx[top_idx]++));
      }
    }
    // Create split layer for any top blobs used by other layers as bottom
    // blobs more than once.
    for (int j = 0; j < layer_param->top_size(); ++j) {
      const pair<int, int>& top_idx = make_pair(i, j);
      const int split_count = top_idx_to_bottom_count[top_idx];
      if (split_count > 1) {
        const string& layer_name = layer_idx_to_layer_name[i];
        const string& blob_name = layer_param->top(j);
        LayerParameter* split_layer_param = param_split->add_layer();
        const float loss_weight = top_idx_to_loss_weight[top_idx];
        ConfigureSplitLayer(layer_name, blob_name, j, split_count,
            loss_weight, split_layer_param);
        if (loss_weight) {
          layer_param->clear_loss_weight();
          top_idx_to_bottom_split_idx[top_idx]++;
        }
      }
    }
  }
}
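Example 3 calls ConfigureSplitLayer and SplitBlobName without showing them. Below is a sketch of plausible implementations, with signatures inferred from the call sites (treat the details as assumptions rather than the exact Caffe code); the naming matches the conv1 => conv1_conv1_0_split_0 pattern noted in Example 5.

#include <sstream>
#include <string>
using std::string;

string SplitLayerName(const string& layer_name, const string& blob_name,
    const int blob_idx) {
  std::ostringstream name;
  name << blob_name << "_" << layer_name << "_" << blob_idx << "_split";
  return name.str();
}

string SplitBlobName(const string& layer_name, const string& blob_name,
    const int blob_idx, const int split_idx) {
  std::ostringstream name;
  name << blob_name << "_" << layer_name << "_" << blob_idx
      << "_split_" << split_idx;
  return name.str();
}

// One Split layer: a single bottom fanned out to split_count tops.
// Any loss weight stays on the first top; the remaining tops get 0.
void ConfigureSplitLayer(const string& layer_name, const string& blob_name,
    const int blob_idx, const int split_count, const float loss_weight,
    LayerParameter* split_layer_param) {
  split_layer_param->Clear();
  split_layer_param->add_bottom(blob_name);
  split_layer_param->set_name(SplitLayerName(layer_name, blob_name, blob_idx));
  split_layer_param->set_type("Split");
  for (int k = 0; k < split_count; ++k) {
    split_layer_param->add_top(
        SplitBlobName(layer_name, blob_name, blob_idx, k));
    if (loss_weight) {
      split_layer_param->add_loss_weight(k == 0 ? loss_weight : 0);
    }
  }
}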
Example 4: if
void Net<Dtype>::Init(const NetParameter& in_param) {
  // Create a copy of in_param with splits added where necessary.
  NetParameter param;
  InsertSplits(in_param, &param);
  // Basically, build all the layers and set up their connections.
  name_ = param.name();
  map<string, int> blob_name_to_idx;
  set<string> available_blobs;
  int num_layers = param.layers_size();
  CHECK_EQ(param.input_size() * 4, param.input_dim_size())
      << "Incorrect bottom blob dimension specifications.";
  size_t memory_used = 0;
  // set the input blobs
  for (int i = 0; i < param.input_size(); ++i) {
    const string& blob_name = param.input(i);
    shared_ptr<Blob<Dtype> > blob_pointer(
        new Blob<Dtype>(param.input_dim(i * 4),
                        param.input_dim(i * 4 + 1),
                        param.input_dim(i * 4 + 2),
                        param.input_dim(i * 4 + 3)));
    blobs_.push_back(blob_pointer);
    blob_names_.push_back(blob_name);
    blob_need_backward_.push_back(param.force_backward());
    net_input_blob_indices_.push_back(i);
    net_input_blobs_.push_back(blob_pointer.get());
    blob_name_to_idx[blob_name] = i;
    available_blobs.insert(blob_name);
    memory_used += blob_pointer->count();
  }
  DLOG(INFO) << "Memory required for data: " << memory_used * sizeof(Dtype);
  // For each layer, set up its input and output
  bottom_vecs_.resize(param.layers_size());
  top_vecs_.resize(param.layers_size());
  bottom_id_vecs_.resize(param.layers_size());
  top_id_vecs_.resize(param.layers_size());
  for (int i = 0; i < param.layers_size(); ++i) {
    bool in_place = false;
    const LayerParameter& layer_param = param.layers(i);
    layers_.push_back(shared_ptr<Layer<Dtype> >(GetLayer<Dtype>(layer_param)));
    layer_names_.push_back(layer_param.name());
    LOG(INFO) << "Creating Layer " << layer_param.name();
    bool need_backward = param.force_backward();
    // Figure out this layer's input
    for (int j = 0; j < layer_param.bottom_size(); ++j) {
      const string& blob_name = layer_param.bottom(j);
      const int blob_id = blob_name_to_idx[blob_name];
      if (available_blobs.find(blob_name) == available_blobs.end()) {
        LOG(FATAL) << "Unknown blob input " << blob_name
            << " to layer " << j;
      }
      LOG(INFO) << layer_param.name() << " <- " << blob_name;
      bottom_vecs_[i].push_back(blobs_[blob_id].get());
      bottom_id_vecs_[i].push_back(blob_id);
      // If a blob needs backward, this layer should provide it.
      need_backward |= blob_need_backward_[blob_id];
      available_blobs.erase(blob_name);
    }
    // Figure out this layer's output
    for (int j = 0; j < layer_param.top_size(); ++j) {
      const string& blob_name = layer_param.top(j);
      // Check if we are doing in-place computation
      if (layer_param.bottom_size() > j &&
          blob_name == layer_param.bottom(j)) {
        // In-place computation
        LOG(INFO) << layer_param.name() << " -> " << blob_name << " (in-place)";
        in_place = true;
        available_blobs.insert(blob_name);
        top_vecs_[i].push_back(
            blobs_[blob_name_to_idx[blob_name]].get());
        top_id_vecs_[i].push_back(blob_name_to_idx[blob_name]);
      } else if (blob_name_to_idx.find(blob_name) != blob_name_to_idx.end()) {
        // If we are not doing in-place computation but have duplicated blobs,
        // raise an error.
        LOG(FATAL) << "Duplicate blobs produced by multiple sources.";
      } else {
        // Normal output.
        LOG(INFO) << layer_param.name() << " -> " << blob_name;
        shared_ptr<Blob<Dtype> > blob_pointer(new Blob<Dtype>());
        blobs_.push_back(blob_pointer);
        blob_names_.push_back(blob_name);
        blob_need_backward_.push_back(param.force_backward());
        blob_name_to_idx[blob_name] = blob_names_.size() - 1;
        available_blobs.insert(blob_name);
        top_vecs_[i].push_back(blobs_[blob_names_.size() - 1].get());
        top_id_vecs_[i].push_back(blob_names_.size() - 1);
      }
    }
    // After this layer is connected, set it up.
    // LOG(INFO) << "Setting up " << layer_names_[i];
    layers_[i]->SetUp(bottom_vecs_[i], &(top_vecs_[i]));
//......... the rest of this example is omitted here .........
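Examples 4 and 6 both enforce input_size() * 4 == input_dim_size(), because the legacy format stores exactly four dimensions (num, channels, height, width) per input blob. A minimal sketch of building a NetParameter that satisfies the check; the blob names and shapes are made up:

// Made-up names and shapes; only the field layout is the point.
NetParameter MakeFourDimInputNet() {
  NetParameter param;
  param.add_input("data");
  param.add_input("label");
  const int dims[] = {64, 3, 32, 32,   // data:  num, channels, height, width
                      64, 1, 1, 1};    // label: num, channels, height, width
  for (int d = 0; d < 8; ++d) {
    param.add_input_dim(dims[d]);
  }
  CHECK_EQ(param.input_size() * 4, param.input_dim_size());  // holds
  return param;
}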
Example 5: insertSplits
void insertSplits(const NetParameter& param, NetParameter* splitted_param){
  splitted_param->CopyFrom(param);
  splitted_param->clear_layer();
  // pair<layer_idx, blob_idx>
  map<string, pair<int, int> > blob_name_to_last_top_idx;
  map<pair<int, int>, pair<int, int> > bottom_idx_to_source_top_idx;
  map<pair<int, int>, int> top_idx_to_bottom_count;
  map<pair<int, int>, float> top_idx_to_loss_weight;
  map<pair<int, int>, int> top_idx_to_bottom_split_idx;
  map<int, string> layer_idx_to_layer_name;
  layer_idx_to_layer_name[-1] = "input";
  // scan and stuff all input blobs into a virtual layer named "input" at index -1
  // input blobs do not belong to any layer, so we stuff them into a virtual layer
  // usually used for viewing a Net (e.g. examples\cifar10\cifar10_full.prototxt):
  //   input: "data"     <-- declares a temporary data blob
  //   input_shape {     <-- declares its shape
  //     dim: 1
  //     dim: 3
  //     dim: 32
  //     dim: 32
  //   }
  // pay attention: input blobs should not be used in train/test prototxts,
  // because they are not backed by valid data sources;
  // you can regard them as viewing toys
  for (int i = 0; i < param.input_size(); i++){
    const string& blob_name = param.input(i);
    blob_name_to_last_top_idx[blob_name] = make_pair(-1, i);
  }
  for (int i = 0; i < param.layer_size(); i++){
    const LayerParameter& layer_param = param.layer(i);
    // bind layer idx to layer name
    layer_idx_to_layer_name[i] = layer_param.name();
    // a layer can have several bottom blobs (e.g. DataLayer)
    for (int j = 0; j < layer_param.bottom_size(); j++){
      const string& blob_name = layer_param.bottom(j);
      // ensure that every bottom blob has the same name as some top blob
      if (!blob_name_to_last_top_idx.count(blob_name)){
        LOG(FATAL) << "Unknown bottom blob: " << blob_name
            << " at layer: " << layer_param.name() << ".";
      }
      const pair<int, int>& bottom_idx = make_pair(i, j);
      const pair<int, int>& top_idx = blob_name_to_last_top_idx[blob_name];
      // a bottom's name must be the same as some top's name:
      // find the bottom's parent top (<- backward direction)
      // note that a top name must be declared before the bottom name,
      // or the bottom will bind to layer_{-1}
      bottom_idx_to_source_top_idx[bottom_idx] = top_idx;
      top_idx_to_bottom_count[top_idx]++;
    }
    // update the top name's position for following bottom names
    for (int j = 0; j < layer_param.top_size(); j++){
      const string& blob_name = layer_param.top(j);
      blob_name_to_last_top_idx[blob_name] = make_pair(i, j);
    }
    const int last_loss = min(layer_param.loss_weight_size(), layer_param.top_size());
    // only relevant for loss layers
    for (int j = 0; j < last_loss; j++){
      const string& blob_name = layer_param.top(j);
      // updated above
      const pair<int, int>& top_idx = blob_name_to_last_top_idx[blob_name];
      top_idx_to_loss_weight[top_idx] = layer_param.loss_weight(j);
      // from loss (top) backward to bottom
      if (top_idx_to_loss_weight[top_idx]) top_idx_to_bottom_count[top_idx]++;
    }
  }
  // special case: a data blob in the virtual layer shared by other blobs
  // must be split as well
  for (int i = 0; i < param.input_size(); i++){
    const int split_count = top_idx_to_bottom_count[make_pair(-1, i)];
    if (split_count > 1){
      // "input"
      const string& layer_name = layer_idx_to_layer_name[-1];
      const string& blob_name = param.input(i);
      // push_back a new param
      LayerParameter* split_layer_param = splitted_param->add_layer();
      const float kZeroLossWeight = 0;
      configureSplitLayer(layer_name, blob_name, i, split_count, kZeroLossWeight, split_layer_param);
    }
  }
  for (int i = 0; i < param.layer_size(); i++){
    // push_back a new param
    LayerParameter* layer_param = splitted_param->add_layer();
    layer_param->CopyFrom(param.layer(i));
    for (int j = 0; j < layer_param->bottom_size(); j++){
      // look up the top that feeds this bottom
      const pair<int, int>& top_idx = bottom_idx_to_source_top_idx[make_pair(i, j)];
      // check the top's consumer count
      const int split_count = top_idx_to_bottom_count[top_idx];
      if (split_count > 1){
        // previous layer_name
        const string& layer_name = layer_idx_to_layer_name[top_idx.first];
        const string& blob_name = layer_param->bottom(j);
        // e.g. conv1 => conv1_conv1_0_split_0
        // once used, ++ for the next consumer
        layer_param->set_bottom(j, splitBlobName(layer_name, blob_name, top_idx.second,
            top_idx_to_bottom_split_idx[top_idx]++));
      }
    }
    for (int j = 0; j < layer_param->top_size(); j++){
      const pair<int, int>& top_idx = make_pair(i, j);
//......... the rest of this example is omitted here .........
Example 6: LOG
void Net<Dtype>::Init(const NetParameter& param) {
  // Basically, build all the layers and set up their connections.
  name_ = param.name();
  map<string, int> blob_name_to_idx;
  set<string> available_blobs;
  int num_layers = param.layers_size();
  CHECK_EQ(param.input_size() * 4, param.input_dim_size())
      << "Incorrect bottom blob dimension specifications.";
  // set the input blobs
  for (int i = 0; i < param.input_size(); ++i) {
    const string& blob_name = param.input(i);
    shared_ptr<Blob<Dtype> > blob_pointer(
        new Blob<Dtype>(param.input_dim(i * 4),
                        param.input_dim(i * 4 + 1),
                        param.input_dim(i * 4 + 2),
                        param.input_dim(i * 4 + 3)));
    blobs_.push_back(blob_pointer);
    blob_names_.push_back(blob_name);
    blob_need_backward_.push_back(param.force_backward());
    net_input_blob_indices_.push_back(i);
    net_input_blobs_.push_back(blob_pointer.get());
    blob_name_to_idx[blob_name] = i;
    available_blobs.insert(blob_name);
  }
  // For each layer, set up its input and output
  bottom_vecs_.resize(param.layers_size());
  top_vecs_.resize(param.layers_size());
  bottom_id_vecs_.resize(param.layers_size());
  top_id_vecs_.resize(param.layers_size());
  for (int i = 0; i < param.layers_size(); ++i) {
    const LayerConnection& layer_connection = param.layers(i);
    const LayerParameter& layer_param = layer_connection.layer();
    layers_.push_back(shared_ptr<Layer<Dtype> >(GetLayer<Dtype>(layer_param)));
    layer_names_.push_back(layer_param.name());
    LOG(INFO) << "Creating Layer " << layer_param.name();
    bool need_backward = param.force_backward();
    // Figure out this layer's input and output
    for (int j = 0; j < layer_connection.bottom_size(); ++j) {
      const string& blob_name = layer_connection.bottom(j);
      const int blob_id = blob_name_to_idx[blob_name];
      if (available_blobs.find(blob_name) == available_blobs.end()) {
        LOG(FATAL) << "Unknown blob input " << blob_name
            << " to layer " << j;
      }
      LOG(INFO) << layer_param.name() << " <- " << blob_name;
      bottom_vecs_[i].push_back(
          blobs_[blob_id].get());
      bottom_id_vecs_[i].push_back(blob_id);
      // If a blob needs backward, this layer should provide it.
      need_backward |= blob_need_backward_[blob_id];
      available_blobs.erase(blob_name);
    }
    for (int j = 0; j < layer_connection.top_size(); ++j) {
      const string& blob_name = layer_connection.top(j);
      // Check if we are doing in-place computation
      if (layer_connection.bottom_size() > j &&
          blob_name == layer_connection.bottom(j)) {
        // In-place computation
        LOG(INFO) << layer_param.name() << " -> " << blob_name << " (in-place)";
        available_blobs.insert(blob_name);
        top_vecs_[i].push_back(
            blobs_[blob_name_to_idx[blob_name]].get());
        top_id_vecs_[i].push_back(blob_name_to_idx[blob_name]);
      } else if (blob_name_to_idx.find(blob_name) != blob_name_to_idx.end()) {
        // If we are not doing in-place computation but have duplicated blobs,
        // raise an error.
        LOG(FATAL) << "Duplicate blobs produced by multiple sources.";
      } else {
        // Normal output.
        LOG(INFO) << layer_param.name() << " -> " << blob_name;
        shared_ptr<Blob<Dtype> > blob_pointer(new Blob<Dtype>());
        blobs_.push_back(blob_pointer);
        blob_names_.push_back(blob_name);
        blob_need_backward_.push_back(param.force_backward());
        blob_name_to_idx[blob_name] = blob_names_.size() - 1;
        available_blobs.insert(blob_name);
        top_vecs_[i].push_back(blobs_[blob_names_.size() - 1].get());
        top_id_vecs_[i].push_back(blob_names_.size() - 1);
      }
    }
    // After this layer is connected, set it up.
    // LOG(INFO) << "Setting up " << layer_names_[i];
    layers_[i]->SetUp(bottom_vecs_[i], &top_vecs_[i]);
    for (int topid = 0; topid < top_vecs_[i].size(); ++topid) {
      LOG(INFO) << "Top shape: " << top_vecs_[i][topid]->channels() << " "
          << top_vecs_[i][topid]->height() << " "
          << top_vecs_[i][topid]->width();
    }
    // Check if this layer needs backward operation itself
    for (int j = 0; j < layers_[i]->layer_param().blobs_lr_size(); ++j) {
      need_backward |= (layers_[i]->layer_param().blobs_lr(j) > 0);
    }
    // Finally, set the backward flag
    layer_need_backward_.push_back(need_backward);
    if (need_backward) {
      LOG(INFO) << layer_names_[i] << " needs backward computation.";
      for (int j = 0; j < top_id_vecs_[i].size(); ++j) {
        blob_need_backward_[top_id_vecs_[i][j]] = true;
      }
    } else {
//......... the rest of this example is omitted here .........
Example 7: insert_splits
void insert_splits(const NetParameter& param, NetParameter* param_split) {
  // Initialize by copying from the input NetParameter.
  param_split->CopyFrom(param);
  param_split->clear_layers();
  map<string, pair<int, int> > blob_name_to_last_top_idx;
  map<pair<int, int>, pair<int, int> > bottom_idx_to_source_top_idx;
  map<pair<int, int>, int> top_idx_to_bottom_count;
  map<pair<int, int>, int> top_idx_to_bottom_split_idx;
  map<int, string> layer_idx_to_layer_name;
  layer_idx_to_layer_name[-1] = "input";
  // Determine the number of times each blob is used as an input (bottom) blob.
  for (int i = 0; i < param.input_size(); ++i) {
    const string& blob_name = param.input(i);
    blob_name_to_last_top_idx[blob_name] = make_pair(-1, i);
  }
  for (int i = 0; i < param.layers_size(); ++i) {
    const LayerConnection& layer_connection = param.layers(i);
    layer_idx_to_layer_name[i] = layer_connection.layer().name();
    for (int j = 0; j < layer_connection.bottom_size(); ++j) {
      const string& blob_name = layer_connection.bottom(j);
      if (blob_name_to_last_top_idx.find(blob_name) ==
          blob_name_to_last_top_idx.end()) {
        LOG(FATAL) << "Unknown blob input " << blob_name << " to layer " << j;
      }
      const pair<int, int>& bottom_idx = make_pair(i, j);
      const pair<int, int>& top_idx = blob_name_to_last_top_idx[blob_name];
      bottom_idx_to_source_top_idx[bottom_idx] = top_idx;
      ++top_idx_to_bottom_count[top_idx];
    }
    for (int j = 0; j < layer_connection.top_size(); ++j) {
      const string& blob_name = layer_connection.top(j);
      blob_name_to_last_top_idx[blob_name] = make_pair(i, j);
    }
  }
  // Create split layer for any input blobs used by other layers as bottom
  // blobs more than once.
  for (int i = 0; i < param.input_size(); ++i) {
    const int split_count = top_idx_to_bottom_count[make_pair(-1, i)];
    if (split_count > 1) {
      const string& layer_name = layer_idx_to_layer_name[-1];
      const string& blob_name = param.input(i);
      LayerConnection* split_layer_connection = param_split->add_layers();
      configure_split_layer(layer_name, blob_name, i, split_count,
          split_layer_connection);
    }
  }
  for (int i = 0; i < param.layers_size(); ++i) {
    LayerConnection* layer_connection = param_split->add_layers();
    layer_connection->CopyFrom(param.layers(i));
    // Replace any shared bottom blobs with split layer outputs.
    for (int j = 0; j < layer_connection->bottom_size(); ++j) {
      const pair<int, int>& top_idx =
          bottom_idx_to_source_top_idx[make_pair(i, j)];
      const int split_count = top_idx_to_bottom_count[top_idx];
      if (split_count > 1) {
        const string& layer_name = layer_idx_to_layer_name[top_idx.first];
        const string& blob_name = layer_connection->bottom(j);
        layer_connection->set_bottom(j, get_split_blob_name(layer_name,
            blob_name, top_idx.second, top_idx_to_bottom_split_idx[top_idx]++));
      }
    }
    // Create split layer for any top blobs used by other layers as bottom
    // blobs more than once.
    for (int j = 0; j < layer_connection->top_size(); ++j) {
      const int split_count = top_idx_to_bottom_count[make_pair(i, j)];
      if (split_count > 1) {
        const string& layer_name = layer_idx_to_layer_name[i];
        const string& blob_name = layer_connection->top(j);
        LayerConnection* split_layer_connection = param_split->add_layers();
        configure_split_layer(layer_name, blob_name, j, split_count,
            split_layer_connection);
      }
    }
  }
}