This article collects typical usage examples of the C++ method LayerPtr::forward. If you are wondering what LayerPtr::forward does, how to call it, or how it is used in practice, the curated examples below should help. You can also explore other methods of the LayerPtr class.
Five code examples of LayerPtr::forward are shown below, ordered by popularity by default.
Example 1: doOneConvtTest
// Do one forward pass of convTrans layer and check to see if its output
// matches the given result
void doOneConvtTest(size_t imgSize,
                    size_t output_x,
                    size_t stride,
                    size_t padding,
                    size_t filter_size,
                    MatrixPtr& result) {
  TestConfig configt;
  configt.biasSize = 1;
  configt.layerConfig.set_type("exconvt");
  configt.layerConfig.set_num_filters(1);
  configt.layerConfig.set_partial_sum(1);
  configt.layerConfig.set_shared_biases(true);
  configt.inputDefs.push_back(
      {INPUT_DATA, "layer_0", output_x * output_x, filter_size * filter_size});
  LayerInputConfig* input = configt.layerConfig.add_inputs();
  ConvConfig* conv = input->mutable_conv_conf();
  conv->set_filter_size(filter_size);
  conv->set_filter_size_y(filter_size);
  conv->set_channels(1);
  conv->set_padding(padding);
  conv->set_padding_y(padding);
  conv->set_stride(stride);
  conv->set_stride_y(stride);
  conv->set_groups(1);
  conv->set_filter_channels(1);
  conv->set_img_size(imgSize);
  conv->set_output_x(output_x);
  configt.layerConfig.set_size(conv->img_size() * conv->img_size() *
                               configt.layerConfig.num_filters());
  configt.layerConfig.set_name("convTrans");

  // data layer initialize
  std::vector<DataLayerPtr> dataLayers;
  LayerMap layerMap;
  vector<Argument> datas;
  initDataLayer(
      configt, &dataLayers, &datas, &layerMap, "convTrans", 1, false, false);
  dataLayers[0]->getOutputValue()->zeroMem();
  dataLayers[0]->getOutputValue()->add(1.0);

  // test layer initialize
  std::vector<ParameterPtr> parameters;
  LayerPtr convtLayer;
  initTestLayer(configt, &layerMap, &parameters, &convtLayer);
  convtLayer->getBiasParameter()->zeroMem();
  convtLayer->getParameters()[0]->zeroMem();
  convtLayer->getParameters()[0]->getBuf(PARAMETER_VALUE)->add(1.0);
  convtLayer->forward(PASS_GC);

  checkMatrixEqual(convtLayer->getOutputValue(), result);
}
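The helper above is meant to be driven by a test case that supplies a concrete geometry and a hand-computed expected output. The following is only a minimal sketch of such a caller, not code from this article: the chosen sizes and the Matrix::create/copyFrom calls are assumptions. Because doOneConvtTest sets every weight and every input value to 1.0, a non-overlapping 2x2 filter at stride 2 covers each output pixel exactly once, so the expected result is simply a matrix of ones.

// Hypothetical driver for doOneConvtTest (a sketch, not the original test):
// a 2x2 input with a 2x2 filter, stride 2, no padding expands to a 4x4 output.
void sketchConvtTestCall() {
  const size_t imgSize = 4, output_x = 2, stride = 2, padding = 0, filterSize = 2;
  // Weights and inputs are all 1.0 and each output pixel is covered by
  // exactly one filter placement, so every one of the 4x4 outputs should be 1.
  std::vector<real> expected(imgSize * imgSize, 1.0);
  MatrixPtr result = Matrix::create(1, imgSize * imgSize, false, /* useGpu */ false);
  result->copyFrom(expected.data(), expected.size());
  doOneConvtTest(imgSize, output_x, stride, padding, filterSize, result);
}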
Example 2: doOnePriorBoxTest
// Do one forward pass of priorBox layer and check to see if its output
// matches the given result
void doOnePriorBoxTest(size_t feature_map_width,
                       size_t feature_map_height,
                       size_t image_width,
                       size_t image_height,
                       vector<int> min_size,
                       vector<int> max_size,
                       vector<real> aspect_ratio,
                       vector<real> variance,
                       bool use_gpu,
                       MatrixPtr& result) {
  // Setting up the priorbox layer
  TestConfig configt;
  configt.layerConfig.set_type("priorbox");
  configt.inputDefs.push_back({INPUT_DATA, "featureMap", 1, 0});
  LayerInputConfig* input = configt.layerConfig.add_inputs();
  configt.inputDefs.push_back({INPUT_DATA, "image", 1, 0});
  configt.layerConfig.add_inputs();
  PriorBoxConfig* pb = input->mutable_priorbox_conf();
  for (size_t i = 0; i < min_size.size(); i++) pb->add_min_size(min_size[i]);
  for (size_t i = 0; i < max_size.size(); i++) pb->add_max_size(max_size[i]);
  for (size_t i = 0; i < variance.size(); i++) pb->add_variance(variance[i]);
  for (size_t i = 0; i < aspect_ratio.size(); i++)
    pb->add_aspect_ratio(aspect_ratio[i]);

  // data layer initialize
  std::vector<DataLayerPtr> dataLayers;
  LayerMap layerMap;
  vector<Argument> datas;
  initDataLayer(
      configt, &dataLayers, &datas, &layerMap, "priorbox", 1, false, use_gpu);
  dataLayers[0]->getOutput().setFrameHeight(feature_map_height);
  dataLayers[0]->getOutput().setFrameWidth(feature_map_width);
  dataLayers[1]->getOutput().setFrameHeight(image_height);
  dataLayers[1]->getOutput().setFrameWidth(image_width);

  // test layer initialize
  std::vector<ParameterPtr> parameters;
  LayerPtr priorboxLayer;
  initTestLayer(configt, &layerMap, &parameters, &priorboxLayer);
  priorboxLayer->forward(PASS_GC);
  checkMatrixEqual(priorboxLayer->getOutputValue(), result);
}
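As with the previous helper, doOnePriorBoxTest is driven by a caller that fixes the geometry and the expected prior boxes. The sketch below is an assumption about such a caller, not code from this article: the geometry, the Matrix helpers, and the output layout (four corner coordinates followed by four variances per prior, min-size box before max-size box) are all assumed rather than taken from the source.

// Hypothetical driver for doOnePriorBoxTest: a 1x1 feature map over a
// 300x300 image with one min_size and one max_size, i.e. two priors.
void sketchPriorBoxTestCall() {
  vector<int> minSize = {276};
  vector<int> maxSize = {330};
  vector<real> aspectRatio;                      // only the implicit 1:1 ratio
  vector<real> variance = {0.1, 0.1, 0.2, 0.2};
  // Prior 1: a 276x276 box centered on the image, so its corners are
  //   (150 - 138) / 300 = 0.04 and (150 + 138) / 300 = 0.96 on both axes.
  // Prior 2: a sqrt(276 * 330) ~ 302 box, clipped to the image: [0, 0, 1, 1].
  real expected[] = {0.04, 0.04, 0.96, 0.96, 0.1, 0.1, 0.2, 0.2,
                     0.00, 0.00, 1.00, 1.00, 0.1, 0.1, 0.2, 0.2};
  MatrixPtr result = Matrix::create(1, 2 * 8, false, /* useGpu */ false);
  result->copyFrom(expected, 2 * 8);
  doOnePriorBoxTest(/* feature_map_width */ 1, /* feature_map_height */ 1,
                    /* image_width */ 300, /* image_height */ 300,
                    minSize, maxSize, aspectRatio, variance,
                    /* use_gpu */ false, result);
}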
Example 3: createCTCLayer
LayerPtr createCTCLayer(string name,
                        size_t numClasses,
                        bool useGpu,
                        bool normByTimes,
                        LayerPtr dataLayer,
                        LayerPtr labelLayer) {
  LayerMap layerMap;
  layerMap[dataLayer->getName()] = dataLayer;
  layerMap[labelLayer->getName()] = labelLayer;
  ParameterMap parameterMap;

  LayerConfig layerConfig;
  layerConfig.set_name(name);
  layerConfig.set_type("ctc");
  layerConfig.set_size(numClasses);
  layerConfig.set_norm_by_times(normByTimes);

  layerConfig.add_inputs();
  LayerInputConfig& input0 = *(layerConfig.mutable_inputs(0));
  input0.set_input_layer_name(dataLayer->getName());

  layerConfig.add_inputs();
  LayerInputConfig& input1 = *(layerConfig.mutable_inputs(1));
  input1.set_input_layer_name(labelLayer->getName());

  LayerPtr layer = LayerPtr(new CTCLayer(layerConfig));
  layerMap[layer->getName()] = layer;
  layer->init(layerMap, parameterMap);

  ActivationFunction* softmaxActivation = ActivationFunction::create("softmax");

  softmaxActivation->forward(dataLayer->getOutput()).check();
  layer->forward(PASS_GC);

  layer->backward();
  softmaxActivation->backward(dataLayer->getOutput()).check();

  return layer;
}
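Because createCTCLayer runs forward and backward before returning, its results can be read back through the same Argument accessors used elsewhere in these examples. The snippet below is only a sketch: the class count and the way dataLayer and labelLayer are obtained are assumptions, not part of the original test.

// Hypothetical inspection of the layer returned by createCTCLayer.
// dataLayer and labelLayer are assumed to be already-initialized input
// layers built by the surrounding test fixture.
void sketchInspectCTC(LayerPtr dataLayer, LayerPtr labelLayer) {
  LayerPtr ctc = createCTCLayer("ctc_cost", /* numClasses */ 5,
                                /* useGpu */ false, /* normByTimes */ false,
                                dataLayer, labelLayer);
  // forward() already ran inside createCTCLayer, so the per-sequence CTC
  // costs are available in the layer's output value ...
  MatrixPtr cost = ctc->getOutput().value;
  // ... and backward() has filled the gradient with respect to the
  // (softmax-activated) data-layer output.
  MatrixPtr inputGrad = dataLayer->getOutput().grad;
  (void)cost;
  (void)inputGrad;
}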
Example 4: createWarpCTCLayer
LayerPtr createWarpCTCLayer(string name,
                            size_t numClasses,
                            bool useGpu,
                            bool normByTimes,
                            LayerPtr dataLayer,
                            LayerPtr labelLayer) {
  LayerMap layerMap;
  layerMap[dataLayer->getName()] = dataLayer;
  layerMap[labelLayer->getName()] = labelLayer;
  ParameterMap parameterMap;

  LayerConfig layerConfig;
  layerConfig.set_name(name);
  layerConfig.set_type("warp_ctc");
  layerConfig.set_size(numClasses);
  layerConfig.set_blank(numClasses - 1);
  layerConfig.set_norm_by_times(normByTimes);

  layerConfig.add_inputs();
  LayerInputConfig& input0 = *(layerConfig.mutable_inputs(0));
  input0.set_input_layer_name(dataLayer->getName());

  layerConfig.add_inputs();
  LayerInputConfig& input1 = *(layerConfig.mutable_inputs(1));
  input1.set_input_layer_name(labelLayer->getName());

  LayerPtr layer = LayerPtr(new WarpCTCLayer(layerConfig));
  layerMap[layer->getName()] = layer;
  layer->init(layerMap, parameterMap);

  layer->forward(PASS_GC);
  layer->backward();

  return layer;
}
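The two helpers above are built so that their results can be compared: both wire a data layer and a label layer into a CTC-style cost and both run forward and backward before returning. A comparison driver might look like the sketch below. Everything in it is an assumption: checkError is a hypothetical approximate-equality helper, each layer is given its own copy of the inputs because createCTCLayer applies the softmax activation to its data layer's output in place, and reconciling the differing blank-index conventions of the two layers is left to the real test.

// Hypothetical comparison of the "ctc" and "warp_ctc" layers (a sketch;
// the input-layer construction and checkError are not from this article).
void sketchCompareCTCAndWarpCTC(LayerPtr dataLayer1, LayerPtr labelLayer1,
                                LayerPtr dataLayer2, LayerPtr labelLayer2,
                                size_t numClasses, bool useGpu) {
  LayerPtr ctc = createCTCLayer(
      "cost", numClasses, useGpu, /* normByTimes */ false, dataLayer1, labelLayer1);
  LayerPtr warpctc = createWarpCTCLayer(
      "cost", numClasses, useGpu, /* normByTimes */ false, dataLayer2, labelLayer2);
  // Both layers emit one cost per sequence; with matching inputs the two
  // implementations are expected to agree up to numerical tolerance.
  checkError(*(ctc->getOutput().value), *(warpctc->getOutput().value));
}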
Example 5: initDataLayer
// Test that the convTrans forward is the same as conv backward
TEST(Layer, convTransLayerFwd) {
  // Setting up conv-trans layer
  TestConfig configt;
  configt.biasSize = 3;
  configt.layerConfig.set_type("exconvt");
  configt.layerConfig.set_num_filters(3);
  configt.layerConfig.set_partial_sum(1);
  configt.layerConfig.set_shared_biases(true);
  configt.inputDefs.push_back({INPUT_DATA, "layer_0", 1024, 384});
  LayerInputConfig* input = configt.layerConfig.add_inputs();
  ConvConfig* conv = input->mutable_conv_conf();
  conv->set_filter_size(2);
  conv->set_filter_size_y(4);
  conv->set_channels(16);
  conv->set_padding(0);
  conv->set_padding_y(1);
  conv->set_stride(2);
  conv->set_stride_y(2);
  conv->set_groups(1);
  conv->set_filter_channels(3 / conv->groups());
  conv->set_img_size(16);
  conv->set_output_x(outputSize(conv->img_size(), conv->filter_size(),
                                conv->padding(), conv->stride(),
                                /* caffeMode */ true));
  configt.layerConfig.set_size(conv->img_size() * conv->img_size() *
                               configt.layerConfig.num_filters());
  configt.layerConfig.set_name("convTrans");

  // data layer initialize
  std::vector<DataLayerPtr> dataLayers;
  LayerMap layerMap;
  vector<Argument> datas;
  initDataLayer(configt, &dataLayers, &datas, &layerMap, "convTrans",
                100, false, false);
  // test layer initialize
  std::vector<ParameterPtr> parameters;
  LayerPtr convtLayer;
  initTestLayer(configt, &layerMap, &parameters, &convtLayer);
  convtLayer->getBiasParameter()->zeroMem();
  convtLayer->forward(PASS_GC);

  // Setting up conv-layer config
  TestConfig config;
  config.biasSize = 16;
  config.layerConfig.set_type("exconv");
  config.layerConfig.set_num_filters(16);
  config.layerConfig.set_partial_sum(1);
  config.layerConfig.set_shared_biases(true);
  config.inputDefs.push_back({INPUT_DATA, "layer_1", 768, 384});
  input = config.layerConfig.add_inputs();
  conv = input->mutable_conv_conf();
  conv->set_filter_size(2);
  conv->set_filter_size_y(4);
  conv->set_channels(3);
  conv->set_padding(0);
  conv->set_padding_y(1);
  conv->set_stride(2);
  conv->set_stride_y(2);
  conv->set_groups(1);
  conv->set_filter_channels(conv->channels() / conv->groups());
  conv->set_img_size(16);
  conv->set_output_x(outputSize(conv->img_size(), conv->filter_size(),
                                conv->padding(), conv->stride(),
                                /* caffeMode */ true));
  config.layerConfig.set_size(conv->output_x() * conv->output_x() *
                              config.layerConfig.num_filters());
  config.layerConfig.set_name("conv");

  // data layer initialize
  std::vector<DataLayerPtr> dataLayers2;
  LayerMap layerMap2;
  vector<Argument> datas2;
  initDataLayer(config, &dataLayers2, &datas2, &layerMap2, "conv",
                100, false, false);
  // test layer initialize
  std::vector<ParameterPtr> parameters2;
  LayerPtr convLayer;
  initTestLayer(config, &layerMap2, &parameters2, &convLayer);

  // Sync convLayer and convtLayer parameter
  convLayer->getBiasParameter()->zeroMem();
  convLayer->getParameters()[0]->getBuf(PARAMETER_VALUE)->copyFrom(
      *(convtLayer->getParameters()[0]->getBuf(PARAMETER_VALUE)));

  // Set convLayer outputGrad as convTransLayer input value
  convLayer->forward(PASS_GC);
  convLayer->getOutput().grad->copyFrom(*(dataLayers[0]->getOutputValue()));

  vector<int> callbackFlags(parameters2.size(), 0);
  auto callback = [&](Parameter* para) { ++callbackFlags[para->getID()]; };
  convLayer->backward(callback);

  // Check that the convLayer backward is the same as convTransLayer forward
  checkMatrixEqual(convtLayer->getOutputValue(),
                   dataLayers2[0]->getOutputGrad());
}
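The outputSize call used when configuring both layers is the usual convolution output-size arithmetic. The sketch below illustrates the caffeMode variant it is invoked with; the real helper lives in the PaddlePaddle test utilities, so treat this as an illustration under that assumption rather than its actual implementation.

// Illustration of the caffeMode output-size formula assumed above.
size_t sketchOutputSize(size_t imageSize, size_t filterSize,
                        size_t padding, size_t stride) {
  // caffeMode: floor((imageSize + 2 * padding - filterSize) / stride) + 1
  return (imageSize + 2 * padding - filterSize) / stride + 1;
}
// For the configs above this gives (16 + 0 - 2) / 2 + 1 = 8 along x and
// (16 + 2 - 4) / 2 + 1 = 8 along y, so the conv layer produces an
// 8 x 8 x 16 = 1024-value output. That matches the 1024-wide input of the
// convTrans layer, which is what lets the test compare convTrans forward
// against conv backward.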