This article collects typical usage examples of the C++ method LayerParameter::convolution_param. If you are wondering what LayerParameter::convolution_param does, how to call it, or what real uses look like, the curated examples below may help. You can also explore further usage of the class that declares this method, LayerParameter.
The following shows 7 code examples of the LayerParameter::convolution_param method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
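For orientation before the examples: convolution_param() is the protobuf-generated accessor on LayerParameter that returns the embedded ConvolutionParameter message, from which settings such as the engine and dilation factors are read. The snippet below is a minimal sketch of that pattern, assuming Caffe's generated accessors where dilation is a repeated field (as in the examples that follow); the helper name HasDilation is illustrative and not part of Caffe.

#include "caffe/proto/caffe.pb.h"  // LayerParameter, ConvolutionParameter

// Illustrative helper (assumption, not part of Caffe): returns true if any
// dilation factor configured for this layer is greater than one.
bool HasDilation(const caffe::LayerParameter& param) {
  // convolution_param() returns the embedded ConvolutionParameter message.
  const caffe::ConvolutionParameter& conv_param = param.convolution_param();
  for (int i = 0; i < conv_param.dilation_size(); ++i) {
    if (conv_param.dilation(i) > 1) {
      return true;
    }
  }
  return false;
}

The factory functions below use exactly this kind of check to decide whether an accelerated engine such as cuDNN or MKL2017 can be selected.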
Example 1: GetDeconvolutionLayer
shared_ptr<Layer<Dtype> > GetDeconvolutionLayer(const LayerParameter& param) {
  ConvolutionParameter conv_param = param.convolution_param();
  ConvolutionParameter_Engine engine = conv_param.engine();
#ifdef USE_CUDNN
  // cuDNN cannot handle dilated deconvolution, so detect dilation up front.
  bool use_dilation = false;
  for (int i = 0; i < conv_param.dilation_size(); ++i) {
    if (conv_param.dilation(i) > 1) {
      use_dilation = true;
    }
  }
#endif
  if (engine == ConvolutionParameter_Engine_DEFAULT) {
    engine = ConvolutionParameter_Engine_CAFFE;
#ifdef USE_CUDNN
    if (!use_dilation) {
      engine = ConvolutionParameter_Engine_CUDNN;
    }
#endif
  }
  if (engine == ConvolutionParameter_Engine_CAFFE) {
    return shared_ptr<Layer<Dtype> >(new DeconvolutionLayer<Dtype>(param));
#ifdef USE_CUDNN
  } else if (engine == ConvolutionParameter_Engine_CUDNN) {
    if (use_dilation) {
      LOG(FATAL) << "CuDNN doesn't support the dilated deconvolution at Layer "
                 << param.name();
    }
    return shared_ptr<Layer<Dtype> >(new CuDNNDeconvolutionLayer<Dtype>(param));
#endif
  } else {
    LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
    throw;  // Avoids missing return warning
  }
}
Example 2: GetConvolutionLayer
shared_ptr<Layer<Dtype> > GetConvolutionLayer(const LayerParameter& param) {
  ConvolutionParameter_Engine engine = param.convolution_param().engine();
  if (engine == ConvolutionParameter_Engine_DEFAULT) {
    engine = ConvolutionParameter_Engine_CAFFE;
#ifdef USE_CUDNN
    engine = ConvolutionParameter_Engine_CUDNN;
#endif
  }
  // Fall back to the native CAFFE engine on OpenCL devices and for dilated
  // convolutions, which the cuDNN path does not handle.
  if (engine == ConvolutionParameter_Engine_CAFFE
      || Caffe::GetDevice(param.device(), true)->backend() == BACKEND_OpenCL
      || checkConvolutionDilated(param.convolution_param())) {
    return shared_ptr<Layer<Dtype> >(new ConvolutionLayer<Dtype>(param));
#ifdef USE_CUDNN
  } else if (engine == ConvolutionParameter_Engine_CUDNN) {
    if (checkConvolutionDilated(param.convolution_param())) {
      LOG(FATAL) << "CuDNN doesn't support the dilated convolution at Layer "
                 << param.name();
    }
    return shared_ptr<Layer<Dtype> >(new CuDNNConvolutionLayer<Dtype>(param));
#endif
  } else {
    LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
    throw;  // Avoids missing return warning
  }
}
Example 3: GetDeconvolutionLayer (MKL2017 engine)
shared_ptr<Layer<Dtype> > GetDeconvolutionLayer(const LayerParameter& param) {
  ConvolutionParameter conv_param = param.convolution_param();
  ConvolutionParameter_Engine engine = conv_param.engine();
#if defined(MKL2017_SUPPORTED)
  bool use_dilation = false;
  for (int i = 0; i < conv_param.dilation_size(); ++i) {
    if (conv_param.dilation(i) > 1) {
      use_dilation = true;
    }
  }
#endif
  // New, more flexible way of providing engine
  if (engine == ConvolutionParameter_Engine_DEFAULT && param.engine() != "") {
    EngineParser ep(param.engine());
    if (ep.isEngine("CAFFE")) {
      engine = ConvolutionParameter_Engine_CAFFE;
    }
#ifdef MKL2017_SUPPORTED
    else if (!use_dilation && ep.isEngine("MKL2017")) {
      engine = ConvolutionParameter_Engine_MKL2017;
    }
#endif
  }
  if (engine == ConvolutionParameter_Engine_DEFAULT) {
    engine = ConvolutionParameter_Engine_CAFFE;
  }
  if (engine == ConvolutionParameter_Engine_CAFFE) {
    return shared_ptr<Layer<Dtype> >(new DeconvolutionLayer<Dtype>(param));
#ifdef MKL2017_SUPPORTED
  } else if (engine == ConvolutionParameter_Engine_MKL2017) {
    if (use_dilation) {
      LOG(FATAL) << "MKL2017 doesn't support the dilated convolution at Layer "
                 << param.name();
    }
    return shared_ptr<Layer<Dtype> >(new MKLDeconvolutionLayer<Dtype>(param));
#endif
  } else {
    LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
  }
  return shared_ptr<Layer<Dtype> >();
}
Example 4: copy_trained_layer
bool ConvolutionLayer::copy_trained_layer(const LayerParameter& param) {
  // Nothing to copy if the source layer carries no trained blobs.
  if (param.blobs_size() < 1) return true;
  // Copy the weight blob, creating it if this layer does not have one yet.
  if (layer_param_.blobs_size() < 1)
    layer_param_.add_blobs()->CopyFrom(param.blobs(0));
  else
    layer_param_.mutable_blobs(0)->CopyFrom(param.blobs(0));
  // Copy the bias blob only if both layers actually use a bias term.
  if (layer_param_.convolution_param().bias_term() &&
      param.convolution_param().bias_term()) {
    if (layer_param_.blobs_size() < 2)
      layer_param_.add_blobs()->CopyFrom(param.blobs(1));
    else
      layer_param_.mutable_blobs(1)->CopyFrom(param.blobs(1));
  }
  // Re-initialize the layer with the copied parameters.
  initialized = false;
  init();
  return true;
}
Example 5: GetConvolutionLayer
shared_ptr<Layer<Dtype> > GetConvolutionLayer(const LayerParameter& param) {
  ConvolutionParameter_Engine engine = param.convolution_param().engine();
  if (engine == ConvolutionParameter_Engine_DEFAULT) {
    engine = ConvolutionParameter_Engine_CAFFE;
#ifdef USE_CUDNN
    engine = ConvolutionParameter_Engine_CUDNN;
#endif
  }
  if (engine == ConvolutionParameter_Engine_CAFFE) {
    return shared_ptr<Layer<Dtype> >(new ConvolutionLayer<Dtype>(param));
#ifdef USE_CUDNN
  } else if (engine == ConvolutionParameter_Engine_CUDNN) {
    return shared_ptr<Layer<Dtype> >(new CuDNNConvolutionLayer<Dtype>(param));
#endif
  } else {
    LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
    throw;  // Avoids missing return warning
  }
}
Example 6: GetConvolutionLayer
shared_ptr<Layer<Dtype> > GetConvolutionLayer(const LayerParameter& param) {
  ConvolutionParameter_Engine engine = param.convolution_param().engine();
  if (engine == ConvolutionParameter_Engine_DEFAULT) {
    engine = ConvolutionParameter_Engine_CAFFE;
#ifdef USE_CUDNN
    engine = ConvolutionParameter_Engine_CUDNN;
#endif
  }
  // OpenCL devices always use the native CAFFE implementation.
  if (engine == ConvolutionParameter_Engine_CAFFE
      || Caffe::GetDefaultDeviceContext()->backend() == BACKEND_OpenCL) {
    return shared_ptr<Layer<Dtype> >(new ConvolutionLayer<Dtype>(param));
#ifdef USE_CUDNN
  } else if (engine == ConvolutionParameter_Engine_CUDNN) {
    return shared_ptr<Layer<Dtype> >(new CuDNNConvolutionLayer<Dtype>(param));
#endif
  } else {
    LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
    throw;  // Avoids missing return warning
  }
}
Example 7: GetConvolutionLayer
shared_ptr<Layer<Dtype> > GetConvolutionLayer(const LayerParameter& param) {
  ConvolutionParameter conv_param = param.convolution_param();
  ConvolutionParameter_Engine engine = conv_param.engine();
#if defined(USE_CUDNN) || defined(USE_MKL2017_AS_DEFAULT_ENGINE)
  // Accelerated engines cannot handle dilation, so detect it up front.
  bool use_dilation = false;
  for (int i = 0; i < conv_param.dilation_size(); ++i) {
    if (conv_param.dilation(i) > 1) {
      use_dilation = true;
    }
  }
#endif
  if (engine == ConvolutionParameter_Engine_DEFAULT) {
    engine = ConvolutionParameter_Engine_CAFFE;
#ifdef USE_CUDNN
    if (!use_dilation) {
      engine = ConvolutionParameter_Engine_CUDNN;
    }
#elif defined(USE_MKL2017_AS_DEFAULT_ENGINE)
    if (!use_dilation) {
      engine = ConvolutionParameter_Engine_MKL2017;
    }
#endif
  }
  if (engine == ConvolutionParameter_Engine_CAFFE) {
    return shared_ptr<Layer<Dtype> >(new ConvolutionLayer<Dtype>(param));
#ifdef USE_CUDNN
  } else if (engine == ConvolutionParameter_Engine_CUDNN) {
    if (use_dilation) {
      LOG(FATAL) << "CuDNN doesn't support the dilated convolution at Layer "
                 << param.name();
    }
    return shared_ptr<Layer<Dtype> >(new CuDNNConvolutionLayer<Dtype>(param));
#endif
#ifdef MKL2017_SUPPORTED
  } else if (engine == ConvolutionParameter_Engine_MKL2017) {
    return shared_ptr<Layer<Dtype> >(new MKLConvolutionLayer<Dtype>(param));
#endif
  } else {
    LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
    throw;  // Avoids missing return warning
  }
}