本文整理汇总了Java中org.deeplearning4j.nn.conf.ConvolutionMode.Same方法的典型用法代码示例。如果您正苦于以下问题:Java ConvolutionMode.Same方法的具体用法?Java ConvolutionMode.Same怎么用?Java ConvolutionMode.Same使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.deeplearning4j.nn.conf.ConvolutionMode
的用法示例。
在下文中一共展示了ConvolutionMode.Same方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getConvolutionModeFromConfig
import org.deeplearning4j.nn.conf.ConvolutionMode; //导入方法依赖的package包/类
/**
 * Get convolution border mode from Keras layer configuration.
 *
 * @param layerConfig dictionary containing Keras layer configuration
 * @param conf        Keras layer configuration field names (for the Keras version in use)
 * @return DL4J {@link ConvolutionMode} corresponding to the Keras border mode
 * @throws InvalidKerasConfigurationException     if the border mode field is missing from the config
 * @throws UnsupportedKerasConfigurationException if the border mode value is not supported
 */
public static ConvolutionMode getConvolutionModeFromConfig(Map<String, Object> layerConfig,
                KerasLayerConfiguration conf)
                throws InvalidKerasConfigurationException, UnsupportedKerasConfigurationException {
    Map<String, Object> innerConfig = KerasLayerUtils.getInnerLayerConfigFromConfig(layerConfig, conf);
    if (!innerConfig.containsKey(conf.getLAYER_FIELD_BORDER_MODE()))
        throw new InvalidKerasConfigurationException("Could not determine convolution border mode: no "
                        + conf.getLAYER_FIELD_BORDER_MODE() + " field found");
    String borderMode = (String) innerConfig.get(conf.getLAYER_FIELD_BORDER_MODE());
    if (borderMode.equals(conf.getLAYER_BORDER_MODE_SAME())) {
        /* Keras relies upon the Theano and TensorFlow border mode definitions and operations:
         * TH: http://deeplearning.net/software/theano/library/tensor/nnet/conv.html#theano.tensor.nnet.conv.conv2d
         * TF: https://www.tensorflow.org/api_docs/python/nn/convolution#conv2d
         */
        return ConvolutionMode.Same;
    }
    if (borderMode.equals(conf.getLAYER_BORDER_MODE_VALID())
                    || borderMode.equals(conf.getLAYER_BORDER_MODE_FULL())) {
        // Both Keras 'valid' and 'full' map to DL4J's Truncate mode
        return ConvolutionMode.Truncate;
    }
    throw new UnsupportedKerasConfigurationException("Unsupported convolution border mode: " + borderMode);
}
示例2: getOutputSize
import org.deeplearning4j.nn.conf.ConvolutionMode; //导入方法依赖的package包/类
/**
 * Get the output size (height/width) for the given input data and CNN configuration
 *
 * @param inputData       Input data
 * @param kernel          Kernel size (height/width)
 * @param strides         Strides (height/width)
 * @param padding         Padding (height/width); not used in Same mode
 * @param convolutionMode Convolution mode (Same, Strict, Truncate)
 * @param dilation        Kernel dilation (height/width)
 * @return Output size: int[2] with output height/width
 */
public static int[] getOutputSize(INDArray inputData, int[] kernel, int[] strides, int[] padding,
                ConvolutionMode convolutionMode, int[] dilation) {
    int inH = inputData.size(2);
    int inW = inputData.size(3);

    // Effective kernel size accounts for dilation:
    // http://deeplearning.net/software/theano/tutorial/conv_arithmetic.html#dilated-convolutions
    int[] eKernel = effectiveKernelSize(kernel, dilation);
    // NOTE(review): reference (==) comparison kept from the original — presumably
    // effectiveKernelSize returns the very same array when dilation is 1; confirm before changing
    boolean atrous = (eKernel == kernel);
    validateShapes(inputData, eKernel, strides, padding, convolutionMode, dilation, inH, inW, atrous);

    if (convolutionMode == ConvolutionMode.Same) {
        // 'Same' padding mode: out = ceil(in / stride), decimal division.
        // The implied total padding is ((out - 1) * stride + kernel - in), split as
        // padTop = padHeightSum / 2 (integer division), padBottom = padHeightSum - padTop
        // (and analogously padLeft/padRight for width).
        int sameModeH = (int) Math.ceil(inH / ((double) strides[0]));
        int sameModeW = (int) Math.ceil(inW / ((double) strides[1]));
        return new int[] {sameModeH, sameModeW};
    }

    // Standard formula: out = (in - effectiveKernel + 2*padding) / stride + 1
    int outHeight = (inH - eKernel[0] + 2 * padding[0]) / strides[0] + 1;
    int outWidth = (inW - eKernel[1] + 2 * padding[1]) / strides[1] + 1;
    return new int[] {outHeight, outWidth};
}
示例3: validateConvolutionModePadding
import org.deeplearning4j.nn.conf.ConvolutionMode; //导入方法依赖的package包/类
/**
 * Check that the convolution mode is consistent with the padding specification.
 * In {@code Same} mode padding is derived automatically, so any explicit non-zero
 * padding is an error; all other modes accept any padding.
 *
 * @param mode    convolution mode in use
 * @param padding explicit padding values (height/width)
 * @throws IllegalArgumentException if non-zero padding is combined with Same mode
 */
public static void validateConvolutionModePadding(ConvolutionMode mode, int[] padding) {
    if (mode != ConvolutionMode.Same) {
        return; // only Same mode constrains padding
    }
    for (int p : padding) {
        if (p != 0) {
            throw new IllegalArgumentException("Padding cannot be used when using the `same' convolution mode");
        }
    }
}
示例4: activate
import org.deeplearning4j.nn.conf.ConvolutionMode; //导入方法依赖的package包/类
/**
 * Forward pass for this subsampling (pooling) layer.
 * Validates the rank-4 input [minibatchSize, depth, inputHeight, inputWidth], resolves
 * padding/output size for the configured convolution mode, delegates to a platform helper
 * (e.g. CuDNN) when one is available, and otherwise runs a LegacyPooling2D op.
 *
 * @param training whether this is a training-time call (enables input dropout)
 * @return pooled activations with shape [minibatchSize, depth, outH, outW]
 */
@Override
public INDArray activate(boolean training) {
// Apply input dropout at most once per forward pass, and only during training
if (training && !dropoutApplied && layerConf().getIDropout() != null) {
applyDropOutIfNecessary(true);
}
//Input validation: expect rank 4 matrix
if (input.rank() != 4) {
throw new DL4JInvalidInputException("Got rank " + input.rank()
+ " array as input to SubsamplingLayer with shape " + Arrays.toString(input.shape())
+ ". Expected rank 4 array with shape [minibatchSize, depth, inputHeight, inputWidth]. "
+ layerId());
}
int miniBatch = input.size(0);
int inDepth = input.size(1);
int inH = input.size(2);
int inW = input.size(3);
int[] kernel = layerConf().getKernelSize();
int[] strides = layerConf().getStride();
int[] dilation = layerConf().getDilation();
int[] pad;
int[] outSize;
if (convolutionMode == ConvolutionMode.Same) {
// Same mode ignores configured padding: output size depends only on the stride,
// and the (possibly asymmetric) padding is derived from that output size
outSize = ConvolutionUtils.getOutputSize(input, kernel, strides, null, convolutionMode, dilation); //Also performs validation
pad = ConvolutionUtils.getSameModeTopLeftPadding(outSize, new int[] {inH, inW}, kernel, strides, dilation);
} else {
pad = layerConf().getPadding();
outSize = ConvolutionUtils.getOutputSize(input, kernel, strides, pad, convolutionMode, dilation); //Also performs validation
}
int outH = outSize[0];
int outW = outSize[1];
// Try the platform-specific helper first; a null return means the helper could not
// handle this configuration and we fall through to the built-in implementation
if (helper != null) {
INDArray ret = helper.activate(input, training, kernel, strides, pad, layerConf().getPoolingType(),
convolutionMode, dilation);
if (ret != null) {
return ret;
}
}
//Similar to convolution layer forward pass: do im2col, but permute so that pooling can be done with efficient strides...
//Current im2col implementation expects input with shape [miniBatch,depth,kH,kW,outH,outW]
INDArray output = Nd4j.create(miniBatch, inDepth, outH, outW);
// Map the layer's pooling type onto the legacy op's enum; PNORM also carries
// the p value in the op's 'extra' argument
LegacyPooling2D.Pooling2DType pt;
double extra = 0.0;
switch (layerConf().getPoolingType()){
case MAX:
pt = LegacyPooling2D.Pooling2DType.MAX;
break;
case AVG:
pt = LegacyPooling2D.Pooling2DType.AVG;
break;
case PNORM:
pt = LegacyPooling2D.Pooling2DType.PNORM;
extra = layerConf().getPnorm();
break;
default:
throw new UnsupportedOperationException("Not supported: " + layerConf().getPoolingType());
}
// The boolean flag tells the op whether Same-mode padding semantics apply
Op op = new LegacyPooling2D(input, kernel[0], kernel[1], strides[0], strides[1], pad[0], pad[1], dilation[0], dilation[1],
convolutionMode == ConvolutionMode.Same, pt, extra, output);
Nd4j.getExecutioner().exec(op);
return output;
}
示例5: testGlobalLocalConfigCompGraph
import org.deeplearning4j.nn.conf.ConvolutionMode; //导入方法依赖的package包/类
/**
 * Checks convolution-mode inheritance in a ComputationGraph: layers without an explicit
 * convolutionMode pick up the global setting, while layer-level settings override it.
 */
@Test
public void testGlobalLocalConfigCompGraph() {
    for (ConvolutionMode globalMode : new ConvolutionMode[] {ConvolutionMode.Strict,
                    ConvolutionMode.Truncate, ConvolutionMode.Same}) {
        ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
                        .weightInit(WeightInit.XAVIER)
                        .convolutionMode(globalMode)
                        .graphBuilder()
                        .addInputs("in")
                        // Convolution layers "0".."3": inherited mode, then Strict/Truncate/Same overrides
                        .addLayer("0", new ConvolutionLayer.Builder().kernelSize(3, 3).stride(3, 3)
                                        .padding(0, 0).nIn(3).nOut(3).build(), "in")
                        .addLayer("1", new ConvolutionLayer.Builder().convolutionMode(ConvolutionMode.Strict)
                                        .kernelSize(3, 3).stride(3, 3).padding(0, 0).nIn(3).nOut(3).build(), "0")
                        .addLayer("2", new ConvolutionLayer.Builder().convolutionMode(ConvolutionMode.Truncate)
                                        .kernelSize(3, 3).stride(3, 3).padding(0, 0).nIn(3).nOut(3).build(), "1")
                        .addLayer("3", new ConvolutionLayer.Builder().convolutionMode(ConvolutionMode.Same)
                                        .kernelSize(3, 3).stride(3, 3).padding(0, 0).nIn(3).nOut(3).build(), "2")
                        // Subsampling layers "4".."7": same inherited/override pattern
                        .addLayer("4", new SubsamplingLayer.Builder().kernelSize(3, 3).stride(3, 3)
                                        .padding(0, 0).build(), "3")
                        .addLayer("5", new SubsamplingLayer.Builder().convolutionMode(ConvolutionMode.Strict)
                                        .kernelSize(3, 3).stride(3, 3).padding(0, 0).build(), "4")
                        .addLayer("6", new SubsamplingLayer.Builder().convolutionMode(ConvolutionMode.Truncate)
                                        .kernelSize(3, 3).stride(3, 3).padding(0, 0).build(), "5")
                        .addLayer("7", new SubsamplingLayer.Builder().convolutionMode(ConvolutionMode.Same)
                                        .kernelSize(3, 3).stride(3, 3).padding(0, 0).build(), "6")
                        .addLayer("8", new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT)
                                        .nOut(3).build(), "7")
                        .setOutputs("8")
                        .build();

        // Expected modes for convolution layers "0".."3" and subsampling layers "4".."7", in order
        ConvolutionMode[] expected = {globalMode, ConvolutionMode.Strict, ConvolutionMode.Truncate,
                        ConvolutionMode.Same};
        for (int i = 0; i < 4; i++) {
            ConvolutionLayer convLayer = (ConvolutionLayer) ((LayerVertex) conf.getVertices()
                            .get(String.valueOf(i))).getLayerConf().getLayer();
            assertEquals(expected[i], convLayer.getConvolutionMode());

            SubsamplingLayer subLayer = (SubsamplingLayer) ((LayerVertex) conf.getVertices()
                            .get(String.valueOf(i + 4))).getLayerConf().getLayer();
            assertEquals(expected[i], subLayer.getConvolutionMode());
        }
    }
}
示例6: activate
import org.deeplearning4j.nn.conf.ConvolutionMode; //导入方法依赖的package包/类
/**
 * CuDNN-accelerated forward pass for subsampling (pooling).
 * Returns null when this helper cannot handle the configuration (dilated pooling, or an
 * unsupported pooling type), signalling the caller to fall back to the built-in path.
 *
 * @param input           rank-4 input activations [minibatchSize, depth, inputHeight, inputWidth]
 * @param training        whether this is a training-time call (unused by the CuDNN pooling op itself)
 * @param kernel          pooling kernel size (height/width)
 * @param strides         strides (height/width)
 * @param pad             padding (height/width); recomputed internally for Same mode
 * @param poolingType     pooling type; only AVG and MAX are supported by this helper
 * @param convolutionMode convolution mode (Same, Strict, Truncate)
 * @param dilation        kernel dilation (height/width); must be {1,1} for CuDNN
 * @return pooled activations, or null if this helper cannot handle the configuration
 */
@Override
public INDArray activate(INDArray input, boolean training, int[] kernel, int[] strides, int[] pad,
PoolingType poolingType, ConvolutionMode convolutionMode, int[] dilation) {
if(dilation[0] != 1 || dilation[1] != 1){
//CuDNN doesn't support dilated subsampling
return null;
}
int miniBatch = input.size(0);
int inDepth = input.size(1);
int inH = input.size(2);
int inW = input.size(3);
int[] outSize;
if (convolutionMode == ConvolutionMode.Same) {
// Same mode: ignore the passed-in pad and derive it from the computed output size
outSize = ConvolutionUtils.getOutputSize(input, kernel, strides, null, convolutionMode, dilation); //Also performs validation
pad = ConvolutionUtils.getSameModeTopLeftPadding(outSize, new int[] {input.size(2), input.size(3)}, kernel,
strides, dilation);
} else {
outSize = ConvolutionUtils.getOutputSize(input, kernel, strides, pad, convolutionMode, dilation); //Also performs validation
}
int outH = outSize[0];
int outW = outSize[1];
// Map DL4J pooling type to the CuDNN pooling mode; anything else is unsupported here
int poolingMode;
switch (poolingType) {
case AVG:
poolingMode = CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING;
break;
case MAX:
poolingMode = CUDNN_POOLING_MAX;
break;
default:
return null;
}
// Flush any queued grid ops before touching device memory directly
if (Nd4j.getExecutioner() instanceof GridExecutioner)
((GridExecutioner) Nd4j.getExecutioner()).flushQueue();
int[] srcStride = input.stride();
checkCudnn(cudnnSetPooling2dDescriptor(cudnnContext.poolingDesc, poolingMode, CUDNN_PROPAGATE_NAN, kernel[0],
kernel[1], pad[0], pad[1], strides[0], strides[1]));
checkCudnn(cudnnSetTensor4dDescriptorEx(cudnnContext.srcTensorDesc, dataType, miniBatch, inDepth, inH, inW,
srcStride[0], srcStride[1], srcStride[2], srcStride[3]));
// Allocate the output inside the external workspace when one is active, so the result
// outlives this call; otherwise allocate normally
if (Nd4j.getWorkspaceManager().checkIfWorkspaceExistsAndActive(ComputationGraph.workspaceExternal)) {
try (MemoryWorkspace workspace = Nd4j.getWorkspaceManager()
.getWorkspaceForCurrentThread(ComputationGraph.workspaceExternal).notifyScopeBorrowed()) {
reduced = Nd4j.createUninitialized(new int[] {miniBatch, inDepth, outH, outW}, 'c');
}
} else
reduced = Nd4j.createUninitialized(new int[] {miniBatch, inDepth, outH, outW}, 'c');
int[] dstStride = reduced.stride();
checkCudnn(cudnnSetTensor4dDescriptorEx(cudnnContext.dstTensorDesc, dataType, miniBatch, inDepth, outH, outW,
dstStride[0], dstStride[1], dstStride[2], dstStride[3]));
// Resolve device pointers and bind the CuDNN handle to the current CUDA stream
Allocator allocator = AtomicAllocator.getInstance();
CudaContext context = allocator.getFlowController().prepareAction(input, reduced);
Pointer srcData = allocator.getPointer(input, context);
Pointer dstData = allocator.getPointer(reduced, context);
checkCudnn(cudnnSetStream(cudnnContext, new CUstream_st(context.getOldStream())));
checkCudnn(cudnnPoolingForward(cudnnContext, cudnnContext.poolingDesc, alpha, cudnnContext.srcTensorDesc,
srcData, beta, cudnnContext.dstTensorDesc, dstData));
// Record the async action so the allocator tracks the dependency between input and output
allocator.registerAction(context, reduced, input);
if (CudaEnvironment.getInstance().getConfiguration().isDebug())
context.syncOldStream(); // synchronous only in debug mode, for easier error attribution
return reduced;
}