This article collects typical usage examples of the Java class org.deeplearning4j.nn.conf.ConvolutionMode. If you are wondering what the ConvolutionMode class does, how to use it, and what real-world examples look like, the curated code examples below may help.
The ConvolutionMode class belongs to the org.deeplearning4j.nn.conf package. Fifteen code examples of the class are shown below, ordered by popularity.
Example 1: ConvolutionLayer
import org.deeplearning4j.nn.conf.ConvolutionMode; // import the required package/class
/** Constructor for setting some defaults. */
public ConvolutionLayer() {
setLayerName("Convolution layer");
setActivationFunction(new ActivationIdentity());
setLearningRate(Double.NaN);
setBiasLearningRate(Double.NaN);
setMomentum(Double.NaN);
setBiasInit(Double.NaN);
setAdamMeanDecay(Double.NaN);
setAdamVarDecay(Double.NaN);
setEpsilon(Double.NaN);
setRmsDecay(Double.NaN);
setL1(Double.NaN);
setL2(Double.NaN);
setRho(Double.NaN);
setGradientNormalization(null);
setGradientNormalizationThreshold(Double.NaN);
setConvolutionMode(ConvolutionMode.Truncate);
setKernelSize(new int[] {5, 5});
setStride(new int[] {1, 1});
setPadding(new int[] {0, 0});
this.cudnnAlgoMode = ConvolutionLayer.AlgoMode.PREFER_FASTEST;
}
Example 2: testTextCnnTextSingleConv
import org.deeplearning4j.nn.conf.ConvolutionMode; // import the required package/class
@Test
public void testTextCnnTextSingleConv() throws Exception {
CnnTextEmbeddingInstanceIterator cnnTextIter = new CnnTextEmbeddingInstanceIterator();
cnnTextIter.setTrainBatchSize(64);
cnnTextIter.setWordVectorLocation(DatasetLoader.loadGoogleNewsVectors());
clf.setInstanceIterator(cnnTextIter);
// Alternatively, look the vector size up from the embedding itself:
// final WordVectors wordVectors = cnnTextIter.getWordVectors();
// int vectorSize = wordVectors.getWordVector(wordVectors.vocab().wordAtIndex(0)).length;
int vectorSize = 300; // dimensionality of the GoogleNews word vectors
ConvolutionLayer conv1 = new ConvolutionLayer();
conv1.setKernelSize(new int[] {4, vectorSize});
conv1.setNOut(10);
conv1.setStride(new int[] {1, vectorSize});
conv1.setConvolutionMode(ConvolutionMode.Same);
conv1.setDropOut(0.2);
conv1.setActivationFn(new ActivationReLU());
GlobalPoolingLayer gpl = new GlobalPoolingLayer();
OutputLayer out = new OutputLayer();
clf.setLayers(conv1, gpl, out);
clf.setCacheMode(CacheMode.MEMORY);
final Instances data = DatasetLoader.loadAnger();
TestUtil.holdout(clf, data);
}
Example 3: getConvolutionModeFromConfig
import org.deeplearning4j.nn.conf.ConvolutionMode; // import the required package/class
/**
* Get convolution border mode from Keras layer configuration.
*
* @param layerConfig dictionary containing Keras layer configuration
* @param conf        KerasLayerConfiguration providing the field names for the Keras version in use
* @return DL4J ConvolutionMode corresponding to the Keras border mode
* @throws InvalidKerasConfigurationException if the border mode field is missing from the configuration
* @throws UnsupportedKerasConfigurationException if the border mode value is not supported
*/
public static ConvolutionMode getConvolutionModeFromConfig(Map<String, Object> layerConfig,
KerasLayerConfiguration conf)
throws InvalidKerasConfigurationException, UnsupportedKerasConfigurationException {
Map<String, Object> innerConfig = KerasLayerUtils.getInnerLayerConfigFromConfig(layerConfig, conf);
if (!innerConfig.containsKey(conf.getLAYER_FIELD_BORDER_MODE()))
throw new InvalidKerasConfigurationException("Could not determine convolution border mode: no "
+ conf.getLAYER_FIELD_BORDER_MODE() + " field found");
String borderMode = (String) innerConfig.get(conf.getLAYER_FIELD_BORDER_MODE());
ConvolutionMode convolutionMode = null;
if (borderMode.equals(conf.getLAYER_BORDER_MODE_SAME())) {
/* Keras relies upon the Theano and TensorFlow border mode definitions and operations:
* TH: http://deeplearning.net/software/theano/library/tensor/nnet/conv.html#theano.tensor.nnet.conv.conv2d
* TF: https://www.tensorflow.org/api_docs/python/nn/convolution#conv2d
*/
convolutionMode = ConvolutionMode.Same;
} else if (borderMode.equals(conf.getLAYER_BORDER_MODE_VALID()) ||
borderMode.equals(conf.getLAYER_BORDER_MODE_FULL())) {
convolutionMode = ConvolutionMode.Truncate;
} else {
throw new UnsupportedKerasConfigurationException("Unsupported convolution border mode: " + borderMode);
}
return convolutionMode;
}
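The mapping above collapses Keras' three border modes into two DL4J modes: "same" maps to ConvolutionMode.Same, while "valid" and "full" both map to ConvolutionMode.Truncate. Below is a minimal sketch of how the method could be exercised, mirroring the layerConfig construction used in Example 4; the helper name mapBorderMode is hypothetical, and the field-name getters come from the supplied KerasLayerConfiguration, so the sketch is not tied to a specific Keras version.
// Sketch (hypothetical helper): builds the minimal layerConfig map that
// getConvolutionModeFromConfig expects and returns the mapped DL4J mode.
static ConvolutionMode mapBorderMode(KerasLayerConfiguration conf, Integer kerasVersion, String borderMode)
        throws InvalidKerasConfigurationException, UnsupportedKerasConfigurationException {
    Map<String, Object> config = new HashMap<>();
    config.put(conf.getLAYER_FIELD_BORDER_MODE(), borderMode);
    Map<String, Object> layerConfig = new HashMap<>();
    layerConfig.put(conf.getLAYER_FIELD_CONFIG(), config);
    layerConfig.put(conf.getLAYER_FIELD_KERAS_VERSION(), kerasVersion);
    return getConvolutionModeFromConfig(layerConfig, conf);
}
// mapBorderMode(conf, 2, conf.getLAYER_BORDER_MODE_SAME())  -> ConvolutionMode.Same
// mapBorderMode(conf, 2, conf.getLAYER_BORDER_MODE_VALID()) -> ConvolutionMode.Truncate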
Example 4: buildPooling2DLayer
import org.deeplearning4j.nn.conf.ConvolutionMode; // import the required package/class
void buildPooling2DLayer(KerasLayerConfiguration conf, Integer kerasVersion) throws Exception {
Map<String, Object> layerConfig = new HashMap<String, Object>();
layerConfig.put(conf.getLAYER_FIELD_CLASS_NAME(), conf.getLAYER_CLASS_NAME_MAX_POOLING_2D());
Map<String, Object> config = new HashMap<>();
config.put(conf.getLAYER_FIELD_NAME(), LAYER_NAME);
List<Integer> kernelSizeList = new ArrayList<>();
kernelSizeList.add(KERNEL_SIZE[0]);
kernelSizeList.add(KERNEL_SIZE[1]);
config.put(conf.getLAYER_FIELD_POOL_SIZE(), kernelSizeList);
List<Integer> subsampleList = new ArrayList<>();
subsampleList.add(STRIDE[0]);
subsampleList.add(STRIDE[1]);
config.put(conf.getLAYER_FIELD_POOL_STRIDES(), subsampleList);
config.put(conf.getLAYER_FIELD_BORDER_MODE(), BORDER_MODE_VALID);
layerConfig.put(conf.getLAYER_FIELD_CONFIG(), config);
layerConfig.put(conf.getLAYER_FIELD_KERAS_VERSION(), kerasVersion);
SubsamplingLayer layer = new KerasPooling2D(layerConfig).getSubsampling2DLayer();
assertEquals(LAYER_NAME, layer.getLayerName());
assertArrayEquals(KERNEL_SIZE, layer.getKernelSize());
assertArrayEquals(STRIDE, layer.getStride());
assertEquals(POOLING_TYPE, layer.getPoolingType());
assertEquals(ConvolutionMode.Truncate, layer.getConvolutionMode());
assertArrayEquals(VALID_PADDING, layer.getPadding());
}
Example 5: getOriginalNet
import org.deeplearning4j.nn.conf.ConvolutionMode; // import the required package/class
public static MultiLayerNetwork getOriginalNet(int seed){
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
.seed(seed)
.weightInit(WeightInit.XAVIER)
.activation(Activation.TANH)
.convolutionMode(ConvolutionMode.Same)
.updater(new Sgd(0.3))
.list()
.layer(new ConvolutionLayer.Builder().nOut(3).kernelSize(2,2).stride(1,1).build())
.layer(new SubsamplingLayer.Builder().kernelSize(2,2).stride(1,1).build())
.layer(new ConvolutionLayer.Builder().nIn(3).nOut(3).kernelSize(2,2).stride(1,1).build())
.layer(new DenseLayer.Builder().nOut(64).build())
.layer(new DenseLayer.Builder().nIn(64).nOut(64).build())
.layer(new OutputLayer.Builder().nIn(64).nOut(10).lossFunction(LossFunctions.LossFunction.MSE).build())
.setInputType(InputType.convolutionalFlat(28,28,1))
.build();
MultiLayerNetwork net = new MultiLayerNetwork(conf);
net.init();
return net;
}
Example 6: getOriginalGraph
import org.deeplearning4j.nn.conf.ConvolutionMode; // import the required package/class
public static ComputationGraph getOriginalGraph(int seed){
ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
.seed(seed)
.weightInit(WeightInit.XAVIER)
.activation(Activation.TANH)
.convolutionMode(ConvolutionMode.Same)
.updater(new Sgd(0.3))
.graphBuilder()
.addInputs("in")
.layer("0", new ConvolutionLayer.Builder().nOut(3).kernelSize(2,2).stride(1,1).build(), "in")
.layer("1", new SubsamplingLayer.Builder().kernelSize(2,2).stride(1,1).build(), "0")
.layer("2", new ConvolutionLayer.Builder().nIn(3).nOut(3).kernelSize(2,2).stride(1,1).build(), "1")
.layer("3", new DenseLayer.Builder().nOut(64).build(), "2")
.layer("4", new DenseLayer.Builder().nIn(64).nOut(64).build(), "3")
.layer("5", new OutputLayer.Builder().nIn(64).nOut(10).lossFunction(LossFunctions.LossFunction.MSE).build(), "4")
.setOutputs("5")
.setInputTypes(InputType.convolutionalFlat(28,28,1))
.build();
ComputationGraph net = new ComputationGraph(conf);
net.init();
return net;
}
Example 7: SubsamplingLayer
import org.deeplearning4j.nn.conf.ConvolutionMode; // import the required package/class
/** Constructor for setting some defaults. */
public SubsamplingLayer() {
setLayerName("Subsampling layer");
setConvolutionMode(ConvolutionMode.Truncate);
setKernelSize(new int[] {1, 1});
setStride(new int[] {2, 2});
setPadding(new int[] {0, 0});
setPoolingType(org.deeplearning4j.nn.conf.layers.PoolingType.MAX);
setEps(1e-8);
setPnorm(1);
}
Example 8: mode
import org.deeplearning4j.nn.conf.ConvolutionMode; // import the required package/class
@OptionMetadata(
displayName = "convolution mode",
description = "The convolution mode (default = Truncate).",
commandLineParamName = "mode",
commandLineParamSynopsis = "-mode <string>",
displayOrder = 1
)
public ConvolutionMode getConvolutionMode() {
return this.convolutionMode;
}
Example 9: mode
import org.deeplearning4j.nn.conf.ConvolutionMode; // import the required package/class
@OptionMetadata(
displayName = "convolution mode",
description = "The convolution mode (default = Truncate).",
commandLineParamName = "mode",
commandLineParamSynopsis = "-mode <string>",
displayOrder = 2
)
public ConvolutionMode getConvolutionMode() {
return this.convolutionMode;
}
Example 10: validateLayers
import org.deeplearning4j.nn.conf.ConvolutionMode; // import the required package/class
/**
* Validate whether the layers comply with the currently chosen instance iterator
*
* @param layers New set of layers
*/
protected void validateLayers(Layer[] layers) throws InvalidNetworkArchitectureException {
// Check if the layers contain convolution/subsampling
Set<Layer> layerSet = new HashSet<>(Arrays.asList(layers));
final boolean containsConvLayer = layerSet.stream().anyMatch(this::isNDLayer);
final boolean isConvIterator = getInstanceIterator() instanceof Convolutional;
if (containsConvLayer && !isConvIterator) {
throw new InvalidNetworkArchitectureException(
"A convolution/subsampling layer was set using "
+ "the wrong instance iterator. Please select either "
+ "ImageInstanceIterator for image files or "
+ "ConvolutionInstanceIterator for ARFF files.");
}
// Check if conv layers have ConvolutionMode.Same for CnnTextEmbeddingInstanceIterator
if (getInstanceIterator() instanceof CnnTextEmbeddingInstanceIterator){
for (Layer l : layerSet){
if (l instanceof ConvolutionLayer) {
final ConvolutionLayer conv = (ConvolutionLayer) l;
boolean correctMode = conv.getConvolutionMode().equals(ConvolutionMode.Same);
if (!correctMode){
throw new RuntimeException(
"CnnText iterators require ConvolutionMode.Same for all ConvolutionLayer. Layer "
+ conv.getLayerName() + " has ConvolutionMode: " + conv.getConvolutionMode()
);
}
}
}
// Check that layers start with convolution
if (layers.length > 0 && !(layers[0] instanceof ConvolutionLayer)){
throw new InvalidNetworkArchitectureException("CnnText iterator requires ConvolutionLayer.");
}
}
}
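As a sketch of what the CnnText check above enforces (assuming, as in the other examples, that clf is the surrounding classifier and that validateLayers runs when the layers are assigned), a convolution layer left at its default ConvolutionMode.Truncate from Example 1 would be rejected, so the mode has to be switched to Same before the layers are set:
// Sketch: with a CnnTextEmbeddingInstanceIterator set on the classifier,
// every ConvolutionLayer must use ConvolutionMode.Same.
CnnTextEmbeddingInstanceIterator iter = new CnnTextEmbeddingInstanceIterator();
clf.setInstanceIterator(iter);
ConvolutionLayer conv = new ConvolutionLayer();  // defaults to ConvolutionMode.Truncate (see Example 1)
conv.setConvolutionMode(ConvolutionMode.Same);   // required by CnnText iterators; leaving Truncate fails validation
clf.setLayers(conv, new GlobalPoolingLayer(), new OutputLayer());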
Example 11: testTextCnnTextFilesRegression
import org.deeplearning4j.nn.conf.ConvolutionMode; // import the required package/class
@Test
public void testTextCnnTextFilesRegression() throws Exception {
CnnTextFilesEmbeddingInstanceIterator cnnTextIter = new CnnTextFilesEmbeddingInstanceIterator();
cnnTextIter.setTrainBatchSize(64);
cnnTextIter.setWordVectorLocation(DatasetLoader.loadGoogleNewsVectors());
cnnTextIter.setTextsLocation(DatasetLoader.loadAngerFilesDir());
clf.setInstanceIterator(cnnTextIter);
cnnTextIter.initialize();
final WordVectors wordVectors = cnnTextIter.getWordVectors();
int vectorSize = wordVectors.getWordVector(wordVectors.vocab().wordAtIndex(0)).length;
ConvolutionLayer conv1 = new ConvolutionLayer();
conv1.setKernelSize(new int[] {3, vectorSize});
conv1.setNOut(10);
conv1.setStride(new int[] {1, vectorSize});
conv1.setConvolutionMode(ConvolutionMode.Same);
ConvolutionLayer conv2 = new ConvolutionLayer();
conv2.setKernelSize(new int[] {2, vectorSize});
conv2.setNOut(10);
conv2.setStride(new int[] {1, vectorSize});
conv2.setConvolutionMode(ConvolutionMode.Same);
GlobalPoolingLayer gpl = new GlobalPoolingLayer();
OutputLayer out = new OutputLayer();
out.setLossFn(new LossMSE());
out.setActivationFn(new ActivationIdentity());
clf.setLayers(conv1, conv2, gpl, out);
clf.setCacheMode(CacheMode.MEMORY);
final Instances data = DatasetLoader.loadAngerMeta();
TestUtil.holdout(clf, data);
}
Example 12: testTextCnnTextFilesClassification
import org.deeplearning4j.nn.conf.ConvolutionMode; // import the required package/class
@Test
public void testTextCnnTextFilesClassification() throws Exception {
CnnTextFilesEmbeddingInstanceIterator cnnTextIter = new CnnTextFilesEmbeddingInstanceIterator();
cnnTextIter.setTrainBatchSize(64);
cnnTextIter.setWordVectorLocation(DatasetLoader.loadGoogleNewsVectors());
cnnTextIter.setTextsLocation(DatasetLoader.loadAngerFilesDir());
clf.setInstanceIterator(cnnTextIter);
cnnTextIter.initialize();
final WordVectors wordVectors = cnnTextIter.getWordVectors();
int vectorSize = wordVectors.getWordVector(wordVectors.vocab().wordAtIndex(0)).length;
ConvolutionLayer conv1 = new ConvolutionLayer();
conv1.setKernelSize(new int[] {4, vectorSize});
conv1.setNOut(10);
conv1.setStride(new int[] {1, vectorSize});
conv1.setConvolutionMode(ConvolutionMode.Same);
conv1.setDropOut(0.2);
conv1.setActivationFn(new ActivationReLU());
ConvolutionLayer conv2 = new ConvolutionLayer();
conv2.setKernelSize(new int[] {3, vectorSize});
conv2.setNOut(10);
conv2.setStride(new int[] {1, vectorSize});
conv2.setConvolutionMode(ConvolutionMode.Same);
conv2.setDropOut(0.2);
conv2.setActivationFn(new ActivationReLU());
GlobalPoolingLayer gpl = new GlobalPoolingLayer();
gpl.setDropOut(0.33);
OutputLayer out = new OutputLayer();
clf.setLayers(conv1, conv2, gpl, out);
clf.setCacheMode(CacheMode.MEMORY);
final Instances data = DatasetLoader.loadAngerMetaClassification();
TestUtil.holdout(clf, data);
}
Example 13: getOutputSize
import org.deeplearning4j.nn.conf.ConvolutionMode; // import the required package/class
/**
* Get the output size (height/width) for the given input data and CNN configuration
*
* @param inputData Input data
* @param kernel Kernel size (height/width)
* @param strides Strides (height/width)
* @param padding Padding (height/width)
* @param convolutionMode Convolution mode (Same, Strict, Truncate)
* @param dilation Kernel dilation (height/width)
* @return Output size: int[2] with output height/width
*/
public static int[] getOutputSize(INDArray inputData, int[] kernel, int[] strides, int[] padding,
ConvolutionMode convolutionMode, int[] dilation) {
int inH = inputData.size(2);
int inW = inputData.size(3);
//Determine the effective kernel size, accounting for dilation
//http://deeplearning.net/software/theano/tutorial/conv_arithmetic.html#dilated-convolutions
int[] eKernel = effectiveKernelSize(kernel, dilation);
boolean atrous = (eKernel == kernel);
validateShapes(inputData, eKernel, strides, padding, convolutionMode, dilation, inH, inW, atrous);
if (convolutionMode == ConvolutionMode.Same) {
//'Same' padding mode:
//outH = ceil(inHeight / strideH) decimal division
//outW = ceil(inWidth / strideW) decimal division
//padHeightSum = ((outH - 1) * strideH + kH - inHeight)
//padTop = padHeightSum / 2 integer division
//padBottom = padHeightSum - padTop
//padWidthSum = ((outW - 1) * strideW + kW - inWidth)
//padLeft = padWidthSum / 2 integer division
//padRight = padWidthSum - padLeft
int outH = (int) Math.ceil(inH / ((double) strides[0]));
int outW = (int) Math.ceil(inW / ((double) strides[1]));
return new int[] {outH, outW};
}
int hOut = (inH - eKernel[0] + 2 * padding[0]) / strides[0] + 1;
int wOut = (inW - eKernel[1] + 2 * padding[1]) / strides[1] + 1;
return new int[] {hOut, wOut};
}
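A quick worked illustration of the two branches above, using plain arithmetic with assumed values (a 28x28 input, 3x3 kernel, stride 2, zero padding, no dilation); it does not call the method itself:
int inH = 28, inW = 28;                                 // input height/width
int kH = 3, kW = 3, sH = 2, sW = 2, padH = 0, padW = 0; // kernel, stride, padding
// Same mode: output size depends only on input size and stride
int sameOutH = (int) Math.ceil(inH / (double) sH);      // ceil(28 / 2) = 14
int sameOutW = (int) Math.ceil(inW / (double) sW);      // ceil(28 / 2) = 14
// Truncate mode: standard (in - kernel + 2 * padding) / stride + 1 formula
int truncOutH = (inH - kH + 2 * padH) / sH + 1;         // (28 - 3 + 0) / 2 + 1 = 13
int truncOutW = (inW - kW + 2 * padW) / sW + 1;         // (28 - 3 + 0) / 2 + 1 = 13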
Example 14: validateConvolutionModePadding
import org.deeplearning4j.nn.conf.ConvolutionMode; // import the required package/class
/**
* Check that the convolution mode is consistent with the padding specification
*
*/
public static void validateConvolutionModePadding(ConvolutionMode mode, int[] padding) {
if (mode == ConvolutionMode.Same) {
boolean nullPadding = true;
for (int i : padding){
if (i != 0) nullPadding = false;
}
if (!nullPadding)
throw new IllegalArgumentException("Padding cannot be used when using the `same' convolution mode");
}
}
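A short usage sketch of this check (the padding values are illustrative; it assumes the method is statically imported or called from within the same utility class):
validateConvolutionModePadding(ConvolutionMode.Truncate, new int[] {1, 1}); // fine: explicit padding is allowed
validateConvolutionModePadding(ConvolutionMode.Same, new int[] {0, 0});     // fine: zero padding is a no-op
validateConvolutionModePadding(ConvolutionMode.Same, new int[] {1, 1});     // throws IllegalArgumentException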
Example 15: getNet1
import org.deeplearning4j.nn.conf.ConvolutionMode; // import the required package/class
private MultiLayerNetwork getNet1(boolean train) {
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
.convolutionMode(ConvolutionMode.Same)
.activation(Activation.TANH)
.weightInit(WeightInit.XAVIER)
.updater(new Sgd(0.1))
.list()
.layer(new ConvolutionLayer.Builder().nIn(3).nOut(5).kernelSize(2, 2).stride(1, 1).build())
.layer(new SubsamplingLayer.Builder().kernelSize(2, 2).stride(1, 1).build())
.layer(new DenseLayer.Builder().nOut(32).build())
.layer(new OutputLayer.Builder().nOut(10).lossFunction(LossFunctions.LossFunction.MSE).build())
.setInputType(InputType.convolutional(10, 10, 3))
.build();
MultiLayerNetwork net = new MultiLayerNetwork(conf);
net.init();
if(train) {
for (int i = 0; i < 3; i++) {
INDArray f = Nd4j.rand(new int[]{8, 3, 10, 10});
INDArray l = Nd4j.rand(8, 10);
net.fit(f, l);
}
}
return net;
}