本文整理汇总了Java中org.deeplearning4j.nn.conf.layers.ConvolutionLayer类的典型用法代码示例。如果您正苦于以下问题:Java ConvolutionLayer类的具体用法?Java ConvolutionLayer怎么用?Java ConvolutionLayer使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
ConvolutionLayer类属于org.deeplearning4j.nn.conf.layers包,在下文中一共展示了ConvolutionLayer类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getConfiguration
import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; //导入依赖的package包/类
@Override
protected MultiLayerConfiguration getConfiguration()
{
// Builds a minimal CNN: one 1x1 convolution followed by a softmax output layer.
final ConvulationalNetParameters params = (ConvulationalNetParameters) this.parameters;

// 1x1 convolution over the raw input size.
final ConvolutionLayer conv = new ConvolutionLayer.Builder(new int[] { 1, 1 })
        .nIn(params.getInputSize())
        .nOut(1000)
        .activation("relu")
        .weightInit(WeightInit.RELU)
        .build();

// Multi-class cross-entropy output with softmax activation.
final OutputLayer out = new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
        .nOut(params.getOutputSize())
        .weightInit(WeightInit.XAVIER)
        .activation("softmax")
        .build();

final MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder()
        .seed(params.getSeed())
        .iterations(params.getIterations())
        .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
        .list(2)
        .layer(0, conv)
        .layer(1, out)
        .backprop(true)
        .pretrain(false);

// Attaches the CNN input pre-processors for the configured image geometry.
new ConvolutionLayerSetup(builder, params.getRows(), params.getColumns(), params.getChannels());
return builder.build();
}
示例2: getConfiguration
import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; //导入依赖的package包/类
@Override
protected MultiLayerConfiguration getConfiguration()
{
// Builds a small CNN: conv(10x10, stride 2) -> max-pool(2x2) -> softmax output.
final ConvulationalNetParameters params = (ConvulationalNetParameters) this.parameters;

final ConvolutionLayer conv = new ConvolutionLayer.Builder(10, 10)
        .stride(2, 2)
        .nIn(params.getChannels())
        .nOut(6)
        .weightInit(WeightInit.XAVIER)
        .activation("relu")
        .build();

final SubsamplingLayer pool =
        new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[] { 2, 2 }).build();

final OutputLayer out = new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
        .nOut(params.getOutputSize())
        .weightInit(WeightInit.XAVIER)
        .activation("softmax")
        .build();

final MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder()
        .seed(params.getSeed())
        .iterations(params.getIterations())
        .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer)
        .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
        .list(3)
        .layer(0, conv)
        .layer(1, pool)
        .layer(2, out)
        .backprop(true)
        .pretrain(false);

// Attaches the CNN input pre-processors for the configured image geometry.
new ConvolutionLayerSetup(builder, params.getRows(), params.getColumns(), params.getChannels());
return builder.build();
}
示例3: getOriginalNet
import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; //导入依赖的package包/类
/**
 * Builds and initializes a small CNN for 28x28x1 flattened input:
 * conv -> subsample -> conv -> dense -> dense -> MSE output.
 *
 * @param seed RNG seed for reproducible weight initialization
 * @return an initialized {@link MultiLayerNetwork}
 */
public static MultiLayerNetwork getOriginalNet(int seed){
NeuralNetConfiguration.Builder base = new NeuralNetConfiguration.Builder();
base.seed(seed);
base.weightInit(WeightInit.XAVIER);
base.activation(Activation.TANH);
base.convolutionMode(ConvolutionMode.Same);
base.updater(new Sgd(0.3));

MultiLayerConfiguration conf = base.list()
        .layer(0, new ConvolutionLayer.Builder().kernelSize(2, 2).stride(1, 1).nOut(3).build())
        .layer(1, new SubsamplingLayer.Builder().kernelSize(2, 2).stride(1, 1).build())
        .layer(2, new ConvolutionLayer.Builder().kernelSize(2, 2).stride(1, 1).nIn(3).nOut(3).build())
        .layer(3, new DenseLayer.Builder().nOut(64).build())
        .layer(4, new DenseLayer.Builder().nIn(64).nOut(64).build())
        .layer(5, new OutputLayer.Builder()
                .lossFunction(LossFunctions.LossFunction.MSE)
                .nIn(64).nOut(10).build())
        .setInputType(InputType.convolutionalFlat(28, 28, 1))
        .build();

MultiLayerNetwork net = new MultiLayerNetwork(conf);
net.init();
return net;
}
示例4: getOriginalGraph
import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; //导入依赖的package包/类
/**
 * Graph version of the small CNN: in -> conv -> subsample -> conv -> dense -> dense -> MSE output,
 * for 28x28x1 flattened input.
 *
 * @param seed RNG seed for reproducible weight initialization
 * @return an initialized {@link ComputationGraph}
 */
public static ComputationGraph getOriginalGraph(int seed){
NeuralNetConfiguration.Builder base = new NeuralNetConfiguration.Builder();
base.seed(seed);
base.weightInit(WeightInit.XAVIER);
base.activation(Activation.TANH);
base.convolutionMode(ConvolutionMode.Same);
base.updater(new Sgd(0.3));

ComputationGraphConfiguration.GraphBuilder gb = base.graphBuilder();
gb.addInputs("in");
gb.layer("0", new ConvolutionLayer.Builder().kernelSize(2, 2).stride(1, 1).nOut(3).build(), "in");
gb.layer("1", new SubsamplingLayer.Builder().kernelSize(2, 2).stride(1, 1).build(), "0");
gb.layer("2", new ConvolutionLayer.Builder().kernelSize(2, 2).stride(1, 1).nIn(3).nOut(3).build(), "1");
gb.layer("3", new DenseLayer.Builder().nOut(64).build(), "2");
gb.layer("4", new DenseLayer.Builder().nIn(64).nOut(64).build(), "3");
gb.layer("5", new OutputLayer.Builder()
        .lossFunction(LossFunctions.LossFunction.MSE)
        .nIn(64).nOut(10).build(), "4");
gb.setOutputs("5");
gb.setInputTypes(InputType.convolutionalFlat(28, 28, 1));

ComputationGraph net = new ComputationGraph(gb.build());
net.init();
return net;
}
示例5: testMultiCNNLayer
import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; //导入依赖的package包/类
@Test
public void testMultiCNNLayer() throws Exception {
// Smoke test: conv -> LRN -> dense -> softmax output, fit once on two MNIST examples.
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
        .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT)
        .seed(123)
        .list()
        .layer(0, new ConvolutionLayer.Builder()
                .nIn(1).nOut(6)
                .weightInit(WeightInit.XAVIER)
                .activation(Activation.RELU)
                .build())
        .layer(1, new LocalResponseNormalization.Builder().build())
        .layer(2, new DenseLayer.Builder().nOut(2).build())
        .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                .weightInit(WeightInit.XAVIER)
                .activation(Activation.SOFTMAX)
                .nIn(2).nOut(10)
                .build())
        .backprop(true).pretrain(false)
        .setInputType(InputType.convolutionalFlat(28, 28, 1))
        .build();

MultiLayerNetwork net = new MultiLayerNetwork(conf);
net.init();

// Two examples, batch size two: a single fit() call is enough for the smoke test.
DataSetIterator mnist = new MnistDataSetIterator(2, 2);
DataSet batch = mnist.next();
net.fit(batch);
}
示例6: getCNNMLNConfig
import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; //导入依赖的package包/类
/**
 * Builds an initialized CNN (conv 10x10 -> max-pool 2x2 stride 1 -> softmax output)
 * for 28x28x1 flattened input.
 *
 * @param backprop whether backprop is enabled on the configuration
 * @param pretrain whether layer-wise pretraining is enabled
 * @return an initialized {@link MultiLayerNetwork}
 */
private static MultiLayerNetwork getCNNMLNConfig(boolean backprop, boolean pretrain) {
final int numClasses = 10;
final int rngSeed = 123;

MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder()
        .seed(rngSeed)
        .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT)
        .list()
        .layer(0, new ConvolutionLayer.Builder(new int[] {10, 10}).nOut(6).build())
        .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[] {2, 2})
                .stride(1, 1).build())
        .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                .nOut(numClasses)
                .weightInit(WeightInit.XAVIER)
                .activation(Activation.SOFTMAX)
                .build())
        .setInputType(InputType.convolutionalFlat(28, 28, 1))
        .backprop(backprop)
        .pretrain(pretrain);

MultiLayerNetwork model = new MultiLayerNetwork(builder.build());
model.init();
return model;
}
示例7: testMultiChannel
import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; //导入依赖的package包/类
@Test
public void testMultiChannel() throws Exception {
// Verifies that setInputType propagates channel counts through conv layers
// for 3-channel 28x28 input, then runs a single fit as a smoke test.
INDArray features = Nd4j.rand(new int[] {10, 3, 28, 28});
INDArray labels = Nd4j.rand(10, 2);
DataSet data = new DataSet(features, labels);

NeuralNetConfiguration.ListBuilder listBuilder = (NeuralNetConfiguration.ListBuilder) incompleteLFW();
listBuilder.setInputType(InputType.convolutional(28, 28, 3));
MultiLayerConfiguration conf = listBuilder.build();

// Layer 2 is the second conv layer; its nIn must equal the first conv layer's nOut (6).
ConvolutionLayer secondConv = (ConvolutionLayer) conf.getConf(2).getLayer();
assertEquals(6, secondConv.getNIn());

MultiLayerNetwork net = new MultiLayerNetwork(conf);
net.init();
net.fit(data);
}
示例8: testLRN
import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; //导入依赖的package包/类
@Test
public void testLRN() throws Exception {
// Verifies that setInputType propagates nIn through an LRN layer: the conv layer
// after the LRN (index 3) must inherit nIn = 6 from the first conv layer's nOut.
List<String> labels = new ArrayList<>(Arrays.asList("Zico", "Ziwang_Xu"));
String rootDir = new ClassPathResource("lfwtest").getFile().getAbsolutePath();
RecordReader reader = new ImageRecordReader(28, 28, 3);
reader.initialize(new FileSplit(new File(rootDir)));
// NOTE(review): this iterator is built but never consumed; it only checks that the
// reader wiring is valid. (A former labels.remove("lfwtest") call was deleted — it
// was a no-op, since the list only ever contains "Zico" and "Ziwang_Xu".)
DataSetIterator recordReader = new RecordReaderDataSetIterator(reader, 10, 1, labels.size());
NeuralNetConfiguration.ListBuilder builder = (NeuralNetConfiguration.ListBuilder) incompleteLRN();
builder.setInputType(InputType.convolutional(28, 28, 3));
MultiLayerConfiguration conf = builder.build();
ConvolutionLayer layer2 = (ConvolutionLayer) conf.getConf(3).getLayer();
assertEquals(6, layer2.getNIn());
}
示例9: incompleteLRN
import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; //导入依赖的package包/类
/**
 * Builds conv -> pool -> LRN -> conv -> pool -> output with nIn values intentionally
 * left unset, so they can be inferred later (e.g. via setInputType).
 *
 * @return a configuration builder with missing nIn values
 */
public MultiLayerConfiguration.Builder incompleteLRN() {
ConvolutionLayer conv1 = new ConvolutionLayer.Builder(new int[] {5, 5}).nOut(6).build();
org.deeplearning4j.nn.conf.layers.SubsamplingLayer pool1 =
        new org.deeplearning4j.nn.conf.layers.SubsamplingLayer.Builder(new int[] {2, 2}).build();
LocalResponseNormalization lrn = new LocalResponseNormalization.Builder().build();
ConvolutionLayer conv2 = new ConvolutionLayer.Builder(new int[] {5, 5}).nOut(6).build();
org.deeplearning4j.nn.conf.layers.SubsamplingLayer pool2 =
        new org.deeplearning4j.nn.conf.layers.SubsamplingLayer.Builder(new int[] {2, 2}).build();
org.deeplearning4j.nn.conf.layers.OutputLayer out =
        new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(
                LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD).nOut(2).build();

return new NeuralNetConfiguration.Builder()
        .seed(3)
        .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
        .list()
        .layer(0, conv1)
        .layer(1, pool1)
        .layer(2, lrn)
        .layer(3, conv2)
        .layer(4, pool2)
        .layer(5, out);
}
示例10: incompleteLFW
import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; //导入依赖的package包/类
/**
 * Builds conv -> pool -> conv -> pool -> output with nIn values intentionally
 * omitted, so they can be filled in by input-type inference.
 *
 * @return a configuration builder with missing nIn values
 */
public MultiLayerConfiguration.Builder incompleteLFW() {
ConvolutionLayer conv1 = new ConvolutionLayer.Builder(new int[] {5, 5}).nOut(6).build();
org.deeplearning4j.nn.conf.layers.SubsamplingLayer pool1 =
        new org.deeplearning4j.nn.conf.layers.SubsamplingLayer.Builder(new int[] {2, 2}).build();
ConvolutionLayer conv2 = new ConvolutionLayer.Builder(new int[] {5, 5}).nOut(6).build();
org.deeplearning4j.nn.conf.layers.SubsamplingLayer pool2 =
        new org.deeplearning4j.nn.conf.layers.SubsamplingLayer.Builder(new int[] {2, 2}).build();
org.deeplearning4j.nn.conf.layers.OutputLayer out =
        new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(
                LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD).nOut(2).build();

return new NeuralNetConfiguration.Builder()
        .seed(3)
        .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
        .list()
        .layer(0, conv1)
        .layer(1, pool1)
        .layer(2, conv2)
        .layer(3, pool2)
        .layer(4, out);
}
示例11: incompleteMnistLenet
import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; //导入依赖的package包/类
/**
 * LeNet-style stack: conv(5x5, 20) -> pool -> conv(5x5, 50) -> pool -> dense(500) -> softmax(10).
 * The dense and output layers' nIn values are left unset for later inference.
 *
 * @return a configuration builder with missing nIn values
 */
public MultiLayerConfiguration.Builder incompleteMnistLenet() {
ConvolutionLayer conv1 = new ConvolutionLayer.Builder(new int[] {5, 5}).nIn(1).nOut(20).build();
org.deeplearning4j.nn.conf.layers.SubsamplingLayer pool1 =
        new org.deeplearning4j.nn.conf.layers.SubsamplingLayer.Builder(
                new int[] {2, 2}, new int[] {2, 2}).build();
ConvolutionLayer conv2 = new ConvolutionLayer.Builder(new int[] {5, 5}).nIn(20).nOut(50).build();
org.deeplearning4j.nn.conf.layers.SubsamplingLayer pool2 =
        new org.deeplearning4j.nn.conf.layers.SubsamplingLayer.Builder(
                new int[] {2, 2}, new int[] {2, 2}).build();
org.deeplearning4j.nn.conf.layers.DenseLayer dense =
        new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nOut(500).build();
org.deeplearning4j.nn.conf.layers.OutputLayer out =
        new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(
                LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                .activation(Activation.SOFTMAX).nOut(10).build();

return new NeuralNetConfiguration.Builder()
        .seed(3)
        .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
        .list()
        .layer(0, conv1)
        .layer(1, pool1)
        .layer(2, conv2)
        .layer(3, pool2)
        .layer(4, dense)
        .layer(5, out);
}
示例12: mnistLenet
import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; //导入依赖的package包/类
/**
 * Builds a fully constructed LeNet-style configuration (note: returns the built
 * {@link MultiLayerConfiguration}, not a builder).
 *
 * @return the built configuration
 */
public MultiLayerConfiguration mnistLenet() {
// NOTE(review): layer 2 declares nIn(1) even though layer 0 emits 6 channels —
// looks inconsistent; confirm whether downstream inference overrides it.
return new NeuralNetConfiguration.Builder()
        .seed(3)
        .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
        .list()
        .layer(0, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder(
                new int[] {5, 5}).nIn(1).nOut(6).build())
        .layer(1, new org.deeplearning4j.nn.conf.layers.SubsamplingLayer.Builder(
                new int[] {5, 5}, new int[] {2, 2}).build())
        .layer(2, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder(
                new int[] {5, 5}).nIn(1).nOut(6).build())
        .layer(3, new org.deeplearning4j.nn.conf.layers.SubsamplingLayer.Builder(
                new int[] {5, 5}, new int[] {2, 2}).build())
        .layer(4, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(
                LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                .nIn(150).nOut(10).build())
        .build();
}
示例13: inComplete
import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; //导入依赖的package包/类
/**
 * Builds conv(10x10, stride 2) -> max-pool(2x2) -> softmax output without any
 * input pre-processors attached (hence "inComplete").
 *
 * @return a configuration builder lacking input pre-processors
 */
public MultiLayerConfiguration.Builder inComplete() {
final int channels = 1;
final int classes = 10;
final int rngSeed = 123;

return new NeuralNetConfiguration.Builder()
        .seed(rngSeed)
        .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT)
        .list()
        .layer(0, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder(
                new int[] {10, 10}, new int[] {2, 2}).nIn(channels).nOut(6).build())
        .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[] {2, 2})
                .build())
        .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                .nOut(classes)
                .weightInit(WeightInit.XAVIER)
                .activation(Activation.SOFTMAX)
                .build())
        .backprop(true)
        .pretrain(false);
}
示例14: complete
import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; //导入依赖的package包/类
// Like inComplete(), but with the feed-forward<->CNN input pre-processors wired in
// explicitly for a 28x28x1 input.
public MultiLayerConfiguration.Builder complete() {
final int numRows = 28;
final int numColumns = 28;
int nChannels = 1;
int outputNum = 10;
int seed = 123;
// Shape trace: conv 10x10 stride 2 on 28x28 -> 10x10x6; 2x2 max-pool -> 5x5x6.
MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(seed)
.optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT).list()
.layer(0, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder(new int[] {10, 10},
new int[] {2, 2}).nIn(nChannels).nOut(6).build())
.layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[] {2, 2})
.build())
.layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
.nIn(5 * 5 * 1 * 6) // = 150 (5x5 spatial * 6 channels); previous comment said 216, which was wrong
.nOut(outputNum).weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX)
.build())
// Pre-processor in: flat 784-vector -> 28x28x1 image; out: 5x5x6 feature maps -> flat 150-vector.
.inputPreProcessor(0, new FeedForwardToCnnPreProcessor(numRows, numColumns, nChannels))
.inputPreProcessor(2, new CnnToFeedForwardPreProcessor(5, 5, 6)).backprop(true).pretrain(false);
return builder;
}
示例15: testSubSamplingWithPadding
import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; //导入依赖的package包/类
@Test
public void testSubSamplingWithPadding() {
// Shape trace:
//   conv 2x2, stride 2, pad 0: (28-2+0)/2+1 = 14 -> 14x14x3
//   pool 2x2, stride 2, pad 1: (14-2+2)/2+1 = 8  -> 8x8x3
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
        .list()
        .layer(0, new ConvolutionLayer.Builder(2, 2).padding(0, 0).stride(2, 2).nIn(1).nOut(3).build())
        .layer(1, new SubsamplingLayer.Builder().kernelSize(2, 2).padding(1, 1).stride(2, 2).build())
        .layer(2, new OutputLayer.Builder().nOut(3).build())
        .setInputType(InputType.convolutional(28, 28, 1))
        .build();

// The pre-processor before the output layer must flatten the padded 8x8x3 activations.
assertNotNull(conf.getInputPreProcess(2));
assertTrue(conf.getInputPreProcess(2) instanceof CnnToFeedForwardPreProcessor);

CnnToFeedForwardPreProcessor proc = (CnnToFeedForwardPreProcessor) conf.getInputPreProcess(2);
assertEquals(8, proc.getInputHeight());
assertEquals(8, proc.getInputWidth());
assertEquals(3, proc.getNumChannels());
assertEquals(8 * 8 * 3, ((FeedForwardLayer) conf.getConf(2).getLayer()).getNIn());
}