

Java MultiLayerConfiguration.Builder Method Code Examples

This article collects typical usage examples of the Java method org.deeplearning4j.nn.conf.MultiLayerConfiguration.Builder. If you are wondering what MultiLayerConfiguration.Builder does and how it is used in practice, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.deeplearning4j.nn.conf.MultiLayerConfiguration.


The following 15 code examples demonstrate the MultiLayerConfiguration.Builder method, sorted by popularity.
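Before working through the examples, here is a minimal sketch of the pattern they all share: a NeuralNetConfiguration.Builder becomes a MultiLayerConfiguration.Builder once list() is called, layers are added by index, and build() produces the MultiLayerConfiguration that a MultiLayerNetwork consumes. The layer sizes, seed, and hyperparameters below are placeholders chosen for illustration, not values taken from any particular example.

import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.deeplearning4j.nn.weights.WeightInit;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class MultiLayerConfigurationBuilderSketch {
    public static void main(String[] args) {
        // list() turns the NeuralNetConfiguration.Builder into a
        // MultiLayerConfiguration.Builder (its ListBuilder subclass).
        MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder()
                .seed(123)
                .weightInit(WeightInit.XAVIER)
                .list()
                .layer(0, new DenseLayer.Builder().nIn(4).nOut(10)
                        .activation(Activation.RELU).build())
                .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .nIn(10).nOut(3).activation(Activation.SOFTMAX).build());

        // build() yields the immutable MultiLayerConfiguration.
        MultiLayerConfiguration conf = builder.build();

        // The configuration is then wrapped in a MultiLayerNetwork and initialized.
        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();
        System.out.println(net.summary());
    }
}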

Example 1: getConfiguration

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; // import the package/class this example depends on
@Override
protected MultiLayerConfiguration getConfiguration()
{
    final ConvulationalNetParameters parameters = (ConvulationalNetParameters) this.parameters;
    final MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(parameters.getSeed())
            .iterations(parameters.getIterations())
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list(2)
            .layer(0,
                    new ConvolutionLayer.Builder(new int[] { 1, 1 }).nIn(parameters.getInputSize()).nOut(1000)
                            .activation("relu").weightInit(WeightInit.RELU).build())
            .layer(1,
                    new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nOut(parameters.getOutputSize())
                            .weightInit(WeightInit.XAVIER).activation("softmax").build())
            .backprop(true).pretrain(false);

    new ConvolutionLayerSetup(builder, parameters.getRows(), parameters.getColumns(), parameters.getChannels());

    return builder.build();
}
 
Developer: amrabed | Project: DL4J | Lines: 20 | Source: ConvolutionalNetModel.java

Example 2: getConfiguration

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; // import the package/class this example depends on
@Override
protected MultiLayerConfiguration getConfiguration()
{
    final ConvulationalNetParameters parameters = (ConvulationalNetParameters) this.parameters;
    final MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(parameters.getSeed())
            .iterations(parameters.getIterations())
            .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list(3)
            .layer(0,
                    new ConvolutionLayer.Builder(10, 10).stride(2, 2).nIn(parameters.getChannels()).nOut(6)
                            .weightInit(WeightInit.XAVIER).activation("relu").build())
            .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[] { 2, 2 }).build())
            .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                    .nOut(parameters.getOutputSize()).weightInit(WeightInit.XAVIER).activation("softmax").build())
            .backprop(true).pretrain(false);

    new ConvolutionLayerSetup(builder, parameters.getRows(), parameters.getColumns(), parameters.getChannels());

    return builder.build();
}
 
Developer: amrabed | Project: DL4J | Lines: 21 | Source: ConvolutionalNetModel.java
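Examples 1 and 2 rely on the older DL4J API: an explicit list(n) layer count, string activations, and the ConvolutionLayerSetup helper that adds input preprocessors and infers nIn values. In more recent DL4J releases the same wiring is typically done with setInputType on the builder itself. The following is a hedged sketch of roughly what Example 2 would look like with that approach; the 28x28x1 input shape and the output size of 10 are placeholders standing in for the parameters object used above.

import org.deeplearning4j.nn.api.OptimizationAlgorithm;
import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.inputs.InputType;
import org.deeplearning4j.nn.conf.layers.ConvolutionLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.conf.layers.SubsamplingLayer;
import org.deeplearning4j.nn.weights.WeightInit;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class ModernConvConfigSketch {
    public static MultiLayerConfiguration getConfiguration() {
        return new NeuralNetConfiguration.Builder().seed(123)
                .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                .list()
                .layer(0, new ConvolutionLayer.Builder(10, 10).stride(2, 2).nOut(6)
                        .weightInit(WeightInit.XAVIER).activation(Activation.RELU).build())
                .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[] {2, 2}).build())
                .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                        .nOut(10).weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).build())
                // setInputType replaces the ConvolutionLayerSetup helper: it registers
                // the CNN preprocessors and infers each layer's nIn automatically.
                .setInputType(InputType.convolutional(28, 28, 1))
                .build();
    }
}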

Example 3: testDeconv2D

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; // import the package/class this example depends on
@Test
public void testDeconv2D() {

    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().list()
            .layer( new Deconvolution2D.Builder(2, 2)
                    .padding(0, 0)
                    .stride(2, 2).nIn(1).nOut(3).build()) //deconv: 2*(28-1)+2-0 = 56 -> 56x56x3
            .layer( new SubsamplingLayer.Builder().kernelSize(2, 2).padding(1, 1).stride(2, 2).build()) //(56-2+2)/2+1 = 29 -> 29x29x3
            .layer(2, new OutputLayer.Builder().nOut(3).build())
            .setInputType(InputType.convolutional(28, 28, 1));

    MultiLayerConfiguration conf = builder.build();

    assertNotNull(conf.getInputPreProcess(2));
    assertTrue(conf.getInputPreProcess(2) instanceof CnnToFeedForwardPreProcessor);
    CnnToFeedForwardPreProcessor proc = (CnnToFeedForwardPreProcessor) conf.getInputPreProcess(2);
    assertEquals(29, proc.getInputHeight());
    assertEquals(29, proc.getInputWidth());
    assertEquals(3, proc.getNumChannels());

    assertEquals(29 * 29 * 3, ((FeedForwardLayer) conf.getConf(2).getLayer()).getNIn());
}
 
Developer: deeplearning4j | Project: deeplearning4j | Lines: 23 | Source: ConvolutionLayerSetupTest.java

Example 4: incompleteLFW

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; // import the package/class this example depends on
public MultiLayerConfiguration.Builder incompleteLFW() {
    MultiLayerConfiguration.Builder builder =
                    new NeuralNetConfiguration.Builder().seed(3)
                                    .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT).list()
                                    .layer(0, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder(
                                                    new int[] {5, 5}).nOut(6).build())
                                    .layer(1, new org.deeplearning4j.nn.conf.layers.SubsamplingLayer.Builder(
                                                    new int[] {2, 2}).build())
                                    .layer(2, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder(
                                                    new int[] {5, 5}).nOut(6).build())
                                    .layer(3, new org.deeplearning4j.nn.conf.layers.SubsamplingLayer.Builder(
                                                    new int[] {2, 2}).build())
                                    .layer(4, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(
                                                    LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD).nOut(2)
                                                                    .build());
    return builder;
}
 
Developer: deeplearning4j | Project: deeplearning4j | Lines: 18 | Source: ConvolutionLayerSetupTest.java
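A brief, hedged note on Example 4: the builder it returns is deliberately "incomplete", since no layer declares nIn and no input preprocessors are set. In the enclosing test such a builder would presumably be finished by specifying the input shape before build(); a minimal sketch of one way to do that is shown below, where the 250x250x3 LFW image dimensions are an assumption for illustration, not a value taken from the test.

// Hedged sketch: completing the "incomplete" builder from Example 4.
// The 250x250x3 input shape is an assumed value.
MultiLayerConfiguration.Builder builder = incompleteLFW();
builder.setInputType(InputType.convolutional(250, 250, 3)); // adds preprocessors, infers nIn
MultiLayerConfiguration conf = builder.build();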

Example 5: complete

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; // import the package/class this example depends on
public MultiLayerConfiguration.Builder complete() {
    final int numRows = 28;
    final int numColumns = 28;
    int nChannels = 1;
    int outputNum = 10;
    int seed = 123;

    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(seed)
                    .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT).list()
                    .layer(0, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder(new int[] {10, 10},
                                    new int[] {2, 2}).nIn(nChannels).nOut(6).build())
                    .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[] {2, 2})
                                    .build())
                    .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                                    .nIn(5 * 5 * 1 * 6) //150
                                    .nOut(outputNum).weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX)
                                    .build())
                    .inputPreProcessor(0, new FeedForwardToCnnPreProcessor(numRows, numColumns, nChannels))
                    .inputPreProcessor(2, new CnnToFeedForwardPreProcessor(5, 5, 6)).backprop(true).pretrain(false);

    return builder;
}
 
Developer: deeplearning4j | Project: deeplearning4j | Lines: 23 | Source: ConvolutionLayerSetupTest.java

Example 6: testSubSamplingWithPadding

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; // import the package/class this example depends on
@Test
public void testSubSamplingWithPadding() {

    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().list()
                    .layer(0, new ConvolutionLayer.Builder(2, 2).padding(0, 0).stride(2, 2).nIn(1).nOut(3).build()) //(28-2+0)/2+1 = 14
                    .layer(1, new SubsamplingLayer.Builder().kernelSize(2, 2).padding(1, 1).stride(2, 2).build()) //(14-2+2)/2+1 = 8 -> 8x8x3
                    .layer(2, new OutputLayer.Builder().nOut(3).build())
                    .setInputType(InputType.convolutional(28, 28, 1));

    MultiLayerConfiguration conf = builder.build();

    assertNotNull(conf.getInputPreProcess(2));
    assertTrue(conf.getInputPreProcess(2) instanceof CnnToFeedForwardPreProcessor);
    CnnToFeedForwardPreProcessor proc = (CnnToFeedForwardPreProcessor) conf.getInputPreProcess(2);
    assertEquals(8, proc.getInputHeight());
    assertEquals(8, proc.getInputWidth());
    assertEquals(3, proc.getNumChannels());

    assertEquals(8 * 8 * 3, ((FeedForwardLayer) conf.getConf(2).getLayer()).getNIn());
}
 
Developer: deeplearning4j | Project: deeplearning4j | Lines: 21 | Source: ConvolutionLayerSetupTest.java

Example 7: testUpsampling

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; // import the package/class this example depends on
@Test
public void testUpsampling() {

    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().list()
            .layer(new ConvolutionLayer.Builder(2, 2).padding(0, 0).stride(2, 2).nIn(1).nOut(3).build()) //(28-2+0)/2+1 = 14
            .layer(new Upsampling2D.Builder().size(3).build()) // 14 * 3 = 42!
            .layer(new OutputLayer.Builder().nOut(3).build())
            .setInputType(InputType.convolutional(28, 28, 1));

    MultiLayerConfiguration conf = builder.build();

    assertNotNull(conf.getInputPreProcess(2));
    assertTrue(conf.getInputPreProcess(2) instanceof CnnToFeedForwardPreProcessor);
    CnnToFeedForwardPreProcessor proc = (CnnToFeedForwardPreProcessor) conf.getInputPreProcess(2);
    assertEquals(42, proc.getInputHeight());
    assertEquals(42, proc.getInputWidth());
    assertEquals(3, proc.getNumChannels());

    assertEquals(42 * 42 * 3, ((FeedForwardLayer) conf.getConf(2).getLayer()).getNIn());
}
 
Developer: deeplearning4j | Project: deeplearning4j | Lines: 21 | Source: ConvolutionLayerSetupTest.java

Example 8: testSeparableConv2D

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; // import the package/class this example depends on
@Test
public void testSeparableConv2D() {

    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().list()
            .layer( new SeparableConvolution2D.Builder(2, 2)
                    .depthMultiplier(2)
                    .padding(0, 0)
                    .stride(2, 2).nIn(1).nOut(3).build()) //(28-2+0)/2+1 = 14
            .layer( new SubsamplingLayer.Builder().kernelSize(2, 2).padding(1, 1).stride(2, 2).build()) //(14-2+2)/2+1 = 8 -> 8x8x3
            .layer(2, new OutputLayer.Builder().nOut(3).build())
            .setInputType(InputType.convolutional(28, 28, 1));

    MultiLayerConfiguration conf = builder.build();

    assertNotNull(conf.getInputPreProcess(2));
    assertTrue(conf.getInputPreProcess(2) instanceof CnnToFeedForwardPreProcessor);
    CnnToFeedForwardPreProcessor proc = (CnnToFeedForwardPreProcessor) conf.getInputPreProcess(2);
    assertEquals(8, proc.getInputHeight());
    assertEquals(8, proc.getInputWidth());
    assertEquals(3, proc.getNumChannels());

    assertEquals(8 * 8 * 3, ((FeedForwardLayer) conf.getConf(2).getLayer()).getNIn());
}
 
Developer: deeplearning4j | Project: deeplearning4j | Lines: 24 | Source: ConvolutionLayerSetupTest.java

Example 9: getConfiguration

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; // import the package/class this example depends on
public static MultiLayerConfiguration getConfiguration() {

        final int numRows = 28;
        final int numColumns = 28;
        int nChannels = 1;
        int outputNum = 10;
        int batchSize = 100;
        int iterations = 10;
        int seed = 123;

        MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder()
                .seed(seed)
                .batchSize(batchSize)
                .iterations(iterations)
                .constrainGradientToUnitNorm(true)
                .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                .list(3)
                .layer(0, new ConvolutionLayer.Builder(10, 10)
                        .nIn(nChannels)
                        .nOut(6)
                        .weightInit(WeightInit.XAVIER)
                        .activation("relu")
                        .build())
                .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[]{2, 2})
                        .build())
                .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                        .nIn(150)
                        .nOut(outputNum)
                        .weightInit(WeightInit.XAVIER)
                        .activation("softmax")
                        .build())
                .backprop(true).pretrain(false);
        new ConvolutionLayerSetup(builder, numRows, numColumns, nChannels);

        MultiLayerConfiguration conf = builder.build();
        return conf;
    }
 
Developer: nitish11 | Project: deeplearning4j-spark-ml-examples | Lines: 38 | Source: JavaMnistClassification.java
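A short usage note for Example 9: a MultiLayerConfiguration built this way can be round-tripped through JSON, which is one common way configurations are persisted or shipped to remote workers. A minimal, hedged sketch follows; the variable names are illustrative only.

// Hedged sketch: JSON round-trip of the configuration from Example 9.
MultiLayerConfiguration conf = getConfiguration();
String json = conf.toJson();                                                // serialize
MultiLayerConfiguration restored = MultiLayerConfiguration.fromJson(json);  // deserialize
MultiLayerNetwork model = new MultiLayerNetwork(restored);
model.init();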

Example 10: testGradient2dSimple

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; // import the package/class this example depends on
@Test
public void testGradient2dSimple() {
    DataNormalization scaler = new NormalizerMinMaxScaler();
    DataSetIterator iter = new IrisDataSetIterator(150, 150);
    scaler.fit(iter);
    iter.setPreProcessor(scaler);
    DataSet ds = iter.next();
    INDArray input = ds.getFeatureMatrix();
    INDArray labels = ds.getLabels();

    MultiLayerConfiguration.Builder builder =
                    new NeuralNetConfiguration.Builder().updater(new NoOp())
                            .seed(12345L).weightInit(WeightInit.DISTRIBUTION)
                                    .dist(new NormalDistribution(0, 1)).list()
                                    .layer(0, new DenseLayer.Builder().nIn(4).nOut(3)
                                                    .activation(Activation.IDENTITY).build())
                                    .layer(1, new BatchNormalization.Builder().nOut(3).build())
                                    .layer(2, new ActivationLayer.Builder().activation(Activation.TANH).build())
                                    .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                                    .activation(Activation.SOFTMAX).nIn(3).nOut(3).build())
                                    .pretrain(false).backprop(true);

    MultiLayerNetwork mln = new MultiLayerNetwork(builder.build());
    mln.init();

    if (PRINT_RESULTS) {
        for (int j = 0; j < mln.getnLayers(); j++)
            System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
    }

    boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

    assertTrue(gradOK);
}
 
Developer: deeplearning4j | Project: deeplearning4j | Lines: 36 | Source: BNGradientCheckTest.java

Example 11: testGradientCnnSimple

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; // import the package/class this example depends on
@Test
public void testGradientCnnSimple() {
    Nd4j.getRandom().setSeed(12345);
    int minibatch = 10;
    int depth = 1;
    int hw = 4;
    int nOut = 4;
    INDArray input = Nd4j.rand(new int[] {minibatch, depth, hw, hw});
    INDArray labels = Nd4j.zeros(minibatch, nOut);
    Random r = new Random(12345);
    for (int i = 0; i < minibatch; i++) {
        labels.putScalar(i, r.nextInt(nOut), 1.0);
    }

    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder()
                    .updater(new NoOp()).seed(12345L).weightInit(WeightInit.DISTRIBUTION)
                    .dist(new NormalDistribution(0, 2)).list()
                    .layer(0, new ConvolutionLayer.Builder().kernelSize(2, 2).stride(1, 1).nIn(depth).nOut(2)
                                    .activation(Activation.IDENTITY).build())
                    .layer(1, new BatchNormalization.Builder().build())
                    .layer(2, new ActivationLayer.Builder().activation(Activation.TANH).build())
                    .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nOut(nOut).build())
                    .setInputType(InputType.convolutional(hw, hw, depth)).pretrain(false).backprop(true);

    MultiLayerNetwork mln = new MultiLayerNetwork(builder.build());
    mln.init();

    if (PRINT_RESULTS) {
        for (int j = 0; j < mln.getnLayers(); j++)
            System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
    }

    boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

    assertTrue(gradOK);
}
 
Developer: deeplearning4j | Project: deeplearning4j | Lines: 39 | Source: BNGradientCheckTest.java

Example 12: testLfwModel

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; // import the package/class this example depends on
@Test
public void testLfwModel() throws Exception {
    final int numRows = 28;
    final int numColumns = 28;
    int numChannels = 3;
    int outputNum = LFWLoader.NUM_LABELS;
    int numSamples = LFWLoader.NUM_IMAGES;
    int batchSize = 2;
    int seed = 123;
    int listenerFreq = 1;

    LFWDataSetIterator lfw = new LFWDataSetIterator(batchSize, numSamples,
                    new int[] {numRows, numColumns, numChannels}, outputNum, false, true, 1.0, new Random(seed));

    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(seed)
                    .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list()
                    .layer(0, new ConvolutionLayer.Builder(5, 5).nIn(numChannels).nOut(6)
                                    .weightInit(WeightInit.XAVIER).activation(Activation.RELU).build())
                    .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[] {2, 2})
                                    .stride(1, 1).build())
                    .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                                    .nOut(outputNum).weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX)
                                    .build())
                    .setInputType(InputType.convolutionalFlat(numRows, numColumns, numChannels)).backprop(true)
                    .pretrain(false);

    MultiLayerNetwork model = new MultiLayerNetwork(builder.build());
    model.init();

    model.setListeners(new ScoreIterationListener(listenerFreq));

    model.fit(lfw.next());

    DataSet dataTest = lfw.next();
    INDArray output = model.output(dataTest.getFeatureMatrix());
    Evaluation eval = new Evaluation(outputNum);
    eval.eval(dataTest.getLabels(), output);
    System.out.println(eval.stats());
}
 
Developer: deeplearning4j | Project: deeplearning4j | Lines: 41 | Source: DataSetIteratorTest.java

Example 13: testGradientCnnFixedGammaBeta

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; // import the package/class this example depends on
@Test
public void testGradientCnnFixedGammaBeta() {
    Nd4j.getRandom().setSeed(12345);
    int minibatch = 10;
    int depth = 1;
    int hw = 4;
    int nOut = 4;
    INDArray input = Nd4j.rand(new int[] {minibatch, depth, hw, hw});
    INDArray labels = Nd4j.zeros(minibatch, nOut);
    Random r = new Random(12345);
    for (int i = 0; i < minibatch; i++) {
        labels.putScalar(i, r.nextInt(nOut), 1.0);
    }

    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().updater(new NoOp())
                    .seed(12345L).weightInit(WeightInit.DISTRIBUTION)
                    .dist(new NormalDistribution(0, 2)).list()
                    .layer(0, new ConvolutionLayer.Builder().kernelSize(2, 2).stride(1, 1).nIn(depth).nOut(2)
                                    .activation(Activation.IDENTITY).build())
                    .layer(1, new BatchNormalization.Builder().lockGammaBeta(true).gamma(2.0).beta(0.5).build())
                    .layer(2, new ActivationLayer.Builder().activation(Activation.TANH).build())
                    .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nOut(nOut).build())
                    .setInputType(InputType.convolutional(hw, hw, depth)).pretrain(false).backprop(true);

    MultiLayerNetwork mln = new MultiLayerNetwork(builder.build());
    mln.init();

    if (PRINT_RESULTS) {
        for (int j = 0; j < mln.getnLayers(); j++)
            System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
    }

    boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

    assertTrue(gradOK);
}
 
Developer: deeplearning4j | Project: deeplearning4j | Lines: 39 | Source: BNGradientCheckTest.java

Example 14: testCNNActivationsFrozen

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; // import the package/class this example depends on
@Test
public void testCNNActivationsFrozen() throws Exception {

    int nChannels = 1;
    int outputNum = 10;
    int batchSize = 64;
    int nEpochs = 10;
    int seed = 123;

    log.info("Load data....");
    DataSetIterator mnistTrain = new MnistDataSetIterator(batchSize, true, 12345);

    log.info("Build model....");
    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(seed)
            .l2(0.0005)
            .weightInit(WeightInit.XAVIER)
            .updater(new Nesterovs(0.01, 0.9)).list()
            .layer(0, new FrozenLayer(new ConvolutionLayer.Builder(5, 5)
                    //nIn and nOut specify depth. nIn here is the nChannels and nOut is the number of filters to be applied
                    .nIn(nChannels).stride(1, 1).nOut(20).activation(Activation.IDENTITY).build()))
            .layer(1, new FrozenLayer(new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX).kernelSize(2, 2)
                    .stride(2, 2).build()))
            .layer(2, new FrozenLayer(new DenseLayer.Builder().activation(Activation.RELU).nOut(500).build()))
            .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                    .nOut(outputNum).activation(Activation.SOFTMAX).build())
            .setInputType(InputType.convolutionalFlat(28, 28, nChannels));

    MultiLayerConfiguration conf = builder.build();
    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();

    log.info("Train model....");
    model.setListeners(new ConvolutionalIterationListener(1));

    for (int i = 0; i < nEpochs; i++) {
        model.fit(mnistTrain);
    }
}
 
Developer: deeplearning4j | Project: deeplearning4j | Lines: 39 | Source: ManualTests.java

Example 15: testTwdFirstLayer

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; // import the package/class this example depends on
@Test
public void testTwdFirstLayer() throws Exception {
    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(123)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).l2(2e-4)
                    .updater(new Nesterovs(0.9)).dropOut(0.5)
                    .list().layer(0,
                                    new ConvolutionLayer.Builder(8, 8) //16 filters kernel size 8 stride 4
                                                    .stride(4, 4).nOut(16).dropOut(0.5)
                                                    .activation(Activation.RELU).weightInit(
                                                                    WeightInit.XAVIER)
                                                    .build())
                    .layer(1, new ConvolutionLayer.Builder(4, 4) //32 filters kernel size 4 stride 2
                                    .stride(2, 2).nOut(32).dropOut(0.5).activation(Activation.RELU)
                                    .weightInit(WeightInit.XAVIER).build())
                    .layer(2, new DenseLayer.Builder() //fully connected with 256 rectified units
                                    .nOut(256).activation(Activation.RELU).weightInit(WeightInit.XAVIER)
                                    .dropOut(0.5).build())
                    .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.SQUARED_LOSS) //output layer
                                    .nOut(10).weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).build())
                    .setInputType(InputType.convolutionalFlat(28, 28, 1)).backprop(true).pretrain(false);

    DataSetIterator iter = new MnistDataSetIterator(10, 10);
    MultiLayerConfiguration conf = builder.build();
    MultiLayerNetwork network = new MultiLayerNetwork(conf);
    network.init();
    DataSet ds = iter.next();
    for( int i=0; i<5; i++ ) {
        network.fit(ds);
    }
}
 
Developer: deeplearning4j | Project: deeplearning4j | Lines: 31 | Source: ConvolutionLayerTest.java
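As a follow-up to Example 15, a trained MultiLayerNetwork is usually checked against held-out data. Below is a minimal, hedged sketch of evaluating the network trained above on an MNIST test iterator; the batch size and seed are placeholders, not values from the original test.

// Hedged sketch: evaluating the network trained in Example 15.
DataSetIterator testIter = new MnistDataSetIterator(10, false, 12345); // test split
Evaluation eval = network.evaluate(testIter);
System.out.println(eval.stats());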


Note: The org.deeplearning4j.nn.conf.MultiLayerConfiguration.Builder examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets are drawn from community-contributed open-source projects, and copyright remains with their original authors; consult each project's License before redistributing or reusing the code. Do not republish without permission.