

Java MultiLayerNetwork.feedForward Method Code Examples

This article collects typical usage examples of the Java method org.deeplearning4j.nn.multilayer.MultiLayerNetwork.feedForward. If you are wondering what MultiLayerNetwork.feedForward does, how to call it, or what it looks like in practice, the curated code samples below should help. You can also explore further usage examples of the containing class, org.deeplearning4j.nn.multilayer.MultiLayerNetwork.


The following presents 13 code examples of MultiLayerNetwork.feedForward, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
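Before the numbered examples, here is a minimal, self-contained sketch of the method's basic contract. This sketch is not taken from the deeplearning4j test suite: the class name FeedForwardSketch and the layer sizes are illustrative assumptions chosen for brevity.

import java.util.List;

import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class FeedForwardSketch { // hypothetical class name, for illustration only
    public static void main(String[] args) {
        // A tiny two-layer network: 4 inputs -> 3 hidden units -> 2 outputs (sizes are arbitrary)
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list()
                .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).build())
                .layer(1, new OutputLayer.Builder().nIn(3).nOut(2).build())
                .build();
        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        INDArray in = Nd4j.rand(1, 4);

        // feedForward(input) returns one INDArray per step: index 0 is the input itself,
        // and index i (for i >= 1) is the activation of layer i - 1.
        List<INDArray> activations = net.feedForward(in);
        System.out.println(activations.size()); // prints 3: numLayers + 1

        // The feedForward(input, train) overload switches between training- and test-time
        // behavior (e.g. whether dropout is applied), which several examples below rely on.
        List<INDArray> testTimeActivations = net.feedForward(in, false);
    }
}

The invariant worth remembering for the examples below: the returned list has numLayers + 1 entries, with the raw input at index 0.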

Example 1: testForwardPass

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import required package/class
@Test
public void testForwardPass() {

    int[][] encLayerSizes = new int[][] {{12}, {12, 13}, {12, 13, 14}};
    for (int i = 0; i < encLayerSizes.length; i++) {

        MultiLayerConfiguration mlc = new NeuralNetConfiguration.Builder().list().layer(0,
                        new org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder.Builder().nIn(10)
                                        .nOut(5).encoderLayerSizes(encLayerSizes[i]).decoderLayerSizes(13).build())
                        .build();

        NeuralNetConfiguration c = mlc.getConf(0);
        org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder vae =
                        (org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder) c.getLayer();

        MultiLayerNetwork net = new MultiLayerNetwork(mlc);
        net.init();

        INDArray in = Nd4j.rand(1, 10);

        List<INDArray> out = net.feedForward(in);
        assertArrayEquals(new int[] {1, 10}, out.get(0).shape());
        assertArrayEquals(new int[] {1, 5}, out.get(1).shape());
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 27, Source: TestVAE.java

Example 2: testWithPreprocessorsMLN

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import required package/class
@Test
public void testWithPreprocessorsMLN(){
    for(WorkspaceMode wm : WorkspaceMode.values()) {
        System.out.println(wm);
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .trainingWorkspaceMode(wm)
                .inferenceWorkspaceMode(wm)
                .list()
                .layer(new GravesLSTM.Builder().nIn(10).nOut(5).build())
                .layer(new GravesLSTM.Builder().nIn(5).nOut(8).build())
                .layer(new RnnOutputLayer.Builder(LossFunctions.LossFunction.MSE).activation(Activation.SIGMOID).nOut(3).build())
                .inputPreProcessor(0, new DupPreProcessor())
                .setInputType(InputType.recurrent(10))
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();


        INDArray input = Nd4j.zeros(1, 10, 5);

        for( boolean train : new boolean[]{false, true}){
            net.clear();
            net.feedForward(input, train);
        }

        net.setInput(input);
        net.setLabels(Nd4j.rand(1, 3, 5));
        net.computeGradientAndScore();
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 32, Source: WorkspaceTests.java

Example 3: testSameModeActivationSizes

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import required package/class
@Test
public void testSameModeActivationSizes() {
    int inH = 3;
    int inW = 4;
    int inDepth = 3;
    int minibatch = 5;

    int sH = 2;
    int sW = 2;
    int kH = 3;
    int kW = 3;

    Layer[] l = new Layer[2];
    l[0] = new ConvolutionLayer.Builder().nOut(4).kernelSize(kH, kW).stride(sH, sW).build();
    l[1] = new SubsamplingLayer.Builder().kernelSize(kH, kW).stride(sH, sW).build();

    for (int i = 0; i < l.length; i++) {

        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().convolutionMode(ConvolutionMode.Same)
                        .list().layer(0, l[i]).layer(1, new OutputLayer.Builder().nOut(3).build())
                        .setInputType(InputType.convolutional(inH, inW, inDepth)).build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        INDArray inData = Nd4j.create(minibatch, inDepth, inH, inW);
        List<INDArray> activations = net.feedForward(inData);
        INDArray actL0 = activations.get(1);

        int outH = (int) Math.ceil(inH / ((double) sH));
        int outW = (int) Math.ceil(inW / ((double) sW));

        System.out.println(Arrays.toString(actL0.shape()));
        assertArrayEquals(new int[] {minibatch, (i == 0 ? 4 : inDepth), outH, outW}, actL0.shape());
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 37, Source: TestConvolutionModes.java

Example 4: testForwardBasicIris

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import required package/class
@Test
public void testForwardBasicIris() {

    ComputationGraphConfiguration configuration = getIrisGraphConfiguration();
    ComputationGraph graph = new ComputationGraph(configuration);
    graph.init();

    MultiLayerConfiguration mlc = getIrisMLNConfiguration();
    MultiLayerNetwork net = new MultiLayerNetwork(mlc);
    net.init();

    DataSetIterator iris = new IrisDataSetIterator(150, 150);
    DataSet ds = iris.next();

    graph.setInput(0, ds.getFeatureMatrix());
    Map<String, INDArray> activations = graph.feedForward(false);
    assertEquals(3, activations.size()); //2 layers + 1 input node
    assertTrue(activations.containsKey("input"));
    assertTrue(activations.containsKey("firstLayer"));
    assertTrue(activations.containsKey("outputLayer"));

    //Now: set parameters of both networks to be identical. Then feedforward, and check we get the same outputs
    Nd4j.getRandom().setSeed(12345);
    int nParams = getNumParams();
    INDArray params = Nd4j.rand(1, nParams);
    graph.setParams(params.dup());
    net.setParams(params.dup());

    List<INDArray> mlnAct = net.feedForward(ds.getFeatureMatrix(), false);
    activations = graph.feedForward(ds.getFeatureMatrix(), false);

    assertEquals(mlnAct.get(0), activations.get("input"));
    assertEquals(mlnAct.get(1), activations.get("firstLayer"));
    assertEquals(mlnAct.get(2), activations.get("outputLayer"));
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 36, Source: TestComputationGraphNetwork.java

Example 5: testDropoutSimple

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import required package/class
@Test
public void testDropoutSimple() throws Exception {
    //Testing dropout with a single layer
    //Layer input: values should be set to either 0.0 or 2.0x original value

    int nIn = 8;
    int nOut = 8;

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                    .updater(new Sgd())
                    .dropOut(0.5).list()
                    .layer(0, new OutputLayer.Builder().activation(Activation.IDENTITY)
                                    .lossFunction(LossFunctions.LossFunction.MSE).nIn(nIn).nOut(nOut)
                                    .weightInit(WeightInit.XAVIER).build())
                    .backprop(true).pretrain(false).build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    net.getLayer(0).getParam("W").assign(Nd4j.eye(nIn));

    int nTests = 15;

    Nd4j.getRandom().setSeed(12345);
    int noDropoutCount = 0;
    for (int i = 0; i < nTests; i++) {
        INDArray in = Nd4j.rand(1, nIn);
        INDArray out = Nd4j.rand(1, nOut);
        INDArray inCopy = in.dup();

        List<INDArray> l = net.feedForward(in, true);

        INDArray postDropout = l.get(l.size() - 1);
        //Dropout occurred. Expect inputs to be either scaled 2x original, or set to 0.0 (with dropout = 0.5)
        int countDropped = 0;
        for (int j = 0; j < inCopy.length(); j++) {
            double origValue = inCopy.getDouble(j);
            double doValue = postDropout.getDouble(j);
            if (doValue > 0.0) {
                //Input was kept -> should be scaled by factor of (1.0/0.5 = 2)
                assertEquals(origValue * 2.0, doValue, 0.0001);
            } else {
                countDropped++;
            }
        }
        //If no element was dropped for this input, count it (checked against nTests below)
        if (countDropped == 0) {
            noDropoutCount++;
        }

        //Do forward pass
        //(1) ensure dropout ISN'T being applied for forward pass at test time
        //(2) ensure dropout ISN'T being applied for test time scoring
        //If dropout is applied at test time: outputs + score will differ between passes
        INDArray in2 = Nd4j.rand(1, nIn);
        INDArray out2 = Nd4j.rand(1, nOut);
        INDArray outTest1 = net.output(in2, false);
        INDArray outTest2 = net.output(in2, false);
        INDArray outTest3 = net.output(in2, false);
        assertEquals(outTest1, outTest2);
        assertEquals(outTest1, outTest3);

        double score1 = net.score(new DataSet(in2, out2), false);
        double score2 = net.score(new DataSet(in2, out2), false);
        double score3 = net.score(new DataSet(in2, out2), false);
        assertEquals(score1, score2, 0.0);
        assertEquals(score1, score3, 0.0);
    }

    if (noDropoutCount >= nTests / 3) {
        //at 0.5 dropout ratio and more than a few inputs, expect only a very small number of instances where
        //no dropout occurs, just due to random chance
        fail("Too many instances of dropout not being applied");
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 69, Source: TestDropout.java

Example 6: testSingleExample

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import required package/class
@Test
public void testSingleExample() {
    Nd4j.getRandom().setSeed(12345);

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .updater(new Sgd(0.1)).seed(12345).list()
                    .layer(0, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().activation(Activation.TANH)
                                    .nIn(2).nOut(2).build())
                    .layer(1, new org.deeplearning4j.nn.conf.layers.RnnOutputLayer.Builder()
                                    .lossFunction(LossFunctions.LossFunction.MSE).nIn(2).nOut(1)
                                    .activation(Activation.TANH).build())
                    .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    INDArray in1 = Nd4j.rand(new int[] {1, 2, 4});
    INDArray in2 = Nd4j.rand(new int[] {1, 2, 5});
    in2.put(new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.interval(0, 4)}, in1);

    assertEquals(in1, in2.get(NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.interval(0, 4)));

    INDArray labels1 = Nd4j.rand(new int[] {1, 1, 4});
    INDArray labels2 = Nd4j.create(1, 1, 5);
    labels2.put(new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.interval(0, 4)}, labels1);
    assertEquals(labels1, labels2.get(NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.interval(0, 4)));

    INDArray out1 = net.output(in1);
    INDArray out2 = net.output(in2);

    System.out.println(Arrays.toString(net.output(in1).data().asFloat()));
    System.out.println(Arrays.toString(net.output(in2).data().asFloat()));

    List<INDArray> activations1 = net.feedForward(in1);
    List<INDArray> activations2 = net.feedForward(in2);

    for (int i = 0; i < 3; i++) {
        System.out.println("-----\n" + i);
        System.out.println(Arrays.toString(activations1.get(i).dup().data().asDouble()));
        System.out.println(Arrays.toString(activations2.get(i).dup().data().asDouble()));

        System.out.println(activations1.get(i));
        System.out.println(activations2.get(i));
    }



    //Expect first 4 time steps to be identical...
    for (int i = 0; i < 4; i++) {
        double d1 = out1.getDouble(i);
        double d2 = out2.getDouble(i);
        assertEquals(d1, d2, 0.0);
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 56, Source: GravesLSTMTest.java

Example 7: testDenseActivationLayer

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import required package/class
@Test
public void testDenseActivationLayer() throws Exception {
    DataSetIterator iter = new MnistDataSetIterator(2, 2);
    DataSet next = iter.next();

    // Run without separate activation layer
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(123)
                    .list()
                    .layer(0, new DenseLayer.Builder().nIn(28 * 28 * 1).nOut(10).activation(Activation.RELU)
                                    .weightInit(WeightInit.XAVIER).build())
                    .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(
                                    LossFunctions.LossFunction.MCXENT).weightInit(WeightInit.XAVIER)
                                                    .activation(Activation.SOFTMAX).nIn(10).nOut(10).build())
                    .backprop(true).pretrain(false).build();

    MultiLayerNetwork network = new MultiLayerNetwork(conf);
    network.init();
    network.fit(next);


    // Run with separate activation layer
    MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder()
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(123)
                    .list()
                    .layer(0, new DenseLayer.Builder().nIn(28 * 28 * 1).nOut(10).activation(Activation.IDENTITY)
                                    .weightInit(WeightInit.XAVIER).build())
                    .layer(1, new org.deeplearning4j.nn.conf.layers.ActivationLayer.Builder()
                                    .activation(Activation.RELU).build())
                    .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).nIn(10).nOut(10)
                                    .build())
                    .backprop(true).pretrain(false).build();

    MultiLayerNetwork network2 = new MultiLayerNetwork(conf2);
    network2.init();
    network2.fit(next);

    // check parameters
    assertEquals(network.getLayer(0).getParam("W"), network2.getLayer(0).getParam("W"));
    assertEquals(network.getLayer(1).getParam("W"), network2.getLayer(2).getParam("W"));
    assertEquals(network.getLayer(0).getParam("b"), network2.getLayer(0).getParam("b"));
    assertEquals(network.getLayer(1).getParam("b"), network2.getLayer(2).getParam("b"));

    // check activations
    network.init();
    network.setInput(next.getFeatureMatrix());
    List<INDArray> activations = network.feedForward(true);

    network2.init();
    network2.setInput(next.getFeatureMatrix());
    List<INDArray> activations2 = network2.feedForward(true);

    assertEquals(activations.get(1).reshape(activations2.get(2).shape()), activations2.get(2));
    assertEquals(activations.get(2), activations2.get(3));


}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 59, Source: ActivationLayerTest.java

Example 8: testAutoEncoderActivationLayer

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import required package/class
@Test
public void testAutoEncoderActivationLayer() throws Exception {

    int minibatch = 3;
    int nIn = 5;
    int layerSize = 5;
    int nOut = 3;

    INDArray next = Nd4j.rand(new int[] {minibatch, nIn});
    INDArray labels = Nd4j.zeros(minibatch, nOut);
    for (int i = 0; i < minibatch; i++) {
        labels.putScalar(i, i % nOut, 1.0);
    }

    // Run without separate activation layer
    Nd4j.getRandom().setSeed(12345);
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(123)
                    .list()
                    .layer(0, new AutoEncoder.Builder().nIn(nIn).nOut(layerSize).corruptionLevel(0.0)
                                    .activation(Activation.SIGMOID).build())
                    .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(
                                    LossFunctions.LossFunction.RECONSTRUCTION_CROSSENTROPY)
                                                    .activation(Activation.SOFTMAX).nIn(layerSize).nOut(nOut)
                                                    .build())
                    .backprop(true).pretrain(false).build();

    MultiLayerNetwork network = new MultiLayerNetwork(conf);
    network.init();
    network.fit(next, labels); //Labels are necessary for this test: otherwise the layer activation function would affect the pretraining results


    // Run with separate activation layer
    Nd4j.getRandom().setSeed(12345);
    MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder()
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(123)
                    .list()
                    .layer(0, new AutoEncoder.Builder().nIn(nIn).nOut(layerSize).corruptionLevel(0.0)
                                    .activation(Activation.IDENTITY).build())
                    .layer(1, new org.deeplearning4j.nn.conf.layers.ActivationLayer.Builder()
                                    .activation(Activation.SIGMOID).build())
                    .layer(2, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(
                                    LossFunctions.LossFunction.RECONSTRUCTION_CROSSENTROPY)
                                                    .activation(Activation.SOFTMAX).nIn(layerSize).nOut(nOut)
                                                    .build())
                    .backprop(true).pretrain(false).build();

    MultiLayerNetwork network2 = new MultiLayerNetwork(conf2);
    network2.init();
    network2.fit(next, labels);

    // check parameters
    assertEquals(network.getLayer(0).getParam("W"), network2.getLayer(0).getParam("W"));
    assertEquals(network.getLayer(1).getParam("W"), network2.getLayer(2).getParam("W"));
    assertEquals(network.getLayer(0).getParam("b"), network2.getLayer(0).getParam("b"));
    assertEquals(network.getLayer(1).getParam("b"), network2.getLayer(2).getParam("b"));

    // check activations
    network.init();
    network.setInput(next);
    List<INDArray> activations = network.feedForward(true);

    network2.init();
    network2.setInput(next);
    List<INDArray> activations2 = network2.feedForward(true);

    assertEquals(activations.get(1).reshape(activations2.get(2).shape()), activations2.get(2));
    assertEquals(activations.get(2), activations2.get(3));


}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 72, Source: ActivationLayerTest.java

Example 9: testCNNActivationLayer

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import required package/class
@Test
public void testCNNActivationLayer() throws Exception {
    DataSetIterator iter = new MnistDataSetIterator(2, 2);
    DataSet next = iter.next();

    // Run without separate activation layer
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(123)
                    .list()
                    .layer(0, new ConvolutionLayer.Builder(4, 4).stride(2, 2).nIn(1).nOut(20)
                                    .activation(Activation.RELU).weightInit(WeightInit.XAVIER).build())
                    .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(
                                    LossFunctions.LossFunction.MCXENT).weightInit(WeightInit.XAVIER)
                                                    .activation(Activation.SOFTMAX).nOut(10).build())
                    .backprop(true).pretrain(false).setInputType(InputType.convolutionalFlat(28, 28, 1)).build();

    MultiLayerNetwork network = new MultiLayerNetwork(conf);
    network.init();
    network.fit(next);


    // Run with separate activation layer
    MultiLayerConfiguration conf2 =
                    new NeuralNetConfiguration.Builder()
                                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                                    .seed(123).list()
                                    .layer(0, new ConvolutionLayer.Builder(4, 4).stride(2, 2).nIn(1).nOut(20)
                                                    .activation(Activation.IDENTITY).weightInit(WeightInit.XAVIER)
                                                    .build())
                                    .layer(1, new org.deeplearning4j.nn.conf.layers.ActivationLayer.Builder()
                                                    .activation(Activation.RELU).build())
                                    .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                                    .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX)
                                                    .nOut(10).build())
                                    .backprop(true).pretrain(false)
                                    .setInputType(InputType.convolutionalFlat(28, 28, 1)).build();

    MultiLayerNetwork network2 = new MultiLayerNetwork(conf2);
    network2.init();
    network2.fit(next);

    // check parameters
    assertEquals(network.getLayer(0).getParam("W"), network2.getLayer(0).getParam("W"));
    assertEquals(network.getLayer(1).getParam("W"), network2.getLayer(2).getParam("W"));
    assertEquals(network.getLayer(0).getParam("b"), network2.getLayer(0).getParam("b"));

    // check activations
    network.init();
    network.setInput(next.getFeatureMatrix());
    List<INDArray> activations = network.feedForward(true);

    network2.init();
    network2.setInput(next.getFeatureMatrix());
    List<INDArray> activations2 = network2.feedForward(true);

    assertEquals(activations.get(1).reshape(activations2.get(2).shape()), activations2.get(2));
    assertEquals(activations.get(2), activations2.get(3));
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 59, Source: ActivationLayerTest.java

Example 10: testDropoutLayerWithoutTraining

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import required package/class
@Test
public void testDropoutLayerWithoutTraining() throws Exception {
    MultiLayerConfiguration confIntegrated = new NeuralNetConfiguration.Builder().seed(3648)
                    .list().layer(0,
                                    new ConvolutionLayer.Builder(1, 1).stride(1, 1).nIn(1).nOut(1).dropOut(0.25)
                                                    .activation(Activation.IDENTITY).weightInit(WeightInit.XAVIER)
                                                    .build())
                    .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .weightInit(WeightInit.XAVIER).activation(Activation.IDENTITY).dropOut(0.25)
                                    .nOut(4).build())
                    .backprop(true).pretrain(false).setInputType(InputType.convolutionalFlat(2, 2, 1)).build();

    MultiLayerNetwork netIntegrated = new MultiLayerNetwork(confIntegrated);
    netIntegrated.init();
    netIntegrated.getLayer(0).setParam("W", Nd4j.eye(1));
    netIntegrated.getLayer(0).setParam("b", Nd4j.zeros(1, 1));
    netIntegrated.getLayer(1).setParam("W", Nd4j.eye(4));
    netIntegrated.getLayer(1).setParam("b", Nd4j.zeros(4, 1));

    MultiLayerConfiguration confSeparate =
                    new NeuralNetConfiguration.Builder()
                                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                                    .seed(3648)
                                    .list().layer(0,
                                                    new DropoutLayer.Builder(0.25)
                                                                    .build())
                                    .layer(1, new ConvolutionLayer.Builder(1, 1).stride(1, 1).nIn(1).nOut(1)
                                                    .activation(Activation.IDENTITY).weightInit(WeightInit.XAVIER)
                                                    .build())
                                    .layer(2, new DropoutLayer.Builder(0.25).build())
                                    .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                                    .weightInit(WeightInit.XAVIER).activation(Activation.IDENTITY)
                                                    .nOut(4).build())
                                    .backprop(true).pretrain(false)
                                    .setInputType(InputType.convolutionalFlat(2, 2, 1)).build();

    MultiLayerNetwork netSeparate = new MultiLayerNetwork(confSeparate);
    netSeparate.init();
    netSeparate.getLayer(1).setParam("W", Nd4j.eye(1));
    netSeparate.getLayer(1).setParam("b", Nd4j.zeros(1, 1));
    netSeparate.getLayer(3).setParam("W", Nd4j.eye(4));
    netSeparate.getLayer(3).setParam("b", Nd4j.zeros(4, 1));

    INDArray in = Nd4j.arange(1, 5);
    Nd4j.getRandom().setSeed(12345);
    List<INDArray> actTrainIntegrated = netIntegrated.feedForward(in.dup(), true);
    Nd4j.getRandom().setSeed(12345);
    List<INDArray> actTrainSeparate = netSeparate.feedForward(in.dup(), true);
    Nd4j.getRandom().setSeed(12345);
    List<INDArray> actTestIntegrated = netIntegrated.feedForward(in.dup(), false);
    Nd4j.getRandom().setSeed(12345);
    List<INDArray> actTestSeparate = netSeparate.feedForward(in.dup(), false);

    assertEquals(actTrainIntegrated.get(1), actTrainSeparate.get(2));
    assertEquals(actTrainIntegrated.get(2), actTrainSeparate.get(4));
    assertEquals(actTestIntegrated.get(1), actTestSeparate.get(2));
    assertEquals(actTestIntegrated.get(2), actTestSeparate.get(4));
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 59, Source: DropoutLayerTest.java

Example 11: testDropoutLayerWithDenseMnist

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import required package/class
@Test
public void testDropoutLayerWithDenseMnist() throws Exception {
    DataSetIterator iter = new MnistDataSetIterator(2, 2);
    DataSet next = iter.next();

    // Run without separate activation layer
    MultiLayerConfiguration confIntegrated = new NeuralNetConfiguration.Builder()
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(123)
                    .list()
                    .layer(0, new DenseLayer.Builder().nIn(28 * 28 * 1).nOut(10)
                                    .activation(Activation.RELU).weightInit(
                                                    WeightInit.XAVIER)
                                    .build())
                    .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).dropOut(0.25)
                                    .nIn(10).nOut(10).build())
                    .backprop(true).pretrain(false).build();

    MultiLayerNetwork netIntegrated = new MultiLayerNetwork(confIntegrated);
    netIntegrated.init();
    netIntegrated.fit(next);

    // Run with separate activation layer
    MultiLayerConfiguration confSeparate = new NeuralNetConfiguration.Builder()
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(123)
                    .list()
                    .layer(0, new DenseLayer.Builder().nIn(28 * 28 * 1).nOut(10).activation(Activation.RELU)
                                    .weightInit(WeightInit.XAVIER).build())
                    .layer(1, new DropoutLayer.Builder(0.25).build())
                    .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).nIn(10).nOut(10)
                                    .build())
                    .backprop(true).pretrain(false).build();

    MultiLayerNetwork netSeparate = new MultiLayerNetwork(confSeparate);
    netSeparate.init();
    netSeparate.fit(next);

    // check parameters
    assertEquals(netIntegrated.getLayer(0).getParam("W"), netSeparate.getLayer(0).getParam("W"));
    assertEquals(netIntegrated.getLayer(0).getParam("b"), netSeparate.getLayer(0).getParam("b"));
    assertEquals(netIntegrated.getLayer(1).getParam("W"), netSeparate.getLayer(2).getParam("W"));
    assertEquals(netIntegrated.getLayer(1).getParam("b"), netSeparate.getLayer(2).getParam("b"));

    // check activations
    netIntegrated.setInput(next.getFeatureMatrix());
    netSeparate.setInput(next.getFeatureMatrix());

    Nd4j.getRandom().setSeed(12345);
    List<INDArray> actTrainIntegrated = netIntegrated.feedForward(true);
    Nd4j.getRandom().setSeed(12345);
    List<INDArray> actTrainSeparate = netSeparate.feedForward(true);
    assertEquals(actTrainIntegrated.get(1), actTrainSeparate.get(1));
    assertEquals(actTrainIntegrated.get(2), actTrainSeparate.get(3));

    Nd4j.getRandom().setSeed(12345);
    List<INDArray> actTestIntegrated = netIntegrated.feedForward(false);
    Nd4j.getRandom().setSeed(12345);
    List<INDArray> actTestSeparate = netSeparate.feedForward(false);
    assertEquals(actTestIntegrated.get(1), actTestSeparate.get(1));
    assertEquals(actTestIntegrated.get(2), actTestSeparate.get(3));
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 63, Source: DropoutLayerTest.java

Example 12: testDropoutLayerWithConvMnist

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import required package/class
@Test
public void testDropoutLayerWithConvMnist() throws Exception {
    DataSetIterator iter = new MnistDataSetIterator(2, 2);
    DataSet next = iter.next();

    // Run without separate activation layer
    Nd4j.getRandom().setSeed(12345);
    MultiLayerConfiguration confIntegrated = new NeuralNetConfiguration.Builder().seed(123)
                    .list().layer(0,
                                    new ConvolutionLayer.Builder(4, 4).stride(2, 2).nIn(1).nOut(20)
                                                    .activation(Activation.TANH).weightInit(WeightInit.XAVIER)
                                                    .build())
                    .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).dropOut(0.5)
                                    .nOut(10).build())
                    .backprop(true).pretrain(false).setInputType(InputType.convolutionalFlat(28, 28, 1)).build();

    // Run with separate activation layer
    Nd4j.getRandom().setSeed(12345);

    //Manually configure preprocessors
    //This is necessary; otherwise the CnnToFeedForwardPreProcessor would sit at different positions
    //in the two networks: dropout applied to 4d activations in one, and to 2d activations in the other
    Map<Integer, InputPreProcessor> preProcessorMap = new HashMap<>();
    preProcessorMap.put(1, new CnnToFeedForwardPreProcessor(13, 13, 20));

    MultiLayerConfiguration confSeparate = new NeuralNetConfiguration.Builder().seed(123).list()
                    .layer(0, new ConvolutionLayer.Builder(4, 4).stride(2, 2).nIn(1).nOut(20)
                                    .activation(Activation.TANH).weightInit(WeightInit.XAVIER).build())
                    .layer(1, new DropoutLayer.Builder(0.5).build())
                    .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).nOut(10).build())
                    .inputPreProcessors(preProcessorMap).backprop(true).pretrain(false)
                    .setInputType(InputType.convolutionalFlat(28, 28, 1)).build();


    Nd4j.getRandom().setSeed(12345);
    MultiLayerNetwork netIntegrated = new MultiLayerNetwork(confIntegrated);
    netIntegrated.init();

    Nd4j.getRandom().setSeed(12345);
    MultiLayerNetwork netSeparate = new MultiLayerNetwork(confSeparate);
    netSeparate.init();

    assertEquals(netIntegrated.params(), netSeparate.params());

    Nd4j.getRandom().setSeed(12345);
    netIntegrated.fit(next);

    Nd4j.getRandom().setSeed(12345);
    netSeparate.fit(next);

    assertEquals(netIntegrated.params(), netSeparate.params());

    // check parameters
    assertEquals(netIntegrated.getLayer(0).getParam("W"), netSeparate.getLayer(0).getParam("W"));
    assertEquals(netIntegrated.getLayer(0).getParam("b"), netSeparate.getLayer(0).getParam("b"));
    assertEquals(netIntegrated.getLayer(1).getParam("W"), netSeparate.getLayer(2).getParam("W"));
    assertEquals(netIntegrated.getLayer(1).getParam("b"), netSeparate.getLayer(2).getParam("b"));

    // check activations
    netIntegrated.setInput(next.getFeatureMatrix());
    netSeparate.setInput(next.getFeatureMatrix());

    Nd4j.getRandom().setSeed(12345);
    List<INDArray> actTrainIntegrated = netIntegrated.feedForward(true);
    Nd4j.getRandom().setSeed(12345);
    List<INDArray> actTrainSeparate = netSeparate.feedForward(true);
    assertEquals(actTrainIntegrated.get(1), actTrainSeparate.get(1));
    assertEquals(actTrainIntegrated.get(2), actTrainSeparate.get(3));

    Nd4j.getRandom().setSeed(12345);
    List<INDArray> actTestIntegrated = netIntegrated.feedForward(false);
    Nd4j.getRandom().setSeed(12345);
    List<INDArray> actTestSeparate = netSeparate.feedForward(false);
    assertEquals(actTestIntegrated.get(1), actTestSeparate.get(1));
    assertEquals(actTestIntegrated.get(2), actTestSeparate.get(3));
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 79, Source: DropoutLayerTest.java

Example 13: testEmbeddingForwardPass

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import required package/class
@Test
public void testEmbeddingForwardPass() {
    //With the same parameters, embedding layer should have same activations as the equivalent one-hot representation
    // input with a DenseLayer

    int nClassesIn = 10;

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().activation(Activation.TANH).list()
                    .layer(0, new EmbeddingLayer.Builder().hasBias(true).nIn(nClassesIn).nOut(5).build())
                    .layer(1, new OutputLayer.Builder().nIn(5).nOut(4).build()).pretrain(false).backprop(true)
                    .build();
    MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder().activation(Activation.TANH).list()
                    .layer(0, new DenseLayer.Builder().nIn(nClassesIn).nOut(5).build())
                    .layer(1, new OutputLayer.Builder().nIn(5).nOut(4).build()).pretrain(false).backprop(true)
                    .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    MultiLayerNetwork net2 = new MultiLayerNetwork(conf2);
    net.init();
    net2.init();

    net2.setParams(net.params().dup());

    int batchSize = 3;
    INDArray inEmbedding = Nd4j.create(batchSize, 1);
    INDArray inOneHot = Nd4j.create(batchSize, nClassesIn);

    Random r = new Random(12345);
    for (int i = 0; i < batchSize; i++) {
        int classIdx = r.nextInt(nClassesIn);
        inEmbedding.putScalar(i, classIdx);
        inOneHot.putScalar(new int[] {i, classIdx}, 1.0);
    }

    List<INDArray> activationsEmbedding = net.feedForward(inEmbedding, false);
    List<INDArray> activationsDense = net2.feedForward(inOneHot, false);
    for (int i = 1; i < 3; i++) {
        INDArray actE = activationsEmbedding.get(i);
        INDArray actD = activationsDense.get(i);
        assertEquals(actE, actD);
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 43, Source: EmbeddingLayerTest.java


Note: The org.deeplearning4j.nn.multilayer.MultiLayerNetwork.feedForward examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors, and distribution and use are subject to the corresponding project's license. Do not reproduce without permission.