Java MultiLayerNetwork.setLabels Method Code Examples

This article compiles typical usage examples of the Java method org.deeplearning4j.nn.multilayer.MultiLayerNetwork.setLabels. If you have been wondering what exactly MultiLayerNetwork.setLabels does, how to use it, or where to find it in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.deeplearning4j.nn.multilayer.MultiLayerNetwork.


Twelve code examples of MultiLayerNetwork.setLabels are shown below, sorted by popularity by default.
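
Before working through the examples, here is a minimal self-contained sketch of the pattern they all share (the class name and layer sizes are illustrative, not taken from any of the projects below): setInput supplies the features, setLabels supplies the targets, and computeGradientAndScore() then evaluates the loss and gradients in place, without calling fit().

import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class SetLabelsSketch {
    public static void main(String[] args) {
        //A tiny 2-layer network: 4 inputs -> 8 hidden -> 3 outputs
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(12345)
                .activation(Activation.TANH)
                .list()
                .layer(0, new DenseLayer.Builder().nIn(4).nOut(8).build())
                .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MSE)
                        .activation(Activation.IDENTITY).nIn(8).nOut(3).build())
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        INDArray features = Nd4j.rand(5, 4); //5 examples, 4 features each
        INDArray labels = Nd4j.rand(5, 3);   //5 examples, 3 targets; shape must match the output layer

        //setLabels stores the targets on the network; together with setInput it
        //lets computeGradientAndScore() evaluate loss and gradients without fit()
        net.setInput(features);
        net.setLabels(labels);
        net.computeGradientAndScore();

        System.out.println("score: " + net.score());
        System.out.println("#gradient values: " + net.getFlattenedGradients().length());
    }
}

As the examples below show, the same pattern is common in tests that compare two networks: set identical inputs and labels on both, call computeGradientAndScore() on each, and assert that the scores and gradients match.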

Example 1: testWithPreprocessorsMLN

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the package/class this method depends on
@Test
public void testWithPreprocessorsMLN(){
    for(WorkspaceMode wm : WorkspaceMode.values()) {
        System.out.println(wm);
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .trainingWorkspaceMode(wm)
                .inferenceWorkspaceMode(wm)
                .list()
                .layer(new GravesLSTM.Builder().nIn(10).nOut(5).build())
                .layer(new GravesLSTM.Builder().nIn(5).nOut(8).build())
                .layer(new RnnOutputLayer.Builder(LossFunctions.LossFunction.MSE).activation(Activation.SIGMOID).nOut(3).build())
                .inputPreProcessor(0, new DupPreProcessor())
                .setInputType(InputType.recurrent(10))
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();


        INDArray input = Nd4j.zeros(1, 10, 5);

        for( boolean train : new boolean[]{false, true}){
            net.clear();
            net.feedForward(input, train);
        }

        net.setInput(input);
        net.setLabels(Nd4j.rand(1, 3, 5));
        net.computeGradientAndScore();
    }
}
 
Author: deeplearning4j, Project: deeplearning4j, Lines: 32, Source: WorkspaceTests.java

Example 2: testGradientCNNMLN

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the package/class this method depends on
@Test
public void testGradientCNNMLN() {
    //Parameterized test, testing combinations of:
    // (a) activation function
    // (b) Whether to test at random initialization, or after some learning (i.e., 'characteristic mode of operation')
    // (c) Loss function (with specified output activations)
    Activation[] activFns = {Activation.SIGMOID, Activation.TANH};
    boolean[] characteristic = {false, true}; //If true: run some backprop steps first

    LossFunctions.LossFunction[] lossFunctions =
            {LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD, LossFunctions.LossFunction.MSE};
    Activation[] outputActivations = {Activation.SOFTMAX, Activation.TANH}; //i.e., lossFunctions[i] used with outputActivations[i] here

    DataSet ds = new IrisDataSetIterator(150, 150).next();
    ds.normalizeZeroMeanZeroUnitVariance();
    INDArray input = ds.getFeatureMatrix();
    INDArray labels = ds.getLabels();

    for (Activation afn : activFns) {
        for (boolean doLearningFirst : characteristic) {
            for (int i = 0; i < lossFunctions.length; i++) {
                LossFunctions.LossFunction lf = lossFunctions[i];
                Activation outputActivation = outputActivations[i];

                MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder()
                        .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT).updater(new NoOp())
                        .weightInit(WeightInit.XAVIER).seed(12345L).list()
                        .layer(0, new ConvolutionLayer.Builder(1, 1).nOut(6).activation(afn).build())
                        .layer(1, new OutputLayer.Builder(lf).activation(outputActivation).nOut(3).build())
                        .setInputType(InputType.convolutionalFlat(1, 4, 1)).pretrain(false).backprop(true);

                MultiLayerConfiguration conf = builder.build();

                MultiLayerNetwork mln = new MultiLayerNetwork(conf);
                mln.init();
                String name = new Object() {}.getClass().getEnclosingMethod().getName(); //Name of the enclosing test method

                if (doLearningFirst) {
                    //Run a number of iterations of learning
                    mln.setInput(ds.getFeatures());
                    mln.setLabels(ds.getLabels());
                    mln.computeGradientAndScore();
                    double scoreBefore = mln.score();
                    for (int j = 0; j < 10; j++)
                        mln.fit(ds);
                    mln.computeGradientAndScore();
                    double scoreAfter = mln.score();
                    //Can't test in 'characteristic mode of operation' if not learning
                    String msg = name + " - score did not (sufficiently) decrease during learning - activationFn="
                            + afn + ", lossFn=" + lf + ", outputActivation=" + outputActivation
                            + ", doLearningFirst= " + doLearningFirst + " (before=" + scoreBefore
                            + ", scoreAfter=" + scoreAfter + ")";
                    assertTrue(msg, scoreAfter < 0.8 * scoreBefore);
                }

                if (PRINT_RESULTS) {
                    System.out.println(name + " - activationFn=" + afn + ", lossFn=" + lf + ", outputActivation="
                            + outputActivation + ", doLearningFirst=" + doLearningFirst);
                    for (int j = 0; j < mln.getnLayers(); j++)
                        System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
                }

                boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                        DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

                assertTrue(gradOK);
            }
        }
    }
}
 
Author: deeplearning4j, Project: deeplearning4j, Lines: 72, Source: CNNGradientCheckTest.java

Example 3: testCustomUpdater

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the package/class this method depends on
@Test
public void testCustomUpdater() {

    //Create a simple custom updater, equivalent to SGD updater

    double lr = 0.03;

    Nd4j.getRandom().setSeed(12345);
    MultiLayerConfiguration conf1 = new NeuralNetConfiguration.Builder().seed(12345)
                    .activation(Activation.TANH).updater(new CustomIUpdater(lr)) //Specify custom IUpdater
                    .list().layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build())
                    .layer(1, new OutputLayer.Builder().nIn(10).nOut(10)
                                    .lossFunction(LossFunctions.LossFunction.MSE).build())
                    .build();

    Nd4j.getRandom().setSeed(12345);
    MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder().seed(12345)
                    .activation(Activation.TANH).updater(new Sgd(lr)).list()
                    .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build()).layer(1, new OutputLayer.Builder()
                                    .nIn(10).nOut(10).lossFunction(LossFunctions.LossFunction.MSE).build())
                    .build();

    //First: Check updater config
    assertTrue(((BaseLayer) conf1.getConf(0).getLayer()).getIUpdater() instanceof CustomIUpdater);
    assertTrue(((BaseLayer) conf1.getConf(1).getLayer()).getIUpdater() instanceof CustomIUpdater);
    assertTrue(((BaseLayer) conf2.getConf(0).getLayer()).getIUpdater() instanceof Sgd);
    assertTrue(((BaseLayer) conf2.getConf(1).getLayer()).getIUpdater() instanceof Sgd);

    CustomIUpdater u0_0 = (CustomIUpdater) ((BaseLayer) conf1.getConf(0).getLayer()).getIUpdater();
    CustomIUpdater u0_1 = (CustomIUpdater) ((BaseLayer) conf1.getConf(1).getLayer()).getIUpdater();
    assertEquals(lr, u0_0.getLearningRate(), 1e-6);
    assertEquals(lr, u0_1.getLearningRate(), 1e-6);

    Sgd u1_0 = (Sgd) ((BaseLayer) conf2.getConf(0).getLayer()).getIUpdater();
    Sgd u1_1 = (Sgd) ((BaseLayer) conf2.getConf(1).getLayer()).getIUpdater();
    assertEquals(lr, u1_0.getLearningRate(), 1e-6);
    assertEquals(lr, u1_1.getLearningRate(), 1e-6);


    //Second: check JSON
    String asJson = conf1.toJson();
    MultiLayerConfiguration fromJson = MultiLayerConfiguration.fromJson(asJson);
    assertEquals(conf1, fromJson);

    Nd4j.getRandom().setSeed(12345);
    MultiLayerNetwork net1 = new MultiLayerNetwork(conf1);
    net1.init();

    Nd4j.getRandom().setSeed(12345);
    MultiLayerNetwork net2 = new MultiLayerNetwork(conf2);
    net2.init();


    //Third: check gradients are equal
    INDArray in = Nd4j.rand(5, 10);
    INDArray labels = Nd4j.rand(5, 10);

    net1.setInput(in);
    net2.setInput(in);

    net1.setLabels(labels);
    net2.setLabels(labels);

    net1.computeGradientAndScore();
    net2.computeGradientAndScore();

    assertEquals(net1.getFlattenedGradients(), net2.getFlattenedGradients());
}
 
Author: deeplearning4j, Project: deeplearning4j, Lines: 69, Source: TestCustomUpdater.java

Example 4: testMlnToCompGraph

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the package/class this method depends on
@Test
public void testMlnToCompGraph() {
    Nd4j.getRandom().setSeed(12345);

    for( int i=0; i<3; i++ ){
        MultiLayerNetwork n;
        switch (i){
            case 0:
                n = getNet1(false);
                break;
            case 1:
                n = getNet1(true);
                break;
            case 2:
                n = getNet2();
                break;
            default:
                throw new RuntimeException();
        }
        INDArray in = (i <= 1 ? Nd4j.rand(new int[]{8, 3, 10, 10}) : Nd4j.rand(new int[]{8, 5, 10}));
        INDArray labels = (i <= 1 ? Nd4j.rand(new int[]{8, 10}) : Nd4j.rand(new int[]{8, 10, 10}));

        ComputationGraph cg = n.toComputationGraph();

        INDArray out1 = n.output(in);
        INDArray out2 = cg.outputSingle(in);
        assertEquals(out1, out2);


        n.setInput(in);
        n.setLabels(labels);

        cg.setInputs(in);
        cg.setLabels(labels);

        n.computeGradientAndScore();
        cg.computeGradientAndScore();

        assertEquals(n.score(), cg.score(), 1e-6);

        assertEquals(n.gradient().gradient(), cg.gradient().gradient());

        n.fit(in, labels);
        cg.fit(new INDArray[]{in}, new INDArray[]{labels});

        assertEquals(n.params(), cg.params());
    }
}
 
Author: deeplearning4j, Project: deeplearning4j, Lines: 49, Source: TestNetConversion.java

Example 5: testSerialization

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the package/class this method depends on
@Test
public void testSerialization() throws Exception {

    for(WorkspaceMode wsm : WorkspaceMode.values()) {
        log.info("*** Starting workspace mode: " + wsm);

        Nd4j.getRandom().setSeed(12345);

        MultiLayerConfiguration conf1 = new NeuralNetConfiguration.Builder()
                .activation(Activation.TANH)
                .weightInit(WeightInit.XAVIER)
                .trainingWorkspaceMode(wsm)
                .inferenceWorkspaceMode(wsm)
                .updater(new Adam())
                .list()
                .layer(new Bidirectional(Bidirectional.Mode.ADD, new GravesLSTM.Builder().nIn(10).nOut(10).build()))
                .layer(new Bidirectional(Bidirectional.Mode.ADD, new GravesLSTM.Builder().nIn(10).nOut(10).build()))
                .layer(new RnnOutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE)
                        .nIn(10).nOut(10).build())
                .build();

        MultiLayerNetwork net1 = new MultiLayerNetwork(conf1);
        net1.init();

        INDArray in = Nd4j.rand(new int[]{3, 10, 5});
        INDArray labels = Nd4j.rand(new int[]{3, 10, 5});

        net1.fit(in, labels);

        byte[] bytes;
        try (ByteArrayOutputStream baos = new ByteArrayOutputStream()) {
            ModelSerializer.writeModel(net1, baos, true);
            bytes = baos.toByteArray();
        }


        MultiLayerNetwork net2 = ModelSerializer.restoreMultiLayerNetwork(new ByteArrayInputStream(bytes), true);


        in = Nd4j.rand(new int[]{3, 10, 5});
        labels = Nd4j.rand(new int[]{3, 10, 5});

        INDArray out1 = net1.output(in);
        INDArray out2 = net2.output(in);

        assertEquals(out1, out2);

        net1.setInput(in);
        net2.setInput(in);
        net1.setLabels(labels);
        net2.setLabels(labels);

        net1.computeGradientAndScore();
        net2.computeGradientAndScore();

        assertEquals(net1.score(), net2.score(), 1e-6);
        assertEquals(net1.gradient().gradient(), net2.gradient().gradient());
    }
}
 
Author: deeplearning4j, Project: deeplearning4j, Lines: 60, Source: BidirectionalTest.java

Example 6: testCompareRnnOutputRnnLoss

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the package/class this method depends on
@Test
public void testCompareRnnOutputRnnLoss(){
    Nd4j.getRandom().setSeed(12345);

    int timeSeriesLength = 4;
    int nIn = 5;
    int layerSize = 6;
    int nOut = 6;
    int miniBatchSize = 3;

    MultiLayerConfiguration conf1 =
            new NeuralNetConfiguration.Builder().seed(12345L)
                    .updater(new NoOp())
                    .list()
                    .layer(new LSTM.Builder().nIn(nIn).nOut(layerSize).activation(Activation.TANH)
                            .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1.0))
                            .updater(new NoOp()).build())
                    .layer(new DenseLayer.Builder().nIn(layerSize).nOut(nOut).activation(Activation.IDENTITY).build())
                    .layer(new RnnLossLayer.Builder(LossFunction.MCXENT)
                            .activation(Activation.SOFTMAX)
                            .build())
                    .pretrain(false).backprop(true).build();

    MultiLayerNetwork mln = new MultiLayerNetwork(conf1);
    mln.init();


    MultiLayerConfiguration conf2 =
            new NeuralNetConfiguration.Builder().seed(12345L)
                    .updater(new NoOp())
                    .list()
                    .layer(new LSTM.Builder().nIn(nIn).nOut(layerSize).activation(Activation.TANH)
                            .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1.0))
                            .updater(new NoOp()).build())
                    .layer(new org.deeplearning4j.nn.conf.layers.RnnOutputLayer.Builder(LossFunction.MCXENT)
                            .activation(Activation.SOFTMAX)
                            .nIn(layerSize).nOut(nOut)
                            .build())
                    .pretrain(false).backprop(true).build();

    MultiLayerNetwork mln2 = new MultiLayerNetwork(conf2);
    mln2.init();

    mln2.setParams(mln.params());

    INDArray in = Nd4j.rand(new int[]{miniBatchSize, nIn, timeSeriesLength});

    INDArray out1 = mln.output(in);
    INDArray out2 = mln2.output(in); //Compare against the second network (same params)

    assertEquals(out1, out2);

    Random r = new Random(12345);
    INDArray labels = Nd4j.create(miniBatchSize, nOut, timeSeriesLength);
    for( int i=0; i<miniBatchSize; i++ ){
        for( int j=0; j<timeSeriesLength; j++ ){
            labels.putScalar(i, r.nextInt(nOut), j, 1.0);
        }
    }

    mln.setInput(in);
    mln.setLabels(labels);

    mln2.setInput(in);
    mln2.setLabels(labels);

    mln.computeGradientAndScore();
    mln2.computeGradientAndScore();

    assertEquals(mln.gradient().gradient(), mln2.gradient().gradient());
    assertEquals(mln.score(), mln2.score(), 1e-6);

    TestUtils.testModelSerialization(mln);
}
 
Author: deeplearning4j, Project: deeplearning4j, Lines: 75, Source: OutputLayerTest.java

Example 7: testEmbeddingBackwardPass

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the package/class this method depends on
@Test
public void testEmbeddingBackwardPass() {
    //With the same parameters, embedding layer should have same activations as the equivalent one-hot representation
    // input with a DenseLayer

    int nClassesIn = 10;

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().activation(Activation.TANH).list()
                    .layer(0, new EmbeddingLayer.Builder().hasBias(true).nIn(nClassesIn).nOut(5).build()).layer(1,
                                    new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(5).nOut(4)
                                                    .activation(Activation.SOFTMAX).build())
                    .pretrain(false).backprop(true).build();
    MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder().activation(Activation.TANH)
                    .weightInit(WeightInit.XAVIER).list()
                    .layer(0, new DenseLayer.Builder().nIn(nClassesIn).nOut(5).build()).layer(1,
                                    new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(5).nOut(4)
                                                    .activation(Activation.SOFTMAX).build())
                    .pretrain(false).backprop(true).build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    MultiLayerNetwork net2 = new MultiLayerNetwork(conf2);
    net.init();
    net2.init();

    net2.setParams(net.params().dup());

    int batchSize = 3;
    INDArray inEmbedding = Nd4j.create(batchSize, 1);
    INDArray inOneHot = Nd4j.create(batchSize, nClassesIn);
    INDArray outLabels = Nd4j.create(batchSize, 4);

    Random r = new Random(12345);
    for (int i = 0; i < batchSize; i++) {
        int classIdx = r.nextInt(nClassesIn);
        inEmbedding.putScalar(i, classIdx);
        inOneHot.putScalar(new int[] {i, classIdx}, 1.0);

        int labelIdx = r.nextInt(4);
        outLabels.putScalar(new int[] {i, labelIdx}, 1.0);
    }

    net.setInput(inEmbedding);
    net2.setInput(inOneHot);
    net.setLabels(outLabels);
    net2.setLabels(outLabels);

    net.computeGradientAndScore();
    net2.computeGradientAndScore();

    System.out.println(net.score() + "\t" + net2.score());
    assertEquals(net2.score(), net.score(), 1e-6);

    Map<String, INDArray> gradient = net.gradient().gradientForVariable();
    Map<String, INDArray> gradient2 = net2.gradient().gradientForVariable();
    assertEquals(gradient.size(), gradient2.size());

    for (String s : gradient.keySet()) {
        assertEquals(gradient2.get(s), gradient.get(s));
    }
}
 
Author: deeplearning4j, Project: deeplearning4j, Lines: 61, Source: EmbeddingLayerTest.java

Example 8: testEmbeddingLayerRNN

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the package/class this method depends on
@Test
public void testEmbeddingLayerRNN() {

    int nClassesIn = 10;

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().activation(Activation.TANH).list()
                    .layer(0, new EmbeddingLayer.Builder().hasBias(true).nIn(nClassesIn).nOut(5).build())
                    .layer(1, new GravesLSTM.Builder().nIn(5).nOut(7).activation(Activation.SOFTSIGN).build())
                    .layer(2, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(7).nOut(4)
                                    .activation(Activation.SOFTMAX).build())
                    .inputPreProcessor(0, new RnnToFeedForwardPreProcessor())
                    .inputPreProcessor(1, new FeedForwardToRnnPreProcessor()).pretrain(false).backprop(true)
                    .build();
    MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder().activation(Activation.TANH)
                    .weightInit(WeightInit.XAVIER).list()
                    .layer(0, new DenseLayer.Builder().nIn(nClassesIn).nOut(5).build())
                    .layer(1, new GravesLSTM.Builder().nIn(5).nOut(7).activation(Activation.SOFTSIGN).build())
                    .layer(2, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(7).nOut(4)
                                    .activation(Activation.SOFTMAX).build())
                    .inputPreProcessor(0, new RnnToFeedForwardPreProcessor())
                    .inputPreProcessor(1, new FeedForwardToRnnPreProcessor()).pretrain(false).backprop(true)
                    .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    MultiLayerNetwork net2 = new MultiLayerNetwork(conf2);
    net.init();
    net2.init();

    net2.setParams(net.params().dup());

    int batchSize = 3;
    int timeSeriesLength = 8;
    INDArray inEmbedding = Nd4j.create(batchSize, 1, timeSeriesLength);
    INDArray inOneHot = Nd4j.create(batchSize, nClassesIn, timeSeriesLength);
    INDArray outLabels = Nd4j.create(batchSize, 4, timeSeriesLength);

    Random r = new Random(12345);
    for (int i = 0; i < batchSize; i++) {
        for (int j = 0; j < timeSeriesLength; j++) {
            int classIdx = r.nextInt(nClassesIn);
            inEmbedding.putScalar(new int[] {i, 0, j}, classIdx);
            inOneHot.putScalar(new int[] {i, classIdx, j}, 1.0);

            int labelIdx = r.nextInt(4);
            outLabels.putScalar(new int[] {i, labelIdx, j}, 1.0);
        }
    }

    net.setInput(inEmbedding);
    net2.setInput(inOneHot);
    net.setLabels(outLabels);
    net2.setLabels(outLabels);

    net.computeGradientAndScore();
    net2.computeGradientAndScore();

    System.out.println(net.score() + "\t" + net2.score());
    assertEquals(net2.score(), net.score(), 1e-6);

    Map<String, INDArray> gradient = net.gradient().gradientForVariable();
    Map<String, INDArray> gradient2 = net2.gradient().gradientForVariable();
    assertEquals(gradient.size(), gradient2.size());

    for (String s : gradient.keySet()) {
        assertEquals(gradient2.get(s), gradient.get(s));
    }

}
 
Author: deeplearning4j, Project: deeplearning4j, Lines: 69, Source: EmbeddingLayerTest.java

Example 9: testMaskingCnnDim3_SingleExample

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the package/class this method depends on
@Test
public void testMaskingCnnDim3_SingleExample() {
    //Test masking, where mask is along dimension 3

    int minibatch = 1;
    int depthIn = 2;
    int depthOut = 2;
    int nOut = 2;
    int height = 3;
    int width = 6;

    PoolingType[] poolingTypes =
                    new PoolingType[] {PoolingType.SUM, PoolingType.AVG, PoolingType.MAX, PoolingType.PNORM};

    for (PoolingType pt : poolingTypes) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER)
                        .convolutionMode(ConvolutionMode.Same).seed(12345L).list()
                        .layer(0, new ConvolutionLayer.Builder().nIn(depthIn).nOut(depthOut).kernelSize(height, 2)
                                        .stride(height, 1).activation(Activation.TANH).build())
                        .layer(1, new org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer.Builder().poolingType(pt)
                                        .build())
                        .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                        .activation(Activation.SOFTMAX).nIn(depthOut).nOut(nOut).build())
                        .pretrain(false).backprop(true).build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        INDArray inToBeMasked = Nd4j.rand(new int[] {minibatch, depthIn, height, width});

        //Shape for mask: [minibatch, width]
        INDArray maskArray = Nd4j.create(new double[] {1, 1, 1, 1, 1, 0});

        //Multiply the input by the mask array, to ensure the 0s in the mask correspond to 0s in the input vector
        // as would be the case in practice...
        Nd4j.getExecutioner().exec(new BroadcastMulOp(inToBeMasked, maskArray, inToBeMasked, 0, 3));


        net.setLayerMaskArrays(maskArray, null);

        INDArray outMasked = net.output(inToBeMasked);
        net.clearLayerMaskArrays();

        int numSteps = width - 1;
        INDArray subset = inToBeMasked.get(NDArrayIndex.interval(0, 0, true), NDArrayIndex.all(),
                        NDArrayIndex.all(), NDArrayIndex.interval(0, numSteps));
        assertArrayEquals(new int[] {1, depthIn, height, 5}, subset.shape());

        INDArray outSubset = net.output(subset);
        INDArray outMaskedSubset = outMasked.getRow(0);

        assertEquals(outSubset, outMaskedSubset);

        //Finally: check gradient calc for exceptions
        net.setLayerMaskArrays(maskArray, null);
        net.setInput(inToBeMasked);
        INDArray labels = Nd4j.create(new double[] {0, 1});
        net.setLabels(labels);

        net.computeGradientAndScore();
    }
}
 
Author: deeplearning4j, Project: deeplearning4j, Lines: 63, Source: GlobalPoolingMaskingTests.java

Example 10: testMaskingCnnDim2_SingleExample

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the package/class this method depends on
@Test
public void testMaskingCnnDim2_SingleExample() {
    //Test masking, where mask is along dimension 2

    int minibatch = 1;
    int depthIn = 2;
    int depthOut = 2;
    int nOut = 2;
    int height = 6;
    int width = 3;

    PoolingType[] poolingTypes =
                    new PoolingType[] {PoolingType.SUM, PoolingType.AVG, PoolingType.MAX, PoolingType.PNORM};

    for (PoolingType pt : poolingTypes) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER)
                        .convolutionMode(ConvolutionMode.Same).seed(12345L).list()
                        .layer(0, new ConvolutionLayer.Builder().nIn(depthIn).nOut(depthOut).kernelSize(2, width)
                                        .stride(1, width).activation(Activation.TANH).build())
                        .layer(1, new org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer.Builder().poolingType(pt)
                                        .build())
                        .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                        .activation(Activation.SOFTMAX).nIn(depthOut).nOut(nOut).build())
                        .pretrain(false).backprop(true).build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        INDArray inToBeMasked = Nd4j.rand(new int[] {minibatch, depthIn, height, width});

        //Shape for mask: [minibatch, height] (mask is along dimension 2 here)
        INDArray maskArray = Nd4j.create(new double[] {1, 1, 1, 1, 1, 0});

        //Multiply the input by the mask array, to ensure the 0s in the mask correspond to 0s in the input vector
        // as would be the case in practice...
        Nd4j.getExecutioner().exec(new BroadcastMulOp(inToBeMasked, maskArray, inToBeMasked, 0, 2));


        net.setLayerMaskArrays(maskArray, null);

        INDArray outMasked = net.output(inToBeMasked);
        net.clearLayerMaskArrays();

        int numSteps = height - 1;
        INDArray subset = inToBeMasked.get(NDArrayIndex.interval(0, 0, true), NDArrayIndex.all(),
                        NDArrayIndex.interval(0, numSteps), NDArrayIndex.all());
        assertArrayEquals(new int[] {1, depthIn, 5, width}, subset.shape());

        INDArray outSubset = net.output(subset);
        INDArray outMaskedSubset = outMasked.getRow(0);

        assertEquals(outSubset, outMaskedSubset);

        //Finally: check gradient calc for exceptions
        net.setLayerMaskArrays(maskArray, null);
        net.setInput(inToBeMasked);
        INDArray labels = Nd4j.create(new double[] {0, 1});
        net.setLabels(labels);

        net.computeGradientAndScore();
    }
}
 
Author: deeplearning4j, Project: deeplearning4j, Lines: 63, Source: GlobalPoolingMaskingTests.java

Example 11: testBackwardIrisBasic

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the package/class this method depends on
@Test
public void testBackwardIrisBasic() {
    ComputationGraphConfiguration configuration = getIrisGraphConfiguration();
    ComputationGraph graph = new ComputationGraph(configuration);
    graph.init();

    MultiLayerConfiguration mlc = getIrisMLNConfiguration();
    MultiLayerNetwork net = new MultiLayerNetwork(mlc);
    net.init();

    DataSetIterator iris = new IrisDataSetIterator(150, 150);
    DataSet ds = iris.next();

    //Now: set parameters of both networks to be identical. Then feedforward, and check we get the same outputs
    Nd4j.getRandom().setSeed(12345);
    int nParams = (4 * 5 + 5) + (5 * 3 + 3);
    INDArray params = Nd4j.rand(1, nParams);
    graph.setParams(params.dup());
    net.setParams(params.dup());

    INDArray input = ds.getFeatureMatrix();
    INDArray labels = ds.getLabels();
    graph.setInput(0, input.dup());
    graph.setLabel(0, labels.dup());

    net.setInput(input.dup());
    net.setLabels(labels.dup());

    //Compute gradients
    net.computeGradientAndScore();
    Pair<Gradient, Double> netGradScore = net.gradientAndScore();

    graph.computeGradientAndScore();
    Pair<Gradient, Double> graphGradScore = graph.gradientAndScore();

    assertEquals(netGradScore.getSecond(), graphGradScore.getSecond(), 1e-3);

    //Compare gradients
    Gradient netGrad = netGradScore.getFirst();
    Gradient graphGrad = graphGradScore.getFirst();

    assertNotNull(graphGrad);
    assertEquals(netGrad.gradientForVariable().size(), graphGrad.gradientForVariable().size());

    assertEquals(netGrad.getGradientFor("0_W"), graphGrad.getGradientFor("firstLayer_W"));
    assertEquals(netGrad.getGradientFor("0_b"), graphGrad.getGradientFor("firstLayer_b"));
    assertEquals(netGrad.getGradientFor("1_W"), graphGrad.getGradientFor("outputLayer_W"));
    assertEquals(netGrad.getGradientFor("1_b"), graphGrad.getGradientFor("outputLayer_b"));
}
 
Author: deeplearning4j, Project: deeplearning4j, Lines: 50, Source: TestComputationGraphNetwork.java

Example 12: validateImplSimple

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the package/class this method depends on
@Test
public void validateImplSimple() throws Exception {

    Nd4j.getRandom().setSeed(12345);
    int minibatch = 10;
    int inputSize = 3;
    int lstmLayerSize = 4;
    int timeSeriesLength = 3;
    int nOut = 2;
    INDArray input = Nd4j.rand(new int[] {minibatch, inputSize, timeSeriesLength});
    INDArray labels = Nd4j.zeros(minibatch, nOut, timeSeriesLength);
    Random r = new Random(12345);
    for (int i = 0; i < minibatch; i++) {
        for (int j = 0; j < timeSeriesLength; j++) {
            labels.putScalar(i, r.nextInt(nOut), j, 1.0);
        }
    }

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().inferenceWorkspaceMode(WorkspaceMode.NONE)
                    .trainingWorkspaceMode(WorkspaceMode.NONE).updater(new NoOp())
                    .seed(12345L).weightInit(WeightInit.DISTRIBUTION)
                    .dist(new NormalDistribution(0, 2)).list()
                    .layer(0, new LSTM.Builder().nIn(input.size(1)).nOut(lstmLayerSize)
                                    .gateActivationFunction(Activation.SIGMOID).activation(Activation.TANH).build())
                    .layer(1, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nIn(lstmLayerSize).nOut(nOut).build())
                    .pretrain(false).backprop(true).build();

    MultiLayerNetwork mln1 = new MultiLayerNetwork(conf.clone());
    mln1.init();

    MultiLayerNetwork mln2 = new MultiLayerNetwork(conf.clone());
    mln2.init();


    assertEquals(mln1.params(), mln2.params());

    Field f = org.deeplearning4j.nn.layers.recurrent.LSTM.class.getDeclaredField("helper");
    f.setAccessible(true);

    Layer l0 = mln1.getLayer(0);
    f.set(l0, null);
    assertNull(f.get(l0));

    l0 = mln2.getLayer(0);
    assertTrue(f.get(l0) instanceof CudnnLSTMHelper);


    INDArray out1 = mln1.output(input);
    INDArray out2 = mln2.output(input);

    assertEquals(out1, out2);


    mln1.setInput(input);
    mln1.setLabels(labels);

    mln2.setInput(input);
    mln2.setLabels(labels);

    mln1.computeGradientAndScore();
    mln2.computeGradientAndScore();

    assertEquals(mln1.score(), mln2.score(), 1e-8);

    Gradient g1 = mln1.gradient();
    Gradient g2 = mln2.gradient();

    for (Map.Entry<String, INDArray> entry : g1.gradientForVariable().entrySet()) {
        INDArray exp = entry.getValue();
        INDArray act = g2.gradientForVariable().get(entry.getKey());

        System.out.println(entry.getKey() + "\t" + exp.equals(act));
    }

    assertEquals(mln1.getFlattenedGradients(), mln2.getFlattenedGradients());
}
 
Author: deeplearning4j, Project: deeplearning4j, Lines: 78, Source: ValidateCudnnLSTM.java


Note: The org.deeplearning4j.nn.multilayer.MultiLayerNetwork.setLabels examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's License. Do not reproduce without permission.