

Java MultiLayerNetwork.getnLayers Method Code Examples

This article collects typical usage examples of the Java method org.deeplearning4j.nn.multilayer.MultiLayerNetwork.getnLayers. If you have been wondering what MultiLayerNetwork.getnLayers does and how to use it, the curated code examples below may help. You can also explore further usage examples of the enclosing class, org.deeplearning4j.nn.multilayer.MultiLayerNetwork.


The following presents 15 code examples of the MultiLayerNetwork.getnLayers method, sorted by popularity by default.
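
Before the collected examples, here is a minimal, self-contained sketch of the pattern nearly all of them share: getnLayers() returns the number of layers in an initialized MultiLayerNetwork and typically serves as the loop bound when inspecting layers one by one. The two-layer configuration below is a hypothetical illustration for this page, not code taken from any of the examples.

import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.deeplearning4j.nn.weights.WeightInit;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction;

public class GetNLayersDemo {
    public static void main(String[] args) {
        // Hypothetical two-layer network: 4 inputs -> 3 hidden -> 3 outputs
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345L)
                        .weightInit(WeightInit.XAVIER).list()
                        .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).activation(Activation.TANH).build())
                        .layer(1, new OutputLayer.Builder(LossFunction.MCXENT).nIn(3).nOut(3)
                                        .activation(Activation.SOFTMAX).build())
                        .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init(); // parameters must be initialized before layers can be inspected

        // getnLayers() is the layer count; iterate over it to inspect each layer
        for (int i = 0; i < net.getnLayers(); i++) {
            System.out.println("Layer " + i + " # params: " + net.getLayer(i).numParams());
        }
    }
}

Running this prints one line per layer: "Layer 0 # params: 15" (4*3 weights + 3 biases) and "Layer 1 # params: 12".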

Example 1: setLearningRate

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import the required package/class
private static void setLearningRate(MultiLayerNetwork net, double newLr, ISchedule lrSchedule) {
    int nLayers = net.getnLayers();
    for (int i = 0; i < nLayers; i++) {
        // Delegates to the per-layer overload; the final 'false' defers the
        // updater refresh until all layers have been updated.
        setLearningRate(net, i, newLr, lrSchedule, false);
    }
    refreshUpdater(net);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 8, Source: NetworkUtils.java

Example 2: testEmbeddingLayerSimple

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import the required package/class
@Test
public void testEmbeddingLayerSimple() {
    Random r = new Random(12345);
    int nExamples = 5;
    INDArray input = Nd4j.zeros(nExamples, 1);
    INDArray labels = Nd4j.zeros(nExamples, 3);
    for (int i = 0; i < nExamples; i++) {
        input.putScalar(i, r.nextInt(4));
        labels.putScalar(new int[] {i, r.nextInt(3)}, 1.0);
    }

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().l2(0.2).l1(0.1)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(12345L)
                    .list().layer(0,
                                    new EmbeddingLayer.Builder().nIn(4).nOut(3).weightInit(WeightInit.XAVIER)
                                                    .dist(new NormalDistribution(0, 1))
                                                    .updater(new NoOp()).activation(
                                                                    Activation.TANH)
                                                    .build())
                    .layer(1, new OutputLayer.Builder(LossFunction.MCXENT).nIn(3).nOut(3)
                                    .weightInit(WeightInit.XAVIER).dist(new NormalDistribution(0, 1))
                                    .updater(new NoOp()).activation(Activation.SOFTMAX).build())
                    .pretrain(false).backprop(true).build();

    MultiLayerNetwork mln = new MultiLayerNetwork(conf);
    mln.init();

    if (PRINT_RESULTS) {
        System.out.println("testEmbeddingLayerSimple");
        for (int j = 0; j < mln.getnLayers(); j++)
            System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
    }

    boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

    String msg = "testEmbeddingLayerSimple";
    assertTrue(msg, gradOK);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 40, Source: GradientCheckTests.java

Example 3: testGradient2dFixedGammaBeta

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import the required package/class
@Test
public void testGradient2dFixedGammaBeta() {
    DataNormalization scaler = new NormalizerMinMaxScaler();
    DataSetIterator iter = new IrisDataSetIterator(150, 150);
    scaler.fit(iter);
    iter.setPreProcessor(scaler);
    DataSet ds = iter.next();
    INDArray input = ds.getFeatureMatrix();
    INDArray labels = ds.getLabels();

    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().updater(new NoOp())
                    .seed(12345L).weightInit(WeightInit.DISTRIBUTION)
                    .dist(new NormalDistribution(0, 1)).list()
                    .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).activation(Activation.IDENTITY).build())
                    .layer(1, new BatchNormalization.Builder().lockGammaBeta(true).gamma(2.0).beta(0.5).nOut(3)
                                    .build())
                    .layer(2, new ActivationLayer.Builder().activation(Activation.TANH).build())
                    .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nIn(3).nOut(3).build())
                    .pretrain(false).backprop(true);

    MultiLayerNetwork mln = new MultiLayerNetwork(builder.build());
    mln.init();

    if (PRINT_RESULTS) {
        for (int j = 0; j < mln.getnLayers(); j++)
            System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
    }

    boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

    assertTrue(gradOK);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 35, Source: BNGradientCheckTest.java

Example 4: testGradientCnnFixedGammaBeta

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import the required package/class
@Test
public void testGradientCnnFixedGammaBeta() {
    Nd4j.getRandom().setSeed(12345);
    int minibatch = 10;
    int depth = 1;
    int hw = 4;
    int nOut = 4;
    INDArray input = Nd4j.rand(new int[] {minibatch, depth, hw, hw});
    INDArray labels = Nd4j.zeros(minibatch, nOut);
    Random r = new Random(12345);
    for (int i = 0; i < minibatch; i++) {
        labels.putScalar(i, r.nextInt(nOut), 1.0);
    }

    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().updater(new NoOp())
                    .seed(12345L).weightInit(WeightInit.DISTRIBUTION)
                    .dist(new NormalDistribution(0, 2)).list()
                    .layer(0, new ConvolutionLayer.Builder().kernelSize(2, 2).stride(1, 1).nIn(depth).nOut(2)
                                    .activation(Activation.IDENTITY).build())
                    .layer(1, new BatchNormalization.Builder().lockGammaBeta(true).gamma(2.0).beta(0.5).build())
                    .layer(2, new ActivationLayer.Builder().activation(Activation.TANH).build())
                    .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nOut(nOut).build())
                    .setInputType(InputType.convolutional(hw, hw, depth)).pretrain(false).backprop(true);

    MultiLayerNetwork mln = new MultiLayerNetwork(builder.build());
    mln.init();

    if (PRINT_RESULTS) {
        for (int j = 0; j < mln.getnLayers(); j++)
            System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
    }

    boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

    assertTrue(gradOK);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 39, Source: BNGradientCheckTest.java

Example 5: testGradientLRNSimple

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import the required package/class
@Test
public void testGradientLRNSimple() {
    Nd4j.getRandom().setSeed(12345);
    int minibatch = 10;
    int depth = 6;
    int hw = 5;
    int nOut = 4;
    INDArray input = Nd4j.rand(new int[] {minibatch, depth, hw, hw});
    INDArray labels = Nd4j.zeros(minibatch, nOut);
    Random r = new Random(12345);
    for (int i = 0; i < minibatch; i++) {
        labels.putScalar(i, r.nextInt(nOut), 1.0);
    }

    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().updater(new NoOp())
                    .seed(12345L).weightInit(WeightInit.DISTRIBUTION)
                    .dist(new NormalDistribution(0, 2)).list()
                    .layer(0, new ConvolutionLayer.Builder().nOut(6).kernelSize(2, 2).stride(1, 1)
                                    .activation(Activation.TANH).build())
                    .layer(1, new LocalResponseNormalization.Builder().build())
                    .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nOut(nOut).build())
                    .setInputType(InputType.convolutional(hw, hw, depth)).pretrain(false).backprop(true);

    MultiLayerNetwork mln = new MultiLayerNetwork(builder.build());
    mln.init();

    if (PRINT_RESULTS) {
        for (int j = 0; j < mln.getnLayers(); j++)
            System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
    }

    boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

    assertTrue(gradOK);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 38, Source: LRNGradientCheckTests.java

Example 6: testLSTMGlobalPoolingBasicMultiLayer

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import the required package/class
@Test
public void testLSTMGlobalPoolingBasicMultiLayer() {
    //Basic test of global pooling w/ LSTM
    Nd4j.getRandom().setSeed(12345L);

    int timeSeriesLength = 10;
    int nIn = 5;
    int layerSize = 4;
    int nOut = 2;

    int[] minibatchSizes = new int[] {1, 3};
    PoolingType[] poolingTypes =
                    new PoolingType[] {PoolingType.AVG, PoolingType.SUM, PoolingType.MAX, PoolingType.PNORM};

    for (int miniBatchSize : minibatchSizes) {
        for (PoolingType pt : poolingTypes) {

            MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                            .updater(new NoOp()).weightInit(WeightInit.DISTRIBUTION)
                            .dist(new NormalDistribution(0, 1.0)).seed(12345L).list()
                            .layer(0, new GravesLSTM.Builder().nIn(nIn).nOut(layerSize).activation(Activation.TANH)
                                            .build())
                            .layer(1, new GlobalPoolingLayer.Builder().poolingType(pt).build())
                            .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                            .activation(Activation.SOFTMAX).nIn(layerSize).nOut(nOut).build())
                            .pretrain(false).backprop(true).build();

            MultiLayerNetwork mln = new MultiLayerNetwork(conf);
            mln.init();

            Random r = new Random(12345L);
            INDArray input = Nd4j.zeros(miniBatchSize, nIn, timeSeriesLength);
            for (int i = 0; i < miniBatchSize; i++) {
                for (int j = 0; j < nIn; j++) {
                    for (int k = 0; k < timeSeriesLength; k++) {
                        input.putScalar(new int[] {i, j, k}, r.nextDouble() - 0.5);
                    }
                }
            }

            INDArray labels = Nd4j.zeros(miniBatchSize, nOut);
            for (int i = 0; i < miniBatchSize; i++) {
                int idx = r.nextInt(nOut);
                labels.putScalar(i, idx, 1.0);
            }

            if (PRINT_RESULTS) {
                System.out.println("testLSTMGlobalPoolingBasicMultiLayer() - " + pt + ", minibatch = "
                                + miniBatchSize);
                for (int j = 0; j < mln.getnLayers(); j++)
                    System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
            }

            boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                            DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

            assertTrue(gradOK);

        }
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 62, Source: GlobalPoolingGradientCheckTests.java

Example 7: testCnn1DWithZeroPadding1D

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import the required package/class
@Test
public void testCnn1DWithZeroPadding1D() {
    Nd4j.getRandom().setSeed(1337);

    int[] minibatchSizes = {1, 3};
    int length = 7;
    int convNIn = 2;
    int convNOut1 = 3;
    int convNOut2 = 4;
    int finalNOut = 4;


    int[] kernels = {1, 2, 4};
    int stride = 1;
    int pnorm = 2;

    int padding = 0;
    int zeroPadding = 2;
    int paddedLength = length + 2 * zeroPadding;

    Activation[] activations = {Activation.SIGMOID};
    SubsamplingLayer.PoolingType[] poolingTypes =
            new SubsamplingLayer.PoolingType[] {SubsamplingLayer.PoolingType.MAX,
                    SubsamplingLayer.PoolingType.AVG, SubsamplingLayer.PoolingType.PNORM};

    for (Activation afn : activations) {
        for (SubsamplingLayer.PoolingType poolingType : poolingTypes) {
            for (int minibatchSize : minibatchSizes) {
                for (int kernel : kernels) {
                    INDArray input = Nd4j.rand(new int[] {minibatchSize, convNIn, length});
                    INDArray labels = Nd4j.zeros(minibatchSize, finalNOut, paddedLength);
                    for (int i = 0; i < minibatchSize; i++) {
                        for (int j = 0; j < paddedLength; j++) {
                            labels.putScalar(new int[] {i, i % finalNOut, j}, 1.0);
                        }
                    }

                    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                            .updater(new NoOp()).weightInit(WeightInit.DISTRIBUTION)
                            .dist(new NormalDistribution(0, 1)).convolutionMode(ConvolutionMode.Same).list()
                            .layer(new Convolution1DLayer.Builder().activation(afn).kernelSize(kernel)
                                    .stride(stride).padding(padding).nIn(convNIn).nOut(convNOut1)
                                    .build())
                            .layer(new ZeroPadding1DLayer.Builder(zeroPadding).build())
                            .layer(new Convolution1DLayer.Builder().activation(afn).kernelSize(kernel)
                                    .stride(stride).padding(padding).nIn(convNOut1).nOut(convNOut2)
                                    .build())
                            .layer(new ZeroPadding1DLayer.Builder(0).build())
                            .layer(new Subsampling1DLayer.Builder(poolingType).kernelSize(kernel)
                                    .stride(stride).padding(padding).pnorm(pnorm).build())
                            .layer(new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nOut(finalNOut).build())
                            .setInputType(InputType.recurrent(convNIn)).build();

                    String json = conf.toJson();
                    MultiLayerConfiguration c2 = MultiLayerConfiguration.fromJson(json);
                    assertEquals(conf, c2);

                    MultiLayerNetwork net = new MultiLayerNetwork(conf);
                    net.init();

                    String msg = "PoolingType=" + poolingType + ", minibatch=" + minibatchSize + ", activationFn="
                            + afn + ", kernel = " + kernel;

                    if (PRINT_RESULTS) {
                        System.out.println(msg);
                        for (int j = 0; j < net.getnLayers(); j++)
                            System.out.println("Layer " + j + " # params: " + net.getLayer(j).numParams());
                    }

                    boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                            DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

                    assertTrue(msg, gradOK);
                }
            }
        }
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 80, Source: CNN1DGradientCheckTest.java

Example 8: testLSTMWithMasking

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import the required package/class
@Test
public void testLSTMWithMasking() {
    //Test of global pooling over GravesLSTM output, with feature masking
    Nd4j.getRandom().setSeed(12345L);

    int timeSeriesLength = 10;
    int nIn = 5;
    int layerSize = 4;
    int nOut = 2;

    int miniBatchSize = 3;
    PoolingType[] poolingTypes =
                    new PoolingType[] {PoolingType.AVG, PoolingType.SUM, PoolingType.MAX, PoolingType.PNORM};

    for (PoolingType pt : poolingTypes) {

        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                        .updater(new NoOp()).weightInit(WeightInit.DISTRIBUTION)
                        .dist(new NormalDistribution(0, 1.0)).seed(12345L).list()
                        .layer(0, new GravesLSTM.Builder().nIn(nIn).nOut(layerSize).activation(Activation.TANH)
                                        .build())
                        .layer(1, new GlobalPoolingLayer.Builder().poolingType(pt).build())
                        .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                        .activation(Activation.SOFTMAX).nIn(layerSize).nOut(nOut).build())
                        .pretrain(false).backprop(true).build();

        MultiLayerNetwork mln = new MultiLayerNetwork(conf);
        mln.init();

        Random r = new Random(12345L);
        INDArray input = Nd4j.zeros(miniBatchSize, nIn, timeSeriesLength);
        for (int i = 0; i < miniBatchSize; i++) {
            for (int j = 0; j < nIn; j++) {
                for (int k = 0; k < timeSeriesLength; k++) {
                    input.putScalar(new int[] {i, j, k}, r.nextDouble() - 0.5);
                }
            }
        }

        INDArray featuresMask = Nd4j.create(miniBatchSize, timeSeriesLength);
        for (int i = 0; i < miniBatchSize; i++) {
            int to = timeSeriesLength - i;
            for (int j = 0; j < to; j++) {
                featuresMask.putScalar(i, j, 1.0);
            }
        }

        INDArray labels = Nd4j.zeros(miniBatchSize, nOut);
        for (int i = 0; i < miniBatchSize; i++) {
            int idx = r.nextInt(nOut);
            labels.putScalar(i, idx, 1.0);
        }

        mln.setLayerMaskArrays(featuresMask, null);

        if (PRINT_RESULTS) {
            System.out.println("testLSTMGlobalPoolingBasicMultiLayer() - " + pt + ", minibatch = " + miniBatchSize);
            for (int j = 0; j < mln.getnLayers(); j++)
                System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
        }

        boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                        DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels, featuresMask, null);

        assertTrue(gradOK);
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 68, Source: GlobalPoolingGradientCheckTests.java

Example 9: testBatchNormCnn

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import the required package/class
@Test
public void testBatchNormCnn() throws Exception {
    //Note: cuDNN batch norm supports 4d input only, as of cuDNN 5.1 (per the API reference documentation)
    Nd4j.getRandom().setSeed(12345);
    int minibatch = 10;
    int depth = 1;
    int hw = 4;
    int nOut = 4;
    INDArray input = Nd4j.rand(new int[] {minibatch, depth, hw, hw});
    INDArray labels = Nd4j.zeros(minibatch, nOut);
    Random r = new Random(12345);
    for (int i = 0; i < minibatch; i++) {
        labels.putScalar(i, r.nextInt(nOut), 1.0);
    }

    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().updater(new NoOp())
                    .seed(12345L).weightInit(WeightInit.DISTRIBUTION)
                    .dist(new NormalDistribution(0, 2)).list()
                    .layer(0, new ConvolutionLayer.Builder().kernelSize(2, 2).stride(1, 1).nIn(depth).nOut(2)
                                    .activation(Activation.IDENTITY).build())
                    .layer(1, new BatchNormalization.Builder().build())
                    .layer(2, new ActivationLayer.Builder().activation(Activation.TANH).build())
                    .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nOut(nOut).build())
                    .setInputType(InputType.convolutional(hw, hw, depth)).pretrain(false).backprop(true);

    MultiLayerNetwork mln = new MultiLayerNetwork(builder.build());
    mln.init();

    Field f = org.deeplearning4j.nn.layers.normalization.BatchNormalization.class.getDeclaredField("helper");
    f.setAccessible(true);

    org.deeplearning4j.nn.layers.normalization.BatchNormalization b =
                    (org.deeplearning4j.nn.layers.normalization.BatchNormalization) mln.getLayer(1);
    BatchNormalizationHelper bn = (BatchNormalizationHelper) f.get(b);
    assertTrue(bn instanceof CudnnBatchNormalizationHelper);

    //-------------------------------
    //For debugging/comparison to no-cudnn case: set helper field to null
    //        f.set(b, null);
    //        assertNull(f.get(b));
    //-------------------------------

    if (PRINT_RESULTS) {
        for (int j = 0; j < mln.getnLayers(); j++)
            System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
    }

    boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

    assertTrue(gradOK);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 54, Source: CuDNNGradientChecks.java

Example 10: testBidirectionalLSTMMasking

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import the required package/class
@Test
public void testBidirectionalLSTMMasking() {
    Nd4j.getRandom().setSeed(12345L);

    int timeSeriesLength = 5;
    int nIn = 5;
    int layerSize = 4;
    int nOut = 3;

    int miniBatchSize = 3;

    INDArray[] masks = new INDArray[] {null,
                    Nd4j.create(new double[][] {{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}),
                    Nd4j.create(new double[][] {{1, 1, 1, 1, 1}, {1, 1, 1, 1, 0}, {1, 1, 1, 0, 0}}),
                    Nd4j.create(new double[][] {{1, 1, 1, 1, 1}, {0, 1, 1, 1, 1}, {0, 0, 1, 1, 1}})};

    int testNum = 0;
    for (INDArray mask : masks) {

        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                        .updater(new NoOp()).weightInit(WeightInit.DISTRIBUTION)
                        .dist(new NormalDistribution(0, 1.0)).seed(12345L).list()
                        .layer(0, new GravesBidirectionalLSTM.Builder().nIn(nIn).nOut(layerSize)
                                        .activation(Activation.TANH).build())
                        .layer(1, new GravesBidirectionalLSTM.Builder().nIn(layerSize).nOut(layerSize)
                                        .activation(Activation.TANH).build())
                        .layer(2, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                        .activation(Activation.SOFTMAX).nIn(layerSize).nOut(nOut).build())
                        .pretrain(false).backprop(true).build();

        MultiLayerNetwork mln = new MultiLayerNetwork(conf);
        mln.init();

        Random r = new Random(12345L);
        INDArray input = Nd4j.rand(new int[]{miniBatchSize, nIn, timeSeriesLength}, 'f').subi(0.5);

        INDArray labels = Nd4j.zeros(miniBatchSize, nOut, timeSeriesLength);
        for (int i = 0; i < miniBatchSize; i++) {
            for (int j = 0; j < nIn; j++) {
                labels.putScalar(i, r.nextInt(nOut), j, 1.0);
            }
        }

        if (PRINT_RESULTS) {
            System.out.println("testBidirectionalLSTMMasking() - testNum = " + testNum++);
            for (int j = 0; j < mln.getnLayers(); j++)
                System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
        }

        boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                        DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels, mask, mask);

        assertTrue(gradOK);
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 56, Source: GradientCheckTestsMasking.java

Example 11: testLSTMBasicMultiLayer

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import the required package/class
@Test
public void testLSTMBasicMultiLayer() {
    //Basic test of GravesLSTM layer
    Nd4j.getRandom().setSeed(12345L);

    int timeSeriesLength = 4;
    int nIn = 2;
    int layerSize = 2;
    int nOut = 2;
    int miniBatchSize = 5;

    boolean[] gravesLSTM = new boolean[] {true, false};

    for (boolean graves : gravesLSTM) {

        Layer l0;
        Layer l1;
        if (graves) {
            l0 = new GravesLSTM.Builder().nIn(nIn).nOut(layerSize).activation(Activation.SIGMOID)
                            .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1.0))
                            .updater(new NoOp()).build();
            l1 = new GravesLSTM.Builder().nIn(layerSize).nOut(layerSize).activation(Activation.SIGMOID)
                            .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1.0))
                            .updater(new NoOp()).build();
        } else {
            l0 = new LSTM.Builder().nIn(nIn).nOut(layerSize).activation(Activation.SIGMOID)
                            .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1.0))
                            .updater(new NoOp()).build();
            l1 = new LSTM.Builder().nIn(layerSize).nOut(layerSize).activation(Activation.SIGMOID)
                            .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1.0))
                            .updater(new NoOp()).build();
        }

        MultiLayerConfiguration conf =
                        new NeuralNetConfiguration.Builder().seed(12345L).list()
                                        .layer(0, l0).layer(1,
                                                        l1)
                                        .layer(2, new RnnOutputLayer.Builder(LossFunction.MCXENT)
                                                        .activation(Activation.SOFTMAX).nIn(layerSize).nOut(nOut)
                                                        .weightInit(WeightInit.DISTRIBUTION)
                                                        .dist(new NormalDistribution(0, 1.0)).updater(new NoOp())
                                                        .build())
                                        .pretrain(false).backprop(true).build();

        MultiLayerNetwork mln = new MultiLayerNetwork(conf);
        mln.init();

        Random r = new Random(12345L);
        INDArray input = Nd4j.zeros(miniBatchSize, nIn, timeSeriesLength);
        for (int i = 0; i < miniBatchSize; i++) {
            for (int j = 0; j < nIn; j++) {
                for (int k = 0; k < timeSeriesLength; k++) {
                    input.putScalar(new int[] {i, j, k}, r.nextDouble() - 0.5);
                }
            }
        }

        INDArray labels = Nd4j.zeros(miniBatchSize, nOut, timeSeriesLength);
        for (int i = 0; i < miniBatchSize; i++) {
            for (int j = 0; j < timeSeriesLength; j++) {
                int idx = r.nextInt(nOut);
                labels.putScalar(new int[] {i, idx, j}, 1.0);
            }
        }

        String testName = "testLSTMBasic(" + (graves ? "GravesLSTM" : "LSTM") + ")";
        if (PRINT_RESULTS) {
            System.out.println(testName);
            for (int j = 0; j < mln.getnLayers(); j++)
                System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
        }

        boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                        DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

        assertTrue(testName, gradOK);
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 79, Source: LSTMGradientCheckTests.java

Example 12: testGradientCnnFfRnn

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import the required package/class
@Test
public void testGradientCnnFfRnn() {
    //Test gradients with CNN -> FF -> LSTM -> RnnOutputLayer
    //time series input/output (i.e., video classification or similar)

    int nChannelsIn = 3;
    int inputSize = 10 * 10 * nChannelsIn; //10px x 10px x 3 channels
    int miniBatchSize = 4;
    int timeSeriesLength = 10;
    int nClasses = 3;

    //Generate
    Nd4j.getRandom().setSeed(12345);
    INDArray input = Nd4j.rand(new int[] {miniBatchSize, inputSize, timeSeriesLength});
    INDArray labels = Nd4j.zeros(miniBatchSize, nClasses, timeSeriesLength);
    Random r = new Random(12345);
    for (int i = 0; i < miniBatchSize; i++) {
        for (int j = 0; j < timeSeriesLength; j++) {
            int idx = r.nextInt(nClasses);
            labels.putScalar(new int[] {i, idx, j}, 1.0);
        }
    }


    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new NoOp()).seed(12345)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new UniformDistribution(-2, 2)).list()
                    .layer(0, new ConvolutionLayer.Builder(5, 5).nIn(3).nOut(5).stride(1, 1)
                                    .activation(Activation.TANH).build()) //Out: (10-5)/1+1 = 6 -> 6x6x5
                    .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX).kernelSize(2, 2)
                                    .stride(1, 1).build()) //Out: (6-2)/1+1 = 5 -> 5x5x5
                    .layer(2, new DenseLayer.Builder().nIn(5 * 5 * 5).nOut(4).activation(Activation.TANH).build())
                    .layer(3, new GravesLSTM.Builder().nIn(4).nOut(3).activation(Activation.TANH).build())
                    .layer(4, new RnnOutputLayer.Builder().lossFunction(LossFunction.MCXENT).nIn(3).nOut(nClasses)
                                    .activation(Activation.SOFTMAX).build())
                    .setInputType(InputType.convolutional(10, 10, 3)).pretrain(false).backprop(true).build();

    //Here: ConvolutionLayerSetup in config builder doesn't know that we are expecting time series input, not standard FF input -> override it here
    conf.getInputPreProcessors().put(0, new RnnToCnnPreProcessor(10, 10, 3));

    MultiLayerNetwork mln = new MultiLayerNetwork(conf);
    mln.init();

    System.out.println("Params per layer:");
    for (int i = 0; i < mln.getnLayers(); i++) {
        System.out.println("layer " + i + "\t" + mln.getLayer(i).numParams());
    }

    boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);
    assertTrue(gradOK);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 52, Source: LSTMGradientCheckTests.java

Example 13: testCnn1DWithSubsampling1D

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import the required package/class
@Test
public void testCnn1DWithSubsampling1D() {
    Nd4j.getRandom().setSeed(12345);

    int[] minibatchSizes = {1, 3};
    int length = 7;
    int convNIn = 2;
    int convNOut1 = 3;
    int convNOut2 = 4;
    int finalNOut = 4;

    int[] kernels = {1, 2, 4};
    int stride = 1;
    int padding = 0;
    int pnorm = 2;

    Activation[] activations = {Activation.SIGMOID, Activation.TANH};
    SubsamplingLayer.PoolingType[] poolingTypes =
                    new SubsamplingLayer.PoolingType[] {SubsamplingLayer.PoolingType.MAX,
                                    SubsamplingLayer.PoolingType.AVG, SubsamplingLayer.PoolingType.PNORM};

    for (Activation afn : activations) {
        for (SubsamplingLayer.PoolingType poolingType : poolingTypes) {
            for (int minibatchSize : minibatchSizes) {
                for (int kernel : kernels) {
                    INDArray input = Nd4j.rand(new int[] {minibatchSize, convNIn, length});
                    INDArray labels = Nd4j.zeros(minibatchSize, finalNOut, length);
                    for (int i = 0; i < minibatchSize; i++) {
                        for (int j = 0; j < length; j++) {
                            labels.putScalar(new int[] {i, i % finalNOut, j}, 1.0);
                        }
                    }

                    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                                    .updater(new NoOp()).weightInit(WeightInit.DISTRIBUTION)
                                    .dist(new NormalDistribution(0, 1)).convolutionMode(ConvolutionMode.Same).list()
                                    .layer(0, new Convolution1DLayer.Builder().activation(afn).kernelSize(kernel)
                                                    .stride(stride).padding(padding).nIn(convNIn).nOut(convNOut1)
                                                    .build())
                                    .layer(1, new Convolution1DLayer.Builder().activation(afn).kernelSize(kernel)
                                                    .stride(stride).padding(padding).nIn(convNOut1).nOut(convNOut2)
                                                    .build())
                                    .layer(2, new Subsampling1DLayer.Builder(poolingType).kernelSize(kernel)
                                                    .stride(stride).padding(padding).pnorm(pnorm).build())
                                    .layer(3, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                                    .activation(Activation.SOFTMAX).nOut(finalNOut).build())
                                    .setInputType(InputType.recurrent(convNIn)).build();

                    String json = conf.toJson();
                    MultiLayerConfiguration c2 = MultiLayerConfiguration.fromJson(json);
                    assertEquals(conf, c2);

                    MultiLayerNetwork net = new MultiLayerNetwork(conf);
                    net.init();

                    String msg = "PoolingType=" + poolingType + ", minibatch=" + minibatchSize + ", activationFn="
                                    + afn + ", kernel = " + kernel;

                    if (PRINT_RESULTS) {
                        System.out.println(msg);
                        for (int j = 0; j < net.getnLayers(); j++)
                            System.out.println("Layer " + j + " # params: " + net.getLayer(j).numParams());
                    }

                    boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

                    assertTrue(msg, gradOK);
                }
            }
        }
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 74, Source: CNN1DGradientCheckTest.java

Example 14: testLRN

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import the required package/class
@Test
public void testLRN() throws Exception {

    Nd4j.getRandom().setSeed(12345);
    int minibatch = 10;
    int depth = 6;
    int hw = 5;
    int nOut = 4;
    INDArray input = Nd4j.rand(new int[] {minibatch, depth, hw, hw});
    INDArray labels = Nd4j.zeros(minibatch, nOut);
    Random r = new Random(12345);
    for (int i = 0; i < minibatch; i++) {
        labels.putScalar(i, r.nextInt(nOut), 1.0);
    }

    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().updater(new NoOp())
                    .seed(12345L).weightInit(WeightInit.DISTRIBUTION)
                    .dist(new NormalDistribution(0, 2)).list()
                    .layer(0, new ConvolutionLayer.Builder().nOut(6).kernelSize(2, 2).stride(1, 1)
                                    .activation(Activation.TANH).build())
                    .layer(1, new LocalResponseNormalization.Builder().build())
                    .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nOut(nOut).build())
                    .setInputType(InputType.convolutional(hw, hw, depth)).pretrain(false).backprop(true);

    MultiLayerNetwork mln = new MultiLayerNetwork(builder.build());
    mln.init();

    Field f = org.deeplearning4j.nn.layers.normalization.LocalResponseNormalization.class
                    .getDeclaredField("helper");
    f.setAccessible(true);

    org.deeplearning4j.nn.layers.normalization.LocalResponseNormalization l =
                    (org.deeplearning4j.nn.layers.normalization.LocalResponseNormalization) mln.getLayer(1);
    LocalResponseNormalizationHelper lrn = (LocalResponseNormalizationHelper) f.get(l);
    assertTrue(lrn instanceof CudnnLocalResponseNormalizationHelper);

    //-------------------------------
    //For debugging/comparison to no-cudnn case: set helper field to null
    //        f.set(l, null);
    //        assertNull(f.get(l));
    //-------------------------------

    if (PRINT_RESULTS) {
        for (int j = 0; j < mln.getnLayers(); j++)
            System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
    }

    boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

    assertTrue(gradOK);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 54, Source: CuDNNGradientChecks.java

Example 15: testConvolutionalNoBias

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import the required package/class
@Test
public void testConvolutionalNoBias() throws Exception {
    int[] minibatchSizes = {1, 4};
    int width = 6;
    int height = 6;
    int inputDepth = 2;
    int nOut = 3;

    Field f = org.deeplearning4j.nn.layers.convolution.ConvolutionLayer.class.getDeclaredField("helper");
    f.setAccessible(true);

    Random r = new Random(12345);
    for (int minibatchSize : minibatchSizes) {
        for (boolean convHasBias : new boolean[]{true, false}) {

            INDArray input = Nd4j.rand(new int[]{minibatchSize, inputDepth, height, width});
            INDArray labels = Nd4j.zeros(minibatchSize, nOut);
            for (int i = 0; i < minibatchSize; i++) {
                labels.putScalar(i, r.nextInt(nOut), 1.0);
            }

            MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder()
                    .weightInit(WeightInit.DISTRIBUTION).dist(new UniformDistribution(-1, 1))
                    .updater(new NoOp()).seed(12345L)
                    .list()
                    .layer(0, new ConvolutionLayer.Builder(2, 2).stride(2, 2).padding(1, 1).nOut(3)
                            .hasBias(convHasBias)
                            .activation(Activation.TANH).build())
                    .layer(1, new ConvolutionLayer.Builder(2, 2).stride(2, 2).padding(0, 0).nOut(3)
                            .hasBias(convHasBias)
                            .activation(Activation.TANH).build())
                    .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                            .activation(Activation.SOFTMAX).nOut(nOut).build())
                    .setInputType(InputType.convolutional(height, width, inputDepth)).pretrain(false)
                    .backprop(true);

            MultiLayerConfiguration conf = builder.build();

            MultiLayerNetwork mln = new MultiLayerNetwork(conf);
            mln.init();

            org.deeplearning4j.nn.layers.convolution.ConvolutionLayer c0 =
                    (org.deeplearning4j.nn.layers.convolution.ConvolutionLayer) mln.getLayer(0);
            ConvolutionHelper ch0 = (ConvolutionHelper) f.get(c0);
            assertTrue(ch0 instanceof CudnnConvolutionHelper);

            org.deeplearning4j.nn.layers.convolution.ConvolutionLayer c1 =
                    (org.deeplearning4j.nn.layers.convolution.ConvolutionLayer) mln.getLayer(1);
            ConvolutionHelper ch1 = (ConvolutionHelper) f.get(c1);
            assertTrue(ch1 instanceof CudnnConvolutionHelper);


            String name = new Object() {}.getClass().getEnclosingMethod().getName() + ", minibatch = "
                    + minibatchSize + ", convHasBias = " + convHasBias;

            if (PRINT_RESULTS) {
                System.out.println(name);
                for (int j = 0; j < mln.getnLayers(); j++)
                    System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
            }

            boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

            assertTrue(name, gradOK);
        }
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 69, Source: CuDNNGradientChecks.java


Note: The org.deeplearning4j.nn.multilayer.MultiLayerNetwork.getnLayers method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by the community; copyright remains with the original authors. Consult the corresponding project's license before distributing or using them; do not reproduce without permission.