

Java MultiLayerNetwork.getLayer Method Code Examples

This article collects typical code examples of the org.deeplearning4j.nn.multilayer.MultiLayerNetwork.getLayer method in Java. If you have been wondering what MultiLayerNetwork.getLayer does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.deeplearning4j.nn.multilayer.MultiLayerNetwork.


The following presents 15 code examples of the MultiLayerNetwork.getLayer method, sorted by popularity by default.
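All 15 examples share the same basic pattern: build a MultiLayerConfiguration, construct and init() a MultiLayerNetwork, then call getLayer(int) with a zero-based layer index to obtain the runtime Layer for inspection or for casting to a concrete layer implementation. Before the collected examples, here is a minimal, self-contained sketch of that pattern; it is not taken from any of the projects below, and the class name, layer types, and sizes are illustrative.

import java.util.Arrays;

import org.deeplearning4j.nn.api.Layer;
import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.deeplearning4j.nn.params.DefaultParamInitializer;
import org.nd4j.linalg.api.ndarray.INDArray;

public class GetLayerSketch {
    public static void main(String[] args) {
        // Two layers: index 0 (hidden) and index 1 (output)
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list()
                .layer(0, new DenseLayer.Builder().nIn(10).nOut(5).build())
                .layer(1, new OutputLayer.Builder().nIn(5).nOut(3).build())
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init(); // parameters exist only after init(); call getLayer afterwards

        // Fetch a single layer by index and inspect its parameters
        Layer l0 = net.getLayer(0);
        INDArray weights = l0.getParam(DefaultParamInitializer.WEIGHT_KEY);
        System.out.println("Layer 0 class: " + l0.getClass().getSimpleName());
        System.out.println("Layer 0 weight shape: " + Arrays.toString(weights.shape()));

        // Iterate over all layers, as several of the examples below do
        for (int i = 0; i < net.getnLayers(); i++) {
            System.out.println("Layer " + i + " # params: " + net.getLayer(i).numParams());
        }
    }
}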

Example 1: testEmbeddingLayerConfig

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import required package/class
@Test
public void testEmbeddingLayerConfig() {

    for(boolean hasBias : new boolean[]{true, false}){
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().activation(Activation.TANH).list()
                .layer(0, new EmbeddingLayer.Builder().hasBias(hasBias).nIn(10).nOut(5).build())
                .layer(1, new OutputLayer.Builder().nIn(5).nOut(4).build()).pretrain(false).backprop(true)
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        Layer l0 = net.getLayer(0);

        assertEquals(org.deeplearning4j.nn.layers.feedforward.embedding.EmbeddingLayer.class, l0.getClass());
        assertEquals(10, ((FeedForwardLayer) l0.conf().getLayer()).getNIn());
        assertEquals(5, ((FeedForwardLayer) l0.conf().getLayer()).getNOut());

        INDArray weights = l0.getParam(DefaultParamInitializer.WEIGHT_KEY);
        INDArray bias = l0.getParam(DefaultParamInitializer.BIAS_KEY);
        assertArrayEquals(new int[]{10, 5}, weights.shape());
        if(hasBias){
            assertArrayEquals(new int[]{1, 5}, bias.shape());
        }
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 27, Source: EmbeddingLayerTest.java

Example 2: getVaeLayer

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import required package/class
@Override
public VariationalAutoencoder getVaeLayer() {
    MultiLayerNetwork network =
                    new MultiLayerNetwork(MultiLayerConfiguration.fromJson((String) jsonConfig.getValue()));
    network.init();
    INDArray val = ((INDArray) params.value()).unsafeDuplication();
    if (val.length() != network.numParams(false))
        throw new IllegalStateException(
                        "Network did not have same number of parameters as the broadcast set parameters");
    network.setParameters(val);

    Layer l = network.getLayer(0);
    if (!(l instanceof VariationalAutoencoder)) {
        throw new RuntimeException(
                        "Cannot use VaeReconstructionProbWithKeyFunction on network that doesn't have a VAE "
                                        + "layer as layer 0. Layer type: " + l.getClass());
    }
    return (VariationalAutoencoder) l;
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 20, Source: VaeReconstructionProbWithKeyFunction.java

Example 3: getVaeLayer

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import required package/class
@Override
public VariationalAutoencoder getVaeLayer() {
    MultiLayerNetwork network =
                    new MultiLayerNetwork(MultiLayerConfiguration.fromJson((String) jsonConfig.getValue()));
    network.init();
    INDArray val = ((INDArray) params.value()).unsafeDuplication();
    if (val.length() != network.numParams(false))
        throw new IllegalStateException(
                        "Network did not have same number of parameters as the broadcast set parameters");
    network.setParameters(val);

    Layer l = network.getLayer(0);
    if (!(l instanceof VariationalAutoencoder)) {
        throw new RuntimeException(
                        "Cannot use VaeReconstructionErrorWithKeyFunction on network that doesn't have a VAE "
                                        + "layer as layer 0. Layer type: " + l.getClass());
    }
    return (VariationalAutoencoder) l;
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 20, Source: VaeReconstructionErrorWithKeyFunction.java

Example 4: isCompatible

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import required package/class
@Override
protected boolean isCompatible (MultiLayerNetwork model,
                                Norms norms)
{
    // Check input numbers for norms
    final int normsIn = norms.means.columns();

    if (normsIn != 1) {
        logger.warn("Incompatible norms count:{} expected:{}", normsIn, 1);

        return false;
    }

    // Check input numbers for model
    final org.deeplearning4j.nn.layers.convolution.ConvolutionLayer inputLayer = (org.deeplearning4j.nn.layers.convolution.ConvolutionLayer) model.getLayer(
            0);
    final org.deeplearning4j.nn.conf.layers.ConvolutionLayer confInputLayer = (org.deeplearning4j.nn.conf.layers.ConvolutionLayer) inputLayer.conf()
            .getLayer();
    final int modelIn = confInputLayer.getNIn();

    if (modelIn != 1) {
        logger.warn("Incompatible features count:{} expected:{}", modelIn, 1);

        return false;
    }

    // Check output numbers for model
    final org.deeplearning4j.nn.layers.OutputLayer outputLayer = (org.deeplearning4j.nn.layers.OutputLayer) model.getOutputLayer();
    final org.deeplearning4j.nn.conf.layers.OutputLayer confOutputLayer = (org.deeplearning4j.nn.conf.layers.OutputLayer) outputLayer.conf()
            .getLayer();
    final int modelOut = confOutputLayer.getNOut();

    if (modelOut != SHAPE_COUNT) {
        logger.warn("Incompatible shape count model:{} expected:{}", modelOut, SHAPE_COUNT);

        return false;
    }

    return true;
}
 
Developer: Audiveris, Project: audiveris, Lines: 41, Source: DeepClassifier.java

Example 5: testDropConnectValues

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import required package/class
@Test
public void testDropConnectValues() {
    Nd4j.getRandom().setSeed(12345);

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .weightInit(WeightInit.ONES)
            .list()
            .layer(new OutputLayer.Builder().nIn(10).nOut(10).build())
            .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    Layer l = net.getLayer(0);
    DropConnect d = new DropConnect(0.5);

    INDArray outTest = d.getParameter(l, "W", 0, 0, false);
    assertTrue(l.getParam("W") == outTest);    //Should be same object
    INDArray outTrain = d.getParameter(l, "W", 0, 0, true);
    assertNotEquals(l.getParam("W"), outTrain);

    assertEquals(l.getParam("W"), Nd4j.ones(10, 10));

    int countZeros = Nd4j.getExecutioner().exec(new MatchCondition(outTrain, Conditions.equals(0))).z().getInt(0);
    int countOnes = Nd4j.getExecutioner().exec(new MatchCondition(outTrain, Conditions.equals(1))).z().getInt(0);

    assertEquals(100, countZeros + countOnes);  //Values should only be 0 or 1
    //Stochastic, but this should hold for most cases
    assertTrue(countZeros >= 25 && countZeros <= 75);
    assertTrue(countOnes >= 25 && countOnes <= 75);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 31, Source: TestWeightNoise.java

Example 6: testCnnZeroPaddingLayer

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import required package/class
@Test
public void testCnnZeroPaddingLayer() {
    Nd4j.getRandom().setSeed(12345);
    int nOut = 4;

    int[] minibatchSizes = {1, 3};
    int width = 6;
    int height = 6;
    int[] inputDepths = {1, 3};

    int[] kernel = {2, 2};
    int[] stride = {1, 1};
    int[] padding = {0, 0};

    int[][] zeroPadLayer = new int[][]{{0, 0, 0, 0}, {1, 1, 0, 0}, {2, 2, 2, 2}};

    for (int inputDepth : inputDepths) {
        for (int minibatchSize : minibatchSizes) {
            INDArray input = Nd4j.rand(new int[]{minibatchSize, inputDepth, height, width});
            INDArray labels = Nd4j.zeros(minibatchSize, nOut);
            for (int i = 0; i < minibatchSize; i++) {
                labels.putScalar(new int[]{i, i % nOut}, 1.0);
            }
            for (int[] zeroPad : zeroPadLayer) {

                MultiLayerConfiguration conf =
                        new NeuralNetConfiguration.Builder().updater(new NoOp()).weightInit(WeightInit.DISTRIBUTION)
                                .dist(new NormalDistribution(0, 1)).list()
                                .layer(0, new ConvolutionLayer.Builder(kernel, stride, padding)
                                        .nIn(inputDepth).nOut(3).build())//output: (6-2+0)/1+1 = 5
                                .layer(1, new ZeroPaddingLayer.Builder(zeroPad).build()).layer(2,
                                new ConvolutionLayer.Builder(kernel, stride,
                                        padding).nIn(3).nOut(3).build())//output: (6-2+0)/1+1 = 5
                                .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                        .activation(Activation.SOFTMAX).nOut(4).build())
                                .setInputType(InputType.convolutional(height, width, inputDepth))
                                .build();

                MultiLayerNetwork net = new MultiLayerNetwork(conf);
                net.init();

                //Check zero padding activation shape
                org.deeplearning4j.nn.layers.convolution.ZeroPaddingLayer zpl =
                        (org.deeplearning4j.nn.layers.convolution.ZeroPaddingLayer) net.getLayer(1);
                int[] expShape = new int[]{minibatchSize, inputDepth, height + zeroPad[0] + zeroPad[1],
                        width + zeroPad[2] + zeroPad[3]};
                INDArray out = zpl.activate(input);
                assertArrayEquals(expShape, out.shape());

                String msg = "minibatch=" + minibatchSize + ", depth=" + inputDepth + ", zeroPad = "
                        + Arrays.toString(zeroPad);

                if (PRINT_RESULTS) {
                    System.out.println(msg);
                    for (int j = 0; j < net.getnLayers(); j++)
                        System.out.println("Layer " + j + " # params: " + net.getLayer(j).numParams());
                }

                boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                        DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

                assertTrue(msg, gradOK);
            }
        }
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 67, Source: CNNGradientCheckTest.java

Example 7: testYoloActivateScoreBasic

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import required package/class
@Test
public void testYoloActivateScoreBasic() {

    //Note that we expect some NaNs here - 0/0 for example in IOU calculation. This is handled explicitly in the
    //implementation
    //Nd4j.getExecutioner().setProfilingMode(OpExecutioner.ProfilingMode.ANY_PANIC);

    int mb = 3;
    int b = 4;
    int c = 3;
    int depth = b * (5 + c);
    int w = 6;
    int h = 6;

    INDArray bbPrior = Nd4j.rand(b, 2).muliRowVector(Nd4j.create(new double[]{w, h}));


    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .list()
            .layer(new ConvolutionLayer.Builder().nIn(1).nOut(1).kernelSize(1,1).build())
            .layer(new Yolo2OutputLayer.Builder()
                    .boundingBoxPriors(bbPrior)
                    .build())
            .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    org.deeplearning4j.nn.layers.objdetect.Yolo2OutputLayer y2impl = (org.deeplearning4j.nn.layers.objdetect.Yolo2OutputLayer) net.getLayer(1);

    INDArray input = Nd4j.rand(new int[]{mb, depth, h, w});

    INDArray out = y2impl.activate(input);
    assertNotNull(out);
    assertArrayEquals(input.shape(), out.shape());



    //Check score method (simple)
    int labelDepth = 4 + c;
    INDArray labels = Nd4j.zeros(mb, labelDepth, h, w);
    //put 1 object per minibatch, at positions (0,0), (1,1) etc.
    //Positions for label boxes: (1,1) to (2,2), (2,2) to (4,4) etc
    labels.putScalar(0, 4 + 0, 0, 0, 1);
    labels.putScalar(1, 4 + 1, 1, 1, 1);
    labels.putScalar(2, 4 + 2, 2, 2, 1);

    labels.putScalar(0, 0, 0, 0, 1);
    labels.putScalar(0, 1, 0, 0, 1);
    labels.putScalar(0, 2, 0, 0, 2);
    labels.putScalar(0, 3, 0, 0, 2);

    labels.putScalar(1, 0, 1, 1, 2);
    labels.putScalar(1, 1, 1, 1, 2);
    labels.putScalar(1, 2, 1, 1, 4);
    labels.putScalar(1, 3, 1, 1, 4);

    labels.putScalar(2, 0, 2, 2, 3);
    labels.putScalar(2, 1, 2, 2, 3);
    labels.putScalar(2, 2, 2, 2, 6);
    labels.putScalar(2, 3, 2, 2, 6);

    y2impl.setInput(input);
    y2impl.setLabels(labels);
    double score = y2impl.computeScore(0, 0, true);

    System.out.println("SCORE: " + score);
    assertTrue(score > 0.0);


    //Finally: test ser/de:
    MultiLayerNetwork netLoaded = TestUtils.testModelSerialization(net);

    y2impl = (org.deeplearning4j.nn.layers.objdetect.Yolo2OutputLayer) netLoaded.getLayer(1);
    y2impl.setInput(input);
    y2impl.setLabels(labels);
    double score2 = y2impl.computeScore(0, 0, true);

    assertEquals(score, score2, 1e-8);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 81, Source: TestYolo2OutputLayer.java

Example 8: testYoloActivateSanityCheck

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import required package/class
@Test
public void testYoloActivateSanityCheck(){

    int mb = 3;
    int b = 4;
    int c = 3;
    int depth = b * (5 + c);
    int w = 6;
    int h = 6;

    INDArray bbPrior = Nd4j.rand(b, 2).muliRowVector(Nd4j.create(new double[]{w, h}));


    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .list()
            .layer(new ConvolutionLayer.Builder().nIn(1).nOut(1).kernelSize(1,1).build())
            .layer(new Yolo2OutputLayer.Builder()
                    .boundingBoxPriors(bbPrior)
                    .build())
            .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    org.deeplearning4j.nn.layers.objdetect.Yolo2OutputLayer y2impl = (org.deeplearning4j.nn.layers.objdetect.Yolo2OutputLayer) net.getLayer(1);

    INDArray input = Nd4j.rand(new int[]{mb, depth, h, w});

    INDArray out = y2impl.activate(input);

    assertEquals(4, out.rank());


    //Check values for x/y, confidence: all should be 0 to 1
    INDArray out5 = out.reshape('c', mb, b, 5+c, h, w);

    INDArray predictedXYCenterGrid = out5.get(all(), all(), interval(0,2), all(), all());
    INDArray predictedWH = out5.get(all(), all(), interval(2,4), all(), all());   //Shape: [mb, B, 2, H, W]
    INDArray predictedConf = out5.get(all(), all(), point(4), all(), all());   //Shape: [mb, B, H, W]


    assertTrue(predictedXYCenterGrid.minNumber().doubleValue() >= 0.0);
    assertTrue(predictedXYCenterGrid.maxNumber().doubleValue() <= 1.0);
    assertTrue(predictedWH.minNumber().doubleValue() >= 0.0);
    assertTrue(predictedConf.minNumber().doubleValue() >= 0.0);
    assertTrue(predictedConf.maxNumber().doubleValue() <= 1.0);


    //Check classes:
    INDArray probs = out5.get(all(), all(), interval(5, 5+c), all(), all());   //Shape: [minibatch, C, H, W]
    assertTrue(probs.minNumber().doubleValue() >= 0.0);
    assertTrue(probs.maxNumber().doubleValue() <= 1.0);

    INDArray probsSum = probs.sum(2);
    assertEquals(1.0, probsSum.minNumber().doubleValue(), 1e-6);
    assertEquals(1.0, probsSum.maxNumber().doubleValue(), 1e-6);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 58, Source: TestYolo2OutputLayer.java

Example 9: testConvolutionalNoBias

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import required package/class
@Test
public void testConvolutionalNoBias() throws Exception {
    int[] minibatchSizes = {1, 4};
    int width = 6;
    int height = 6;
    int inputDepth = 2;
    int nOut = 3;

    Field f = org.deeplearning4j.nn.layers.convolution.ConvolutionLayer.class.getDeclaredField("helper");
    f.setAccessible(true);

    Random r = new Random(12345);
    for (int minibatchSize : minibatchSizes) {
        for (boolean convHasBias : new boolean[]{true, false}) {

            INDArray input = Nd4j.rand(new int[]{minibatchSize, inputDepth, height, width});
            INDArray labels = Nd4j.zeros(minibatchSize, nOut);
            for (int i = 0; i < minibatchSize; i++) {
                labels.putScalar(i, r.nextInt(nOut), 1.0);
            }

            MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder()
                    .weightInit(WeightInit.DISTRIBUTION).dist(new UniformDistribution(-1, 1))
                    .updater(new NoOp()).seed(12345L)
                    .list()
                    .layer(0, new ConvolutionLayer.Builder(2, 2).stride(2, 2).padding(1, 1).nOut(3)
                            .hasBias(convHasBias)
                            .activation(Activation.TANH).build())
                    .layer(1, new ConvolutionLayer.Builder(2, 2).stride(2, 2).padding(0, 0).nOut(3)
                            .hasBias(convHasBias)
                            .activation(Activation.TANH).build())
                    .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                            .activation(Activation.SOFTMAX).nOut(nOut).build())
                    .setInputType(InputType.convolutional(height, width, inputDepth)).pretrain(false)
                    .backprop(true);

            MultiLayerConfiguration conf = builder.build();

            MultiLayerNetwork mln = new MultiLayerNetwork(conf);
            mln.init();

            org.deeplearning4j.nn.layers.convolution.ConvolutionLayer c0 =
                    (org.deeplearning4j.nn.layers.convolution.ConvolutionLayer) mln.getLayer(0);
            ConvolutionHelper ch0 = (ConvolutionHelper) f.get(c0);
            assertTrue(ch0 instanceof CudnnConvolutionHelper);

            org.deeplearning4j.nn.layers.convolution.ConvolutionLayer c1 =
                    (org.deeplearning4j.nn.layers.convolution.ConvolutionLayer) mln.getLayer(1);
            ConvolutionHelper ch1 = (ConvolutionHelper) f.get(c1);
            assertTrue(ch1 instanceof CudnnConvolutionHelper);


            String name = new Object() {}.getClass().getEnclosingMethod().getName() + ", minibatch = "
                    + minibatchSize + ", convHasBias = " + convHasBias;

            if (PRINT_RESULTS) {
                System.out.println(name);
                for (int j = 0; j < mln.getnLayers(); j++)
                    System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
            }

            boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

            assertTrue(name, gradOK);
        }
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 69, Source: CuDNNGradientChecks.java

Example 10: testBatchNormCnn

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import required package/class
@Test
public void testBatchNormCnn() throws Exception {
    //Note: CuDNN batch norm supports 4d only, as per 5.1 (according to api reference documentation)
    Nd4j.getRandom().setSeed(12345);
    int minibatch = 10;
    int depth = 1;
    int hw = 4;
    int nOut = 4;
    INDArray input = Nd4j.rand(new int[] {minibatch, depth, hw, hw});
    INDArray labels = Nd4j.zeros(minibatch, nOut);
    Random r = new Random(12345);
    for (int i = 0; i < minibatch; i++) {
        labels.putScalar(i, r.nextInt(nOut), 1.0);
    }

    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().updater(new NoOp())
                    .seed(12345L).weightInit(WeightInit.DISTRIBUTION)
                    .dist(new NormalDistribution(0, 2)).list()
                    .layer(0, new ConvolutionLayer.Builder().kernelSize(2, 2).stride(1, 1).nIn(depth).nOut(2)
                                    .activation(Activation.IDENTITY).build())
                    .layer(1, new BatchNormalization.Builder().build())
                    .layer(2, new ActivationLayer.Builder().activation(Activation.TANH).build())
                    .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nOut(nOut).build())
                    .setInputType(InputType.convolutional(hw, hw, depth)).pretrain(false).backprop(true);

    MultiLayerNetwork mln = new MultiLayerNetwork(builder.build());
    mln.init();

    Field f = org.deeplearning4j.nn.layers.normalization.BatchNormalization.class.getDeclaredField("helper");
    f.setAccessible(true);

    org.deeplearning4j.nn.layers.normalization.BatchNormalization b =
                    (org.deeplearning4j.nn.layers.normalization.BatchNormalization) mln.getLayer(1);
    BatchNormalizationHelper bn = (BatchNormalizationHelper) f.get(b);
    assertTrue(bn instanceof CudnnBatchNormalizationHelper);

    //-------------------------------
    //For debugging/comparison to no-cudnn case: set helper field to null
    //        f.set(b, null);
    //        assertNull(f.get(b));
    //-------------------------------

    if (PRINT_RESULTS) {
        for (int j = 0; j < mln.getnLayers(); j++)
            System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
    }

    boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

    assertTrue(gradOK);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 54, Source: CuDNNGradientChecks.java

Example 11: testLRN

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import required package/class
@Test
public void testLRN() throws Exception {

    Nd4j.getRandom().setSeed(12345);
    int minibatch = 10;
    int depth = 6;
    int hw = 5;
    int nOut = 4;
    INDArray input = Nd4j.rand(new int[] {minibatch, depth, hw, hw});
    INDArray labels = Nd4j.zeros(minibatch, nOut);
    Random r = new Random(12345);
    for (int i = 0; i < minibatch; i++) {
        labels.putScalar(i, r.nextInt(nOut), 1.0);
    }

    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().updater(new NoOp())
                    .seed(12345L).weightInit(WeightInit.DISTRIBUTION)
                    .dist(new NormalDistribution(0, 2)).list()
                    .layer(0, new ConvolutionLayer.Builder().nOut(6).kernelSize(2, 2).stride(1, 1)
                                    .activation(Activation.TANH).build())
                    .layer(1, new LocalResponseNormalization.Builder().build())
                    .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nOut(nOut).build())
                    .setInputType(InputType.convolutional(hw, hw, depth)).pretrain(false).backprop(true);

    MultiLayerNetwork mln = new MultiLayerNetwork(builder.build());
    mln.init();

    Field f = org.deeplearning4j.nn.layers.normalization.LocalResponseNormalization.class
                    .getDeclaredField("helper");
    f.setAccessible(true);

    org.deeplearning4j.nn.layers.normalization.LocalResponseNormalization l =
                    (org.deeplearning4j.nn.layers.normalization.LocalResponseNormalization) mln.getLayer(1);
    LocalResponseNormalizationHelper lrn = (LocalResponseNormalizationHelper) f.get(l);
    assertTrue(lrn instanceof CudnnLocalResponseNormalizationHelper);

    //-------------------------------
    //For debugging/comparison to no-cudnn case: set helper field to null
    //        f.set(l, null);
    //        assertNull(f.get(l));
    //-------------------------------

    if (PRINT_RESULTS) {
        for (int j = 0; j < mln.getnLayers(); j++)
            System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
    }

    boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

    assertTrue(gradOK);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 54, Source: CuDNNGradientChecks.java

Example 12: testLSTM

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import required package/class
@Test
public void testLSTM() throws Exception {

    Nd4j.getRandom().setSeed(12345);
    int minibatch = 10;
    int inputSize = 8;
    int lstmLayerSize = 7;
    int timeSeriesLength = 6;
    int nOut = 4;
    INDArray input = Nd4j.rand(new int[] {minibatch, inputSize, timeSeriesLength});
    INDArray labels = Nd4j.zeros(minibatch, nOut, timeSeriesLength);
    Random r = new Random(12345);
    for (int i = 0; i < minibatch; i++) {
        for (int j = 0; j < timeSeriesLength; j++) {
            labels.putScalar(i, r.nextInt(nOut), j, 1.0);
        }
    }

    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder()
                    .updater(new NoOp()).seed(12345L).weightInit(WeightInit.DISTRIBUTION)
                    .dist(new NormalDistribution(0, 2)).list()
                    .layer(0, new LSTM.Builder().nIn(input.size(1)).nOut(lstmLayerSize)
                                    .gateActivationFunction(Activation.SIGMOID).activation(Activation.TANH).build())
                    .layer(1, new LSTM.Builder().nIn(lstmLayerSize).nOut(lstmLayerSize)
                                    .gateActivationFunction(Activation.SIGMOID).activation(Activation.TANH).build())
                    .layer(2, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nIn(lstmLayerSize).nOut(nOut).build())
                    .pretrain(false).backprop(true);

    MultiLayerNetwork mln = new MultiLayerNetwork(builder.build());
    mln.init();

    Field f = org.deeplearning4j.nn.layers.recurrent.LSTM.class.getDeclaredField("helper");
    f.setAccessible(true);

    org.deeplearning4j.nn.layers.recurrent.LSTM l = (org.deeplearning4j.nn.layers.recurrent.LSTM) mln.getLayer(1);
    LSTMHelper helper = (LSTMHelper) f.get(l);
    assertTrue(helper instanceof CudnnLSTMHelper);

    //-------------------------------
    //For debugging/comparison to no-cudnn case: set helper field to null
    //        f.set(l, null);
    //        assertNull(f.get(l));
    //-------------------------------

    if (PRINT_RESULTS) {
        for (int j = 0; j < mln.getnLayers(); j++)
            System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
    }

    boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

    assertTrue(gradOK);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 56, Source: CuDNNGradientChecks.java

Example 13: testLSTM2

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import required package/class
@Test
public void testLSTM2() throws Exception {

    Nd4j.getRandom().setSeed(12345);
    int minibatch = 10;
    int inputSize = 3;
    int lstmLayerSize = 4;
    int timeSeriesLength = 3;
    int nOut = 2;
    INDArray input = Nd4j.rand(new int[] {minibatch, inputSize, timeSeriesLength});
    INDArray labels = Nd4j.zeros(minibatch, nOut, timeSeriesLength);
    Random r = new Random(12345);
    for (int i = 0; i < minibatch; i++) {
        for (int j = 0; j < timeSeriesLength; j++) {
            labels.putScalar(i, r.nextInt(nOut), j, 1.0);
        }
    }

    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder()
                    .updater(new NoOp()).seed(12345L).weightInit(WeightInit.DISTRIBUTION)
                    .dist(new NormalDistribution(0, 2)).list()
                    .layer(0, new LSTM.Builder().nIn(input.size(1)).nOut(lstmLayerSize)
                                    .gateActivationFunction(Activation.SIGMOID).activation(Activation.TANH).build())
                    .layer(1, new LSTM.Builder().nIn(lstmLayerSize).nOut(lstmLayerSize)
                                    .gateActivationFunction(Activation.SIGMOID).activation(Activation.TANH).build())
                    .layer(2, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nIn(lstmLayerSize).nOut(nOut).build())
                    .pretrain(false).backprop(true);

    MultiLayerNetwork mln = new MultiLayerNetwork(builder.build());
    mln.init();

    Field f = org.deeplearning4j.nn.layers.recurrent.LSTM.class.getDeclaredField("helper");
    f.setAccessible(true);

    org.deeplearning4j.nn.layers.recurrent.LSTM l = (org.deeplearning4j.nn.layers.recurrent.LSTM) mln.getLayer(1);
    LSTMHelper helper = (LSTMHelper) f.get(l);
    assertTrue(helper instanceof CudnnLSTMHelper);

    //-------------------------------
    //For debugging/comparison to no-cudnn case: set helper field to null
    //        f.set(l, null);
    //        assertNull(f.get(l));
    //-------------------------------

    if (PRINT_RESULTS) {
        for (int j = 0; j < mln.getnLayers(); j++)
            System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
    }

    boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

    assertTrue(gradOK);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 56, Source: CuDNNGradientChecks.java

Example 14: validateImplSimple

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import required package/class
@Test
public void validateImplSimple() throws Exception {

    Nd4j.getRandom().setSeed(12345);
    int minibatch = 10;
    int inputSize = 3;
    int lstmLayerSize = 4;
    int timeSeriesLength = 3;
    int nOut = 2;
    INDArray input = Nd4j.rand(new int[] {minibatch, inputSize, timeSeriesLength});
    INDArray labels = Nd4j.zeros(minibatch, nOut, timeSeriesLength);
    Random r = new Random(12345);
    for (int i = 0; i < minibatch; i++) {
        for (int j = 0; j < timeSeriesLength; j++) {
            labels.putScalar(i, r.nextInt(nOut), j, 1.0);
        }
    }

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().inferenceWorkspaceMode(WorkspaceMode.NONE)
                    .trainingWorkspaceMode(WorkspaceMode.NONE).updater(new NoOp())
                    .seed(12345L).weightInit(WeightInit.DISTRIBUTION)
                    .dist(new NormalDistribution(0, 2)).list()
                    .layer(0, new LSTM.Builder().nIn(input.size(1)).nOut(lstmLayerSize)
                                    .gateActivationFunction(Activation.SIGMOID).activation(Activation.TANH).build())
                    .layer(1, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nIn(lstmLayerSize).nOut(nOut).build())
                    .pretrain(false).backprop(true).build();

    MultiLayerNetwork mln1 = new MultiLayerNetwork(conf.clone());
    mln1.init();

    MultiLayerNetwork mln2 = new MultiLayerNetwork(conf.clone());
    mln2.init();


    assertEquals(mln1.params(), mln2.params());

    Field f = org.deeplearning4j.nn.layers.recurrent.LSTM.class.getDeclaredField("helper");
    f.setAccessible(true);

    Layer l0 = mln1.getLayer(0);
    f.set(l0, null);
    assertNull(f.get(l0));

    l0 = mln2.getLayer(0);
    assertTrue(f.get(l0) instanceof CudnnLSTMHelper);


    INDArray out1 = mln1.output(input);
    INDArray out2 = mln2.output(input);

    assertEquals(out1, out2);


    mln1.setInput(input);
    mln1.setLabels(labels);

    mln2.setInput(input);
    mln2.setLabels(labels);

    mln1.computeGradientAndScore();
    mln2.computeGradientAndScore();

    assertEquals(mln1.score(), mln2.score(), 1e-8);

    Gradient g1 = mln1.gradient();
    Gradient g2 = mln2.gradient();

    for (Map.Entry<String, INDArray> entry : g1.gradientForVariable().entrySet()) {
        INDArray exp = entry.getValue();
        INDArray act = g2.gradientForVariable().get(entry.getKey());

        System.out.println(entry.getKey() + "\t" + exp.equals(act));
    }

    assertEquals(mln1.getFlattenedGradients(), mln2.getFlattenedGradients());
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 78, Source: ValidateCudnnLSTM.java

Example 15: validateImplMultiLayerTBPTT

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import required package/class
@Test
public void validateImplMultiLayerTBPTT() throws Exception {

    Nd4j.getRandom().setSeed(12345);
    int minibatch = 10;
    int inputSize = 3;
    int lstmLayerSize = 4;
    int timeSeriesLength = 23;
    int tbpttLength = 5;
    int nOut = 2;

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new NoOp())
                    .inferenceWorkspaceMode(WorkspaceMode.NONE).trainingWorkspaceMode(WorkspaceMode.NONE)
                    .seed(12345L).weightInit(WeightInit.DISTRIBUTION)
                    .dist(new NormalDistribution(0, 2)).list()
                    .layer(0, new LSTM.Builder().nIn(inputSize).nOut(lstmLayerSize)
                                    .gateActivationFunction(Activation.SIGMOID).activation(Activation.TANH).build())
                    .layer(1, new LSTM.Builder().nIn(lstmLayerSize).nOut(lstmLayerSize)
                                    .gateActivationFunction(Activation.SIGMOID).activation(Activation.TANH).build())
                    .layer(2, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nIn(lstmLayerSize).nOut(nOut).build())
                    .pretrain(false).backprop(true).backpropType(BackpropType.TruncatedBPTT)
                    .tBPTTLength(tbpttLength).build();

    MultiLayerNetwork mln1 = new MultiLayerNetwork(conf.clone());
    mln1.init();

    MultiLayerNetwork mln2 = new MultiLayerNetwork(conf.clone());
    mln2.init();


    assertEquals(mln1.params(), mln2.params());

    Field f = org.deeplearning4j.nn.layers.recurrent.LSTM.class.getDeclaredField("helper");
    f.setAccessible(true);

    Layer l0 = mln1.getLayer(0);
    Layer l1 = mln1.getLayer(1);
    f.set(l0, null);
    f.set(l1, null);
    assertNull(f.get(l0));
    assertNull(f.get(l1));

    l0 = mln2.getLayer(0);
    l1 = mln2.getLayer(1);
    assertTrue(f.get(l0) instanceof CudnnLSTMHelper);
    assertTrue(f.get(l1) instanceof CudnnLSTMHelper);

    Random r = new Random(12345);
    for (int x = 0; x < 1; x++) {
        INDArray input = Nd4j.rand(new int[] {minibatch, inputSize, timeSeriesLength});
        INDArray labels = Nd4j.zeros(minibatch, nOut, timeSeriesLength);
        for (int i = 0; i < minibatch; i++) {
            for (int j = 0; j < timeSeriesLength; j++) {
                labels.putScalar(i, r.nextInt(nOut), j, 1.0);
            }
        }

        DataSet ds = new DataSet(input, labels);
        mln1.fit(ds);
        mln2.fit(ds);
    }


    assertEquals(mln1.params(), mln2.params());
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 67, Source: ValidateCudnnLSTM.java


Note: The org.deeplearning4j.nn.multilayer.MultiLayerNetwork.getLayer method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. For redistribution and use, refer to the license of the corresponding project. Do not reproduce without permission.