

Java ComputationGraph.getNumLayers Method Code Examples

This article collects typical usage examples of the Java method org.deeplearning4j.nn.graph.ComputationGraph.getNumLayers. If you are wondering what ComputationGraph.getNumLayers does or how to use it in practice, the curated examples below should help. You can also browse further usage examples for the org.deeplearning4j.nn.graph.ComputationGraph class.


The following presents 15 code examples of the ComputationGraph.getNumLayers method, sorted by popularity by default.
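
getNumLayers() returns the number of layer vertices in a ComputationGraph (non-layer vertices such as MergeVertex are not counted), and it is typically paired with getLayer(int) to iterate over the layers, as every example below does. As a quick orientation before the collected examples, here is a minimal sketch of that pattern; the graph topology and layer sizes are illustrative assumptions, not taken from any particular example:

import org.deeplearning4j.nn.conf.ComputationGraphConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.graph.ComputationGraph;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class GetNumLayersSketch {
    public static void main(String[] args) {
        // A small two-layer graph: one hidden DenseLayer plus one OutputLayer
        ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
                .graphBuilder()
                .addInputs("in")
                .addLayer("hidden", new DenseLayer.Builder().nIn(4).nOut(5)
                        .activation(Activation.TANH).build(), "in")
                .addLayer("out", new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .nIn(5).nOut(3).activation(Activation.SOFTMAX).build(), "hidden")
                .setOutputs("out")
                .build();

        ComputationGraph graph = new ComputationGraph(conf);
        graph.init();

        // getNumLayers() counts layer vertices only; here it returns 2
        System.out.println("Number of layers: " + graph.getNumLayers());
        for (int i = 0; i < graph.getNumLayers(); i++) {
            System.out.println("Layer " + i + " # params: " + graph.getLayer(i).numParams());
        }
    }
}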

Example 1: testGraphEmbeddingLayerSimple

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class the method depends on
@Test
public void testGraphEmbeddingLayerSimple() {
    Random r = new Random(12345);
    int nExamples = 5;
    INDArray input = Nd4j.zeros(nExamples, 1);
    INDArray labels = Nd4j.zeros(nExamples, 3);
    for (int i = 0; i < nExamples; i++) {
        input.putScalar(i, r.nextInt(4));
        labels.putScalar(new int[] {i, r.nextInt(3)}, 1.0);
    }

    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().l2(0.2).l1(0.1)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(12345L)
                    .updater(new NoOp()).graphBuilder().addInputs("in")
                    .addLayer("0", new EmbeddingLayer.Builder().nIn(4).nOut(3).weightInit(WeightInit.XAVIER)
                                    .activation(Activation.TANH).build(), "in")
                    .addLayer("1", new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(3).nOut(3)
                                    .activation(Activation.SOFTMAX).build(), "0")
                    .setOutputs("1").build();

    ComputationGraph cg = new ComputationGraph(conf);
    cg.init();

    if (PRINT_RESULTS) {
        System.out.println("testGraphEmbeddingLayerSimple");
        for (int j = 0; j < cg.getNumLayers(); j++)
            System.out.println("Layer " + j + " # params: " + cg.getLayer(j).numParams());
    }

    boolean gradOK = GradientCheckUtil.checkGradients(cg, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, DEFAULT_MIN_ABS_ERROR,
                    PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[] {input}, new INDArray[] {labels});

    String msg = "testGraphEmbeddingLayerSimple";
    assertTrue(msg, gradOK);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 36, Source: GradientCheckTestsComputationGraph.java

Example 2: testMultipleOutputsLayer

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class the method depends on
@Test
public void testMultipleOutputsLayer() {
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1))
                    .updater(new NoOp()).activation(Activation.TANH).graphBuilder().addInputs("i0")
                    .addLayer("d0", new DenseLayer.Builder().nIn(2).nOut(2).build(), "i0")
                    .addLayer("d1", new DenseLayer.Builder().nIn(2).nOut(2).build(), "d0")
                    .addLayer("d2", new DenseLayer.Builder().nIn(2).nOut(2).build(), "d0")
                    .addLayer("d3", new DenseLayer.Builder().nIn(2).nOut(2).build(), "d0")
                    .addLayer("out", new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).nIn(6)
                                    .nOut(2).build(), "d1", "d2", "d3")
                    .setOutputs("out").pretrain(false).backprop(true).build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    int[] minibatchSizes = {1, 3};
    for (int mb : minibatchSizes) {
        INDArray input = Nd4j.rand(mb, 2);
        INDArray out = Nd4j.rand(mb, 2);


        String msg = "testMultipleOutputsLayer() - minibatchSize = " + mb;
        if (PRINT_RESULTS) {
            System.out.println(msg);
            for (int j = 0; j < graph.getNumLayers(); j++)
                System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
        }

        boolean gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                        DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[] {input},
                        new INDArray[] {out});

        assertTrue(msg, gradOK);
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 39, Source: GradientCheckTestsComputationGraph.java

Example 3: testBasicIris

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class the method depends on
@Test
public void testBasicIris() {
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1)).updater(new NoOp())
                    .graphBuilder().addInputs("input")
                    .addLayer("firstLayer",
                                    new DenseLayer.Builder().nIn(4).nOut(5).activation(Activation.TANH).build(),
                                    "input")
                    .addLayer("outputLayer",
                                    new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT)
                                                    .activation(Activation.SOFTMAX).nIn(5).nOut(3).build(),
                                    "firstLayer")
                    .setOutputs("outputLayer").pretrain(false).backprop(true).build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    Nd4j.getRandom().setSeed(12345);
    int nParams = graph.numParams();
    INDArray newParams = Nd4j.rand(1, nParams);
    graph.setParams(newParams);

    DataSet ds = new IrisDataSetIterator(150, 150).next();
    INDArray min = ds.getFeatureMatrix().min(0);
    INDArray max = ds.getFeatureMatrix().max(0);
    ds.getFeatureMatrix().subiRowVector(min).diviRowVector(max.sub(min));
    INDArray input = ds.getFeatureMatrix();
    INDArray labels = ds.getLabels();

    if (PRINT_RESULTS) {
        System.out.println("testBasicIris()");
        for (int j = 0; j < graph.getNumLayers(); j++)
            System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
    }

    boolean gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[] {input},
                    new INDArray[] {labels});

    String msg = "testBasicIris()";
    assertTrue(msg, gradOK);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 45, Source: GradientCheckTestsComputationGraph.java

Example 4: testBasicIrisWithMerging

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class the method depends on
@Test
public void testBasicIrisWithMerging() {
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1)).updater(new NoOp())
                    .graphBuilder().addInputs("input")
                    .addLayer("l1", new DenseLayer.Builder().nIn(4).nOut(5).activation(Activation.TANH).build(),
                                    "input")
                    .addLayer("l2", new DenseLayer.Builder().nIn(4).nOut(5).activation(Activation.TANH).build(),
                                    "input")
                    .addVertex("merge", new MergeVertex(), "l1", "l2")
                    .addLayer("outputLayer",
                                    new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT)
                                                    .activation(Activation.SOFTMAX).nIn(5 + 5).nOut(3).build(),
                                    "merge")
                    .setOutputs("outputLayer").pretrain(false).backprop(true).build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    int numParams = (4 * 5 + 5) + (4 * 5 + 5) + (10 * 3 + 3);
    assertEquals(numParams, graph.numParams());

    Nd4j.getRandom().setSeed(12345);
    int nParams = graph.numParams();
    INDArray newParams = Nd4j.rand(1, nParams);
    graph.setParams(newParams);

    DataSet ds = new IrisDataSetIterator(150, 150).next();
    INDArray min = ds.getFeatureMatrix().min(0);
    INDArray max = ds.getFeatureMatrix().max(0);
    ds.getFeatureMatrix().subiRowVector(min).diviRowVector(max.sub(min));
    INDArray input = ds.getFeatureMatrix();
    INDArray labels = ds.getLabels();

    if (PRINT_RESULTS) {
        System.out.println("testBasicIrisWithMerging()");
        for (int j = 0; j < graph.getNumLayers(); j++)
            System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
    }

    boolean gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[] {input},
                    new INDArray[] {labels});

    String msg = "testBasicIrisWithMerging()";
    assertTrue(msg, gradOK);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 50, Source: GradientCheckTestsComputationGraph.java

Example 5: testBasicIrisWithElementWiseNode

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class the method depends on
@Test
public void testBasicIrisWithElementWiseNode() {

    ElementWiseVertex.Op[] ops = new ElementWiseVertex.Op[] {ElementWiseVertex.Op.Add,
                    ElementWiseVertex.Op.Subtract, ElementWiseVertex.Op.Product, ElementWiseVertex.Op.Average, ElementWiseVertex.Op.Max};

    for (ElementWiseVertex.Op op : ops) {

        Nd4j.getRandom().setSeed(12345);
        ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                        .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                        .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1))
                        .updater(new NoOp()).graphBuilder().addInputs("input")
                        .addLayer("l1", new DenseLayer.Builder().nIn(4).nOut(5).activation(Activation.TANH).build(),
                                        "input")
                        .addLayer("l2", new DenseLayer.Builder().nIn(4).nOut(5).activation(Activation.SIGMOID)
                                        .build(), "input")
                        .addVertex("elementwise", new ElementWiseVertex(op), "l1", "l2")
                        .addLayer("outputLayer",
                                        new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT)
                                                        .activation(Activation.SOFTMAX).nIn(5).nOut(3).build(),
                                        "elementwise")
                        .setOutputs("outputLayer").pretrain(false).backprop(true).build();

        ComputationGraph graph = new ComputationGraph(conf);
        graph.init();

        int numParams = (4 * 5 + 5) + (4 * 5 + 5) + (5 * 3 + 3);
        assertEquals(numParams, graph.numParams());

        Nd4j.getRandom().setSeed(12345);
        int nParams = graph.numParams();
        INDArray newParams = Nd4j.rand(1, nParams);
        graph.setParams(newParams);

        DataSet ds = new IrisDataSetIterator(150, 150).next();
        INDArray min = ds.getFeatureMatrix().min(0);
        INDArray max = ds.getFeatureMatrix().max(0);
        ds.getFeatureMatrix().subiRowVector(min).diviRowVector(max.sub(min));
        INDArray input = ds.getFeatureMatrix();
        INDArray labels = ds.getLabels();

        if (PRINT_RESULTS) {
            System.out.println("testBasicIrisWithElementWiseVertex(op=" + op + ")");
            for (int j = 0; j < graph.getNumLayers(); j++)
                System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
        }

        boolean gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                        DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[] {input},
                        new INDArray[] {labels});

        String msg = "testBasicIrisWithElementWiseVertex(op=" + op + ")";
        assertTrue(msg, gradOK);
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 57, Source: GradientCheckTestsComputationGraph.java

Example 6: testBasicIrisWithElementWiseNodeInputSizeGreaterThanTwo

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class the method depends on
@Test
public void testBasicIrisWithElementWiseNodeInputSizeGreaterThanTwo() {

    ElementWiseVertex.Op[] ops =
                    new ElementWiseVertex.Op[] {ElementWiseVertex.Op.Add, ElementWiseVertex.Op.Product, ElementWiseVertex.Op.Average, ElementWiseVertex.Op.Max};

    for (ElementWiseVertex.Op op : ops) {

        Nd4j.getRandom().setSeed(12345);
        ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                        .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                        .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1))
                        .updater(new NoOp()).graphBuilder().addInputs("input")
                        .addLayer("l1", new DenseLayer.Builder().nIn(4).nOut(5).activation(Activation.TANH).build(),
                                        "input")
                        .addLayer("l2", new DenseLayer.Builder().nIn(4).nOut(5).activation(Activation.SIGMOID)
                                        .build(), "input")
                        .addLayer("l3", new DenseLayer.Builder().nIn(4).nOut(5).activation(Activation.RELU).build(),
                                        "input")
                        .addVertex("elementwise", new ElementWiseVertex(op), "l1", "l2", "l3")
                        .addLayer("outputLayer",
                                        new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT)
                                                        .activation(Activation.SOFTMAX).nIn(5).nOut(3).build(),
                                        "elementwise")
                        .setOutputs("outputLayer").pretrain(false).backprop(true).build();

        ComputationGraph graph = new ComputationGraph(conf);
        graph.init();

        int numParams = (4 * 5 + 5) + (4 * 5 + 5) + (4 * 5 + 5) + (5 * 3 + 3);
        assertEquals(numParams, graph.numParams());

        Nd4j.getRandom().setSeed(12345);
        int nParams = graph.numParams();
        INDArray newParams = Nd4j.rand(1, nParams);
        graph.setParams(newParams);

        DataSet ds = new IrisDataSetIterator(150, 150).next();
        INDArray min = ds.getFeatureMatrix().min(0);
        INDArray max = ds.getFeatureMatrix().max(0);
        ds.getFeatureMatrix().subiRowVector(min).diviRowVector(max.sub(min));
        INDArray input = ds.getFeatureMatrix();
        INDArray labels = ds.getLabels();

        if (PRINT_RESULTS) {
            System.out.println("testBasicIrisWithElementWiseVertex(op=" + op + ")");
            for (int j = 0; j < graph.getNumLayers(); j++)
                System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
        }

        boolean gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                        DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[] {input},
                        new INDArray[] {labels});

        String msg = "testBasicIrisWithElementWiseVertex(op=" + op + ")";
        assertTrue(msg, gradOK);
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 59, Source: GradientCheckTestsComputationGraph.java

Example 7: testCnnDepthMerge

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class the method depends on
@Test
public void testCnnDepthMerge() {

    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 0.1))
                    .updater(new NoOp()).graphBuilder().addInputs("input")
                    .addLayer("l1", new ConvolutionLayer.Builder().kernelSize(2, 2).stride(1, 1).padding(0, 0)
                                    .nIn(2).nOut(2).activation(Activation.TANH).build(), "input")
                    .addLayer("l2", new ConvolutionLayer.Builder().kernelSize(2, 2).stride(1, 1).padding(0, 0)
                                    .nIn(2).nOut(2).activation(Activation.TANH).build(), "input")
                    .addVertex("merge", new MergeVertex(), "l1", "l2")
                    .addLayer("outputLayer",
                                    new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT)
                                                    .activation(Activation.SOFTMAX).nIn(5 * 5 * (2 + 2)).nOut(3)
                                                    .build(),
                                    "merge")
                    .setOutputs("outputLayer")
                    .inputPreProcessor("outputLayer", new CnnToFeedForwardPreProcessor(5, 5, 4)).pretrain(false)
                    .backprop(true).build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    Random r = new Random(12345);
    INDArray input = Nd4j.rand(new int[] {5, 2, 6, 6}); //Order: examples, channels, height, width
    INDArray labels = Nd4j.zeros(5, 3);
    for (int i = 0; i < 5; i++)
        labels.putScalar(new int[] {i, r.nextInt(3)}, 1.0);

    if (PRINT_RESULTS) {
        System.out.println("testCnnDepthMerge()");
        for (int j = 0; j < graph.getNumLayers(); j++)
            System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
    }

    boolean gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[] {input},
                    new INDArray[] {labels});

    String msg = "testCnnDepthMerge()";
    assertTrue(msg, gradOK);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 45, Source: GradientCheckTestsComputationGraph.java

Example 8: testLSTMWithMerging

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class the method depends on
@Test
public void testLSTMWithMerging() {

    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf =
                    new NeuralNetConfiguration.Builder().seed(12345)
                                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                                    .weightInit(WeightInit.DISTRIBUTION).dist(new UniformDistribution(0.2, 0.6))
                                    .updater(new NoOp()).graphBuilder().addInputs("input")
                                    .setOutputs("out")
                                    .addLayer("lstm1",
                                                    new GravesLSTM.Builder().nIn(3).nOut(4)
                                                                    .activation(Activation.TANH).build(),
                                                    "input")
                                    .addLayer("lstm2",
                                                    new GravesLSTM.Builder().nIn(4).nOut(4)
                                                                    .activation(Activation.TANH).build(),
                                                    "lstm1")
                                    .addLayer("dense1",
                                                    new DenseLayer.Builder().nIn(4).nOut(4)
                                                                    .activation(Activation.SIGMOID).build(),
                                                    "lstm1")
                                    .addLayer("lstm3",
                                                    new GravesLSTM.Builder().nIn(4).nOut(4)
                                                                    .activation(Activation.TANH).build(),
                                                    "dense1")
                                    .addVertex("merge", new MergeVertex(), "lstm2", "lstm3")
                                    .addLayer("out", new RnnOutputLayer.Builder().nIn(8).nOut(3)
                                                    .activation(Activation.SOFTMAX)
                                                    .lossFunction(LossFunctions.LossFunction.MCXENT).build(),
                                                    "merge")
                                    .inputPreProcessor("dense1", new RnnToFeedForwardPreProcessor())
                                    .inputPreProcessor("lstm3", new FeedForwardToRnnPreProcessor()).pretrain(false)
                                    .backprop(true).build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    Random r = new Random(12345);
    INDArray input = Nd4j.rand(new int[] {3, 3, 5});
    INDArray labels = Nd4j.zeros(3, 3, 5);
    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 5; j++) {
            labels.putScalar(new int[] {i, r.nextInt(3), j}, 1.0);
        }
    }

    if (PRINT_RESULTS) {
        System.out.println("testLSTMWithMerging()");
        for (int j = 0; j < graph.getNumLayers(); j++)
            System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
    }

    boolean gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[] {input},
                    new INDArray[] {labels});

    String msg = "testLSTMWithMerging()";
    assertTrue(msg, gradOK);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 61, Source: GradientCheckTestsComputationGraph.java

Example 9: testLSTMWithLastTimeStepVertex

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class the method depends on
@Test
public void testLSTMWithLastTimeStepVertex() {

    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1))
                    .updater(new NoOp()).graphBuilder().addInputs("input").setOutputs("out")
                    .addLayer("lstm1", new GravesLSTM.Builder().nIn(3).nOut(4).activation(Activation.TANH).build(),
                                    "input")
                    .addVertex("lastTS", new LastTimeStepVertex("input"), "lstm1")
                    .addLayer("out", new OutputLayer.Builder().nIn(4).nOut(3).activation(Activation.SOFTMAX)
                                    .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "lastTS")
                    .pretrain(false).backprop(true).build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    Random r = new Random(12345);
    INDArray input = Nd4j.rand(new int[] {3, 3, 5});
    INDArray labels = Nd4j.zeros(3, 3); //Here: labels are 2d (due to LastTimeStepVertex)
    for (int i = 0; i < 3; i++) {
        labels.putScalar(new int[] {i, r.nextInt(3)}, 1.0);
    }

    if (PRINT_RESULTS) {
        System.out.println("testLSTMWithLastTimeStepVertex()");
        for (int j = 0; j < graph.getNumLayers(); j++)
            System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
    }

    //First: test with no input mask array
    boolean gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[] {input},
                    new INDArray[] {labels});

    String msg = "testLSTMWithLastTimeStepVertex()";
    assertTrue(msg, gradOK);

    //Second: test with input mask arrays.
    INDArray inMask = Nd4j.zeros(3, 5);
    inMask.putRow(0, Nd4j.create(new double[] {1, 1, 1, 0, 0}));
    inMask.putRow(1, Nd4j.create(new double[] {1, 1, 1, 1, 0}));
    inMask.putRow(2, Nd4j.create(new double[] {1, 1, 1, 1, 1}));
    graph.setLayerMaskArrays(new INDArray[] {inMask}, null);
    gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, DEFAULT_MIN_ABS_ERROR,
                    PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[] {input}, new INDArray[] {labels});

    assertTrue(msg, gradOK);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 51, Source: GradientCheckTestsComputationGraph.java

Example 10: testL2NormalizeVertex2d

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class the method depends on
@Test
public void testL2NormalizeVertex2d() {
    Nd4j.getRandom().setSeed(12345);

    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1))
                    .activation(Activation.TANH).updater(new NoOp()).graphBuilder()
                    .addInputs("in1").addLayer("d1", new DenseLayer.Builder().nIn(2).nOut(3).build(), "in1")
                    .addVertex("norm", new L2NormalizeVertex(), "d1")
                    .addLayer("out1",
                                    new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.L2).nIn(3)
                                                    .nOut(2).activation(Activation.IDENTITY).build(),
                                    "norm")
                    .setOutputs("out1").build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    int[] mbSizes = new int[] {1, 3, 10};
    for (int minibatch : mbSizes) {

        INDArray in1 = Nd4j.rand(minibatch, 2);

        INDArray labels1 = Nd4j.rand(minibatch, 2);

        String testName = "testL2NormalizeVertex2d() - minibatch = " + minibatch;

        if (PRINT_RESULTS) {
            System.out.println(testName);
            for (int j = 0; j < graph.getNumLayers(); j++)
                System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
        }

        boolean gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                        DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[] {in1},
                        new INDArray[] {labels1});

        assertTrue(testName, gradOK);
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 42, Source: GradientCheckTestsComputationGraph.java

Example 11: testLSTMWithReverseTimeSeriesVertex

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class the method depends on
@Test
public void testLSTMWithReverseTimeSeriesVertex() {

    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf =
            new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1))
                    .updater(new NoOp()).graphBuilder()
                    .addInputs("input").setOutputs("out")
                    .addLayer("lstm_a",
                            new GravesLSTM.Builder().nIn(3).nOut(4)
                                    .activation(Activation.TANH).build(),
                            "input")
                    .addVertex("input_rev", new ReverseTimeSeriesVertex("input"), "input")
                    .addLayer("lstm_b",
                            new GravesLSTM.Builder().nIn(3).nOut(4)
                                    .activation(Activation.TANH).build(),
                            "input_rev")
                    .addVertex("lstm_b_rev", new ReverseTimeSeriesVertex("input"), "lstm_b")
                    .addLayer("out", new RnnOutputLayer.Builder().nIn(4 + 4).nOut(3)
                                    .activation(Activation.SOFTMAX)
                                    .lossFunction(LossFunctions.LossFunction.MCXENT).build(),
                            "lstm_a", "lstm_b_rev")
                    .pretrain(false).backprop(true).build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    Random r = new Random(12345);
    INDArray input  = Nd4j.rand(new int[] {3, 3, 5});
    INDArray labels = Nd4j.zeros(3, 3, 5);
    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 5; j++) {
            labels.putScalar(new int[] {i, r.nextInt(3), j}, 1.0);
        }
    }

    if (PRINT_RESULTS) {
        System.out.println("testLSTMWithReverseTimeSeriesVertex()");
        for (int j = 0; j < graph.getNumLayers(); j++)
            System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
    }

    boolean gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
            DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[] {input},
            new INDArray[] {labels});

    String msg = "testLSTMWithDuplicateToTimeSeries()";
    assertTrue(msg, gradOK);

    //Second: test with input mask arrays.
    INDArray inMask = Nd4j.zeros(3, 5);
    inMask.putRow(0, Nd4j.create(new double[] {1, 1, 1, 0, 0}));
    inMask.putRow(1, Nd4j.create(new double[] {1, 1, 0, 1, 0}));
    inMask.putRow(2, Nd4j.create(new double[] {1, 1, 1, 1, 1}));
    graph.setLayerMaskArrays(new INDArray[] {inMask}, null);
    gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, DEFAULT_MIN_ABS_ERROR,
            PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[] {input}, new INDArray[] {labels});

    assertTrue(msg, gradOK);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 63, Source: GradientCheckTestsComputationGraph.java

Example 12: testMultipleInputsLayer

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class the method depends on
@Test
public void testMultipleInputsLayer() {

    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1))
                    .updater(new NoOp()).activation(Activation.TANH).graphBuilder().addInputs("i0", "i1", "i2")
                    .addLayer("d0", new DenseLayer.Builder().nIn(2).nOut(2).build(), "i0")
                    .addLayer("d1", new DenseLayer.Builder().nIn(2).nOut(2).build(), "i1")
                    .addLayer("d2", new DenseLayer.Builder().nIn(2).nOut(2).build(), "i2")
                    .addLayer("d3", new DenseLayer.Builder().nIn(6).nOut(2).build(), "d0", "d1", "d2")
                    .addLayer("out", new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).nIn(2)
                                    .nOut(2).build(), "d3")
                    .setOutputs("out").pretrain(false).backprop(true).build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    int[] minibatchSizes = {1, 3};
    for (int mb : minibatchSizes) {
        INDArray[] inputs = new INDArray[3];
        for (int i = 0; i < 3; i++) {
            inputs[i] = Nd4j.rand(mb, 2);
        }
        INDArray out = Nd4j.rand(mb, 2);


        String msg = "testMultipleInputsLayer() - minibatchSize = " + mb;
        if (PRINT_RESULTS) {
            System.out.println(msg);
            for (int j = 0; j < graph.getNumLayers(); j++)
                System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
        }

        boolean gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                        DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, inputs,
                        new INDArray[] {out});

        assertTrue(msg, gradOK);
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 43, Source: GradientCheckTestsComputationGraph.java

Example 13: testMultipleOutputsMergeVertex

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class the method depends on
@Test
public void testMultipleOutputsMergeVertex() {
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1))
                    .updater(new NoOp()).activation(Activation.TANH).graphBuilder().addInputs("i0", "i1", "i2")
                    .addLayer("d0", new DenseLayer.Builder().nIn(2).nOut(2).build(), "i0")
                    .addLayer("d1", new DenseLayer.Builder().nIn(2).nOut(2).build(), "i1")
                    .addLayer("d2", new DenseLayer.Builder().nIn(2).nOut(2).build(), "i2")
                    .addVertex("m", new MergeVertex(), "d0", "d1", "d2")
                    .addLayer("D0", new DenseLayer.Builder().nIn(6).nOut(2).build(), "m")
                    .addLayer("D1", new DenseLayer.Builder().nIn(6).nOut(2).build(), "m")
                    .addLayer("D2", new DenseLayer.Builder().nIn(6).nOut(2).build(), "m")
                    .addLayer("out", new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).nIn(6)
                                    .nOut(2).build(), "D0", "D1", "D2")
                    .setOutputs("out").pretrain(false).backprop(true).build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    int[] minibatchSizes = {1, 3};
    for (int mb : minibatchSizes) {
        INDArray[] input = new INDArray[3];
        for (int i = 0; i < 3; i++) {
            input[i] = Nd4j.rand(mb, 2);
        }
        INDArray out = Nd4j.rand(mb, 2);


        String msg = "testMultipleOutputsMergeVertex() - minibatchSize = " + mb;
        if (PRINT_RESULTS) {
            System.out.println(msg);
            for (int j = 0; j < graph.getNumLayers(); j++)
                System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
        }

        boolean gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                        DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, new INDArray[] {out});

        assertTrue(msg, gradOK);
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 44, Source: GradientCheckTestsComputationGraph.java

Example 14: testMultipleOutputsMergeCnn

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class the method depends on
@Test
public void testMultipleOutputsMergeCnn() {
    int inH = 7;
    int inW = 7;

    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1))
                    .updater(new NoOp()).activation(Activation.TANH).graphBuilder().addInputs("input")
                    .addLayer("l0", new ConvolutionLayer.Builder().kernelSize(2, 2).stride(1, 1).padding(0, 0)
                                    .nIn(2).nOut(2).activation(Activation.TANH).build(), "input")
                    .addLayer("l1", new ConvolutionLayer.Builder().kernelSize(2, 2).stride(1, 1).padding(0, 0)
                                    .nIn(2).nOut(2).activation(Activation.TANH).build(), "l0")
                    .addLayer("l2", new ConvolutionLayer.Builder().kernelSize(2, 2).stride(1, 1).padding(0, 0)
                                    .nIn(2).nOut(2).activation(Activation.TANH).build(), "l0")
                    .addVertex("m", new MergeVertex(), "l1", "l2")
                    .addLayer("l3", new ConvolutionLayer.Builder().kernelSize(2, 2).stride(1, 1).padding(0, 0)
                                    .nIn(4).nOut(2).activation(Activation.TANH).build(), "m")
                    .addLayer("l4", new ConvolutionLayer.Builder().kernelSize(2, 2).stride(1, 1).padding(0, 0)
                                    .nIn(4).nOut(2).activation(Activation.TANH).build(), "m")
                    .addLayer("out", new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE)
                            .activation(Activation.IDENTITY).nOut(2)
                                    .build(), "l3", "l4")
                    .setOutputs("out").setInputTypes(InputType.convolutional(inH, inW, 2)).pretrain(false)
                    .backprop(true).build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    int[] minibatchSizes = {1, 3};
    for (int mb : minibatchSizes) {
        INDArray input = Nd4j.rand(new int[] {mb, 2, inH, inW}).muli(4); //Order: examples, channels, height, width
        INDArray out = Nd4j.rand(mb, 2);

        String msg = "testMultipleOutputsMergeVertex() - minibatchSize = " + mb;
        if (PRINT_RESULTS) {
            System.out.println(msg);
            for (int j = 0; j < graph.getNumLayers(); j++)
                System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
        }

        boolean gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                        DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[] {input},
                        new INDArray[] {out});

        assertTrue(msg, gradOK);
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 50, Source: GradientCheckTestsComputationGraph.java

Example 15: testL2NormalizeVertex4d

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class the method depends on
@Test
public void testL2NormalizeVertex4d() {
    Nd4j.getRandom().setSeed(12345);

    int h = 4;
    int w = 4;
    int dIn = 2;

    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1))
                    .activation(Activation.TANH).updater(new NoOp()).graphBuilder()
                    .addInputs("in1")
                    .addLayer("d1", new ConvolutionLayer.Builder().kernelSize(2, 2).stride(1, 1).nOut(2).build(),
                                    "in1")
                    .addVertex("norm", new L2NormalizeVertex(), "d1")
                    .addLayer("out1",
                                    new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.L2).nOut(2)
                                                    .activation(Activation.IDENTITY).build(),
                                    "norm")
                    .setOutputs("out1").setInputTypes(InputType.convolutional(h, w, dIn)).build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    int[] mbSizes = new int[] {1, 3, 10};
    for (int minibatch : mbSizes) {

        INDArray in1 = Nd4j.rand(new int[] {minibatch, dIn, h, w});

        INDArray labels1 = Nd4j.rand(minibatch, 2);

        String testName = "testL2NormalizeVertex4d() - minibatch = " + minibatch;

        if (PRINT_RESULTS) {
            System.out.println(testName);
            for (int j = 0; j < graph.getNumLayers(); j++)
                System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
        }

        boolean gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                        DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[] {in1},
                        new INDArray[] {labels1});

        assertTrue(testName, gradOK);
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 48, Source: GradientCheckTestsComputationGraph.java


Note: The org.deeplearning4j.nn.graph.ComputationGraph.getNumLayers method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from community-contributed open-source projects; copyright in the source code remains with the original authors, and distribution and use must follow the corresponding project's license. Do not reproduce without permission.