

Java ComputationGraph.numParams Method Code Examples

This article collects typical usage examples of the Java method org.deeplearning4j.nn.graph.ComputationGraph.numParams. If you have been wondering what exactly ComputationGraph.numParams does, how to use it, or where to find examples of it, the hand-picked code samples below should help. You can also explore further usage examples of the containing class, org.deeplearning4j.nn.graph.ComputationGraph.


The sections below present 15 code examples of the ComputationGraph.numParams method, ordered by popularity.
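
Before working through the examples, here is a minimal, self-contained sketch of the method itself. It mirrors the builder API used in the examples below (on older DL4J versions you may additionally need .pretrain(false).backprop(true) before build()); the class name NumParamsExample, the layer names, and the layer sizes are illustrative assumptions, not taken from any particular example:

import org.deeplearning4j.nn.conf.ComputationGraphConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.graph.ComputationGraph;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class NumParamsExample {
    public static void main(String[] args) {
        ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
                .graphBuilder()
                .addInputs("input")
                .addLayer("dense", new DenseLayer.Builder().nIn(4).nOut(5)
                                .activation(Activation.TANH).build(), "input")
                .addLayer("out", new OutputLayer.Builder()
                                .lossFunction(LossFunctions.LossFunction.MCXENT)
                                .activation(Activation.SOFTMAX).nIn(5).nOut(3).build(), "dense")
                .setOutputs("out")
                .build();

        ComputationGraph graph = new ComputationGraph(conf);
        graph.init(); // parameters are allocated only after init()

        // Dense layer: 4*5 weights + 5 biases = 25; output layer: 5*3 + 3 = 18
        System.out.println("numParams = " + graph.numParams()); // prints 43
    }
}

Since each dense layer contributes nIn*nOut weights plus nOut biases, the expected total here is (4*5 + 5) + (5*3 + 3) = 43 — the same arithmetic that several of the tests below assert against graph.numParams().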

Example 1: call

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class this method depends on
@Override
public Iterable<T[]> call(Iterator<MultiDataSet> dataSetIterator) throws Exception {
    if (!dataSetIterator.hasNext()) {
        return Collections.emptyList();
    }

    INDArray val = params.value().unsafeDuplication();
    ComputationGraph graph = new ComputationGraph(ComputationGraphConfiguration.fromJson(json.getValue()));
    graph.init();
    if (val.length() != graph.numParams(false))
        throw new IllegalStateException(
                        "Network did not have same number of parameters as the broadcast set parameters");
    graph.setParams(val);

    T[] eval = graph.doEvaluation(
                    new SparkAMDSI(new IteratorMultiDataSetIterator(dataSetIterator, evalBatchSize), 2, true),
                    evaluations);
    return Collections.singletonList(eval);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 20, Source file: IEvaluateMDSFlatMapFunction.java

Example 2: getVaeLayer

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class this method depends on
@Override
public VariationalAutoencoder getVaeLayer() {
    ComputationGraph network =
                    new ComputationGraph(ComputationGraphConfiguration.fromJson((String) jsonConfig.getValue()));
    network.init();
    INDArray val = ((INDArray) params.value()).unsafeDuplication();
    if (val.length() != network.numParams(false))
        throw new IllegalStateException(
                        "Network did not have same number of parameters as the broadcasted set parameters");
    network.setParams(val);

    Layer l = network.getLayer(0);
    if (!(l instanceof VariationalAutoencoder)) {
        throw new RuntimeException(
                        "Cannot use CGVaeReconstructionErrorWithKeyFunction on network that doesn't have a VAE "
                                        + "layer as layer 0. Layer type: " + l.getClass());
    }
    return (VariationalAutoencoder) l;
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 20, Source file: CGVaeReconstructionErrorWithKeyFunction.java

Example 3: getVaeLayer

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class this method depends on
@Override
public VariationalAutoencoder getVaeLayer() {
    ComputationGraph network =
                    new ComputationGraph(ComputationGraphConfiguration.fromJson((String) jsonConfig.getValue()));
    network.init();
    INDArray val = ((INDArray) params.value()).unsafeDuplication();
    if (val.length() != network.numParams(false))
        throw new IllegalStateException(
                        "Network did not have same number of parameters as the broadcasted set parameters");
    network.setParams(val);

    Layer l = network.getLayer(0);
    if (!(l instanceof VariationalAutoencoder)) {
        throw new RuntimeException(
                        "Cannot use CGVaeReconstructionProbWithKeyFunction on network that doesn't have a VAE "
                                        + "layer as layer 0. Layer type: " + l.getClass());
    }
    return (VariationalAutoencoder) l;
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 20, Source file: CGVaeReconstructionProbWithKeyFunction.java

Example 4: call

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class this method depends on
@Override
public Iterable<Tuple2<Integer, Double>> call(Iterator<MultiDataSet> dataSetIterator) throws Exception {
    if (!dataSetIterator.hasNext()) {
        return Collections.singletonList(new Tuple2<>(0, 0.0));
    }

    MultiDataSetIterator iter = new IteratorMultiDataSetIterator(dataSetIterator, minibatchSize); //Does batching where appropriate


    ComputationGraph network = new ComputationGraph(ComputationGraphConfiguration.fromJson(json));
    network.init();
    INDArray val = params.value().unsafeDuplication(); //.value() is shared by all executors on single machine -> OK, as params are not changed in score function
    if (val.length() != network.numParams(false))
        throw new IllegalStateException(
                        "Network did not have same number of parameters as the broadcast set parameters");
    network.setParams(val);

    List<Tuple2<Integer, Double>> out = new ArrayList<>();
    while (iter.hasNext()) {
        MultiDataSet ds = iter.next();
        double score = network.score(ds, false);
        int numExamples = ds.getFeatures(0).size(0);
        out.add(new Tuple2<>(numExamples, score * numExamples));
    }

    Nd4j.getExecutioner().commit();

    return out;
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 30, Source file: ScoreFlatMapFunctionCGMultiDataSet.java

Example 5: call

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class this method depends on
@Override
public Iterable<Tuple2<Integer, Double>> call(Iterator<DataSet> dataSetIterator) throws Exception {
    if (!dataSetIterator.hasNext()) {
        return Collections.singletonList(new Tuple2<>(0, 0.0));
    }

    DataSetIterator iter = new IteratorDataSetIterator(dataSetIterator, minibatchSize); //Does batching where appropriate

    ComputationGraph network = new ComputationGraph(ComputationGraphConfiguration.fromJson(json));
    network.init();
    INDArray val = params.value().unsafeDuplication(); //.value() is shared by all executors on single machine -> OK, as params are not changed in score function
    if (val.length() != network.numParams(false))
        throw new IllegalStateException(
                        "Network did not have same number of parameters as the broadcast set parameters");
    network.setParams(val);

    List<Tuple2<Integer, Double>> out = new ArrayList<>();
    while (iter.hasNext()) {
        DataSet ds = iter.next();
        double score = network.score(ds, false);
        int numExamples = ds.getFeatureMatrix().size(0);
        out.add(new Tuple2<>(numExamples, score * numExamples));
    }

    Nd4j.getExecutioner().commit();

    return out;
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 29, Source file: ScoreFlatMapFunctionCGDataSet.java

Example 6: testBasicIris

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class this method depends on
@Test
public void testBasicIris() {
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1)).updater(new NoOp())
                    .graphBuilder().addInputs("input")
                    .addLayer("firstLayer",
                                    new DenseLayer.Builder().nIn(4).nOut(5).activation(Activation.TANH).build(),
                                    "input")
                    .addLayer("outputLayer",
                                    new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT)
                                                    .activation(Activation.SOFTMAX).nIn(5).nOut(3).build(),
                                    "firstLayer")
                    .setOutputs("outputLayer").pretrain(false).backprop(true).build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    Nd4j.getRandom().setSeed(12345);
    int nParams = graph.numParams();
    INDArray newParams = Nd4j.rand(1, nParams);
    graph.setParams(newParams);

    DataSet ds = new IrisDataSetIterator(150, 150).next();
    INDArray min = ds.getFeatureMatrix().min(0);
    INDArray max = ds.getFeatureMatrix().max(0);
    ds.getFeatureMatrix().subiRowVector(min).diviRowVector(max.sub(min));
    INDArray input = ds.getFeatureMatrix();
    INDArray labels = ds.getLabels();

    if (PRINT_RESULTS) {
        System.out.println("testBasicIris()");
        for (int j = 0; j < graph.getNumLayers(); j++)
            System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
    }

    boolean gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[] {input},
                    new INDArray[] {labels});

    String msg = "testBasicIris()";
    assertTrue(msg, gradOK);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 45, Source file: GradientCheckTestsComputationGraph.java

Example 7: testBasicIrisWithMerging

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class this method depends on
@Test
public void testBasicIrisWithMerging() {
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1)).updater(new NoOp())
                    .graphBuilder().addInputs("input")
                    .addLayer("l1", new DenseLayer.Builder().nIn(4).nOut(5).activation(Activation.TANH).build(),
                                    "input")
                    .addLayer("l2", new DenseLayer.Builder().nIn(4).nOut(5).activation(Activation.TANH).build(),
                                    "input")
                    .addVertex("merge", new MergeVertex(), "l1", "l2")
                    .addLayer("outputLayer",
                                    new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT)
                                                    .activation(Activation.SOFTMAX).nIn(5 + 5).nOut(3).build(),
                                    "merge")
                    .setOutputs("outputLayer").pretrain(false).backprop(true).build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    int numParams = (4 * 5 + 5) + (4 * 5 + 5) + (10 * 3 + 3);
    assertEquals(numParams, graph.numParams());

    Nd4j.getRandom().setSeed(12345);
    int nParams = graph.numParams();
    INDArray newParams = Nd4j.rand(1, nParams);
    graph.setParams(newParams);

    DataSet ds = new IrisDataSetIterator(150, 150).next();
    INDArray min = ds.getFeatureMatrix().min(0);
    INDArray max = ds.getFeatureMatrix().max(0);
    ds.getFeatureMatrix().subiRowVector(min).diviRowVector(max.sub(min));
    INDArray input = ds.getFeatureMatrix();
    INDArray labels = ds.getLabels();

    if (PRINT_RESULTS) {
        System.out.println("testBasicIrisWithMerging()");
        for (int j = 0; j < graph.getNumLayers(); j++)
            System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
    }

    boolean gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[] {input},
                    new INDArray[] {labels});

    String msg = "testBasicIrisWithMerging()";
    assertTrue(msg, gradOK);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 50, Source file: GradientCheckTestsComputationGraph.java

Example 8: testBasicIrisWithElementWiseNode

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class this method depends on
@Test
public void testBasicIrisWithElementWiseNode() {

    ElementWiseVertex.Op[] ops = new ElementWiseVertex.Op[] {ElementWiseVertex.Op.Add,
                    ElementWiseVertex.Op.Subtract, ElementWiseVertex.Op.Product, ElementWiseVertex.Op.Average, ElementWiseVertex.Op.Max};

    for (ElementWiseVertex.Op op : ops) {

        Nd4j.getRandom().setSeed(12345);
        ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                        .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                        .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1))
                        .updater(new NoOp()).graphBuilder().addInputs("input")
                        .addLayer("l1", new DenseLayer.Builder().nIn(4).nOut(5).activation(Activation.TANH).build(),
                                        "input")
                        .addLayer("l2", new DenseLayer.Builder().nIn(4).nOut(5).activation(Activation.SIGMOID)
                                        .build(), "input")
                        .addVertex("elementwise", new ElementWiseVertex(op), "l1", "l2")
                        .addLayer("outputLayer",
                                        new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT)
                                                        .activation(Activation.SOFTMAX).nIn(5).nOut(3).build(),
                                        "elementwise")
                        .setOutputs("outputLayer").pretrain(false).backprop(true).build();

        ComputationGraph graph = new ComputationGraph(conf);
        graph.init();

        int numParams = (4 * 5 + 5) + (4 * 5 + 5) + (5 * 3 + 3);
        assertEquals(numParams, graph.numParams());

        Nd4j.getRandom().setSeed(12345);
        int nParams = graph.numParams();
        INDArray newParams = Nd4j.rand(1, nParams);
        graph.setParams(newParams);

        DataSet ds = new IrisDataSetIterator(150, 150).next();
        INDArray min = ds.getFeatureMatrix().min(0);
        INDArray max = ds.getFeatureMatrix().max(0);
        ds.getFeatureMatrix().subiRowVector(min).diviRowVector(max.sub(min));
        INDArray input = ds.getFeatureMatrix();
        INDArray labels = ds.getLabels();

        if (PRINT_RESULTS) {
            System.out.println("testBasicIrisWithElementWiseVertex(op=" + op + ")");
            for (int j = 0; j < graph.getNumLayers(); j++)
                System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
        }

        boolean gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                        DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[] {input},
                        new INDArray[] {labels});

        String msg = "testBasicIrisWithElementWiseVertex(op=" + op + ")";
        assertTrue(msg, gradOK);
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 57, Source file: GradientCheckTestsComputationGraph.java

Example 9: testBasicIrisWithElementWiseNodeInputSizeGreaterThanTwo

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class this method depends on
@Test
public void testBasicIrisWithElementWiseNodeInputSizeGreaterThanTwo() {

    ElementWiseVertex.Op[] ops =
                    new ElementWiseVertex.Op[] {ElementWiseVertex.Op.Add, ElementWiseVertex.Op.Product, ElementWiseVertex.Op.Average, ElementWiseVertex.Op.Max};

    for (ElementWiseVertex.Op op : ops) {

        Nd4j.getRandom().setSeed(12345);
        ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                        .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                        .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1))
                        .updater(new NoOp()).graphBuilder().addInputs("input")
                        .addLayer("l1", new DenseLayer.Builder().nIn(4).nOut(5).activation(Activation.TANH).build(),
                                        "input")
                        .addLayer("l2", new DenseLayer.Builder().nIn(4).nOut(5).activation(Activation.SIGMOID)
                                        .build(), "input")
                        .addLayer("l3", new DenseLayer.Builder().nIn(4).nOut(5).activation(Activation.RELU).build(),
                                        "input")
                        .addVertex("elementwise", new ElementWiseVertex(op), "l1", "l2", "l3")
                        .addLayer("outputLayer",
                                        new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT)
                                                        .activation(Activation.SOFTMAX).nIn(5).nOut(3).build(),
                                        "elementwise")
                        .setOutputs("outputLayer").pretrain(false).backprop(true).build();

        ComputationGraph graph = new ComputationGraph(conf);
        graph.init();

        int numParams = (4 * 5 + 5) + (4 * 5 + 5) + (4 * 5 + 5) + (5 * 3 + 3);
        assertEquals(numParams, graph.numParams());

        Nd4j.getRandom().setSeed(12345);
        int nParams = graph.numParams();
        INDArray newParams = Nd4j.rand(1, nParams);
        graph.setParams(newParams);

        DataSet ds = new IrisDataSetIterator(150, 150).next();
        INDArray min = ds.getFeatureMatrix().min(0);
        INDArray max = ds.getFeatureMatrix().max(0);
        ds.getFeatureMatrix().subiRowVector(min).diviRowVector(max.sub(min));
        INDArray input = ds.getFeatureMatrix();
        INDArray labels = ds.getLabels();

        if (PRINT_RESULTS) {
            System.out.println("testBasicIrisWithElementWiseVertex(op=" + op + ")");
            for (int j = 0; j < graph.getNumLayers(); j++)
                System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
        }

        boolean gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                        DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[] {input},
                        new INDArray[] {labels});

        String msg = "testBasicIrisWithElementWiseVertex(op=" + op + ")";
        assertTrue(msg, gradOK);
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 59, Source file: GradientCheckTestsComputationGraph.java

Example 10: testBasicIrisTripletStackingL2Loss

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class this method depends on
@Test
public void testBasicIrisTripletStackingL2Loss() {
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf =
                    new NeuralNetConfiguration.Builder().seed(12345)
                                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1))
                                    .updater(new NoOp()).graphBuilder()
                                    .addInputs("input1", "input2", "input3")
                                    .addVertex("stack1", new StackVertex(), "input1", "input2", "input3")
                                    .addLayer("l1", new DenseLayer.Builder().nIn(4).nOut(5)
                                                    .activation(Activation.TANH).build(), "stack1")
                                    .addVertex("unstack0", new UnstackVertex(0, 3), "l1")
                                    .addVertex("unstack1", new UnstackVertex(1, 3), "l1")
                                    .addVertex("unstack2", new UnstackVertex(2, 3), "l1")
                                    .addVertex("l2-1", new L2Vertex(), "unstack1", "unstack0") // x - x-
                                    .addVertex("l2-2", new L2Vertex(), "unstack1", "unstack2") // x - x+
                                    .addLayer("lossLayer",
                                                    new LossLayer.Builder()
                                                                    .lossFunction(LossFunctions.LossFunction.MCXENT)
                                                                    .activation(Activation.SOFTMAX).build(),
                                                    "l2-1", "l2-2")
                                    .setOutputs("lossLayer").pretrain(false).backprop(true).build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    int numParams = (4 * 5 + 5);
    assertEquals(numParams, graph.numParams());

    Nd4j.getRandom().setSeed(12345);
    int nParams = graph.numParams();
    INDArray newParams = Nd4j.rand(1, nParams);
    graph.setParams(newParams);

    INDArray pos = Nd4j.rand(150, 4);
    INDArray anc = Nd4j.rand(150, 4);
    INDArray neg = Nd4j.rand(150, 4);

    INDArray labels = Nd4j.zeros(150, 2);
    Random r = new Random(12345);
    for (int i = 0; i < 150; i++) {
        labels.putScalar(i, r.nextInt(2), 1.0);
    }


    Map<String, INDArray> out = graph.feedForward(new INDArray[] {pos, anc, neg}, true);

    for (String s : out.keySet()) {
        System.out.println(s + "\t" + Arrays.toString(out.get(s).shape()));
    }

    if (PRINT_RESULTS) {
        System.out.println("testBasicIrisTripletStackingL2Loss()");
        for (int j = 0; j < graph.getNumLayers(); j++)
            System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
    }

    boolean gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[] {pos, anc, neg},
                    new INDArray[] {labels});

    String msg = "testBasicIrisTripletStackingL2Loss()";
    assertTrue(msg, gradOK);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 66, Source file: GradientCheckTestsComputationGraph.java

Example 11: testBasicL2

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class this method depends on
@Test
public void testBasicL2() {
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1))
                    .activation(Activation.TANH).updater(new NoOp()).graphBuilder()
                    .addInputs("in1", "in2").addLayer("d0", new DenseLayer.Builder().nIn(2).nOut(2).build(), "in1")
                    .addLayer("d1", new DenseLayer.Builder().nIn(2).nOut(2).build(), "in2")
                    .addVertex("l2", new L2Vertex(), "d0", "d1")
                    .addLayer("out", new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.L2).nIn(1)
                                    .nOut(1).activation(Activation.IDENTITY).build(), "l2")
                    .setOutputs("out").pretrain(false).backprop(true).build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();


    Nd4j.getRandom().setSeed(12345);
    int nParams = graph.numParams();
    INDArray newParams = Nd4j.rand(1, nParams);
    graph.setParams(newParams);

    int[] mbSizes = new int[] {1, 3, 10};
    for (int minibatch : mbSizes) {

        INDArray in1 = Nd4j.rand(minibatch, 2);
        INDArray in2 = Nd4j.rand(minibatch, 2);

        INDArray labels = Nd4j.rand(minibatch, 1);

        String testName = "testBasicL2() - minibatch = " + minibatch;

        if (PRINT_RESULTS) {
            System.out.println(testName);
            for (int j = 0; j < graph.getNumLayers(); j++)
                System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
        }

        boolean gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                        DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[] {in1, in2},
                        new INDArray[] {labels});

        assertTrue(testName, gradOK);
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 47, Source file: GradientCheckTestsComputationGraph.java

Example 12: testBasicStackUnstack

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class this method depends on
@Test
public void testBasicStackUnstack() {

    int layerSizes = 2;

    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1))
                    .activation(Activation.TANH).updater(new NoOp()).graphBuilder()
                    .addInputs("in1", "in2")
                    .addLayer("d0", new DenseLayer.Builder().nIn(layerSizes).nOut(layerSizes).build(), "in1")
                    .addLayer("d1", new DenseLayer.Builder().nIn(layerSizes).nOut(layerSizes).build(), "in2")
                    .addVertex("stack", new StackVertex(), "d0", "d1")
                    .addLayer("d2", new DenseLayer.Builder().nIn(layerSizes).nOut(layerSizes).build(), "stack")
                    .addVertex("u1", new UnstackVertex(0, 2), "d2").addVertex("u2", new UnstackVertex(1, 2), "d2")
                    .addLayer("out1", new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.L2)
                                    .nIn(layerSizes).nOut(layerSizes).activation(Activation.IDENTITY).build(), "u1")
                    .addLayer("out2", new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.L2)
                                    .nIn(layerSizes).nOut(2).activation(Activation.IDENTITY).build(), "u2")
                    .setOutputs("out1", "out2").pretrain(false).backprop(true).build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();


    Nd4j.getRandom().setSeed(12345);
    int nParams = graph.numParams();
    INDArray newParams = Nd4j.rand(1, nParams);
    graph.setParams(newParams);

    int[] mbSizes = new int[] {1, 3, 10};
    for (int minibatch : mbSizes) {

        INDArray in1 = Nd4j.rand(minibatch, layerSizes);
        INDArray in2 = Nd4j.rand(minibatch, layerSizes);

        INDArray labels1 = Nd4j.rand(minibatch, 2);
        INDArray labels2 = Nd4j.rand(minibatch, 2);

        String testName = "testBasicStackUnstack() - minibatch = " + minibatch;

        if (PRINT_RESULTS) {
            System.out.println(testName);
            for (int j = 0; j < graph.getNumLayers(); j++)
                System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
        }

        boolean gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                        DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[] {in1, in2},
                        new INDArray[] {labels1, labels2});

        assertTrue(testName, gradOK);
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 56, Source file: GradientCheckTestsComputationGraph.java

Example 13: testBasicStackUnstackDebug

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class this method depends on
@Test
public void testBasicStackUnstackDebug() {
    Nd4j.getRandom().setSeed(12345);

    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1))
                    .activation(Activation.TANH).updater(new NoOp()).graphBuilder()
                    .addInputs("in1", "in2").addLayer("d0", new DenseLayer.Builder().nIn(2).nOut(2).build(), "in1")
                    .addLayer("d1", new DenseLayer.Builder().nIn(2).nOut(2).build(), "in2")
                    .addVertex("stack", new StackVertex(), "d0", "d1")
                    .addVertex("u0", new UnstackVertex(0, 2), "stack")
                    .addVertex("u1", new UnstackVertex(1, 2), "stack")
                    .addLayer("out1",
                                    new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.L2).nIn(2)
                                                    .nOut(2).activation(Activation.IDENTITY).build(),
                                    "u0")
                    .addLayer("out2",
                                    new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.L2).nIn(2)
                                                    .nOut(2).activation(Activation.IDENTITY).build(),
                                    "u1")
                    .setOutputs("out1", "out2").pretrain(false).backprop(true).build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();


    Nd4j.getRandom().setSeed(12345);
    int nParams = graph.numParams();
    INDArray newParams = Nd4j.rand(1, nParams);
    graph.setParams(newParams);

    int[] mbSizes = new int[] {1, 3, 10};
    for (int minibatch : mbSizes) {

        INDArray in1 = Nd4j.rand(minibatch, 2);
        INDArray in2 = Nd4j.rand(minibatch, 2);

        INDArray labels1 = Nd4j.rand(minibatch, 2);
        INDArray labels2 = Nd4j.rand(minibatch, 2);

        String testName = "testBasicStackUnstackDebug() - minibatch = " + minibatch;

        if (PRINT_RESULTS) {
            System.out.println(testName);
            for (int j = 0; j < graph.getNumLayers(); j++)
                System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
        }

        boolean gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                        DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[] {in1, in2},
                        new INDArray[] {labels1, labels2});

        assertTrue(testName, gradOK);
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 57, Source file: GradientCheckTestsComputationGraph.java

Example 14: testBasicStackUnstackVariableLengthTS

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class this method depends on
@Test
public void testBasicStackUnstackVariableLengthTS() {

    int layerSizes = 2;

    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1))
                    .activation(Activation.TANH).updater(new NoOp()).graphBuilder()
                    .addInputs("in1", "in2")
                    .addLayer("d0", new GravesLSTM.Builder().nIn(layerSizes).nOut(layerSizes).build(), "in1")
                    .addLayer("d1", new GravesLSTM.Builder().nIn(layerSizes).nOut(layerSizes).build(), "in2")
                    .addVertex("stack", new StackVertex(), "d0", "d1")
                    .addLayer("d2", new GravesLSTM.Builder().nIn(layerSizes).nOut(layerSizes).build(), "stack")
                    .addVertex("u1", new UnstackVertex(0, 2), "d2").addVertex("u2", new UnstackVertex(1, 2), "d2")
                    .addLayer("p1", new GlobalPoolingLayer.Builder(PoolingType.AVG).build(), "u1")
                    .addLayer("p2", new GlobalPoolingLayer.Builder(PoolingType.AVG).build(), "u2")
                    .addLayer("out1", new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.L2)
                                    .nIn(layerSizes).nOut(layerSizes).activation(Activation.IDENTITY).build(), "p1")
                    .addLayer("out2", new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.L2)
                                    .nIn(layerSizes).nOut(2).activation(Activation.IDENTITY).build(), "p2")
                    .setOutputs("out1", "out2").pretrain(false).backprop(true).build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();


    Nd4j.getRandom().setSeed(12345);
    int nParams = graph.numParams();
    INDArray newParams = Nd4j.rand(1, nParams);
    graph.setParams(newParams);

    int[] mbSizes = new int[] {1, 3, 10};
    for (int minibatch : mbSizes) {

        INDArray in1 = Nd4j.rand(new int[] {minibatch, layerSizes, 4});
        INDArray in2 = Nd4j.rand(new int[] {minibatch, layerSizes, 5});
        INDArray inMask1 = Nd4j.zeros(minibatch, 4);
        inMask1.get(NDArrayIndex.all(), NDArrayIndex.interval(0, 3)).assign(1);
        INDArray inMask2 = Nd4j.zeros(minibatch, 5);
        inMask2.get(NDArrayIndex.all(), NDArrayIndex.interval(0, 4)).assign(1);

        INDArray labels1 = Nd4j.rand(new int[] {minibatch, 2});
        INDArray labels2 = Nd4j.rand(new int[] {minibatch, 2});

        String testName = "testBasicStackUnstackVariableLengthTS() - minibatch = " + minibatch;

        if (PRINT_RESULTS) {
            System.out.println(testName);
            for (int j = 0; j < graph.getNumLayers(); j++)
                System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
        }

        graph.setLayerMaskArrays(new INDArray[] {inMask1, inMask2}, null);

        boolean gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                        DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[] {in1, in2},
                        new INDArray[] {labels1, labels2}, new INDArray[] {inMask1, inMask2}, null);

        assertTrue(testName, gradOK);
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 64, Source file: GradientCheckTestsComputationGraph.java

Example 15: testBasicTwoOutputs

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class this method depends on
@Test
public void testBasicTwoOutputs() {
    Nd4j.getRandom().setSeed(12345);

    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1))
                    .activation(Activation.TANH).updater(new NoOp()).graphBuilder()
                    .addInputs("in1", "in2").addLayer("d0", new DenseLayer.Builder().nIn(2).nOut(2).build(), "in1")
                    .addLayer("d1", new DenseLayer.Builder().nIn(2).nOut(2).build(), "in2")
                    .addLayer("out1",
                                    new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.L2).nIn(2)
                                                    .nOut(2).activation(Activation.IDENTITY).build(),
                                    "d0")
                    .addLayer("out2",
                                    new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.L2).nIn(2)
                                                    .nOut(2).activation(Activation.IDENTITY).build(),
                                    "d1")
                    .setOutputs("out1", "out2").pretrain(false).backprop(true).build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    System.out.println("Num layers: " + graph.getNumLayers());
    System.out.println("Num params: " + graph.numParams());


    Nd4j.getRandom().setSeed(12345);
    int nParams = graph.numParams();
    INDArray newParams = Nd4j.rand(1, nParams);
    graph.setParams(newParams);

    int[] mbSizes = new int[] {1, 3, 10};
    for (int minibatch : mbSizes) {

        INDArray in1 = Nd4j.rand(minibatch, 2);
        INDArray in2 = Nd4j.rand(minibatch, 2);
        INDArray labels1 = Nd4j.rand(minibatch, 2);
        INDArray labels2 = Nd4j.rand(minibatch, 2);

        String testName = "testBasicTwoOutputs() - minibatch = " + minibatch;

        if (PRINT_RESULTS) {
            System.out.println(testName);
            for (int j = 0; j < graph.getNumLayers(); j++)
                System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
        }

        boolean gradOK = GradientCheckUtil.checkGradients(graph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                        DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[] {in1, in2},
                        new INDArray[] {labels1, labels2});
        assertTrue(testName, gradOK);
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 55, Source file: GradientCheckTestsComputationGraph.java


Note: The org.deeplearning4j.nn.graph.ComputationGraph.numParams examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright over the source code; consult each project's license before redistributing or reusing it. Do not reproduce this article without permission.