当前位置: 首页>>代码示例>>Java>>正文


Java ComputationGraph.outputSingle方法代码示例

本文整理汇总了Java中org.deeplearning4j.nn.graph.ComputationGraph.outputSingle方法的典型用法代码示例。如果您正苦于以下问题:Java ComputationGraph.outputSingle方法的具体用法?Java ComputationGraph.outputSingle怎么用?Java ComputationGraph.outputSingle使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.deeplearning4j.nn.graph.ComputationGraph的用法示例。


在下文中一共展示了ComputationGraph.outputSingle方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: main

import org.deeplearning4j.nn.graph.ComputationGraph; //导入方法依赖的package包/类
/**
 * End-to-end CNN sentence-classification example: downloads the ACL IMDB data,
 * builds a CNN graph over pretrained word vectors, trains for {@code nEpochs},
 * evaluates on the test set, and prints class probabilities for one negative review.
 *
 * @param args unused
 * @throws IOException if the data download, word-vector load, or review read fails
 */
public static void main (String[] args) throws IOException {
    log.info("download and extract data...");
    CNNSentenceClassification.aclImdbDownloader(DATA_URL, DATA_PATH);

    // basic configuration
    int batchSize = 32;
    int vectorSize = 300;               //Size of the word vectors. 300 in the Google News model
    int nEpochs = 1;                    //Number of epochs (full passes of training data) to train on
    int truncateReviewsToLength = 256;  //Truncate reviews with length (# words) greater than this
    int cnnLayerFeatureMaps = 100;      //Number of feature maps / channels / depth for each CNN layer
    PoolingType globalPoolingType = PoolingType.MAX;
    Random rng = new Random(12345); //For shuffling repeatability

    log.info("construct cnn model...");
    ComputationGraph net = CNNSentenceClassification.buildCNNGraph(vectorSize, cnnLayerFeatureMaps, globalPoolingType);
    log.info("number of parameters by layer:");
    for (Layer l : net.getLayers()) {
        log.info("\t" + l.conf().getLayer().getLayerName() + "\t" + l.numParams());
    }

    // Load word vectors and get the DataSetIterators for training and testing
    log.info("loading word vectors and creating DataSetIterators...");
    WordVectors wordVectors = WordVectorSerializer.loadStaticModel(new File(WORD_VECTORS_PATH));
    DataSetIterator trainIter = CNNSentenceClassification.getDataSetIterator(DATA_PATH, true, wordVectors, batchSize,
            truncateReviewsToLength, rng);
    DataSetIterator testIter = CNNSentenceClassification.getDataSetIterator(DATA_PATH, false, wordVectors, batchSize,
            truncateReviewsToLength, rng);

    log.info("starting training...");
    for (int i = 0; i < nEpochs; i++) {
        net.fit(trainIter);
        log.info("Epoch " + i + " complete. Starting evaluation:");
        //Run evaluation. This is on 25k reviews, so can take some time
        Evaluation evaluation = net.evaluate(testIter);
        log.info(evaluation.stats());
    }

    // after training: load a single sentence and generate a prediction
    String pathFirstNegativeFile = FilenameUtils.concat(DATA_PATH, "aclImdb/test/neg/0_2.txt");
    // FIX: the single-argument readFileToString overload is deprecated and reads with the
    // platform-default charset; pass an explicit encoding so results are platform-independent.
    String contentsFirstNegative = FileUtils.readFileToString(new File(pathFirstNegativeFile), "UTF-8");
    INDArray featuresFirstNegative = ((CnnSentenceDataSetIterator)testIter).loadSingleSentence(contentsFirstNegative);
    INDArray predictionsFirstNegative = net.outputSingle(featuresFirstNegative);
    List<String> labels = testIter.getLabels();
    log.info("\n\nPredictions for first negative review:");
    for( int i=0; i<labels.size(); i++ ){
        log.info("P(" + labels.get(i) + ") = " + predictionsFirstNegative.getDouble(i));
    }
}
 
开发者ID:IsaacChanghau,项目名称:Word2VecfJava,代码行数:49,代码来源:DL4JCNNSentClassifyExample.java

示例2: testModelSerializerFrozenLayersCompGraph

import org.deeplearning4j.nn.graph.ComputationGraph; //导入方法依赖的package包/类
@Test
public void testModelSerializerFrozenLayersCompGraph() throws Exception {
    FineTuneConfiguration finetune = new FineTuneConfiguration.Builder().updater(new Sgd(0.1)).build();

    int nIn = 6;
    int nOut = 3;

    // Graph: in -> dense(5) -> dense(4) -> dense(3) -> softmax output(nOut)
    ComputationGraphConfiguration origConf = new NeuralNetConfiguration.Builder().activation(Activation.TANH).graphBuilder().addInputs("in")
                    .addLayer("0", new DenseLayer.Builder().nIn(nIn).nOut(5).build(), "in")
                    .addLayer("1", new DenseLayer.Builder().nIn(5).nOut(4).build(), "0")
                    .addLayer("2", new DenseLayer.Builder().nIn(4).nOut(3).build(), "1")
                    .addLayer("3", new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(
                                    LossFunctions.LossFunction.MCXENT).activation(Activation.SOFTMAX).nIn(3)
                                                    .nOut(nOut).build(),
                                    "2")
                    .setOutputs("3").build();
    ComputationGraph origModel = new ComputationGraph(origConf);
    origModel.init();

    // Freeze everything up to and including vertex "1" via transfer learning
    ComputationGraph withFrozen = new TransferLearning.GraphBuilder(origModel).fineTuneConfiguration(finetune)
                    .setFeatureExtractor("1").build();

    assertTrue(withFrozen.getLayer(0) instanceof FrozenLayer);
    assertTrue(withFrozen.getLayer(1) instanceof FrozenLayer);

    // The frozen state must also be reflected in the configuration, not just the runtime layers
    Map<String, GraphVertex> m = withFrozen.getConfiguration().getVertices();
    Layer l0 = ((LayerVertex) m.get("0")).getLayerConf().getLayer();
    Layer l1 = ((LayerVertex) m.get("1")).getLayerConf().getLayer();
    assertTrue(l0 instanceof org.deeplearning4j.nn.conf.layers.misc.FrozenLayer);
    assertTrue(l1 instanceof org.deeplearning4j.nn.conf.layers.misc.FrozenLayer);

    // Round-trip through serialization; frozen flags must survive
    ComputationGraph restored = TestUtils.testModelSerialization(withFrozen);

    assertTrue(restored.getLayer(0) instanceof FrozenLayer);
    assertTrue(restored.getLayer(1) instanceof FrozenLayer);
    assertFalse(restored.getLayer(2) instanceof FrozenLayer);
    assertFalse(restored.getLayer(3) instanceof FrozenLayer);

    INDArray in = Nd4j.rand(3, nIn);
    INDArray out = withFrozen.outputSingle(in);
    INDArray out2 = restored.outputSingle(in);

    assertEquals(out, out2);

    //Sanity check on train mode:
    out = withFrozen.outputSingle(true, in);
    out2 = restored.outputSingle(true, in);
    // FIX: these train-mode activations were previously computed but never compared, so the
    // "sanity check" asserted nothing. The graph contains no stochastic layers (no dropout),
    // so train-mode outputs must also match exactly between original and deserialized models.
    assertEquals(out, out2);
}
 
开发者ID:deeplearning4j,项目名称:deeplearning4j,代码行数:49,代码来源:TestTransferLearningModelSerializer.java

示例3: testMlnToCompGraph

import org.deeplearning4j.nn.graph.ComputationGraph; //导入方法依赖的package包/类
@Test
public void testMlnToCompGraph() {
    Nd4j.getRandom().setSeed(12345);

    // Three network variants: two CNN-style (cases 0 and 1) and one RNN-style (case 2)
    for (int testCase = 0; testCase < 3; testCase++) {
        final MultiLayerNetwork mln;
        if (testCase == 0) {
            mln = getNet1(false);
        } else if (testCase == 1) {
            mln = getNet1(true);
        } else {
            mln = getNet2();
        }

        boolean cnnInput = testCase <= 1;
        INDArray features = cnnInput ? Nd4j.rand(new int[]{8, 3, 10, 10}) : Nd4j.rand(new int[]{8, 5, 10});
        INDArray targets = cnnInput ? Nd4j.rand(new int[]{8, 10}) : Nd4j.rand(new int[]{8, 10, 10});

        ComputationGraph graph = mln.toComputationGraph();

        // Forward pass must be identical before and after conversion
        assertEquals(mln.output(features), graph.outputSingle(features));

        mln.setInput(features);
        mln.setLabels(targets);
        graph.setInputs(features);
        graph.setLabels(targets);

        // Score and gradients must also agree
        mln.computeGradientAndScore();
        graph.computeGradientAndScore();

        assertEquals(mln.score(), graph.score(), 1e-6);
        assertEquals(mln.gradient().gradient(), graph.gradient().gradient());

        // One fit step on each; resulting parameters must still match
        mln.fit(features, targets);
        graph.fit(new INDArray[]{features}, new INDArray[]{targets});

        assertEquals(mln.params(), graph.params());
    }
}
 
开发者ID:deeplearning4j,项目名称:deeplearning4j,代码行数:49,代码来源:TestNetConversion.java

示例4: testSerializationCompGraph

import org.deeplearning4j.nn.graph.ComputationGraph; //导入方法依赖的package包/类
@Test
public void testSerializationCompGraph() throws Exception {

    // Exercise serialization under every workspace mode
    for (WorkspaceMode workspaceMode : WorkspaceMode.values()) {
        log.info("*** Starting workspace mode: " + workspaceMode);

        Nd4j.getRandom().setSeed(12345);

        // Two stacked bidirectional (ADD-mode) GravesLSTM layers + MSE RNN output layer
        ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
                .activation(Activation.TANH)
                .weightInit(WeightInit.XAVIER)
                .trainingWorkspaceMode(workspaceMode)
                .inferenceWorkspaceMode(workspaceMode)
                .updater(new Adam())
                .graphBuilder()
                .addInputs("in")
                .layer("0", new Bidirectional(Bidirectional.Mode.ADD, new GravesLSTM.Builder().nIn(10).nOut(10).build()), "in")
                .layer("1", new Bidirectional(Bidirectional.Mode.ADD, new GravesLSTM.Builder().nIn(10).nOut(10).build()), "0")
                .layer("2", new RnnOutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE)
                        .nIn(10).nOut(10).build(), "1")
                .setOutputs("2")
                .build();

        ComputationGraph original = new ComputationGraph(conf);
        original.init();

        INDArray in = Nd4j.rand(new int[]{3, 10, 5});
        INDArray labels = Nd4j.rand(new int[]{3, 10, 5});

        // One fit step so the updater carries non-trivial state into serialization
        original.fit(new DataSet(in, labels));

        // Round-trip through ModelSerializer (including updater state)
        byte[] serialized;
        try (ByteArrayOutputStream baos = new ByteArrayOutputStream()) {
            ModelSerializer.writeModel(original, baos, true);
            serialized = baos.toByteArray();
        }

        ComputationGraph restored = ModelSerializer.restoreComputationGraph(new ByteArrayInputStream(serialized), true);

        // Fresh data: inference must match exactly after deserialization
        in = Nd4j.rand(new int[]{3, 10, 5});
        labels = Nd4j.rand(new int[]{3, 10, 5});

        assertEquals(original.outputSingle(in), restored.outputSingle(in));

        original.setInput(0, in);
        restored.setInput(0, in);
        original.setLabels(labels);
        restored.setLabels(labels);

        // Score and gradients must also agree
        original.computeGradientAndScore();
        restored.computeGradientAndScore();

        assertEquals(original.score(), restored.score(), 1e-6);
        assertEquals(original.gradient().gradient(), restored.gradient().gradient());
    }
}
 
开发者ID:deeplearning4j,项目名称:deeplearning4j,代码行数:62,代码来源:BidirectionalTest.java

示例5: testFrozenLayerInstantiationCompGraph

import org.deeplearning4j.nn.graph.ComputationGraph; //导入方法依赖的package包/类
@Test
public void testFrozenLayerInstantiationCompGraph() {

    // Frozen layers instantiated from JSON must behave exactly like ones built via the builder.
    // Baseline graph without any frozen layers:
    ComputationGraphConfiguration plainConf = new NeuralNetConfiguration.Builder().seed(12345).graphBuilder()
                    .addInputs("in")
                    .addLayer("0", new DenseLayer.Builder().nIn(10).nOut(10).activation(Activation.TANH)
                                    .weightInit(WeightInit.XAVIER).build(), "in")
                    .addLayer("1", new DenseLayer.Builder().nIn(10).nOut(10).activation(Activation.TANH)
                                    .weightInit(WeightInit.XAVIER).build(), "0")
                    .addLayer("2", new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(
                                    LossFunctions.LossFunction.MCXENT).activation(Activation.SOFTMAX).nIn(10)
                                                    .nOut(10).build(),
                                    "1")
                    .setOutputs("2").build();

    // Same topology, but with the two dense layers wrapped in FrozenLayer:
    ComputationGraphConfiguration frozenConf = new NeuralNetConfiguration.Builder().seed(12345).graphBuilder()
                    .addInputs("in")
                    .addLayer("0", new org.deeplearning4j.nn.conf.layers.misc.FrozenLayer.Builder()
                                    .layer(new DenseLayer.Builder().nIn(10).nOut(10).activation(Activation.TANH)
                                                    .weightInit(WeightInit.XAVIER).build())
                                    .build(), "in")
                    .addLayer("1", new org.deeplearning4j.nn.conf.layers.misc.FrozenLayer.Builder()
                                    .layer(new DenseLayer.Builder().nIn(10).nOut(10).activation(Activation.TANH)
                                                    .weightInit(WeightInit.XAVIER).build())
                                    .build(), "0")
                    .addLayer("2", new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(
                                    LossFunctions.LossFunction.MCXENT).activation(Activation.SOFTMAX).nIn(10)
                                                    .nOut(10).build(),
                                    "1")
                    .setOutputs("2").build();

    ComputationGraph plainNet = new ComputationGraph(plainConf);
    plainNet.init();
    ComputationGraph frozenNet = new ComputationGraph(frozenConf);
    frozenNet.init();

    // Same seed => identical parameter initialization, frozen or not
    assertEquals(plainNet.params(), frozenNet.params());

    // JSON round-trip of the frozen configuration must be lossless
    String json = frozenConf.toJson();
    ComputationGraphConfiguration fromJson = ComputationGraphConfiguration.fromJson(json);

    assertEquals(frozenConf, fromJson);

    ComputationGraph jsonNet = new ComputationGraph(fromJson);
    jsonNet.init();

    // A net built from the deserialized config must produce identical inference output
    INDArray input = Nd4j.rand(10, 10);

    assertEquals(frozenNet.outputSingle(input), jsonNet.outputSingle(input));
}
 
开发者ID:deeplearning4j,项目名称:deeplearning4j,代码行数:57,代码来源:FrozenLayerTest.java


注:本文中的org.deeplearning4j.nn.graph.ComputationGraph.outputSingle方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。