

Java ComputationGraph.score Method Code Examples

This article collects typical usage examples of the Java method org.deeplearning4j.nn.graph.ComputationGraph.score. If you are wondering what ComputationGraph.score does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also browse further usage examples for org.deeplearning4j.nn.graph.ComputationGraph itself.


The following shows 5 code examples of the ComputationGraph.score method, ordered by popularity by default.
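Before the examples, a minimal self-contained sketch of the two common ways to obtain a score may help: scoring a DataSet directly, and reading the cached score after computeGradientAndScore(). This snippet is an illustration written for this article rather than code from the projects below; the network shape, seed, and random data are arbitrary assumptions.

import org.deeplearning4j.nn.conf.ComputationGraphConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.graph.ComputationGraph;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.dataset.DataSet;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class ScoreSketch {
    public static void main(String[] args) {
        // A minimal graph: one input feeding a 4-in / 2-out softmax output layer
        ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(12345)
                .graphBuilder()
                .addInputs("in")
                .addLayer("out", new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .nIn(4).nOut(2).activation(Activation.SOFTMAX).build(), "in")
                .setOutputs("out")
                .build();
        ComputationGraph net = new ComputationGraph(conf);
        net.init();

        // Arbitrary random features and one-hot labels, just to have something to score
        INDArray features = Nd4j.rand(10, 4);
        INDArray labels = Nd4j.zeros(10, 2);
        for (int i = 0; i < 10; i++)
            labels.putScalar(i, i % 2, 1.0);

        // Variant 1: score a DataSet directly; 'false' scores in evaluation
        // (non-training) mode, as in Examples 3 and 4 below
        double dsScore = net.score(new DataSet(features, labels), false);

        // Variant 2: set input and labels, compute gradients and score, then read
        // the cached value with the no-argument score(), as in Examples 1 and 5
        net.setInput(0, features);
        net.setLabel(0, labels);
        net.computeGradientAndScore();
        double cachedScore = net.score();

        System.out.println("score(DataSet) = " + dsScore + ", score() = " + cachedScore);
    }
}

Both variants report the loss averaged over the examples; the no-argument score() returns the value cached by the most recent computeGradientAndScore() (or fit()) call.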

Example 1: testLambdaConf
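
This JUnit test from CenterLossOutputLayerTest checks that the center-loss lambda hyperparameter actually influences the loss: the test's getGraph helper builds the same graph with lambda = 0.1 and lambda = 0.01, both graphs receive identical random features and one-hot labels, and the scores read via score() after computeGradientAndScore() must differ.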

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class the method depends on
@Test
public void testLambdaConf() {
    double[] lambdas = new double[] {0.1, 0.01};
    double[] results = new double[2];
    int numClasses = 2;

    INDArray input = Nd4j.rand(150, 4);
    INDArray labels = Nd4j.zeros(150, numClasses);
    Random r = new Random(12345);
    for (int i = 0; i < 150; i++) {
        labels.putScalar(i, r.nextInt(numClasses), 1.0);
    }
    ComputationGraph graph;

    for (int i = 0; i < lambdas.length; i++) {
        graph = getGraph(numClasses, lambdas[i]);
        graph.setInput(0, input);
        graph.setLabel(0, labels);
        graph.computeGradientAndScore();
        results[i] = graph.score();
    }

    assertNotEquals(results[0], results[1]);
}
 
Developer: deeplearning4j | Project: deeplearning4j | Lines: 25 | Source: CenterLossOutputLayerTest.java

Example 2: getFinalResult
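
Here, network.score() provides the final training score for a Spark parameter-averaging worker: it is packaged, together with the network parameters, the optional updater state, and any collected listener data, into the ParameterAveragingTrainingResult that the worker hands back for aggregation.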

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class the method depends on
@Override
public ParameterAveragingTrainingResult getFinalResult(ComputationGraph network) {
    INDArray updaterState = null;
    if (saveUpdater) {
        ComputationGraphUpdater u = network.getUpdater();
        if (u != null)
            updaterState = u.getStateViewArray();
    }

    Nd4j.getExecutioner().commit();

    Collection<StorageMetaData> storageMetaData = null;
    Collection<Persistable> listenerStaticInfo = null;
    Collection<Persistable> listenerUpdates = null;
    if (listenerRouterProvider != null) {
        StatsStorageRouter r = listenerRouterProvider.getRouter();
        if (r instanceof VanillaStatsStorageRouter) { //TODO this is ugly... need to find a better solution
            VanillaStatsStorageRouter ssr = (VanillaStatsStorageRouter) r;
            storageMetaData = ssr.getStorageMetaData();
            listenerStaticInfo = ssr.getStaticInfo();
            listenerUpdates = ssr.getUpdates();
        }
    }

    return new ParameterAveragingTrainingResult(network.params(), updaterState, network.score(), storageMetaData,
                    listenerStaticInfo, listenerUpdates);
}
 
Developer: deeplearning4j | Project: deeplearning4j | Lines: 28 | Source: ParameterAveragingTrainingWorker.java

Example 3: call
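
This Spark flat-map function scores one partition of MultiDataSet objects: the network is rebuilt from its JSON configuration and the broadcast parameters, network.score(ds, false) is called per minibatch, and (exampleCount, score × exampleCount) pairs are returned so the per-example average score can be reassembled across executors.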

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class the method depends on
@Override
public Iterable<Tuple2<Integer, Double>> call(Iterator<MultiDataSet> dataSetIterator) throws Exception {
    if (!dataSetIterator.hasNext()) {
        return Collections.singletonList(new Tuple2<>(0, 0.0));
    }

    MultiDataSetIterator iter = new IteratorMultiDataSetIterator(dataSetIterator, minibatchSize); //Does batching where appropriate


    ComputationGraph network = new ComputationGraph(ComputationGraphConfiguration.fromJson(json));
    network.init();
    INDArray val = params.value().unsafeDuplication(); //.value() is shared by all executors on single machine -> OK, as params are not changed in score function
    if (val.length() != network.numParams(false))
        throw new IllegalStateException(
                        "Network did not have same number of parameters as the broadcast set parameters");
    network.setParams(val);

    List<Tuple2<Integer, Double>> out = new ArrayList<>();
    while (iter.hasNext()) {
        MultiDataSet ds = iter.next();
        double score = network.score(ds, false);
        int numExamples = ds.getFeatures(0).size(0);
        out.add(new Tuple2<>(numExamples, score * numExamples));
    }

    Nd4j.getExecutioner().commit();

    return out;
}
 
Developer: deeplearning4j | Project: deeplearning4j | Lines: 30 | Source: ScoreFlatMapFunctionCGMultiDataSet.java

Example 4: call
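
The single-input/single-output counterpart of Example 3: the same broadcast-parameter pattern, but iterating over DataSet minibatches instead of MultiDataSet objects.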

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class the method depends on
@Override
public Iterable<Tuple2<Integer, Double>> call(Iterator<DataSet> dataSetIterator) throws Exception {
    if (!dataSetIterator.hasNext()) {
        return Collections.singletonList(new Tuple2<>(0, 0.0));
    }

    DataSetIterator iter = new IteratorDataSetIterator(dataSetIterator, minibatchSize); //Does batching where appropriate

    ComputationGraph network = new ComputationGraph(ComputationGraphConfiguration.fromJson(json));
    network.init();
    INDArray val = params.value().unsafeDuplication(); //.value() is shared by all executors on single machine -> OK, as params are not changed in score function
    if (val.length() != network.numParams(false))
        throw new IllegalStateException(
                        "Network did not have same number of parameters as the broadcast set parameters");
    network.setParams(val);

    List<Tuple2<Integer, Double>> out = new ArrayList<>();
    while (iter.hasNext()) {
        DataSet ds = iter.next();
        double score = network.score(ds, false);
        int numExamples = ds.getFeatureMatrix().size(0);
        out.add(new Tuple2<>(numExamples, score * numExamples));
    }

    Nd4j.getExecutioner().commit();

    return out;
}
 
Developer: deeplearning4j | Project: deeplearning4j | Lines: 29 | Source: ScoreFlatMapFunctionCGDataSet.java

Example 5: elementWiseMultiplicationLayerTest
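
A gradient-check test for ElementWiseMultiplicationLayer: score() is read after computeGradientAndScore() both before and after five epochs of fitting, and the test asserts that the cosine-proximity loss falls below 80% of its starting value before running the numerical gradient check.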

import org.deeplearning4j.nn.graph.ComputationGraph; // import the package/class the method depends on
@Test
public void elementWiseMultiplicationLayerTest() {

    for (Activation a : new Activation[]{Activation.IDENTITY, Activation.TANH}) {

        ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
                .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT).updater(new NoOp())
                .seed(12345L)
                .weightInit(new UniformDistribution(0, 1))
                .graphBuilder()
                .addInputs("features")
                .addLayer("dense", new DenseLayer.Builder().nIn(4).nOut(4)
                        .activation(Activation.TANH)
                        .build(), "features")
                .addLayer("elementWiseMul", new ElementWiseMultiplicationLayer.Builder().nIn(4).nOut(4)
                        .activation(a)
                        .build(), "dense")
                .addLayer("loss", new LossLayer.Builder(LossFunctions.LossFunction.COSINE_PROXIMITY)
                        .activation(Activation.IDENTITY).build(), "elementWiseMul")
                .setOutputs("loss")
                .pretrain(false).backprop(true).build();

        ComputationGraph netGraph = new ComputationGraph(conf);
        netGraph.init();

        log.info("params before learning: " + netGraph.getLayer(1).paramTable());

        // Run a number of learning iterations on manually created pseudo data.
        // The idea is simple: since the element-wise multiplication layer is just a scaling,
        // we want the cosine similarity to be decided mainly by the fourth value; if everything
        // runs well, we should end up with a large weight for the fourth element.

        INDArray features = Nd4j.create(new double[][]{{1, 2, 3, 4}, {1, 2, 3, 1}, {1, 2, 3, 0}});
        INDArray labels = Nd4j.create(new double[][]{{1, 1, 1, 8}, {1, 1, 1, 2}, {1, 1, 1, 1}});

        netGraph.setInputs(features);
        netGraph.setLabels(labels);
        netGraph.computeGradientAndScore();
        double scoreBefore = netGraph.score();

        String msg;
        for (int epoch = 0; epoch < 5; epoch++)
            netGraph.fit(new INDArray[]{features}, new INDArray[]{labels});
        netGraph.computeGradientAndScore();
        double scoreAfter = netGraph.score();
        // Can't test in 'characteristic mode of operation' if not learning
        msg = "elementWiseMultiplicationLayerTest() - score did not (sufficiently) decrease during learning - activationFn="
                + "Id" + ", lossFn=" + "Cos-sim" + ", outputActivation=" + "Id"
                + ", doLearningFirst=" + "true" + " (before=" + scoreBefore
                + ", scoreAfter=" + scoreAfter + ")";
        assertTrue(msg, scoreAfter < 0.8 * scoreBefore);

        // Expected outcome for linear regression (with only the element-wise multiplication
        // layer): a large weight for the fourth element
        log.info("params after learning: " + netGraph.getLayer(1).paramTable());

        boolean gradOK = checkGradients(netGraph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE,
                new INDArray[]{features}, new INDArray[]{labels});

        msg = "elementWiseMultiplicationLayerTest() - activationFn=" + "ID" + ", lossFn=" + "Cos-sim"
                + ", outputActivation=" + "Id" + ", doLearningFirst=" + "true";
        assertTrue(msg, gradOK);

        TestUtils.testModelSerialization(netGraph);
    }
}
 
Developer: deeplearning4j | Project: deeplearning4j | Lines: 65 | Source: GradientCheckTests.java


Note: the org.deeplearning4j.nn.graph.ComputationGraph.score examples in this article were compiled from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright in the source code remains with the original authors, and redistribution or use should follow each project's License. Do not republish without permission.