

Java Model.score Method Code Examples

This article collects typical usage examples of the Model.score method from the Java class org.deeplearning4j.nn.api.Model. If you are wondering what Model.score does, how to call it, or what real-world usage looks like, the curated code examples below should help. In short, Model.score() returns the model's current loss value, as computed during the most recent fit step or computeGradientAndScore() call. You can also explore further usage examples of the enclosing type, org.deeplearning4j.nn.api.Model.


The following presents 10 code examples of the Model.score method, ordered by popularity.
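
For orientation, here is a minimal sketch (not taken from the examples below) of how Model.score() is typically read. The score is only meaningful after a fit step or an explicit computeGradientAndScore() call; conf (a MultiLayerConfiguration) and data (a DataSet) are assumed to be defined elsewhere.

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;

MultiLayerNetwork net = new MultiLayerNetwork(conf); // conf: assumed MultiLayerConfiguration
net.init();
net.fit(data);             // data: assumed DataSet; fitting updates the internal score
double loss = net.score(); // loss/score of the most recent minibatch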

Example 1: iterationDone

import org.deeplearning4j.nn.api.Model; //import the class/package this method depends on
@Override
public void iterationDone(Model model, int iteration, int epoch) {
    //Check per-iteration termination conditions
    double latestScore = model.score();
    trainer.setLatestScore(latestScore);
    for (IterationTerminationCondition c : esConfig.getIterationTerminationConditions()) {
        if (c.terminate(latestScore)) {
            trainer.setTermination(true);
            trainer.setTerminationReason(c);
            break;
        }
    }
    if (trainer.getTermination()) {
        // use built-in kill switch to stop fit operation
        wrapper.stopFit();
    }

    trainer.incrementIteration();
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 20, Source: EarlyStoppingParallelTrainer.java
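
The listener above delegates the stop decision to IterationTerminationCondition instances. As an illustrative sketch (the threshold value and the model variable are assumptions, not part of the example above), a condition such as MaxScoreIterationTerminationCondition fires once the score exceeds a fixed bound:

import org.deeplearning4j.earlystopping.termination.IterationTerminationCondition;
import org.deeplearning4j.earlystopping.termination.MaxScoreIterationTerminationCondition;

IterationTerminationCondition cond = new MaxScoreIterationTerminationCondition(10.0); // assumed threshold
double latestScore = model.score(); // model: assumed, as in the listener above
if (cond.terminate(latestScore)) {
    // training has diverged past the threshold; signal the trainer to stop
}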

Example 2: iterationDone

import org.deeplearning4j.nn.api.Model; //import the class/package this method depends on
@Override
public void iterationDone(Model model, int iteration) {
    if(m_printIterations <= 0)
        m_printIterations = 1;
    if(m_iterCount % m_printIterations == 0) {
        invoke();
        double result = model.score();
        m_progressBar.printProgress("Iteration: " + m_iterCount + ", Score: " + result);
    }
    m_iterCount++;
}
 
Developer: braeunlich, Project: anagnostes, Lines of code: 12, Source: TrainProgressIterationListener.java

Example 3: iterationDone

import org.deeplearning4j.nn.api.Model; //import the class/package this method depends on
@Override
public void iterationDone (Model model,
                           int iteration)
{
    iterCount++;

    if ((iterCount % constants.listenerPeriod.getValue()) == 0) {
        invoke();

        final double score = model.score();
        final int count = (int) iterCount;
        logger.info(String.format("Score at iteration %d is %.5f", count, score));
        display(epoch, count, score);
    }
}
 
Developer: Audiveris, Project: audiveris, Lines of code: 16, Source: TrainingPanel.java

Example 4: iterationDone

import org.deeplearning4j.nn.api.Model; //import the class/package this method depends on
@Override
public void iterationDone(Model model, int iteration, int epoch) {
    if (++iterationCount % frequency == 0) {
        double score = model.score();
        scoreVsIter.add(new Pair<>(iterationCount, score));
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 8, Source: CollectScoresIterationListener.java

Example 5: iterationDone

import org.deeplearning4j.nn.api.Model; //import the class/package this method depends on
@Override
public void iterationDone(Model model, int iteration, int epoch) {
    if (printIterations <= 0)
        printIterations = 1;
    if (iteration % printIterations == 0) {
        double score = model.score();
        log.info("Score at iteration {} is {}", iteration, score);
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 10, Source: ScoreIterationListener.java
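
As a usage sketch (an assumption, not part of the snippet above): ScoreIterationListener is normally registered on a model so that the score is logged periodically during fitting. Here net and trainIter are assumed to be an initialized network and a DataSetIterator:

net.setListeners(new ScoreIterationListener(10)); // log the score every 10 iterations
net.fit(trainIter);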

Example 6: testSphereFnMultipleStepsHelper

import org.deeplearning4j.nn.api.Model; //import the class/package this method depends on
private static void testSphereFnMultipleStepsHelper(OptimizationAlgorithm oa, int nOptIter,
                int maxNumLineSearchIter) {
    double[] scores = new double[nOptIter + 1];

    for (int i = 0; i <= nOptIter; i++) {
        Random rng = new DefaultRandom(12345L);
        org.nd4j.linalg.api.rng.distribution.Distribution dist =
                        new org.nd4j.linalg.api.rng.distribution.impl.UniformDistribution(rng, -10, 10);
        NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
                        .maxNumLineSearchIterations(maxNumLineSearchIter).updater(new Sgd(0.1))
                        .layer(new DenseLayer.Builder().nIn(1).nOut(1).build()).build();
        conf.addVariable("W"); //Normally done by ParamInitializers, but obviously that isn't done here

        Model m = new SphereFunctionModel(100, dist, conf);
        if (i == 0) {
            m.computeGradientAndScore();
            scores[0] = m.score(); //Before optimization
        } else {
            ConvexOptimizer opt = getOptimizer(oa, conf, m);
            for( int j=0; j<100; j++ ) {
                opt.optimize();
            }
            m.computeGradientAndScore();
            scores[i] = m.score();
            assertTrue(!Double.isNaN(scores[i]) && !Double.isInfinite(scores[i]));
        }
    }

    if (PRINT_OPT_RESULTS) {
        System.out.println("Multiple optimization iterations (" + nOptIter
                        + " opt. iter.) score vs iteration, maxNumLineSearchIter=" + maxNumLineSearchIter + ": "
                        + oa);
        System.out.println(Arrays.toString(scores));
    }

    for (int i = 1; i < scores.length; i++) {
        assertTrue(scores[i] <= scores[i - 1]);
    }
    assertTrue(scores[scores.length - 1] < 1.0); //Very easy function, expect score ~= 0 with any reasonable number of steps/numLineSearchIter
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 41, Source: TestOptimizers.java

Example 7: testRastriginFnMultipleStepsHelper

import org.deeplearning4j.nn.api.Model; //import the class/package this method depends on
private static void testRastriginFnMultipleStepsHelper(OptimizationAlgorithm oa, int nOptIter,
                int maxNumLineSearchIter) {
    double[] scores = new double[nOptIter + 1];

    for (int i = 0; i <= nOptIter; i++) {
        NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
                        .maxNumLineSearchIterations(maxNumLineSearchIter).miniBatch(false)
                        .updater(new AdaGrad(1e-2))
                        .layer(new DenseLayer.Builder().nIn(1).nOut(1).build()).build();
        conf.addVariable("W"); //Normally done by ParamInitializers, but obviously that isn't done here

        Model m = new RastriginFunctionModel(10, conf);
        int nParams = m.numParams();
        if (i == 0) {
            m.computeGradientAndScore();
            scores[0] = m.score(); //Before optimization
        } else {
            ConvexOptimizer opt = getOptimizer(oa, conf, m);
            opt.getUpdater().setStateViewArray((Layer) m, Nd4j.create(new int[] {1, nParams}, 'c'), true);
            opt.optimize();
            m.computeGradientAndScore();
            scores[i] = m.score();
            assertTrue(!Double.isNaN(scores[i]) && !Double.isInfinite(scores[i]));
        }
    }

    if (PRINT_OPT_RESULTS) {
        System.out.println("Rastrigin: Multiple optimization iterations (" + nOptIter
                        + " opt. iter.) score vs iteration, maxNumLineSearchIter=" + maxNumLineSearchIter + ": "
                        + oa);
        System.out.println(Arrays.toString(scores));
    }
    for (int i = 1; i < scores.length; i++) {
        if (i == 1) {
            assertTrue(scores[i] < scores[i - 1]); //Require at least one step of improvement
        } else {
            assertTrue(scores[i] <= scores[i - 1]);
        }
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 41, Source: TestOptimizers.java

Example 8: testSphereFnOptHelper

import org.deeplearning4j.nn.api.Model; //import the class/package this method depends on
public void testSphereFnOptHelper(OptimizationAlgorithm oa, int numLineSearchIter, int nDimensions) {

    if (PRINT_OPT_RESULTS)
        System.out.println("---------\n Alg= " + oa + ", nIter= " + numLineSearchIter + ", nDimensions= "
                        + nDimensions);

    NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder().maxNumLineSearchIterations(numLineSearchIter)
                    .updater(new Sgd(1e-2))
                    .layer(new DenseLayer.Builder().nIn(1).nOut(1).build()).build();
    conf.addVariable("W"); //Normally done by ParamInitializers, but obviously that isn't done here

    Random rng = new DefaultRandom(12345L);
    org.nd4j.linalg.api.rng.distribution.Distribution dist =
                    new org.nd4j.linalg.api.rng.distribution.impl.UniformDistribution(rng, -10, 10);
    Model m = new SphereFunctionModel(nDimensions, dist, conf);
    m.computeGradientAndScore();
    double scoreBefore = m.score();
    assertTrue(!Double.isNaN(scoreBefore) && !Double.isInfinite(scoreBefore));
    if (PRINT_OPT_RESULTS) {
        System.out.println("Before:");
        System.out.println(scoreBefore);
        System.out.println(m.params());
    }

    ConvexOptimizer opt = getOptimizer(oa, conf, m);

    opt.setupSearchState(m.gradientAndScore());
    for( int i=0; i<100; i++ ) {
        opt.optimize();
    }
    m.computeGradientAndScore();
    double scoreAfter = m.score();

    assertTrue(!Double.isNaN(scoreAfter) && !Double.isInfinite(scoreAfter));
    if (PRINT_OPT_RESULTS) {
        System.out.println("After:");
        System.out.println(scoreAfter);
        System.out.println(m.params());
    }

    //Expected behaviour after optimization:
    //(a) score is better (lower) after optimization.
    //(b) Parameters are closer to minimum after optimization (TODO)
    assertTrue("Score did not improve after optimization (b= " + scoreBefore + " ,a= " + scoreAfter + ")",
                    scoreAfter < scoreBefore);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 47, Source: TestOptimizers.java

Example 9: testRosenbrockFnMultipleStepsHelper

import org.deeplearning4j.nn.api.Model; //import the class/package this method depends on
private static void testRosenbrockFnMultipleStepsHelper(OptimizationAlgorithm oa, int nOptIter,
                int maxNumLineSearchIter) {
    double[] scores = new double[nOptIter + 1];

    for (int i = 0; i <= nOptIter; i++) {
        NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
                        .maxNumLineSearchIterations(maxNumLineSearchIter)
                        .updater(new Sgd(1e-1))
                        .stepFunction(new org.deeplearning4j.nn.conf.stepfunctions.NegativeDefaultStepFunction())
                        .layer(new DenseLayer.Builder().nIn(1).nOut(1).build())
                        .build();
        conf.addVariable("W"); //Normally done by ParamInitializers, but obviously that isn't done here

        Model m = new RosenbrockFunctionModel(100, conf);
        if (i == 0) {
            m.computeGradientAndScore();
            scores[0] = m.score(); //Before optimization
        } else {
            ConvexOptimizer opt = getOptimizer(oa, conf, m);
            opt.optimize();
            m.computeGradientAndScore();
            scores[i] = m.score();
            assertTrue("NaN or infinite score: " + scores[i],
                            !Double.isNaN(scores[i]) && !Double.isInfinite(scores[i]));
        }
    }

    if (PRINT_OPT_RESULTS) {
        System.out.println("Rosenbrock: Multiple optimization iterations ( " + nOptIter
                        + " opt. iter.) score vs iteration, maxNumLineSearchIter= " + maxNumLineSearchIter + ": "
                        + oa);
        System.out.println(Arrays.toString(scores));
    }
    for (int i = 1; i < scores.length; i++) {
        if (i == 1) {
            assertTrue(scores[i] < scores[i - 1]); //Require at least one step of improvement
        } else {
            assertTrue(scores[i] <= scores[i - 1]);
        }
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 42, Source: TestOptimizers.java

Example 10: ModelAndGradient

import org.deeplearning4j.nn.api.Model; //import the class/package this method depends on
public ModelAndGradient(Model model) {
    model.computeGradientAndScore();
    this.gradients = model.gradient().gradientForVariable();
    this.parameters = model.paramTable();
    this.score = model.score();
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 7, Source: ModelAndGradient.java


Note: the org.deeplearning4j.nn.api.Model.score method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their original authors; copyright remains with those authors, and distribution and use should follow the corresponding project's license. Do not reproduce without permission.