

Java MultiLayerNetwork.score Method Code Examples

This article collects typical usage examples of the Java method org.deeplearning4j.nn.multilayer.MultiLayerNetwork.score, gathered from open-source code. If you are unsure what MultiLayerNetwork.score does, how to call it, or how it is used in practice, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.deeplearning4j.nn.multilayer.MultiLayerNetwork.


The following shows 7 code examples of the MultiLayerNetwork.score method, ordered by popularity.
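Before the examples, here is a minimal, self-contained sketch of the method, assuming a DL4J version with the same API as the examples below (roughly the 0.9.x-1.0.0 line); the tiny one-layer Iris configuration is illustrative, not prescriptive. score(DataSet) returns the average loss for the given data, and score(DataSet, boolean) additionally exposes the training flag, which controls dropout and similar train-time behaviour:

import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator;
import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.dataset.DataSet;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class ScoreBasics {
    public static void main(String[] args) {
        // A minimal softmax classifier for Iris (4 features, 3 classes)
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(12345)
                .list()
                .layer(0, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                        .activation(Activation.SOFTMAX).nIn(4).nOut(3).build())
                .build();
        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        DataSet iris = new IrisDataSetIterator(150, 150).next();
        double testScore = net.score(iris, false); // test-time scoring (dropout etc. disabled)
        double trainScore = net.score(iris, true); // training-mode scoring
        System.out.println("test=" + testScore + ", train=" + trainScore);
    }
}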

Example 1: testScoringDataSet

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the class that declares the method
@Test
public void testScoringDataSet() {
    ComputationGraphConfiguration configuration = getIrisGraphConfiguration();
    ComputationGraph graph = new ComputationGraph(configuration);
    graph.init();

    MultiLayerConfiguration mlc = getIrisMLNConfiguration();
    MultiLayerNetwork net = new MultiLayerNetwork(mlc);
    net.init();

    DataSetIterator iris = new IrisDataSetIterator(150, 150);
    DataSet ds = iris.next();

    //Now: set parameters of both networks to be identical. Then feedforward, and check we get the same score
    Nd4j.getRandom().setSeed(12345);
    int nParams = getNumParams();
    INDArray params = Nd4j.rand(1, nParams);
    graph.setParams(params.dup());
    net.setParams(params.dup());

    double scoreMLN = net.score(ds, false);
    double scoreCG = graph.score(ds, false);

    assertEquals(scoreMLN, scoreCG, 1e-4);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 26, Source: TestComputationGraphNetwork.java

Example 2: getFinalResult

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the class that declares the method
@Override
public ParameterAveragingTrainingResult getFinalResult(MultiLayerNetwork network) {
    INDArray updaterState = null;
    if (saveUpdater) {
        Updater u = network.getUpdater();
        if (u != null)
            updaterState = u.getStateViewArray();
    }

    Nd4j.getExecutioner().commit();

    Collection<StorageMetaData> storageMetaData = null;
    Collection<Persistable> listenerStaticInfo = null;
    Collection<Persistable> listenerUpdates = null;
    if (listenerRouterProvider != null) {
        StatsStorageRouter r = listenerRouterProvider.getRouter();
        if (r instanceof VanillaStatsStorageRouter) { //TODO this is ugly... need to find a better solution
            VanillaStatsStorageRouter ssr = (VanillaStatsStorageRouter) r;
            storageMetaData = ssr.getStorageMetaData();
            listenerStaticInfo = ssr.getStaticInfo();
            listenerUpdates = ssr.getUpdates();
        }
    }
    return new ParameterAveragingTrainingResult(network.params(), updaterState, network.score(), storageMetaData,
                    listenerStaticInfo, listenerUpdates);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 27, Source: ParameterAveragingTrainingWorker.java

Example 3: call

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the class that declares the method
@Override
public Iterable<Tuple2<Integer, Double>> call(Iterator<DataSet> dataSetIterator) throws Exception {
    if (!dataSetIterator.hasNext()) {
        return Collections.singletonList(new Tuple2<>(0, 0.0));
    }

    DataSetIterator iter = new IteratorDataSetIterator(dataSetIterator, minibatchSize); //Does batching where appropriate

    MultiLayerNetwork network = new MultiLayerNetwork(MultiLayerConfiguration.fromJson(json));
    network.init();
    INDArray val = params.value().unsafeDuplication(); //.value() object will be shared by all executors on each machine -> OK, as params are not modified by score function
    if (val.length() != network.numParams(false))
        throw new IllegalStateException(
                        "Network did not have same number of parameters as the broadcast set parameters");
    network.setParameters(val);

    List<Tuple2<Integer, Double>> out = new ArrayList<>();
    while (iter.hasNext()) {
        DataSet ds = iter.next();
        double score = network.score(ds, false);
        int numExamples = ds.getFeatureMatrix().size(0);
        out.add(new Tuple2<>(numExamples, score * numExamples));
    }

    Nd4j.getExecutioner().commit();

    return out;
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 29, Source: ScoreFlatMapFunction.java
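Each tuple emitted above pairs the minibatch size with the score already multiplied by that size, so the per-example average over the whole dataset can be recovered on the driver as sum(weightedScore) / sum(count). Here is a sketch of that reduction in plain Java; the helper name averageScore is hypothetical, and the real Spark job performs an equivalent aggregation over the RDD of tuples:

import java.util.List;
import scala.Tuple2;

public class ScoreAggregation {
    // Combines per-partition (exampleCount, score * exampleCount) tuples into a
    // single average score per example across the whole dataset.
    static double averageScore(List<Tuple2<Integer, Double>> partitionResults) {
        long totalExamples = 0;
        double totalWeightedScore = 0.0;
        for (Tuple2<Integer, Double> t : partitionResults) {
            totalExamples += t._1();      // examples in this minibatch
            totalWeightedScore += t._2(); // score pre-weighted by example count
        }
        return totalExamples == 0 ? 0.0 : totalWeightedScore / totalExamples;
    }
}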

Example 4: evaluateNet

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the class that declares the method
private static String evaluateNet(MultiLayerNetwork net, List<INDArray> featuresTest, List<INDArray> labelsTest) {
	List<Triple<Double, Integer, INDArray>> scoredList = new ArrayList<>();

	// run through all example chunks
	for(int i = 0; i < featuresTest.size(); i++) {
		INDArray testData = featuresTest.get(i);
		INDArray labels = labelsTest.get(i);
		int nRows = testData.rows();

		// go through each example individually
		for(int j = 0; j < nRows; j++) {
			INDArray example = testData.getRow(j);
			int digit = (int)labels.getDouble(j);
			double score = net.score(new DataSet(example,example));
			scoredList.add(new ImmutableTriple<>(score, digit, example));
		}
	}

	// sort for increasing score
	Collections.sort(scoredList, new Comparator<Triple<Double, Integer, INDArray>>() {
		@Override
		public int compare(Triple<Double, Integer, INDArray> o1, Triple<Double, Integer, INDArray> o2) {
			return(o1.getLeft().compareTo(o2.getLeft()));
		}
	});

	// grid search for error threshold (maximizes f-measure)
	String bestResult = "";
	float fMeasureMax = 0.0f;

	for(int i = 1; i < 30; i++) {
		float errorThreshold = (float) (i * 0.05);
		String result = evaluateForThreshold(errorThreshold, scoredList);
		float fMeasure = Float.valueOf(result.substring(0, result.indexOf(' ')));

		System.out.println(result);
		
		if(fMeasure >= fMeasureMax) {
			bestResult = result;
			fMeasureMax = fMeasure;
		}
	}

	return bestResult;
}
 
Developer: matthiaszimmermann, Project: ml_demo, Lines: 46, Source: MammographyAutoencoder.java
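Note how this example uses score as a reconstruction error: for an autoencoder, net.score(new DataSet(example, example)) is the loss of reconstructing an input from itself, so higher scores mark harder-to-reconstruct (more anomalous) examples. The evaluateForThreshold helper it calls is not shown above; as a rough illustration of the underlying idea only, here is a hypothetical fixed-threshold check (an assumption, not the original's F-measure grid search):

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.dataset.DataSet;

public class ReconstructionThreshold {
    // Hypothetical helper: flags an example as anomalous when its autoencoder
    // reconstruction error exceeds a caller-chosen threshold.
    static boolean isAnomaly(MultiLayerNetwork autoencoder, INDArray example, double threshold) {
        // score(new DataSet(x, x)) = loss of reconstructing x from itself
        double reconstructionError = autoencoder.score(new DataSet(example, example));
        return reconstructionError > threshold;
    }
}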

Example 5: testGradientCNNMLN

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the class that declares the method
@Test
public void testGradientCNNMLN() {
    //Parameterized test, testing combinations of:
    // (a) activation function
    // (b) Whether to test at random initialization, or after some learning (i.e., 'characteristic mode of operation')
    // (c) Loss function (with specified output activations)
    Activation[] activFns = {Activation.SIGMOID, Activation.TANH};
    boolean[] characteristic = {false, true}; //If true: run some backprop steps first

    LossFunctions.LossFunction[] lossFunctions =
            {LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD, LossFunctions.LossFunction.MSE};
    Activation[] outputActivations = {Activation.SOFTMAX, Activation.TANH}; //i.e., lossFunctions[i] used with outputActivations[i] here

    DataSet ds = new IrisDataSetIterator(150, 150).next();
    ds.normalizeZeroMeanZeroUnitVariance();
    INDArray input = ds.getFeatureMatrix();
    INDArray labels = ds.getLabels();

    for (Activation afn : activFns) {
        for (boolean doLearningFirst : characteristic) {
            for (int i = 0; i < lossFunctions.length; i++) {
                LossFunctions.LossFunction lf = lossFunctions[i];
                Activation outputActivation = outputActivations[i];

                MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder()
                        .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT).updater(new NoOp())
                        .weightInit(WeightInit.XAVIER).seed(12345L).list()
                        .layer(0, new ConvolutionLayer.Builder(1, 1).nOut(6).activation(afn).build())
                        .layer(1, new OutputLayer.Builder(lf).activation(outputActivation).nOut(3).build())
                        .setInputType(InputType.convolutionalFlat(1, 4, 1)).pretrain(false).backprop(true);

                MultiLayerConfiguration conf = builder.build();

                MultiLayerNetwork mln = new MultiLayerNetwork(conf);
                mln.init();
                String name = new Object() {}.getClass().getEnclosingMethod().getName();

                if (doLearningFirst) {
                    //Run a number of iterations of learning
                    mln.setInput(ds.getFeatures());
                    mln.setLabels(ds.getLabels());
                    mln.computeGradientAndScore();
                    double scoreBefore = mln.score();
                    for (int j = 0; j < 10; j++)
                        mln.fit(ds);
                    mln.computeGradientAndScore();
                    double scoreAfter = mln.score();
                    //Can't test in 'characteristic mode of operation' if not learning
                    String msg = name + " - score did not (sufficiently) decrease during learning - activationFn="
                            + afn + ", lossFn=" + lf + ", outputActivation=" + outputActivation
                            + ", doLearningFirst= " + doLearningFirst + " (before=" + scoreBefore
                            + ", scoreAfter=" + scoreAfter + ")";
                    assertTrue(msg, scoreAfter < 0.8 * scoreBefore);
                }

                if (PRINT_RESULTS) {
                    System.out.println(name + " - activationFn=" + afn + ", lossFn=" + lf + ", outputActivation="
                            + outputActivation + ", doLearningFirst=" + doLearningFirst);
                    for (int j = 0; j < mln.getnLayers(); j++)
                        System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
                }

                boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                        DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

                assertTrue(gradOK);
            }
        }
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 72, Source: CNNGradientCheckTest.java
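This example also shows the no-argument overload: mln.score() does not score a dataset itself, it returns the value cached by the most recent computeGradientAndScore() (or fit()) call, which is why the input and labels are set first. A minimal sketch of that pattern, with the illustrative helper name lossFor:

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.api.ndarray.INDArray;

public class CachedScore {
    // Sets the data, triggers a gradient/score computation, then reads the cached score.
    static double lossFor(MultiLayerNetwork mln, INDArray features, INDArray labels) {
        mln.setInput(features);
        mln.setLabels(labels);
        mln.computeGradientAndScore(); // forward + backward pass; caches the score
        return mln.score();            // no new forward pass; returns the cached value
    }
}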

Example 6: testDropoutSimple

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the class that declares the method
@Test
public void testDropoutSimple() throws Exception {
    //Testing dropout with a single layer
    //Layer input: values should be set to either 0.0 or 2.0x original value

    int nIn = 8;
    int nOut = 8;

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                    .updater(new Sgd())
                    .dropOut(0.5).list()
                    .layer(0, new OutputLayer.Builder().activation(Activation.IDENTITY)
                                    .lossFunction(LossFunctions.LossFunction.MSE).nIn(nIn).nOut(nOut)
                                    .weightInit(WeightInit.XAVIER).build())
                    .backprop(true).pretrain(false).build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    net.getLayer(0).getParam("W").assign(Nd4j.eye(nIn));

    int nTests = 15;

    Nd4j.getRandom().setSeed(12345);
    int noDropoutCount = 0;
    for (int i = 0; i < nTests; i++) {
        INDArray in = Nd4j.rand(1, nIn);
        INDArray out = Nd4j.rand(1, nOut);
        INDArray inCopy = in.dup();

        List<INDArray> l = net.feedForward(in, true);

        INDArray postDropout = l.get(l.size() - 1);
        //Dropout occurred. Expect inputs to be either scaled 2x original, or set to 0.0 (with dropout = 0.5)
        boolean anyDropped = false;
        for (int j = 0; j < inCopy.length(); j++) {
            double origValue = inCopy.getDouble(j);
            double doValue = postDropout.getDouble(j);
            if (doValue > 0.0) {
                //Input was kept -> should be scaled by factor of (1.0/0.5 = 2)
                assertEquals(origValue * 2.0, doValue, 0.0001);
            } else {
                anyDropped = true;
            }
        }
        if (!anyDropped)
            noDropoutCount++; //Count passes where nothing was dropped; otherwise the final check can never fire

        //Do forward pass
        //(1) ensure dropout ISN'T being applied for forward pass at test time
        //(2) ensure dropout ISN'T being applied for test time scoring
        //If dropout is applied at test time: outputs + score will differ between passes
        INDArray in2 = Nd4j.rand(1, nIn);
        INDArray out2 = Nd4j.rand(1, nOut);
        INDArray outTest1 = net.output(in2, false);
        INDArray outTest2 = net.output(in2, false);
        INDArray outTest3 = net.output(in2, false);
        assertEquals(outTest1, outTest2);
        assertEquals(outTest1, outTest3);

        double score1 = net.score(new DataSet(in2, out2), false);
        double score2 = net.score(new DataSet(in2, out2), false);
        double score3 = net.score(new DataSet(in2, out2), false);
        assertEquals(score1, score2, 0.0);
        assertEquals(score1, score3, 0.0);
    }

    if (noDropoutCount >= nTests / 3) {
        //at 0.5 dropout ratio and more than a few inputs, expect only a very small number of instances where
        //no dropout occurs, just due to random chance
        fail("Too many instances of dropout not being applied");
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 69, Source: TestDropout.java
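The identical scores asserted above follow from the training flag of score(DataSet, boolean): with training=false dropout is skipped, so repeated calls on the same data must agree exactly, while training=true samples a fresh dropout mask per call and the scores generally differ. A small sketch of that contract, with an illustrative helper name:

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.dataset.DataSet;

public class DropoutScoreCheck {
    // For a dropout-configured net, test-time scoring should be reproducible.
    static boolean testTimeDeterministic(MultiLayerNetwork net, DataSet ds) {
        return net.score(ds, false) == net.score(ds, false);
    }
}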

Example 7: testOptimizersMLP

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the class that declares the method
@Test
public void testOptimizersMLP() {
    //Check that the score actually decreases over time

    DataSetIterator iter = new IrisDataSetIterator(150, 150);

    OptimizationAlgorithm[] toTest =
                    {OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT, OptimizationAlgorithm.LINE_GRADIENT_DESCENT,
                                    OptimizationAlgorithm.CONJUGATE_GRADIENT, OptimizationAlgorithm.LBFGS
                    //OptimizationAlgorithm.HESSIAN_FREE	//Known to not work
                    };

    DataSet ds = iter.next();
    ds.normalizeZeroMeanZeroUnitVariance();

    for (OptimizationAlgorithm oa : toTest) {
        int nIter = 10;
        MultiLayerNetwork network = new MultiLayerNetwork(getMLPConfigIris(oa));
        network.init();
        double score = network.score(ds);
        assertTrue(score != 0.0 && !Double.isNaN(score));

        if (PRINT_OPT_RESULTS)
            System.out.println("testOptimizersMLP() - " + oa);

        int nCallsToOptimizer = 30;
        double[] scores = new double[nCallsToOptimizer + 1];
        scores[0] = score;
        for (int i = 0; i < nCallsToOptimizer; i++) {
            for( int j=0; j<nIter; j++ ) {
                network.fit(ds);
            }
            double scoreAfter = network.score(ds);
            scores[i + 1] = scoreAfter;
            assertTrue("Score is NaN after optimization", !Double.isNaN(scoreAfter));
            assertTrue("OA= " + oa + ", before= " + score + ", after= " + scoreAfter, scoreAfter <= score);
            score = scoreAfter;
        }

        if (PRINT_OPT_RESULTS)
            System.out.println(oa + " - " + Arrays.toString(scores));
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 44, Source: TestOptimizers.java


Note: The org.deeplearning4j.nn.multilayer.MultiLayerNetwork.score examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution or use must comply with each project's license. Do not reproduce without permission.