

Java LossFunction Class Code Examples

This article collects typical usage examples of the Java class org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction. If you are looking for concrete answers to how the LossFunction class is used in Java, the curated examples below should help.


The LossFunction class belongs to the org.nd4j.linalg.lossfunctions.LossFunctions package. Fifteen code examples of the class are shown below, sorted by popularity by default.
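For orientation, the examples below all follow the same basic pattern: choose a LossFunction for the output layer, build a configuration, wrap it in a network, initialize, and fit. The following minimal sketch shows that pattern; the layer sizes, seed, and commented-out iterator are illustrative assumptions, not taken from any example on this page.

import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction;

public class LossFunctionQuickStart {
    public static void main(String[] args) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(12345)
                .list()
                .layer(0, new DenseLayer.Builder().nIn(4).nOut(10)
                        .activation(Activation.TANH).build())
                .layer(1, new OutputLayer.Builder(LossFunction.MCXENT) // the loss function is chosen here
                        .activation(Activation.SOFTMAX).nIn(10).nOut(3).build())
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();
        // net.fit(trainIterator); // trainIterator: any DataSetIterator (hypothetical here)
    }
}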

Example 1: softMaxRegression

import org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction; // import the required package/class
private static MultiLayerNetwork softMaxRegression(int seed,
		int iterations, int numRows, int numColumns, int outputNum) {
	MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
			.seed(seed)
			.gradientNormalization(
					GradientNormalization.ClipElementWiseAbsoluteValue)
			.gradientNormalizationThreshold(1.0)
			.iterations(iterations)
			.momentum(0.5)
			.momentumAfter(Collections.singletonMap(3, 0.9))
			.optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
			.list(1)
			.layer(0,
					new OutputLayer.Builder(
							LossFunction.NEGATIVELOGLIKELIHOOD)
							.activation("softmax")
							.nIn(numColumns * numRows).nOut(outputNum)
							.build()).pretrain(true).backprop(false)
			.build();

	MultiLayerNetwork model = new MultiLayerNetwork(conf);

	return model;
}
 
Developer: PacktPublishing, Project: Machine-Learning-End-to-Endguide-for-Java-developers, Lines: 25, Source: NeuralNetworks.java
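Note that softMaxRegression only constructs the network; it neither initializes nor trains it. A minimal usage sketch (the listener and the commented-out MNIST iterator are assumptions for illustration):

MultiLayerNetwork model = softMaxRegression(123, 10, 28, 28, 10);
model.init(); // allocate parameters; the factory method above does not do this
model.setListeners(new ScoreIterationListener(1));
// model.fit(mnistTrainIterator); // e.g. a MnistDataSetIterator (hypothetical here)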

Example 2: getConfiguration

import org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction; // import the required package/class
@Override
protected MultiLayerConfiguration getConfiguration() {
    return new NeuralNetConfiguration.Builder().seed(parameters.getSeed())
            .gradientNormalization(GradientNormalization.ClipElementWiseAbsoluteValue)
            .gradientNormalizationThreshold(1.0)
            .iterations(parameters.getIterations())
            .momentum(0.5)
            .momentumAfter(Collections.singletonMap(3, 0.9))
            .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
            .list(4)
            .layer(0, new AutoEncoder.Builder().nIn(parameters.getInputSize()).nOut(500)
                    .weightInit(WeightInit.XAVIER).lossFunction(LossFunction.RMSE_XENT)
                    .corruptionLevel(0.3).build())
            .layer(1, new AutoEncoder.Builder().nIn(500).nOut(250)
                    .weightInit(WeightInit.XAVIER).lossFunction(LossFunction.RMSE_XENT)
                    .corruptionLevel(0.3).build())
            .layer(2, new AutoEncoder.Builder().nIn(250).nOut(200)
                    .weightInit(WeightInit.XAVIER).lossFunction(LossFunction.RMSE_XENT)
                    .corruptionLevel(0.3).build())
            .layer(3, new OutputLayer.Builder(LossFunction.NEGATIVELOGLIKELIHOOD)
                    .activation("softmax").nIn(200).nOut(parameters.getOutputSize()).build())
            .pretrain(true).backprop(false).build();
}
 
Developer: amrabed, Project: DL4J, Lines: 23, Source: StackedAutoEncoderModel.java
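Because the configuration sets pretrain(true) and backprop(false), fitting a network built from it runs unsupervised, layer-wise pretraining of the three AutoEncoder layers rather than end-to-end backpropagation. A short usage sketch, assuming a suitable DataSetIterator is available:

MultiLayerNetwork model = new MultiLayerNetwork(getConfiguration());
model.init();
// model.fit(trainIterator); // layer-wise pretraining, per pretrain(true)/backprop(false)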

Example 3: testSetParams

import org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction; // import the required package/class
@Test
public void testSetParams() {
    NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
                    .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT)
                    .updater(new Sgd(1e-1))
                    .layer(new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder().nIn(4).nOut(3)
                                    .weightInit(WeightInit.ZERO).activation(Activation.SOFTMAX)
                                    .lossFunction(LossFunctions.LossFunction.MCXENT).build())
                    .build();

    int numParams = conf.getLayer().initializer().numParams(conf);
    INDArray params = Nd4j.create(1, numParams);
    OutputLayer l = (OutputLayer) conf.getLayer().instantiate(conf,
                    Collections.<IterationListener>singletonList(new ScoreIterationListener(1)), 0, params, true);
    params = l.params();
    l.setParams(params);
    assertEquals(params, l.params());
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 19, Source: OutputLayerTest.java

Example 4: getGraph

import org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction; // import the required package/class
private ComputationGraph getGraph(int numLabels, double lambda) {
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1)).updater(new NoOp())
                    .graphBuilder().addInputs("input1")
                    .addLayer("l1", new DenseLayer.Builder().nIn(4).nOut(5).activation(Activation.RELU).build(),
                                    "input1")
                    .addLayer("lossLayer", new CenterLossOutputLayer.Builder()
                                    .lossFunction(LossFunctions.LossFunction.MCXENT).nIn(5).nOut(numLabels)
                                    .lambda(lambda).activation(Activation.SOFTMAX).build(), "l1")
                    .setOutputs("lossLayer").pretrain(false).backprop(true).build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    return graph;
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 19, Source: CenterLossOutputLayerTest.java

Example 5: getIrisMLPSimpleConfig

import org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction; // import the required package/class
/** Very simple back-prop config set up for Iris.
 * Learning Rate = 0.1
 * No regularization, no Adagrad, no momentum etc. One iteration.
 */
private static MultiLayerConfiguration getIrisMLPSimpleConfig(int[] hiddenLayerSizes,
                Activation activationFunction) {
    NeuralNetConfiguration.ListBuilder lb = new NeuralNetConfiguration.Builder().updater(new Sgd(0.1))
                .seed(12345L).list();

    for (int i = 0; i < hiddenLayerSizes.length; i++) {
        int nIn = (i == 0 ? 4 : hiddenLayerSizes[i - 1]);
        lb.layer(i, new DenseLayer.Builder().nIn(nIn).nOut(hiddenLayerSizes[i]).weightInit(WeightInit.XAVIER)
                        .activation(activationFunction).build());
    }

    lb.layer(hiddenLayerSizes.length,
                    new OutputLayer.Builder(LossFunction.MCXENT).nIn(hiddenLayerSizes[hiddenLayerSizes.length - 1])
                                    .nOut(3).weightInit(WeightInit.XAVIER)
                                    .activation(activationFunction.equals(Activation.IDENTITY) ? Activation.IDENTITY
                                                    : Activation.SOFTMAX)
                                    .build());
    lb.pretrain(false).backprop(true);

    return lb.build();
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 26, Source: BackPropMLPTest.java
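A hypothetical call, for illustration: two hidden layers of five units each for the 4-input/3-output Iris setup this helper targets.

MultiLayerConfiguration conf = getIrisMLPSimpleConfig(new int[] {5, 5}, Activation.TANH);
MultiLayerNetwork net = new MultiLayerNetwork(conf);
net.init();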

Example 6: testRnnTimeStepWithPreprocessor

import org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction; // import the required package/class
@Test
public void testRnnTimeStepWithPreprocessor() {

    MultiLayerConfiguration conf =
                    new NeuralNetConfiguration.Builder()
                                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                                    .list()
                                    .layer(0, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(10)
                                                    .nOut(10).activation(Activation.TANH).build())
                                    .layer(1, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(10)
                                                    .nOut(10).activation(Activation.TANH).build())
                                    .layer(2, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                                    .activation(Activation.SOFTMAX).nIn(10).nOut(10).build())
                                    .inputPreProcessor(0, new FeedForwardToRnnPreProcessor()).pretrain(false)
                                    .backprop(true).build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    INDArray in = Nd4j.rand(1, 10);
    net.rnnTimeStep(in);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 23, Source: MultiLayerTestRNN.java

Example 7: testRnnTimeStepWithPreprocessorGraph

import org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction; // import the required package/class
@Test
public void testRnnTimeStepWithPreprocessorGraph() {

    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .graphBuilder().addInputs("in")
                    .addLayer("0", new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(10).nOut(10)
                                    .activation(Activation.TANH).build(), "in")
                    .addLayer("1", new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(10).nOut(10)
                                    .activation(Activation.TANH).build(), "0")
                    .addLayer("2", new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nIn(10).nOut(10).build(), "1")
                    .setOutputs("2").inputPreProcessor("0", new FeedForwardToRnnPreProcessor()).pretrain(false)
                    .backprop(true).build();

    ComputationGraph net = new ComputationGraph(conf);
    net.init();

    INDArray in = Nd4j.rand(1, 10);
    net.rnnTimeStep(in);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 22, Source: MultiLayerTestRNN.java
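Both rnnTimeStep tests depend on the network carrying hidden state across calls; the FeedForwardToRnnPreProcessor maps each 2D [miniBatch, nIn] input to the 3D format the LSTM layers expect. A sketch of that stateful usage (the helper method and the 5-step loop are illustrative assumptions; shapes follow the nIn(10) used above):

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

// Step a configured RNN (e.g. the net from example 6) through a sequence,
// one time step per call, then reset before an unrelated sequence.
static void stepThroughSequence(MultiLayerNetwork net) {
    for (int t = 0; t < 5; t++) {
        INDArray out = net.rnnTimeStep(Nd4j.rand(1, 10)); // hidden state carries over between calls
        System.out.println("step " + t + " output shape: " + java.util.Arrays.toString(out.shape()));
    }
    net.rnnClearPreviousState(); // forget stored state before starting a new sequence
}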

Example 8: getConfiguration

import org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction; // import the required package/class
@Override
protected MultiLayerConfiguration getConfiguration() {
    final int[] hiddenLayerNodes = parameters.getHiddeLayerNodes();
    final int nLayers = hiddenLayerNodes.length + 1;

    final ListBuilder list = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .iterations(parameters.getIterations()).learningRate(parameters.getLearningRate())
            .rmsDecay(0.95).seed(parameters.getSeed()).regularization(true).l2(0.001)
            .list(nLayers).pretrain(false).backprop(true);

    for (int i = 0; i < nLayers; i++) {
        final int nIn = (i == 0) ? parameters.getInputSize() : hiddenLayerNodes[i - 1];
        if (i < nLayers - 1) {
            final GravesLSTM layer = new GravesLSTM.Builder().nIn(nIn).nOut(hiddenLayerNodes[i])
                    .updater(Updater.RMSPROP).activation("tanh").weightInit(WeightInit.DISTRIBUTION)
                    .dist(new UniformDistribution(-0.08, 0.08)).build();
            list.layer(i, layer);
        } else {
            // Use nIn (the last hidden layer's size) rather than the original hard-coded
            // hiddenLayerNodes[1], which was only correct for exactly two hidden layers.
            final RnnOutputLayer outputLayer = new RnnOutputLayer.Builder(LossFunction.MCXENT)
                    .activation("softmax").updater(Updater.RMSPROP).nIn(nIn)
                    .nOut(parameters.getOutputSize()).weightInit(WeightInit.DISTRIBUTION)
                    .dist(new UniformDistribution(-0.08, 0.08)).build();
            list.layer(i, outputLayer);
        }
    }
    return list.build();
}
 
Developer: amrabed, Project: DL4J, Lines: 40, Source: LstmModel.java

Example 9: testL2

import org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction; // import the required package/class
@Test
public void testL2() {
    LossFunction[] lossFunctionList = {LossFunction.MSE, LossFunction.L1, LossFunction.EXPLL, LossFunction.XENT,
                    LossFunction.MCXENT, LossFunction.SQUARED_LOSS, LossFunction.RECONSTRUCTION_CROSSENTROPY,
                    LossFunction.NEGATIVELOGLIKELIHOOD, LossFunction.COSINE_PROXIMITY, LossFunction.HINGE,
                    LossFunction.SQUARED_HINGE, LossFunction.KL_DIVERGENCE, LossFunction.MEAN_ABSOLUTE_ERROR,
                    LossFunction.L2, LossFunction.MEAN_ABSOLUTE_PERCENTAGE_ERROR,
                    LossFunction.MEAN_SQUARED_LOGARITHMIC_ERROR, LossFunction.POISSON};

    testLossFunctions(lossFunctionList);
}
 
Developer: deeplearning4j, Project: nd4j, Lines: 12, Source: TestLossFunctionsSizeChecks.java
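Each LossFunction enum value maps to an ILossFunction implementation via getILossFunction(), which is what the size-check test above iterates over. A minimal sketch of scoring predictions with one of them directly; the computeScore signature shown matches the nd4j ILossFunction interface in this version line, but may differ in other releases:

import org.nd4j.linalg.activations.impl.ActivationIdentity;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.lossfunctions.ILossFunction;
import org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction;

public class LossScoreSketch {
    public static void main(String[] args) {
        INDArray labels = Nd4j.create(new double[][] {{0, 1}, {1, 0}});
        INDArray preOutput = Nd4j.create(new double[][] {{0.2, 0.8}, {0.7, 0.3}});

        ILossFunction mse = LossFunction.MSE.getILossFunction();
        // computeScore(labels, preOutput, activationFn, mask, average)
        double score = mse.computeScore(labels, preOutput, new ActivationIdentity(), null, true);
        System.out.println("MSE score: " + score);
    }
}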

Example 10: testEmbeddingLayerSimple

import org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction; // import the required package/class
@Test
public void testEmbeddingLayerSimple() {
    Random r = new Random(12345);
    int nExamples = 5;
    INDArray input = Nd4j.zeros(nExamples, 1);
    INDArray labels = Nd4j.zeros(nExamples, 3);
    for (int i = 0; i < nExamples; i++) {
        input.putScalar(i, r.nextInt(4));
        labels.putScalar(new int[] {i, r.nextInt(3)}, 1.0);
    }

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().l2(0.2).l1(0.1)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(12345L)
                    .list().layer(0,
                                    new EmbeddingLayer.Builder().nIn(4).nOut(3).weightInit(WeightInit.XAVIER)
                                                    .dist(new NormalDistribution(0, 1))
                                                    .updater(new NoOp()).activation(
                                                                    Activation.TANH)
                                                    .build())
                    .layer(1, new OutputLayer.Builder(LossFunction.MCXENT).nIn(3).nOut(3)
                                    .weightInit(WeightInit.XAVIER).dist(new NormalDistribution(0, 1))
                                    .updater(new NoOp()).activation(Activation.SOFTMAX).build())
                    .pretrain(false).backprop(true).build();

    MultiLayerNetwork mln = new MultiLayerNetwork(conf);
    mln.init();

    if (PRINT_RESULTS) {
        System.out.println("testEmbeddingLayerSimple");
        for (int j = 0; j < mln.getnLayers(); j++)
            System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
    }

    boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

    String msg = "testEmbeddingLayerSimple";
    assertTrue(msg, gradOK);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 40, Source: GradientCheckTests.java

Example 11: testIris2

import org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction; // import the required package/class
@Test
public void testIris2() {
    NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .updater(new Sgd(1e-1))
                    .layer(new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder().nIn(4).nOut(3)
                                    .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX)
                                    .lossFunction(LossFunctions.LossFunction.MCXENT).build())
                    .build();

    int numParams = conf.getLayer().initializer().numParams(conf);
    INDArray params = Nd4j.create(1, numParams);
    OutputLayer l = (OutputLayer) conf.getLayer().instantiate(conf,
                    Collections.<IterationListener>singletonList(new ScoreIterationListener(1)), 0, params, true);
    l.setBackpropGradientsViewArray(Nd4j.create(1, params.length()));
    DataSetIterator iter = new IrisDataSetIterator(150, 150);


    DataSet next = iter.next();
    next.shuffle();
    SplitTestAndTrain trainTest = next.splitTestAndTrain(110);
    trainTest.getTrain().normalizeZeroMeanZeroUnitVariance();
    for( int i=0; i<10; i++ ) {
        l.fit(trainTest.getTrain());
    }


    DataSet test = trainTest.getTest();
    test.normalizeZeroMeanZeroUnitVariance();
    Evaluation eval = new Evaluation();
    INDArray output = l.output(test.getFeatureMatrix());
    eval.eval(test.getLabels(), output);
    log.info("Score " + eval.stats());


}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 37, Source: OutputLayerTest.java

Example 12: testWeightsDifferent

import org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction; // import the required package/class
@Test
public void testWeightsDifferent() {
    Nd4j.MAX_ELEMENTS_PER_SLICE = Integer.MAX_VALUE;
    Nd4j.MAX_SLICES_TO_PRINT = Integer.MAX_VALUE;

    NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
                    .miniBatch(false).seed(123)
                    .updater(new AdaGrad(1e-1))
                    .layer(new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder().nIn(4).nOut(3)
                                    .weightInit(WeightInit.XAVIER)
                                    .lossFunction(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                                    .activation(Activation.SOFTMAX).build())
                    .build();

    int numParams = conf.getLayer().initializer().numParams(conf);
    INDArray params = Nd4j.create(1, numParams);
    OutputLayer o = (OutputLayer) conf.getLayer().instantiate(conf, null, 0, params, true);
    o.setBackpropGradientsViewArray(Nd4j.create(1, params.length()));


    int numSamples = 150;
    int batchSize = 150;


    DataSetIterator iter = new IrisDataSetIterator(batchSize, numSamples);
    DataSet iris = iter.next(); // Loads data into generator and format consumable for NN
    iris.normalizeZeroMeanZeroUnitVariance();
    o.setListeners(new ScoreIterationListener(1));
    SplitTestAndTrain t = iris.splitTestAndTrain(0.8);
    for( int i=0; i<1000; i++ ){
        o.fit(t.getTrain());
    }
    log.info("Evaluate model....");
    Evaluation eval = new Evaluation(3);
    eval.eval(t.getTest().getLabels(), o.output(t.getTest().getFeatureMatrix(), true));
    log.info(eval.stats());

}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 39, Source: OutputLayerTest.java

Example 13: testBinary

import org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction; // import the required package/class
@Test
public void testBinary() {

    Nd4j.MAX_ELEMENTS_PER_SLICE = Integer.MAX_VALUE;
    Nd4j.MAX_SLICES_TO_PRINT = Integer.MAX_VALUE;
    DataBuffer.Type initialType = Nd4j.dataType();
    DataTypeUtil.setDTypeForContext(DataBuffer.Type.DOUBLE);
    INDArray data = Nd4j.create(new double[][] {{1, 1, 1, 0, 0, 0}, {1, 0, 1, 0, 0, 0}, {1, 1, 1, 0, 0, 0},
                    {0, 0, 1, 1, 1, 0}, {0, 0, 1, 1, 0, 0}, {0, 0, 1, 1, 1, 0}});

    INDArray data2 = Nd4j.create(new double[][] {{1, 0}, {1, 0}, {1, 0}, {0, 1}, {0, 1}, {0, 1}});

    DataSet dataset = new DataSet(data, data2);
    NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
                    .seed(123)
                    .updater(new Sgd(1e-2))
                    .layer(new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder().nIn(6).nOut(2)
                                    .weightInit(WeightInit.ZERO).activation(Activation.SOFTMAX)
                                    .lossFunction(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD).build())
                    .build();

    int numParams = conf.getLayer().initializer().numParams(conf);
    INDArray params = Nd4j.create(1, numParams);
    OutputLayer o = (OutputLayer) conf.getLayer().instantiate(conf, null, 0, params, true);
    o.setBackpropGradientsViewArray(Nd4j.create(1, params.length()));

    o.setListeners(new ScoreIterationListener(1));
    for( int i=0; i<200; i++ ) {
        o.fit(dataset);
    }

    DataTypeUtil.setDTypeForContext(initialType);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 34, Source: OutputLayerTest.java
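The test flips the global data type to DOUBLE and restores it at the end. In ordinary (non-test) code, a try/finally block is the safer shape for that kind of global toggle, so the original type is restored even if the body throws; a sketch using the same DataTypeUtil calls as above:

DataBuffer.Type initialType = Nd4j.dataType();
DataTypeUtil.setDTypeForContext(DataBuffer.Type.DOUBLE);
try {
    // ... double-precision work ...
} finally {
    DataTypeUtil.setDTypeForContext(initialType); // restored even on failure
}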

Example 14: testIris

import org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction; // import the required package/class
@Test
public void testIris() {
    NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
                    .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT).updater(new Sgd(1e-1))
                    .layer(new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder().nIn(4).nOut(3)
                                    .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX)
                                    .lossFunction(LossFunctions.LossFunction.MCXENT).build())
                    .build();

    int numParams = conf.getLayer().initializer().numParams(conf);
    INDArray params = Nd4j.create(1, numParams);
    OutputLayer l = (OutputLayer) conf.getLayer().instantiate(conf,
                    Collections.<IterationListener>singletonList(new ScoreIterationListener(1)), 0, params, true);
    l.setBackpropGradientsViewArray(Nd4j.create(1, params.length()));
    DataSetIterator iter = new IrisDataSetIterator(150, 150);


    DataSet next = iter.next();
    next.shuffle();
    SplitTestAndTrain trainTest = next.splitTestAndTrain(110);
    trainTest.getTrain().normalizeZeroMeanZeroUnitVariance();
    for( int i=0; i<5; i++ ) {
        l.fit(trainTest.getTrain());
    }


    DataSet test = trainTest.getTest();
    test.normalizeZeroMeanZeroUnitVariance();
    Evaluation eval = new Evaluation();
    INDArray output = l.output(test.getFeatureMatrix());
    eval.eval(test.getLabels(), output);
    log.info("Score " + eval.stats());


}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 36, Source: OutputLayerTest.java

Example 15: testCnnOutputLayerSoftmax

import org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction; // import the required package/class
@Test
public void testCnnOutputLayerSoftmax(){
    //Check that softmax is applied depth-wise

    MultiLayerConfiguration conf =
            new NeuralNetConfiguration.Builder().seed(12345L)
                    .updater(new NoOp())
                    .convolutionMode(ConvolutionMode.Same)
                    .list()
                    .layer(new ConvolutionLayer.Builder().nIn(3).nOut(4).activation(Activation.IDENTITY)
                            .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1.0))
                            .updater(new NoOp()).build())
                    .layer(new CnnLossLayer.Builder(LossFunction.MSE)
                            .activation(Activation.SOFTMAX)
                            .build())
                    .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    INDArray in = Nd4j.rand(new int[]{2,3,4,5});
    INDArray out = net.output(in);

    double min = out.minNumber().doubleValue();
    double max = out.maxNumber().doubleValue();

    assertTrue(min >= 0 && max <= 1.0);

    INDArray sum = out.sum(1);
    assertEquals(Nd4j.ones(2,4,5), sum);

}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 33, Source: OutputLayerTest.java


Note: The org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright in the source code remains with the original authors. Refer to each project's License before distributing or using the code, and do not reproduce without permission.