本文整理汇总了Java中org.deeplearning4j.eval.Evaluation.eval方法的典型用法代码示例。如果您正苦于以下问题:Java Evaluation.eval方法的具体用法?Java Evaluation.eval怎么用?Java Evaluation.eval使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.deeplearning4j.eval.Evaluation的用法示例。
在下文中一共展示了Evaluation.eval方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testEvaluation
import org.deeplearning4j.eval.Evaluation; //导入方法依赖的package包/类
@Test
public void testEvaluation(){
    // Evaluate a clone of the same network locally; the Spark-distributed
    // evaluation must produce matching metrics on the same data.
    SparkDl4jMultiLayer sparkNet = getBasicNetwork();
    MultiLayerNetwork localCopy = sparkNet.getNetwork().clone();

    Evaluation expected = new Evaluation();
    INDArray localOut = localCopy.output(input, Layer.TrainingMode.TEST);
    expected.eval(labels, localOut);

    Evaluation actual = sparkNet.evaluate(sparkData);

    // Scalar metrics agree to within a small tolerance.
    assertEquals(expected.accuracy(), actual.accuracy(), 1e-3);
    assertEquals(expected.f1(), actual.f1(), 1e-3);
    assertEquals(expected.getNumRowCounter(), actual.getNumRowCounter(), 1e-3);
    assertEquals(expected.precision(), actual.precision(), 1e-3);
    assertEquals(expected.recall(), actual.recall(), 1e-3);

    // Per-class count maps and the confusion matrix must match exactly.
    assertMapEquals(expected.falseNegatives(), actual.falseNegatives());
    assertMapEquals(expected.falsePositives(), actual.falsePositives());
    assertMapEquals(expected.trueNegatives(), actual.trueNegatives());
    assertMapEquals(expected.truePositives(), actual.truePositives());
    assertEquals(expected.getConfusionMatrix(), actual.getConfusionMatrix());
}
开发者ID:PacktPublishing,项目名称:Deep-Learning-with-Hadoop,代码行数:24,代码来源:TestSparkMultiLayerParameterAveraging.java
示例2: evalMnistTestSet
import org.deeplearning4j.eval.Evaluation; //导入方法依赖的package包/类
/**
 * Evaluates the given LeNet model on the MNIST test set and logs the
 * accumulated classification statistics.
 */
private static void evalMnistTestSet(MultiLayerNetwork leNetModel) throws Exception {
    log.info("Load test data....");
    final int batchSize = 64;
    final int outputNum = 10; // ten digit classes
    DataSetIterator mnistTest = new MnistDataSetIterator(batchSize, false, 12345);

    log.info("Evaluate model....");
    // One Evaluation accumulates stats batch by batch over the whole test set.
    Evaluation evaluation = new Evaluation(outputNum);
    while (mnistTest.hasNext()) {
        DataSet batch = mnistTest.next();
        INDArray predictions = leNetModel.output(batch.getFeatureMatrix(), false);
        evaluation.eval(batch.getLabels(), predictions);
    }
    log.info(evaluation.stats());
}
示例3: evaluate
import org.deeplearning4j.eval.Evaluation; //导入方法依赖的package包/类
@SuppressWarnings("rawtypes")
public DeepBeliefNetworkModel evaluate()
{
    final DataSet testingData = ((IrisData) data).getTestingData();
    final Evaluation evaluation = new Evaluation(parameters.getOutputSize());

    // Two passes over the same test data. NOTE(review): the Evaluation object
    // accumulates, so the stats printed on the second pass cover both passes.
    for (int pass = 0; pass < 2; pass++)
    {
        final INDArray predictions = model.output(testingData.getFeatureMatrix(), Layer.TrainingMode.TEST);

        // Print each example's label row against its predicted row.
        for (int row = 0; row < predictions.rows(); row++)
        {
            String actual = testingData.getLabels().getRow(row).toString().trim();
            String predicted = predictions.getRow(row).toString().trim();
            System.out.println("actual " + actual + " vs predicted " + predicted);
        }

        evaluation.eval(testingData.getLabels(), predictions);
        System.out.println(evaluation.stats());
    }
    return this;
}
示例4: evaluate
import org.deeplearning4j.eval.Evaluation; //导入方法依赖的package包/类
@Override
@SuppressWarnings("rawtypes")
public Model evaluate()
{
    final Evaluation evaluation = new Evaluation(parameters.getOutputSize());
    try
    {
        // Stream the 10 000-example MNIST test set in mini-batches of 100.
        final DataSetIterator iterator = new MnistDataSetIterator(100, 10000);
        while (iterator.hasNext())
        {
            final DataSet batch = iterator.next();
            evaluation.eval(batch.getLabels(), model.output(batch.getFeatureMatrix()));
        }
        System.out.println(evaluation.stats());
    }
    catch (IOException e)
    {
        // Best-effort: report the failure and still return this model.
        e.printStackTrace();
    }
    return this;
}
示例5: evaluate
import org.deeplearning4j.eval.Evaluation; //导入方法依赖的package包/类
@Override
@SuppressWarnings("rawtypes")
public Model evaluate()
{
    // Features and labels are parallel lists: entry i of one corresponds
    // to entry i of the other.
    final List<INDArray> features = ((MnistData) data).getTestingFeatures();
    final List<INDArray> labels = ((MnistData) data).getTestingLabels();

    final Evaluation evaluation = new Evaluation(parameters.getOutputSize());
    for (int idx = 0; idx < features.size(); idx++)
    {
        evaluation.eval(labels.get(idx), model.output(features.get(idx)));
    }
    System.out.println(evaluation.stats());
    return this;
}
示例6: testMLPMultiLayerBackprop
import org.deeplearning4j.eval.Evaluation; //导入方法依赖的package包/类
@Test
public void testMLPMultiLayerBackprop() {
    // Two networks built from identical configs and fit on the same iterator
    // should end up with identical parameters and identical F1 scores.
    MultiLayerNetwork netA = getDenseMLNConfig(true, false);
    netA.fit(iter);
    MultiLayerNetwork netB = getDenseMLNConfig(true, false);
    netB.fit(iter);

    iter.reset();
    DataSet test = iter.next();
    assertEquals(netA.params(), netB.params());

    Evaluation evalA = new Evaluation();
    INDArray outA = netA.output(test.getFeatureMatrix());
    evalA.eval(test.getLabels(), outA);

    Evaluation evalB = new Evaluation();
    INDArray outB = netB.output(test.getFeatureMatrix());
    evalB.eval(test.getLabels(), outB);

    assertEquals(evalA.f1(), evalB.f1(), 1e-4);
}
示例7: testEvaluation
import org.deeplearning4j.eval.Evaluation; //导入方法依赖的package包/类
@Test
public void testEvaluation() {
    // Reference result: local evaluation of a clone of the Spark network.
    SparkDl4jMultiLayer sparkNet = getBasicNetwork();
    MultiLayerNetwork referenceNet = sparkNet.getNetwork().clone();

    Evaluation reference = new Evaluation();
    INDArray referenceOut = referenceNet.output(input, Layer.TrainingMode.TEST);
    reference.eval(labels, referenceOut);

    // Actual result: evaluation distributed over the Spark RDD.
    Evaluation distributed = sparkNet.evaluate(sparkData);

    assertEquals(reference.accuracy(), distributed.accuracy(), 1e-3);
    assertEquals(reference.f1(), distributed.f1(), 1e-3);
    assertEquals(reference.getNumRowCounter(), distributed.getNumRowCounter(), 1e-3);
    assertMapEquals(reference.falseNegatives(), distributed.falseNegatives());
    assertMapEquals(reference.falsePositives(), distributed.falsePositives());
    assertMapEquals(reference.trueNegatives(), distributed.trueNegatives());
    assertMapEquals(reference.truePositives(), distributed.truePositives());
    assertEquals(reference.precision(), distributed.precision(), 1e-3);
    assertEquals(reference.recall(), distributed.recall(), 1e-3);
    assertEquals(reference.getConfusionMatrix(), distributed.getConfusionMatrix());
}
示例8: evaluate
import org.deeplearning4j.eval.Evaluation; //导入方法依赖的package包/类
@Override
public String evaluate(FederatedDataSet federatedDataSet) {
    // Evaluate the model on the federated test set.
    DataSet testData = (DataSet) federatedDataSet.getNativeDataSet();
    double score = model.score(testData);

    // Bug fix: the Evaluation below was previously computed and then
    // silently discarded (only the score was returned). Include its stats
    // in the report so the classification metrics are actually used,
    // matching the sibling evaluate(...) that returns eval.stats().
    Evaluation eval = new Evaluation(numClasses);
    INDArray output = model.output(testData.getFeatureMatrix());
    eval.eval(testData.getLabels(), output);
    return "Score: " + score + "\n" + eval.stats();
}
示例9: evaluate
import org.deeplearning4j.eval.Evaluation; //导入方法依赖的package包/类
@Override
public String evaluate(FederatedDataSet federatedDataSet) {
    // Unwrap the native data set and re-batch it for iteration.
    DataSet testData = (DataSet) federatedDataSet.getNativeDataSet();
    DataSetIterator batches = new ListDataSetIterator(testData.asList(), BATCH_SIZE);

    //create an evaluation object with 10 possible classes
    Evaluation eval = new Evaluation(OUTPUT_NUM);
    while (batches.hasNext()) {
        DataSet batch = batches.next();
        INDArray predictions = model.output(batch.getFeatureMatrix()); //get the networks prediction
        eval.eval(batch.getLabels(), predictions); //check the prediction against the true class
    }
    return eval.stats();
}
示例10: evaluate
import org.deeplearning4j.eval.Evaluation; //导入方法依赖的package包/类
public void evaluate() {
    log.info("Evaluate model....");
    // Accumulate statistics over every batch of the held-out test set,
    // then reset the iterator so it can be reused by a later call.
    Evaluation eval = new Evaluation(ConfigurationFactory.NUM_OUTPUTS);
    while (m_testSet.hasNext()) {
        DataSet batch = m_testSet.next();
        INDArray predictions = m_model.output(batch.getFeatureMatrix(), false);
        eval.eval(batch.getLabels(), predictions);
    }
    log.info(eval.stats());
    m_testSet.reset();
}
示例11: evaluate
import org.deeplearning4j.eval.Evaluation; //导入方法依赖的package包/类
@Override
@SuppressWarnings("rawtypes")
public Model evaluate()
{
    // Single-shot evaluation: the whole Iris test split is one batch.
    final DataSet testingData = ((IrisData) data).getTestingData();
    final INDArray predictions = model.output(testingData.getFeatureMatrix());

    final Evaluation evaluation = new Evaluation(parameters.getOutputSize());
    evaluation.eval(testingData.getLabels(), predictions);
    System.out.println(evaluation.stats());
    return this;
}
示例12: main
import org.deeplearning4j.eval.Evaluation; //导入方法依赖的package包/类
/**
 * CLI entry point: -i &lt;test data file&gt; -m &lt;trained model file&gt;.
 * Restores the serialized network plus its normalizer statistics and
 * logs evaluation stats over the supplied Iris CSV data.
 */
public static void main(String... args) throws Exception {
    Options options = new Options();
    options.addOption("i", "input", true, "The file with test data.");
    options.addOption("m", "model", true, "Name of trained model file.");
    CommandLine cmd = new BasicParser().parse(options, args);

    // Guard clause: both options are mandatory.
    if (!cmd.hasOption("i") || !cmd.hasOption("m")) {
        log.error("Invalid arguments.");
        new HelpFormatter().printHelp("Evaluate", options);
        return;
    }

    String input = cmd.getOptionValue("i");
    String modelName = cmd.getOptionValue("m");

    MultiLayerNetwork model = ModelSerializer.restoreMultiLayerNetwork(modelName);
    DataIterator<NormalizerStandardize> it = DataIterator.irisCsv(input);
    RecordReaderDataSetIterator testData = it.getIterator();

    // The normalizer statistics were saved as four companion ".normN" files.
    NormalizerStandardize normalizer = it.getNormalizer();
    normalizer.load(
            new File(modelName + ".norm1"),
            new File(modelName + ".norm2"),
            new File(modelName + ".norm3"),
            new File(modelName + ".norm4")
    );

    // Iris has three classes; accumulate stats over all batches.
    Evaluation eval = new Evaluation(3);
    while (testData.hasNext()) {
        DataSet ds = testData.next();
        eval.eval(ds.getLabels(), model.output(ds.getFeatureMatrix()));
    }
    log.info(eval.stats());
}
示例13: testIris2
import org.deeplearning4j.eval.Evaluation; //导入方法依赖的package包/类
@Test
public void testIris2() {
    // Single softmax output layer trained with SGD + MCXENT on Iris.
    NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd(1e-1))
            .layer(new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder().nIn(4).nOut(3)
                    .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build())
            .build();

    // Instantiate the layer over a freshly allocated parameter view.
    int numParams = conf.getLayer().initializer().numParams(conf);
    INDArray params = Nd4j.create(1, numParams);
    OutputLayer layer = (OutputLayer) conf.getLayer().instantiate(conf,
            Collections.<IterationListener>singletonList(new ScoreIterationListener(1)), 0, params, true);
    layer.setBackpropGradientsViewArray(Nd4j.create(1, params.length()));

    // Shuffle the full 150-example Iris set and split 110 train / 40 test.
    DataSetIterator iter = new IrisDataSetIterator(150, 150);
    DataSet all = iter.next();
    all.shuffle();
    SplitTestAndTrain split = all.splitTestAndTrain(110);
    split.getTrain().normalizeZeroMeanZeroUnitVariance();

    for (int epoch = 0; epoch < 10; epoch++) {
        layer.fit(split.getTrain());
    }

    DataSet test = split.getTest();
    test.normalizeZeroMeanZeroUnitVariance();
    Evaluation eval = new Evaluation();
    INDArray predictions = layer.output(test.getFeatureMatrix());
    eval.eval(test.getLabels(), predictions);
    log.info("Score " + eval.stats());
}
示例14: testWeightsDifferent
import org.deeplearning4j.eval.Evaluation; //导入方法依赖的package包/类
@Test
public void testWeightsDifferent() {
    // Widen ND4J's print limits so full arrays appear in any diagnostics.
    Nd4j.MAX_ELEMENTS_PER_SLICE = Integer.MAX_VALUE;
    Nd4j.MAX_SLICES_TO_PRINT = Integer.MAX_VALUE;

    // Single output layer: AdaGrad + negative log-likelihood, fixed seed.
    NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
            .miniBatch(false).seed(123)
            .updater(new AdaGrad(1e-1))
            .layer(new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder().nIn(4).nOut(3)
                    .weightInit(WeightInit.XAVIER)
                    .lossFunction(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                    .activation(Activation.SOFTMAX).build())
            .build();

    int numParams = conf.getLayer().initializer().numParams(conf);
    INDArray params = Nd4j.create(1, numParams);
    OutputLayer outputLayer = (OutputLayer) conf.getLayer().instantiate(conf, null, 0, params, true);
    outputLayer.setBackpropGradientsViewArray(Nd4j.create(1, params.length()));
    outputLayer.setListeners(new ScoreIterationListener(1));

    // Loads data into generator and format consumable for NN
    DataSetIterator iter = new IrisDataSetIterator(150, 150);
    DataSet iris = iter.next();
    iris.normalizeZeroMeanZeroUnitVariance();
    SplitTestAndTrain split = iris.splitTestAndTrain(0.8);

    for (int epoch = 0; epoch < 1000; epoch++) {
        outputLayer.fit(split.getTrain());
    }

    log.info("Evaluate model....");
    Evaluation eval = new Evaluation(3);
    eval.eval(split.getTest().getLabels(), outputLayer.output(split.getTest().getFeatureMatrix(), true));
    log.info(eval.stats());
}
示例15: testIris
import org.deeplearning4j.eval.Evaluation; //导入方法依赖的package包/类
@Test
public void testIris() {
    // Softmax output layer trained with line gradient descent on Iris.
    NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT).updater(new Sgd(1e-1))
            .layer(new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder().nIn(4).nOut(3)
                    .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build())
            .build();

    // Wire the layer to a fresh parameter view and gradient view.
    int numParams = conf.getLayer().initializer().numParams(conf);
    INDArray params = Nd4j.create(1, numParams);
    OutputLayer layer = (OutputLayer) conf.getLayer().instantiate(conf,
            Collections.<IterationListener>singletonList(new ScoreIterationListener(1)), 0, params, true);
    layer.setBackpropGradientsViewArray(Nd4j.create(1, params.length()));

    // Shuffle all 150 Iris examples, split 110 train / 40 test, normalize.
    DataSetIterator iter = new IrisDataSetIterator(150, 150);
    DataSet all = iter.next();
    all.shuffle();
    SplitTestAndTrain split = all.splitTestAndTrain(110);
    split.getTrain().normalizeZeroMeanZeroUnitVariance();

    for (int epoch = 0; epoch < 5; epoch++) {
        layer.fit(split.getTrain());
    }

    DataSet test = split.getTest();
    test.normalizeZeroMeanZeroUnitVariance();
    Evaluation eval = new Evaluation();
    INDArray predictions = layer.output(test.getFeatureMatrix());
    eval.eval(test.getLabels(), predictions);
    log.info("Score " + eval.stats());
}