

Java Evaluation Class Code Examples

This article collects typical usage examples of the Java class weka.classifiers.Evaluation. If you are wondering what the Evaluation class does, how to use it, or where to find working examples, the curated code samples below may help.


The Evaluation class belongs to the weka.classifiers package. Fifteen code examples of the Evaluation class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
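
As a quick orientation before the project-specific examples, here is a minimal, self-contained sketch of the most common Evaluation workflow: load a dataset, cross-validate a classifier, and print the summary. The file name iris.arff and the choice of J48 are placeholders; substitute your own data and classifier.

import java.util.Random;
import weka.classifiers.Evaluation;
import weka.classifiers.trees.J48;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class EvaluationQuickStart {
    public static void main(String[] args) throws Exception {
        // load a dataset and use the last attribute as the class
        Instances data = DataSource.read("iris.arff");
        data.setClassIndex(data.numAttributes() - 1);

        // 10-fold cross-validation of a J48 decision tree
        Evaluation evaluation = new Evaluation(data);
        evaluation.crossValidateModel(new J48(), data, 10, new Random(1));

        // summary statistics and confusion matrix
        System.out.println(evaluation.toSummaryString());
        System.out.println(evaluation.toMatrixString());
    }
}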

Example 1: useClassifier

import weka.classifiers.Evaluation; // import the required package/class
/**
 * uses the meta-classifier
 */
protected static void useClassifier(Instances data) throws Exception {
    System.out.println("\n1. Meta-classfier");
    AttributeSelectedClassifier classifier = new AttributeSelectedClassifier();
    CfsSubsetEval eval = new CfsSubsetEval();
    //GreedyStepwise search = new GreedyStepwise();
    GeneticSearch search = new GeneticSearch();
    //	search.setSearchBackwards(false);
    RandomForest base = new RandomForest();
    classifier.setClassifier(base);
    System.out.println("Set the classifier : " + base.toString());
    classifier.setEvaluator(eval);
    System.out.println("Set the evaluator : " + eval.toString());
    classifier.setSearch(search);
    System.out.println("Set the search : " + search.toString());
    Evaluation evaluation = new Evaluation(data);
    evaluation.crossValidateModel(classifier, data, 10, new Random(1));
    System.out.println(evaluation.toSummaryString());
}
 
Developer: ajaybhat | Project: Essay-Grading-System | Lines: 22 | Source: AttributeSelectionRunner.java

Example 2: performTestSetEvaluation

import weka.classifiers.Evaluation; // import the required package/class
/**
 * Splits the dataset into a training set and a test set according to the given percentage.
 * <br/>Then builds each classifier on the training set and applies it to predict the test set.
 * @param dataset
 * Dataset to be divided
 * @param percentageSplit
 * Percentage of instances used for training
 * @return
 * An array of Evaluation objects with the results
 * @throws Exception
 */
public Evaluation[] performTestSetEvaluation(Instances dataset, int percentageSplit) throws Exception{
	int trainSetSize = Math.round(dataset.numInstances() * percentageSplit / 100f);
	int testSetSize = dataset.numInstances() - trainSetSize;

	dataset = randomizeSet(dataset);
	trainingSet = new Instances(dataset, 0, trainSetSize);
	testingSet = new Instances(dataset, trainSetSize, testSetSize);

	for(int i = 0;i < cls.length;i++){
		cls[i].buildClassifier(trainingSet);
		eval[i] = new Evaluation(trainingSet);
		eval[i].evaluateModel(cls[i], testingSet);
	}

	return eval;
}
 
Developer: a-n-d-r-e-i | Project: seagull | Lines: 28 | Source: Classification.java

Example 3: crossValidate

import weka.classifiers.Evaluation; // import the required package/class
/**
  * Utility method for fast 5-fold cross validation of a naive bayes
  * model
  *
  * @param fullModel a <code>NaiveBayesUpdateable</code> value
  * @param trainingSet an <code>Instances</code> value
  * @param r a <code>Random</code> value
  * @return a <code>double</code> value
  * @exception Exception if an error occurs
  */
 public static double crossValidate(NaiveBayesUpdateable fullModel,
		       Instances trainingSet,
		       Random r) throws Exception {
   // make some copies for fast evaluation of 5-fold xval
   Classifier [] copies = AbstractClassifier.makeCopies(fullModel, 5);
   Evaluation eval = new Evaluation(trainingSet);
   // make some splits
   for (int j = 0; j < 5; j++) {
     Instances test = trainingSet.testCV(5, j);
     // unlearn these test instances
     for (int k = 0; k < test.numInstances(); k++) {
       test.instance(k).setWeight(-test.instance(k).weight());
       ((NaiveBayesUpdateable) copies[j]).updateClassifier(test.instance(k));
       // reset the weight back to its original value
       test.instance(k).setWeight(-test.instance(k).weight());
     }
     eval.evaluateModel(copies[j], test);
   }
   return eval.incorrect();
 }
 
Developer: dsibournemouth | Project: autoweka | Lines: 31 | Source: NBTreeNoSplit.java
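
For reference, a more conventional (but slower) way to get the same incorrect-instance count is to let Evaluation run the cross-validation itself, rebuilding the model for every fold, which is exactly the cost that the unlearning trick above avoids. The helper below is a hypothetical comparison sketch, not part of NBTreeNoSplit.

import java.util.Random;
import weka.classifiers.Evaluation;
import weka.classifiers.bayes.NaiveBayes;
import weka.core.Instances;

// Hypothetical comparison helper: standard 5-fold cross-validation that
// retrains a fresh NaiveBayes model on each fold and returns the number
// of misclassified instances.
public static double crossValidateSlow(Instances trainingSet, Random r) throws Exception {
    Evaluation eval = new Evaluation(trainingSet);
    eval.crossValidateModel(new NaiveBayes(), trainingSet, 5, r);
    return eval.incorrect();
}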

Example 4: trainRandomForest

import weka.classifiers.Evaluation; // import the required package/class
public static void trainRandomForest(final Instances trainingSet) throws Exception {
        // Create a classifier
        final RandomForest tree = new RandomForest();
        tree.buildClassifier(trainingSet);

        // Test the model
        final Evaluation eval = new Evaluation(trainingSet);
//        eval.crossValidateModel(tree, trainingSet, 10, new Random(1));
        eval.evaluateModel(tree, trainingSet);

        // Print the result à la Weka explorer:
        logger.info(eval.toSummaryString());
        logger.info(eval.toMatrixString());
        logger.info(tree.toString());
    }
 
Developer: cobr123 | Project: VirtaMarketAnalyzer | Lines: 16 | Source: RetailSalePrediction.java

Example 5: modelErrors

import weka.classifiers.Evaluation; // import the required package/class
/**
 * Updates the numIncorrectModel field for all nodes. This is needed for
 * calculating the alpha-values.
 */
public void modelErrors() throws Exception {

    Evaluation eval = new Evaluation(m_train);

    if (!m_isLeaf) {
        // temporarily flag this inner node as a leaf so that evaluateModel
        // scores the node's own model rather than its subtree
        m_isLeaf = true;
        eval.evaluateModel(this, m_train);
        m_isLeaf = false;
        m_numIncorrectModel = eval.incorrect();
        for (int i = 0; i < m_sons.length; i++) m_sons[i].modelErrors();
    } else {
        eval.evaluateModel(this, m_train);
        m_numIncorrectModel = eval.incorrect();
    }
}
 
Developer: dsibournemouth | Project: autoweka | Lines: 19 | Source: LMTNode.java

Example 6: crossValidate

import weka.classifiers.Evaluation; // import the required package/class
/**
  * Utility method for fast 5-fold cross validation of a naive bayes
  * model
  *
  * @param fullModel a <code>NaiveBayesUpdateable</code> value
  * @param trainingSet an <code>Instances</code> value
  * @param r a <code>Random</code> value
  * @return a <code>double</code> value
  * @exception Exception if an error occurs
  */
 public static double crossValidate(NaiveBayesUpdateable fullModel,
		       Instances trainingSet,
		       Random r) throws Exception {
   // make some copies for fast evaluation of 5-fold xval
   Classifier [] copies = Classifier.makeCopies(fullModel, 5);
   Evaluation eval = new Evaluation(trainingSet);
   // make some splits
   for (int j = 0; j < 5; j++) {
     Instances test = trainingSet.testCV(5, j);
     // unlearn these test instances
     for (int k = 0; k < test.numInstances(); k++) {
       test.instance(k).setWeight(-test.instance(k).weight());
       ((NaiveBayesUpdateable) copies[j]).updateClassifier(test.instance(k));
       // reset the weight back to its original value
       test.instance(k).setWeight(-test.instance(k).weight());
     }
     eval.evaluateModel(copies[j], test);
   }
   return eval.incorrect();
 }
 
Developer: williamClanton | Project: jbossBA | Lines: 31 | Source: NBTreeNoSplit.java

Example 7: Classification

import weka.classifiers.Evaluation; // import the required package/class
public Classification(ArrayList<ClassifierType> cType) {

		cls = new Classifier[cType.size()];
		eval = new Evaluation[cType.size()];

		for(int i = 0; i < cType.size();i++){			
			switch(cType.get(i)){
			// TODO Will we use J48 or ID3 implementation of decision trees?
			case J48:
				cls[i] = new J48();
				break;
			case NAIVE_BAYES:
				// If bType == Incremental then cls = new UpdateableNaiveBayes(); else
				cls[i] = new NaiveBayes();
				break;
			case IBK:
				cls[i] = new IBk();
				break;
			case COSINE:
				cls[i] = useCosine();
				// TODO Add other cases: Decision Rule, KNN and so on.
			}
		}
	}
 
Developer: a-n-d-r-e-i | Project: seagull | Lines: 25 | Source: Classification.java
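
Examples 2 and 7 come from the same seagull project, so they can plausibly be wired together as sketched below. The method name, the chosen ClassifierType values, and the 66% split are assumptions made for illustration; only the Classification constructor and performTestSetEvaluation come from the examples above.

import java.util.ArrayList;
import weka.classifiers.Evaluation;
import weka.core.Instances;

// Hypothetical driver: build the classifiers from Example 7 and score them
// with the percentage-split evaluation from Example 2.
public static void runClassificationExample(Instances data) throws Exception {
    ArrayList<ClassifierType> types = new ArrayList<>();
    types.add(ClassifierType.J48);
    types.add(ClassifierType.NAIVE_BAYES);

    Classification classification = new Classification(types);
    Evaluation[] results = classification.performTestSetEvaluation(data, 66);
    for (Evaluation eval : results) {
        System.out.println(eval.toSummaryString());
    }
}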

Example 8: Main

import weka.classifiers.Evaluation; // import the required package/class
public Main() {
    try {
        BufferedReader datafile;
        datafile = readDataFile("camping.txt");
        Instances data = new Instances(datafile);
        data.setClassIndex(data.numAttributes() - 1);

        Instances trainingData = new Instances(data, 0, 14);
        Instances testingData = new Instances(data, 14, 5);
        Evaluation evaluation = new Evaluation(trainingData);

        SMO smo = new SMO();
        smo.buildClassifier(data);

        evaluation.evaluateModel(smo, testingData);
        System.out.println(evaluation.toSummaryString());

        // Test instance 
        Instance instance = new DenseInstance(3);
        instance.setValue(data.attribute("age"), 78);
        instance.setValue(data.attribute("income"), 125700);
        instance.setValue(data.attribute("camps"), 1);            
        instance.setDataset(data);
        System.out.println("The instance: " + instance);
        System.out.println(smo.classifyInstance(instance));
    } catch (Exception ex) {
        ex.printStackTrace();
    }
}
 
Developer: PacktPublishing | Project: Machine-Learning-End-to-Endguide-for-Java-developers | Lines: 30 | Source: Main-SVG.java

Example 9: writeCrossValidationResults

import weka.classifiers.Evaluation; // import the required package/class
@TimeThis(task="write-results", category=TimerCategory.EXPORT)
protected void writeCrossValidationResults(ProcessingContext<Corpus> ctx, TargetStream evaluationFile, Evaluation evaluation, String[] classes) throws Exception {
	Logger logger = getLogger(ctx);
	logger.info("writing test results into " + evaluationFile.getName());
	try (PrintStream out = evaluationFile.getPrintStream()) {
		for (int i = 0; i < classes.length; ++i) {
			out.printf("Results for class %d (%s):\n", i, classes[i]);
			out.printf("  True positives : %8.0f\n", evaluation.numTruePositives(i));
			out.printf("  False positives: %8.0f\n", evaluation.numFalsePositives(i));
			out.printf("  True negatives : %8.0f\n", evaluation.numTrueNegatives(i));
			out.printf("  False negatives: %8.0f\n", evaluation.numFalseNegatives(i));
			out.printf("  Recall:    %6.4f\n", evaluation.recall(i));
			out.printf("  Precision: %6.4f\n", evaluation.precision(i));
			out.printf("  F-Measure: %6.4f\n", evaluation.fMeasure(i));
			out.println();
		}
		out.println(evaluation.toMatrixString("Confusion matrix:"));
	}
}
 
Developer: Bibliome | Project: alvisnlp | Lines: 20 | Source: WekaTrain.java

Example 10: evaluateResults

import weka.classifiers.Evaluation; // import the required package/class
public static void evaluateResults(Evaluation evaluation) {

        for (Prediction p : evaluation.predictions()) {
            System.out.println(p.actual() + " " + p.predicted());
        }
        System.out.println(evaluation.toSummaryString("\nResults\n======\n", true));
        //  System.out.println(evaluation.toSummaryString(evaluation.correlationCoefficient() + " " + evaluation.errorRate() + " " + evaluation.meanAbsoluteError() + " ");

    }
 
Developer: gizemsogancioglu | Project: biosses | Lines: 10 | Source: LinearRegressionMethod.java

Example 11: evaluate

import weka.classifiers.Evaluation; // import the required package/class
public static void evaluate(Classifier clf, Instances data, double minPerfomance)
    throws Exception {
  Instances[] split = TestUtil.splitTrainTest(data);

  Instances train = split[0];
  Instances test = split[1];

  clf.buildClassifier(train);
  Evaluation trainEval = new Evaluation(train);
  trainEval.evaluateModel(clf, train);

  Evaluation testEval = new Evaluation(train);
  testEval.evaluateModel(clf, test);

  final double testPctCorrect = testEval.pctCorrect();
  final double trainPctCorrect = trainEval.pctCorrect();

  log.info("Train: {}, Test: {}", trainPctCorrect, testPctCorrect);
  boolean success =
      testPctCorrect > minPerfomance && trainPctCorrect > minPerfomance;
  Assert.assertTrue(success);
}
 
Developer: Waikato | Project: wekaDeeplearning4j | Lines: 23 | Source: StabilityTest.java

Example 12: holdout

import weka.classifiers.Evaluation; // import the required package/class
/**
 * Perform simple holdout with a given percentage
 *
 * @param clf Classifier
 * @param data Full dataset
 * @param p Split percentage
 * @throws Exception
 */
public static void holdout(Classifier clf, Instances data, double p) throws Exception {
  Instances[] split = splitTrainTest(data, p);

  Instances train = split[0];
  Instances test = split[1];

  clf.buildClassifier(train);
  Evaluation trainEval = new Evaluation(train);
  trainEval.evaluateModel(clf, train);
  logger.info("Weka Train Evaluation:");
  logger.info(trainEval.toSummaryString());
  if (!data.classAttribute().isNumeric()) {
    logger.info(trainEval.toMatrixString());
  }

  Evaluation testEval = new Evaluation(train);
  logger.info("Weka Test Evaluation:");
  testEval.evaluateModel(clf, test);
  logger.info(testEval.toSummaryString());
  if (!data.classAttribute().isNumeric()) {
    logger.info(testEval.toMatrixString());
  }
}
 
Developer: Waikato | Project: wekaDeeplearning4j | Lines: 32 | Source: TestUtil.java

Example 13: getErrorPercent

import weka.classifiers.Evaluation; // import the required package/class
@Override
public double getErrorPercent() {
    this.splitInstances();

    try {
        this.getClassifier().buildClassifier(getTrainInstances());

        Evaluation eval = new Evaluation(getTestInstances());
        eval.evaluateModel(getClassifier(), getTestInstances());

        return eval.pctIncorrect();

    } catch (Exception e) {
        e.printStackTrace();
        return -1;
    }
}
 
Developer: garciparedes | Project: java-examples | Lines: 18 | Source: AbstractSplitEstimator.java

Example 14: trainRandomCommittee

import weka.classifiers.Evaluation; // import the required package/class
public static void trainRandomCommittee(final Instances trainingSet) throws Exception {
        logger.info("Create a classifier");
        final RandomTree classifier = new RandomTree();
        classifier.setKValue(0);
        classifier.setMaxDepth(0);
        classifier.setMinNum(0.001);
        classifier.setAllowUnclassifiedInstances(false);
        classifier.setNumFolds(0);

        final RandomCommittee tree = new RandomCommittee();
        tree.setClassifier(classifier);
        tree.setNumIterations(10);
        tree.buildClassifier(trainingSet);

        logger.info("Test the model");
        final Evaluation eval = new Evaluation(trainingSet);
//        eval.crossValidateModel(tree, trainingSet, 10, new Random(1));
        eval.evaluateModel(tree, trainingSet);

        // Print the result à la Weka explorer:
        logger.info(eval.toSummaryString());
        logger.info(tree.toString());
        logger.info(eval.toMatrixString());
        logger.info(eval.toClassDetailsString());
        logger.info(eval.toCumulativeMarginDistributionString());

//        logger.info("coefficients");
//        for(int i = 0; i < tree.coefficients().length; ++i){
//            logger.info("{} | {}", trainingSet.attribute(i).name(), tree.coefficients()[i]);
//        }

//        try {
//            final File file = new File(GitHubPublisher.localPath + RetailSalePrediction.predict_retail_sales + File.separator + "prediction_set_script.js");
//            FileUtils.writeStringToFile(file, ClassifierToJs.compress(ClassifierToJs.toSource(tree, "predictCommonBySet")), "UTF-8");
//        } catch (final Exception e) {
//            logger.error(e.getLocalizedMessage(), e);
//        }
    }
 
Developer: cobr123 | Project: VirtaMarketAnalyzer | Lines: 39 | Source: RetailSalePrediction.java

Example 15: trainDecisionTable

import weka.classifiers.Evaluation; // import the required package/class
public static void trainDecisionTable(final Instances trainingSet) throws Exception {
        // Create a classifier
        final DecisionTable tree = new DecisionTable();
        tree.buildClassifier(trainingSet);

        // Test the model
        final Evaluation eval = new Evaluation(trainingSet);
//        eval.crossValidateModel(tree, trainingSet, 10, new Random(1));
        eval.evaluateModel(tree, trainingSet);

        // Print the result à la Weka explorer:
        logger.info(eval.toSummaryString());
        logger.info(tree.toString());

//        try {
//            final File file = new File(GitHubPublisher.localPath + RetailSalePrediction.predict_retail_sales + File.separator + "prediction_set_script.js");
//            FileUtils.writeStringToFile(file, ClassifierToJs.compress(ClassifierToJs.toSource(tree, "predictCommonBySet")), "UTF-8");
//        } catch (final Exception e) {
//            logger.error(e.getLocalizedMessage(), e);
//        }
    }
 
Developer: cobr123 | Project: VirtaMarketAnalyzer | Lines: 22 | Source: RetailSalePrediction.java


Note: The weka.classifiers.Evaluation examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to the License of the corresponding project; do not reproduce without permission.