本文整理汇总了Java中weka.classifiers.Evaluation.errorRate方法的典型用法代码示例。如果您正苦于以下问题:Java Evaluation.errorRate方法的具体用法?Java Evaluation.errorRate怎么用?Java Evaluation.errorRate使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类weka.classifiers.Evaluation
的用法示例。
在下文中一共展示了Evaluation.errorRate方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: ensembleVote
import weka.classifiers.Evaluation; //导入方法依赖的package包/类
/**
 * Builds a majority-vote ensemble over the supplied base classifiers and
 * estimates its accuracy with 5-fold cross-validation on the training data.
 *
 * @param train       the training instances
 * @param newCfsArray the base classifiers to combine
 * @return the cross-validated accuracy (1 - error rate); 0 if evaluation fails
 */
public static double ensembleVote(Instances train, Classifier[] newCfsArray) {
double accuracy = 0;
try {
Vote voter = new Vote();
// Combine base classifiers by majority voting.
voter.setCombinationRule(new SelectedTag(Vote.MAJORITY_VOTING_RULE,
Vote.TAGS_RULES));
voter.setClassifiers(newCfsArray);
voter.setSeed(2);
voter.buildClassifier(train);
// Fixed random seed keeps the cross-validation folds reproducible.
Evaluation evaluation = new Evaluation(train);
evaluation.crossValidateModel(voter, train, 5, new Random(1000));
accuracy = 1 - evaluation.errorRate();
} catch (Exception e) {
// Best-effort: on any failure, log the trace and report accuracy 0.
e.printStackTrace();
}
return accuracy;
}
示例2: findParamsByCrossValidation
import weka.classifiers.Evaluation; //导入方法依赖的package包/类
/**
 * Recursively searches for the best parameter combination by grid search:
 * each recursion level sweeps one {@code CVParameter}, and the base case
 * cross-validates the classifier with the current full option set, tracking
 * the best (lowest) error seen in {@code m_BestPerformance} /
 * {@code m_BestClassifierOptions}.
 *
 * @param depth the index of the parameter to be optimised at this level
 * @param trainData the data the search is based on
 * @param random a random number generator (currently unused; folds are
 *               randomised with a fixed seed below — NOTE(review): confirm
 *               this is intentional)
 * @throws Exception if an error occurs
 */
protected void findParamsByCrossValidation(int depth, Instances trainData,
Random random)
throws Exception {
if (depth < m_CVParams.size()) {
CVParameter cvParam = (CVParameter)m_CVParams.elementAt(depth);
double upper;
// (m_Lower - m_Upper + 0.5) is a sentinel encoding: a value of 1 or 2
// signals that the real upper bound should be taken from the data
// (number of attributes, or training-fold size) rather than m_Upper.
switch ((int)(cvParam.m_Lower - cvParam.m_Upper + 0.5)) {
case 1:
upper = m_NumAttributes;
break;
case 2:
upper = m_TrainFoldSize;
break;
default:
upper = cvParam.m_Upper;
break;
}
// Divide [m_Lower, upper] into m_Steps evenly spaced values and recurse
// once per value; m_ParamValue holds this level's current setting.
double increment = (upper - cvParam.m_Lower) / (cvParam.m_Steps - 1);
for(cvParam.m_ParamValue = cvParam.m_Lower;
cvParam.m_ParamValue <= upper;
cvParam.m_ParamValue += increment) {
findParamsByCrossValidation(depth + 1, trainData, random);
}
} else {
// Base case: all parameters fixed — evaluate this combination.
Evaluation evaluation = new Evaluation(trainData);
// Set the classifier options
String [] options = createOptions();
if (m_Debug) {
System.err.print("Setting options for "
+ m_Classifier.getClass().getName() + ":");
for (int i = 0; i < options.length; i++) {
System.err.print(" " + options[i]);
}
System.err.println("");
}
((OptionHandler)m_Classifier).setOptions(options);
for (int j = 0; j < m_NumFolds; j++) {
// We want to randomize the data the same way for every
// learning scheme (fixed seed 1), so fold splits are identical
// across all parameter combinations being compared.
Instances train = trainData.trainCV(m_NumFolds, j, new Random(1));
Instances test = trainData.testCV(m_NumFolds, j);
m_Classifier.buildClassifier(train);
evaluation.setPriors(train);
evaluation.evaluateModel(m_Classifier, test);
}
double error = evaluation.errorRate();
if (m_Debug) {
System.err.println("Cross-validated error rate: "
+ Utils.doubleToString(error, 6, 4));
}
// -99 is the "no result yet" sentinel for m_BestPerformance.
if ((m_BestPerformance == -99) || (error < m_BestPerformance)) {
m_BestPerformance = error;
m_BestClassifierOptions = createOptions();
}
}
}
示例3: evaluateSubset
import weka.classifiers.Evaluation; //导入方法依赖的package包/类
/**
 * Evaluates a subset of attributes by keeping only the selected attributes
 * (plus the class), training the classifier on the reduced training data,
 * and measuring its error either on the training data itself or on a
 * separate hold-out set.
 *
 * @param subset a bitset representing the attribute subset to be evaluated
 * @return the negated error rate (negated so that search methods, which
 *         maximise merit, prefer lower error)
 * @throws Exception if the subset could not be evaluated, or no hold-out
 *         set was supplied when one is required
 */
public double evaluateSubset (BitSet subset)
throws Exception {
// Work on copies so the caller's instance sets are never modified.
Instances reducedTrain = new Instances(m_trainingInstances);
Instances reducedTest = null;
if (!m_useTraining) {
if (m_holdOutInstances == null) {
throw new Exception("Must specify a set of hold out/test instances "
+"with -H");
}
reducedTest = new Instances(m_holdOutInstances);
}
// Collect the indices of the selected attributes, then append the class.
int selected = 0;
for (int a = subset.nextSetBit(0); a >= 0 && a < m_numAttribs;
a = subset.nextSetBit(a + 1)) {
selected++;
}
int[] keep = new int[selected + 1];
int pos = 0;
for (int a = subset.nextSetBit(0); a >= 0 && a < m_numAttribs;
a = subset.nextSetBit(a + 1)) {
keep[pos++] = a;
}
keep[pos] = m_classIndex;
// Remove with inverted selection retains exactly the listed attributes.
Remove keepFilter = new Remove();
keepFilter.setInvertSelection(true);
keepFilter.setAttributeIndicesArray(keep);
keepFilter.setInputFormat(reducedTrain);
reducedTrain = Filter.useFilter(reducedTrain, keepFilter);
if (!m_useTraining) {
reducedTest = Filter.useFilter(reducedTest, keepFilter);
}
// Train on the reduced data, then evaluate on train or hold-out set.
m_Classifier.buildClassifier(reducedTrain);
m_Evaluation = new Evaluation(reducedTrain);
if (m_useTraining) {
m_Evaluation.evaluateModel(m_Classifier, reducedTrain);
} else {
m_Evaluation.evaluateModel(m_Classifier, reducedTest);
}
// Classification uses error rate; regression uses mean absolute error.
double err = m_trainingInstances.classAttribute().isNominal()
? m_Evaluation.errorRate()
: m_Evaluation.meanAbsoluteError();
m_Evaluation = null;
// Negate: search methods need something to maximise.
return -err;
}
示例4: evaluateSubset
import weka.classifiers.Evaluation; //导入方法依赖的package包/类
/**
 * Evaluates a subset of attributes by repeated cross-validation (up to 5
 * repetitions) of the base classifier on the data reduced to the selected
 * attributes. Repetitions stop early once the error estimates stabilise
 * (see {@code repeat}).
 *
 * @param subset a bitset representing the attribute subset to be
 * evaluated
 * @return the merit: negated mean error for numeric classes, or
 *         (1 - mean error rate), i.e. accuracy, for nominal classes
 * @throws Exception if the subset could not be evaluated
 */
public double evaluateSubset (BitSet subset)
throws Exception {
double errorRate = 0;
// One slot per cross-validation repetition (max 5).
double[] repError = new double[5];
int numAttributes = 0;
int i, j;
Random Rnd = new Random(m_seed);
// Remove with inverted selection retains exactly the listed attributes.
Remove delTransform = new Remove();
delTransform.setInvertSelection(true);
// copy the instances, so the caller's data set is never modified
Instances trainCopy = new Instances(m_trainInstances);
// count attributes set in the BitSet
for (i = 0; i < m_numAttribs; i++) {
if (subset.get(i)) {
numAttributes++;
}
}
// set up an array of attribute indexes for the filter (+1 for the class)
int[] featArray = new int[numAttributes + 1];
for (i = 0, j = 0; i < m_numAttribs; i++) {
if (subset.get(i)) {
featArray[j++] = i;
}
}
featArray[j] = m_classIndex;
delTransform.setAttributeIndicesArray(featArray);
delTransform.setInputFormat(trainCopy);
trainCopy = Filter.useFilter(trainCopy, delTransform);
// at most 5 repetitions of cross-validation
for (i = 0; i < 5; i++) {
m_Evaluation = new Evaluation(trainCopy);
m_Evaluation.crossValidateModel(m_BaseClassifier, trainCopy, m_folds, Rnd);
repError[i] = m_Evaluation.errorRate();
// check on the standard deviation: if the estimate has stabilised,
// bump i so it equals the number of repetitions completed (used as
// the divisor below), then stop early
if (!repeat(repError, i + 1)) {
i++;
break;
}
}
// average the error over the repetitions actually performed
for (j = 0; j < i; j++) {
errorRate += repError[j];
}
errorRate /= (double)i;
m_Evaluation = null;
// numeric class: negated error (search maximises); nominal: accuracy
return m_trainInstances.classAttribute().isNumeric() ? -errorRate : 1.0 - errorRate;
}
示例5: getErrorRate
import weka.classifiers.Evaluation; //导入方法依赖的package包/类
/**
 * Computes the misclassification error of the current model on the given
 * instances (resubstitution-style: the model is evaluated, not retrained).
 *
 * @param data the set of instances to evaluate on
 * @return the error rate of this classifier on {@code data}
 * @throws Exception if something goes wrong during evaluation
 */
protected double getErrorRate(Instances data) throws Exception {
Evaluation evaluation = new Evaluation(data);
evaluation.evaluateModel(this, data);
return evaluation.errorRate();
}
示例6: getErrorRate
import weka.classifiers.Evaluation; //导入方法依赖的package包/类
/**
 * Computes the misclassification error of the current model on the given
 * instances (the model is evaluated as-is, not retrained).
 *
 * @param data the set of instances to evaluate on
 * @return the error rate of this classifier on {@code data}
 * @throws Exception if something goes wrong during evaluation
 */
protected double getErrorRate(Instances data) throws Exception {
Evaluation evaluation = new Evaluation(data);
evaluation.evaluateModel(this, data);
return evaluation.errorRate();
}