本文整理汇总了Python中weka.classifiers.Evaluation.matrix方法的典型用法代码示例。如果您正苦于以下问题:Python Evaluation.matrix方法的具体用法?Python Evaluation.matrix怎么用?Python Evaluation.matrix使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 weka.classifiers.Evaluation 的用法示例。
在下文中一共展示了Evaluation.matrix方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: print
# 需要导入模块: from weka.classifiers import Evaluation [as 别名]
# 或者: from weka.classifiers.Evaluation import matrix [as 别名]
# Cross-validate a NaiveBayes classifier on the nominal weather dataset,
# print the summary/confusion matrix/per-instance predictions, and plot ROC.
# Locate the MOOC data directory, falling back to ./data beside the script.
data_dir = os.environ.get("WEKAMOOC_DATA", os.path.join(".", "data"))

import weka.core.jvm as jvm
from weka.core.converters import Loader
from weka.classifiers import Classifier, Evaluation, PredictionOutput
from weka.core.classes import Random
import weka.plot.classifiers as plc

jvm.start()

# load weather.nominal and declare the last attribute as the class
fname = data_dir + os.sep + "weather.nominal.arff"
print("\nLoading dataset: %s\n" % fname)
arff_loader = Loader(classname="weka.core.converters.ArffLoader")
dataset = arff_loader.load_file(fname)
dataset.class_is_last()

# 10-fold cross-validation of NaiveBayes, collecting per-instance
# predictions (with class distributions) as plain text
naive_bayes = Classifier(classname="weka.classifiers.bayes.NaiveBayes")
pred_output = PredictionOutput(
    classname="weka.classifiers.evaluation.output.prediction.PlainText",
    options=["-distribution"])
evaluation = Evaluation(dataset)
evaluation.crossvalidate_model(naive_bayes, dataset, 10, Random(1), pred_output)
print(evaluation.summary())
print(evaluation.matrix())
print(pred_output)

# show the ROC curve and block until the window is closed
plc.plot_roc(evaluation, wait=True)
jvm.stop()
示例2: Classifier
# 需要导入模块: from weka.classifiers import Evaluation [as 别名]
# 或者: from weka.classifiers.Evaluation import matrix [as 别名]
# Build a default C4.5 (J48) tree on segment-challenge and report the
# confusion matrix and recognition rate on both the train and the test set.
trainData = loader.load_file('segment-challenge.arff')
trainData.class_is_last()
testData = loader.load_file('segment-test.arff')
testData.class_is_last()

# Default C4.5 tree
classifier = Classifier(classname="weka.classifiers.trees.J48")
# Search for the best parameters and build a classifier with them
classifier.build_classifier(trainData)

print("\n\n=========== Classifier information ================\n\n")
print(classifier.options)
print(classifier)

# evaluate on the training data first, then on the held-out test data;
# both passes print the same report, so drive them from one loop
for banner, dataset, tag in (
        ("\n\n=========== Train results ================\n\n",
         trainData, "Train recognition: %0.2f%%"),
        ("\n\n=========== Test results ================\n\n",
         testData, "Test recognition: %0.2f%%")):
    print(banner)
    evaluation = Evaluation(dataset)
    evaluation.test_model(classifier, dataset)
    print(classifier.to_commandline())
    print(evaluation.matrix())
    print(tag % evaluation.percent_correct)
jvm.stop()
示例3: main
# 需要导入模块: from weka.classifiers import Evaluation [as 别名]
# 或者: from weka.classifiers.Evaluation import matrix [as 别名]
#.........这里部分代码省略.........
# Excerpt from a larger `main()` (earlier part omitted in this view):
# demonstrates building meta-classifiers, then cross-validating NaiveBayes
# on the diabetes dataset and dumping every evaluation statistic.
print(meta.to_commandline())
# direct FilteredClassifier instantiation
print("direct FilteredClassifier instantiation")
meta = FilteredClassifier()
meta.classifier = Classifier(classname="weka.classifiers.functions.LinearRegression")
# remove the first attribute before the base classifier sees the data
flter = Filter("weka.filters.unsupervised.attribute.Remove")
flter.options = ["-R", "first"]
meta.filter = flter
print(meta.to_commandline())
# generic Vote
print("generic Vote instantiation")
meta = MultipleClassifiersCombiner(classname="weka.classifiers.meta.Vote")
# ensemble members for the Vote combiner
classifiers = [
    Classifier(classname="weka.classifiers.functions.SMO"),
    Classifier(classname="weka.classifiers.trees.J48")
]
meta.classifiers = classifiers
print(meta.to_commandline())
# cross-validate nominal classifier
helper.print_title("Cross-validating NaiveBayes on diabetes")
diabetes_file = helper.get_data_dir() + os.sep + "diabetes.arff"
helper.print_info("Loading dataset: " + diabetes_file)
loader = Loader("weka.core.converters.ArffLoader")
diabetes_data = loader.load_file(diabetes_file)
diabetes_data.class_is_last()
classifier = Classifier(classname="weka.classifiers.bayes.NaiveBayes")
# capture per-instance predictions (with class distributions) as plain text
pred_output = PredictionOutput(
    classname="weka.classifiers.evaluation.output.prediction.PlainText", options=["-distribution"])
evaluation = Evaluation(diabetes_data)
evaluation.crossvalidate_model(classifier, diabetes_data, 10, Random(42), output=pred_output)
print(evaluation.summary())
print(evaluation.class_details())
print(evaluation.matrix())
# per-class metrics take a 0-based class index; "weighted" variants average
# over all classes weighted by class frequency
print("areaUnderPRC/0: " + str(evaluation.area_under_prc(0)))
print("weightedAreaUnderPRC: " + str(evaluation.weighted_area_under_prc))
print("areaUnderROC/1: " + str(evaluation.area_under_roc(1)))
print("weightedAreaUnderROC: " + str(evaluation.weighted_area_under_roc))
print("avgCost: " + str(evaluation.avg_cost))
print("totalCost: " + str(evaluation.total_cost))
print("confusionMatrix: " + str(evaluation.confusion_matrix))
print("correct: " + str(evaluation.correct))
print("pctCorrect: " + str(evaluation.percent_correct))
print("incorrect: " + str(evaluation.incorrect))
print("pctIncorrect: " + str(evaluation.percent_incorrect))
print("unclassified: " + str(evaluation.unclassified))
print("pctUnclassified: " + str(evaluation.percent_unclassified))
print("coverageOfTestCasesByPredictedRegions: " + str(evaluation.coverage_of_test_cases_by_predicted_regions))
print("sizeOfPredictedRegions: " + str(evaluation.size_of_predicted_regions))
print("falseNegativeRate: " + str(evaluation.false_negative_rate(1)))
print("weightedFalseNegativeRate: " + str(evaluation.weighted_false_negative_rate))
print("numFalseNegatives: " + str(evaluation.num_false_negatives(1)))
print("trueNegativeRate: " + str(evaluation.true_negative_rate(1)))
print("weightedTrueNegativeRate: " + str(evaluation.weighted_true_negative_rate))
print("numTrueNegatives: " + str(evaluation.num_true_negatives(1)))
print("falsePositiveRate: " + str(evaluation.false_positive_rate(1)))
print("weightedFalsePositiveRate: " + str(evaluation.weighted_false_positive_rate))
print("numFalsePositives: " + str(evaluation.num_false_positives(1)))
print("truePositiveRate: " + str(evaluation.true_positive_rate(1)))
print("weightedTruePositiveRate: " + str(evaluation.weighted_true_positive_rate))
print("numTruePositives: " + str(evaluation.num_true_positives(1)))
print("fMeasure: " + str(evaluation.f_measure(1)))
print("weightedFMeasure: " + str(evaluation.weighted_f_measure))
print("unweightedMacroFmeasure: " + str(evaluation.unweighted_macro_f_measure))
print("unweightedMicroFmeasure: " + str(evaluation.unweighted_micro_f_measure))
print("precision: " + str(evaluation.precision(1)))
示例4: Loader
# 需要导入模块: from weka.classifiers import Evaluation [as 别名]
# 或者: from weka.classifiers.Evaluation import matrix [as 别名]
# Evaluate NaiveBayes on the grid training set with a 66% train/test split
# and print the evaluation summary plus the confusion matrix.
from utilities import *
import weka.core.jvm as jvm
from weka.core.converters import Loader, Saver
from weka.classifiers import Classifier, Evaluation
from weka.core.classes import Random

jvm.start(max_heap_size="3072m")

loader = Loader(classname="weka.core.converters.ArffLoader")
data = loader.load_file("./Dataset/trainGrid.arff")
data.class_is_last()

#classifier = Classifier(classname="weka.classifiers.trees.J48", options=["-C", "0.25", "-M", "2"])
classifier = Classifier(classname="weka.classifiers.bayes.NaiveBayes")
evaluation = Evaluation(data)
#evaluation.crossvalidate_model(classifier, data, 10, Random(42))
# 66% of the rows train the model; the remainder is the test set
evaluation.evaluate_train_test_split(classifier, data, 66, Random(42))
res = evaluation.summary()
res += "\n" + evaluation.matrix()
#f = open('./Dataset/resultsGrid.txt', 'w')
#f.write(res)
# FIX: was the Python-2-only statement `print res`, which is a SyntaxError
# on Python 3 and inconsistent with the print() calls used elsewhere here.
print(res)
jvm.stop()
示例5: process_classifier
# 需要导入模块: from weka.classifiers import Evaluation [as 别名]
# 或者: from weka.classifiers.Evaluation import matrix [as 别名]
def process_classifier(runType, cls, occ, devList, fewCats, label, subtract):
    """Run the Weka classifier `cls` for the device category `label`.

    For runType == 'unseen' this performs leave-one-device-out evaluation:
    for each device in devList it trains on all other (non-rejected) devices'
    rows fetched from MySQL, classifies the held-out device's rows, and
    accumulates counts in a true-label -> predicted-label confusion matrix.

    NOTE(review): depends on module-level names defined elsewhere in this
    file (aws_c, total_conf, item_start, totalDevs, loader, master_saveDir,
    arff_idcol, gen_arff, mv, chop_microseconds). The `subtract` parameter
    is not used in the visible portion of the body — confirm against the
    omitted remainder. Indentation reconstructed from a whitespace-mangled
    paste; verify against the original source.
    """
    # running totals shared across calls
    global devCount
    global save_orig
    global save_subtract
    conf_matrix = {}  # conf_matrix[true_label][predicted_label] -> count
    # choose the source table depending on whether occupancy vectors are used
    if occ:
        table = 'temp_dat_occ_vector_occ'
    else:
        table = 'temp_dat_occ_vector_2'
    writeStr = '=========================================================================================\n' + \
        'Running ' + runType + ' classifier for \'' + label + '\''
    sys.stdout.write(writeStr + '\r')
    total_conf.write(writeStr + '\n')
    sys.stdout.flush()
    if runType == 'unseen':
        i = 0
        indiv_results = {}
        # leave-one-device-out: hold out each device in turn
        for dev in devList:
            devCount += 1
            # crude ETA from the average time per device processed so far
            remaining = chop_microseconds(((datetime.utcnow() - item_start)*totalDevs/devCount)-(datetime.utcnow() - item_start))
            sys.stdout.write('Running ' + runType + ' classifier for \'' + label + '\' - ' + \
                str(round(100*float(devCount)/totalDevs,2)) + ' pct complete (' + str(remaining) + ' remaining) \r')
            sys.stdout.flush()
            # training rows: every accepted device except the held-out one
            if fewCats:
                aws_c.execute('select * from ' + table + ' ' \
                    'where duty!=0 and deviceMAC not in (select * from vector_reject) ' \
                    'and deviceMAC in (select * from id_fewcats_mac) '
                    'and deviceMAC!=\'' + dev + '\';')
            else:
                aws_c.execute('select * from ' + table + ' ' \
                    'where duty!=0 and deviceMAC not in (select * from vector_reject) ' \
                    'and deviceMAC!=\'' + dev + '\';')
            results = aws_c.fetchall()
            # Generate type list
            # builds an ARFF nominal-attribute spec like {"a","b",...} from
            # the distinct values of each row's last column
            total_types = ['{']
            for data in results:
                if(data[-1] not in total_types):
                    total_types.append('\"')
                    total_types.append(data[-1])
                    total_types.append('\"')
                    total_types.append(',')
            total_types[-1] = '}'  # replace the trailing comma with the closing brace
            typeStr = ''.join(total_types)
            arff_train = label + '_' + dev + '_train'
            arff_test = label + '_' + dev + '_test'
            gen_arff(arff_train, typeStr, results, occ, arff_idcol)
            # test rows: only the held-out device
            if fewCats:
                aws_c.execute('select * from ' + table + ' ' \
                    'where duty!=0 and deviceMAC not in (select * from vector_reject) ' \
                    'and deviceMAC in (select * from id_fewcats_mac) '
                    'and deviceMAC=\'' + dev + '\';')
            else:
                aws_c.execute('select * from ' + table + ' ' \
                    'where duty!=0 and deviceMAC not in (select * from vector_reject) ' \
                    'and deviceMAC=\'' + dev + '\';')
            gen_arff(arff_test, typeStr, aws_c.fetchall(), occ, arff_idcol)
            # load the generated ARFF files, then archive them out of the way
            train = loader.load_file(arff_train + '.arff')
            train.class_is_last()
            mv(arff_train + '.arff', master_saveDir)
            test = loader.load_file(arff_test + '.arff')
            test.class_is_last()
            mv(arff_test + '.arff', master_saveDir)
            cls.build_classifier(train)
            # output predictions
            testName = ''
            predictions = []
            for index, inst in enumerate(test):
                # sanity check: all instances of the held-out device must
                # share one true class; bail out hard if they do not
                if testName != '':
                    if testName != inst.get_string_value(inst.class_index):
                        print(str(testName) + ' ' + str(inst.get_string_value(inst.class_index)))
                        exit()
                    else:
                        testName = inst.get_string_value(inst.class_index)
                else:
                    testName = inst.get_string_value(inst.class_index)
                if testName not in conf_matrix:
                    conf_matrix[testName] = {}
                pred = cls.classify_instance(inst)
                # dist = cls.distribution_for_instance(inst)
                # if(pred == inst.get_value(inst.class_index)):
                predName = inst.class_attribute.value(int(pred))
                if predName not in conf_matrix[testName]:
                    conf_matrix[testName][predName] = 0
                conf_matrix[testName][predName] += 1
                predictions.append(predName)
            total = 0
#.........这里部分代码省略.........