本文整理汇总了Python中weka.classifiers.Evaluation.f_measure方法的典型用法代码示例。如果您正苦于以下问题:Python Evaluation.f_measure方法的具体用法?Python Evaluation.f_measure怎么用?Python Evaluation.f_measure使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类weka.classifiers.Evaluation
的用法示例。
在下文中一共展示了Evaluation.f_measure方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main
# Required import: from weka.classifiers import Evaluation [as alias]
# Or: from weka.classifiers.Evaluation import f_measure [as alias]
# ......... part of the example code omitted here .........
# NOTE(review): this fragment is the tail of an example main(); the setup
# (JVM start, loading the diabetes dataset into `diabetes_data`, building
# `classifier`, and the opening of the PredictionOutput constructor whose
# trailing arguments appear on the next line) is not shown in this excerpt.
classname="weka.classifiers.evaluation.output.prediction.PlainText", options=["-distribution"])
# Run 10-fold cross-validation with a fixed seed, collecting per-instance
# predictions (with class distributions, per "-distribution") in pred_output.
evaluation = Evaluation(diabetes_data)
evaluation.crossvalidate_model(classifier, diabetes_data, 10, Random(42), output=pred_output)
# Standard textual reports: summary statistics, per-class details, confusion matrix.
print(evaluation.summary())
print(evaluation.class_details())
print(evaluation.matrix())
# Areas under the precision-recall and ROC curves (per class index / weighted).
print("areaUnderPRC/0: " + str(evaluation.area_under_prc(0)))
print("weightedAreaUnderPRC: " + str(evaluation.weighted_area_under_prc))
print("areaUnderROC/1: " + str(evaluation.area_under_roc(1)))
print("weightedAreaUnderROC: " + str(evaluation.weighted_area_under_roc))
# Cost statistics.
print("avgCost: " + str(evaluation.avg_cost))
print("totalCost: " + str(evaluation.total_cost))
# Confusion matrix and raw / percentage instance counts.
print("confusionMatrix: " + str(evaluation.confusion_matrix))
print("correct: " + str(evaluation.correct))
print("pctCorrect: " + str(evaluation.percent_correct))
print("incorrect: " + str(evaluation.incorrect))
print("pctIncorrect: " + str(evaluation.percent_incorrect))
print("unclassified: " + str(evaluation.unclassified))
print("pctUnclassified: " + str(evaluation.percent_unclassified))
# Prediction-region coverage statistics.
print("coverageOfTestCasesByPredictedRegions: " + str(evaluation.coverage_of_test_cases_by_predicted_regions))
print("sizeOfPredictedRegions: " + str(evaluation.size_of_predicted_regions))
# False/true negative/positive rates and counts (for class index 1 / weighted).
print("falseNegativeRate: " + str(evaluation.false_negative_rate(1)))
print("weightedFalseNegativeRate: " + str(evaluation.weighted_false_negative_rate))
print("numFalseNegatives: " + str(evaluation.num_false_negatives(1)))
print("trueNegativeRate: " + str(evaluation.true_negative_rate(1)))
print("weightedTrueNegativeRate: " + str(evaluation.weighted_true_negative_rate))
print("numTrueNegatives: " + str(evaluation.num_true_negatives(1)))
print("falsePositiveRate: " + str(evaluation.false_positive_rate(1)))
print("weightedFalsePositiveRate: " + str(evaluation.weighted_false_positive_rate))
print("numFalsePositives: " + str(evaluation.num_false_positives(1)))
print("truePositiveRate: " + str(evaluation.true_positive_rate(1)))
print("weightedTruePositiveRate: " + str(evaluation.weighted_true_positive_rate))
print("numTruePositives: " + str(evaluation.num_true_positives(1)))
# F-measure variants, precision and recall (for class index 1 / weighted).
print("fMeasure: " + str(evaluation.f_measure(1)))
print("weightedFMeasure: " + str(evaluation.weighted_f_measure))
print("unweightedMacroFmeasure: " + str(evaluation.unweighted_macro_f_measure))
print("unweightedMicroFmeasure: " + str(evaluation.unweighted_micro_f_measure))
print("precision: " + str(evaluation.precision(1)))
print("weightedPrecision: " + str(evaluation.weighted_precision))
print("recall: " + str(evaluation.recall(1)))
print("weightedRecall: " + str(evaluation.weighted_recall))
# Agreement and information-theoretic scores.
print("kappa: " + str(evaluation.kappa))
print("KBInformation: " + str(evaluation.kb_information))
print("KBMeanInformation: " + str(evaluation.kb_mean_information))
print("KBRelativeInformation: " + str(evaluation.kb_relative_information))
print("SFEntropyGain: " + str(evaluation.sf_entropy_gain))
print("SFMeanEntropyGain: " + str(evaluation.sf_mean_entropy_gain))
print("SFMeanPriorEntropy: " + str(evaluation.sf_mean_prior_entropy))
print("SFMeanSchemeEntropy: " + str(evaluation.sf_mean_scheme_entropy))
# Matthews correlation coefficient and remaining aggregate statistics.
print("matthewsCorrelationCoefficient: " + str(evaluation.matthews_correlation_coefficient(1)))
print("weightedMatthewsCorrelation: " + str(evaluation.weighted_matthews_correlation))
print("class priors: " + str(evaluation.class_priors))
print("numInstances: " + str(evaluation.num_instances))
# Error measures (absolute / squared, plain and relative to the prior).
print("meanAbsoluteError: " + str(evaluation.mean_absolute_error))
print("meanPriorAbsoluteError: " + str(evaluation.mean_prior_absolute_error))
print("relativeAbsoluteError: " + str(evaluation.relative_absolute_error))
print("rootMeanSquaredError: " + str(evaluation.root_mean_squared_error))
print("rootMeanPriorSquaredError: " + str(evaluation.root_mean_prior_squared_error))
print("rootRelativeSquaredError: " + str(evaluation.root_relative_squared_error))
# Dump the per-instance predictions collected during cross-validation.
print("prediction output:\n" + str(pred_output))
# Plot ROC and PRC curves over every class label; wait=False returns
# immediately instead of blocking until the plot window is closed.
plot_cls.plot_roc(
evaluation, title="ROC diabetes",
class_index=range(0, diabetes_data.class_attribute.num_values), wait=False)
plot_cls.plot_prc(
evaluation, title="PRC diabetes",
class_index=range(0, diabetes_data.class_attribute.num_values), wait=False)
示例2: classify_and_save
# Required import: from weka.classifiers import Evaluation [as alias]
# Or: from weka.classifiers.Evaluation import f_measure [as alias]
def classify_and_save(classifier, name, outfile):
    """Evaluate `classifier` on 500 per-game class attributes and save metrics as CSV.

    For each selected game column the classifier is trained on the training
    ARFF and evaluated on the test ARFF; accuracy plus per-class precision,
    recall and F1 are collected and written to `outfile`.

    Modernized from Python 2 to Python 3 (``f.next()`` -> ``next(f)``,
    ``xrange`` -> ``range``, print statements -> ``print()``, binary ->
    text-mode CSV output) for consistency with the rest of the file.

    :param classifier: a weka Classifier instance to train and evaluate
    :param name: human-readable algorithm name recorded in the CSV
    :param outfile: path of the CSV file to write
    """
    # Fixed seed so the random sample of games is reproducible across runs.
    random.seed("ML349")
    csv_header = [
        "Game Name",
        "SteamID",
        "Algorithm",
        "Number Players",
        "%Players of Training Set",
        "Accuracy",
        "Precision (0)",
        "Recall (0)",
        "F1 (0)",
        "Precision (1)",
        "Recall (1)",
        "F1 (1)"
    ]
    game_results = []
    # The first line of the dump is the comma-separated list of game columns.
    with open("data/games_by_username_all.csv", "r") as f:
        game_list = next(f).rstrip().split(",")
    loader = Loader(classname="weka.core.converters.ArffLoader")
    train = loader.load_file("data/final_train.arff")
    test = loader.load_file("data/final_test.arff")
    count = 0
    # Evaluate the first 50 games plus a random sample of 450 of the
    # remaining ones: 500 evaluations total.
    for i in itertools.chain(range(0, 50), random.sample(range(50, len(game_list)), 450)):
        train.class_index = i
        test.class_index = i
        count += 1
        classifier.build_classifier(train)
        evaluation = Evaluation(train)
        evaluation.test_model(classifier, test)
        confusion = evaluation.confusion_matrix
        # Row 1 of the confusion matrix holds the instances whose true class
        # is 1, so its sum is the number of players of this game in the test set.
        num_players = sum(confusion[1])
        # The class attribute's repr is "@attribute <steam_id> ..."; token 1
        # is the game's Steam ID.
        steam_id = repr(train.class_attribute).split(" ")[1]
        result = [
            game_list[i],
            steam_id,
            name,
            int(num_players),
            num_players / 1955,  # 1955 is presumably the training-set size — TODO confirm
            evaluation.percent_correct,
            evaluation.precision(0),
            evaluation.recall(0),
            evaluation.f_measure(0),
            evaluation.precision(1),
            evaluation.recall(1),
            evaluation.f_measure(1)
        ]
        game_results.append(result)
        print("\nResult #{2}/500 for {0} (SteamID {1}):".format(game_list[i], steam_id, count))
        print(evaluation.summary())
    # Python 3 csv: open in text mode with newline="" (the original used "wb",
    # which only worked on Python 2).
    with open(outfile, "w", newline="") as f:
        csv_writer = csv.writer(f, delimiter=",")
        csv_writer.writerow(csv_header)
        csv_writer.writerows(game_results)