This article collects typical usage examples of the Python function sklearn.metrics.cohen_kappa_score. If you are unsure what metrics.cohen_kappa_score does or how to use it, the curated code samples below should help; you can also browse the containing module, sklearn.metrics, for related usage examples.
The following 15 code examples of metrics.cohen_kappa_score are sorted by popularity by default.
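As a baseline before the collected examples, here is a minimal self-contained call showing the basic signature (the label values are made up for illustration):

from sklearn.metrics import cohen_kappa_score

y1 = [0, 1, 1, 2, 2]   # labels from annotator/model 1
y2 = [0, 1, 2, 2, 2]   # labels from annotator/model 2
print(cohen_kappa_score(y1, y2))                       # unweighted kappa
print(cohen_kappa_score(y1, y2, weights='quadratic'))  # quadratic-weighted kappa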
Example 1: inference_validation
# Imports needed by this snippet
import numpy as np
import keras
from sklearn.metrics import cohen_kappa_score
def inference_validation(self, test_X, test_y, model_save_dest, n_class=5, folds=5):
    print(test_X.shape, test_y.shape)
    pred = np.zeros(test_X.shape[0])
    # Average the per-fold model predictions
    for k in range(1, folds + 1):
        print(f'running inference on fold: {k}')
        model = keras.models.load_model(model_save_dest[k])
        pred = pred + model.predict(test_X)[:, 0]
    print(pred.shape)
    print(pred)
    pred = pred / float(folds)
    pred_class = np.round(pred)
    pred_class = np.array(pred_class, dtype=int)
    # Clamp rounded predictions to the valid label range [0, 4]
    pred_class = list(map(lambda x: 4 if x > 4 else x, pred_class))
    pred_class = list(map(lambda x: 0 if x < 0 else x, pred_class))
    act_class = test_y
    accuracy = np.sum(pred_class == act_class) * 1.0 / len(test_X)
    kappa = cohen_kappa_score(pred_class, act_class, weights='quadratic')
    return pred_class, accuracy, kappa
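The two map/lambda calls that clamp the predictions can be written more compactly with np.clip; a minimal equivalent, assuming the same five-class setup:

import numpy as np

pred_class = np.clip(np.round(pred).astype(int), 0, n_class - 1)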
Example 2: class_wise_kappa
# Imports needed by this snippet
import numpy as np
def class_wise_kappa(true, pred, n_classes=None, ignore_zero=True):
    from sklearn.metrics import cohen_kappa_score
    if n_classes is None:
        classes = np.unique(true)
    else:
        classes = np.arange(max(2, n_classes))
    # Ignore background class?
    if ignore_zero:
        classes = classes[np.where(classes != 0)]
    # Calculate kappa for all targets
    kappa_scores = np.empty(shape=classes.shape, dtype=np.float32)
    kappa_scores.fill(np.nan)
    for idx, _class in enumerate(classes):
        s1 = true == _class
        s2 = pred == _class
        if np.any(s1) or np.any(s2):
            kappa_scores[idx] = cohen_kappa_score(s1, s2)
    return kappa_scores
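A quick sanity check of class_wise_kappa on toy arrays (values made up; label 0 is treated as background and skipped by default):

import numpy as np

true = np.array([0, 1, 1, 2, 2, 2])
pred = np.array([0, 1, 2, 2, 2, 1])
print(class_wise_kappa(true, pred))  # one one-vs-rest kappa per non-zero class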
Example 3: toy_cohens_kappa
# Imports needed by this snippet
from nltk.metrics import agreement  # assumed source of AnnotationTask
from sklearn.metrics import cohen_kappa_score
def toy_cohens_kappa():
    # rater1 = [1, 1, 1, 0]
    # rater2 = [1, 1, 0, 0]
    # rater3 = [0, 1, 1]
    rater1 = ['s', 's', 's', 'g', 'u']
    rater2 = ['s', 's', 'g', 'g', 's']
    # Each record is [coder, item, label], the format AnnotationTask expects
    taskdata = [[0, str(i), str(rater1[i])] for i in range(0, len(rater1))] + [
        [1, str(i), str(rater2[i])] for i in range(0, len(rater2))]  # + [
        # [2, str(i), str(rater3[i])] for i in range(0, len(rater3))]
    print(taskdata)
    ratingtask = agreement.AnnotationTask(data=taskdata)
    print("kappa " + str(ratingtask.kappa()))
    print("fleiss " + str(ratingtask.multi_kappa()))
    print("alpha " + str(ratingtask.alpha()))
    print("scotts " + str(ratingtask.pi()))
    print("sklearn kappa " + str(cohen_kappa_score(rater1, rater2)))
Example 4: predict
# Imports needed by this snippet
import numpy as np
from sklearn.metrics import precision_score, recall_score, f1_score, cohen_kappa_score
def predict(self):
    """
    Predicts the model output and computes precision, recall, F1 score,
    and Cohen's kappa (unweighted and quadratic-weighted).
    INPUT
    model: Model trained in Keras
    OUTPUT
    Precision, recall, F1 score, Cohen's kappa, quadratic-weighted kappa
    """
    predictions = self.model.predict(self.X_test)
    predictions = np.argmax(predictions, axis=1)
    # predictions[predictions >= 1] = 1  # Remove when non-binary classifier
    self.y_test = np.argmax(self.y_test, axis=1)
    precision = precision_score(self.y_test, predictions, average="micro")
    recall = recall_score(self.y_test, predictions, average="micro")
    f1 = f1_score(self.y_test, predictions, average="micro")
    cohen_kappa = cohen_kappa_score(self.y_test, predictions)
    # `kappa` is an external quadratic-kappa helper, not sklearn's function
    quad_kappa = kappa(self.y_test, predictions, weights='quadratic')
    return precision, recall, f1, cohen_kappa, quad_kappa
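If the external kappa helper is unavailable, the quadratic-weighted score can be computed with sklearn alone; a drop-in equivalent for that line:

from sklearn.metrics import cohen_kappa_score

quad_kappa = cohen_kappa_score(y_test, predictions, weights='quadratic')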
Example 5: evaluate
# Imports needed by this snippet
import numpy as np
import torch
from sklearn.metrics import classification_report, cohen_kappa_score
def evaluate(source, source_batch):
    # `model`, `criterion`, `args`, `classes`, `get_batch`, and
    # `repackage_hidden` are defined elsewhere in the training script.
    # Turn on evaluation mode, which disables dropout.
    model.eval()
    total_loss = 0
    y_true = []  # true labels
    y_pred = []  # predicted labels
    hidden = model.init_hidden(args.bsz)
    for i in range(len(source_batch)):
        data, targets = get_batch(source, source_batch, i)
        output, hidden = model(data, hidden)
        total_loss += len(targets) * criterion(output[-1], targets).data
        _, predicted = torch.max(output[-1], 1)
        y_true.extend(targets.tolist())
        y_pred.extend(predicted.tolist())
        hidden = repackage_hidden(hidden)
    val_loss = total_loss.item() / np.size(source_batch)
    # Build a report for the classifier
    report = classification_report(y_true, y_pred, target_names=classes)
    kappa = cohen_kappa_score(y_true, y_pred)
    return val_loss, kappa, report

# Loop over epochs
Example 6: evaluate
# Imports needed by this snippet
import numpy as np
import torch
from sklearn.metrics import classification_report, cohen_kappa_score
def evaluate(source, source_batch):
    # Same as Example 5, but for a model without a recurrent hidden state.
    # Turn on evaluation mode, which disables dropout.
    model.eval()
    total_loss = 0
    y_true = []  # true labels
    y_pred = []  # predicted labels
    for i in range(len(source_batch)):
        data, targets = get_batch(source, source_batch, i)
        outputs = model(data)
        total_loss += len(targets) * criterion(outputs, targets).data
        _, predicted = torch.max(outputs, 1)
        y_true.extend(targets.tolist())
        y_pred.extend(predicted.tolist())
    val_loss = total_loss.item() / np.size(source_batch)
    # Build a report for the classifier
    report = classification_report(y_true, y_pred, target_names=classes)
    kappa = cohen_kappa_score(y_true, y_pred)
    return val_loss, kappa, report

# Loop over epochs
Example 7: quadratic_weighted_kappa
# Imports needed by this snippet
import numpy as np
import torch
from sklearn import metrics
def quadratic_weighted_kappa(y_pred, y_true):
    # Accept either torch tensors or numpy arrays
    if torch.is_tensor(y_pred):
        y_pred = y_pred.data.cpu().numpy()
    if torch.is_tensor(y_true):
        y_true = y_true.data.cpu().numpy()
    # y_pred is expected to be 2-D: (N, 1) regression outputs or (N, C) class scores
    if y_pred.shape[1] == 1:
        y_pred = y_pred[:, 0]
    else:
        y_pred = np.argmax(y_pred, axis=1)
    return metrics.cohen_kappa_score(y_pred, y_true, weights='quadratic')
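An illustrative call with made-up tensors (assuming a three-class model emitting logits):

import torch

logits = torch.tensor([[2.0, 0.1, 0.3],
                       [0.2, 1.5, 0.1],
                       [0.1, 0.2, 3.0]])
labels = torch.tensor([0, 1, 1])
print(quadratic_weighted_kappa(logits, labels))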
Example 8: kappa_score
# Imports needed by this snippet
from sklearn import metrics
def kappa_score(self):
    return metrics.cohen_kappa_score(self.conditions, self.predictions)
Example 9: test_cohen_kappa
# Imports needed by this snippet
import numpy as np
from numpy.testing import assert_almost_equal, assert_equal  # one possible source of the assert helpers
from sklearn.metrics import cohen_kappa_score
def test_cohen_kappa():
    # These label vectors reproduce the contingency matrix from Artstein and
    # Poesio (2008), Table 1: np.array([[20, 20], [10, 50]]).
    y1 = np.array([0] * 40 + [1] * 60)
    y2 = np.array([0] * 20 + [1] * 20 + [0] * 10 + [1] * 50)
    kappa = cohen_kappa_score(y1, y2)
    assert_almost_equal(kappa, .348, decimal=3)
    assert_equal(kappa, cohen_kappa_score(y2, y1))
    # Add spurious labels and ignore them.
    y1 = np.append(y1, [2] * 4)
    y2 = np.append(y2, [2] * 4)
    assert_equal(cohen_kappa_score(y1, y2, labels=[0, 1]), kappa)
    assert_almost_equal(cohen_kappa_score(y1, y1), 1.)
    # Multiclass example: Artstein and Poesio, Table 4.
    y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
    y2 = np.array([0] * 52 + [1] * 32 + [2] * 16)
    assert_almost_equal(cohen_kappa_score(y1, y2), .8013, decimal=4)
    # Weighting example: none, linear, quadratic.
    y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
    y2 = np.array([0] * 50 + [1] * 40 + [2] * 10)
    assert_almost_equal(cohen_kappa_score(y1, y2), .9315, decimal=4)
    assert_almost_equal(cohen_kappa_score(y1, y2, weights="linear"), 0.9412, decimal=4)
    assert_almost_equal(cohen_kappa_score(y1, y2, weights="quadratic"), 0.9541, decimal=4)
Example 10: inference_validation
# Imports needed by this snippet
import numpy as np
import keras
from sklearn.metrics import cohen_kappa_score
def inference_validation(self, test_X, test_y, model_save_dest, n_class=5, folds=5):
    pred = np.zeros((len(test_X), n_class))
    # Average the per-fold class probabilities
    for k in range(1, folds + 1):
        model = keras.models.load_model(model_save_dest[k])
        pred = pred + model.predict(test_X)
    pred = pred / (1.0 * folds)
    pred_class = np.argmax(pred, axis=1)
    act_class = np.argmax(test_y, axis=1)
    accuracy = np.sum(pred_class == act_class) * 1.0 / len(test_X)
    kappa = cohen_kappa_score(pred_class, act_class, weights='quadratic')
    return pred_class, accuracy, kappa
Example 11: main
# Imports needed by this snippet
import time
import numpy as np
from pathlib import Path
from sklearn.metrics import cohen_kappa_score
def main(self):
    start_time = time.time()
    print('Data Processing..')
    self.num_class = len(self.class_folders)
    model_to_store_path, class_dict = self.train_model(
        self.train_dir, self.val_dir, n_fold=self.folds, batch_size=self.batch_size,
        epochs=self.epochs, dim=self.dim, lr=self.lr, model=self.model)
    print("Model saved to dest:", model_to_store_path)
    # Validation: evaluate results
    folder_path = Path(f'{self.val_dir}')
    val_results_df = self.inference(model_to_store_path, folder_path, class_dict, self.dim)
    val_results_path = f'{self.outdir}/val_results.csv'
    val_results_df.to_csv(val_results_path, index=False)
    print(f'Validation results saved at: {val_results_path}')
    pred_class_index = np.array(val_results_df['pred_class_index'].values)
    actual_class_index = np.array(val_results_df['actual_class_index'].values)
    print(pred_class_index)
    print(actual_class_index)
    accuracy = np.mean(actual_class_index == pred_class_index)
    kappa = cohen_kappa_score(pred_class_index, actual_class_index, weights='quadratic')
    print(f'Validation Accuracy: {accuracy}')
    print(f'Validation Quadratic Kappa Score: {kappa}')
    print('Processing Time:', time.time() - start_time, 'secs')
Example 12: reports
# Imports needed by this snippet
import numpy as np
from sklearn.metrics import (classification_report, accuracy_score,
                             confusion_matrix, cohen_kappa_score)
def reports(y_pred, y_test):
    classification = classification_report(y_test, y_pred)
    oa = accuracy_score(y_test, y_pred)  # overall accuracy
    confusion = confusion_matrix(y_test, y_pred)
    each_acc, aa = AA_andEachClassAccuracy(confusion)  # per-class and average accuracy
    kappa = cohen_kappa_score(y_test, y_pred)
    # Return all scores as percentages
    return classification, confusion, np.array([oa, aa, kappa] + list(each_acc)) * 100
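AA_andEachClassAccuracy is a helper from the surrounding project; a plausible implementation, assuming the usual definition of per-class accuracy as the confusion-matrix diagonal over row sums:

import numpy as np

def AA_andEachClassAccuracy(confusion_matrix):
    # Per-class accuracy: correct predictions / true instances of each class
    each_acc = np.diag(confusion_matrix) / confusion_matrix.sum(axis=1)
    return each_acc, np.mean(each_acc)  # AA = average (mean per-class) accuracy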
Example 13: calculate_metrics
# Imports needed by this snippet
import numpy as np
from sklearn.metrics import (cohen_kappa_score, accuracy_score,
                             f1_score, roc_auc_score)
def calculate_metrics(val_results_dict, y_pred, y_val, suffix=""):
    tmp_kappa_list = []
    tmp_accur_list = []
    tmp_f1_list = []
    tmp_cm_list = []
    y_val = utils.to_categorical(y_val)[:, -1]  # utils is presumably keras.utils
    # Sweep binarization thresholds from 0.1 to 0.9 in steps of 0.05
    for each_threshold in np.linspace(0.1, 0.9, 17):
        tmp_pred = [1 if _ >= each_threshold else 0 for _ in y_pred]
        tmp_kappa_list.append(cohen_kappa_score(tmp_pred, y_val))
        tmp_accur_list.append(accuracy_score(tmp_pred, y_val))
        tmp_f1_list.append(f1_score(tmp_pred, y_val))
        tmp_cm_list.append(competitionMetric(tmp_pred, y_val))  # project-specific metric
    auroc = round(roc_auc_score(y_val, y_pred), 3)
    # Report the best score over the threshold sweep for each metric
    kappa = round(np.max(tmp_kappa_list), 3)
    accur = round(np.max(tmp_accur_list), 3)
    cm = round(np.max(tmp_cm_list), 3)
    f1 = round(np.max(tmp_f1_list), 3)
    val_results_dict["auc{}".format(suffix)].append(auroc)
    val_results_dict["kap{}".format(suffix)].append(kappa)
    val_results_dict["acc{}".format(suffix)].append(accur)
    val_results_dict["f1{}".format(suffix)].append(f1)
    val_results_dict["cm{}".format(suffix)].append(cm)
    # Record the threshold at which each metric peaked
    thresholds = np.linspace(0.1, 0.9, 17)
    kappa_threshold = thresholds[tmp_kappa_list.index(np.max(tmp_kappa_list))]
    accur_threshold = thresholds[tmp_accur_list.index(np.max(tmp_accur_list))]
    f1_threshold = thresholds[tmp_f1_list.index(np.max(tmp_f1_list))]
    cm_threshold = thresholds[tmp_cm_list.index(np.max(tmp_cm_list))]
    val_results_dict["threshold_kap{}".format(suffix)].append(round(kappa_threshold, 2))
    val_results_dict["threshold_acc{}".format(suffix)].append(round(accur_threshold, 2))
    val_results_dict["threshold_f1{}".format(suffix)].append(round(f1_threshold, 2))
    val_results_dict["threshold_cm{}".format(suffix)].append(round(cm_threshold, 2))
    return val_results_dict
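This function assumes val_results_dict already maps each key to a list; a collections.defaultdict(list) satisfies that cleanly (a usage sketch, with y_pred and y_val standing in for your own validation arrays):

from collections import defaultdict

val_results_dict = defaultdict(list)
val_results_dict = calculate_metrics(val_results_dict, y_pred, y_val)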
Example 14: _kappa_score
# Imports needed by this snippet
from sklearn.metrics import cohen_kappa_score
def _kappa_score(self):
    png_file = self.scalars(
        {'kappa_score': cohen_kappa_score(self.targets, self.predicts, weights='quadratic')},
        'kappa_score'
    )
    if png_file:
        self.update_sheet('kappa_score', {'raw': png_file, 'processor': 'upload_image'})
Example 15: print_metrics_regression
# Imports needed by this snippet
import numpy as np
from sklearn import metrics
def print_metrics_regression(y_true, predictions, verbose=1):
    predictions = np.array(predictions)
    predictions = np.maximum(predictions, 0).flatten()
    y_true = np.array(y_true)
    # Bin the continuous values so agreement can be scored with kappa
    y_true_bins = [get_bin_custom(x, CustomBins.nbins) for x in y_true]
    prediction_bins = [get_bin_custom(x, CustomBins.nbins) for x in predictions]
    cf = metrics.confusion_matrix(y_true_bins, prediction_bins)
    if verbose:
        print("Custom bins confusion matrix:")
        print(cf)
    kappa = metrics.cohen_kappa_score(y_true_bins, prediction_bins,
                                      weights='linear')
    mad = metrics.mean_absolute_error(y_true, predictions)
    mse = metrics.mean_squared_error(y_true, predictions)
    mape = mean_absolute_percentage_error(y_true, predictions)
    if verbose:
        print("Mean absolute deviation (MAD) = {}".format(mad))
        print("Mean squared error (MSE) = {}".format(mse))
        print("Mean absolute percentage error (MAPE) = {}".format(mape))
        print("Cohen kappa score = {}".format(kappa))
    return {"mad": mad,
            "mse": mse,
            "mape": mape,
            "kappa": kappa}