This article collects typical usage examples of Python's sklearn.metrics.log_loss. If you have been wondering what metrics.log_loss does, how to call it, or where to find working examples, the curated snippets below should help. You can also explore the other members of the sklearn.metrics module.
Below are 15 code examples of metrics.log_loss, sorted by popularity by default.
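Before the collected examples, here is a minimal, self-contained sketch of the basic call; the toy labels and probabilities are illustrative only:

from sklearn.metrics import log_loss

y_true = [0, 1, 1, 0]              # ground-truth class labels
y_prob = [0.1, 0.8, 0.7, 0.4]      # predicted probability of the positive class
print(log_loss(y_true, y_prob))    # lower is better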
Example 1: multi_class_classification
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import log_loss [as alias]
def multi_class_classification(data_X, data_Y):
    '''
    calculate multi-class classification and return related evaluation metrics
    '''
    svc = svm.SVC(C=1, kernel='linear')
    # X_train, X_test, y_train, y_test = train_test_split(data_X, data_Y, test_size=0.4, random_state=0)
    clf = svc.fit(data_X, data_Y)  # svm
    # array = svc.coef_
    # print(array)
    predicted = cross_val_predict(clf, data_X, data_Y, cv=2)
    print("accuracy", metrics.accuracy_score(data_Y, predicted))
    print("f1 score macro", metrics.f1_score(data_Y, predicted, average='macro'))
    print("f1 score micro", metrics.f1_score(data_Y, predicted, average='micro'))
    print("precision score", metrics.precision_score(data_Y, predicted, average='macro'))
    print("recall score", metrics.recall_score(data_Y, predicted, average='macro'))
    print("hamming_loss", metrics.hamming_loss(data_Y, predicted))
    print("classification_report", metrics.classification_report(data_Y, predicted))
    # jaccard_similarity_score was renamed to jaccard_score in newer scikit-learn releases
    print("jaccard_similarity_score", metrics.jaccard_similarity_score(data_Y, predicted))
    # print("log_loss", metrics.log_loss(data_Y, predicted))
    print("zero_one_loss", metrics.zero_one_loss(data_Y, predicted))
    # print("AUC&ROC", metrics.roc_auc_score(data_Y, predicted))
    # print("matthews_corrcoef", metrics.matthews_corrcoef(data_Y, predicted))
Example 2: evaluation_analysis
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import log_loss [as alias]
def evaluation_analysis(true_label, predicted):
    '''
    return all metrics results
    '''
    print("accuracy", metrics.accuracy_score(true_label, predicted))
    print("f1 score macro", metrics.f1_score(true_label, predicted, average='macro'))
    print("f1 score micro", metrics.f1_score(true_label, predicted, average='micro'))
    print("precision score", metrics.precision_score(true_label, predicted, average='macro'))
    print("recall score", metrics.recall_score(true_label, predicted, average='macro'))
    print("hamming_loss", metrics.hamming_loss(true_label, predicted))
    print("classification_report", metrics.classification_report(true_label, predicted))
    print("jaccard_similarity_score", metrics.jaccard_similarity_score(true_label, predicted))
    print("log_loss", metrics.log_loss(true_label, predicted))
    print("zero_one_loss", metrics.zero_one_loss(true_label, predicted))
    print("AUC&ROC", metrics.roc_auc_score(true_label, predicted))
    print("matthews_corrcoef", metrics.matthews_corrcoef(true_label, predicted))
Example 3: evaluate
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import log_loss [as alias]
def evaluate(val_df, clf):
    incorrect_session = {}
    val_df['scores'] = clf.predict(val_df.drop(data_drop_columns, axis=1))
    loss = log_loss(val_df.label.values, val_df.scores.values)
    grouped_val = val_df.groupby('session_id')
    rss_group = {i: [] for i in range(1, 26)}
    rss = []
    for session_id, group in grouped_val:
        scores = group.scores
        sorted_arg = np.flip(np.argsort(scores))
        rss.append(group['label'].values[sorted_arg])
        rss_group[len(group)].append(group['label'].values[sorted_arg])
        if group['label'].values[sorted_arg][0] != 1:
            incorrect_session[session_id] = (sorted_arg.values, group['label'].values[sorted_arg])
    mrr = compute_mean_reciprocal_rank(rss)
    mrr_group = {i: (len(rss_group[i]), compute_mean_reciprocal_rank(rss_group[i])) for i in range(1, 26)}
    print(mrr_group)
    if not configuration.debug:
        pickle.dump(incorrect_session, open(f'../output/{model_name}_val_incorrect_order.p', 'wb'))
    return mrr, mrr_group, loss
Example 4: test_logreg_predict_proba_multinomial
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import log_loss [as alias]
def test_logreg_predict_proba_multinomial():
    X, y = make_classification(n_samples=10, n_features=20, random_state=0,
                               n_classes=3, n_informative=10)
    # Predicted probabilities using the true-entropy loss should give a
    # smaller loss than those using the ovr method.
    clf_multi = LogisticRegression(multi_class="multinomial", solver="lbfgs")
    clf_multi.fit(X, y)
    clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
    clf_ovr = LogisticRegression(multi_class="ovr", solver="lbfgs")
    clf_ovr.fit(X, y)
    clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X))
    assert_greater(clf_ovr_loss, clf_multi_loss)
    # Predicted probabilities using the soft-max function should give a
    # smaller loss than those using the logistic function.
    clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
    clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X))
    assert_greater(clf_wrong_loss, clf_multi_loss)
Example 5: test_warm_start_converge_LR
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import log_loss [as alias]
def test_warm_start_converge_LR():
    # Test to see that the logistic regression converges on warm start,
    # with multi_class='multinomial'. Non-regressive test for #10836
    rng = np.random.RandomState(0)
    X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
    y = np.array([1] * 100 + [-1] * 100)
    lr_no_ws = LogisticRegression(multi_class='multinomial',
                                  solver='sag', warm_start=False,
                                  random_state=0)
    lr_ws = LogisticRegression(multi_class='multinomial',
                               solver='sag', warm_start=True,
                               random_state=0)
    lr_no_ws_loss = log_loss(y, lr_no_ws.fit(X, y).predict_proba(X))
    for i in range(5):
        lr_ws.fit(X, y)
        lr_ws_loss = log_loss(y, lr_ws.predict_proba(X))
        assert_allclose(lr_no_ws_loss, lr_ws_loss, rtol=1e-5)
Example 6: get_intercept_loss
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import log_loss [as alias]
def get_intercept_loss(self, model, data):
    y = np.array([x[1] for x in data.mapValues(lambda v: v.label).collect()])
    X = np.ones((len(y), 1))
    if model.model_name == 'HeteroLinearRegression' or model.model_name == 'HeteroPoissonRegression':
        intercept_model = LinearRegression(fit_intercept=False)
        trained_model = intercept_model.fit(X, y)
        pred = trained_model.predict(X)
        loss = metrics.mean_squared_error(y, pred) / 2
    elif model.model_name == 'HeteroLogisticRegression':
        intercept_model = LogisticRegression(penalty='l1', C=1e8, fit_intercept=False, solver='liblinear')
        trained_model = intercept_model.fit(X, y)
        pred = trained_model.predict(X)
        loss = metrics.log_loss(y, pred)
    else:
        raise ValueError("Unknown model received. Stepwise stopped.")
    self.intercept = intercept_model.intercept_
    return loss
Example 7: calc_metrics
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import log_loss [as alias]
def calc_metrics(y_true, y_hat, max_steps=1000):
    y_true = np.array(y_true)
    y_hat = np.array(y_hat)
    metrics = {}
    metrics['Logloss'] = float(log_loss(y_true, y_hat))
    metrics['AUC'] = roc_auc_score(y_true, y_hat)
    metrics['F1'] = []
    metrics['Precision'] = []
    metrics['Recall'] = []
    for i in range(1, max_steps):
        threshold = float(i) / max_steps
        y_tmp = y_hat > threshold
        metrics['F1'].append(f1_score(y_true, y_tmp))
        metrics['Precision'].append(precision_score(y_true, y_tmp))
        metrics['Recall'].append(recall_score(y_true, y_tmp))
    max_idx = np.argmax(metrics['F1'])
    metrics['F1'] = metrics['F1'][max_idx]
    metrics['Precision'] = metrics['Precision'][max_idx]
    metrics['Recall'] = metrics['Recall'][max_idx]
    metrics['Threshold'] = float(max_idx + 1) / max_steps
    return metrics
Example 8: _handle_broken_model
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import log_loss [as alias]
def _handle_broken_model(self, model, error):
    del model
    n = self.genome_handler.n_classes
    loss = log_loss(np.concatenate(([1], np.zeros(n - 1))), np.ones(n) / n)
    accuracy = 1 / n
    gc.collect()
    if K.backend() == 'tensorflow':
        K.clear_session()
        tf.reset_default_graph()
    print('An error occurred and the model could not train:')
    print(error)
    print(('Model assigned poor score. Please ensure that your model '
           'constraints live within your computational resources.'))
    return loss, accuracy
Example 9: evaluate
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import log_loss [as alias]
def evaluate(features):
    dtrain = xgb.DMatrix(tr_x[features], label=tr_y)
    dvalid = xgb.DMatrix(va_x[features], label=va_y)
    params = {'objective': 'binary:logistic', 'silent': 1, 'random_state': 71}
    num_round = 10  # in practice, many more boosting rounds are needed
    early_stopping_rounds = 3
    watchlist = [(dtrain, 'train'), (dvalid, 'eval')]
    model = xgb.train(params, dtrain, num_round,
                      evals=watchlist, early_stopping_rounds=early_stopping_rounds,
                      verbose_eval=0)
    va_pred = model.predict(dvalid)
    score = log_loss(va_y, va_pred)
    return score

# ---------------------------------
# Greedy Forward Selection
# ---------------------------------
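The header above announces Greedy Forward Selection, but the loop itself is not shown in this excerpt. A minimal hedged sketch of how evaluate() could drive it; the candidate list, the stopping rule, and the assumption that tr_x is a pandas DataFrame are all illustrative, not part of the original code:

# Hedged sketch of a greedy forward selection loop driven by evaluate().
best_score = float('inf')
selected = []
candidates = list(tr_x.columns)  # assumes tr_x is a pandas DataFrame
while True:
    remaining = [f for f in candidates if f not in selected]
    if not remaining:
        break
    trial_scores = {f: evaluate(selected + [f]) for f in remaining}
    best_feature, score = min(trial_scores.items(), key=lambda kv: kv[1])
    if score < best_score:          # keep the feature only if it improves logloss
        selected.append(best_feature)
        best_score = score
    else:
        break
print(f'selected features: {selected}, logloss: {best_score:.4f}')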
Example 10: score
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import log_loss [as alias]
def score(params):
    # Define the function to minimize for a given parameter set.
    # For model parameter search, this is the validation score obtained by
    # training and predicting with the model configured with those parameters.
    model = MLP(params)
    model.fit(tr_x, tr_y, va_x, va_y)
    va_pred = model.predict(va_x)
    score = log_loss(va_y, va_pred)
    print(f'params: {params}, logloss: {score:.4f}')
    # Keep a record of the trial
    history.append((params, score))
    return {'loss': score, 'status': STATUS_OK}

# Run the parameter search with hyperopt
Example 11: score
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import log_loss [as alias]
def score(params):
    # Define the evaluation metric to minimize for a given parameter set.
    # Concretely, return the validation score obtained by training and
    # predicting with the model configured with those parameters.
    # Cast max_depth to an integer
    params['max_depth'] = int(params['max_depth'])
    # A Model class is assumed to be defined elsewhere:
    # it trains with fit and outputs predicted probabilities with predict.
    model = Model(params)
    model.fit(tr_x, tr_y, va_x, va_y)
    va_pred = model.predict(va_x)
    score = log_loss(va_y, va_pred)
    print(f'params: {params}, logloss: {score:.4f}')
    # Keep a record of the trial
    history.append((params, score))
    return {'loss': score, 'status': STATUS_OK}

# Specify the parameter space to search
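The trailing comment announces the search space; a minimal hedged sketch of one hyperopt space and the fmin call follows. The parameter names, ranges, and max_evals are illustrative assumptions, not part of the original example:

# Hedged sketch of a hyperopt search space and the optimization call.
from hyperopt import Trials, fmin, hp, tpe

param_space = {
    'max_depth': hp.quniform('max_depth', 3, 9, 1),
    'min_child_weight': hp.quniform('min_child_weight', 1, 10, 1),
}
trials = Trials()
best = fmin(score, param_space, algo=tpe.suggest, trials=trials, max_evals=10)
print(f'best params: {best}')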
Example 12: make_blender_cv
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import log_loss [as alias]
def make_blender_cv(classifier, x, y, calibrate=False):
    skf = StratifiedKFold(y, n_folds=5, random_state=23)
    scores, predictions = [], None
    for train_index, test_index in skf:
        if calibrate:
            # Make training and calibration
            calibrated_classifier = CalibratedClassifierCV(classifier, method='isotonic', cv=get_cv(y[train_index]))
            fitted_classifier = calibrated_classifier.fit(x[train_index, :], y[train_index])
        else:
            fitted_classifier = classifier.fit(x[train_index, :], y[train_index])
        preds = fitted_classifier.predict_proba(x[test_index, :])
        # Free memory
        calibrated_classifier, fitted_classifier = None, None
        gc.collect()
        scores.append(log_loss(y[test_index], preds))
        predictions = np.append(predictions, preds, axis=0) if predictions is not None else preds
    return scores, predictions
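This snippet relies on the pre-0.18 cross-validation API, where StratifiedKFold took y and n_folds directly and was iterated as-is. A hedged sketch of the equivalent split with the current scikit-learn API; shuffle=True is an added assumption, needed for random_state to take effect in the modern class:

# Hedged sketch: the same split with the modern scikit-learn API.
from sklearn.model_selection import StratifiedKFold

skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=23)
for train_index, test_index in skf.split(x, y):
    ...  # same loop body as in the original function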
Example 13: eval_pred
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import log_loss [as alias]
def eval_pred(y_true, y_pred, eval_type):
    # ll and AUC are assumed to be aliases for sklearn.metrics.log_loss and
    # roc_auc_score imported elsewhere in the original module.
    if eval_type == 'logloss':  # add new eval_type options here
        loss = ll(y_true, y_pred)
        print("logloss: ", loss)
        return loss
    elif eval_type == 'auc':
        loss = AUC(y_true, y_pred)
        print("AUC: ", loss)
        return loss
    elif eval_type == 'rmse':
        loss = np.sqrt(mean_squared_error(y_true, y_pred))
        print("rmse: ", loss)
        return loss
######### BaseModel Class #########
Example 14: logloss
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import log_loss [as alias]
def logloss(y, p):
    """Bounded log loss error.

    Args:
        y (numpy.array): target
        p (numpy.array): prediction

    Returns:
        bounded log loss error
    """
    p[p < EPS] = EPS
    p[p > 1 - EPS] = 1 - EPS
    return log_loss(y, p)
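EPS is referenced but not defined in this snippet; a minimal usage sketch under the assumption that it is a small clipping constant (e.g. 1e-15) defined elsewhere in the original module:

# Hedged usage sketch; EPS is an assumed module-level constant.
import numpy as np

EPS = 1e-15
y = np.array([0, 1, 1])
p = np.array([0.0, 1.0, 0.6])   # extreme values are clipped into [EPS, 1 - EPS]
print(logloss(y, p))            # note that the clipping mutates p in place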
Example 15: print_k_result
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import log_loss [as alias]
def print_k_result(ys, Ep, ll, acc, name):
    acc.append(accuracy_score(ys, Ep.argmax(axis=1)))
    ll.append(log_loss(ys, Ep))
    print("{}: accuracy = {:.4g}, log-loss = {:.4g}"
          .format(name, acc[-1], ll[-1]))