本文整理汇总了Python中sklearn.metrics.hinge_loss函数的典型用法代码示例。如果您正苦于以下问题:Python hinge_loss函数的具体用法?Python hinge_loss怎么用?Python hinge_loss使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了hinge_loss函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: check_lambda
def check_lambda(datanm, samples_per_class, Cs, num_classes, gamma, num_iter=100, kernel='linear', strat='ovr'):
    """Grid-search SVC hinge loss over (C, gamma) with repeated stratified splits.

    Parameters:
        datanm: dataset name passed to load_full().
        samples_per_class: number of samples loaded per class.
        Cs: iterable of C (regularization) values to try.
        num_classes: number of distinct class labels (assumed 0..num_classes-1).
        gamma: iterable of kernel gamma values to try.
        num_iter: number of shuffle-split repetitions to average over.
        kernel: SVC kernel name.
        strat: decision_function_shape ('ovr' or 'ovo').

    Returns:
        ndarray of shape (len(Cs), len(gamma), 4); slots 2/3 hold the mean
        train/validation hinge loss (slots 0/1 are reserved for the disabled
        log-loss metrics below).

    Side effects:
        Saves the result grid to "svm_lambda_<kernel>_<strat>.npz".
    """
    data, labels = load_full(datanm, samples_per_class)
    slo = StratifiedShuffleSplit(labels, n_iter=num_iter, test_size=0.3,
                                 train_size=0.7, random_state=None)
    ans = np.zeros((len(Cs), len(gamma), 4))
    for train_index, test_index in slo:
        train_data = [data[train_index, :], labels[train_index]]
        valid_data = [data[test_index, :], labels[test_index]]
        for j, g in enumerate(gamma):
            for i, C in enumerate(Cs):
                clf = svm.SVC(C=C, kernel=kernel, degree=3, gamma=g, coef0=0.0,
                              shrinking=True, probability=False, tol=0.001,
                              cache_size=10000, class_weight=None, verbose=False,
                              max_iter=-1, decision_function_shape=strat,
                              random_state=None)
                clf.fit(train_data[0], train_data[1])
                out_train = clf.decision_function(train_data[0])
                out_valid = clf.decision_function(valid_data[0])
                # `labels` must be passed by keyword: it is keyword-only in
                # modern scikit-learn (positional use was deprecated/removed).
                ans[i, j, 2] += hinge_loss(train_data[1], out_train,
                                           labels=list(range(num_classes)))
                ans[i, j, 3] += hinge_loss(valid_data[1], out_valid,
                                           labels=list(range(num_classes)))
                # ans[i, j, 0] += log_loss(train_data[1], clf.predict_proba(train_data[0]))
                # ans[i, j, 1] += log_loss(valid_data[1], clf.predict_proba(valid_data[0]))
    ans[:, :, :] /= num_iter
    np.savez("svm_lambda_" + kernel + '_' + strat, ans=ans, Cs=Cs,
             num_iter=num_iter, num_classes=num_classes,
             samples_per_class=samples_per_class)
    return ans
示例2: learning_curve
def learning_curve(classifier, y, train, cv, n=15):
    """Plot train and cv loss for increasing train sample sizes.

    Parameters:
        classifier: object exposing fit_predict(y, train_idx, predict_idx)
            and a cache_dir attribute.
        y: full label array, indexable by the index arrays below.
        train: index array of the training pool.
        cv: index array of the held-out cross-validation set.
        n: number of learning-curve points (train subsets of growing size).

    Side effects:
        Saves the plot to 'plots/learning_curve.png' and shows it.
    """
    chunk = int(len(y) / n)
    n_samples = []
    train_losses = []
    cv_losses = []
    # Redirect cached model artifacts so diagnostic runs do not pollute
    # (or accidentally reuse) the classifier's normal cache.
    previous_cache_dir = classifier.cache_dir
    classifier.cache_dir = "diagnostics"
    for i in range(n):
        train_subset = train[:(i + 1) * chunk]
        preds_cv = classifier.fit_predict(y, train_subset, cv,
                                          show_steps=False)
        preds_train = classifier.fit_predict(y, train_subset, train_subset,
                                             show_steps=False)
        n_samples.append((i + 1) * chunk)
        # `neg_label` was removed from sklearn.metrics.hinge_loss; binary
        # 0/1 labels are handled automatically by current versions.
        cv_losses.append(hinge_loss(y[cv], preds_cv))
        train_losses.append(hinge_loss(y[train_subset], preds_train))
    classifier.cache_dir = previous_cache_dir
    plt.clf()
    plt.plot(n_samples, train_losses, 'r--', n_samples, cv_losses, 'b--')
    plt.ylim([min(train_losses) - .01, max(cv_losses) + .01])
    plt.savefig('plots/learning_curve.png')
    plt.show()
示例3: test_hinge_loss_binary
def test_hinge_loss_binary():
    """Binary hinge loss matches the hand-computed value for both label styles."""
    # Per-sample losses max(0, 1 - y*d): 0 + 0.5 + 0 + 0.7 = 1.2 over 4 samples.
    decisions = np.array([-8.5, 0.5, 1.5, -0.3])
    for labels in (np.array([-1, 1, 1, -1]), np.array([0, 2, 2, 0])):
        assert_equal(hinge_loss(labels, decisions), 1.2 / 4)
示例4: test_hinge_loss_binary
def test_hinge_loss_binary():
    """Binary hinge loss for {-1, 1} and {0, 2} label encodings."""
    y_true = np.array([-1, 1, 1, -1])
    pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
    assert_equal(1.2 / 4, hinge_loss(y_true, pred_decision))
    # Labels {0, 2}: modern hinge_loss infers the positive class itself —
    # the pos_label/neg_label keyword arguments were removed from sklearn.
    y_true = np.array([0, 2, 2, 0])
    pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
    assert_equal(1.2 / 4, hinge_loss(y_true, pred_decision))
示例5: test_hinge_loss_binary
def test_hinge_loss_binary():
    # Baseline: {-1, 1} labels, loss checked against a hand-computed value
    # (mean of max(0, 1 - y*d) = 1.2 / 4).
    y_true = np.array([-1, 1, 1, -1])
    pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
    assert_equal(1.2 / 4, hinge_loss(y_true, pred_decision))
    # Flipping both the labels and the pos/neg mapping must give the same
    # loss; catch_warnings silences the deprecation warning this emits.
    with warnings.catch_warnings():
        # Test deprecated pos_label
        assert_equal(hinge_loss(-y_true, pred_decision), hinge_loss(y_true, pred_decision, pos_label=-1, neg_label=1))
    # Same checks with a {0, 2} label encoding.
    y_true = np.array([0, 2, 2, 0])
    pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
    assert_equal(1.2 / 4, hinge_loss(y_true, pred_decision))
    with warnings.catch_warnings():
        # Test deprecated pos_label
        assert_equal(1.2 / 4, hinge_loss(y_true, pred_decision, pos_label=2, neg_label=0))
示例6: validation_metric_vw
def validation_metric_vw(self):
    """Score the holdout predictions with the configured outer loss function.

    Reads the VW prediction file (one raw score per line, first token) and
    compares against self.y_true_holdout.  AUC-style metrics are negated so
    that a lower return value is always better.

    Returns:
        float: the (possibly negated) loss value.

    Raises:
        KeyError: if self.outer_loss_function is not a recognized name.
    """
    # Fix: the file handle was previously opened and never closed.
    with open('%s' % self.holdout_pred, 'r') as v:
        # Each line starts with the raw prediction score.
        y_pred_holdout = [float(line.split()[0].strip()) for line in v]
    if self.outer_loss_function == 'logistic':
        # Map raw margins through the sigmoid to get probabilities.
        y_pred_holdout_proba = [1. / (1 + exp(-i)) for i in y_pred_holdout]
        loss = log_loss(self.y_true_holdout, y_pred_holdout_proba)
    elif self.outer_loss_function == 'squared':
        loss = mean_squared_error(self.y_true_holdout, y_pred_holdout)
    elif self.outer_loss_function == 'hinge':
        loss = hinge_loss(self.y_true_holdout, y_pred_holdout)
    elif self.outer_loss_function == 'pr-auc':
        loss = -average_precision_score(self.y_true_holdout, y_pred_holdout)
    elif self.outer_loss_function == 'roc-auc':
        y_pred_holdout_proba = [1. / (1 + exp(-i)) for i in y_pred_holdout]
        fpr, tpr, _ = roc_curve(self.y_true_holdout, y_pred_holdout_proba)
        loss = -auc(fpr, tpr)
    else:
        # Fix: previously `loss` was left unbound here, producing a
        # confusing NameError below instead of a clear error.
        raise KeyError('Invalid outer loss function')
    self.logger.info('parameter suffix: %s' % self.param_suffix)
    self.logger.info('loss value: %.6f' % loss)
    return loss
示例7: test_hinge_loss_multiclass_invariance_lists
def test_hinge_loss_multiclass_invariance_lists():
    # Currently, invariance of string and integer labels cannot be tested
    # in common invariance tests because invariance tests for multiclass
    # decision functions is not implemented yet.
    y_true = ['blue', 'green', 'red',
              'green', 'white', 'red']
    pred_decision = [
        [0.36, -0.17, -0.58, -0.99],
        [-0.55, -0.38, -0.48, -0.58],
        [-1.45, -0.58, -0.38, -0.17],
        [-0.55, -0.38, -0.48, -0.58],
        [-2.36, -0.79, -0.27, 0.24],
        [-1.45, -0.58, -0.38, -0.17]]
    # Reference value: per sample, 1 - (true-class score) + (rival score),
    # clipped at zero, then averaged.  (true column, rival column) per row:
    column_pairs = [(0, 1), (1, 2), (2, 3), (1, 2), (3, 2), (2, 3)]
    expected = np.mean([
        max(0.0, 1 - row[true_col] + row[rival_col])
        for row, (true_col, rival_col) in zip(pred_decision, column_pairs)])
    assert_equal(hinge_loss(y_true, pred_decision), expected)
示例8: validation_metric_vw
def validation_metric_vw(self):
    """Score the holdout predictions with the configured outer loss function.

    Uses self.get_y_pred_holdout() for predictions and self.y_true_holdout
    as ground truth.  AUC-style metrics are negated so that a lower return
    value is always better for the hyperparameter search.

    Returns:
        float: the (possibly negated) loss value.

    Raises:
        KeyError: if self.outer_loss_function is not a recognized name.
    """
    y_pred_holdout = self.get_y_pred_holdout()
    if self.outer_loss_function == 'logistic':
        if self.labels_clf_count > 2:
            # Multiclass output is used as-is as a probability distribution.
            y_pred_holdout_proba = y_pred_holdout
        else:
            # Binary case: map raw margins through the sigmoid.
            y_pred_holdout_proba = [1. / (1 + exp(-i)) for i in y_pred_holdout]
        loss = log_loss(self.y_true_holdout, y_pred_holdout_proba)
    elif self.outer_loss_function == 'squared':
        loss = mean_squared_error(self.y_true_holdout, y_pred_holdout)
    elif self.outer_loss_function == 'hinge':
        loss = hinge_loss(self.y_true_holdout, y_pred_holdout)
    elif self.outer_loss_function == 'pr-auc':
        loss = -average_precision_score(self.y_true_holdout, y_pred_holdout)
    elif self.outer_loss_function == 'roc-auc':
        y_pred_holdout_proba = [1. / (1 + exp(-i)) for i in y_pred_holdout]
        fpr, tpr, _ = roc_curve(self.y_true_holdout, y_pred_holdout_proba)
        loss = -auc(fpr, tpr)
    else:
        # Fix: error message typo ('Invalide' -> 'Invalid').
        raise KeyError('Invalid outer loss function')
    self.logger.info('parameter suffix: %s' % self.param_suffix)
    self.logger.info('loss value: %.6f' % loss)
    return loss
示例9: print_metrics
def print_metrics(y_true, y_preds):
    '''
    Description: print out accuracy, recall, precision, hinge loss, and f1-score of model
    '''
    # Fix: Python 2 `print` statements are a SyntaxError on Python 3; use
    # the print function (as the rest of this file already does).
    print("Accuracy: %.4g" % metrics.accuracy_score(y_true, y_preds, normalize=True))
    print("Recall: %.4g" % metrics.recall_score(y_true, y_preds))
    print("Precision: %.4g" % metrics.precision_score(y_true, y_preds))
    # NOTE(review): hinge_loss is fed hard predictions here, not decision
    # values — confirm that is intended by the caller.
    print("Hinge loss: %.4g" % metrics.hinge_loss(y_true, y_preds))
    print("F1 score: %.4g" % metrics.f1_score(y_true, y_preds))
示例10: check_vb
def check_vb(datanm, samples_per_class, Cs, num_classes, gamma, num_iter=100, kernel='linear', strat='ovr'):
    """Bias/variance sweep: train SVCs on growing per-class subset sizes.

    For each shuffle split, each subset size l+1 per class, and each (C, gamma)
    pair, fits an SVC on the subset and accumulates train/validation hinge
    losses in an (len(Cs), len(gamma), samples_per_class//2, 4) grid
    (slots 2/3; slots 0/1 are reserved for log loss).

    Side effects:
        Saves the averaged grid to "svm_bv_<kernel>_<strat>.npz".

    Returns:
        The averaged result grid (ndarray).
    """
    data, labels = load_full(datanm, samples_per_class)
    slo = StratifiedShuffleSplit(labels, n_iter=num_iter, test_size=0.5,
                                 train_size=0.5, random_state=None)
    # Fix: integer division (//) and range() keep this valid on Python 3;
    # xrange / true division broke the array shape there.
    half = samples_per_class // 2
    ans = np.zeros((len(Cs), len(gamma), half, 4))
    for train_index, test_index in slo:
        train_data = [data[train_index, :], labels[train_index]]
        valid_data = [data[test_index, :], labels[test_index]]
        for l in range(half):
            # Keep only the first l+1 samples of every class.
            ind_train = []
            ind_valid = []
            for k in range(num_classes):
                ind_train += np.where(train_data[1] == k)[0].tolist()[:l + 1]
                ind_valid += np.where(valid_data[1] == k)[0].tolist()[:l + 1]
            ctrain_data = [train_data[0][ind_train], train_data[1][ind_train]]
            cvalid_data = [valid_data[0][ind_valid], valid_data[1][ind_valid]]
            for i, C in enumerate(Cs):
                for j, g in enumerate(gamma):
                    clf = svm.SVC(C=C, kernel=kernel, degree=3, gamma=g,
                                  coef0=0.0, shrinking=True, probability=False,
                                  tol=0.001, cache_size=10000, class_weight=None,
                                  verbose=False, max_iter=-1,
                                  decision_function_shape=strat,
                                  random_state=None)
                    clf.fit(ctrain_data[0], ctrain_data[1])
                    # NOTE(review): losses are evaluated on the FULL train and
                    # validation splits even though the model was fit on the
                    # subset; the earlier (commented-out) code scored the
                    # subsets (ctrain_data/cvalid_data) — confirm intent.
                    out_train = clf.decision_function(train_data[0])
                    out_valid = clf.decision_function(valid_data[0])
                    ans[i, j, l, 2] += hinge_loss(train_data[1], out_train,
                                                  labels=list(range(num_classes)))
                    ans[i, j, l, 3] += hinge_loss(valid_data[1], out_valid,
                                                  labels=list(range(num_classes)))
    ans /= num_iter
    np.savez("svm_bv_" + kernel + '_' + strat, ans=ans, Cs=Cs, num_iter=num_iter,
             num_classes=num_classes, samples_per_class=samples_per_class)
    return ans
示例11: cross_validation
def cross_validation(self, model):
    """Run stratified k-fold cross-validation and collect per-fold metrics.

    Parameters:
        model: any estimator with fit(X, y) and predict(X).

    Returns:
        (scores, preds): `scores` maps metric name -> float32 array of
        per-fold values (only auc/hinge_loss/log_loss are populated);
        `preds` holds the out-of-fold prediction for every training row.
    """
    # kfold = cross_validation.KFold(self.train_x.shape[0], n_folds=5, shuffle=True, random_state=self.random_state)
    kfold = cross_validation.StratifiedKFold(self.train_y,
                                             n_folds=self.k_fold_,
                                             shuffle=True,
                                             random_state=self.random_state)
    scores = {'auc': list(),
              'hinge_loss': list(),
              'log_loss': list(),
              'accuracy': list(),
              'precision': list(),
              'recall': list(),
              'f1_value': list()}
    preds = np.zeros(len(self.train_y))
    # Fix: densify the sparse matrix ONCE instead of twice per fold —
    # toarray() is expensive and its result never changes across folds.
    train_x_dense = self.train_x.toarray()
    for i, (train_idx, test_idx) in enumerate(kfold):
        print(' --------- fold {0} ---------- '.format(i))
        train_x = train_x_dense[train_idx]
        train_y = self.train_y[train_idx]
        test_x = train_x_dense[test_idx]
        test_y = self.train_y[test_idx]
        model.fit(train_x, train_y)
        pred = model.predict(test_x)
        preds[test_idx] = pred
        # Fix: roc_auc_score was previously computed twice with identical
        # arguments; compute it once.
        scores['auc'].append(metrics.roc_auc_score(test_y, pred))
        # NOTE(review): `pred` holds hard class labels, not decision values
        # or probabilities; hinge_loss/log_loss on labels is of limited
        # meaning — confirm the caller's intent.
        scores['hinge_loss'].append(metrics.hinge_loss(test_y, pred))
        scores['log_loss'].append(metrics.log_loss(test_y, pred))
        #score = metrics.accuracy_score(test_y, pred)# accuracy
        #scores['accuracy'].append(score)
        #score = metrics.precision_score(test_y, pred)# precision
        #scores['precision'].append(score)
        #score = metrics.recall_score(test_y, pred)# recall
        #scores['recall'].append(score)
        #score = metrics.f1_score(test_y, pred)# f_value
        #scores['f1_value'].append(score)
    for key in scores:
        scores[key] = np.asarray(scores[key], dtype=np.float32)
        #print key, scores[key].mean(), scores[key].std()
    return scores, preds
示例12: getResult
def getResult(self, predict, data_set):
    """Build a multi-line text report of classification metrics.

    Binary-only metrics (average precision, MCC, ROC AUC) are appended
    only when self.is_binary() is true.
    """
    y_true, y_predict = control.calculate_entire_ds(predict, data_set)
    # Assemble the report as a list of fragments and join once at the end.
    parts = [metrics.classification_report(y_true, y_predict)]
    parts.append("\nAccuracy classification: %f\n" % metrics.accuracy_score(y_true, y_predict))
    parts.append("F1 score: %f\n" % metrics.f1_score(y_true, y_predict))
    parts.append("Fbeta score: %f\n" % metrics.fbeta_score(y_true, y_predict, beta=0.5))
    parts.append("Hamming loss: %f\n" % metrics.hamming_loss(y_true, y_predict))
    parts.append("Hinge loss: %f\n" % metrics.hinge_loss(y_true, y_predict))
    parts.append("Jaccard similarity: %f\n" % metrics.jaccard_similarity_score(y_true, y_predict))
    parts.append("Precision: %f\n" % metrics.precision_score(y_true, y_predict))
    parts.append("Recall: %f\n" % metrics.recall_score(y_true, y_predict))
    if self.is_binary():
        parts.append("Average precision: %f\n" % metrics.average_precision_score(y_true, y_predict))
        parts.append("Matthews correlation coefficient: %f\n" % metrics.matthews_corrcoef(y_true, y_predict))
        parts.append("Area Under the Curve: %f" % metrics.roc_auc_score(y_true, y_predict))
    return "".join(parts)
示例13: test_hinge_loss_multiclass_with_missing_labels
def test_hinge_loss_multiclass_with_missing_labels():
    """Multiclass hinge loss when one label (3) never appears in y_true."""
    pred_decision = np.array([
        [0.36, -0.17, -0.58, -0.99],
        [-0.55, -0.38, -0.48, -0.58],
        [-1.45, -0.58, -0.38, -0.17],
        [-0.55, -0.38, -0.48, -0.58],
        [-1.45, -0.58, -0.38, -0.17]
    ])
    y_true = np.array([0, 1, 2, 1, 2])
    # Label 3 is absent from y_true, so `labels` must be given explicitly
    # for the decision-matrix columns to line up.
    labels = np.array([0, 1, 2, 3])
    # Reference value: 1 - (true-class score) + (rival score), clipped at
    # zero and averaged.  (true column, rival column) per row:
    column_pairs = [(0, 1), (1, 2), (2, 3), (1, 2), (2, 3)]
    expected = np.mean([
        max(0.0, 1 - row[true_col] + row[rival_col])
        for row, (true_col, rival_col) in zip(pred_decision, column_pairs)])
    assert_equal(hinge_loss(y_true, pred_decision, labels=labels), expected)
示例14: test_hinge_loss_multiclass
def test_hinge_loss_multiclass():
    """Multiclass hinge loss matches a hand-built reference computation."""
    pred_decision = np.array([
        [0.36, -0.17, -0.58, -0.99],
        [-0.54, -0.37, -0.48, -0.58],
        [-1.45, -0.58, -0.38, -0.17],
        [-0.54, -0.38, -0.48, -0.58],
        [-2.36, -0.79, -0.27, 0.24],
        [-1.45, -0.58, -0.38, -0.17]
    ])
    y_true = np.array([0, 1, 2, 1, 3, 2])
    # Reference value: 1 - (true-class score) + (rival score), clipped at
    # zero and averaged.  (true column, rival column) per row:
    column_pairs = [(0, 1), (1, 2), (2, 3), (1, 2), (3, 2), (2, 3)]
    expected = np.mean([
        max(0.0, 1 - row[true_col] + row[rival_col])
        for row, (true_col, rival_col) in zip(pred_decision, column_pairs)])
    assert_equal(hinge_loss(y_true, pred_decision), expected)
示例15: evaluate
def evaluate(estimator, dev_X, dev_y):
    """Score `estimator` on the development set, printing each metric.

    Returns the tuple (roc_auc, brier_loss, log_loss, hinge_loss,
    matthews_corrcoef) so callers can log or compare runs.
    """
    print('evaluating on development set', flush=True)
    predictions = estimator.predict(dev_X)
    roc_auc = roc_auc_score(dev_y, predictions)
    print('{:.4f} -- roc auc'.format(roc_auc))
    brier = brier_score_loss(dev_y, predictions)
    print('{:.4f} -- brier loss'.format(brier))
    logloss = log_loss(dev_y, estimator.predict_proba(dev_X))
    print('{:.4f} -- log loss'.format(logloss))
    # hinge_loss / matthews_corrcoef are fed {-1, +1} labels: remap 0 -> -1
    # on a copy so `predictions` itself is untouched.
    signed_predictions = predictions.copy().astype('int8')
    signed_predictions[signed_predictions == 0] = -1
    '''
    decision_fuction not implemented
    # score_hinge_loss_dev = hinge_loss(dev_y, estimator.decision_function(dev_X))
    '''
    hinge = hinge_loss(dev_y, signed_predictions)
    print('{:.4f} -- hinge loss'.format(hinge))
    mcc = matthews_corrcoef(dev_y, signed_predictions)
    print('{:.4f} -- matthews_corrcoef'.format(mcc))
    print(flush=True)
    return roc_auc, brier, logloss, hinge, mcc