This page collects typical usage examples of the Python method sklearn.metrics.median_absolute_error. If you are wondering what metrics.median_absolute_error does, how to call it, or where to find working examples, the curated code samples below may help. You can also browse further usage examples from the module it belongs to, sklearn.metrics.
Below are 15 code examples of metrics.median_absolute_error, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
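Before the examples, here is a minimal, self-contained sketch (with made-up numbers) of what the metric computes: the median of the absolute differences between true and predicted values.

from sklearn.metrics import median_absolute_error

y_true = [3.0, -0.5, 2.0, 7.0]
y_pred = [2.5, 0.0, 2.0, 8.0]

# Absolute errors are [0.5, 0.5, 0.0, 1.0]; their median is 0.5.
print(median_absolute_error(y_true, y_pred))  # 0.5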
Example 1: test_regression_metrics_at_limits
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import median_absolute_error [as alias]
def test_regression_metrics_at_limits():
    assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(mean_squared_log_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(max_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
    assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
    assert_raises_regex(ValueError, "Mean Squared Logarithmic Error cannot be "
                        "used when targets contain negative values.",
                        mean_squared_log_error, [-1.], [-1.])
    assert_raises_regex(ValueError, "Mean Squared Logarithmic Error cannot be "
                        "used when targets contain negative values.",
                        mean_squared_log_error, [1., 2., 3.], [1., -2., 3.])
    assert_raises_regex(ValueError, "Mean Squared Logarithmic Error cannot be "
                        "used when targets contain negative values.",
                        mean_squared_log_error, [1., -2., 3.], [1., 2., 3.])
Example 2: mae_cv
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import median_absolute_error [as alias]
def mae_cv(self, cv):
    """
    This method performs cross-validation over the median absolute error.

    Parameters
    ----------
    * cv : integer
        The number of cross-validation folds to perform.

    Returns
    -------
    The k-fold median absolute error scores.
    """
    mae = metrics.make_scorer(metrics.median_absolute_error)
    result = cross_validate(self.reg, self.X,
                            self.y, cv=cv,
                            scoring=(mae))
    return self.get_test_score(result)
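The method above wraps metrics.median_absolute_error in make_scorer and hands it to cross_validate. For comparison, here is a standalone sketch of the same pattern outside the class; the LinearRegression model and the synthetic X, y data are made up for illustration, and greater_is_better=False is added because that is the usual choice when an error metric drives model selection.

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import make_scorer, median_absolute_error
from sklearn.model_selection import cross_validate

rng = np.random.RandomState(0)
X = rng.rand(100, 3)
y = X @ np.array([1.0, 2.0, 3.0]) + 0.1 * rng.rand(100)

# Error metrics are usually negated via greater_is_better=False so that
# "higher score" still means "better model" during model selection.
mae_scorer = make_scorer(median_absolute_error, greater_is_better=False)
result = cross_validate(LinearRegression(), X, y, cv=5, scoring=mae_scorer)
print(result['test_score'])  # negated median absolute error per fold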
Example 3: print_evaluation_metrics
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import median_absolute_error [as alias]
def print_evaluation_metrics(trained_model, trained_model_name, X_test, y_test):
    print('--------- For Model: ', trained_model_name, ' ---------\n')
    predicted_values = trained_model.predict(X_test)
    print("Mean absolute error: ",
          metrics.mean_absolute_error(y_test, predicted_values))
    print("Median absolute error: ",
          metrics.median_absolute_error(y_test, predicted_values))
    print("Mean squared error: ", metrics.mean_squared_error(
        y_test, predicted_values))
    print("R2: ", metrics.r2_score(y_test, predicted_values))
    plt.scatter(y_test, predicted_values, color='black')
    # plt.plot(x, y_pred, color='blue', linewidth=3)
    plt.title(trained_model_name)
    plt.xlabel('$y_{test}$')
    plt.ylabel('$y_{predicted}$')
    plt.savefig('%s.png' % trained_model_name, bbox_inches='tight')
    print("---------------------------------------\n")
Example 4: print_evaluation_metrics2
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import median_absolute_error [as alias]
def print_evaluation_metrics2(trained_model, trained_model_name, X_test, y_test):
    print('--------- For Model: ', trained_model_name, ' --------- (Train Data)\n')
    predicted_values = trained_model.predict(X_test)
    print("Mean absolute error: ",
          metrics.mean_absolute_error(y_test, predicted_values))
    print("Median absolute error: ",
          metrics.median_absolute_error(y_test, predicted_values))
    print("Mean squared error: ", metrics.mean_squared_error(
        y_test, predicted_values))
    print("R2: ", metrics.r2_score(y_test, predicted_values))
    plt.scatter(y_test, predicted_values / y_test, color='black')
    # plt.plot(x, y_pred, color='blue', linewidth=3)
    plt_name = trained_model_name + " (Train Data)"
    plt.title(plt_name)
    plt.xlabel('$y_{test}$')
    plt.ylabel('$y_{predicted}/y_{test}$')
    plt.savefig('%s.png' % plt_name, bbox_inches='tight')
    print("---------------------------------------\n")
Example 5: eval_metrics_on
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import median_absolute_error [as alias]
def eval_metrics_on(predictions, labels):
    '''
    Assuming this is a regression task where labels are continuous-valued floats,
    returns most regression-related scores for the given predictions/targets as a dictionary:
    r2, mean_abs_error, mse, rmse, median_absolute_error, explained_variance_score.
    '''
    if len(labels[0]) == 2:  # labels is a list of (data, labels) pairs
        labels = np.concatenate([l[1] for l in labels])
        predictions = predictions[:, 0]
    r2 = metrics.r2_score(labels, predictions)
    mean_abs_error = np.abs(predictions - labels).mean()
    mse = ((predictions - labels) ** 2).mean()
    rmse = np.sqrt(mse)
    median_absolute_error = metrics.median_absolute_error(labels, predictions)  # robust to outliers
    explained_variance_score = metrics.explained_variance_score(labels, predictions)  # best score = 1, lower is worse
    return {'r2': r2, 'mean_abs_error': mean_abs_error, 'mse': mse, 'rmse': rmse,
            'median_absolute_error': median_absolute_error,
            'explained_variance_score': explained_variance_score}
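The function above mixes hand-rolled NumPy computations with scikit-learn calls. For reference, median_absolute_error (without sample weights) is simply the median of the absolute residuals, which is what makes it far less sensitive to a handful of large outliers than MSE. A small sketch with made-up numbers:

import numpy as np
from sklearn.metrics import median_absolute_error, mean_squared_error

labels = np.array([1.0, 2.0, 3.0, 4.0, 100.0])      # one large outlier
predictions = np.array([1.1, 2.1, 2.9, 4.2, 5.0])   # badly misses the outlier

print(median_absolute_error(labels, predictions))   # ~0.1
print(np.median(np.abs(labels - predictions)))      # same value, computed by hand
print(mean_squared_error(labels, predictions))      # ~1805, dominated by the outlier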
Example 6: test_corrupted_regression
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import median_absolute_error [as alias]
def test_corrupted_regression(loss, weighting):
    reg = RobustWeightedEstimator(
        SGDRegressor(),
        loss=loss,
        max_iter=50,
        weighting=weighting,
        k=4,
        c=None,
        random_state=rng,
    )
    reg.fit(X_rc, y_rc)
    score = median_absolute_error(reg.predict(X_rc), y_rc)
    assert score < 0.2
Example 7: test_regression_metrics
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import median_absolute_error [as alias]
def test_regression_metrics(n_samples=50):
    y_true = np.arange(n_samples)
    y_pred = y_true + 1
    assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
    assert_almost_equal(mean_squared_log_error(y_true, y_pred),
                        mean_squared_error(np.log(1 + y_true),
                                           np.log(1 + y_pred)))
    assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
    assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
    assert_almost_equal(max_error(y_true, y_pred), 1.)
    assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
    assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
Example 8: score
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import median_absolute_error [as alias]
def score(self,
          actual: np.array,
          predicted: np.array,
          sample_weight: typing.Optional[np.array] = None,
          labels: typing.Optional[np.array] = None,
          **kwargs) -> float:
    return median_absolute_error(actual, predicted)
Example 9: mae_upper_boundary
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import median_absolute_error [as alias]
def mae_upper_boundary(self, upper_boundary):
    y_pred = self.reg.predict(self.X)
    if metrics.median_absolute_error(self.y, y_pred) > upper_boundary:
        return False
    return True
Example 10: cross_val_mae_result
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import median_absolute_error [as alias]
def cross_val_mae_result(self, reg, cv=3):
    y_pred = cross_val_predict(reg, self.X, self.y, cv=cv)
    return metrics.median_absolute_error(self.y, y_pred)
Example 11: mae_result
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import median_absolute_error [as alias]
def mae_result(self, reg):
    y_pred = reg.predict(self.X)
    return metrics.median_absolute_error(self.y, y_pred)
Example 12: mae_upper_boundary
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import median_absolute_error [as alias]
def mae_upper_boundary(self, upper_boundary):
    y_pred = self.reg.predict(self.X)
    if metrics.median_absolute_error(self.y, y_pred) > upper_boundary:
        return False
    return True
Example 13: compute
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import median_absolute_error [as alias]
def compute(labels, pred_scores):
    return median_absolute_error(labels, pred_scores)
Example 14: gini_meae
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import median_absolute_error [as alias]
def gini_meae(truth, predictions):
    score = median_absolute_error(truth, predictions)
    return score
Example 15: print_evaluation_metrics
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import median_absolute_error [as alias]
def print_evaluation_metrics(trained_model, trained_model_name, X_test, y_test):
    print('--------- For Model: ', trained_model_name, ' ---------\n')
    predicted_values = trained_model.predict(X_test)
    print("Mean absolute error: ",
          metrics.mean_absolute_error(y_test, predicted_values))
    print("Median absolute error: ",
          metrics.median_absolute_error(y_test, predicted_values))
    print("Mean squared error: ", metrics.mean_squared_error(
        y_test, predicted_values))
    print("R2: ", metrics.r2_score(y_test, predicted_values))