This article collects typical usage examples of the Python method sklearn.metrics.r2_score. If you have been wondering what metrics.r2_score does, how to call it, or where to find examples of it in use, the curated code examples below should help. You can also read further about its parent module, sklearn.metrics.
The following sections present 15 code examples of metrics.r2_score, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
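Before the examples, here is a minimal, self-contained sketch of the basic call (the numbers are arbitrary illustration values): r2_score takes the ground-truth targets first and the predictions second, and returns the coefficient of determination.

from sklearn.metrics import r2_score

y_true = [3.0, -0.5, 2.0, 7.0]
y_pred = [2.5, 0.0, 2.0, 8.0]

# R^2 = 1 - SS_res / SS_tot: 1.0 is a perfect fit, 0.0 is no better than
# predicting the mean, and the score can be arbitrarily negative.
print(r2_score(y_true, y_pred))  # approximately 0.948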
Example 1: fit_model
# Module to import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import r2_score [as alias]
def fit_model(self, data, cross_val_data, cross_val_labels):
    eval_metrics = []
    for i in range(self.n_ensemble):
        train_sm = np.concatenate(cross_val_data[:i] +
                                  cross_val_data[(i + 1):])
        test_sm = cross_val_data[i]
        train_labels = np.concatenate(cross_val_labels[:i] +
                                      cross_val_labels[(i + 1):])
        test_labels = cross_val_labels[i]
        fp_train = get_fp(train_sm)
        fp_test = get_fp(test_sm)
        self.model[i].fit(fp_train, train_labels.ravel())
        predicted = self.model[i].predict(fp_test)
        if self.model_type == 'classifier':
            fpr, tpr, thresholds = metrics.roc_curve(test_labels, predicted)
            eval_metrics.append(metrics.auc(fpr, tpr))
            metrics_type = 'AUC'
        elif self.model_type == 'regressor':
            r2 = metrics.r2_score(test_labels, predicted)
            eval_metrics.append(r2)
            metrics_type = 'R^2 score'
    return eval_metrics, metrics_type
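The method collects one score per ensemble member: AUC for classifiers and R^2 for regressors. A hypothetical way to summarize the returned list afterwards (the fold values and the `predictor` variable are assumed purely for illustration):

import numpy as np

# eval_metrics, metrics_type = predictor.fit_model(data, cross_val_data, cross_val_labels)
eval_metrics = [0.61, 0.58, 0.64, 0.60, 0.62]  # placeholder per-fold scores
metrics_type = 'R^2 score'

print('%s: %.3f +/- %.3f' % (metrics_type, np.mean(eval_metrics), np.std(eval_metrics)))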
Example 2: crossValidation
# Module to import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import r2_score [as alias]
def crossValidation(X, y, cvFolds, estimator):
    r2 = np.zeros((cvFolds, 1))
    kf = KFold(len(X), n_folds=cvFolds, shuffle=True, random_state=30)
    cv_j = 0
    for train_index, test_index in kf:
        train_X = X[train_index, :]
        test_X = X[test_index, :]
        train_y = y[train_index]
        test_y = y[test_index]
        estimator.fit(train_X, train_y)
        y_true, y_pred = test_y, estimator.predict(test_X)
        r2[cv_j] = r2_score(y_true, y_pred)
        cv_j = cv_j + 1
    return r2
# parameters: 'X' the predictors, 'y' the target, 'cvFolds' the number of folds, 'estimator' the machine learning algorithm
# returns: the R squared for each fold
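Note that this snippet uses the old KFold(n, n_folds=...) interface from the sklearn.cross_validation era, which has since been removed. A rough equivalent of the same loop against the current sklearn.model_selection API might look like the sketch below (Ridge and the synthetic data are stand-ins chosen only for illustration):

import numpy as np
from sklearn.linear_model import Ridge
from sklearn.metrics import r2_score
from sklearn.model_selection import KFold

def cross_validation_r2(X, y, cv_folds, estimator):
    r2 = np.zeros(cv_folds)
    kf = KFold(n_splits=cv_folds, shuffle=True, random_state=30)
    for j, (train_index, test_index) in enumerate(kf.split(X)):
        estimator.fit(X[train_index], y[train_index])
        r2[j] = r2_score(y[test_index], estimator.predict(X[test_index]))
    return r2

# Example usage with synthetic data
rng = np.random.RandomState(0)
X = rng.rand(100, 5)
y = X @ rng.rand(5) + 0.1 * rng.rand(100)
print(cross_validation_r2(X, y, 5, Ridge()))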
Example 3: r2
# Module to import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import r2_score [as alias]
def r2(self, log=False, pseudocount=1, clip=None):
    """ Compute target R2 vector. """
    r2_vec = np.zeros(self.num_targets)

    for ti in range(self.num_targets):
        if self.targets_na is not None:
            preds_ti = self.preds[~self.targets_na, ti].astype('float64')
            targets_ti = self.targets[~self.targets_na, ti].astype('float64')
        else:
            preds_ti = self.preds[:, :, ti].flatten().astype('float64')
            targets_ti = self.targets[:, :, ti].flatten().astype('float64')

        if clip is not None:
            preds_ti = np.clip(preds_ti, 0, clip)
            targets_ti = np.clip(targets_ti, 0, clip)

        if log:
            preds_ti = np.log2(preds_ti + pseudocount)
            targets_ti = np.log2(targets_ti + pseudocount)

        r2_vec[ti] = metrics.r2_score(targets_ti, preds_ti)

    return r2_vec
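When the arrays are already 2-D (samples x targets) and there is no NA mask to filter, the per-target loop above can be collapsed into a single call, because r2_score accepts 2-D inputs and can return one score per output column. A minimal sketch with made-up shapes:

import numpy as np
from sklearn.metrics import r2_score

rng = np.random.RandomState(0)
targets = rng.rand(128, 10)                  # (num_samples, num_targets)
preds = targets + 0.1 * rng.randn(128, 10)   # noisy predictions

# One R^2 per target column, analogous to the r2_vec built in the loop above
r2_vec = r2_score(targets, preds, multioutput='raw_values')
print(r2_vec.shape)  # (10,)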
Example 4: score_regression
# Module to import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import r2_score [as alias]
def score_regression(y, y_hat, report=True):
    """
    Create regression score
    :param y: ground-truth target values
    :param y_hat: predicted values
    :param report: if True, print the report string
    :return: the MAE and the formatted report string
    """
    r2 = r2_score(y, y_hat)
    rmse = sqrt(mean_squared_error(y, y_hat))
    mae = mean_absolute_error(y, y_hat)
    report_string = "---Regression Score--- \n"
    report_string += "R2 = " + str(r2) + "\n"
    report_string += "RMSE = " + str(rmse) + "\n"
    report_string += "MAE = " + str(mae) + "\n"
    if report:
        print(report_string)
    return mae, report_string
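For completeness, a hypothetical call of the helper above (it assumes the imports the snippet relies on: sqrt from math, plus r2_score, mean_squared_error and mean_absolute_error from sklearn.metrics). Note that it prints the report and returns the MAE together with the report string, not the R^2:

import numpy as np

y = np.array([3.0, 2.5, 4.1, 5.0])
y_hat = np.array([2.8, 2.9, 4.0, 4.7])

# score_regression is the function defined above
mae, report = score_regression(y, y_hat, report=True)
print(mae)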
Example 5: r2_score_vec
# Module to import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import r2_score [as alias]
def r2_score_vec(y_true, y_pred):
    """ Returns a non-aggregate version of the R2 score,
    based on the r2_score() function from sklearn (http://sklearn.org).
    """
    numerator = (y_true - y_pred) ** 2
    denominator = (y_true - np.average(y_true)) ** 2
    nonzero_denominator = denominator != 0
    nonzero_numerator = numerator != 0
    valid_score = nonzero_denominator & nonzero_numerator
    output_scores = np.ones([y_true.shape[0]])
    output_scores[valid_score] = 1 - (numerator[valid_score] /
                                      denominator[valid_score])
    # arbitrarily set to zero to avoid -inf scores; having a constant
    # y_true is not interesting for scoring a regression anyway
    output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
    return output_scores
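As an illustrative check (arbitrary values): each entry of the vector is 1 minus that sample's squared error divided by its squared deviation from the mean of y_true, so it is a per-sample decomposition rather than something that averages back to the scalar r2_score.

import numpy as np
from sklearn.metrics import r2_score

y_true = np.array([3.0, -0.5, 2.0, 7.0])
y_pred = np.array([2.5, 0.0, 2.0, 8.0])

print(r2_score_vec(y_true, y_pred))  # one score per sample, using the function above
print(r2_score(y_true, y_pred))      # aggregate score over all samples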
Example 6: test_few_fit_shapes
# Module to import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import r2_score [as alias]
def test_few_fit_shapes():
    """test_few.py: fit and predict return correct shapes """
    np.random.seed(202)
    # load example data
    boston = load_boston()
    d = pd.DataFrame(data=boston.data)
    print("feature shape:", boston.data.shape)

    learner = FEW(generations=1, population_size=5,
                  mutation_rate=0.2, crossover_rate=0.8,
                  ml=LassoLarsCV(), min_depth=1, max_depth=3,
                  sel='epsilon_lexicase', tourn_size=2,
                  random_state=0, verbosity=0,
                  disable_update_check=False, fit_choice='mse')

    score = learner.fit(boston.data[:300], boston.target[:300])
    print("learner:", learner._best_estimator)
    yhat_test = learner.predict(boston.data[300:])
    test_score = learner.score(boston.data[300:], boston.target[300:])
    print("train score:", score, "test score:", test_score,
          "test r2:", r2_score(boston.target[300:], yhat_test))
    assert yhat_test.shape == boston.target[300:].shape
Example 7: test_metrics_from_list
# Module to import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import r2_score [as alias]
def test_metrics_from_list():
    """
    Check getting functions from a list of metric names
    """
    default = ModelBuilder.metrics_from_list()
    assert default == [
        metrics.explained_variance_score,
        metrics.r2_score,
        metrics.mean_squared_error,
        metrics.mean_absolute_error,
    ]

    specifics = ModelBuilder.metrics_from_list(
        ["sklearn.metrics.adjusted_mutual_info_score", "sklearn.metrics.r2_score"]
    )
    assert specifics == [metrics.adjusted_mutual_info_score, metrics.r2_score]
Example 8: test_boston_dataset
# Module to import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import r2_score [as alias]
def test_boston_dataset(max_bins):
    boston = load_boston()
    X_train, X_test, y_train, y_test = train_test_split(
        boston.data, boston.target, random_state=42)

    mapper = _BinMapper(max_bins=max_bins, random_state=42)
    X_train_binned = mapper.fit_transform(X_train)

    # Init gradients and hessians to that of least squares loss
    gradients = -y_train.astype(G_H_DTYPE)
    hessians = np.ones(1, dtype=G_H_DTYPE)

    min_samples_leaf = 8
    max_leaf_nodes = 31
    grower = TreeGrower(X_train_binned, gradients, hessians,
                        min_samples_leaf=min_samples_leaf,
                        max_leaf_nodes=max_leaf_nodes, max_bins=max_bins,
                        actual_n_bins=mapper.actual_n_bins_)
    grower.grow()

    predictor = grower.make_predictor(bin_thresholds=mapper.bin_thresholds_)

    assert r2_score(y_train, predictor.predict(X_train)) > 0.85
    assert r2_score(y_test, predictor.predict(X_test)) > 0.70
Example 9: test_multioutput_regression
# Module to import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import r2_score [as alias]
def test_multioutput_regression():
    y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
    y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])

    error = mean_squared_error(y_true, y_pred)
    assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)

    error = mean_squared_log_error(y_true, y_pred)
    assert_almost_equal(error, 0.200, decimal=2)

    # mean_absolute_error and mean_squared_error are equal because
    # it is a binary problem.
    error = mean_absolute_error(y_true, y_pred)
    assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)

    error = r2_score(y_true, y_pred, multioutput='variance_weighted')
    assert_almost_equal(error, 1. - 5. / 2)
    error = r2_score(y_true, y_pred, multioutput='uniform_average')
    assert_almost_equal(error, -.875)
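To see where the asserted values come from: the per-output R^2 scores for this data are [-3.5, -2.0, 1.0, 1.0] (the last output has zero variance and zero error, so it is scored as 1.0). The uniform average is (-3.5 - 2.0 + 1.0 + 1.0) / 4 = -0.875, while the variance-weighted average weights each output by its target variance (2/3, 2/3, 2/3, 0), giving -1.5 = 1 - 5/2. This can be checked directly:

import numpy as np
from sklearn.metrics import r2_score

y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])

print(r2_score(y_true, y_pred, multioutput='raw_values'))  # [-3.5 -2.   1.   1. ]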
Example 10: test_regression_metrics_at_limits
# Module to import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import r2_score [as alias]
def test_regression_metrics_at_limits():
    assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(mean_squared_log_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(max_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
    assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)

    assert_raises_regex(ValueError, "Mean Squared Logarithmic Error cannot be "
                        "used when targets contain negative values.",
                        mean_squared_log_error, [-1.], [-1.])
    assert_raises_regex(ValueError, "Mean Squared Logarithmic Error cannot be "
                        "used when targets contain negative values.",
                        mean_squared_log_error, [1., 2., 3.], [1., -2., 3.])
    assert_raises_regex(ValueError, "Mean Squared Logarithmic Error cannot be "
                        "used when targets contain negative values.",
                        mean_squared_log_error, [1., -2., 3.], [1., 2., 3.])
Example 11: compute_perf_metrics
# Module to import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import r2_score [as alias]
def compute_perf_metrics(self, per_task=False):
    """Returns the R-squared metrics for each task or averaged over tasks based on the accumulated values

    Args:
        per_task (bool): True if calculating per-task metrics, False otherwise.

    Returns:
        A tuple (r2_score, std):
            r2_score (np.array): An array of scores for each task, if per_task is True.
            Otherwise, it is a float containing the average R^2 score over tasks.
            std: Always None for this class.
    """
    r2_scores = self.perf_metrics[0]
    if per_task or self.num_tasks == 1:
        return (r2_scores, None)
    else:
        return (r2_scores.mean(), None)
Example 12: score
# Module to import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import r2_score [as alias]
def score(self, X, y):
    """Returns the coefficient of determination R^2 of the fitted linear
    regression model, computed on the given features matrix and labels.

    Parameters
    ----------
    X : `np.ndarray` or `scipy.sparse.csr_matrix`, shape=(n_samples, n_features)
        Features matrix.

    y : `np.ndarray`, shape=(n_samples,)
        Labels vector.

    Returns
    -------
    score : `float`
        R^2 of self.predict(X) against y
    """
    from sklearn.metrics import r2_score
    return r2_score(y, self.predict(X))
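This mirrors the behaviour of scikit-learn's own RegressorMixin.score, which is likewise defined in terms of r2_score(y, self.predict(X)), so for an sklearn-style regressor the same number can be obtained either way. A small illustrative check with synthetic data:

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score

rng = np.random.RandomState(0)
X = rng.rand(50, 3)
y = X @ np.array([1.0, -2.0, 0.5]) + 0.05 * rng.randn(50)

model = LinearRegression().fit(X, y)
# The estimator's built-in score() and an explicit r2_score call agree
assert np.isclose(model.score(X, y), r2_score(y, model.predict(X)))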
Example 13: neural_regression
# Module to import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import r2_score [as alias]
def neural_regression(X_train, Y_train, X_val, Y_val, X_test, Y_test,
                      nb_epoch=200, batch_size=10, seed=7):
    reg = neural_network(X_train.shape[1])
    reg.fit(X_train, Y_train,
            nb_epoch=nb_epoch,
            batch_size=batch_size,
            shuffle=True,
            validation_data=(X_val, Y_val),
            callbacks=[
                ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.01),
                EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=5, verbose=0, mode='auto'),
            ])
    pred = reg.predict(X_test)
    pred = np.reshape(pred, pred.shape[0])
    r2 = r2_score(Y_test, pred)
    return r2
Example 14: nestedCrossValidation
# Module to import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import r2_score [as alias]
def nestedCrossValidation(X, y, cvFolds, estimator):
    kf = KFold(len(X), n_folds=cvFolds, shuffle=True, random_state=30)
    cv_j = 0
    param_grid = {'alpha': [0.0000001, 0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10,
                            100, 1000, 10000, 100000, 1000000, 10000000, 1000000000]}
    r2 = np.zeros((cvFolds, 1))
    for train_index, test_index in kf:
        train_X = X[train_index, :]
        test_X = X[test_index, :]
        train_y = y[train_index]
        test_y = y[test_index]
        grid = GridSearchCV(estimator, param_grid=param_grid, verbose=0, cv=cvFolds,
                            scoring='mean_squared_error')
        grid.fit(train_X, train_y)
        y_true, y_pred = test_y, grid.best_estimator_.predict(test_X)
        r2[cv_j] = r2_score(y_true, y_pred)
        cv_j = cv_j + 1
    return r2
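Two portability caveats if this snippet is adapted to a current scikit-learn: the KFold(n, n_folds=...) constructor went away with the old cross_validation module, and the 'mean_squared_error' scoring string was replaced by 'neg_mean_squared_error'. A rough, assumption-laden modern equivalent of the inner grid search (Ridge is only a guess at the estimator, suggested by the alpha grid, and the grid itself is illustrative):

import numpy as np
from sklearn.linear_model import Ridge
from sklearn.model_selection import GridSearchCV, KFold

param_grid = {'alpha': np.logspace(-7, 7, 15)}  # illustrative grid, not the exact one above
inner_cv = KFold(n_splits=5, shuffle=True, random_state=30)
grid = GridSearchCV(Ridge(), param_grid=param_grid, cv=inner_cv,
                    scoring='neg_mean_squared_error', verbose=0)
# grid.fit(train_X, train_y) and grid.best_estimator_.predict(test_X) then feed into
# r2_score(test_y, ...) exactly as in the loop above.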
Example 15: __init__
# Module to import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import r2_score [as alias]
def __init__(
        self,
        wrapped: BaseStep = None,
        test_size: float = 0.2,
        scoring_function=r2_score,
        run_validation_split_in_test_mode=True,
        cache_folder_when_no_handle=None
):
    """
    :param wrapped: wrapped step
    :param test_size: ratio for test size between 0 and 1
    :param scoring_function: scoring function with two arguments (y_true, y_pred)
    """
    BaseCrossValidationWrapper.__init__(self, wrapped=wrapped,
                                        cache_folder_when_no_handle=cache_folder_when_no_handle)
    self.run_validation_split_in_test_mode = run_validation_split_in_test_mode
    self.test_size = test_size
    self.scoring_function = scoring_function
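Because the wrapper simply stores scoring_function and, per its docstring, calls it with (y_true, y_pred), any metric with that signature can be swapped in for r2_score. A hypothetical alternative (the wrapper class name is not shown in this snippet, so it is left as a placeholder):

from sklearn.metrics import mean_absolute_error

def negative_mae(y_true, y_pred):
    # Any callable taking (y_true, y_pred) works as a scoring_function
    return -mean_absolute_error(y_true, y_pred)

# e.g. SomeValidationWrapper(wrapped=step, test_size=0.2, scoring_function=negative_mae),
# where SomeValidationWrapper and step are placeholders for whatever defines this __init__.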