本文整理汇总了Python中sklearn.metrics.mean_squared_error方法的典型用法代码示例。如果您正苦于以下问题:Python metrics.mean_squared_error方法的具体用法?Python metrics.mean_squared_error怎么用?Python metrics.mean_squared_error使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sklearn.metrics
的用法示例。
在下文中一共展示了metrics.mean_squared_error方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: Train
# 需要导入模块: from sklearn import metrics [as 别名]
# 或者: from sklearn.metrics import mean_squared_error [as 别名]
def Train(data, modelcount, censhu, yanzhgdata):
    """Fit an XGBoost regressor and report train/validation MSE.

    Args:
        data: 2-D array; every column but the last is a feature, the last
            column is the regression target.
        modelcount: number of boosting rounds (``n_estimators``).
        censhu: maximum tree depth.
        yanzhgdata: validation array laid out exactly like ``data``.

    Returns:
        Tuple ``(train_mse, add_mse)`` — MSE on the training data and on
        the validation data, in that order.
    """
    features, target = data[:, :-1], data[:, -1]
    model = xgb.XGBRegressor(max_depth=censhu, learning_rate=0.1,
                             n_estimators=modelcount, silent=True,
                             objective='reg:gamma')
    model.fit(features, target)
    # Error on the data the model was fitted on.
    train_mse = mse(target, model.predict(features))
    # Error on the held-out validation set.
    add_mse = mse(yanzhgdata[:, -1], model.predict(yanzhgdata[:, :-1]))
    print(train_mse, add_mse)
    return train_mse, add_mse
# 最终确定组合的函数
示例2: test_base_chain_crossval_fit_and_predict
# 需要导入模块: from sklearn import metrics [as 别名]
# 或者: from sklearn.metrics import mean_squared_error [as 别名]
def test_base_chain_crossval_fit_and_predict():
    """A chain fitted with cv must predict the same shape as a plainly
    fitted clone, differ from it, and still score reasonably well."""
    X, Y = generate_multilabel_dataset_with_correlations()
    estimators = (ClassifierChain(LogisticRegression()),
                  RegressorChain(Ridge()))
    for chain in estimators:
        chain.fit(X, Y)
        chain_cv = clone(chain).set_params(cv=3)
        chain_cv.fit(X, Y)
        Y_pred_cv = chain_cv.predict(X)
        Y_pred = chain.predict(X)
        assert Y_pred_cv.shape == Y_pred.shape
        # cross_val_predict-based fitting must actually change the output.
        assert not np.all(Y_pred == Y_pred_cv)
        if isinstance(chain, ClassifierChain):
            assert jaccard_score(Y, Y_pred_cv, average='samples') > .4
        else:
            assert mean_squared_error(Y, Y_pred_cv) < .25
示例3: test_regression_small
# 需要导入模块: from sklearn import metrics [as 别名]
# 或者: from sklearn.metrics import mean_squared_error [as 别名]
def test_regression_small():
    """On a small (2000-sample) dataset MALSS should try 4 algorithms."""
    features, target = make_regression(n_samples=2000,
                                       n_features=10,
                                       n_informative=5,
                                       noise=30.0,
                                       random_state=0)
    features = pd.DataFrame(features)
    target = pd.Series(target)
    cls = MALSS('regression').fit(features, target, 'test_regression_small')
    cls.generate_module_sample()
    from sklearn.metrics import mean_squared_error
    print(mean_squared_error(target, cls.predict(features)))
    assert len(cls.algorithms) == 4
    assert cls.algorithms[0].best_score is not None
示例4: test_regression_medium
# 需要导入模块: from sklearn import metrics [as 别名]
# 或者: from sklearn.metrics import mean_squared_error [as 别名]
def test_regression_medium():
    """On a medium (20000-sample) dataset MALSS should try 2 algorithms."""
    features, target = make_regression(n_samples=20000,
                                       n_features=10,
                                       n_informative=5,
                                       noise=30.0,
                                       random_state=0)
    features = pd.DataFrame(features)
    target = pd.Series(target)
    cls = MALSS('regression').fit(features, target, 'test_regression_medium')
    cls.generate_module_sample()
    from sklearn.metrics import mean_squared_error
    print(mean_squared_error(target, cls.predict(features)))
    assert len(cls.algorithms) == 2
    assert cls.algorithms[0].best_score is not None
示例5: test_regression_big
# 需要导入模块: from sklearn import metrics [as 别名]
# 或者: from sklearn.metrics import mean_squared_error [as 别名]
def test_regression_big():
    """On a big (200000-sample) dataset MALSS falls back to 1 algorithm."""
    features, target = make_regression(n_samples=200000,
                                       n_features=10,
                                       n_informative=5,
                                       noise=30.0,
                                       random_state=0)
    features = pd.DataFrame(features)
    target = pd.Series(target)
    cls = MALSS('regression').fit(features, target, 'test_regression_big')
    cls.generate_module_sample()
    from sklearn.metrics import mean_squared_error
    print(mean_squared_error(target, cls.predict(features)))
    assert len(cls.algorithms) == 1
    assert cls.algorithms[0].best_score is not None
示例6: calculate_regression_metrics
# 需要导入模块: from sklearn import metrics [as 别名]
# 或者: from sklearn.metrics import mean_squared_error [as 别名]
def calculate_regression_metrics(trained_sklearn_estimator, x_test, y_test):
    """
    Given a trained estimator, calculate metrics.

    Args:
        trained_sklearn_estimator (sklearn.base.BaseEstimator): a scikit-learn estimator that has been `.fit()`
        y_test (numpy.ndarray): A 1d numpy array of the y_test set (predictions)
        x_test (numpy.ndarray): A 2d numpy array of the x_test set (features)

    Returns:
        dict: A dictionary of metrics objects
    """
    predictions = trained_sklearn_estimator.predict(x_test)
    # Collect each metric against the ground truth in one literal.
    return {
        'mean_squared_error': skmetrics.mean_squared_error(y_test, predictions),
        'mean_absolute_error': skmetrics.mean_absolute_error(y_test, predictions),
    }
示例7: score_regression
# 需要导入模块: from sklearn import metrics [as 别名]
# 或者: from sklearn.metrics import mean_squared_error [as 别名]
def score_regression(y, y_hat, report=True):
    """
    Create regression score

    :param y: ground-truth target values
    :param y_hat: predicted target values
    :param report: when True, print the formatted score report
    :return: tuple of (mae, report_string)
    """
    r2 = r2_score(y, y_hat)
    rmse = sqrt(mean_squared_error(y, y_hat))
    mae = mean_absolute_error(y, y_hat)
    # Assemble the report in one pass; the text must match the historical
    # format exactly (including trailing spaces/newlines).
    report_string = "".join([
        "---Regression Score--- \n",
        "R2 = " + str(r2) + "\n",
        "RMSE = " + str(rmse) + "\n",
        "MAE = " + str(mae) + "\n",
    ])
    if report:
        print(report_string)
    return mae, report_string
示例8: mean_squared_error_scorer
# 需要导入模块: from sklearn import metrics [as 别名]
# 或者: from sklearn.metrics import mean_squared_error [as 别名]
def mean_squared_error_scorer(
    golds: ndarray,
    probs: ndarray,
    preds: Optional[ndarray],
    uids: Optional[List[str]] = None,
) -> Dict[str, float]:
    """Mean squared error regression loss.

    Args:
        golds: Ground truth values.
        probs: Predicted probabilities.
        preds: Predicted values.
        uids: Unique ids, defaults to None.

    Returns:
        Mean squared error regression loss.
    """
    # Scored against probs (not preds) by design; uids/preds are accepted
    # only to satisfy the common scorer signature.
    score = mean_squared_error(golds, probs)
    return {"mean_squared_error": float(score)}
示例9: Train
# 需要导入模块: from sklearn import metrics [as 别名]
# 或者: from sklearn.metrics import mean_squared_error [as 别名]
def Train(data, treecount, tezh, yanzhgdata):
    """Fit a random-forest regressor and report train/validation MSE.

    Args:
        data: 2-D array; last column is the target, the rest are features.
        treecount: number of trees in the forest (``n_estimators``).
        tezh: ``max_features`` setting for each split.
        yanzhgdata: validation array laid out exactly like ``data``.

    Returns:
        Tuple ``(train_mse, add_mse)`` of training and validation MSE.
    """
    features, target = data[:, :-1], data[:, -1]
    model = RF(n_estimators=treecount, max_features=tezh)
    model.fit(features, target)
    # Error on the fitted data.
    train_mse = mse(target, model.predict(features))
    # Error on the held-out validation set.
    add_mse = mse(yanzhgdata[:, -1], model.predict(yanzhgdata[:, :-1]))
    print(train_mse, add_mse)
    return train_mse, add_mse
# 最终确定组合的函数
示例10: Train
# 需要导入模块: from sklearn import metrics [as 别名]
# 或者: from sklearn.metrics import mean_squared_error [as 别名]
def Train(data, modelcount, censhu, yanzhgdata):
    """Fit an AdaBoost regressor (decision-tree base learner) and report
    train/validation MSE.

    Args:
        data: 2-D array; last column is the target, the rest are features.
        modelcount: number of boosting stages (``n_estimators``).
        censhu: ``max_depth`` of each base decision tree.
        yanzhgdata: validation array laid out exactly like ``data``.

    Returns:
        Tuple ``(train_mse, add_mse)`` of training and validation MSE.
    """
    features, target = data[:, :-1], data[:, -1]
    base_learner = DecisionTreeRegressor(max_depth=censhu)
    model = AdaBoostRegressor(base_learner,
                              n_estimators=modelcount,
                              learning_rate=0.8)
    model.fit(features, target)
    # Error on the fitted data.
    train_mse = mse(target, model.predict(features))
    # Error on the held-out validation set.
    add_mse = mse(yanzhgdata[:, -1], model.predict(yanzhgdata[:, :-1]))
    print(train_mse, add_mse)
    return train_mse, add_mse
# 最终确定组合的函数
示例11: Train
# 需要导入模块: from sklearn import metrics [as 别名]
# 或者: from sklearn.metrics import mean_squared_error [as 别名]
def Train(data, modelcount, censhu, yanzhgdata):
    """Fit a LightGBM regressor and report train/validation MSE.

    Args:
        data: 2-D array; last column is the target, the rest are features.
        modelcount: number of boosting iterations (``n_estimators``).
        censhu: maximum tree depth.
        yanzhgdata: validation array laid out exactly like ``data``.

    Returns:
        Tuple ``(train_mse, add_mse)`` of training and validation MSE.
    """
    features, target = data[:, :-1], data[:, -1]
    model = lgbm.LGBMRegressor(boosting_type='gbdt', objective='regression',
                               num_leaves=1200, learning_rate=0.17,
                               n_estimators=modelcount, max_depth=censhu,
                               metric='rmse', bagging_fraction=0.8,
                               feature_fraction=0.8, reg_lambda=0.9)
    model.fit(features, target)
    # Error on the fitted data.
    train_mse = mse(target, model.predict(features))
    # Error on the held-out validation set.
    add_mse = mse(yanzhgdata[:, -1], model.predict(yanzhgdata[:, :-1]))
    print(train_mse, add_mse)
    return train_mse, add_mse
# 最终确定组合的函数
示例12: test_metrics_wrapper
# 需要导入模块: from sklearn import metrics [as 别名]
# 或者: from sklearn.metrics import mean_squared_error [as 别名]
def test_metrics_wrapper():
    """Scaling inside metric_wrapper should equalize per-feature errors."""
    # Put the two target features on very different scales.
    y = np.array([[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]) * [1, 100]

    # Without a scaler, an 80% error on the large-scale feature dominates,
    # so it matters which feature is wrong.
    unscaled_metric = model_utils.metric_wrapper(mean_squared_error)
    err_feature_one = unscaled_metric(y, y * [0.8, 1])
    err_feature_two = unscaled_metric(y, y * [1, 0.8])
    assert not np.isclose(err_feature_one, err_feature_two)

    # With a scaler, both features contribute comparably, so the error is
    # symmetric in which feature is wrong.
    scaler = MinMaxScaler().fit(y)
    scaled_metric = model_utils.metric_wrapper(mean_squared_error, scaler=scaler)
    err_feature_one = scaled_metric(y, y * [0.8, 1])
    err_feature_two = scaled_metric(y, y * [1, 0.8])
    assert np.isclose(err_feature_one, err_feature_two)
示例13: test_get_metrics_dict_scaler
# 需要导入模块: from sklearn import metrics [as 别名]
# 或者: from sklearn.metrics import mean_squared_error [as 别名]
def test_get_metrics_dict_scaler(scaler, mock):
    """build_metrics_dict should respect the optional scaler: with it, the
    metric is insensitive to which (differently scaled) feature is wrong."""
    mock_model = mock
    metrics_list = [sklearn.metrics.mean_squared_error]
    # Two target columns on very different scales.
    y = pd.DataFrame(
        np.array([[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]) * [1, 100],
        columns=["Tag 1", "Tag 2"],
    )
    metrics_dict = ModelBuilder.build_metrics_dict(metrics_list, y, scaler=scaler)
    metric_func = metrics_dict["mean-squared-error"]

    # 80% error on the small-scale column only.
    mock_model.predict = lambda _y: _y * [0.8, 1]
    err_feature_one = metric_func(mock_model, y, y)
    # 80% error on the large-scale column only.
    mock_model.predict = lambda _y: _y * [1, 0.8]
    err_feature_two = metric_func(mock_model, y, y)

    if scaler:
        assert np.isclose(err_feature_one, err_feature_two)
    else:
        assert not np.isclose(err_feature_one, err_feature_two)
示例14: test_metrics_from_list
# 需要导入模块: from sklearn import metrics [as 别名]
# 或者: from sklearn.metrics import mean_squared_error [as 别名]
def test_metrics_from_list():
    """
    Check getting functions from a list of metric names
    """
    # Default set when no names are given.
    expected_defaults = [
        metrics.explained_variance_score,
        metrics.r2_score,
        metrics.mean_squared_error,
        metrics.mean_absolute_error,
    ]
    assert ModelBuilder.metrics_from_list() == expected_defaults

    # Explicit dotted names resolve to the matching functions, in order.
    requested = ["sklearn.metrics.adjusted_mutual_info_score",
                 "sklearn.metrics.r2_score"]
    resolved = ModelBuilder.metrics_from_list(requested)
    assert resolved == [metrics.adjusted_mutual_info_score, metrics.r2_score]
示例15: test_averaging_opt_minimize
# 需要导入模块: from sklearn import metrics [as 别名]
# 或者: from sklearn.metrics import mean_squared_error [as 别名]
def test_averaging_opt_minimize():
    """Optimized averaging should beat the best single model and beat (or
    tie) a simple unweighted average, when minimizing MSE."""
    X, y = make_regression_df(n_samples=1024)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    oof, test = _make_1st_stage_preds(X_train, y_train, X_test)

    # Best out-of-fold score among the three base models.
    best_single_model = min(mean_squared_error(y_train, preds)
                            for preds in (oof[0], oof[1], oof[2]))

    result = averaging_opt(test, oof, y_train, mean_squared_error,
                           higher_is_better=False)
    assert result.score <= best_single_model

    result_simple_avg = averaging(test, oof, y_train,
                                  eval_func=mean_squared_error)
    assert result.score <= result_simple_avg.score