This article collects typical usage examples of the Python method sklearn.linear_model.HuberRegressor.fit. If you are wondering exactly how HuberRegressor.fit is used, or what it looks like in real code, the curated examples below should help. You can also read more about its containing class, sklearn.linear_model.HuberRegressor.
The following shows 13 code examples of HuberRegressor.fit, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
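Before the collected examples, here is a minimal, self-contained sketch of what a call to HuberRegressor.fit looks like. The synthetic dataset and the parameter values are illustrative assumptions, not taken from any of the examples below.
import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import HuberRegressor

# Build a small synthetic regression problem and corrupt a few targets
# so that the robust (Huber) loss has outliers to down-weight.
X, y = make_regression(n_samples=100, n_features=2, noise=4.0, random_state=0)
y[:5] += 500.0  # inject a handful of gross outliers

huber = HuberRegressor(epsilon=1.35, alpha=0.0001)
huber.fit(X, y)

print(huber.coef_, huber.intercept_)
print("flagged outliers:", np.sum(huber.outliers_))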
Example 1: test_huber_sample_weights
# Required import: from sklearn.linear_model import HuberRegressor [as alias]
# Or: from sklearn.linear_model.HuberRegressor import fit [as alias]
def test_huber_sample_weights():
    # Test sample_weights implementation in HuberRegressor.
    X, y = make_regression_with_outliers()
    huber = HuberRegressor(fit_intercept=True, alpha=0.1)
    huber.fit(X, y)
    huber_coef = huber.coef_
    huber_intercept = huber.intercept_
    huber.fit(X, y, sample_weight=np.ones(y.shape[0]))
    assert_array_almost_equal(huber.coef_, huber_coef)
    assert_array_almost_equal(huber.intercept_, huber_intercept)
    X, y = make_regression_with_outliers(n_samples=5, n_features=20)
    X_new = np.vstack((X, np.vstack((X[1], X[1], X[3]))))
    y_new = np.concatenate((y, [y[1]], [y[1]], [y[3]]))
    huber.fit(X_new, y_new)
    huber_coef = huber.coef_
    huber_intercept = huber.intercept_
    huber.fit(X, y, sample_weight=[1, 3, 1, 2, 1])
    assert_array_almost_equal(huber.coef_, huber_coef, 3)
    assert_array_almost_equal(huber.intercept_, huber_intercept, 3)
    # Test sparse implementation with sample weights.
    X_csr = sparse.csr_matrix(X)
    huber_sparse = HuberRegressor(fit_intercept=True, alpha=0.1)
    huber_sparse.fit(X_csr, y, sample_weight=[1, 3, 1, 2, 1])
    assert_array_almost_equal(huber_sparse.coef_, huber_coef, 3)
Example 2: test_huber_equals_lr_for_high_epsilon
# Required import: from sklearn.linear_model import HuberRegressor [as alias]
# Or: from sklearn.linear_model.HuberRegressor import fit [as alias]
def test_huber_equals_lr_for_high_epsilon():
    # Test that HuberRegressor matches LinearRegression for large epsilon.
    X, y = make_regression_with_outliers()
    lr = LinearRegression(fit_intercept=True)
    lr.fit(X, y)
    huber = HuberRegressor(fit_intercept=True, epsilon=1e3, alpha=0.0)
    huber.fit(X, y)
    assert_almost_equal(huber.coef_, lr.coef_, 3)
    assert_almost_equal(huber.intercept_, lr.intercept_, 2)
Example 3: test_huber_sparse
# Required import: from sklearn.linear_model import HuberRegressor [as alias]
# Or: from sklearn.linear_model.HuberRegressor import fit [as alias]
def test_huber_sparse():
    X, y = make_regression_with_outliers()
    huber = HuberRegressor(fit_intercept=True, alpha=0.1)
    huber.fit(X, y)
    X_csr = sparse.csr_matrix(X)
    huber_sparse = HuberRegressor(fit_intercept=True, alpha=0.1)
    huber_sparse.fit(X_csr, y)
    assert_array_almost_equal(huber_sparse.coef_, huber.coef_)
Example 4: test_huber_warm_start
# Required import: from sklearn.linear_model import HuberRegressor [as alias]
# Or: from sklearn.linear_model.HuberRegressor import fit [as alias]
def test_huber_warm_start():
    X, y = make_regression_with_outliers()
    huber_warm = HuberRegressor(
        fit_intercept=True, alpha=1.0, max_iter=10000, warm_start=True, tol=1e-1)
    huber_warm.fit(X, y)
    huber_warm_coef = huber_warm.coef_.copy()
    huber_warm.fit(X, y)
    # SciPy performs the tol check after doing the coef updates, so
    # these would be almost the same but not equal.
    assert_array_almost_equal(huber_warm.coef_, huber_warm_coef, 1)
    assert huber_warm.n_iter_ == 0
Example 5: test_huber_warm_start
# Required import: from sklearn.linear_model import HuberRegressor [as alias]
# Or: from sklearn.linear_model.HuberRegressor import fit [as alias]
def test_huber_warm_start():
    X, y = make_regression_with_outliers()
    huber_warm = HuberRegressor(
        fit_intercept=True, alpha=1.0, max_iter=10000, warm_start=True, tol=1e-1)
    huber_warm.fit(X, y)
    huber_warm_coef = huber_warm.coef_.copy()
    huber_warm.fit(X, y)
    # SciPy performs the tol check after doing the coef updates, so
    # these would be almost the same but not equal.
    assert_array_almost_equal(huber_warm.coef_, huber_warm_coef, 1)
    # No n_iter_ in old SciPy (<=0.9)
    # And as said above, the first iteration seems to be run anyway.
    if huber_warm.n_iter_ is not None:
        assert_equal(1, huber_warm.n_iter_)
Example 6: test_huber_sample_weights
# Required import: from sklearn.linear_model import HuberRegressor [as alias]
# Or: from sklearn.linear_model.HuberRegressor import fit [as alias]
def test_huber_sample_weights():
    # Test sample_weights implementation in HuberRegressor.
    X, y = make_regression_with_outliers()
    huber = HuberRegressor(fit_intercept=True)
    huber.fit(X, y)
    huber_coef = huber.coef_
    huber_intercept = huber.intercept_
    # Rescale coefs before comparing with assert_array_almost_equal to make
    # sure that the number of decimal places used is somewhat insensitive to
    # the amplitude of the coefficients and therefore to the scale of the data
    # and the regularization parameter.
    scale = max(np.mean(np.abs(huber.coef_)),
                np.mean(np.abs(huber.intercept_)))
    huber.fit(X, y, sample_weight=np.ones(y.shape[0]))
    assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
    assert_array_almost_equal(huber.intercept_ / scale,
                              huber_intercept / scale)
    X, y = make_regression_with_outliers(n_samples=5, n_features=20)
    X_new = np.vstack((X, np.vstack((X[1], X[1], X[3]))))
    y_new = np.concatenate((y, [y[1]], [y[1]], [y[3]]))
    huber.fit(X_new, y_new)
    huber_coef = huber.coef_
    huber_intercept = huber.intercept_
    sample_weight = np.ones(X.shape[0])
    sample_weight[1] = 3
    sample_weight[3] = 2
    huber.fit(X, y, sample_weight=sample_weight)
    assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
    assert_array_almost_equal(huber.intercept_ / scale,
                              huber_intercept / scale)
    # Test sparse implementation with sample weights.
    X_csr = sparse.csr_matrix(X)
    huber_sparse = HuberRegressor(fit_intercept=True)
    huber_sparse.fit(X_csr, y, sample_weight=sample_weight)
    assert_array_almost_equal(huber_sparse.coef_ / scale,
                              huber_coef / scale)
Example 7: test_huber_better_r2_score
# Required import: from sklearn.linear_model import HuberRegressor [as alias]
# Or: from sklearn.linear_model.HuberRegressor import fit [as alias]
def test_huber_better_r2_score():
    # Test that Huber gives a better r2 score than Ridge on the non-outliers.
    X, y = make_regression_with_outliers()
    huber = HuberRegressor(fit_intercept=True, alpha=0.01, max_iter=100)
    huber.fit(X, y)
    linear_loss = np.dot(X, huber.coef_) + huber.intercept_ - y
    mask = np.abs(linear_loss) < huber.epsilon * huber.scale_
    huber_score = huber.score(X[mask], y[mask])
    huber_outlier_score = huber.score(X[~mask], y[~mask])
    # The Ridge regressor should be influenced by the outliers and hence give
    # a worse score on the non-outliers as compared to the huber regressor.
    ridge = Ridge(fit_intercept=True, alpha=0.01)
    ridge.fit(X, y)
    ridge_score = ridge.score(X[mask], y[mask])
    ridge_outlier_score = ridge.score(X[~mask], y[~mask])
    assert_greater(huber_score, ridge_score)
    # The huber model should also fit poorly on the outliers.
    assert_greater(ridge_outlier_score, huber_outlier_score)
Example 8: test_huber_and_sgd_same_results
# Required import: from sklearn.linear_model import HuberRegressor [as alias]
# Or: from sklearn.linear_model.HuberRegressor import fit [as alias]
def test_huber_and_sgd_same_results():
    # Test that Huber and SGD converge to the same coefficients for the same
    # parameters.
    X, y = make_regression_with_outliers(n_samples=10, n_features=2)
    # Fit once to find out the scale parameter. Scale down X and y by scale
    # so that the scale parameter is optimized to 1.0.
    huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100,
                           epsilon=1.35)
    huber.fit(X, y)
    X_scale = X / huber.scale_
    y_scale = y / huber.scale_
    huber.fit(X_scale, y_scale)
    assert_almost_equal(huber.scale_, 1.0, 3)
    sgdreg = SGDRegressor(
        alpha=0.0, loss="huber", shuffle=True, random_state=0, max_iter=10000,
        fit_intercept=False, epsilon=1.35, tol=None)
    sgdreg.fit(X_scale, y_scale)
    assert_array_almost_equal(huber.coef_, sgdreg.coef_, 1)
Example 9: get_outliers_by_huber
# Required import: from sklearn.linear_model import HuberRegressor [as alias]
# Or: from sklearn.linear_model.HuberRegressor import fit [as alias]
def get_outliers_by_huber(self, table, column_indexes):
    '''
    Get outliers using Huber regression, which outperforms RANSAC but does
    not scale well when the number of samples is very large. Huber achieved
    both perfect precision (100%) and perfect recall (100%) in our experiments.
    '''
    X = table[:, column_indexes[:-1]].astype(float)
    X = utils.enforce_columns(X)
    y = table[:, column_indexes[-1]].astype(float)
    # Preprocessing could make Huber fail on some datasets in our experiments.
    #x = preprocessing.minmax_scale(x)
    #y = preprocessing.minmax_scale(y)
    model_huber = HuberRegressor()
    model_huber.fit(X, y)
    outlier_mask = model_huber.outliers_
    outliers = [idx for idx, val in enumerate(outlier_mask) if val]
    residuals = abs(model_huber.predict(X) - y)
    confidences = preprocessing.minmax_scale(residuals[outliers]) * 0.09 + 0.9
    return (outliers, confidences)
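The helper above depends on project-specific pieces (the class it belongs to and utils.enforce_columns) that are not shown. The sketch below reproduces the same idea in standalone form: the last column of a numeric table is the target, outliers_ marks rows whose absolute residual exceeds epsilon * scale_, and the residuals of those rows are rescaled into [0.9, 0.99] confidences. The toy table is purely illustrative.
import numpy as np
from sklearn import preprocessing
from sklearn.linear_model import HuberRegressor

# Toy table: one feature column plus a roughly linear target, with two
# deliberately corrupted rows that should be flagged as outliers.
rng = np.random.RandomState(0)
feature = np.arange(20, dtype=float)
target = 2.0 * feature + rng.normal(0, 0.5, 20)
table = np.column_stack([feature, target])
table[3, 1] += 40.0
table[7, 1] -= 35.0

X, y = table[:, :-1], table[:, -1]
model = HuberRegressor().fit(X, y)
outliers = np.flatnonzero(model.outliers_)
residuals = np.abs(model.predict(X) - y)
confidences = preprocessing.minmax_scale(residuals[outliers]) * 0.09 + 0.9
print(outliers, confidences)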
Example 10: test_huber_scaling_invariant
# Required import: from sklearn.linear_model import HuberRegressor [as alias]
# Or: from sklearn.linear_model.HuberRegressor import fit [as alias]
def test_huber_scaling_invariant():
    # Test that outlier filtering is scaling independent.
    X, y = make_regression_with_outliers()
    huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100)
    huber.fit(X, y)
    n_outliers_mask_1 = huber.outliers_
    assert_false(np.all(n_outliers_mask_1))
    huber.fit(X, 2. * y)
    n_outliers_mask_2 = huber.outliers_
    assert_array_equal(n_outliers_mask_2, n_outliers_mask_1)
    huber.fit(2. * X, 2. * y)
    n_outliers_mask_3 = huber.outliers_
    assert_array_equal(n_outliers_mask_3, n_outliers_mask_1)
Example 11: test_huber_scaling_invariant
# Required import: from sklearn.linear_model import HuberRegressor [as alias]
# Or: from sklearn.linear_model.HuberRegressor import fit [as alias]
def test_huber_scaling_invariant():
    """Test that outlier filtering is scaling independent."""
    rng = np.random.RandomState(0)
    X, y = make_regression_with_outliers()
    huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100,
                           epsilon=1.35)
    huber.fit(X, y)
    n_outliers_mask_1 = huber.outliers_
    huber.fit(X, 2. * y)
    n_outliers_mask_2 = huber.outliers_
    huber.fit(2. * X, 2. * y)
    n_outliers_mask_3 = huber.outliers_
    assert_array_equal(n_outliers_mask_2, n_outliers_mask_1)
    assert_array_equal(n_outliers_mask_3, n_outliers_mask_1)
Example 12: enumerate
# Required import: from sklearn.linear_model import HuberRegressor [as alias]
# Or: from sklearn.linear_model.HuberRegressor import fit [as alias]
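# Note: this snippet appears truncated by the source page. It assumes that X, y
# and the outlier arrays X_outliers, y_outliers were already created earlier
# (for example via make_regression plus a few manually shifted points), and
# that numpy as np, matplotlib.pyplot as plt, HuberRegressor and Ridge are
# imported.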
X_outliers[2:, :] += X.min() - X.mean() / 4.
y_outliers[:2] += y.min() - y.mean() / 4.
y_outliers[2:] += y.max() + y.mean() / 4.
X = np.vstack((X, X_outliers))
y = np.concatenate((y, y_outliers))
plt.plot(X, y, 'b.')
# Fit the huber regressor over a series of epsilon values.
colors = ['r-', 'b-', 'y-', 'm-']
x = np.linspace(X.min(), X.max(), 7)
epsilon_values = [1.35, 1.5, 1.75, 1.9]
for k, epsilon in enumerate(epsilon_values):
    huber = HuberRegressor(fit_intercept=True, alpha=0.0, max_iter=100,
                           epsilon=epsilon)
    huber.fit(X, y)
    coef_ = huber.coef_ * x + huber.intercept_
    plt.plot(x, coef_, colors[k], label="huber loss, %s" % epsilon)
# Fit a ridge regressor to compare it to the huber regressor.
ridge = Ridge(fit_intercept=True, alpha=0.0, random_state=0, normalize=True)
ridge.fit(X, y)
coef_ridge = ridge.coef_
coef_ = ridge.coef_ * x + ridge.intercept_
plt.plot(x, coef_, 'g-', label="ridge regression")
plt.title("Comparison of HuberRegressor vs Ridge")
plt.xlabel("X")
plt.ylabel("y")
plt.legend(loc=0)
plt.show()
Example 13: test_huber_max_iter
# Required import: from sklearn.linear_model import HuberRegressor [as alias]
# Or: from sklearn.linear_model.HuberRegressor import fit [as alias]
def test_huber_max_iter():
    X, y = make_regression_with_outliers()
    huber = HuberRegressor(max_iter=1)
    huber.fit(X, y)
    assert huber.n_iter_ == huber.max_iter