This article collects typical code examples showing how the Python class sklearn.dummy.DummyRegressor is used. If you are wondering what DummyRegressor is for or how to use it, the curated class examples below may help.
Fifteen code examples of the DummyRegressor class are shown below, sorted by popularity by default.
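Before the examples, here is a minimal, self-contained sketch of the fit/predict pattern the snippets below rely on (the toy data is illustrative only; the default strategy="mean" simply predicts the mean of the training targets):

import numpy as np
from sklearn.dummy import DummyRegressor

# Toy inputs: DummyRegressor ignores X entirely and looks only at y.
X = np.zeros((4, 1))
y = np.array([1.0, 2.0, 1.0, 1.0])

reg = DummyRegressor(strategy="mean")  # "mean" is the default strategy
reg.fit(X, y)
print(reg.predict(X))  # -> [1.25 1.25 1.25 1.25]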
Example 1: test_regressor
def test_regressor():
    X = [[0]] * 4  # ignored
    y = [1, 2, 1, 1]

    reg = DummyRegressor()
    reg.fit(X, y)
    assert_array_equal(reg.predict(X), [5. / 4] * len(X))
Example 2: mean_model
def mean_model(features, solutions, verbose=0):
    columns = solutions.columns

    # Baseline model: DummyRegressor's default "mean" strategy predicts the
    # column-wise mean of the training solutions for every input.
    clf = DummyRegressor()

    print('Training Model... ')
    clf.fit(features, solutions)
    print('Done Training')

    return (clf, columns)
Example 3: test_quantile_strategy_multioutput_regressor
def test_quantile_strategy_multioutput_regressor():
    random_state = np.random.RandomState(seed=1)

    X_learn = random_state.randn(10, 10)
    y_learn = random_state.randn(10, 5)

    median = np.median(y_learn, axis=0).reshape((1, -1))
    quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))

    X_test = random_state.randn(20, 10)
    y_test = random_state.randn(20, 5)

    # Correctness oracle
    est = DummyRegressor(strategy="quantile", quantile=0.5)
    est.fit(X_learn, y_learn)
    y_pred_learn = est.predict(X_learn)
    y_pred_test = est.predict(X_test)

    _check_equality_regressor(
        median, y_learn, y_pred_learn, y_test, y_pred_test)
    _check_behavior_2d(est)

    # Correctness oracle
    est = DummyRegressor(strategy="quantile", quantile=0.8)
    est.fit(X_learn, y_learn)
    y_pred_learn = est.predict(X_learn)
    y_pred_test = est.predict(X_test)

    _check_equality_regressor(
        quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
    _check_behavior_2d(est)
Example 4: test_y_mean_attribute_regressor
def test_y_mean_attribute_regressor():
    X = [[0]] * 5
    y = [1, 2, 4, 6, 8]

    # when strategy = 'mean'
    est = DummyRegressor(strategy='mean')
    est.fit(X, y)

    assert_equal(est.y_mean_, np.mean(y))
Example 5: train_classifier
def train_classifier():
    # tfv, video_captions_*, and Y_train are assumed to come from the
    # enclosing scope of the original script.
    X_train = tfv.transform(video_captions_train)
    X_test = tfv.transform(video_captions_test)

    # Median baseline: predicts the median of Y_train for every test caption.
    dummy = DummyRegressor(strategy="median")
    dummy.fit(X_train, Y_train)
    Y_pred_med = dummy.predict(X_test)
Example 6: test_dummy_regressor_on_nan_value
def test_dummy_regressor_on_nan_value():
    X = [[np.NaN]]
    y = [1]
    y_expected = [1]
    clf = DummyRegressor()
    clf.fit(X, y)
    y_pred = clf.predict(X)
    assert_array_equal(y_pred, y_expected)
Example 7: test_dummy_regressor_on_3D_array
def test_dummy_regressor_on_3D_array():
    X = np.array([[['foo']], [['bar']], [['baz']]])
    y = np.array([2, 2, 2])
    y_expected = np.array([2, 2, 2])
    cls = DummyRegressor()
    cls.fit(X, y)
    y_pred = cls.predict(X)
    assert_array_equal(y_pred, y_expected)
Example 8: Regressor
class Regressor(BaseEstimator):
    def __init__(self):
        self.clf = DummyRegressor()

    def fit(self, X, y):
        self.clf.fit(X, y)

    def predict(self, X):
        return self.clf.predict(X)
Example 9: test_scorer_sample_weight
def test_scorer_sample_weight():
    # Test that scorers support sample_weight or raise sensible errors

    # Unlike the metrics invariance test, in the scorer case it's harder
    # to ensure that, on the classifier output, weighted and unweighted
    # scores really should be unequal.
    X, y = make_classification(random_state=0)
    _, y_ml = make_multilabel_classification(n_samples=X.shape[0],
                                             random_state=0)
    split = train_test_split(X, y, y_ml, random_state=0)
    X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split

    sample_weight = np.ones_like(y_test)
    sample_weight[:10] = 0

    # get sensible estimators for each metric
    sensible_regr = DummyRegressor(strategy="median")
    sensible_regr.fit(X_train, y_train)
    sensible_clf = DecisionTreeClassifier(random_state=0)
    sensible_clf.fit(X_train, y_train)
    sensible_ml_clf = DecisionTreeClassifier(random_state=0)
    sensible_ml_clf.fit(X_train, y_ml_train)
    estimator = dict(
        [(name, sensible_regr) for name in REGRESSION_SCORERS]
        + [(name, sensible_clf) for name in CLF_SCORERS]
        + [(name, sensible_ml_clf) for name in MULTILABEL_ONLY_SCORERS]
    )

    for name, scorer in SCORERS.items():
        if name in MULTILABEL_ONLY_SCORERS:
            target = y_ml_test
        else:
            target = y_test
        try:
            weighted = scorer(estimator[name], X_test, target,
                              sample_weight=sample_weight)
            ignored = scorer(estimator[name], X_test[10:], target[10:])
            unweighted = scorer(estimator[name], X_test, target)
            assert_not_equal(
                weighted, unweighted,
                msg="scorer {0} behaves identically when "
                    "called with sample weights: {1} vs "
                    "{2}".format(name, weighted, unweighted))
            assert_almost_equal(
                weighted, ignored,
                err_msg="scorer {0} behaves differently when "
                        "ignoring samples and setting sample_weight to"
                        " 0: {1} vs {2}".format(name, weighted, ignored))
        except TypeError as e:
            assert_true(
                "sample_weight" in str(e),
                "scorer {0} raises unhelpful exception when called "
                "with sample weights: {1}".format(name, str(e)))
Example 10: test_median_strategy_regressor
def test_median_strategy_regressor():
    random_state = np.random.RandomState(seed=1)

    X = [[0]] * 5  # ignored
    y = random_state.randn(5)

    reg = DummyRegressor(strategy="median")
    reg.fit(X, y)
    assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
Example 11: test_dummy_regressor_return_std
def test_dummy_regressor_return_std():
    X = [[0]] * 3  # ignored
    y = np.array([2, 2, 2])
    y_std_expected = np.array([0, 0, 0])
    cls = DummyRegressor()
    cls.fit(X, y)
    y_pred_list = cls.predict(X, return_std=True)
    # there should be two elements when return_std is True
    assert_equal(len(y_pred_list), 2)
    # the second element should be all zeros
    assert_array_equal(y_pred_list[1], y_std_expected)
Example 12: simplest
def simplest(cube, y, cv):
    """Just use the mean to impute the missing values."""
    from sklearn.dummy import DummyRegressor
    clf = DummyRegressor()

    # Flatten each sample of the cube into a single feature vector.
    X = cube.reshape(cube.shape[0], cube.shape[1] * cube.shape[2])

    sse = np.zeros(y.shape[1])
    for train, test in cv:
        y_train, y_test = y[train], y[test]
        y_predict = clf.fit(X[train], y[train]).predict(X[test])
        sse += np.mean((y_predict - y_test) ** 2, 0)
    return sse
Example 13: _make_estimators
def _make_estimators(X_train, y_train, y_ml_train):
    # Make estimators that make sense to test various scoring methods
    sensible_regr = DummyRegressor(strategy='median')
    sensible_regr.fit(X_train, y_train)
    sensible_clf = DecisionTreeClassifier(random_state=0)
    sensible_clf.fit(X_train, y_train)
    sensible_ml_clf = DecisionTreeClassifier(random_state=0)
    sensible_ml_clf.fit(X_train, y_ml_train)
    return dict(
        [(name, sensible_regr) for name in REGRESSION_SCORERS] +
        [(name, sensible_clf) for name in CLF_SCORERS] +
        [(name, sensible_ml_clf) for name in MULTILABEL_ONLY_SCORERS]
    )
Example 14: test_multioutput_regressor
def test_multioutput_regressor():
    X_learn = np.random.randn(10, 10)
    y_learn = np.random.randn(10, 5)

    mean = np.mean(y_learn, axis=0).reshape((1, -1))

    X_test = np.random.randn(20, 10)
    y_test = np.random.randn(20, 5)

    # Correctness oracle
    est = DummyRegressor()
    est.fit(X_learn, y_learn)
    y_pred_learn = est.predict(X_learn)
    y_pred_test = est.predict(X_test)

    assert_array_equal(np.tile(mean, (y_learn.shape[0], 1)), y_pred_learn)
    assert_array_equal(np.tile(mean, (y_test.shape[0], 1)), y_pred_test)
    _check_behavior_2d(est)
Example 15: test_mean_strategy_multioutput_regressor
def test_mean_strategy_multioutput_regressor():
    random_state = np.random.RandomState(seed=1)

    X_learn = random_state.randn(10, 10)
    y_learn = random_state.randn(10, 5)

    mean = np.mean(y_learn, axis=0).reshape((1, -1))

    X_test = random_state.randn(20, 10)
    y_test = random_state.randn(20, 5)

    # Correctness oracle
    est = DummyRegressor()
    est.fit(X_learn, y_learn)
    y_pred_learn = est.predict(X_learn)
    y_pred_test = est.predict(X_test)

    _check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
    _check_behavior_2d(est)