This article collects typical usage examples of the Python class sklearn.multioutput.MultiOutputRegressor. If you have been wondering what MultiOutputRegressor is for, how to use it, or what working code with it looks like, the curated class code examples below may help.
Eight code examples of the MultiOutputRegressor class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
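Before the examples, a quick orientation may help: MultiOutputRegressor wraps a single-target regressor and fits one clone of it per target column, so any estimator with fit/predict can be used for multi-output regression. The snippet below is a minimal, hypothetical sketch of that wrap-fit-predict pattern (synthetic data, Ridge chosen arbitrarily as the base estimator); it is not taken from any of the examples that follow.

# Minimal sketch of the MultiOutputRegressor workflow (illustrative only).
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.multioutput import MultiOutputRegressor

rng = np.random.RandomState(0)
X = rng.rand(100, 4)                     # 100 samples, 4 features
Y = np.column_stack([X.sum(axis=1),      # two target columns
                     X[:, 0] - X[:, 1]])

# One Ridge regressor is cloned and fitted per target column.
mor = MultiOutputRegressor(Ridge(alpha=1.0))
mor.fit(X, Y)
preds = mor.predict(X)                   # shape (100, 2)
print(preds.shape)

Each example below exercises this same pattern with different base estimators and inputs.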
Example 1: test_multioutput
def test_multioutput(self):
    # http://scikit-learn.org/stable/auto_examples/ensemble/plot_random_forest_regression_multioutput.html#sphx-glr-auto-examples-ensemble-plot-random-forest-regression-multioutput-py
    from sklearn.multioutput import MultiOutputRegressor
    from sklearn.ensemble import RandomForestRegressor

    # Create a random dataset
    rng = np.random.RandomState(1)
    X = np.sort(200 * rng.rand(600, 1) - 100, axis=0)
    y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
    y += (0.5 - rng.rand(*y.shape))

    df = pdml.ModelFrame(X, target=y)

    max_depth = 30

    rf1 = df.ensemble.RandomForestRegressor(max_depth=max_depth,
                                            random_state=self.random_state)
    reg1 = df.multioutput.MultiOutputRegressor(rf1)

    rf2 = RandomForestRegressor(max_depth=max_depth,
                                random_state=self.random_state)
    reg2 = MultiOutputRegressor(rf2)

    df.fit(reg1)
    reg2.fit(X, y)

    result = df.predict(reg2)
    expected = pd.DataFrame(reg2.predict(X))
    tm.assert_frame_equal(result, expected)
Example 2: test_multi_target_sample_weights_api
def test_multi_target_sample_weights_api():
    X = [[1, 2, 3], [4, 5, 6]]
    y = [[3.141, 2.718], [2.718, 3.141]]
    w = [0.8, 0.6]
    rgr = MultiOutputRegressor(Lasso())
    assert_raises_regex(ValueError, "does not support sample weights",
                        rgr.fit, X, y, w)

    # no exception should be raised if the base estimator supports weights
    rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
    rgr.fit(X, y, w)
Example 3: test_acquisition_per_second_gradient
def test_acquisition_per_second_gradient(acq_func):
    rng = np.random.RandomState(0)
    X = rng.randn(20, 10)
    # Make the second component large, so that mean_grad and std_grad
    # do not become zero.
    y = np.vstack((X[:, 0], np.abs(X[:, 0])**3)).T

    for X_new in [rng.randn(10), rng.randn(10)]:
        gpr = cook_estimator("GP", Space(((-5.0, 5.0),)), random_state=0)
        mor = MultiOutputRegressor(gpr)
        mor.fit(X, y)
        check_gradient_correctness(X_new, mor, acq_func, 1.5)
Example 4: test_multi_target_sparse_regression
def test_multi_target_sparse_regression():
    X, y = datasets.make_regression(n_targets=3)
    X_train, y_train = X[:50], y[:50]
    X_test = X[50:]

    for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix,
                   sp.dok_matrix, sp.lil_matrix]:
        rgr = MultiOutputRegressor(Lasso(random_state=0))
        rgr_sparse = MultiOutputRegressor(Lasso(random_state=0))

        rgr.fit(X_train, y_train)
        rgr_sparse.fit(sparse(X_train), y_train)

        assert_almost_equal(rgr.predict(X_test),
                            rgr_sparse.predict(sparse(X_test)))
Example 5: test_multi_target_sample_weight_partial_fit
def test_multi_target_sample_weight_partial_fit():
    # weighted regressor
    X = [[1, 2, 3], [4, 5, 6]]
    y = [[3.141, 2.718], [2.718, 3.141]]
    w = [2., 1.]
    rgr_w = MultiOutputRegressor(SGDRegressor(random_state=0))
    rgr_w.partial_fit(X, y, w)

    # weighted with different weights
    w = [2., 2.]
    rgr = MultiOutputRegressor(SGDRegressor(random_state=0))
    rgr.partial_fit(X, y, w)

    assert_not_equal(rgr.predict(X)[0][0], rgr_w.predict(X)[0][0])
Example 6: test_multi_target_sample_weights
def test_multi_target_sample_weights():
    # weighted regressor
    Xw = [[1, 2, 3], [4, 5, 6]]
    yw = [[3.141, 2.718], [2.718, 3.141]]
    w = [2., 1.]
    rgr_w = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
    rgr_w.fit(Xw, yw, w)

    # unweighted, but with repeated samples
    X = [[1, 2, 3], [1, 2, 3], [4, 5, 6]]
    y = [[3.141, 2.718], [3.141, 2.718], [2.718, 3.141]]
    rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
    rgr.fit(X, y)

    X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]]
    assert_almost_equal(rgr.predict(X_test), rgr_w.predict(X_test))
Example 7: train_test_split
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(600, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y += (0.5 - rng.rand(*y.shape))
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    train_size=400,
                                                    random_state=4)

max_depth = 30
regr_multirf = MultiOutputRegressor(RandomForestRegressor(max_depth=max_depth,
                                                          random_state=0))
regr_multirf.fit(X_train, y_train)
regr_rf = RandomForestRegressor(max_depth=max_depth, random_state=2)
regr_rf.fit(X_train, y_train)
# Predict on new data
y_multirf = regr_multirf.predict(X_test)
y_rf = regr_rf.predict(X_test)
# Plot the results
plt.figure()
s = 50
a = 0.4
plt.scatter(y_test[:, 0], y_test[:, 1], edgecolor='k',
            c="navy", s=s, marker="s", alpha=a, label="Data")
Example 8: list
feature = "Diabetes"
# get X and y data
train = pd.read_csv("train.csv", delimiter=",")
train = train.drop_duplicates() # ensure no duplicates
y_train = train[feature].to_frame()
names = y_train[feature].unique()
X_train = train.drop(feature, axis=1)  # drop the target column from the features
X_names = list(X_train)
# Get test data
test = pd.read_csv("test.csv", delimiter=",")
X_test = test
max_depth = 3
regr_multirf = MultiOutputRegressor(RandomForestRegressor(max_depth=max_depth))
regr_multirf.fit(X_train, y_train)
regr_rf = RandomForestRegressor(n_estimators=20, max_depth=max_depth)
regr_rf.fit(X_train, y_train)
# Predict on new data
y_multirf = regr_multirf.predict(X_test)
y_rf = regr_rf.predict(X_test)
# put predictions into csv
IDs = pd.DataFrame(X_test["ID"])
y_pred = pd.DataFrame(y_multirf)
pred_data = IDs.join(y_pred)
pred_data.columns = ['ID', 'Prediction']
pred_data.to_csv(path_or_buf="prediction_multirf.csv", index=False)