本文整理汇总了Python中sklearn.datasets.make_friedman2函数的典型用法代码示例。如果您正苦于以下问题:Python make_friedman2函数的具体用法?Python make_friedman2怎么用?Python make_friedman2使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了make_friedman2函数的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_make_friedman2
def test_make_friedman2():
    """Check shapes and exact noiseless targets of the Friedman #2 generator."""
    X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
    assert_equal(X.shape, (5, 4), "X shape mismatch")
    assert_equal(y.shape, (5,), "y shape mismatch")
    # Recompute the noiseless Friedman #2 target directly from the features.
    inner = X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])
    expected = (X[:, 0] ** 2 + inner ** 2) ** 0.5
    assert_array_almost_equal(y, expected)
示例2: test_regression_synthetic
def test_regression_synthetic():
    """Test GradientBoostingRegressor on the synthetic regression datasets
    used in Leo Breiman, "Bagging Predictors", Machine Learning 24(2):
    123-140 (1996).
    """
    random_state = check_random_state(1)
    # min_samples_split must be >= 2 in scikit-learn (1 raises ValueError);
    # 2 also matches the parameters used by the sibling test in this file.
    regression_params = {'n_estimators': 100, 'max_depth': 4,
                         'min_samples_split': 2, 'learning_rate': 0.1,
                         'loss': 'ls'}

    # Friedman1: default estimator parameters, noisy target.
    X, y = datasets.make_friedman1(n_samples=1200,
                                   random_state=random_state, noise=1.0)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    clf = GradientBoostingRegressor()
    clf.fit(X_train, y_train)
    mse = mean_squared_error(y_test, clf.predict(X_test))
    assert mse < 5.0, "Failed on Friedman1 with mse = %.4f" % mse

    # Friedman2
    X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    clf = GradientBoostingRegressor(**regression_params)
    clf.fit(X_train, y_train)
    mse = mean_squared_error(y_test, clf.predict(X_test))
    assert mse < 1700.0, "Failed on Friedman2 with mse = %.4f" % mse

    # Friedman3
    X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    clf = GradientBoostingRegressor(**regression_params)
    clf.fit(X_train, y_train)
    mse = mean_squared_error(y_test, clf.predict(X_test))
    assert mse < 0.015, "Failed on Friedman3 with mse = %.4f" % mse
示例3: genFriedman
def genFriedman(self, i=1, N=240, D=10):
    """Generate one of the three Friedman synthetic regression datasets.

    Parameters
    ----------
    i : int
        Which Friedman dataset to generate (1, 2 or 3).
    N : int
        Number of samples.
    D : int
        Number of features.  Only Friedman #1 has a configurable feature
        count; #2 and #3 always have exactly 4 features, so D is ignored
        for them.

    Returns
    -------
    (X, Y) : feature matrix and target vector.

    Raises
    ------
    ValueError
        If ``i`` is not 1, 2 or 3.
    """
    if i not in (1, 2, 3):
        # ValueError is more precise than a bare Exception and is still
        # caught by any caller handling Exception.
        raise ValueError('not a correct dataset')
    if i == 1:
        X, Y = datasets.make_friedman1(n_samples=N, n_features=D)
    elif i == 2:
        # BUG FIX: the second positional argument of make_friedman2 is
        # `noise`, not a feature count -- passing D here silently added
        # noise=10 to the target.  Friedman #2 has a fixed 4 features.
        X, Y = datasets.make_friedman2(n_samples=N)
    else:
        # Same fix for make_friedman3 (second positional arg is `noise`).
        X, Y = datasets.make_friedman3(n_samples=N)
    return X, Y
示例4: test_regression_synthetic
def test_regression_synthetic():
    # Synthetic regression benchmarks from Leo Breiman,
    # "Bagging Predictors", Machine Learning 24(2): 123-140 (1996).
    random_state = check_random_state(1)
    regression_params = {'n_estimators': 100, 'max_depth': 4,
                         'min_samples_split': 2, 'learning_rate': 0.1,
                         'loss': 'ls'}

    def split(X, y):
        # First 200 samples for training, the remaining 1000 for testing.
        return X[:200], y[:200], X[200:], y[200:]

    # Friedman1 (default estimator parameters)
    X, y = datasets.make_friedman1(n_samples=1200,
                                   random_state=random_state,
                                   noise=1.0)
    X_train, y_train, X_test, y_test = split(X, y)
    for presort in (True, False):
        est = GradientBoostingRegressor(presort=presort)
        est.fit(X_train, y_train)
        assert_less(mean_squared_error(y_test, est.predict(X_test)), 5.0)

    # Friedman2
    X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
    X_train, y_train, X_test, y_test = split(X, y)
    for presort in (True, False):
        regression_params['presort'] = presort
        est = GradientBoostingRegressor(**regression_params)
        est.fit(X_train, y_train)
        assert_less(mean_squared_error(y_test, est.predict(X_test)), 1700.0)

    # Friedman3
    X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
    X_train, y_train, X_test, y_test = split(X, y)
    for presort in (True, False):
        regression_params['presort'] = presort
        est = GradientBoostingRegressor(**regression_params)
        est.fit(X_train, y_train)
        assert_less(mean_squared_error(y_test, est.predict(X_test)), 0.015)
示例5: test_all_regressors
def test_all_regressors():
    """Check every factory in `regressors` against an OLS baseline on
    Friedman #2 data: each model must beat OLS MSE by more than 10%.

    Fixed to use print() calls instead of Python 2 print statements so
    the example runs under Python 3 like the other examples in this file.
    """
    x, y = make_friedman2(10000)
    x_train, y_train, x_test, y_test = test_helpers.split_dataset(x, y)

    # Ordinary least squares baseline.
    ols = LinearRegression()
    ols.fit(x_train, y_train)
    ols_pred = ols.predict(x_test)
    ols_mse = mean_square_error(y_test, ols_pred)

    for fn in regressors:
        print(fn)
        model = fn(x_train, y_train)
        print(model)
        pred = model.predict(x_test)
        mse = mean_square_error(y_test, pred)
        print("OLS MSE:", ols_mse, " Current MSE:", mse)
        print("Ratio:", mse / ols_mse)
        # Require a >10% improvement over the OLS baseline.
        assert ols_mse > 1.1 * mse
示例6: Friedman #2 data example
'''Friedman #2 data example'''
import rvm
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3

print('Friedman data set')

# Fixed seed so the experiment is reproducible.
np.random.seed(2)

num_runs = 1
total_rel_vectors = 0  # accumulated relevance-vector count across runs
total_rmse = 0         # accumulated root-mean-squared error across runs

for run in range(num_runs):
    # Noiseless training sample of 240 points from Friedman #2.
    train_x, train_y = make_friedman2(240, noise=0)
    model = rvm.RVMRegression(kernel="rbf", gamma=0.00001)
    model.fit(np.matrix(train_x), np.matrix(train_y.reshape(240, 1)))
    total_rel_vectors += len(model.rel_ind)

    # Fresh validation sample (default noise) for the error estimate.
    valid_x, valid_y = make_friedman2(240)
    pred_y = model.predict(valid_x)
    total_rmse += mean_squared_error(valid_y, pred_y) ** 0.5

print("Vectors: ", total_rel_vectors / num_runs)
print("Root mean square = ", total_rmse / num_runs)
示例7: with_best_first
def with_best_first(cls, max_leaf_nodes):
    """Return ``cls`` with ``max_leaf_nodes`` pre-bound (best-first growth)."""
    bound_factory = partial(cls, max_leaf_nodes=max_leaf_nodes)
    return bound_factory
def uniform_dataset(args):
    """Dataset with uniform [0, 1) features and random +/-1 labels."""
    num_rows = args.num_examples
    X = np.random.random(size=(num_rows, args.num_features))
    y = np.random.choice([-1, 1], size=num_rows)
    return (X, y)
# Mapping from dataset name to a factory that takes the parsed CLI ``args``
# (reads args.num_examples, args.num_features, args.noise,
# args.num_informative) and returns an (X, y) pair.
DATASETS = {
    "uniform": uniform_dataset,
    "hastie": lambda args: datasets.make_hastie_10_2(
        n_samples=args.num_examples),
    "friedman1": lambda args: datasets.make_friedman1(
        n_samples=args.num_examples, n_features=args.num_features),
    # friedman2/friedman3 take no feature count here -- only the sample
    # count and noise level are configurable for them.
    "friedman2": lambda args: datasets.make_friedman2(
        n_samples=args.num_examples, noise=args.noise),
    "friedman3": lambda args: datasets.make_friedman3(
        n_samples=args.num_examples, noise=args.noise),
    "make_regression": lambda args: datasets.make_regression(
        n_samples=args.num_examples,
        n_features=args.num_features,
        n_informative=args.num_informative)
}
ENSEMBLE_REGRESSORS = [
("GB-D1", with_depth(ensemble.GradientBoostingRegressor, 1)),
("GB-D3", with_depth(ensemble.GradientBoostingRegressor, 3)),
("GB-B10", with_best_first(ensemble.GradientBoostingRegressor, 10)),
("RF-D1", with_depth(ensemble.RandomForestRegressor, 1)),
("RF-D3", with_depth(ensemble.RandomForestRegressor, 3)),
("RF-D5", with_depth(ensemble.RandomForestRegressor, 5)),
示例8: friedman2
def friedman2(n_samples=20000):
    """Generate a Friedman #2 sample and wrap it in a Dataset object."""
    features, labels = datasets.make_friedman2(n_samples=n_samples)
    return DatasetFactory.Dataset(data=features, target=labels)