

Python datasets.make_friedman3 Function Code Examples

This article collects typical usage examples of the Python function sklearn.datasets.make_friedman3. If you are wondering what exactly make_friedman3 does or how to use it, the curated code examples below should help.


Seven code examples of the make_friedman3 function are shown below, ordered by popularity by default.
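Before turning to the project examples, here is a minimal usage sketch (not taken from any of the projects below) showing the shapes make_friedman3 returns and the Friedman #3 target it implements; the n_samples, noise, and random_state values are arbitrary choices for illustration.

from sklearn.datasets import make_friedman3
import numpy as np

# make_friedman3 always generates 4 input features and computes the target as
# y = arctan((X1 * X2 - 1 / (X1 * X3)) / X0), plus optional Gaussian noise.
# n_samples=100, noise=0.1 and random_state=0 are arbitrary illustrative values.
X, y = make_friedman3(n_samples=100, noise=0.1, random_state=0)

print(X.shape)  # (100, 4)
print(y.shape)  # (100,)

# With noise=0.0 the target matches the formula exactly, as Example 2 below verifies:
X0, y0 = make_friedman3(n_samples=100, noise=0.0, random_state=0)
expected = np.arctan((X0[:, 1] * X0[:, 2] - 1 / (X0[:, 1] * X0[:, 3])) / X0[:, 0])
assert np.allclose(y0, expected)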

Example 1: test_regression_synthetic

def test_regression_synthetic():
    """Test on synthetic regression datasets used in Leo Breiman,
    `Bagging Predictors?. Machine Learning 24(2): 123-140 (1996). """
    random_state = check_random_state(1)
    regression_params = {'n_estimators': 100, 'max_depth': 4,
                         'min_samples_split': 1, 'learning_rate': 0.1,
                         'loss': 'ls'}

    # Friedman1
    X, y = datasets.make_friedman1(n_samples=1200,
                                   random_state=random_state, noise=1.0)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    clf = GradientBoostingRegressor()
    clf.fit(X_train, y_train)
    mse = mean_squared_error(y_test, clf.predict(X_test))
    assert mse < 5.0, "Failed on Friedman1 with mse = %.4f" % mse

    # Friedman2
    X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    clf = GradientBoostingRegressor(**regression_params)
    clf.fit(X_train, y_train)
    mse = mean_squared_error(y_test, clf.predict(X_test))
    assert mse < 1700.0, "Failed on Friedman2 with mse = %.4f" % mse

    # Friedman3
    X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    clf = GradientBoostingRegressor(**regression_params)
    clf.fit(X_train, y_train)
    mse = mean_squared_error(y_test, clf.predict(X_test))
    assert mse < 0.015, "Failed on Friedman3 with mse = %.4f" % mse
Developer ID: ChuntheQhai, Project: Dota2-Heroes-Recommendation, Lines: 35, Source file: test_gradient_boosting.py

Example 2: test_make_friedman3

def test_make_friedman3():
    X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)

    assert_equal(X.shape, (5, 4), "X shape mismatch")
    assert_equal(y.shape, (5,), "y shape mismatch")

    assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]))
Developer ID: 93sam, Project: scikit-learn, Lines: 7, Source file: test_samples_generator.py

Example 3: genFriedman

    def genFriedman(self, i=1, N=240, D=10):
        if i not in range(1,4):
            raise Exception('not a correct dataset')

        if i == 1:
            X, Y = datasets.make_friedman1(n_samples=N, n_features=D)

        # make_friedman2 and make_friedman3 always generate 4 features; their
        # second positional argument is noise, so D must not be passed to them.
        if i == 2:
            X, Y = datasets.make_friedman2(n_samples=N)

        if i == 3:
            X, Y = datasets.make_friedman3(n_samples=N)
        return X, Y
Developer ID: adhaka, Project: dd2434project, Lines: 13, Source file: DataSets.py

Example 4: make_data

def make_data(n_samples=1000, n_features=1, n_targets=1, informative_prop=1.0,
              noise=0.0, test_prop=0.1, valid_prop=0.3, method='linear'):
    if method == 'linear':
        params = dict(n_features=n_features,
                      n_informative=int(n_features*informative_prop),
                      noise=noise,
                      n_targets=n_targets,
                      n_samples=n_samples,
                      shuffle=False,
                      bias=0.0)
        X, Y = make_regression(**params)
    elif method == 'boston':
        boston = load_boston()
        X = boston.data
        Y = boston.target
    else:
        # make_friedman3 does not take n_features; it always generates 4 features.
        X, Y = make_friedman3(n_samples=n_samples, noise=noise)

    X = MinMaxScaler(feature_range=(0.0,1.0)).fit_transform(X)
    X = X.astype(theano.config.floatX)
    Y = MinMaxScaler(feature_range=(0.0,1.0)).fit_transform(Y)
    Y = Y.astype(theano.config.floatX)
    if len(X.shape) > 1:
        n_features = X.shape[1]
    else:
        X = X.reshape(X.shape[0], -1)
        n_features = 1
    if len(Y.shape) > 1:
        n_targets = Y.shape[1]
    else:
        Y = Y.reshape(Y.shape[0], -1)
        n_targets = 1

    X_train, Y_train, X_valid, Y_valid, X_test, Y_test = \
        train_valid_test_split(X, Y,
                               test_prop=test_prop, valid_prop=valid_prop)
    return dict(
        X_train=theano.shared(X_train),
        Y_train=theano.shared(Y_train),
        X_valid=theano.shared(X_valid),
        Y_valid=theano.shared(Y_valid),
        X_test=theano.shared(X_test),
        Y_test=theano.shared(Y_test),
        num_examples_train=X_train.shape[0],
        num_examples_valid=X_valid.shape[0],
        num_examples_test=X_test.shape[0],
        input_dim=n_features,
        output_dim=n_targets)
Developer ID: bootphon, Project: phonrulemodel, Lines: 51, Source file: regression_test.py

Example 5: test_regression_synthetic

def test_regression_synthetic():
    # Test on synthetic regression datasets used in Leo Breiman,
    # "Bagging Predictors", Machine Learning 24(2): 123-140 (1996).
    random_state = check_random_state(1)
    regression_params = {'n_estimators': 100, 'max_depth': 4,
                         'min_samples_split': 2, 'learning_rate': 0.1,
                         'loss': 'ls'}

    # Friedman1
    X, y = datasets.make_friedman1(n_samples=1200,
                                   random_state=random_state,
                                   noise=1.0)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]

    for presort in True, False:
        clf = GradientBoostingRegressor(presort=presort)
        clf.fit(X_train, y_train)
        mse = mean_squared_error(y_test, clf.predict(X_test))
        assert_less(mse, 5.0)

    # Friedman2
    X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]

    for presort in True, False:
        regression_params['presort'] = presort
        clf = GradientBoostingRegressor(**regression_params)
        clf.fit(X_train, y_train)
        mse = mean_squared_error(y_test, clf.predict(X_test))
        assert_less(mse, 1700.0)

    # Friedman3
    X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]

    for presort in True, False:
        regression_params['presort'] = presort
        clf = GradientBoostingRegressor(**regression_params)
        clf.fit(X_train, y_train)
        mse = mean_squared_error(y_test, clf.predict(X_test))
        assert_less(mse, 0.015)
Developer ID: amueller, Project: scikit-learn, Lines: 44, Source file: test_gradient_boosting.py

Example 6: uniform_dataset


def uniform_dataset(args):
    X = np.random.random(size=(args.num_examples, args.num_features))
    y = np.random.choice([-1, 1], size=args.num_examples)
    return (X, y)

DATASETS = {
    "uniform": uniform_dataset,
    "hastie": lambda args: datasets.make_hastie_10_2(
        n_samples=args.num_examples),
    "friedman1": lambda args: datasets.make_friedman1(
        n_samples=args.num_examples, n_features=args.num_features),
    "friedman2": lambda args: datasets.make_friedman2(
        n_samples=args.num_examples, noise=args.noise),
    "friedman3": lambda args: datasets.make_friedman3(
        n_samples=args.num_examples, noise=args.noise),
    "make_regression": lambda args: datasets.make_regression(
        n_samples=args.num_examples,
        n_features=args.num_features,
        n_informative=args.num_informative)
}

ENSEMBLE_REGRESSORS = [
    ("GB-D1", with_depth(ensemble.GradientBoostingRegressor, 1)),
    ("GB-D3", with_depth(ensemble.GradientBoostingRegressor, 3)),
    ("GB-B10", with_best_first(ensemble.GradientBoostingRegressor, 10)),
    ("RF-D1", with_depth(ensemble.RandomForestRegressor, 1)),
    ("RF-D3", with_depth(ensemble.RandomForestRegressor, 3)),
    ("RF-D5", with_depth(ensemble.RandomForestRegressor, 5)),
]
Developer ID: ajtulloch, Project: sklearn-compiledtrees, Lines: 29, Source file: bench_compiled_tree.py

Example 7: friedman3

def friedman3(n_samples=20000):
    """ Generated data """
    (data, target) = datasets.make_friedman3(n_samples=n_samples)
    return DatasetFactory.Dataset(data=data, target=target)
Developer ID: omerdr, Project: ensemble-regression, Lines: 4, Source file: regression_datasets.py


Note: The sklearn.datasets.make_friedman3 examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors; please consult the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.