
Python neighbors.KNeighborsRegressor Method Code Examples

This article collects typical usage examples of the Python method sklearn.neighbors.KNeighborsRegressor. If you are wondering how neighbors.KNeighborsRegressor is used in practice, or looking for concrete examples of it in real projects, the curated code samples below may help. You can also explore further usage examples from the containing module, sklearn.neighbors.


The following presents 15 code examples of the neighbors.KNeighborsRegressor method, sorted by popularity by default.
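Before the collected examples, here is a minimal, self-contained usage sketch of the estimator itself. The synthetic data and the settings n_neighbors=5, weights='distance' are illustrative assumptions, not taken from any of the projects below:

import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsRegressor

rng = np.random.RandomState(0)
X = rng.rand(200, 3)                       # 200 samples, 3 features
y = X.sum(axis=1) + 0.1 * rng.randn(200)   # noisy target

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# n_neighbors and weights are example settings; 'distance' weights nearer
# neighbors more heavily than plain 'uniform' averaging.
knn = KNeighborsRegressor(n_neighbors=5, weights='distance')
knn.fit(X_train, y_train)

y_pred = knn.predict(X_test)
print("R^2 on test set:", knn.score(X_test, y_test))

Note that with weights='distance', predictions at the training points reproduce the training targets exactly, which is worth keeping in mind when judging training-set scores.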

Example 1: test_regression

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KNeighborsRegressor [as alias]
def test_regression():
    # Check regression for various parameter settings.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
                                                        boston.target[:50],
                                                        random_state=rng)
    grid = ParameterGrid({"max_samples": [0.5, 1.0],
                          "max_features": [0.5, 1.0],
                          "bootstrap": [True, False],
                          "bootstrap_features": [True, False]})

    for base_estimator in [None,
                           DummyRegressor(),
                           DecisionTreeRegressor(),
                           KNeighborsRegressor(),
                           SVR(gamma='scale')]:
        for params in grid:
            BaggingRegressor(base_estimator=base_estimator,
                             random_state=rng,
                             **params).fit(X_train, y_train).predict(X_test) 
Developer: PacktPublishing | Project: Mastering-Elasticsearch-7.0 | Lines: 22 | Source: test_bagging.py

Example 2: build_ensemble

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KNeighborsRegressor [as alias]
def build_ensemble(**kwargs):
    """Generate ensemble."""

    ens = SuperLearner(**kwargs)
    prep = {'Standard Scaling': [StandardScaler()],
            'Min Max Scaling': [MinMaxScaler()],
            'No Preprocessing': []}

    est = {'Standard Scaling':
               [ElasticNet(), Lasso(), KNeighborsRegressor()],
           'Min Max Scaling':
               [SVR()],
           'No Preprocessing':
               [RandomForestRegressor(random_state=SEED),
                GradientBoostingRegressor()]}

    ens.add(est, prep)

    ens.add(GradientBoostingRegressor(), meta=True)

    return ens 
Developer: flennerhag | Project: mlens | Lines: 23 | Source: friedman_scores.py

Example 3: test_single_estimator

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KNeighborsRegressor [as alias]
def test_single_estimator():
    # Check singleton ensembles.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)

    clf1 = BaggingRegressor(base_estimator=KNeighborsRegressor(),
                            n_estimators=1,
                            bootstrap=False,
                            bootstrap_features=False,
                            random_state=rng).fit(X_train, y_train)

    clf2 = KNeighborsRegressor().fit(X_train, y_train)

    assert_array_almost_equal(clf1.predict(X_test), clf2.predict(X_test)) 
Developer: PacktPublishing | Project: Mastering-Elasticsearch-7.0 | Lines: 18 | Source: test_bagging.py

Example 4: test_kneighbors_regressor

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KNeighborsRegressor [as alias]
def test_kneighbors_regressor(n_samples=40,
                              n_features=5,
                              n_test_pts=10,
                              n_neighbors=3,
                              random_state=0):
    # Test k-neighbors regression
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = np.sqrt((X ** 2).sum(1))
    y /= y.max()

    y_target = y[:n_test_pts]

    weight_func = _weight_func

    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', weight_func]:
            knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
                                                weights=weights,
                                                algorithm=algorithm)
            knn.fit(X, y)
            epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
            y_pred = knn.predict(X[:n_test_pts] + epsilon)
            assert np.all(abs(y_pred - y_target) < 0.3) 
Developer: PacktPublishing | Project: Mastering-Elasticsearch-7.0 | Lines: 26 | Source: test_neighbors.py

Example 5: test_KNeighborsRegressor_multioutput_uniform_weight

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KNeighborsRegressor [as alias]
def test_KNeighborsRegressor_multioutput_uniform_weight():
    # Test k-neighbors in multi-output regression with uniform weight
    rng = check_random_state(0)
    n_features = 5
    n_samples = 40
    n_output = 4

    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_output)

    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        knn = neighbors.KNeighborsRegressor(weights=weights,
                                            algorithm=algorithm)
        knn.fit(X_train, y_train)

        neigh_idx = knn.kneighbors(X_test, return_distance=False)
        y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
                               for idx in neigh_idx])

        y_pred = knn.predict(X_test)

        assert_equal(y_pred.shape, y_test.shape)
        assert_equal(y_pred_idx.shape, y_test.shape)
        assert_array_almost_equal(y_pred, y_pred_idx) 
Developer: PacktPublishing | Project: Mastering-Elasticsearch-7.0 | Lines: 27 | Source: test_neighbors.py

Example 6: test_kneighbors_regressor_multioutput

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KNeighborsRegressor [as alias]
def test_kneighbors_regressor_multioutput(n_samples=40,
                                          n_features=5,
                                          n_test_pts=10,
                                          n_neighbors=3,
                                          random_state=0):
    # Test k-neighbors in multi-output regression
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = np.sqrt((X ** 2).sum(1))
    y /= y.max()
    y = np.vstack([y, y]).T

    y_target = y[:n_test_pts]

    weights = ['uniform', 'distance', _weight_func]
    for algorithm, weights in product(ALGORITHMS, weights):
        knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
                                            weights=weights,
                                            algorithm=algorithm)
        knn.fit(X, y)
        epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
        y_pred = knn.predict(X[:n_test_pts] + epsilon)
        assert_equal(y_pred.shape, y_target.shape)

        assert np.all(np.abs(y_pred - y_target) < 0.3) 
Developer: PacktPublishing | Project: Mastering-Elasticsearch-7.0 | Lines: 27 | Source: test_neighbors.py

Example 7: test_neighbors_iris

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KNeighborsRegressor [as alias]
def test_neighbors_iris():
    # Sanity checks on the iris dataset
    # Puts three points of each label in the plane and performs a
    # nearest neighbor query on points near the decision boundary.

    for algorithm in ALGORITHMS:
        clf = neighbors.KNeighborsClassifier(n_neighbors=1,
                                             algorithm=algorithm)
        clf.fit(iris.data, iris.target)
        assert_array_equal(clf.predict(iris.data), iris.target)

        clf.set_params(n_neighbors=9, algorithm=algorithm)
        clf.fit(iris.data, iris.target)
        assert np.mean(clf.predict(iris.data) == iris.target) > 0.95

        rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
        rgs.fit(iris.data, iris.target)
        assert_greater(np.mean(rgs.predict(iris.data).round() == iris.target),
                       0.95) 
Developer: PacktPublishing | Project: Mastering-Elasticsearch-7.0 | Lines: 21 | Source: test_neighbors.py

Example 8: test_33_knn_regressor

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KNeighborsRegressor [as alias]
def test_33_knn_regressor(self):
        print("\ntest 33 (knn regressor without preprocessing)\n")
        X, X_test, y, features, target, test_file = self.data_utility.get_data_for_regression()

        model = KNeighborsRegressor()
        pipeline_obj = Pipeline([
            ("model", model)
        ])
        pipeline_obj.fit(X,y)
        file_name = 'test33sklearn.pmml'
        
        skl_to_pmml(pipeline_obj, features, target, file_name)
        model_name  = self.adapa_utility.upload_to_zserver(file_name)
        predictions, _ = self.adapa_utility.score_in_zserver(model_name, test_file)
        model_pred = pipeline_obj.predict(X_test)
        self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True) 
Developer: nyoka-pmml | Project: nyoka | Lines: 18 | Source: testScoreWithAdapaSklearn.py

Example 9: get_points_from_flow_data

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KNeighborsRegressor [as alias]
def get_points_from_flow_data(self, x_points, y_points, z_points):
        """
        Return the u-value of a set of points from with a FlowData object.
        Use a simple nearest neighbor regressor to do internal interpolation.

        Args:
            x_points (np.array): Array of x-locations of points.
            y_points (np.array): Array of y-locations of points.
            z_points (np.array): Array of z-locations of points.

        Returns:
            np.array: Array of u-velocity at specified points.
        """
        X = np.column_stack([self.x, self.y, self.z])
        n_neighbors = 1
        knn = neighbors.KNeighborsRegressor(n_neighbors)
        knn.fit(X, self.u)

        # Predict new points
        T = np.column_stack([x_points, y_points, z_points])
        return knn.predict(T) 
Developer: NREL | Project: floris | Lines: 24 | Source: flow_data.py

Example 10: __init__

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KNeighborsRegressor [as alias]
def __init__(self,
                 transformer=None,
                 estimator=None,
                 normalize=True,
                 keep_tsne_outputs=False,
                 **kwargs):
        TransformerMixin.__init__(self)
        BaseEstimator.__init__(self)
        if estimator is None:
            estimator = KNeighborsRegressor()
        if transformer is None:
            transformer = TSNE()
        self.estimator = estimator
        self.transformer = transformer
        self.keep_tsne_outputs = keep_tsne_outputs
        if not hasattr(transformer, "fit_transform"):
            raise AttributeError(
                "transformer {} does not have a 'fit_transform' "
                "method.".format(type(transformer)))
        if not hasattr(estimator, "predict"):
            raise AttributeError("estimator {} does not have a 'predict' "
                                 "method.".format(type(estimator)))
        self.normalize = normalize
        if kwargs:
            self.set_params(**kwargs) 
Developer: onnx | Project: sklearn-onnx | Lines: 27 | Source: test_custom_transformer.py

Example 11: test_objectmapper

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KNeighborsRegressor [as alias]
def test_objectmapper(self):
        df = pdml.ModelFrame([])
        self.assertIs(df.neighbors.NearestNeighbors,
                      neighbors.NearestNeighbors)
        self.assertIs(df.neighbors.KNeighborsClassifier,
                      neighbors.KNeighborsClassifier)
        self.assertIs(df.neighbors.RadiusNeighborsClassifier,
                      neighbors.RadiusNeighborsClassifier)
        self.assertIs(df.neighbors.KNeighborsRegressor,
                      neighbors.KNeighborsRegressor)
        self.assertIs(df.neighbors.RadiusNeighborsRegressor,
                      neighbors.RadiusNeighborsRegressor)
        self.assertIs(df.neighbors.NearestCentroid, neighbors.NearestCentroid)
        self.assertIs(df.neighbors.BallTree, neighbors.BallTree)
        self.assertIs(df.neighbors.KDTree, neighbors.KDTree)
        self.assertIs(df.neighbors.DistanceMetric, neighbors.DistanceMetric)
        self.assertIs(df.neighbors.KernelDensity, neighbors.KernelDensity) 
Developer: pandas-ml | Project: pandas-ml | Lines: 19 | Source: test_neighbors.py

Example 12: plot_kneighbors_regularization

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KNeighborsRegressor [as alias]
def plot_kneighbors_regularization():
    rnd = np.random.RandomState(42)
    x = np.linspace(-3, 3, 100)
    y_no_noise = np.sin(4 * x) + x
    y = y_no_noise + rnd.normal(size=len(x))
    X = x[:, np.newaxis]
    fig, axes = plt.subplots(1, 3, figsize=(15, 5))

    x_test = np.linspace(-3, 3, 1000)

    for n_neighbors, ax in zip([2, 5, 20], axes.ravel()):
        kneighbor_regression = KNeighborsRegressor(n_neighbors=n_neighbors)
        kneighbor_regression.fit(X, y)
        ax.plot(x, y_no_noise, label="true function")
        ax.plot(x, y, "o", label="data")
        ax.plot(x_test, kneighbor_regression.predict(x_test[:, np.newaxis]),
                label="prediction")
        ax.legend()
        ax.set_title("n_neighbors = %d" % n_neighbors) 
Developer: amueller | Project: scipy_2015_sklearn_tutorial | Lines: 21 | Source: plot_kneighbors_regularization.py

Example 13: parameterChoosing

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KNeighborsRegressor [as alias]
def parameterChoosing(self):
        # Set the parameters by cross-validation
        tuned_parameters = [{'weights': ['uniform', 'distance'],
                             'n_neighbors': range(2,100)
                             }
                            ]


        reg = GridSearchCV(neighbors.KNeighborsRegressor(), tuned_parameters, cv=5,
                           scoring='neg_mean_squared_error')
        reg.fit(self.X_train, self.y_train)

        print("Best parameters set found on development set:\n")
        print(reg.best_params_)

        print("Grid scores on development set:\n")
        # cv_results_ holds the mean and std of the cross-validated test score
        # for each parameter setting in the grid
        results = reg.cv_results_
        for mean_score, std_score, params in zip(results['mean_test_score'],
                                                 results['std_test_score'],
                                                 results['params']):
            print("%0.3f (+/-%0.03f) for %r\n" % (mean_score, std_score * 2, params))

        print(reg.scorer_)

        print("MSE for test data set:")
        y_true, y_pred = self.y_test, reg.predict(self.X_test)
        print(mean_squared_error(y_true, y_pred))
Developer: junlulocky | Project: AirTicketPredicting | Lines: 25 | Source: RegressionKNN.py

Example 14: _get_regressor_object

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KNeighborsRegressor [as alias]
def _get_regressor_object(self, action, **func_args):
        """
        Return a sklearn estimator object based on the estimator and corresponding parameters

        - 'action': str
        The sklearn estimator used.
        - 'func_args': variable length keyworded argument
        The parameters passed to the sklearn estimator.
        """
        if action == "linear_regression":
            return LinearRegression(**func_args)
        elif action == "knn":
            return KNeighborsRegressor(**func_args)
        elif action == "svm":
            return SVR(**func_args)
        elif action == "random_forest":
            return RandomForestRegressor(**func_args)
        elif action == "neural_network":
            return MLPRegressor(**func_args)
        else:
            raise ValueError("The function: {} is not supported by dowhy at the moment.".format(action)) 
Developer: microsoft | Project: dowhy | Lines: 23 | Source: dummy_outcome_refuter.py

Example 15: knn_interp

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KNeighborsRegressor [as alias]
def knn_interp(X, Y, perc):

    k_split = int(X.shape[0] * perc)
    X_train = X[:k_split]
    Y_train = Y[:k_split]
    X_test = X[k_split:]
    Y_test = Y[k_split:]

    n_neighbors = 5
    model = neighbors.KNeighborsRegressor(n_neighbors)

    print('Fitting...')
    model.fit(X_train, Y_train)

    print('Predicting...')
    Y_predict = model.predict(X_test)

    print('Scoring...')
    score = model.score(X_test, Y_test)

    print('Score:', score)

    return Y_predict
Developer: SanPen | Project: GridCal | Lines: 25 | Source: ptdf_ts.py


Note: The sklearn.neighbors.KNeighborsRegressor examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors, and redistribution and use should follow the license of the corresponding project. Do not reproduce without permission.