

Python svm.SVR Code Examples

This article collects typical usage examples of the sklearn.svm.SVR class in Python. If you are wondering how svm.SVR is used in practice, or are looking for concrete examples of it, the curated code samples below may help. You can also explore further usage examples from the sklearn.svm module, where this class is defined.


The following presents 15 code examples of svm.SVR, sorted by popularity by default.
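Before turning to the examples collected from open-source projects, here is a minimal, self-contained sketch of typical SVR usage. It is not taken from any of the projects cited below; the synthetic data and the parameter values (kernel='rbf', C=1.0, epsilon=0.1) are only illustrative:

import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.svm import SVR

# Synthetic 1-D regression data: y = sin(x) plus Gaussian noise (illustrative only).
rng = np.random.RandomState(0)
X = rng.uniform(-3, 3, size=(200, 1))
y = np.sin(X).ravel() + 0.1 * rng.randn(200)

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# An RBF-kernel support vector regressor with commonly used settings.
reg = SVR(kernel='rbf', C=1.0, epsilon=0.1)
reg.fit(X_train, y_train)

print("R^2 on the held-out split:", reg.score(X_test, y_test))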

Example 1: test_regression

# Required import: from sklearn import svm [as alias]
# Or: from sklearn.svm import SVR [as alias]
def test_regression():
    # Check regression for various parameter settings.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
                                                        boston.target[:50],
                                                        random_state=rng)
    grid = ParameterGrid({"max_samples": [0.5, 1.0],
                          "max_features": [0.5, 1.0],
                          "bootstrap": [True, False],
                          "bootstrap_features": [True, False]})

    for base_estimator in [None,
                           DummyRegressor(),
                           DecisionTreeRegressor(),
                           KNeighborsRegressor(),
                           SVR(gamma='scale')]:
        for params in grid:
            BaggingRegressor(base_estimator=base_estimator,
                             random_state=rng,
                             **params).fit(X_train, y_train).predict(X_test) 
Developer: PacktPublishing | Project: Mastering-Elasticsearch-7.0 | Lines: 22 | Source: test_bagging.py

Example 2: ensure_many_models

# Required import: from sklearn import svm [as alias]
# Or: from sklearn.svm import SVR [as alias]
def ensure_many_models(self):
        from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
        from sklearn.neural_network import MLPRegressor
        from sklearn.linear_model import ElasticNet, RANSACRegressor, HuberRegressor, PassiveAggressiveRegressor
        from sklearn.neighbors import KNeighborsRegressor
        from sklearn.svm import SVR, LinearSVR

        import warnings
        from sklearn.exceptions import ConvergenceWarning
        warnings.filterwarnings('ignore', category=ConvergenceWarning)

        for learner in [GradientBoostingRegressor, RandomForestRegressor, MLPRegressor,
                        ElasticNet, RANSACRegressor, HuberRegressor, PassiveAggressiveRegressor,
                        KNeighborsRegressor, SVR, LinearSVR]:
            learner = learner()
            learner_name = str(learner).split("(", maxsplit=1)[0]
            with self.subTest("Test fit using {learner}".format(learner=learner_name)):
                model = self.estimator.__class__(learner)
                model.fit(self.data_lin["X"], self.data_lin["a"], self.data_lin["y"])
                self.assertTrue(True)  # Fit did not crash 
Developer: IBM | Project: causallib | Lines: 22 | Source: test_standardization.py

Example 3: regression_svm

# Required import: from sklearn import svm [as alias]
# Or: from sklearn.svm import SVR [as alias]
def regression_svm(
    x_train, y_train, x_test, y_test, logC, logGamma):
    '''
        Estimate a SVM regressor
    '''
    # create the regressor object
    svm = sv.SVR(kernel='rbf', 
        C=0.1 * logC, gamma=0.1 * logGamma)

    # estimate the model
    svm.fit(x_train,y_train)

    # decision function
    decision_values = svm.decision_function(x_test)

    # return the object
    return mt.roc_auc(y_test, decision_values)

# find the optimal values of C and gamma 
Developer: drabastomek | Project: practicalDataAnalysisCookbook | Lines: 21 | Source: regression_svm_alternative.py

Example 4: build_ensemble

# Required import: from sklearn import svm [as alias]
# Or: from sklearn.svm import SVR [as alias]
def build_ensemble(**kwargs):
    """Generate ensemble."""

    ens = SuperLearner(**kwargs)
    prep = {'Standard Scaling': [StandardScaler()],
            'Min Max Scaling': [MinMaxScaler()],
            'No Preprocessing': []}

    est = {'Standard Scaling':
               [ElasticNet(), Lasso(), KNeighborsRegressor()],
           'Min Max Scaling':
               [SVR()],
           'No Preprocessing':
               [RandomForestRegressor(random_state=SEED),
                GradientBoostingRegressor()]}

    ens.add(est, prep)

    ens.add(GradientBoostingRegressor(), meta=True)

    return ens 
Developer: flennerhag | Project: mlens | Lines: 23 | Source: friedman_scores.py

Example 5: test_rfe_min_step

# Required import: from sklearn import svm [as alias]
# Or: from sklearn.svm import SVR [as alias]
def test_rfe_min_step():
    n_features = 10
    X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
    n_samples, n_features = X.shape
    estimator = SVR(kernel="linear")

    # Test when floor(step * n_features) <= 0
    selector = RFE(estimator, step=0.01)
    sel = selector.fit(X, y)
    assert_equal(sel.support_.sum(), n_features // 2)

    # Test when step is between (0,1) and floor(step * n_features) > 0
    selector = RFE(estimator, step=0.20)
    sel = selector.fit(X, y)
    assert_equal(sel.support_.sum(), n_features // 2)

    # Test when step is an integer
    selector = RFE(estimator, step=5)
    sel = selector.fit(X, y)
    assert_equal(sel.support_.sum(), n_features // 2) 
Developer: PacktPublishing | Project: Mastering-Elasticsearch-7.0 | Lines: 22 | Source: test_rfe.py

Example 6: test_svr_predict

# Required import: from sklearn import svm [as alias]
# Or: from sklearn.svm import SVR [as alias]
def test_svr_predict():
    # Test SVR's decision_function
    # Sanity check, test that predict implemented in python
    # returns the same as the one in libsvm

    X = iris.data
    y = iris.target

    # linear kernel
    reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)

    dec = np.dot(X, reg.coef_.T) + reg.intercept_
    assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())

    # rbf kernel
    reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)

    rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
    dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
    assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel()) 
Developer: PacktPublishing | Project: Mastering-Elasticsearch-7.0 | Lines: 22 | Source: test_svm.py

Example 7: test_21_svr

# Required import: from sklearn import svm [as alias]
# Or: from sklearn.svm import SVR [as alias]
def test_21_svr(self):
        print("\ntest 21 (SVR without preprocessing)\n")
        X, X_test, y, features, target, test_file = self.data_utility.get_data_for_regression()

        model = SVR()
        pipeline_obj = Pipeline([
            ("model", model)
        ])
        pipeline_obj.fit(X,y)
        file_name = 'test21sklearn.pmml'
        
        skl_to_pmml(pipeline_obj, features, target, file_name)
        model_name  = self.adapa_utility.upload_to_zserver(file_name)
        predictions, _ = self.adapa_utility.score_in_zserver(model_name, test_file)
        model_pred = pipeline_obj.predict(X_test)
        self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True) 
Developer: nyoka-pmml | Project: nyoka | Lines: 18 | Source: testScoreWithAdapaSklearn.py

Example 8: getModels

# Required import: from sklearn import svm [as alias]
# Or: from sklearn.svm import SVR [as alias]
def getModels():
    result = []
    result.append("LinearRegression")
    result.append("BayesianRidge")
    result.append("ARDRegression")
    result.append("ElasticNet")
    result.append("HuberRegressor")
    result.append("Lasso")
    result.append("LassoLars")
    result.append("Rigid")
    result.append("SGDRegressor")
    result.append("SVR")
    result.append("MLPClassifier")
    result.append("KNeighborsClassifier")
    result.append("SVC")
    result.append("GaussianProcessClassifier")
    result.append("DecisionTreeClassifier")
    result.append("RandomForestClassifier")
    result.append("AdaBoostClassifier")
    result.append("GaussianNB")
    result.append("LogisticRegression")
    result.append("QuadraticDiscriminantAnalysis")
    return result 
Developer: tech-quantum | Project: sia-cog | Lines: 25 | Source: scikitlearn.py

Example 9: test_support_vector_regressor

# Required import: from sklearn import svm [as alias]
# Or: from sklearn.svm import SVR [as alias]
def test_support_vector_regressor(self):
        for dtype in self.number_data_type.keys():
            scikit_model = SVR(kernel="rbf")
            data = self.scikit_data["data"].astype(dtype)
            target = self.scikit_data["target"].astype(dtype)
            scikit_model, spec = self._sklearn_setup(scikit_model, dtype, data, target)
            test_data = data[0].reshape(1, -1)
            coreml_model = create_model(spec)
            try:
                self.assertEqual(
                    scikit_model.predict(test_data)[0],
                    coreml_model.predict({"data": test_data})["target"],
                    msg="{} != {} for Dtype: {}".format(
                        scikit_model.predict(test_data)[0],
                        coreml_model.predict({"data": test_data})["target"],
                        dtype,
                    ),
                )
            except RuntimeError:
                print("{} not supported. ".format(dtype)) 
Developer: apple | Project: coremltools | Lines: 22 | Source: test_io_types.py

Example 10: fit

# Required import: from sklearn import svm [as alias]
# Or: from sklearn.svm import SVR [as alias]
def fit(self, X, y, sample_weight=None):
        """Fit the SVM model according to the given training data.

        Parameters
        ----------
        X : array-like of shape=(n_ts, sz, d)
            Time series dataset.
            
        y : array-like of shape=(n_ts, )
            Time series labels.
            
        sample_weight : array-like of shape (n_samples,), default=None
            Per-sample weights. Rescale C per sample. Higher weights force the 
            classifier to put more emphasis on these points.
        """
        sklearn_X, y = self._preprocess_sklearn(X, y, fit_time=True)

        self.svm_estimator_ = SVR(
            C=self.C, kernel=self.estimator_kernel_, degree=self.degree,
            gamma=self.gamma_, coef0=self.coef0, shrinking=self.shrinking,
            tol=self.tol, cache_size=self.cache_size,
            verbose=self.verbose, max_iter=self.max_iter
        )
        self.svm_estimator_.fit(sklearn_X, y, sample_weight=sample_weight)
        return self 
Developer: tslearn-team | Project: tslearn | Lines: 27 | Source: svm.py

Example 11: load_default

# Required import: from sklearn import svm [as alias]
# Or: from sklearn.svm import SVR [as alias]
def load_default(self, machine_list=['lasso', 'tree', 'ridge', 'random_forest', 'svm']):
        """
        Loads 4 different scikit-learn regressors by default.

        Parameters
        ----------
        machine_list: optional, list of strings
            List of default machine names to be loaded.

        """
        for machine in machine_list:
            try:
                if machine == 'lasso':
                    self.estimators_['lasso'] = linear_model.LassoCV(random_state=self.random_state).fit(self.X_k_, self.y_k_)
                if machine == 'tree':
                    self.estimators_['tree'] = DecisionTreeRegressor(random_state=self.random_state).fit(self.X_k_, self.y_k_)
                if machine == 'ridge':
                    self.estimators_['ridge'] = linear_model.RidgeCV().fit(self.X_k_, self.y_k_)
                if machine == 'random_forest':
                    self.estimators_['random_forest'] = RandomForestRegressor(random_state=self.random_state).fit(self.X_k_, self.y_k_)
                if machine == 'svm':
                    self.estimators_['svm'] = SVR().fit(self.X_k_, self.y_k_)
            except ValueError:
                continue 
Developer: bhargavvader | Project: pycobra | Lines: 26 | Source: ewa.py

Example 12: feature_selection

# Required import: from sklearn import svm [as alias]
# Or: from sklearn.svm import SVR [as alias]
def feature_selection(data,thrs, verbose=False):
    if thrs>= data.shape[0]:
        if verbose:
            print ("Trying to select %i features but only %i genes available." %( thrs, data.shape[0]))
            print ("Skipping feature selection")
        return arange(data.shape[0])
    ix_genes = arange(data.shape[0])
    threeperK = int(ceil(3*data.shape[1]/1000.))
    zerotwoperK = int(floor(0.3*data.shape[1]/1000.))
    # keep genes with at least 1 molecule in 0.3% of the cells and at least 2 molecules in 0.03% of the cells
    condition = (sum(data>=1, 1)>= threeperK) & (sum(data>=2, 1)>=zerotwoperK) 
    ix_genes = ix_genes[condition]
    
    mu = data[ix_genes,:].mean(1)
    sigma = data[ix_genes,:].std(1, ddof=1)
    cv = sigma/mu

    try:
        score, mu_linspace, cv_fit , params = fit_CV(mu,cv,fit_method='SVR', verbose=verbose)
    except ImportError:
        print ("WARNING: Feature selection was skipped becouse scipy is required. Install scipy to run feature selection.")
        return arange(data.shape[0])
 
    return ix_genes[argsort(score)[::-1]][:thrs] 
Developer: linnarsson-lab | Project: BackSPIN | Lines: 26 | Source: backSPIN.py

Example 13: predict_features

# Required import: from sklearn import svm [as alias]
# Or: from sklearn.svm import SVR [as alias]
def predict_features(self, df_features, df_target, idx=0, **kwargs):
        """For one variable, predict its neighbouring nodes.

        Args:
            df_features (pandas.DataFrame):
            df_target (pandas.Series):
            idx (int): (optional) for printing purposes
            kwargs (dict): additional options for algorithms

        Returns:
            list: scores of each feature relatively to the target
        """
        estimator = SVR(kernel='linear')
        selector = RFECV(estimator, step=1)
        selector = selector.fit(df_features.values, np.ravel(df_target.values))

        return selector.grid_scores_ 
Developer: FenTechSolutions | Project: CausalDiscoveryToolbox | Lines: 19 | Source: FSRegression.py

Example 14: nbow_model

# Required import: from sklearn import svm [as alias]
# Or: from sklearn.svm import SVR [as alias]
def nbow_model(task, embeddings, word2idx):
    if task == "clf":
        algo = LogisticRegression(C=0.6, random_state=0,
                                  class_weight='balanced')
    elif task == "reg":
        algo = SVR(kernel='linear', C=0.6)
    else:
        raise ValueError("invalid task!")

    embeddings_features = NBOWVectorizer(aggregation=["mean"],
                                         embeddings=embeddings,
                                         word2idx=word2idx,
                                         stopwords=False)

    model = Pipeline([
        ('embeddings-feats', embeddings_features),
        ('normalizer', Normalizer(norm='l2')),
        ('clf', algo)
    ])

    return model 
Developer: cbaziotis | Project: ntua-slp-semeval2018 | Lines: 23 | Source: models.py

Example 15: __init__

# Required import: from sklearn import svm [as alias]
# Or: from sklearn.svm import SVR [as alias]
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
                 tol=0.001, C=1.0, epsilon=0.1, shrinking=True, cache_size=200,
                 verbose=False, max_iter=-1):
        self.kernel = kernel
        self.C = C
        self.gamma = gamma
        self.coef0 = coef0
        self.tol = tol
        self.epsilon = epsilon
        self.shrinking = shrinking
        self.cache_size = cache_size
        self.verbose = verbose
        self.max_iter = max_iter
        self.model = SVR(kernel=self.kernel, C=self.C, gamma=self.gamma,
                         coef0=self.coef0, tol=self.tol, epsilon=self.epsilon,
                         shrinking=self.shrinking, cache_size=self.cache_size,
                         verbose=self.verbose, max_iter=self.max_iter) 
Developer: geekinglcq | Project: aca | Lines: 19 | Source: ILearner.py


Note: the sklearn.svm.SVR examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's License. Do not reproduce without permission.