

Python linear_model.RidgeClassifier Code Examples

This article collects typical usage examples of the sklearn.linear_model.RidgeClassifier class in Python. If you are unsure how to use linear_model.RidgeClassifier, or want to see how it is applied in practice, the curated examples below should help. You can also explore further usage examples from the sklearn.linear_model module, where this class lives.


The sections below show 15 code examples of linear_model.RidgeClassifier, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
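Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; dataset and parameter values are illustrative only) of the typical RidgeClassifier workflow. Note that RidgeClassifier exposes decision_function but no predict_proba.

from sklearn.datasets import make_classification
from sklearn.linear_model import RidgeClassifier
from sklearn.model_selection import train_test_split

# Build a small synthetic binary classification problem.
X, y = make_classification(n_samples=200, n_features=20, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

clf = RidgeClassifier(alpha=1.0)          # L2-regularized least-squares classifier
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))          # mean accuracy on the test split
print(clf.decision_function(X_test[:3]))  # raw class scores (no predict_proba)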

Example 1: test_cv_partial_evaluate

# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import RidgeClassifier [as alias]
def test_cv_partial_evaluate():
    X, y = make_classification(n_samples=1024, n_features=20, class_sep=0.98, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)

    model = RidgeClassifier(alpha=1.0)

    n = 0

    def _fold_count(*args):
        nonlocal n
        n += 1

    cv = Take(2, KFold(5))

    pred_oof, pred_test, scores, _ = cross_validate(model, X_train, y_train, X_test, cv=cv, eval_func=roc_auc_score,
                                                    on_each_fold=_fold_count)

    assert len(scores) == 2 + 1
    assert scores[-1] >= 0.8  # overall auc
    assert n == 2 
Developer: nyanp, Project: nyaggle, Lines: 22, Source: test_cross_validate.py

Example 2: test_07_ridge_classifier

# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import RidgeClassifier [as alias]
def test_07_ridge_classifier(self):
        print("\ntest 07 (Ridge Classifier) [multi-class]\n")
        X, X_test, y, features, target, test_file = self.data_utility.get_data_for_multi_class_classification()

        model = RidgeClassifier()
        pipeline_obj = Pipeline([
            ("model", model)
        ])
        pipeline_obj.fit(X,y)
        file_name = 'test07sklearn.pmml'
        
        skl_to_pmml(pipeline_obj, features, target, file_name)
        model_name  = self.adapa_utility.upload_to_zserver(file_name)
        predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
        model_pred = pipeline_obj.predict(X_test)
        model_prob = model._predict_proba_lr(X_test)
        self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
        self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True) 
Developer: nyoka-pmml, Project: nyoka, Lines: 20, Source: testScoreWithAdapaSklearn.py

Example 3: test_08_ridge_classifier

# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import RidgeClassifier [as alias]
def test_08_ridge_classifier(self):
        print("\ntest 08 (Ridge Classifier) [binary-class]\n")
        X, X_test, y, features, target, test_file = self.data_utility.get_data_for_binary_classification()

        model = RidgeClassifier()
        pipeline_obj = Pipeline([
            ("model", model)
        ])
        pipeline_obj.fit(X,y)
        file_name = 'test08sklearn.pmml'
        
        skl_to_pmml(pipeline_obj, features, target, file_name)
        model_name  = self.adapa_utility.upload_to_zserver(file_name)
        predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
        model_pred = pipeline_obj.predict(X_test)
        model_prob = model._predict_proba_lr(X_test)
        self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
        self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True) 
Developer: nyoka-pmml, Project: nyoka, Lines: 20, Source: testScoreWithAdapaSklearn.py
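The two tests above compare ADAPA's probabilities against model._predict_proba_lr(X_test), a private scikit-learn helper, because RidgeClassifier has no public predict_proba. The sketch below is a rough, unofficial equivalent for the binary case only, assuming the helper squashes the decision scores through a logistic sigmoid (the multi-class variant additionally normalizes each row; check the scikit-learn source for your version before relying on this).

import numpy as np

def ridge_pseudo_proba_binary(model, X):
    scores = model.decision_function(X)   # shape (n_samples,) for binary problems
    p = 1.0 / (1.0 + np.exp(-scores))     # logistic sigmoid of the decision scores
    return np.column_stack([1.0 - p, p])  # [P(class 0), P(class 1)]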

Example 4: test_model_ridge_classifier_binary

# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import RidgeClassifier [as alias]
def test_model_ridge_classifier_binary(self):
        model, X = fit_classification_model(linear_model.RidgeClassifier(), 2)
        model_onnx = convert_sklearn(
            model,
            "binary ridge classifier",
            [("input", FloatTensorType([None, X.shape[1]]))],
        )
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X,
            model,
            model_onnx,
            basename="SklearnRidgeClassifierBin",
            allow_failure="StrictVersion(onnxruntime.__version__)"
                          " <= StrictVersion('0.2.1')",
        ) 
Developer: onnx, Project: sklearn-onnx, Lines: 18, Source: test_sklearn_glm_classifier_converter.py

Example 5: test_model_ridge_classifier_multi_class

# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import RidgeClassifier [as alias]
def test_model_ridge_classifier_multi_class(self):
        model, X = fit_classification_model(linear_model.RidgeClassifier(), 5)
        model_onnx = convert_sklearn(
            model,
            "multi-class ridge classifier",
            [("input", FloatTensorType([None, X.shape[1]]))],
        )
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X,
            model,
            model_onnx,
            basename="SklearnRidgeClassifierMulti",
            allow_failure="StrictVersion(onnxruntime.__version__)"
                          " <= StrictVersion('0.2.1')",
        ) 
Developer: onnx, Project: sklearn-onnx, Lines: 18, Source: test_sklearn_glm_classifier_converter.py

Example 6: test_model_ridge_classifier_int

# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import RidgeClassifier [as alias]
def test_model_ridge_classifier_int(self):
        model, X = fit_classification_model(
            linear_model.RidgeClassifier(), 5, is_int=True)
        model_onnx = convert_sklearn(
            model,
            "multi-class ridge classifier",
            [("input", Int64TensorType([None, X.shape[1]]))],
        )
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X,
            model,
            model_onnx,
            basename="SklearnRidgeClassifierInt",
            allow_failure="StrictVersion(onnxruntime.__version__)"
                          " <= StrictVersion('0.2.1')",
        ) 
Developer: onnx, Project: sklearn-onnx, Lines: 19, Source: test_sklearn_glm_classifier_converter.py

Example 7: test_model_ridge_classifier_bool

# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import RidgeClassifier [as alias]
def test_model_ridge_classifier_bool(self):
        model, X = fit_classification_model(
            linear_model.RidgeClassifier(), 4, is_bool=True)
        model_onnx = convert_sklearn(
            model,
            "multi-class ridge classifier",
            [("input", BooleanTensorType([None, X.shape[1]]))],
        )
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X,
            model,
            model_onnx,
            basename="SklearnRidgeClassifierBool",
            allow_failure="StrictVersion(onnxruntime.__version__)"
                          " <= StrictVersion('0.2.1')",
        ) 
Developer: onnx, Project: sklearn-onnx, Lines: 19, Source: test_sklearn_glm_classifier_converter.py
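Examples 4 through 7 only convert the fitted RidgeClassifier and let dump_data_and_model handle scoring. The fragment below is a minimal sketch (not part of the test file) of running a converted model directly with onnxruntime; the input name "input" matches the tensor type declarations above, and the providers argument is an assumption that applies to recent onnxruntime releases.

import numpy as np
import onnxruntime as rt

# model_onnx and X come from one of the float-input tests above.
sess = rt.InferenceSession(model_onnx.SerializeToString(),
                           providers=["CPUExecutionProvider"])
outputs = sess.run(None, {"input": X.astype(np.float32)})
labels = outputs[0]  # first output: predicted class labels
print(labels[:5])    # converted classifiers usually expose per-class scores as a second output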

Example 8: test_cv_sklean_binary

# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import RidgeClassifier [as alias]
def test_cv_sklean_binary():
    X, y = make_classification(n_samples=1024, n_features=20, class_sep=0.98, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)

    model = RidgeClassifier(alpha=1.0)

    pred_oof, pred_test, scores, _ = cross_validate(model, X_train, y_train, X_test, cv=5, eval_func=roc_auc_score)

    assert len(scores) == 5 + 1
    assert scores[-1] >= 0.85  # overall auc
    assert roc_auc_score(y_train, pred_oof) == scores[-1]
    assert roc_auc_score(y_test, pred_test) >= 0.85  # test score 
Developer: nyanp, Project: nyaggle, Lines: 14, Source: test_cross_validate.py

Example 9: feature_selection

# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import RidgeClassifier [as alias]
def feature_selection(matrix, labels, train_ind, fnum):
    """
        matrix       : feature matrix (num_subjects x num_features)
        labels       : ground truth labels (num_subjects x 1)
        train_ind    : indices of the training samples
        fnum         : size of the feature vector after feature selection

    return:
        x_data      : feature matrix of lower dimension (num_subjects x fnum)
    """

    estimator = RidgeClassifier()
    selector = RFE(estimator, fnum, step=100, verbose=1)

    featureX = matrix[train_ind, :]
    featureY = labels[train_ind]
    selector = selector.fit(featureX, featureY.ravel())
    x_data = selector.transform(matrix)

    print("Number of labeled samples %d" % len(train_ind))
    print("Number of features selected %d" % x_data.shape[1])

    return x_data


# Make sure each site is represented in the training set when selecting a subset of the training set 
Developer: parisots, Project: population-gcn, Lines: 28, Source: ABIDEParser.py
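A hypothetical call to the function above (not part of ABIDEParser.py), selecting 200 of 2000 random features. It assumes RFE and RidgeClassifier are imported as in the original module, and an sklearn version contemporary with the project, since newer releases require n_features_to_select to be passed by keyword.

import numpy as np

rng = np.random.RandomState(0)
matrix = rng.randn(100, 2000)              # 100 subjects x 2000 features
labels = rng.randint(0, 2, size=(100, 1))  # binary ground-truth labels
train_ind = np.arange(80)                  # first 80 subjects used for training

x_data = feature_selection(matrix, labels, train_ind, fnum=200)
print(x_data.shape)  # expected: (100, 200)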

Example 10: build_model

# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import RidgeClassifier [as alias]
def build_model(hp):
    model_type = hp.Choice('model_type', ['random_forest', 'ridge'])
    if model_type == 'random_forest':
        with hp.conditional_scope('model_type', 'random_forest'):
            model = ensemble.RandomForestClassifier(
                n_estimators=hp.Int('n_estimators', 10, 50, step=10),
                max_depth=hp.Int('max_depth', 3, 10))
    elif model_type == 'ridge':
        with hp.conditional_scope('model_type', 'ridge'):
            model = linear_model.RidgeClassifier(
                alpha=hp.Float('alpha', 1e-3, 1, sampling='log'))
    else:
        raise ValueError('Unrecognized model_type')
    return model 
Developer: keras-team, Project: keras-tuner, Lines: 16, Source: sklearn_test.py
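A small, hypothetical smoke test of build_model (not in sklearn_test.py): calling it with a fresh HyperParameters object returns the default choice for every hyperparameter, so the ridge branch is not taken here. The import path is an assumption; recent releases ship as keras_tuner, while the version this test targeted imported kerastuner.

import keras_tuner as kt

hp = kt.HyperParameters()
model = build_model(hp)      # defaults: model_type='random_forest', n_estimators=10, max_depth=3
print(type(model).__name__)  # RandomForestClassifier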

Example 11: test_objectmapper

# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import RidgeClassifier [as alias]
def test_objectmapper(self):
        df = pdml.ModelFrame([])
        self.assertIs(df.linear_model.ARDRegression, lm.ARDRegression)
        self.assertIs(df.linear_model.BayesianRidge, lm.BayesianRidge)
        self.assertIs(df.linear_model.ElasticNet, lm.ElasticNet)
        self.assertIs(df.linear_model.ElasticNetCV, lm.ElasticNetCV)

        self.assertIs(df.linear_model.HuberRegressor, lm.HuberRegressor)

        self.assertIs(df.linear_model.Lars, lm.Lars)
        self.assertIs(df.linear_model.LarsCV, lm.LarsCV)
        self.assertIs(df.linear_model.Lasso, lm.Lasso)
        self.assertIs(df.linear_model.LassoCV, lm.LassoCV)
        self.assertIs(df.linear_model.LassoLars, lm.LassoLars)
        self.assertIs(df.linear_model.LassoLarsCV, lm.LassoLarsCV)
        self.assertIs(df.linear_model.LassoLarsIC, lm.LassoLarsIC)

        self.assertIs(df.linear_model.LinearRegression, lm.LinearRegression)
        self.assertIs(df.linear_model.LogisticRegression, lm.LogisticRegression)
        self.assertIs(df.linear_model.LogisticRegressionCV, lm.LogisticRegressionCV)
        self.assertIs(df.linear_model.MultiTaskLasso, lm.MultiTaskLasso)
        self.assertIs(df.linear_model.MultiTaskElasticNet, lm.MultiTaskElasticNet)
        self.assertIs(df.linear_model.MultiTaskLassoCV, lm.MultiTaskLassoCV)
        self.assertIs(df.linear_model.MultiTaskElasticNetCV, lm.MultiTaskElasticNetCV)

        self.assertIs(df.linear_model.OrthogonalMatchingPursuit, lm.OrthogonalMatchingPursuit)
        self.assertIs(df.linear_model.OrthogonalMatchingPursuitCV, lm.OrthogonalMatchingPursuitCV)
        self.assertIs(df.linear_model.PassiveAggressiveClassifier, lm.PassiveAggressiveClassifier)
        self.assertIs(df.linear_model.PassiveAggressiveRegressor, lm.PassiveAggressiveRegressor)

        self.assertIs(df.linear_model.Perceptron, lm.Perceptron)
        self.assertIs(df.linear_model.RandomizedLasso, lm.RandomizedLasso)
        self.assertIs(df.linear_model.RandomizedLogisticRegression, lm.RandomizedLogisticRegression)
        self.assertIs(df.linear_model.RANSACRegressor, lm.RANSACRegressor)
        self.assertIs(df.linear_model.Ridge, lm.Ridge)
        self.assertIs(df.linear_model.RidgeClassifier, lm.RidgeClassifier)
        self.assertIs(df.linear_model.RidgeClassifierCV, lm.RidgeClassifierCV)
        self.assertIs(df.linear_model.RidgeCV, lm.RidgeCV)
        self.assertIs(df.linear_model.SGDClassifier, lm.SGDClassifier)
        self.assertIs(df.linear_model.SGDRegressor, lm.SGDRegressor)
        self.assertIs(df.linear_model.TheilSenRegressor, lm.TheilSenRegressor) 
Developer: pandas-ml, Project: pandas-ml, Lines: 43, Source: test_linear_model.py

Example 12: test_no_predict_proba_attribute

# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import RidgeClassifier [as alias]
def test_no_predict_proba_attribute():
    with pytest.raises(AttributeError):
        clf = CTClassifier(RidgeClassifier(), RidgeClassifier()) 
Developer: neurodata, Project: mvlearn, Lines: 5, Source: test_ctclassifier.py
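A quick illustration (assumed, not from the test file) of why CTClassifier is expected to raise AttributeError here: RidgeClassifier defines decision_function but no predict_proba, which the co-training wrapper relies on.

from sklearn.linear_model import RidgeClassifier

print(hasattr(RidgeClassifier(), "predict_proba"))      # False
print(hasattr(RidgeClassifier(), "decision_function"))  # True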

Example 13: test_cross_val_predict_decision_function_shape

# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import RidgeClassifier [as alias]
def test_cross_val_predict_decision_function_shape():
    X, y = make_classification(n_classes=2, n_samples=50, random_state=0)

    preds = cross_val_predict(LogisticRegression(), X, y,
                              method='decision_function')
    assert_equal(preds.shape, (50,))

    X, y = load_iris(return_X_y=True)

    preds = cross_val_predict(LogisticRegression(), X, y,
                              method='decision_function')
    assert_equal(preds.shape, (150, 3))

    # This specifically tests imbalanced splits for binary
    # classification with decision_function. This is only
    # applicable to classifiers that can be fit on a single
    # class.
    X = X[:100]
    y = y[:100]
    assert_raise_message(ValueError,
                         'Only 1 class/es in training fold,'
                         ' but 2 in overall dataset. This'
                         ' is not supported for decision_function'
                         ' with imbalanced folds. To fix '
                         'this, use a cross-validation technique '
                         'resulting in properly stratified folds',
                         cross_val_predict, RidgeClassifier(), X, y,
                         method='decision_function', cv=KFold(2))

    X, y = load_digits(return_X_y=True)
    est = SVC(kernel='linear', decision_function_shape='ovo')

    preds = cross_val_predict(est,
                              X, y,
                              method='decision_function')
    assert_equal(preds.shape, (1797, 45))

    ind = np.argsort(y)
    X, y = X[ind], y[ind]
    assert_raises_regex(ValueError,
                        r'Output shape \(599L?, 21L?\) of decision_function '
                        r'does not match number of classes \(7\) in fold. '
                        'Irregular decision_function .*',
                        cross_val_predict, est, X, y,
                        cv=KFold(n_splits=3), method='decision_function') 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 47, Source: test_validation.py
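A minimal standalone sketch (assumed, not from the test file) of the first shape assertion above: with method='decision_function' and a binary problem, cross_val_predict returns a 1-D array of out-of-fold decision scores, one per sample.

from sklearn.datasets import make_classification
from sklearn.linear_model import RidgeClassifier
from sklearn.model_selection import cross_val_predict

X, y = make_classification(n_classes=2, n_samples=50, random_state=0)
scores = cross_val_predict(RidgeClassifier(), X, y, method='decision_function')
print(scores.shape)  # (50,)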

Example 14: test_sklearn_29

# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import RidgeClassifier [as alias]
def test_sklearn_29(self):
        iris = datasets.load_iris()
        irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
        irisd['Species'] = iris.target

        features = irisd.columns.drop('Species')
        target = 'Species'
        f_name = "ridge.pmml"

        model = RidgeClassifier()
        pipeline_obj = Pipeline([
            ("model", model)
        ])

        pipeline_obj.fit(irisd[features], irisd[target])
        skl_to_pmml(pipeline_obj, features, target, f_name)
        pmml_obj = pml.parse(f_name, True)

        segmentation = pmml_obj.MiningModel[0].Segmentation

        # 1: the PMML file was written to disk
        self.assertEqual(os.path.isfile(f_name), True)

        # 2: one segment per class plus a final model-chaining segment
        self.assertEqual(model.classes_.__len__() + 1, segmentation.Segment.__len__())

        # 3: the segmentation uses the modelChain multiple-model method
        self.assertEqual(MULTIPLE_MODEL_METHOD.MODEL_CHAIN.value, segmentation.multipleModelMethod)

        # 4: the final segment normalizes its scores with simplemax
        self.assertEqual(REGRESSION_NORMALIZATION_METHOD.SIMPLEMAX.value,
                         segmentation.Segment[-1].RegressionModel.normalizationMethod)

        # 5: intercepts in the PMML segments match the fitted model
        for i in range(model.classes_.__len__()):
            self.assertEqual("{:.16f}".format(model.intercept_[i]), \
                             "{:.16f}".format(segmentation.Segment[i].RegressionModel.RegressionTable[0].intercept))

        # 6: coefficients in each per-class segment match the fitted model
        for model_coef, pmml_seg in zip(model.coef_, segmentation.Segment):
            if int(pmml_seg.id) < 4:
                num_predict = pmml_seg.RegressionModel.RegressionTable[0].NumericPredictor
                for model_val, pmml_val in zip(model_coef, num_predict):
                    self.assertEqual("{:.16f}".format(model_val), "{:.16f}".format(pmml_val.coefficient))

        # 7: a per-class segment uses logistic normalization
        self.assertEqual(REGRESSION_NORMALIZATION_METHOD.LOGISTIC.value,
                         pmml_obj.MiningModel[0].Segmentation.Segment[
                             1].RegressionModel.normalizationMethod) 
Developer: nyoka-pmml, Project: nyoka, Lines: 54, Source: test_skl_to_pmml_UnitTest.py

Example 15: test_cross_val_predict_decision_function_shape

# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import RidgeClassifier [as alias]
def test_cross_val_predict_decision_function_shape():
    X, y = make_classification(n_classes=2, n_samples=50, random_state=0)

    preds = cross_val_predict(LogisticRegression(), X, y,
                              method='decision_function')
    assert_equal(preds.shape, (50,))

    X, y = load_iris(return_X_y=True)

    preds = cross_val_predict(LogisticRegression(), X, y,
                              method='decision_function')
    assert_equal(preds.shape, (150, 3))

    # This specifically tests imbalanced splits for binary
    # classification with decision_function. This is only
    # applicable to classifiers that can be fit on a single
    # class.
    X = X[:100]
    y = y[:100]
    assert_raise_message(ValueError,
                         'Only 1 class/es in training fold, this'
                         ' is not supported for decision_function'
                         ' with imbalanced folds. To fix '
                         'this, use a cross-validation technique '
                         'resulting in properly stratified folds',
                         cross_val_predict, RidgeClassifier(), X, y,
                         method='decision_function', cv=KFold(2))

    X, y = load_digits(return_X_y=True)
    est = SVC(kernel='linear', decision_function_shape='ovo')

    preds = cross_val_predict(est,
                              X, y,
                              method='decision_function')
    assert_equal(preds.shape, (1797, 45))

    ind = np.argsort(y)
    X, y = X[ind], y[ind]
    assert_raises_regex(ValueError,
                        r'Output shape \(599L?, 21L?\) of decision_function '
                        r'does not match number of classes \(7\) in fold. '
                        r'Irregular decision_function .*',
                        cross_val_predict, est, X, y,
                        cv=KFold(n_splits=3), method='decision_function') 
Developer: alvarobartt, Project: twitter-stock-recommendation, Lines: 46, Source: test_validation.py


Note: The sklearn.linear_model.RidgeClassifier examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective developers, and copyright in the source code remains with the original authors. Check the corresponding project's license before distributing or reusing the code, and do not repost without permission.