

Python logistic.LogisticRegression Method: Code Examples

This article compiles typical usage examples of the Python method sklearn.linear_model.logistic.LogisticRegression. If you are wondering what logistic.LogisticRegression does, how to use it, or where to find concrete examples, the hand-picked code samples below may help. You can also explore further usage of the sklearn.linear_model.logistic module it belongs to.


The following presents 10 code examples of the logistic.LogisticRegression method, sorted by popularity by default.
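Before the project-specific snippets, here is a minimal, self-contained usage sketch on synthetic data (the data and parameters are assumptions, not taken from the projects below). Note that sklearn.linear_model.logistic is a private module that was deprecated in scikit-learn 0.22, so new code should import LogisticRegression from sklearn.linear_model directly:

# Minimal usage sketch (synthetic data assumed).
import numpy as np
from sklearn.linear_model import LogisticRegression  # public import path

rng = np.random.RandomState(0)
X = rng.randn(100, 4)
y = (X[:, 0] + X[:, 1] > 0).astype(int)

clf = LogisticRegression(solver="lbfgs")
clf.fit(X, y)
print(clf.predict(X[:5]), clf.score(X, y))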

Example 1: check_l1_min_c

# Required import: from sklearn.linear_model import logistic [as alias]
# Or: from sklearn.linear_model.logistic import LogisticRegression [as alias]
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
    min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)

    clf = {
        'log': LogisticRegression(penalty='l1', solver='liblinear',
                                  multi_class='ovr'),
        'squared_hinge': LinearSVC(loss='squared_hinge',
                                   penalty='l1', dual=False),
    }[loss]

    clf.fit_intercept = fit_intercept
    clf.intercept_scaling = intercept_scaling

    clf.C = min_c
    clf.fit(X, y)
    assert (np.asarray(clf.coef_) == 0).all()
    assert (np.asarray(clf.intercept_) == 0).all()

    clf.C = min_c * 1.01
    clf.fit(X, y)
    assert ((np.asarray(clf.coef_) != 0).any() or
            (np.asarray(clf.intercept_) != 0).any()) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 24, Source: test_bounds.py
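The snippet relies on imports from the surrounding test module; a sketch of how it might be driven on toy data follows (the dataset and calls below are assumptions, not part of the original test file):

# Assumed setup for exercising check_l1_min_c; np, l1_min_c, LinearSVC and
# LogisticRegression come from these imports.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC, l1_min_c

X = np.array([[-1, 0], [0, 1], [1, 1]], dtype=float)
y = np.array([0, 1, 1])

check_l1_min_c(X, y, loss='log', intercept_scaling=1.0)
check_l1_min_c(X, y, loss='squared_hinge', intercept_scaling=1.0)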

Example 2: initialize_with_logistic_regression

# Required import: from sklearn.linear_model import logistic [as alias]
# Or: from sklearn.linear_model.logistic import LogisticRegression [as alias]
def initialize_with_logistic_regression(self, zs, xs):
        from sklearn.linear_model.logistic import LogisticRegression
        lr = LogisticRegression(verbose=False, multi_class="multinomial", solver="lbfgs")

        # Make the covariates
        K, D = self.num_states, self.covariate_dim
        zs = zs if isinstance(zs, np.ndarray) else np.concatenate(zs, axis=0)
        xs = xs if isinstance(xs, np.ndarray) else np.concatenate(xs, axis=0)
        assert zs.shape[0] == xs.shape[0]
        assert zs.ndim == 1 and zs.dtype == np.int32 and zs.min() >= 0 and zs.max() < K
        assert xs.ndim == 2 and xs.shape[1] == D

        lr_X = xs[:-1]
        lr_y = zs[1:]
        lr.fit(lr_X, lr_y)

        # Now convert the logistic regression into weights
        used = np.bincount(zs, minlength=K) > 0
        self.W = np.zeros((D, K))
        self.W[:, used] = lr.coef_.T
        b = np.zeros((K,))
        b[used] += lr.intercept_
        b[~used] += -100.
        self.b = b 
Developer: slinderman, Project: recurrent-slds, Lines: 26, Source: transitions.py
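Outside the recurrent-slds transition class, the core idea can be sketched in isolation (the synthetic zs/xs below are assumptions): fit a logistic regression that predicts the next discrete state from the current covariates, then read the transition weights and biases off coef_ and intercept_.

# Standalone sketch of the same initialization idea on synthetic data.
import numpy as np
from sklearn.linear_model import LogisticRegression

K, D, T = 3, 2, 500                                  # states, covariate dim, timesteps
rng = np.random.RandomState(0)
zs = rng.randint(0, K, size=T).astype(np.int32)      # discrete state sequence
xs = rng.randn(T, D)                                 # covariates

lr = LogisticRegression(solver="lbfgs")              # multinomial for multiclass targets
lr.fit(xs[:-1], zs[1:])                              # next state from current covariates

W = lr.coef_.T                                       # (D, K) weights, assuming all K states appear
b = lr.intercept_                                    # (K,) biases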

Example 3: __init__

# Required import: from sklearn.linear_model import logistic [as alias]
# Or: from sklearn.linear_model.logistic import LogisticRegression [as alias]
def __init__(self, penalty='l2', dual=False, tol=0.0001, C=1.0,
             fit_intercept=True, intercept_scaling=1, class_weight='balanced',
             random_state=None, solver='liblinear', max_iter=100,
             multi_class='ovr', verbose=0, warm_start=False, n_jobs=None):
        self._hyperparams = {
            'penalty': penalty,
            'dual': dual,
            'tol': tol,
            'C': C,
            'fit_intercept': fit_intercept,
            'intercept_scaling': intercept_scaling,
            'class_weight': class_weight,
            'random_state': random_state,
            'solver': solver,
            'max_iter': max_iter,
            'multi_class': multi_class,
            'verbose': verbose,
            'warm_start': warm_start,
            'n_jobs': n_jobs}
        self._wrapped_model = Op(**self._hyperparams) 
Developer: IBM, Project: lale, Lines: 19, Source: logistic_regression.py

Example 4: test_pipeline_same_results

# Required import: from sklearn.linear_model import logistic [as alias]
# Or: from sklearn.linear_model.logistic import LogisticRegression [as alias]
def test_pipeline_same_results(self):
        X, y, Z = self.make_classification(2, 10000, 2000)

        loc_clf = LogisticRegression()
        loc_filter = VarianceThreshold()
        loc_pipe = Pipeline([
            ('threshold', loc_filter),
            ('logistic', loc_clf)
        ])

        dist_clf = SparkLogisticRegression()
        dist_filter = SparkVarianceThreshold()
        dist_pipe = SparkPipeline([
            ('threshold', dist_filter),
            ('logistic', dist_clf)
        ])

        dist_filter.fit(Z)
        loc_pipe.fit(X, y)
        dist_pipe.fit(Z, logistic__classes=np.unique(y))

        assert_true(np.mean(np.abs(
            loc_pipe.predict(X) -
            np.concatenate(dist_pipe.predict(Z[:, 'X']).collect())
        )) < 0.1) 
Developer: lensacom, Project: sparkit-learn, Lines: 27, Source: test_pipeline.py

Example 5: check_l1_min_c

# Required import: from sklearn.linear_model import logistic [as alias]
# Or: from sklearn.linear_model.logistic import LogisticRegression [as alias]
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
    min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)

    clf = {
        'log': LogisticRegression(penalty='l1'),
        'squared_hinge': LinearSVC(loss='squared_hinge',
                                   penalty='l1', dual=False),
    }[loss]

    clf.fit_intercept = fit_intercept
    clf.intercept_scaling = intercept_scaling

    clf.C = min_c
    clf.fit(X, y)
    assert_true((np.asarray(clf.coef_) == 0).all())
    assert_true((np.asarray(clf.intercept_) == 0).all())

    clf.C = min_c * 1.01
    clf.fit(X, y)
    assert_true((np.asarray(clf.coef_) != 0).any() or
                (np.asarray(clf.intercept_) != 0).any()) 
Developer: alvarobartt, Project: twitter-stock-recommendation, Lines: 23, Source: test_bounds.py

Example 6: new_grid_search

# Required import: from sklearn.linear_model import logistic [as alias]
# Or: from sklearn.linear_model.logistic import LogisticRegression [as alias]
def new_grid_search():
    """ Create new GridSearch obj with models pipeline """
    pipeline = Pipeline([
        # TODO some smart preproc can be added here
        (u"clf", LogisticRegression(class_weight="balanced")),
    ])
    search_params = {"clf__C": (1e-4, 1e-2, 1e0, 1e2, 1e4)}
    return GridSearchCV(
        estimator=pipeline,
        param_grid=search_params,
        scoring="recall_macro",
        cv=10,
        n_jobs=-1,
        verbose=3,
    ) 
Developer: llSourcell, Project: AI_for_Financial_Data, Lines: 17, Source: train.py
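A hedged usage sketch follows; the synthetic dataset is an assumption (in the original train.py the search is fit on the project's own features), and Pipeline, LogisticRegression and GridSearchCV are assumed to be imported at module level as the header comments indicate:

# Assumed usage of new_grid_search() on synthetic data.
from sklearn.datasets import make_classification

X, y = make_classification(n_samples=200, n_features=10, random_state=0)

search = new_grid_search()
search.fit(X, y)
print(search.best_params_, search.best_score_)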

Example 7: create_model

# Required import: from sklearn.linear_model import logistic [as alias]
# Or: from sklearn.linear_model.logistic import LogisticRegression [as alias]
def create_model():
    from sklearn.linear_model.logistic import LogisticRegression
    clf = LogisticRegression()

    return clf 
Developer: PacktPublishing, Project: Building-Machine-Learning-Systems-With-Python-Second-Edition, Lines: 7, Source: 02_ceps_based_classifier.py

Example 8: getEstimator

# Required import: from sklearn.linear_model import logistic [as alias]
# Or: from sklearn.linear_model.logistic import LogisticRegression [as alias]
def getEstimator(scorer_type):
    if scorer_type == 'grad_boost':
        clf = GradientBoostingClassifier(n_estimators=200, random_state=14128, verbose=True)

    if scorer_type == 'svm1':  # support vector classifier
        clf = svm.SVC(gamma=0.001, C=100., verbose=True)

    if scorer_type == 'logistic_regression' :
        clf = logistic.LogisticRegression()

    if scorer_type == 'svm3':
        # 'balanced' is the valid class_weight preset; 'unbalanced' is not accepted by scikit-learn
        clf = svm.SVC(kernel='poly', C=1.0, probability=True, class_weight='balanced')

    if scorer_type == "bayes":
        clf = naive_bayes.GaussianNB()

    if scorer_type == 'voting_hard_svm_gradboost_logistic':
        svm2 = svm.SVC(kernel='linear', C=1.0, probability=True, class_weight='balanced', verbose=True)
        log_reg = logistic.LogisticRegression()
        gradboost = GradientBoostingClassifier(n_estimators=200, random_state=14128, verbose=True)

        clf = VotingClassifier(estimators=[  # ('gb', gb),
            ('svm', svm2),
            ('grad_boost', gradboost),
            ('logistic_regression', log_reg)
        ],  n_jobs=1,
            voting='hard')

    if scorer_type == 'voting_hard_bayes_gradboost':
        bayes = naive_bayes.GaussianNB()
        gradboost = GradientBoostingClassifier(n_estimators=200, random_state=14128, verbose=True)

        clf = VotingClassifier(estimators=[  # ('gb', gb),
            ('bayes', bayes),
            ('grad_boost', gradboost),
        ],  n_jobs=1,
            voting='hard')

    return clf 
Developer: UKPLab, Project: coling2018_fake-news-challenge, Lines: 41, Source: meta_classifier.py
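A hedged usage sketch (the synthetic data is an assumption; the 'logistic_regression' branch needs the logistic import from the header comment, which requires a scikit-learn version older than 0.24):

# Assumed usage of getEstimator on synthetic data.
from sklearn.datasets import make_classification

X, y = make_classification(n_samples=100, n_features=8, random_state=0)

clf = getEstimator('logistic_regression')
clf.fit(X, y)
print(clf.predict(X[:5]))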

Example 9: get_classification_models

# Required import: from sklearn.linear_model import logistic [as alias]
# Or: from sklearn.linear_model.logistic import LogisticRegression [as alias]
def get_classification_models():
        models = [
            LogisticRegression(random_state=1),
            RandomForestClassifier(n_estimators=64, max_depth=5, random_state=1),
        ]
        return models 
Developer: d909b, Project: cxplain, Lines: 8, Source: test_util.py

Example 10: fit_proxy

# Required import: from sklearn.linear_model import logistic [as alias]
# Or: from sklearn.linear_model.logistic import LogisticRegression [as alias]
def fit_proxy(explained_model, x, y):
        if isinstance(explained_model, LogisticRegression):
            y_cur = np.argmax(y, axis=-1)
        else:
            y_cur = y
        explained_model.fit(x, y_cur) 
Developer: d909b, Project: cxplain, Lines: 8, Source: test_util.py
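The helper's dispatch between one-hot and raw targets can be sketched standalone (the synthetic data below is an assumption; in cxplain's tests the explained model and labels come from other helpers):

# Same dispatch written standalone: LogisticRegression needs integer labels,
# so one-hot targets are collapsed with argmax before fitting.
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.RandomState(0)
X = rng.randn(50, 3)
y_onehot = np.eye(2)[rng.randint(0, 2, size=50)]    # one-hot targets, shape (50, 2)

model = LogisticRegression()
y_cur = np.argmax(y_onehot, axis=-1) if isinstance(model, LogisticRegression) else y_onehot
model.fit(X, y_cur)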


Note: the sklearn.linear_model.logistic.LogisticRegression examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers, and copyright of the source code belongs to the original authors; please follow the corresponding project's license when distributing or using it, and do not reproduce without permission.