

Code examples for the Python testing.ignore_warnings method

This article collects typical usage examples of the Python method sklearn.utils.testing.ignore_warnings. If you are wondering what testing.ignore_warnings does and how to use it, the selected code examples below may help. You can also explore further usage examples from the sklearn.utils.testing module.


The following presents 15 code examples of the testing.ignore_warnings method, sorted by popularity by default.
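Before the examples, here is a minimal sketch of the two calling conventions that recur below: wrapping a callable so that warnings raised inside it are suppressed, and using ignore_warnings as a context manager, optionally restricted to a warning category. The noisy helper is purely illustrative, and the import path follows the examples on this page (newer scikit-learn releases have since deprecated sklearn.utils.testing).

import warnings

from sklearn.utils.testing import ignore_warnings


def noisy():
    # Emits a DeprecationWarning so there is something to suppress (illustrative only).
    warnings.warn("old API", DeprecationWarning)
    return 42


# 1) Wrap the callable, then invoke the wrapped version; its warnings are silenced.
assert ignore_warnings(noisy)() == 42

# 2) Use as a context manager, optionally limited to a warning category.
with ignore_warnings(category=DeprecationWarning):
    noisy()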

Example 1: test_selectkbest_tiebreaking

# Required import: from sklearn.utils import testing [as alias]
# Or: from sklearn.utils.testing import ignore_warnings [as alias]
def test_selectkbest_tiebreaking():
    # Test whether SelectKBest actually selects k features in case of ties.
    # Prior to 0.11, SelectKBest would return more features than requested.
    Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
    y = [1]
    dummy_score = lambda X, y: (X[0], X[0])
    for X in Xs:
        sel = SelectKBest(dummy_score, k=1)
        X1 = ignore_warnings(sel.fit_transform)([X], y)
        assert_equal(X1.shape[1], 1)
        assert_best_scores_kept(sel)

        sel = SelectKBest(dummy_score, k=2)
        X2 = ignore_warnings(sel.fit_transform)([X], y)
        assert_equal(X2.shape[1], 2)
        assert_best_scores_kept(sel) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines of code: 18, Source file: test_feature_select.py

Example 2: test_qda_regularization

# Required import: from sklearn.utils import testing [as alias]
# Or: from sklearn.utils.testing import ignore_warnings [as alias]
def test_qda_regularization():
    # the default is reg_param=0. and will cause issues
    # when there is a constant variable
    clf = QuadraticDiscriminantAnalysis()
    with ignore_warnings():
        y_pred = clf.fit(X2, y6).predict(X2)
    assert np.any(y_pred != y6)

    # adding a little regularization fixes the problem
    clf = QuadraticDiscriminantAnalysis(reg_param=0.01)
    with ignore_warnings():
        clf.fit(X2, y6)
    y_pred = clf.predict(X2)
    assert_array_equal(y_pred, y6)

    # Case n_samples_in_a_class < n_features
    clf = QuadraticDiscriminantAnalysis(reg_param=0.1)
    with ignore_warnings():
        clf.fit(X5, y5)
    y_pred5 = clf.predict(X5)
    assert_array_equal(y_pred5, y5) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines of code: 23, Source file: test_discriminant_analysis.py

Example 3: test_one_hot_encoder_dense

# Required import: from sklearn.utils import testing [as alias]
# Or: from sklearn.utils.testing import ignore_warnings [as alias]
def test_one_hot_encoder_dense():
    # check for sparse=False
    X = [[3, 2, 1], [0, 1, 1]]
    enc = OneHotEncoder(sparse=False)
    with ignore_warnings(category=(DeprecationWarning, FutureWarning)):
        # discover max values automatically
        X_trans = enc.fit_transform(X)
        assert_equal(X_trans.shape, (2, 5))
        assert_array_equal(enc.active_features_,
                           np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
        assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])

    # check outcome
    assert_array_equal(X_trans,
                       np.array([[0., 1., 0., 1., 1.],
                                 [1., 0., 1., 0., 1.]])) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines of code: 18, Source file: test_encoders.py

Example 4: train

# Required import: from sklearn.utils import testing [as alias]
# Or: from sklearn.utils.testing import ignore_warnings [as alias]
def train(self, contexts, responses):
        """Fit the tf-idf transform and compute idf statistics."""
        with ignore_warnings():
            # Ignore deprecated `non_negative` warning.
            self._vectorizer = HashingVectorizer(non_negative=True)
        self._tfidf_transform = TfidfTransformer()
        count_matrix = self._tfidf_transform.fit_transform(
            self._vectorizer.transform(contexts + responses))
        n_samples, n_features = count_matrix.shape
        df = _document_frequency(count_matrix)
        idf = np.log((n_samples - df + 0.5) / (df + 0.5))
        self._idf_diag = sp.spdiags(
            idf, diags=0, m=n_features, n=n_features
        )
        document_lengths = count_matrix.sum(axis=1)
        self._average_document_length = np.mean(document_lengths)
        print(self._average_document_length) 
Developer: PolyAI-LDN, Project: conversational-datasets, Lines of code: 19, Source file: keyword_based.py

Example 5: test_qda_regularization

# Required import: from sklearn.utils import testing [as alias]
# Or: from sklearn.utils.testing import ignore_warnings [as alias]
def test_qda_regularization():
    # the default is reg_param=0. and will cause issues
    # when there is a constant variable
    clf = QuadraticDiscriminantAnalysis()
    with ignore_warnings():
        y_pred = clf.fit(X2, y6).predict(X2)
    assert_true(np.any(y_pred != y6))

    # adding a little regularization fixes the problem
    clf = QuadraticDiscriminantAnalysis(reg_param=0.01)
    with ignore_warnings():
        clf.fit(X2, y6)
    y_pred = clf.predict(X2)
    assert_array_equal(y_pred, y6)

    # Case n_samples_in_a_class < n_features
    clf = QuadraticDiscriminantAnalysis(reg_param=0.1)
    with ignore_warnings():
        clf.fit(X5, y5)
    y_pred5 = clf.predict(X5)
    assert_array_equal(y_pred5, y5) 
Developer: alvarobartt, Project: twitter-stock-recommendation, Lines of code: 23, Source file: test_discriminant_analysis.py

Example 6: test_collinearity

# Required import: from sklearn.utils import testing [as alias]
# Or: from sklearn.utils.testing import ignore_warnings [as alias]
def test_collinearity():
    # Check that lars_path is robust to collinearity in input
    X = np.array([[3., 3., 1.],
                  [2., 2., 0.],
                  [1., 1., 0]])
    y = np.array([1., 0., 0])
    rng = np.random.RandomState(0)

    f = ignore_warnings
    _, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
    assert_true(not np.isnan(coef_path_).any())
    residual = np.dot(X, coef_path_[:, -1]) - y
    assert_less((residual ** 2).sum(), 1.)  # just make sure it's bounded

    n_samples = 10
    X = rng.rand(n_samples, 5)
    y = np.zeros(n_samples)
    _, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
                                              copy_Gram=False, alpha_min=0.,
                                              method='lasso', verbose=0,
                                              max_iter=500)
    assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_)) 
Developer: alvarobartt, Project: twitter-stock-recommendation, Lines of code: 24, Source file: test_least_angle.py

Example 7: test_distances

# Required import: from sklearn.utils import testing [as alias]
# Or: from sklearn.utils.testing import ignore_warnings [as alias]
def test_distances():
    # Checks whether returned neighbors are from closest to farthest.
    n_samples = 12
    n_features = 2
    n_iter = 10
    rng = np.random.RandomState(42)
    X = rng.rand(n_samples, n_features)

    lshf = ignore_warnings(LSHForest, category=DeprecationWarning)()
    ignore_warnings(lshf.fit)(X)

    for i in range(n_iter):
        n_neighbors = rng.randint(0, n_samples)
        query = X[rng.randint(0, n_samples)].reshape(1, -1)
        distances, neighbors = lshf.kneighbors(query,
                                               n_neighbors=n_neighbors,
                                               return_distance=True)

        # Returned neighbors should be from closest to farthest, that is
        # increasing distance values.
        assert_true(np.all(np.diff(distances[0]) >= 0))

        # Note: the radius_neighbors method does not guarantee the order of
        # the results. 
Developer: alvarobartt, Project: twitter-stock-recommendation, Lines of code: 26, Source file: test_approximate.py

Example 8: test_graphs

# Required import: from sklearn.utils import testing [as alias]
# Or: from sklearn.utils.testing import ignore_warnings [as alias]
def test_graphs():
    # Smoke tests for graph methods.
    n_samples_sizes = [5, 10, 20]
    n_features = 3
    rng = np.random.RandomState(42)

    for n_samples in n_samples_sizes:
        X = rng.rand(n_samples, n_features)
        lshf = ignore_warnings(LSHForest, category=DeprecationWarning)(
            min_hash_match=0)
        ignore_warnings(lshf.fit)(X)

        kneighbors_graph = lshf.kneighbors_graph(X)
        radius_neighbors_graph = lshf.radius_neighbors_graph(X)

        assert_equal(kneighbors_graph.shape[0], n_samples)
        assert_equal(kneighbors_graph.shape[1], n_samples)
        assert_equal(radius_neighbors_graph.shape[0], n_samples)
        assert_equal(radius_neighbors_graph.shape[1], n_samples) 
Developer: alvarobartt, Project: twitter-stock-recommendation, Lines of code: 21, Source file: test_approximate.py

Example 9: test_sparse_input

# Required import: from sklearn.utils import testing [as alias]
# Or: from sklearn.utils.testing import ignore_warnings [as alias]
def test_sparse_input():
    # note: Fixed random state in sp.rand is not supported in older scipy.
    #       The test should succeed regardless.
    X1 = sp.rand(50, 100)
    X2 = sp.rand(10, 100)
    forest_sparse = ignore_warnings(LSHForest, category=DeprecationWarning)(
        radius=1, random_state=0).fit(X1)
    forest_dense = ignore_warnings(LSHForest, category=DeprecationWarning)(
        radius=1, random_state=0).fit(X1.A)

    d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
    d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)

    assert_almost_equal(d_sparse, d_dense)
    assert_almost_equal(i_sparse, i_dense)

    d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
                                                        return_distance=True)
    d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
                                                     return_distance=True)
    assert_equal(d_sparse.shape, d_dense.shape)
    for a, b in zip(d_sparse, d_dense):
        assert_almost_equal(a, b)
    for a, b in zip(i_sparse, i_dense):
        assert_almost_equal(a, b) 
Developer: alvarobartt, Project: twitter-stock-recommendation, Lines of code: 27, Source file: test_approximate.py

Example 10: train_model

# Required import: from sklearn.utils import testing [as alias]
# Or: from sklearn.utils.testing import ignore_warnings [as alias]
def train_model(folds, model):
    """
    Evaluation with:
      Matthews correlation coefficient: represents thresholding measures
      AUC: represents ranking measures
      Brier score: represents calibration measures
    """
    scores = []
    fit_model_time = 0      # Sum of all the time spent fitting the training data; normalized later
    score_model_time = 0    # Sum of all the time spent scoring the testing data; normalized later

    for X_train, y_train, X_test, y_test in folds:
        # Training
        start_time = time.time()
        with ignore_warnings(category=ConvergenceWarning):  # Yes, neural networks do not always converge
            model.fit(X_train, y_train)
        fit_model_time += time.time() - start_time
        prediction_train_proba = model.predict_proba(X_train)[:, 1]
        prediction_train = (prediction_train_proba >= 0.5).astype('uint8')

        # Testing
        start_time = time.time()
        prediction_test_proba = model.predict_proba(X_test)[:, 1]
        score_model_time += time.time() - start_time
        prediction_test = (prediction_test_proba >= 0.5).astype('uint8')

        # When all the predictions are of a single class, we get a RuntimeWarning in matthews_corr
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            scores.append([
                sklearn.metrics.matthews_corrcoef(y_test, prediction_test),
                sklearn.metrics.matthews_corrcoef(y_train, prediction_train),
                sklearn.metrics.roc_auc_score(y_test, prediction_test_proba),
                sklearn.metrics.roc_auc_score(y_train, prediction_train_proba),
                sklearn.metrics.brier_score_loss(y_test, prediction_test_proba),
                sklearn.metrics.brier_score_loss(y_train, prediction_train_proba)
            ])

    return np.mean(scores, axis=0), fit_model_time/len(folds), score_model_time/len(folds) 
Developer: scikit-learn-contrib, Project: category_encoders, Lines of code: 41, Source file: util.py

Example 11: test_iforest

# Required import: from sklearn.utils import testing [as alias]
# Or: from sklearn.utils.testing import ignore_warnings [as alias]
def test_iforest():
    """Check Isolation Forest for various parameter settings."""
    X_train = np.array([[0, 1], [1, 2]])
    X_test = np.array([[2, 1], [1, 1]])

    grid = ParameterGrid({"n_estimators": [3],
                          "max_samples": [0.5, 1.0, 3],
                          "bootstrap": [True, False]})

    with ignore_warnings():
        for params in grid:
            IsolationForest(random_state=rng,
                            **params).fit(X_train).predict(X_test) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines of code: 15, Source file: test_iforest.py

Example 12: test_1d_input

# Required import: from sklearn.utils import testing [as alias]
# Or: from sklearn.utils.testing import ignore_warnings [as alias]
def test_1d_input(name):
    X = iris.data[:, 0]
    X_2d = iris.data[:, 0].reshape((-1, 1))
    y = iris.target

    with ignore_warnings():
        check_1d_input(name, X, X_2d, y) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines of code: 9, Source file: test_forest.py

Example 13: check_warm_start_oob

# Required import: from sklearn.utils import testing [as alias]
# Or: from sklearn.utils.testing import ignore_warnings [as alias]
def check_warm_start_oob(name):
    # Test that the warm start computes oob score when asked.
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]
    # Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
    clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
                          random_state=1, bootstrap=True, oob_score=True)
    clf.fit(X, y)

    clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
                            random_state=1, bootstrap=True, oob_score=False)
    clf_2.fit(X, y)

    clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
    clf_2.fit(X, y)

    assert hasattr(clf_2, 'oob_score_')
    assert_equal(clf.oob_score_, clf_2.oob_score_)

    # Test that oob_score is computed even if we don't need to train
    # additional trees.
    clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
                            random_state=1, bootstrap=True, oob_score=False)
    clf_3.fit(X, y)
    assert not hasattr(clf_3, 'oob_score_')

    clf_3.set_params(oob_score=True)
    ignore_warnings(clf_3.fit)(X, y)

    assert_equal(clf.oob_score_, clf_3.oob_score_) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines of code: 32, Source file: test_forest.py

Example 14: test_1d_input

# Required import: from sklearn.utils import testing [as alias]
# Or: from sklearn.utils.testing import ignore_warnings [as alias]
def test_1d_input(name):
    with ignore_warnings():
        check_raise_error_on_1d_input(name) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines of code: 5, Source file: test_tree.py

Example 15: test_multiclass_jaccard_score

# Required import: from sklearn.utils import testing [as alias]
# Or: from sklearn.utils.testing import ignore_warnings [as alias]
def test_multiclass_jaccard_score(recwarn):
    y_true = ['ant', 'ant', 'cat', 'cat', 'ant', 'cat', 'bird', 'bird']
    y_pred = ['cat', 'ant', 'cat', 'cat', 'ant', 'bird', 'bird', 'cat']
    labels = ['ant', 'bird', 'cat']
    lb = LabelBinarizer()
    lb.fit(labels)
    y_true_bin = lb.transform(y_true)
    y_pred_bin = lb.transform(y_pred)
    multi_jaccard_score = partial(jaccard_score, y_true,
                                  y_pred)
    bin_jaccard_score = partial(jaccard_score,
                                y_true_bin, y_pred_bin)
    multi_labels_list = [['ant', 'bird'], ['ant', 'cat'], ['cat', 'bird'],
                         ['ant'], ['bird'], ['cat'], None]
    bin_labels_list = [[0, 1], [0, 2], [2, 1], [0], [1], [2], None]

    # other than average='samples'/'none-samples', test everything else here
    for average in ('macro', 'weighted', 'micro', None):
        for m_label, b_label in zip(multi_labels_list, bin_labels_list):
            assert_almost_equal(multi_jaccard_score(average=average,
                                                    labels=m_label),
                                bin_jaccard_score(average=average,
                                                  labels=b_label))

    y_true = np.array([[0, 0], [0, 0], [0, 0]])
    y_pred = np.array([[0, 0], [0, 0], [0, 0]])
    with ignore_warnings():
        assert (jaccard_score(y_true, y_pred, average='weighted')
                == 0)

    assert not list(recwarn) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines of code: 33, Source file: test_classification.py


Note: The sklearn.utils.testing.ignore_warnings examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.