当前位置: 首页>>代码示例>>Python>>正文


Python testing._skip_if_no_sklearn函数代码示例

本文整理汇总了Python中testing._skip_if_no_sklearn函数的典型用法代码示例。如果您正苦于以下问题:Python _skip_if_no_sklearn函数的具体用法?Python _skip_if_no_sklearn怎么用?Python _skip_if_no_sklearn使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。


在下文中一共展示了_skip_if_no_sklearn函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: test_regression_with_custom_objective

def test_regression_with_custom_objective():
    """Check that XGBRegressor accepts a custom objective callable.

    Trains with a hand-written least-squares objective on the Boston
    housing data and verifies (a) each fold's fit reaches a reasonable
    MSE and (b) the custom callable is actually invoked during training.
    """
    tm._skip_if_no_sklearn()
    from sklearn.metrics import mean_squared_error
    from sklearn.datasets import load_boston
    from sklearn.cross_validation import KFold

    def objective_ls(y_true, y_pred):
        # Gradient/hessian of the squared-error loss: g = pred - true, h = 1.
        grad = (y_pred - y_true)
        hess = np.ones(len(y_true))
        return grad, hess

    boston = load_boston()
    y = boston['target']
    X = boston['data']
    kf = KFold(y.shape[0], n_folds=2, shuffle=True, random_state=rng)
    for train_index, test_index in kf:
        xgb_model = xgb.XGBRegressor(objective=objective_ls).fit(
            X[train_index], y[train_index]
        )
        preds = xgb_model.predict(X[test_index])
        labels = y[test_index]
        # Fixed: this assert previously sat outside the loop, so only the
        # last fold was checked; now every fold must meet the MSE bound.
        assert mean_squared_error(preds, labels) < 25

    # Verify the custom objective is actually used: an objective that
    # always raises must surface its exception from fit().
    class XGBCustomObjectiveException(Exception):
        pass

    def dummy_objective(y_true, y_pred):
        raise XGBCustomObjectiveException()

    xgb_model = xgb.XGBRegressor(objective=dummy_objective)
    np.testing.assert_raises(XGBCustomObjectiveException, xgb_model.fit, X, y)
开发者ID:ChangXiaodong,项目名称:xgboost-withcomments,代码行数:32,代码来源:test_with_sklearn.py

示例2: test_feature_importances

def test_feature_importances():
    """feature_importances_ must match a known reference vector and be
    identical whether the training data is a numpy array or pandas objects.
    """
    tm._skip_if_no_sklearn()
    from sklearn.datasets import load_digits

    digits = load_digits(2)
    y = digits['target']
    X = digits['data']
    xgb_model = xgb.XGBClassifier(seed=0).fit(X, y)

    # Expected importances for the fixed seed; any change in the
    # importance computation shows up as a mismatch here.
    exp = np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.00833333, 0.,
                    0., 0., 0., 0., 0., 0., 0., 0.025, 0.14166667, 0., 0., 0.,
                    0., 0., 0., 0.00833333, 0.25833333, 0., 0., 0., 0.,
                    0.03333334, 0.03333334, 0., 0.32499999, 0., 0., 0., 0.,
                    0.05, 0.06666667, 0., 0., 0., 0., 0., 0., 0., 0.04166667,
                    0., 0., 0., 0., 0., 0., 0., 0.00833333, 0., 0., 0., 0.,
                    0.], dtype=np.float32)

    np.testing.assert_almost_equal(xgb_model.feature_importances_, exp)

    # numeric columns: pandas inputs must yield the same importances.
    # Fixed: the original repeated the fit + assert pair twice verbatim
    # (copy-paste duplication); the redundant second round is removed.
    import pandas as pd
    y = pd.Series(digits['target'])
    X = pd.DataFrame(digits['data'])
    xgb_model = xgb.XGBClassifier(seed=0).fit(X, y)
    np.testing.assert_almost_equal(xgb_model.feature_importances_, exp)
开发者ID:Ihaveadreammoonlighter,项目名称:xgboost,代码行数:28,代码来源:test_with_sklearn.py

示例3: test_early_stopping_nonparallel

    def test_early_stopping_nonparallel(self):
        """Early stopping must be deterministic: rounds=5 and rounds=4 find
        the same best_score, and a long patience (rounds=10) overfits to 1.
        """
        tm._skip_if_no_sklearn()
        from sklearn.datasets import load_digits
        try:
            from sklearn.model_selection import train_test_split
        except ImportError:  # older sklearn (<0.18) — narrowed from a bare except
            from sklearn.cross_validation import train_test_split

        digits = load_digits(2)
        X = digits['data']
        y = digits['target']
        X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
        clf1 = xgb.XGBClassifier()
        clf1.fit(X_train, y_train, early_stopping_rounds=5, eval_metric="auc",
                 eval_set=[(X_test, y_test)])
        clf2 = xgb.XGBClassifier()
        clf2.fit(X_train, y_train, early_stopping_rounds=4, eval_metric="auc",
                 eval_set=[(X_test, y_test)])
        # should be the same
        assert clf1.best_score == clf2.best_score
        assert clf1.best_score != 1
        # check overfit
        clf3 = xgb.XGBClassifier()
        clf3.fit(X_train, y_train, early_stopping_rounds=10, eval_metric="auc",
                 eval_set=[(X_test, y_test)])
        assert clf3.best_score == 1
开发者ID:ChangXiaodong,项目名称:xgboost-withcomments,代码行数:26,代码来源:test_early_stopping.py

示例4: test_multiclass_classification

def test_multiclass_classification():
    """Multiclass fit/predict on iris over 2 folds, including the
    output_margin and ntree_limit predict() options."""
    tm._skip_if_no_sklearn()
    from sklearn.datasets import load_iris
    try:
        from sklearn.cross_validation import KFold
    except ImportError:  # narrowed from a bare except
        # NOTE(review): the KFold(n, n_folds=...) call below uses the old
        # cross_validation signature; model_selection.KFold has a different
        # API, so this fallback path likely breaks — confirm on new sklearn.
        from sklearn.model_selection import KFold

    def check_pred(preds, labels):
        # Fraction of predictions whose thresholded value disagrees with
        # the label; the bound is deliberately loose.
        err = sum(1 for i in range(len(preds))
                  if int(preds[i] > 0.5) != labels[i]) / float(len(preds))
        assert err < 0.4

    iris = load_iris()
    y = iris['target']
    X = iris['data']
    kf = KFold(y.shape[0], n_folds=2, shuffle=True, random_state=rng)
    for train_index, test_index in kf:
        xgb_model = xgb.XGBClassifier().fit(X[train_index], y[train_index])
        preds = xgb_model.predict(X[test_index])
        # test other params in XGBClassifier().fit
        preds2 = xgb_model.predict(X[test_index], output_margin=True, ntree_limit=3)
        preds3 = xgb_model.predict(X[test_index], output_margin=True, ntree_limit=0)
        preds4 = xgb_model.predict(X[test_index], output_margin=False, ntree_limit=3)
        labels = y[test_index]

        check_pred(preds, labels)
        check_pred(preds2, labels)
        check_pred(preds3, labels)
        check_pred(preds4, labels)
开发者ID:ChangXiaodong,项目名称:xgboost-withcomments,代码行数:30,代码来源:test_with_sklearn.py

示例5: test_boston_housing_regression_with_sample_weights

def test_boston_housing_regression_with_sample_weights():
    """Regression on Boston housing with uniform sample weights; predictions
    must meet the same MSE bounds as an unweighted fit, for every predict()
    variant (output_margin / ntree_limit)."""
    tm._skip_if_no_sklearn()
    from sklearn.metrics import mean_squared_error
    from sklearn.datasets import load_boston
    from sklearn.cross_validation import KFold

    boston = load_boston()
    y = boston['target']
    X = boston['data']
    weights = np.ones_like(y, 'float')
    folds = KFold(y.shape[0], n_folds=2, shuffle=True, random_state=rng)

    for tr_idx, te_idx in folds:
        model = xgb.XGBRegressor().fit(
            X[tr_idx], y[tr_idx],
            sample_weight=weights[tr_idx]
        )

        # Exercise the extra predict() options alongside the default call.
        p1 = model.predict(X[te_idx])
        p2 = model.predict(X[te_idx], output_margin=True, ntree_limit=3)
        p3 = model.predict(X[te_idx], output_margin=True, ntree_limit=0)
        p4 = model.predict(X[te_idx], output_margin=False, ntree_limit=3)
        truth = y[te_idx]

        # Truncated / margin predictions get the looser bound.
        assert mean_squared_error(p1, truth) < 25
        assert mean_squared_error(p2, truth) < 370
        assert mean_squared_error(p3, truth) < 25
        assert mean_squared_error(p4, truth) < 370
开发者ID:AlexisMignon,项目名称:xgboost,代码行数:29,代码来源:test_with_sklearn.py

示例6: test_sklearn_nfolds_cv

def test_sklearn_nfolds_cv():
    """xgb.cv with an explicit StratifiedKFold must agree with the built-in
    stratified=True splitting on a 3-class digits problem."""
    tm._skip_if_no_sklearn()
    from sklearn.datasets import load_digits
    from sklearn.model_selection import StratifiedKFold

    digits = load_digits(3)
    X = digits['data']
    y = digits['target']
    dm = xgb.DMatrix(X, label=y)

    params = {
        'max_depth': 2,
        'eta': 1,
        'silent': 1,
        'objective':
        'multi:softprob',
        'num_class': 3
    }

    seed = 2016
    nfolds = 5
    skf = StratifiedKFold(n_splits=nfolds, shuffle=True, random_state=seed)

    # Three equivalent ways of splitting: default, explicit folds, stratified.
    cv1 = xgb.cv(params, dm, num_boost_round=10, nfold=nfolds, seed=seed)
    cv2 = xgb.cv(params, dm, num_boost_round=10, nfold=nfolds, folds=skf, seed=seed)
    cv3 = xgb.cv(params, dm, num_boost_round=10, nfold=nfolds, stratified=True, seed=seed)
    # All runs perform the same number of boosting rounds ...
    assert cv1.shape[0] == cv2.shape[0] and cv2.shape[0] == cv3.shape[0]
    # ... and explicit-folds vs stratified agree on the final metric value.
    assert cv2.iloc[-1, 0] == cv3.iloc[-1, 0]
开发者ID:ChangXiaodong,项目名称:xgboost-withcomments,代码行数:28,代码来源:test_with_sklearn.py

示例7: test_feature_importances

def test_feature_importances():
    """For a fixed seed, feature_importances_ is identical across numpy
    input, pandas numeric columns, and pandas string-named columns."""
    tm._skip_if_no_sklearn()
    from sklearn.datasets import load_digits

    digits = load_digits(2)
    y = digits['target']
    X = digits['data']
    model = xgb.XGBClassifier(seed=0).fit(X, y)

    # Reference importances for the fixed seed.
    expected = np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.00833333, 0.,
                         0., 0., 0., 0., 0., 0., 0., 0.025, 0.14166667, 0., 0., 0.,
                         0., 0., 0., 0.00833333, 0.25833333, 0., 0., 0., 0.,
                         0.03333334, 0.03333334, 0., 0.32499999, 0., 0., 0., 0.,
                         0.05, 0.06666667, 0., 0., 0., 0., 0., 0., 0., 0.04166667,
                         0., 0., 0., 0., 0., 0., 0., 0.00833333, 0., 0., 0., 0.,
                         0.], dtype=np.float32)

    np.testing.assert_almost_equal(model.feature_importances_, expected)

    # numeric columns
    import pandas as pd
    y = pd.Series(digits['target'])
    X = pd.DataFrame(digits['data'])
    model = xgb.XGBClassifier(seed=0).fit(X, y)
    np.testing.assert_almost_equal(model.feature_importances_, expected)

    # string columns, the feature order must be kept
    alphabet = list('abcdefghijklmnopqrstuvwxyz')
    X.columns = ["".join(random.sample(alphabet, 5)) for _ in range(64)]

    model = xgb.XGBClassifier(seed=0).fit(X, y)
    np.testing.assert_almost_equal(model.feature_importances_, expected)
开发者ID:ChangXiaodong,项目名称:xgboost-withcomments,代码行数:32,代码来源:test_with_sklearn.py

示例8: test_sklearn_clone

def test_sklearn_clone():
    """sklearn.base.clone must survive a post-construction attribute
    override on an XGBClassifier (clone re-reads params via get_params)."""
    tm._skip_if_no_sklearn()
    from sklearn.base import clone

    estimator = xgb.XGBClassifier(n_jobs=2, nthread=3)
    estimator.n_jobs = -1
    # Must not raise.
    clone(estimator)
开发者ID:ChangXiaodong,项目名称:xgboost-withcomments,代码行数:7,代码来源:test_with_sklearn.py

示例9: evalerror_04

    def evalerror_04(self, preds, dtrain):
        """Custom eval returning both a 0/1 error rate and a squared-error
        metric as (name, value) pairs.

        NOTE(review): the second entry is labelled 'rmse' but
        mean_squared_error returns the non-rooted MSE — confirm intended.
        """
        tm._skip_if_no_sklearn()
        from sklearn.metrics import mean_squared_error

        labels = dtrain.get_label()
        # Misclassification rate with a decision threshold at 0.0 (margins).
        err_rate = float(sum(labels != (preds > 0.0))) / len(labels)
        return [('error', err_rate),
                ('rmse', mean_squared_error(labels, preds))]
开发者ID:ChangXiaodong,项目名称:xgboost-withcomments,代码行数:7,代码来源:test_eval_metrics.py

示例10: test_fast_histmaker

    def test_fast_histmaker(self):
        """'hist' tree method: results non-increasing over a parameter grid,
        and AUC identical to 'exact' on the all-categorical agaricus data."""
        tm._skip_if_no_sklearn()
        grid = {'tree_method': ['hist'], 'max_depth': [2, 8], 'max_bin': [2, 256],
                'grow_policy': ['depthwise', 'lossguide'], 'max_leaves': [64, 0],
                'silent': [1]}
        for param in parameter_combinations(grid):
            assert_results_non_increasing(run_suite(param), 1e-2)

        # hist must be same as exact on all-categorial data
        dpath = 'demo/data/'
        ag_dtrain = xgb.DMatrix(dpath + 'agaricus.txt.train')
        ag_dtest = xgb.DMatrix(dpath + 'agaricus.txt.test')
        ag_param = {'max_depth': 2,
                    'tree_method': 'hist',
                    'eta': 1,
                    'silent': 1,
                    'objective': 'binary:logistic',
                    'eval_metric': 'auc'}
        hist_res = {}
        exact_res = {}

        # Train once per tree method, capturing per-round eval results.
        xgb.train(ag_param, ag_dtrain, 10, [(ag_dtrain, 'train'), (ag_dtest, 'test')],
                  evals_result=hist_res)
        ag_param["tree_method"] = "exact"
        xgb.train(ag_param, ag_dtrain, 10, [(ag_dtrain, 'train'), (ag_dtest, 'test')],
                  evals_result=exact_res)
        for split in ('train', 'test'):
            assert hist_res[split]['auc'] == exact_res[split]['auc']
开发者ID:Ihaveadreammoonlighter,项目名称:xgboost,代码行数:29,代码来源:test_updaters.py

示例11: test_cv_early_stopping

    def test_cv_early_stopping(self):
        """xgb.cv early stopping: expected round counts for several patience
        settings, with the built-in metric and with a custom feval."""
        tm._skip_if_no_sklearn()
        from sklearn.datasets import load_digits

        digits = load_digits(2)
        X = digits['data']
        y = digits['target']
        dm = xgb.DMatrix(X, label=y)
        params = {'max_depth': 2, 'eta': 1, 'silent': 1, 'objective': 'binary:logistic'}

        def rounds_kept(**kwargs):
            # Rows in the cv result == boosting rounds kept after stopping.
            return xgb.cv(params, dm, num_boost_round=10, nfold=10, **kwargs).shape[0]

        assert rounds_kept(early_stopping_rounds=10) == 10
        assert rounds_kept(early_stopping_rounds=5) == 3
        assert rounds_kept(early_stopping_rounds=1) == 1

        assert rounds_kept(feval=self.evalerror, early_stopping_rounds=10) == 10
        assert rounds_kept(feval=self.evalerror, early_stopping_rounds=1) == 5
        assert rounds_kept(feval=self.evalerror, maximize=True,
                           early_stopping_rounds=1) == 1
开发者ID:ChangXiaodong,项目名称:xgboost-withcomments,代码行数:26,代码来源:test_early_stopping.py

示例12: test_sklearn_plotting

def test_sklearn_plotting():
    """plot_importance / to_graphviz / plot_tree on a fitted classifier
    return the expected matplotlib/graphviz objects with correct labels."""
    tm._skip_if_no_sklearn()
    from sklearn.datasets import load_iris

    iris = load_iris()

    model = xgb.XGBClassifier()
    model.fit(iris.data, iris.target)

    import matplotlib
    matplotlib.use('Agg')  # headless backend so the test runs without a display

    from matplotlib.axes import Axes
    from graphviz import Digraph

    axes = xgb.plot_importance(model)
    assert isinstance(axes, Axes)
    assert axes.get_title() == 'Feature importance'
    assert axes.get_xlabel() == 'F score'
    assert axes.get_ylabel() == 'Features'
    assert len(axes.patches) == 4

    graph = xgb.to_graphviz(model, num_trees=0)
    assert isinstance(graph, Digraph)

    axes = xgb.plot_tree(model, num_trees=0)
    assert isinstance(axes, Axes)
开发者ID:ChangXiaodong,项目名称:xgboost-withcomments,代码行数:27,代码来源:test_with_sklearn.py

示例13: test_sklearn_n_jobs

def test_sklearn_n_jobs():
    """Both n_jobs and the legacy nthread alias map to 'nthread'."""
    tm._skip_if_no_sklearn()

    for kwargs, expected in (({'n_jobs': 1}, 1), ({'nthread': 2}, 2)):
        model = xgb.XGBClassifier(**kwargs)
        assert model.get_xgb_params()['nthread'] == expected
开发者ID:ChangXiaodong,项目名称:xgboost-withcomments,代码行数:8,代码来源:test_with_sklearn.py

示例14: test_sklearn_random_state

def test_sklearn_random_state():
    """Both random_state and the legacy seed alias map to 'seed'."""
    tm._skip_if_no_sklearn()

    for kwargs, expected in (({'random_state': 402}, 402), ({'seed': 401}, 401)):
        model = xgb.XGBClassifier(**kwargs)
        assert model.get_xgb_params()['seed'] == expected
开发者ID:ChangXiaodong,项目名称:xgboost-withcomments,代码行数:8,代码来源:test_with_sklearn.py

示例15: test_kwargs

def test_kwargs():
    """Extra **kwargs must appear in get_params() alongside named args."""
    tm._skip_if_no_sklearn()

    extra = {'updater': 'grow_gpu', 'subsample': .5, 'n_jobs': -1}
    model = xgb.XGBClassifier(n_estimators=1000, **extra)
    fetched = model.get_params()
    assert fetched['updater'] == 'grow_gpu'
    assert fetched['subsample'] == .5
    assert fetched['n_estimators'] == 1000
开发者ID:ChangXiaodong,项目名称:xgboost-withcomments,代码行数:8,代码来源:test_with_sklearn.py


注:本文中的testing._skip_if_no_sklearn函数示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。