当前位置: 首页>>代码示例>>Python>>正文


Python AdaBoostClassifier.staged_predict方法代码示例

本文整理汇总了Python中sklearn.ensemble.AdaBoostClassifier.staged_predict方法的典型用法代码示例。如果您正苦于以下问题:Python AdaBoostClassifier.staged_predict方法的具体用法?Python AdaBoostClassifier.staged_predict怎么用?Python AdaBoostClassifier.staged_predict使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在sklearn.ensemble.AdaBoostClassifier的用法示例。


在下文中一共展示了AdaBoostClassifier.staged_predict方法的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: test_staged_predict

# 需要导入模块: from sklearn.ensemble import AdaBoostClassifier [as 别名]
# 或者: from sklearn.ensemble.AdaBoostClassifier import staged_predict [as 别名]
def test_staged_predict():
    """Check that staged predictions converge to the final prediction."""
    # AdaBoost classification: exercise both boosting variants.
    for algorithm in ['SAMME', 'SAMME.R']:
        est = AdaBoostClassifier(algorithm=algorithm, n_estimators=10)
        est.fit(iris.data, iris.target)

        final_pred = est.predict(iris.data)
        stagewise_pred = list(est.staged_predict(iris.data))
        final_proba = est.predict_proba(iris.data)
        stagewise_proba = list(est.staged_predict_proba(iris.data))
        final_score = est.score(iris.data, iris.target)
        stagewise_score = list(est.staged_score(iris.data, iris.target))

        # One staged result per boosting iteration; the last stage must
        # match the non-staged output exactly.
        assert_equal(len(stagewise_pred), 10)
        assert_array_almost_equal(final_pred, stagewise_pred[-1])
        assert_equal(len(stagewise_proba), 10)
        assert_array_almost_equal(final_proba, stagewise_proba[-1])
        assert_equal(len(stagewise_score), 10)
        assert_array_almost_equal(final_score, stagewise_score[-1])

    # AdaBoost regression: same convergence property.
    est = AdaBoostRegressor(n_estimators=10)
    est.fit(boston.data, boston.target)

    final_pred = est.predict(boston.data)
    stagewise_pred = list(est.staged_predict(boston.data))
    final_score = est.score(boston.data, boston.target)
    stagewise_score = list(est.staged_score(boston.data, boston.target))

    assert_equal(len(stagewise_pred), 10)
    assert_array_almost_equal(final_pred, stagewise_pred[-1])
    assert_equal(len(stagewise_score), 10)
    assert_array_almost_equal(final_score, stagewise_score[-1])
开发者ID:akobre01,项目名称:scikit-learn,代码行数:36,代码来源:test_weight_boosting.py

示例2: some

# 需要导入模块: from sklearn.ensemble import AdaBoostClassifier [as 别名]
# 或者: from sklearn.ensemble.AdaBoostClassifier import staged_predict [as 别名]
def some(X, Y, X_test, Y_test):
    ada = AdaBoostClassifier()
    print "Train Model ---"
    t1 = time()
    ada.fit(X, Y)
    t2 = time()
    print "Model Trained ----------", t2 - t1
    test_errors = []
    cur = 1
    Y_test2 = []
    for k in Y_test:
        Y_test2.append(k[0])
    print "Testing: "
    print  Y_test2
    pred =  ada.predict(X_test)
    print pred
    accu =  1. - accuracy_score(y_true= Y_test2, y_pred= pred)
    print accu
    print "STAGED _____________"
    for test_predict in (
        ada.staged_predict(X_test)):


            test_errors.append(
            1. - accuracy_score(test_predict, Y_test2))


    print  "errorss : "
    print test_errors
开发者ID:grimadas,项目名称:diploma,代码行数:31,代码来源:adaboost.py

示例3: test_staged_predict

# 需要导入模块: from sklearn.ensemble import AdaBoostClassifier [as 别名]
# 或者: from sklearn.ensemble.AdaBoostClassifier import staged_predict [as 别名]
def test_staged_predict():
    # Check staged predictions when per-sample weights are supplied.
    rng = np.random.RandomState(0)
    iris_weights = rng.randint(10, size=iris.target.shape)
    boston_weights = rng.randint(10, size=boston.target.shape)

    # AdaBoost classification: exercise both boosting variants.
    for algorithm in ['SAMME', 'SAMME.R']:
        est = AdaBoostClassifier(algorithm=algorithm, n_estimators=10)
        est.fit(iris.data, iris.target, sample_weight=iris_weights)

        final_pred = est.predict(iris.data)
        stagewise_pred = list(est.staged_predict(iris.data))
        final_proba = est.predict_proba(iris.data)
        stagewise_proba = list(est.staged_predict_proba(iris.data))
        final_score = est.score(iris.data, iris.target,
                                sample_weight=iris_weights)
        stagewise_score = list(est.staged_score(
            iris.data, iris.target, sample_weight=iris_weights))

        # One entry per boosting iteration; the last stage must agree
        # with the non-staged result.
        assert_equal(len(stagewise_pred), 10)
        assert_array_almost_equal(final_pred, stagewise_pred[-1])
        assert_equal(len(stagewise_proba), 10)
        assert_array_almost_equal(final_proba, stagewise_proba[-1])
        assert_equal(len(stagewise_score), 10)
        assert_array_almost_equal(final_score, stagewise_score[-1])

    # AdaBoost regression with a fixed seed for reproducibility.
    est = AdaBoostRegressor(n_estimators=10, random_state=0)
    est.fit(boston.data, boston.target, sample_weight=boston_weights)

    final_pred = est.predict(boston.data)
    stagewise_pred = list(est.staged_predict(boston.data))
    final_score = est.score(boston.data, boston.target,
                            sample_weight=boston_weights)
    stagewise_score = list(est.staged_score(
        boston.data, boston.target, sample_weight=boston_weights))

    assert_equal(len(stagewise_pred), 10)
    assert_array_almost_equal(final_pred, stagewise_pred[-1])
    assert_equal(len(stagewise_score), 10)
    assert_array_almost_equal(final_score, stagewise_score[-1])
开发者ID:0664j35t3r,项目名称:scikit-learn,代码行数:44,代码来源:test_weight_boosting.py

示例4: ensembleProc

# 需要导入模块: from sklearn.ensemble import AdaBoostClassifier [as 别名]
# 或者: from sklearn.ensemble.AdaBoostClassifier import staged_predict [as 别名]
def ensembleProc(n_estimators, learning_rate, trainfile, testfile):
    """Train a SAMME AdaBoost of decision stumps and return the mean
    staged zero-one loss on the test file.

    The data files are space-delimited; columns 0-2 hold the features
    and the last column holds the label.
    """
    features = np.genfromtxt(trainfile, delimiter=' ', usecols=(0, 1, 2))
    labels = np.genfromtxt(trainfile, delimiter=' ', usecols=(-1))
    tests = np.genfromtxt(testfile, delimiter=' ', usecols=(0, 1, 2))
    testlabels = np.genfromtxt(testfile, delimiter=' ', usecols=(-1))

    # Depth-1 stump used as the weak learner.
    stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
    stump.fit(features, labels)

    booster = AdaBoostClassifier(
        base_estimator=stump,
        learning_rate=learning_rate,
        n_estimators=n_estimators,
        algorithm="SAMME")
    booster.fit(features, labels)

    # Zero-one loss of the ensemble after each boosting stage.
    error = np.zeros((n_estimators,))
    for stage, prediction in enumerate(booster.staged_predict(tests)):
        error[stage] = zero_one_loss(prediction, testlabels)

    return np.mean(error)
开发者ID:gbugaisky,项目名称:bimm_185_conotoxin,代码行数:23,代码来源:ensembleProc.py

示例5: test_sparse_classification

# 需要导入模块: from sklearn.ensemble import AdaBoostClassifier [as 别名]
# 或者: from sklearn.ensemble.AdaBoostClassifier import staged_predict [as 别名]
def test_sparse_classification():
    # Check that AdaBoost classification gives identical results for
    # sparse and dense input, across every sparse matrix format.

    class CustomSVC(SVC):
        """SVC variant that records the type of its training matrix."""

        def fit(self, X, y, sample_weight=None):
            """Fit and remember whether X arrived sparse or dense."""
            super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
            self.data_type_ = type(X)
            return self

    X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
                                                   n_features=5,
                                                   random_state=42)
    # Flatten y to a 1d array
    y = np.ravel(y)

    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
                          dok_matrix]:
        X_train_sparse = sparse_format(X_train)
        X_test_sparse = sparse_format(X_test)

        # Train one classifier on the sparse matrices...
        sparse_classifier = AdaBoostClassifier(
            base_estimator=CustomSVC(probability=True),
            random_state=1,
            algorithm="SAMME"
        ).fit(X_train_sparse, y_train)

        # ...and an identically configured one on the dense arrays.
        dense_classifier = AdaBoostClassifier(
            base_estimator=CustomSVC(probability=True),
            random_state=1,
            algorithm="SAMME"
        ).fit(X_train, y_train)

        # Non-staged prediction methods must agree element-wise.
        for method in ('predict', 'decision_function',
                       'predict_log_proba', 'predict_proba'):
            sparse_out = getattr(sparse_classifier, method)(X_test_sparse)
            dense_out = getattr(dense_classifier, method)(X_test)
            assert_array_equal(sparse_out, dense_out)

        # score
        assert_array_equal(sparse_classifier.score(X_test_sparse, y_test),
                           dense_classifier.score(X_test, y_test))

        # Staged variants yield one result per boosting iteration; every
        # stage must agree between the sparse and dense classifiers.
        for method in ('staged_decision_function', 'staged_predict',
                       'staged_predict_proba'):
            sparse_stages = getattr(sparse_classifier, method)(X_test_sparse)
            dense_stages = getattr(dense_classifier, method)(X_test)
            for sparse_stage, dense_stage in zip(sparse_stages, dense_stages):
                assert_array_equal(sparse_stage, dense_stage)

        # staged_score
        for sparse_stage, dense_stage in zip(
                sparse_classifier.staged_score(X_test_sparse, y_test),
                dense_classifier.staged_score(X_test, y_test)):
            assert_array_equal(sparse_stage, dense_stage)

        # Every weak learner should have been trained on sparse data
        # (AdaBoost converts input to CSC or CSR internally).
        seen_types = [est.data_type_ for est in sparse_classifier.estimators_]
        assert all([(t == csc_matrix or t == csr_matrix)
                    for t in seen_types])
开发者ID:0664j35t3r,项目名称:scikit-learn,代码行数:97,代码来源:test_weight_boosting.py

示例6: print

# 需要导入模块: from sklearn.ensemble import AdaBoostClassifier [as 别名]
# 或者: from sklearn.ensemble.AdaBoostClassifier import staged_predict [as 别名]
# NOTE(review): excerpt from a longer script — `start`, `ada_clf` (a fitted
# AdaBoostClassifier) and `vec` (a fitted text vectorizer) are defined in
# the part of the original file not shown here.
print("adaboost classifier training in %.2f" % (time() - start))

# use cross-validation to estimate accuracy
# start = time()
# train_pred = cross_val_predict(ada_clf, bag_of_words, train.cuisine, cv=2)
# print("adaboost evaluation finished in %.2f" % (time() - start))

# print("Estimated accuracy using cross-validation: " , accuracy_score(train.cuisine, train_pred))


# use rest of labelled training data to check accuracy score (for plotting)
test = pd.read_json("data/train2.json")
test_words = [" ".join(item) for item in test.ingredients]
test_bag = vec.transform(test_words).toarray()
# Error of the ensemble after each boosting iteration, for the plot below.
test_errors = []
for test_predict in ada_clf.staged_predict(test_bag):
    test_errors.append(1.0 - accuracy_score(test_predict, test.cuisine))

plt.figure(figsize=(15, 5))

# len(ada_clf) is the number of fitted weak learners (trees).
plt.plot(range(1, len(ada_clf) + 1), test_errors)
plt.ylabel("Test Error")
plt.xlabel("Number of Trees")
plt.show()

# Load in Testing Data
test = pd.read_json("data/test.json")

# Create test Bag of Words
test_words = [" ".join(item) for item in test.ingredients]
test_bag = vec.transform(test_words).toarray()
开发者ID:basheersubei,项目名称:kaggle-cuisine-classification,代码行数:33,代码来源:adaboost_cooking.py

示例7: AdaBoostClassifier

# 需要导入模块: from sklearn.ensemble import AdaBoostClassifier [as 别名]
# 或者: from sklearn.ensemble.AdaBoostClassifier import staged_predict [as 别名]
    # NOTE(review): excerpt from inside a loop of a longer script —
    # `dt_stump`, `learning_rate`, `n_estimators`, the train/test splits and
    # the *_ave accumulators are defined outside this excerpt. The trailing
    # loop is cut off by the excerpt.
    # Discrete AdaBoost (SAMME) over decision stumps.
    ada_discrete = AdaBoostClassifier(
        base_estimator=dt_stump,
        learning_rate=learning_rate,
        n_estimators=n_estimators,
        algorithm="SAMME")
    ada_discrete.fit(X_train, y_train)

    # Real AdaBoost (SAMME.R) with the same configuration.
    ada_real = AdaBoostClassifier(
        base_estimator=dt_stump,
        learning_rate=learning_rate,
        n_estimators=n_estimators,
        algorithm="SAMME.R")
    ada_real.fit(X_train, y_train)

    # Staged test error of SAMME; /10. presumably averages over 10
    # outer-loop repetitions — confirm against the full script.
    ada_discrete_err = np.zeros((n_estimators,))
    for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
        ada_discrete_err[i] = zero_one_loss(y_pred, y_test)/10.
    ada_discrete_err_ave += ada_discrete_err

    # Staged training error of SAMME.
    ada_discrete_err_train = np.zeros((n_estimators,))
    for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
        ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)/10.
    ada_discrete_err_train_ave += ada_discrete_err_train

    # Staged test error of SAMME.R.
    ada_real_err = np.zeros((n_estimators,))
    for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
        ada_real_err[i] = zero_one_loss(y_pred, y_test)/10.
    ada_real_err_ave += ada_real_err

    # Staged training error of SAMME.R (body truncated in this excerpt).
    ada_real_err_train = np.zeros((n_estimators,))
    for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
开发者ID:bcsiriuschen,项目名称:CMSC773-Schizophrenia-Detection,代码行数:33,代码来源:adaBoostPlot.py

示例8: read_hwfile

# 需要导入模块: from sklearn.ensemble import AdaBoostClassifier [as 别名]
# 或者: from sklearn.ensemble.AdaBoostClassifier import staged_predict [as 别名]
#!/usr/bin/env python
# Python 2 script: trains a large AdaBoost of shallow trees on HOG
# features and prints validation accuracy every 10 boosting stages.
if __name__ == '__main__':
    from sklearn.ensemble import AdaBoostClassifier as ABC
    from sklearn.tree import DecisionTreeClassifier as DTC
    import numpy as np
    from sklearn.metrics import accuracy_score
    from final_utils import read_hwfile

    # initialize data
    # 80/20 train/validation split (Python 2 integer division).
    dat, lab, nDat = read_hwfile('ml14fall_train_align.dat.hog.dat', 169)
    nVal = nDat/5
    nTrn = nDat-nVal
    datTrn = dat[:nTrn]
    labTrn = lab[:nTrn]
    datVal = dat[-nVal:]
    labVal = lab[-nVal:]
    print "#trn = {}, #val = {}".format(nTrn, nVal)


    # 50000 boosting rounds over depth-6 trees restricted to one feature.
    classfier = ABC(DTC(max_depth=6, max_features=1), n_estimators=50000)
    classfier.fit(datTrn, labTrn)

    # Report validation accuracy after every 10th boosting stage.
    for i, labPre in enumerate(classfier.staged_predict(datVal)):
	if i % 10 == 9:
	    print accuracy_score(labPre, labVal)
开发者ID:johnjohnlin,项目名称:MyCourseProjects,代码行数:27,代码来源:aboost.py

示例9: print

# 需要导入模块: from sklearn.ensemble import AdaBoostClassifier [as 别名]
# 或者: from sklearn.ensemble.AdaBoostClassifier import staged_predict [as 别名]
    # NOTE(review): excerpt from a longer Python 2 script — `classifier`
    # (AdaBoost), `r_classifier` (random forest), the data splits and the
    # `args` namespace are defined outside this excerpt.
    # Now predict the value of the digit on the second half:
    predicted = classifier.predict(X_test)
    r_predicted = r_classifier.predict(X_test)

    print("Classification report for classifier %s:\n%s\n"
          % (classifier, metrics.classification_report(y_test, predicted)))
    print("Confusion matrix:\n%s" % metrics.confusion_matrix(y_test, predicted))

    print("Classification report for classifier %s:\n%s\n"
          % (r_classifier, metrics.classification_report(y_test, r_predicted)))
    print("Confusion matrix:\n%s" % metrics.confusion_matrix(y_test, r_predicted))

    # AdaBoost test/train error after each boosting stage.
    n_trees = xrange(1, len(classifier) + 1)
    test_errors = []
    train_errors = []
    for p in classifier.staged_predict(X_test):
        test_errors.append(1. - accuracy_score(p, y_test))
    for p in classifier.staged_predict(X_train):
        train_errors.append(1. - accuracy_score(p, y_train))

    # Random-forest baseline: refit from scratch for each forest size
    # so its error can be plotted against the AdaBoost staged error.
    test_errors_rand = []
    for i in xrange(1, args.estimators + 1):
        print '.',
        r_classifier = RandomForestClassifier(n_estimators=i, n_jobs=args.jobs, max_depth=args.max_depth)
        r_classifier.fit(X_train, y_train)
        r_predicted = r_classifier.predict(X_test)
        test_errors_rand.append(1. - accuracy_score(r_predicted, y_test))
    print '.'

    pl.subplot(1,1,1)
    pl.plot(n_trees, test_errors, c='red', label='AdaBoost.%s' % args.boost)
开发者ID:nemiliani,项目名称:script_recognition,代码行数:33,代码来源:random.vs.ada.py

示例10: train_test_split

# 需要导入模块: from sklearn.ensemble import AdaBoostClassifier [as 别名]
# 或者: from sklearn.ensemble.AdaBoostClassifier import staged_predict [as 别名]
                # NOTE(review): excerpt from deep inside nested loops of a
                # longer script — `value_to_classify` and `targs_to_classify`
                # are defined outside this excerpt.
                #split into training and testing samples. test_size = proportion of data used for test
                x_train, x_test, y_train, y_test = train_test_split(value_to_classify, targs_to_classify, test_size = .4) 

                #########################
                #ADABoost Classifier
                #########################
                # SAMME.R ("real") and SAMME ("discrete") boosters over depth-2 trees.
                bdt_real = AdaBoostClassifier(DecisionTreeClassifier(max_depth=2),n_estimators=600,learning_rate=1)

                bdt_discrete = AdaBoostClassifier(DecisionTreeClassifier(max_depth=2),n_estimators=600,learning_rate=1.5,algorithm="SAMME")
                bdt_real.fit(x_train, y_train)
                bdt_discrete.fit(x_train, y_train)
            
                # Test error of each booster after every boosting stage.
                real_test_errors = []
                discrete_test_errors = []

                for real_test_predict, discrete_train_predict in zip(bdt_real.staged_predict(x_test), bdt_discrete.staged_predict(x_test)):
                    real_test_errors.append(1. - accuracy_score(real_test_predict, y_test))
                    discrete_test_errors.append(1. - accuracy_score(discrete_train_predict, y_test))

                n_trees_discrete = len(bdt_discrete)
                n_trees_real = len(bdt_real)

                # Boosting might terminate early, but the following arrays are always
                # n_estimators long. We crop them to the actual number of trees here:
                discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
                real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
                discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
            
                # Test on the testing data set and display the accuracies
                ypred_r = bdt_real.predict(x_test)
                ypred_e = bdt_discrete.predict(x_test)
开发者ID:jhess90,项目名称:classification_scripts,代码行数:33,代码来源:rp_classifier.py

示例11: getPics

# 需要导入模块: from sklearn.ensemble import AdaBoostClassifier [as 别名]
# 或者: from sklearn.ensemble.AdaBoostClassifier import staged_predict [as 别名]
# NOTE(review): excerpt from a longer script — `data`, `label`, `index`,
# `CnnModel` and `plt` are defined outside this excerpt.
# Shuffle labels with the precomputed permutation, then split off the
# first 30000 samples for training and keep the rest for testing.
label = label[index]
(X_train, X_test) = (data[0:30000], data[30000:])
# BUG FIX: y_test previously sliced label[3000:], which is misaligned
# with X_test = data[30000:]; both test splits must start at 30000.
(y_train, y_test) = (label[0:30000], label[30000:])
#X_train, X_test = getPics().trainData,getPics().testData
#y_train, y_test = getPics().trainLabel,getPics().testLabel
#print X_train.shape
#print y_train.shape
# Discrete AdaBoost (SAMME) over a CNN weak learner.
bdt_discrete = AdaBoostClassifier(
    CnnModel(),
    n_estimators=500,
    learning_rate=0.3,
    algorithm="SAMME")
bdt_discrete.fit(X_train, y_train)

# Test error of the ensemble after each boosting stage.
discrete_test_errors = []

for discrete_train_predict in bdt_discrete.staged_predict(X_test):
    discrete_test_errors.append(
        1. - accuracy_score(discrete_train_predict, y_test))

# Boosting may stop early; crop per-estimator arrays to the fitted size.
n_trees_discrete = len(bdt_discrete)
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]

plt.figure(figsize=(15, 5))

plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
         discrete_test_errors, c='black')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
开发者ID:alyato,项目名称:dl,代码行数:33,代码来源:plot_adaboost_multiclass.py

示例12: results_from_examples

# 需要导入模块: from sklearn.ensemble import AdaBoostClassifier [as 别名]
# 或者: from sklearn.ensemble.AdaBoostClassifier import staged_predict [as 别名]
from sklearn.ensemble import AdaBoostClassifier
from read_data import read_data

def results_from_examples(ps, ls):
    """Return a list with 1 where prediction equals label, else 0.

    Pairs predictions `ps` with labels `ls` positionally; extra elements
    in the longer sequence are ignored (zip semantics).
    """
    outcomes = []
    for prediction, label in zip(ps, ls):
        outcomes.append(1 if prediction == label else 0)
    return outcomes

def error_rate(rs):
    """Return the fraction of incorrect outcomes in `rs` (1 = hit, 0 = miss).

    Raises ZeroDivisionError on an empty sequence, matching the original.
    """
    hits = sum(rs)
    total = len(rs)
    return 1.0 - (1.0 * hits) / total

print "Sklearn"
examples,labels = read_data('Data/clean1_clean.data')
clf = AdaBoostClassifier(n_estimators=50)
a = AdaBoostClassifier.fit(clf,examples,labels)
score = a.score(examples, labels)
i = 0
print "Estimator, Ensemble error, Classifier error"
for value in AdaBoostClassifier.staged_predict(clf, examples):
    rs = results_from_examples(value, labels)
    #print "Estimator: " + str(i) + " Ensemble error: " + str(error_rate(rs)) + " Classifier error: " + str(clf.estimator_errors_[i])
    print str(i) + "," + str(error_rate(rs)) + "," + str(clf.estimator_errors_[i])
    i = i + 1

print score
开发者ID:cLuuLess,项目名称:CSE202,代码行数:25,代码来源:AdaBoost_sklearn.py

示例13: bdtModel

# 需要导入模块: from sklearn.ensemble import AdaBoostClassifier [as 别名]
# 或者: from sklearn.ensemble.AdaBoostClassifier import staged_predict [as 别名]

#.........这里部分代码省略.........

    # NOTE(review): this is the tail of bdtModel; the function signature and
    # the earlier part of the body (defining X, y, X_test, y_test,
    # df_sig_train, visualSigBkg, ...) are elided from this excerpt.
    # SAMME AdaBoost over depth-3 trees; leaf size scales with the data.
    dt = DecisionTreeClassifier(max_depth=3,
                                min_samples_leaf=0.05*len(X))
    model = AdaBoostClassifier(dt,
                             algorithm='SAMME',
                             n_estimators=400,
                             learning_rate=0.5)
    
    model.fit(X, y)


    print '---------- Training/Testing info ----------'

    print 'Accuracy (training): ', model.score(X, y)
    print 'Null Error Rate (training): ', y.mean()


    #X_test = scaler.transform(X_test)
    predicted_test = model.predict(X_test)

    # predicted + truth sums to 1 exactly where they disagree (0/1 labels),
    # so counting 1s measures the misclassification rate.
    predicted_test_clever = (predicted_test + y_test).tolist()
    error_test = float(predicted_test_clever.count(1)) / float(len(predicted_test_clever))
    print "Error: ", error_test

    print "Accuracy (testing): ", metrics.accuracy_score(y_test, predicted_test)
    print "Recall (testing): ",   metrics.recall_score(y_test, predicted_test)
    print "F1 score (testing): ", metrics.f1_score(y_test, predicted_test)
    print "ROC area under curve (testing): ", metrics.roc_auc_score(y_test, predicted_test)

    # Score a single hand-entered feature vector.
    #'PTS','AST','REB','STL','BLK','FG_PCT','FG3_PCT','FT_PCT','MIN','EFF','WL']
    #user_input = scaler.transform(np.array([10, 1, 2, 0, 2, 0.3, 0.3, 0.3, 10, 5, 1], dtype=float))
    #user_input = scaler.transform(np.array([10,1,2,2,2,2,2,2,2,2,1], dtype=float))
    #user_input = scaler.transform(np.array([10,1,2], dtype=float))
    user_input = np.array([10.15, 1.95, 6.77, 1.12, 0.28, 0.51, 0.37, 0.47, 32.5, 14.8, 0.53], dtype=float)

    score = model.decision_function(user_input)
    print 'Score (user input): ', score
    result = model.predict_proba(user_input)
    print 'Probability of 1 (user input): ', result



    # '--------- Visualization -----------'

    # Decision-function distributions for signal (y>0.5) vs background.
    Classifier_training_S = model.decision_function(X[y>0.5]).ravel()
    Classifier_training_B = model.decision_function(X[y<0.5]).ravel()
    Classifier_testing_S = model.decision_function(X_test[y_test>0.5]).ravel()
    Classifier_testing_B = model.decision_function(X_test[y_test<0.5]).ravel()

    (h_test_s, h_test_b) =  visualSigBkg("BDT", Classifier_training_S, Classifier_training_B, Classifier_testing_S, Classifier_testing_B)


    # '-------- Variable Importance ---------'
    feature_importance = model.feature_importances_
    # make importances relative to max importance
    feature_importance = 100.0 * (feature_importance / feature_importance.max())
    sorted_idx = np.argsort(feature_importance)
    pos = np.arange(sorted_idx.shape[0]) + .5
    mpl.style.use('ggplot')
    pl.subplot(1, 2, 2)
    pl.barh(pos, feature_importance[sorted_idx], align='center')
    pl.yticks(pos, df_sig_train.columns[sorted_idx])
    pl.xlabel('Relative Importance', fontsize=15)
    pl.title('Variable Importance', fontsize=15)
    #pl.show()
    plt.savefig("Var_importance.pdf")
    plt.close()


    # Staged test/train error curves over all 400 boosting rounds.
    fig = plt.figure()
    ax = fig.add_subplot(111)

    model_err = np.zeros((400,))
    for i, y_pred in enumerate(model.staged_predict(X_test)):
        model_err[i] = zero_one_loss(y_pred, y_test)
    
    model_err_train = np.zeros((400,))
    for i, y_pred in enumerate(model.staged_predict(X)):
        model_err_train[i] = zero_one_loss(y_pred, y)

    ax.plot(np.arange(400) + 1, model_err,
            label='AdaBoost Test Error',
            color='orange')
    ax.plot(np.arange(400) + 1, model_err_train,
            label='AdaBoost Train Error',
            color='green')
    
    ax.set_ylim((0.25, 0.35))
    ax.set_xlabel('Number of Trees')
    ax.set_ylabel('Error Rate')
    
    leg = ax.legend(loc='upper right', fancybox=True)
    leg.get_frame().set_alpha(0.7)

    plt.savefig("ntrees.pdf")
    plt.close()    

    ########################################################### 

    return (model, X, y, result, model.score(X, y), error_test, score, h_test_s, h_test_b)
开发者ID:XiaoxiaoWang87,项目名称:InsightPrj,代码行数:104,代码来源:svm_model.py

示例14: PdfPages

# 需要导入模块: from sklearn.ensemble import AdaBoostClassifier [as 别名]
# 或者: from sklearn.ensemble.AdaBoostClassifier import staged_predict [as 别名]
    # NOTE(review): excerpt from a longer Python 2 script — `os`/`zs` (label
    # arrays), `base_ada`, the sig/bkg DataFrames, `get_hist` and `plt` are
    # defined outside this excerpt; the last plotting call is truncated.
    y_train = np.append(os, zs)
    print "training"
    base_ada.fit(X=X_train, y=y_train)

    # Rebuild the label arrays for the test samples and stack the test
    # signal/background frames into one matrix.
    os = np.ones(len(bkgtest))
    zs = np.zeros(len(sigtest))
    print "adding samples together"
    X_test = pandas.concat([sigtest, bkgtest])
    y_test = np.append(os, zs)


    # BDT output distributions for signal and background test samples.
    sigoutput = base_ada.decision_function(X=sigtest)
    bkgoutput = base_ada.decision_function(X=bkgtest)
    from sklearn.metrics import accuracy_score
    # Test error after each boosting stage; crop per-estimator arrays to
    # the number of stages actually fitted.
    test_errors = []
    for te in base_ada.staged_predict(X_test):
        test_errors.append(1.- accuracy_score(te, y_test))
    ntrees = len(test_errors)
    estimator_errors = base_ada.estimator_errors_[:ntrees]
    estimator_weights = base_ada.estimator_weights_[:ntrees]

    from matplotlib.ticker import LinearLocator

    with PdfPages("bdtplots.pdf") as pdf:
        xs, xe, ys, ye = get_hist(bkgoutput)
        plt.errorbar(xs, ys, xerr=xe, yerr=ye,
                     color='red', fmt='.',
                     label='bkg')
        xs, xe, ys, ye = get_hist(sigoutput)
        plt.errorbar(xs, ys, xerr=xe, yerr=ye,
                     color='blue', fmt='.',


注:本文中的sklearn.ensemble.AdaBoostClassifier.staged_predict方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。