本文整理汇总了Python中sklearn.ensemble.AdaBoostClassifier.predict_proba方法的典型用法代码示例。如果您正苦于以下问题:Python AdaBoostClassifier.predict_proba方法的具体用法?Python AdaBoostClassifier.predict_proba怎么用?Python AdaBoostClassifier.predict_proba使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sklearn.ensemble.AdaBoostClassifier
的用法示例。
在下文中一共展示了AdaBoostClassifier.predict_proba方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: ADA_Classifier
# 需要导入模块: from sklearn.ensemble import AdaBoostClassifier [as 别名]
# 或者: from sklearn.ensemble.AdaBoostClassifier import predict_proba [as 别名]
def ADA_Classifier(X_train, X_cv, X_test, Y_train,Y_cv,Y_test, Actual_DS):
    """Train AdaBoost (300 estimators), report CV accuracy, a confusion
    table and log-loss, then return probability DataFrames for the CV set,
    the test set and the full Actual_DS frame (in that order)."""
    print("***************Starting AdaBoost Classifier***************")
    start = time()

    booster = AdaBoostClassifier(n_estimators=300)
    booster.fit(X_train, Y_train)

    cv_labels = booster.predict(X_cv)
    cv_accuracy = booster.score(X_cv, Y_cv)
    print("AdaBoost Classifier - {0:.2f}%".format(100 * cv_accuracy))

    # Cross-tab of actual vs. predicted class names, plus the largest
    # per-row share expressed as a percentage.
    Summary = pd.crosstab(label_enc.inverse_transform(Y_cv),
                          label_enc.inverse_transform(cv_labels),
                          rownames=['actual'], colnames=['preds'])
    Summary['pct'] = (Summary.divide(Summary.sum(axis=1), axis=1)).max(axis=1)*100
    print(Summary)

    # Check with log loss function
    epsilon = 1e-15
    #ll_output = log_loss_func(Y_cv, preds, epsilon)
    cv_proba = booster.predict_proba(X_cv)
    cv_logloss = log_loss(Y_cv, cv_proba, eps=1e-15, normalize=True)
    print(cv_logloss)
    print("done in %0.3fs" % (time() - start))

    test_proba = booster.predict_proba(X_test)
    #preds4 = clf.predict_proba((Actual_DS.ix[:,'feat_1':]))
    actual_proba = booster.predict_proba(Actual_DS)
    print("***************Ending AdaBoost Classifier***************")
    return pd.DataFrame(cv_proba), pd.DataFrame(test_proba), pd.DataFrame(actual_proba)
示例2: ab
# 需要导入模块: from sklearn.ensemble import AdaBoostClassifier [as 别名]
# 或者: from sklearn.ensemble.AdaBoostClassifier import predict_proba [as 别名]
def ab(train_data,train_label,val_data,val_label,test_data,name="adaboost_submission.csv"):
print "Start training AdaBoost..."
abClf = AdaBoostClassifier()
abClf.fit(train_data,train_label)
#evaluate on validation set
val_pred_label = abClf.predict_proba(val_data)
logloss = preprocess.evaluation(val_label,val_pred_label)
print "logloss of validation set:",logloss
print "Start classify test set..."
test_label = abClf.predict_proba(test_data)
preprocess.saveResult(test_label,filename = name)
示例3: do_all_study
# 需要导入模块: from sklearn.ensemble import AdaBoostClassifier [as 别名]
# 或者: from sklearn.ensemble.AdaBoostClassifier import predict_proba [as 别名]
def do_all_study(X,y):
    """Plot learning/validation curves for several classifiers and print
    ROC-AUC for tuned GradientBoosting / AdaBoost models.

    NOTE(review): the X/y parameters are never used — every call below
    reads the globals X_train/y_train/X_test/y_test instead. Confirm
    whether the split was meant to happen inside this function.
    """
    names = [ "Decision Tree","Gradient Boosting",
              "Random Forest", "AdaBoost", "Naive Bayes"]
    classifiers = [
        #SVC(),
        DecisionTreeClassifier(max_depth=10),
        GradientBoostingClassifier(max_depth=10, n_estimators=20, max_features=1),
        RandomForestClassifier(max_depth=10, n_estimators=20, max_features=1),
        AdaBoostClassifier()]
    # Learning curve for every baseline classifier.
    for name, clf in zip(names, classifiers):
        estimator,score = plot_learning_curve(clf, X_train, y_train, scoring='roc_auc')

    # Validation curve + held-out ROC-AUC for GradientBoosting.
    clf_GBC = GradientBoostingClassifier(max_depth=10, n_estimators=20, max_features=1)
    param_name = 'n_estimators'
    param_range = [1, 5, 10, 20,40]
    plot_validation_curve(clf_GBC, X_train, y_train,
                          param_name, param_range, scoring='roc_auc')
    clf_GBC.fit(X_train,y_train)
    y_pred_GBC = clf_GBC.predict_proba(X_test)[:,1]
    print("ROC AUC GradientBoostingClassifier: %0.4f" % roc_auc_score(y_test, y_pred_GBC))

    # Same study for AdaBoost.
    clf_AB = AdaBoostClassifier()
    param_name = 'n_estimators'
    param_range = [1, 5, 10, 20,40]
    plot_validation_curve(clf_AB, X_train, y_train,
                          param_name, param_range, scoring='roc_auc')
    clf_AB.fit(X_train,y_train)
    y_pred_AB = clf_AB.predict_proba(X_test)[:,1]
    print("ROC AUC AdaBoost: %0.4f" % roc_auc_score(y_test, y_pred_AB))
示例4: Adaboost
# 需要导入模块: from sklearn.ensemble import AdaBoostClassifier [as 别名]
# 或者: from sklearn.ensemble.AdaBoostClassifier import predict_proba [as 别名]
def Adaboost(TrainData,TestData):
    """Train AdaBoost over decision trees for a range of train-split sizes
    and print accuracy / precision / recall / log-loss for each split.

    NOTE(review): this excerpt is truncated — the triple-quoted string
    opened on the last line ("Category_new=[]") is never closed in the
    visible text, so the tail of the function is missing.
    """
    features=['Time','Season','Hour','Minute','District']
    clf = AdaBoostClassifier(tree.DecisionTreeClassifier(),n_estimators=30)
    size=[0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]
    for i in range(0,len(size)):
        train,validation= train_test_split(TrainData, train_size=size[i])
        # Resample until the train and validation splits contain the same
        # set of category labels (needed for the metrics below).
        while len(set(train['Category'])) != len(set(validation['Category'])):
            train,validation= train_test_split(TrainData, train_size=size[i])
        clf = clf.fit(train[features], train['Category'])
        """stop = timeit.default_timer()
        print "Runnin time adaboost is ", stop-start"""
        predicted=np.array(clf.predict_proba(validation[features]))
        model=clf.predict(train[features])
        model1=clf.predict(validation[features])
        #scores = cross_val_score(clf, validation[features], validation['Category'])
        #print "Scores mean is",scores.mean()
        #accuracy
        print "Training accuracy is", accuracy_score(train['Category'].values.tolist(),model)
        print "Validation accuracy is",accuracy_score(validation['Category'].values.tolist(),model1)
        print "Precision is ",precision_score(validation['Category'].values.tolist(),model1,average='macro')
        print "Recall is ",recall_score(validation['Category'].values.tolist(),model1,average='macro')
        print "Log loss is", log_loss(validation['Category'].values.tolist(),predicted,eps=1e-15, normalize=True, sample_weight=None)
        #writing to file
        """Category_new=[]
示例5: test_oneclass_adaboost_proba
# 需要导入模块: from sklearn.ensemble import AdaBoostClassifier [as 别名]
# 或者: from sklearn.ensemble.AdaBoostClassifier import predict_proba [as 别名]
def test_oneclass_adaboost_proba():
    # predict_proba must remain well-defined when the training data holds a
    # single class label. Regression test for issue #7501:
    # https://github.com/scikit-learn/scikit-learn/issues/7501
    single_class = np.ones(len(X))
    model = AdaBoostClassifier().fit(X, single_class)
    expected = np.ones((len(X), 1))
    assert_array_almost_equal(model.predict_proba(X), expected)
示例6: ab_predictedValue
# 需要导入模块: from sklearn.ensemble import AdaBoostClassifier [as 别名]
# 或者: from sklearn.ensemble.AdaBoostClassifier import predict_proba [as 别名]
def ab_predictedValue():
print '----------AdaBoost----------'
ab_clf = AdaBoostClassifier(n_estimators = NoOfEstimators)
ab_clf.fit(train_df[features], train_df['SeriousDlqin2yrs'])
ab_predictedValue = ab_clf.predict_proba(test_df[features])
print 'Feature Importance = %s' % ab_clf.feature_importances_
return ab_predictedValue[:,1]
示例7: test_iris
# 需要导入模块: from sklearn.ensemble import AdaBoostClassifier [as 别名]
# 或者: from sklearn.ensemble.AdaBoostClassifier import predict_proba [as 别名]
def test_iris():
    """Consistency checks for both boosting algorithms on the iris data."""
    classes = np.unique(iris.target)
    clf_samme = prob_samme = None
    for alg in ['SAMME', 'SAMME.R']:
        model = AdaBoostClassifier(algorithm=alg)
        model.fit(iris.data, iris.target)
        assert_array_equal(classes, model.classes_)
        proba = model.predict_proba(iris.data)
        if alg == "SAMME":
            clf_samme, prob_samme = model, proba
        assert_equal(proba.shape[1], len(classes))
        assert_equal(model.decision_function(iris.data).shape[1], len(classes))
        fit_score = model.score(iris.data, iris.target)
        assert fit_score > 0.9, "Failed with algorithm %s and score = %f" % \
            (alg, fit_score)
    # Somewhat hacky regression test: prior to
    # ae7adc880d624615a34bafdb1d75ef67051b8200,
    # predict_proba returned SAMME.R values for SAMME.
    clf_samme.algorithm = "SAMME.R"
    assert_array_less(0,
                      np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
示例8: ada_boost_cv
# 需要导入模块: from sklearn.ensemble import AdaBoostClassifier [as 别名]
# 或者: from sklearn.ensemble.AdaBoostClassifier import predict_proba [as 别名]
def ada_boost_cv(x_train,
                 y_train,
                 cv,
                 max_tree_depth,
                 n_estimators,
                 learning_rate):
    """Cross-validate an AdaBoost-over-trees classifier.

    Returns (accuracy_positive, accuracy_negative, precision, recall,
    f1_score), all computed from the out-of-fold predictions of a
    cv-fold cross-validation. Assumes binary labels {0, 1}.
    """
    tree_classifier = DecisionTreeClassifier(max_depth=max_tree_depth,
                                             class_weight="balanced")
    ada_boost_classifier = AdaBoostClassifier(base_estimator=tree_classifier,
                                              n_estimators=n_estimators,
                                              learning_rate=learning_rate)
    # Out-of-fold class predictions used for the confusion matrix below.
    y_bar = cross_val_predict(estimator=ada_boost_classifier,
                              X=x_train,
                              y=y_train,
                              cv=cv,
                              n_jobs=cv)
    # BUG FIX: cross_val_predict fits clones internally and leaves
    # `ada_boost_classifier` unfitted, so the predict_proba call below
    # raised NotFittedError. Fit on the full training set first.
    ada_boost_classifier.fit(x_train, y_train)
    y_bar_proba = ada_boost_classifier.predict_proba(x_train)
    print(list(zip(y_bar, y_bar_proba)))
    cm = confusion_matrix(y_train, y_bar)
    # Row 0 = true negatives row, row 1 = true positives row.
    accuracy_negative = cm[0,0] / np.sum(cm[0,:])
    accuracy_positive = cm[1,1] / np.sum(cm[1,:])
    precision = cm[1,1] / (cm[1,1] + cm[0,1])
    recall = cm[1,1] / (cm[1,1] + cm[1,0])
    f1_score = 2 * precision * recall / (precision + recall)
    return accuracy_positive, accuracy_negative, precision, recall, f1_score
示例9: ada_prediction
# 需要导入模块: from sklearn.ensemble import AdaBoostClassifier [as 别名]
# 或者: from sklearn.ensemble.AdaBoostClassifier import predict_proba [as 别名]
def ada_prediction(features_train, labels_train, features_test, ids):
    """Fit AdaBoost over a RandomForest base estimator on the full training
    data and write the positive-class probabilities for features_test to
    data/canivel_ada_forest.csv (columns ID, TARGET)."""
    # Held-out split kept from the original experiment; it is not used by
    # the final fit below.
    X_train, X_test, y_train, y_test = cross_validation.train_test_split(features_train, labels_train, random_state=1301, stratify=labels_train, test_size=0.3)
    clf = AdaBoostClassifier(RandomForestClassifier(bootstrap=True,
              criterion='entropy', max_depth=None, max_features=2,
              max_leaf_nodes=16, min_samples_split=10, n_estimators=1000,
              n_jobs=-1, oob_score=False),
              algorithm="SAMME",
              n_estimators=200)
    clf = clf.fit(features_train, labels_train)
    pred = clf.predict_proba(features_test)[:,1]
    # BUG FIX: the file handle was only closed on the happy path; a context
    # manager guarantees it is closed even if writing raises.
    # NOTE(review): mode "wb" with csv.writer is the Python 2 idiom — on
    # Python 3 this needs open(..., "w", newline='') instead; confirm the
    # target interpreter.
    with open("data/canivel_ada_forest.csv", "wb") as predictions_file:
        predictions_file_object = csv.writer(predictions_file)
        predictions_file_object.writerow(["ID", "TARGET"])
        predictions_file_object.writerows(zip(ids, pred))
示例10: training
# 需要导入模块: from sklearn.ensemble import AdaBoostClassifier [as 别名]
# 或者: from sklearn.ensemble.AdaBoostClassifier import predict_proba [as 别名]
def training(baseclassparameters, adaparameters, queue):
treeclassifier = DecisionTreeClassifier(**baseclassparameters)
adaclassifier = AdaBoostClassifier(treeclassifier, **adaparameters)
print "\nBegin calculation with {0} and {1}".format(str(baseclassparameters), str(adaparameters))
adaclassifier.fit(Xtrain, ytrain)
#Predict with the model
prob_predict_test = adaclassifier.predict_proba(Xtest)[:,1]
#Calculate maximal significance
True_Signal_test = prob_predict_test[ytest==1]
True_Bkg_test = prob_predict_test[ytest==0]
best_significance = 0
for x in np.linspace(0, 1, 1000):
S = float(len(True_Signal_test[True_Signal_test>x]))
B = float(len(True_Bkg_test[True_Bkg_test>x]))
significance = S/np.sqrt(S+B)
if significance > best_significance:
best_significance = significance
best_x = x
best_S = S
best_B = B
print "\nCalculation with {} and {} done ".format(str(baseclassparameters), str(adaparameters))
print "Best significance of {0:.2f} archived when cutting at {1:.3f}".format(best_significance, best_x)
print "Signal efficiency: {0:.2f}%".format(100.*best_S/len(True_Signal_test))
print "Background efficiency: {0:.2f}%".format(100.*best_B/len(True_Bkg_test))
print "Purity: {0:.2f}%".format(100.*best_S/(best_S+best_B))
queue.put( (best_significance, baseclassparameters, adaparameters) )
示例11: test_staged_predict
# 需要导入模块: from sklearn.ensemble import AdaBoostClassifier [as 别名]
# 或者: from sklearn.ensemble.AdaBoostClassifier import predict_proba [as 别名]
def test_staged_predict():
    """Check staged predictions."""
    # AdaBoost classification: the last staged result must agree with the
    # one-shot result for predict, predict_proba and score.
    for alg in ['SAMME', 'SAMME.R']:
        model = AdaBoostClassifier(algorithm=alg, n_estimators=10)
        model.fit(iris.data, iris.target)
        final_pred = model.predict(iris.data)
        stagewise_pred = list(model.staged_predict(iris.data))
        final_proba = model.predict_proba(iris.data)
        stagewise_proba = list(model.staged_predict_proba(iris.data))
        final_score = model.score(iris.data, iris.target)
        stagewise_scores = list(model.staged_score(iris.data, iris.target))
        assert_equal(len(stagewise_pred), 10)
        assert_array_almost_equal(final_pred, stagewise_pred[-1])
        assert_equal(len(stagewise_proba), 10)
        assert_array_almost_equal(final_proba, stagewise_proba[-1])
        assert_equal(len(stagewise_scores), 10)
        assert_array_almost_equal(final_score, stagewise_scores[-1])
    # AdaBoost regression: same invariant on the boston data.
    regressor = AdaBoostRegressor(n_estimators=10)
    regressor.fit(boston.data, boston.target)
    final_pred = regressor.predict(boston.data)
    stagewise_pred = list(regressor.staged_predict(boston.data))
    final_score = regressor.score(boston.data, boston.target)
    stagewise_scores = list(regressor.staged_score(boston.data, boston.target))
    assert_equal(len(stagewise_pred), 10)
    assert_array_almost_equal(final_pred, stagewise_pred[-1])
    assert_equal(len(stagewise_scores), 10)
    assert_array_almost_equal(final_score, stagewise_scores[-1])
示例12: train
# 需要导入模块: from sklearn.ensemble import AdaBoostClassifier [as 别名]
# 或者: from sklearn.ensemble.AdaBoostClassifier import predict_proba [as 别名]
def train(xTrain, yTrain, metric):
print 'adaboost'
global boost
boost = AdaBoostClassifier()
boost.fit(xTrain,yTrain)
global trainResults
trainResults = boost.predict_proba(xTrain)[:,1]
i.setSuccess(trainResults, metric)
示例13: adaboost
# 需要导入模块: from sklearn.ensemble import AdaBoostClassifier [as 别名]
# 或者: from sklearn.ensemble.AdaBoostClassifier import predict_proba [as 别名]
def adaboost(X,training_target,Y,est):
    """Fit AdaBoost with `est` estimators on (X, training_target) and
    return the class-probability matrix for the samples in Y.

    BUG FIX: the original computed the probabilities and then fell off the
    end of the function, implicitly returning None; the result is now
    returned to the caller.
    """
    from sklearn.ensemble import AdaBoostClassifier
    clf = AdaBoostClassifier(n_estimators=est)
    clf.fit(X, training_target)
    proba = clf.predict_proba(Y)
    return proba
示例14: test_classification_toy
# 需要导入模块: from sklearn.ensemble import AdaBoostClassifier [as 别名]
# 或者: from sklearn.ensemble.AdaBoostClassifier import predict_proba [as 别名]
def test_classification_toy():
    # Check classification on a toy dataset with both boosting algorithms.
    for alg in ['SAMME', 'SAMME.R']:
        model = AdaBoostClassifier(algorithm=alg, random_state=0)
        model.fit(X, y_class)
        assert_array_equal(model.predict(T), y_t_class)
        assert_array_equal(np.unique(np.asarray(y_t_class)), model.classes_)
        assert_equal(model.predict_proba(T).shape, (len(T), 2))
        assert_equal(model.decision_function(T).shape, (len(T),))
示例15: classify_AdaBoost
# 需要导入模块: from sklearn.ensemble import AdaBoostClassifier [as 别名]
# 或者: from sklearn.ensemble.AdaBoostClassifier import predict_proba [as 别名]
def classify_AdaBoost(train, test):
    """Fit a default AdaBoost classifier on the (features, labels) pair in
    `train` and return class probabilities for the features in `test`."""
    from sklearn.ensemble import AdaBoostClassifier as ABC
    train_x, train_y = train
    clf = ABC()
    clf.fit(train_x, train_y)
    test_x, test_y = test
    return clf.predict_proba(test_x)
return proba