This article collects typical usage examples of the Python method sklearn.ensemble.AdaBoostClassifier.get_params. If you are unsure what AdaBoostClassifier.get_params does or how to call it, the curated code examples here may help. You can also explore the containing class, sklearn.ensemble.AdaBoostClassifier, for further usage examples.
Four code examples of AdaBoostClassifier.get_params are shown below, sorted by popularity by default.
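As a quick orientation before the examples, here is a minimal sketch (not taken from the examples below) of what get_params returns: a plain dict mapping constructor argument names to their current values. With the default deep=True, parameters of a nested base estimator also appear under double-underscore keys such as base_estimator__max_depth (estimator__max_depth in newer scikit-learn versions).

from sklearn.ensemble import AdaBoostClassifier

clf = AdaBoostClassifier(n_estimators=50, learning_rate=1.0)
params = clf.get_params()
print(params["n_estimators"])   # 50
print(params["learning_rate"])  # 1.0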
Example 1: cross_validation
# Required import: from sklearn.ensemble import AdaBoostClassifier [as alias]
# Or: from sklearn.ensemble.AdaBoostClassifier import get_params [as alias]
# Also assumed here: from sklearn.tree import DecisionTreeClassifier and
# from sklearn.model_selection import train_test_split
def cross_validation(X, y):
    # fig = plt.figure()
    # ax = fig.add_subplot(111, projection='3d')
    assert len(y) == len(X)
    # Hold out 80% of the data as the test set (despite the original
    # comment, test_size=0.8 is not an equal split).
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.8, random_state=42)
    depth = [8, 16, 32, 64]
    # min_samples_split=1 from the original grid is rejected by modern
    # scikit-learn (the minimum is 2), so the grid starts at 2 here.
    split = [2, 4, 8, 16, 32, 64]
    best_score = 0
    best_train_score = 0
    best_param = None
    for d in depth:
        for s in split:
            estimator = DecisionTreeClassifier(max_features='sqrt', max_depth=d, min_samples_split=s)
            model = AdaBoostClassifier(n_estimators=500, base_estimator=estimator)
            model = model.fit(X_train, y_train)
            print("Depth: %d split: %d" % (d, s))
            score_train = model.score(X_train, y_train)
            print("Model training score:")
            print(score_train)
            # ax.scatter(d, s, score_train, c='b', marker='o')
            score_test = model.score(X_test, y_test)
            print("Model test score:")
            print(score_test)
            # ax.scatter(d, s, score_test, c='r', marker='^')
            if score_test > best_score:
                best_score = score_test
                best_train_score = score_train
                best_param = model.get_params()
    print("==================")
    print(best_train_score)
    print(best_score)
    print(best_param)
    return best_param
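Since get_params and set_params are symmetric, the dict returned by cross_validation can rebuild the winning configuration on a fresh classifier. A hedged usage sketch (assuming X and y are already loaded, and the same scikit-learn version throughout, since the dict contains version-specific keys such as base_estimator):

best_param = cross_validation(X, y)
final_model = AdaBoostClassifier().set_params(**best_param)  # round-trips get_params()
final_model.fit(X, y)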
Example 2: __init__
# Required import: from sklearn.ensemble import AdaBoostClassifier [as alias]
# Or: from sklearn.ensemble.AdaBoostClassifier import get_params [as alias]
# Also assumed here: numpy as np, from time import clock, and
# from sklearn.tree import DecisionTreeClassifier
class adaBoost:
    __all__ = ['run', 'plotFeatureRanking', 'plotScores']

    def __init__(self, foundVariables, trainingData, trainingClasses, trainingWeights, testingData, testingClasses, adaName, bkg_name):
        """Build a boosted decision-tree ensemble and compute the feature importances.

        Keyword args:
        foundVariables -- the list of names of found variables; can be obtained via Sample_x.returnFoundVariables()
        trainingData -- the training data
        trainingClasses -- the training data classes
        trainingWeights -- the per-event training weights
        testingData -- the testing data
        testingClasses -- the testing data classes
        adaName -- the name of the object (e.g. sig+bkg_name)
        bkg_name -- the name of the background sample
        """
        # The original passed compute_importances=True to both classifiers; that
        # argument was removed from scikit-learn long ago (importances are always
        # available via feature_importances_), so it is dropped here.
        self.ada = AdaBoostClassifier(
            DecisionTreeClassifier(max_depth=4, min_samples_split=2, min_samples_leaf=100),
            n_estimators=400, learning_rate=0.5, algorithm="SAMME")
        self.foundVariables = foundVariables
        self.trainingData = trainingData
        self.trainingClasses = trainingClasses
        self.testingData = testingData
        self.testingClasses = testingClasses
        self.trainingWeights = trainingWeights
        self.name = adaName
        self.bkg_name = bkg_name
        self.elapsed = 0.0

    def returnName(self):
        return self.name

    def run(self):
        """Run the fitting and testing."""
        # Start the fitting and time it (time.clock was removed in
        # Python 3.8; time.perf_counter is the modern equivalent).
        start = clock()
        print('starting training on AdaBoostClassifier')
        self.ada.fit(self.trainingData, self.trainingClasses, self.trainingWeights)
        self.elapsed = clock() - start
        print('time taken for training: ' + str(self.elapsed))
        # Set up the arrays for testing/evaluation:
        # xtA_C = copy.deepcopy(self.testingData)
        # pred = self.ada.predict(xtA_C)
        # import createHists
        # createHists.drawSigBkgDistrib(xtA_C, pred, self.foundVariables)  # draw the signal and background distributions together
        # List the importances of each variable in the BDT and get the score on the test data.
        self.importancesada = self.ada.feature_importances_
        print('importances')
        print(self.importancesada)
        self.score = self.ada.score(self.testingData, self.testingClasses)
        self.params = self.ada.get_params()
        self.std_mat = np.std([tree.feature_importances_ for tree in self.ada.estimators_],
                              axis=0)
        self.indicesada = np.argsort(self.importancesada)[::-1]
        self.variableNamesSorted = []
        for i in self.indicesada:
            self.variableNamesSorted.append(self.foundVariables[i])
        # Print the ranking of the top 12 features.
        print("Feature ranking:")
        for f in range(12):
            print("%d. feature %d (%f) %s" % (f + 1, self.indicesada[f],
                                              self.importancesada[self.indicesada[f]],
                                              self.variableNamesSorted[f]))
        self.twoclass_output = self.ada.decision_function(self.testingData)
        self.twoclass_output_train = self.ada.decision_function(self.trainingData)
        self.class_proba = self.ada.predict_proba(self.testingData)[:, -1]

    def plotFeatureRanking(self):
        # Use the Agg backend so this can run in batch; otherwise matplotlib
        # complains about not being able to open a display.
        from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
        import matplotlib.pyplot as plt
        import pylab as pl
        # Plot the feature ranking.
        pl.figure()
        pl.title("Feature importances Ada")
        pl.bar(range(len(self.variableNamesSorted)), self.importancesada[self.indicesada],
               color="r", yerr=self.std_mat[self.indicesada], align="center")
        pl.xticks(range(12), self.variableNamesSorted)
        pl.xlim([-1, 12])
        pl.show()

    def plotScores(self, returnROC=False, rocInput=[]):
        from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
        import matplotlib.pyplot as plt
        import pylab as pl
        from sklearn.metrics import roc_curve, auc
        plot_colors = "rb"
        plot_step = 1000.0
        class_names = "AB"
        # Plot the training points.
        pl.subplot(131)
        for i, n, c in zip(range(2), class_names, plot_colors):
            idx = np.where(self.trainingClasses == i)
            pl.scatter(self.trainingData[idx, 0], self.trainingData[idx, 1],
                       c=c, cmap=pl.cm.Paired,
                       label="Class %s" % n)
        pl.axis("tight")
# ......... the rest of this example is omitted .........
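The omitted remainder presumably draws the score distributions and a ROC curve. As a hedged, standalone sketch (not the omitted code) of how the roc_curve and auc imports above are typically combined with AdaBoost decision_function scores:

import pylab as pl
from sklearn.metrics import roc_curve, auc

def plot_roc(y_true, scores):
    # Compute false/true positive rates from the decision scores
    # and plot a single ROC curve labelled with its area.
    fpr, tpr, _ = roc_curve(y_true, scores)
    pl.plot(fpr, tpr, label="ROC (area = %0.2f)" % auc(fpr, tpr))
    pl.xlabel("False positive rate")
    pl.ylabel("True positive rate")
    pl.legend(loc="lower right")
    pl.show()

# e.g. plot_roc(testingClasses, ada.decision_function(testingData))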
Example 3: AdaBoostClassifier
# Required import: from sklearn.ensemble import AdaBoostClassifier [as alias]
# Or: from sklearn.ensemble.AdaBoostClassifier import get_params [as alias]
# Also assumed here: from sklearn import svm, from sklearn.linear_model import
# LogisticRegression, from sklearn.tree import DecisionTreeClassifier, plus
# clf, clf_etree, X_train, y_train, X_val, y_val, reg, g, and fe defined earlier
print "Validation set score: ERF " , clf_etree.score(X_val, y_val)
clf_boost = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),algorithm="SAMME", n_estimators=500, random_state=74494, learning_rate=0.8)
clf_boost.fit(X_train, y_train)
print "Validation set score: ABOOST " , clf_boost.score(X_val, y_val)
#clf_gboost = GradientBoostingClassifier(n_estimators=int(reg), random_state=74494, learning_rate=0.2)
#clf_gboost.fit(X_train, y_train)
#print "Validation set score:LR " , clf_gboost.score(X_val, y_val)
print "Classifier:"
print clf, clf.get_params()
print clf_etree, clf_etree.get_params()
print clf_boost, clf_boost.get_params()
if(fe==1): #L1 norm based feature elimination
clf_fe = LogisticRegression(C=1000,penalty='l1',random_state=0)
clf_fe.fit(X_train, y_train)
X_train = X_train[:,clf_fe.coef_.ravel()!=0]
print "Xtrain.shape: ", X_train.shape
X_val = X_val[:,clf_fe.coef_.ravel()!=0]
clf2_l = svm.SVC(kernel='linear', C=reg)
clf2_l.fit(X_train, y_train)
print "Lasso Validation set score filtered coeff linear: " , clf2_l.score(X_val, y_val)
clf2 = svm.SVC(kernel='rbf', C=reg, gamma=g)
clf2.fit(X_train, y_train)
print "Lasso Validation set score filtered coeff: " , clf2.score(X_val, y_val)
Example 4: print
# Required import: from sklearn.ensemble import AdaBoostClassifier [as alias]
# Or: from sklearn.ensemble.AdaBoostClassifier import get_params [as alias]
# Also assumed here: time, from sklearn.metrics import accuracy_score, plus
# skf, clf, X, Y, i, start, and f defined earlier (see the sketch after this example)
test_error = []
train_error = []
for train_index, test_index in skf:
    print("for iteration {}".format(i))
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = Y[train_index], Y[test_index]
    clf = clf.fit(X_train, y_train)
    # Despite the variable names, these lists accumulate accuracy scores.
    y_pred = clf.predict(X_test)
    test_error.append(accuracy_score(y_test, y_pred))
    y_pred = clf.predict(X_train)
    train_error.append(accuracy_score(y_train, y_pred))
    print(clf.get_params())
    # time.clock was removed in Python 3.8; time.perf_counter is the modern equivalent.
    print('Time to fit the dataset of alpha = {} is {}'.format(i, time.clock() - start))
# y_pred = clf.predict(X)
# train_error = mean_absolute_error(y_pred, Y)
# y_pred = clf.predict(X_test)
test_error = sum(test_error) / len(test_error)
train_error = sum(train_error) / len(train_error)
f.write('{},{},{}\n'.format(i, train_error, test_error))
print('{},{},{}\n'.format(i, train_error, test_error))
f.flush()
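This snippet assumes that skf, clf, X, Y, i, start, and f were set up earlier in a loop over alpha values. A hedged sketch of plausible scaffolding (every name here is hypothetical, reconstructed only from how the snippet uses it):

import time
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import StratifiedKFold

i = 0.5  # the alpha (learning rate) currently being scanned
clf = AdaBoostClassifier(n_estimators=100, learning_rate=i)
skf = StratifiedKFold(n_splits=5).split(X, Y)  # X, Y assumed already loaded
f = open('results.csv', 'w')
start = time.perf_counter()  # stands in for the snippet's time.clock() on old Pythons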