This article collects typical usage examples of the Python method sklearn.ensemble.ExtraTreesClassifier.get_params. If you have been wondering what ExtraTreesClassifier.get_params does and how to use it, the curated examples below may help; you can also read further about the class it belongs to, sklearn.ensemble.ExtraTreesClassifier.
Three code examples of ExtraTreesClassifier.get_params are shown below, sorted by popularity.
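Before the project examples, here is a minimal self-contained sketch of what get_params does (the parameter values are illustrative defaults, not taken from any of the projects below):

from sklearn.ensemble import ExtraTreesClassifier

# get_params works on an unfitted estimator: it returns the constructor
# arguments as a plain dict, which is also what grid-search tooling reads
clf = ExtraTreesClassifier(n_estimators=200, random_state=0)
params = clf.get_params()
print(params['n_estimators'])  # 200
print(params['random_state'])  # 0

# the companion method set_params updates those parameters in place
clf.set_params(n_estimators=100)
print(clf.get_params()['n_estimators'])  # 100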
Example 1: train_UsingExtraTreesClassifier
# Module required: from sklearn.ensemble import ExtraTreesClassifier
# Method demonstrated: ExtraTreesClassifier.get_params
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import ExtraTreesClassifier

def train_UsingExtraTreesClassifier(df, header, x_train, y_train, x_test, y_test):
    # training; 'entropy' was also tried for the information gain, but 'gini'
    # seemed to give a marginally better fit, both in sample and out of sample.
    # (compute_importances was removed from scikit-learn; feature_importances_
    # is now always available on fitted forests.)
    clf = ExtraTreesClassifier(n_estimators=200, random_state=0, criterion='gini',
                               bootstrap=True, oob_score=True)
    clf.fit(x_train, y_train)
    # estimation of goodness of fit
    print("Estimation of goodness of fit using the ExtraTreesClassifier is : %f \n" % clf.score(x_test, y_test))
    print("Estimation of out of bag score using the ExtraTreesClassifier is : %f \n\n" % clf.oob_score_)
    # getting parameters back, if needed
    clf.get_params()
    # get the vector of predicted labels back
    y_test_predicted = clf.predict(x_test)
    # feature columns only (the last header entry is the target); drop() keeps
    # the original column order, so the labels line up with the importances
    X = df.drop(columns=[header[-1]])
    feature_importance = clf.feature_importances_
    # on a scale of 10 - make importances relative to max importance and plot them
    feature_importance = 10.0 * (feature_importance / feature_importance.max())
    sorted_idx = np.argsort(feature_importance)  # indices that would sort the array
    pos = np.arange(sorted_idx.shape[0]) + .5
    plt.figure(figsize=(12, 6))
    plt.subplot(1, 1, 1)
    plt.barh(pos, feature_importance[sorted_idx], align='center')
    plt.yticks(pos, X.columns[sorted_idx])
    plt.xlabel('Relative Importance')
    plt.title('Variable Importance')
    plt.show()
    return y_test_predicted
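As a purely hypothetical usage of the function above (the iris data, the 'target' column name and the 80/20 split are illustrative, not part of the original project):

import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split

iris = load_iris()
iris_df = pd.DataFrame(iris.data, columns=iris.feature_names)
iris_df['target'] = iris.target          # make the last column the target
header = list(iris_df.columns)
x = iris_df[header[:-1]]
y = iris_df[header[-1]]
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
y_pred = train_UsingExtraTreesClassifier(iris_df, header, x_train, y_train, x_test, y_test)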
Example 2: AdaBoostClassifier
# Module required: from sklearn.ensemble import ExtraTreesClassifier
# Method demonstrated: ExtraTreesClassifier.get_params
# clf, clf_etree, X_train, y_train, X_val, y_val, fe, reg and g are assumed
# to be defined earlier in the original script
from sklearn import svm
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier

clf_etree.fit(X_train, y_train)
print("Validation set score: ERF ", clf_etree.score(X_val, y_val))
clf_boost = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1), algorithm="SAMME",
                               n_estimators=500, random_state=74494, learning_rate=0.8)
clf_boost.fit(X_train, y_train)
print("Validation set score: ABOOST ", clf_boost.score(X_val, y_val))
#clf_gboost = GradientBoostingClassifier(n_estimators=int(reg), random_state=74494, learning_rate=0.2)
#clf_gboost.fit(X_train, y_train)
#print("Validation set score: LR ", clf_gboost.score(X_val, y_val))
print("Classifier:")
print(clf, clf.get_params())
print(clf_etree, clf_etree.get_params())
print(clf_boost, clf_boost.get_params())
if fe == 1:  # L1-norm-based feature elimination
    # liblinear is the solver that supports the l1 penalty
    clf_fe = LogisticRegression(C=1000, penalty='l1', solver='liblinear', random_state=0)
    clf_fe.fit(X_train, y_train)
    # keep only the features whose L1 coefficients are non-zero
    X_train = X_train[:, clf_fe.coef_.ravel() != 0]
    print("Xtrain.shape: ", X_train.shape)
    X_val = X_val[:, clf_fe.coef_.ravel() != 0]
    clf2_l = svm.SVC(kernel='linear', C=reg)
    clf2_l.fit(X_train, y_train)
    print("Lasso Validation set score filtered coeff linear: ", clf2_l.score(X_val, y_val))
    clf2 = svm.SVC(kernel='rbf', C=reg, gamma=g)
    clf2.fit(X_train, y_train)
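The manual coefficient masking above can also be expressed with scikit-learn's SelectFromModel; here is a minimal sketch on made-up data (the random data and the threshold value are assumptions for illustration only):

import numpy as np
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LogisticRegression

rng = np.random.RandomState(0)
X_demo = rng.randn(100, 20)              # made-up data for illustration
y_demo = rng.randint(0, 2, 100)

l1_model = LogisticRegression(C=1000, penalty='l1', solver='liblinear', random_state=0)
selector = SelectFromModel(l1_model, threshold=1e-10)  # keep non-zero coefficients
X_reduced = selector.fit_transform(X_demo, y_demo)
print(X_reduced.shape)                   # only columns with non-zero coefficients remain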
Example 3: ExtraTreesClassifier
# Module required: from sklearn.ensemble import ExtraTreesClassifier
# Method demonstrated: ExtraTreesClassifier.get_params
# In[14]:
# sklearn.grid_search was removed in scikit-learn 0.20; GridSearchCV now
# lives in sklearn.model_selection
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import f1_score, make_scorer
from sklearn.ensemble import ExtraTreesClassifier

parameters = {'n_estimators': [1, 32]}
model = ExtraTreesClassifier()
f1_scorer = make_scorer(f1_score, pos_label='yes')
clf = GridSearchCV(model, param_grid=parameters, scoring=f1_scorer)
# fit the grid search itself (the original fitted the bare model,
# which left the grid search unused)
clf.fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
print(clf.best_estimator_.get_params())
print("F1 score for test set: {}".format(f1_score(Y_test, Y_pred, pos_label='yes')))
# In[12]:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import ExtraTreesClassifier
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250, random_state=0)
forest.fit(X, Y)
importances = forest.feature_importances_
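The cell above is truncated right after computing the importances; a plausible continuation, in the spirit of scikit-learn's feature-importances example (this ranking and plot are an assumption, not part of the original snippet):

# rank features from most to least important
indices = np.argsort(importances)[::-1]
print("Feature ranking:")
for rank, idx in enumerate(indices, start=1):
    print("%d. feature %d (%f)" % (rank, idx, importances[idx]))

# bar plot of the ranked importances
plt.figure()
plt.title("Feature importances")
plt.bar(range(len(importances)), importances[indices], align="center")
plt.xticks(range(len(importances)), indices)
plt.show()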