This article collects typical usage examples of Python's sklearn.ensemble.ExtraTreesClassifier class. If you have been wondering what ExtraTreesClassifier is for, how to use it, or what real-world code that uses it looks like, the curated class examples below should help.
Fifteen code examples of the ExtraTreesClassifier class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
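Before the collected examples, a minimal self-contained sketch of the basic ExtraTreesClassifier workflow may help for orientation. The make_classification data and all parameter values here are illustrative assumptions, not taken from any example below:

import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.model_selection import train_test_split

# Synthetic data stands in for a real dataset.
X, y = make_classification(n_samples=500, n_features=20, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

clf = ExtraTreesClassifier(n_estimators=100, random_state=0, n_jobs=-1)
clf.fit(X_train, y_train)

print("test accuracy:", clf.score(X_test, y_test))
print("top importances:", np.sort(clf.feature_importances_)[::-1][:5])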
Example 1: calc_prob
def calc_prob(df_features_driver, df_features_other):

    df_train = df_features_driver.append(df_features_other)
    df_train.reset_index(inplace = True)
    df_train.Driver = df_train.Driver.astype(int)

    # So far, the best result was achieved by using a RandomForestClassifier with Bagging
    # model = BaggingClassifier(base_estimator = ExtraTreesClassifier())
    # model = BaggingClassifier(base_estimator = svm.SVC(gamma=2, C=1))
    # model = BaggingClassifier(base_estimator = linear_model.LogisticRegression())
    # model = BaggingClassifier(base_estimator = linear_model.LogisticRegression())
    # model = BaggingClassifier(base_estimator = AdaBoostClassifier())
    # model = RandomForestClassifier(200)
    # model = BaggingClassifier(base_estimator = [RandomForestClassifier(), linear_model.LogisticRegression()])
    # model = EnsembleClassifier([BaggingClassifier(base_estimator = RandomForestClassifier()),
    #                             GradientBoostingClassifier])
    # model = GradientBoostingClassifier(n_estimators = 10000)
    model = ExtraTreesClassifier(n_estimators=100, max_features='auto', random_state=0,
                                 n_jobs=2, criterion='entropy', bootstrap=True)
    # model = ExtraTreesClassifier(500, criterion='entropy')

    feature_columns = df_train.iloc[:, 4:]

    # Train the classifier
    model.fit(feature_columns, df_train.Driver)

    df_submission = pd.DataFrame()
    df_submission['driver_trip'] = create_first_column(df_features_driver)

    probs_array = model.predict_proba(feature_columns[:200])  # Return array with the probability for every driver
    probs_df = pd.DataFrame(probs_array)

    df_submission['prob'] = np.array(probs_df.iloc[:, 1])

    return df_submission
Example 2: ET_classif
def ET_classif(features_df=None, labels_df=None):
    '''Scoring function to be used in a SelectKBest feature selection
    object.

    This scoring function assigns variable importances to the features
    passed in to it using the ExtraTreesClassifier. It then returns
    the features as two identical arrays mimicking the scores and
    p-values arrays required by SelectKBest to pick the top K
    features.

    Args:
        features_df: Pandas dataframe of features to be used to predict
            using the ExtraTreesClassifier.
        labels_df: Pandas dataframe of the labels being predicted.
    Returns:
        Two identical arrays containing the feature importance scores
        returned for each feature by the ExtraTreesClassifier.
    '''
    reducer = ExtraTreesClassifier(n_estimators=500, bootstrap=False,
                                   oob_score=False, max_features=.10,
                                   min_samples_split=10, min_samples_leaf=2,
                                   criterion='gini', random_state=42)

    reducer.fit(features_df, labels_df)
    return reducer.feature_importances_, reducer.feature_importances_
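As the docstring notes, ET_classif is meant to be plugged into SelectKBest as a score_func. A minimal usage sketch, assuming ET_classif from the example above is in scope; the synthetic data and k=10 are illustrative assumptions rather than part of the original example:

import pandas as pd
from sklearn.datasets import make_classification
from sklearn.feature_selection import SelectKBest

# Synthetic data stands in for the real features/labels dataframes.
X, y = make_classification(n_samples=200, n_features=30, random_state=0)
features_df = pd.DataFrame(X)
labels_df = pd.Series(y)

# SelectKBest calls ET_classif(X, y) internally and keeps the 10 features
# with the highest importance scores it returns.
selector = SelectKBest(score_func=ET_classif, k=10)
X_reduced = selector.fit_transform(features_df, labels_df)
print(X_reduced.shape)  # (200, 10)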
Example 3: learn
def learn(f):
    global raw_data
    print 'testing classifier'
    data = raw_data[raw_data['label'] != 'unknown']
    data = data[data['file type'] == 'EXECUTE']
    X = data.as_matrix(f)
    y = np.array(data['label'].tolist())

    #clf = RandomForestClassifier(n_estimators=100)
    clf = ExtraTreesClassifier(n_estimators=100)
    #clf = AdaBoostClassifier()
    scores = sklearn.cross_validation.cross_val_score(clf, X, y, cv=10)
    print("predicted accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))

    seed = 3301
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=seed)
    clf.fit(X_train, y_train)
    scores = clf.score(X_test, y_test)
    print("actual accuracy: %0.2f" % scores)

    importances = zip(f, clf.feature_importances_)
    importances.sort(key=lambda k: k[1], reverse=True)
    for im in importances[0:20]:
        print im[0].ljust(30), im[1]

    #y_pred = clf.predict(X_test)
    #labels = ['good', 'bad']
    #cm = confusion_matrix(y_test, y_pred, labels)
    #plot_cm(cm, labels)

    #joblib.dump(clf, 'model.pkl')

    return clf
Example 4: tree_based_selection
def tree_based_selection(self, data_set, data_target, feature_names):
    """
    Select features with an ExtraTreesClassifier and SelectFromModel.

    :param data_set: feature matrix of shape (n_samples, n_features)
    :param data_target: target labels
    :param feature_names: column names corresponding to data_set
    :return: the reduced feature matrix and the indices of the kept columns
    """
    clf = ExtraTreesClassifier()
    clf = clf.fit(data_set, data_target)
    print clf.feature_importances_

    model = SelectFromModel(clf, prefit=True)
    feature_set = model.transform(data_set)

    # Recover which original columns survived the selection by matching columns.
    fea_index = []
    for A_col in np.arange(data_set.shape[1]):
        for B_col in np.arange(feature_set.shape[1]):
            if (data_set[:, A_col] == feature_set[:, B_col]).all():
                fea_index.append(A_col)

    check = {}
    for i in fea_index:
        check[feature_names[i]] = data_set[0][i]
    print np.array(check)

    return feature_set, fea_index
Example 5: train_random_forest
def train_random_forest(X_train, y_train, **kwargs):
    from sklearn.ensemble import ExtraTreesClassifier
    n_estimators = kwargs.pop('n_estimators', 300)
    max_features = kwargs.pop('max_features', 'auto')
    n_jobs = kwargs.pop('n_jobs', -1)
    verbose = kwargs.pop('verbose', 0)
    tuned_params = kwargs.pop('tuned_params', None)

    # initialize baseline classifier
    clf = ExtraTreesClassifier(n_estimators=n_estimators, random_state=42,
                               n_jobs=n_jobs, verbose=verbose, criterion='gini',
                               max_features=max_features, oob_score=True,
                               bootstrap=True)

    if tuned_params is not None:  # optimize if desired
        from sklearn.grid_search import GridSearchCV
        cv = GridSearchCV(clf, tuned_params, cv=5, scoring='roc_auc',
                          n_jobs=n_jobs, verbose=verbose, refit=True)
        cv.fit(X_train, y_train)
        clf = cv.best_estimator_
    else:  # otherwise train with the specified parameters (no tuning)
        clf.fit(X_train, y_train)

    return clf
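A hedged usage sketch for train_random_forest: the calls below assume X_train and y_train already exist as arrays and that the legacy sklearn.grid_search module is importable (as in the sklearn version this example targets); the grid values are purely illustrative.

# Illustrative call patterns; X_train / y_train and the grid values are assumptions.
param_grid = {'n_estimators': [100, 300, 600],
              'min_samples_leaf': [1, 3, 5]}

clf_baseline = train_random_forest(X_train, y_train, n_estimators=300, n_jobs=-1)      # no tuning
clf_tuned = train_random_forest(X_train, y_train, tuned_params=param_grid, n_jobs=-1)  # GridSearchCV over param_grid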
Example 6: tree_based_feature_selection
def tree_based_feature_selection(self, x: np.ndarray, y: np.ndarray) -> np.ndarray:
    n = len(self.features)
    forest = ExtraTreesClassifier(n_estimators=250, random_state=0)
    forest.fit(x, y)
    importances = forest.feature_importances_
    print(importances)
    std = np.std([tree.feature_importances_ for tree in forest.estimators_], axis=0)
    indices = np.argsort(importances)[::-1]

    print("Feature ranking:")
    for f in range(n):
        print("%d. feature %d: %s (%f)" % (f + 1, indices[f], self.features[indices[f]], importances[indices[f]]))

    # Plot the feature importances of the forest
    # plt.figure()
    # plt.title("Feature importances")
    # plt.bar(range(n), importances[indices],
    #         color="r", yerr=std[indices], align="center")
    # plt.xticks(range(n), indices)
    # plt.xlim([-1, n])
    # plt.show()

    n = 12
    print(indices[0:n+1])
    print(self.features[indices[0:n+1]])
    new_x = x[:, indices[0:n+1]]
    return new_x
Example 7: crossVal
def crossVal(positions, X, y, missedYFile):
    outF = open(missedYFile, 'w')
    posArray = np.array(positions)

    # Split into training and test
    sss = StratifiedShuffleSplit(y, 4, test_size=0.1, random_state=442)
    cvRound = 0
    for train_index, test_index in sss:
        clf = ExtraTreesClassifier(n_estimators=300,
                                   random_state=13,
                                   bootstrap=True,
                                   max_features=20,
                                   min_samples_split=1,
                                   max_depth=8,
                                   min_samples_leaf=13,
                                   n_jobs=4
                                   )
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]
        pos_test = posArray[test_index]
        clf = clf.fit(X_train, y_train)
        preds = clf.predict(X_test)
        metrics.confusion_matrix(y_test, preds)
        print(metrics.classification_report(y_test, clf.predict(X_test)))
        for loc, t, p in zip(pos_test, y_test, preds):
            if t == '0' and p == '1':
                print >> outF, loc + '\t' + str(cvRound)
        cvRound += 1

    outF.close()
Example 8: remove_feature_tree_based
def remove_feature_tree_based(train_X, train_Y):
    '''
    Removes features based on trees - see sklearn:
    http://scikit-learn.org/dev/auto_examples/ensemble/plot_forest_importances.html#example-ensemble-plot-forest-importances-py

    Actually removes based on "importance"
    '''
    forest = ExtraTreesClassifier(n_estimators=1000,
                                  compute_importances = True,
                                  random_state = 0)

    forest.fit(train_X, train_Y)
    importances = forest.feature_importances_
    std = np.std([tree.feature_importances_ for tree in forest.estimators_],
                 axis=0)
    indices = np.argsort(importances)[::-1]

    x_labels = ['rc1', 'rc2', 'dca1', 'dca2', 'dcm1', 'dcm2', 'ace1', 'ace2', 'acsc1', 'acsc2', 'acsv1', 'acsv2', 'acss1', 'acss2', 'acsk1', 'acsk2', 'taca1', 'taca2', 'tdc1', 'tdc2', 'gmin', 'gmean', 'trd', 'ep111', 'ep112', 'ep211', 'ep212', 'ep311', 'ep312', 'ep411', 'ep412', 'ep511', 'ep512', 'ep611', 'ep612', 'ep121', 'ep122', 'ep221', 'ep222', 'ep321', 'ep322', 'ep421', 'ep422', 'ep521', 'ep522', 'ep621', 'ep622']

    # Print the feature ranking
    print "Feature ranking:"
    for f in xrange(46):
        print "%d. feature %s (%f)" % (f + 1, x_labels[indices[f]], importances[indices[f]])

    # Transform the data to have only the features that are important
    x_new = forest.transform(train_X)
    return (forest, x_new)
Example 9: algo_fit_cross_validated
def algo_fit_cross_validated(training_matrix, target):
    # Build a forest and compute the feature importances
    forest = ExtraTreesClassifier(n_estimators=250,
                                  random_state=0)

    forest.fit(training_matrix, target)
    importances = forest.feature_importances_
    std = np.std([tree.feature_importances_ for tree in forest.estimators_],
                 axis=0)
    indices = np.argsort(importances)[::-1]

    l = list(training_matrix.columns.values)
    for f in range(training_matrix.shape[1]):
        print("%d. feature %d(%s) (%f)" % (f + 1, indices[f], l[indices[f]], importances[indices[f]]))

    ##### Works well ######
    # SVM
    # svm = SVC(kernel="linear", C=0.06)
    # svm.fit(training_matrix, target)
    #
    # scores_svm = cross_validation.cross_val_score(svm, training_matrix, target, cv=5)
    # print("(svm) Accuracy: %0.5f (+/- %0.2f)" % (scores_svm.mean(), scores_svm.std() * 2))
    #
    # return svm
    ##### Works well ######

    # Random Forest
    rf = RandomForestClassifier(n_estimators=1500, max_depth=2, max_features=4)
    scores_rf = cross_validation.cross_val_score(rf, training_matrix, target, cv=5)
    print("(Random Forest) Accuracy: %0.5f (+/- %0.2f)" % (scores_rf.mean(), scores_rf.std() * 2))
    rf.fit(training_matrix, target)

    return rf
Example 10: extratreeclassifier
def extratreeclassifier(input_file, Output, test_size):
    lvltrace.lvltrace("LVLEntree dans extratreeclassifier split_test")
    ncol = tools.file_col_coma(input_file)
    data = np.loadtxt(input_file, delimiter=',', usecols=range(ncol-1))
    X = data[:, 1:]
    y = data[:, 0]
    n_samples, n_features = X.shape
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
    print X_train.shape, X_test.shape
    clf = ExtraTreesClassifier(n_estimators=10)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    print "Extremely Randomized Trees"
    print "classification accuracy:", metrics.accuracy_score(y_test, y_pred)
    print "precision:", metrics.precision_score(y_test, y_pred)
    print "recall:", metrics.recall_score(y_test, y_pred)
    print "f1 score:", metrics.f1_score(y_test, y_pred)
    print "\n"
    results = Output + "_Extremely_Random_Forest_metrics_test.txt"
    file = open(results, "w")
    file.write("Extremely Random Forest Classifier estimator accuracy\n")
    file.write("Classification Accuracy Score: %f\n" % metrics.accuracy_score(y_test, y_pred))
    file.write("Precision Score: %f\n" % metrics.precision_score(y_test, y_pred))
    file.write("Recall Score: %f\n" % metrics.recall_score(y_test, y_pred))
    file.write("F1 Score: %f\n" % metrics.f1_score(y_test, y_pred))
    file.write("\n")
    file.write("True Value, Predicted Value, Iteration\n")
    for n in xrange(len(y_test)):
        file.write("%f,%f,%i\n" % (y_test[n], y_pred[n], (n+1)))
    file.close()
    title = "Extremely Randomized Trees %f" % test_size
    save = Output + "Extremely_Randomized_Trees_confusion_matrix" + "_%s.png" % test_size
    plot_confusion_matrix(y_test, y_pred, title, save)
    lvltrace.lvltrace("LVLSortie dans extratreeclassifier split_test")
Example 11: reduceRF
def reduceRF(label):
    global x_data_rf_reduced, importantFeatureLocs
    model = ExtraTreesClassifier()
    model.fit(x_data, y_data[:, label])

    # the relative importance of each attribute
    importance = model.feature_importances_
    weight = float(0)
    del importantFeatureLocs[:]  # reset
    #print(importance)

    for ele in np.sort(importance)[::-1]:
        weight += float(ele)
        featureIndex = np.where(importance == ele)
        for loc in featureIndex[0]:
            importantFeatureLocs.append(loc)

        if weight > RFThreshold:
            break

    # remove duplications
    importantFeatureLocs = list(set(importantFeatureLocs))

    # extracting relevant columns from input data. Note that importantFeatureLocs
    # may be unsorted (since python 'set' is unsorted), so features are extracted
    # in unordered fashion. This info is stored in the softmax model class
    x_data_rf_reduced = x_data[:, importantFeatureLocs]
Example 12: fit
def fit(self, X, Y, sample_weight=None):
    from sklearn.ensemble import ExtraTreesClassifier
    from sklearn.feature_selection import SelectFromModel

    num_features = X.shape[1]
    max_features = int(float(self.max_features) * (np.log(num_features) + 1))
    # Use at most half of the features
    max_features = max(1, min(int(X.shape[1] / 2), max_features))
    estimator = ExtraTreesClassifier(
        n_estimators=self.n_estimators,
        criterion=self.criterion,
        max_depth=self.max_depth,
        min_samples_split=self.min_samples_split,
        min_samples_leaf=self.min_samples_leaf,
        bootstrap=self.bootstrap,
        max_features=max_features,
        max_leaf_nodes=self.max_leaf_nodes,
        oob_score=self.oob_score,
        n_jobs=self.n_jobs,
        verbose=self.verbose,
        random_state=self.random_state,
        class_weight=self.class_weight,
    )
    estimator.fit(X, Y, sample_weight=sample_weight)

    self.preprocessor = SelectFromModel(estimator=estimator, threshold="mean", prefit=True)

    return self
Example 13: MyExtraTree
class MyExtraTree(MyClassifier):

    def __init__(self, params=dict()):
        self._params = params
        self._extree = ExtraTreesClassifier(**(self._params))

    def update_params(self, updates):
        self._params.update(updates)
        self._extree = ExtraTreesClassifier(**(self._params))

    def fit(self, Xtrain, ytrain):
        self._extree.fit(Xtrain, ytrain)

    # def predict(self, Xtest, option = None):
    #     return self._extree.predict(Xtest)

    def predict_proba(self, Xtest, option = None):
        return self._extree.predict_proba(Xtest)[:, 1]

    def predict_proba_multi(self, Xtest, option = None):
        return self._extree.predict_proba(Xtest)

    def plt_feature_importance(self, fname_list, f_range = list()):
        importances = self._extree.feature_importances_
        std = np.std([tree.feature_importances_ for tree in self._extree.estimators_], axis=0)
        indices = np.argsort(importances)[::-1]
        fname_array = np.array(fname_list)

        if not f_range:
            f_range = range(indices.shape[0])
        n_f = len(f_range)

        plt.figure()
        plt.title("Extra Tree Feature importances")
        plt.barh(range(n_f), importances[indices[f_range]],
                 color="b", xerr=std[indices[f_range]], ecolor='k', align="center")
        plt.yticks(range(n_f), fname_array[indices[f_range]])
        plt.ylim([-1, n_f])
        plt.show()

    def list_feature_importance(self, fname_list, f_range = list(), return_list = False):
        importances = self._extree.feature_importances_
        indices = np.argsort(importances)[::-1]

        print 'Extra tree feature ranking:'

        if not f_range:
            f_range = range(indices.shape[0])
        n_f = len(f_range)

        for i in range(n_f):
            f = f_range[i]
            print '{0:d}. feature[{1:d}] {2:s} ({3:f})'.format(f + 1, indices[f], fname_list[indices[f]], importances[indices[f]])

        if return_list:
            return [indices[f_range[i]] for i in range(n_f)]
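A short usage sketch for the MyExtraTree wrapper, assuming the MyClassifier base class, the training/test arrays, and a feature_names list already exist; the parameter values are illustrative assumptions:

# Illustrative usage; the params dict is forwarded verbatim to ExtraTreesClassifier.
clf = MyExtraTree(params={'n_estimators': 200, 'criterion': 'entropy', 'n_jobs': -1})
clf.update_params({'max_features': 'sqrt'})   # rebuilds the wrapped estimator with merged params
clf.fit(Xtrain, ytrain)
p_pos = clf.predict_proba(Xtest)              # probability of the positive class (binary problems)
p_all = clf.predict_proba_multi(Xtest)        # full (n_samples, n_classes) probability matrix
top = clf.list_feature_importance(feature_names, return_list=True)  # print ranking, return indices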
Example 14: _cascade_layer
def _cascade_layer(self, X, y=None, layer=0):
    n_tree = getattr(self, 'n_cascadeRFtree')
    n_cascadeRF = getattr(self, 'n_cascadeRF')
    min_samples = getattr(self, 'min_samples_cascade')

    prf = RandomForestClassifier(
        n_estimators=100, max_features=8,
        bootstrap=True, criterion="entropy", min_samples_split=20,
        max_depth=None, class_weight='balanced', oob_score=True)
    crf = ExtraTreesClassifier(
        n_estimators=100, max_depth=None,
        bootstrap=True, oob_score=True)

    prf_pred = []
    if y is not None:
        # print('Adding/Training Layer, n_layer={}'.format(self.n_layer))
        for irf in range(n_cascadeRF):
            prf.fit(X, y)
            crf.fit(X, y)
            setattr(self, '_casprf{}_{}'.format(self.n_layer, irf), prf)
            setattr(self, '_cascrf{}_{}'.format(self.n_layer, irf), crf)
            probas = prf.oob_decision_function_
            probas += crf.oob_decision_function_
            prf_pred.append(probas)
    elif y is None:
        for irf in range(n_cascadeRF):
            prf = getattr(self, '_casprf{}_{}'.format(layer, irf))
            crf = getattr(self, '_cascrf{}_{}'.format(layer, irf))
            probas = prf.predict_proba(X)
            probas += crf.predict_proba(X)
            prf_pred.append(probas)

    return prf_pred
Example 15: plotImportance
def plotImportance(X, y):
    forest = ExtraTreesClassifier(n_estimators=250,
                                  random_state=0)

    forest.fit(X, y)
    importances = forest.feature_importances_
    std = np.std([tree.feature_importances_ for tree in forest.estimators_],
                 axis=0)
    indices = np.argsort(importances)[::-1]
    n = X.shape[1]

    # Print the feature ranking
    # print("Feature ranking:")
    # for f in range(n):
    #     print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))

    # Plot the feature importances of the forest
    plt.figure(figsize=(20, 15))
    plt.title("Feature importances")
    plt.bar(range(n), importances[indices],
            color="r", yerr=std[indices], align="center")
    plt.xticks(range(n), X.columns[indices], rotation=90)
    plt.xlim([-1, n])
    plt.savefig('featuresel.pdf')