

Python VotingClassifier.estimators_ Attribute Code Examples

This article collects typical usage examples of the Python attribute sklearn.ensemble.VotingClassifier.estimators_. If you are unsure what VotingClassifier.estimators_ is for or how to use it, the code examples selected here should help. You can also explore further usage examples of sklearn.ensemble.VotingClassifier itself.


Three code examples of VotingClassifier.estimators_ are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
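All three examples share one trick: the base estimators are fitted individually, and the already-fitted list is then assigned to the estimators_ attribute so the VotingClassifier can predict without its own fit() ever being called. Below is a minimal, self-contained sketch of the pattern; the toy data and estimator choices are illustrative and not taken from the examples, and it relies on scikit-learn not re-validating how estimators_ was produced.

# Sketch: reuse pre-fitted estimators in a VotingClassifier without fit().
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import LabelEncoder

X, y = make_classification(n_samples=200, random_state=0)
named = [('rf', RandomForestClassifier(random_state=0)),
         ('lr', LogisticRegression(max_iter=1000))]
fitted = [clf.fit(X, y) for _, clf in named]

vote = VotingClassifier(estimators=named, voting='soft')
vote.estimators_ = fitted           # inject the pre-fitted estimators
vote.le_ = LabelEncoder().fit(y)    # fit() would normally set these two,
vote.classes_ = vote.le_.classes_   # and predict() needs them to decode labels
proba = vote.predict_proba(X)       # works without ever calling vote.fit()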

Example 1: createVotingClassifier

# Required import: from sklearn.ensemble import VotingClassifier [as alias]
# Or: from sklearn.ensemble.VotingClassifier import estimators_ [as alias]
# Also needed here: RandomForestClassifier (sklearn.ensemble) and
# shuffle (sklearn.utils).
def createVotingClassifier(n_trees, X, y, depth, min_samples=2, max_feat=0.2,
                           overhead=2.0, voting_='soft'):
    # Each forest is trained on a chunk of size overhead * len(X) / n_trees.
    N_data = int(overhead * len(X) / n_trees)
    print(str(N_data) + ' samples will be used by each classifier')
    estimators_ = []  # (name, clf) pairs for the VotingClassifier constructor
    estimators = []   # the bare fitted classifiers, injected below
    for i in range(n_trees):
        clf = RandomForestClassifier(max_depth=depth, min_samples_leaf=min_samples,
                                     max_features=max_feat)
        if (i + 1) * N_data < len(X):
            # Train on the i-th contiguous chunk of the data.
            clf.fit(X[i * N_data:(i + 1) * N_data], y[i * N_data:(i + 1) * N_data])
        else:
            # Past the end of the data: reshuffle and train on a fresh chunk.
            X, y = shuffle(X, y)
            clf.fit(X[:N_data], y[:N_data])
        estimators_.append((str(i), clf))
        estimators.append(clf)
    # Inject the already-fitted forests so the ensemble is usable without fit().
    tmp = VotingClassifier(estimators=estimators_, voting=voting_)
    tmp.estimators_ = estimators
    return tmp
Developer ID: Alkanoor, Project: refactored-garbanzo, Lines of code: 19, Source file: find_best_parameters.py
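A hypothetical call to the helper above on synthetic data; make_classification and all parameter values here are illustrative, not from the original project.

from sklearn.datasets import make_classification

X, y = make_classification(n_samples=1000, n_features=20, random_state=0)
clf = createVotingClassifier(n_trees=5, X=X, y=y, depth=8,
                             min_samples=2, max_feat=0.2,
                             overhead=2.0, voting_='soft')
proba = clf.predict_proba(X)  # soft-vote average over the 5 forests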

Example 2: fit_voting

# Required import: from sklearn.ensemble import VotingClassifier [as alias]
# Or: from sklearn.ensemble.VotingClassifier import estimators_ [as alias]
# Also needed here: os, logging, numpy as np, basinhopping (scipy.optimize),
# Binarizer and LabelEncoder (sklearn.preprocessing). ExternalModel is
# specific to the senti project and appears to replay predictions
# precomputed into the JSON files referenced below.
def fit_voting(self):
    voting = 'soft'
    names = [
        # 'svm(word_n_grams,char_n_grams,all_caps,hashtags,punctuations,punctuation_last,emoticons,emoticon_last,'
        # 'elongated,negation_count)',
        # 'logreg(w2v_doc)',
        # 'logreg(w2v_word_avg_google)',
        'word2vec_bayes',
        'cnn_word(embedding=google)',
        'rnn_word(embedding=google)',
    ]
    classifiers = [ExternalModel({
        self.val_docs: os.path.join(self.data_dir, 'results/val/{}.json'.format(name)),
        self.test_docs: os.path.join(self.data_dir, 'results/test/{}.json'.format(name)),
    }) for name in names]
    all_scores = []
    for classifier in classifiers:
        scores = classifier.predict_proba(self.val_docs)
        if voting == 'hard':
            scores = Binarizer(threshold=1 / 3).transform(scores)
        all_scores.append(scores)
    all_scores = np.array(all_scores)
    all_scores_first, all_scores_rest = all_scores[0], all_scores[1:]
    le = LabelEncoder().fit(self.classes_)
    val_label_indexes = le.transform(self.val_labels())
    # Assume w_0 = 1, as the argmax is invariant to scaling all weights.
    # Sum the weighted scores over the remaining models first, then add the
    # first model's scores once, and maximize validation accuracy.
    w = basinhopping(
        lambda w_: -(val_label_indexes == np.argmax(
            all_scores_first + (all_scores_rest * w_.reshape((len(w_), 1, 1))).sum(axis=0),
            axis=1)).sum(), np.ones(len(classifiers) - 1), niter=1000,
        minimizer_kwargs=dict(method='L-BFGS-B', bounds=[(0, None)] * (len(classifiers) - 1))
    ).x
    w = np.hstack([[1], w])
    w /= w.sum()
    logging.info('w: {}'.format(w))
    # Inject the pre-fitted models and the label encoder so the ensemble can
    # predict without its own fit() ever being called.
    estimator = VotingClassifier(list(zip(names, classifiers)), voting=voting, weights=w)
    estimator.le_ = le
    estimator.estimators_ = classifiers
    return 'vote({})'.format(','.join(names)), estimator
Developer ID: meshiguge, Project: senti, Lines of code: 41, Source file: senti_models.py
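The weight search above fixes the first weight at 1 (the argmax of a weighted sum is invariant to rescaling all weights) and runs basinhopping over the remaining weights only. Here is a self-contained sketch of the same search on random stand-in scores; all data and dimensions are illustrative.

import numpy as np
from scipy.optimize import basinhopping

rng = np.random.default_rng(0)
n_models, n_val, n_classes = 3, 500, 3
# Stand-in for each model's class probabilities on the validation set.
all_scores = rng.dirichlet(np.ones(n_classes), size=(n_models, n_val))
val_label_indexes = rng.integers(0, n_classes, size=n_val)

first, rest = all_scores[0], all_scores[1:]

def neg_accuracy(w_):
    # Blend: first model at weight 1, remaining models at weights w_.
    blended = first + (rest * w_.reshape((len(w_), 1, 1))).sum(axis=0)
    return -(val_label_indexes == np.argmax(blended, axis=1)).sum()

res = basinhopping(neg_accuracy, np.ones(n_models - 1), niter=100,
                   minimizer_kwargs=dict(method='L-BFGS-B',
                                         bounds=[(0, None)] * (n_models - 1)))
w = np.hstack([[1], res.x])
w /= w.sum()  # normalized ensemble weights, first model included
print(w)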

Example 3: fit

# Required import: from sklearn.ensemble import VotingClassifier [as alias]
# Or: from sklearn.ensemble.VotingClassifier import estimators_ [as alias]
# Also needed here: RandomForestClassifier, AdaBoostClassifier (sklearn.ensemble),
# LogisticRegression (sklearn.linear_model), log_loss (sklearn.metrics).
    def fit(self):
        clf_list = []
        # # KNN
        # print("KNN")
        # knn = KNeighborsClassifier(n_neighbors=35, weights='distance', leaf_size=2)
        # print("Fitting KNN")
        # knn.fit(self.X_train, self.y_train)
        # print('KNN {score}'.format(score=log_loss(self.y_test, knn.predict_proba(self.X_test))))
        # self.clfs['knn'] = knn
        # clf_list.append(knn)

        # Random forests
        print("Random forest with gini")
        rfc = RandomForestClassifier(n_estimators=43,
                                     criterion='gini',
                                     random_state=4141,
                                     n_jobs=-1,
                                     max_depth=21,
                                     max_features=0.12)
        print("Fitting random forest with gini")
        rfc.fit(self.X_train, self.y_train)
        print('RFC LogLoss {score}'.format(score=log_loss(self.y_test, rfc.predict_proba(self.X_test))))
        self.clfs['rfc'] = rfc
        clf_list.append(rfc)

        print("Random forest with entropy")
        rfc2 = RandomForestClassifier(n_estimators=80,
                                      criterion='entropy',
                                      random_state=1337,
                                      n_jobs=-1,
                                      max_depth=36,
                                      max_features=0.06)
        print("Fitting random forest with entropy")
        rfc2.fit(self.X_train, self.y_train)
        print('RFC2 LogLoss {score}'.format(score=log_loss(self.y_test, rfc2.predict_proba(self.X_test))))
        self.clfs['rfc2'] = rfc2
        clf_list.append(rfc2)

        # Logistic regression
        print("Logistic regression")
        logreg = LogisticRegression(C=1.05, penalty='l2')
        print("Fitting logistic regression")
        logreg.fit(self.X_train, self.y_train)
        print('LR LogLoss {score}'.format(score=log_loss(self.y_test, logreg.predict_proba(self.X_test))))
        self.clfs['lr'] = logreg
        clf_list.append(logreg)

        # # Gradient boosting
        # gbt1 = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=1, random_state=0)
        # print("Fitting gradient boosting tree")
        # gbt1.fit(self.X_train, self.y_train)
        # print('Gbt1 LogLoss {score}'.format(score=log_loss(self.y_test, gbt1.predict_proba(self.X_test))))
        # self.clfs['gbt1'] = gbt1
        # clf_list.append(gbt1)

        # # Bad performance
        # # Multinomial Naive Bayes
        # print("Multinomial naive bayes")
        # mnb = MultinomialNB(fit_prior=False, alpha=0.25)
        # print("Fitting multinomial naive bayes")
        # mnb.fit(self.X_train, self.y_train)
        # print('MNB {score}'.format(score=log_loss(self.y_test, mnb.predict_proba(self.X_test))))
        # self.clfs['mnb'] = mnb
        # clf_list.append(mnb)

        # AdaBoost
        print("AdaBoost trees")
        abc = AdaBoostClassifier(n_estimators=100, learning_rate=0.5)
        print("Fitting AdaBoost trees")
        abc.fit(self.X_train, self.y_train)
        print('ABC {score}'.format(score=log_loss(self.y_test, abc.predict_proba(self.X_test))))
        self.clfs['abc'] = abc
        clf_list.append(abc)

        # Ensemble the models: inject the pre-fitted classifiers so the
        # VotingClassifier can predict without its own fit() being called.
        # Note clf_list's order (rfc, rfc2, logreg, abc) differs from the
        # declared order below, which is harmless here only because the
        # first three weights are all equal.
        eclf3 = VotingClassifier(estimators=[('lr', logreg), ('rf', rfc), ('rf2', rfc2), ('abc', abc)],
                                 voting='soft', weights=[2, 2, 2, 1])
        eclf3.estimators_ = clf_list
        print("Dig into the voting classifier")
        innerClfs = eclf3.estimators_
        print("Check estimators")
        print(innerClfs)
        print('Ensemble LogLoss {score}'.format(score=log_loss(self.y_test, eclf3.predict_proba(self.X_test))))
        self.ensembleClf = eclf3
        print("Ensemble fitting finished")
Developer ID: jiacheliu3, Project: FypShared, Lines of code: 85, Source file: tuning.py
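One caveat about Example 3 (and Example 1): assigning estimators_ bypasses fit(), so the le_ label encoder and classes_ attribute that fit() would normally populate are missing. predict_proba works, but predict would fail when it tries to decode class indices back to labels. Example 2 avoids this by setting le_ explicitly; a hypothetical fix for eclf3, under the assumption of a typical scikit-learn release, would be:

from sklearn.preprocessing import LabelEncoder

# Hypothetical addition: set the attributes fit() would have populated,
# so that eclf3.predict() can map class indices back to the original labels.
eclf3.le_ = LabelEncoder().fit(self.y_train)
eclf3.classes_ = eclf3.le_.classes_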


Note: The sklearn.ensemble.VotingClassifier.estimators_ examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors, and distribution and use must comply with each project's license. Please do not reproduce without permission.