当前位置: 首页>>代码示例>>Python>>正文


Python XGBClassifier.evals_result方法代码示例

本文整理汇总了Python中xgboost.sklearn.XGBClassifier.evals_result方法的典型用法代码示例。如果您正苦于以下问题:Python XGBClassifier.evals_result方法的具体用法?Python XGBClassifier.evals_result怎么用?Python XGBClassifier.evals_result使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在xgboost.sklearn.XGBClassifier的用法示例。


在下文中一共展示了XGBClassifier.evals_result方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: get_xgboost_classifier

# 需要导入模块: from xgboost.sklearn import XGBClassifier [as 别名]
# 或者: from xgboost.sklearn.XGBClassifier import evals_result [as 别名]
def get_xgboost_classifier(X_train, y_train, X_val, y_val, params=None, tag=""):
    """Train an XGBoost classifier, either via grid search or directly.

    If *params* is None, run a ROC-AUC-scored grid search over tree-shape
    parameters using shuffled CV splits and return the fitted GridSearchCV.
    Otherwise fit a single XGBClassifier with the given parameters, tracking
    AUC on both the training and validation sets, and optionally plot the
    learning curves and feature importances.

    Parameters
    ----------
    X_train, y_train : training data and labels.
    X_val, y_val     : validation data and labels (used as an eval_set only
                       when *params* is given).
    params : dict or None -- keyword arguments forwarded to XGBClassifier.
    tag    : str -- label forwarded to the plotting helpers.

    Returns
    -------
    The fitted GridSearchCV (when params is None) or XGBClassifier.
    """
    param_grid = {'max_depth': [3, 5, 7], 'min_child_weight': [1, 3, 5], 'n_estimators': [50]}

    if params is None:
        xgb = XGBClassifier(
            learning_rate=0.2,
            objective='binary:logistic',
            seed=27)

        t = start("training xgboost ")
        cv = cross_validation.ShuffleSplit(X_train.shape[0], n_iter=10, test_size=0.2, random_state=123)
        clf = grid_search.GridSearchCV(xgb, param_grid, cv=cv, n_jobs=1, scoring='roc_auc')
        clf = clf.fit(X_train, y_train)
        report(t, nitems=10 * len(param_grid))

        # Fixed: the original mixed print() calls with Python-2 print
        # statements in the same function; unified on print() calls.
        print("Best score:{} with scorer {}".format(clf.best_score_, clf.scorer_))
        print("With parameters:")

        best_parameters = clf.best_estimator_.get_params()
        for param_name in sorted(param_grid.keys()):
            print('\t%s: %r' % (param_name, best_parameters[param_name]))
    else:
        clf = XGBClassifier(**params)
        clf.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_val, y_val)], eval_metric='auc', verbose=False)

        if plot_cv_curves:
            # evals_result() maps eval-set name -> metric -> per-iteration
            # values; validation_0 is the train set, validation_1 the
            # held-out validation set (order of eval_set above).
            train = clf.evals_result()['validation_0']['auc']
            val = clf.evals_result()['validation_1']['auc']

            plot_cv_curve(train, val, tag)

        # NOTE(review): 'plot_feature_importance' is read as a module-level
        # flag here and then called as a function below -- if the same name
        # is bound to the function, the flag test is always truthy; confirm
        # which binding is intended.
        if plot_feature_importance:
            plot_feature_importance(clf, tag)

    return clf
开发者ID:joostgp,项目名称:kaggle_ad_detection,代码行数:38,代码来源:analysis_basic.py

示例2: train_test_split

# 需要导入模块: from xgboost.sklearn import XGBClassifier [as 别名]
# 或者: from xgboost.sklearn.XGBClassifier import evals_result [as 别名]

# Split the feature matrix / labels out of the dataframe: every column
# except 'state' is a feature, 'state' is the target.
X, y = data.loc[:, data.columns != 'state'].values, data.loc[:, data.columns == 'state'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

# Undersample the majority class with ClusterCentroids so the training set
# is balanced before fitting; the test split stays untouched.
cc = ClusterCentroids(random_state=0)
os_X, os_y = cc.fit_sample(X_train, y_train)

# XGBoost: fit on the resampled data, recording per-round AUC on both the
# resampled training set and the held-out test set.
clf_XG = XGBClassifier(learning_rate=0.3, min_child_weight=1,
                       max_depth=6, gamma=0, subsample=1, max_delta_step=0, colsample_bytree=1,
                       reg_lambda=1, n_estimators=100, seed=1000, scale_pos_weight=1000)
clf_XG.fit(os_X, os_y, eval_set=[(os_X, os_y), (X_test, y_test)], eval_metric='auc', verbose=False)
evals_result = clf_XG.evals_result()
y_true, y_pred = y_test, clf_XG.predict(X_test)

# F1 score, precision, recall, specificity, G score
print("F1_score : %.4g" % metrics.f1_score(y_true, y_pred))
print("Recall : %.4g" % metrics.recall_score(y_true, y_pred))
recall = metrics.recall_score(y_true, y_pred)
print("Precision : %.4g" % metrics.precision_score(y_true, y_pred))

# Confusion matrix: row 0 holds the negative class, so specificity is
# TN / (TN + FP) = cm[0,0] / (cm[0,0] + cm[0,1]).
cnf_matrix = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
specifity = float(cnf_matrix[0, 0]) / (cnf_matrix[0, 0] + cnf_matrix[0, 1])
print("Specifity: ", specifity)
# BUG FIX: the G-mean is the geometric mean of sensitivity (recall) and
# specificity, sqrt(recall * specificity); the original divided instead
# of multiplying.
print("G score: ", math.sqrt(recall * specifity))
开发者ID:non27,项目名称:The-final-assignment,代码行数:31,代码来源:XGboost+ClusterCentroids.py

示例3: range

# 需要导入模块: from xgboost.sklearn import XGBClassifier [as 别名]
# 或者: from xgboost.sklearn.XGBClassifier import evals_result [as 别名]
        # Repeated stratified K-fold CV: for each of n_iterations shuffles,
        # fit the classifier on each fold and record the per-boosting-round
        # mlogloss on the held-out fold, one df column per (iteration, fold).
        # NOTE(review): this fragment starts mid-scope -- n_iterations,
        # n_folds, clf, df, X_train, y_train come from enclosing code that is
        # not visible here.
        for i in range(n_iterations):
            folds = StratifiedKFold(y_train, n_folds=n_folds, shuffle=True)
            j = 0
            for train_index, test_index in folds:
                # Progress marker: iteration index concatenated with fold index.
                print(str(i)+str(j))
                X_train2, X_test2 = X_train.loc[train_index], X_train.loc[test_index]
                y_train2, y_test2 = y_train[train_index], y_train[test_index]

                # Feature engineering runs inside the fold, fitted on the
                # fold's own training part.
                X_train2, X_test2 = feature_engineering_extra(X_train2, X_test2, y_train2)

                # Convert to sparse CSR matrices before fitting.
                X_train2 = csr_matrix(X_train2.values)
                X_test2 = csr_matrix(X_test2.values)

                clf.fit(X_train2, y_train2, eval_set=[(X_test2, y_test2)], eval_metric='mlogloss', verbose=False)
        
                # evals_result()['validation_0']['mlogloss'] is the per-round
                # held-out-fold metric recorded by the fit() call above.
                df['column' + str(i)+str(j)] = clf.evals_result()['validation_0']['mlogloss']
                df['column' + str(i)+str(j)] = df['column' + str(i)+str(j)].astype(float)
                j = j + 1

        # Average the per-round losses across all iterations*folds columns;
        # the argmin of the summed curve + 1 is the best boosting-round count.
        print('score', df.sum(axis=1).min()/(n_iterations*n_folds))
        print('iteration', df.sum(axis=1).argmin() + 1)

        #print(df.sum(axis=1)/(n_iterations*n_folds))
        for i in df.sum(axis=1)/(n_iterations*n_folds):
            print(i)

    if is_find_n == 1:
        # Feature engineering on the full train/test split.
        X_train, X_test = feature_engineering(df_train, df_test, y_train)
    
        # Hand-tuned hyperparameters (an alternate setting is kept commented
        # out below); the body of this branch continues past this fragment.
        learning_rate, max_depth, ss, cs, gamma, min_child_weight, reg_lambda, reg_alpha = 0.1, 6, 0.7, 0.7, 0, 1, 1, 0
        #learning_rate, max_depth, ss, cs, gamma, min_child_weight, reg_lambda, reg_alpha = 0.1, 4, 0.8, 0.8, 0, 1, 1, 0
开发者ID:mircean,项目名称:ML,代码行数:33,代码来源:module1.py


注:本文中的xgboost.sklearn.XGBClassifier.evals_result方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。