This article collects typical usage examples of the Python method xgboost.sklearn.XGBClassifier.n_estimators. If you are wondering what XGBClassifier.n_estimators does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples of the containing class, xgboost.sklearn.XGBClassifier.
Below are 2 code examples of XGBClassifier.n_estimators, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
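Both examples share one idea: on the scikit-learn wrapper, n_estimators is an ordinary attribute, so it can be reassigned after construction (for instance, once early stopping has revealed a better tree count) and the model simply refit. A minimal sketch of the pattern, with illustrative values:

from xgboost.sklearn import XGBClassifier

# Start with a deliberately generous tree budget.
model = XGBClassifier(n_estimators=1000, learning_rate=0.1)

# n_estimators is a plain attribute, so it can be overwritten later,
# e.g. with the tree count that early stopping selected.
model.n_estimators = 150
# model.fit(X, y)  # refit with the adjusted number of trees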
Example 1: eval_fn
# Required import: from xgboost.sklearn import XGBClassifier [as alias]
# Or: from xgboost.sklearn.XGBClassifier import n_estimators [as alias]
def eval_fn(params):
    model = XGBClassifier(n_estimators=n_estimators_max, learning_rate=learning_rate, seed=seed)
    score = 0
    n_estimators = 0
    for tr, va in skf:
        X_tr, y_tr = X_train[tr], y_train[tr]
        X_va, y_va = X_train[va], y_train[va]
        model.set_params(**params)
        # Early stopping on the fold's validation split finds the best tree count.
        model.fit(X_tr, y_tr, eval_set=[(X_va, y_va)], eval_metric='logloss',
                  early_stopping_rounds=50, verbose=False)
        score += model.best_score
        n_estimators += model.best_iteration
    score /= n_folds
    n_estimators = int(n_estimators / n_folds)  # average best tree count across folds
    n_estimators_lst.append(n_estimators)
    result_str = "train:%.4f ntree:%5d " % (score, n_estimators)
    if X_valid is not None:
        # Refit on the full training set with the averaged tree count.
        model.n_estimators = n_estimators
        model.fit(X_train, y_train)
        pr = model.predict_proba(X_valid)[:, 1]
        sc_valid = log_loss(y_valid, pr)
        score_valid.append(sc_valid)
        result_str += "valid:%.4f" % sc_valid
    if verbose:
        print(result_str)
    return score
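Note that eval_fn closes over several names defined outside the snippet (skf, n_folds, X_train, y_train, X_valid, y_valid, n_estimators_max, learning_rate, seed, n_estimators_lst, score_valid, verbose), and its params-in, score-out signature suggests it is meant to be handed to a hyperparameter optimizer such as hyperopt's fmin, though the original context is not shown. A plausible surrounding scope, with hypothetical values, might look like this:

from sklearn.metrics import log_loss
from sklearn.model_selection import StratifiedKFold

# Hypothetical scaffolding assumed by eval_fn; X_train, y_train, X_valid,
# and y_valid are your own data splits (X_valid may be None to skip the
# validation scoring branch).
n_estimators_max, learning_rate, seed = 10000, 0.05, 42
n_folds = 5
skf = list(StratifiedKFold(n_splits=n_folds, shuffle=True,
                           random_state=seed).split(X_train, y_train))
n_estimators_lst, score_valid = [], []
verbose = True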
Example 2: RandomForestClassifier
# Required import: from xgboost.sklearn import XGBClassifier [as alias]
# Or: from xgboost.sklearn.XGBClassifier import n_estimators [as alias]
for train_index, test_index in folds:
    # has to be created here because of warm start
    clf = RandomForestClassifier(n_estimators=10, warm_start=True, n_jobs=-1)
    X_train2, X_test2 = X_train.loc[train_index], X_train.loc[test_index]
    y_train2, y_test2 = y_train[train_index], y_train[test_index]
    X_train2, X_test2 = feature_engineering_extra(X_train2, X_test2, y_train2)
    X_train2 = csr_matrix(X_train2.values)
    X_test2 = csr_matrix(X_test2.values)
    score = 100
    iteration = 0
    for i in range(1000):
        # With warm_start=True, raising n_estimators and refitting adds
        # ten trees to the existing forest rather than retraining it.
        clf.n_estimators = 10 * (i + 1)
        clf.fit(X_train2, y_train2)
        y_pred = clf.predict_proba(X_test2)
        score_tmp = log_loss(y_test2, y_pred)
        if score_tmp < score:
            score = score_tmp
            iteration = i
        if i > iteration + 100:  # stop after 100 rounds without improvement
            break
    print(score, clf.n_estimators)
    scores.append(round(score, 6))
    iterations.append(clf.n_estimators)
scores = np.array(scores)
iterations = np.array(iterations)
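As with the first example, this loop relies on names created earlier (folds, X_train, y_train, scores, iterations, and a project-specific feature_engineering_extra helper that is not reproduced here). A hypothetical setup consistent with the code above, assuming X_train is a pandas DataFrame with a default index and y_train is a NumPy array:

import numpy as np
from scipy.sparse import csr_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import log_loss
from sklearn.model_selection import KFold

# Hypothetical scaffolding; feature_engineering_extra is the author's own
# helper and must be supplied separately.
folds = list(KFold(n_splits=5, shuffle=True, random_state=0).split(X_train))
scores, iterations = [], []

The warm_start=True flag is what makes growing the forest via clf.n_estimators cheap: each fit call trains only the newly requested trees and keeps the existing ones, which is also why the classifier must be recreated at the top of every fold.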