This page collects typical usage examples of the Python method xgboost.sklearn.XGBClassifier.set_params. If you are unsure what XGBClassifier.set_params does, how to call it, or want to see it used in context, the curated code examples below should help. You can also explore further usage examples of the class it belongs to, xgboost.sklearn.XGBClassifier.
The following shows 4 code examples of XGBClassifier.set_params, sorted by popularity by default.
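Before the examples, a quick orientation: set_params comes from scikit-learn's BaseEstimator interface, which XGBClassifier implements. It takes hyper-parameters as keyword arguments, updates them on the existing estimator in place, and returns the estimator, which is why the examples below reconfigure a single XGBClassifier inside tuning loops instead of constructing a new object per trial. A minimal sketch (the parameter values here are illustrative only, not taken from the examples):

# Minimal sketch of set_params; parameter values are illustrative only.
from xgboost.sklearn import XGBClassifier

model = XGBClassifier(n_estimators=100)
model.set_params(max_depth=4, learning_rate=0.05)     # updates in place, returns the estimator
params = {'subsample': 0.8, 'colsample_bytree': 0.7}
model.set_params(**params)                            # dict unpacking, as used in the examples
print(model.get_params()['max_depth'])                # -> 4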
Example 1: eval_fn
# Required import: from xgboost.sklearn import XGBClassifier [as alias]
# Or: from xgboost.sklearn.XGBClassifier import set_params [as alias]
def eval_fn(params):
    # n_estimators_max, learning_rate, seed, skf, X_train/y_train, n_folds,
    # X_valid/y_valid, score_valid, n_estimators_lst and verbose all come
    # from the enclosing scope.
    model = XGBClassifier(n_estimators=n_estimators_max, learning_rate=learning_rate, seed=seed)
    score = 0
    n_estimators = 0
    for tr, va in skf:
        X_tr, y_tr = X_train[tr], y_train[tr]
        X_va, y_va = X_train[va], y_train[va]
        model.set_params(**params)
        model.fit(X_tr, y_tr, eval_set=[(X_va, y_va)], eval_metric='logloss',
                  early_stopping_rounds=50, verbose=False)
        score += model.best_score
        n_estimators += model.best_iteration
    score /= n_folds
    n_estimators //= n_folds  # integer division keeps n_estimators an int
    n_estimators_lst.append(n_estimators)
    result_str = "train:%.4f ntree:%5d " % (score, n_estimators)
    if X_valid is not None:
        # Refit on the full training set with the averaged tree count
        model.n_estimators = n_estimators
        model.fit(X_train, y_train)
        pr = model.predict_proba(X_valid)[:, 1]
        sc_valid = log_loss(y_valid, pr)
        score_valid.append(sc_valid)
        result_str += "valid:%.4f" % sc_valid
    if verbose:
        print(result_str)
    return score
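eval_fn takes a parameter dict and returns a scalar loss, which matches the objective contract of hyperopt-style optimizers. The original snippet does not show the driver; as a hedged sketch only, assuming hyperopt is available and the closure variables eval_fn relies on are in scope, it could be wired up like this:

# Hedged sketch, not from the original: driving eval_fn with hyperopt.
from hyperopt import fmin, tpe, hp

space = {
    'max_depth': hp.choice('max_depth', [4, 6, 8, 10]),
    'min_child_weight': hp.quniform('min_child_weight', 1, 10, 1),
    'subsample': hp.uniform('subsample', 0.5, 1.0),
}
best = fmin(fn=eval_fn, space=space, algo=tpe.suggest, max_evals=50)
print(best)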
Example 2: main
# Required import: from xgboost.sklearn import XGBClassifier [as alias]
# Or: from xgboost.sklearn.XGBClassifier import set_params [as alias]
def main():
    # args (parsed CLI arguments) and best_dicts (the tuned hyper-parameters)
    # are defined elsewhere in the script.
    data_train = pd.read_csv(args.train_dataset)
    X_train = data_train.drop(['Id', 'Class'], axis=1)
    y_train = data_train.loc[:, 'Class']
    data_test = pd.read_csv(args.test_dataset)
    X_test = data_test.drop(['Id'], axis=1)
    Id = data_test.loc[:, 'Id']
    clf = XGBClassifier()
    clf.set_params(**best_dicts)
    clf.fit(X_train, y_train)
    prediction = clf.predict_proba(X_test)
    # One probability column per class (a nine-class problem here)
    columns = ['Prediction' + str(i) for i in range(1, 10)]
    prediction = pd.DataFrame(prediction, columns=columns)
    results = pd.concat([Id, prediction], axis=1)
    return (clf, results)
Example 3: XGBClassifier
# Required import: from xgboost.sklearn import XGBClassifier [as alias]
# Or: from xgboost.sklearn.XGBClassifier import set_params [as alias]
# x (a list of feature columns to drop) and the train/test DataFrames
# are prepared earlier in the script.
train.drop(x, axis=1, inplace=True)
test.drop(x, axis=1, inplace=True)
y_train = train['TARGET'].values
X_train = train.drop(['ID', 'TARGET'], axis=1).values
y_test = test['ID']
X_test = test.drop(['ID'], axis=1).values
xgb1 = XGBClassifier(
    learning_rate=0.1,
    n_estimators=600,
    max_depth=5,
    min_child_weight=1,
    gamma=0,
    subsample=0.6815,
    colsample_bytree=0.701,
    objective='binary:logistic',
    nthread=4,
    scale_pos_weight=1,
    seed=27)
# Tune n_estimators: xgb.cv emits one row per boosting round and, with early
# stopping, truncates at the best round, so cvresult.shape[0] is the tuned count.
# (show_progress is the old xgb.cv flag; newer xgboost versions call it verbose_eval.)
xgtrain = xgb.DMatrix(X_train, label=y_train)
cvresult = xgb.cv(xgb1.get_xgb_params(), xgtrain, num_boost_round=xgb1.get_params()['n_estimators'],
                  nfold=5, metrics=['auc'], early_stopping_rounds=50, show_progress=False)
xgb1.set_params(n_estimators=cvresult.shape[0])
xgb1.fit(X_train, y_train, eval_metric='auc')
output = xgb1.predict_proba(X_test)[:, 1]
submission = pd.DataFrame({"ID": y_test, "TARGET": output})
submission.to_csv("submission.csv", index=False)
Example 4: StratifiedShuffleSplit
# Required import: from xgboost.sklearn import XGBClassifier [as alias]
# Or: from xgboost.sklearn.XGBClassifier import set_params [as alias]
# (The source snippet begins mid-statement: the line below closes a
#  StratifiedShuffleSplit(...) call whose result is iterated as sss_train.
#  This is the old, pre-0.18 sklearn API, where the splitter is directly iterable.)
                                     train_size=n_train, random_state=123)
for idx, ignore in sss_train:
    # Subsample a stratified training set; X, target and train_idx are defined earlier
    X_train = X[train_idx][idx]
    y_train = target[train_idx][idx]
#
# 2. For each candidate parameter set, tune n_estimators by inner CV with early stopping
sss_train_inner = StratifiedShuffleSplit(y_train, n_iter=n_iter_cv, test_size=.1,
                                         random_state=456)
model = XGBClassifier(n_estimators=1000, max_depth=10, subsample=.8, seed=987)
params_lst_optimized = []
for params in xgb_params_lst:
    n_estimators = 0
    for tr, va in sss_train_inner:
        X_tr, y_tr = X_train[tr], y_train[tr]
        X_va, y_va = X_train[va], y_train[va]
        model.set_params(**params)
        model.fit(X_tr, y_tr, eval_set=[(X_va, y_va)], eval_metric="mlogloss",
                  early_stopping_rounds=50, verbose=False)
        n_estimators += model.best_iteration
    sc = params.copy()
    sc.update({'n_estimators': n_estimators // n_iter_cv})  # integer average across splits
    params_lst_optimized.append(sc)
print('Step 2 Done.', datetime.now() - t0)
# 3. Refit on the full training set under several seeds and predict
model = XGBClassifier(max_depth=10, subsample=.8)
for params in params_lst_optimized:
    for seed_train in range(100, 100 + n_iter_pred):
        params.update({'seed': seed_train})
        model.set_params(**params)
        model.fit(X_train, y_train)
        pr = model.predict_proba(X_test)