本文整理汇总了Python中xgboost.XGBClassifier.score方法的典型用法代码示例。如果您正苦于以下问题:Python XGBClassifier.score方法的具体用法?Python XGBClassifier.score怎么用?Python XGBClassifier.score使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类xgboost.XGBClassifier的用法示例。
在下文中一共展示了XGBClassifier.score方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: xgboost_classifier
# 需要导入模块: from xgboost import XGBClassifier [as 别名]
# 或者: from xgboost.XGBClassifier import score [as 别名]
def xgboost_classifier(self):
    """Evaluate an XGBClassifier on this object's data.

    Prints three things:
      - the cross-validation scores of a fresh ``XGBClassifier`` on
        ``self.x_data`` / ``self.y_data``,
      - the hold-out accuracy (``score``) after fitting on
        ``self.x_train`` / ``self.y_train`` and scoring on
        ``self.x_test`` / ``self.y_test``,
      - the wall-clock time spent fitting and scoring.

    Returns None; all results go to stdout.
    """
    cls = XGBClassifier()
    # Cross-validated score on the full data set (default CV folds).
    # NOTE: converted from Python-2 print statements to print() calls for
    # consistency with the other examples in this file.
    print('xgboost cross validation score', cross_val_score(cls, self.x_data, self.y_data))
    start_time = time.time()
    cls.fit(self.x_train, self.y_train)
    print('score', cls.score(self.x_test, self.y_test))
    print('time cost', time.time() - start_time)
示例2: XGBClassifier
# Required import: from xgboost import XGBClassifier
# Alias usage shown: xgboost.XGBClassifier.score
# NOTE(review): this fragment is incomplete — the four keyword-argument lines
# below close a train/test splitter call (the opening of that call, the
# enclosing hyper-parameter loop that defines learning_rate / n_estimators /
# max_depth / dataset_repeat, and the source indentation were lost when the
# example was scraped). The `continue` further down belongs to that loop.
n_iter=1,
train_size=0.75,
test_size=0.25,
random_state=dataset_repeat)))
# Split input_data into feature matrices and label vectors; presumably
# 'class' is the label column — confirm against the caller.
training_features = input_data.loc[training_indices].drop('class', axis=1).values
training_classes = input_data.loc[training_indices, 'class'].values
testing_features = input_data.loc[testing_indices].drop('class', axis=1).values
testing_classes = input_data.loc[testing_indices, 'class'].values
# Create and fit the model on the training data
try:
clf = XGBClassifier(learning_rate=learning_rate, n_estimators=n_estimators, max_depth=max_depth)
clf.fit(training_features, training_classes)
testing_score = clf.score(testing_features, testing_classes)
# NOTE(review): bare `except: continue` silently skips any failing parameter
# combination — intentional best-effort grid evaluation, but it also hides
# genuine bugs; consider `except Exception` plus logging.
except:
continue
# Emit one tab-separated result row: dataset name (strip a 7-char suffix,
# presumably '.tsv.gz' — TODO confirm), model name, params, score.
param_string = ''
param_string += 'learning_rate={},'.format(learning_rate)
param_string += 'n_estimators={},'.format(n_estimators)
param_string += 'max_depth={}'.format(max_depth)
out_text = '\t'.join([dataset.split('/')[-1][:-7],
'XGBClassifier',
param_string,
str(testing_score)])
print(out_text)
示例3: train_test_split
# 需要导入模块: from xgboost import XGBClassifier [as 别名]
# 或者: from xgboost.XGBClassifier import score [as 别名]
import pandas as pd

# Titanic survival example: compare RandomForest vs. XGBoost accuracy.
# NOTE(review): this Vanderbilt URL has been retired — TODO confirm a
# working mirror before running.
titanic = pd.read_csv('http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic.txt')
# Use three predictive features and the survival label. Take an explicit
# copy so the imputation below does not warn/fail on a sliced view
# (SettingWithCopyWarning; an error under pandas copy-on-write).
X = titanic[['pclass', 'age', 'sex']].copy()
y = titanic['survived']
# Impute missing ages with the column mean. Assign back rather than
# chained `fillna(..., inplace=True)`, which is deprecated in modern pandas.
X['age'] = X['age'].fillna(X['age'].mean())
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=33)
from sklearn.feature_extraction import DictVectorizer
# One-hot encode the categorical columns by round-tripping through
# per-row dicts. The valid orient value is 'records' (plural);
# 'record' raises ValueError in current pandas.
vec = DictVectorizer(sparse=False)
X_train = vec.fit_transform(X_train.to_dict(orient='records'))
X_test = vec.transform(X_test.to_dict(orient='records'))
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
print('The accuracy of Random Forest Classifier on testing set:', rfc.score(X_test, y_test))
from xgboost import XGBClassifier
xgbc = XGBClassifier()
xgbc.fit(X_train, y_train)
print('The accuracy of eXtreme Gradient Boosting Classifier on testing set:', xgbc.score(X_test, y_test))
示例4: train_test_split
# Required import: from xgboost import XGBClassifier
# Alias usage shown: xgboost.XGBClassifier.score
# NOTE(review): fragment is incomplete — `delimiter=";"` closes a data-loading
# call (presumably numpy loadtxt/genfromtxt) whose opening, along with the
# imports of train_test_split, KFold, accuracy_score, confusion_matrix,
# classification_report, os and DATA_DIR, lies above this chunk. Source
# indentation was also stripped by the scraper.
delimiter=";")
# Last column is the label; everything before it is the feature matrix.
X = dataset[:, 0:-1]
y = dataset[:, -1]
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.3,
random_state=42)
# NOTE(review): KFold(n, n_folds=...) is the pre-0.18 scikit-learn API;
# modern sklearn uses KFold(n_splits=10, ...).split(Xtrain) instead.
kfold = KFold(Xtrain.shape[0], n_folds=10, random_state=42)
best_model = None
best_score = 0.0
# Keep the fitted classifier from the fold with the highest fold score.
for curr_fold, (train_cv, test_cv) in enumerate(kfold):
Xtrain_cv, Xtest_cv, ytrain_cv, ytest_cv = \
Xtrain[train_cv], Xtrain[test_cv], ytrain[train_cv], ytrain[test_cv]
clf = XGBClassifier()
clf.fit(Xtrain_cv, ytrain_cv)
score = clf.score(Xtest_cv, ytest_cv)
print("Fold {:d}, score: {:.3f}".format(curr_fold, score))
if score > best_score:
best_score = score
best_model = clf
# Final evaluation of the best fold model on the held-out test split.
y_ = best_model.predict(Xtest)
print("Accuracy: {:.3f}".format(accuracy_score(ytest, y_)))
print()
print("Confusion Matrix")
print(confusion_matrix(ytest, y_))
print()
print("Classification Report")
# NOTE(review): the `with` block below is truncated — its body (presumably a
# pickle.dump of best_model) lies beyond this chunk.
print(classification_report(ytest, y_))
with open(os.path.join(DATA_DIR, "best-model.pkl"), "wb") as fmod: