本文整理汇总了Python中sklearn.ensemble.GradientBoostingClassifier.staged_predict_proba方法的典型用法代码示例。如果您正苦于以下问题:Python GradientBoostingClassifier.staged_predict_proba方法的具体用法?Python GradientBoostingClassifier.staged_predict_proba怎么用?Python GradientBoostingClassifier.staged_predict_proba使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sklearn.ensemble.GradientBoostingClassifier的用法示例。
在下文中一共展示了GradientBoostingClassifier.staged_predict_proba方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_staged_predict_proba
# 需要导入模块: from sklearn.ensemble import GradientBoostingClassifier [as 别名]
# 或者: from sklearn.ensemble.GradientBoostingClassifier import staged_predict_proba [as 别名]
def test_staged_predict_proba():
    """Check ``staged_predict_proba`` against ``predict``/``predict_proba``.

    The staged variants yield one result per boosting stage; only the LAST
    stage is required to agree with the non-staged output.
    """
    X, y = datasets.make_hastie_10_2(n_samples=1200, random_state=1)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    clf = GradientBoostingClassifier(n_estimators=20)
    # Calling staged_predict_proba before fit must raise NotFittedError;
    # np.fromiter forces the lazy staged generator to actually execute.
    assert_raises(NotFittedError, lambda X: np.fromiter(
        clf.staged_predict_proba(X), dtype=np.float64), X_test)
    clf.fit(X_train, y_train)
    # Every stage must yield a prediction of the right shape.  The equality
    # check against ``predict`` belongs AFTER the loop (it holds only for the
    # final stage); the extraction had flattened the indentation here, so the
    # structure is restored to match the upstream scikit-learn test.
    for y_pred in clf.staged_predict(X_test):
        assert_equal(y_test.shape, y_pred.shape)
    assert_array_equal(clf.predict(X_test), y_pred)
    # Same contract for probabilities: (n_samples, 2) at every stage, and the
    # last stage equals ``predict_proba``.
    for staged_proba in clf.staged_predict_proba(X_test):
        assert_equal(y_test.shape[0], staged_proba.shape[0])
        assert_equal(2, staged_proba.shape[1])
    assert_array_almost_equal(clf.predict_proba(X_test), staged_proba)
示例2: test_gbm_classifier_backupsklearn
# 需要导入模块: from sklearn.ensemble import GradientBoostingClassifier [as 别名]
# 或者: from sklearn.ensemble.GradientBoostingClassifier import staged_predict_proba [as 别名]
def test_gbm_classifier_backupsklearn(backend: str = 'auto'):
# Compare h2o4gpu's GradientBoostingClassifier wrapper against scikit-learn's
# implementation on the creditcard dataset.  When backend == "sklearn" the
# wrapper is expected to delegate to sklearn, so predictions and fitted
# attributes must match exactly.
# NOTE(review): indentation was lost in extraction -- the assert/print lines
# following the `if backend == "sklearn":` test presumably belong inside that
# branch; confirm against the original h2o4gpu test suite.
df = pd.read_csv("./open_data/creditcard.csv")
# Last column is the label; all other columns are float32 features (C order).
X = np.array(df.iloc[:, :df.shape[1] - 1], dtype='float32', order='C')
y = np.array(df.iloc[:, df.shape[1] - 1], dtype='float32', order='C')
import h2o4gpu
Solver = h2o4gpu.GradientBoostingClassifier
# Run h2o4gpu version of RandomForest Regression
gbm = Solver(backend=backend, random_state=1234)
print("h2o4gpu fit()")
gbm.fit(X, y)
# Run Sklearn version of RandomForest Regression
from sklearn.ensemble import GradientBoostingClassifier
gbm_sk = GradientBoostingClassifier(random_state=1234, max_depth=3)
print("Scikit fit()")
gbm_sk.fit(X, y)
if backend == "sklearn":
# Exact (not approximate) equality on every prediction surface, since both
# paths should run the same underlying sklearn estimator.
assert (gbm.predict(X) == gbm_sk.predict(X)).all() == True
assert (gbm.predict_log_proba(X) == gbm_sk.predict_log_proba(X)).all() == True
assert (gbm.predict_proba(X) == gbm_sk.predict_proba(X)).all() == True
assert (gbm.score(X, y) == gbm_sk.score(X, y)).all() == True
assert (gbm.decision_function(X)[1] == gbm_sk.decision_function(X)[1]).all() == True
# Staged (per-boosting-iteration) outputs compared with tolerance.
assert np.allclose(list(gbm.staged_predict(X)), list(gbm_sk.staged_predict(X)))
assert np.allclose(list(gbm.staged_predict_proba(X)), list(gbm_sk.staged_predict_proba(X)))
assert (gbm.apply(X) == gbm_sk.apply(X)).all() == True
# Fitted attributes must also agree, not just predictions.
print("Estimators")
print(gbm.estimators_)
print(gbm_sk.estimators_)
print("loss")
print(gbm.loss_)
print(gbm_sk.loss_)
assert gbm.loss_.__dict__ == gbm_sk.loss_.__dict__
print("init_")
print(gbm.init)
print(gbm_sk.init)
print("Feature importance")
print(gbm.feature_importances_)
print(gbm_sk.feature_importances_)
assert (gbm.feature_importances_ == gbm_sk.feature_importances_).all() == True
print("train_score_")
print(gbm.train_score_)
print(gbm_sk.train_score_)
assert (gbm.train_score_ == gbm_sk.train_score_).all() == True
示例3: __init__
# 需要导入模块: from sklearn.ensemble import GradientBoostingClassifier [as 别名]
# 或者: from sklearn.ensemble.GradientBoostingClassifier import staged_predict_proba [as 别名]
class GBM:
    """Thin wrapper around ``GradientBoostingClassifier`` that records the
    log-loss of every boosting stage on a given dataset."""

    def __init__(self, n, r):
        # n boosting stages with learning rate r; seed fixed for reproducibility.
        self.n = n
        self.clf = GradientBoostingClassifier(n_estimators=n, learning_rate=r, verbose=False, random_state=241)

    def fit(self, X_train, y_train):
        """Fit the underlying classifier on the training data."""
        self.clf.fit(X_train, y_train)

    def log_loss(self, X, y):
        """Return a list with the log-loss of each boosting stage on (X, y)."""
        losses = [0] * self.n
        stage_probas = self.clf.staged_predict_proba(X)
        # zip truncates to the shorter iterable, so at most self.n stages
        # are consumed (same contract as the original range/zip pairing).
        for stage, proba in zip(range(self.n), stage_probas):
            losses[stage] = log_loss(y, proba)
        return losses
示例4: enumerate
# 需要导入模块: from sklearn.ensemble import GradientBoostingClassifier [as 别名]
# 或者: from sklearn.ensemble.GradientBoostingClassifier import staged_predict_proba [as 别名]
# NOTE(review): continuation of a larger script -- `clf`, X_train/y_train,
# X_test/y_test, `log_loss` and `np` are defined above this excerpt, and the
# final loop is truncated mid-statement.  Indentation was lost in extraction.
clf.fit(X_train, y_train)
#verify log loss
loss_on_test = []
# Per-stage test log-loss computed from the raw decision function, mapped to
# probabilities with a sigmoid.
for i, pred1 in enumerate(clf.staged_decision_function(X_test)):
## print(i)
## print(pred1)
## print(y_test)
x = log_loss(y_test, 1.0/(1.0+np.exp(-pred1)))
## print(x)
loss_on_test.append(x)
# Same per-stage loss via staged_predict_proba -- presumably equivalent to
# the sigmoid-of-decision-function path above; the two minima are printed
# side by side for comparison.
grd2 = clf.staged_predict_proba(X_test)
loss_on_test_proba = []
for i, pred2 in enumerate(grd2):
loss_on_test_proba.append(log_loss(y_test, pred2))
print(min(loss_on_test))
print(min(loss_on_test_proba))
# Index of the best (lowest test-loss) boosting stage.
print(loss_on_test_proba.index(min(loss_on_test_proba)))
loss_on_train = []
for i, pred3 in enumerate(clf.staged_decision_function(X_train)):
示例5: plot
# 需要导入模块: from sklearn.ensemble import GradientBoostingClassifier [as 别名]
# 或者: from sklearn.ensemble.GradientBoostingClassifier import staged_predict_proba [as 别名]
def plot(train_loss, test_loss, fname):
    """Plot per-iteration train/test loss curves and save the figure.

    Parameters
    ----------
    train_loss, test_loss : sequences of per-boosting-iteration loss values;
        test is drawn in red, train in green.
    fname : path of the image file to write.
    """
    import matplotlib
    matplotlib.use('Agg')  # headless backend: write files, never open a window
    import matplotlib.pyplot as plt
    # %matplotlib inline
    plt.figure()
    plt.plot(test_loss, 'r', linewidth=2)
    plt.plot(train_loss, 'g', linewidth=2)
    plt.legend(['test', 'train'])
    plt.savefig(fname)
    # Close the figure: this function is called once per learning rate in the
    # loop below, and un-closed figures accumulate and leak memory.
    plt.close()
# NOTE(review): indentation was lost in extraction -- the lines from
# `clf = ...` through the plot(...) call presumably belong inside the for
# loop; X_train/y_train, X_test/y_test, `log_loss` and `np` are defined
# earlier in the full script.
min_losses = {}
# Sweep over learning rates; record (best test loss, best iteration) per rate.
for index, learning_rate in enumerate([1, 0.5, 0.3, 0.2, 0.1], start=1):
clf = GradientBoostingClassifier(n_estimators=250, learning_rate=learning_rate, verbose=True, random_state=241)
clf.fit(X_train, y_train)
train_pred_iters = clf.staged_predict_proba(X_train)
test_pred_iters = clf.staged_predict_proba(X_test)
train_loss = [ log_loss(y_train, pred) for pred in train_pred_iters]
test_loss = [ log_loss(y_test, pred) for pred in test_pred_iters]
# Boosting iteration with the lowest test loss for this learning rate.
best_iter = np.argmin(test_loss)
min_losses[learning_rate] = (test_loss[best_iter], best_iter)
plot(train_loss, test_loss, 'plots/%d_%.1f.png' % (index, learning_rate))
# based on plots view
with open('q1.txt', 'w') as output:
output.write('overfitting')
# Answer for learning rate 0.2: "<best test loss> <best iteration>".
with open('q2.txt', 'w') as output:
output.write('%.2f %d' % min_losses[0.2])
from sklearn.ensemble import RandomForestClassifier