This article collects typical usage examples of the Python method sklearn.ensemble.BaggingClassifier. If you are wondering what ensemble.BaggingClassifier does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from its containing module, sklearn.ensemble.
The following shows 15 code examples of ensemble.BaggingClassifier, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
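Before the examples, here is a minimal, self-contained sketch of the typical BaggingClassifier workflow. The dataset and parameter values are illustrative, not taken from the examples below; also note that scikit-learn 1.2 deprecated the base_estimator parameter in favour of estimator, while the examples on this page use the older name.

from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

iris = load_iris()
X_train, X_test, y_train, y_test = train_test_split(
    iris.data, iris.target, random_state=0)

# Bag 10 decision trees, each fitted on a bootstrap sample of the training set.
clf = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
                        n_estimators=10, random_state=0)
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))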
Example 1: test_classification
# Required import: from sklearn import ensemble [as alias]
# Or: from sklearn.ensemble import BaggingClassifier [as alias]
def test_classification():
# Check classification for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [1, 2, 4],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyClassifier(),
Perceptron(tol=1e-3),
DecisionTreeClassifier(),
KNeighborsClassifier(),
SVC(gamma="scale")]:
for params in grid:
BaggingClassifier(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
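The test above only smoke-tests fit/predict and discards the result. If you adapt it into an experiment, a small variation (illustrative, not part of the original test) can record the accuracy of each grid point, reusing the grid and splits defined above:

# Sketch: score each parameter combination instead of discarding the predictions.
for params in grid:
    clf = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
                            random_state=0, **params)
    score = clf.fit(X_train, y_train).score(X_test, y_test)
    print(params, round(score, 3))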
Example 2: test_warm_start
# Required import: from sklearn import ensemble [as alias]
# Or: from sklearn.ensemble import BaggingClassifier [as alias]
def test_warm_start(random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = BaggingClassifier(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = BaggingClassifier(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
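The warm-start behaviour verified here is also useful in practice: when n_estimators grows, only the missing estimators are fitted, so an ensemble can be enlarged in stages and evaluated cheaply. A minimal sketch, reusing X and y from the test above:

clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=42)
for n in (5, 10, 20):
    clf.set_params(n_estimators=n)  # only the newly added estimators get fitted
    clf.fit(X, y)
    print(n, clf.score(X, y))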
Example 3: test_warm_start_equal_n_estimators
# Required import: from sklearn import ensemble [as alias]
# Or: from sklearn.ensemble import BaggingClassifier [as alias]
def test_warm_start_equal_n_estimators():
# Test that nothing happens when fitting without increasing n_estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
    # Modify X_train with nonsense values; this should not change anything.
X_train += 1.
assert_warns_message(UserWarning,
"Warm-start fitting without increasing n_estimators does not",
clf.fit, X_train, y_train)
assert_array_equal(y_pred, clf.predict(X_test))
Example 4: test_warm_start_equivalence
# Required import: from sklearn import ensemble [as alias]
# Or: from sklearn.ensemble import BaggingClassifier [as alias]
def test_warm_start_equivalence():
# warm started classifier with 5+5 estimators should be equivalent to
# one classifier with 10 estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf_ws = BaggingClassifier(n_estimators=5, warm_start=True,
random_state=3141)
clf_ws.fit(X_train, y_train)
clf_ws.set_params(n_estimators=10)
clf_ws.fit(X_train, y_train)
y1 = clf_ws.predict(X_test)
clf = BaggingClassifier(n_estimators=10, warm_start=False,
random_state=3141)
clf.fit(X_train, y_train)
y2 = clf.predict(X_test)
assert_array_almost_equal(y1, y2)
Example 5: main
# Required import: from sklearn import ensemble [as alias]
# Or: from sklearn.ensemble import BaggingClassifier [as alias]
def main():
    # `inputs` is a path to an .npz archive, defined elsewhere in the original script.
    indata = np.load(inputs)
training_data = indata['data_training']
training_labels = indata['label_training']
validation_data = indata['data_val']
validation_labels = indata['label_val']
ts = range(1,11)
sampling_rates = [round(0.1*t, 1) for t in ts]
forest_sizes = [10, 20, 50, 100]
for sampling_rate in sampling_rates:
legend_label = 'sampling rate='+str(sampling_rate)
accuracy_results = []
for forest_size in forest_sizes:
rf_clf = ensemble.BaggingClassifier(n_estimators=forest_size, max_samples=sampling_rate)
rf_clf.fit(training_data, training_labels)
predictions = rf_clf.predict(validation_data)
accuracy = metrics.accuracy_score(validation_labels, predictions)
accuracy_results.append(accuracy)
plt.plot(range(len(forest_sizes)), accuracy_results, label=legend_label)
plt.xticks(range(len(forest_sizes)), forest_sizes, size='small')
plt.legend()
plt.show()
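Two details worth noting here: max_samples accepts either a float in (0, 1], read as a fraction of the training set, or an int, read as an absolute sample count, and the sweep above relies on the float form. Also, the name rf_clf is a slight misnomer: with the default base estimator and max_features, this is plain bagging of decision trees rather than a random forest, which would additionally subsample features at each split.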
Example 6: main
# Required import: from sklearn import ensemble [as alias]
# Or: from sklearn.ensemble import BaggingClassifier [as alias]
def main():
# prepare data
trainingSet=[]
testSet=[]
accuracy = 0.0
split = 0.25
loadDataset('../Dataset/LDAdata.csv', split, trainingSet, testSet)
print('Train set: ' + repr(len(trainingSet)))
print('Test set: ' + repr(len(testSet)))
trainData = np.array(trainingSet)[:,0:np.array(trainingSet).shape[1] - 1]
columns = trainData.shape[1]
X = np.array(trainData)
y = np.array(trainingSet)[:,columns]
clf = BaggingClassifier(LDA())
clf.fit(X, y)
testData = np.array(testSet)[:,0:np.array(trainingSet).shape[1] - 1]
X_test = np.array(testData)
y_test = np.array(testSet)[:,columns]
accuracy = clf.score(X_test,y_test)
accuracy *= 100
print("Accuracy %:",accuracy)
Example 7: main
# Required import: from sklearn import ensemble [as alias]
# Or: from sklearn.ensemble import BaggingClassifier [as alias]
def main():
# prepare data
trainingSet=[]
testSet=[]
accuracy = 0.0
split = 0.25
loadDataset('../Dataset/combined.csv', split, trainingSet, testSet)
    print('Train set: ' + repr(len(trainingSet)))
    print('Test set: ' + repr(len(testSet)))
# generate predictions
predictions=[]
trainData = np.array(trainingSet)[:,0:np.array(trainingSet).shape[1] - 1]
columns = trainData.shape[1]
X = np.array(trainData)
y = np.array(trainingSet)[:,columns]
clf = BaggingClassifier(QDA())
clf.fit(X, y)
testData = np.array(testSet)[:,0:np.array(trainingSet).shape[1] - 1]
X_test = np.array(testData)
y_test = np.array(testSet)[:,columns]
accuracy = clf.score(X_test,y_test)
accuracy *= 100
print("Accuracy %:",accuracy)
Example 8: main
# Required import: from sklearn import ensemble [as alias]
# Or: from sklearn.ensemble import BaggingClassifier [as alias]
def main():
# prepare data
trainingSet=[]
testSet=[]
accuracy = 0.0
split = 0.25
loadDataset('../Dataset/combined.csv', split, trainingSet, testSet)
    print('Train set: ' + repr(len(trainingSet)))
    print('Test set: ' + repr(len(testSet)))
# generate predictions
predictions=[]
trainData = np.array(trainingSet)[:,0:np.array(trainingSet).shape[1] - 1]
columns = trainData.shape[1]
X = np.array(trainData)
y = np.array(trainingSet)[:,columns]
    clf = BaggingClassifier(KNN(n_neighbors=10, weights='uniform', algorithm='auto',
                                leaf_size=10, p=1, metric='minkowski',
                                metric_params=None, n_jobs=1))
clf.fit(X, y)
testData = np.array(testSet)[:,0:np.array(trainingSet).shape[1] - 1]
X_test = np.array(testData)
y_test = np.array(testSet)[:,columns]
accuracy = clf.score(X_test,y_test)
accuracy *= 100
print("Accuracy %:",accuracy)
Example 9: main
# Required import: from sklearn import ensemble [as alias]
# Or: from sklearn.ensemble import BaggingClassifier [as alias]
def main():
# prepare data
trainingSet=[]
testSet=[]
accuracy = 0.0
split = 0.25
loadDataset('../Dataset/combined.csv', split, trainingSet, testSet)
    print('Train set: ' + repr(len(trainingSet)))
    print('Test set: ' + repr(len(testSet)))
# generate predictions
predictions=[]
trainData = np.array(trainingSet)[:,0:np.array(trainingSet).shape[1] - 1]
columns = trainData.shape[1]
X = np.array(trainData)
y = np.array(trainingSet)[:,columns]
    # degree, gamma, and coef0 are ignored by the linear kernel; listed for completeness.
    clf = BaggingClassifier(SVC(C=1.0, kernel='linear', degree=5, gamma='auto',
                                coef0=0.0, shrinking=True, probability=False,
                                tol=0.001, cache_size=200, class_weight=None,
                                verbose=False, max_iter=-1, random_state=None))
clf.fit(X, y)
testData = np.array(testSet)[:,0:np.array(trainingSet).shape[1] - 1]
X_test = np.array(testData)
y_test = np.array(testSet)[:,columns]
accuracy = clf.score(X_test,y_test)
accuracy *= 100
print("Accuracy %:",accuracy)
Example 10: __init__
# Required import: from sklearn import ensemble [as alias]
# Or: from sklearn.ensemble import BaggingClassifier [as alias]
def __init__(self,
base_classifier=None,
n_classifiers=100,
combination_rule='majority_vote'):
self.base_classifier = base_classifier
self.n_classifiers = n_classifiers
# using the sklearn implementation of bagging for now
self.sk_bagging = BaggingClassifier(base_estimator=base_classifier,
n_estimators=n_classifiers,
max_samples=1.0,
max_features=1.0)
self.ensemble = Ensemble()
self.combiner = Combiner(rule=combination_rule)
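This wrapper, apparently from an ensemble library that provides Ensemble and Combiner classes (the API resembles the brew library), delegates bootstrap training to scikit-learn's BaggingClassifier, as the inline comment says, while keeping its own combination rule. With max_samples=1.0 and max_features=1.0, each bootstrap sample is the size of the full training set and uses every feature, which is the textbook bagging configuration.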
Example 11: test_classification
# Required import: from sklearn import ensemble [as alias]
# Or: from sklearn.ensemble import BaggingClassifier [as alias]
def test_classification():
# Check classification for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [1, 2, 4],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyClassifier(),
Perceptron(tol=1e-3),
DecisionTreeClassifier(),
KNeighborsClassifier(),
SVC()]:
for params in grid:
BaggingClassifier(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
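This example is nearly identical to Example 1; the only difference is that SVC() is constructed without gamma="scale", relying on the default of the scikit-learn version under test.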
Example 12: test_base
# Required import: from sklearn import ensemble [as alias]
# Or: from sklearn.ensemble import BaggingClassifier [as alias]
def test_base():
# Check BaseEnsemble methods.
ensemble = BaggingClassifier(
base_estimator=Perceptron(tol=1e-3, random_state=None), n_estimators=3)
iris = load_iris()
ensemble.fit(iris.data, iris.target)
ensemble.estimators_ = [] # empty the list and create estimators manually
ensemble._make_estimator()
random_state = np.random.RandomState(3)
ensemble._make_estimator(random_state=random_state)
ensemble._make_estimator(random_state=random_state)
ensemble._make_estimator(append=False)
assert_equal(3, len(ensemble))
assert_equal(3, len(ensemble.estimators_))
assert isinstance(ensemble[0], Perceptron)
assert_equal(ensemble[0].random_state, None)
assert isinstance(ensemble[1].random_state, int)
assert isinstance(ensemble[2].random_state, int)
assert_not_equal(ensemble[1].random_state, ensemble[2].random_state)
np_int_ensemble = BaggingClassifier(base_estimator=Perceptron(tol=1e-3),
n_estimators=np.int32(3))
np_int_ensemble.fit(iris.data, iris.target)
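Note that _make_estimator is a private BaseEnsemble helper (the leading underscore marks internal API), so building estimators_ by hand like this belongs in scikit-learn's own test suite rather than in user code. The random_state assertions confirm that passing a shared RandomState instance still yields distinct integer seeds for the individual estimators.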
Example 13: test_base_zero_n_estimators
# Required import: from sklearn import ensemble [as alias]
# Or: from sklearn.ensemble import BaggingClassifier [as alias]
def test_base_zero_n_estimators():
# Check that instantiating a BaseEnsemble with n_estimators<=0 raises
# a ValueError.
ensemble = BaggingClassifier(base_estimator=Perceptron(tol=1e-3),
n_estimators=0)
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be greater than zero, got 0.",
ensemble.fit, iris.data, iris.target)
Example 14: test_base_not_int_n_estimators
# Required import: from sklearn import ensemble [as alias]
# Or: from sklearn.ensemble import BaggingClassifier [as alias]
def test_base_not_int_n_estimators():
# Check that instantiating a BaseEnsemble with a string as n_estimators
# raises a ValueError demanding n_estimators to be supplied as an integer.
string_ensemble = BaggingClassifier(base_estimator=Perceptron(tol=1e-3),
n_estimators='3')
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be an integer",
string_ensemble.fit, iris.data, iris.target)
float_ensemble = BaggingClassifier(base_estimator=Perceptron(tol=1e-3),
n_estimators=3.0)
assert_raise_message(ValueError,
"n_estimators must be an integer",
float_ensemble.fit, iris.data, iris.target)
Example 15: test_oob_score_classification
# Required import: from sklearn import ensemble [as alias]
# Or: from sklearn.ensemble import BaggingClassifier [as alias]
def test_oob_score_classification():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
for base_estimator in [DecisionTreeClassifier(), SVC(gamma="scale")]:
clf = BaggingClassifier(base_estimator=base_estimator,
n_estimators=100,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingClassifier(base_estimator=base_estimator,
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
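Outside the test suite, the same mechanism gives a validation estimate without a held-out set. A minimal sketch reusing the split from the test above (parameter values are illustrative):

clf = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
                        n_estimators=100, bootstrap=True, oob_score=True,
                        random_state=0)
clf.fit(X_train, y_train)
# Out-of-bag accuracy, computed only on samples each estimator never saw.
print(clf.oob_score_)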