This article collects typical usage examples of Python's sklearn.tree.DecisionTreeClassifier. If you are wondering what tree.DecisionTreeClassifier does, how to use it, or where it applies, the curated examples below may help. You can also explore further usage examples of its containing module, sklearn.tree.
The following presents 15 code examples of tree.DecisionTreeClassifier, ordered by popularity by default.
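Before the collected examples, here is a minimal self-contained sketch of the basic fit/predict workflow on the bundled iris dataset (parameter choices are illustrative, not taken from the examples below):

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

iris = load_iris()
X_train, X_test, y_train, y_test = train_test_split(
    iris.data, iris.target, random_state=0)

# Fit a shallow tree and score it on held-out data
clf = DecisionTreeClassifier(max_depth=3, random_state=0)
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))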
Example 1: Train
# Required import: from sklearn import tree
# Or: from sklearn.tree import DecisionTreeClassifier
# Also needed here: from sklearn.ensemble import AdaBoostClassifier
def Train(data, modelcount, censhu, yanzhgdata):
model = AdaBoostClassifier(DecisionTreeClassifier(max_depth=censhu),
algorithm="SAMME",
n_estimators=modelcount, learning_rate=0.8)
model.fit(data[:, :-1], data[:, -1])
    # Predictions on the training data
    train_out = model.predict(data[:, :-1])
    # Compute the training error (fmse is defined elsewhere in the source file)
    train_mse = fmse(data[:, -1], train_out)[0]
    # Predictions on the validation data
    add_yan = model.predict(yanzhgdata[:, :-1])
    # Compute the f1 metric on the validation data
    add_mse = fmse(yanzhgdata[:, -1], add_yan)[0]
print(train_mse, add_mse)
return train_mse, add_mse
# Function that finalizes the chosen combination
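Train depends on a helper fmse that is not shown in this snippet; with a stand-in metric (an assumption, the real fmse may differ) a toy call looks like this:

import numpy as np
from sklearn.metrics import f1_score

def fmse(y_true, y_pred):
    # Stand-in for the fmse helper used above (not shown in the source);
    # returns a tuple so that fmse(...)[0] works as in Train
    return (f1_score(y_true, y_pred),)

rng = np.random.RandomState(0)
# Hypothetical toy arrays: 4 features plus a binary label in the last column
data = np.hstack([rng.randn(100, 4), rng.randint(0, 2, (100, 1))])
yanzhgdata = np.hstack([rng.randn(40, 4), rng.randint(0, 2, (40, 1))])

# 50 boosting rounds over depth-4 trees
train_err, val_err = Train(data, 50, 4, yanzhgdata)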
Example 2: predict
# Required import: from sklearn import tree
# Or: from sklearn.tree import DecisionTreeClassifier
def predict(self, fit=None, features=None, probabilities=False):
    '''
    Predict the class labels (e.g., endmember types) based on an existing
    tree fit and new predictive features. Arguments:
        fit         The result of tree.DecisionTreeClassifier.fit();
                    uses the last fit model if None.
        features    The new X array / new predictive features to use;
                    should be (p x n): n samples, each with p features.
    '''
if fit is None: fit = self.last_fit
if features is None: features = self.x_features_array
if probabilities:
shp = self.y_raster.shape
        # predict_proba returns (n_samples, n_labels); transpose and reshape
        # to an (n_labels, rows, cols) probability cube
        return fit.predict_proba(features.T).T.reshape(
            (self.n_labels, shp[1], shp[2]))
return fit.predict(features.T).reshape(self.y_raster.shape)
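The (p x n) orientation means callers store one column per sample, so the method transposes before calling scikit-learn, which expects (n_samples, n_features). A standalone illustration of that convention (shapes here are hypothetical):

import numpy as np
from sklearn.tree import DecisionTreeClassifier

p, n = 6, 20  # 6 features (e.g., bands), 20 samples (e.g., pixels)
X_pn = np.random.randn(p, n)        # features stored as (p x n), per the docstring
y = np.random.randint(0, 3, n)

clf = DecisionTreeClassifier(random_state=0).fit(X_pn.T, y)  # sklearn wants (n x p)
labels = clf.predict(X_pn.T)         # shape (n,)
proba = clf.predict_proba(X_pn.T).T  # shape (n_classes, n) after the transpose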
Example 3: recspre
# Required import: from sklearn import tree
# Or: from sklearn.tree import DecisionTreeClassifier
# Also needed here: from sklearn.ensemble import AdaBoostClassifier
def recspre(estrs, predata, datadict, zhe):
    # estrs encodes "n_estimators-max_depth", e.g. "60-4"
    mo, ze = estrs.split('-')
model = AdaBoostClassifier(DecisionTreeClassifier(max_depth=int(ze)),
algorithm="SAMME",
n_estimators=int(mo), learning_rate=0.8)
model.fit(datadict[zhe]['train'][:, :-1], datadict[zhe]['train'][:, -1])
    # Predict on the evaluation data
    yucede = model.predict(predata[:, :-1])
    # Print the confusion matrix (ConfuseMatrix is defined elsewhere in the source)
print(ConfuseMatrix(predata[:, -1], yucede))
return fmse(predata[:, -1], yucede)
# Main function
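The estrs argument packs two hyperparameters into one string; a standalone sketch of how a value like '60-4' is decoded into the boosted model (the string value is illustrative):

from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier

mo, ze = '60-4'.split('-')
model = AdaBoostClassifier(DecisionTreeClassifier(max_depth=int(ze)),  # depth 4
                           algorithm="SAMME",
                           n_estimators=int(mo), learning_rate=0.8)    # 60 rounds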
Example 4: __init__
# Required import: from sklearn import tree
# Or: from sklearn.tree import DecisionTreeClassifier
# Also needed here: import copy as cp (base_estimator is deep-copied below)
def __init__(self,
base_estimator=DecisionTreeClassifier(),
window_size=250,
slope=0.5,
crossing_point=10,
n_estimators=15,
pruning=None):
super().__init__()
self.ensemble = []
self.ensemble_weights = []
self.bkts = []
self.wkts = []
self.buffer = []
self.window_size = window_size
self.slope = slope
self.crossing_point = crossing_point
self.n_estimators = n_estimators
self.pruning = pruning
self.X_batch = []
self.y_batch = []
self.instance_weights = []
self.base_estimator = cp.deepcopy(base_estimator)
self.classes = None
Example 5: __init__
# Required import: from sklearn import tree
# Or: from sklearn.tree import DecisionTreeClassifier
# Also needed here: from sklearn.utils import check_random_state
def __init__(self, base_estimator=DecisionTreeClassifier(),
error_threshold=0.5,
n_estimators=30,
n_ensembles=10,
window_size=100,
random_state=None):
super().__init__()
self.base_estimator = base_estimator
self.n_estimators = n_estimators
self.ensembles = []
self.ensemble_weights = []
self.classes = None
self.n_ensembles = n_ensembles
self.random = check_random_state(random_state)
self.random_state = random_state
self.error_threshold = error_threshold
self.X_batch = []
self.y_batch = []
self.window_size = window_size
Example 6: __call__
# Required import: from sklearn import tree
# Or: from sklearn.tree import DecisionTreeClassifier
# Also needed here: import numpy as np
def __call__(self, estimator):
fitted_estimator = estimator.fit(self.X_train, self.y_train)
if isinstance(estimator, (LinearClassifierMixin, SVC, NuSVC,
LightBaseClassifier)):
y_pred = estimator.decision_function(self.X_test)
elif isinstance(estimator, DecisionTreeClassifier):
y_pred = estimator.predict_proba(self.X_test.astype(np.float32))
elif isinstance(
estimator,
(ForestClassifier, XGBClassifier, LGBMClassifier)):
y_pred = estimator.predict_proba(self.X_test)
else:
y_pred = estimator.predict(self.X_test)
return self.X_test, y_pred, fitted_estimator
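The isinstance dispatch above exists because estimator families expose different prediction interfaces. A standalone illustration of the two output kinds involved (parameter choices are illustrative):

from sklearn.datasets import load_iris
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
proba = DecisionTreeClassifier(max_depth=2, random_state=0).fit(X, y).predict_proba(X)
margin = SVC(gamma="scale").fit(X, y).decision_function(X)
print(proba.shape)   # (150, 3): one probability column per class
print(margin.shape)  # (150, 3): one decision score per class (default ovr shape)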
Example 7: test_classification
# Required import: from sklearn import tree
# Or: from sklearn.tree import DecisionTreeClassifier
def test_classification():
# Check classification for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [1, 2, 4],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyClassifier(),
Perceptron(tol=1e-3),
DecisionTreeClassifier(),
KNeighborsClassifier(),
SVC(gamma="scale")]:
for params in grid:
BaggingClassifier(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
Example 8: test_gridsearch
# Required import: from sklearn import tree
# Or: from sklearn.tree import DecisionTreeClassifier
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
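After fit, the winning configuration can be read off the search object (a standard GridSearchCV attribute; the printed value below is only an illustration):

print(clf.best_params_)
# e.g. {'base_estimator__max_depth': 2, 'n_estimators': 2}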
Example 9: test_plot_tree
# Required import: from sklearn import tree
# Or: from sklearn.tree import DecisionTreeClassifier
def test_plot_tree(pyplot):
# mostly smoke tests
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=2,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
feature_names = ['first feat', 'sepal_width']
nodes = plot_tree(clf, feature_names=feature_names)
assert len(nodes) == 3
assert nodes[0].get_text() == ("first feat <= 0.0\nentropy = 0.5\n"
"samples = 6\nvalue = [3, 3]")
assert nodes[1].get_text() == "entropy = 0.0\nsamples = 3\nvalue = [3, 0]"
assert nodes[2].get_text() == "entropy = 0.0\nsamples = 3\nvalue = [0, 3]"
Example 10: test_probability
# Required import: from sklearn import tree
# Or: from sklearn.tree import DecisionTreeClassifier
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
Example 11: test_importances_gini_equal_mse
# Required import: from sklearn import tree
# Or: from sklearn.tree import DecisionTreeClassifier
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
# to numerical instability. Since those instabilities mainly occurs at
# high tree depth, we restrict this maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
Example 12: test_sample_weight_invalid
# Required import: from sklearn import tree
# Or: from sklearn.tree import DecisionTreeClassifier
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
Example 13: test_huge_allocations
# Required import: from sklearn import tree
# Or: from sklearn.tree import DecisionTreeClassifier
def test_huge_allocations():
n_bits = 8 * struct.calcsize("P")
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
Example 14: test_set_params_passes_all_parameters
# Required import: from sklearn import tree
# Or: from sklearn.tree import DecisionTreeClassifier
def test_set_params_passes_all_parameters():
# Make sure all parameters are passed together to set_params
# of nested estimator. Regression test for #9944
class TestDecisionTree(DecisionTreeClassifier):
def set_params(self, **kwargs):
super().set_params(**kwargs)
# expected_kwargs is in test scope
assert kwargs == expected_kwargs
return self
expected_kwargs = {'max_depth': 5, 'min_samples_leaf': 2}
for est in [Pipeline([('estimator', TestDecisionTree())]),
GridSearchCV(TestDecisionTree(), {})]:
est.set_params(estimator__max_depth=5,
estimator__min_samples_leaf=2)
Example 15: test_score_sample_weight
# Required import: from sklearn import tree
# Or: from sklearn.tree import DecisionTreeClassifier
def test_score_sample_weight():
rng = np.random.RandomState(0)
# test both ClassifierMixin and RegressorMixin
estimators = [DecisionTreeClassifier(max_depth=2),
DecisionTreeRegressor(max_depth=2)]
sets = [datasets.load_iris(),
datasets.load_boston()]
for est, ds in zip(estimators, sets):
est.fit(ds.data, ds.target)
# generate random sample weights
sample_weight = rng.randint(1, 10, size=len(ds.target))
# check that the score with and without sample weights are different
assert_not_equal(est.score(ds.data, ds.target),
est.score(ds.data, ds.target,
sample_weight=sample_weight),
msg="Unweighted and weighted scores "
"are unexpectedly equal")