This article collects typical usage examples of the OneVsOneClassifier class from Python's sklearn.multiclass module. If you have been wondering what multiclass.OneVsOneClassifier does, how to use it, or where to find worked examples, the curated code samples below may help. You can also explore other usages of its containing module, sklearn.multiclass.
Below are 14 code examples of multiclass.OneVsOneClassifier, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
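Before the collected examples, here is a minimal, self-contained sketch of the basic fit/predict cycle. It is not taken from any of the repositories below; the iris dataset and the LinearSVC base estimator are only illustrative assumptions.

# Minimal usage sketch (illustrative, not from the examples below):
# OneVsOneClassifier trains one binary estimator per pair of classes
# and predicts by majority vote among the pairwise classifiers.
import numpy as np
from sklearn.datasets import load_iris
from sklearn.multiclass import OneVsOneClassifier
from sklearn.svm import LinearSVC

iris = load_iris()
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target)

pred = ovo.predict(iris.data)
print(len(ovo.estimators_))          # 3 classes -> 3 * (3 - 1) / 2 = 3 pairwise estimators
print(np.mean(pred == iris.target))  # training accuracy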
Example 1: test_ovr_ovo_regressor
# Required module: from sklearn import multiclass [as alias]
# Or: from sklearn.multiclass import OneVsOneClassifier [as alias]
def test_ovr_ovo_regressor():
    # test that ovr and ovo work on regressors which don't have a decision_
    # function
    ovr = OneVsRestClassifier(DecisionTreeRegressor())
    pred = ovr.fit(iris.data, iris.target).predict(iris.data)
    assert_equal(len(ovr.estimators_), n_classes)
    assert_array_equal(np.unique(pred), [0, 1, 2])
    # we are doing something sensible
    assert_greater(np.mean(pred == iris.target), .9)

    ovr = OneVsOneClassifier(DecisionTreeRegressor())
    pred = ovr.fit(iris.data, iris.target).predict(iris.data)
    assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
    assert_array_equal(np.unique(pred), [0, 1, 2])
    # we are doing something sensible
    assert_greater(np.mean(pred == iris.target), .9)
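Note: on the 3-class iris data used here, both wrappers happen to fit 3 estimators (OvR fits one per class, OvO fits 3 * (3 - 1) / 2 = 3 pairwise ones); with, say, 10 classes the counts would diverge to 10 and 45.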
Example 2: test_ovo_ties
# Required module: from sklearn import multiclass [as alias]
# Or: from sklearn.multiclass import OneVsOneClassifier [as alias]
def test_ovo_ties():
    # Test that ties are broken using the decision function,
    # not defaulting to the smallest label
    X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
    y = np.array([2, 0, 1, 2])
    multi_clf = OneVsOneClassifier(Perceptron(shuffle=False, max_iter=4,
                                              tol=None))
    ovo_prediction = multi_clf.fit(X, y).predict(X)
    ovo_decision = multi_clf.decision_function(X)

    # Classifiers are in order 0-1, 0-2, 1-2
    # Use decision_function to compute the votes and the normalized
    # sum_of_confidences, which is used to disambiguate when there is a tie in
    # votes.
    votes = np.round(ovo_decision)
    normalized_confidences = ovo_decision - votes

    # For the first point, there is one vote per class
    assert_array_equal(votes[0, :], 1)
    # For the rest, there is no tie and the prediction is the argmax
    assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
    # For the tie, the prediction is the class with the highest score
    assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
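As the comments above indicate, OneVsOneClassifier.decision_function returns, per class, the integer vote count plus a small normalized sum of pairwise confidences, so np.round recovers the votes and the remainder is the tie-breaking term. The first sample is constructed so that each class receives exactly one vote and only the confidence term decides the prediction.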
Example 3: _one_vs_one
# Required module: from sklearn import multiclass [as alias]
# Or: from sklearn.multiclass import OneVsOneClassifier [as alias]
def _one_vs_one(self, X, Y):
    self.cls = OneVsOneClassifier(KOMD(**self.get_params())).fit(X, Y)
    self.is_fitted = True
    return self
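Unlike the scikit-learn test functions in the other examples, this snippet is a method of a classifier class: it appears to wrap a binary kernel classifier (KOMD) in OneVsOneClassifier, re-creating the base estimator with the object's own hyperparameters via get_params(), and stores the fitted wrapper on self.cls.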
Example 4: test_ovo_exceptions
# Required module: from sklearn import multiclass [as alias]
# Or: from sklearn.multiclass import OneVsOneClassifier [as alias]
def test_ovo_exceptions():
    ovo = OneVsOneClassifier(LinearSVC(random_state=0))
    assert_raises(ValueError, ovo.predict, [])
Example 5: test_ovo_fit_on_list
# Required module: from sklearn import multiclass [as alias]
# Or: from sklearn.multiclass import OneVsOneClassifier [as alias]
def test_ovo_fit_on_list():
    # Test that OneVsOne fitting works with a list of targets and yields the
    # same output as predict from an array
    ovo = OneVsOneClassifier(LinearSVC(random_state=0))
    prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
    iris_data_list = [list(a) for a in iris.data]
    prediction_from_list = ovo.fit(iris_data_list,
                                   list(iris.target)).predict(iris_data_list)
    assert_array_equal(prediction_from_array, prediction_from_list)
Example 6: test_ovo_fit_predict
# Required module: from sklearn import multiclass [as alias]
# Or: from sklearn.multiclass import OneVsOneClassifier [as alias]
def test_ovo_fit_predict():
    # A classifier which implements decision_function.
    ovo = OneVsOneClassifier(LinearSVC(random_state=0))
    ovo.fit(iris.data, iris.target).predict(iris.data)
    assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)

    # A classifier which implements predict_proba.
    ovo = OneVsOneClassifier(MultinomialNB())
    ovo.fit(iris.data, iris.target).predict(iris.data)
    assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
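The point of this test is that OneVsOneClassifier only needs some way to rank the two classes in each pairwise problem, so a margin-based estimator (LinearSVC with decision_function) and a probabilistic one (MultinomialNB with predict_proba) both produce the same number of pairwise estimators.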
Example 7: test_ovo_gridsearch
# Required module: from sklearn import multiclass [as alias]
# Or: from sklearn.multiclass import OneVsOneClassifier [as alias]
def test_ovo_gridsearch():
    ovo = OneVsOneClassifier(LinearSVC(random_state=0))
    Cs = [0.1, 0.5, 0.8]
    cv = GridSearchCV(ovo, {'estimator__C': Cs})
    cv.fit(iris.data, iris.target)
    best_C = cv.best_estimator_.estimators_[0].C
    assert best_C in Cs
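The 'estimator__C' key uses scikit-learn's nested-parameter convention: 'estimator' is the OneVsOneClassifier constructor argument holding the wrapped LinearSVC, and the double underscore routes C to that inner estimator, which is why the selected C can be read back from one of the fitted pairwise estimators.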
Example 8: test_ovo_ties2
# Required module: from sklearn import multiclass [as alias]
# Or: from sklearn.multiclass import OneVsOneClassifier [as alias]
def test_ovo_ties2():
    # test that ties can not only be won by the first two labels
    X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
    y_ref = np.array([2, 0, 1, 2])

    # cycle through labels so that each label wins once
    for i in range(3):
        y = (y_ref + i) % 3
        multi_clf = OneVsOneClassifier(Perceptron(shuffle=False, max_iter=4,
                                                  tol=None))
        ovo_prediction = multi_clf.fit(X, y).predict(X)
        assert_equal(ovo_prediction[0], i % 3)
Example 9: test_ovo_string_y
# Required module: from sklearn import multiclass [as alias]
# Or: from sklearn.multiclass import OneVsOneClassifier [as alias]
def test_ovo_string_y():
    # Test that the OvO doesn't mess up the encoding of string labels
    X = np.eye(4)
    y = np.array(['a', 'b', 'c', 'd'])

    ovo = OneVsOneClassifier(LinearSVC())
    ovo.fit(X, y)
    assert_array_equal(y, ovo.predict(X))
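Because OneVsOneClassifier stores the original labels in its classes_ attribute and only works with encoded integers internally, predict returns the same string labels that were passed to fit, which is what this test checks.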
Example 10: test_ovo_one_class
# Required module: from sklearn import multiclass [as alias]
# Or: from sklearn.multiclass import OneVsOneClassifier [as alias]
def test_ovo_one_class():
    # Test error for OvO with one class
    X = np.eye(4)
    y = np.array(['a'] * 4)

    ovo = OneVsOneClassifier(LinearSVC())
    assert_raise_message(ValueError, "when only one class", ovo.fit, X, y)
Example 11: test_pairwise_indices
# Required module: from sklearn import multiclass [as alias]
# Or: from sklearn.multiclass import OneVsOneClassifier [as alias]
def test_pairwise_indices():
    clf_precomputed = svm.SVC(kernel='precomputed')
    X, y = iris.data, iris.target

    ovr_false = OneVsOneClassifier(clf_precomputed)
    linear_kernel = np.dot(X, X.T)
    ovr_false.fit(linear_kernel, y)

    n_estimators = len(ovr_false.estimators_)
    precomputed_indices = ovr_false.pairwise_indices_

    for idx in precomputed_indices:
        assert_equal(idx.shape[0] * n_estimators / (n_estimators - 1),
                     linear_kernel.shape[0])
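With a precomputed kernel the wrapper exposes pairwise_indices_, the row indices each pairwise estimator was trained on. For iris there are 3 classes and therefore 3 pairwise estimators, and each pair covers 2 of the 3 classes, i.e. 100 of the 150 samples; the assertion 100 * 3 / 2 == 150 encodes exactly that proportion.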
Example 12: test_pairwise_attribute
# Required module: from sklearn import multiclass [as alias]
# Or: from sklearn.multiclass import OneVsOneClassifier [as alias]
def test_pairwise_attribute():
    clf_precomputed = svm.SVC(kernel='precomputed')
    clf_notprecomputed = svm.SVC()

    for MultiClassClassifier in [OneVsRestClassifier, OneVsOneClassifier]:
        ovr_false = MultiClassClassifier(clf_notprecomputed)
        assert not ovr_false._pairwise

        ovr_true = MultiClassClassifier(clf_precomputed)
        assert ovr_true._pairwise
Example 13: test_pairwise_cross_val_score
# Required module: from sklearn import multiclass [as alias]
# Or: from sklearn.multiclass import OneVsOneClassifier [as alias]
def test_pairwise_cross_val_score():
    clf_precomputed = svm.SVC(kernel='precomputed')
    clf_notprecomputed = svm.SVC(kernel='linear')

    X, y = iris.data, iris.target

    for MultiClassClassifier in [OneVsRestClassifier, OneVsOneClassifier]:
        ovr_false = MultiClassClassifier(clf_notprecomputed)
        ovr_true = MultiClassClassifier(clf_precomputed)

        linear_kernel = np.dot(X, X.T)
        score_precomputed = cross_val_score(ovr_true, linear_kernel, y)
        score_linear = cross_val_score(ovr_false, X, y)
        assert_array_equal(score_precomputed, score_linear)
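Because the wrappers propagate the base estimator's _pairwise flag (exercised in the previous example), cross_val_score knows it must slice the precomputed Gram matrix along both axes when splitting, so the scores match those of the equivalent linear-kernel SVC fitted on the raw features.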
Example 14: main
# Required module: from sklearn import multiclass [as alias]
# Or: from sklearn.multiclass import OneVsOneClassifier [as alias]
def main():
    """
    Use a linear SVM for multi-class classification.

    One vs the rest : 77.61%
    Default : 77.61%
    One vs one : 85.07%
    """
    seed = 123456789
    np.random.seed(seed)
    ntrain, ntest = 800, 200

    (tr_x, tr_y), (te_x, te_y) = load_mnist()
    x, y = np.vstack((tr_x, te_x)), np.hstack((tr_y, te_y))
    cv = MNISTCV(tr_y, te_y, ntrain, ntest, 1, seed)

    for tr, te in cv:
        clf = OneVsRestClassifier(LinearSVC(random_state=seed), n_jobs=-1)
        clf.fit(x[tr], y[tr])
        print(clf.score(x[te], y[te]))

        clf = LinearSVC(random_state=seed)
        clf.fit(x[tr], y[tr])
        print(clf.score(x[te], y[te]))

        clf = OneVsOneClassifier(LinearSVC(random_state=seed), n_jobs=-1)
        clf.fit(x[tr], y[tr])
        print(clf.score(x[te], y[te]))
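load_mnist and MNISTCV are helpers specific to the repository this example comes from. A roughly equivalent, self-contained sketch using only scikit-learn's bundled digits dataset (an assumption, not the original data or split, so the accuracies in the docstring above will not be reproduced) might look like this:

# Self-contained variant of the comparison above (illustrative assumption:
# the digits dataset stands in for the project-specific MNIST loader).
import numpy as np
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsOneClassifier, OneVsRestClassifier
from sklearn.svm import LinearSVC

seed = 123456789
digits = load_digits()
x_tr, x_te, y_tr, y_te = train_test_split(
    digits.data, digits.target, train_size=800, test_size=200, random_state=seed)

for name, clf in [
        ('One vs the rest', OneVsRestClassifier(LinearSVC(random_state=seed), n_jobs=-1)),
        ('Default', LinearSVC(random_state=seed)),
        ('One vs one', OneVsOneClassifier(LinearSVC(random_state=seed), n_jobs=-1))]:
    clf.fit(x_tr, y_tr)
    print(name, clf.score(x_te, y_te))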