本文整理汇总了Python中sklearn.multiclass.OneVsOneClassifier.score方法的典型用法代码示例。如果您正苦于以下问题:Python OneVsOneClassifier.score方法的具体用法?Python OneVsOneClassifier.score怎么用?Python OneVsOneClassifier.score使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sklearn.multiclass.OneVsOneClassifier
的用法示例。
在下文中一共展示了OneVsOneClassifier.score方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: multiclass_SVC
# 需要导入模块: from sklearn.multiclass import OneVsOneClassifier [as 别名]
# 或者: from sklearn.multiclass.OneVsOneClassifier import score [as 别名]
def multiclass_SVC(X, y):
    """Compare one-vs-rest and one-vs-one multiclass linear SVMs.

    Splits (X, y) into a 65/35 train/test partition, fits a
    OneVsRestClassifier and a OneVsOneClassifier (both wrapping
    LinearSVC), and returns each model's test-set accuracy.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Feature matrix.
    y : array-like of shape (n_samples,)
        Class labels.

    Returns
    -------
    tuple of float
        (one_vs_rest_accuracy, one_vs_one_accuracy).
    """
    from sklearn.svm import LinearSVC
    # sklearn.cross_validation was deprecated in 0.18 and removed in 0.20;
    # train_test_split now lives in sklearn.model_selection.
    from sklearn.model_selection import train_test_split
    from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier

    # first move: split data (35% held out for evaluation)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.35)

    # one-vs-rest (a.k.a. one-vs-all): one binary classifier per class
    ovr = OneVsRestClassifier(LinearSVC(random_state=0)).fit(X_train, y_train)
    # one-vs-one: one binary classifier per pair of classes
    ovo = OneVsOneClassifier(LinearSVC(random_state=0)).fit(X_train, y_train)

    one_vs_rest = ovr.score(X_test, y_test)
    one_vs_one = ovo.score(X_test, y_test)
    return one_vs_rest, one_vs_one
示例2: allSamples
# 需要导入模块: from sklearn.multiclass import OneVsOneClassifier [as 别名]
# 或者: from sklearn.multiclass.OneVsOneClassifier import score [as 别名]
for slip in slips:
train = []
label = []
for category in label_dict.keys():
X, Y = allSamples(path, category, label_dict, order, window, slip)
if train == []:
train = X
label = Y
else:
train = np.concatenate((train, X), 0)
label = np.concatenate((label, Y), 0)
#train = dim_reduction_PCA(train,0.999)
X_train, X_test, Y_train, Y_test = train_test_split(train, label, test_size=0.4, random_state=42)
C = 1.0
multiclassifier = OneVsOneClassifier(svm.SVC(kernel="rbf",gamma=0.7,C=C)).fit(X_train, Y_train)
score.append(multiclassifier.score(X_test, Y_test))
# svc = svm.SVC(kernel='linear', C=C).fit(X_train, Y_train)
# rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X_train, Y_train)
# poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X_train, Y_train)
# print score
# plt.figure("Score-order")
# plt.plot(slips,score)
# plt.show()
#Use validate data to test the model.
path = "./"
label_dict = {"test":0}
X_test,Y_test = allSamples(path,"test",label_dict,order,window,slip)
#print X_test,Y_test
print multiclassifier.predict(X_test)
示例3: open
# 需要导入模块: from sklearn.multiclass import OneVsOneClassifier [as 别名]
# 或者: from sklearn.multiclass.OneVsOneClassifier import score [as 别名]
# Load the precomputed Laban-annotation targets from a pickle file.
Y_laban = pickle.load( open( "Y_Laban", "r" ) )
X, y = np.array(X), np.array(y)
# One-vs-one multiclass scheme on top of an AdaBoost base classifier.
baseClf = AdaBoostClassifier()
clf = OneVsOneClassifier(baseClf)
from sklearn import cross_validation
# Single shuffled 90/10 train/test split (n_iter=1), seeded for repeatability.
n=1
rs = cross_validation.ShuffleSplit(len(y), n_iter=n, test_size=.1, random_state=0)
res = []        # accuracy of the plain classifier per split
resMixed = []   # presumably accuracies on mixed features — filled beyond this excerpt
resLaban = []   # presumably accuracies on Laban features — filled beyond this excerpt
for train_index, test_index in rs:
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]
    Y_laban_train, Y_laban_test = Y_laban[train_index], Y_laban[test_index]
    clf.fit(X_train, y_train)#, sample_weight)
    # Baseline accuracy on the raw features.
    r = clf.score(X_test, y_test)
    res.append(r)
    # Fit a multi-task model that also returns a selected feature subset.
    labanClf, selectedIndices = labanUtil.getMultiTaskclassifier(X_train, Y_laban_train)
    X_train_transformed = transform(X_train, selectedIndices)
    #X_train_laban = []
    X_train_mixed = []
    # Augment each selected-feature vector with its predicted Laban labels.
    for x in X_train_transformed:
        labans = labanClf.predict(x)
        newVec = np.concatenate((x, labans))
        X_train_mixed.append(newVec)
        #X_train_laban.append(labans)
    #X_train_laban=np.array(X_train_laban)
    X_test_transformed = transform(X_test, selectedIndices)
    X_test_laban = []
示例4: print
# 需要导入模块: from sklearn.multiclass import OneVsOneClassifier [as 别名]
# 或者: from sklearn.multiclass.OneVsOneClassifier import score [as 别名]
print(">>>> Loading finished")
# Build a bag-of-clusters histogram per sequence: feature_vec[i][c] counts
# how many frames of sequence i were assigned to k-means cluster c.
feature_vec = np.zeros((len(data), kmeans.n_clusters))
for i in range(len(data)):
    mydata = data[i]
    # mydata = pca.transform(mydata)
    feature_seq = kmeans.predict(mydata)
    for j in feature_seq:
        # BUG FIX: `j` already IS the predicted cluster label, so it must
        # index the histogram directly. The original `feature_seq[j]`
        # re-indexed the prediction array by a label value, which counts
        # the wrong bins (and can read out of bounds).
        feature_vec[i][j] += 1
feature_vec = normalize(feature_vec)
train_x, test_x, train_y, test_y = \
    train_test_split(feature_vec, all_y, test_size = 1-train_ratio)
print(feature_vec.shape)
print(">>>> Data prepared")
# for alpha_ in [0.1, 0.01, 0.02, 0.03, 0.05, 0.008, 0.009, 0.006, 0.005]:
for alpha_ in [0.0001]:
    # One-vs-one SGD linear classifier, fitted with 4 parallel jobs.
    clf = OneVsOneClassifier(linear_model.SGDClassifier(alpha = alpha_, n_iter=150000, shuffle=True), n_jobs=4)
    clf.fit(train_x, train_y)
    print(" alpha", alpha_)
    print(" train score", clf.score(train_x, train_y))
    print(" test score", clf.score(test_x, test_y))
    print(clf)
    pred_y = clf.predict(test_x)
    print(test_x[:2,:5])
    print(pred_y)
示例5: OneVsOneClassifier
# 需要导入模块: from sklearn.multiclass import OneVsOneClassifier [as 别名]
# 或者: from sklearn.multiclass.OneVsOneClassifier import score [as 别名]
# Column 0 holds the class label; the remaining columns are features.
tr_feat = tr[:,1:]
ts_feat = ts[:,1:]
tr_label = tr[:,0]
ts_label = ts[:,0]
# use sklearn C-Support Vector Classification
## == one-vs-one == ##
# The multiclass support is handled in a one-vs-one scheme
# train
ovo_clf = OneVsOneClassifier(LinearSVC())
ovo_clf.fit(tr_feat, tr_label)
# predict
ovo_pred = ovo_clf.predict(ts_feat)
# Test error = 1 - mean accuracy.
ovo_err = 1- ovo_clf.score(ts_feat, ts_label)
# confusion matrix, e.g.:
#
#array([[159,   7],
#       [  5, 161]])
ovo_cmat = metrics.confusion_matrix(ts_label, ovo_pred)
# Row sums of the confusion matrix = number of true samples per class.
pred_total = np.sum(ovo_cmat,axis = 1)
# Per-class misclassification rate: 1 - (correct predictions / true count).
ovo_mis = 1- np.diag(ovo_cmat).astype(float) / pred_total
print("one vs. one svm - classification err: %s \n"%(ovo_err))
print("confusion matrix: \n %s"%(ovo_cmat))
print("class misclassification rate : \n %s"%(ovo_mis))
## == one-vs-rest == ##
# The multiclass support is handled in a one-vs-rest scheme
# train
ovr_clf = OneVsRestClassifier(LinearSVC())
示例6: print
# 需要导入模块: from sklearn.multiclass import OneVsOneClassifier [as 别名]
# 或者: from sklearn.multiclass.OneVsOneClassifier import score [as 别名]
# Output the hitrate and the confusion matrix for each model
# NOTE(review): `svm` here is a model object fitted outside this excerpt,
# shadowing the usual sklearn.svm module name — confirm at definition site.
print("SVM: ")
print(svm.score(X_train, y_train))
print(svm.score(X_test, y_test))
#print(confusion_matrix(pred, y_test))
# One-vs-one wrapper around a linear SVM with weak regularization (C=100).
svm2 = OneVsOneClassifier(LinearSVC(C=100.))
svm2.fit(X_train, y_train)
# Make an array of predictions on the test set
pred = svm2.predict(X_test)
# Output the hitrate and the confusion matrix for each model
print("LinearSVC: ")
print(svm2.score(X_train, y_train))
print(svm2.score(X_test, y_test))
from sklearn.neighbors import KNeighborsClassifier
# k-nearest-neighbours baseline (k=2).
neigh = (KNeighborsClassifier(n_neighbors=2))
neigh.fit(X_train, y_train)
pred = neigh.predict(X_test)
print("knn: ")
print(neigh.score(X_train,y_train))
print(neigh.score(X_test,y_test))
from sklearn.ensemble import RandomForestClassifier
# Small random-forest baseline; fixed random_state for reproducibility.
clf = (RandomForestClassifier(n_estimators=5,max_depth=None,min_samples_split=5, random_state=15))
clf = clf.fit(X_train, y_train)
示例7: print
# 需要导入模块: from sklearn.multiclass import OneVsOneClassifier [as 别名]
# 或者: from sklearn.multiclass.OneVsOneClassifier import score [as 别名]
#y = np.array([0, 0, 1, 1, 2, 2, 3, 3])
print('start at %s' % startTime)
print('start training...')
# One-vs-one multiclass scheme over a linear SVM; seeded for repeatability.
clf = OneVsOneClassifier(LinearSVC(random_state = 0))
#clf = OneVsRestClassifier(LinearSVC(random_state = 0))
clf = clf.fit(X_train, y_train)
print(clf.get_params())
#joblib.dump(clf, modelPath) # save the trained model
#lists =[[5, -1], [-2, -6], [2,1], [-2, 5]]
#test = np.array(lists)
#test_label = np.array([3, 2, 0, 1])
print("start predicting...")
#clf = joblib.load(modelPath) # load the model
# Mean accuracy on the held-out test set.
score = clf.score(X_test, y_test)
print('accuracy is {0}'.format(score))
#==============================================================================
# Manual accuracy computation kept for reference; clf.score above is
# equivalent.
# count = 0
# predictions = clf.predict(X_test)
# lens = len(predictions)
# for i in xrange(lens):
#     if predictions[i] == y_test[i]:
#         count +=1
# print('accuracy is %f' % (float(count) / lens ))
#==============================================================================
# Report wall-clock duration. NOTE(review): `start`/`startTime` are set
# outside this excerpt — presumably time.time()/time.ctime() at startup.
endTime = time.ctime()
end = time.time()
print("start at %s, end at %s" % (startTime, endTime))
print("consume ", (end - start))
示例8: if
# 需要导入模块: from sklearn.multiclass import OneVsOneClassifier [as 别名]
# 或者: from sklearn.multiclass.OneVsOneClassifier import score [as 别名]
# Project the test TF-IDF matrix into the SVD (LSA) space fitted upstream.
tfidf_test_reduced = svd.transform(tfidf_test)
svm_test_data = tfidf_test_reduced
svm_test_tag = test.target
#for i in test.target:
#    if(i < 4):
#        svm_test_tag.append(-1)
#    else:
#        svm_test_tag.append(1)
# Linear-kernel SVC in a one-vs-one multiclass scheme.
# NOTE(review): OVOC/OVRC are import aliases defined outside this excerpt —
# presumably OneVsOneClassifier / OneVsRestClassifier; confirm at import site.
svc = SVC(kernel='linear',C = 100)
svc_ovoc=OVOC(svc)
svc_ovoc.fit(svm_train_data, svm_train_tag)
svc_ovoc_predict=svc_ovoc.predict(svm_test_data)
#precision, recall, thresholds = precision_recall_curve(svm_test_tag, svc_ovoc_predict)
#BernoulliNB(alpha=1.0, binarize=0.5, class_prior=None, fit_prior=True)
# Accuracy plus weighted-average precision/recall on the test set.
score=svc_ovoc.score(svm_test_data,svm_test_tag)
precision = precision_score(svm_test_tag, svc_ovoc_predict, average = 'weighted')
recall = recall_score(svm_test_tag, svc_ovoc_predict, average = 'weighted')
print "1 VS 1 SVC"
print "confusion matrix:","\n",confusion_matrix(svm_test_tag, svc_ovoc_predict)
print "score=",score
print "precision=", precision
print "recall=", recall
print '\n'
# Same evaluation with an RBF kernel in a one-vs-rest scheme.
svc = SVC(kernel='rbf',C = 100)
svc_ovrc=OVRC(svc)
svc_ovrc.fit(svm_train_data, svm_train_tag)
svc_ovrc_predict=svc_ovrc.predict(svm_test_data)
#precision, recall, thresholds = precision_recall_curve(svm_test_tag, svc_ovoc_predict)
#BernoulliNB(alpha=1.0, binarize=0.5, class_prior=None, fit_prior=True)