本文整理汇总了Python中sklearn.svm.LinearSVC.score方法的典型用法代码示例。如果您正苦于以下问题:Python LinearSVC.score方法的具体用法?Python LinearSVC.score怎么用?Python LinearSVC.score使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sklearn.svm.LinearSVC的用法示例。
在下文中一共展示了LinearSVC.score方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main
# 需要导入模块: from sklearn.svm import LinearSVC [as 别名]
# 或者: from sklearn.svm.LinearSVC import score [as 别名]
def main():
    """Train a patch-based feature pipeline on CIFAR and report SVC accuracy.

    Loads CIFAR train/test splits, learns a feature representation from
    image patches, standardizes the transformed features using
    training-set statistics only, and fits an RBF SVC (C=10).
    """
    dataset = load_cifar.load_cifar(n_train=N_TRAIN, n_test=N_TEST,
                                    grayscale=GRAYSCALE, shuffle=False)
    train_data = dataset['train_data']
    train_labels = dataset['train_labels']
    test_data = dataset['test_data']
    test_labels = dataset['test_labels']
    print(train_data.shape, test_data.shape)
    # Floor division: PatchExtractor expects an integer patch budget per
    # image (the original py2 `/` was integer division).
    patch_extractor = image.PatchExtractor(
        patch_size=(PATCH_SIZE, PATCH_SIZE),
        max_patches=N_PATCHES // len(train_data))
    pp = preprocessing.Preprocessor(n_components=0.99)
    fl = feature_learner.FeatureLearner(pp, patch_extractor,
                                        n_clusters=N_CENTROIDS)
    fl.fit(train_data)
    train = fl.transform(train_data)
    # Standardize with training-set mean/variance only (no test leakage).
    m_train = mean(train, axis=0)
    train -= m_train
    v_train = sqrt(var(train, axis=0) + 0.01)  # +0.01 avoids divide-by-zero
    train /= v_train
    test = fl.transform(test_data)
    test -= m_train
    test /= v_train
    # NOTE(review): this example fits a kernel SVC, not LinearSVC.
    classifier = SVC(C=10.0)  # , gamma=1e-3, verbose=False
    classifier.fit(train, train_labels)
    print(classifier.score(test, test_labels))
    return
示例2: sc_vq_train2
# 需要导入模块: from sklearn.svm import LinearSVC [as 别名]
# 或者: from sklearn.svm.LinearSVC import score [as 别名]
def sc_vq_train2(images, labels, rfSize, alpha, num_bases, num_patches):
    """Learn an OMP-1 dictionary, encode images, and fit a linear SVM.

    Extracts and ZCA-whitens patches, learns `num_bases` dictionary
    atoms, encodes the images, standardizes the features, and trains a
    LinearSVC.

    Returns:
        (svmLearner, trainXC_mean, trainXC_sd, vq_result) where
        vq_result bundles everything needed to encode new images the
        same way.
    """
    input_dim = images[0].shape
    patches = extract_patches(images, rfSize, num_patches)
    patches, M, P = ZCA_whitening(patches)
    dictionary = run_omp1(patches, num_bases, 50)
    trainXC = extract_features(images, dictionary, rfSize, input_dim, M, P, alpha)
    L = 0.01  # regularization strength; SVM uses C = 1/L
    trainXC_mean = np.mean(trainXC, axis=0)
    trainXC_sd = np.sqrt(np.var(trainXC, axis=0) + 0.01)  # +0.01 avoids /0
    trainXCs = (trainXC - trainXC_mean) / trainXC_sd
    # BUG FIX: append the bias column to the *standardized* features.
    # The original concatenated the raw trainXC, silently discarding the
    # standardization computed on the previous line.
    trainXCs = np.concatenate((trainXCs, np.ones([trainXCs.shape[0], 1])),
                              axis=1)
    svmLearner = LinearSVC(C=1 / L)
    svmLearner.fit(trainXCs, labels)
    print(svmLearner.score(trainXCs, labels))  # training accuracy
    vq_result = {'dictionary': dictionary, 'M': M, 'P': P, 'rfSize': rfSize,
                 'alpha': alpha, 'input_dim': input_dim}
    return (svmLearner, trainXC_mean, trainXC_sd, vq_result)
示例3: applySVMWithPCA
# 需要导入模块: from sklearn.svm import LinearSVC [as 别名]
# 或者: from sklearn.svm.LinearSVC import score [as 别名]
def applySVMWithPCA():
    """Train a hinge-loss LinearSVC on sparse matrix data and report
    accuracy and RMSE on both the training and the test set.

    Same as the previous function, just change the file names.
    """
    data = io.mmread(ROOTDIR + "TRAINDATA.mtx")
    label = np.load(ROOTDIR + "label_train.npy")
    testdata = io.mmread(ROOTDIR + "TESTDATA.mtx")
    testLabel = np.load(ROOTDIR + "label_test.npy")
    linear_svm = LinearSVC(C=1.0, class_weight=None, loss='hinge', dual=True,
                           fit_intercept=True, intercept_scaling=1,
                           multi_class='ovr', penalty='l2', random_state=None,
                           tol=0.0001, verbose=1, max_iter=2000)
    # Sparse input: centering would densify it, so scale variance only.
    data = scale(data, with_mean=False)
    linear_svm.fit(data, label)
    joblib.dump(linear_svm, ROOTDIR + 'originalTrain_hinge_2000.pkl')
    print('Training Done!')  # fixed typo ("Trainning")
    scr = linear_svm.score(data, label)
    print('accuracy on the training set is:' + str(scr))
    predLabel = linear_svm.predict(data)
    calcualteRMSE(label, predLabel)
    # NOTE(review): the test matrix is scored without applying the same
    # scaling — presumably TESTDATA.mtx is pre-scaled; verify upstream.
    scr = linear_svm.score(testdata, testLabel)
    print('accuracy on the testing set is:' + str(scr))
    predLabel = linear_svm.predict(testdata)
    calcualteRMSE(testLabel, predLabel)
示例4: buildSVMTrial
# 需要导入模块: from sklearn.svm import LinearSVC [as 别名]
# 或者: from sklearn.svm.LinearSVC import score [as 别名]
def buildSVMTrial(self):
    """Fit a strongly regularized LinearSVC on topic + word2vec features
    and print test accuracy plus per-target F-scores.
    """
    feats = ['topic1hot', 'words2vec']
    y_attribute = 'stance'
    X, y = self.fe.getFeaturesMatrix('train', feats, y_attribute)
    Xt, yt = self.fe.getFeaturesMatrix('test', feats, y_attribute)
    clf = LinearSVC(C=0.001)  # small C = strong regularization
    clf = clf.fit(X, y)
    y_pred = clf.predict(Xt)
    print(clf.score(Xt, yt))  # py3 print (was a py2 print statement)
    pprint(self.eval.computeFscores(
        self.data.testTweets, self.fe.labelenc.inverse_transform(y_pred)))
示例5: train_svm
# 需要导入模块: from sklearn.svm import LinearSVC [as 别名]
# 或者: from sklearn.svm.LinearSVC import score [as 别名]
def train_svm(C=0.1, grid=False):
    """Fit a linear SVM on chi2-mapped SIFT bag-of-words superpixel
    features from the Pascal 'kTrain' split and evaluate on 'kVal'.

    The `grid` parameter is accepted for interface compatibility but is
    not used by this variant.
    """
    dataset = PascalSegmentation()
    train_files = dataset.get_split("kTrain")
    train_sps = [slic_n(dataset.get_image(name), n_superpixels=100,
                        compactness=10)
                 for name in train_files]
    bow = SiftBOW(dataset, n_words=1000, color_sift=True)
    data_train = bow.fit_transform(train_files, train_sps)
    data_train = add_global_descriptor(data_train)

    clf = LinearSVC(C=C, dual=False, class_weight='auto')
    chi2 = AdditiveChi2Sampler()
    X = chi2.fit_transform(np.vstack(data_train.X))
    y = np.hstack(data_train.Y)
    clf.fit(X, y)
    print(clf.score(X, y))  # resubstitution accuracy
    eval_on_sp(dataset, data_train,
               [clf.predict(chi2.transform(feats)) for feats in data_train.X],
               print_results=True)

    val_files = dataset.get_split("kVal")
    val_sps = [slic_n(dataset.get_image(name), n_superpixels=100,
                      compactness=10)
               for name in val_files]
    data_val = bow.transform(val_files, val_sps)
    data_val = add_global_descriptor(data_val)
    eval_on_sp(dataset, data_val,
               [clf.predict(chi2.transform(feats)) for feats in data_val.X],
               print_results=True)
    tracer()  # drop into the debugger when done
示例6: score_grid
# 需要导入模块: from sklearn.svm import LinearSVC [as 别名]
# 或者: from sklearn.svm.LinearSVC import score [as 别名]
def score_grid():
    """
    Classify MNIST digits encoded with the gridded SP hierarchy.

    Loads the per-window first-level SPs and the pooling SP, encodes the
    train and test images into mean-pooled feature vectors, and prints
    the accuracy of a LinearSVC on the test set.
    """
    import pickle  # py3 replacement for cPickle

    p = 'results\\mnist_filter'
    (tr_x, tr_y), (te_x, te_y) = load_mnist()
    # First-level SPs: one per 3x3 window position (filenames with '0'
    # in the third character — matches the original selection rule).
    sps = [load(os.path.join(p, sp)) for sp in os.listdir(p) if sp[2] == '0']
    sp2 = load(os.path.join(p, 'sp1-0.pkl'))
    nwindows = 26 ** 2       # 3x3 patches tiled over a 28x28 image
    nfeat = 100 * nwindows   # 100 SP outputs per window
    with open(os.path.join(p, 'data.pkl'), 'rb') as f:
        w, ms = pickle.load(f)

    def _encode(images):
        """Encode each image into a length-nfeat mean-pooled vector.

        Factored out of the original, which repeated this loop verbatim
        for the train and test sets.
        """
        out = np.zeros((images.shape[0], nfeat))
        for i, img in enumerate(images):
            nx = extract_patches_2d(img.reshape(28, 28), (3, 3)).reshape(
                nwindows, 9)
            x = np.zeros(nfeat, dtype='bool')
            for j, (xi, sp) in enumerate(zip(nx, sps)):
                sp.step(xi)  # NOTE: mutates the SP's internal state
                x[j * 100:(j * 100) + 100] = sp.y[:, 0]
            y = sp2.p * x[sp2.syn_map]
            # Group the pooled activations by destination feature, then
            # mean-pool each row (ms = max group size, padded with 0).
            buf = np.zeros((nfeat, ms))
            for j in range(nfeat):
                a = y[sp2.syn_map == j]
                buf[j][:a.shape[0]] = a
            out[i] = np.mean(buf, 1)
        return out

    tr_x2 = _encode(tr_x)
    te_x2 = _encode(te_x)
    clf = LinearSVC(random_state=123456789)
    clf.fit(tr_x2, tr_y)
    print('SVM Accuracy : {0:2.2f} %'.format(clf.score(te_x2, te_y) * 100))
示例7: train_svm
# 需要导入模块: from sklearn.svm import LinearSVC [as 别名]
# 或者: from sklearn.svm.LinearSVC import score [as 别名]
def train_svm(C=0.1, grid=False):
    """Train a LinearSVC on Pascal superpixel features.

    With grid=True, run a leave-one-label-out grid search over C on the
    'kTrain' split (5 pseudo-folds built from image indices). Otherwise
    fit directly on 'train' and evaluate on both 'train' and 'val' via
    eval_on_sp.
    """
    ds = PascalSegmentation()
    svm = LinearSVC(C=C, dual=False, class_weight='auto')
    if grid:
        data_train = load_pascal("kTrain")
        X, y = shuffle(data_train.X, data_train.Y)
        # Tag every superpixel with its image index so all superpixels of
        # one image land in the same fold, then collapse to 5 pseudo-labels.
        image_indicators = np.hstack(
            [np.repeat(idx, len(feats)) for idx, feats in enumerate(X)])
        labels = image_indicators % 5
        X, y = np.vstack(X), np.hstack(y)
        cv = LeavePLabelOut(labels=labels, p=1)
        param_grid = {'C': 10. ** np.arange(-3, 3)}
        scorer = Scorer(recall_score, average="macro")
        grid_search = GridSearchCV(svm, param_grid=param_grid, cv=cv,
                                   verbose=10, scoring=scorer, n_jobs=-1)
        grid_search.fit(X, y)
    else:
        data_train = load_pascal("train")
        X, y = np.vstack(data_train.X), np.hstack(data_train.Y)
        svm.fit(X, y)
        print(svm.score(X, y))  # resubstitution accuracy
        eval_on_sp(ds, data_train,
                   [svm.predict(feats) for feats in data_train.X],
                   print_results=True)
        data_val = load_pascal("val")
        eval_on_sp(ds, data_val,
                   [svm.predict(feats) for feats in data_val.X],
                   print_results=True)
示例8: with_aureliens_potentials_svm
# 需要导入模块: from sklearn.svm import LinearSVC [as 别名]
# 或者: from sklearn.svm.LinearSVC import score [as 别名]
def with_aureliens_potentials_svm(test=False):
    """Train a LinearSVC on Kraehenbuehl unary-potential features and
    evaluate per-pixel accuracy.

    test=False: train on 'train', evaluate on 'val'.
    test=True:  train on 'train' + 'val', evaluate on 'test'.
    """
    data = load_data('train', independent=True)
    data = add_kraehenbuehl_features(data)
    features = [x[0] for x in data.X]
    y = np.hstack(data.Y)
    if test:
        data_ = load_data('val', independent=True)
        data_ = add_kraehenbuehl_features(data_)
        # BUG FIX: extend with the validation features (data_.X). The
        # original extended with data.X (the training features again),
        # desynchronizing `features` from the labels appended to `y`.
        features.extend([x[0] for x in data_.X])
        y = np.hstack([y, np.hstack(data_.Y)])
    new_features_flat = np.vstack(features)
    from sklearn.svm import LinearSVC
    print("training svm")
    svm = LinearSVC(C=.001, dual=False, class_weight='auto')
    # y != 21 excludes label 21 — presumably the Pascal 'void'/unlabeled
    # class; confirm against the dataset's label map.
    svm.fit(new_features_flat[y != 21], y[y != 21])
    print(svm.score(new_features_flat[y != 21], y[y != 21]))
    print("evaluating")
    eval_on_pixels(data, [svm.predict(x) for x in features])
    if test:
        print("test data")
        data_val = load_data('test', independent=True)
    else:
        data_val = load_data('val', independent=True)
    data_val = add_kraehenbuehl_features(data_val)
    features_val = [x[0] for x in data_val.X]
    eval_on_pixels(data_val, [svm.predict(x) for x in features_val])
示例9: evaluation
# 需要导入模块: from sklearn.svm import LinearSVC [as 别名]
# 或者: from sklearn.svm.LinearSVC import score [as 别名]
def evaluation(log_C):
    """Train a LinearSVC with C = 2**log_C on MNIST and return test error.

    Uses the first 40k rows for training and rows 40k-60k for testing.
    Returns 1 - accuracy, so the function can be handed directly to a
    hyperparameter minimizer searching over log2(C).
    """
    X, y = load_svmlight_file(
        os.path.dirname(os.path.abspath(__file__)) + "/data/mnist")
    X_train = X[0:40000]
    y_train = y[0:40000]
    X_test = X[40000:60000]
    y_test = y[40000:60000]
    C_param = 2 ** log_C  # the optimizer searches in log2 space
    print("number of chosen data points: " + str(X_train.shape))
    print("number of data points test: " + str(X_test.shape))
    clf = LinearSVC(C=C_param)
    clf.fit(X_train, y_train)
    score = clf.score(X_test, y_test)
    print("Mean Accuracy: " + str(score))
    return 1 - score
示例10: compareClassifiers
# 需要导入模块: from sklearn.svm import LinearSVC [as 别名]
# 或者: from sklearn.svm.LinearSVC import score [as 别名]
def compareClassifiers():
    """Fit four classifiers on the generated observations and score each.

    Returns a list of (resubstitution_score, mean_10fold_cv_accuracy)
    tuples in the order: decision tree, Bernoulli naive Bayes,
    linear SVM, logistic regression.
    """
    observations, classes = createObservations()
    observations = np.array(observations)
    classes = np.array(classes)

    def _evaluate(model):
        """Fit, then return (training-set score, mean 10-fold CV accuracy)."""
        model.fit(observations, classes)
        resub = model.score(observations, classes)
        cv = cross_validation.cross_val_score(model, observations, classes,
                                              scoring='accuracy', cv=10)
        return (resub, np.mean(cv))

    models = [
        tree.DecisionTreeClassifier(),
        BernoulliNB(binarize=None),  # inputs already binary; skip binarizing
        LinearSVC(),
        LogisticRegression(),
    ]
    return [_evaluate(model) for model in models]
示例11: train_model
# 需要导入模块: from sklearn.svm import LinearSVC [as 别名]
# 或者: from sklearn.svm.LinearSVC import score [as 别名]
def train_model(X, y, tweet_lst, c=1, weight=None):
    """Train a LinearSVC on (X, y), print an evaluation report, and dump
    the fitted model to disk.

    Args:
        X, y: feature matrix and labels.
        tweet_lst: kept for interface compatibility (only used by the
            commented-out prob_list call in the original).
        c: SVM regularization parameter C.
        weight: class_weight passed to LinearSVC.
    """
    fmodel = os.path.join(folder, 'model', 'model_earn.model')
    print('TRAINING MODEL')
    clf = LinearSVC(C=c, class_weight=weight)
    print('No of Features, ', len(X[0]))
    clf.fit(X, y)
    print('clf score, ', clf.score(X, y))
    print('PREDICTING DATA')
    ypred = clf.predict(X)
    # Removed dead code: a discarded `ypred.tolist()` call and a manual
    # match-counting loop whose result was never used or printed.
    print(clf)
    disp_evaluation(y, ypred)
    jl.dump(clf, fmodel)
    print('MODEL DUMPED')
    return
示例12: buildSVMWord2VecWithClusters
# 需要导入模块: from sklearn.svm import LinearSVC [as 别名]
# 或者: from sklearn.svm.LinearSVC import score [as 别名]
def buildSVMWord2VecWithClusters(self):
    """Train an L1-regularized LinearSVC on word2vec + lexicon-cluster +
    topic + POS features.

    Returns:
        (accuracy, macro_fscore) on the test split.
    """
    feats = ['words2vec', 'clusteredLexicons', 'topic1hot', 'pos']
    y_attribute = 'stance'
    X, y = self.fe.getFeaturesMatrix('train', feats, y_attribute)
    print(X.shape)
    Xt, yt = self.fe.getFeaturesMatrix('test', feats, y_attribute)
    # L1 penalty requires dual=False in scikit-learn's LinearSVC.
    clf = LinearSVC(C=1, penalty='l1', dual=False).fit(X, y)
    y_pred = clf.predict(Xt)
    accuracy = clf.score(Xt, yt)
    gold = self.data.testTweets
    predicted_labels = self.fe.labelenc.inverse_transform(y_pred)
    fscores = self.eval.computeFscores(gold, predicted_labels)
    return (accuracy, fscores['Macro'])
示例13: svm_for_multiclass
# 需要导入模块: from sklearn.svm import LinearSVC [as 别名]
# 或者: from sklearn.svm.LinearSVC import score [as 别名]
def svm_for_multiclass():
    """Min-max scale a whitespace-delimited feature file, fit a
    one-vs-rest LinearSVC on a 90/10 split, and print classification
    metrics on the held-out 10%.
    """
    text_file = "/home/web_server/wangyuanfu/age/temp1"
    dataset = np.loadtxt(text_file, delimiter=" ")
    X = dataset[:, 1:]   # features: every column after the first
    y = dataset[:, 0:1]  # label: first column (2-D here, flattened below)
    min_max_scaler = preprocessing.MinMaxScaler()
    normalized_X = min_max_scaler.fit_transform(X)
    print(len(normalized_X))
    X_train, X_test, y_train, y_test = train_test_split(
        normalized_X, y, test_size=0.1, random_state=7)
    clf = LinearSVC(random_state=0, C=1, multi_class='ovr', penalty='l2')
    clf = clf.fit(X_train, y_train.reshape(-1))  # LinearSVC wants 1-D labels
    print("training score : %.3f " % (clf.score(X_train, y_train)))
    predicted = clf.predict(X_test)
    print(predicted.shape)
    # Summarize the fit of the model on the held-out split.
    print(metrics.classification_report(y_test, predicted))
    print(metrics.confusion_matrix(y_test, predicted))
    print(metrics.precision_score(y_test, predicted, average='micro'))
示例14: svm_vecteur
# 需要导入模块: from sklearn.svm import LinearSVC [as 别名]
# 或者: from sklearn.svm.LinearSVC import score [as 别名]
def svm_vecteur():
    """Treat images as raw pixel vectors and classify them with LinearSVC.

    Sweeps the image size (npix) and the iteration budget, keeps the
    best held-out score, and prints one summary line at the end.
    """
    best = np.zeros(4)  # [score, iterations, elapsed_seconds, npix]
    for npix in range(50, 200, 50):
        _, data, target, _ = utils.chargementVecteursImages(
            mer, ailleurs, 1, -1, npix)
        X_train, X_test, Y_train, Y_test = train_test_split(
            data, target, test_size=0.3, random_state=random.seed())
        for iterations in range(250, 1000, 250):
            start_time = time.time()
            svc = LinearSVC(random_state=random.seed(), max_iter=iterations)
            # Drop the singleton middle axis: (n, 1, d) -> (n, d).
            x1 = np.array(X_train)
            x1 = np.reshape(x1, (x1.shape[0], x1.shape[2]))
            x2 = np.array(X_test)
            x2 = np.reshape(x2, (x2.shape[0], x2.shape[2]))
            svc.fit(X=x1, y=Y_train)
            score = svc.score(x2, Y_test)
            end_time = time.time()
            if score > best[0]:
                best[0] = score
                best[1] = iterations
                best[2] = end_time - start_time
                best[3] = npix
    # BUG FIX: the "ms" field printed best[3] (npix) instead of the
    # elapsed time best[2].
    print("| SVM linéaire | V.Pix {:4.0f} | iterations={:1.0f} | "
          "{:10.3f}ms | {:1.3f} |".format(best[3], best[1],
                                          best[2] * 1000, best[0]))
示例15: linearSVCClass
# 需要导入模块: from sklearn.svm import LinearSVC [as 别名]
# 或者: from sklearn.svm.LinearSVC import score [as 别名]
def linearSVCClass():
    """Train a Crammer-Singer multiclass LinearSVC on precomputed
    feature vectors and print the test-set accuracy.
    """
    trainData, trainLabel = featureArray(conf['train']['feature_vector'])
    testData, testLabel = featureArray(conf['test']['feature_vector'])
    print("Linear SVC")
    # NOTE(review): with multi_class='crammer_singer', the penalty/loss/
    # dual settings are ignored by scikit-learn — confirm intent.
    clf = LinearSVC(penalty='l2', loss='hinge', dual=True, tol=0.0001, C=1.0,
                    multi_class='crammer_singer', fit_intercept=True,
                    intercept_scaling=1, class_weight=None, verbose=0,
                    random_state=None, max_iter=1000)
    clf = clf.fit(trainData, trainLabel)
    print(str(clf.score(testData, testLabel)))