This article compiles typical usage examples of the svmlight.classify function in Python. If you are wondering what svmlight.classify does, or how to call it in practice, the selected examples below should help.
Fifteen code examples of the classify function are shown, drawn from open-source projects and sorted by popularity by default.
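Before the examples, here is a minimal sketch of the usual round trip with the pysvmlight bindings: learn a model, optionally save it in SVM-Light's on-disk format, then call classify to get one decision value per test example. The tiny hand-written feature vectors and the file name model.dat are invented purely for illustration; real data would come from a feature extractor, as in the examples that follow.

import svmlight

# each example is (label, [(feature_id, value), ...]); ids start at 1,
# matching the "no features labeled 0" convention used in Example 5 below
training_data = [
    (+1, [(1, 0.9), (2, 0.1)]),
    (+1, [(1, 0.8), (2, 0.2)]),
    (-1, [(1, 0.1), (2, 0.9)]),
    (-1, [(1, 0.2), (2, 0.7)]),
]
test_data = [
    (0, [(1, 0.7), (2, 0.3)]),   # label 0: unknown, to be predicted
    (0, [(1, 0.1), (2, 0.8)]),
]

model = svmlight.learn(training_data, type='classification', kernel='linear')
svmlight.write_model(model, 'model.dat')           # same on-disk format as the SVM-Light binaries
predictions = svmlight.classify(model, test_data)  # one signed margin per test example
for p in predictions:
    print '%.8f' % p                               # > 0 suggests the positive class, < 0 the negative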
Example 1: ball_only_classifier
def ball_only_classifier(circles, color_image, bonus_radius):
    model = svmlight.read_model("./output/best_single_cup_model_for_ball")
    ff = find_features()
    # TODO: fix
    label = 0
    best_classification = 0.5
    best_circle = None
    best_circle_pixels = None
    for c in circles[:6]:
        pixels, circle = find_pixels(c, color_image, bonus_radius)
        # create features for that circle
        features = ff.generate_features(pixels, label)
        features = parse_one_line(features)
        print features
        # run the classifier on that circle
        classification = svmlight.classify(model, [features])
        print classification
        if classification[0] > best_classification:
            best_classification = classification[0]  # keep the scalar score, not the list
            best_circle = [c]
            best_circle_pixels = pixels
        # make a decision about whether that circle is circly enough
        # cv2.imshow("Image processed", circle)
        # cv2.waitKey()
    # for the strict form of the classifier, I require that all of the detected circles
    # are in fact circles. other classifiers may be more lenient
    return best_circle, best_classification, best_circle_pixels
Example 2: main_svmlight
def main_svmlight():
    # copied:
    import svmlight
    import pdb
    training_data = syntheticData(30, 1)
    test_data = syntheticData(30, 1)
    #training_data = __import__('data').train0
    #test_data = __import__('data').test0
    print 'HERE 0'
    print 'training_data is', training_data
    print 'test_data is', test_data
    # train a model based on the data
    #pdb.set_trace()
    print 'HERE 1'
    model = svmlight.learn(training_data, type='regression', kernelType=2, verbosity=3)
    print 'HERE 2'
    # model data can be stored in the same format SVM-Light uses, for interoperability
    # with the binaries.
    svmlight.write_model(model, 'my_model.dat')
    print 'HERE 3'
    # classify the test data. this function returns a list of numbers, which represent
    # the classifications.
    #predictions = svmlight.classify(model, test_data)
    pdb.set_trace()
    predictions = svmlight.classify(model, training_data)
    print 'HERE 4'
    for p, example in zip(predictions, test_data):
        print 'pred %.8f, actual %.8f' % (p, example[0])
Example 3: predict_proba
def predict_proba(self, data):
    ''' returns the confidence of being included in the positive class '''
    dummy_target = np.zeros(data.shape[0])
    svm_test_data = npToSVMLightFormat(data, dummy_target)
    predictions = svmlight.classify(self.model, svm_test_data)
    return np.array(predictions)
Example 4: predict_proba
def predict_proba(self, X):
    y = np.zeros(X.shape[0]).tolist()
    test_data = self.toSvmlight(X, y)
    scores = np.array(svmlight.classify(self.model, test_data))
    scores = 1 / (1 + np.exp(-scores))   # squash raw margins into (0, 1) pseudo-probabilities
    scores.shape = (len(scores), 1)
    scores = np.hstack([1 - scores, scores])
    return scores
Example 5: run_svm
def run_svm(article_count, feature_functions, kernel='polynomial', split=0.9, model_path='svm.model'):
    # https://bitbucket.org/wcauchois/pysvmlight
    articles, total_token_count = preprocess_wsj(article_count, feature_functions)
    dictionary = Dictionary()
    dictionary.add_one('ZZZZZ')  # so that no features are labeled 0
    data = []
    for article in articles:
        for sentence in article:
            for tag, token_features in zip(sentence.def_tags, sentence.data):
                # only use def / indef tokens
                if tag in ('DEF', 'INDEF'):
                    features = dictionary.add(token_features)
                    features = sorted(list(set(features)))
                    feature_values = zip(features, [1]*len(features))
                    data.append((+1 if tag == 'DEF' else -1, feature_values))
    train, test = bifurcate(data, split, shuffle=True)
    # for corpus, name in [(train, 'train'), (test, 'test')]:
    #     write_svm(corpus, 'wsj_svm-%s.data' % name)

    #####################
    # do svm in Python...
    model = svmlight.learn(train, type='classification', kernel=kernel)
    # svmlight.learn options
    #   type: select between 'classification', 'regression', 'ranking' (preference ranking), and 'optimization'.
    #   kernel: select between 'linear', 'polynomial', 'rbf', and 'sigmoid'.
    #   verbosity: set the verbosity level (default 0).
    #   C: trade-off between training error and margin.
    #   poly_degree: parameter d in polynomial kernel.
    #   rbf_gamma: parameter gamma in rbf kernel.
    #   coef_lin
    #   coef_const
    #   costratio (corresponds to -j option to svm_learn)
    svmlight.write_model(model, model_path)
    gold_labels, test_feature_values = zip(*test)
    # total = len(gold_labels)
    test_pairs = [(0, feature_values) for feature_values in test_feature_values]
    predictions = svmlight.classify(model, test_pairs)
    correct, wrong = matches(
        [(gold > 0) for gold in gold_labels],
        [(prediction > 0) for prediction in predictions])
    return dict(
        total_articles_count=len(articles),   # int
        total_token_count=total_token_count,  # int
        train_count=len(train),               # int
        test_count=len(test),                 # int
        kernel=kernel,
        correct=correct,
        wrong=wrong,
        total=correct + wrong,
    )
Example 6: test
def test(test_data, fmodel_name):
    print ('[ test ] ===================')
    model = svmlight.read_model(fmodel_name)
    # classify the test data. this function returns a list of numbers, which represent
    # the classifications.
    predictions = svmlight.classify(model, test_data)
    for p in predictions:
        print '%.8f' % p
Example 7: runSVMLight
def runSVMLight(trainName, testName, kerneltype, c_param=1.0, gamma_param=1.0, verbosity=0):
    """
    converts data to python format only if not already in python format
    (files in python format are of type list, otherwise they are filenames)
    inputs:  trainName, either the training data in svm-light format or the name of the training data file in LIBSVM/sparse format
             testName, either the test data in svm-light format or the name of the test data file in LIBSVM/sparse format
             kerneltype, (str) the type of kernel (linear, polynomial, sigmoid, rbf, custom)
             c_param, the C parameter (default 1)
             gamma_param, the gamma parameter (default 1)
             verbosity, 0, 1, or 2 for less or more information (default 0)
    outputs: (positiveAccuracy, negativeAccuracy, accuracy)
    """
    if type(trainName) == list:
        trainingData = trainName
    else:
        trainingData = sparseToList(trainName)
    if type(testName) == list:
        testData = testName
    else:
        testData = sparseToList(testName)
    if verbosity == 2:
        print "Training svm......."
    # train a model based on the data
    model = svmlight.learn(trainingData, type='classification', verbosity=2,
                           kernel=kerneltype, C=c_param, rbf_gamma=gamma_param)
    # model data can be stored in the same format SVM-Light uses, for interoperability
    # with the binaries.
    # if type(trainName) == list:
    #     svmlight.write_model(model, time.strftime('%Y-%m-%d-')+datetime.datetime.now().strftime('%H%M%S%f')+'_model.dat')
    # else:
    #     svmlight.write_model(model, trainName[:-4]+'_model.dat')
    if verbosity == 2:
        print "Classifying........"
    # classify the test data. this function returns a list of numbers, which represent
    # the classifications.
    predictions = svmlight.classify(model, testData)
    # for p in predictions:
    #     print '%.8f' % p
    correctLabels = correctLabelRemove(testData)
    # print 'Predictions:'
    # print predictions
    # print 'Correct Labels:'
    # print correctLabels
    return predictionCompare(predictions, correctLabels, verbosity)
Example 8: create_classifications
def create_classifications(models, test_set):
    '''
    For each supplied model, use svm light to classify the
    test_set with that model
    '''
    classifications = {}
    for m in models.keys():
        classifications[m] = svmlight.classify(models[m], test_set)
    return classifications
Example 9: test_model
def test_model(model, ind, n=3):
    test = []
    for i in ind.get_pos_train_ind():
        item = os.listdir("pos")[i]
        test.append((1, [(fmap.getID(item[0]), item[1]) for item in ngrams.ngrams(n, open("pos/"+item).read()).items() if fmap.hasFeature(item[0])]))
    for i in ind.get_neg_test_ind():
        item = os.listdir("neg")[i]
        test.append((-1, [(fmap.getID(item[0]), item[1]) for item in ngrams.ngrams(n, open("neg/"+item).read()).items() if fmap.hasFeature(item[0])]))
    predictions = svmlight.classify(model, test)
    return predictions
Example 10: trainAndTest
def trainAndTest(training, test):
    #trainingNames = [x[0] for x in training]  # never used, but might be someday
    trainingData = [d.dataTuple() for d in training]
    testNames = [d.name for d in test]
    testData = [d.dataTuple() for d in test]
    testLabels = [d.label for d in test]
    model = svmlight.learn(trainingData)
    predictions = svmlight.classify(model, testData)
    return zip(predictions, testLabels, testNames)
Example 11: tsvm_test0
def tsvm_test0():
    # data processing
    data, target = load_svmlight_file('dataset/following.scale')
    data, target = shuffle(data, target)
    target = binarize(target)[:, 0]
    cutoff = int(round(data.shape[0] * 0.8))
    train_data = data[:cutoff]
    train_target = target[:cutoff]
    transductive_train_data = data
    transductive_target = target.copy()
    transductive_target[cutoff:] = 0
    test_data = data[cutoff:]
    test_target = target[cutoff:]
    # convert the data into svmlight format
    svm_train_data = npToSVMLightFormat(train_data, train_target)
    svm_transductive_train_data = npToSVMLightFormat(transductive_train_data,
                                                     transductive_target)
    svm_test_data = npToSVMLightFormat(test_data, test_target)
    print 'labels in the training data'
    print countLabels(svm_transductive_train_data).most_common()
    # svmlight routine
    model = svmlight.learn(svm_train_data,
                           j=3.0, kernel='linear', type='classification', verbosity=0)
    trans_model = svmlight.learn(svm_transductive_train_data,
                                 j=3.0, kernel='linear', type='classification', verbosity=0)
    predictions = svmlight.classify(model, svm_test_data)
    trans_predictions = svmlight.classify(trans_model, svm_test_data)
    print 'inductive learning'
    print accuracy(predictions, test_target)
    print '(recall, precision)', recall_precision(predictions, test_target)
    print 'transductive learning'
    print accuracy(trans_predictions, test_target)
    print '(recall, precision)', recall_precision(trans_predictions, test_target)
Example 12: trainAndTest
def trainAndTest(training, test):
    #trainingNames = [x[0] for x in training]  # never used, but might be someday
    trainingData = [(d[1], d[2]) for d in training]
    testNames = [d[0] for d in test]
    testData = [(d[1], d[2]) for d in test]
    testLabels = [d[1] for d in test]
    model = svm.learn(trainingData)
    predictions = svm.classify(model, testData)
    return zip(predictions, testLabels, testNames)
Example 13: five_fold_validation
def five_fold_validation(training_sets, validation_sets, c_value):
    total_accuracy = 0.0
    for i in range(len(training_sets)):
        model = svmlight.learn(training_sets[i], type='classification', C=c_value)
        classifications = svmlight.classify(model, validation_sets[i])
        predictions = change_to_binary_predictions(classifications)
        accuracy = find_accuracy(validation_sets[i], predictions)
        total_accuracy += accuracy[0]
    return total_accuracy / len(training_sets)
Example 14: predict
def predict(self, X):
    num_data = X.shape[0]
    scores = np.zeros((num_data, self.num_classes_,), dtype=np.float32)
    # score every example under each per-class model (one-vs-rest)
    for i in xrange(self.num_classes_):
        scores[:, i] = svm.classify(
            self.model_[i],
            self.__data2docs(X, np.zeros((num_data,), dtype=np.float32)))
    if self.num_classes_ == 1:
        indices = (scores.ravel() > 0).astype(np.int)
    else:
        indices = scores.argmax(axis=1)
    return self.classes_()[indices]
Example 15: predict
def predict(self, x_test):
    if self.trained != True:
        raise Exception("first train a model")
    x = self.svmlfeaturise(x_test)
    y_score = []
    for j in xrange(len(self.models)):
        m = np.array(svmlight.classify(self.models[j], x))
        y_score.append(m)
    # pick, for each example, the index of the model with the highest score
    y_predicted = np.argmax(y_score, axis=0)
    return y_predicted