This page collects typical usage examples of the SVMLight class from Python's shogun.Classifier module. If you are unsure what the SVMLight class does, how to use it, or what working code looks like, the curated examples below should help.
15 code examples of the SVMLight class are shown below, sorted by popularity by default.
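Before the individual examples, here is the basic pattern that nearly all of them share: build string features, construct a kernel over the training features, train an SVMLight object, then re-initialize the kernel against the test features and apply the classifier. The following is a minimal, self-contained sketch of that pattern under the old shogun modular API used on this page; the toy DNA strings and labels are illustrative, not taken from any one example:

import numpy
from shogun.Features import StringCharFeatures, Labels, DNA
from shogun.Kernel import WeightedDegreeStringKernel
from shogun.Classifier import SVMLight

# toy data: equal-length DNA strings and +/-1 labels (illustrative only)
train_dna = ['ACGTACGTACGT', 'ACGTTGCAACGT', 'TTGCAACGTACG', 'ACGTACGTTGCA']
test_dna  = ['ACGTACGTACGA', 'TTGCAACGTACG']
train_lab = numpy.array([-1.0, -1.0, 1.0, 1.0])

feats_train = StringCharFeatures(train_dna, DNA)
feats_test  = StringCharFeatures(test_dna, DNA)

kernel = WeightedDegreeStringKernel(feats_train, feats_train, 3)  # degree 3
svm = SVMLight(1.0, kernel, Labels(train_lab))                    # C = 1.0
svm.train()

kernel.init(feats_train, feats_test)   # point the kernel at train-vs-test
out = svm.apply().get_labels()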
Example 1: classifier_domainadaptationsvm_modular
def classifier_domainadaptationsvm_modular(fm_train_dna=traindna, fm_test_dna=testdna,
                                           label_train_dna=label_traindna,
                                           label_test_dna=label_testdna,
                                           fm_train_dna2=traindna2, fm_test_dna2=testdna2,
                                           label_train_dna2=label_traindna2,
                                           label_test_dna2=label_testdna2,
                                           C=1, degree=3):
    from shogun.Features import StringCharFeatures, Labels, DNA
    from shogun.Kernel import WeightedDegreeStringKernel
    from shogun.Classifier import SVMLight, DomainAdaptationSVM

    feats_train = StringCharFeatures(fm_train_dna, DNA)
    feats_test = StringCharFeatures(fm_test_dna, DNA)
    kernel = WeightedDegreeStringKernel(feats_train, feats_train, degree)

    labels = Labels(label_train_dna)
    svm = SVMLight(C, kernel, labels)
    svm.train()
    #svm.io.set_loglevel(MSG_DEBUG)

    #####################################
    # obtain DA SVM from the previously trained SVM
    # (note: the *_dna2 arguments are accepted but unused in this example)
    feats_train2 = StringCharFeatures(fm_train_dna, DNA)
    feats_test2 = StringCharFeatures(fm_test_dna, DNA)
    kernel2 = WeightedDegreeStringKernel(feats_train, feats_train, degree)
    labels2 = Labels(label_train_dna)

    # we regularize against the previously obtained solution
    dasvm = DomainAdaptationSVM(C, kernel2, labels2, svm, 1.0)
    dasvm.train()

    out = dasvm.apply(feats_test2).get_labels()

    return out  #, dasvm  TODO
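The fifth constructor argument of DomainAdaptationSVM (1.0 above) is the trade-off that controls how strongly the new solution is regularized toward the previously trained svm. A hedged sketch of sweeping that trade-off, reusing the objects from the example (the B values are illustrative):

# assumes C, kernel2, labels2, svm and feats_test2 from the example above
for B in [0.0, 0.5, 1.0, 2.0]:
    dasvm = DomainAdaptationSVM(C, kernel2, labels2, svm, B)
    dasvm.train()
    out = dasvm.apply(feats_test2).get_labels()
    print("B = %.1f -> predictions: %s" % (B, out[:3]))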
Example 2: classifier_svmlight_linear_term_modular
def classifier_svmlight_linear_term_modular(fm_train_dna=traindna, fm_test_dna=testdna,
                                            label_train_dna=label_traindna, degree=3,
                                            C=10, epsilon=1e-5, num_threads=1):
    from shogun.Features import StringCharFeatures, BinaryLabels, DNA
    from shogun.Kernel import WeightedDegreeStringKernel
    from shogun.Classifier import SVMLight
    import numpy

    feats_train = StringCharFeatures(DNA)
    feats_train.set_features(fm_train_dna)
    feats_test = StringCharFeatures(DNA)
    feats_test.set_features(fm_test_dna)

    kernel = WeightedDegreeStringKernel(feats_train, feats_train, degree)

    labels = BinaryLabels(label_train_dna)

    svm = SVMLight(C, kernel, labels)
    svm.set_qpsize(3)
    svm.set_linear_term(-numpy.array([1, 2, 3, 4, 5, 6, 7, 8, 7, 6], dtype=numpy.double))
    svm.set_epsilon(epsilon)
    svm.parallel.set_num_threads(num_threads)
    svm.train()

    kernel.init(feats_train, feats_test)
    out = svm.apply().get_labels()
    return out, kernel
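Two calls here go beyond the plain training recipe: set_qpsize(3) shrinks the working set of the QP solver, and set_linear_term replaces the uniform linear part of the SVM objective with a custom vector, which must contain one entry per training example (ten in this dataset). A hedged pre-flight check for that length constraint (illustrative, not part of the original example):

# sanity check before set_linear_term: one entry per training example
linear_term = -numpy.array([1, 2, 3, 4, 5, 6, 7, 8, 7, 6], dtype=numpy.double)
assert len(linear_term) == feats_train.get_num_vectors(), \
    "linear term length must equal the number of training examples"
svm.set_linear_term(linear_term)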
Example 3: svm_learn
def svm_learn(kernel, labels, options):
    """train SVM using SVMLight or LibSVM

    Arguments:
    kernel -- kernel object from Shogun toolbox
    labels -- list of labels
    options -- object containing option data

    Return:
    trained svm object
    """
    try:
        svm = SVMLight(options.svmC, kernel,
                       Labels(numpy.array(labels, dtype=numpy.double)))
    except NameError:
        svm = LibSVM(options.svmC, kernel,
                     Labels(numpy.array(labels, dtype=numpy.double)))

    if not options.quiet:
        svm.io.set_loglevel(MSG_INFO)
        svm.io.set_target_to_stderr()

    svm.set_epsilon(options.epsilon)
    svm.parallel.set_num_threads(1)
    if options.weight != 1.0:
        svm.set_C(options.svmC, options.svmC * options.weight)
    svm.train()

    if not options.quiet:
        svm.io.set_loglevel(MSG_ERROR)

    return svm
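svm_learn only reads four attributes from options (svmC, epsilon, weight, quiet), so any object exposing them works. A hypothetical invocation, with illustrative values:

from types import SimpleNamespace

# kernel and labels built as in the other examples on this page
options = SimpleNamespace(svmC=1.0, epsilon=1e-5, weight=2.0, quiet=True)
svm = svm_learn(kernel, labels, options)
print(svm.get_num_support_vectors())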
Example 4: ShogunPredictor
class ShogunPredictor(object):
    """
    basic single-task promoter model using string kernels
    """

    def __init__(self, degree=4, shifts=32, kernel_cache=10000, cost=1.0):
        #TODO: clean up degree
        self.degree = degree
        self.degree_wdk = degree
        self.degree_spectrum = degree
        self.shifts = shifts
        self.kernel_cache = kernel_cache
        self.cost = cost
        self.center_offset = 50
        self.center_pos = 1200
        self.epsilon = 10e-2
        self.num_threads = 4

    def train(self, data, labels):
        kernel = create_promoter_kernel(data, self.center_offset, self.center_pos,
                                        self.degree_wdk, self.degree_spectrum,
                                        self.shifts, kernel_cache=self.kernel_cache)

        print("len(labels) = %i" % len(labels))
        lab = create_labels(labels)

        self.svm = SVMLight(self.cost, kernel, lab)

        # show debugging output
        self.svm.io.enable_progress()
        self.svm.io.set_loglevel(MSG_DEBUG)

        # optimization settings
        num_threads = self.num_threads
        self.svm.parallel.set_num_threads(num_threads)
        self.svm.set_epsilon(self.epsilon)

        self.svm.train()

        return self

    def predict(self, data):
        feat = create_promoter_features(data, self.center_offset, self.center_pos)
        out = self.svm.apply(feat).get_values()
        return out
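create_promoter_kernel, create_labels and create_promoter_features are project-specific helpers that are not shown on this page, so ShogunPredictor is not runnable stand-alone. Assuming those helpers exist, usage would look like this (hypothetical):

# data / test_data: promoter sequences; labels: +/-1 array (assumed shapes)
model = ShogunPredictor(degree=4, shifts=32, cost=1.0)
model.train(data, labels)
scores = model.predict(test_data)   # real-valued outputs via get_values()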
Example 5: svm_light
def svm_light():
    print('SVMLight')
    from shogun.Features import StringCharFeatures, Labels, DNA
    from shogun.Kernel import WeightedDegreeStringKernel
    try:
        from shogun.Classifier import SVMLight
    except ImportError:
        print('No support for SVMLight available.')
        return

    feats_train = StringCharFeatures(DNA)
    feats_train.set_features(fm_train_dna)
    feats_test = StringCharFeatures(DNA)
    feats_test.set_features(fm_test_dna)
    degree = 20

    kernel = WeightedDegreeStringKernel(feats_train, feats_train, degree)

    C = 1.2
    epsilon = 1e-5
    num_threads = 1
    labels = Labels(label_train_dna)

    svm = SVMLight(C, kernel, labels)
    svm.set_epsilon(epsilon)
    svm.parallel.set_num_threads(num_threads)
    svm.train()

    kernel.init(feats_train, feats_test)
    svm.classify().get_labels()
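classify() is the prediction call of older shogun releases; examples 2 and 6 use the newer name apply() for the same train-then-test pattern. If the target shogun version is unknown, a tolerant sketch could fall back from one name to the other (illustrative):

# prefer apply() if the installed shogun provides it, else use classify()
predict = svm.apply if hasattr(svm, 'apply') else svm.classify
out = predict().get_labels()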
Example 6: classifier_svmlight_modular
def classifier_svmlight_modular(fm_train_dna=traindat, fm_test_dna=testdat,
                                label_train_dna=label_traindat,
                                C=1.2, epsilon=1e-5, num_threads=1):
    from shogun.Features import StringCharFeatures, Labels, DNA
    from shogun.Kernel import WeightedDegreeStringKernel
    try:
        from shogun.Classifier import SVMLight
    except ImportError:
        print('No support for SVMLight available.')
        return

    feats_train = StringCharFeatures(DNA)
    feats_train.set_features(fm_train_dna)
    feats_test = StringCharFeatures(DNA)
    feats_test.set_features(fm_test_dna)
    degree = 20

    kernel = WeightedDegreeStringKernel(feats_train, feats_train, degree)

    labels = Labels(label_train_dna)

    svm = SVMLight(C, kernel, labels)
    svm.set_epsilon(epsilon)
    svm.parallel.set_num_threads(num_threads)
    svm.train()

    kernel.init(feats_train, feats_test)
    svm.apply().get_labels()
    return kernel
Example 7: _train_single_svm
def _train_single_svm(self, param, kernel, lab):
    kernel.set_cache_size(500)
    #lab = shogun_factory.create_labels(data.labels)
    svm = SVMLight(param.cost, kernel, lab)

    # set up SVM
    num_threads = 8
    svm.io.enable_progress()
    svm.io.set_loglevel(shogun.Classifier.MSG_DEBUG)

    svm.parallel.set_num_threads(num_threads)
    svm.set_linadd_enabled(False)
    svm.set_batch_computation_enabled(False)

    # normalize cost
    #norm_c_pos = param.cost / float(len([l for l in data.labels if l==1]))
    #norm_c_neg = param.cost / float(len([l for l in data.labels if l==-1]))
    #svm.set_C(norm_c_neg, norm_c_pos)

    # start training
    svm.train()

    return svm
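This helper, like examples 12 to 14 below, turns off SVMLight's linadd and batch-computation shortcuts, forcing exact kernel evaluation; judging from those examples, the shortcuts are avoided whenever custom kernel normalizers are in play. A small hedged helper capturing the recurring setup (the name is illustrative):

def make_exact_svmlight(cost, kernel, labels, num_threads=1):
    # SVMLight with all evaluation shortcuts disabled (assumes SVMLight imported)
    svm = SVMLight(cost, kernel, labels)
    svm.set_linadd_enabled(False)             # no linadd speedup
    svm.set_batch_computation_enabled(False)  # no batched evaluation
    svm.parallel.set_num_threads(num_threads)
    return svm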
Example 8: train
def train(self, data, labels):
    """
    model training
    """
    # centered WDK/WDK-shift
    if self.param["shifts"] == 0:
        kernel_center = WeightedDegreeStringKernel(self.param["degree"])
    else:
        kernel_center = WeightedDegreePositionStringKernel(10, self.param["degree"])
        shifts_vector = numpy.ones(self.param["center_offset"]*2,
                                   dtype=numpy.int32) * self.param["shifts"]
        kernel_center.set_shifts(shifts_vector)

    kernel_center.set_cache_size(self.param["kernel_cache"] / 3)

    # border spectrum kernels
    size = self.param["kernel_cache"] / 3
    use_sign = False
    kernel_left = WeightedCommWordStringKernel(size, use_sign)
    kernel_right = WeightedCommWordStringKernel(size, use_sign)

    # assemble combined kernel
    kernel = CombinedKernel()
    kernel.append_kernel(kernel_center)
    kernel.append_kernel(kernel_left)
    kernel.append_kernel(kernel_right)

    ## building features
    feat = create_features(data, self.param["center_offset"], self.param["center_pos"])

    # init combined kernel
    kernel.init(feat, feat)

    print("len(labels) = %i" % len(labels))
    lab = BinaryLabels(numpy.double(labels))

    self.svm = SVMLight(self.param["cost"], kernel, lab)

    # show debugging output
    self.svm.io.enable_progress()
    self.svm.io.set_loglevel(MSG_DEBUG)

    # optimization settings
    num_threads = 2
    self.svm.parallel.set_num_threads(num_threads)
    self.svm.set_epsilon(10e-8)

    self.svm.train()

    return self
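A CombinedKernel must be initialized with one feature object per appended subkernel, so create_features presumably returns a CombinedFeatures holding three entries in matching order. A minimal hedged sketch of that pairing (f_center, f_left and f_right are placeholder feature objects):

from shogun.Features import CombinedFeatures

feat = CombinedFeatures()
feat.append_feature_obj(f_center)  # paired with kernel_center
feat.append_feature_obj(f_left)    # paired with kernel_left
feat.append_feature_obj(f_right)   # paired with kernel_right
kernel.init(feat, feat)            # subkernels and features line up by position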
Example 9: train
def train(self, data, labels):
    kernel = create_promoter_kernel(data, self.center_offset, self.center_pos,
                                    self.degree_wdk, self.degree_spectrum,
                                    self.shifts, kernel_cache=self.kernel_cache)

    print("len(labels) = %i" % len(labels))
    lab = create_labels(labels)

    self.svm = SVMLight(self.cost, kernel, lab)

    # show debugging output
    self.svm.io.enable_progress()
    self.svm.io.set_loglevel(MSG_DEBUG)

    # optimization settings
    num_threads = self.num_threads
    self.svm.parallel.set_num_threads(num_threads)
    self.svm.set_epsilon(self.epsilon)

    self.svm.train()

    return self
Example 10: svm_learn
def svm_learn(kernel, labels, svmC, epsilon, weight):
    """
    train an SVMLight classifier with class-weighted cost
    """
    try:
        svm = SVMLight(svmC, kernel,
                       Labels(numpy.array(labels, dtype=numpy.double)))
    except NameError:
        print('No support for SVMLight available.')
        return

    svm.io.set_loglevel(MSG_INFO)
    svm.io.set_target_to_stderr()
    svm.set_epsilon(epsilon)
    svm.parallel.set_num_threads(1)
    if weight != 1.0:
        svm.set_C(svmC, svmC * weight)
    svm.train()
    svm.io.set_loglevel(MSG_ERROR)
    return svm
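set_C(C_neg, C_pos) assigns separate costs to the two classes; scaling only the second argument by weight makes errors on positive examples more expensive, the usual remedy for unbalanced data. A hypothetical call with illustrative numbers:

# up-weight positives 5x on an unbalanced problem;
# internally this runs svm.set_C(1.0, 5.0)
svm = svm_learn(kernel, labels, svmC=1.0, epsilon=1e-5, weight=5.0)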
Example 11: serialization_svmlight_modular
def serialization_svmlight_modular(num, dist, width, C):
    from shogun.IO import MSG_DEBUG
    from shogun.Features import RealFeatures, BinaryLabels, DNA, Alphabet
    from shogun.Kernel import WeightedDegreeStringKernel, GaussianKernel
    from shogun.Classifier import SVMLight
    from numpy import concatenate, ones
    from numpy.random import randn, seed
    import sys
    import types
    import random
    import bz2
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
    import inspect

    def save(filename, myobj):
        """
        save object to file using pickle

        @param filename: name of destination file
        @type filename: str
        @param myobj: object to save (has to be pickleable)
        @type myobj: obj
        """
        try:
            f = bz2.BZ2File(filename, "wb")
        except IOError as details:
            sys.stderr.write("File " + filename + " cannot be written\n")
            sys.stderr.write(str(details))
            return

        pickle.dump(myobj, f, protocol=2)
        f.close()

    def load(filename):
        """
        Load from filename using pickle

        @param filename: name of file to load from
        @type filename: str
        """
        try:
            f = bz2.BZ2File(filename, "rb")
        except IOError as details:
            sys.stderr.write("File " + filename + " cannot be read\n")
            sys.stderr.write(str(details))
            return

        myobj = pickle.load(f)
        f.close()
        return myobj

    ##################################################
    # set up toy data and svm
    traindata_real = concatenate((randn(2, num) - dist, randn(2, num) + dist), axis=1)
    testdata_real = concatenate((randn(2, num) - dist, randn(2, num) + dist), axis=1)

    trainlab = concatenate((-ones(num), ones(num)))
    testlab = concatenate((-ones(num), ones(num)))

    feats_train = RealFeatures(traindata_real)
    feats_test = RealFeatures(testdata_real)
    kernel = GaussianKernel(feats_train, feats_train, width)
    # kernel.io.set_loglevel(MSG_DEBUG)

    labels = BinaryLabels(trainlab)

    svm = SVMLight(C, kernel, labels)
    svm.train()
    # svm.io.set_loglevel(MSG_DEBUG)

    ##################################################
    # serialize to file
    fn = "serialized_svm.bz2"
    # print("serializing SVM to file", fn)
    save(fn, svm)

    ##################################################
    # unserialize and sanity check
    # print("unserializing SVM")
    svm2 = load(fn)

    # print("comparing objectives")
    svm2.train()
    # print("objective before serialization:", svm.get_objective())
    # print("objective after serialization:", svm2.get_objective())
    # print("comparing predictions")
#......... part of the code omitted .........
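The tail of this example is cut off above, so the exact comparison it performs is not recoverable. After such a pickle round trip, a natural sanity check is that retraining the reloaded SVM reproduces the original dual objective; a hedged sketch of such a check (not the original code, threshold illustrative):

# both objects were trained on the same data, so their objectives
# should agree up to solver tolerance
assert abs(svm.get_objective() - svm2.get_objective()) < 1e-6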
Example 12: _train
def _train(self, train_data, param):
    """
    training procedure using training examples and labels

    @param train_data: Data relevant to SVM training
    @type train_data: dict<str, list<instances> >
    @param param: Parameters for the training procedure
    @type param: ParameterSvm
    """
    assert param.base_similarity >= 1

    # merge data sets
    data = PreparedMultitaskData(train_data, shuffle=False)

    # create shogun data objects
    base_wdk = shogun_factory.create_kernel(data.examples, param)
    lab = shogun_factory.create_labels(data.labels)

    # set normalizer
    normalizer = MultitaskKernelNormalizer(data.task_vector_nums)

    # load data
    #f = open("/fml/ag-raetsch/home/cwidmer/Documents/phd/projects/multitask/data/mhc/MHC_Distanzen/MHC_Distanzen/ALL_PseudoSeq_BlosumEnc_pearson.txt")
    f = open("/fml/ag-raetsch/home/cwidmer/Documents/phd/projects/multitask/data/mhc/MHC_Distanzen/MHC_Distanzen/All_PseudoSeq_Hamming.txt")
    #f = open("/fml/ag-raetsch/home/cwidmer/Documents/phd/projects/multitask/data/mhc/MHC_Distanzen/MHC_Distanzen/ALL_PseudoSeq_BlosumEnc_euklid.txt")
    #f = open("/fml/ag-raetsch/home/cwidmer/Documents/phd/projects/multitask/data/mhc/MHC_Distanzen/MHC_Distanzen/ALL_RAxML.txt")

    num_lines = int(f.readline().strip())
    task_distances = numpy.zeros((num_lines, num_lines))
    name_to_id = {}

    for (i, line) in enumerate(f):
        tokens = line.strip().split("\t")
        name = str(tokens[0])
        name_to_id[name] = i
        entry = numpy.array(tokens[1:], dtype=numpy.double)
        assert len(entry) == num_lines, "len_entry %i, num_lines %i" % (len(entry), num_lines)
        task_distances[i, :] = entry

    # cut relevant submatrix
    active_ids = [name_to_id[name] for name in data.get_task_names()]
    tmp_distances = task_distances[active_ids, :]
    tmp_distances = tmp_distances[:, active_ids]
    print("distances ", tmp_distances.shape)

    # normalize distances
    task_distances = task_distances / numpy.max(tmp_distances)

    similarities = numpy.zeros((data.get_num_tasks(), data.get_num_tasks()))

    # convert distance to similarity
    for task_name_lhs in data.get_task_names():
        for task_name_rhs in data.get_task_names():
            # convert similarity with simple transformation
            similarity = param.base_similarity - task_distances[name_to_id[task_name_lhs], name_to_id[task_name_rhs]]
            normalizer.set_task_similarity(data.name_to_id(task_name_lhs),
                                           data.name_to_id(task_name_rhs), similarity)
            # save for later
            similarities[data.name_to_id(task_name_lhs), data.name_to_id(task_name_rhs)] = similarity

    # set normalizer
    base_wdk.set_normalizer(normalizer)
    base_wdk.init_normalizer()

    # set up svm
    svm = SVMLight(param.cost, base_wdk, lab)
    svm.set_linadd_enabled(False)
    svm.set_batch_computation_enabled(False)

    # normalize cost
    norm_c_pos = param.cost / float(len([l for l in data.labels if l == 1]))
    norm_c_neg = param.cost / float(len([l for l in data.labels if l == -1]))
    svm.set_C(norm_c_neg, norm_c_pos)

    # start training
    svm.train()

    # save additional information
    self.additional_information["svm objective"] = svm.get_objective()
    self.additional_information["num sv"] = svm.get_num_support_vectors()
    #self.additional_information["distances"] = distances
    self.additional_information["similarities"] = similarities

    # wrap up predictors
    svms = {}
#......... part of the code omitted .........
Example 13: SVMLight
##################################################################
# Train SVMs
##################################################################

# create shogun objects
wdk_tree = shogun_factory.create_kernel(data.examples, param)
lab = shogun_factory.create_labels(data.labels)

wdk_tree.set_normalizer(tree_normalizer)
wdk_tree.init_normalizer()

print("--->", wdk_tree.get_normalizer().get_name())

svm_tree = SVMLight(cost, wdk_tree, lab)
svm_tree.set_linadd_enabled(False)
svm_tree.set_batch_computation_enabled(False)

svm_tree.train()

del wdk_tree
del tree_normalizer

print("finished training tree-norm SVM:", svm_tree.get_objective())

wdk = shogun_factory.create_kernel(data.examples, param)
wdk.set_normalizer(normalizer)
wdk.init_normalizer()
Example 14: test_data
#......... part of the code omitted .........
taxonomy = shogun_factory.create_taxonomy(mss.taxonomy.data)

support = numpy.linspace(0, 100, 4)
distances = [[0, 1, 2, 2], [1, 0, 2, 2], [2, 2, 0, 1], [2, 2, 1, 0]]

# create tree normalizer
tree_normalizer = MultitaskKernelPlifNormalizer(support, data.task_vector_names)

task_names = data.get_task_names()
FACTOR = 1.0

# init gamma matrix
gammas = numpy.zeros((data.get_num_tasks(), data.get_num_tasks()))

for t1_name in task_names:
    for t2_name in task_names:
        similarity = taxonomy.compute_node_similarity(taxonomy.get_id(t1_name),
                                                      taxonomy.get_id(t2_name))
        gammas[data.name_to_id(t1_name), data.name_to_id(t2_name)] = similarity

helper.save("/tmp/gammas", gammas)

gammas = gammas * FACTOR
cost = param.cost * numpy.sqrt(FACTOR)

print(gammas)

##########
# regular normalizer
normalizer = MultitaskKernelNormalizer(data.task_vector_nums)

for t1_name in task_names:
    for t2_name in task_names:
        similarity = gammas[data.name_to_id(t1_name), data.name_to_id(t2_name)]
        normalizer.set_task_similarity(data.name_to_id(t1_name),
                                       data.name_to_id(t2_name), similarity)

##################################################################
# Train SVMs
##################################################################

# create shogun objects
wdk_tree = shogun_factory.create_kernel(data.examples, param)
lab = shogun_factory.create_labels(data.labels)

wdk_tree.set_normalizer(tree_normalizer)
wdk_tree.init_normalizer()

print("--->", wdk_tree.get_normalizer().get_name())

svm_tree = SVMLight(cost, wdk_tree, lab)
svm_tree.set_linadd_enabled(False)
svm_tree.set_batch_computation_enabled(False)

svm_tree.train()

del wdk_tree
del tree_normalizer

print("finished training tree-norm SVM:", svm_tree.get_objective())

wdk = shogun_factory.create_kernel(data.examples, param)
wdk.set_normalizer(normalizer)
wdk.init_normalizer()

print("--->", wdk.get_normalizer().get_name())

svm = SVMLight(cost, wdk, lab)
svm.set_linadd_enabled(False)
svm.set_batch_computation_enabled(False)

svm.train()

print("finished training manually set SVM:", svm.get_objective())

alphas_tree = svm_tree.get_alphas()
alphas = svm.get_alphas()

assert len(alphas_tree) == len(alphas)

for i in range(len(alphas)):
    assert abs(alphas_tree[i] - alphas[i]) < 0.0001

print("success: all alphas are the same")
Example 15: do_batch_linadd
def do_batch_linadd():
    print('SVMlight batch')
    from shogun.Features import StringCharFeatures, Labels, DNA
    from shogun.Kernel import WeightedDegreeStringKernel
    try:
        from shogun.Classifier import SVMLight
    except ImportError:
        print('No support for SVMLight available.')
        return

    feats_train = StringCharFeatures(DNA)
    feats_train.set_features(fm_train_dna)
    feats_test = StringCharFeatures(DNA)
    feats_test.set_features(fm_test_dna)
    degree = 20

    kernel = WeightedDegreeStringKernel(feats_train, feats_train, degree)

    C = 1
    epsilon = 1e-5
    num_threads = 2
    labels = Labels(label_train_dna)

    svm = SVMLight(C, kernel, labels)
    svm.set_epsilon(epsilon)
    svm.parallel.set_num_threads(num_threads)
    svm.train()

    kernel.init(feats_train, feats_test)

    #print('SVMLight Objective: %f num_sv: %d' % \
    #      (svm.get_objective(), svm.get_num_support_vectors()))
    svm.set_batch_computation_enabled(False)
    svm.set_linadd_enabled(False)
    svm.classify().get_labels()

    svm.set_batch_computation_enabled(True)
    svm.classify().get_labels()
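The example computes predictions twice, once with batch computation and linadd disabled and once with batch computation re-enabled; the implied point is that both evaluation paths return the same labels. A hedged sketch making that comparison explicit (assumes numpy is imported):

out_batch = svm.classify().get_labels()   # batch computation still enabled
svm.set_batch_computation_enabled(False)
out_exact = svm.classify().get_labels()   # exact evaluation path
assert numpy.allclose(out_batch, out_exact)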