This page collects typical usage examples of the Python method shogun.Classifier.SVMLight.set_C. If you are wondering what SVMLight.set_C does, how to use it, or where to find working examples, the curated code samples below should help. You can also explore the containing class shogun.Classifier.SVMLight for further usage examples.
Five code examples of SVMLight.set_C are shown below, ordered by popularity by default. You can vote up the examples you like or find useful; your feedback helps the system recommend better Python code examples.
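A quick orientation before the examples: in Shogun's SVM interface, set_C(C_neg, C_pos) assigns a separate regularization constant to the negative and the positive class. The examples below rely on this for class weighting (Examples 1 and 2) and for normalizing the cost by class size (Examples 3 to 5). The following minimal sketch shows the call in isolation; it is an illustrative sketch against the legacy python-modular Shogun API, with placeholder data and hyperparameters, and it assumes a build with SVMLight support compiled in.

# Minimal, self-contained sketch (assumed legacy Shogun python-modular
# API; data and hyperparameter values are illustrative placeholders).
import numpy
from shogun.Features import RealFeatures, Labels
from shogun.Kernel import GaussianKernel
from shogun.Classifier import SVMLight

X = numpy.random.randn(2, 20)                 # 2 features, 20 examples
y = numpy.array([1.0] * 10 + [-1.0] * 10)     # binary labels in {-1, +1}
feats = RealFeatures(X)
kernel = GaussianKernel(feats, feats, 1.0)    # RBF kernel, width 1.0
svm = SVMLight(1.0, kernel, Labels(y))
# set_C(C_neg, C_pos): one C per class; here positive examples are
# penalized twice as hard, e.g. to counter class imbalance
svm.set_C(1.0, 2.0)
svm.train()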
Example 1: svm_learn
# Required import: from shogun.Classifier import SVMLight [as alias]
# Or: from shogun.Classifier.SVMLight import set_C [as alias]
def svm_learn(kernel, labels, options):
    """Train an SVM using SVMLight or, if unavailable, LibSVM.

    Arguments:
    kernel  -- kernel object from the Shogun toolbox
    labels  -- list of labels
    options -- object containing option data

    Return:
    trained svm object
    """
    try:
        svm = SVMLight(options.svmC, kernel, Labels(numpy.array(labels, dtype=numpy.double)))
    except NameError:
        # SVMLight was not compiled into this Shogun build; fall back to LibSVM
        svm = LibSVM(options.svmC, kernel, Labels(numpy.array(labels, dtype=numpy.double)))
    if not options.quiet:
        svm.io.set_loglevel(MSG_INFO)
        svm.io.set_target_to_stderr()
    svm.set_epsilon(options.epsilon)
    svm.parallel.set_num_threads(1)
    if options.weight != 1.0:
        # class-specific regularization: C_neg first, then C_pos
        svm.set_C(options.svmC, options.svmC * options.weight)
    svm.train()
    if not options.quiet:
        svm.io.set_loglevel(MSG_ERROR)
    return svm
Example 2: svm_learn
# Required import: from shogun.Classifier import SVMLight [as alias]
# Or: from shogun.Classifier.SVMLight import set_C [as alias]
def svm_learn(kernel, labels, svmC, epsilon, weight):
    """Train an SVMLight classifier; return None if SVMLight is unavailable.
    """
    try:
        svm = SVMLight(svmC, kernel, Labels(numpy.array(labels, dtype=numpy.double)))
    except NameError:
        print 'No support for SVMLight available.'
        return
    svm.io.set_loglevel(MSG_INFO)
    svm.io.set_target_to_stderr()
    svm.set_epsilon(epsilon)
    svm.parallel.set_num_threads(1)
    if weight != 1.0:
        # class-specific regularization: C_neg first, then C_pos
        svm.set_C(svmC, svmC * weight)
    svm.train()
    svm.io.set_loglevel(MSG_ERROR)
    return svm
Example 3: _train
# Required import: from shogun.Classifier import SVMLight [as alias]
# Or: from shogun.Classifier.SVMLight import set_C [as alias]
def _train(self, train_data, param):
    """
    training procedure using training examples and labels

    @param train_data: Data relevant to SVM training
    @type train_data: dict<str, list<instances> >
    @param param: Parameters for the training procedure
    @type param: ParameterSvm
    """
    assert param.base_similarity >= 1

    # merge data sets
    data = PreparedMultitaskData(train_data, shuffle=False)

    # create shogun data objects
    base_wdk = shogun_factory.create_kernel(data.examples, param)
    lab = shogun_factory.create_labels(data.labels)

    # set normalizer
    normalizer = MultitaskKernelNormalizer(data.task_vector_nums)

    # load task-distance matrix (first line holds the matrix dimension)
    #f = file("/fml/ag-raetsch/home/cwidmer/Documents/phd/projects/multitask/data/mhc/MHC_Distanzen/MHC_Distanzen/ALL_PseudoSeq_BlosumEnc_pearson.txt")
    f = file("/fml/ag-raetsch/home/cwidmer/Documents/phd/projects/multitask/data/mhc/MHC_Distanzen/MHC_Distanzen/All_PseudoSeq_Hamming.txt")
    #f = file("/fml/ag-raetsch/home/cwidmer/Documents/phd/projects/multitask/data/mhc/MHC_Distanzen/MHC_Distanzen/ALL_PseudoSeq_BlosumEnc_euklid.txt")
    #f = file("/fml/ag-raetsch/home/cwidmer/Documents/phd/projects/multitask/data/mhc/MHC_Distanzen/MHC_Distanzen/ALL_RAxML.txt")
    num_lines = int(f.readline().strip())
    task_distances = numpy.zeros((num_lines, num_lines))
    name_to_id = {}
    for (i, line) in enumerate(f):
        tokens = line.strip().split("\t")
        name = str(tokens[0])
        name_to_id[name] = i
        entry = numpy.array([v for (j, v) in enumerate(tokens) if j != 0])
        assert len(entry) == num_lines, "len_entry %i, num_lines %i" % (len(entry), num_lines)
        task_distances[i, :] = entry

    # cut relevant submatrix
    active_ids = [name_to_id[name] for name in data.get_task_names()]
    tmp_distances = task_distances[active_ids, :]
    tmp_distances = tmp_distances[:, active_ids]
    print "distances ", tmp_distances.shape

    # normalize distances
    task_distances = task_distances / numpy.max(tmp_distances)
    similarities = numpy.zeros((data.get_num_tasks(), data.get_num_tasks()))

    # convert distance to similarity
    for task_name_lhs in data.get_task_names():
        for task_name_rhs in data.get_task_names():
            # convert distance to similarity with a simple linear transformation
            similarity = param.base_similarity - task_distances[name_to_id[task_name_lhs], name_to_id[task_name_rhs]]
            normalizer.set_task_similarity(data.name_to_id(task_name_lhs), data.name_to_id(task_name_rhs), similarity)
            # save for later
            similarities[data.name_to_id(task_name_lhs), data.name_to_id(task_name_rhs)] = similarity

    # set normalizer
    base_wdk.set_normalizer(normalizer)
    base_wdk.init_normalizer()

    # set up svm
    svm = SVMLight(param.cost, base_wdk, lab)
    svm.set_linadd_enabled(False)
    svm.set_batch_computation_enabled(False)

    # normalize cost by class size: C_neg first, then C_pos
    norm_c_pos = param.cost / float(len([l for l in data.labels if l == 1]))
    norm_c_neg = param.cost / float(len([l for l in data.labels if l == -1]))
    svm.set_C(norm_c_neg, norm_c_pos)

    # start training
    svm.train()

    # save additional information
    self.additional_information["svm objective"] = svm.get_objective()
    self.additional_information["num sv"] = svm.get_num_support_vectors()
    #self.additional_information["distances"] = distances
    self.additional_information["similarities"] = similarities

    # wrap up predictors
    svms = {}
#.........the rest of this example is omitted here.........
Example 4: _inner_train
# Required import: from shogun.Classifier import SVMLight [as alias]
# Or: from shogun.Classifier.SVMLight import set_C [as alias]
def _inner_train(self, train_data, param):
    """
    perform inner training by processing the tree
    """
    data_keys = []
    # top-down processing of taxonomy
    classifiers = []
    classifier_at_node = {}
    root = param.taxonomy.data
    grey_nodes = [root]
    while len(grey_nodes) > 0:
        node = grey_nodes.pop(0)  # pop first item
        # enqueue children
        if node.children is not None:
            grey_nodes.extend(node.children)

        #####################################################
        #    init data structures
        #####################################################
        # get data below current node
        data = [train_data[key] for key in node.get_data_keys()]
        data_keys.append(node.get_data_keys())
        print "data at current level"
        for instance_set in data:
            print instance_set[0].dataset
        # initialize containers
        examples = []
        labels = []
        # concatenate data
        for instance_set in data:
            print "train split_set:", instance_set[0].dataset.organism
            for inst in instance_set:
                examples.append(inst.example)
                labels.append(inst.label)
        # create shogun data objects
        k = shogun_factory.create_kernel(examples, param)
        lab = shogun_factory.create_labels(labels)

        #####################################################
        #    train weak learners
        #####################################################
        cost = param.cost
        # set up svm
        svm = SVMLight(cost, k, lab)
        if param.flags["normalize_cost"]:
            # set class-specific Cs
            norm_c_pos = param.cost / float(len([l for l in labels if l == 1]))
            norm_c_neg = param.cost / float(len([l for l in labels if l == -1]))
            svm.set_C(norm_c_neg, norm_c_pos)
            print "using cost: negative class=%f, positive class=%f" % (norm_c_neg, norm_c_pos)
        # enable output
        svm.io.enable_progress()
        svm.io.set_loglevel(shogun.Classifier.MSG_INFO)
        # train
        svm.train()
        # append svm object
        classifiers.append(svm)
        classifier_at_node[node.name] = svm
        # save some information
        self.additional_information[node.name + " svm obj"] = svm.get_objective()
        self.additional_information[node.name + " svm num sv"] = svm.get_num_support_vectors()
        self.additional_information[node.name + " runtime"] = svm.get_runtime()
    return (classifiers, classifier_at_node)
Example 5: _train
# Required import: from shogun.Classifier import SVMLight [as alias]
# Or: from shogun.Classifier.SVMLight import set_C [as alias]
def _train(self, train_data, param):
    """
    training procedure using training examples and labels

    @param train_data: Data relevant to SVM training
    @type train_data: dict<str, list<instances> >
    @param param: Parameters for the training procedure
    @type param: ParameterSvm
    """
    # merge data sets
    data = PreparedMultitaskData(train_data, shuffle=False)

    # create shogun data objects
    base_wdk = shogun_factory.create_kernel(data.examples, param)
    lab = shogun_factory.create_labels(data.labels)

    # set normalizer
    normalizer = MultitaskKernelNormalizer(data.task_vector_nums)

    ########################################################
    print "creating a kernel for each node:"
    ########################################################

    # init sequence handler
    task_kernel = SequencesHandlerRbf(1, param.base_similarity, data.get_task_names(), param.flags["wdk_rbf_on"])
    similarities = numpy.zeros((data.get_num_tasks(), data.get_num_tasks()))

    # compute pairwise task similarities
    for task_name_lhs in data.get_task_names():
        for task_name_rhs in data.get_task_names():
            # query similarity between the two tasks from the sequence handler
            similarity = task_kernel.get_similarity(task_name_lhs, task_name_rhs)
            print similarity
            print "similarity (%s,%s)=%f" % (task_name_lhs, task_name_rhs, similarity)
            normalizer.set_task_similarity(data.name_to_id(task_name_lhs), data.name_to_id(task_name_rhs), similarity)
            # save for later
            similarities[data.name_to_id(task_name_lhs), data.name_to_id(task_name_rhs)] = similarity

    # set normalizer
    base_wdk.set_normalizer(normalizer)
    base_wdk.init_normalizer()

    # set up svm
    svm = SVMLight(param.cost, base_wdk, lab)
    svm.set_linadd_enabled(False)
    svm.set_batch_computation_enabled(False)

    # normalize cost by class size: C_neg first, then C_pos
    norm_c_pos = param.cost / float(len([l for l in data.labels if l == 1]))
    norm_c_neg = param.cost / float(len([l for l in data.labels if l == -1]))
    svm.set_C(norm_c_neg, norm_c_pos)

    # start training
    svm.train()

    # save additional information
    self.additional_information["svm objective"] = svm.get_objective()
    self.additional_information["num sv"] = svm.get_num_support_vectors()
    #self.additional_information["distances"] = distances
    self.additional_information["similarities"] = similarities

    # wrap up predictors: use a reference to the same svm several times
    svms = {}
    for task_name in data.get_task_names():
        task_num = data.name_to_id(task_name)
        # save svm and task_num
        svms[task_name] = (task_num, param, svm)
    return svms