本文整理汇总了Python中shogun.Kernel.CombinedKernel.set_cache_size方法的典型用法代码示例。如果您正苦于以下问题:Python CombinedKernel.set_cache_size方法的具体用法?Python CombinedKernel.set_cache_size怎么用?Python CombinedKernel.set_cache_size使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类shogun.Kernel.CombinedKernel
的用法示例。
在下文中一共展示了CombinedKernel.set_cache_size方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: training_run
# 需要导入模块: from shogun.Kernel import CombinedKernel [as 别名]
# 或者: from shogun.Kernel.CombinedKernel import set_cache_size [as 别名]
def training_run(options):
    """Conduct a training run and return a trained SVM kernel.

    Extracts motif features from positive and negative FASTA/GFF inputs,
    builds a shogun CombinedKernel consisting of one weighted-degree
    position string kernel per motif plus a single Gaussian (RBF) kernel
    over the motif positions, trains a LibSVM classifier on the combined
    kernel, and writes the requested HTML report sections via htmlize().

    Note: this is Python 2 code (relies on dict.values()/keys() returning
    lists and on list.sort() of those results).

    @param options: parsed command-line options; fields read here are
        window_width, replace, positives, pgff, negatives, ngff,
        output_path, output_html, contrib, logos and query
        (presumably an optparse/argparse-style namespace -- confirm
        against the caller)
    """
    settings = MotifFinderSettings(kirmes_ini.MOTIF_LENGTH, options.window_width, options.replace)
    # locate motifs in the positive example sequences
    positives = MotifFinder(finder_settings=settings)
    positives.setFastaFile(options.positives)
    positives.setMotifs(options.pgff)
    pmotifs, ppositions = positives.getResults()
    # ... and in the negative example sequences
    negatives = MotifFinder(finder_settings=settings)
    negatives.setFastaFile(options.negatives)
    negatives.setMotifs(options.ngff)
    nmotifs, npositions = negatives.getResults()
    wds_kparams = kirmes_ini.WDS_KERNEL_PARAMETERS
    wds_svm = EasySVM.EasySVM(wds_kparams)
    # class sizes taken from the first motif's example list
    # (Python 2: dict.values() returns a list, so [0] indexing works)
    num_positives = len(pmotifs.values()[0])
    num_negatives = len(nmotifs.values()[0])
    # Creating Kernel Objects
    kernel = CombinedKernel()
    features = CombinedFeatures()
    kernel_array = []
    # sort motif names so the sub-kernel order is deterministic across runs
    motifs = pmotifs.keys()
    motifs.sort()
    # Adding Kmer Kernels: one weighted-degree string kernel per motif
    for motif in motifs:
        # positives first, then negatives -- must match motif_labels below
        all_examples = pmotifs[motif] + nmotifs[motif]
        motif_features = wds_svm.createFeatures(all_examples)
        wds_kernel = WeightedDegreePositionStringKernel(motif_features, motif_features, wds_kparams["degree"])
        # apply the same shift tolerance at every sequence position
        wds_kernel.set_shifts(wds_kparams["shift"] * ones(wds_kparams["seqlength"], dtype=int32))
        features.append_feature_obj(motif_features)
        kernel_array.append(wds_kernel)
        kernel.append_kernel(wds_kernel)
    # add one RBF kernel over the concatenated motif position matrix
    rbf_svm = EasySVM.EasySVM(kirmes_ini.RBF_KERNEL_PARAMETERS)
    positions = array(ppositions + npositions, dtype=float64).T
    position_features = rbf_svm.createFeatures(positions)
    features.append_feature_obj(position_features)
    # labels: +1 for positives, -1 for negatives, same ordering as the examples
    motif_labels = append(ones(num_positives), -ones(num_negatives))
    complete_labels = Labels(motif_labels)
    rbf_kernel = GaussianKernel(position_features, position_features, kirmes_ini.RBF_KERNEL_PARAMETERS["width"])
    kernel_array.append(rbf_kernel)
    kernel.append_kernel(rbf_kernel)
    # Kernel init: bind lhs/rhs features and size the kernel cache
    kernel.init(features, features)
    kernel.set_cache_size(kirmes_ini.K_CACHE_SIZE)
    svm = LibSVM(kirmes_ini.K_COMBINED_C, kernel, complete_labels)
    svm.parallel.set_num_threads(kirmes_ini.K_NUM_THREADS)
    # Training
    svm.train()
    if not os.path.exists(options.output_path):
        os.mkdir(options.output_path)
    # assemble only the report sections the user asked for, then render
    html = {}
    if options.contrib:
        html["contrib"] = contrib(svm, kernel, motif_labels, kernel_array, motifs)
    if options.logos:
        html["poims"] = poims(svm, kernel, kernel_array, motifs, options.output_path)
    if options.query:
        html["query"] = evaluate(options, svm, kernel, features, motifs)
    htmlize(html, options.output_html)
示例2: _train
# 需要导入模块: from shogun.Kernel import CombinedKernel [as 别名]
# 或者: from shogun.Kernel.CombinedKernel import set_cache_size [as 别名]
#.........这里部分代码省略.........
# hack_block_vec.push_back(Pairii(data.name_to_id("B_2705"), data.name_to_id("B_4001")))
# other_group = ["B_0702", "B_1501", "B_5801"]
# for task_id_1 in other_group:
# for task_id_2 in other_group:
# hack_block_vec.push_back(Pairii(data.name_to_id(task_id_1), data.name_to_id(task_id_2)))
#
#
#
# # create mask-based normalizer
# normalizer_hack = MultitaskKernelMaskPairNormalizer(data.task_vector_nums, hack_block_vec)
# kernel_hack = shogun_factory.create_empty_kernel(param)
# kernel_hack.set_normalizer(normalizer_hack)
#
# # append current kernel to CombinedKernel
# combined_kernel.append_kernel(kernel_hack)
#
# # append features
# combined_features.append_feature_obj(base_features)
##################################################
# init combined kernel
combined_kernel.init(combined_features, combined_features)
#combined_kernel.precompute_subkernels()
self.additional_information["mkl weights before"] = combined_kernel.get_subkernel_weights()
print "subkernel weights:", combined_kernel.get_subkernel_weights()
svm = None
print "using MKL:", (param.flags["mkl_q"] >= 1.0)
if param.flags["mkl_q"] >= 1.0:
svm = MKLClassification()
svm.set_mkl_norm(param.flags["mkl_q"])
svm.set_kernel(combined_kernel)
svm.set_labels(lab)
else:
# create SVM (disable unsupported optimizations)
combined_kernel.set_cache_size(500)
svm = SVMLight(param.cost, combined_kernel, lab)
num_threads = 8
svm.io.enable_progress()
svm.io.set_loglevel(shogun.Classifier.MSG_INFO)
svm.parallel.set_num_threads(num_threads)
svm.set_linadd_enabled(False)
svm.set_batch_computation_enabled(False)
svm.set_epsilon(0.03)
# set cost
if param.flags["normalize_cost"]:
norm_c_pos = param.cost / float(len([l for l in data.labels if l==1]))
norm_c_neg = param.cost / float(len([l for l in data.labels if l==-1]))
svm.set_C(norm_c_neg, norm_c_pos)
else:
svm.set_C(param.cost, param.cost)
svm.train()
print "subkernel weights (after):", combined_kernel.get_subkernel_weights()
########################################################
print "svm objective:"
print svm.get_objective()
self.additional_information["svm_objective"] = svm.get_objective()
self.additional_information["svm num sv"] = svm.get_num_support_vectors()
self.additional_information["mkl weights post-training"] = combined_kernel.get_subkernel_weights()
########################################################
# wrap up predictors
svms = {}
# use a reference to the same svm several times
for task_name in train_data.keys():
svms[task_name] = (data.name_to_id(task_name), combined_kernel, svm, param)
return svms
示例3: _train
# 需要导入模块: from shogun.Kernel import CombinedKernel [as 别名]
# 或者: from shogun.Kernel.CombinedKernel import set_cache_size [as 别名]
#.........这里部分代码省略.........
pos_block_vec.push_back(tmp_pair)
print "creating normalizer"
normalizer_pos = MultitaskKernelMaskPairNormalizer(data.task_vector_nums, pos_block_vec)
print "creating empty kernel"
kernel_pos = shogun_factory.create_empty_kernel(param)
print "setting normalizer"
kernel_pos.set_normalizer(normalizer_pos)
print "appending kernel"
# append current kernel to CombinedKernel
combined_kernel.append_kernel(kernel_pos)
print "appending features"
# append features
combined_features.append_feature_obj(base_features)
print "done constructing combined kernel"
##################################################
# init combined kernel
combined_kernel.init(combined_features, combined_features)
print "subkernel weights:", combined_kernel.get_subkernel_weights()
svm = None
print "using MKL:", (param.transform >= 1.0)
if param.transform >= 1.0:
svm = MKLClassification()
svm.set_mkl_norm(param.transform)
#svm.set_solver_type(ST_CPLEX) #ST_GLPK) #DIRECT) #NEWTON)#ST_CPLEX) #auto
svm.set_C(param.cost, param.cost)
svm.set_kernel(combined_kernel)
svm.set_labels(lab)
else:
# create SVM (disable unsupported optimizations)
combined_kernel.set_cache_size(500)
svm = SVMLight(param.cost, combined_kernel, lab)
# set up SVM
num_threads = 8
svm.io.enable_progress()
#svm.io.set_loglevel(shogun.Classifier.MSG_INFO)
svm.io.set_loglevel(shogun.Classifier.MSG_DEBUG)
svm.parallel.set_num_threads(num_threads)
svm.set_linadd_enabled(False)
svm.set_batch_computation_enabled(False)
print "WARNING: custom epsilon set"
svm.set_epsilon(0.05)
# normalize cost
norm_c_pos = param.cost / float(len([l for l in data.labels if l==1]))
norm_c_neg = param.cost / float(len([l for l in data.labels if l==-1]))
svm.set_C(norm_c_neg, norm_c_pos)
# start training
svm.train()
# save additional info
self.additional_information["svm_objective"] = svm.get_objective()
self.additional_information["svm num sv"] = svm.get_num_support_vectors()
self.additional_information["mkl weights post-training"] = combined_kernel.get_subkernel_weights()
print self.additional_information
# wrap up predictors
svms = {}
# use a reference to the same svm several times
for task_name in train_data.keys():
svms[task_name] = (data.name_to_id(task_name), combined_kernel, svm)
return svms
示例4: _train
# 需要导入模块: from shogun.Kernel import CombinedKernel [as 别名]
# 或者: from shogun.Kernel.CombinedKernel import set_cache_size [as 别名]
#.........这里部分代码省略.........
kernel_pos = shogun_factory.create_empty_kernel(param)
print "setting normalizer"
kernel_pos.set_normalizer(normalizer)
print "appending kernel"
# append current kernel to CombinedKernel
combined_kernel.append_kernel(kernel_pos)
print "appending features"
# append features
combined_features.append_feature_obj(base_features)
print "done constructing combined kernel"
##################################################
# init combined kernel
# init weights
# combined_kernel.set_subkernel_weights([1.0/2.85]*combined_kernel.get_num_subkernels())
combined_kernel.init(combined_features, combined_features)
print "subkernel weights:", combined_kernel.get_subkernel_weights()
svm = None
print "using MKL:", (param.transform >= 1.0)
if param.transform >= 1.0:
svm = MKLClassification()
svm.set_mkl_norm(param.transform)
#svm.set_solver_type(ST_CPLEX) #ST_GLPK) #DIRECT) #NEWTON)#ST_CPLEX) #auto
svm.set_C(param.cost, param.cost)
svm.set_kernel(combined_kernel)
svm.set_labels(lab)
else:
# create SVM (disable unsupported optimizations)
combined_kernel.set_cache_size(500)
svm = SVMLight(param.cost, combined_kernel, lab)
# set up SVM
num_threads = 8
svm.io.enable_progress()
#svm.io.set_loglevel(shogun.Classifier.MSG_INFO)
svm.io.set_loglevel(shogun.Classifier.MSG_DEBUG)
svm.parallel.set_num_threads(num_threads)
svm.set_linadd_enabled(False)
svm.set_batch_computation_enabled(False)
#print "WARNING: custom epsilon set"
#svm.set_epsilon(0.05)
# normalize cost
norm_c_pos = param.cost / float(len([l for l in data.labels if l==1]))
norm_c_neg = param.cost / float(len([l for l in data.labels if l==-1]))
svm.set_C(norm_c_neg, norm_c_pos)
# start training
svm.train()
# save additional info
self.additional_information["svm_objective"] = svm.get_objective()
self.additional_information["svm num sv"] = svm.get_num_support_vectors()
self.additional_information["post_weights"] = combined_kernel.get_subkernel_weights()
print self.additional_information
# wrap up predictors
svms = {}
# use a reference to the same svm several times
for task_name in train_data.keys():
svms[task_name] = (data.name_to_id(task_name), combined_kernel, svm)
return svms
示例5: _train
# 需要导入模块: from shogun.Kernel import CombinedKernel [as 别名]
# 或者: from shogun.Kernel.CombinedKernel import set_cache_size [as 别名]
def _train(self, train_data, param):
"""
training procedure using training examples and labels
@param train_data: Data relevant to SVM training
@type train_data: dict<str, list<instances> >
@param param: Parameters for the training procedure
@type param: ParameterSvm
"""
#numpy.random.seed(1337)
numpy.random.seed(666)
# merge data sets
data = PreparedMultitaskData(train_data, shuffle=True)
# create shogun label
lab = shogun_factory.create_labels(data.labels)
# assemble combined kernel
combined_kernel = CombinedKernel()
combined_kernel.io.set_loglevel(shogun.Kernel.MSG_DEBUG)
# set kernel cache
if param.flags.has_key("cache_size"):
combined_kernel.set_cache_size(param.flags["cache_size"])
# create features
base_features = shogun_factory.create_features(data.examples)
combined_features = CombinedFeatures()
########################################################
print "creating a masked kernel for each node:"
########################################################
# fetch taxonomy from parameter object
taxonomy = param.taxonomy.data
# create name to leaf map
nodes = taxonomy.get_all_nodes()
for node in nodes:
print "creating kernel for ", node.name
# fetch sub-tree
active_task_ids = [data.name_to_id(leaf.name) for leaf in node.get_leaves()]
print "masking all entries other than:", active_task_ids
# create mask-based normalizer
normalizer = MultitaskKernelMaskNormalizer(data.task_vector_nums, data.task_vector_nums, active_task_ids)
# normalize trace
if param.flags.has_key("normalize_trace") and param.flags["normalize_trace"]:
norm_factor = len(node.get_leaves()) / len(active_task_ids)
normalizer.set_normalization_constant(norm_factor)
# create kernel
kernel = shogun_factory.create_empty_kernel(param)
kernel.set_normalizer(normalizer)
# append current kernel to CombinedKernel
combined_kernel.append_kernel(kernel)
# append features
combined_features.append_feature_obj(base_features)
print "------"
combined_kernel.init(combined_features, combined_features)
#combined_kernel.precompute_subkernels()
print "subkernel weights:", combined_kernel.get_subkernel_weights()
svm = None
print "using MKL:", (param.flags["mkl_q"] >= 1.0)
if param.flags["mkl_q"] >= 1.0:
# set up MKL
svm = MKLClassification()
# set the "q" in q-norm MKL
svm.set_mkl_norm(param.flags["mkl_q"])
# set interleaved optimization
#.........这里部分代码省略.........