This article collects typical usage examples of the Python method shogun.Kernel.CombinedKernel.obtain_from_generic. If you have been wondering what CombinedKernel.obtain_from_generic does, how to use it, or what working code looks like, the curated example below may help. You can also read more about the containing class, shogun.Kernel.CombinedKernel.
The section below shows 1 code example of CombinedKernel.obtain_from_generic, sorted by popularity by default.
Example 1: kernel_choice_linear_time_mmd_comb
# Required import: from shogun.Kernel import CombinedKernel [as alias]
# Alternatively: from shogun.Kernel.CombinedKernel import obtain_from_generic [as alias]
def kernel_choice_linear_time_mmd_comb():
    from shogun.Features import RealFeatures
    from shogun.Features import GaussianBlobsDataGenerator
    from shogun.Kernel import GaussianKernel, CombinedKernel
    from shogun.Statistics import LinearTimeMMD
    from shogun.Statistics import MMDKernelSelectionCombMaxL2
    from shogun.Statistics import MMDKernelSelectionCombOpt
    from shogun.Statistics import BOOTSTRAP, MMD1_GAUSSIAN
    from shogun.Distance import EuclideanDistance
    from shogun.Mathematics import Statistics, Math
    from numpy import pi, mean  # pi and mean are used below but were missing from the imports
    # note that the linear time statistic is designed for much larger datasets;
    # results for this low number of samples will be bad (unstable, type I error wrong)
    m=1000
    distance=10
    stretch=5
    num_blobs=3
    angle=pi/4

    # streaming data generators for p and q
    gen_p=GaussianBlobsDataGenerator(num_blobs, distance, 1, 0)
    gen_q=GaussianBlobsDataGenerator(num_blobs, distance, stretch, angle)

    # stream some data and plot
    num_plot=1000
    features=gen_p.get_streamed_features(num_plot)
    features=features.create_merged_copy(gen_q.get_streamed_features(num_plot))
    data=features.get_feature_matrix()
    #figure()
    #subplot(2,2,1)
    #grid(True)
    #plot(data[0][0:num_plot], data[1][0:num_plot], 'r.', label='$x$')
    #title('$X\sim p$')
    #subplot(2,2,2)
    #grid(True)
    #plot(data[0][num_plot+1:2*num_plot], data[1][num_plot+1:2*num_plot], 'b.', label='$x$', alpha=0.5)
    #title('$Y\sim q$')
    # create combined kernel with Gaussian kernels inside (note that shogun's
    # Gaussian kernel is parametrised by width=2*sigma^2 rather than by sigma;
    # see the documentation)
    sigmas=[2**x for x in range(-3,10)]
    widths=[x*x*2 for x in sigmas]
    combined=CombinedKernel()
    for i in range(len(sigmas)):
        combined.append_kernel(GaussianKernel(10, widths[i]))
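    # the CombinedKernel evaluates a non-negatively weighted sum of its
    # subkernels, k(x,y)=sum_i w_i*k_i(x,y); these weights w_i are exactly
    # what the kernel selection below chooses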
    # mmd instance using streaming features, blocksize of 10000
    block_size=10000
    mmd=LinearTimeMMD(combined, gen_p, gen_q, m, block_size)
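    # LinearTimeMMD streams m samples per distribution in chunks of block_size
    # and averages over them, so the estimate costs O(m) time and constant
    # memory, and its null distribution is asymptotically normal (which is
    # what the MMD1_GAUSSIAN approximation further down exploits)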
    # kernel selection instance (this can easily be replaced by the other
    # methods for selecting combined kernels)
    selection=MMDKernelSelectionCombOpt(mmd)
    #selection=MMDKernelSelectionCombMaxL2(mmd)
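    # CombOpt selects the subkernel weights that maximise the ratio of the MMD
    # estimate to its standard deviation (the optimal test power criterion of
    # Gretton et al., 2012), while CombMaxL2 maximises the MMD statistic itself
    # subject to an L2 constraint on the weights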
    # perform kernel selection (kernel is automatically set)
    kernel=selection.select_kernel()
    kernel=CombinedKernel.obtain_from_generic(kernel)
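    # select_kernel() returns a handle of the base Kernel type;
    # obtain_from_generic downcasts it back to CombinedKernel so that
    # combined-kernel methods such as get_subkernel_weights() can be called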
print "selected kernel weights:", kernel.get_subkernel_weights()
    #subplot(2,2,3)
    #plot(kernel.get_subkernel_weights())
    #title("Kernel weights")
    # compute type I and II error (use many more trials in practice). Type I
    # error is only estimated to check the MMD1_GAUSSIAN method for estimating
    # the null distribution. Note that testing has to happen on different data
    # than kernel selection, but the linear time mmd does this implicitly
    mmd.set_null_approximation_method(MMD1_GAUSSIAN)
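    # MMD1_GAUSSIAN approximates the null distribution of the linear time
    # statistic by a normal distribution with an empirically estimated
    # variance; this is much cheaper than resampling via BOOTSTRAP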
    # the number of trials should be larger to compute tight confidence bounds
    num_trials=5
    alpha=0.05 # significance level of the test (not the test power)
    typeIerrors=[0 for x in range(num_trials)]
    typeIIerrors=[0 for x in range(num_trials)]
    for i in range(num_trials):
        # simulate H0 (effectively p=q); rejecting here is a type I error.
        # perform_test() returns a p-value, so rejection means p-value<alpha
        mmd.set_simulate_h0(True)
        typeIerrors[i]=mmd.perform_test()<alpha
        # under H1 (p!=q), failing to reject (p-value>alpha) is a type II error
        mmd.set_simulate_h0(False)
        typeIIerrors[i]=mmd.perform_test()>alpha
print "type I error:", mean(typeIerrors), ", type II error:", mean(typeIIerrors)