

Python numpy.logspace Function Code Examples

This article collects typical usage examples of the numpy.logspace function in Python. If you are wondering exactly what logspace does, how to call it, or what it looks like in real code, the hand-picked examples below may help.


The sections below present 15 code examples of the logspace function, sorted by popularity by default.
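
Before the project-level examples, a minimal sketch of the basic call may help (the variable names and sample values below are illustrative only, not taken from any of the projects that follow): numpy.logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None) returns num samples spaced evenly on a log scale between base**start and base**stop.

import numpy as np

# 50 points (the default num) spaced evenly on a log scale from 10**0 to 10**3
y = np.logspace(0, 3)

# 7 points from 1e-3 to 1e3, equivalent to 10**np.linspace(-3, 3, 7);
# grids like this are a common choice for hyperparameter searches
C_grid = np.logspace(-3, 3, num=7, base=10)

# base=2 gives powers of two; endpoint=False drops the final value (here: 1, 2, 4, ..., 128)
octaves = np.logspace(0, 8, num=8, base=2.0, endpoint=False)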

Example 1: _extend_ref

def _extend_ref(ref, min_w, max_w):
    """Extends the reference spectrum to the given limits, assuming a constant
    delta log-lambda scale.

    Args:
        ref (spectrum.Spectrum): Reference spectrum
        min_w, max_w (float): Wavelength limits
    """
    # Delta log-lambda
    w = ref.w
    dw = np.median(np.log10(w[1:]) - np.log10(w[:-1]))

    if min_w < w[0]:
        num_points = int((np.log10(w[0]) - np.log10(min_w))/dw)
        left = np.logspace(np.log10(w[0]), np.log10(min_w), num_points,
                           base=10.0)[1:]
        # Don't forget to reverse left
        w = np.concatenate((left[::-1], w))

    if max_w > w[-1]:
        num_points = int((np.log10(max_w) - np.log10(w[-1]))/dw)
        right = np.logspace(np.log10(w[-1]), np.log10(max_w), num_points,
                            base=10.0)[1:]
        w = np.concatenate((w, right))

    if len(w) != len(ref.w):
        ref = ref.extend(w)

    return ref
Developer ID: samuelyeewl, Project: specmatch-emp, Lines: 29, Source: shift.py

Example 2: test_dtype

 def test_dtype(self):
     y = logspace(0, 6, dtype='float32')
     assert_equal(y.dtype, dtype('float32'))
     y = logspace(0, 6, dtype='float64')
     assert_equal(y.dtype, dtype('float64'))
     y = logspace(0, 6, dtype='int32')
     assert_equal(y.dtype, dtype('int32'))
Developer ID: hitej, Project: meta-core, Lines: 7, Source: test_function_base.py

Example 3: get_clfmethod

def get_clfmethod (clfmethod, n_feats, n_subjs, n_jobs=1):

    #classifiers
    classifiers = { 'cart'   : tree.DecisionTreeClassifier(random_state = 0),
                    'rf'     : RandomForestClassifier(max_depth=None, min_samples_split=1, random_state=None),
                    'gmm'    : GMM(init_params='wc', n_iter=20, random_state=0),
                    'svm'    : SVC (probability=True, max_iter=50000, class_weight='auto'),
                    'linsvm' : LinearSVC (class_weight='auto'),
                    'sgd'    : SGDClassifier (fit_intercept=True, class_weight='auto', shuffle=True, n_iter = np.ceil(10**6 / 416)),
                    'percep' : Perceptron (class_weight='auto'),
    }

    #Classifiers parameter values for grid search
    if n_feats < 10:
        max_feats = list(range(1, n_feats, 2))
    else:
        max_feats = list(range(1, 30, 4))
    max_feats.extend([None, 'auto', 'sqrt', 'log2'])

    clgrid =      { 'cart'   : dict(criterion = ['gini', 'entropy'], max_depth = [None, 10, 20, 30]),
                    'rf'     : dict(n_estimators = [3, 5, 10, 30, 50, 100], max_features = max_feats),
                    'gmm'    : dict(n_components = [2,3,4,5], covariance_type=['spherical', 'tied', 'diag'], thresh = [True, False] ),
                    #'svm'  : dict(kernel = ['rbf', 'linear', 'poly'], C = np.logspace(-3, 3, num=7, base=10), gamma = np.logspace(-3, 3, num=7, base=10), coef0 = np.logspace(-3, 3, num=7, base=10)),
                    #'svm'    : dict(kernel = ['rbf', 'poly'], C = np.logspace(-3, 3, num=7, base=10), gamma = np.logspace(-3, 3, num=7, base=10), coef0=np.logspace(-3, 3, num=7, base=10)),
                    'svm'    : dict(kernel = ['rbf', 'linear'], C = np.logspace(-3, 3, num=7, base=10), gamma = np.logspace(-3, 3, num=7, base=10)),
                    'linsvm' : dict(C = np.logspace(-3, 3, num=7, base=10)),
                    'sgd'    : dict(loss=['hinge', 'modified_huber', 'log'], penalty=["l1","l2","elasticnet"], alpha=np.logspace(-6, -1, num=6, base=10)),
                    'percep' : dict(penalty=[None, 'l2', 'l1', 'elasticnet'], alpha=np.logspace(-3, 3, num=7, base=10)),
    }

    return classifiers[clfmethod], clgrid[clfmethod]
Developer ID: borjaayerdi, Project: oasis_feets, Lines: 31, Source: do_classification.py

Example 4: tsz_profile

 def tsz_profile(self, nu=None):
     """
     Return interpolation fn. for tSZ profile as a function of r [Mpc].
     """
     bb = np.linspace(0.0, self.bmax, 150) # Range of impact parameters
     rr = bb * self.r500
     
     # Interpolate the radial pressure profile, P
     N_X_SAMP = 1200 # Increase this for more accurate integration
     _r = np.logspace(-4, np.log10(self.bmax*self.r500), 200)
     #_r = np.linspace(1e-4, self.bmax*self.r500, 250)
     _P = self.P(_r)
     Pinterp = scipy.interpolate.interp1d(_r, _P, kind='linear', 
                                          bounds_error=False, fill_value=0.)
                                          
     
     # Sample the integrand and do Simpson-rule integration over samples
     ig_tsz = lambda x, b: Pinterp(x*self.r500) * (x / np.sqrt(x**2. - b**2.))
     _x = [ np.logspace(np.log10(b+1e-4), np.log10(self.bmaxc), N_X_SAMP) 
             for b in bb ]
     ysz = [ scipy.integrate.simps(ig_tsz(_x[i], bb[i]), _x[i]) 
               for i in range(bb.size) ]
     
     # Spectral dependence and Y_SZ pre-factors
     if nu is None:
       g_nu = 1.
     else:
       g_nu = self.tsz_spectrum(nu)
     fac_ysz = (2. * 2. * 2.051 / 511.) * self.r500
     ysz = g_nu * fac_ysz * np.array(ysz)
     
     # Interpolate and return
     interp = scipy.interpolate.interp1d( rr, ysz, kind='linear', 
                                          bounds_error=False, fill_value=0.0 )
     return interp
Developer ID: philbull, Project: flatmander, Lines: 35, Source: cluster_profile.py

Example 5: convolved_true_maps

def convolved_true_maps(nu_min,nu_max,delta_nu,subdelta_nu,cmb,dust,verbose=True):

	sh = cmb.shape
	Nbpixels = sh[0]

	#frequencies to reconstruct
	Nbfreq=int(floor(log(nu_max/nu_min)/log(1+delta_nu)))+1 ## number of edge frequencies
	nus_edge=nu_min*np.logspace(0,log(nu_max/nu_min)/log(1+delta_nu),Nbfreq,endpoint=True,base=delta_nu+1) #edge frequencies of reconstructed bands
	nus=np.array([(nus_edge[i]+nus_edge[i-1])/2 for i in range(1,Nbfreq)])
	deltas=(delta_nu)*(nus)
	deltas=np.array([(nus_edge[i]-nus_edge[i-1]) for i in range(1,Nbfreq)])
	Nbbands=len(nus)

	#frequencies assumed to have been used for construction of TOD
	subnu_min=nu_min
	subnu_max=nu_max
	Nbsubfreq=int(floor(log(subnu_max/subnu_min)/log(1+subdelta_nu)))+1
	sub_nus_edge=subnu_min*np.logspace(0,log(subnu_max/subnu_min)/log(1+subdelta_nu),Nbsubfreq,endpoint=True,base=subdelta_nu+1)
	sub_nus=np.array([(sub_nus_edge[i]+sub_nus_edge[i-1])/2 for i in range(1,Nbsubfreq)])
	sub_deltas=np.array([(sub_nus_edge[i]-sub_nus_edge[i-1]) for i in range(1,Nbsubfreq)])  
	Nbsubbands=len(sub_nus)


	#Bands
	bands=[sub_nus[reduce(logical_and,(sub_nus<=nus_edge[i+1],sub_nus>=nus_edge[i]))] for i in range(Nbbands)]
	numbers=np.cumsum(np.array([len(bands[i]) for i in range(Nbbands)]))
	numbers=np.append(0,numbers)
	bands_numbers=np.array([(np.arange(numbers[i],numbers[i+1])) for i in range(Nbbands)])

	if verbose:
		print('Nombre de bandes utilisées pour la construction : '+str(Nbsubbands))
		print('Sous fréquences centrales utilisées pour la construction : '+str(sub_nus))
		print('Nombre de bandes reconstruites : '+str(Nbbands))
		print('Résolution spectrale : '+str(delta_nu))
		print ('Bandes reconstruites : ' + str(bands)) 
		print('Edges : '+str(nus_edge)) 
		print('Sub Edges : '+str(sub_nus_edge))

	#################
	### Input map ###
	#################

	x0=np.zeros((Nbsubbands,Nbpixels,3))
	for i in range(Nbsubbands):
		#x0[i,:,0]=cmb.T[0]+dust.T[0]*scaling_dust(150,sub_nus[i]e-9,1.59)
		x0[i,:,1]=cmb.T[1]+dust.T[1]*scaling_dust(150,sub_nus[i],1.59)
		x0[i,:,2]=cmb.T[2]+dust.T[2]*scaling_dust(150,sub_nus[i],1.59)

	###################################################################################
	### Convolution of the input map (only for comparison to the reconstructed map) ###
	###################################################################################
	x0_convolved=np.zeros((Nbbands,Nbpixels,3))
	for i in range(Nbbands):
		for j in bands_numbers[i]:
			sub_instrument=QubicInstrument(filter_nu=sub_nus[j]*10**9,filter_relative_bandwidth=sub_deltas[j]/sub_nus[j],detector_nep=2.7e-17)
			C=HealpixConvolutionGaussianOperator(fwhm=sub_instrument.synthbeam.peak150.fwhm * (150 / (sub_nus[j])))
			x0_convolved[i]+=C(x0[j])*sub_deltas[j]/np.sum(sub_deltas[bands_numbers[i]])


	return x0_convolved
Developer ID: jchamilton75, Project: MySoft, Lines: 60, Source: SpectroImagerGohar.py

Example 6: _add_eq_contour

def _add_eq_contour(ax, ds, ds_denom, colorbar=None, levels=[], smooth=None):
    """
    Add contours where ds and ds_denom have equal efficiency (ratio = 1). The
    'levels' argument can be used to specify contours at ratios other than 1.
    """
    eff_array = _maximize_efficiency(np.array(ds))
    other_array = _maximize_efficiency(np.array(ds_denom))
    ratio_array = _smooth(eff_array / other_array, sigma=smooth)
    xmin = ds.attrs.get('x_min', 1.0)
    ymin = ds.attrs.get('y_min', 1.0)
    xmax = ds.attrs['x_max']
    ymax = ds.attrs['y_max']

    xvals = np.logspace(math.log10(xmin), math.log10(xmax), ds.shape[0])
    yvals = np.logspace(math.log10(ymin), math.log10(ymax), ds.shape[1])
    xgrid, ygrid = np.meshgrid(xvals, yvals)
    ct = ax.contour(
        xgrid, ygrid,
        ratio_array.T,
        linewidths = _line_width,
        levels = [1.0] if not levels else levels,
        colors = ['r','orange','y','green'],
        )
    def fmt(value):
        if value == 1.0:
            return 'equal'
        return '{:+.0%}'.format(value - 1.0)
    ax.clabel(ct, fontsize=_text_size*0.75, inline=True, fmt=fmt)
    if colorbar:
        colorbar.add_lines(ct)
Developer ID: dguest, Project: tagging-performance, Lines: 30, Source: ctaging.py

Example 7: plot_optimisation

def plot_optimisation(ls_of_ws, cost_func):
    ws1, ws2 = zip(*ls_of_ws)
    # Plot figures
    fig = plt.figure(figsize=(10, 4))
    # Plot overview of cost function
    ax_1 = fig.add_subplot(1,2,1)
    ws1_1, ws2_1, cost_ws_1 = get_cost_surface(-3, 3, -3, 3, 100, cost_func)
    surf_1 = plot_surface(ax_1, ws1_1, ws2_1, cost_ws_1 + 1)
    ax_1.plot(ws1, ws2, 'b.')
    ax_1.set_xlim([-3,3])
    ax_1.set_ylim([-3,3])
    # Plot zoom of cost function
    ax_2 = fig.add_subplot(1,2,2)
    ws1_2, ws2_2, cost_ws_2 = get_cost_surface(0, 2, 0, 2, 100, cost_func)
    surf_2 = plot_surface(ax_2, ws1_2, ws2_2, cost_ws_2 + 1)
    ax_2.set_xlim([0,2])
    ax_2.set_ylim([0,2])
    surf_2 = plot_surface(ax_2, ws1_2, ws2_2, cost_ws_2)
    ax_2.plot(ws1, ws2, 'b.')
    # Show the colorbar
    fig.subplots_adjust(right=0.8)
    cax = fig.add_axes([0.85, 0.12, 0.03, 0.78])
    cbar = fig.colorbar(surf_1, ticks=np.logspace(0, 8, 9), cax=cax)
    cbar.ax.set_ylabel('$\\xi$', fontsize=15)
    cbar.set_ticklabels(['{:.0e}'.format(i) for i in np.logspace(0, 8, 9)])
    plt.suptitle('Cost surface', fontsize=15)
    plt.show()
Developer ID: Sandy4321, Project: peterroelants.github.io, Lines: 27, Source: plot_utils.py

Example 8: setup

 def setup(self):
     self.energy_lo = np.logspace(0, 1, 11)[:-1] * u.TeV
     self.energy_hi = np.logspace(0, 1, 11)[1:] * u.TeV
     self.offset_lo = np.linspace(0, 1, 4)[:-1] * u.deg
     self.offset_hi = np.linspace(0, 1, 4)[1:] * u.deg
     self.migra_lo = np.linspace(0, 3, 4)[:-1]
     self.migra_hi = np.linspace(0, 3, 4)[1:]
     self.detx_lo = np.linspace(-6, 6, 11)[:-1] * u.deg
     self.detx_hi = np.linspace(-6, 6, 11)[1:] * u.deg
     self.dety_lo = np.linspace(-6, 6, 11)[:-1] * u.deg
     self.dety_hi = np.linspace(-6, 6, 11)[1:] * u.deg
     self.aeff_data = np.random.rand(10, 3) * u.cm * u.cm
     self.edisp_data = np.random.rand(10, 3, 3)
     self.bkg_data = np.random.rand(10, 10, 10) / u.MeV / u.s / u.sr
     self.aeff = EffectiveAreaTable2D(energy_lo=self.energy_lo, energy_hi=self.energy_hi,
                                      offset_lo=self.offset_lo, offset_hi=self.offset_hi,
                                      data=self.aeff_data)
     self.edisp = EnergyDispersion2D(e_true_lo=self.energy_lo, e_true_hi=self.energy_hi,
                                     migra_lo=self.migra_lo, migra_hi=self.migra_hi,
                                     offset_lo=self.offset_lo, offset_hi=self.offset_hi,
                                     data=self.edisp_data)
     self.bkg = Background3D(energy_lo=self.energy_lo, energy_hi=self.energy_hi,
                             detx_lo=self.detx_lo, detx_hi=self.detx_hi,
                             dety_lo=self.dety_lo, dety_hi=self.dety_hi,
                             data=self.bkg_data)
Developer ID: cdeil, Project: gammapy, Lines: 25, Source: test_irf_write.py

Example 9: cv_test

def cv_test():
    """
        tests the cross validation. needs working krr class!
    """
    Xtr, Ytr = noisysincfunction(100, 0.1)
    Xte = np.arange(-np.pi, np.pi, 0.01)[np.newaxis, :]

    krr = imp.krr()

    pl.figure()
    pl.subplot(1, 2, 1)
    params = ["kernel", ["gaussian"], "kernelparam", np.logspace(-2, 2, 10), "regularization", np.logspace(-2, 2, 10)]
    cvkrr = imp.cv(Xtr, Ytr, krr, params, loss_function=squared_error_loss, nrepetitions=2)
    cvkrr.predict(Xte)
    print cvkrr.kernelparameter
    print cvkrr.regularization

    pl.plot(Xtr.T, Ytr.T)
    pl.plot(Xte.T, cvkrr.ypred.T)
    pl.title("CV with fixed regularization")

    pl.subplot(1, 2, 2)
    params = ["kernel", ["gaussian"], "kernelparam", np.logspace(-2, 2, 10), "regularization", [0]]
    cvkrr = imp.cv(Xtr, Ytr, krr, params, loss_function=squared_error_loss, nrepetitions=2)
    cvkrr.predict(Xte)
    print cvkrr.kernelparameter
    print cvkrr.regularization

    pl.plot(Xtr.T, Ytr.T)
    pl.plot(Xte.T, cvkrr.ypred.T)
    pl.title("CV with efficient LOOCV")
    print "\n(time the test takes on my notebook: approx. 6 seconds)"
Developer ID: philjjoon, Project: Lab-MachineLearning, Lines: 32, Source: ps3_tests.py

Example 10: update_xscale

	def update_xscale(self):
		if self.logfreqscale == 2:
			self.xscaled = numpy.logspace(numpy.log2(self.minfreq), numpy.log2(self.maxfreq), self.canvas_height, base=2.0)
		elif self.logfreqscale == 1:
			self.xscaled = numpy.logspace(numpy.log10(self.minfreq), numpy.log10(self.maxfreq), self.canvas_height)
		else:
			self.xscaled = numpy.linspace(self.minfreq, self.maxfreq, self.canvas_height)
Developer ID: Bentley4, Project: friture, Lines: 7, Source: spectrogram_image.py

Example 11: benchmark_plot

def benchmark_plot(data):
    """ log-log graph of the benchmark results """
    plt.figure()
    idx = 0
    symbols = ['o', 's', 'h', 'D','1','8','*','+','x']
    for (N, tp, tnp, speedup, name) in data:
        plt.loglog(N, tp,'r%s' % symbols[idx],label='pure Python %s' % name )
        plt.loglog(N, tnp,'b%s' % symbols[idx],label='numpy %s' % name )
        idx += 1
        speedup_txt = "%s speedup: %3.1fx" % (name, speedup)
        plt.text( 700, (0.9)/np.exp(0.3*idx), speedup_txt , fontsize=14)
    n = np.logspace(2,5,20)
    logline =  [(5e-6)*nn for nn in n]
    plt.loglog(n,logline,'r-',label='5e-6 * N')
    n = np.logspace(4,6,20)
    logline =  [(2e-8)*nn for nn in n]
    plt.loglog(n,logline,'b-',label='2e-8 * N')
    
    logline =  [(2e-9)*nn*np.log(nn) for nn in n]
    plt.loglog(n,logline,'b-.',label='2e-9 * N log(N)')
        
    logline =  [(2e-12)*nn*nn for nn in n]
    plt.loglog(n,logline,'b--',label='2e-12 * N*N')
        
    plt.xlabel('Input data size')
    plt.ylabel('CPU seconds')

    plt.legend(loc='upper left')
    plt.title('allantools numpy benchmark, AW 2014-08-31')
    plt.show()
Developer ID: kuzavas, Project: allantools, Lines: 30, Source: benchmark_purepython_vs_numpy.py

Example 12: entries_histogram

def entries_histogram(pandas_df):
    
    df = pandas_df
    
    ### Let's plot two histograms on the same axes to show hourly
    ### entries when raining vs. when not raining.
    
    ### no axis transform...
    plt.figure()
    df.ENTRIESn_hourly[df.rain == 1].plot(kind='hist', stacked=True, alpha=0.5, bins=100)
    df.ENTRIESn_hourly[df.rain == 0].plot(kind='hist', stacked=True, alpha=0.5, bins=100)
    plt.xlabel('Entries Hourly')
    plt.ylabel('Frequency')
    plt.xlim([0, 15000])
    plt.ylim([0, 50000])
    plt.show()
    # this command would close the plot
    # plt.clf()
    
    ### with a log scale transform on the x-axis...
    plt.figure()
    df.ENTRIESn_hourly[df.rain == 1].plot(kind='hist', stacked=True, alpha=0.5, bins=np.logspace(0.1, 6, 50)) # histogram of hourly entries when it is raining
    df.ENTRIESn_hourly[df.rain == 0].plot(kind='hist', stacked=True, alpha=0.5, bins=np.logspace(0.1, 6, 50)) # histogram of hourly entries when it is not raining
    plt.xlabel('Entries Hourly')
    plt.ylabel('Frequency')
    plt.gca().set_xscale("log")
    plt.show()
Developer ID: winkelman, Project: udacity-dand-ds, Lines: 27, Source: exploratory_plot.py

Example 13: test_fuzz_K_to_discharge_coefficient

def test_fuzz_K_to_discharge_coefficient():
    '''
    # Testing the different formulas
    from sympy import *
    C, beta, K = symbols('C, beta, K')

    expr = Eq(K, (sqrt(1 - beta**4*(1 - C*C))/(C*beta**2) - 1)**2)
    solns = solve(expr, C)
    [i.subs({'K': 5.2314291729754, 'beta': 0.05/0.07366}) for i in solns]
    
    [-sqrt(-beta**4/(-2*sqrt(K)*beta**4 + K*beta**4) + 1/(-2*sqrt(K)*beta**4 + K*beta**4)),
 sqrt(-beta**4/(-2*sqrt(K)*beta**4 + K*beta**4) + 1/(-2*sqrt(K)*beta**4 + K*beta**4)),
 -sqrt(-beta**4/(2*sqrt(K)*beta**4 + K*beta**4) + 1/(2*sqrt(K)*beta**4 + K*beta**4)),
 sqrt(-beta**4/(2*sqrt(K)*beta**4 + K*beta**4) + 1/(2*sqrt(K)*beta**4 + K*beta**4))]
    
    # Getting the formula
    from sympy import *
    C, beta, K = symbols('C, beta, K')
    
    expr = Eq(K, (sqrt(1 - beta**4*(1 - C*C))/(C*beta**2) - 1)**2)
    print(latex(solve(expr, C)[3]))
    '''
    
    Ds = np.logspace(np.log10(1-1E-9), np.log10(1E-9))
    for D_ratio in Ds:
        Ks = np.logspace(np.log10(1E-9), np.log10(50000))
        Ks_recalc = []
        for K in Ks:
            C = K_to_discharge_coefficient(D=1, Do=D_ratio, K=K)
            K_calc = discharge_coefficient_to_K(D=1, Do=D_ratio, C=C)
            Ks_recalc.append(K_calc)
        assert_allclose(Ks, Ks_recalc)
Developer ID: kkremitzki, Project: fluids, Lines: 32, Source: test_flow_meter.py

Example 14: train_gbg_svm_model

def train_gbg_svm_model(X_train, y_train, verbose=False):
    # train SVM
    X_scaled = preprocessing.scale(X_train)

    best_score = 9999
    best_params = {}

    # grid search to optimize parameters of SVM
    C_list = np.logspace(-5, 2, num=11)
    gamma_list = np.logspace(-3, 1, num=11)
    epsilon_list = [0.1]
    for c_test in C_list:
        for gamma_test in gamma_list:
            for epsilon_test in epsilon_list:
                svm_model = svm.SVR(kernel="rbf", C=c_test, gamma=gamma_test, epsilon=epsilon_test)
                scores = cross_validation.cross_val_score(svm_model, X_scaled, y_train, cv=5)
                mean_score = np.mean(scores)
                if verbose:
                    print("params: " + str(svm_model.get_params()))
                    print(mean_score)
                if abs(mean_score) < abs(best_score):
                    best_score = mean_score
                    best_params = svm_model.get_params()

    print("***Best Params***")
    print(best_params)
    print("Score:" + str(best_score))

    # return a trained SVM with the best parameters we found
    ret_svm = svm.SVR()
    ret_svm.set_params(**best_params)
    ret_svm.fit(X_scaled, y_train)
    return ret_svm
Developer ID: keithdlandry, Project: player_stats, Lines: 33, Source: training_data.py

Example 15: adjust_SVM

    def adjust_SVM(self):
        Cs = np.logspace(0, 10, 15, base=2)
        gammas = np.logspace(-7, 4, 15, base=2)
        scores = np.zeros((len(Cs), len(gammas)))
        scores[:] = np.nan

        print 'adjusting SVM (may take a long time) ...'
        def f(job):
            i, j = job
            samples, labels = self.get_dataset()
            params = dict(C = Cs[i], gamma=gammas[j])
            score = cross_validate(SVM, params, samples, labels)
            return i, j, score
        
        ires = self.run_jobs(f, np.ndindex(*scores.shape))
        for count, (i, j, score) in enumerate(ires):
            scores[i, j] = score
            print '%d / %d (best error: %.2f %%, last: %.2f %%)' % (count+1, scores.size, np.nanmin(scores)*100, score*100)
        print scores

        print 'writing score table to "svm_scores.npz"'
        np.savez('svm_scores.npz', scores=scores, Cs=Cs, gammas=gammas)

        i, j = np.unravel_index(scores.argmin(), scores.shape)
        best_params = dict(C = Cs[i], gamma=gammas[j])
        print 'best params:', best_params
        print 'best error: %.2f %%' % (scores.min()*100)
        return best_params
Developer ID: BRAINSia, Project: OpenCV_TruncatedSVN, Lines: 28, Source: digits_adjust.py


Note: The numpy.logspace examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not repost without permission.