当前位置: 首页>>代码示例>>Python>>正文


Python pylab.frange函数代码示例

本文整理汇总了Python中pylab.frange函数的典型用法代码示例。如果您正苦于以下问题:Python frange函数的具体用法?Python frange怎么用?Python frange使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。


在下文中一共展示了frange函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: timings

def timings(exe_name_1, exe_name_2, num_of_funcs):
    """Time get_optimal_threshold over a grid of threshold settings.

    For every (block_similarity_threshold, min_block_dist_similarity) pair
    in [0, 0.8] x [0, 0.8] (step 0.1), runs get_optimal_threshold on a random
    sample of `num_of_funcs` functions common to the two executables and
    records the elapsed wall-clock time.

    Returns a nested dict:
        timing_dict[block_sim_threshold][min_block_dist_similarity]
            = (delta, elapsed_seconds)
    """
    exe1, exe2 = get_intersecting_func_names(exe_name_1, exe_name_2)

    # Sample the same random positions from both executables so the two
    # function lists stay aligned pairwise.
    index_list = random.sample(range(len(exe1)), num_of_funcs)
    funcs1 = [exe1[i] for i in index_list]
    funcs2 = [exe2[i] for i in index_list]

    bst = []
    timing_dict = {}
    for block_sim_threshold in pl.frange(0, 0.8, 0.1):
        # Round to one decimal: frange accumulates float error (e.g. 0.30000000000000004).
        block_sim_threshold = round(block_sim_threshold, 1)
        # BUG FIX: the inner dict must exist before the item assignment below,
        # otherwise timing_dict[block_sim_threshold] raises KeyError.
        timing_dict[block_sim_threshold] = {}
        mbds = []
        for min_block_dist_similarity in pl.frange(0, 0.8, 0.1):
            min_block_dist_similarity = round(min_block_dist_similarity, 1)
            test_dict = {  # "log_decisions": True,
                         "block_similarity_threshold": block_sim_threshold,
                         "min_block_dist_similarity": min_block_dist_similarity,
                         "association_graph_max_size": 5000}
            start = time.time()
            delta = get_optimal_threshold(funcs1, funcs2, test_dict=test_dict)
            elapsed = (time.time() - start)
            mbds.append(elapsed)
            print (block_sim_threshold, min_block_dist_similarity, elapsed)
            timing_dict[block_sim_threshold][min_block_dist_similarity] = (delta, elapsed)
            # BUG FIX: `print elapsed` was Python 2-only statement syntax; the
            # parenthesized form behaves identically under both interpreters.
            print(elapsed)
        bst.append(mbds)
    return timing_dict
开发者ID:ChenSvirsky,项目名称:REDB-1,代码行数:27,代码来源:Compare.py

示例2: svm_model

def svm_model(train_data_features, train_data_split_crossfold_features, test_data_features, labels, labels_cross_validation_classwise, using_cross_validation2, kf, settings):
    """Train a linear SVC, optionally selecting the penalty C by validation.

    When `using_cross_validation2` is true, C is swept either against the
    class-wise validation set (results printed only) or across the supplied
    KFold split `kf` (error counts recorded), and a final model is refitted
    on all training data with the chosen C.  Otherwise a fixed C=8 is used.

    Returns a tuple (probabilities, predictions, model) for
    `test_data_features`.
    """
    # BUG FIX: the body referenced `train_data_cross_validation_classwise_features`,
    # which is not a parameter of this function (NameError at call time).
    # Alias the intended name to the actual parameter; the public signature
    # is left unchanged so existing callers keep working.
    train_data_cross_validation_classwise_features = train_data_split_crossfold_features
    if using_cross_validation2:
        C_base = 4.5
        C_step = 0.5  # 0.005
        C = C_base
        _results = []
        if(len(train_data_cross_validation_classwise_features) > 0):
            # Sweep C over two ranges, scoring each candidate against the
            # class-wise validation set (printed only, not collected).
            for c in pl.frange(C_base,9, C_step):
                svc = SVC(kernel="linear", C=c, probability=True)
                model = svc.fit(train_data_features, labels)
                predicted_classes = model.predict(train_data_cross_validation_classwise_features)
                class_probabilities = model.predict_proba(train_data_cross_validation_classwise_features)
                print("C: ",c," N points:", len(predicted_classes), " percentage: ",(labels_cross_validation_classwise != predicted_classes).sum()*100/len(predicted_classes),"%")
                print("Log_loss: ", log_loss(labels_cross_validation_classwise, class_probabilities))
            for c in pl.frange(1,3, 1):
                svc = SVC(kernel="linear", C=c, probability=True)
                model = svc.fit(train_data_features, labels)
                predicted_classes = model.predict(train_data_cross_validation_classwise_features)
                class_probabilities = model.predict_proba(train_data_cross_validation_classwise_features)
                print("C: ",c," N points:", len(predicted_classes), " percentage: ",(labels_cross_validation_classwise != predicted_classes).sum()*100/len(predicted_classes),"%")
                print("Log_loss: ", log_loss(labels_cross_validation_classwise, class_probabilities))
        else:
            # Score successive C values across the KFold split, recording
            # the misclassification count for each candidate.
            for train, test in kf:
                svc = SVC(kernel="linear", C=C, probability=True)
                model = svc.fit(train_data_features[train], labels[train])
                predicted_classes = model.predict(train_data_features[test])
                predicted_classes_train = model.predict(train_data_features[train])
                class_probabilities = model.predict_proba(train_data_features[test])
                print("C: ",C," n points:", len(predicted_classes), " percentage: ",(labels[test] != predicted_classes).sum()*100/len(predicted_classes),"% percentage_train: ", (labels[train] != predicted_classes_train).sum()*100/len(predicted_classes_train),"%")
                _results.append((labels[test] != predicted_classes).sum())
                C += C_step

        # BUG FIX: `_results` is only populated in the KFold branch; calling
        # min() on an empty list raised ValueError when the class-wise
        # validation set was used.  Fall back to C_base in that case.
        if _results:
            C = C_base + C_step * _results.index(min(_results))
        else:
            C = C_base
        print("C: ", C)
        if(len(train_data_cross_validation_classwise_features) > 0):
            # Report final validation performance at the chosen C.
            svc = SVC(kernel="linear", C=C, probability=True)
            model = svc.fit(train_data_features, labels)
            predicted_classes = model.predict(train_data_cross_validation_classwise_features)
            class_probabilities = model.predict_proba(train_data_cross_validation_classwise_features)
            print("C: ",C," N points:", len(predicted_classes), " percentage: ",(labels_cross_validation_classwise != predicted_classes).sum()*100/len(predicted_classes),"%")
            print("Log_loss: ", log_loss(labels_cross_validation_classwise, class_probabilities))
        # Final model fitted on the full training data.
        svc = SVC(kernel="linear", C=C, probability=True)
        model = svc.fit(train_data_features, labels)
        return model.predict_proba(test_data_features), model.predict(test_data_features), model
    else:
        svc = SVC(kernel="linear", C=8, probability=True)
        model = svc.fit(train_data_features, labels)
        return model.predict_proba(test_data_features), model.predict(test_data_features), model
开发者ID:dvn123,项目名称:MachineLearning,代码行数:60,代码来源:machine_learning_models.py

示例3: optimal_block_sim_threshold_min_block_dist_similarity

def optimal_block_sim_threshold_min_block_dist_similarity(exe_name_1,
                                                          exe_name_2,
                                                          num_of_funcs):
    """Grid-search the threshold pair maximizing get_optimal_threshold.

    Scans block_sim_threshold in [0, 0.8] and min_block_dist_similarity in
    [0.5, 0.8] (step 0.1) over a random sample of `num_of_funcs` functions
    shared by the two executables, and prints the best combination found.
    """
    # Single-block functions are excluded from the candidate set.
    func_set = Function.objects.exclude(graph__num_of_blocks=1)
    exe1, exe2 = get_intersecting_func_names(func_set, exe_name_1,
                                             exe_name_2)
    # Same random positions in both lists keeps the function pairs aligned.
    index_list = random.sample(range(len(exe1)), num_of_funcs)
    funcs1 = [exe1[i] for i in index_list]
    funcs2 = [exe2[i] for i in index_list]
    best_block_sim_threshold = 0
    best_min_block_dist_similarity = 0
    best_delta = float("-infinity")
    for block_sim_threshold in pl.frange(0, 0.8, 0.1):
        for min_block_dist_similarity in pl.frange(0.5, 0.8, 0.1):
            print ("current", block_sim_threshold, min_block_dist_similarity)
            print ("best", best_block_sim_threshold, best_min_block_dist_similarity)
            test_dict = {  # "log_decisions": True,
                         "block_similarity_threshold": block_sim_threshold,
                         "min_block_dist_similarity": min_block_dist_similarity,
                         "association_graph_max_size": 5000}
            delta = \
                get_optimal_threshold(funcs1, funcs2, test_dict=test_dict)

            if best_delta < delta:
                best_delta = delta
                # BUG FIX: this was a Python 2-only print statement; use the
                # parenthesized form like every other print in this function.
                print("best delta: " + str(best_delta))
                best_block_sim_threshold = block_sim_threshold
                best_min_block_dist_similarity = min_block_dist_similarity

    print ("best_delta: " +
           str(best_delta) +
           ", best_block_sim_threshold: " +
           str(best_block_sim_threshold) +
           ", best_min_block_dist_similarity: " +
           str(best_min_block_dist_similarity))
开发者ID:ChenSvirsky,项目名称:REDB-1,代码行数:35,代码来源:Compare.py

示例4: generate_plot

def generate_plot():
	"""Scan (kx, ky) over [0, 2*pi)^2 for a two-band tight-binding model with
	a field-induced spin-orbit term, then plot the trace of the spectral
	function (pcolormesh) overlaid with the in-plane spin texture (quiver).

	NOTE(review): relies on module-level `np`, `pl`, `plt`, `sparse`,
	`linalg` and `calc` imported outside this chunk — confirm which
	`linalg.inv` (numpy vs scipy) is in scope.
	"""
	# Model constants (values as chosen by the original author).
	h_bar = 6.582E-16
	q = 1
	a = 1E-10
	t = 1
	c = 3.0E8
	g = -2.002
	N = 1
	E = -1
	Ez = 1000
	# Small complex broadening added to the Green's function denominator.
	eta = 0.01 + (0.01)*1.j
	# Pauli matrices sigma_x and sigma_y.
	sigma_x = np.array([[0,1],[1,0]])
	sigma_y = np.array([[0, -1.j],[1.j,0]])
	kxs = []
	alphas = []
	stxs = []
	stys = []
	for kx in pl.frange(0, 2*np.pi, 0.1):
		kxs.append(kx)
		# kys is rebuilt identically on every kx pass; only the final copy
		# is used by meshgrid below.
		kys = []
		alphas_row = []
		stxs_row = []
		stys_row = []
		for ky in pl.frange(0, 2*np.pi, 0.1):
			# Prefactor of the spin-orbit coupling term (kx/ky-independent;
			# could be hoisted out of both loops).
			coeff = (-1)*g*q*(1/(h_bar**2))*(a**2)*(t**2)*(1/(2*c**2))
			#print(coeff)
			# Spin-diagonal kinetic part: t*(cos kx + cos ky) on both bands.
			hamil = sparse.kron(np.identity(2, dtype=np.complex_), t*(np.cos(kx)+np.cos(ky)))
			# Field-induced spin-orbit term mixing the two spin components.
			hamil += coeff*(np.cos(kx) + np.cos(ky))*(Ez*np.sin(ky)*sigma_x - Ez*np.sin(kx)*sigma_y)
			# Energy E broadcast onto a dense 2x2 identity-like matrix.
			E_arr = sparse.kron(np.identity(2, dtype=np.complex_),E).toarray()
			# Green's function G = (E - H - eta)^-1; note `eta` is subtracted
			# as a scalar from every matrix element here.
			greens = linalg.inv(E_arr-hamil-eta)
			# Anti-Hermitian part of G, i.e. the spectral function (up to 2pi).
			img = (greens - calc.hermitian(greens))/(2.j)
			# Spin expectation values along x and y.
			stxs_row.append(np.trace(np.dot(img,sigma_x))/2)
			stys_row.append(np.trace(np.dot(img,sigma_y))/2)
			kys.append(ky)
			# Trace of the spectral function at this k-point.
			alpha = np.trace(img)/2
			alphas_row.append(alpha)
		#print(stxs_row)
		alphas.append(alphas_row)
		stxs.append(stxs_row)
		stys.append(stys_row)
		print(kx)
	print('loop over')	
	x, y = np.meshgrid(kxs, kys)
	print('here')
	#print(alphas)
	alphas = np.array(alphas)
	stxs = np.array(stxs)
	stys = np.array(stys)
	print(stxs)
	#print(alphas)
	#fig = plt.figure()
	plt.pcolormesh(x, y, alphas)
	#plt.pcolormesh(x,y,stxs)
	plt.quiver(x, y, stxs, stys, color='red', angles='xy', scale_units='xy', scale=1)
	#plt.quiver(x, y, stys, color='red', headlength=10)
	print('mesh complete')
	#plt.colorbar()
	plt.show()
开发者ID:georgemattson,项目名称:many-body-modeler,代码行数:58,代码来源:soc_exercise.py

示例5: alpha_impurity

def alpha_impurity():
	"""
	Calculate and plot Gilbert Damping for on-site potential randomization
	at different strengths.

	For each randomization strength, integrates `inf_rashba_integrand` over
	k in [0, 2*pi); raw samples, their mean and standard deviation are
	appended to 'alpha_vs_impurity_soc0pt1_len100.txt', and the mean damping
	per strength is plotted and saved to 'alpha_impurity.png'.
	"""
	alphas = []
	strengths = []
	coll = []
	soc = 0.1
	length = 100
	energy = 1
	theta = 0
	randomize = True
	# The integrand appends its per-call samples into `coll` through this alias.
	collector = coll
	with open('alpha_vs_impurity_soc0pt1_len100.txt','w') as f:
		for strength in pl.frange(0,0.1,0.05):
			rando_strength = strength
			strengths.append(strength)
			alpha = integrate.quad(inf_rashba_integrand, 0, 2*np.pi, args=(energy,length,soc,theta,randomize,rando_strength,collector),epsabs=1e-4, epsrel=1e-4, limit=50)[0]
			print(coll)
			f.write(str(strength)+' '+str(coll)+'\n')
			avg = np.mean(coll)
			f.write(str(strength)+' '+str(avg)+'\n')
			std = np.std(coll)
			f.write(str(strength)+' '+str(std)+'\n')
			alphas.append(avg)
	fig = plt.figure()
	ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
	# BUG FIX: the original call also plotted `xs, ys, 'g'`, but neither
	# name is defined anywhere in this function (NameError at runtime).
	# Plot only the computed strength/damping data.
	ax.plot(strengths, alphas, 'bo')
	fig.savefig('alpha_impurity.png')
	plt.show()
开发者ID:georgemattson,项目名称:many-body-modeler,代码行数:31,代码来源:rashba_plane.py

示例6: log_res

def log_res(train_data_features, train_data_cross_validation_classwise_features, test_data_features, labels, labels_cross_validation_classwise, using_cross_validation2, kf, settings):
    """Train a logistic-regression classifier, optionally tuning C.

    When `using_cross_validation2` is true, C is swept either against the
    class-wise validation set (log-loss criterion) or across the KFold split
    `kf` (misclassification-count criterion, incrementing C per fold), and
    the winning C is refitted on the full training data.  Otherwise C=1 with
    an l1 penalty is used directly.

    Returns a tuple (probabilities, predictions, model) for
    `test_data_features`.
    """
    if using_cross_validation2:
        logres_C = 1
        logres_results = []
        if(len(train_data_cross_validation_classwise_features) > 0):
            # Dead code kept by the original author as a string literal.
            """train_all = np.append(train_data_features, train_data_cross_validation_classwise_features, axis=0)
            labels_all = np.append(labels, labels_cross_validation_classwise)
            kf_all = KFold(len(train_all)-1, n_folds=int(settings['Data']['CrossValidation2']), shuffle=True)
            for train, test in kf_all:
                C = logres_C
                p = 'l1'
                clf_l1_LR = LogisticRegression(C=C, penalty=p, tol=0.01)
                model = clf_l1_LR.fit(train_all[train], labels_all[train])
                predicted_classes = model.predict(train_all[test])
                predicted_classes_train = model.predict(train_all[train])
                print("N points:", len(predicted_classes), " percentage: ",(labels_all[test] != predicted_classes).sum()*100/len(predicted_classes),"%, percentage_train: ", (labels_all[train] != predicted_classes_train).sum()*100/len(predicted_classes_train))
                logres_results.append((labels_all[test] != predicted_classes).sum())
                logres_C += 1"""
            # Sweep C = 1..15 with l2 penalty, recording validation log-loss.
            for c in pl.frange(logres_C,15, 1):
                clf_l1_LR = LogisticRegression(C=c, solver='lbfgs', penalty='l2', tol=0.01)
                model = clf_l1_LR.fit(train_data_features, labels)
                predicted_classes = model.predict(train_data_cross_validation_classwise_features)
                predicted_classes_train = model.predict(train_data_features)
                class_probabilities = model.predict_proba(train_data_cross_validation_classwise_features)
                logres_results.append(log_loss(labels_cross_validation_classwise, class_probabilities))
                print("N points:", len(predicted_classes), " percentage: ",(labels_cross_validation_classwise != predicted_classes).sum()*100/len(predicted_classes),
                      "%, percentage_train: ", (labels != predicted_classes_train).sum()*100/len(predicted_classes_train))
                print("Log_loss: ", log_loss(labels_cross_validation_classwise, class_probabilities))
        else:
            # No class-wise validation set: evaluate successive C values
            # (one per fold) on the supplied KFold split, counting errors.
            for train, test in kf:
                C = logres_C
                p = 'l1'
                clf_l1_LR = LogisticRegression(C=C, penalty=p, tol=0.01)
                model = clf_l1_LR.fit(train_data_features[train], labels[train])
                predicted_classes = model.predict(train_data_features[test])
                predicted_classes_train = model.predict(train_data_features[train])
                print("N points:", len(predicted_classes), " percentage: ",(labels[test] != predicted_classes).sum()*100/len(predicted_classes),"%, percentage_train: ", (labels[train] != predicted_classes_train).sum()*100/len(predicted_classes_train))
                logres_results.append((labels[test] != predicted_classes).sum())
                logres_C += 1
        print(logres_results)
        # Best C = position of the smallest score + 1 (both sweeps start at C=1).
        logres_C = logres_results.index(min(logres_results)) + 1
        print("Log Res C: ", logres_C)
        if(len(train_data_cross_validation_classwise_features) > 0):
            # Report validation performance at the chosen C.
            # NOTE(review): this refit uses penalty='l2' while the final model
            # below uses 'l1' — looks inconsistent; confirm it is intended.
            clf_l1_LR = LogisticRegression(C=logres_C, penalty='l2', tol=0.01)
            model = clf_l1_LR.fit(train_data_features, labels)
            predicted_classes = model.predict(train_data_cross_validation_classwise_features)
            predicted_classes_train = model.predict(train_data_features)
            class_probabilities = model.predict_proba(train_data_cross_validation_classwise_features)
            print("N points:", len(predicted_classes), " percentage: ",(labels_cross_validation_classwise != predicted_classes).sum()*100/len(predicted_classes),"%, percentage_train: ", (labels != predicted_classes_train).sum()*100/len(predicted_classes_train))
            print("Log_loss: ", log_loss(labels_cross_validation_classwise, class_probabilities))
        # Final model fitted on all training data with the selected C.
        clf_l1_LR = LogisticRegression(C=logres_C, penalty='l1', tol=0.01)
        model = clf_l1_LR.fit(train_data_features, labels)
        return model.predict_proba(test_data_features), model.predict(test_data_features), model
    else:
        C = 1
        p = 'l1'
        clf_l1_LR = LogisticRegression(C=C, penalty=p, tol=0.01)
        model = clf_l1_LR.fit(train_data_features, labels)
        return model.predict_proba(test_data_features), model.predict(test_data_features), model
开发者ID:dvn123,项目名称:MachineLearning,代码行数:59,代码来源:machine_learning_models.py

示例7: main

def main():
    """Plot sigmoid_f (slope 1.1894132348229451) over x in [-20, 20)."""
    xs = list(pl.frange(-20, 20, 0.1))
    ys = [sigmoid_f(x, 1.1894132348229451) for x in xs]
    show_plot(xs, ys)
开发者ID:gswyhq,项目名称:hello-world,代码行数:8,代码来源:画出函数对应的图形.py

示例8: ax_pianoroll

def ax_pianoroll(ax, title='notes'):
    '''Twelve named semitones on the x-axis, one octave.

    Configures *ax* in place: semitone tick labels on x, integer ticks
    0-23 on y, vertical grid lines, x limited to one octave, y autoscaled.
    '''
    ax.clear()
    ax.set_title(title)
    # Tick positions: 12 equal divisions of [0, 1), one per semitone.
    ax.set_xticks(pl.frange(0, 1, npts=12, closed=0))
    # BUG FIX: the label list cannot be passed as a second positional
    # argument to Axes.set_xticks(); labels go through set_xticklabels().
    ax.set_xticklabels('C. C# D. D# E. F. F# G. G# A. A# B.'.split())
    ax.set_yticks(pl.arange(24))
    # BUG FIX: Axes has no set_grid()/set_autoscale() methods — the actual
    # API is grid() and autoscale() (cf. the pyplot twin of this helper).
    ax.grid(True, axis='x')
    ax.set_xlim(-.5/12, 11.5/12)
    ax.autoscale(True, axis='y')
开发者ID:tripzilch,项目名称:dsp,代码行数:9,代码来源:wplot.py

示例9: plot_dic_cmp

def plot_dic_cmp(dic, imgname, firstnum):
    '''Plot the first `firstnum` points of each component series of the
    dict's values, ordered by descending series sum, and save the figure
    as "<imgname>_<firstnum>.png".'''
    import pylab

    xs = pylab.frange(0, len(dic) - 1)
    # Value lists sorted so the largest-sum series is drawn first.
    rows = sorted(dic.values(), key=lambda lis: sum(lis), reverse=True)
    for component in xrange(len(rows[0])):
        ys = [row[component] for row in rows]
        pylab.plot(xs[:firstnum], ys[:firstnum])
    pylab.savefig(imgname + '_%d.png' % firstnum)
开发者ID:Catentropy,项目名称:mylab,代码行数:9,代码来源:tools_0.py

示例10: ax_pianoroll

def ax_pianoroll(title='notes'):
    '''Label one octave of twelve named semitones along the x-axis of the
    current axes (pyplot state machine variant).'''
    pl.cla()
    pl.title(title)
    # 12 equal divisions of [0, 1), one tick per semitone.
    positions = pl.frange(0, 1, npts=12, closed=0)
    names = 'C. C# D. D# E. F. F# G. G# A. A# B.'.split()
    pl.xticks(positions, names)
    pl.yticks(pl.arange(24))
    pl.grid(True, axis='x')
    pl.xlim(-.5/12, 11.5/12)
    pl.autoscale(True, axis='y')
开发者ID:antiface,项目名称:dsp-2,代码行数:9,代码来源:wplot.py

示例11: plot_w

def plot_w(dic, name):
    '''Plot the natural log of the dict's values in descending order and
    save the figure as "<name>.png".'''
    import pylab

    xs = pylab.frange(0, len(dic) - 1)
    descending = sorted(dic.values(), reverse=True)
    log_values = [pylab.log(v) for v in descending]
    pylab.plot(xs, log_values)
    pylab.savefig(name + '.png')
开发者ID:Catentropy,项目名称:mylab,代码行数:9,代码来源:tools_0.py

示例12: process_svr

def process_svr(df):
    """Return the SVR penalty C in [0.5, 1.5] (step 0.1) with the best mean
    5-fold cross-validation score on df."""
    best_score = -100
    best_c = 1
    for candidate in pl.frange(0.5, 1.5, 0.1):
        estimator = svm.SVR(C=candidate)
        fold_scores = cross_validation.cross_val_score(
            estimator, df[predictors], df[target1].values.ravel(), cv=5)
        mean_score = np.mean(fold_scores)
        if mean_score > best_score:
            best_score = mean_score
            best_c = candidate
    return best_c
开发者ID:Matafight,项目名称:MatPyUtil,代码行数:12,代码来源:parkinson.py

示例13: process_ridge

def process_ridge(df):
    """Return the Ridge alpha in [0.5, 1.5] (step 0.1) with the best mean
    5-fold cross-validation score on df."""
    best_alpha = 0
    best_score = -1000
    for candidate in pl.frange(0.5, 1.5, 0.1):
        estimator = Ridge(alpha=candidate)
        fold_scores = cross_validation.cross_val_score(
            estimator, df[predictors], df[target1].values.ravel(), cv=5)
        mean_score = np.mean(fold_scores)
        if mean_score > best_score:
            best_score = mean_score
            best_alpha = candidate
    return best_alpha
开发者ID:Matafight,项目名称:MatPyUtil,代码行数:13,代码来源:parkinson.py

示例14: plot_theory

def plot_theory():
    '''Produce a plot showing the forcing, analytic velocity solution and
    analytic pressure solution.

    Returns (uf, us, ps): the sampled forcing x-component, velocity
    solution and pressure solution along the channel height.

    NOTE(review): `forcing`, `pressure_solution`, `solution` and `array`
    come from the enclosing module and are not visible in this chunk.
    '''
    # Lazy import so pylab is only required when actually plotting.
    from pylab import \
    plot,figure,quiver,frange,subplot,xticks,yticks,axis,xlabel,ylabel, \
    subplots_adjust 


    figure()

    # Sample points across the channel height, y in [0, 1] step 0.05.
    y=frange(0.0,1,0.05)

    psol=pressure_solution(forcing)

    usol=solution(forcing)

    # Zero vertical component and fixed x = 0 along the sampling line.
    v=0*y

    x=0*y

    # Evaluate the analytic solutions at each (x, y) sample.
    us=array([float(usol(pos)) for pos in zip(x,y)])

    ps=array([float(psol(pos)) for pos in zip(x,y)])

    # Forcing is vector-valued; keep only its x-component.
    uf=array([forcing(pos) for pos in zip(x,y)])[:,0]

    subplots_adjust(wspace=0.25)
    # Panel 1: the u source term (arrows + profile line).
    subplot(1,3,1)

    quiver(x[1:-1],y[1:-1],uf[1:-1],v[1:-1], scale=1)
    plot(uf,y)
    xticks([0,0.5,1],map(str,[0,0.5,1]))
    yticks([ 0 ,  0.2,  0.4,  0.6,  0.8,  1 ],map(str,[ 0 ,  0.2,  0.4,  0.6,  0.8,  1 ]))
    ylabel("y")
    xlabel("u source")

    # Panel 2: the analytic velocity solution.
    subplot(1,3,2)
    plot(us,y)
    quiver(x[1:-1],y[1:-1],us[1:-1],v[1:-1], scale=.03)
    xticks([0,0.01,0.02,0.03],map(str,[0,0.01,0.02,0.03]))
    yticks([])
    xlabel("u solution")

    # Panel 3: the analytic pressure solution.
    subplot(1,3,3)
    plot(ps,y)
    xticks([-0.02,-0.01,0],map(str,[-0.02,-0.01,0]))
    yticks([])
    xlabel("p solution")
    

    return uf,us,ps
开发者ID:FluidityProject,项目名称:multifluids,代码行数:51,代码来源:channel_viscous.py

示例15: create_ringmap

def create_ringmap(one2onepar,ringmap):
    """Write a ring-map file grouping detector spectra into 2-theta rings.

    Reads detector parameters from `one2onepar` (whitespace table with a
    one-line header), bins spectra into rings of width ~0.63 degrees between
    2.83 and 136 degrees in 2-theta, and writes the grouping to `ringmap`
    (relocated into config['defaultsave.directory']).

    NOTE(review): the first pass writes the count of NON-empty rings as the
    header, but the second pass emits a group/spectrum-count record for
    EVERY ring, including empty ones — confirm the consumer tolerates this.
    """
    run_dir = config['defaultsave.directory']
    # Keep only the basename of the requested path; save into run_dir.
    ringmap= os.path.join(run_dir,os.path.basename(ringmap)) 
    
    fid = open(ringmap,'w')    
    det = np.genfromtxt(one2onepar,
                                    names="l2, 2theta, phi, pwid, phigh",
                                    skip_header=1,
                                    dtype =(float, float, float, float, float))
                                    
    ttheta=np.array(det['2theta'])
    group=0
    numspec_tot=0

    # Ring (bin) width in degrees of 2-theta.
    dtheta=0.63
    # First pass: count rings that contain at least one spectrum.
    for angle in py.frange(2.83,136,dtheta):
        # Boolean mask of spectra whose 2-theta falls inside this ring.
        myindex=(ttheta>(angle-dtheta/2))*(ttheta<(angle+dtheta/2))
        spectra=np.asarray(np.where(myindex))
        # File format uses 1-based spectrum numbers.
        spectra=spectra+1
        numspec=np.shape(spectra)[1]
        if np.shape(spectra)[1]>0:
            group=group+1
    
    # Header: number of (non-empty) groups.
    fid.write('{0:4.0f}\n'.format(group))
    group=0
    # Second pass: write group number, spectrum count, then the spectra.
    for angle in py.frange(2.83,136,dtheta):
        myindex=(ttheta>(angle-dtheta/2))*(ttheta<(angle+dtheta/2))
        spectra=np.asarray(np.where(myindex))
        spectra=spectra+1
        numspec=np.shape(spectra)[1]
        if np.shape(spectra)[1]>0:
            group=group+1
        fid.write('{0:4.0f}\n'.format(group))
        fid.write('{0:5.0f}\n'.format(np.shape(spectra)[1]))
        for i in range(numspec):
            fid.write('{0:6.0f}\n'.format(spectra[0][i]))
    
    fid.close()
开发者ID:mantidproject,项目名称:scriptrepository,代码行数:38,代码来源:create_MERInst_Files.py


注:本文中的pylab.frange函数示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。