

Python scipy.sort Function Code Examples

This article collects typical usage examples of the Python scipy.sort function, drawn from open-source projects. If you are wondering what exactly scipy.sort does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help.


Fifteen code examples of the sort function are shown below, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
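
Before the project-level examples, here is a minimal sketch of the basic call pattern. Historically scipy.sort has simply been a re-export of numpy.sort (it returns a sorted copy and never sorts in place); newer SciPy releases have dropped the alias, so the sketch below uses numpy.sort as the equivalent, safe spelling.

import numpy as np

a = np.array([3, 1, 2])
print(np.sort(a))            # [1 2 3] -- 'a' itself is left unchanged
print(np.sort(a)[::-1])      # [3 2 1] -- descending order via a reversed slice

m = np.array([[3, 1],
              [2, 4]])
print(np.sort(m, axis=0))    # sort each column independently -> [[2 1] [3 4]]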

Example 1: dataSubset

def dataSubset(fittingData,numDatapoints,seed=345,maxNumIndepParams=None):
    """
    By default, add one timepoint for each independent parameter first,
    then increase the number of timepoints per independent parameter.
    Timepoints are added randomly for each independent parameter.
    Independent parameters are added in the order of indepParamsList.
    """
    scipy.random.seed(seed)
    subset = []
    numIndepParams = len(fittingData)
    if maxNumIndepParams is None: maxNumIndepParams = numIndepParams
    numDatapoints = int(numDatapoints)
    for i in range(min(numDatapoints,maxNumIndepParams)):
        varNames = scipy.sort( fittingData[i].keys() )
        allTimes = scipy.sort( fittingData[i][varNames[0]].keys() )
        
        possibleIndices = range(len(allTimes))
        scipy.random.shuffle(possibleIndices)
        
        N = numDatapoints/maxNumIndepParams
        if i < numDatapoints%maxNumIndepParams: N += 1
        timeIndices = possibleIndices[:N]
        times = scipy.array(allTimes)[timeIndices]

        s = {}
        for var in varNames:
            s[var] = dict([(t,fittingData[i][var][t]) for t in times])
        subset.append(s)

    return subset
Developer: EmoryUniversityTheoreticalBiophysics, Project: SirIsaac, Lines of code: 30, Source file: fitAllParallel.py
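
This snippet targets Python 2: dict .keys() returns a list that scipy.sort can order directly, / performs integer division, and range objects can be shuffled in place. A minimal sketch of the same key-sorting and index-shuffling pattern under Python 3 follows, with numpy standing in for the removed scipy aliases; the toy fitting_point dictionary is purely illustrative.

import numpy as np

rng = np.random.default_rng(345)
fitting_point = {'x': {0.0: 1.2, 0.5: 1.7, 1.0: 2.3},
                 'y': {0.0: 0.1, 0.5: 0.4, 1.0: 0.9}}

var_names = np.sort(list(fitting_point.keys()))              # dict views must become lists first
all_times = np.sort(list(fitting_point[var_names[0]].keys()))

possible_indices = np.arange(len(all_times))
rng.shuffle(possible_indices)                                # range() can no longer be shuffled in place

n_per_param = 5 // 3                                         # use // for integer division
times = all_times[possible_indices[:n_per_param]]
print(var_names, times)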

Example 2: check

    def check( x ):
        y = sl.canonicalise( x )
        yr = y[0,:]
        yc = y[:,0]

        assert all( yr == sc.sort( yr ) )
        assert all( yc == sc.sort( yc ) )
Developer: arunchaganty, Project: spectral, Lines of code: 7, Source file: tests.py
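
The test simply asserts that a vector equals its own sorted copy, which is a compact way of checking that it is already in ascending order. A minimal self-contained sketch of the same idiom, using numpy in place of the project-specific sc/sl module aliases:

import numpy as np

def is_sorted(v):
    v = np.asarray(v)
    return bool(np.all(v == np.sort(v)))   # true iff v is already in ascending order

assert is_sorted([1, 2, 2, 5])
assert not is_sorted([3, 1, 2])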

Example 3: quantify_intron_retention

def quantify_intron_retention(event, gene, counts_segments, counts_edges, counts_seg_pos):

    cov = sp.zeros((2, ), dtype='float')
    sg = gene.splicegraph
    segs = gene.segmentgraph

    seg_lens = segs.segments[1, :] - segs.segments[0, :]
    seg_shape = segs.seg_edges.shape
    order = 'C'
    offset = 0

    ### find exons corresponding to event
    idx_exon1  = sp.where((sg.vertices[0, :] == event.exons1[0, 0]) & (sg.vertices[1, :] == event.exons1[0, 1]))[0]
    idx_exon2  = sp.where((sg.vertices[0, :] == event.exons1[1, 0]) & (sg.vertices[1, :] == event.exons1[1, 1]))[0]

    ### find segments corresponding to exons
    seg_exon1 = sp.sort(sp.where(segs.seg_match[idx_exon1, :])[1])
    seg_exon2 = sp.sort(sp.where(segs.seg_match[idx_exon2, :])[1])
    seg_all = sp.arange(seg_exon1[0], seg_exon2[-1])

    seg_intron = sp.setdiff1d(seg_all, seg_exon1)
    seg_intron = sp.setdiff1d(seg_intron, seg_exon2)
    assert(seg_intron.shape[0] > 0)

    ### compute exon coverages as mean of position wise coverage
    # intron_cov
    cov[0] = sp.sum(counts_segments[seg_intron] * seg_lens[seg_intron]) / sp.sum(seg_lens[seg_intron])

    ### check intron confirmation as sum of valid intron scores
    ### intron score is the number of reads confirming this intron
    # intron conf
    idx = sp.where(counts_edges[:, 0] == sp.ravel_multi_index([seg_exon1[-1], seg_exon2[0]], seg_shape, order=order) + offset)[0]
    cov[1] = counts_edges[idx, 1]

    return cov
Developer: ratschlab, Project: spladder, Lines of code: 35, Source file: quantify.py
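
The least obvious call here is sp.ravel_multi_index, which converts the (row, column) position of an intron edge in the square segment-edge matrix into the single flat index that column 0 of counts_edges is keyed by. A minimal sketch of that lookup pattern, with small invented arrays:

import numpy as np

seg_shape = (4, 4)                        # shape of a hypothetical segment-edge matrix
counts_edges = np.array([[ 1, 10.0],      # column 0: flat edge index, column 1: read count
                         [ 6, 25.0],
                         [11,  7.0]])

row, col = 1, 2                           # edge from segment 1 to segment 2
flat = np.ravel_multi_index([row, col], seg_shape, order='C')    # 1*4 + 2 = 6
idx = np.where(counts_edges[:, 0] == flat)[0]
print(counts_edges[idx, 1])               # [25.]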

Example 4: _remdup

def _remdup(a,amax=None):
    """Return 1 if vector a contains duplicate entries, else 0.
    """
    # scipy.sort returns a sorted copy rather than sorting in place,
    # so the result must be assigned back before scanning neighbours.
    a = scipy.sort(a)
    flag = 0
    for x in range(1,len(a)):
        if a[x-1] == a[x]:
            flag = 1
    return flag
Developer: myw, Project: dataiap, Lines of code: 9, Source file: genetic.py

Example 5: QQPlot

def QQPlot(arguments,pv,unique_pv,fname):
    font_size = 18
    mpl.rcParams['font.family']="sans-serif"
    mpl.rcParams['font.sans-serif']="Arial"
    mpl.rcParams['font.size']=font_size
    #mpl.rcParams['figure.dpi'] = 300
    mpl.rcParams['font.weight']='medium'
    mpl.rcParams['figure.facecolor'] = 'white'
    mpl.rcParams['lines.linewidth'] = 1
    mpl.rcParams['axes.facecolor'] = 'white'
    mpl.rcParams['patch.edgecolor'] = 'white'
    mpl.rcParams['grid.linestyle'] = '-'
    mpl.rcParams['grid.color'] = 'LightGray'
    if arguments.ignore!=None:
        if arguments.ignore in fname:
            return 
    
    if arguments.distinct:
        pv = unique_pv

    pl.figure(figsize=(5,5))
    pv_uni = (sp.arange(1.0/float(pv.shape[0]),1,1.0/float(pv.shape[0]+1)))
    pl.plot(-sp.log10(pv_uni),-sp.log10(sp.sort(pv_uni)),'b--')
    pl.ylim(0,(-sp.log10(pv[:])).max()+1)
    pl.plot(-sp.log10(pv_uni),-sp.log10(sp.sort(pv[:],axis=0)),'.',color="#F68E55",markersize=12,markeredgewidth=0,alpha=1)
    #plot theoretical expectations
    if arguments.estpv:
        datapoints=10**(sp.arange(sp.log10(0.5),sp.log10(pv.shape[0]-0.5)+0.1,0.1))
        beta_alpha=sp.zeros(datapoints.shape[0])
        beta_nalpha=sp.zeros(datapoints.shape[0])
        beta_tmp=sp.zeros(datapoints.shape[0])
        for n in xrange(datapoints.shape[0]):
            m=datapoints[n]
            beta_tmp[n]=stats.beta.ppf(0.5,m,pv.shape[0]-m)
            beta_alpha[n]=stats.beta.ppf(0.05,m,pv.shape[0]-m)
            beta_nalpha[n]=stats.beta.ppf(1-0.05,m,pv.shape[0]-m)
        estimated_pvals=datapoints/pv.shape[0]
        lower_bound = -sp.log10(estimated_pvals-(beta_tmp-beta_alpha))
        upper_bound = -sp.log10(estimated_pvals+(beta_nalpha-beta_tmp))
        pl.fill_between(-sp.log10(estimated_pvals),lower_bound,upper_bound,color='#00BFF3',alpha=0.4,linewidth=0)
    if arguments.title:
        pl.title("Phenotype: %s"%(fname))
    pl.xlabel('Expected $-log10(p-value)$')
    pl.ylabel('Observed $-log10(p-value)$')
    if arguments.gc:
        gc = sp.median(stats.chi2.isf(pv,1))/0.456
        pl.text(4,1,"$\hat \lambda=%.2f$"%(gc))
    remove_border()
    pl.subplots_adjust(left=0.14,bottom=0.13,right=0.97,top=0.95,wspace=0.45)
    pl.savefig(os.path.join(arguments.out,'qqplot_' + fname + '.' + arguments.iformat) )
    pl.close()
Developer: dominikgrimm, Project: easyGWASCore, Lines of code: 51, Source file: plotting.py
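
Stripped of the styling and the confidence-band estimation, the core of this plot is: sort the observed p-values, pair them with uniform expected quantiles, and draw both on a -log10 scale. A minimal sketch of just that core, on synthetic p-values and written against current numpy/matplotlib rather than the Python 2 xrange idiom above:

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
pv = rng.uniform(size=1000)                        # synthetic p-values under the null

n = pv.shape[0]
expected = (np.arange(1, n + 1) - 0.5) / n         # uniform expected quantiles in (0, 1)
observed = np.sort(pv)                             # scipy.sort / numpy.sort: ascending copy

plt.plot(-np.log10(expected), -np.log10(expected), 'b--')    # y = x reference line
plt.plot(-np.log10(expected), -np.log10(observed), '.')      # sorted observations
plt.xlabel('Expected $-\\log_{10}(p)$')
plt.ylabel('Observed $-\\log_{10}(p)$')
plt.show()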

Example 6: get_coords

 def get_coords(self, trafo=False):
     
     if self.event_type != 'mult_exon_skip':
         if trafo:
             #return sp.sort(sp.unique(sp.c_[self.exons1_col.ravel(), self.exons2_col.ravel()]))
             return sp.sort(sp.r_[self.exons1_col.ravel(), self.exons2_col.ravel()])
         else:
             #return sp.sort(sp.unique(sp.c_[self.exons1.ravel(), self.exons2.ravel()]))
             return sp.sort(sp.r_[self.exons1.ravel(), self.exons2.ravel()])
     else:
         if trafo:
             return sp.sort(sp.r_[self.exons1_col.ravel()[:4], self.exons2_col.ravel()[-4:]])
         else:
             return sp.sort(sp.r_[self.exons1.ravel()[:4], self.exons2.ravel()[-4:]])
Developer: jiahsinhuang, Project: spladder, Lines of code: 14, Source file: event.py
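
sp.r_ concatenates the flattened exon coordinate arrays, and the surrounding sp.sort returns the combined coordinates in genomic order (the commented-out variants additionally deduplicate them with unique). A minimal sketch of that concatenate-then-sort pattern on invented coordinates:

import numpy as np

exons1 = np.array([[100, 200], [300, 400]])
exons2 = np.array([[300, 400], [500, 600]])

coords = np.sort(np.r_[exons1.ravel(), exons2.ravel()])
print(coords)                                                      # [100 200 300 300 400 400 500 600]
print(np.sort(np.unique(np.r_[exons1.ravel(), exons2.ravel()])))   # deduplicated variant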

Example 7: quantify_mult_exon_skip

def quantify_mult_exon_skip(event, gene, counts_segments, counts_edges):

    cov = sp.zeros((2, ), dtype='float')

    sg = gene.splicegraph
    segs = gene.segmentgraph

    seg_lens = segs.segments[1, :] - segs.segments[0, :]
    seg_shape = segs.seg_edges.shape[0]
    order = 'C'
    offset = 0

    ### find exons corresponding to event
    idx_exon_pre  = sp.where((sg.vertices[0, :] == event.exons2[0, 0]) & (sg.vertices[1, :] == event.exons2[0, 1]))[0]
    idx_exon_aft  = sp.where((sg.vertices[0, :] == event.exons2[-1, 0]) & (sg.vertices[1, :] == event.exons2[-1, 1]))[0]
    seg_exons = []
    for i in range(1, event.exons2.shape[0] - 1):
        tmp = sp.where((sg.vertices[0, :] == event.exons2[i, 0]) & (sg.vertices[1, :] == event.exons2[i, 1]))[0]
        seg_exons.append(sp.where(segs.seg_match[tmp, :])[1])
    
    ### find segments corresponding to exons
    seg_exon_pre = sp.sort(sp.where(segs.seg_match[idx_exon_pre, :])[1])
    seg_exon_aft = sp.sort(sp.where(segs.seg_match[idx_exon_aft, :])[1])

    seg_exons_u = sp.sort(sp.unique([x for sublist in seg_exons for x in sublist]))

    ### inner exons_cov
    cov[0] = sp.sum(counts_segments[seg_exons_u] * seg_lens[seg_exons_u]) / sp.sum(seg_lens[seg_exons_u])

    ### check intron confirmation as sum of valid intron scores
    ### intron score is the number of reads confirming this intron
    # exon_pre_exon_conf
    idx1 = sp.where(counts_edges[:, 0] == sp.ravel_multi_index([seg_exon_pre[-1], seg_exons[0][0]], seg_shape, order=order) + offset)[0]
    if len(idx1.shape) > 0 and idx1.shape[0] > 0:
        cov[0] += counts_edges[idx1[0], 1]
    # exon_exon_aft_conf
    idx2 = sp.where(counts_edges[:, 0] == sp.ravel_multi_index([seg_exons[-1][-1], seg_exon_aft[0]], seg_shape, order=order) + offset)[0]
    if len(idx2.shape) > 0 and idx2.shape[0] > 0:
        cov[0] += counts_edges[idx2[0], 1]
    # exon_pre_exon_aft_conf
    idx3 = sp.where(counts_edges[:, 0] == sp.ravel_multi_index([seg_exon_pre[-1], seg_exon_aft[0]], seg_shape, order=order) + offset)[0]
    if len(idx3.shape) > 0 and idx3.shape[0] > 0:
        cov[1] = counts_edges[idx3[0], 1]
    for i in range(len(seg_exons) - 1):
        # sum_inner_exon_conf
        idx4 = sp.where(counts_edges[:, 0] == sp.ravel_multi_index([seg_exons[i][-1], seg_exons[i+1][0]], seg_shape, order=order) + offset)[0]
        if len(idx4.shape) > 0 and idx4.shape[0] > 0:
            cov[0] += counts_edges[idx4[0], 1]

    return cov
Developer: ratschlab, Project: spladder, Lines of code: 50, Source file: quantify.py

Example 8: NGorN50

def NGorN50(file_path='contigs.txt', genomesize=None):
    contigs, num_contig = file_parser(file_path)
    print( "Total number of contigs: %d " %(num_contig) ) # Expect 20

    # Sort the contigs in reverse order in an array e.g. 
    # array([79, 23, 10])
    contigs = scipy.sort(contigs)[::-1]
    #print(contigs)

    # Calculate sum to compare against for N50s or NG50
    if genomesize == None:
        contig_sum = contigs.sum()/2
        print( "50 Contig Sum is: %d" % (contig_sum) )
    else:
        contig_sum = int(genomesize)/2
        print ("50 Genome Size specified: %d" %(contig_sum))

    for counter in range(1, num_contig+1):
        # TODO: Consider memoizing this if you need to reuse this script for large contigs for performance gains.

        # Check the accumulated sum against the comparison
        if contigs[0:counter].sum() > contig_sum:
            print( "Partial Contig Sum is: %d, with counter: %d, and contig length %d" 
                % (contigs[0:counter].sum(), counter, contigs[counter-1]) )
            # Only need to find the first case
            break
Developer: djphan, Project: N50-Calculator, Lines of code: 26, Source file: N50_Calculator.py
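
The N50/NG50 recipe is: sort the contig lengths in descending order, walk the running total, and report the first length at which that total exceeds half the assembly size (or half the given genome size). A compact vectorised sketch of the same calculation on made-up lengths:

import numpy as np

def n50(lengths, genome_size=None):
    lengths = np.sort(np.asarray(lengths))[::-1]       # descending, as in the example above
    half = (genome_size if genome_size is not None else lengths.sum()) / 2.0
    cumulative = np.cumsum(lengths)
    return lengths[np.argmax(cumulative > half)]       # first length where the sum passes half

print(n50([79, 23, 10]))    # 79: the longest contig alone already covers more than half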

Example 9: generateNoteLength

  def generateNoteLength(self):
    length = (60. / self.wavetempo) * self.time_freq_fs
    note_length = sp.array([2**i for i in range(5)]) / 4.
    note_length *= length
    note_huten = sp.array(
        [note_length[i-1]+note_length[i] for i in range(1, 5)])
    note_length = sp.r_[note_length, note_huten]
    note_length = sp.sort(note_length)

    note_length_pair = []
    for i in range(note_length.size):
      try:
        upper = (note_length[i+1] - note_length[i])/2
        upper += note_length[i]
      except IndexError:
        upper = note_length[i] * 2
      try:
        lower = note_length_pair[-1][1]
      except IndexError:
        lower = 0
      note_length_pair.append((lower, upper))
        
    if(self.output_form == 'MML'):
      note_name = ['16', '16.', '8', '8.', '4', '4.', '2', '2.', '1']
    elif(self.output_form == 'PMX'):
      note_name = ['1', '1d', '8', '8d', '4', '4d', '2', '2d', '0']
    return (note_name, note_length_pair)
Developer: mackee, Project: utakata, Lines of code: 27, Source file: utakata_time_freq.py
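
After sorting, the loop turns the note lengths into contiguous (lower, upper) bins: each note's upper bound is the midpoint to the next longer note (the last one simply doubles itself), and its lower bound is the previous note's upper bound. A compact sketch of that binning step on arbitrary sorted centres:

import numpy as np

centres = np.sort(np.array([1.0, 2.0, 4.0, 8.0]))
pairs = []
for i, c in enumerate(centres):
    upper = (centres[i + 1] + c) / 2 if i + 1 < centres.size else c * 2
    lower = pairs[-1][1] if pairs else 0.0
    pairs.append((lower, upper))
print(pairs)    # [(0.0, 1.5), (1.5, 3.0), (3.0, 6.0), (6.0, 16.0)]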

Example 10: bulk_bands_calculator

 def bulk_bands_calculator(self,s,sub,kx,ky,kz):
   ''' Calculate the band energies for the specified kx, ky, and kz values.
       The 3x3 Hamiltonian for wurtzite crystals is used for the valence,
       while a 1x1 Hamiltonian is used for the conduction band. The model is
       from the chapter by Vurgaftman and Meyer in the book by Piprek. 
   '''
   E = scipy.zeros((4,len(s.Eg0)))   
   E[0,:] = s.Eg0+s.delcr+s.delso/3+\
               hbar**2/(2*s.mepara)*(kx**2+ky**2)+\
               hbar**2/(2*s.meperp)*(kz**2)+\
               (s.a1+s.D1)*s.epszz+(s.a2+s.D2)*(s.epsxx+s.epsyy)
   L = hbar**2/(2*m0)*(s.A1*kz**2+s.A2*(kx+ky)**2)+\
       s.D1*s.epszz+s.D2*(s.epsxx+s.epsyy)
   T = hbar**2/(2*m0)*(s.A3*kz**2+s.A4*(kx+ky)**2)+\
       s.D3*s.epszz+s.D4*(s.epsxx+s.epsyy)
   F = s.delcr+s.delso/3+L+T
   G = s.delcr-s.delso/3+L+T
   K = hbar**2/(2*m0)*s.A5*(kx+1j*ky)**2+s.D5*(s.epsxx-s.epsyy)
   H = hbar**2/(2*m0)*s.A6*(kx+1j*ky)*kz+s.D6*(s.epsxz)
   d = scipy.sqrt(2)*s.delso/3
   for ii in range(len(s.Eg0)):
     mat = scipy.matrix([[    F[ii],     K[ii],       -1j*H[ii]      ],
                         [    K[ii],     G[ii],       -1j*H[ii]+d[ii]],
                         [-1j*H[ii], -1j*H[ii]+d[ii],     L[ii]      ]])
     w,v = scipy.linalg.eig(mat)
     E[1:,ii] = scipy.flipud(scipy.sort(scipy.real(w)))
   return E
Developer: puluoning, Project: ledsim, Lines of code: 27, Source file: material.py
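
The scipy.sort call at the end is simply a descending sort of the real parts of the eigenvalues: sort ascending, then flip. A minimal sketch of that idiom on a small Hermitian matrix, with numpy standing in for the scipy aliases:

import numpy as np

mat = np.array([[2.0, 1.0j],
                [-1.0j, 3.0]])                  # small Hermitian example
w, v = np.linalg.eig(mat)
descending = np.flipud(np.sort(np.real(w)))     # equivalent to np.sort(w.real)[::-1]
print(descending)                               # [3.618... 1.381...]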

Example 11: termFrequencyMatrix

def termFrequencyMatrix(directory,stopwords,termlist):
	""" The student must code this. """
	filenames = sp.sort(os.listdir(directory))
	frequencyMatrix = sp.zeros((len(termlist),len(filenames)))
	for i in xrange(len(filenames)):
		frequencyMatrix[:,i] = termVector(directory + filenames[i],stopwords,termlist)
	return frequencyMatrix.astype(float)
Developer: KathleenF, Project: numerical_computing, Lines of code: 7, Source file: LSI.py
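
os.listdir returns directory entries in arbitrary, platform-dependent order, so sorting the filenames first makes the column order of the term-frequency matrix deterministic and reproducible. A minimal sketch of just that ordering step (termVector in the original is project-specific and not reproduced here):

import os
import numpy as np

def ordered_filenames(directory):
    # no ordering guarantee from os.listdir; sorting fixes the column order
    return np.sort(os.listdir(directory))       # same effect as sorted(os.listdir(directory))

# e.g. ordered_filenames('.') -> array of filenames in lexicographic order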

Example 12: traj_ensemble_quantiles

def traj_ensemble_quantiles(traj_set, quantiles=(0.025, 0.5, 0.975)):
    """
    Return a list of trajectories, each one corresponding the a given passed-in
    quantile.
    """
    all_values = scipy.array([traj.values for traj in traj_set])
    sorted_values = scipy.sort(all_values, 0)
                   
    q_trajs = []
    for q in quantiles:
        # Calculate the index corresponding to this quantile. The q is because
        #  Python arrays are 0 indexed
        index = q * (len(sorted_values) - 1)
        below = int(scipy.floor(index))
        above = int(scipy.ceil(index))
        if above == below:
            q_values = sorted_values[below]
        else:
            # Linearly interpolate...
            q_below = (1.0*below)/(len(sorted_values)-1)
            q_above = (1.0*above)/(len(sorted_values)-1)
            q_values = sorted_values[below] + (q - q_below)*(sorted_values[above] - sorted_values[below])/(q_above - q_below)
        q_traj = copy.deepcopy(traj_set[0])
        q_traj.values = q_values
        q_trajs.append(q_traj)

    return q_trajs
Developer: Colbert-Sesanker, Project: Networks, Lines of code: 27, Source file: Ensembles.py
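
The key detail is scipy.sort(all_values, 0): sorting along axis 0 orders each individual trajectory entry across ensemble members independently, so row k of the result approximates the k-th per-point order statistic. A minimal sketch of that axis-wise sort with a median read-out on a toy 5-member ensemble (numpy.quantile appears only as a cross-check, not as part of the original code):

import numpy as np

rng = np.random.default_rng(1)
ensemble = rng.normal(size=(5, 4))           # 5 ensemble members, 4 trajectory points

sorted_values = np.sort(ensemble, axis=0)    # same role as scipy.sort(all_values, 0)
median_traj = sorted_values[2]               # middle of 5 members ~ the 0.5 quantile per point

print(median_traj)
print(np.quantile(ensemble, 0.5, axis=0))    # cross-check: identical for an odd member count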

Example 13: draw_graph

def draw_graph(graph):

    # create networkx graph
    G=nx.Graph()
    
    ordered_node_list = scipy.sort([int(i[1::]) for i in graph])

    # add nodes
    #for node in graph:
    #    G.add_node(node)
    for num in ordered_node_list:
        G.add_node('n'+str(num))
    

    # add edges
    for i in graph:
        for j in graph[i][1::]:
            G.add_edge(i,j)
            
    colors = ['b','r','g','c','w','k']
    
    node_color = [colors[graph[node][0]] for node in graph]

    # draw graph
    #pos = nx.shell_layout(G)
    pos = nx.spring_layout(G,iterations=100)
    nx.draw(G, pos, node_color = node_color)

    # show graph
    plt.axis('off')
    plt.show()
Developer: atombear, Project: kami_solver, Lines of code: 31, Source file: KamiSolve.py

Example 14: subsetsWithFits

def subsetsWithFits(fileNumString,onlyNew=False):
    """
    Find data subsets (N) that have models that have been fit to
    all conditions.
    
    onlyNew (False)         : Optionally include only subsets that have
                              fits that are not included in the current
                              combined fitProbs.
    """
    fpd = loadFitProbData(fileNumString)
    saveFilename = fpd.values()[0]['saveFilename']
    
    Nlist = []
    for N in scipy.sort(fpd.keys()):
        # find models that have been fit to all conditions
        if len(fpd[N]['fitProbDataList']) == 1:
            fitModels = fpd[N]['fitProbDataList'][0]['logLikelihoodDict'].keys()
        else:
            fitModels = scipy.intersect1d([ fp['logLikelihoodDict'].keys() \
                                            for fp in fpd[N]['fittingProblemList'] ])
        if onlyNew:
            Nfilename = directoryPrefixNonly(fileNumString,N)+'/'+saveFilename
            fileExists = os.path.exists(Nfilename)
            if not fileExists: # no combined file exists
                if len(fitModels) > 0:
                    Nlist.append(N)
            else: # check which fit models are currently included in the saved file
                fpMultiple = load(Nfilename)
                fitModelsSaved = fpMultiple.logLikelihoodDict.keys()
                if len(scipy.intersect1d(fitModels,fitModelsSaved)) < len(fitModels):
                    Nlist.append(N)
        else:
            if len(fitModels) > 0:
                Nlist.append(N)
    return Nlist
Developer: EmoryUniversityTheoreticalBiophysics, Project: SirIsaac, Lines of code: 35, Source file: fitAllParallel.py

Example 15: quantify_mutex_exons

def quantify_mutex_exons(event, gene, counts_segments, counts_edges):

    ### initialise the two coverage values used below (cov[0], cov[1]);
    ### without this line the cov[...] assignments would raise a NameError
    cov = sp.zeros((2, ), dtype='float')

    sg = gene.splicegraph
    segs = gene.segmentgraph

    seg_lens = segs.segments[1, :] - segs.segments[0, :]
    seg_shape = segs.seg_edges.shape[0]
    order = 'C'
    offset = 0

    ### find exons corresponding to event
    idx_exon_pre  = sp.where((sg.vertices[0, :] == event.exons1[0, 0]) & (sg.vertices[1, :] == event.exons1[0, 1]))[0]
    idx_exon_aft  = sp.where((sg.vertices[0, :] == event.exons1[-1, 0]) & (sg.vertices[1, :] == event.exons1[-1, 1]))[0]
    idx_exon1  = sp.where((sg.vertices[0, :] == event.exons1[1, 0]) & (sg.vertices[1, :] == event.exons1[1, 1]))[0]
    idx_exon2  = sp.where((sg.vertices[0, :] == event.exons2[1, 0]) & (sg.vertices[1, :] == event.exons2[1, 1]))[0]
    
    ### find segments corresponding to exons
    seg_exon_pre = sp.sort(sp.where(segs.seg_match[idx_exon_pre, :])[1])
    seg_exon_aft = sp.sort(sp.where(segs.seg_match[idx_exon_aft, :])[1])
    seg_exon1 = sp.sort(sp.where(segs.seg_match[idx_exon1, :])[1])
    seg_exon2 = sp.sort(sp.where(segs.seg_match[idx_exon2, :])[1])

    # exon1 cov
    cov[0] = sp.sum(counts_segments[seg_exon1] * seg_lens[seg_exon1]) / sp.sum(seg_lens[seg_exon1])
    # exon2 cov
    cov[1] = sp.sum(counts_segments[seg_exon2] * seg_lens[seg_exon2]) / sp.sum(seg_lens[seg_exon2])

    ### check intron confirmation as sum of valid intron scores
    ### intron score is the number of reads confirming this intron
    # exon_pre_exon1_conf
    idx1 = sp.where(counts_edges[:, 0] == sp.ravel_multi_index([seg_exon_pre[-1], seg_exon1[0]], seg_shape, order=order) + offset)[0]
    if len(idx1.shape) > 0 and idx1.shape[0] > 0:
        cov[0] += counts_edges[idx1[0], 1]
    # exon_pre_exon2_conf
    idx2 = sp.where(counts_edges[:, 0] == sp.ravel_multi_index([seg_exon_pre[-1], seg_exon2[0]], seg_shape, order=order) + offset)[0]
    if len(idx2.shape) > 0 and idx2.shape[0] > 0:
        cov[1] += counts_edges[idx2[0], 1]
    # exon1_exon_aft_conf
    idx3 = sp.where(counts_edges[:, 0] == sp.ravel_multi_index([seg_exon1[-1], seg_exon_aft[0]], seg_shape, order=order) + offset)[0]
    if len(idx3.shape) > 0 and idx3.shape[0] > 0:
        cov[0] += counts_edges[idx3[0], 1]
    # exon2_exon_aft_conf
    idx4 = sp.where(counts_edges[:, 0] == sp.ravel_multi_index([seg_exon2[-1], seg_exon_aft[0]], seg_shape, order=order) + offset)[0]
    if len(idx4.shape) > 0 and idx4.shape[0] > 0:
        cov[1] += counts_edges[idx4[0], 1]

    return cov
Developer: ratschlab, Project: spladder, Lines of code: 47, Source file: quantify.py


Note: the scipy.sort examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and redistribution or use should follow the License of the corresponding project. Do not republish without permission.