

Python scipy.log2 Function Code Examples

This article collects typical usage examples of the Python function scipy.log2, gathered from open-source code. If you are wondering what scipy.log2 does, how to call it, or what it looks like in real projects, the hand-picked examples below should help.


Fifteen code examples of the log2 function are shown below, sorted by popularity.
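
Before the project excerpts, here is a minimal sketch of the function itself. scipy.log2 was simply a re-export of numpy.log2, and it has since been deprecated and removed from modern SciPy releases, so new code should call numpy.log2 directly:

import numpy as np

x = np.array([1.0, 2.0, 8.0, 1024.0])
print(np.log2(x))   # [ 0.  1.  3. 10.] -- element-wise base-2 logarithm

# On older SciPy releases, the examples below reach the same ufunc as:
#   import scipy as sp
#   sp.log2(x)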

Example 1: score

    def score(self):
        self.uniq_docids()

        fst_term = self.query_terms[0]
        term_docs_freq = self.im.get_term_info(fst_term).get_pos_map()
        for docid in self.rank_list:
            tf = 0
            if docid in term_docs_freq:
                tf = len(term_docs_freq[docid])
            score_this_term = sp.log2(self.parent.score_once(tf, docid))
            self.rank_list[docid] += score_this_term

        previous = 0
        for term_current in self.query_terms[1:]:
            term_previous = self.query_terms[previous]
            previous += 1

            if not (term_previous in self.im.token_id_map and term_current in self.im.token_id_map):
                continue

            term_previous_docs_freq = self.im.get_term_info(term_previous).get_pos_map()
            term_current_docs_freq = self.im.get_term_info(term_current).get_pos_map()

            for docid in self.rank_list:
                tf_w1 = 0
                distance_score = sp.inf
                if docid in term_previous_docs_freq:
                    tf_w1 = len(term_previous_docs_freq[docid])
                    distance_score = self.im.get_doc_len_by_id(docid)
                    if docid in term_current_docs_freq:
                        distance_score = shortest_dis(term_previous_docs_freq[docid], term_current_docs_freq[docid])
                score_this_term = sp.log2(self.score_once(tf_w1, distance_score))
                self.rank_list[docid] += score_this_term
Author: heroxdream | Project: information-retrieval | Source: BLMLaplace.py

Example 2: score

    def score(self):
        self.uniq_docids()

        fst_term = self.query_terms[0]
        term_docs_freq = term_freq[fst_term]
        parent = LMLaplace('<<empty query>>')
        for docid in self.rank_list:
            tf = 0
            if docid in term_docs_freq:
                tf = term_docs_freq[docid]
            score_this_term = sp.log2(parent.score_once(tf, docid))
            self.rank_list[docid] += score_this_term

        previous = 0
        for term_current in self.query_terms[1:]:
            term_previous = self.query_terms[previous]
            previous += 1

            if not (term_previous in term_freq and term_current in term_freq):
                continue

            term_previous_docs_freq = term_freq[term_previous]
            term_current_docs_freq = term_freq[term_current]

            for docid in self.rank_list:
                tf_w1 = 0
                distance_score = sp.inf
                if docid in term_previous_docs_freq:
                    tf_w1 = term_previous_docs_freq[docid]
                    distance_score = doc_length[docid]
                    if docid in term_current_docs_freq:
                        distance_score = bigram_distance(docid, term_previous, term_current)
                score_this_term = sp.log2(self.score_once(tf_w1, distance_score))
                self.rank_list[docid] += score_this_term
Author: heroxdream | Project: information-retrieval | Source: BLMLaplace.py

Example 3: generateNodesAdaptive

    def generateNodesAdaptive(self):
        innerDomainSize = self.innerDomainSize
        innerMeshSize   = self.innerMeshSize
        numberElementsInnerDomain = innerDomainSize/innerMeshSize
        assert numberElementsInnerDomain < self.numberElements
        domainCenter = (self.domainStart+self.domainEnd)/2
        nodes0 = np.linspace(domainCenter,innerDomainSize/2.0,int(numberElementsInnerDomain/2.0)+1)
        nodes0 = np.delete(nodes0,-1)
        numberOuterIntervalsFromDomainCenter = (self.numberElements - numberElementsInnerDomain)/2.0
        const = np.log2(innerDomainSize/2.0)/0.5
        exp = np.linspace(const,np.log2(self.domainEnd*self.domainEnd),int(numberOuterIntervalsFromDomainCenter)+1)
        nodes1 = np.power(np.sqrt(2),exp)
        nodesp = np.concatenate((nodes0,nodes1))
        nodesn = -nodesp[::-1]
        nodesn = np.delete(nodesn,-1)
        linNodalCoordinates = np.concatenate((nodesn,nodesp))
        nodalCoordinates = 0

        #Introduce higher order nodes
        if self.elementType == "quadratic" or self.elementType == "cubic":
           if self.elementType == "quadratic":
              numberNodesPerElement = 3 
           elif self.elementType == "cubic":
              numberNodesPerElement = 4

           for i in range(0,len(linNodalCoordinates)-1):
              newnodes = np.linspace(linNodalCoordinates[i],linNodalCoordinates[i+1],numberNodesPerElement)
              nodalCoordinates = np.delete(nodalCoordinates,-1)
              nodalCoordinates = np.concatenate((nodalCoordinates,newnodes))

        else:
           nodalCoordinates = linNodalCoordinates
    
        return nodalCoordinates
Author: mrinaliyer | Project: tuckerDFT | Source: FEM.py

Example 4: test_shannon

def test_shannon(guys):
    tot = float(len(guys))
    counts = count_digs(guys)  # external helper: per-digit occurrence counts

    # Shannon entropy in bits; the tiny 1e-10 offset guards against log2(0)
    entropy = sum( -count/tot*sp.log2(count/tot + 10**(-10)) for count in counts )

    # normalize by log2(10) so a uniform distribution over 10 digits scores ~1
    return max(1e-10,entropy/sp.log2(10))
Author: thephysicsvirtuosi | Project: random-site | Source: tests.py
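
A quick sanity check of the normalization (count_digs is not shown in the excerpt, so a perfectly uniform digit histogram stands in for its output here):

import scipy as sp  # or: import numpy as sp, on modern installs

counts = [100] * 10           # 1000 samples, uniform over the 10 digits
tot = 1000.0
entropy = sum(-c/tot * sp.log2(c/tot + 1e-10) for c in counts)
print(entropy / sp.log2(10))  # ~1.0 for a uniform distribution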

Example 5: set_comparison_plot

def set_comparison_plot():
    #pl.xlim(xmin = max(0, pl.xlim()[1] -16 ))
    pyplot.xticks(
        symbols - 2 ** scipy.arange(scipy.log2(symbols))[::-1],
        2 ** scipy.arange(scipy.log2(symbols), dtype=int)[::-1])
    pyplot.grid(True)
    plotter.set_slave_info(slavename)
    pyplot.xlabel("Rank Deficiency")
    pyplot.ylabel("Extra Packets")
Author: GOPRO1955 | Project: kodo | Source: plot_dependency.py

Example 6: kl

def kl(p, q):
    """Compute the KL divergence between two discrete probability distributions

    The calculation is done directly using the Kullback-Leibler divergence,
    KL( p || q ) = sum_{x} p(x) log_2( p(x) / q(x) )

    A base-2 logarithm is used, so the returned value is measured in bits.
    """
    if (p==0.).sum()+(q==0.).sum() > 0:
        raise ValueError("Zero bins found")
    return (p*(log2(p) - log2(q))).sum()
Author: dawnsong | Project: neuro-kl | Source: kl_tools.py
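
A short usage sketch on two hand-made distributions (kl expects log2 in scope at module level; np.log2 serves on modern installs where scipy.log2 is gone):

import numpy as np

p = np.array([0.5, 0.25, 0.25])
q = np.array([0.25, 0.5, 0.25])
print(kl(p, q))  # 0.5*1 + 0.25*(-1) + 0.25*0 = 0.25 bits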

Example 7: calculateLevel

	def calculateLevel(self, s, t):
		'''
		Calculate the appropriate mipmap level for texture filtering over a
		quadrilateral given by the texture-space vertices
		[s[1], t[1]], ... , [s[4], t[4]].

		There are many ways to do this; the instance variable levelCalcMethod
		selects the desired one.  The most correct way is to choose the
		minSideLen method, as long as the quadrilateral is vaguely
		rectangular-shaped.  This only works if you're happy to use lots of
		samples however, otherwise you get aliasing.
		'''
		s = s.copy()*self.levels[0].image.shape[0]
		t = t.copy()*self.levels[0].image.shape[1]
		if self.levelCalcMethod == 'minSideLen':
			# Get mipmap level with minimum feature size equal to the shortest
			# quadrilateral side
			s1 = pylab.concatenate((s, s[0:1]))
			t1 = pylab.concatenate((t, t[0:1]))
			minSideLen2 = (numpy.diff(s1)**2 + numpy.diff(t1)**2).min()
			level = log2(minSideLen2)/2
		elif self.levelCalcMethod == 'minQuadWidth':
			# Get mipmap level with minimum feature size equal to the width of
			# the quadrilateral.  This one is kinda tricky.
			# v1,v2 = vectors along edges
			v1 = array([0.5*(s[1]-s[0] + s[2]-s[3]), 0.5*(t[1]-t[0] + t[2]-t[3]),])
			v2 = array([0.5*(s[3]-s[0] + s[2]-s[1]), 0.5*(t[3]-t[0] + t[2]-t[1]),])
			v1Sq = dot(v1,v1)
			v2Sq = dot(v2,v2)
			level = 0.5*log2(min(v1Sq,v2Sq) * (1 - dot(v1,v2)**2/(v1Sq*v2Sq)))
		elif self.levelCalcMethod == 'minDiag':
			# Get mipmap level with minimum feature size equal to the minimum
			# distance between the centre of the quad and the vertices.  Sort
			# of a "quad radius"
			#
			# This is more-or-less the algorithm used in Pixie...
			minDiag2 = ((s - s.mean())**2 + (t - t.mean())**2).min()
			level = log2(minDiag2)/2
		#elif self.levelCalcMethod == 'sqrtArea':
			# Get mipmap level with minimum feature size estimated as the
			# square root of the area of the box.
		elif self.levelCalcMethod == 'trilinear':
			# Get mipmap level which will result in no aliasing when plain
			# trilinear filtering is used (no integration)
			maxDiag2 = ((s - s.mean())**2 + (t - t.mean())**2).max()
			level = log2(maxDiag2)/2
		elif self.levelCalcMethod == 'level0':
			# Else just use level 0.  Correct texture filtering will take care
			# of any aliasing...
			level = 0
		else:
			raise "Invalid mipmap level calculation type: %s" % self.levelCalcMethod
		return max(level,0)
Author: UIKit0 | Project: aqsis | Source: texfilt.py

Example 8: plot_cwt

def plot_cwt(t):
    s1 = plt.subplot(221)
    t.plot()
    s2 = plt.subplot(222)
    spec = time_avg(t)
    plt.plot(spec, sp.log2(t.period))
    plt.ylim(sp.log2(t.period).max(), sp.log2(t.period).min())
    nscales = len(t.scales)
    yt = sp.arange(nscales, step=int(1 / t.dscale))  # original read self.dscale, undefined in a free function; t.dscale assumed
    plt.yticks(yt, t.scales[yt])
    plt.ylim(nscales - 1, 0)
    s1.set_position((0.1, 0.1, 0.65, 0.8))
    s2.set_position((0.8, 0.1, 0.15, 0.8))
Author: ElOceanografo | Project: PyCWT | Source: pycwt.py

Example 9: get_independent

def get_independent(MyList, sorting = False, sigma = 0.387):
    """
    Calculate the tuple of (L,k) or (L,k,W)
    using a list written in short form, as follows:
    [
      [L, [k1,k2,k3,...], [W1,W2]],       # Case 1
      [L, [k1,k2,k3,...], (Wmin,Wmax)],   # Case 2
      ...
    ]
    The windows W of Case 2 are calculated as powers of 2,
    from Wmin to Wmax included.
    Output:
    independentNames (as "L,k", or "L,k,W")
    independentValues
    """
    out = {}
    numIndependentNames = len(MyList[0])
    independentNames = "L, k"
    if numIndependentNames == 3:
        independentNames = independentNames + ", W"
    #
    for line in MyList:
        if numIndependentNames == 2:
            L, ks = line
        elif numIndependentNames == 3:
            L, ks, Ws = line
            if isinstance(Ws,int):
                Ws = [Ws]
            elif isinstance(Ws,tuple):
                lower_e, upper_e = scipy.log2(Ws[0]), scipy.log2(Ws[1])
                e2 = scipy.arange(int(lower_e), int(upper_e)+1)  # log2 returns floats; range() needs ints
                Ws = 2**e2
        if not isinstance(ks,list):
            ks = [ks]
        
        for k in ks:
            if numIndependentNames == 2:
                wincorr = 1.0*k/L
                out[wincorr] = L, k
            elif numIndependentNames == 3:
                for W in Ws:
                    wincorr = 1.0*W*(1.0*k/L)**sigma
                    out[wincorr] = L, k, W

    if sorting:
        return independentNames, list(map(out.get, sorted(out)))
    else:
        return independentNames, out.values()
Author: gdurin | Project: SloppyScaling | Source: Utils.py
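
A sketch of the expected input format, with hypothetical values: one system of size L=64, k in {2, 4}, and windows as powers of two from 4 to 32:

names, values = get_independent([[64, [2, 4], (4, 32)]], sorting=True)
print(names)   # "L, k, W"
print(values)  # [(64, 2, 4), (64, 4, 4), (64, 2, 8), ...] ordered by W*(k/L)**sigma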

Example 10: plotHeatmap

def plotHeatmap(fwrap, aclass, algoparams, trials, maxsteps):
    """ Visualizing performance across trials and across time 
    (iterations in powers of 2) """
    psteps = int(log2(maxsteps)) + 1
    storesteps = [0] + [2 ** x  for x in range(psteps)]
    ls = lossTraces(fwrap, aclass, dim=trials, maxsteps=maxsteps,
                    storesteps=storesteps, algoparams=algoparams,
                    minLoss=1e-10)
            
    initv = mean(ls[0])
    maxgain = exp(fwrap.stochfun.maxLogGain(maxsteps) + 1)
    maxneggain = (sqrt(maxgain))
    
    M = zeros((psteps, trials))
    for sid in range(psteps):
        # skip the initial values
        winfactors = clip(initv / ls[sid+1], 1. / maxneggain, maxgain)
        winfactors[isnan(winfactors)] = 1. / maxneggain
        M[sid, :] = log10(sorted(winfactors))
        
    pylab.imshow(M.T, interpolation='nearest', cmap=cm.RdBu, #@UndefinedVariable
                 aspect=psteps / float(trials) / 1,  
                 vmin= -log10(maxgain), vmax=log10(maxgain),
                 )   
    pylab.xticks([])
    pylab.yticks([])
    return ls
Author: Andres-Hernandez | Project: py-optim | Source: plotting.py

Example 11: _get_freq_stuff

def _get_freq_stuff(x, params, timeDim=2, verbose=None):
    '''
    internal function, not really meant to be called/viewed by the end user
    (unless end user is curious).

    computes nfft based on x.shape.
    '''
    badNfft = False
    if 'nfft' in params:
        if params['nfft'] < x.shape[timeDim]:
            badNfft = True
            logger.warn(
                'nfft should be >= the number of time points. Reverting '
                'to the default setting of nfft = 2**ceil(log2(nTimePts))\n')

    if 'nfft' not in params or badNfft:
        nfft = int(2.0 ** ceil(sci.log2(x.shape[timeDim])))
    else:
        nfft = int(params['nfft'])

    f = (np.arange(0.0, nfft, 1.0) * params['Fs'] / nfft)
    fInd = ((f >= params['fpass'][0]) & (f <= params['fpass'][1]))

    f = f[fInd]

    return (nfft, f, fInd)
Author: SusanMcL | Project: ANLffr | Source: spectral.py
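
The nfft default is the classic next-power-of-two rule; the same line in isolation:

import numpy as np
from math import ceil

n_time_pts = 300
nfft = int(2.0 ** ceil(np.log2(n_time_pts)))
print(nfft)  # 512, the smallest power of two >= 300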

Example 12: integrator_solve

def integrator_solve(df):
    cum_vec = np.array(np.cumsum(df['ct']))
    binheaders = utils.get_column_headers(df)
    n_bins = 1000
    n_batches = len(binheaders)
    f_binned = sp.zeros((n_batches,n_bins))
    # n_bins+1 edges so that bins[i+1] exists on the last iteration
    bins = np.linspace(cum_vec[-1]/n_bins-1,cum_vec[-1]-1,n_bins+1,dtype=int)
    for i in range(n_bins):
        for j in range(n_batches):
            batch_name = binheaders[j]
            f_binned[j,i] = scipy.integrate.quad(integrand_1,bins[i],bins[i+1])[0]
    f_reg = scipy.ndimage.gaussian_filter1d(f_binned,0.04*n_bins,axis=0)
    f_reg = f_reg/f_reg.sum()

    # compute marginal probabilities
    p_b = sp.sum(f_reg,axis=1)
    p_s = sp.sum(f_reg,axis=0)

    # finally sum to compute the MI
    MI = 0
    for j in range(n_batches):
        for i in range(n_bins):
            if f_reg[i,j] != 0:
                MI = MI + f_reg[i,j]*sp.log2(f_reg[i,j]/(p_b[i]*p_s[j]))
    return MI
Author: jbkinney | Project: mpathic | Source: EstimateMutualInfoforMImax.py
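
The final double loop is the standard discrete mutual information formula, MI = sum_{i,j} p(i,j) log2( p(i,j) / (p_b(i) p_s(j)) ). A self-contained sketch on a toy 2x2 joint distribution:

import numpy as np

joint = np.array([[0.4, 0.1],
                  [0.1, 0.4]])          # joint distribution p(i, j)
p_b = joint.sum(axis=1)                 # marginal over rows
p_s = joint.sum(axis=0)                 # marginal over columns
mi = sum(joint[i, j] * np.log2(joint[i, j] / (p_b[i] * p_s[j]))
         for i in range(2) for j in range(2) if joint[i, j] > 0)
print(mi)  # ~0.278 bits of dependence between the two variables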

Example 13: hz2midi

def hz2midi(hz):
    """
    midi = hz2midi(hz)

    Converts frequency in Hertz to midi notation.
    """
    return 12*scipy.log2(hz/440.0) + 69
Author: zangsir | Project: pymir | Source: mir.py
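
Sanity check of the mapping: concert A (440 Hz) is MIDI note 69, and each doubling of frequency adds an octave (12 semitones):

print(hz2midi(440.0))   # 69.0 (A4)
print(hz2midi(880.0))   # 81.0, one octave up
print(hz2midi(261.63))  # ~60.0, middle C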

Example 14: negativity

def negativity(rho, subsys, method='tracenorm', logarithmic=False):
    """
    Compute the negativity for a multipartite quantum system described
    by the density matrix rho. The subsys argument is an index that
    indicates which system to compute the negativity for.

    .. note::

        Experimental.
    """
    mask = [idx == subsys for idx, n in enumerate(rho.dims[0])]
    rho_pt = partial_transpose(rho, mask)

    if method == 'tracenorm':
        N = ((rho_pt.dag() * rho_pt).sqrtm().tr().real - 1)/2.0
    elif method == 'eigenvalues':
        l = rho_pt.eigenenergies()
        N = ((abs(l)-l)/2).sum()
    else:
        raise ValueError("Unknown method %s" % method)

    if logarithmic:
        return log2(2 * N + 1)
    else:
        return N
Author: JonathanUlm | Project: qutip | Source: entropy.py
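
For a maximally entangled two-qubit state the negativity is 1/2 and the logarithmic negativity is log2(2*0.5 + 1) = 1. A sketch using QuTiP helpers (assuming bell_state and ket2dm are importable as in recent QuTiP versions):

from qutip import bell_state, ket2dm

rho = ket2dm(bell_state('00'))                # maximally entangled Bell state
print(negativity(rho, 0))                     # 0.5
print(negativity(rho, 0, logarithmic=True))   # log2(2*0.5 + 1) = 1.0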

Example 15: main_loop

def main_loop(init_param, X, K, iter=1000, tol=1e-6):
    """
    Gaussian Mixture Model
    Arguments:
    - `X`: Input data (2D array, [[x11, x12, ..., x1D], ..., [xN1, ... xND]]).
    - `K`: Number of clusters.
    - `iter`: Number of iterations to run.
    - `tol`: Tolerance.
    """
    X = sp.asarray(X)
    N, D = X.shape
    pi = sp.asarray(init_param["coff"])
    mu = sp.asarray(init_param["mean"])
    sigma = sp.asarray(init_param["cov"])

    L = sp.inf

    for i in range(iter):
        # E-step
        gamma = sp.apply_along_axis(
            lambda x: sp.fromiter(
                (pi[k] * gauss_mixture_calculate(x, mu[k], sigma[k]) for k in range(K)), dtype=float
            ),
            1,
            X,
        )
        gamma /= sp.sum(gamma, 1)[:, sp.newaxis]

        # M-step
        Nk = sp.sum(gamma, 0)
        mu = sp.sum(X * gamma.T[..., sp.newaxis], 1) / Nk[..., sp.newaxis]
        xmu = X[:, sp.newaxis, :] - mu
        sigma = (
            sp.sum(gamma[..., sp.newaxis, sp.newaxis] * xmu[:, :, sp.newaxis, :] * xmu[:, :, :, sp.newaxis], 0)
            / Nk[..., sp.newaxis, sp.newaxis]
        )
        pi = Nk / N

        # Likelihood
        Lnew = sp.sum(
            sp.log2(
                sp.sum(
                    sp.apply_along_axis(
                        lambda x: sp.fromiter(
                            (pi[k] * gauss_mixture_calculate(x, mu[k], sigma[k]) for k in range(K)), dtype=float
                        ),
                        1,
                        X,
                    ),
                    1,
                )
            )
        )
        if abs(L - Lnew) < tol:
            break
        L = Lnew
        print "log likelihood=%s" % L

    return dict(pi=pi, mu=mu, sigma=sigma, gamma=gamma)
Author: huydx | Project: datamining-collection | Source: em-algorithm.py
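
A sketch of driving this EM loop. gauss_mixture_calculate is not shown in the excerpt, so a standard multivariate normal density stands in for it, and the excerpt itself relies on an older SciPy where top-level NumPy aliases (sp.apply_along_axis, sp.inf, sp.log2, ...) still exist:

import numpy as np
from scipy.stats import multivariate_normal

# stand-in for the excerpt's undefined helper
def gauss_mixture_calculate(x, mu, sigma):
    return multivariate_normal.pdf(x, mean=mu, cov=sigma)

# two well-separated 2-D clusters
X = np.vstack([np.random.randn(50, 2) - 3, np.random.randn(50, 2) + 3])
init_param = {"coff": [0.5, 0.5],                 # mixture weights
              "mean": [[-1.0, -1.0], [1.0, 1.0]],
              "cov":  [np.eye(2), np.eye(2)]}
result = main_loop(init_param, X, K=2, iter=100)
print(result["pi"], result["mu"])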


Note: The scipy.log2 function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by many developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not republish without permission.