

Python mpmath.log Function Code Examples

This article collects typical usage examples of the mpmath.log function in Python. If you are wondering what exactly mpmath.log does, how to call it, or what real-world uses look like, the hand-picked code examples below should help.


The following presents 15 code examples of the log function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
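
Before the examples from real projects, here is a minimal standalone sketch (not taken from any repository below) of the basics: mpmath.log computes logarithms at arbitrary precision, takes an optional base as its second argument, and returns a complex value for negative input rather than raising an error.

import mpmath

mpmath.mp.dps = 30                        # work with 30 significant decimal digits
print(mpmath.log(2))                      # natural logarithm: 0.693147...
print(mpmath.log(8, 2))                   # optional second argument is the base: 3.0
print(mpmath.log(mpmath.mpf('1e-5000')))  # magnitudes far outside float64 range are fine
print(mpmath.log(-1))                     # complex result for negative input: (0.0 + 3.14159...j)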

Example 1: get_apparent_activation_energy

    def get_apparent_activation_energy(self, rxn_parameters, epsilon=1e-10):
        """
        Return apparent Arrhenius activation energies (in units of R)
        for production/consumption of each gas-phase species.
        Calculated as E_app = T^2 (d ln r_+ / dT) = (T^2 / r_+)(d r_+ / dT), where r_+ is the TOF.

        :param rxn_parameters: reaction parameters, see solver-base
        :param epsilon: degree of perturbation in temperature
        :type epsilon: float, optional
        """
        current_tofs = self.get_turnover_frequency(rxn_parameters)
        current_T = self.temperature
        new_T = current_T*(1+epsilon)
        dT = new_T-current_T
        self.temperature = new_T
        descriptors = list(self._rxm.mapper._descriptors) #don't overwrite them, if temperature is a descriptor
        if 'temperature' in self._rxm.descriptor_names:
            index = self._rxm.descriptor_names.index('temperature')
            descriptors[index] = new_T
        rxn_parameters_newT = self._rxm.scaler.get_rxn_parameters(descriptors)
        new_tofs = self.get_turnover_frequency(rxn_parameters_newT)
        E_apps = []
        R = 8.31447e-3/96.485307  # gas constant in eV/K

        for i,gas in enumerate(self.gas_names):
            barriers_i = []
            dlnTOF = mp.log(new_tofs[i])-mp.log(current_tofs[i]) #this will fail if any of the TOFs are 0.
            E_app = R*float(dlnTOF.real)/dT*(current_T**2)
            E_apps.append(E_app)

        self.temperature = current_T
        self._apparent_activation_energy = E_apps
        #self.get_turnover_frequency(rxn_parameters)
        print(E_apps)
        return E_apps
Developer: ajmedford, Project: catmap, Lines: 35, Source: mean_field_solver.py
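
The finite-difference recipe above is easiest to check on a toy Arrhenius rate whose activation energy is known in advance. The following standalone sketch (hypothetical numbers, independent of catmap) recovers Ea from E_app = R*T^2*(d ln r/dT):

import mpmath as mp

mp.mp.dps = 30              # extra digits so the tiny perturbation survives the subtraction
R = 8.31447e-3 / 96.485307  # gas constant in eV/K, as in the example above
Ea = 0.75                   # toy activation energy in eV

def tof(T):
    # Arrhenius rate r = A * exp(-Ea / (R*T)) with an arbitrary prefactor
    return 1e8 * mp.exp(-Ea / (R * T))

T, epsilon = 500.0, 1e-10
dT = T * epsilon
dlnTOF = mp.log(tof(T + dT)) - mp.log(tof(T))
print(R * float(dlnTOF) / dT * (T ** 2))   # ~0.75, recovering Ea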

Example 2: dpint

def dpint(f, snr):
    '''Integrand of the detection probability of single sources. The modified
    Bessel function takes very large values, so the product is assembled in
    log space and only exponentiated at the end.'''
    big = mpmath.log(mpmath.besseli(1, snr*np.sqrt(2.*f)))
    small = mpmath.mpf(-f - 0.5*snr**2.)
    normal = mpmath.log(np.sqrt(2.*f)/snr)
    result = mpmath.exp(mpmath.fsum([big, small, normal]))
    return float(result)  # the final result is between 0 and some sizeable number, so a float is enough
Developer: pabloarosado, Project: horizon_python, Lines: 7, Source: PLOT_threshold_more2.py
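
The detour through mpmath.log exists because I1(x) grows like e^x/sqrt(2*pi*x): float64 overflows near e^709, while the logarithm of the Bessel factor stays perfectly tame. A standalone sketch with toy numbers:

import numpy as np
import mpmath

snr, f = 40.0, 900.0
x = snr * np.sqrt(2. * f)               # ~1697, so I1(x) ~ e^1697 would overflow float64
big = mpmath.log(mpmath.besseli(1, x))  # ~1692, harmless once in log space
small = mpmath.mpf(-f - 0.5 * snr**2.)  # -1700
normal = mpmath.log(np.sqrt(2. * f) / snr)
print(float(mpmath.exp(mpmath.fsum([big, small, normal]))))  # small but nonzero, safely in float range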

Example 3: findGaussianChangePoint

def findGaussianChangePoint(data):

    # the denominator. This is the easy part.
    N = len(data)

    if N < 6: return None  # can't find a change point in data this small

    s2 = mpf(data.var())
    gpart = gamma(mpf(N)/2.0 - 1)
    denom = (pi**1.5) * mpf(N*s2)**(-N/2.0 + 0.5) * gpart

    # the numerator. A little trickier.
    # calc_twostate_weights() already deals with ts<3 and ts>N-2.
    weights = calc_twostate_weights(data)
    if weights is None: return None

    num = 2.0**2.5 * abs(data.mean()) * weights.mean()

    logodds = log(num) - log(denom)

    print("num:", num, "log num:", log(num), "| denom:", denom, "log denom:", log(denom), "|| log odds:", logodds)

    # If there is a change point, then logodds will be greater than 0
    if logodds < 0:
        return None

    return (weights.argmax(), logodds)
Developer: ChayaSt, Project: cpdetect, Lines: 31, Source: old2cpDetect.py

Example 4: mobility

    def mobility(self, z=1000, E=0, T=300, pn=None):
        if pn is None:
            Eg = self.band_gap(T, symbolic=False, electron_volts=False)
            pn = (self.Nc(T, symbolic=False) * self.Nv(T, symbolic=False)
                  * mp.exp(self.__to_numeric(-Eg / (k * T))) * 1e-12)
        N = 0
        for dopant in self.dopants:
            N += dopant.concentration(z)
        N *= 1e-6
        mobility = {'mobility_e': {'mu_L': 0, 'mu_I': 0, 'mu_ccs': 0, 'mu_tot': 0},
                    'mobility_h': {'mu_L': 0, 'mu_I': 0, 'mu_ccs': 0, 'mu_tot': 0}}
        for key in mobility.keys():
            # lattice (phonon) scattering
            mu_L = self.reference[key]['mu_L0'] * (T / 300.0) ** (-self.reference[key]['alpha'])
            # ionized-impurity scattering
            mu_I = ((self.reference[key]['A'] * (T ** (3 / 2)) / N)
                    / (mp.log(1 + self.reference[key]['B'] * (T ** 2) / N)
                       - self.reference[key]['B'] * (T ** 2) / (self.reference[key]['B'] * (T ** 2) + N)))
            try:
                # carrier-carrier scattering
                mu_ccs = ((2e17 * (T ** (3 / 2)) / mp.sqrt(pn))
                          / mp.log(1 + 8.28e8 * (T ** 2) * (pn ** (-1 / 3))))
                X = mp.sqrt(6 * mu_L * (mu_I + mu_ccs) / (mu_I * mu_ccs))
            except Exception:
                mu_ccs = np.nan
                X = 0
            # combined low-field mobility
            mu_tot = mu_L * (1.025 / (1 + ((X / 1.68) ** 1.43)) - 0.025)
            # high-field correction via the saturation velocity v_s
            Field_coeff = ((1 + (mu_tot * E * 1e-2 / self.reference[key]['v_s'])
                            ** self.reference[key]['beta'])
                           ** (-1 / self.reference[key]['beta']))
            mobility[key]['mu_L'] = mu_L * 1e-4
            mobility[key]['mu_I'] = mu_I * 1e-4
            mobility[key]['mu_ccs'] = mu_ccs * 1e-4
            mobility[key]['mu_tot'] = mu_tot * 1e-4 * Field_coeff
        return mobility
Developer: bond-anton, Project: Schottky, Lines: 34, Source: Semiconductor.py

Example 5: findPoissonChangePoint

def findPoissonChangePoint(data, factorial):
    # data is a list of counts in each time period, uniformly spaced

    # the denominator (including both P(D|H1) and constant parts of P(D|H2))
    C = data.sum()
    n = len(data)
    N = mpf(n)
    denominator = factorial[C-1] * pi / (2 * N**C)

    # the numerator (trickier): it needs to be averaged over the possible change points
    weights = zeros(n, dtype=object)
    CA = 0
    CB = C
    for i in range(1, n):
        # points up through i are in data set A; the rest are in B
        datapoint = data[i-1]
        NA = mpf(i);   CA += datapoint
        NB = mpf(n-i); CB -= datapoint

        fraction_num = factorial[CA] * factorial[CB]
        fraction_den = NA**(CA+1) * NB**(CB+1) * ((CA/NA)**2 + (CB/NB)**2)
        weights[i-1] = mpf(fraction_num) / fraction_den

    numerator = weights.mean()
    lognum = inv_log10 * log(numerator)
    logden = inv_log10 * log(denominator)
    logodds = lognum - logden
    print("num:", numerator, "log num:", lognum, "| denom:", denominator, "log denom:", logden, "|| log odds:", logodds)

    # If there is a change point, then logodds will be greater than 0
    if logodds < 0: return None
    return (weights.argmax(), logodds)
Developer: ChayaSt, Project: cpdetect, Lines: 33, Source: cpDetect.py
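
A hypothetical driver for the function above, assuming it and its module-level helpers (mpf, zeros, pi and log, plus the constant inv_log10 = 1/ln 10) are in scope: precompute the factorial table as exact mpmath values, then scan a synthetic series whose rate visibly jumps.

import numpy as np
import mpmath

inv_log10 = 1 / mpmath.log(10)   # module-level constant the function relies on

rng = np.random.default_rng(0)
# object dtype keeps the counts as exact Python ints, which mpmath converts cleanly
data = np.concatenate([rng.poisson(3, 50), rng.poisson(10, 50)]).astype(object)

# factorial table 0! .. C! as exact mpf values, where C = data.sum()
factorial = [mpmath.factorial(n) for n in range(int(data.sum()) + 1)]

print(findPoissonChangePoint(data, factorial))  # expect (index near 49, positive log odds)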

Example 6: genenergies

def genenergies(fnR, fnQ, seqsR, seqsQ, gamma, sQ, sR, R0):
    """Parse seqs and model type, then calculate and return energies.
    R is the transcription factor, Q is RNAP."""
    ematR = np.genfromtxt(fnR, skip_header=1)
    ematQ = np.genfromtxt(fnQ, skip_header=1)
    fR = open(fnR)
    fQ = open(fnQ)
    mattype = fR.read()[:6]  # matrix type; must be the same for both models
    #mattypeQ = fQ.read()[:6]
    energies = np.zeros(len(seqsQ))
    N = len(seqsQ)
    mut_region_lengthQ = len(seqsQ[0])
    mut_region_lengthR = len(seqsR[0])

    if mattype == '1Point':
        for i, s in enumerate(seqsR):
            seq_matR = seq2mat(s)
            seq_matQ = seq2mat(seqsQ[i])
            RNAP = (seq_matQ * ematQ).sum() * sQ
            TF = (seq_matR * ematR).sum() * sR + R0
            energies[i] = -RNAP + mp.log(1 + mp.exp(-TF - gamma)) - mp.log(1 + mp.exp(-TF))
    '''
    elif mattype == '2Point':
            for i,s in enumerate(seqs):
                seq_mat = np.zeros(round(sp.misc.comb(mut_region_length,2))*16)
                seq_mat[seq2mat2(s)] = 1
                energies[i] = (seq_mat*(emat.ravel())).sum()
    elif mattype == '3Point':
            for i,s in enumerate(seqs):
                seq_mat = np.zeros(round(sp.misc.comb(mut_region_length,3))*64)
                seq_mat[seq2mat3(s)] = 1
                energies[i] = (seq_mat*(emat.ravel())).sum()
    '''
    return energies
Developer: irelandb, Project: sortseq, Lines: 32, Source: ThermoSimUtils.py
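
The pair of mp.log terms in the 1Point branch is where arbitrary precision matters: for strongly negative TF, exp(-TF) overflows float64 (which gives up near exp(709)), while mpmath evaluates both logarithms without trouble. A standalone sketch of the limiting behaviour, with toy numbers:

import mpmath as mp

TF, gamma = -800.0, 2.0   # hypothetical strong-binding energies
delta = mp.log(1 + mp.exp(-TF - gamma)) - mp.log(1 + mp.exp(-TF))
print(float(delta))       # -2.0: in the strong-binding limit the correction tends to -gamma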

Example 7: eta

def eta(lam):
    """Function from DLMF 8.12.1 shifted to be centered at 0."""
    if lam > 0:
        return mp.sqrt(2*(lam - mp.log(lam + 1)))
    elif lam < 0:
        return -mp.sqrt(2*(lam - mp.log(lam + 1)))
    else:
        return 0
Developer: antonior92, Project: scipy, Lines: 8, Source: gammainc_asy.py
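
A quick sanity check of eta, assuming the definition above is in scope: since lambda - log(1 + lambda) = lambda^2/2 - lambda^3/3 + ..., the function behaves like eta(lam) ≈ lam - lam^2/3 near zero, and arbitrary precision makes the tiny residual visible:

import mpmath as mp

mp.mp.dps = 30
for lam in [mp.mpf('1e-6'), mp.mpf('-1e-6'), mp.mpf('0.1')]:
    print(lam, eta(lam), eta(lam) - lam)  # the residual shrinks like -lam**2/3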

Example 8: compute_similarity

def compute_similarity(partitions_themes, similarity='KLDivergence'):
    """
    Third step of the Temporal Text Mining algorithm. Receives the distribution of all themes in two partitions.
    Computes and returns the similarity between all the themes.

    :param partitions_themes: Contains the distribution of all themes in the partitions.
    :type partitions_themes: list
    :param similarity: Specifies which similarity measure to use: 'KLDivergence', 'JSDivergence' or 'support'
    :type similarity: str

    :return: Returns the computed similarities between all the themes.
    :rtype: list
    """

    # Declare the similarity matrix to store the computed similarities
    similarity_matrix = []

    # Compute the similarities between all the themes
    for theme1_index, theme1 in enumerate(partitions_themes[0]):

        # Creates a new line in the matrix
        similarity_matrix.append([])

        for theme2_index, theme2 in enumerate(partitions_themes[1]):

            # If the similarity is KL-Divergence
            if similarity == 'KLDivergence':
                vocabulary_size = len(theme1)
                klDivergence = 0
                for word_index in range(vocabulary_size):
                    klDivergence += theme2[word_index]*math.log(theme2[word_index]/theme1[word_index])
                similarity_matrix[theme1_index].append(1/klDivergence)

            # If the similarity is Jensen-Shannon divergence (as coded, the symmetrised
            # KL a.k.a. Jeffreys divergence; the textbook JSD compares each theme to their mixture)
            if similarity == 'JSDivergence':
                vocabulary_size = len(theme1)
                jsDivergence = 0
                for word_index in range(vocabulary_size):
                    jsDivergence += (0.5*theme1[word_index]*math.log(theme1[word_index]/theme2[word_index]) +
                                     0.5*theme2[word_index]*math.log(theme2[word_index]/theme1[word_index]))
                similarity_matrix[theme1_index].append(1/jsDivergence)

            # If the similarity is the support of the distributions
            if similarity == 'support':
                # Compute the support of each theme distribution
                threshold = 0.001
                theme1_support = (theme1 > threshold)
                theme2_support = (theme2 > threshold)

                # Compute and store the similarity between the distributions' support
                support_similarity = sum((theme1_support & theme2_support))
                similarity_matrix[theme1_index].append(support_similarity)

    # Returns the computed similarities between all the themes.
    return similarity_matrix
Developer: enrsr, Project: CommunityDetection, Lines: 55, Source: TemporalTextMining.py
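
A hypothetical call with two toy three-word theme distributions, assuming compute_similarity above is importable. A small KL divergence between nearly identical themes maps to a large similarity score:

import math

theme_a = [0.70, 0.20, 0.10]
theme_b = [0.60, 0.25, 0.15]

kl = sum(q * math.log(q / p) for p, q in zip(theme_a, theme_b))  # KL(b || a), the direction used above
print(kl)                                                        # ~0.024 nats
print(compute_similarity([[theme_a], [theme_b]]))                # [[1/KL]] ~ [[41.5]]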

Example 9: pqCandidates

        def pqCandidates(z, w = w):

            PiI = mpmath.pi * 1j

            if abs(1 - z) < globalsettings.getSetting("maximalError"):
                return (z, 0, 0), 1

            p = mpmathRoundToInt( (w.w0 - mpmath.log(  z)) / PiI)
            q = mpmathRoundToInt( (w.w1 + mpmath.log(1-z)) / PiI)
            err = abs(w.w2 + mpmath.log(z) - mpmath.log(1-z) + p * PiI + q * PiI)

            return (z, p, q), err
Developer: unhyperbolic, Project: SnapRepr, Lines: 12, Source: bloch_group.py

Example 10: logOfSquare

def logOfSquare(c):
    return mpmath.log(c)

    # NOTE: the early return above short-circuits the branch-cut-aware
    # evaluation below, which is therefore unreachable.
    csquare = c * c

    maxErr = globalsettings.getSetting("maximalError")

    if not ((csquare.real > 0 or
             abs(csquare.imag) > maxErr)):
        raise NumericalError(c, msg="logOfSquare near branch cut")

    return mpmath.log(csquare) / 2
Developer: unhyperbolic, Project: SnapRepr, Lines: 12, Source: bloch_group.py
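
What the disabled half of logOfSquare guards against: log(c*c)/2 determines log(c) only up to a multiple of pi*i, because squaring discards which side of the origin c lies on and the principal half-angle always lands in (-pi/2, pi/2]. A standalone illustration:

import mpmath

c = mpmath.mpc(-1, 1)         # arg(c) = 3*pi/4
print(mpmath.log(c))          # imaginary part +3*pi/4 ~ +2.356
print(mpmath.log(c * c) / 2)  # imaginary part   -pi/4 ~ -0.785: off by pi*i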

Example 11: energy

    def energy(self, clustering):
        energy = mpmath.mpf(0.0)
        new_vertex_distributions = _combine_vertex_distributions_given_clustering(
            self.vertex_distributions, clustering)

        # likelihood
        likelihood_energy = -self._log_likelihood(clustering, new_vertex_distributions)

        # prior on similarity:
        # We prefer the cluster whose minimum similarity is large.
        # - the similarity of a pair of vertices is measured by the similarity
        #   of the top 10 words in the distribution (measure each word type
        #   separately and take the average)
        intra_cluster_energy = mpmath.mpf(0.0)
        for cluster_id, cluster_vertex_set in enumerate(clustering):
            min_similarity_within_cluster = self._min_similarity_within_cluster(cluster_vertex_set, new_vertex_distributions[cluster_id])
            intra_cluster_energy += -mpmath.log(mpmath.exp(min_similarity_within_cluster - 1))

        # Between cluster similarity:
        #  - For each pair of clusters, we want to find the pair of words with maximum similarity
        #    and prefer this similarity value to be small.
        inter_cluster_energy = mpmath.mpf(0.0)
        if len(clustering) > 1:
            for i in range(0, len(clustering)-1):
                for j in range(i+1, len(clustering)):
                    max_similarity_between_clusters = self._max_similarity_between_clusters(clustering[i], clustering[j])
                    inter_cluster_energy += -mpmath.log(mpmath.exp(-max_similarity_between_clusters))

        # prior on clustering complexity: prefer small number of clusters.
        length_energy = -mpmath.log(mpmath.exp(-len(clustering)))

        # classification: prefer small number of categories.
        class_energy = 0.0
        if self._classifier is not None:
            num_classes = self._calculate_num_of_categories(clustering, new_vertex_distributions)
            class_energy = -mpmath.log(mpmath.exp(-(abs(num_classes-len(clustering)))))

        # classification confidence: maximize the classification confidence
        confidence_energy = 0.0
        for cluster_id, cluster_vertex_set in enumerate(clustering):
            (category, confidence) = self._predict_label(new_vertex_distributions[cluster_id])
            confidence_energy += -mpmath.log(confidence)

        energy += (0.5)*likelihood_energy + intra_cluster_energy + inter_cluster_energy + 30.0*length_energy + 20.0*class_energy + confidence_energy
        logging.debug('ENERGY: {0:12.6f}\t{1:12.6f}\t{2:12.6f}\t{3:12.6f}\t{4:12.6f}\t{5:12.6f}'.format(
            likelihood_energy.__float__(),
            intra_cluster_energy.__float__(),
            inter_cluster_energy.__float__(),
            length_energy.__float__(),
            class_energy.__float__(),
            confidence_energy.__float__()))
        return energy
Developer: RedHenLab, Project: CDI, Lines: 52, Source: topic_model.py

Example 12: my_secant

def my_secant(eq, p1, p2, debug=False):
    tol = mp.mpf(0.1)
    max_count = 10000
    sol = 0
    for count in range(max_count):
        if debug: print(count + 1)
        if p1 == p2:
            sol = p1
            break
        y1 = eq(p1)
        y2 = eq(p2)
        if debug: print('-->', p1, '->', y1)
        if debug: print('-->', p2, '->', y2)
        # keep the endpoint with the smaller residual as the current best guess
        if abs(y1) < abs(y2):
            sol = p1
            err = abs(y1)
        else:
            sol = p2
            err = abs(y2)
        if err < tol:
            break
        if mp.sign(y1) * mp.sign(y2) < 0:
            # bisect in log space: the new point is the geometric mean of the bracket
            x1 = mp.log(p1)
            x2 = mp.log(p2)
            x3 = (x1 + x2) / mp.mpf(2)
            p3 = mp.exp(x3)
            if p3 == p1 or p3 == p2:
                break
            y3 = eq(p3)
            if debug: print('--->', x1, x2, x3, p3, '->', y3)
            if mp.sign(y3) == mp.sign(y1):
                p1 = p3
            else:
                p2 = p3
        elif mp.sign(y1) * mp.sign(y2) == 0:
            if y1 == 0:
                sol = p1
            elif y2 == 0:
                sol = p2
            else:
                raise Exception('Strange: sign() returned zero, but neither endpoint is a zero')
            break
        else:
            raise Exception('Function has the same sign on both ends')
    if debug: print('Solution:', sol)
    return sol
Developer: bond-anton, Project: Schottky, Lines: 52, Source: Helpers.py
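
Despite its name, the live branch of my_secant bisects in log space: each trial point is the geometric mean of the current bracket, which suits positive roots lying many orders of magnitude from either endpoint. A hypothetical call, assuming the function above is in scope:

import mpmath as mp

# solve log10(p) - 3 = 0, i.e. p = 1000, from a bracket spanning 18 decades
root = my_secant(lambda p: mp.log10(p) - 3, mp.mpf('1e-6'), mp.mpf('1e12'))
print(root)  # ~1000.0: the first geometric midpoint of the bracket lands on the root (within tol)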

Example 13: findGaussianChangePoint

def findGaussianChangePoint( data, gammatable ):
	N = len( data )
	if N<6 : return None # can't find a cp in data this small

	# the denominator. This is the easy part.
	denom = (pi**1.5) * mpf(( N*data.var() ))**( -N/2.0 + 0.5 ) * gammatable[N]

	# BEGIN weight calculation
	# the numerator. A little trickier.
	weights=[0,0,0] # the change cannot have occurred in the first 3 points
	data2=data**2

	#initialize
	dataA=data[0:3] ; dataA2=data2[0:3] ; NA = len(dataA)
	dataB=data[3:] ; dataB2=data2[3:] ;  NB = len(dataB)
	sumA=dataA.sum() ; sumsqA=dataA2.sum()
	sumB=dataB.sum()  ; sumsqB=dataB2.sum()

	# first data point--this could be done in the loop but it's okay here
	meanA=sumA/NA ; meansumsqA = sumsqA/NA ; meanA2 = meanA**2 ; sA2=meansumsqA-meanA2
	meanB=sumB/NB ; meansumsqB = sumsqB/NB ; meanB2 = meanB**2 ; sB2=meansumsqB-meanB2

	wnumf1 = mpf(NA)**(-0.5*NA + 0.5 ) * mpf(sA2)**(-0.5*NA + 1) * gammatable[NA]
	wnumf2 = mpf(NB)**(-0.5*NB + 0.5 ) * mpf(sB2)**(-0.5*NB + 1) * gammatable[NB]
	wdenom = (sA2 + sB2) * (meanA2*meanB2)
	weights.append( (wnumf1*wnumf2)/wdenom ) 

	for i in range( 3, N-3 ):
		NA += 1	; NB -= 1
		next = data[i]
		sumA += next	; sumB -= next
		nextsq = data2[i]
		sumsqA += nextsq; sumsqB -= nextsq
		meanA=sumA/NA ; meansumsqA = sumsqA/NA ; meanA2 = meanA**2 ; sA2=meansumsqA-meanA2
		meanB=sumB/NB ; meansumsqB = sumsqB/NB ; meanB2 = meanB**2 ; sB2=meansumsqB-meanB2
		wnumf1 = mpf(NA)**(-0.5*NA + 0.5 ) * mpf(sA2)**(-0.5*NA + 1) * gammatable[NA]
		wnumf2 = mpf(NB)**(-0.5*NB + 0.5 ) * mpf(sB2)**(-0.5*NB + 1) * gammatable[NB]
		wdenom = (sA2 + sB2) * (meanA2*meanB2)
		weights.append( (wnumf1*wnumf2)/wdenom) 
	weights.extend( [0,0] ) # the change cannot have occurred at the last 2 points
	weights=array(weights)
	# END weight calculation

	num = 2.0**2.5 * abs(data.mean()) * weights.mean()
	logodds = log( num ) - log( denom ) 	
	print "num:", num, "log num:", log(num), "| denom:", denom, "log denom:", log(denom), "|| log odds:", logodds 
	
	# If there is a change point, then logodds will be greater than 0
	if logodds < 0 : return None
	return ( weights.argmax(), logodds ) 
Developer: ChayaSt, Project: cpdetect, Lines: 50, Source: cpDetect.py

Example 14: L_function

    def L_function(self):
        p = self.p
        q = self.q
        z = self.z
        PiI = mpmath.pi * 1j

        val = (
              myDilog(z)
            + (mpmath.log(z) + p * PiI) * ( mpmath.log(1 - z) + q * PiI) / 2
            - mpmath.pi ** 2 / 6)

        if self.sign == -1:
            return -val
        else:
            return val
Developer: unhyperbolic, Project: SnapRepr, Lines: 15, Source: bloch_group.py
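
myDilog is the project's own dilogarithm; stock mpmath exposes the same function as polylog(2, z). With p = q = 0 the expression above is the Rogers dilogarithm, here normalized as L(z) = Li2(z) + (1/2)*log(z)*log(1-z) - pi^2/6 so that L(1) = 0, which a standalone check confirms at z = 1/2:

import mpmath

z = mpmath.mpf('0.5')
L = mpmath.polylog(2, z) + mpmath.log(z) * mpmath.log(1 - z) / 2 - mpmath.pi ** 2 / 6
print(L, -mpmath.pi ** 2 / 12)  # both -0.822467...: the classical closed form at z = 1/2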

Example 15: lnlhood

    def lnlhood(self, param):
        """ This is the function that evaluates the likelihood at each point in NDIM space

        Parameters
        ----------
        param :

        Returns
        -------

        """
        likeit=0

        #loop over all the ions
        for ii in range(self.nions):

            #parity check : make sure data and models are aligned
            if(self.data[ii][0] != self.mod_colm_tag[ii]):
                raise ValueError('Mismatch between observables and models. This is a big mistake!!!')

            #now call the interpolator for models given current ion
            mod_columns=self.interpol[ii](param)

            #check if upper limit
            if(self.data[ii][3] == -1):
                #integrate the upper limit of a Gaussian - cumulative distribution
                arg=((self.data[ii][1]-mod_columns)/(np.sqrt(2)*self.data[ii][2]))[0]
                thislike=mmath.log(0.5+0.5*mmath.erf(arg))
                likeit=likeit+float(thislike)
                #print self.data[ii][0], float(thislike), self.data[ii][1], mod_columns

            #check if lower limit
            elif(self.data[ii][3] == -2):

                #integrate the lower limit of a Gaussian - Q function
                arg=((self.data[ii][1]-mod_columns)/(np.sqrt(2)*self.data[ii][2]))[0]
                thislike=mmath.log(0.5-0.5*mmath.erf(arg))
                likeit=likeit+float(thislike)
                #print self.data[ii][0], float(thislike), self.data[ii][1], mod_columns

            #if value, just eval Gaussian
            else:

                #add the likelihood for this ion
                thislike=-1*np.log(np.sqrt(2*np.pi)*self.data[ii][2])-(self.data[ii][1]-mod_columns)**2/(2*self.data[ii][2]**2)
                likeit=likeit+thislike

        return likeit
Developer: mneeleman, Project: pyigm, Lines: 48, Source: mcmc.py
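
The upper-limit branch takes the log of a Gaussian cumulative distribution, and this is where mpmath earns its keep: when the model sits far above an observed upper limit, the CDF underflows the ~16 digits of float64 and the plain math.log route degenerates to log(0). A standalone sketch with hypothetical column densities:

import numpy as np
import mpmath as mmath

mmath.mp.dps = 50                            # enough digits that 0.5 + 0.5*erf(arg) does not cancel to 0
obs, sigma, model = 13.0, 0.2, 15.0          # toy log10 column densities; obs is an upper limit
arg = (obs - model) / (np.sqrt(2) * sigma)   # about -7.07 standard deviations
cdf = 0.5 + 0.5 * mmath.erf(arg)             # ~7.7e-24
print(float(mmath.log(cdf)))                 # ~-53.2: a finite, usable log-likelihood term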


Note: The mpmath.log function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors. Consult each project's license before redistributing or using the code; do not repost without permission.