This page collects typical usage examples of the pylab.log function in Python. If you have been wondering how exactly to use pylab.log, or what it looks like in real code, the hand-picked examples below may help.
The following shows 15 code examples of the log function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: obs
def obs(f=vars['rate_stoch'],
        age_indices=age_indices,
        age_weights=age_weights,
        value=pl.log(dm.value_per_1(d)),
        tau=se**-2, data=d):
    # average the rate stochastic over the data point's age range
    f_i = dismod3.utils.rate_for_range(f, age_indices, age_weights)
    # log-normal likelihood: the log of the observed value is normal
    # around the log of the predicted rate, with precision tau
    return mc.normal_like(value, pl.log(f_i), tau)
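This observation encodes a log-normal likelihood: the log of the observed value is modeled as normal around the log of the age-averaged rate, with precision tau = se**-2. A minimal sketch of the same density with scipy.stats, using made-up stand-ins for the closure variables (PyMC's normal_like returns the log density parameterized by precision):

import numpy as np
from scipy import stats

value, f_i, se = np.log(0.02), 0.021, 0.1   # hypothetical stand-ins
tau = se ** -2
# normal_like(x, mu, tau) = log N(x | mu, 1/tau)
manual = -0.5 * np.log(2 * np.pi / tau) - 0.5 * tau * (value - np.log(f_i)) ** 2
assert np.isclose(manual, stats.norm.logpdf(value, np.log(f_i), se))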
Example 2: likelihood
def likelihood(self, x0, X, Y, U):
    """Returns the log likelihood of the states `X` and observations `Y`
    under the current model, log p(X, Y | M).

    Parameters
    ----------
    x0 : matrix
        initial state
    X : list of matrix
        state sequence
    Y : list of matrix
        observation sequence
    U : list of matrix
        input sequence

    Notes
    -----
    This calculates

        log p(X,Y|M) = log p(x0) + \sum_t log p(y_t|x_t)
                       + \sum_t log p(x_{t+1}|x_t, u_t)

    using the model currently defined in self.
    """
    l1 = pb.sum([pb.log(self.observation_dist(x, y)) for (x, y) in zip(X, Y)])
    l2 = pb.sum([pb.log(self.transition_dist(x, u, xdash))
                 for (x, u, xdash) in zip(X[:-1], U[:-1], X[1:])])
    l3 = self.init_dist(x0)  # init_dist is assumed to return a log density
    l = l1 + l2 + l3
    assert not pb.isinf(l).any(), (l1, l2, l3)
    return l
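Summing log probabilities, as above, avoids the underflow that multiplying many small densities would cause. A minimal illustration in plain numpy, independent of the model class:

import numpy as np

p = np.full(500, 1e-3)      # 500 small probabilities
print(np.prod(p))           # 0.0 -- the product underflows float64
print(np.sum(np.log(p)))    # -3453.88 -- finite and usable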
Example 3: query
from pylab import log

def query(q, DBs, Ms, n, l=1000, minVal=0.0, maxVal=1.0):
    # parameters: collision probabilities for near (P1) and far (P2) points
    P1 = .01779
    P2 = .0000156
    rho = log(P1) / log(P2)  # LSH query exponent
    sims = len(Ms)    # number of multi-runs for success w.h.p.
    k = len(Ms[0])    # number of random matrix projections per run
    candidates = set()
    # first iterate over the runs
    for s in range(sims):
        # hash the query point itself (decodeGt24 is a helper from the
        # surrounding module)
        hashVal = decodeGt24(q, Ms[s], minVal, maxVal)
        if hashVal in DBs[s]:
            for c in DBs[s][hashVal]:
                candidates.add(c)
        # next iterate over the ~n**rho nearby points
        for r in range(int(n ** rho + .5)):
            hashVal = decodeGt24(q, Ms[s], minVal, maxVal, True)
            if hashVal in DBs[s]:
                for c in DBs[s][hashVal]:
                    candidates.add(c)
            if len(candidates) > 2 * l:
                return candidates
    return candidates
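The exponent rho = log(P1)/log(P2) is the usual LSH query exponent, and since pylab.log is the natural log the base cancels; with the constants above it is about 0.364, so each run probes roughly n**0.364 perturbed hashes. A quick check:

from math import log

P1, P2 = .01779, .0000156
rho = log(P1) / log(P2)
print(round(rho, 3))          # 0.364
print(int(1e6 ** rho + .5))   # ~153 probes per run for n = 1e6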
Example 4: add_thermodynamic_constraints
def add_thermodynamic_constraints(cpl, dG0_f, c_range=(1e-6, 1e-2), T=default_T, bounds=None):
    """
    For any compound that does not have an explicit bound set by the 'bounds'
    argument, create a bound using the 'margin' variables (the last two
    columns of A).
    """
    Nc = dG0_f.shape[0]
    if bounds is not None and len(bounds) != Nc:
        raise Exception("The concentration bounds list must be the same length as the number of compounds")
    if bounds is None:
        bounds = [(None, None)] * Nc

    for c in range(Nc):
        if pylab.isnan(dG0_f[c, 0]):
            continue  # unknown dG0_f - cannot bound this compound's concentration at all
        b_low = bounds[c][0] or c_range[0]
        b_high = bounds[c][1] or c_range[1]
        # lower bound: dG0_f + R*T*ln(Cmin) <= x_i
        cpl.variables.set_lower_bounds('c%d' % c, dG0_f[c, 0] + R * T * pylab.log(b_low))
        # upper bound: x_i <= dG0_f + R*T*ln(Cmax)
        cpl.variables.set_upper_bounds('c%d' % c, dG0_f[c, 0] + R * T * pylab.log(b_high))
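With the default c_range of 1e-6 to 1e-2 M, the log terms shift each bound by a fixed energy. A worked check, assuming R is in kJ/(mol*K) and T = 298.15 K (the actual R and default_T in the surrounding module may differ):

from math import log

R, T = 8.31e-3, 298.15      # assumed gas constant in kJ/(mol*K) and temperature
print(R * T * log(1e-6))    # ~ -34.2 kJ/mol added for the lower bound
print(R * T * log(1e-2))    # ~ -11.4 kJ/mol added for the upper bound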
Example 5: covariate_constraint
def covariate_constraint(mu=vars['mu_age'], alpha=vars['alpha'], beta=vars['beta'],
                         U_all=U_all,
                         X_sex_max=X_sex_max,
                         X_sex_min=X_sex_min,
                         lower=pl.log(model.parameters[name]['level_bounds']['lower']),
                         upper=pl.log(model.parameters[name]['level_bounds']['upper'])):
    log_mu_max = pl.log(mu.max())
    log_mu_min = pl.log(mu.min())
    alpha = pl.array([float(x) for x in alpha])
    if len(alpha) > 0:
        for U_i in U_all:
            log_mu_max += max(0, alpha[U_i].max())
            log_mu_min += min(0, alpha[U_i].min())

    # this estimate is too crude, and is causing problems
    #if len(beta) > 0:
    #    log_mu_max += pl.sum(pl.maximum(X_max*beta, X_min*beta))
    #    log_mu_min += pl.sum(pl.minimum(X_max*beta, X_min*beta))
    # but leaving out the sex effect results in strange problems, too
    log_mu_max += X_sex_max * float(beta[sex_index])
    log_mu_min += X_sex_min * float(beta[sex_index])

    lower_violation = min(0., log_mu_min - lower)
    upper_violation = max(0., log_mu_max - upper)
    return mc.normal_like([lower_violation, upper_violation], 0., 1.e-6**-2)
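The returned value acts as a soft constraint: violations enter a normal log-likelihood with standard deviation 1e-6, i.e. precision 1e12, so any appreciable excursion beyond the level bounds is overwhelmingly penalized. A rough feel for the magnitude (the penalty term, up to an additive constant):

tau = 1.e-6 ** -2                    # precision used above: 1e12
violation = 0.01                     # a 1% excursion on the log scale
print(-0.5 * tau * violation ** 2)   # -5e7: effectively a hard constraint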
Example 6: _make_log_freq_map
def _make_log_freq_map(self):
    """
    ::

        For the given ncoef (bands-per-octave) and nfft, calculate the center
        frequencies and bandwidths of linear and log-scaled frequency axes
        for a constant-Q transform.
    """
    fp = self.feature_params
    bpo = float(self.nbpo)  # bands per octave
    self._fftN = float(self.nfft)
    hi_edge = float(self.hi)
    lo_edge = float(self.lo)
    f_ratio = 2.0 ** (1.0 / bpo)  # constant-Q bandwidth ratio
    # number of constant-Q bands spanning [lo_edge, hi_edge]
    self._cqtN = float(P.floor(P.log(hi_edge / lo_edge) / P.log(f_ratio)))
    self._dctN = self._cqtN
    self._outN = float(self.nfft // 2 + 1)
    if self._cqtN < 1:
        print("warning: cqtN not positive")
    mxnorm = P.empty(int(self._cqtN))  # normalization coefficients
    fftfrqs = self._fftfrqs  # i * self.sample_rate / self._fftN for i in arange(self._outN)
    # log-spaced center frequencies: lo_edge * 2**(i/bpo)
    logfrqs = P.array([lo_edge * P.exp(P.log(2.0) * i / bpo) for i in P.arange(self._cqtN)])
    # constant-Q bandwidths, floored at one FFT bin
    logfbws = P.array([max(logfrqs[i] * (f_ratio - 1.0), self.sample_rate / float(self._fftN))
                       for i in P.arange(self._cqtN)])
    self._logfrqs = logfrqs
    self._logfbws = logfbws
    self._make_cqt()
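Because f_ratio = 2**(1/bpo), the band count reduces to cqtN = floor(bpo * log2(hi/lo)): bands per octave times the number of octaves. A worked check with hypothetical parameters (not values from the module):

from math import floor, log

bpo, lo_edge, hi_edge = 12.0, 62.5, 8000.0
f_ratio = 2.0 ** (1.0 / bpo)
print(floor(log(hi_edge / lo_edge) / log(f_ratio)))   # 84 = 12 bands/octave * 7 octaves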
Example 7: projectCl
def projectCl(lvec, P, D, dNdz, z, growthFac=None):
    """
    Project C_l's given a power spectrum class P (Camb or BBKS) and a
    Distance class D.

    arguments:
        lvec: vector of l values
        P: P.pk, P.k contain the power spectrum, e.g. a pt.Camb instance
        D: frw.Distance instance
        dNdz, z, growthFac: vectors suitable for trapezoid z-integration

    Presently it crashes if z = 0.0 is included; start from a small z value.
    """
    lvec = M.asarray(lvec)
    dNdz2 = M.asarray(dNdz) ** 2
    z = M.asarray(z)
    da1 = 1. / D.rtc(z) / D.h  # inverse comoving distance, in h Mpc^-1
    dNdz2vc = dNdz2 / D.vc(z) / D.h ** 3  # comoving volume in (h^-1 Mpc)^3
    # use growth factor if given
    if growthFac:
        dNdz2vc = dNdz2vc * (growthFac ** 2)
    lk = M.log(P.k)
    pk = P.pk
    # interpolate the power spectrum at k = l/D_A(z) and integrate over z
    # (an earlier version resampled with utils.splineResample instead)
    return M.asarray([utils.trapz(utils.interpolateLin(pk, lk, M.log(l * da1)) * dNdz2vc, z)
                      for l in lvec])
Example 8: duxbury_icdf
def duxbury_icdf(X, L, s):
    """
    Returns the inverse Duxbury CDF evaluated at X.
    The Duxbury CDF is F(x) = 1 - exp(-(L^2) * exp(-(s/x)^2)).
    """
    # invert F: x = sqrt(-s^2 / log(-log(1 - X) / L^2))
    return (-s * s / pylab.log(-pylab.log(1 - X) / (L * L))) ** 0.5
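A round-trip check confirms the inversion: pushing probabilities through the inverse and then the CDF quoted in the docstring returns them unchanged (numpy sketch with hypothetical L and s):

import numpy as np

def duxbury_cdf(x, L, s):
    return 1 - np.exp(-(L * L) * np.exp(-(s / x) ** 2))

L, s = 10.0, 2.0
U = np.array([0.1, 0.5, 0.9])
x = (-s * s / np.log(-np.log(1 - U) / (L * L))) ** 0.5
print(np.allclose(duxbury_cdf(x, L, s), U))   # True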
Example 9: weibull_lsq
def weibull_lsq(data):
    """
    Returns the Weibull parameters estimated by using the least-squares
    method for the given data.
    The Weibull CDF is F(x) = 1 - exp(-(x/l)^k).
    One should be aware of the fact that this approach weighs the
    extreme (small or large) observations more than the bulk.
    """
    # Evaluate the empirical CDF at the observations
    # and rescale to convert into empirical probability
    n = len(data)
    ecdf = empCDF(data, data) * n / (1.0 + n)
    # Make the arrays of "inferred" and independent variables:
    # log(-log(1 - F)) = k*log(x) - k*log(l)
    y = pylab.log(-pylab.log(1 - ecdf))
    x = pylab.log(data)
    # estimate regression coefficients of y = a*x + b
    a, b = lsqReg(x, y)
    # Extract the Weibull parameters
    k = a
    l = pylab.exp(-b / k)
    return k, l
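empCDF and lsqReg are helpers from the same module, but the linearization is easy to reproduce with numpy alone. A self-contained sketch that recovers the parameters of simulated Weibull data, with np.polyfit standing in for lsqReg:

import numpy as np

rng = np.random.default_rng(0)
k_true, l_true = 2.0, 5.0
data = np.sort(l_true * rng.weibull(k_true, size=2000))

n = len(data)
ecdf = np.arange(1, n + 1) / (n + 1.0)   # rescaled empirical CDF at the sorted data
y = np.log(-np.log(1 - ecdf))            # log(-log(1-F)) = k*log(x) - k*log(l)
x = np.log(data)
a, b = np.polyfit(x, y, 1)
k, l = a, np.exp(-b / a)
print(round(k, 2), round(l, 2))          # close to 2.0 and 5.0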
Example 10: obs
def obs(pi=pi):
    # binomial log-likelihood of the prevalence observed in populations
    # A and B, both assumed to share the same underlying rate pi
    return (
        pop_A_prev * pop_A_N * pl.log(pi)
        + (1 - pop_A_prev) * pop_A_N * pl.log(1 - pi)
        + pop_B_prev * pop_B_N * pl.log(pi)
        + (1 - pop_B_prev) * pop_B_N * pl.log(1 - pi)
    )
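Setting the derivative of this log-likelihood to zero gives a closed-form maximizer: the pooled prevalence (A_prev*A_N + B_prev*B_N)/(A_N + B_N). A quick numeric check with hypothetical counts:

import numpy as np

pop_A_prev, pop_A_N = 0.10, 1000
pop_B_prev, pop_B_N = 0.16, 500

def loglik(pi):
    return (pop_A_prev * pop_A_N * np.log(pi)
            + (1 - pop_A_prev) * pop_A_N * np.log(1 - pi)
            + pop_B_prev * pop_B_N * np.log(pi)
            + (1 - pop_B_prev) * pop_B_N * np.log(1 - pi))

pooled = (pop_A_prev * pop_A_N + pop_B_prev * pop_B_N) / (pop_A_N + pop_B_N)
grid = np.linspace(0.001, 0.999, 999)
print(pooled, grid[np.argmax(loglik(grid))])   # both ~0.12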
Example 11: calcAUC
def calcAUC(data, y0, lag, mgr, asym, time):
    """
    Calculate the area under the curve of the logistic function
    using its integrated formula

        A*t + A*(A - y0)*log(exp(4*m*(l - t)/A + 2) + 1) / (4*m)

    evaluated between time[0] and time[-1], with A = asym, m = mgr, l = lag.
    """
    # First check that max growth rate is not zero;
    # if so, calculate using the data instead of the equation
    if mgr == 0:
        auc = calcAUCData(data, time)
    else:
        timeS = time[0]
        timeE = time[-1]
        t1 = asym - y0
        # py.exp can overflow for large 4*mgr*(lag - t)/asym; the result
        # then becomes inf and the data-based fallback below is used
        t2_s = py.log(py.exp((4 * mgr * (lag - timeS) / asym) + 2) + 1)
        t2_e = py.log(py.exp((4 * mgr * (lag - timeE) / asym) + 2) + 1)
        t3 = 4 * mgr
        t4_s = asym * timeS
        t4_e = asym * timeE
        start = (asym * (t1 * t2_s) / t3) + t4_s
        end = (asym * (t1 * t2_e) / t3) + t4_e
        auc = end - start
    if py.absolute(auc) == float('Inf'):
        x = py.diff(time)
        auc = py.sum(x * data[1:])
    return auc
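The infinity guard at the end exists because the exponential overflows for large 4*mgr*(lag - t)/asym, at which point the analytic expression degenerates and the trapezoid-style sum over the data is used instead. The failure mode is easy to reproduce:

import numpy as np

with np.errstate(over='ignore'):
    print(np.log(np.exp(800.0) + 1))   # inf: float64 exp overflows near 709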
Example 12: setup
def setup(dm, key, data_list, rate_stoch):
    """ Generate the PyMC variables for a log-normal model of
    a function of age

    Parameters
    ----------
    dm : dismod3.DiseaseModel
        the object containing all the data, priors, and additional
        information (like input and output age-mesh)
    key : str
        the name of the key for everything about this model (priors,
        initial values, estimations)
    data_list : list of data dicts
        the observed data to use in the log-normal likelihood function
    rate_stoch : pymc.Stochastic
        a PyMC stochastic (or deterministic) object, with
        len(rate_stoch.value) == len(dm.get_estimate_age_mesh())

    Results
    -------
    vars : dict
        Return a dictionary of all the relevant PyMC objects for the
        log-normal model. vars['rate_stoch'] is of particular
        relevance; for details see the beta_binomial_model
    """
    vars = {}
    est_mesh = dm.get_estimate_age_mesh()
    vars['rate_stoch'] = rate_stoch

    # set up priors and observed data
    prior_str = dm.get_priors(key)
    dismod3.utils.generate_prior_potentials(vars, prior_str, est_mesh)

    vars['observed_rates'] = []
    for d in data_list:
        age_indices = dismod3.utils.indices_for_range(est_mesh, d['age_start'], d['age_end'])
        age_weights = d.get('age_weights', pl.ones(len(age_indices)) / len(age_indices))

        # standard error on the log scale, treating [lb, ub] as a 95%
        # interval: se = (log(ub) - log(lb)) / (2 * 1.96)
        lb, ub = dm.bounds_per_1(d)
        se = (pl.log(ub) - pl.log(lb)) / (2. * 1.96)
        if pl.isnan(se) or se <= 0.:
            se = 1.

        print('data %d: log(value) = %f, se = %f' % (d['id'], pl.log(dm.value_per_1(d)), se))

        @mc.observed
        @mc.stochastic(name='obs_%d' % d['id'])
        def obs(f=vars['rate_stoch'],
                age_indices=age_indices,
                age_weights=age_weights,
                value=pl.log(dm.value_per_1(d)),
                tau=se**-2, data=d):
            f_i = dismod3.utils.rate_for_range(f, age_indices, age_weights)
            return mc.normal_like(value, pl.log(f_i), tau)
        vars['observed_rates'].append(obs)

    return vars
Example 13: add_localized_dGf_constraints
def add_localized_dGf_constraints(self, cid2dG0_f, cid2bounds, c_range, T=300):
    # For each reaction, compute dG'_r under the most favorable
    # concentrations; if it is still positive, force the flux to zero.
    self.T = T
    for (rid, sparse) in self.reactions:
        dG0_r = 0
        for (cid, coeff) in sparse.items():
            if cid in cid2dG0_f:
                dG0_r += coeff * cid2dG0_f[cid]
            else:
                dG0_r = None
                break
            (curr_c_min, curr_c_max) = cid2bounds.get(cid, (None, None))
            if curr_c_min is None:
                curr_c_min = c_range[0]
            if curr_c_max is None:
                curr_c_max = c_range[1]
            # most favorable case: maximal concentration for substrates
            # (coeff < 0), minimal concentration for products
            if coeff < 0:
                dG0_r += coeff * common.R * T * pylab.log(curr_c_max)
            else:
                dG0_r += coeff * common.R * T * pylab.log(curr_c_min)
        if dG0_r is not None and dG0_r > 0:
            # this reaction is a localized bottleneck; add a constraint that its flux = 0
            constraint_name = rid + "_irreversible"
            self.cpl.linear_constraints.add(names=[constraint_name], senses="E", rhs=[0])
            self.cpl.linear_constraints.set_coefficients(constraint_name, rid, 1)
Example 14: make_pCr_problem
def make_pCr_problem(S, dG0_f,
                     c_mid=1e-3,
                     ratio=3.0,
                     T=default_T,
                     bounds=None,
                     log_stream=None):
    """Creates a Cplex problem for finding the pCr.

    Simply sets up all the constraints. Does not set the objective.

    Args:
        S: stoichiometric matrix.
        dG0_f: deltaG0'-formation values for all compounds (in kJ/mol) (1 x compounds)
        c_mid: the default concentration to center the pCr on.
        ratio: the ratio between the distance of the lower bound from c_mid
            and the distance of the upper bound from c_mid (in log scale)
        T: temperature (in K).
        bounds: the concentration bounds for metabolites.
        log_stream: where to write Cplex logs to.

    Returns:
        A cplex.Cplex object for the problem.
    """
    Nc = S.shape[1]
    if Nc != dG0_f.shape[0]:
        raise Exception("The S matrix has %d columns, while the dG0_f vector has %d" % (Nc, dG0_f.shape[0]))
    if bounds and len(bounds) != Nc:
        raise Exception("The concentration bounds list must be the same length as the number of compounds")

    cpl = create_cplex(S, dG0_f, log_stream)

    # Add the pC variable.
    cpl.variables.add(names=['pC'], lb=[0], ub=[1e6])

    # Add variables for concentration bounds for each metabolite.
    for c in range(Nc):
        if pylab.isnan(dG0_f[c, 0]):
            continue  # unknown dG0_f - cannot bound this compound's concentration at all

        # dG at the center concentration.
        dG_f_mid = dG0_f[c, 0] + R*T*pylab.log(c_mid)
        if bounds is None or bounds[c][0] is None:
            # lower bound: x_i + r/(1+r) * R*T*ln(10)*pC >= dG0_f + R*T*ln(Cmid)
            cpl.linear_constraints.add(senses='G', names=['c%d_lower' % c], rhs=[dG_f_mid])
            cpl.linear_constraints.set_coefficients('c%d_lower' % c, 'c%d' % c, 1)
            cpl.linear_constraints.set_coefficients('c%d_lower' % c, 'pC', R*T*pylab.log(10) * ratio / (ratio + 1.0))
        else:
            # this compound has a specific lower bound on its activity
            cpl.variables.set_lower_bounds('c%d' % c, dG0_f[c, 0] + R*T*pylab.log(bounds[c][0]))

        if bounds is None or bounds[c][1] is None:
            # upper bound: x_i - 1/(1+r) * R*T*ln(10)*pC <= dG0_f + R*T*ln(Cmid)
            cpl.linear_constraints.add(senses='L', names=['c%d_upper' % c], rhs=[dG_f_mid])
            cpl.linear_constraints.set_coefficients('c%d_upper' % c, 'c%d' % c, 1)
            cpl.linear_constraints.set_coefficients('c%d_upper' % c, 'pC', -R*T*pylab.log(10) / (ratio + 1.0))
        else:
            # this compound has a specific upper bound on its activity
            cpl.variables.set_upper_bounds('c%d' % c, dG0_f[c, 0] + R*T*pylab.log(bounds[c][1]))

    return cpl
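Here pC measures, in log10 units, how far concentrations may stray from c_mid: the lower bound can fall ratio/(1+ratio) of pC decades below c_mid, the upper bound can rise 1/(1+ratio) of pC decades above it, so together the bounds span exactly pC decades. A worked check with the default ratio = 3.0 and a hypothetical pC:

ratio, pC, c_mid = 3.0, 4.0, 1e-3
c_min = c_mid * 10 ** (-pC * ratio / (ratio + 1.0))
c_max = c_mid * 10 ** (pC / (ratio + 1.0))
print(c_min, c_max)   # 1e-06 and 0.01: the bounds span pC = 4 decades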
Example 15: tune_alpha
def tune_alpha(self, drug_name, alphas=None, N=80, l1_ratio=0.5,
               n_folds=10, show=True, shuffle=False, alpha_range=[-2.8, 0.1]):
    """Interactive tuning of the model (alpha).

    This is much faster than :meth:`plot_cindex` but much slower than
    ElasticNetCV.

    .. plot::
        :include-source:

        from gdsctools import *
        ic = IC50(gdsctools_data("IC50_v5.csv.gz"))
        gf = GenomicFeatures(gdsctools_data("genomic_features_v5.csv.gz"))
        en = GDSCElasticNet(ic, gf)
        en.tune_alpha(1047, N=40, l1_ratio=0.1)
    """
    if alphas is None:
        # logspace takes base-10 exponents and returns a vector that is
        # uniformly spaced in log scale (base 10 or ln alike).
        # The default range [-2.8, 0.1] means alpha from 1.58e-3 to 1.26,
        # i.e. from log(1.58e-3) = -6.45 to log(1.26) = 0.23 in ln scale.
        a1, a2 = alpha_range
        alphas = pylab.logspace(a1, a2, N)

    # Let us now do a CV across the different alphas
    all_scores = []
    for alpha in alphas:
        scores = self.fit(drug_name, alpha, l1_ratio=l1_ratio,
                          n_folds=n_folds, shuffle=shuffle)
        all_scores.append(scores)

    # We can now plot the results, that is the mean scores + error envelope
    df = pd.DataFrame(all_scores)

    # we also identify the max correlation and corresponding alpha
    maximum = df.mean(axis=1).max()
    alpha_best = alphas[df.mean(axis=1).argmax()]

    if show is True:
        mu = df.mean(axis=1)
        sigma = df.var(axis=1)
        pylab.clf()
        pylab.errorbar(pylab.log(alphas), mu, yerr=sigma, color="gray")
        pylab.plot(pylab.log(alphas), mu, 'or')
        pylab.axvline(pylab.log(alpha_best), lw=4, alpha=0.5, color='g')
        pylab.title("Mean scores (pearson) across alphas for Kfold=%s" % n_folds)
        pylab.xlabel("ln(alpha)")
        pylab.ylabel("mean score (pearson)")
        pylab.grid()

    results = {"alpha_best": alpha_best, "ln_alpha": pylab.log(alpha_best),
               "maximum_Rp": maximum}
    return results
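Note that pylab.logspace takes base-10 exponents while pylab.log is the natural log, which is why the axis is labeled ln(alpha); a logspace grid is uniform in either log base. A one-line confirmation:

import numpy as np

alphas = np.logspace(-2.8, 0.1, 80)
steps = np.diff(np.log(alphas))       # ln-spacing of a base-10 logspace grid
print(np.allclose(steps, steps[0]))   # True: uniform in log space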