This article collects typical usage examples of the pylab.exp function in Python. If you are wondering what pylab.exp does, how to call it, or want to see it in real code, the curated samples below should help.
Fifteen code examples of the exp function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps surface better Python code examples.
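Before the examples, a minimal sketch of what pylab.exp itself does: pylab re-exports numpy.exp, so it accepts scalars, real arrays, and complex values, and applies the exponential element-wise. The import style and sample values below are our own illustration, not taken from the examples.

import pylab

print(pylab.exp(1.0))               # e**1 ~ 2.718281828
x = pylab.linspace(-2.0, 2.0, 5)
print(pylab.exp(x))                 # element-wise over an array
print(pylab.exp(1j*pylab.pi))       # complex input works too: ~ -1+0j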
Example 1: diagnostic
def diagnostic(self, kmin=1, kmax=8, k=None, ymax=None):
    self.run(kmin=kmin, kmax=kmax)
    pylab.clf()

    # middle panel: result of the k scan
    pylab.subplot(3, 1, 2)
    self.plot()

    # top panel: mixture fit for the chosen k (best k by default)
    mf = GaussianMixtureFitting(self.fitting.data)
    if k is None:
        mf.estimate(k=self.best_k)
    else:
        mf.estimate(k=k)
    pylab.subplot(3, 1, 1)
    mf.plot()
    if ymax is not None:
        pylab.ylim([0, ymax])

    # bottom panel: exp((best - criterion_k)/2) for AICc and AIC
    pylab.subplot(3, 1, 3)
    min_value = np.array([self.all_results[x]['AICc'] for x in self.x]).min()
    pylab.plot(self.x, [pylab.exp((min_value - self.all_results[k]['AICc']) / 2)
                        for k in self.x], 'o-', label='AICc')
    min_value = np.array([self.all_results[x]['AIC'] for x in self.x]).min()
    pylab.plot(self.x, [pylab.exp((min_value - self.all_results[k]['AIC']) / 2)
                        for k in self.x], 'o-', label='AIC')
    pylab.xlabel('probability of information loss (based on AICc)')
    pylab.legend()
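The quantity pylab.exp((min_value - AIC_k)/2) plotted in the bottom panel is the relative likelihood of model k with respect to the best-scoring model: it equals 1 for the k that minimises the criterion and decays toward 0 as the criterion grows. A small standalone sketch of that transformation, using made-up AICc values rather than real GaussianMixtureFitting output:

import pylab

# hypothetical AICc values for k = 1..5 mixture components (illustrative only)
aicc = {1: 250.0, 2: 231.5, 3: 230.1, 4: 232.8, 5: 236.0}
best = min(aicc.values())
for k in sorted(aicc):
    rel = pylab.exp((best - aicc[k]) / 2.)     # relative likelihood of model k
    print("k=%d  relative likelihood=%.3f" % (k, rel))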
Example 2: test_covariate_model_dispersion
def test_covariate_model_dispersion():
    # simulate normal data
    n = 100
    model = data.ModelData()
    model.hierarchy, model.output_template = data_simulation.small_output()

    Z = mc.rcategorical([.5, 5.], n)
    zeta_true = -.2
    pi_true = .1
    ess = 10000.*pl.ones(n)
    eta_true = pl.log(50)
    delta_true = 50 + pl.exp(eta_true)
    p = mc.rnegative_binomial(pi_true*ess, delta_true*pl.exp(Z*zeta_true)) / ess

    model.input_data = pandas.DataFrame(dict(value=p, z_0=Z))
    model.input_data['area'] = 'all'
    model.input_data['sex'] = 'total'
    model.input_data['year_start'] = 2000
    model.input_data['year_end'] = 2000

    # create model and priors
    vars = dict(mu=mc.Uninformative('mu_test', value=pi_true))
    vars.update(covariate_model.mean_covariate_model('test', vars['mu'], model.input_data, {}, model, 'all', 'total', 'all'))
    vars.update(covariate_model.dispersion_covariate_model('test', model.input_data, .1, 10.))
    vars.update(rate_model.neg_binom_model('test', vars['pi'], vars['delta'], p, ess))

    # fit model
    m = mc.MCMC(vars)
    m.sample(2)
Example 3: duxbury_cdf
def duxbury_cdf(X, L, s):
    """
    Return the Duxbury CDF evaluated at X.
    The Duxbury CDF is 1 - exp( -(L^2) * exp( -(s/X)^2 ) ).
    """
    return 1 - pylab.exp(-L*L*pylab.exp(-((s/X)**2.0)))
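A quick usage sketch of the function above; the stress grid and the L, s values are arbitrary illustrative numbers:

import pylab

X = pylab.linspace(0.5, 5.0, 10)      # stress values, kept away from 0 so s/X stays finite
print(duxbury_cdf(X, 10.0, 2.0))      # assumes duxbury_cdf defined above; values rise from ~0 toward 1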
Example 4: calcAUC
def calcAUC(data, y0, lag, mgr, asym, time):
    """
    Calculate the area under the curve of the logistic function
    using its integrated formula
    [ A( [A-y0] log[ exp( [4m(l-t)/A]+2 )+1 ]) / 4m ] + At
    """
    # First check that max growth rate is not zero
    # If so, calculate using the data instead of the equation
    if mgr == 0:
        auc = calcAUCData(data, time)
    else:
        timeS = time[0]
        timeE = time[-1]
        t1 = asym - y0
        #try:
        t2_s = py.log(py.exp((4 * mgr * (lag - timeS) / asym) + 2) + 1)
        t2_e = py.log(py.exp((4 * mgr * (lag - timeE) / asym) + 2) + 1)
        #except RuntimeWarning as rw:
        #    # Exponent is too large, setting to 10^3
        #    newexp = 1000
        #    t2_s = py.log(newexp + 1)
        #    t2_e = py.log(newexp + 1)
        t3 = 4 * mgr
        t4_s = asym * timeS
        t4_e = asym * timeE
        start = (asym * (t1 * t2_s) / t3) + t4_s
        end = (asym * (t1 * t2_e) / t3) + t4_e
        auc = end - start

    if py.absolute(auc) == float('Inf'):
        x = py.diff(time)
        auc = py.sum(x * data[1:])

    return auc
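A usage sketch for the closed-form branch (mgr != 0). The curve built below is the logistic form whose antiderivative the function encodes, y(t) = y0 + (A - y0)/(1 + exp(4m(l - t)/A + 2)); the parameter values are made up, and the helper calcAUCData is not needed on this path:

import pylab as py

time = py.linspace(0., 24., 49)                                      # hours, arbitrary grid
y0, lag, mgr, asym = 0.05, 4.0, 0.3, 1.2                             # hypothetical logistic parameters
data = y0 + (asym - y0) / (1 + py.exp(4*mgr*(lag - time)/asym + 2))  # the logistic curve itself
print(calcAUC(data, y0, lag, mgr, asym, time))                       # assumes calcAUC defined above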
Example 5: fresnelSingleTransformFW
def fresnelSingleTransformFW(self, d):
    i2 = Intensity2D(self.nx, self.startx, self.endx,
                     self.ny, self.starty, self.endy,
                     self.wl)
    # quadratic phase factor, centred FFT, then the output-plane phase and 1j/(d*wl) scaling
    u1p = self.i*pl.exp(-1j*pl.pi/(d*self.wl)*(self.xgrid**2+self.ygrid**2))
    ftu1p = pl.fftshift(pl.fft2(pl.fftshift(u1p)))
    i2.i = ftu1p*1j/(d*self.wl)*pl.exp(-1j*pl.pi/(d*self.wl)*(self.xgrid**2+self.ygrid**2))
    return i2
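This method (like the variable-window variant in Example 11 below) multiplies the field by a quadratic phase "chirp" exp(-1j*pi*(x**2 + y**2)/(d*wl)) before and after the FFT. A standalone sketch of building that factor on a small grid; the grid size, wavelength, and distance are illustrative values, not taken from Intensity2D:

import pylab as pl

nx = 64
wl = 633e-9                                   # wavelength [m], illustrative
d = 0.5                                       # propagation distance [m], illustrative
x = pl.linspace(-1e-3, 1e-3, nx)
xgrid, ygrid = pl.meshgrid(x, x)
chirp = pl.exp(-1j*pl.pi/(d*wl)*(xgrid**2 + ygrid**2))
print("%s, max |chirp| = %.1f" % (str(chirp.shape), abs(chirp).max()))   # unit-magnitude complex array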
Example 6: wave_gen
def wave_gen(self, ploti=1):
    if self.wave_type == "pulse":
        self.wave_origin = p.exp(-5e-2*(p.arange(self.iter_total)-20)**2)
    elif self.wave_type == "sine":
        self.wave_origin = (1-p.exp(-1e-7*(p.arange(self.iter_total))))*p.sin(2*p.pi*p.arange(self.iter_total)/(20))
    if ploti == 1:
        p.figure(3)
        p.plot(self.wave_origin)
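The same two source waveforms can be generated outside the class; a standalone sketch with an arbitrary number of iterations:

import pylab as p

iter_total = 400                                       # arbitrary number of time steps
n = p.arange(iter_total)
pulse = p.exp(-5e-2*(n - 20)**2)                       # Gaussian pulse centred on step 20
sine = (1 - p.exp(-1e-7*n))*p.sin(2*p.pi*n/20.)        # sine with a slow exponential ramp-up
p.figure()
p.plot(n, pulse, label='pulse')
p.plot(n, sine, label='sine')
p.legend()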
Example 7: beta
def beta(v, gate):
    """
    Backward rate constant of the Hodgkin-Huxley gating variable
    ('n' for the potassium gate, 'm' and 'h' for the sodium gate).
    """
    if gate == 'n':
        return 0.125 * p.exp((v+65)/-80.)
    elif gate == 'm':
        return 4 * p.exp(-(v+65) / 18)
    elif gate == 'h':
        return 1 / (1 + p.exp(-(v+35) / 10))
Example 8: plot_jp_tmax_surf
def plot_jp_tmax_surf(mu, c, phi, pmax, smax, ks):
    # @brief tau max based on varying slip rate, normal pressure
    s_dot = py.arange(0, smax, smax/100.)
    prange = py.arange(0, pmax, pmax/100.)
    kap = 1 - py.exp(-s_dot/ks)    # kappa
    TMAX = py.zeros((len(kap), len(prange)))
    tphi = py.tan(phi)             # keep tan(phi) handy
    for k_i in range(0, len(kap)):
        k_tmp = kap[k_i]
        for p_j in range(0, len(prange)):
            p_tmp = prange[p_j]
            TMAX[k_i][p_j] = k_tmp*(c + p_tmp*tphi) + (1-k_tmp)*p_tmp*mu

    fig = plt.figure()
    ax = fig.add_subplot(121)
    # should be ok to plot the surface
    S, P = py.meshgrid(s_dot, prange)
    CS = plt.contour(S, P, TMAX, 8, colors='k', linewidths=1.5)
    plt.clabel(CS, inline=1, fontsize=16)
    img = plt.imshow(TMAX, interpolation='bilinear', origin='lower',
                     cmap=cm.jet, extent=(min(s_dot), max(s_dot), min(prange), max(prange)))
    CBI = plt.colorbar(img, orientation='vertical', shrink=0.8)
    CBI.set_label(r'$\tau ,max $[psi]')
    ax.set_title(r'$\tau ,max = f(\sigma,\kappa), ks=%.2f $' % ks)
    ax.set_xlabel('slip rate [in/sec]')
    ax.set_ylabel(r'$\sigma_z $', size=24)

    # use twice ks, re-calc what's necessary, then replot
    ks2 = ks * 2
    kap2 = 1 - py.exp(-s_dot/ks2)
    TMAX2 = py.zeros((len(kap2), len(prange)))
    # tphi = py.tan(phi)    # keep tan(phi) handy
    for k_i in range(0, len(kap2)):
        k2_tmp = kap2[k_i]
        for p_j in range(0, len(prange)):
            p_tmp = prange[p_j]
            TMAX2[k_i][p_j] = k2_tmp*(c + p_tmp*tphi) + (1-k2_tmp)*p_tmp*mu

    # fig = plt.figure()
    ax = fig.add_subplot(122)
    # should be ok to plot the surface
    # S, P = py.meshgrid(s_dot, prange)
    CS2 = plt.contour(S, P, TMAX2, 8, colors='k', linewidths=1.5)
    plt.clabel(CS2, inline=1, fontsize=16)
    img2 = plt.imshow(TMAX2, interpolation='bilinear', origin='lower',
                      cmap=cm.jet, extent=(min(s_dot), max(s_dot), min(prange), max(prange)))
    CBI2 = plt.colorbar(img2, orientation='vertical', shrink=0.8)
    CBI2.set_label(r'$\tau ,max $[psi]')
    ax.set_title(r'$\tau ,max = f(\sigma,\kappa), ks=%.2f $' % ks2)
    ax.set_xlabel('slip rate [in/sec]')
    ax.set_ylabel(r'$\sigma_z $', size=24)
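The double loops above can be written directly on a meshgrid, which is the more idiomatic numpy formulation and gives the same row/column layout (rows follow kappa, columns follow pressure). A sketch under the same definitions, with hypothetical parameter values:

import pylab as py

mu, c, phi, pmax, smax, ks = 0.6, 2.0, py.radians(30.), 30.0, 10.0, 0.05   # hypothetical inputs
s_dot = py.arange(0, smax, smax/100.)
prange = py.arange(0, pmax, pmax/100.)
kap = 1 - py.exp(-s_dot/ks)                     # kappa as a function of slip rate

PP, KK = py.meshgrid(prange, kap)               # KK varies along rows, PP along columns
TMAX = KK*(c + PP*py.tan(phi)) + (1 - KK)*PP*mu
print(TMAX.shape)                               # (100, 100), same layout as the loop version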
Example 9: alpha
def alpha(v, gate):
    """
    Forward rate constant of the Hodgkin-Huxley gating variable
    ('n' for the potassium gate, 'm' and 'h' for the sodium gate).
    """
    if gate == 'n':
        v_centered = v + 55
        return 0.01 * v_centered / (1 - p.exp(-v_centered/10.))
    elif gate == 'm':
        return 0.1 * (v + 40) / (1 - p.exp(-(v + 40)/10))
    elif gate == 'h':
        return 0.07 * p.exp(-(v + 65) / 20)
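With alpha and beta from Examples 7 and 9 both in scope, the steady-state value of each gating variable is alpha/(alpha + beta); a quick check at a resting potential of about -65 mV (a standard textbook value, assumed here):

import pylab as p

v_rest = -65.0
for gate in ('n', 'm', 'h'):
    a, b = alpha(v_rest, gate), beta(v_rest, gate)
    print("%s_inf = %.3f" % (gate, a/(a + b)))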
Example 10: test_predict_for_wo_data
def test_predict_for_wo_data():
    """ Approach to testing predict_for function:
    1. Create model with known mu_age, known covariate values, known effect coefficients
    2. Setup MCMC with NoStepper for all stochs
    3. Sample to generate trace with known values
    4. Predict for results, and confirm that they match expected values
    """
    d = data.ModelData()
    d.hierarchy, d.output_template = data_simulation.small_output()

    # create model and priors
    vars = ism.age_specific_rate(d, 'p', 'all', 'total', 'all', None, None, None)

    # fit model
    m = mc.MCMC(vars)
    m.sample(1)

    ### Prediction case 1: constant zero random effects, zero fixed effect coefficients

    # check estimates with priors on random effects
    d.parameters['p']['random_effects'] = {}
    for node in ['USA', 'NAHI', 'super-region-1', 'all']:
        d.parameters['p']['random_effects'][node] = dict(dist='Constant', mu=0, sigma=1.e-9)  # zero out REs to see if test passes

    pred = covariate_model.predict_for(d, d.parameters['p'],
                                       'all', 'total', 'all',
                                       'USA', 'male', 1990,
                                       0., vars['p'], 0., pl.inf)

    ### Prediction case 2: constant non-zero random effects, zero fixed effect coefficients
    # FIXME: this test was failing because PyMC is drawing from the prior of beta[0] even though I asked for NoStepper

    # check estimates with priors on random effects
    for i, node in enumerate(['USA', 'NAHI', 'super-region-1']):
        d.parameters['p']['random_effects'][node]['mu'] = (i+1.)/10.

    pred = covariate_model.predict_for(d, d.parameters['p'],
                                       'all', 'total', 'all',
                                       'USA', 'male', 1990,
                                       0., vars['p'], 0., pl.inf)

    # test that the predicted value is as expected
    fe_usa_1990 = pl.exp(.5*vars['p']['beta'][0].value)  # beta[0] is drawn from prior, even though I set it to NoStepper, see FIXME above
    re_usa_1990 = pl.exp(.1+.2+.3)
    assert_almost_equal(pred,
                        vars['p']['mu_age'].trace() * fe_usa_1990 * re_usa_1990)
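The expected value checked at the end of this test is the age pattern multiplied by exp of the fixed-effect contribution (the .5*beta[0] term) and by exp of the summed random effects along the USA branch of the hierarchy; the effects combine multiplicatively because they are additive on the log scale. A toy numeric sketch of that combination, with made-up numbers standing in for the MCMC trace:

import pylab as pl

mu_age = pl.array([0.01, 0.02, 0.03])                       # hypothetical age pattern, not a real trace
beta_x_sex, x_sex = 0.4, 0.5                                # hypothetical coefficient; .5 as in the test
alpha_re = {'USA': .1, 'NAHI': .2, 'super-region-1': .3}    # the random effects set above

fe = pl.exp(beta_x_sex * x_sex)
re = pl.exp(sum(alpha_re.values()))
print(mu_age * fe * re)                                     # same fe * re pattern as in the assert above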
Example 11: fresnelSingleTransformVW
def fresnelSingleTransformVW(self, d):
    # compute new window
    x2 = self.nx*pl.absolute(d)*self.wl/(self.endx-self.startx)
    y2 = self.ny*pl.absolute(d)*self.wl/(self.endy-self.starty)

    # create new intensity object
    i2 = Intensity2D(self.nx, -x2/2, x2/2,
                     self.ny, -y2/2, y2/2,
                     self.wl)

    # compute intensity
    u1p = self.i*pl.exp(-1j*pl.pi/(d*self.wl)*(self.xgrid**2+self.ygrid**2))
    ftu1p = pl.fftshift(pl.fft2(pl.fftshift(u1p)))
    i2.i = ftu1p*1j/(d*i2.wl)*pl.exp(-1j*pl.pi/(d*i2.wl)*(i2.xgrid**2+i2.ygrid**2))
    return i2
Example 12: Fraunhofer
def Fraunhofer(i, z):
    print "Propagation:Fraunhofer"
    ft = pl.fftshift(pl.fftn(pl.fftshift(i.i)))
    dx = i.wl*z/(i.nx*i.dx)
    dy = i.wl*z/(i.ny*i.dy)
    po = pl.exp(1j*2*pl.pi/i.wl*i.dx*i.dx)/(1j*i.wl*z)
    p = pl.arange(0, i.nx)-(i.nx+0.5)/2.0
    q = pl.arange(0, i.ny)-(i.ny+0.5)/2.0
    [pp, qq] = pl.meshgrid(p, q)
    pm = pl.exp(1j*pl.pi/(i.wl*z)*((pp*dx)**2+(qq*dy)**2))
    i2 = Intensity.Intensity2D(i.nx, -i.nx*dx/2, i.nx*dy/2, i.ny, -i.ny*dy/2, i.ny*dy/2)
    i2.i = po*pm*ft
    return i2
    print "Propagation:Fraunhofer>", dx, dy, i.nx*dx, i.ny*dy   # unreachable: placed after the return in the original
Example 13: gaussian
def gaussian(x, c, w):
    """ Analytic Gaussian function with centre 'c' and width 'w',
    normalised to unit peak amplitude (NOT normalised to unit area).
    The FWHM of this fn is 2*sqrt(2*log(2))*w """
    G = exp(-(x-c)**2/(2*w**2))
    G /= G.max()
    return G
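A quick check of the FWHM relation quoted in the docstring, with an arbitrary centre and width; the bare exp used by gaussian() is assumed to come from pylab:

import pylab
from pylab import exp      # gaussian() above calls a bare exp

x = pylab.linspace(-10., 10., 2001)
c, w = 0.0, 1.5
G = gaussian(x, c, w)                          # assumes gaussian defined above
measured_fwhm = x[G >= 0.5].ptp()              # width of the region above half maximum
print("%.3f vs %.3f" % (measured_fwhm, 2*pylab.sqrt(2*pylab.log(2))*w))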
Example 14: test_fixed_effect_priors
def test_fixed_effect_priors():
    model = data.ModelData()

    # set prior on sex
    parameters = dict(fixed_effects={'x_sex': dict(dist='TruncatedNormal', mu=1., sigma=.5, lower=-10, upper=10)})

    # simulate normal data
    n = 32.
    sex_list = pl.array(['male', 'female', 'total'])
    sex = sex_list[mc.rcategorical([.3, .3, .4], n)]
    beta_true = dict(male=-1., total=0., female=1.)
    pi_true = pl.exp([beta_true[s] for s in sex])
    sigma_true = .05
    p = mc.rnormal(pi_true, 1./sigma_true**2.)

    model.input_data = pandas.DataFrame(dict(value=p, sex=sex))
    model.input_data['area'] = 'all'
    model.input_data['year_start'] = 2010
    model.input_data['year_end'] = 2010

    # create model and priors
    vars = {}
    vars.update(covariate_model.mean_covariate_model('test', 1, model.input_data, parameters, model,
                                                     'all', 'total', 'all'))

    print vars['beta']
    assert vars['beta'][0].parents['mu'] == 1.
Example 15: test_covariate_model_sim_no_hierarchy
def test_covariate_model_sim_no_hierarchy():
    # simulate normal data
    model = data.ModelData()
    model.hierarchy, model.output_template = data_simulation.small_output()

    X = mc.rnormal(0., 1.**2, size=(128, 3))
    beta_true = [-.1, .1, .2]
    Y_true = pl.dot(X, beta_true)

    pi_true = pl.exp(Y_true)
    sigma_true = .01*pl.ones_like(pi_true)
    p = mc.rnormal(pi_true, 1./sigma_true**2.)

    model.input_data = pandas.DataFrame(dict(value=p, x_0=X[:,0], x_1=X[:,1], x_2=X[:,2]))
    model.input_data['area'] = 'all'
    model.input_data['sex'] = 'total'
    model.input_data['year_start'] = 2000
    model.input_data['year_end'] = 2000

    # create model and priors
    vars = {}
    vars.update(covariate_model.mean_covariate_model('test', 1, model.input_data, {}, model, 'all', 'total', 'all'))
    vars.update(rate_model.normal_model('test', vars['pi'], 0., p, sigma_true))

    # fit model
    m = mc.MCMC(vars)
    m.sample(2)