This article collects typical usage examples of the numpy.exp function in Python. If you have been wondering what exactly numpy.exp does, how to use it, and where to find real examples, the curated code samples below may help.
Fifteen code examples of numpy.exp are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
Example 1: plot_open_close

def plot_open_close(self, fig=None, savefig=True):
    '''
    Plot the open periods versus the shut periods.
    '''
    stretch_list = self.cluster_data.compute_open_close()
    mode_num = len(stretch_list)
    if fig is None:
        fig = plt.figure()
    ax = fig.add_subplot(111)
    cmap = np.linspace(0, 1, mode_num)
    for index, stretch in enumerate(stretch_list):
        # note: plt.cm.spectral was renamed plt.cm.nipy_spectral in Matplotlib >= 2.2
        ax.scatter(stretch['open_period'], stretch['shut_period'],
                   facecolors='none',
                   edgecolors=plt.cm.spectral(cmap[index]),
                   s=1, label=str(index + 1))
        #ax.scatter(stretch['mean_open'], stretch['mean_shut'],
        #           color=plt.cm.spectral(cmap[index]),
        #           s=50, label=str(index + 1))
    ax.legend()
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.set_ylim([0.3, np.exp(7)])
    ax.set_xlim([0.3, np.exp(5)])
    ax.set_xlabel('Open period (ms in log scale)')
    ax.set_ylabel('Shut period (ms in log scale)')
    ax.set_title('Open/Shut')
    if savefig:
        fig.savefig(os.path.join(self.filepath, self.name + 'Open_Shut.png'), dpi=150)
Example 2: sample

def sample(scores, temperature=1.0):
    """
    Sampling words (each sample is drawn from a categorical distribution).
    In:
        scores - array of size #samples x #classes;
            every entry determines a score for sample i having class j
        temperature - temperature for the predictions;
            the higher, the flatter the probabilities and hence the more random the answers
    Out:
        set of indices chosen as output, a vector of size #samples
    """
    logscores = np.log(scores) / temperature
    # numerically stable version
    normalized_logscores = logscores - np.max(logscores, axis=-1)[:, np.newaxis]
    margin_logscores = np.sum(np.exp(normalized_logscores), axis=-1)
    probs = np.exp(normalized_logscores) / margin_logscores[:, np.newaxis]
    draws = np.zeros_like(probs)
    num_samples = probs.shape[0]
    # we use 1 trial to mimic categorical distributions using multinomial
    for k in range(num_samples):  # xrange in the original Python 2 source
        draws[k, :] = np.random.multinomial(1, probs[k, :], 1)
    return np.argmax(draws, axis=-1)
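A minimal usage sketch (assuming NumPy is imported as np and the sample function above is in scope); note that the scores must be strictly positive, since the function takes their logarithm:

import numpy as np

np.random.seed(0)
scores = np.array([[0.1, 0.7, 0.2],
                   [0.5, 0.3, 0.2]])
# low temperature: sharpens the distribution, almost always picks the argmax
print(sample(scores, temperature=0.1))
# high temperature: flattens the distribution towards uniform draws
print(sample(scores, temperature=10.0))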
Example 3: mean_quadratic_weighted_kappa

def mean_quadratic_weighted_kappa(kappas, weights=None):
    """
    Calculates the mean of the quadratic weighted kappas after applying
    Fisher's r-to-z transform, which is approximately a
    variance-stabilizing transformation. The transform is undefined if
    one of the kappas is 1.0, so all kappa values are capped in the
    range (-0.999, 0.999). The reverse transformation is then applied
    before returning the result.

    mean_quadratic_weighted_kappa(kappas), where kappas is a vector of
    kappa values.

    mean_quadratic_weighted_kappa(kappas, weights), where weights is a
    vector of weights the same size as kappas. Weights are applied in
    the z-space.
    """
    kappas = np.array(kappas, dtype=float)
    if weights is None:
        weights = np.ones(np.shape(kappas))
    else:
        weights = weights / np.mean(weights)
    # ensure that kappas are in the range [-.999, .999]
    kappas = np.array([min(x, .999) for x in kappas])
    kappas = np.array([max(x, -.999) for x in kappas])
    z = 0.5 * np.log((1 + kappas) / (1 - kappas)) * weights
    z = np.mean(z)
    return (np.exp(2 * z) - 1) / (np.exp(2 * z) + 1)
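A usage sketch (assuming the function above); because the averaging happens in z-space, the result differs slightly from the arithmetic mean of the kappas:

import numpy as np

kappas = [0.6, 0.7, 0.8]
print(mean_quadratic_weighted_kappa(kappas))             # ~0.71, above the plain mean 0.7
print(mean_quadratic_weighted_kappa(kappas, [1, 1, 2]))  # ~0.73, pulled toward the 0.8 rater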
Example 4: solveParams

def solveParams(self):
    # Given the specified values for w, z1 and z2, determine the offsets
    # xc and zc required to match a catenary to our cable. This is done
    # algebraically. The derivation of the equation was performed with
    # the sympy package.
    w = self.w
    a = self.a
    zd = self.z2 - self.z1
    # calculate some repeated elements
    e2wa = np.exp(2 * w / a)
    ewa = np.exp(w / a)
    a2 = a ** 2
    # calculate the 3 components
    c1 = (a2 * e2wa - 2 * a2 * ewa + a2 + zd ** 2 * ewa) * ewa
    c2 = (-2 * a * e2wa + 2 * a * ewa)
    c3 = zd / (a * (ewa - 1))
    # Determine the x offset ...
    self.xc = a * np.log(2 * np.abs(np.sqrt(c1) / c2) + c3)
    # ... and from this the z offset
    self.zc = self.z1 - a * np.cosh(self.xc / a)
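For reference, the curve being fitted is the catenary z(x) = a·cosh((x − xc)/a) + zc. A minimal sketch (the catenary helper is hypothetical, not part of the original class) showing why the last line pins the left endpoint:

import numpy as np

def catenary(x, a, xc, zc):
    # cable height at horizontal position x
    return a * np.cosh((x - xc) / a) + zc

# with zc = z1 - a*cosh(xc/a) as on the last line above,
# catenary(0, a, xc, zc) == z1 holds identically (cosh is even);
# a correct xc then also makes catenary(w, a, xc, zc) == z2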
Example 5: __set_static_gaus_pmfs

def __set_static_gaus_pmfs(self):
    if np.logical_not(self.off_buff.is_full()):
        print("The long-term buffer is not yet full. This may give undesirable results")
    # median RSS of off-state buffer
    cal_med = self.off_buff.get_no_nan_median()
    if (np.sum(cal_med == 127) > 0) | (np.sum(np.isnan(cal_med)) > 0):
        sys.stderr.write('At least one link has a median of 127 or is nan\n\n')
        quit()
    if np.sum(np.isnan(self.off_buff.get_nanvar())) > 0:
        sys.stderr.write('the long term buffer has a nan')
        quit()
    cal_med_mat = np.tile(cal_med, (self.V_mat.shape[1], 1)).T
    # variance of RSS during calibration
    cal_var = np.maximum(self.off_buff.get_nanvar(), self.omega)  # 3.0
    cal_var_mat = np.tile(cal_var, (self.V_mat.shape[1], 1)).T
    # Compute the off_link emission probabilities for each link
    x = np.exp(-(self.V_mat - cal_med_mat) ** 2 / (2 * cal_var_mat / 1.0))  # 1.0
    self.off_links = self.__normalize_pmf(x)
    # Compute the on_link emission probabilities for each link
    x = np.exp(-(self.V_mat - (cal_med_mat - self.Delta)) ** 2 / (self.eta * 2 * cal_var_mat))  # 3
    self.on_links = self.__normalize_pmf(x)
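__normalize_pmf is a private helper not shown in this excerpt; a plausible stand-in (an assumption, the original may differ) scales each link's emission vector so it sums to one:

import numpy as np

def normalize_pmf(x):
    # divide each row (one link's emission scores across RSS bins)
    # by its sum so that every row is a proper pmf
    return x / np.sum(x, axis=1, keepdims=True)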
Example 6: testCustomGradient
def testCustomGradient(self):
    dtype = dtypes.float32

    @function.Defun(dtype, dtype, dtype)
    def XentLossGrad(logits, labels, dloss):
        dlogits = array_ops.reshape(dloss, [-1, 1]) * (
            nn_ops.softmax(logits) - labels)
        dlabels = array_ops.zeros_like(labels)
        # Takes exp(dlogits) to differentiate it from the "correct" gradient.
        return math_ops.exp(dlogits), dlabels

    @function.Defun(dtype, dtype, grad_func=XentLossGrad)
    def XentLoss(logits, labels):
        return math_ops.reduce_sum(labels * math_ops.log(nn_ops.softmax(logits)),
                                   1)

    g = ops.Graph()
    with g.as_default():
        logits = array_ops.placeholder(dtype)
        labels = array_ops.placeholder(dtype)
        loss = XentLoss(logits, labels)
        dlogits = gradients_impl.gradients([loss], [logits])

    x = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
    prob = np.exp(x) / np.sum(np.exp(x), 1, keepdims=1)
    y = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
    for cfg in _OptimizerOptions():
        tf_logging.info("cfg = %s", cfg)
        with session.Session(graph=g, config=cfg) as sess:
            out, = sess.run(dlogits, {logits: x, labels: y})
        self.assertAllClose(out, np.exp(prob - y))
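The NumPy softmax used to build prob is fine for the [-10, 10] inputs drawn here, but np.exp overflows for large logits; a numerically stable variant subtracts the row max first (a general-purpose sketch, not part of the test):

import numpy as np

def stable_softmax(x):
    # softmax is invariant to shifting each row by a constant,
    # so subtracting the row max avoids overflow in np.exp
    shifted = x - np.max(x, axis=1, keepdims=True)
    e = np.exp(shifted)
    return e / np.sum(e, axis=1, keepdims=True)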
Example 7: CRRbinomial

def CRRbinomial(S, K, T, rf, sigma, n):
    '''
    Option pricing using a binomial tree, no dividends.
    :param S: underlying's current price
    :param K: option strike price
    :param T: time to expiration
    :param rf: risk-free rate
    :param sigma: volatility
    :param n: number of periods to T
    :return: (European call price, European put price)
    '''
    dt = float(T) / n
    u = np.exp(sigma * (dt ** 0.5))
    d = 1. / u
    p = (np.exp(rf * dt) - d) / (u - d)
    euroCall, euroPut = 0, 0
    for idx in range(0, n + 1):  # xrange in the original Python 2 source
        # spmisc is scipy.misc in the original; comb lives in scipy.special on modern SciPy
        prob = spmisc.comb(n, idx) * (p ** idx) * (1 - p) ** (n - idx)
        euroCall += prob * max(S * (u ** idx) * (d ** (n - idx)) - K, 0)
        euroPut += prob * max(K - S * (u ** idx) * (d ** (n - idx)), 0)
    euroCall *= np.exp(-rf * T)
    euroPut *= np.exp(-rf * T)
    return euroCall, euroPut
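A usage sketch; spmisc is assumed to be bound to scipy.special, whose comb replaces the removed scipy.misc.comb:

import numpy as np
from scipy import special as spmisc  # scipy.special.comb replaces scipy.misc.comb

# hypothetical contract: at-the-money 1-year option, 20% volatility, 5% rate
call, put = CRRbinomial(S=100.0, K=100.0, T=1.0, rf=0.05, sigma=0.2, n=200)
print(call, put)  # should land near the Black-Scholes values, ~10.45 and ~5.57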
Example 8: update

def update(self, proposal, logp, bad, i):
    logps = self.logps[i - 1]
    thresh = self.thresh
    if logp > logps:
        self.logps[i] = logp
        self.trace[i] = proposal
        self.dets.append([d.value for d in self.deterministics])
        self.nbad = 0
        self.stuck = False
        self.temp = 1.
        return
    self.nbad += 1
    if self.nbad > self.thresh and not self.stuck:
        self.stuck = True
        self.logpTmp = logps
    if self.stuck:
        r = log(rand()) * self.temp
        print('stuck', i, logps, self.logpTmp, logp, r, logp - self.logpTmp)
        if logp - self.logpTmp > r:
            self.logpTmp = logp
            self.temp /= numpy.exp(1. / thresh)
        else:
            self.temp *= numpy.exp(1. / thresh)
    self.logps[i] = self.logps[i - 1]
    self.trace[i] = self.trace[i - 1].copy()
    self.dets.append(self.dets[-1])
Example 9: logLikelihood

def logLikelihood(self, obs_seq, num_cluster, error_rate_dict, switch_rate_dict):
    """
    :param obs_seq: state list of the same length as the observation sequence
    :param num_cluster: int
    :param error_rate_dict: dict
    :param switch_rate_dict: dict
    :return: log likelihood
    """
    pi = [1 / num_cluster] * num_cluster
    path_length = len(obs_seq)
    alpha_dict = dict()
    alpha_dict[0] = dict(zip(range(num_cluster),
                             [np.log(pi[i]) + np.log(error_rate_dict[i][obs_seq[0]])
                              for i in range(num_cluster)]))
    for t in range(path_length - 1):
        alpha_dict[t + 1] = dict()
        for j in range(num_cluster):
            log_alpha_j = [(alpha_dict[t][i] + np.log(switch_rate_dict[i][j])) for i in range(num_cluster)]
            max_log_alpha_j = max(log_alpha_j)
            sum_residual = np.sum(np.exp(log_alpha_j - max_log_alpha_j))
            final = max_log_alpha_j + np.log(sum_residual)
            alpha_dict[t + 1][j] = final + np.log(error_rate_dict[j][obs_seq[t + 1]])
    alpha_df = pd.DataFrame.from_dict(alpha_dict)
    # .ix in the original; it was removed from modern pandas in favor of .iloc
    max_final = max(alpha_df.iloc[:, path_length - 1])
    llk = max_final + np.log(np.sum(np.exp(alpha_df.iloc[:, path_length - 1] - max_final)))
    return llk
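The max_log_alpha_j / sum_residual / final lines implement the standard log-sum-exp trick; SciPy ships it directly as scipy.special.logsumexp. A small sketch of why the naive form fails:

import numpy as np
from scipy.special import logsumexp

a = np.array([-1000.0, -1001.0, -1002.0])
print(np.log(np.sum(np.exp(a))))  # naive form underflows to -inf
print(logsumexp(a))               # ~ -999.59, computed stably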
Example 10: test_solve_poisson_becke_sa
def test_solve_poisson_becke_sa():
    sigma = 8.0
    rtf = ExpRTransform(1e-4, 1e2, 500)
    r = rtf.get_radii()
    rhoy = np.exp(-0.5 * (r / sigma) ** 2) / sigma ** 3 / (2 * np.pi) ** 1.5
    rhod = np.exp(-0.5 * (r / sigma) ** 2) / sigma ** 3 / (2 * np.pi) ** 1.5 * (-r / sigma) / sigma
    rho = CubicSpline(rhoy, rhod, rtf)
    v = solve_poisson_becke([rho])[0]
    s2s = np.sqrt(2) * sigma
    soly = erf(r / s2s) / r
    sold = np.exp(-(r / s2s) ** 2) * 2 / np.sqrt(np.pi) / s2s / r - erf(r / s2s) / r ** 2
    if False:
        import matplotlib.pyplot as pt
        n = 10
        pt.clf()
        pt.plot(r[:n], soly[:n], label='exact')
        pt.plot(r[:n], v.y[:n], label='spline')
        pt.legend(loc=0)
        pt.savefig('denu.png')
    assert abs(v.y - soly).max() / abs(soly).max() < 1e-6
    assert abs(v.dx - sold).max() / abs(sold).max() < 1e-4
    # Test the boundary condition at zero and infinity
    assert v.extrapolation.l == 0
    np.testing.assert_allclose(v.extrapolation.amp_left, np.sqrt(2 / np.pi) / sigma)
    np.testing.assert_allclose(v.extrapolation.amp_right, 1.0)
Example 11: __init__

def __init__(self, ps=None, sigma_v=0.0, redshift=0.0, **kwargs):
    if ps is None:
        from os.path import join, dirname
        #psfile = join(dirname(__file__), "data/ps_z1.5.dat")
        #psfile = join(dirname(__file__), "data/wigglez_halofit_z1.5.dat")
        psfile = join(dirname(__file__), "data/wigglez_halofit_z0.8.dat")
        print("loading matter power file: " + psfile)
        redshift = 0.8
        #pk_interp = cs.LogInterpolater.fromfile(psfile)
        pwrspec_data = np.genfromtxt(psfile)
        (log_k, log_pk) = (np.log(pwrspec_data[:, 0]),
                           np.log(pwrspec_data[:, 1]))
        logpk_interp = interpolate.interp1d(log_k, log_pk,
                                            bounds_error=False,
                                            fill_value=np.min(log_pk))
        pk_interp = lambda k: np.exp(logpk_interp(np.log(k)))
        kstar = 7.0
        ps = lambda k: np.exp(-0.5 * k ** 2 / kstar ** 2) * pk_interp(k)
    self._sigma_v = sigma_v
    RedshiftCorrelation.__init__(self, ps_vv=ps, redshift=redshift)
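The pattern of interpolating a tabulated power spectrum in log-log space is general; here is a self-contained sketch with a synthetic power-law spectrum (hypothetical data, the WiggleZ file above is not needed):

import numpy as np
from scipy import interpolate

k_data = np.logspace(-3, 1, 50)
pk_data = 1e4 * k_data ** -1.5        # hypothetical power-law spectrum
log_k, log_pk = np.log(k_data), np.log(pk_data)
logpk_interp = interpolate.interp1d(log_k, log_pk,
                                    bounds_error=False,
                                    fill_value=np.min(log_pk))
pk = lambda k: np.exp(logpk_interp(np.log(k)))
print(pk(0.1))  # smooth interpolation in log-log space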
Example 12: _compute_influence_kernel
def _compute_influence_kernel(self, iter, dqd):
    """Compute the neighborhood kernel for some iteration.

    Parameters
    ----------
    iter : int
        The iteration for which to compute the kernel.
    dqd : array (nrows x ncolumns)
        This is one quadrant of Euclidean distances between Kohonen unit
        locations.
    """
    # compute radius decay for this iteration
    curr_max_radius = self.radius * np.exp(-1.0 * iter / self.iter_scale)
    # same for learning rate
    curr_lrate = self.lrate * np.exp(-1.0 * iter / self.iter_scale)
    # compute Gaussian influence kernel
    infl = np.exp((-1.0 * dqd) / (2 * curr_max_radius * iter))
    infl *= curr_lrate
    # hard-limit kernel to max radius
    # XXX is this really necessary?
    infl[dqd > curr_max_radius] = 0.
    return infl
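For intuition, a tiny sketch of the exponential decay schedule used above (the radius and iter_scale values are made up):

import numpy as np

radius, iter_scale = 10.0, 100.0  # hypothetical values
for it in (1, 50, 100, 200):
    print(it, radius * np.exp(-1.0 * it / iter_scale))
# the radius falls to radius/e after iter_scale iterations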
Example 13: firwin_complex_bandpass

def firwin_complex_bandpass(num_taps, cutoffs, window='hamming'):
    width, center = max(cutoffs) - min(cutoffs), (cutoffs[0] + cutoffs[1]) / 2
    # prototype lowpass with half the bandwidth, then shift it to the band center
    b = scipy.signal.firwin(num_taps, width / 2, window='rectangular', scale=False)
    b = b * numpy.exp(1j * numpy.pi * center * numpy.arange(len(b)))
    b = b * scipy.signal.get_window(window, num_taps, False)
    # normalize the response to unity gain at the band center
    b = b / numpy.sum(b * numpy.exp(-1j * numpy.pi * center * (numpy.arange(num_taps) - (num_taps - 1) / 2)))
    return b.astype(numpy.complex64)
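A usage sketch, assuming the function above is in scope; as with scipy.signal.firwin, the cutoffs are normalized so that 1.0 is the Nyquist frequency:

import numpy
import scipy.signal

# 129-tap complex filter passing normalized frequencies 0.1 to 0.3
taps = firwin_complex_bandpass(129, (0.1, 0.3))
print(taps.dtype, len(taps))  # complex64 129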
Example 14: convolve_template

def convolve_template():
    from plot_Profile_evolution import all_obs
    import numpy as np
    date_list, observations = all_obs()
    x = np.arange(512 - 25 - 50, 512 + 45 + 51)
    delay = []
    for n in observations:
        best_mu = 0
        best_corr = 0
        for mu in np.linspace(512 - 25, 512 + 45, 701):
            # two-Gaussian template: a fixed main peak plus a movable secondary peak
            template = np.exp(-(x - 512) ** 2 / (2.0 * 6.2 ** 2)) + \
                       0.09 * np.exp(-(x - mu) ** 2 / (2.0 * 8.0 ** 2))
            template /= template.max()
            corr = np.correlate(n, template, mode="valid").max()
            if corr > best_corr:
                best_corr = corr
                best_mu = mu
        delay.append(best_mu - 512)
    plt.plot(date_list, delay, "ko")
    plt.show()
Example 15: all_GL

def all_GL(self, q, maxpiv=None):
    """Return (piv, f_binodal_gas, f_binodal_liquid, f_spinodal_gas,
    f_spinodal_liquid) at insertion works piv sampled between the
    critical point and maxpiv (defaults to 2.2 * the critical pressure)."""
    fc, pivc = self.critical_point(q)
    Fc = np.log(fc)
    # start sensibly above the critical point
    startp = pivc * 1.1
    fm = fminbound(self.mu, fc, self.maxf(), args=(startp, q))
    fM = fminbound(lambda f: -self.pv(f, startp, q), 0, fc)
    initial_guess = np.log([0.5 * fM, 0.5 * (fm + self.maxf())])
    # construct the top of the GL binodal
    if maxpiv is None:
        maxpiv = startp * 2
    topp = 1. / np.linspace(1. / startp, 1. / maxpiv)
    topGL = [initial_guess]
    for piv in topp:
        topGL.append(self.binodalGL(piv, q, topGL[-1]))
    # construct the GL binodal between the starting piv and the critical point
    botp = np.linspace(startp, pivc)[:-1]
    botGL = [initial_guess]
    for piv in botp:
        botGL.append(self.binodalGL(piv, q, botGL[-1]))
    # join the two results and convert back from log
    binodal = np.vstack((
        [[pivc, fc, fc]],
        np.column_stack((botp, np.exp(botGL[1:])))[::-1],
        np.column_stack((topp, np.exp(topGL[1:])))[1:]
    ))
    # spinodal at the same pivs
    spinodal = self.spinodalGL(q, binodal[:, 0])
    # join everything
    return np.column_stack((binodal, spinodal[:, 1:]))