This article collects typical usage examples of the math.exp function in Python. If you have been wondering what exactly math.exp does, how to call it, or what its use looks like in real code, the curated examples below should help.
The following shows 15 code examples of the exp function, sorted by popularity by default.
Example 1: logadd
def logadd(x, y):
    """Numerically stable log(exp(x) + exp(y))."""
    from math import log, exp
    if x > y:
        return x + log(1. + exp(y - x))
    else:
        return y + log(1. + exp(x - y))
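A quick check of the identity (not part of the original snippet): logadd(x, y) equals log(exp(x) + exp(y)), but stays finite even where exp() would overflow.

import math

print(math.log(math.exp(2.0) + math.exp(3.0)))  # 3.3132616875182228
print(logadd(2.0, 3.0))                          # same result
print(logadd(1000.0, 1001.0))                    # 1001.3132616875182; the direct form overflows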
Example 2: standardMC_european_option
import math
import numpy

def standardMC_european_option(K, T, R, V, S0, N, option_type, path_num=10000):
    dt = T / N
    sigma = V
    drift = math.exp((R - 0.5 * sigma * sigma) * dt)
    sigma_sqrt = sigma * math.sqrt(dt)
    exp_RT = math.exp(-R * T)
    european_payoff = []
    for i in range(path_num):
        # simulate one geometric-Brownian-motion path of N steps
        former = S0
        for j in range(int(N)):
            former = former * drift * math.exp(sigma_sqrt * numpy.random.normal(0, 1))
        european_option = former
        if option_type == 1.0:  # call
            european_payoff_call = exp_RT * max(european_option - K, 0)
            european_payoff.append(european_payoff_call)
        elif option_type == 2.0:  # put
            european_payoff_put = exp_RT * max(K - european_option, 0)
            european_payoff.append(european_payoff_put)
    # Standard Monte Carlo estimate with a 95% confidence interval
    p_mean = numpy.mean(european_payoff)
    p_std = numpy.std(european_payoff)
    p_confmc = (p_mean - 1.96 * p_std / math.sqrt(path_num), p_mean + 1.96 * p_std / math.sqrt(path_num))
    return p_mean, p_std, p_confmc
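A hedged usage sketch (the parameter values are mine): pricing a one-year at-the-money European call with a 5% rate and 20% volatility over 50 time steps; the estimate should land near the Black-Scholes reference value of about 10.45.

mean, std, conf = standardMC_european_option(K=100.0, T=1.0, R=0.05, V=0.2,
                                             S0=100.0, N=50, option_type=1.0)
print(mean, conf)  # mean close to 10.45; conf is the 95% interval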
Example 3: optimize_hyperparameters
def optimize_hyperparameters(self, samples=5, step=3.0):
    # Slice-sample the two hyperparameters in log space.
    old_hyper_parameters = [math.log(self._alpha_alpha), math.log(self._alpha_beta)]
    for ii in range(samples):
        log_likelihood_old = self.compute_likelihood(self._alpha_alpha, self._alpha_beta)
        log_likelihood_new = math.log(random.random()) + log_likelihood_old
        #print("OLD: %f\tNEW: %f at (%f, %f)" % (log_likelihood_old, log_likelihood_new, self._alpha_alpha, self._alpha_beta))

        l = [x - random.random() * step for x in old_hyper_parameters]
        r = [x + step for x in old_hyper_parameters]

        for jj in range(self._alpha_maximum_iteration):
            new_hyper_parameters = [l[x] + random.random() * (r[x] - l[x]) for x in range(len(old_hyper_parameters))]
            trial_alpha, trial_beta = [math.exp(x) for x in new_hyper_parameters]
            lp_test = self.compute_likelihood(trial_alpha, trial_beta)

            if lp_test > log_likelihood_new:
                self._alpha_alpha = math.exp(new_hyper_parameters[0])
                self._alpha_beta = math.exp(new_hyper_parameters[1])
                #self._alpha_sum = self._alpha_alpha * self._K
                #self._beta_sum = self._alpha_beta * self._number_of_language_types
                old_hyper_parameters = [math.log(self._alpha_alpha), math.log(self._alpha_beta)]
                break
            else:
                # shrink the sampling interval toward the current point
                for dd in range(len(new_hyper_parameters)):
                    if new_hyper_parameters[dd] < old_hyper_parameters[dd]:
                        l[dd] = new_hyper_parameters[dd]
                    else:
                        r[dd] = new_hyper_parameters[dd]
                    assert l[dd] <= old_hyper_parameters[dd]
                    assert r[dd] >= old_hyper_parameters[dd]

        print("\nNew hyperparameters (%i): %f %f" % (jj, self._alpha_alpha, self._alpha_beta))
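This is a textbook slice-sampling move in log space: log_likelihood_new sets a random threshold below the current likelihood, a bracket [l, r] is placed around the current point, and every rejected proposal shrinks the bracket toward that point, so a proposal is eventually accepted or the iteration budget runs out.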
Example 4: _guinier_porod
def _guinier_porod(self, x):
    """
    Guinier-Porod Model
    """
    # parameters (sqrt and exp are assumed imported from math)
    G = self.params['scale']
    s = self.params['dim']
    Rg = self.params['rg']
    m = self.params['m']
    bgd = self.params['background']
    n = 3.0 - s
    qval = x
    # take care of the singular points
    if Rg <= 0.0:
        return bgd
    if (n - 3.0 + m) <= 0.0:
        return bgd
    # do the calculation and return the function value
    q1 = sqrt((n - 3.0 + m) * n / 2.0) / Rg
    if qval < q1:
        # Guinier regime (low q)
        F = (G / pow(qval, (3.0 - n))) * exp((-qval * qval * Rg * Rg) / n)
    else:
        # Porod regime (high q)
        F = (G / pow(qval, m)) * exp(-(n - 3.0 + m) / 2.0) * pow(((n - 3.0 + m) * n / 2.0),
                                                                 ((n - 3.0 + m) / 2.0)) / pow(Rg, (n - 3.0 + m))
    inten = F + bgd
    return inten
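The same model can be exercised outside its class; here is a minimal standalone sketch with the parameters passed explicitly (the default values are illustrative, not from the original):

import math

def guinier_porod(q, G=1.0, s=1.0, Rg=100.0, m=3.0, bgd=0.0):
    n = 3.0 - s
    if Rg <= 0.0 or (n - 3.0 + m) <= 0.0:
        return bgd
    q1 = math.sqrt((n - 3.0 + m) * n / 2.0) / Rg  # Guinier/Porod crossover point
    if q < q1:
        return (G / q ** (3.0 - n)) * math.exp(-q * q * Rg * Rg / n) + bgd
    return (G / q ** m) * math.exp(-(n - 3.0 + m) / 2.0) \
        * ((n - 3.0 + m) * n / 2.0) ** ((n - 3.0 + m) / 2.0) / Rg ** (n - 3.0 + m) + bgd

print(guinier_porod(0.005), guinier_porod(0.05))  # Guinier regime vs. Porod regime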
Example 5: invgammapdf
import math
import numpy as np

def invgammapdf(x, alpha, beta):
    """Inverse-gamma density, evaluated elementwise for array input."""
    alpha = float(alpha)
    beta = float(beta)
    if not np.isscalar(x):
        return (beta**alpha / math.gamma(alpha)) * np.array([(xi**(-alpha - 1)) * math.exp(-beta/xi) for xi in x])
    else:
        return (beta**alpha / math.gamma(alpha)) * (x**(-alpha - 1)) * math.exp(-beta/x)
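As a sanity check (assuming SciPy is available), the density should match scipy.stats.invgamma with beta passed as the scale parameter:

from scipy import stats

print(invgammapdf(2.0, alpha=3.0, beta=4.0))    # 0.2706705664732254
print(stats.invgamma.pdf(2.0, 3.0, scale=4.0))  # same value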
Example 6: get_decision_given_context
def get_decision_given_context(theta, type, decision, context):
    global cache_normalizing_decision, feature_index, source_to_target_firing, model1_probs, ets
    m1_event_prob = model1_probs.get((decision, context), 0.0)
    fired_features = get_wa_features_fired(type=type, decision=decision, context=context,
                                           dictionary_features=dictionary_features, ishybrid=True)
    theta_dot_features = sum([theta[feature_index[f]] * f_wt for f_wt, f in fired_features])
    numerator = m1_event_prob * exp(theta_dot_features)
    if (type, context) in cache_normalizing_decision:
        denom = cache_normalizing_decision[type, context]
    else:
        # sum the unnormalized scores of every decision that can fire in this
        # context, then cache the normalizer for reuse
        denom = ets[context]
        target_firings = source_to_target_firing.get(context, set([]))
        for tf in target_firings:
            m1_tf_event_prob = model1_probs.get((tf, context), 0.0)
            tf_fired_features = get_wa_features_fired(type=type, decision=tf, context=context,
                                                      dictionary_features=dictionary_features, ishybrid=True)
            tf_theta_dot_features = sum([theta[feature_index[f]] * f_wt for f_wt, f in tf_fired_features])
            denom += m1_tf_event_prob * exp(tf_theta_dot_features)
        cache_normalizing_decision[type, context] = denom
    try:
        log_prob = log(numerator) - log(denom)
    except ValueError:
        print(numerator, denom, decision, context, m1_event_prob, theta_dot_features)
        raise
    return log_prob
Example 7: hierarchy_dist
def hierarchy_dist(synset_1, synset_2):
    """
    Return a measure of depth in the ontology to model the fact that
    nodes closer to the root are broader and have less semantic similarity
    than nodes further away from the root.
    """
    h_dist = sys.maxsize
    if synset_1 is None or synset_2 is None:
        return h_dist
    if synset_1 == synset_2:
        # return the depth of one of synset_1 or synset_2
        h_dist = max([x[1] for x in synset_1.hypernym_distances()])
    else:
        # find the max depth of the least common subsumer
        hypernyms_1 = {x[0]: x[1] for x in synset_1.hypernym_distances()}
        hypernyms_2 = {x[0]: x[1] for x in synset_2.hypernym_distances()}
        lcs_candidates = set(hypernyms_1.keys()).intersection(
            set(hypernyms_2.keys()))
        if len(lcs_candidates) > 0:
            lcs_dists = []
            for lcs_candidate in lcs_candidates:
                lcs_d1 = 0
                if lcs_candidate in hypernyms_1:
                    lcs_d1 = hypernyms_1[lcs_candidate]
                lcs_d2 = 0
                if lcs_candidate in hypernyms_2:
                    lcs_d2 = hypernyms_2[lcs_candidate]
                lcs_dists.append(max([lcs_d1, lcs_d2]))
            h_dist = max(lcs_dists)
        else:
            h_dist = 0
    # beta is a module-level scaling constant
    return ((math.exp(beta * h_dist) - math.exp(-beta * h_dist)) /
            (math.exp(beta * h_dist) + math.exp(-beta * h_dist)))
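A small observation (not in the original): the return expression is exactly the hyperbolic tangent of beta * h_dist, so math.tanh gives the same value and cannot overflow for deep hierarchies:

import math

beta = 0.45   # an assumed value for the scaling constant
h_dist = 3
manual = ((math.exp(beta * h_dist) - math.exp(-beta * h_dist)) /
          (math.exp(beta * h_dist) + math.exp(-beta * h_dist)))
print(manual, math.tanh(beta * h_dist))  # identical results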
Example 8: update_spins
def update_spins(spins, i, r):
    # Heat-bath style update for site i of a 1-D Ising chain with periodic
    # boundaries; hs, J and N are module-level globals, r is uniform in [0, 1).
    current_energy = spins[i] * (hs[i] + J * (spins[(i - 1) % N] + spins[(i + 1) % N]))
    prop_energy = -current_energy
    p_prop = exp(-prop_energy) / (exp(-current_energy) + exp(-prop_energy))
    # print("p_prop:", p_prop)
    if r < p_prop:
        spins[i] *= -1
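A minimal driver for the update, assuming the globals it relies on (hs, J, N) live in the same module; the values below are placeholders chosen just to make the sketch run:

import random
from math import exp

N = 10                      # chain length
J = 1.0                     # coupling constant
hs = [0.0] * N              # local fields
spins = [random.choice([-1, 1]) for _ in range(N)]

for step in range(1000):
    update_spins(spins, random.randrange(N), random.random())
print(spins)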
Example 9: log_add
import math

M_LN2 = math.log(2)  # ln 2, the result when both operands are equal

def log_add(left, right):
    if right < left:
        return left + math.log1p(math.exp(right - left))
    elif right > left:
        return right + math.log1p(math.exp(left - right))
    else:
        return left + M_LN2
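Compared with logadd in Example 1, using math.log1p preserves tiny increments that log(1 + z) rounds away:

print(log_add(0.0, -50.0))  # ~1.93e-22, the true increment over 0.0
print(logadd(0.0, -50.0))   # 0.0: log(1 + exp(-50)) rounds to log(1.0)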
Example 10: B
import math

def B(x):
    y = None
    # 'Neopredelen' is Russian for 'undefined': the expression has no value here
    if math.sin(x / (x**2 + 2)) + math.exp(math.log1p(x) + 1) == 0 or x == 0:
        y = 'Neopredelen'
    else:
        y = (1 / (math.sin(x / (x**2 + 2)) + math.exp(math.log1p(x) + 1))) - 1
    return y
Example 11: f_active
def f_active(self, x):
    if self.use_sigmod:
        # logistic sigmoid, range (0, 1); self.shim scales the slope
        return 1.0 / (math.exp(-x * self.shim) + 1.0)
    else:
        # tanh(x * self.shim), range (-1, 1)
        return 1.0 - 2 / (math.exp(2 * x * self.shim) + 1)
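The else branch is algebraically the hyperbolic tangent: 1 - 2/(e^(2x) + 1) = tanh(x). A standalone sketch with the scale factor as an explicit argument (the name slope is mine):

import math

def sigmoid(x, slope=1.0):
    return 1.0 / (math.exp(-x * slope) + 1.0)       # range (0, 1)

def tanh_active(x, slope=1.0):
    return 1.0 - 2 / (math.exp(2 * x * slope) + 1)  # range (-1, 1)

print(sigmoid(0.0), tanh_active(0.0))    # 0.5 0.0
print(tanh_active(1.0), math.tanh(1.0))  # identical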
Example 12: trainBoost
import math
import numpy as np

def trainBoost(X, labels, T=5, covdiag=True):
    N = len(X)
    C = len(set(labels))
    d = len(X[0])
    priors = np.zeros(shape=(T, C))
    mus = np.zeros(shape=(T, C, d))
    sigmas = np.zeros(shape=(T, d, d, C))
    alphas = np.zeros(T)
    W = np.ones(N) / N
    for t in range(T - 1):
        mus[t], sigmas[t] = mlParams(X, labels, W)
        priors[t] = computePrior(labels, W)
        delta = computeDelta(X, priors[t], mus[t], sigmas[t], labels, covdiag)
        error = sum([W[i] * (1 - delta[i]) for i in range(N)])
        if error == 0:
            error = 1e-6  # Prevent log(0)
        alphas[t] = (np.log(1 - error) - np.log(error)) / 2
        # AdaBoost reweighting: shrink correctly classified points, grow the rest
        W = np.array([W[i] * math.exp(-alphas[t]) if delta[i] else W[i] * math.exp(alphas[t]) for i in range(N)])
        W /= sum(W)
    # fit the final round with the last weights (t is T-2 after the loop)
    t += 1
    mus[t], sigmas[t] = mlParams(X, labels, W)
    priors[t] = computePrior(labels, W)
    delta = computeDelta(X, priors[t], mus[t], sigmas[t], labels, covdiag)
    error = sum([W[i] * (1 - delta[i]) for i in range(N)])  # weighted misclassification, as in the loop above
    alphas[t] = (np.log(1 - error) - np.log(error)) / 2
    return priors, mus, sigmas, alphas
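Aside (not stated in the original): this is the standard AdaBoost schedule. The weak-learner weight is alpha_t = 0.5 * log((1 - error_t) / error_t), and each sample weight is multiplied by exp(-alpha_t) when classified correctly or exp(alpha_t) on a mistake, then renormalized so W remains a distribution.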
Example 13: toKepler
import math
import numpy as np

def toKepler(u, which='Pueyo', mass=1, referenceTime=None):
    """
    Convert the sampled parameter vector u into Keplerian orbital
    elements, according to the chosen parametrization.
    """
    if which == 'Pueyo':
        res = np.zeros(6)
        res[1] = u[1]
        res[5] = u[5]
        res[0] = semimajoraxis(math.exp(u[0]), starMass=mass)
        res[2] = math.degrees(math.acos(u[2]))
        res[3] = np.mod((u[3] - u[4]) * 0.5, 360)
        res[4] = np.mod((u[3] + u[4]) * 0.5, 360)
        return res
    elif which == 'alternative':
        res = np.zeros(6)
        res[1] = u[1]
        res[5] = u[5]
        res[0] = semimajoraxis(math.exp(u[0]), starMass=mass)
        res[2] = math.degrees(math.acos(u[2]))
        res[3] = u[3]
        res[4] = u[4]
        return res
    elif which == 'Chauvin':
        stat = StatisticsMCMC()
        res = stat.xFROMu(u, referenceTime, mass)
        return res
    return None
Example 14: QExp
def QExp(self, qid, query, lDoc):
    hEntityScore = {}  # ObjId -> pseudo-relevance-feedback score
    for doc in lDoc:
        if doc.DocNo not in self.hDocKg:
            continue
        hDocEntity = self.hDocKg[doc.DocNo]
        for ObjId, score in hDocEntity.items():
            score += doc.score  # log(a) + log(b)
            if ObjId not in hEntityScore:
                hEntityScore[ObjId] = math.exp(score)
            else:
                hEntityScore[ObjId] += math.exp(score)
    # keep the top-scoring expansion entities and L1-normalize their scores
    lEntityScore = sorted(hEntityScore.items(), key=lambda item: item[1], reverse=True)
    lEntityScore = lEntityScore[:self.NumOfExpEntity]
    Z = sum([item[1] for item in lEntityScore])
    if Z == 0:
        lEntityScore = []
    else:
        lEntityScore = [[item[0], item[1] / float(Z)] for item in lEntityScore]
    logging.info(
        '[%s][%s] exp entity: %s',
        qid,
        query,
        json.dumps(lEntityScore)
    )
    return lEntityScore
Example 15: goodTuringCalculations
def goodTuringCalculations(bigTallyInSentence, sentenceNo, vocabulary):
    # Initialize the log probability to 0
    sentenceProb = 0
    # print(keepCount)
    f = open("s" + str(sentenceNo) + "GT.txt", "w")
    for key, value in bigTallyInSentence.items():
        if 0 == bigramOccurrences[key]:
            # unseen bigram: Good-Turing mass of the count-1 bigrams
            sentenceProb += math.log(keepCount[1]) - math.log(len(bigrams))
            f.write(str((key, str(math.log(keepCount[1]) - math.log(len(bigrams))))))
        elif bigramOccurrences[key] > 5:
            # frequent bigram: add-one-smoothed conditional probability
            sentenceProb += math.log(value + 1) - math.log(vocabulary[str(key[0])] + len(vocabulary))
            f.write(str((key, str(math.exp(math.log(value + 1) - math.log(vocabulary[str(key[0])] + len(vocabulary)))))))
    f.close()
    print("The probability of sentence " + str(sentenceNo) + " is: " + str(math.exp(sentenceProb)))
    return math.exp(sentenceProb)