This article collects and summarizes typical usage examples of the numpy.logaddexp function in Python. If you are wondering what exactly logaddexp does or how to use it, the curated code examples here may help.
Below are 15 code examples of the logaddexp function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
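As background for the examples, here is a minimal, self-contained sketch of what np.logaddexp computes: log(exp(x) + exp(y)), evaluated so the intermediate exponentials never underflow or overflow. Its ufunc reduce method performs a log-space sum along an axis, a pattern most of the examples below rely on.

import numpy as np

x, y = -1000.0, -1001.0
naive = np.log(np.exp(x) + np.exp(y))   # exp underflows to 0, giving -inf
stable = np.logaddexp(x, y)             # approx -999.687, computed safely

# log-space sum over an axis: log(sum(exp(a), axis=0))
a = np.log(np.array([[0.1, 0.2], [0.3, 0.4]]))
col_logsum = np.logaddexp.reduce(a, axis=0)  # log([0.4, 0.6])
print(naive, stable, np.exp(col_logsum))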
Example 1: messages_backwards
def messages_backwards(self):
    'approximates duration tails at indices > trunc with geometric tails'
    aDl, aDsl, Al = self.aDl, self.aDsl, np.log(self.trans_matrix)
    trunc = self.trunc if self.trunc is not None else self.T
    T, state_dim = aDl.shape
    assert trunc > 1

    aBl = self.aBl/self.temp if self.temp is not None else self.aBl
    hmm_betal = HMMStatesEigen._messages_backwards(self._get_hmm_transition_matrix(), aBl)
    assert not np.isnan(hmm_betal).any()

    betal = np.zeros((T, state_dim), dtype=np.float64)
    betastarl = np.zeros_like(betal)

    for t in xrange(T-1, -1, -1):
        np.logaddexp.reduce(betal[t:t+trunc] + self.cumulative_likelihoods(t, t+trunc)
                            + aDl[:min(trunc, T-t)], axis=0, out=betastarl[t])
        if t + trunc < T:
            np.logaddexp(betastarl[t], self.likelihood_block(t, t+trunc+1) + aDsl[trunc-1]
                         + hmm_betal[t+trunc], out=betastarl[t])
        if T - t < trunc and self.right_censoring:
            np.logaddexp(betastarl[t], self.likelihood_block(t, None) + aDsl[T-t-1], betastarl[t])
        np.logaddexp.reduce(betastarl[t] + Al, axis=1, out=betal[t-1])
    betal[-1] = 0.  # the t=0 iteration above wrote betal[-1]; reset it

    return betal, betastarl
Example 2: calc_log_proba_mod
def calc_log_proba_mod(peptide, domain, sequence):
    """
    Compute the log of the updated probability.
    For numerical stability, the sum A + B is evaluated in log space as
    log(exp(logA) + exp(logB)), i.e. np.logaddexp(logA, logB).
    """
    ix = PDZ_Data.domain_names.index(domain.name)
    alpha = PDZ_Data.fp_interaction_matrix[peptide.name][ix]
    score = eval_score(domain, sequence, 0)
    z_1 = log_modified(score)
    z_2 = log_modified(-1.0*score)

    if alpha > 0:
        a = peptide.posterior_matrix[1, 1]
        x = np.log(a) - z_1
        b = peptide.posterior_matrix[1, 0]
        y = np.log(b) - z_2
        result = np.logaddexp(x, y)
    else:
        a = peptide.posterior_matrix[0, 1]
        x = np.log(a) - z_1
        b = peptide.posterior_matrix[0, 0]
        y = np.log(b) - z_2
        result = np.logaddexp(x, y)
    return result * -1.0
Example 3: compute_score_vect
def compute_score_vect(self, bin_n, mu_vect, sigmasq_vect, pi_vect):
    bin_edges = np.arange(bin_n + 1, dtype=np.float32) / (bin_n + 1)
    bin_centers = bin_edges[:-1] + 1./bin_n
    score_bins = np.zeros(bin_n)

    # compute the prob for each bin
    K = len(pi_vect)
    dp_per_comp_scores = np.zeros((bin_n, K), dtype=np.float32)
    for k in range(K):
        mu = mu_vect[k]
        sigmasq = sigmasq_vect[k]
        pi = pi_vect[k]
        dp_per_comp_scores[:, k] = irm.util.log_norm_dens(bin_centers, mu, sigmasq)
        dp_per_comp_scores[:, k] += np.log(pi)

    scores = dp_per_comp_scores[:, 0]
    for k in range(1, K):
        scores = np.logaddexp(scores, dp_per_comp_scores[:, k])

    # normalize
    score_total = scores[0]
    for i in range(1, bin_n):
        score_total = np.logaddexp(score_total, scores[i])
    scores -= score_total
    return scores
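Both accumulation loops above are log-sum-exp reductions, so they can be replaced by np.logaddexp.reduce along the appropriate axis. A self-contained sketch with a stand-in score matrix (shapes match the function above):

import numpy as np

rng = np.random.default_rng(0)
dp_per_comp_scores = rng.normal(size=(10, 3))         # stand-in (bin_n=10, K=3)
scores = np.logaddexp.reduce(dp_per_comp_scores, axis=1)  # sum over components
scores -= np.logaddexp.reduce(scores)                 # normalize: exp(scores) sums to 1
assert np.isclose(np.exp(scores).sum(), 1.0)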
Example 4: softmax_loss2
def softmax_loss2(props, lbls, mask=None):
    grdts = dict()
    err = 0
    for name, prop in props.iteritems():
        # make sure that it is the output of a binary classifier
        assert(prop.shape[0]==2)
        print "original prop: ", prop

        # rebase the prop for numerical stability
        # mathematically, this does not affect the softmax result!
        # http://ufldl.stanford.edu/tutorial/supervised/SoftmaxRegression/
        # prop = prop - np.max(prop)
        propmax = np.max(prop, axis=0)
        prop[0,:,:,:] -= propmax
        prop[1,:,:,:] -= propmax

        log_softmax = np.empty(prop.shape, dtype=prop.dtype)
        log_softmax[0,:,:,:] = prop[0,:,:,:] - np.logaddexp(prop[0,:,:,:], prop[1,:,:,:])
        log_softmax[1,:,:,:] = prop[1,:,:,:] - np.logaddexp(prop[0,:,:,:], prop[1,:,:,:])
        prop = np.exp(log_softmax)
        props[name] = prop

        lbl = lbls[name]
        grdts[name] = prop - lbl
        err = err + np.sum(-lbl * log_softmax)
        print "gradient: ", grdts[name]
        assert(not np.any(np.isnan(grdts[name])))
    return (props, err, grdts)
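For the binary case above, subtracting np.logaddexp of the two score maps is itself a complete stable log-softmax, since logaddexp performs its own internal rebasing; the explicit max subtraction is optional. A minimal standalone sketch (names illustrative):

import numpy as np

def binary_log_softmax(p0, p1):
    # log-softmax over two score arrays; logaddexp is overflow-safe,
    # so no separate max-rebase step is required
    norm = np.logaddexp(p0, p1)   # log(exp(p0) + exp(p1))
    return p0 - norm, p1 - norm

ls0, ls1 = binary_log_softmax(np.array([1000.0]), np.array([1001.0]))
assert np.isclose(np.exp(ls0) + np.exp(ls1), 1.0)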
Example 5: forward_backward
def forward_backward(node_potentials, edge_potentials):
    H, N = node_potentials.shape
    forward = -1000.0 * np.ones([H, N], dtype=float)
    backward = -1000.0 * np.ones([H, N], dtype=float)
    forward[:, 0] = np.log(node_potentials[:, 0])

    ## Forward loop
    for pos in xrange(1, N):
        for current_state in xrange(H):
            for prev_state in xrange(H):
                forward_v = forward[prev_state, pos-1]
                trans_v = np.log(edge_potentials[prev_state, current_state, pos-1])
                logprob = forward_v + trans_v
                forward[current_state, pos] = np.logaddexp(forward[current_state, pos], logprob)
            forward[current_state, pos] += np.log(node_potentials[current_state, pos])

    ## Backward loop
    backward[:, N-1] = 0.0  # log(1) = 0
    for pos in xrange(N-2, -1, -1):
        for current_state in xrange(H):
            logprob = -1000.0
            for next_state in xrange(H):
                back = backward[next_state, pos+1]
                trans = np.log(edge_potentials[current_state, next_state, pos])
                observation = np.log(node_potentials[next_state, pos+1])
                logprob = np.logaddexp(logprob, trans + observation + back)
            backward[current_state, pos] = logprob
    #sanity_check_forward_backward(forward,backward)
    #print forward, backward
    return np.exp(forward), np.exp(backward)
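The two inner state loops of the forward pass form a log-space matrix-vector product, which collapses to one logaddexp.reduce call per position. A self-contained sketch with random potentials (shapes as in the function above):

import numpy as np

H, N = 3, 5
rng = np.random.default_rng(0)
node_potentials = rng.uniform(0.1, 1.0, size=(H, N))
edge_potentials = rng.uniform(0.1, 1.0, size=(H, H, N - 1))

forward = np.empty((H, N))
forward[:, 0] = np.log(node_potentials[:, 0])
for pos in range(1, N):
    # log-sum-exp over prev_state replaces the two inner loops
    forward[:, pos] = np.logaddexp.reduce(
        forward[:, pos - 1][:, None] + np.log(edge_potentials[:, :, pos - 1]),
        axis=0) + np.log(node_potentials[:, pos])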
Example 6: hsmm_messages_backwards_log
def hsmm_messages_backwards_log(
        trans_potentials, initial_state_potential,
        cumulative_obs_potentials, dur_potentials, dur_survival_potentials,
        betal, betastarl,
        left_censoring=False, right_censoring=True):
    errs = np.seterr(invalid='ignore')  # logaddexp(-inf,-inf)

    T, _ = betal.shape

    betal[-1] = 0.
    for t in xrange(T-1, -1, -1):
        cB, offset = cumulative_obs_potentials(t)
        dp = dur_potentials(t)
        np.logaddexp.reduce(betal[t:t+cB.shape[0]] + cB + dp,
                            axis=0, out=betastarl[t])
        betastarl[t] -= offset
        if right_censoring:
            np.logaddexp(betastarl[t], cB[-1] - offset + dur_survival_potentials(t),
                         out=betastarl[t])
        np.logaddexp.reduce(betastarl[t] + trans_potentials(t-1),
                            axis=1, out=betal[t-1])
    betal[-1] = 0.  # overwritten on the last loop iteration; reset

    if not left_censoring:
        normalizer = np.logaddexp.reduce(initial_state_potential + betastarl[0])
    else:
        raise NotImplementedError

    np.seterr(**errs)
    return betal, betastarl, normalizer
Example 7: ctc_loss
def ctc_loss(label, prob, remainder, seq_length, batch_size, num_gpu=1, big_num=1e10):
    label_ = [0, 0]
    prob[prob < 1 / big_num] = 1 / big_num
    log_prob = np.log(prob)
    l = len(label)
    for i in range(l):
        label_.append(int(label[i]))
        label_.append(0)
    l_ = 2 * l + 1
    a = np.full((seq_length, l_ + 1), -big_num)
    a[0][1] = log_prob[remainder][0]
    a[0][2] = log_prob[remainder][label_[2]]
    for i in range(1, seq_length):
        row = i * int(batch_size / num_gpu) + remainder
        a[i][1] = a[i - 1][1] + log_prob[row][0]
        a[i][2] = np.logaddexp(a[i - 1][2], a[i - 1][1]) + log_prob[row][label_[2]]
        for j in range(3, l_ + 1):
            a[i][j] = np.logaddexp(a[i - 1][j], a[i - 1][j - 1])
            if label_[j] != 0 and label_[j] != label_[j - 2]:
                a[i][j] = np.logaddexp(a[i][j], a[i - 1][j - 2])
            a[i][j] += log_prob[row][label_[j]]
    return -np.logaddexp(a[seq_length - 1][l_], a[seq_length - 1][l_ - 1])
Example 8: compute_weights
def compute_weights(data, Nlive):
    """Returns log_ev, log_wts for the log-likelihood samples in data,
    assumed to be a result of nested sampling with Nlive live points."""
    start_data = data[:-Nlive]
    end_data = data[-Nlive:]

    log_wts = zeros(data.shape[0])

    log_vol_factor = log1p(-1.0/Nlive)
    log_dvol = -1.0/Nlive

    log_vol = 0.0
    log_ev = -float('inf')
    for i, log_like in enumerate(start_data):
        # Volume associated with this likelihood = Vol/Nlive:
        log_this_vol = log_vol + log_dvol
        log_wts[i] = log_like + log_this_vol
        log_ev = logaddexp(log_ev, log_wts[i])
        log_vol += log_vol_factor

    avg_log_like_end = -float('inf')
    for i, log_l in enumerate(end_data):
        avg_log_like_end = logaddexp(avg_log_like_end, log_l)
    avg_log_like_end -= log(Nlive)

    # Each remaining live point contributes (Vol/Nlive)*like to the
    # integral, but has posterior weight Vol relative to the other samples.
    log_wts[-Nlive:] = log_vol + end_data

    log_ev = logaddexp(log_ev, avg_log_like_end + log_vol)

    log_wts -= log_ev

    return log_ev, log_wts
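A usage sketch, under the assumptions that the function above is defined in the same module (with zeros, log1p, logaddexp, and log imported from numpy, as its unqualified names imply) and that data holds log-likelihoods in the ascending order a nested sampler discards them, with the last Nlive entries being the final live points. The values here are synthetic and purely illustrative:

import numpy as np
from numpy import zeros, log1p, logaddexp, log  # names compute_weights uses unqualified

rng = np.random.default_rng(1)
Nlive = 100
# synthetic ascending log-likelihoods standing in for a real run
data = np.sort(rng.normal(loc=-50.0, scale=5.0, size=1000))

log_ev, log_wts = compute_weights(data, Nlive)
print("log-evidence estimate:", log_ev)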
Example 9: test_transition_probabilities
def test_transition_probabilities(hm):
    alpha = hm.forward()
    beta = hm.backward()
    gamma = hm.state_probs(alpha, beta)
    xi = hm.bw(alpha, beta)
    trans = hm.transition_probabilities(xi, gamma)
    iter_trans = []
    for i in range(len(hm.hidden_states)):
        row = []
        for j in range(len(hm.hidden_states)):
            num = np.NINF
            den = np.NINF
            for seq in range(len(hm.observations)):
                num_seq = xi[seq][0][i][j]
                den_seq = gamma[seq][0][i]
                for o in range(1, len(hm.observations[seq]) - 1):
                    # xi is the probability of being in state i at
                    # time t and state j at time t+1
                    num_seq = np.logaddexp(num_seq, xi[seq][o][i][j])
                    # gamma is the probability of being in state i at time t
                    den_seq = np.logaddexp(den_seq, gamma[seq][o][i])
                # add the current sequence contribution to the totals
                num = np.logaddexp(num, num_seq)
                den = np.logaddexp(den, den_seq)
            row.append(np.exp(num - den))
        iter_trans.append(row)
    assert iter_trans == approx(trans)
Example 10: equilibrium_concentrations
def equilibrium_concentrations(cls, DeltaG, Ptot, Ltot):
    """
    Compute equilibrium concentrations for simple two-component association.

    Parameters
    ----------
    DeltaG : float
        Reduced free energy of binding (in units of kT)
    Ptot : float or numpy array
        Total protein concentration summed over bound and unbound species, molarity.
    Ltot : float or numpy array
        Total ligand concentration summed over bound and unbound species, molarity.

    Returns
    -------
    P : float or numpy array with same dimensions as Ptot
        Free protein concentration, molarity.
    L : float or numpy array with same dimensions as Ptot
        Free ligand concentration, molarity.
    PL : float or numpy array with same dimensions as Ptot
        Bound complex concentration, molarity.
    """
    # Original form:
    #Kd = np.exp(DeltaG)
    #sqrt_arg = (Ptot + Ltot + Kd)**2 - 4*Ptot*Ltot
    #sqrt_arg[sqrt_arg < 0.0] = 0.0
    #PL = 0.5 * ((Ptot + Ltot + Kd) - np.sqrt(sqrt_arg))  # complex concentration (M)

    # Numerically stable variant?
    logP = np.log(Ptot)
    logL = np.log(Ltot)
    logPLK = np.logaddexp(np.logaddexp(logP, logL), DeltaG)
    PLK = np.exp(logPLK)
    sqrt_arg = 1.0 - np.exp(np.log(4.0) + logP + logL - 2*logPLK)
    sqrt_arg[sqrt_arg < 0.0] = 0.0  # ensure always positive
    PL = 0.5 * PLK * (1.0 - np.sqrt(sqrt_arg))  # complex concentration (M)

    # Another variant
    #PL = 2*Ptot*Ltot / ((Ptot+Ltot+Kd) + np.sqrt((Ptot + Ltot + Kd)**2 - 4*Ptot*Ltot))  # complex concentration (M)
    # Yet another numerically stable variant?
    #logPLK = np.logaddexp(np.log(Ptot + Ltot), DeltaG)
    #PLK = np.exp(logPLK)
    #xy = np.exp(np.log(Ptot) + np.log(Ltot) - 2.0*logPLK)
    #chi = 1.0 - 4.0 * xy
    #chi[chi < 0.0] = 0.0  # prevent square roots of negative numbers
    #PL = 0.5 * PLK * (1 - np.sqrt(chi))

    # Ensure all concentrations are within limits, correcting cases where
    # numerical issues cause problems.
    PL[PL < 0.0] = 0.0  # complex cannot have negative concentration
    #PL_max = np.minimum(Ptot, Ltot)
    #indices = np.where(PL > PL_max)
    #PL[indices] = PL_max[indices]

    # Compute remaining concentrations.
    P = Ptot - PL  # free protein concentration in sample cell after n injections (M)
    L = Ltot - PL  # free ligand concentration in sample cell after n injections (M)
    return [P, L, PL]
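A quick numerical sanity check of the stable variant, assuming the method is defined on some class (BindingModel here is a hypothetical stand-in): at equilibrium the law of mass action gives P*L/PL = Kd = exp(DeltaG).

import numpy as np

DeltaG = np.log(1e-6)               # Kd = 1 uM, expressed in kT units
Ptot = np.array([1e-6, 1e-5])       # total protein, molar
Ltot = np.array([2e-6, 2e-6])       # total ligand, molar

P, L, PL = BindingModel.equilibrium_concentrations(DeltaG, Ptot, Ltot)
assert np.allclose(P * L / PL, np.exp(DeltaG))   # law of mass action holds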
Example 11: log_proba
def log_proba(self, kde, value, period=None):
    if period is None:
        return kde.score([[value]])
    else:
        # sum the density at value and its two periodic images, in log space
        values = kde.score_samples([[value], [value+period], [value-period]])
        total = np.logaddexp(values[0], values[1])
        total = np.logaddexp(total, values[2])
        return total
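The two chained pairwise calls are equivalent to a single reduction over the three evaluation points; a sketch with stand-in scores:

import numpy as np

values = np.array([-2.3, -5.1, -4.8])  # stand-ins for the three kde scores
total = np.logaddexp.reduce(values)    # log(p(v) + p(v+T) + p(v-T))
assert np.isclose(total, np.logaddexp(np.logaddexp(values[0], values[1]), values[2]))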
Example 12: compute_visible_llr
def compute_visible_llr(self, data):
    background = data.frames.background
    ball = data.frames.ball
    ball.present_ll_c[...] = numpy.logaddexp(ball.color_analysis.ll, self.occlusion_analyzer.occluded_lpr)
    ball.absent_ll_c[...] = data.background.q_estimation * numpy.logaddexp(background.color_analysis.ll, self.occlusion_analyzer.occluded_lpr)
    ball.present_llr[...] = numpy.logaddexp(ball.present_ll_c - ball.absent_ll_c + self.visible_lpr, ball.absent_ll_c)
Example 13: pixel_space_information_gain
def pixel_space_information_gain(self, baseline, gold_standard, stimulus, eps=1e-20):
    log_p_gold = gold_standard.log_density(stimulus)
    log_p_baseline = baseline.log_density(stimulus)
    log_p_model = self.log_density(stimulus)
    p_gold = np.exp(log_p_gold)
    p_gold[p_gold == 0] = p_gold[p_gold > 0].min()
    ig = (p_gold)*(np.logaddexp(log_p_model, np.log(eps)) - np.logaddexp(log_p_baseline, np.log(eps)))
    return ig
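The logaddexp with log(eps) here acts as a smooth floor: logaddexp(log p, log eps) = log(p + eps), which keeps the integrand finite where a density is exactly zero. A small illustration with made-up densities:

import numpy as np

eps = 1e-20
with np.errstate(divide='ignore'):
    log_p = np.log(np.array([0.0, 1e-30, 0.5]))  # includes an exact zero (-inf)
floored = np.logaddexp(log_p, np.log(eps))       # log(p + eps), always finite
print(np.exp(floored))                           # approx [1e-20, 1e-20, 0.5]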
Example 14: compute_sig_PPsi
def compute_sig_PPsi(self, layers, y_possible, mode="None"):
    #print self.Prob[layers]
    update_Psi_o = np.zeros(self.w_o.shape)
    update_Psi_t = np.zeros(self.w_t.shape)
    update_Psi = np.zeros(self.w.shape)
    CRF_Prob = 0
    if mode == "Start":
        log_current_Prob = np.dot(self.w_o[y_possible], self.x[layers]/(self.SEQ_LENGTH)) + self.w_t[self.y_class][y_possible]/self.SEQ_LENGTH
        update_Psi_o[y_possible] = log_current_Prob + np.log(self.x[layers]) - np.log(self.SEQ_LENGTH)
        update_Psi_t[self.y_class][y_possible] = log_current_Prob - np.log(self.SEQ_LENGTH)
        CRF_Prob = log_current_Prob
    elif mode == "End":
        log_current_Prob = self.w_t.T[self.y_class][:-1]
        update_Psi = log_current_Prob + self.CRF_Psi[layers][:]
        update_Psi_t.T[self.y_class] = log_current_Prob - (self.SEQ_LENGTH)
        CRF_Prob = log_current_Prob + self.CRF_Prob[layers][:]
    else:
        log_current_Prob = np.dot(self.w_o[y_possible], self.x[layers]/(self.SEQ_LENGTH)) + self.w_t.T[y_possible][:-1]
        update_Psi = log_current_Prob + self.CRF_Psi[layers][:]
        update_Psi_t.T[y_possible][:] = log_current_Prob - np.log(self.SEQ_LENGTH)
        update_Psi_o[y_possible] = log_current_Prob + np.log(self.x[layers]) - np.log(self.SEQ_LENGTH)
        CRF_Prob = log_current_Prob + self.CRF_Prob[layers][:]
    '''
    for y_last in range(0, self.y_class):
        if mode == "Start":
            log_current_Prob = np.dot(self.w_o[y_possible], self.x[layers]/(self.SEQ_LENGTH)) + self.w_t[self.y_class][y_possible]/self.SEQ_LENGTH
            update_Psi_o[y_possible] = log_current_Prob + np.log(self.x[layers]) - np.log(self.SEQ_LENGTH)
            update_Psi_t[self.y_class][y_possible] = log_current_Prob - np.log(self.SEQ_LENGTH)
            CRF_Prob = log_current_Prob
        elif mode == "End":
            log_current_Prob = self.w_t[y_last][self.y_class]/self.SEQ_LENGTH
            update_Psi = np.logaddexp(update_Psi, log_current_Prob + self.CRF_Psi[layers][y_last])
            update_Psi_t[y_last][y_possible] = np.logaddexp(update_Psi_t[y_last][y_possible], log_current_Prob - (self.SEQ_LENGTH))
            CRF_Prob = np.logaddexp(CRF_Prob, log_current_Prob + self.CRF_Prob[layers][y_last])
        else:
            log_current_Prob = np.dot(self.w_o[y_possible], self.x[layers]/(self.SEQ_LENGTH)) + self.w_t[y_last][y_possible]/self.SEQ_LENGTH
            update_Psi = np.logaddexp(update_Psi, log_current_Prob + self.CRF_Psi[layers][y_last])
            update_Psi_t[y_last][y_possible] = np.logaddexp(update_Psi_t[y_last][y_possible], log_current_Prob - np.log(self.SEQ_LENGTH))
            update_Psi_o[y_possible] = np.logaddexp(update_Psi_o[y_possible], log_current_Prob + np.log(self.x[layers]) - np.log(self.SEQ_LENGTH))
            CRF_Prob = np.logaddexp(CRF_Prob, log_current_Prob + self.CRF_Prob[layers][y_last])
    '''
    update_Psi = np.hstack((np.hstack(update_Psi_o), np.hstack(update_Psi_t)))
    if mode != "Start":
        a = update_Psi[0]
        b = CRF_Prob[0]
        for idx in range(1, self.y_class):
            a = np.logaddexp(a, update_Psi[idx])
            b = np.logaddexp(b, CRF_Prob[idx])
    else:
        a = update_Psi
        b = CRF_Prob
    self.CRF_Psi[layers+1][y_possible] = a  #update_Psi
    #print "update_Psi = ", update_Psi
    self.CRF_Prob[layers+1][y_possible] = b  #CRF_Prob
Example 15: loglik
def loglik(self, a):
    if not np.issubdtype(a.dtype, np.integer):
        raise RuntimeError('a must be an integer array')
    if not np.all((a==0) + (a==1)):
        raise RuntimeError('a must be a binary array')
    log_p = -np.logaddexp(0., -self.odds)
    log_1_minus_p = -np.logaddexp(0., self.odds)
    return a * log_p + (1-a) * log_1_minus_p
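The two expressions are the stable log-sigmoid identities: log p = -logaddexp(0, -odds) = log sigmoid(odds), and log(1-p) = -logaddexp(0, odds). A quick self-contained check with illustrative values:

import numpy as np

odds = np.array([-800.0, -1.0, 0.0, 1.0, 800.0])
log_p = -np.logaddexp(0.0, -odds)          # log sigmoid(odds), no overflow
log_1_minus_p = -np.logaddexp(0.0, odds)   # log(1 - sigmoid(odds))

# p + (1-p) == 1 everywhere, even where a naive 1/(1+exp(-odds))
# would overflow in the exponential
assert np.allclose(np.exp(log_p) + np.exp(log_1_minus_p), 1.0)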