This article collects typical usage examples of the logsumexp function from Python's scipy.misc module. If you have been wondering what exactly logsumexp does, how to call it, or what real uses of it look like, the hand-picked code samples here may help.
Fifteen code examples of the logsumexp function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
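As a quick refresher before the examples: logsumexp(a) computes ln(sum_i exp(a_i)) using the max-shift trick, so it stays finite where the naive np.log(np.sum(np.exp(a))) underflows or overflows. A minimal illustration:

import numpy as np
from scipy.misc import logsumexp  # scipy.special.logsumexp in modern SciPy

a = np.array([-1000.0, -999.0, -1001.0])
print(np.log(np.sum(np.exp(a))))  # -inf: np.exp underflows to zero
print(logsumexp(a))               # approx. -998.59, computed stably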
Example 1: messages_backwards2

def messages_backwards2(self):
    # this method is just for numerical testing
    # returns HSMM messages using HMM embedding. the way of the future!
    Al = np.log(self.trans_matrix)
    T, num_states = self.T, self.num_states

    betal = np.zeros((T, num_states))
    betastarl = np.zeros((T, num_states))

    starts = cumsum(self.rs, strict=True)
    ends = cumsum(self.rs, strict=False)
    foo = np.zeros((num_states, ends[-1]))
    for idx, row in enumerate(self.bwd_enter_rows):
        foo[idx, starts[idx]:ends[idx]] = row
    bar = np.zeros_like(self.hmm_bwd_trans_matrix)
    for start, end in zip(starts, ends):
        bar[start:end, start:end] = self.hmm_bwd_trans_matrix[start:end, start:end]

    pmess = np.zeros(ends[-1])

    # betal[-1] is 0
    for t in range(T-1, -1, -1):
        pmess += self.hmm_aBl[t]
        betastarl[t] = logsumexp(np.log(foo) + pmess, axis=1)
        betal[t-1] = logsumexp(Al + betastarl[t], axis=1)

        pmess = logsumexp(np.log(bar) + pmess, axis=1)
        pmess[ends-1] = np.logaddexp(pmess[ends-1], betal[t-1] + np.log(1 - self.ps))
    betal[-1] = 0.

    return betal, betastarl
Example 2: bayesFactors

def bayesFactors(model1, model2):
    """
    Computes the Bayes factor for two competing models.

    The computation is based on Newton & Raftery 1994 (Eq. 13).

    Parameters
    ----------
    model1 : PyAstronomy.funcFit.TraceAnalysis instance
        TraceAnalysis for the first model.
    model2 : PyAstronomy.funcFit.TraceAnalysis instance
        TraceAnalysis for the second model.

    Returns
    -------
    BF : float
        The Bayes factor BF (neither log10(BF) nor ln(BF)).
        Note that a small value means that the first model
        is favored, i.e., BF = p(M2)/p(M1).
    """
    logp1 = model1["deviance"] / (-2.)
    logp2 = model2["deviance"] / (-2.)
    # Given numbers x1, x2, ... whose logarithms are l_i = ln(x_i),
    # logsumexp calculates: ln(sum_i exp(l_i)) = ln(sum_i x_i)
    bf = numpy.exp(sm.logsumexp(-logp1) - numpy.log(len(logp1))
                   - (sm.logsumexp(-logp2) - numpy.log(len(logp2))))
    return bf
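The expression above is the harmonic-mean estimator of each model's marginal likelihood: ln p(D|M) is approximated by -(logsumexp(-logp) - ln N). A self-contained sketch of the same identity on synthetic log-likelihoods (all names below are illustrative, not part of PyAstronomy):

import numpy as np
from scipy.misc import logsumexp  # scipy.special.logsumexp in modern SciPy

rng = np.random.RandomState(0)
logp1 = -0.5 * rng.chisquare(df=3, size=1000)  # stand-in trace for model 1
logp2 = -0.5 * rng.chisquare(df=5, size=1000)  # stand-in trace for model 2

# harmonic-mean estimate: ln p(D|M) ~ -ln mean(1/L_i) = -(logsumexp(-logp) - ln N)
log_ev1 = -(logsumexp(-logp1) - np.log(len(logp1)))
log_ev2 = -(logsumexp(-logp2) - np.log(len(logp2)))
bf = np.exp(log_ev2 - log_ev1)  # same value as BF = p(M2)/p(M1) above
print(bf)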
Example 3: observe

def observe(self, population):
    lweights = np.array([s.lweight for s in population])
    #print(lweights)
    lweights = lweights - logsumexp(lweights)  #+ 1000
    #print(lweights)
    indices = np.array([self.prop2idx[s.prop_obj] for s in population])
    for i in range(len(lweights)):
        prop_idx = indices[i]
        self.num_samp[prop_idx] = self.num_samp[prop_idx] + 1
        self.sum[prop_idx] = logsumexp((self.sum[prop_idx], lweights[i]))
        self.sqr_sum[prop_idx] = logsumexp((self.sqr_sum[prop_idx], 2*lweights[i]))
    lnum_samp = log(self.num_samp)
    self.var = exp(logsumexp([self.sum, self.sqr_sum - lnum_samp], 0) - lnum_samp)
    #self.var = exp(self.var - logsumexp(self.var))
    if self.var.size > 1:
        tmp = self.var.sum()
        if tmp == 0 or np.isnan(tmp):
            prop_prob = np.array([1./self.var.size] * self.var.size)
        else:
            prop_prob = (self.var.sum() - self.var)
            prop_prob = prop_prob/prop_prob.sum()/2 + np.random.dirichlet(1 + self.num_samp)/2
    else:
        prop_prob = np.array([1./self.var.size] * self.var.size)
    self.prop_dist = categorical(prop_prob)
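Note the pattern logsumexp((running, new_term)) on a 2-tuple, used above to update self.sum and self.sqr_sum: it adds one term to a running sum entirely in log space and is equivalent to np.logaddexp. A minimal sketch:

import numpy as np
from scipy.misc import logsumexp  # scipy.special.logsumexp in modern SciPy

running = -np.inf  # log(0): the empty sum
for lw in [-1200.0, -1199.2, -1201.5]:  # hypothetical log-weights
    running = logsumexp((running, lw))  # same as np.logaddexp(running, lw)
print(running)  # ln(exp(-1200) + exp(-1199.2) + exp(-1201.5)), computed stably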
Example 4: predictive_log_likelihood

def predictive_log_likelihood(self, X_pred, data_index=0, M=100):
    """
    Hacky way of computing the predictive log likelihood

    :param X_pred:
    :param data_index:
    :param M:
    :return:
    """
    Tpred = X_pred.shape[0]
    data = self.data_list[data_index]
    conditional_mean = self.emission_distn.conditional_mean(data)
    conditional_cov = self.emission_distn.conditional_cov(data, flat=True)

    lls = []
    z_preds = data["states"].predict_states(
        conditional_mean, conditional_cov, Tpred=Tpred, Npred=M)
    for m in range(M):
        ll_pred = self.emission_distn.log_likelihood(
            {"z": z_preds[..., m], "x": X_pred})
        lls.append(ll_pred)

    # Compute the average
    hll = logsumexp(lls) - np.log(M)

    # Use bootstrap to compute error bars
    samples = np.random.choice(lls, size=(100, M), replace=True)
    hll_samples = logsumexp(samples, axis=1) - np.log(M)
    std_hll = hll_samples.std()

    return hll, std_hll
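The line logsumexp(lls) - np.log(M) is a numerically stable log-mean-exp, i.e. ln((1/M) * sum_m exp(ll_m)): it averages likelihoods (not log-likelihoods) over the M posterior samples. A standalone sketch of the same estimator with hypothetical numbers:

import numpy as np
from scipy.misc import logsumexp  # scipy.special.logsumexp in modern SciPy

lls = np.array([-1050.3, -1048.7, -1052.1, -1049.9])  # hypothetical per-sample log-likelihoods
M = len(lls)

# a naive average underflows: np.exp(lls) == 0 for values this negative
log_mean_lik = logsumexp(lls) - np.log(M)  # stable ln(mean(exp(lls)))

# bootstrap error bar, as in the example above
samples = np.random.choice(lls, size=(100, M), replace=True)
std = (logsumexp(samples, axis=1) - np.log(M)).std()
print(log_mean_lik, std)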
Example 5: _get_predictive_likelihoods

def _get_predictive_likelihoods(k):
    future_likelihoods = logsumexp(
        np.log(scaled_alphal[:-k].dot(np.linalg.matrix_power(trans_matrix, k)))
        + cmaxes[:-k, None] + aBl[k:], axis=1)
    past_likelihoods = logsumexp(alphal[:-k], axis=1)
    return future_likelihoods - past_likelihoods
Example 6: compute_forward_messages

def compute_forward_messages(optimizer, preorder_node_lst, gen_per_len, subtree_data_likelihoods):
    node_likelihoods = {}
    for node in preorder_node_lst:
        # Skip root node
        if node.parent_node is None:
            root_id = node.oid
            continue
        have_data = False
        trans_matrix = numpy.log(optimizer.get_transition_matrix(int(node.edge_length*gen_per_len))).transpose()
        if node.parent_node.oid in node_likelihoods:
            trans_matrix += node_likelihoods[node.parent_node.oid]
            have_data = True
        if node.parent_node.oid in subtree_data_likelihoods:
            for sibling in node.parent_node.child_nodes():
                if sibling.oid == node.oid:
                    continue
                if sibling.oid in subtree_data_likelihoods[node.parent_node.oid]:
                    have_data = True
                    trans_matrix += subtree_data_likelihoods[node.parent_node.oid][sibling.oid]
        if have_data:
            tot_probs = logsumexp(trans_matrix, axis=1)
            norm_factor = logsumexp(tot_probs)
            log_posteriors = tot_probs - norm_factor
            node_likelihoods[node.oid] = log_posteriors
    return node_likelihoods
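The last three lines of the loop body show the standard recipe for normalizing a discrete distribution in log space: subtracting logsumexp of the log-scores gives log-posteriors whose exponentials sum to one. In brief:

import numpy as np
from scipy.misc import logsumexp  # scipy.special.logsumexp in modern SciPy

log_scores = np.array([-500.0, -498.0, -499.0])  # hypothetical unnormalized log-probabilities
log_posteriors = log_scores - logsumexp(log_scores)
print(np.exp(log_posteriors), np.exp(log_posteriors).sum())  # probabilities summing to 1.0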
Example 7: magic_function

def magic_function(encoding, train_toks, classifier):
    V, N, M = encoding.encode(train_toks)
    Np = np.dot(N, np.dot(classifier.weight_n(), M.transpose()))
    Vp = np.dot(V, np.dot(classifier.weight_v(), M.transpose()))
    aclist = ['n' if x > y else 'v' for (x, y) in zip(np.diag(Np), np.diag(Vp))]
    total = 0
    correct = 0
    Z = np.exp(Np) + np.exp(Vp)
    nprob = sm.logsumexp(Np)/Z  # the original also used Vp here, apparently a copy-paste slip
    vprob = sm.logsumexp(Vp)/Z
    ll = []
    for (tok, tag) in train_toks:
        if tag == 'n':
            # p(n | tok) = exp(Np_ii) / (exp(Np_ii) + exp(Vp_ii)), kept in log space until the end
            ll.append(np.exp(np.diag(Np)[total] - np.logaddexp(np.diag(Np)[total], np.diag(Vp)[total])))
        elif tag == 'v':
            ll.append(np.exp(np.diag(Vp)[total] - np.logaddexp(np.diag(Np)[total], np.diag(Vp)[total])))
        if aclist[total] == tag:
            correct += 1
        total += 1
    acc = float(correct)/total
    empn, empv = classifier.getemp()
    nfeat = np.dot(np.exp(nprob), empn)
    vfeat = np.dot(np.exp(vprob), empv)
    grad_n = nfeat - empn
    grad_v = vfeat - empv
    return acc, -float(sum(ll))/len(ll), grad_n, grad_v
Example 8: get_segment_quality_end

def get_segment_quality_end(self, end_index: int, call_state: int) -> float:
    """Calculates the complement of the phred-scaled posterior probability that a site marks
    the end of a segment. This is done by directly summing the probability of the following
    complementary paths in log space:

        ...  [end_index]                         [end_index + 1]  ...
             call_state                          => call_state
             (any state except for call_state)   => (any state)

    Args:
        end_index: right breakpoint index of a segment
        call_state: segment call state index

    Returns:
        a phred-scaled probability
    """
    assert 0 <= end_index < self.num_sites
    all_other_states_list = self.leave_one_out_state_lists[call_state]
    if end_index == self.num_sites - 1:
        logp = logsumexp(self.log_posterior_prob_tc[self.num_sites - 1, all_other_states_list])
    else:
        complementary_paths_unnorm_logp = [(self.alpha_tc[end_index, end_state] +
                                            self.log_trans_tcc[end_index, end_state, next_state] +
                                            self.log_emission_tc[end_index + 1, next_state] +
                                            # backward message for the state occupied at end_index + 1
                                            self.beta_tc[end_index + 1, next_state])
                                           for end_state in all_other_states_list
                                           for next_state in self.all_states_list]
        complementary_paths_unnorm_logp.append(self.alpha_tc[end_index, call_state] +
                                               self.log_trans_tcc[end_index, call_state, call_state] +
                                               self.log_emission_tc[end_index + 1, call_state] +
                                               self.beta_tc[end_index + 1, call_state])
        logp = logsumexp(np.asarray(complementary_paths_unnorm_logp)) - self.log_data_likelihood
    return logp_to_phred(logp)
Example 9: test_mixture_weight_init

def test_mixture_weight_init():
    train_m_file = 'nltcs_2015-01-29_18-39-06/train.m.log'
    valid_m_file = 'nltcs_2015-01-29_18-39-06/valid.m.log'
    test_m_file = 'nltcs_2015-01-29_18-39-06/test.m.log'

    logging.basicConfig(level=logging.DEBUG)

    train = dataset.csv_2_numpy(train_m_file,
                                path='',
                                type='float32')
    valid = dataset.csv_2_numpy(valid_m_file,
                                path='',
                                type='float32')
    test = dataset.csv_2_numpy(test_m_file,
                               path='',
                               type='float32')

    k_components = train.shape[1]

    unif_weights = numpy.ones(k_components)
    unif_weights = unif_weights / unif_weights.sum()

    rand_weights = numpy.random.rand(k_components)
    rand_weights = rand_weights / rand_weights.sum()

    # mean log-likelihood of a mixture whose per-component log-likelihoods are the columns of train
    unif_mixture = logsumexp(train + numpy.log(unif_weights), axis=1).mean()
    rand_mixture = logsumexp(train + numpy.log(rand_weights), axis=1).mean()

    print('UNIF W LL', unif_mixture)
    print('RAND W LL', rand_mixture)
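The key line is logsumexp(train + numpy.log(weights), axis=1): with per-component log-likelihoods in each row and mixture weights w, it computes ln(sum_k w_k exp(ll_k)) per instance without underflow. A self-contained sketch on synthetic data (names are illustrative):

import numpy as np
from scipy.misc import logsumexp  # scipy.special.logsumexp in modern SciPy

rng = np.random.RandomState(0)
n_instances, k_components = 5, 3
# hypothetical per-instance, per-component log-likelihoods
component_lls = -rng.gamma(shape=50.0, size=(n_instances, k_components))

weights = np.ones(k_components) / k_components  # uniform mixture weights
mixture_ll = logsumexp(component_lls + np.log(weights), axis=1)  # ln sum_k w_k L_ik per row
print(mixture_ll.mean())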
Example 10: transitioncounts

def transitioncounts(fwdlattice, bwdlattice, framelogprob, log_transmat):
    n_observations, n_components = fwdlattice.shape
    lneta = np.zeros((n_observations-1, n_components, n_components))
    from scipy.misc import logsumexp
    logprob = logsumexp(fwdlattice[n_observations-1, :])
    print('logprob', logprob)

    for t in range(n_observations - 1):
        for i in range(n_components):
            for j in range(n_components):
                lneta[t, i, j] = fwdlattice[t, i] + log_transmat[i, j] \
                    + framelogprob[t + 1, j] + bwdlattice[t + 1, j] - logprob

    print(framelogprob)
    print('fwdlattice[:, 0]')
    print(fwdlattice[:, 0])
    print('logtransmat[0,0]')
    print(log_transmat[0, 0])
    print('framelogprob')
    print(framelogprob[:, 0])
    print('bwdlattice[:, 0]')
    print(bwdlattice[:, 0])
    print('lneta[:, 0, 0]')
    print(lneta[:, 0, 0])

    return np.exp(logsumexp(lneta, 0))
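For reference, lneta[t, i, j] is the log of the Baum-Welch quantity xi_t(i, j) = P(z_t = i, z_{t+1} = j | O), and the return value is the matrix of expected transition counts sum_t xi_t(i, j). The triple loop can also be written with broadcasting; a sketch of an equivalent, assuming the same input shapes ((T, K) lattices and a (K, K) log transition matrix):

import numpy as np
from scipy.misc import logsumexp  # scipy.special.logsumexp in modern SciPy

def transitioncounts_vectorized(fwdlattice, bwdlattice, framelogprob, log_transmat):
    # ln xi_t(i, j) for all t, i, j at once via broadcasting
    logprob = logsumexp(fwdlattice[-1])
    lneta = (fwdlattice[:-1, :, None] + log_transmat[None, :, :]
             + framelogprob[1:, None, :] + bwdlattice[1:, None, :] - logprob)
    return np.exp(logsumexp(lneta, axis=0))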
Example 11: hsmm_messages_forwards_log

def hsmm_messages_forwards_log(
        trans_potential, initial_state_potential,
        reverse_cumulative_obs_potentials, reverse_dur_potentials, reverse_dur_survival_potentials,
        alphal, alphastarl,
        left_censoring=False, right_censoring=True):
    T, _ = alphal.shape

    alphastarl[0] = initial_state_potential
    for t in range(T-1):
        cB = reverse_cumulative_obs_potentials(t)
        alphal[t] = logsumexp(
            alphastarl[t+1-cB.shape[0]:t+1] + cB + reverse_dur_potentials(t), axis=0)
        if left_censoring:
            raise NotImplementedError
        alphastarl[t+1] = logsumexp(
            alphal[t][:, na] + trans_potential(t), axis=0)  # na = np.newaxis

    t = T-1
    cB = reverse_cumulative_obs_potentials(t)
    alphal[t] = logsumexp(
        alphastarl[t+1-cB.shape[0]:t+1] + cB + reverse_dur_potentials(t), axis=0)

    if not right_censoring:
        normalizer = logsumexp(alphal[t])
    else:
        normalizer = None  # TODO
    return alphal, alphastarl, normalizer
Example 12: log_weighted_ave

def log_weighted_ave(arrs, weights):
    arrs = np.array(arrs)
    log_weights = np.log(weights)
    log_weights -= logsumexp(log_weights)
    for _ in range(len(arrs.shape) - 1):
        log_weights = log_weights[..., np.newaxis]
    return logsumexp(arrs + log_weights, axis=0)
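This helper takes a stack of log-space arrays and a weight vector and returns their weighted average, still in log space; the reshaping loop just aligns the weights for broadcasting along the first axis. A quick check of the function above on numbers small enough to verify by hand:

import numpy as np
from scipy.misc import logsumexp  # scipy.special.logsumexp in modern SciPy

# two log-probability vectors with unnormalized weights [1, 3]
arrs = np.log([[0.2, 0.8],
               [0.6, 0.4]])
out = log_weighted_ave(arrs, weights=[1.0, 3.0])
# expected: ln(0.25*[0.2, 0.8] + 0.75*[0.6, 0.4]) = ln([0.5, 0.5])
print(np.exp(out))  # -> [0.5 0.5]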
Example 13: e_step

def e_step(x, N, d):
    global miu
    global p
    global sigma
    soft_p = np.zeros(N)
    labels = np.zeros(N)
    # e-step
    for i in range(N):
        t, w = Gaussian(x[i])
        #print 't', t
        #print 'w', w
        num1 = np.exp(logsumexp(t[0], b=w[0]))
        #print 'num', num1
        num2 = np.exp(logsumexp(t[1], b=w[1]))
        soft_p[i] = num1 / (num1 + num2)
        if soft_p[i] >= 0.5:
            labels[i] = 1
        else:
            labels[i] = 0
    #print 'label', labels
    #print 'soft_p', soft_p
    #print np.sum(soft_p)
    #return soft_p
    return labels
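This example relies on logsumexp's b keyword, which scales each term before summing: logsumexp(a, b=w) returns ln(sum_i w_i exp(a_i)). (Here t and w come from the user-defined Gaussian helper, which is not shown.) A minimal sketch of the keyword's behavior:

import numpy as np
from scipy.misc import logsumexp  # scipy.special.logsumexp in modern SciPy

a = np.array([-1000.0, -1000.7])  # log-densities far below np.exp's underflow range
w = np.array([0.3, 0.7])          # per-term scale factors (e.g. mixture weights)

# ln(0.3*exp(-1000) + 0.7*exp(-1000.7)), computed without underflow
print(logsumexp(a, b=w))
# equivalent to logsumexp(a + np.log(w)) when all weights are positive
print(logsumexp(a + np.log(w)))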
Example 14: sample_lpost_based

def sample_lpost_based(num_samples, initial_particles, proposal_method, population_size=20):
    rval = []
    anc = proposal_method.process_initial_samples(initial_particles)
    num_initial = len(rval)
    while len(rval) - num_initial < num_samples:
        ancest_dist = np.array([a.lpost for a in anc])
        ancest_dist = categorical(ancest_dist - logsumexp(ancest_dist), p_in_logspace=True)
        # choose an ancestor from previous samples, weighted by posterior
        pop = [proposal_method.gen_proposal(anc[ancest_dist.rvs()])
               for _ in range(population_size)]
        prop_w = np.array([s.lweight for s in pop])
        prop_w = exp(prop_w - logsumexp(prop_w))

        # importance resampling
        while True:
            try:
                draws = np.random.multinomial(population_size, prop_w)
                break
            except ValueError:
                prop_w /= prop_w.sum()
        for idx in range(len(draws)):
            rval.extend([pop[idx]] * draws[idx])
            anc.append(pop[idx])
    return (np.array([s.sample for s in rval]), np.array([s.lpost for s in rval]))
Example 15: basis_log_like

def basis_log_like(beta):
    beta = beta[0] + beta[1:]
    logBk = beta - logsumexp(beta) - np.log(self._dx)
    logLams = logsumexp(logW[k, :]) + logBk + np.log(self._dt) + np.log(self._dx)
    stable_ll = np.sum(logLams*Zbasis[k] - np.exp(logLams))
    #print "!!!!!!!!!! basis_log_like %2.5f, %2.5f" % (ll, stable_ll)
    return stable_ll
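A closing note on imports: scipy.misc.logsumexp, used throughout these examples, was deprecated in SciPy 1.0 and removed in later releases. In current SciPy the same function lives in scipy.special with an unchanged call signature (axis=..., b=..., keepdims=...), so new code, and ports of the examples above, should import it like this:

# modern import; drop-in replacement for scipy.misc.logsumexp
from scipy.special import logsumexp

import numpy as np
print(logsumexp(np.array([-1000.0, -1000.0])))  # -999.3068..., i.e. -1000 + ln(2)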