This article collects typical usage examples of the Python method scipy.misc.logsumexp. If you are wondering what misc.logsumexp does, how to call it, or want to see it in real code, the curated examples below may help. You can also explore further usage of the containing module, scipy.misc.
The following shows 15 code examples of misc.logsumexp, sorted by popularity by default.
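As a quick orientation before the examples, this is the basic pattern they all rely on: logsumexp computes log(sum(exp(a))) without overflowing. Note that logsumexp has been removed from scipy.misc in recent SciPy releases (it now lives in scipy.special), so a minimal, version-tolerant sketch looks like this:

import numpy as np
try:
    from scipy.misc import logsumexp        # SciPy < 1.3
except ImportError:
    from scipy.special import logsumexp     # current SciPy

a = np.array([1000.0, 1000.0])
print(np.log(np.sum(np.exp(a))))   # inf, because np.exp(1000) overflows
print(logsumexp(a))                # 1000.6931..., i.e. 1000 + log(2)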
Example 1: _logistic
# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import logsumexp [as alias]
def _logistic(x):
"""
Note that this is not a vectorized function
"""
x = np.array(x)
# np.exp(x) / (1 + np.exp(x))
if x.ndim == 0:
y = np.reshape(x, (1, 1, 1))
# np.exp(x[i]) / (1 + np.sum(np.exp(x[:])))
elif x.ndim == 1:
y = np.reshape(x, (len(x), 1, 1))
# np.exp(x[i,t]) / (1 + np.sum(np.exp(x[:,t])))
elif x.ndim == 2:
y = np.reshape(x, (x.shape[0], 1, x.shape[1]))
# np.exp(x[i,j,t]) / (1 + np.sum(np.exp(x[:,j,t])))
elif x.ndim == 3:
y = x
else:
raise NotImplementedError
tmp = np.c_[np.zeros((y.shape[-1], y.shape[1], 1)), y.T].T
evaluated = np.reshape(np.exp(y - logsumexp(tmp, axis=0)), x.shape)
return evaluated
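A small sanity check of the transform above (assuming numpy and logsumexp are imported as in the example, and using made-up inputs): for a 1-D input the i-th output is exp(x[i]) / (1 + sum(exp(x))), so the outputs plus the implicit reference category sum to one.

x = np.array([0.2, -1.0, 0.5])
p = _logistic(x)
baseline = 1.0 / (1.0 + np.exp(x).sum())   # probability of the reference category
print(p.sum() + baseline)                  # ~1.0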
Example 2: test_logsumexp
# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import logsumexp [as alias]
def test_logsumexp():
"""Test whether logsumexp() function correctly handles large inputs."""
a = np.arange(200)
desired = np.log(np.sum(np.exp(a)))
assert_almost_equal(logsumexp(a), desired)
# Now test with large numbers
b = [1000, 1000]
desired = 1000.0 + np.log(2.0)
assert_almost_equal(logsumexp(b), desired)
n = 1000
b = np.ones(n) * 10000
desired = 10000.0 + np.log(n)
assert_almost_equal(logsumexp(b), desired)
x = np.array([1e-40] * 1000000)
logx = np.log(x)
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX)), X.sum())
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
Example 3: test_logsumexp_b
# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import logsumexp [as alias]
def test_logsumexp_b():
a = np.arange(200)
b = np.arange(200, 0, -1)
desired = np.log(np.sum(b*np.exp(a)))
assert_almost_equal(logsumexp(a, b=b), desired)
a = [1000, 1000]
b = [1.2, 1.2]
desired = 1000 + np.log(2 * 1.2)
assert_almost_equal(logsumexp(a, b=b), desired)
x = np.array([1e-40] * 100000)
    b = np.linspace(1, 1000, 100000)
logx = np.log(x)
X = np.vstack((x, x))
logX = np.vstack((logx, logx))
B = np.vstack((b, b))
assert_array_almost_equal(np.exp(logsumexp(logX, b=B)), (B * X).sum())
assert_array_almost_equal(np.exp(logsumexp(logX, b=B, axis=0)),
(B * X).sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, b=B, axis=1)),
(B * X).sum(axis=1))
Example 4: partial_eval
# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import logsumexp [as alias]
def partial_eval(self, X, n_samples=5):
if n_samples < 1000:
res, iwae = self.sess.run(
(self.lHat, self.iwae),
feed_dict={self.x: X, self.n_samples: n_samples})
res = [iwae] + res
else: # special case to handle OOM
        assert n_samples % 100 == 0, "When using a large # of samples, it must be divisible by 100"
res = []
        for i in range(int(n_samples / 100)):
logF, = self.sess.run(
(self.logF,),
feed_dict={self.x: X, self.n_samples: 100})
res.append(logsumexp(logF, axis=1))
res = [np.mean(logsumexp(res, axis=0) - np.log(n_samples))]
return res
# Random samplers
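The chunked branch above works because a logsumexp over all samples can be assembled from per-chunk logsumexp values. A numpy-only sketch of that identity, using hypothetical log-weights instead of the example's TensorFlow session:

import numpy as np
from scipy.special import logsumexp   # scipy.misc.logsumexp in older SciPy

logF = np.random.randn(4, 1000)       # hypothetical per-sample log importance weights
chunks = [logsumexp(logF[:, i:i + 100], axis=1) for i in range(0, 1000, 100)]
chunked = logsumexp(chunks, axis=0) - np.log(1000)   # log-mean-exp built from chunks
direct = logsumexp(logF, axis=1) - np.log(1000)      # same quantity in one pass
print(np.allclose(chunked, direct))                  # True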
Example 5: _alpha_div
# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import logsumexp [as alias]
def _alpha_div(omas, Bs, dim, num_q, rhos, nus, clamp=True):
N = rhos.shape[0]
# the actual main estimate:
# rho^(- dim * est alpha) nu^(- dim * est beta)
# = (rho / nu) ^ (dim * (1 - alpha))
# do some reshaping trickery to get broadcasting right
estimates = np.log(rhos)[:, np.newaxis, :]
estimates -= np.log(nus)[:, np.newaxis, :]
estimates = estimates * (dim * omas.reshape(1, -1, 1))
estimates = logsumexp(estimates, axis=0)
# turn the sum into a mean, multiply by Bs
estimates += np.log(Bs / N)
# factors based on the sizes:
# 1 / [ (n-1)^(est alpha) * m^(est beta) ] = ((n-1) / m) ^ (1 - alpha)
estimates += omas * np.log((N - 1) / num_q)
np.exp(estimates, out=estimates)
if clamp:
np.maximum(estimates, 0, out=estimates)
return estimates
Example 6: sample_logits
# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import logsumexp [as alias]
def sample_logits(preds, temperature=1.0):
"""
Sample an index from a logit vector.
    :param preds: 1-D array of unnormalised log-probabilities (logits).
    :param temperature: softmax temperature; 0.0 means a greedy argmax.
    :return: the sampled index.
"""
preds = np.asarray(preds).astype('float64')
if temperature == 0.0:
return np.argmax(preds)
preds = preds / temperature
preds = preds - logsumexp(preds)
choice = np.random.choice(len(preds), 1, p=np.exp(preds))
return choice
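A brief usage sketch with made-up logits (assumes the numpy and logsumexp imports from the example): dividing by the temperature before the logsumexp normalisation sharpens (T < 1) or flattens (T > 1) the distribution, and T == 0.0 falls back to a greedy argmax.

logits = np.array([2.0, 1.0, 0.1])
print(sample_logits(logits, temperature=0.0))   # always 0 (greedy argmax)
print(sample_logits(logits, temperature=1.0))   # length-1 array with a sampled index
print(sample_logits(logits, temperature=5.0))   # close to uniform sampling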
Example 7: n_effective
# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import logsumexp [as alias]
def n_effective(self):
"""
Estimate the effective number of posterior samples using the Kish
Effective Sample Size (ESS) where `ESS = sum(wts)^2 / sum(wts^2)`.
Note that this is `len(wts)` when `wts` are uniform and
`1` if there is only one non-zero element in `wts`.
"""
if len(self.saved_logwt) == 0:
# If there are no saved weights, return 0.
return 0
else:
# Otherwise, compute Kish ESS.
logwts = np.array(self.saved_logwt)
logneff = logsumexp(logwts) * 2 - logsumexp(logwts * 2)
return np.exp(logneff)
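The log-space identity used here, logneff = 2*logsumexp(logwts) - logsumexp(2*logwts), is simply log(sum(wts)**2 / sum(wts**2)) evaluated without underflow. A small numpy check with hypothetical weights:

import numpy as np
from scipy.special import logsumexp   # scipy.misc.logsumexp in older SciPy

wts = np.random.rand(1000)
logwts = np.log(wts)
ess_direct = wts.sum() ** 2 / np.sum(wts ** 2)
ess_logspace = np.exp(logsumexp(logwts) * 2 - logsumexp(logwts * 2))
print(np.allclose(ess_direct, ess_logspace))   # True
print(ess_direct <= len(wts))                  # ESS never exceeds the number of weights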
Example 8: n_effective
# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import logsumexp [as alias]
def n_effective(self):
"""
Estimate the effective number of posterior samples using the Kish
Effective Sample Size (ESS) where `ESS = sum(wts)^2 / sum(wts^2)`.
Note that this is `len(wts)` when `wts` are uniform and
`1` if there is only one non-zero element in `wts`.
"""
        if (len(self.saved_logwt) == 0 or
                np.max(self.saved_logwt) < 0.01 * np.nan_to_num(-np.inf)):
            # If there are no saved weights, or they are all effectively -inf, return 0.
            return 0
else:
# Otherwise, compute Kish ESS.
logwts = np.array(self.saved_logwt)
logneff = logsumexp(logwts) * 2 - logsumexp(logwts * 2)
return np.exp(logneff)
Example 9: _mc_heldout_log_likelihood
# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import logsumexp [as alias]
def _mc_heldout_log_likelihood(self, X, M=100):
# Estimate the held out likelihood using Monte Carlo
T, K = X.shape
assert K == self.K
lls = np.zeros(M)
for m in range(M):
# Sample latent states from the prior
data = self.generate(T=T, keep=False)
data["x"] = X
lls[m] = self.emission_distn.log_likelihood(data)
# Compute the average
hll = logsumexp(lls) - np.log(M)
# Use bootstrap to compute error bars
samples = np.random.choice(lls, size=(100, M), replace=True)
hll_samples = logsumexp(samples, axis=1) - np.log(M)
std_hll = hll_samples.std()
return hll, std_hll
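Both logsumexp calls above implement a log-mean-exp, log(mean(exp(lls))): once for the point estimate and once per bootstrap resample to get an error bar. A standalone numpy sketch of that pattern with hypothetical log-likelihood draws:

import numpy as np
from scipy.special import logsumexp   # scipy.misc.logsumexp in older SciPy

M = 100
lls = np.random.randn(M) - 50.0                     # hypothetical per-sample log-likelihoods
hll = logsumexp(lls) - np.log(M)                    # log-mean-exp point estimate
samples = np.random.choice(lls, size=(100, M), replace=True)
hll_boot = logsumexp(samples, axis=1) - np.log(M)   # bootstrap replicates
print(hll, hll_boot.std())                          # estimate and its bootstrap error bar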
Example 10: _conditional_psi
# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import logsumexp [as alias]
def _conditional_psi(self, d, t, Lmbda, N, c):
nott = np.arange(self.T) != t
psi = self.psi[d]
omega = self.omega[d]
mu = self.theta_prior.mu
zetat = logsumexp(psi[nott])
mut_marg = mu[t] - 1./Lmbda[t,t] * Lmbda[t,nott].dot(psi[nott] - mu[nott])
sigmat_marg = 1./Lmbda[t,t]
sigmat_cond = 1./(omega[t] + 1./sigmat_marg)
# kappa is the mean dot precision, i.e. the sufficient statistic of a Gaussian
# therefore we can sum over datapoints
kappa = (c[t] - N/2.0).sum()
mut_cond = sigmat_cond * (kappa + mut_marg / sigmat_marg + omega[t]*zetat)
return mut_cond, sigmat_cond
Example 11: log_z
# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import logsumexp [as alias]
def log_z(n, j, beta):
return (
-log(2) + 1 / 2 * n**2 * log(2 * sinh(2 * h(j, beta))) + logsumexp([
fsum([
log(2 * cosh(n / 2 * gamma(n, j, beta, 2 * r)))
for r in range(n)
]),
fsum([
log(2 * sinh(n / 2 * gamma(n, j, beta, 2 * r)))
for r in range(n)
]),
fsum([
log(2 * cosh(n / 2 * gamma(n, j, beta, 2 * r + 1)))
for r in range(n)
]),
fsum([
log(2 * sinh(n / 2 * gamma(n, j, beta, 2 * r + 1)))
for r in range(n)
]),
]))
Example 12: _create_lookup_table
# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import logsumexp [as alias]
def _create_lookup_table(self):
""" Make the lookup table """
logger.info('Building lookup table for distance marginalisation.')
self._dist_margd_loglikelihood_array = np.zeros((400, 800))
for ii, optimal_snr_squared_ref in enumerate(self._optimal_snr_squared_ref_array):
optimal_snr_squared_array = (
optimal_snr_squared_ref * self._ref_dist ** 2. /
self._distance_array ** 2)
for jj, d_inner_h_ref in enumerate(self._d_inner_h_ref_array):
d_inner_h_array = (
d_inner_h_ref * self._ref_dist / self._distance_array)
if self.phase_marginalization:
d_inner_h_array =\
self._bessel_function_interped(abs(d_inner_h_array))
self._dist_margd_loglikelihood_array[ii][jj] = \
logsumexp(d_inner_h_array - optimal_snr_squared_array / 2,
b=self.distance_prior_array * self._delta_distance)
log_norm = logsumexp(0. / self._distance_array,
b=self.distance_prior_array * self._delta_distance)
self._dist_margd_loglikelihood_array -= log_norm
self.cache_lookup_table()
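The b= keyword used above turns logsumexp into the log of a weighted sum, which is how the lookup table approximates a marginalisation integral log ∫ L(d) p(d) dd over a distance grid. A stripped-down sketch of that pattern with a hypothetical grid, prior, and likelihood (unrelated to the class internals):

import numpy as np
from scipy.special import logsumexp   # scipy.misc.logsumexp in older SciPy

distance = np.linspace(10.0, 1000.0, 500)        # hypothetical distance grid
delta = distance[1] - distance[0]
prior = distance ** 2
prior /= np.sum(prior * delta)                   # normalise the hypothetical d^2 prior
log_like = -0.5 * (100.0 / distance) ** 2        # hypothetical log-likelihood vs distance
log_marg = logsumexp(log_like, b=prior * delta)  # log of the Riemann-sum approximation
print(log_marg)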
Example 13: _evaluate_point_logpdf
# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import logsumexp [as alias]
def _evaluate_point_logpdf(args):
"""
Evaluate the Gaussian KDE at a given point `p`. This lives outside the KDE method to allow for
    parallelization using :mod:`multiprocessing`. Since :func:`map` only allows single-argument
    functions, the following arguments must be packed into a single tuple.
:param p:
The point to evaluate the KDE at.
:param data:
The `(N, ndim)`-shaped array of data used to construct the KDE.
:param cho_factor:
A Cholesky decomposition of the kernel covariance matrix.
"""
point, data, cho_factor = args
# Use Cholesky decomposition to avoid direct inversion of covariance matrix
diff = data - point
tdiff = la.cho_solve(cho_factor, diff.T, check_finite=False).T
diff *= tdiff
# Work in the log to avoid large numbers
return logsumexp(-np.sum(diff, axis=1)/2)
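A brief usage sketch with made-up data (assumes numpy as np and scipy.linalg as la, as in the surrounding module): the Cholesky factor of the kernel covariance is computed once and reused for every evaluation point. Note that the value returned above is an unnormalised log-density; the Gaussian normalisation constant and the 1/N factor are not subtracted.

data = np.random.randn(500, 2)                  # hypothetical (N, ndim) KDE sample
kernel_cov = np.cov(data, rowvar=False) * 0.1   # hypothetical kernel covariance
cho = la.cho_factor(kernel_cov)
args = (np.zeros(2), data, cho)                 # (point, data, cho_factor) packed for map()
print(_evaluate_point_logpdf(args))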
Example 14: logpdf
# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import logsumexp [as alias]
def logpdf(self, rowid, targets, constraints=None, inputs=None):
assert targets.keys() == self.outputs
assert inputs.keys() == self.inputs
assert not constraints
x = inputs[self.inputs[0]]
y = targets[self.outputs[0]]
return logsumexp([
np.log(.5)+self.uniform.logpdf(y-x**2),
np.log(.5)+self.uniform.logpdf(-y-x**2)
])
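The same pattern, the log of a mixture computed as a logsumexp over log-weight plus component log-pdf terms, works for any mixture density. A tiny standalone sketch with two hypothetical Gaussian components:

import numpy as np
from scipy.special import logsumexp   # scipy.misc.logsumexp in older SciPy
from scipy.stats import norm

def mixture_logpdf(y, weights=(0.5, 0.5), means=(-1.0, 2.0), scales=(1.0, 0.5)):
    # log( sum_k w_k * Normal(y; mu_k, sigma_k) ), evaluated stably in log space
    return logsumexp([np.log(w) + norm.logpdf(y, loc=m, scale=s)
                      for w, m, s in zip(weights, means, scales)])

print(mixture_logpdf(0.3))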
Example 15: _regime_transition_matrix_tvtp
# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import logsumexp [as alias]
def _regime_transition_matrix_tvtp(self, params, exog_tvtp=None):
if exog_tvtp is None:
exog_tvtp = self.exog_tvtp
nobs = len(exog_tvtp)
regime_transition_matrix = np.zeros(
(self.k_regimes, self.k_regimes, nobs),
dtype=np.promote_types(np.float64, params.dtype))
# Compute the predicted values from the regression
for i in range(self.k_regimes):
coeffs = params[self.parameters[i, 'regime_transition']]
regime_transition_matrix[:-1, i, :] = np.dot(
exog_tvtp,
np.reshape(coeffs, (self.k_regimes-1, self.k_tvtp)).T).T
# Perform the logistic transformation
tmp = np.c_[np.zeros((nobs, self.k_regimes, 1)),
regime_transition_matrix[:-1, :, :].T].T
regime_transition_matrix[:-1, :, :] = np.exp(
regime_transition_matrix[:-1, :, :] - logsumexp(tmp, axis=0))
# Compute the last column of the transition matrix
regime_transition_matrix[-1, :, :] = (
1 - np.sum(regime_transition_matrix[:-1, :, :], axis=0))
return regime_transition_matrix