This article collects typical usage examples of the numpy.log function in Python. If you are wondering what exactly Python's log function does and how to use it, the curated code examples below should help.
Fifteen code examples of the log function are presented, sorted by popularity by default.
Example 1: joint_logdist

import numpy as np
from numpy import log
from scipy.special import gammaln

def joint_logdist(pi, alpha, sigma, tau, u):
    # pi holds the block sizes of a random partition; u is an auxiliary variable.
    abs_pi = len(pi)
    n = np.sum(pi)
    tmp = abs_pi * log(alpha) + (n - 1.) * log(u) - gammaln(n) \
        - (n - sigma * abs_pi) * log(u + tau) \
        - (alpha / sigma) * ((u + tau) ** sigma - tau ** sigma)
    tmp += np.sum(gammaln(pi - sigma) - gammaln(1. - sigma))
    return tmp
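For illustration, a quick usage sketch (my addition, not from the source project; the parameter values are arbitrary). It evaluates the log-density of a three-block partition of n = 6:

import numpy as np

pi = np.array([3., 2., 1.])  # block sizes of a partition of n = 6
print(joint_logdist(pi, alpha=1.0, sigma=0.5, tau=1.0, u=2.0))
# prints a single finite log-density value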
Example 2: log_diff_exp

import numpy as numx  # the source project aliases numpy as numx

def log_diff_exp(x, axis=0):
    """ Calculates the logarithm of the diffs of e to the power of input 'x'. The method tries to avoid
        overflows by using the relationship: log(diff(exp(x))) = alpha + log(diff(exp(x-alpha))).

    :Parameter:
        x:    data.
             -type: float or numpy array

        axis: Diffs along the given axis.
             -type: int

    :Return:
        Logarithm of the diffs of exp of x.
       -type: float or numpy array.
    """
    alpha = x.max(axis) - numx.log(numx.finfo(numx.float64).max) / 2.0
    if axis == 1:
        return numx.squeeze(alpha + numx.log(
            numx.diff(numx.exp(x.T - alpha), n=1, axis=0)))
    else:
        return numx.squeeze(alpha + numx.log(
            numx.diff(numx.exp(x - alpha), n=1, axis=0)))
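A small sketch (my addition) of why the shift by alpha matters: for large inputs the naive computation overflows inside exp, while log_diff_exp stays finite.

import numpy as np

x = np.array([0.0, 710.0, 720.0])   # exp(710) already overflows float64
print(log_diff_exp(x))              # ~ [710., 720.], computed without overflow
# np.log(np.diff(np.exp(x))) would emit warnings and return inf here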
Example 3: _lmvnpdffull

import numpy as np

def _lmvnpdffull(obs, means, covars):
    """
    Log probability for full covariance matrices.

    WARNING: In certain cases, this function will modify in-place
    some of the covariance matrices.
    """
    from scipy import linalg
    if hasattr(linalg, 'solve_triangular'):
        # only in scipy since 0.9
        solve_triangular = linalg.solve_triangular
    else:
        # slower, but works
        solve_triangular = linalg.solve
    n_obs, n_dim = obs.shape
    nmix = len(means)
    log_prob = np.empty((n_obs, nmix))
    for c, (mu, cv) in enumerate(zip(means, covars)):
        try:
            cv_chol = linalg.cholesky(cv, lower=True)
        except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; reinitialize this component.
            cv[:] = 10 * np.eye(cv.shape[0])
            cv_chol = cv
        cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
        cv_sol = solve_triangular(cv_chol, (obs - mu).T, lower=True).T
        log_prob[:, c] = -.5 * (np.sum(cv_sol ** 2, axis=1) +
                                n_dim * np.log(2 * np.pi) + cv_log_det)
    return log_prob
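As a sanity check (my addition), the result can be compared against scipy.stats.multivariate_normal on random data:

import numpy as np
from scipy.stats import multivariate_normal

rng = np.random.RandomState(0)
obs = rng.randn(5, 2)
means = [np.zeros(2), np.ones(2)]
covars = [np.eye(2), np.array([[2.0, 0.3], [0.3, 1.0]])]
lp = _lmvnpdffull(obs, means, covars)
ref = np.column_stack([multivariate_normal(m, c).logpdf(obs)
                       for m, c in zip(means, covars)])
assert np.allclose(lp, ref)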
Example 4: compute_cost

import numpy as np
from numpy import where
from scipy.special import expit as sigmoid  # the source defines its own sigmoid; expit is equivalent

def compute_cost(X, y, theta, lam):
    '''Compute cost for logistic regression.'''
    # Number of training examples
    m = y.shape[0]
    # Compute the prediction based on theta and X
    predictions = X.dot(theta)
    # Clip values before sending them to the sigmoid function:
    # for arguments >= 20 the sigmoid is effectively 1, and very
    # negative arguments are clipped to avoid overflow in exp.
    predictions[where(predictions >= 20)] = 20
    predictions[where(predictions <= -500)] = -500
    hypothesis = sigmoid(predictions)
    # Avoid log(0) when a prediction saturates at exactly 1.0
    hypothesis[where(hypothesis == 1.0)] = 0.99999
    # Part of the cost function without regularization
    J1 = (-1.0 / m) * sum((y * np.log(hypothesis)) +
                          ((1.0 - y) * np.log(1.0 - hypothesis)))
    # Regularization term (the intercept theta[0] is not regularized)
    J2 = lam / (2.0 * m) * sum(theta[1:, ] * theta[1:, ])
    return J1 + J2
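A usage sketch (my addition): at theta = 0 the hypothesis is 0.5 for every example, so the unregularized cost is exactly ln(2).

import numpy as np

X = np.array([[1.0, 0.5], [1.0, -0.5], [1.0, 1.5]])  # first column = intercept
y = np.array([1.0, 0.0, 1.0])
theta = np.zeros(2)
print(compute_cost(X, y, theta, lam=1.0))  # ln(2) ~ 0.6931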
Example 5: computeCost

from numpy import e, log

def computeCost(theta, X, y):
    theta.shape = (1, 3)
    m = y.size
    z = X.dot(theta.T)
    # Sigmoid hypothesis
    h = 1.0 / (1.0 + e ** (-1.0 * z))
    # Cross-entropy cost
    J = (1.0 / m) * ((-y.T.dot(log(h))) - ((1.0 - y.T).dot(log(1.0 - h))))
    return J.sum()
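A usage sketch (my addition). Note the function hard-codes a reshape to (1, 3), so theta must have exactly three entries, and y must be a column vector:

import numpy as np

X = np.array([[1.0, 2.0, 3.0], [1.0, -1.0, 0.5]])
y = np.array([[1.0], [0.0]])
theta = np.zeros(3)
print(computeCost(theta, X, y))  # ln(2) ~ 0.6931 at theta = 0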
Example 6: klBern

import numpy as np

eps = 1e-15  # clip threshold keeping probabilities away from 0 and 1 (defined elsewhere in the source)

def klBern(x, y):
    r""" Kullback-Leibler divergence for Bernoulli distributions. https://en.wikipedia.org/wiki/Bernoulli_distribution#Kullback.E2.80.93Leibler_divergence

    .. math:: \mathrm{KL}(\mathcal{B}(x), \mathcal{B}(y)) = x \log(\frac{x}{y}) + (1-x) \log(\frac{1-x}{1-y})."""
    x = min(max(x, eps), 1 - eps)
    y = min(max(y, eps), 1 - eps)
    return x * np.log(x / y) + (1 - x) * np.log((1 - x) / (1 - y))
Author: Naereen, Project: notebooks, Lines of code: 7, Source file: Kullback_Leibler_divergences_in_native_Python__Cython_and_Numba.py
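A usage sketch (my addition): the divergence is zero between identical Bernoullis and grows as the parameters separate.

print(klBern(0.5, 0.5))  # 0.0
print(klBern(0.5, 0.1))  # ~ 0.5108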
Example 7: __init__

import numpy as np
from scipy import sparse
# smooth_atom and identity_quadratic come from the enclosing regreg package.

def __init__(self, shape, successes,
             trials=None, coef=1., offset=None,
             quadratic=None,
             initial=None):
    smooth_atom.__init__(self,
                         shape,
                         offset=offset,
                         quadratic=quadratic,
                         initial=initial,
                         coef=coef)
    if sparse.issparse(successes):
        # Convert sparse success vector to an array
        self.successes = successes.toarray().flatten()
    else:
        self.successes = np.asarray(successes)
    if trials is None:
        if not set([0, 1]).issuperset(np.unique(self.successes)):
            raise ValueError("Number of successes is not binary - must specify number of trials")
        self.trials = np.ones(self.successes.shape, float)
    else:
        if np.min(trials - self.successes) < 0:
            raise ValueError("Number of successes greater than number of trials")
        if np.min(self.successes) < 0:
            raise ValueError("Response coded as negative number - should be non-negative number of successes")
        self.trials = trials * 1.
    # Deviance of the saturated model, folded into the quadratic term.
    saturated = self.successes / self.trials
    deviance_terms = np.log(saturated) * self.successes + np.log(1 - saturated) * (self.trials - self.successes)
    deviance_constant = -2 * coef * deviance_terms[~np.isnan(deviance_terms)].sum()
    devq = identity_quadratic(0, 0, 0, -deviance_constant)
    self.quadratic += devq
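The deviance bookkeeping at the end is the subtle part. Here is a standalone numpy sketch of it (my addition, outside the regreg class): the saturated model fits p_i = s_i / n_i, and terms where p_i is 0 or 1 produce 0*log(0) = nan and contribute nothing, which is why the nan entries are dropped.

import numpy as np

successes = np.array([0., 3., 5.])
trials = np.array([5., 5., 5.])
p_sat = successes / trials  # saturated fit
with np.errstate(divide='ignore', invalid='ignore'):
    terms = np.log(p_sat) * successes + np.log(1 - p_sat) * (trials - successes)
print(-2 * terms[~np.isnan(terms)].sum())  # deviance constant, ~ 6.73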
Example 8: loglike

import numpy as np
from scipy import special

def loglike(self, endog, mu, scale=1.):
    """
    Loglikelihood function for Gamma exponential family distribution.

    Parameters
    ----------
    endog : array-like
        Endogenous response variable
    mu : array-like
        Fitted mean response variable
    scale : float, optional
        The default is 1.

    Returns
    -------
    llf : float
        The value of the loglikelihood function evaluated at
        (endog, mu, scale) as defined below.

    Notes
    -----
    llf = -1/scale * sum(endog/mu + log(mu) + (scale-1)*log(endog) +
          log(scale) + scale*gammaln(1/scale))
    where gammaln is the log gamma function.
    """
    return -1. / scale * np.sum(endog / mu + np.log(mu) + (scale - 1) *
                                np.log(endog) + np.log(scale) + scale *
                                special.gammaln(1. / scale))
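For scale = 1 the Gamma family reduces to the exponential distribution, which gives a quick cross-check (my addition; the body never touches self, so None can be passed for it here):

import numpy as np
from scipy import stats

endog = np.array([0.5, 1.0, 2.0])
mu = np.array([1.0, 1.0, 2.0])
llf = loglike(None, endog, mu, scale=1.)
ref = stats.expon.logpdf(endog, scale=mu).sum()
assert np.isclose(llf, ref)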
Example 9: __init__

import numpy as np
from scipy import interpolate

def __init__(self, ps=None, sigma_v=0.0, redshift=0.0, **kwargs):
    if ps is None:
        from os.path import join, dirname
        #psfile = join(dirname(__file__), "data/ps_z1.5.dat")
        #psfile = join(dirname(__file__), "data/wigglez_halofit_z1.5.dat")
        psfile = join(dirname(__file__), "data/wigglez_halofit_z0.8.dat")
        print("loading matter power file: " + psfile)

        redshift = 0.8

        # Interpolate the tabulated power spectrum in log-log space.
        #pk_interp = cs.LogInterpolater.fromfile(psfile)
        pwrspec_data = np.genfromtxt(psfile)
        (log_k, log_pk) = (np.log(pwrspec_data[:, 0]),
                           np.log(pwrspec_data[:, 1]))
        logpk_interp = interpolate.interp1d(log_k, log_pk,
                                            bounds_error=False,
                                            fill_value=np.min(log_pk))
        pk_interp = lambda k: np.exp(logpk_interp(np.log(k)))

        # Suppress small-scale power with a Gaussian cutoff above kstar.
        kstar = 7.0
        ps = lambda k: np.exp(-0.5 * k ** 2 / kstar ** 2) * pk_interp(k)

    self._sigma_v = sigma_v

    RedshiftCorrelation.__init__(self, ps_vv=ps, redshift=redshift)
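The log-log interpolation trick is independent of the data file. A self-contained sketch (my addition, using a synthetic power-law spectrum, for which interpolation in log-log space is exact):

import numpy as np
from scipy import interpolate

k_data = np.logspace(-3, 1, 50)
pk_data = 2e4 * k_data ** -1.5  # synthetic power-law "spectrum"
logpk_interp = interpolate.interp1d(np.log(k_data), np.log(pk_data),
                                    bounds_error=False,
                                    fill_value=np.log(pk_data).min())
pk = lambda k: np.exp(logpk_interp(np.log(k)))
print(pk(0.05), 2e4 * 0.05 ** -1.5)  # nearly identical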
Example 10: nie_all

import numpy as np

def nie_all(xi1, xi2, xc1, xc2, b, s, q, rot, ys1, ys2):
    # xy_rotate is defined elsewhere in the source module; it rotates
    # (xi1, xi2) about the center (xc1, xc2) by the angle rot.
    x1, x2 = xy_rotate(xi1, xi2, xc1, xc2, rot)
    wx = np.sqrt(q * q * (x1 * x1 + s * s) + x2 * x2)
    # Deflection angles
    al1 = b / np.sqrt(1 - q * q) * np.arctan(x1 * np.sqrt(1 - q * q) / (wx + s))
    al2 = b / np.sqrt(1 - q * q) * np.arctanh(x2 * np.sqrt(1 - q * q) / (wx + q * q * s))
    # Convergence
    kappa = b / (2.0 * wx)
    # Lensing potential
    hx = np.sqrt((wx + s) ** 2.0 + (1 - q * q) * x1 * x1)
    phi = x1 * al1 + x2 * al2 - b * s * np.log(hx) + b * q * s * np.log((1 + q) * s)
    # Time delay, up to the cosmological prefactor Kc
    Kc = 1.0
    # Kc = (1.0+zl)/c*(Dl*Ds/Dls)
    td = Kc * (0.5 * ((al1) ** 2.0 + (al2) ** 2.0) - phi)
    # td = Kc*(0.5*((x1-ys1)**2.0+(x2-ys2)**2.0)-phi)
    # Source-plane positions via the lens equation
    y1 = x1 - al1
    y2 = x2 - al2
    y1, y2 = xy_rotate(y1, y2, xc1, xc2, -rot)
    # ------------------------------------------------------------------
    # Magnification from the inverse Jacobian of the lens mapping
    demon1 = ((wx + s) ** 2 + (1.0 - q * q) * x1 * x1) * wx
    demon2 = ((wx + q * q * s) ** 2 - (1.0 - q * q) * x2 * x2) * wx
    y11 = 1 - b * (wx * (wx + s) - q * q * x1 * x1) / demon1
    y22 = 1 - b * (wx * (wx + q * q * s) - x2 * x2) / demon2
    y12 = -b * x1 * x2 / demon1
    y21 = -b * x1 * x2 * q * q / demon2
    mu = 1.0 / (y11 * y22 - y12 * y21)
    return phi, td, al1, al2, kappa, mu, y1, y2
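xy_rotate is not shown in the snippet. To make the example callable, here is a minimal stand-in (my assumption of its behavior: rotate coordinates about a center point; the sign convention is a guess, which does not matter for rot = 0):

import numpy as np

def xy_rotate(x, y, xc, yc, phi):
    # Hypothetical helper: rotate (x, y) about (xc, yc) by angle phi (radians).
    cs, sn = np.cos(phi), np.sin(phi)
    return (x - xc) * cs + (y - yc) * sn, -(x - xc) * sn + (y - yc) * cs

xx, yy = np.meshgrid(np.linspace(-2, 2, 5), np.linspace(-2, 2, 5))
phi, td, al1, al2, kappa, mu, y1, y2 = nie_all(
    xx, yy, xc1=0.0, xc2=0.0, b=1.0, s=0.2, q=0.7, rot=0.0, ys1=0.0, ys2=0.0)
print(kappa.shape)  # (5, 5)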
Example 11: _SigmoidCrossEntropyWithLogits

import numpy as np

def _SigmoidCrossEntropyWithLogits(logits, targets):
    # logits, targets: float arrays of the same shape.
    assert logits.shape == targets.shape
    pred = 1. / (1. + np.exp(-logits))
    # Clip predictions away from 0 and 1 so the logs below stay finite.
    eps = 0.0001
    pred = np.minimum(np.maximum(pred, eps), 1 - eps)
    return -targets * np.log(pred) - (1. - targets) * np.log(1. - pred)
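A quick demonstration (my addition): the loss is small where the logits agree with the targets and ln(2) at an uninformative logit of 0.

import numpy as np

logits = np.array([5.0, -5.0, 0.0])
targets = np.array([1.0, 0.0, 1.0])
print(_SigmoidCrossEntropyWithLogits(logits, targets))
# ~ [0.0067, 0.0067, 0.6931]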
Example 12: _ComputeSampledLogitsNP

import numpy as np

def _ComputeSampledLogitsNP(self, true_w, true_b, sampled_w, sampled_b,
                            hidden_acts,
                            num_true=1,
                            true_expected=None,
                            sampled_expected=None):
    batch_size, dim = hidden_acts.shape
    true_logits = np.sum(
        hidden_acts.reshape((batch_size, 1, dim)) * true_w.reshape(
            (batch_size, num_true, dim)),
        axis=2)
    true_b = true_b.reshape((batch_size, num_true))
    true_logits += true_b
    sampled_logits = np.dot(hidden_acts, sampled_w.T) + sampled_b
    # Subtract log expected counts (the sampled-softmax correction).
    if true_expected is not None:
        true_logits -= np.log(true_expected)
    if sampled_expected is not None:
        sampled_logits -= np.log(sampled_expected[np.newaxis, :])
    out_logits = np.concatenate([true_logits, sampled_logits], axis=1)
    out_labels = np.hstack((np.ones_like(true_logits) / num_true,
                            np.zeros_like(sampled_logits)))
    return out_logits, out_labels
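A shape-oriented usage sketch (my addition; the body never uses self, so None is passed for it):

import numpy as np

batch_size, dim, num_sampled = 4, 3, 5
rng = np.random.RandomState(0)
logits, labels = _ComputeSampledLogitsNP(
    None,
    true_w=rng.randn(batch_size, dim), true_b=rng.randn(batch_size),
    sampled_w=rng.randn(num_sampled, dim), sampled_b=rng.randn(num_sampled),
    hidden_acts=rng.randn(batch_size, dim))
print(logits.shape, labels.shape)  # (4, 6) (4, 6)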
Example 13: test_anisotropic_power

import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_equal
from dipy.reconst.shm import anisotropic_power, calculate_max_order

def test_anisotropic_power():
    for n_coeffs in [6, 15, 28, 45, 66, 91]:
        for norm_factor in [0.0005, 0.00001]:
            # Create some really simple cases:
            coeffs = np.ones((3, n_coeffs))
            max_order = calculate_max_order(coeffs.shape[-1])
            # For the case where all coeffs == 1, the ap is simply log of the
            # number of even orders up to the maximal order:
            analytic = (np.log(len(range(2, max_order + 2, 2))) -
                        np.log(norm_factor))
            answers = [analytic] * 3
            apvals = anisotropic_power(coeffs, norm_factor=norm_factor)
            assert_array_almost_equal(apvals, answers)
            # Test that this works for single voxel arrays as well:
            assert_array_almost_equal(
                anisotropic_power(coeffs[1], norm_factor=norm_factor),
                answers[1])
    # Test that even when we look at an all-zeros voxel, this
    # avoids a log-of-zero warning:
    with warnings.catch_warnings(record=True) as w:
        assert_equal(anisotropic_power(np.zeros(6)), 0)
        assert len(w) == 0
Example 14: compute_edge_weights

import logging
import numpy as np

logger = logging.getLogger(__name__)

def compute_edge_weights(edge_ids, edge_probabilities, beta):
    """
    Convert edge probabilities to energies for the multicut problem.

    edge_ids:
        The list of edges in the graph. shape=(N, 2)
    edge_probabilities:
        1-D, float (1.0 means edge is CUT, disconnecting the two SPs)
    beta:
        scalar (float)

    Special behavior:
        If any node has ID 0, all of its edges will be given an
        artificially low energy, to prevent it from merging with its
        neighbors, regardless of what the edge_probabilities say.
    """
    p1 = edge_probabilities  # P(Edge=CUT)
    p1 = np.clip(p1, 0.001, 0.999)
    p0 = 1.0 - p1            # P(Edge=NOT CUT)
    edge_weights = np.log(p0 / p1) + np.log((1 - beta) / beta)

    # See note about special behavior, above
    edges_touching_zero = edge_ids[:, 0] == 0
    if edges_touching_zero.any():
        logger.warning("Volume contains label 0, which will be excluded from the segmentation.")
        MINIMUM_ENERGY = -1000.0
        edge_weights[edges_touching_zero] = MINIMUM_ENERGY
    return edge_weights
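A usage sketch (my addition): a likely-cut edge (p = 0.9) gets a negative weight, a likely-kept edge a positive one, and any edge touching node 0 is forced to the minimum energy.

import numpy as np

edge_ids = np.array([[1, 2], [2, 3], [0, 1]])
edge_probabilities = np.array([0.9, 0.1, 0.5])
print(compute_edge_weights(edge_ids, edge_probabilities, beta=0.5))
# ~ [-2.197, 2.197, -1000.]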
Example 15: all_GL

import numpy as np
from scipy.optimize import fminbound

def all_GL(self, q, maxpiv=None):
    """Return (piv, f_binodal_gas, f_binodal_liquid, f_spinodal_gas,
    f_spinodal_liquid) at insertion works piv sampled between the critical
    point and maxpiv (defaults to 2.2 * the critical insertion work)."""
    # self must provide critical_point, mu, pv, maxf, binodalGL and spinodalGL.
    fc, pivc = self.critical_point(q)
    Fc = np.log(fc)
    # start sensibly above the critical point
    startp = pivc * 1.1
    fm = fminbound(self.mu, fc, self.maxf(), args=(startp, q))
    fM = fminbound(lambda f: -self.pv(f, startp, q), 0, fc)
    initial_guess = np.log([0.5 * fM, 0.5 * (fm + self.maxf())])
    # construct the top of the GL binodal
    if maxpiv is None:
        maxpiv = startp * 2
    topp = 1. / np.linspace(1. / startp, 1. / maxpiv)
    topGL = [initial_guess]
    for piv in topp:
        topGL.append(self.binodalGL(piv, q, topGL[-1]))
    # construct the GL binodal between the starting piv and the critical point
    botp = np.linspace(startp, pivc)[:-1]
    botGL = [initial_guess]
    for piv in botp:
        botGL.append(self.binodalGL(piv, q, botGL[-1]))
    # join the two results and convert back from log
    binodal = np.vstack((
        [[pivc, fc, fc]],
        np.column_stack((botp, np.exp(botGL[1:])))[::-1],
        np.column_stack((topp, np.exp(topGL[1:])))[1:]
    ))
    # spinodal at the same pivs
    spinodal = self.spinodalGL(q, binodal[:, 0])
    # join everything
    return np.column_stack((binodal, spinodal[:, 1:]))