This article collects typical usage examples of the tensorflow.python.ops.math_ops.log function in Python. If you are wondering how the Python log function is used in practice, how it works, or what real-world examples look like, the curated code samples below may help.
The following presents 15 code examples of the log function, ordered by popularity by default.
Example 1: log_loss
def log_loss(predictions, labels=None, weights=1.0, epsilon=1e-7, scope=None):
  """Adds a Log Loss term to the training procedure.

  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of size
  [batch_size], then the total loss for each sample of the batch is rescaled
  by the corresponding element in the `weights` vector. If the shape of
  `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.

  Args:
    predictions: The predicted outputs.
    labels: The ground truth output tensor, same dimensions as `predictions`.
    weights: Coefficients for the loss: a scalar, a tensor of shape
      [batch_size], or a tensor whose shape matches `predictions`.
    epsilon: A small increment to add to avoid taking a log of zero.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels` or
      if the shape of `weights` is invalid.
  """
  with ops.name_scope(scope, "log_loss",
                      [predictions, labels, weights]) as scope:
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    losses = -math_ops.multiply(
        labels, math_ops.log(predictions + epsilon)) - math_ops.multiply(
            (1 - labels), math_ops.log(1 - predictions + epsilon))
    return compute_weighted_loss(losses, weights, scope=scope)
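For reference, the `losses` tensor above is plain element-wise binary cross-entropy. A minimal NumPy sketch of the same arithmetic (the arrays and epsilon below are hypothetical, not taken from the snippet):

import numpy as np

predictions = np.array([0.9, 0.2, 0.7])   # hypothetical model outputs
labels = np.array([1.0, 0.0, 1.0])        # hypothetical ground truth
epsilon = 1e-7

# Element-wise binary cross-entropy, mirroring the `losses` expression above.
losses = -(labels * np.log(predictions + epsilon)
           + (1 - labels) * np.log(1 - predictions + epsilon))
print(losses, losses.mean())  # unweighted reduction, analogous to weights=1.0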
Example 2: _inverse_log_det_jacobian
def _inverse_log_det_jacobian(self, y, use_saved_statistics=False):
  if not y.shape.is_fully_defined():
    raise ValueError("Input must have shape known at graph construction.")
  input_shape = np.int32(y.shape.as_list())

  if not self.batchnorm.built:
    # Create variables.
    self.batchnorm.build(input_shape)

  event_dims = self.batchnorm.axis
  reduction_axes = [i for i in range(len(input_shape)) if i not in event_dims]

  if use_saved_statistics or not self._training:
    log_variance = math_ops.log(
        self.batchnorm.moving_variance + self.batchnorm.epsilon)
  else:
    # At training time, the ildj is computed from the mean and log-variance
    # across the current minibatch.
    _, v = nn.moments(y, axes=reduction_axes, keepdims=True)
    log_variance = math_ops.log(v + self.batchnorm.epsilon)

  # `gamma` and `log Var(y)` reductions over event_dims.
  # Log(total change in area from gamma term).
  log_total_gamma = math_ops.reduce_sum(math_ops.log(self.batchnorm.gamma))

  # Log(total change in area from log-variance term).
  log_total_variance = math_ops.reduce_sum(log_variance)

  # The ildj is scalar, as it does not depend on the values of x; it is
  # constant across minibatch elements.
  return log_total_gamma - 0.5 * log_total_variance
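Because the result depends only on `gamma` and the chosen variance estimate, the returned scalar can be reproduced with a few lines of NumPy. A sketch assuming hypothetical per-feature parameters (NumPy is an assumption of this sketch, not part of the original snippet):

import numpy as np

gamma = np.array([0.9, 1.1, 1.3])            # hypothetical batch-norm scales
moving_variance = np.array([0.5, 2.0, 1.0])  # hypothetical moving variances
eps = 1e-3                                   # hypothetical batch-norm epsilon

log_variance = np.log(moving_variance + eps)
ildj = np.sum(np.log(gamma)) - 0.5 * np.sum(log_variance)
print(ildj)  # one scalar, shared by every minibatch element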
Example 3: _log_prob
def _log_prob(self, x):
  y = (x - self.mu) / self.sigma
  half_df = 0.5 * self.df
  return (math_ops.lgamma(0.5 + half_df) - math_ops.lgamma(half_df) -
          0.5 * math_ops.log(self.df) - 0.5 * math.log(math.pi) -
          math_ops.log(self.sigma) -
          (0.5 + half_df) * math_ops.log(1. + math_ops.square(y) / self.df))
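The expression is the standard log-density of a location-scale Student's t distribution. A quick sanity check against SciPy (the parameter values are hypothetical, and SciPy is an assumption of this sketch):

import numpy as np
from scipy import stats
from scipy.special import gammaln

df, mu, sigma, x = 4.0, 1.0, 2.0, 0.5   # hypothetical values
y = (x - mu) / sigma
half_df = 0.5 * df
log_pdf = (gammaln(0.5 + half_df) - gammaln(half_df)
           - 0.5 * np.log(df) - 0.5 * np.log(np.pi) - np.log(sigma)
           - (0.5 + half_df) * np.log(1. + y ** 2 / df))
print(log_pdf, stats.t.logpdf(x, df, loc=mu, scale=sigma))  # should agree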
Example 4: _sample_n
def _sample_n(self, n, seed=None):
  sample_shape = array_ops.concat(([n], array_ops.shape(self.logits)), 0)
  logits = self.logits * array_ops.ones(sample_shape)
  logits_2d = array_ops.reshape(logits, [-1, self.event_size])
  np_dtype = self.dtype.as_numpy_dtype
  # Uniform variates must be sampled from the interval (0, 1] rather than
  # [0, 1], as they are passed through log() to compute Gumbel variates.
  # We use np.finfo(np_dtype).tiny because it is the smallest positive
  # "normal" number, i.e. one whose mantissa has an implicit leading 1.
  # Normal, positive numbers x, y have the reasonable property that
  # x + y >= max(x, y). Using minval=np.nextafter(np.float32(0), 1) instead
  # can cause tf.random_uniform(dtype=tf.float32) to sample 0.
  uniform = random_ops.random_uniform(shape=array_ops.shape(logits_2d),
                                      minval=np.finfo(np_dtype).tiny,
                                      maxval=1,
                                      dtype=self.dtype,
                                      seed=seed)
  gumbel = -math_ops.log(-math_ops.log(uniform))
  noisy_logits = math_ops.div(gumbel + logits_2d, self._temperature_2d)
  samples = nn_ops.log_softmax(noisy_logits)
  ret = array_ops.reshape(samples, sample_shape)
  return ret
Example 5: log_prob
def log_prob(self, x, name="log_prob"):
  """Log prob of observations in `x` under these Gamma distribution(s).

  Args:
    x: tensor of dtype `dtype`, must be broadcastable with `alpha` and `beta`.
    name: The name to give this op.

  Returns:
    log_prob: tensor of dtype `dtype`, the log-PDFs of `x`.

  Raises:
    TypeError: if `x` and `alpha` are different dtypes.
  """
  with ops.name_scope(self.name):
    with ops.op_scope([self._alpha, self._beta, x], name):
      alpha = self._alpha
      beta = self._beta
      x = ops.convert_to_tensor(x)
      x = control_flow_ops.with_dependencies(
          [check_ops.assert_positive(x)] if self.strict else [], x)
      contrib_tensor_util.assert_same_float_dtype(tensors=[x],
                                                  dtype=self.dtype)
      return (alpha * math_ops.log(beta) + (alpha - 1) * math_ops.log(x) -
              beta * x - math_ops.lgamma(self._alpha))
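The returned expression is the Gamma log-density with shape `alpha` and rate `beta`. It can be cross-checked against SciPy, which parameterizes the Gamma by shape and scale (= 1 / rate); the numbers below are hypothetical:

import numpy as np
from scipy import stats
from scipy.special import gammaln

alpha, beta, x = 3.0, 2.0, 1.5   # hypothetical shape, rate, observation
log_prob = (alpha * np.log(beta) + (alpha - 1) * np.log(x)
            - beta * x - gammaln(alpha))
print(log_prob, stats.gamma.logpdf(x, a=alpha, scale=1.0 / beta))  # should agree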
Example 6: _BetaincGrad
def _BetaincGrad(op, grad):
  """Returns gradient of betainc(a, b, x) with respect to x."""
  # TODO(ebrevdo): Perhaps add the derivative w.r.t. a, b
  a, b, x = op.inputs

  # Two cases: x is a scalar and a/b are same-shaped tensors, or vice
  # versa; so it's sufficient to check against shape(a).
  sa = array_ops.shape(a)
  sx = array_ops.shape(x)
  # pylint: disable=protected-access
  _, rx = gen_array_ops._broadcast_gradient_args(sa, sx)
  # pylint: enable=protected-access

  # Perform operations in log space before summing, because terms
  # can grow large.
  log_beta = (
      gen_math_ops.lgamma(a) + gen_math_ops.lgamma(b) -
      gen_math_ops.lgamma(a + b))
  partial_x = math_ops.exp((b - 1) * math_ops.log(1 - x) +
                           (a - 1) * math_ops.log(x) - log_beta)

  # TODO(b/36815900): Mark None return values as NotImplemented
  return (
      None,  # da
      None,  # db
      array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
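`partial_x` is the integrand of the regularized incomplete beta function, i.e. d/dx betainc(a, b, x) = x^(a-1) * (1-x)^(b-1) / B(a, b). A finite-difference check with SciPy (hypothetical values; SciPy is an assumption of this sketch):

import numpy as np
from scipy.special import betainc, betaln

a, b, x = 2.0, 3.0, 0.4   # hypothetical values
analytic = np.exp((b - 1) * np.log(1 - x) + (a - 1) * np.log(x) - betaln(a, b))
h = 1e-6
numeric = (betainc(a, b, x + h) - betainc(a, b, x - h)) / (2 * h)
print(analytic, numeric)  # should agree to several digits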
Example 7: compute_step
def compute_step(x_val, geometric=False):
  if geometric:
    # Consider the geometric series where t_mul != 1:
    #   1 + t_mul + t_mul^2 + ... = (1 - t_mul^i_restart) / (1 - t_mul)
    # First find how many restarts were performed for a given x_val, i.e.
    # the maximal integer i_restart for which
    #   x_val >= (1 - t_mul^i_restart) / (1 - t_mul)
    #   x_val * (1 - t_mul) <= (1 - t_mul^i_restart)
    #   t_mul^i_restart <= 1 - x_val * (1 - t_mul)
    # TensorFlow only provides log with base e, so
    #   i_restart <= log(1 - x_val * (1 - t_mul)) / log(t_mul)

    # Find how many restarts were performed
    i_restart = math_ops.floor(
        math_ops.log(c_one - x_val * (c_one - t_mul)) / math_ops.log(t_mul))

    # Compute the sum of all restarts before the current one
    sum_r = (c_one - t_mul ** i_restart) / (c_one - t_mul)

    # Compute our position within the current restart
    x_val = (x_val - sum_r) / t_mul ** i_restart
  else:
    # Find how many restarts were performed
    i_restart = math_ops.floor(x_val)

    # Compute our position within the current restart
    x_val = x_val - i_restart

  return i_restart, x_val
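The geometric branch simply inverts the partial-sum formula of a geometric series to recover the number of completed restarts. A NumPy sketch with hypothetical `t_mul` and `x_val` (in the original these are tensors):

import numpy as np

t_mul, x_val = 2.0, 5.5   # hypothetical: restart periods of length 1, 2, 4, ...

# Invert 1 + t_mul + ... + t_mul**(i-1) = (1 - t_mul**i) / (1 - t_mul)
i_restart = np.floor(np.log(1 - x_val * (1 - t_mul)) / np.log(t_mul))
sum_r = (1 - t_mul ** i_restart) / (1 - t_mul)
frac = (x_val - sum_r) / t_mul ** i_restart
print(i_restart, frac)    # here: 2 completed restarts, 0.625 into the third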
Example 8: log_prob
def log_prob(self, counts, name="log_prob"):
  """`Log(P[counts])`, computed for every batch member.

  For each batch member of counts `k`, `P[counts]` is the probability that
  after sampling `n` draws from this Binomial distribution, the number of
  successes is `k`. Note that different sequences of draws can result in the
  same counts, thus the probability includes a combinatorial coefficient.

  Args:
    counts: Non-negative tensor with dtype `dtype` and whose shape can be
      broadcast with `self.p` and `self.n`. `counts` is only legal if it is
      less than or equal to `n` and its components are equal to integer
      values.
    name: Name to give this Op, defaults to "log_prob".

  Returns:
    Log probabilities for each record, shape `[N1,...,Nm]`.
  """
  n = self._n
  p = self._p
  with ops.name_scope(self.name):
    with ops.name_scope(name, values=[self._n, self._p, counts]):
      counts = self._check_counts(counts)
      prob_prob = counts * math_ops.log(p) + (
          n - counts) * math_ops.log(1 - p)
      combinations = math_ops.lgamma(n + 1) - math_ops.lgamma(
          counts + 1) - math_ops.lgamma(n - counts + 1)
      log_prob = prob_prob + combinations
      return log_prob
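The two terms are the log of p^k * (1-p)^(n-k) and the log binomial coefficient written with `lgamma`. A SciPy cross-check with hypothetical numbers:

import numpy as np
from scipy import stats
from scipy.special import gammaln

n, p, counts = 10.0, 0.3, 4.0   # hypothetical parameters and observation
log_prob = (counts * np.log(p) + (n - counts) * np.log(1 - p)
            + gammaln(n + 1) - gammaln(counts + 1) - gammaln(n - counts + 1))
print(log_prob, stats.binom.logpmf(counts, n, p))  # should agree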
Example 9: _inverse
def _inverse(self, y):
  y = self._maybe_assert_valid_y(y)
  if self.power == 0.:
    return math_ops.log(y)
  # If large y accuracy is an issue, consider using:
  # (y**self.power - 1.) / self.power when y >> 1.
  return math_ops.expm1(math_ops.log(y) * self.power) / self.power
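This is the inverse of the power transform y = (1 + power * x)^(1 / power), which degenerates to exp for power = 0; `expm1(log(y) * power) / power` is a numerically safer spelling of `(y**power - 1) / power`. A NumPy round-trip with hypothetical values:

import numpy as np

power, x = 0.5, 1.3                      # hypothetical power and input
y = (1.0 + power * x) ** (1.0 / power)   # forward transform
x_back = np.expm1(np.log(y) * power) / power
print(x, x_back)                         # round-trip should recover x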
Example 10: _forward_log_det_jacobian
def _forward_log_det_jacobian(self, x):
  x = self._maybe_assert_valid_x(x)
  return (
      -(x / self.scale) ** self.concentration +
      (self.concentration - 1) * math_ops.log(x) +
      math_ops.log(self.concentration) -
      self.concentration * math_ops.log(self.scale))
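This matches the log-derivative of the Weibull CDF y = 1 - exp(-(x / scale)^concentration), which is the forward transform here. A finite-difference check in NumPy with hypothetical parameters:

import numpy as np

concentration, scale, x = 2.0, 1.5, 0.8   # hypothetical values

def forward(t):
    # Weibull CDF, i.e. the forward transform of this bijector
    return 1.0 - np.exp(-((t / scale) ** concentration))

fldj = (-(x / scale) ** concentration
        + (concentration - 1) * np.log(x)
        + np.log(concentration)
        - concentration * np.log(scale))
h = 1e-6
numeric = np.log((forward(x + h) - forward(x - h)) / (2 * h))
print(fldj, numeric)                      # should agree closely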
Example 11: _sample_n
def _sample_n(self, n, seed=None):
  shape = array_ops.concat(0, ([n], array_ops.shape(self.mean())))
  np_dtype = self.dtype.as_numpy_dtype()
  minval = np.nextafter(np_dtype(0), np_dtype(1))
  uniform = random_ops.random_uniform(shape=shape, minval=minval, maxval=1,
                                      dtype=self.dtype, seed=seed)
  sampled = -math_ops.log(-math_ops.log(uniform))
  return sampled * self.scale + self.loc
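Same trick as in Example 4: `-log(-log(U))` of a uniform on (0, 1] is a standard Gumbel variate, which is then shifted and scaled. A NumPy check of the sample mean against the analytic mean loc + scale * Euler-Mascheroni (the parameters are hypothetical):

import numpy as np

loc, scale = 1.0, 2.0   # hypothetical location and scale
rng = np.random.default_rng(0)
u = rng.uniform(np.finfo(np.float64).tiny, 1.0, size=200000)
samples = -np.log(-np.log(u)) * scale + loc
print(samples.mean(), loc + scale * np.euler_gamma)  # should be close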
Example 12: logloss
def logloss(y_true, y_pred):
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  losses = math_ops.multiply(y_true, math_ops.log(y_pred + K.epsilon()))
  losses += math_ops.multiply((1 - y_true),
                              math_ops.log(1 - y_pred + K.epsilon()))
  return K.mean(-losses, axis=-1)
Example 13: _phi
def _phi(r, order):
  """Coordinate-wise nonlinearity used to define the order of the interpolation.

  See https://en.wikipedia.org/wiki/Polyharmonic_spline for the definition.

  Args:
    r: input op
    order: interpolation order

  Returns:
    phi_k evaluated coordinate-wise on r, for k = order
  """
  # Using EPSILON prevents log(0), sqrt(0), etc.
  # sqrt(0) is well-defined, but its gradient is not.
  with ops.name_scope('phi'):
    if order == 1:
      r = math_ops.maximum(r, EPSILON)
      r = math_ops.sqrt(r)
      return r
    elif order == 2:
      return 0.5 * r * math_ops.log(math_ops.maximum(r, EPSILON))
    elif order == 4:
      return 0.5 * math_ops.square(r) * math_ops.log(
          math_ops.maximum(r, EPSILON))
    elif order % 2 == 0:
      r = math_ops.maximum(r, EPSILON)
      return 0.5 * math_ops.pow(r, 0.5 * order) * math_ops.log(r)
    else:
      r = math_ops.maximum(r, EPSILON)
      return math_ops.pow(r, 0.5 * order)
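A NumPy sketch of the same piecewise kernel, handy for plotting or quick checks outside a TensorFlow graph (EPSILON here is a hypothetical stand-in for the module-level constant):

import numpy as np

EPSILON = 1e-10   # hypothetical stand-in for the module constant

def phi(r, order):
    """NumPy version of the piecewise polyharmonic nonlinearity above."""
    if order == 1:
        return np.sqrt(np.maximum(r, EPSILON))
    elif order == 2:
        return 0.5 * r * np.log(np.maximum(r, EPSILON))
    elif order == 4:
        return 0.5 * np.square(r) * np.log(np.maximum(r, EPSILON))
    elif order % 2 == 0:
        r = np.maximum(r, EPSILON)
        return 0.5 * np.power(r, 0.5 * order) * np.log(r)
    else:
        return np.power(np.maximum(r, EPSILON), 0.5 * order)

r = np.linspace(0.0, 2.0, 5)
print(phi(r, 2))   # e.g. the thin-plate spline kernel 0.5 * r * log(r)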
Example 14: log_prob
def log_prob(self, x, name="log_prob"):
  """`Log(P[counts])`, computed for every batch member.

  Args:
    x: Non-negative floating point tensor whose shape can
      be broadcast with `self.a` and `self.b`. For fixed leading
      dimensions, the last dimension represents counts for the corresponding
      Beta distribution in `self.a` and `self.b`. `x` is only legal if
      0 < x < 1.
    name: Name to give this Op, defaults to "log_prob".

  Returns:
    Log probabilities for each record, shape `[N1,...,Nm]`.
  """
  a = self._a
  b = self._b
  with ops.name_scope(self.name):
    with ops.name_scope(name, values=[a, x]):
      x = self._check_x(x)
      unnorm_pdf = (a - 1) * math_ops.log(x) + (
          b - 1) * math_ops.log(1 - x)
      normalization_factor = -(math_ops.lgamma(a) + math_ops.lgamma(b)
                               - math_ops.lgamma(a + b))
      log_prob = unnorm_pdf + normalization_factor
      return log_prob
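The unnormalized term plus the log beta-function normalizer is the usual Beta log-density. A SciPy cross-check with hypothetical numbers:

import numpy as np
from scipy import stats
from scipy.special import gammaln

a, b, x = 2.0, 5.0, 0.3   # hypothetical parameters, with 0 < x < 1
log_prob = ((a - 1) * np.log(x) + (b - 1) * np.log(1 - x)
            - (gammaln(a) + gammaln(b) - gammaln(a + b)))
print(log_prob, stats.beta.logpdf(x, a, b))  # should agree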
Example 15: _kl_gamma_gamma
def _kl_gamma_gamma(g0, g1, name=None):
  """Calculate the batched KL divergence KL(g0 || g1) with g0 and g1 Gamma.

  Args:
    g0: instance of a Gamma distribution object.
    g1: instance of a Gamma distribution object.
    name: (optional) Name to use for created operations.
      Default is "kl_gamma_gamma".

  Returns:
    kl_gamma_gamma: `Tensor`. The batchwise KL(g0 || g1).
  """
  with ops.name_scope(name, "kl_gamma_gamma", values=[
      g0.concentration, g0.rate, g1.concentration, g1.rate]):
    # Result from:
    #   http://www.fil.ion.ucl.ac.uk/~wpenny/publications/densities.ps
    # For derivation see:
    #   http://stats.stackexchange.com/questions/11646/kullback-leibler-divergence-between-two-gamma-distributions  pylint: disable=line-too-long
    return (((g0.concentration - g1.concentration)
             * math_ops.digamma(g0.concentration))
            + math_ops.lgamma(g1.concentration)
            - math_ops.lgamma(g0.concentration)
            + g1.concentration * math_ops.log(g0.rate)
            - g1.concentration * math_ops.log(g1.rate)
            + g0.concentration * (g1.rate / g0.rate - 1.))
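The closed form can be sanity-checked against a Monte Carlo estimate of E_{g0}[log g0(x) - log g1(x)]. A NumPy/SciPy sketch with hypothetical concentration and rate values:

import numpy as np
from scipy import stats
from scipy.special import digamma, gammaln

a0, b0 = 3.0, 2.0   # hypothetical concentration and rate of g0
a1, b1 = 2.0, 1.0   # hypothetical concentration and rate of g1

analytic = ((a0 - a1) * digamma(a0)
            + gammaln(a1) - gammaln(a0)
            + a1 * np.log(b0) - a1 * np.log(b1)
            + a0 * (b1 / b0 - 1.0))

rng = np.random.default_rng(0)
x = rng.gamma(shape=a0, scale=1.0 / b0, size=200000)
mc = np.mean(stats.gamma.logpdf(x, a=a0, scale=1.0 / b0)
             - stats.gamma.logpdf(x, a=a1, scale=1.0 / b1))
print(analytic, mc)   # Monte Carlo estimate should be close to the closed form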