This article collects typical usage examples of the Python function tensorflow.python.ops.math_ops.log1p. If you are wondering what log1p does, how to call it, or what real-world uses look like, the curated code examples below should help.
A total of 15 code examples of the log1p function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
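Before the examples, it is worth noting why log1p exists at all: for x near zero, computing 1. + x rounds away the low-order bits of x before the logarithm is taken, whereas log1p(x) evaluates log(1 + x) without explicitly forming 1 + x. A minimal NumPy sketch of the difference (NumPy is used here purely for illustration; the TensorFlow op behaves the same way):

import numpy as np

x = 1e-18
# Naive form: 1.0 + 1e-18 rounds to exactly 1.0 in float64, so the result
# is log(1.0) = 0.0 and all information about x is lost.
print(np.log(1. + x))   # 0.0
# log1p avoids forming 1 + x, preserving the small-x behavior log(1+x) ~ x.
print(np.log1p(x))      # 1e-18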
Example 1: _inverse_log_det_jacobian
def _inverse_log_det_jacobian(self, y):
  y = self._maybe_assert_valid_y(y)
  event_dims = self._event_dims_tensor(y)
  return math_ops.reduce_sum(
      -math_ops.log1p(-y) +
      (1 / self.concentration - 1) * math_ops.log(-math_ops.log1p(-y)) +
      math_ops.log(self.scale / self.concentration),
      axis=event_dims)
Example 2: _call_log_survival_function
def _call_log_survival_function(self, value, name, **kwargs):
  with self._name_scope(name, values=[value]):
    value = ops.convert_to_tensor(value, name="value")
    try:
      return self._log_survival_function(value, **kwargs)
    except NotImplementedError:
      return math_ops.log1p(-self.cdf(value, **kwargs))
Example 3: _forward
def _forward(self, x):
  x = self._maybe_assert_valid_x(x)
  if self.power == 0.:
    return math_ops.exp(x)
  # If large x accuracy is an issue, consider using:
  # (1. + x * self.power)**(1. / self.power) when x >> 1.
  return math_ops.exp(math_ops.log1p(x * self.power) / self.power)
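As a quick sanity check on the identity used in the final line, here is a NumPy sketch (my own illustration, not part of the bijector) confirming that exp(log1p(x * c) / c) equals the direct form (1 + x * c)**(1 / c), and that the c -> 0 limit recovers exp(x):

import numpy as np

x, c = 0.5, 0.25
stable = np.exp(np.log1p(x * c) / c)   # form used in _forward
direct = (1. + x * c) ** (1. / c)      # algebraically identical
print(np.allclose(stable, direct))     # True
# As the power c approaches 0, (1 + x*c)**(1/c) -> exp(x).
print(np.exp(np.log1p(x * 1e-8) / 1e-8), np.exp(x))  # both ~1.6487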
Example 4: _inverse_log_det_jacobian
def _inverse_log_det_jacobian(self, y):
  y = self._maybe_assert_valid(y)
  event_dims = self._event_dims_tensor(y)
  return math_ops.reduce_sum(
      math_ops.log(self.concentration1) + math_ops.log(self.concentration0) +
      (self.concentration1 - 1) * math_ops.log(y) +
      (self.concentration0 - 1) * math_ops.log1p(-y**self.concentration1),
      axis=event_dims)
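The sum above is the log-derivative of an inverse map of the form y -> 1 - (1 - y**concentration1)**concentration0 (the Kumaraswamy CDF), i.e. the Kumaraswamy log-density. A finite-difference check in NumPy (my own sketch; a, b, and inverse are illustrative stand-ins for concentration1, concentration0, and the bijector's inverse):

import numpy as np

a, b = 2.0, 3.0                 # stand-ins for concentration1, concentration0
def inverse(y):                 # hypothetical inverse map: Kumaraswamy CDF
  return 1. - (1. - y**a) ** b

y, eps = 0.4, 1e-6
# The ILDJ should equal log|d inverse(y) / dy|, the Kumaraswamy log-pdf.
fd = np.log((inverse(y + eps) - inverse(y - eps)) / (2. * eps))
ildj = (np.log(a) + np.log(b) + (a - 1.) * np.log(y) +
        (b - 1.) * np.log1p(-y**a))
print(fd, ildj)                 # both ~0.5268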
Example 5: _forward_log_det_jacobian
def _forward_log_det_jacobian(self, x):
  x = self._maybe_assert_valid_x(x)
  event_dims = self._event_dims_tensor(x)
  if self.power == 0.:
    return math_ops.reduce_sum(x, axis=event_dims)
  return (1. / self.power - 1.) * math_ops.reduce_sum(
      math_ops.log1p(x * self.power),
      axis=event_dims)
Example 6: __init__
def __init__(self,
             temperature,
             logits=None,
             probs=None,
             validate_args=False,
             allow_nan_stats=True,
             name="RelaxedBernoulli"):
  """Construct RelaxedBernoulli distributions.

  Args:
    temperature: A 0-D `Tensor`, representing the temperature of a set of
      RelaxedBernoulli distributions. The temperature should be positive.
    logits: An N-D `Tensor` representing the log-odds of a positive event.
      Each entry in the `Tensor` parameterizes an independent
      RelaxedBernoulli distribution where the probability of an event is
      sigmoid(logits). Only one of `logits` or `probs` should be passed in.
    probs: An N-D `Tensor` representing the probability of a positive event.
      Each entry in the `Tensor` parameterizes an independent Bernoulli
      distribution. Only one of `logits` or `probs` should be passed in.
    validate_args: Python `Boolean`, default `False`. When `True`,
      distribution parameters are checked for validity despite possibly
      degrading runtime performance. When `False`, invalid inputs may
      silently render incorrect outputs.
    allow_nan_stats: Python `Boolean`, default `True`. When `True`,
      statistics (e.g., mean, mode, variance) use the value "`NaN`" to
      indicate the result is undefined. When `False`, an exception is
      raised if one or more of the statistic's batch members are undefined.
    name: `String` name prefixed to Ops created by this class.

  Raises:
    ValueError: If both `probs` and `logits` are passed, or if neither.
  """
  parameters = locals()
  with ops.name_scope(name, values=[logits, probs, temperature]) as ns:
    with ops.control_dependencies([check_ops.assert_positive(temperature)]
                                  if validate_args else []):
      self._temperature = array_ops.identity(temperature, name="temperature")
    self._logits, self._probs = distribution_util.get_logits_and_probs(
        logits=logits, probs=probs, validate_args=validate_args)
    dist = logistic.Logistic(self._logits / self._temperature,
                             1. / self._temperature,
                             validate_args=validate_args,
                             allow_nan_stats=allow_nan_stats,
                             name=ns)
    self._parameters = parameters

    def inverse_log_det_jacobian_fn(y):
      return -math_ops.log(y) - math_ops.log1p(-y)

    sigmoid_bijector = bijector.Inline(
        forward_fn=math_ops.sigmoid,
        inverse_fn=(lambda y: math_ops.log(y) - math_ops.log1p(-y)),
        inverse_log_det_jacobian_fn=inverse_log_det_jacobian_fn,
        name="sigmoid")
  super(RelaxedBernoulli, self).__init__(dist, sigmoid_bijector, name=name)
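The inverse_fn above is just the logit function, and inverse_log_det_jacobian_fn follows from its derivative. A NumPy check of both identities (illustration only, not part of the class):

import numpy as np

y = np.array([0.1, 0.5, 0.9])
# inverse_fn: log(y) - log1p(-y) == log(y / (1 - y)) == logit(y).
print(np.allclose(np.log(y) - np.log1p(-y), np.log(y / (1. - y))))  # True
# d/dy logit(y) = 1 / (y * (1 - y)), so its log is -log(y) - log(1 - y),
# matching inverse_log_det_jacobian_fn above.
print(np.allclose(-np.log(y) - np.log1p(-y),
                  np.log(1. / (y * (1. - y)))))                      # True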
Example 7: _forward_log_det_jacobian
def _forward_log_det_jacobian(self, x):
  # y = sinh((arcsinh(x) + skewness) * tailweight)
  # Using sinh' = cosh, arcsinh'(x) = 1 / sqrt(x**2 + 1),
  # dy/dx
  # = cosh((arcsinh(x) + skewness) * tailweight) * tailweight / sqrt(x**2 + 1)
  event_dims = self._event_dims_tensor(x)
  return math_ops.reduce_sum(
      log_cosh((arcsinh(x) + self.skewness) * self.tailweight) +
      math_ops.log(self.tailweight) - 0.5 * math_ops.log1p(x**2),
      axis=event_dims)
Example 8: _inverse_log_det_jacobian
def _inverse_log_det_jacobian(self, y):
  # x = sinh(arcsinh(y) / tailweight - skewness)
  # Using sinh' = cosh, arcsinh'(y) = 1 / sqrt(y**2 + 1),
  # dx/dy
  # = cosh(arcsinh(y) / tailweight - skewness)
  #   / (tailweight * sqrt(y**2 + 1))
  event_dims = self._event_dims_tensor(y)
  return math_ops.reduce_sum(
      log_cosh(arcsinh(y) / self.tailweight - self.skewness) -
      math_ops.log(self.tailweight) - 0.5 * math_ops.log1p(y**2),
      axis=event_dims)
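Examples 7 and 8 rely on log_cosh and arcsinh helpers defined elsewhere in the module these snippets were taken from. Below is a plausible sketch of such helpers, written as hypothetical NumPy reimplementations for illustration (the actual TensorFlow versions would be built from math_ops primitives):

import numpy as np

def arcsinh(x):
  # Inverse hyperbolic sine: log(x + sqrt(x**2 + 1)).
  # (NumPy already provides this as np.arcsinh.)
  return np.log(x + np.sqrt(x**2 + 1.))

def log_cosh(x):
  # log(cosh(x)) computed without overflowing cosh for large |x|:
  # cosh(x) = exp(|x|) * (1 + exp(-2|x|)) / 2.
  ax = np.abs(x)
  return ax + np.log1p(np.exp(-2. * ax)) - np.log(2.)

x = np.linspace(-5., 5., 11)
print(np.allclose(arcsinh(x), np.arcsinh(x)))        # True
print(np.allclose(log_cosh(x), np.log(np.cosh(x))))  # True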
Example 9: _call_log_survival_function
def _call_log_survival_function(self, value, name, **kwargs):
  with self._name_scope(name, values=[value]):
    value = _convert_to_tensor(
        value, name="value", preferred_dtype=self.dtype)
    try:
      return self._log_survival_function(value, **kwargs)
    except NotImplementedError as original_exception:
      try:
        return math_ops.log1p(-self.cdf(value, **kwargs))
      except NotImplementedError:
        raise original_exception
Example 10: _log_prob
def _log_prob(self, counts):
  if self.validate_args:
    counts = distribution_util.embed_check_nonnegative_discrete(
        counts, check_integer=True)
  counts *= array_ops.ones_like(self.probs)
  probs = self.probs * array_ops.ones_like(counts)
  safe_domain = array_ops.where(
      math_ops.equal(counts, 0.),
      array_ops.zeros_like(probs),
      probs)
  return counts * math_ops.log1p(-safe_domain) + math_ops.log(probs)
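The safe_domain indirection above guards corner cases such as probs == 1 with counts == 0: log1p(-1.) is -inf, and 0 * -inf is NaN, even though the term should contribute 0. A small NumPy illustration of the failure mode being avoided (illustration only):

import numpy as np

counts, probs = 0., 1.
# Naive: 0 * log1p(-1) = 0 * -inf = nan, though the term should be 0.
print(counts * np.log1p(-probs))   # nan
# With the where-style guard, the log1p argument is replaced by 0 when
# counts == 0, so the product is a well-defined 0 * log1p(0) = 0.
safe = 0. if counts == 0. else probs
print(counts * np.log1p(-safe))    # 0.0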
Example 11: _cdf
def _cdf(self, x):
  if self.validate_args:
    x = distribution_util.embed_check_nonnegative_integer_form(x)
  else:
    # Whether or not x is integer-form, the following is well-defined.
    # However, scipy takes the floor, so we do too.
    x = math_ops.floor(x)
  x *= array_ops.ones_like(self.probs)
  return array_ops.where(
      x < 0.,
      array_ops.zeros_like(x),
      -math_ops.expm1((1. + x) * math_ops.log1p(-self.probs)))
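The returned expression is the Geometric CDF 1 - (1 - p)**(x + 1) rewritten for numerical stability. A NumPy check (illustration only) that the expm1/log1p form matches the textbook form, and that it keeps working where the naive form underflows:

import numpy as np

p, x = 0.3, 4.
naive = 1. - (1. - p) ** (x + 1.)
stable = -np.expm1((1. + x) * np.log1p(-p))
print(naive, stable)                       # both ~0.83193
# The stable form matters when p is tiny: 1 - p rounds to exactly 1.0
# below, so the naive CDF collapses to 0.
p = 1e-18
print(1. - (1. - p) ** (x + 1.))           # 0.0
print(-np.expm1((1. + x) * np.log1p(-p)))  # ~5e-18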
Example 12: _cdf
def _cdf(self, counts):
  if self.validate_args:
    # We set `check_integer=False` since the CDF is defined on the whole
    # real line.
    counts = math_ops.floor(
        distribution_util.embed_check_nonnegative_discrete(
            counts, check_integer=False))
  counts *= array_ops.ones_like(self.probs)
  return array_ops.where(
      counts < 0.,
      array_ops.zeros_like(counts),
      -math_ops.expm1(
          (counts + 1) * math_ops.log1p(-self.probs)))
Example 13: _log_prob
def _log_prob(self, x):
  if self.validate_args:
    x = distribution_util.embed_check_nonnegative_integer_form(x)
  else:
    # For consistency with cdf, we take the floor.
    x = math_ops.floor(x)
  x *= array_ops.ones_like(self.probs)
  probs = self.probs * array_ops.ones_like(x)
  safe_domain = array_ops.where(
      math_ops.equal(x, 0.),
      array_ops.zeros_like(probs),
      probs)
  return x * math_ops.log1p(-safe_domain) + math_ops.log(probs)
Example 14: _sample_n
def _sample_n(self, n, seed=None):
  # Uniform variates must be sampled from the open interval `(0, 1)` rather
  # than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
  # because it is the smallest, positive, "normal" number. A "normal" number
  # is such that the mantissa has an implicit leading 1. Normal, positive
  # numbers x, y have the reasonable property that `x + y >= max(x, y)`. In
  # this case, a subnormal number (i.e., np.nextafter) can cause us to sample
  # 0.
  uniform = random_ops.random_uniform(
      shape=array_ops.concat([[n], self.batch_shape_tensor()], 0),
      minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
      maxval=1.,
      dtype=self.dtype,
      seed=seed)
  sampled = math_ops.log(uniform) - math_ops.log1p(-1. * uniform)
  return sampled * self.scale + self.loc
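Example 14 is inverse-CDF sampling from a Logistic distribution: if U ~ Uniform(0, 1), then loc + scale * log(U / (1 - U)) is Logistic(loc, scale). A hedged NumPy sketch of the same recipe (variable names are mine, not the class's):

import numpy as np

rng = np.random.default_rng(0)
loc, scale, n = 2., 0.5, 100000
# Open interval (0, 1): the logistic quantile diverges at 0 and 1.
u = rng.uniform(np.finfo(np.float64).tiny, 1., size=n)
samples = loc + scale * (np.log(u) - np.log1p(-u))  # logistic quantile
# Logistic(loc, scale) has mean loc and variance (pi * scale)**2 / 3.
print(samples.mean())                               # ~2.0
print(samples.var(), (np.pi * scale) ** 2 / 3.)     # both ~0.822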
Example 15: _sample_n
def _sample_n(self, n, seed=None):
  shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
  # Uniform variates must be sampled from the open interval `(-1, 1)` rather
  # than `[-1, 1)`. In the case of `(0, 1)` we'd use
  # `np.finfo(self.dtype.as_numpy_dtype).tiny` because it is the smallest,
  # positive, "normal" number. However, the concept of subnormality exists
  # only at zero; here we need the smallest usable number larger than -1,
  # i.e., `-1 + eps/2`.
  uniform_samples = random_ops.random_uniform(
      shape=shape,
      minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                          self.dtype.as_numpy_dtype(0.)),
      maxval=1.,
      dtype=self.dtype,
      seed=seed)
  return (self.loc - self.scale * math_ops.sign(uniform_samples) *
          math_ops.log1p(-math_ops.abs(uniform_samples)))
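Example 15 is the standard inverse-CDF recipe for the Laplace distribution: with V ~ Uniform(-1, 1), loc - scale * sign(V) * log(1 - |V|) is Laplace(loc, scale). A hedged NumPy sketch (illustration only):

import numpy as np

rng = np.random.default_rng(0)
loc, scale, n = 0., 1., 100000
# Open interval (-1, 1): log1p(-|v|) diverges as |v| -> 1.
v = rng.uniform(np.nextafter(-1., 0.), 1., size=n)
samples = loc - scale * np.sign(v) * np.log1p(-np.abs(v))
# Laplace(loc, scale) has mean loc and variance 2 * scale**2.
print(samples.mean())   # ~0.0
print(samples.var())    # ~2.0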