This article collects typical usage examples of the tensorflow.python.ops.math_ops.exp method in Python. If you are wondering exactly how math_ops.exp is used, or are looking for working examples of it, the curated code samples below should help. You can also explore further usage examples of the module it belongs to, tensorflow.python.ops.math_ops.
The following presents 15 code examples of the math_ops.exp method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: _BetaincGrad
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import exp [as alias]
def _BetaincGrad(op, grad):
"""Returns gradient of betainc(a, b, x) with respect to x."""
# TODO(ebrevdo): Perhaps add the derivative w.r.t. a, b
a, b, x = op.inputs
# two cases: x is a scalar and a/b are same-shaped tensors, or vice
# versa; so it's sufficient to check against shape(a).
sa = array_ops.shape(a)
sx = array_ops.shape(x)
# pylint: disable=protected-access
_, rx = gen_array_ops._broadcast_gradient_args(sa, sx)
# pylint: enable=protected-access
# Perform operations in log space before summing, because terms
# can grow large.
log_beta = (gen_math_ops.lgamma(a) + gen_math_ops.lgamma(b)
- gen_math_ops.lgamma(a + b))
partial_x = math_ops.exp(
(b - 1) * math_ops.log(1 - x) + (a - 1) * math_ops.log(x) - log_beta)
# TODO(b/36815900): Mark None return values as NotImplemented
return (None, # da
None, # db
array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
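The log-space computation above evaluates the integrand of the regularized incomplete beta function, x**(a-1) * (1-x)**(b-1) / B(a, b), which is the derivative of betainc(a, b, x) with respect to x. A minimal NumPy/SciPy sketch (not part of the TensorFlow source) that checks this identity against a finite difference:

import numpy as np
from scipy import special

a, b, x = 2.5, 3.0, 0.4
eps = 1e-6

# Finite-difference derivative of the regularized incomplete beta function.
numeric = (special.betainc(a, b, x + eps) - special.betainc(a, b, x - eps)) / (2 * eps)

# The same quantity computed in log space, mirroring _BetaincGrad above.
log_beta = special.gammaln(a) + special.gammaln(b) - special.gammaln(a + b)
analytic = np.exp((b - 1) * np.log1p(-x) + (a - 1) * np.log(x) - log_beta)

print(numeric, analytic)  # both values should closely agree (~1.793)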
Example 2: _log_prob_with_logsf_and_logcdf
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import exp [as alias]
def _log_prob_with_logsf_and_logcdf(self, y):
"""Compute log_prob(y) using log survival_function and cdf together."""
# There are two options that would be equal if we had infinite precision:
# Log[ sf(y - 1) - sf(y) ]
# = Log[ exp{logsf(y - 1)} - exp{logsf(y)} ]
# Log[ cdf(y) - cdf(y - 1) ]
# = Log[ exp{logcdf(y)} - exp{logcdf(y - 1)} ]
logsf_y = self.log_survival_function(y)
logsf_y_minus_1 = self.log_survival_function(y - 1)
logcdf_y = self.log_cdf(y)
logcdf_y_minus_1 = self.log_cdf(y - 1)
# Important: Here we use select in a way such that no input is inf, this
# prevents the troublesome case where the output of select can be finite,
# but the output of grad(select) will be NaN.
# In either case, we are doing Log[ exp{big} - exp{small} ]
# We want to use the sf items precisely when we are on the right side of the
# median, which occurs when logsf_y < logcdf_y.
big = array_ops.where(logsf_y < logcdf_y, logsf_y_minus_1, logcdf_y)
small = array_ops.where(logsf_y < logcdf_y, logsf_y, logcdf_y_minus_1)
return _logsum_expbig_minus_expsmall(big, small)
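The helper _logsum_expbig_minus_expsmall is not shown in this snippet; it evaluates Log[exp(big) - exp(small)] without overflow. A plausible sketch of such a helper (an assumption about its behaviour, not the original implementation), built on the identity log(exp(big) - exp(small)) = big + log1p(-exp(small - big)):

import numpy as np

def logsum_expbig_minus_expsmall(big, small):
    """Stable log(exp(big) - exp(small)), assuming big >= small elementwise."""
    # exp(small - big) lies in (0, 1], so log1p stays well conditioned even
    # when big and small are both large and nearly equal.
    return big + np.log1p(-np.exp(small - big))

# Quick check against the naive formula in a benign regime: both print log(0.5).
print(logsum_expbig_minus_expsmall(np.log(0.7), np.log(0.2)), np.log(0.5))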
Example 3: _sample_n
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import exp [as alias]
def _sample_n(self, n, seed=None):
# Here we use the fact that if:
# lam ~ Gamma(concentration=total_count, rate=(1-probs)/probs)
# then X ~ Poisson(lam) is Negative Binomially distributed.
rate = random_ops.random_gamma(
shape=[n],
alpha=self.total_count,
beta=math_ops.exp(-self.logits),
dtype=self.dtype,
seed=seed)
return random_ops.random_poisson(
rate,
shape=[],
dtype=self.dtype,
seed=distribution_util.gen_new_seed(seed, "negative_binom"))
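The sampler uses the Gamma-Poisson mixture representation of the negative binomial: if lam ~ Gamma(total_count, rate=(1 - p)/p) and X | lam ~ Poisson(lam), then X ~ NegativeBinomial(total_count, p). A rough NumPy sketch (illustrative only) that checks the resulting mean and variance against the closed forms total_count*p/(1-p) and total_count*p/(1-p)**2:

import numpy as np

rng = np.random.default_rng(0)
total_count, probs = 5.0, 0.3        # r and the success probability p
rate = (1.0 - probs) / probs         # plays the role of exp(-logits) above

lam = rng.gamma(shape=total_count, scale=1.0 / rate, size=200_000)
x = rng.poisson(lam)

print(x.mean(), total_count * probs / (1 - probs))        # ~2.14 vs 2.142857...
print(x.var(), total_count * probs / (1 - probs) ** 2)    # ~3.06 vs 3.061224...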
Example 4: prob
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import exp [as alias]
def prob(self, value, name="prob", **condition_kwargs):
"""Probability density/mass function (depending on `is_continuous`).
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._prob(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_prob(value, **condition_kwargs))
except NotImplementedError:
raise original_exception
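The method is a fallback: a subclass only has to implement _log_prob, and prob is recovered as exp(log_prob). A stripped-down sketch of the same dispatch (hypothetical class names, not the TensorFlow API):

import math

class MinimalDistribution:
    """Toy base class mimicking the prob / _log_prob fallback above."""

    def prob(self, value):
        try:
            return self._prob(value)
        except NotImplementedError as original_exception:
            try:
                return math.exp(self._log_prob(value))
            except NotImplementedError:
                raise original_exception

    def _prob(self, value):
        raise NotImplementedError

    def _log_prob(self, value):
        raise NotImplementedError

class UnitExponential(MinimalDistribution):
    # Only the log-density is implemented; prob falls back to exp(log_prob).
    def _log_prob(self, value):
        return -value

print(UnitExponential().prob(1.0))  # exp(-1) ~= 0.3679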
Example 5: _adaptive_max_norm
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import exp [as alias]
def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
"""Find max_norm given norm and previous average."""
with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]):
log_norm = math_ops.log(norm + epsilon)
def moving_average(name, value, decay):
moving_average_variable = vs.get_variable(
name,
shape=value.get_shape(),
dtype=value.dtype,
initializer=init_ops.zeros_initializer(),
trainable=False)
return moving_averages.assign_moving_average(
moving_average_variable, value, decay, zero_debias=False)
# quicker adaptation at the beginning
if global_step is not None:
n = math_ops.cast(global_step, dtypes.float32)
decay = math_ops.minimum(decay, n / (n + 1.))
# update averages
mean = moving_average("mean", log_norm, decay)
sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)
variance = sq_mean - math_ops.square(mean)
std = math_ops.sqrt(math_ops.maximum(epsilon, variance))
max_norms = math_ops.exp(mean + std_factor * std)
return max_norms, mean
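The threshold is computed in log space: exponential moving averages of log(norm) and of its square give a running mean and standard deviation, and the clipping value is exp(mean + std_factor * std). A small NumPy sketch of the same arithmetic over a stream of norms (illustrative; it omits the TensorFlow variable and moving-average machinery):

import numpy as np

def adaptive_max_norms(norms, std_factor=2.0, decay=0.95, epsilon=1e-7):
    """Track EMAs of log-norm and its square; return exp(mean + k * std) per step."""
    mean, sq_mean, out = 0.0, 0.0, []
    for step, norm in enumerate(norms):
        log_norm = np.log(norm + epsilon)
        d = min(decay, step / (step + 1.0))   # quicker adaptation at the beginning
        mean = d * mean + (1 - d) * log_norm
        sq_mean = d * sq_mean + (1 - d) * log_norm ** 2
        std = np.sqrt(max(epsilon, sq_mean - mean ** 2))
        out.append(np.exp(mean + std_factor * std))
    return out

print(adaptive_max_norms([1.0, 2.0, 10.0, 1.5, 1.2]))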
Example 6: _prob
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import exp [as alias]
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
Example 7: _prob
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import exp [as alias]
def _prob(self, counts):
return math_ops.exp(self._log_prob(counts))
Example 8: _prob
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import exp [as alias]
def _prob(self, y):
x = self.bijector.inverse(y)
ildj = self.bijector.inverse_log_det_jacobian(y)
x = self._maybe_rotate_dims(x, rotate_right=True)
prob = self.distribution.prob(x)
if self._is_maybe_event_override:
prob = math_ops.reduce_prod(prob, self._reduce_event_indices)
prob *= math_ops.exp(ildj)
if self._is_maybe_event_override:
prob.set_shape(array_ops.broadcast_static_shape(
y.get_shape().with_rank_at_least(1)[:-1], self.batch_shape))
return prob
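The exp(ildj) factor is the Jacobian correction in the change-of-variables formula prob_Y(y) = prob_X(g^{-1}(y)) * |d g^{-1}(y) / dy|. A worked NumPy/SciPy check under assumed inputs (base distribution N(0, 1) and bijector Y = exp(X), whose inverse log det Jacobian is -log(y)), which recovers the standard log-normal density:

import numpy as np
from scipy import stats

y = 2.0
x = np.log(y)       # bijector.inverse(y) for Y = exp(X)
ildj = -np.log(y)   # log |d log(y) / dy| = -log(y)

prob_y = stats.norm.pdf(x) * np.exp(ildj)
print(prob_y, stats.lognorm.pdf(y, s=1.0))  # both ~0.1569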
Example 9: _call_cdf
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import exp [as alias]
def _call_cdf(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._cdf(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
Example 10: ndtr
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import exp [as alias]
def ndtr(x, name="ndtr"):
"""Normal distribution function.
Returns the area under the Gaussian probability density function, integrated
from minus infinity to x:
```
                     1        / x
      ndtr(x)  = ----------     |   exp(-0.5 t**2) dt
                 sqrt(2 pi)   /-inf

               = 0.5 (1 + erf(x / sqrt(2)))
               = 0.5 erfc(-x / sqrt(2))
```
Args:
x: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="ndtr").
Returns:
ndtr: `Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x` is not floating-type.
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
if x.dtype.as_numpy_dtype not in [np.float32, np.float64]:
raise TypeError(
"x.dtype=%s is not handled, see docstring for supported types."
% x.dtype)
return _ndtr(x)
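The identities in the docstring are easy to verify: ndtr(x) = 0.5 * (1 + erf(x / sqrt(2))) = 0.5 * erfc(-x / sqrt(2)). A quick SciPy sketch comparing both forms to the standard normal CDF:

import numpy as np
from scipy import special, stats

x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
via_erf = 0.5 * (1.0 + special.erf(x / np.sqrt(2.0)))
via_erfc = 0.5 * special.erfc(-x / np.sqrt(2.0))

print(np.allclose(via_erf, stats.norm.cdf(x)))   # True
print(np.allclose(via_erfc, stats.norm.cdf(x)))  # True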
Example 11: _prob
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import exp [as alias]
def _prob(self, event):
return math_ops.exp(self._log_prob(event))
Example 12: _cdf
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import exp [as alias]
def _cdf(self, x):
z = self._z(x)
return (0.5 + 0.5 * math_ops.sign(z) *
(1. - math_ops.exp(-math_ops.abs(z))))
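Here z is the standardized value (x - loc) / scale, and the expression folds both branches of the Laplace CDF into a single formula, 0.5 + 0.5 * sign(z) * (1 - exp(-|z|)). A short NumPy check against the piecewise definition (assuming loc=0 and scale=1):

import numpy as np

z = np.linspace(-4.0, 4.0, 9)
folded = 0.5 + 0.5 * np.sign(z) * (1.0 - np.exp(-np.abs(z)))
piecewise = np.where(z < 0, 0.5 * np.exp(z), 1.0 - 0.5 * np.exp(-z))
print(np.allclose(folded, piecewise))  # True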
Example 13: _prob
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import exp [as alias]
def _prob(self, k):
return math_ops.exp(self._log_prob(k))
Example 14: _Expm1Grad
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import exp [as alias]
def _Expm1Grad(op, grad):
"""Returns grad * exp(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad.op]):
x = math_ops.conj(x)
y = math_ops.exp(x)
return grad * y
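Since expm1(x) = exp(x) - 1, its derivative is exp(x), so the incoming gradient is simply scaled by exp(x). A tiny finite-difference sanity check in NumPy:

import numpy as np

x, eps = 0.7, 1e-6
numeric = (np.expm1(x + eps) - np.expm1(x - eps)) / (2 * eps)
print(numeric, np.exp(x))  # both ~2.01375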
Example 15: _ErfGrad
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import exp [as alias]
def _ErfGrad(op, grad):
"""Returns grad * 2/sqrt(pi) * exp(-x**2)."""
x = op.inputs[0]
two_over_root_pi = constant_op.constant(2 / np.sqrt(np.pi), dtype=grad.dtype)
with ops.control_dependencies([grad.op]):
x = math_ops.conj(x)
return grad * two_over_root_pi * math_ops.exp(-math_ops.square(x))
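By definition erf(x) = (2 / sqrt(pi)) * integral of exp(-t**2) from 0 to x, so its derivative is (2 / sqrt(pi)) * exp(-x**2), which is exactly the factor applied to the incoming gradient. A matching finite-difference check with SciPy:

import numpy as np
from scipy import special

x, eps = 0.3, 1e-6
numeric = (special.erf(x + eps) - special.erf(x - eps)) / (2 * eps)
analytic = 2.0 / np.sqrt(np.pi) * np.exp(-x ** 2)
print(numeric, analytic)  # both ~1.031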