

Python math_ops.exp Function Code Examples

This article collects typical usage examples of the exp function from the Python module tensorflow.python.ops.math_ops. If you are wondering what exp does, how to call it, or what real-world usage looks like, the curated examples below should help.


Below are 15 code examples of the exp function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.

Example 1: _forward

 def _forward(self, x):
   x = self._maybe_assert_valid_x(x)
   if self.power == 0.:
     return math_ops.exp(x)
   # If large x accuracy is an issue, consider using:
   # (1. + x * self.power)**(1. / self.power) when x >> 1.
   return math_ops.exp(math_ops.log1p(x * self.power) / self.power)
Developer ID: Ajaycs99, Project: tensorflow, Code lines: 7, Source: power_transform.py
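
As a side note, here is a small NumPy sketch (not from the original power_transform.py) checking the identity the forward pass relies on: exp(log1p(x * p) / p) equals (1 + x * p)**(1 / p) exactly, and approaches exp(x) as the power p goes to 0.

import numpy as np

x = np.linspace(0.0, 2.0, 5)
for p in (1.0, 0.5, 0.1):
    via_log1p = np.exp(np.log1p(x * p) / p)
    direct = (1.0 + x * p) ** (1.0 / p)
    assert np.allclose(via_log1p, direct)

# As p -> 0 the transform approaches exp(x), matching the `power == 0.` branch.
print(np.exp(np.log1p(x * 1e-8) / 1e-8))  # ~= np.exp(x)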

Example 2: test_one_dimensional_arg

 def test_one_dimensional_arg(self):
   # Should evaluate to 1 and 1/2.
   x_one = [1, 1.]
   x_one_half = [2, 1.]
   with self.test_session(use_gpu=self._use_gpu):
     self.assertAllClose(1, math_ops.exp(special_math_ops.lbeta(x_one)).eval())
     self.assertAllClose(
         0.5, math_ops.exp(special_math_ops.lbeta(x_one_half)).eval())
     self.assertEqual([], special_math_ops.lbeta(x_one).get_shape())
Developer ID: Immexxx, Project: tensorflow, Code lines: 9, Source: special_math_ops_test.py
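
For reference, a short SciPy-based check (an illustrative sketch, not part of the test file) of the values the assertions expect, using the definition lbeta(x) = sum(lgamma(x_i)) - lgamma(sum(x_i)):

import numpy as np
from scipy.special import gammaln

def lbeta(x):
    x = np.asarray(x, dtype=np.float64)
    return np.sum(gammaln(x)) - gammaln(np.sum(x))

print(np.exp(lbeta([1.0, 1.0])))  # 1.0  (Beta(1, 1) = 1)
print(np.exp(lbeta([2.0, 1.0])))  # 0.5  (Beta(2, 1) = 1/2)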

Example 3: test_length_1_last_dimension_results_in_one

 def test_length_1_last_dimension_results_in_one(self):
    # If there is only one coefficient, the formula still works, and the
    # answer is always one.
   x_a = [5.5]
   x_b = [0.1]
   with self.test_session(use_gpu=True):
     self.assertAllClose(1, math_ops.exp(special_math_ops.lbeta(x_a)).eval())
     self.assertAllClose(1, math_ops.exp(special_math_ops.lbeta(x_b)).eval())
     self.assertEqual((), special_math_ops.lbeta(x_a).get_shape())
Developer ID: ChengYuXiang, Project: tensorflow, Code lines: 9, Source: special_math_ops_test.py
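
Why the single-coefficient case always yields one: with only one element, lbeta([a]) = lgamma(a) - lgamma(a) = 0, so exp(lbeta([a])) = 1 for any positive a. A tiny NumPy/SciPy sketch (not from the test file):

import numpy as np
from scipy.special import gammaln

for a in (5.5, 0.1):
    log_beta = gammaln(a) - gammaln(a)  # sum over one element minus lgamma of the sum
    print(np.exp(log_beta))  # 1.0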

Example 4: jensen_shannon

def jensen_shannon(logu, self_normalized=False, name=None):
  """The Jensen-Shannon Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  When `self_normalized = True`, the Jensen-Shannon Csiszar-function is:

  ```none
  f(u) = u log(u) - (1 + u) log(1 + u) + (u + 1) log(2)
  ```

  When `self_normalized = False` the `(u + 1) log(2)` term is omitted.

  Observe that as an f-Divergence, this Csiszar-function implies:

  ```none
  D_f[p, q] = KL[p, m] + KL[q, m]
  m(x) = 0.5 p(x) + 0.5 q(x)
  ```

  In a sense, this divergence is the "reverse" of the Arithmetic-Geometric
  f-Divergence.

  This Csiszar-function induces a symmetric f-Divergence, i.e.,
  `D_f[p, q] = D_f[q, p]`.

  Warning: this function makes non-log-space calculations and may therefore be
  numerically unstable for `|logu| >> 0`.

  For more information, see:
    Lin, J. "Divergence measures based on the Shannon entropy." IEEE Trans.
    Inf. Th., 37, 145-151, 1991.

  Args:
    logu: Floating-type `Tensor` representing `log(u)` from above.
    self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
      `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
      when `p, q` are unnormalized measures.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    jensen_shannon_of_u: Floating-type `Tensor` of the Csiszar-function
      evaluated at `u = exp(logu)`.
  """

  with ops.name_scope(name, "jensen_shannon", [logu]):
    logu = ops.convert_to_tensor(logu, name="logu")
    npdt = logu.dtype.as_numpy_dtype
    y = nn_ops.softplus(logu)
    if self_normalized:
      y -= np.log(2).astype(npdt)
    return math_ops.exp(logu) * logu - (1. + math_ops.exp(logu)) * y
Developer ID: Joetz, Project: tensorflow, Code lines: 56, Source: csiszar_divergence_impl.py
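
As a cross-check, a small NumPy sketch (not part of csiszar_divergence_impl.py) confirming that the log-space expression above matches the closed form f(u) = u log(u) - (1 + u) log(1 + u) + (1 + u) log(2) when self_normalized=True, and that f(1) = 0:

import numpy as np

def jensen_shannon_np(logu, self_normalized=False):
    y = np.logaddexp(0.0, logu)  # softplus(logu) = log(1 + u)
    if self_normalized:
        y -= np.log(2.0)
    return np.exp(logu) * logu - (1.0 + np.exp(logu)) * y

u = np.array([0.5, 1.0, 2.0])
closed_form = u * np.log(u) - (1 + u) * np.log(1 + u) + (1 + u) * np.log(2.0)
assert np.allclose(jensen_shannon_np(np.log(u), self_normalized=True), closed_form)
print(jensen_shannon_np(np.array([0.0]), self_normalized=True))  # ~0 at u = 1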

Example 5: _SoftplusGradGrad

def _SoftplusGradGrad(op, grad):
  # Let:
  #   y = tf.nn.softplus(x)
  #   dx = gen_nn_ops.softplus_grad(dy, x) = dy / (1 + exp(-x))
  # This op computes (ddy, d2x) from op.inputs == [dy, x] and grad == ddx.
  dy, x = op.inputs
  with ops.control_dependencies([grad]):
    ddy = gen_nn_ops.softplus_grad(grad, x)
    d2x = grad * dy / (math_ops.exp(-x) + 2.0 + math_ops.exp(x))
    return (ddy, d2x)
Developer ID: adit-chandra, Project: tensorflow, Code lines: 10, Source: nn_grad.py
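
The denominator exp(-x) + 2 + exp(x) is just 1 / sigmoid'(x): softplus'(x) = sigmoid(x), and sigmoid'(x) = exp(-x) / (1 + exp(-x))**2 = 1 / (exp(-x) + 2 + exp(x)). A finite-difference sanity check (illustrative NumPy sketch, not from nn_grad.py):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

x = np.linspace(-3.0, 3.0, 7)
eps = 1e-6
numeric = (sigmoid(x + eps) - sigmoid(x - eps)) / (2 * eps)
analytic = 1.0 / (np.exp(-x) + 2.0 + np.exp(x))
assert np.allclose(numeric, analytic, atol=1e-6)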

Example 6: _compute_energy_change

def _compute_energy_change(current_target_log_prob,
                           current_momentums,
                           proposed_target_log_prob,
                           proposed_momentums,
                           independent_chain_ndims,
                           name=None):
  """Helper to `kernel` which computes the energy change."""
  with ops.name_scope(
      name, "compute_energy_change",
      ([current_target_log_prob, proposed_target_log_prob,
        independent_chain_ndims] +
       current_momentums + proposed_momentums)):
    # Abbreviate lk0=log_kinetic_energy and lk1=proposed_log_kinetic_energy
    # since the full names are a mouthful and the short forms let us inline more.
    lk0, lk1 = [], []
    for current_momentum, proposed_momentum in zip(current_momentums,
                                                   proposed_momentums):
      axis = math_ops.range(independent_chain_ndims,
                            array_ops.rank(current_momentum))
      lk0.append(_log_sum_sq(current_momentum, axis))
      lk1.append(_log_sum_sq(proposed_momentum, axis))

    lk0 = -np.log(2.) + math_ops.reduce_logsumexp(array_ops.stack(lk0, axis=-1),
                                                  axis=-1)
    lk1 = -np.log(2.) + math_ops.reduce_logsumexp(array_ops.stack(lk1, axis=-1),
                                                  axis=-1)
    lp0 = -current_target_log_prob   # log_potential
    lp1 = -proposed_target_log_prob  # proposed_log_potential
    x = array_ops.stack([lp1, math_ops.exp(lk1), -lp0, -math_ops.exp(lk0)],
                        axis=-1)

    # The sum is NaN if any element is NaN or we see both +Inf and -Inf.
    # Thus we will replace such rows with infinite energy change which implies
    # rejection. Recall that float-comparisons with NaN are always False.
    is_sum_determinate = (
        math_ops.reduce_all(math_ops.is_finite(x) | (x >= 0.), axis=-1) &
        math_ops.reduce_all(math_ops.is_finite(x) | (x <= 0.), axis=-1))
    is_sum_determinate = array_ops.tile(
        is_sum_determinate[..., array_ops.newaxis],
        multiples=array_ops.concat([
            array_ops.ones(array_ops.rank(is_sum_determinate),
                           dtype=dtypes.int32),
            [4],
        ], axis=0))
    x = array_ops.where(is_sum_determinate,
                        x,
                        array_ops.fill(array_ops.shape(x),
                                       value=x.dtype.as_numpy_dtype(np.inf)))

    return math_ops.reduce_sum(x, axis=-1)
Developer ID: Yashar78, Project: tensorflow, Code lines: 50, Source: hmc_impl.py
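
The masking near the end deserves a closer look: a row's sum is indeterminate if it contains a NaN or both +Inf and -Inf, and such rows are overwritten with +Inf so the energy change forces a rejection. A standalone NumPy sketch (not from hmc_impl.py) of the same logic:

import numpy as np

x = np.array([[1.0, 2.0, -3.0, 4.0],
              [np.inf, 1.0, -np.inf, 0.0],   # +inf and -inf -> sum would be NaN
              [np.nan, 1.0, 2.0, 3.0]])      # NaN -> sum would be NaN

# NaN comparisons are always False, so NaN rows fail both conditions.
is_determinate = (np.all(np.isfinite(x) | (x >= 0.0), axis=-1) &
                  np.all(np.isfinite(x) | (x <= 0.0), axis=-1))
x = np.where(is_determinate[..., np.newaxis], x, np.inf)
print(x.sum(axis=-1))  # [4. inf inf]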

Example 7: cosh

def cosh(x, name="cosh"):
  """Hyperbolic cosine:  `cosh(x) = (e**x + e**-x) / 2`.

  For `x >= 1`, `cosh(arccosh(x)) = x`; for real `x`, `arccosh(cosh(x)) = |x|`.

  Args:
    x:  Numeric `Tensor`.
    name:  A string name to prepend to created Ops.

  Returns:
    Numeric `Tensor` of same `shape` and `dtype` as `x`.
  """
  with ops.name_scope(name):
    x = ops.convert_to_tensor(x, name="x")
    return 0.5 * (math_ops.exp(x) + math_ops.exp(-x))
Developer ID: AutumnQYN, Project: tensorflow, Code lines: 15, Source: trig.py
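
A brief usage sketch via the public TensorFlow 2.x API (an assumption; the snippet above predates it). Note that exp(x) overflows for large |x|, so tf.math.cosh is preferable when inputs may be large:

import tensorflow as tf

x = tf.constant([-1.0, 0.0, 1.0])
manual = 0.5 * (tf.exp(x) + tf.exp(-x))
print(manual.numpy())           # [1.5430806 1.        1.5430806]
print(tf.math.cosh(x).numpy())  # same values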

Example 8: ctc_loss_and_grad

def ctc_loss_and_grad(logits, labels, label_length, logit_length, unique=None):
  """Computes the CTC loss and gradients.

  Most users will want fwd_bwd.ctc_loss.

  This function returns the computed gradient; it does not define a gradient
  of its own.

  Args:
    logits: tensor of shape [frames, batch_size, num_labels]
    labels: tensor of shape [batch_size, max_label_seq_length]
    label_length: tensor of shape [batch_size]
      Length of reference label sequence in labels.
    logit_length: tensor of shape [batch_size]
      Length of input sequence in logits.
    unique: (optional) Unique label indices as computed by unique(labels).
      If supplied, enables an implementation that is faster and more memory
      efficient on TPU.

  Returns:
    loss: tensor of shape [batch_size]
    gradient: tensor of shape [frames, batch_size, num_labels]
  """

  num_labels = _get_dim(logits, 2)
  max_label_seq_length = _get_dim(labels, 1)

  ilabel_log_probs = nn_ops.log_softmax(logits)
  state_log_probs = _ilabel_to_state(labels, num_labels, ilabel_log_probs)
  state_trans_probs = _ctc_state_trans(labels)
  initial_state_log_probs, final_state_log_probs = ctc_state_log_probs(
      label_length, max_label_seq_length)
  fwd_bwd_log_probs, log_likelihood = _forward_backward_log(
      state_trans_log_probs=math_ops.log(state_trans_probs),
      initial_state_log_probs=initial_state_log_probs,
      final_state_log_probs=final_state_log_probs,
      observed_log_probs=state_log_probs,
      sequence_length=logit_length)

  if unique:
    olabel_log_probs = _state_to_olabel_unique(
        labels, num_labels, fwd_bwd_log_probs, unique)
  else:
    olabel_log_probs = _state_to_olabel(labels, num_labels, fwd_bwd_log_probs)

  grad = math_ops.exp(ilabel_log_probs) - math_ops.exp(olabel_log_probs)
  loss = -log_likelihood
  return loss, grad
Developer ID: adit-chandra, Project: tensorflow, Code lines: 48, Source: ctc_ops.py
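
The gradient expression at the end is softmax(logits) minus the expected label occupancy, both recovered by exponentiating log-probabilities. A minimal NumPy illustration (not from ctc_ops.py) of the first term, showing that exp(log_softmax(logits)) reproduces softmax(logits) in a numerically stable way:

import numpy as np

logits = np.array([[2.0, 1.0, 0.1]])
shifted = logits - logits.max(axis=-1, keepdims=True)
log_softmax = shifted - np.log(np.exp(shifted).sum(axis=-1, keepdims=True))
softmax = np.exp(logits) / np.exp(logits).sum(axis=-1, keepdims=True)
assert np.allclose(np.exp(log_softmax), softmax)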

Example 9: _call_cdf

 def _call_cdf(self, value, name, **kwargs):
   with self._name_scope(name, values=[value]):
     value = ops.convert_to_tensor(value, name="value")
     try:
       return self._cdf(value, **kwargs)
     except NotImplementedError:
       return math_ops.exp(self._log_cdf(value, **kwargs))
Developer ID: Huoxubeiyin, Project: tensorflow, Code lines: 7, Source: distribution.py

Example 10: _ErfGrad

def _ErfGrad(op, grad):
  """Returns grad * 2/sqrt(pi) * exp(-x**2)."""
  x = op.inputs[0]
  two_over_root_pi = constant_op.constant(2 / np.sqrt(np.pi), dtype=grad.dtype)
  with ops.control_dependencies([grad]):
    x = math_ops.conj(x)
    return grad * two_over_root_pi * math_ops.exp(-math_ops.square(x))
Developer ID: neuroradiology, Project: tensorflow, Code lines: 7, Source: math_grad.py
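
A finite-difference check (an illustrative sketch assuming SciPy; not from math_grad.py) of the rule in the docstring, d/dx erf(x) = 2/sqrt(pi) * exp(-x**2):

import numpy as np
from scipy.special import erf

x = np.linspace(-2.0, 2.0, 9)
eps = 1e-6
numeric = (erf(x + eps) - erf(x - eps)) / (2 * eps)
analytic = 2.0 / np.sqrt(np.pi) * np.exp(-x ** 2)
assert np.allclose(numeric, analytic, atol=1e-6)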

Example 11: monte_carlo_hypersphere_volume

 def monte_carlo_hypersphere_volume(dist, num_samples, radius, center):
   # https://en.wikipedia.org/wiki/Importance_sampling
   x = dist.sample(num_samples, seed=seed)
   x = array_ops.identity(x)  # Invalidate bijector caching.
   return math_ops.reduce_mean(
       math_ops.exp(-dist.log_prob(x)) * is_in_ball(x, radius, center),
       axis=0)
Developer ID: ahmedsaiduk, Project: tensorflow, Code lines: 7, Source: test_util.py
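
The estimator averages indicator(x in ball) / p(x) over samples from p, which integrates to the ball's volume. A self-contained NumPy sketch (not from test_util.py) of the same importance-sampling idea, estimating the area of the unit disk with a standard-normal proposal:

import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((200000, 2))
log_q = -0.5 * np.sum(x ** 2, axis=-1) - np.log(2 * np.pi)  # 2-D standard normal
in_disk = (np.sum(x ** 2, axis=-1) <= 1.0).astype(np.float64)
estimate = np.mean(np.exp(-log_q) * in_disk)
print(estimate, np.pi)  # estimate should be close to pi (~3.14)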

Example 12: _ErfcGrad

def _ErfcGrad(op, grad):
  """Returns -grad * 2/sqrt(pi) * exp(-x**2)."""
  x = op.inputs[0]
  minus_two_over_root_pi = constant_op.constant(-2 / np.sqrt(np.pi),
                                                dtype=grad.dtype)
  with ops.control_dependencies([grad.op]):
    return grad * minus_two_over_root_pi * math_ops.exp(-math_ops.square(x))
Developer ID: TeMedy, Project: tensorflow, Code lines: 7, Source: math_grad.py

Example 13: cdf

  def cdf(self, value, name="cdf", **condition_kwargs):
    """Cumulative distribution function.

    Given random variable `X`, the cumulative distribution function `cdf` is:

    ```
    cdf(x) := P[X <= x]
    ```

    Args:
      value: `float` or `double` `Tensor`.
      name: The name to give this op.
      **condition_kwargs: Named arguments forwarded to subclass implementation.

    Returns:
      cdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
        values of type `self.dtype`.
    """
    with self._name_scope(name, values=[value]):
      value = ops.convert_to_tensor(value, name="value")
      try:
        return self._cdf(value, **condition_kwargs)
      except NotImplementedError as original_exception:
        try:
          return math_ops.exp(self._log_cdf(value, **condition_kwargs))
        except NotImplementedError:
          raise original_exception
Developer ID: ComeOnGetMe, Project: tensorflow, Code lines: 27, Source: distribution.py
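
The same fallback pattern is easy to replicate outside TensorFlow. A minimal sketch (hypothetical class, not TensorFlow's Distribution) where only _log_cdf is implemented and cdf() falls back to exp(_log_cdf):

import numpy as np

class LogCdfOnlyDistribution:
    """Only defines _log_cdf; cdf() falls back to exp(_log_cdf)."""

    def _cdf(self, value):
        raise NotImplementedError

    def _log_cdf(self, value):
        # Standard exponential distribution: log(1 - exp(-value)).
        return np.log1p(-np.exp(-value))

    def cdf(self, value):
        try:
            return self._cdf(value)
        except NotImplementedError:
            return np.exp(self._log_cdf(value))

print(LogCdfOnlyDistribution().cdf(np.array([0.5, 1.0])))  # [0.3935 0.6321]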

Example 14: test_two_dimensional_arg_dynamic

 def test_two_dimensional_arg_dynamic(self):
   # Should evaluate to 1/2.
   x_one_half = [[2, 1.], [2, 1.]]
   with self.test_session(use_gpu=True):
     ph = array_ops.placeholder(dtypes.float32)
     beta_ph = math_ops.exp(special_math_ops.lbeta(ph))
     self.assertAllClose([0.5, 0.5], beta_ph.eval(feed_dict={ph: x_one_half}))
Developer ID: ChengYuXiang, Project: tensorflow, Code lines: 7, Source: special_math_ops_test.py

Example 15: test_two_dimensional_arg

 def test_two_dimensional_arg(self):
   # Should evaluate to 1/2.
   x_one_half = [[2, 1.], [2, 1.]]
   with self.test_session(use_gpu=self._use_gpu):
     self.assertAllClose(
         [0.5, 0.5], math_ops.exp(special_math_ops.lbeta(x_one_half)).eval())
     self.assertEqual((2,), special_math_ops.lbeta(x_one_half).get_shape())
Developer ID: Immexxx, Project: tensorflow, Code lines: 7, Source: special_math_ops_test.py


Note: The tensorflow.python.ops.math_ops.exp examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by many developers; copyright remains with the original authors, and any redistribution or use should follow the corresponding project's license. Please do not reproduce this article without permission.