

Python math_ops.lgamma method: code examples

This article collects typical usage examples of the Python method tensorflow.python.ops.math_ops.lgamma. If you are wondering what math_ops.lgamma does, how to call it, or want to see it in real code, the curated examples below should help. You can also explore further usage examples from its containing module, tensorflow.python.ops.math_ops.


Below are 15 code examples of the math_ops.lgamma method, ordered by popularity by default.

Example 1: _kl_gamma_gamma

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import lgamma [as alias]
def _kl_gamma_gamma(g0, g1, name=None):
  """Calculate the batched KL divergence KL(g0 || g1) with g0 and g1 Gamma.

  Args:
    g0: instance of a Gamma distribution object.
    g1: instance of a Gamma distribution object.
    name: (optional) Name to use for created operations.
      Default is "kl_gamma_gamma".

  Returns:
    kl_gamma_gamma: `Tensor`. The batchwise KL(g0 || g1).
  """
  with ops.name_scope(name, "kl_gamma_gamma", values=[
      g0.concentration, g0.rate, g1.concentration, g1.rate]):
    # Result from:
    #   http://www.fil.ion.ucl.ac.uk/~wpenny/publications/densities.ps
    # For derivation see:
    #   http://stats.stackexchange.com/questions/11646/kullback-leibler-divergence-between-two-gamma-distributions   pylint: disable=line-too-long
    return (((g0.concentration - g1.concentration)
             * math_ops.digamma(g0.concentration))
            + math_ops.lgamma(g1.concentration)
            - math_ops.lgamma(g0.concentration)
            + g1.concentration * math_ops.log(g0.rate)
            - g1.concentration * math_ops.log(g1.rate)
            + g0.concentration * (g1.rate / g0.rate - 1.)) 
Author: ryfeus, Project: lambda-packs, Lines: 27, Source: gamma.py
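As a quick framework-free sanity check of this closed form (not part of the original source), here is a minimal sketch using only the standard library: `math.lgamma` stands in for `math_ops.lgamma`, digamma is approximated by a central difference of `lgamma`, and the parameter values are purely illustrative.

```python
import math

def digamma(z, h=1e-5):
    # psi(z) = d/dz lgamma(z), approximated by a central difference.
    return (math.lgamma(z + h) - math.lgamma(z - h)) / (2 * h)

def kl_gamma_gamma(conc0, rate0, conc1, rate1):
    # Same closed form as above, with math.lgamma in place of math_ops.lgamma.
    return ((conc0 - conc1) * digamma(conc0)
            + math.lgamma(conc1) - math.lgamma(conc0)
            + conc1 * (math.log(rate0) - math.log(rate1))
            + conc0 * (rate1 / rate0 - 1.))

print(kl_gamma_gamma(2., 1., 2., 1.))  # 0.0 for identical distributions
print(kl_gamma_gamma(2., 1., 3., 2.))  # positive, ~0.19
```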

Example 2: _BetaincGrad

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import lgamma [as alias]
def _BetaincGrad(op, grad):
  """Returns gradient of betainc(a, b, x) with respect to x."""
  # TODO(ebrevdo): Perhaps add the derivative w.r.t. a, b
  a, b, x = op.inputs

  # Two cases: x is a scalar and a/b are same-shaped tensors, or vice
  # versa; so it's sufficient to check against shape(a).
  sa = array_ops.shape(a)
  sx = array_ops.shape(x)
  # pylint: disable=protected-access
  _, rx = gen_array_ops._broadcast_gradient_args(sa, sx)
  # pylint: enable=protected-access

  # Perform operations in log space before summing, because terms
  # can grow large.
  log_beta = (gen_math_ops.lgamma(a) + gen_math_ops.lgamma(b)
              - gen_math_ops.lgamma(a + b))
  partial_x = math_ops.exp(
      (b - 1) * math_ops.log(1 - x) + (a - 1) * math_ops.log(x) - log_beta)

  # TODO(b/36815900): Mark None return values as NotImplemented
  return (None,  # da
          None,  # db
          array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx)) 
Author: ryfeus, Project: lambda-packs, Lines: 26, Source: math_grad.py
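To confirm that the log-space expression really is ∂/∂x betainc(a, b, x), here is a hedged numeric check against a finite difference; it assumes SciPy is available, and the values of a, b, x are illustrative.

```python
import math
from scipy.special import betainc  # regularized incomplete beta I_x(a, b)

a, b, x, h = 2.0, 3.0, 0.4, 1e-6
log_beta = math.lgamma(a) + math.lgamma(b) - math.lgamma(a + b)
partial_x = math.exp((b - 1) * math.log(1 - x) + (a - 1) * math.log(x) - log_beta)
fd = (betainc(a, b, x + h) - betainc(a, b, x - h)) / (2 * h)
print(partial_x, fd)  # both ~1.728
```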

Example 3: _log_prob

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import lgamma [as alias]
def _log_prob(self, x):
    x = self._assert_valid_sample(x)
    # broadcast logits or x if need be.
    logits = self.logits
    if (not x.get_shape().is_fully_defined() or
        not logits.get_shape().is_fully_defined() or
        x.get_shape() != logits.get_shape()):
      logits = array_ops.ones_like(x, dtype=logits.dtype) * logits
      x = array_ops.ones_like(logits, dtype=x.dtype) * x
    logits_shape = array_ops.shape(math_ops.reduce_sum(logits, axis=[-1]))
    logits_2d = array_ops.reshape(logits, [-1, self.event_size])
    x_2d = array_ops.reshape(x, [-1, self.event_size])
    # compute the normalization constant
    k = math_ops.cast(self.event_size, x.dtype)
    log_norm_const = (math_ops.lgamma(k)
                      + (k - 1.)
                      * math_ops.log(self.temperature))
    # compute the unnormalized density
    log_softmax = nn_ops.log_softmax(logits_2d - x_2d * self._temperature_2d)
    log_unnorm_prob = math_ops.reduce_sum(log_softmax, [-1], keep_dims=False)
    # combine unnormalized density with normalization constant
    log_prob = log_norm_const + log_unnorm_prob
    # Reshapes log_prob to be consistent with shape of user-supplied logits
    ret = array_ops.reshape(log_prob, logits_shape)
    return ret 
Author: ryfeus, Project: lambda-packs, Lines: 27, Source: relaxed_onehot_categorical.py
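The normalization constant used above has the closed form log Γ(k) + (k − 1)·log τ for k classes at temperature τ; a tiny standard-library sketch with purely illustrative values:

```python
import math

k, temperature = 4, 0.5  # number of classes, relaxation temperature
log_norm_const = math.lgamma(k) + (k - 1.) * math.log(temperature)
print(log_norm_const)  # log(3!) + 3*log(0.5), about -0.29
```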

Example 4: _kl_gamma_gamma

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import lgamma [as alias]
def _kl_gamma_gamma(g0, g1, name=None):
  """Calculate the batched KL divergence KL(g0 || g1) with g0 and g1 Gamma.

  Args:
    g0: instance of a Gamma distribution object.
    g1: instance of a Gamma distribution object.
    name: (optional) Name to use for created operations.
      Default is "kl_gamma_gamma".

  Returns:
    kl_gamma_gamma: `Tensor`. The batchwise KL(g0 || g1).
  """
  with ops.name_scope(name, "kl_gamma_gamma",
                      values=[g0.alpha, g0.beta, g1.alpha, g1.beta]):
    # Result from:
    #   http://www.fil.ion.ucl.ac.uk/~wpenny/publications/densities.ps
    # For derivation see:
    #   http://stats.stackexchange.com/questions/11646/kullback-leibler-divergence-between-two-gamma-distributions   pylint: disable=line-too-long
    return ((g0.alpha - g1.alpha) * math_ops.digamma(g0.alpha)
            + math_ops.lgamma(g1.alpha)
            - math_ops.lgamma(g0.alpha)
            + g1.alpha * math_ops.log(g0.beta)
            - g1.alpha * math_ops.log(g1.beta)
            + g0.alpha * (g1.beta / g0.beta - 1.)) 
Author: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 26, Source: gamma.py
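Apart from parameter names, this is the same closed form as Example 1: `alpha` here plays the role of the concentration and `beta` the rate.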

Example 5: _kl_beta_beta

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import lgamma [as alias]
def _kl_beta_beta(d1, d2, name=None):
  """Calculate the batched KL divergence KL(d1 || d2) with d1 and d2 Beta.

  Args:
    d1: instance of a Beta distribution object.
    d2: instance of a Beta distribution object.
    name: (optional) Name to use for created operations.
      Default is "kl_beta_beta".

  Returns:
    Batchwise KL(d1 || d2)
  """
  inputs = [d1.a, d1.b, d1.a_b_sum, d2.a_b_sum]
  with ops.name_scope(name, "kl_beta_beta", inputs):
    # ln(B(a', b') / B(a, b))
    log_betas = (math_ops.lgamma(d2.a) + math_ops.lgamma(d2.b)
                - math_ops.lgamma(d2.a_b_sum) + math_ops.lgamma(d1.a_b_sum)
                - math_ops.lgamma(d1.a) - math_ops.lgamma(d1.b))
    # (a - a')*psi(a) + (b - b')*psi(b) + (a' - a + b' - b)*psi(a + b)
    digammas = ((d1.a - d2.a)*math_ops.digamma(d1.a)
              + (d1.b - d2.b)*math_ops.digamma(d1.b)
              + (d2.a_b_sum - d1.a_b_sum)*math_ops.digamma(d1.a_b_sum))
    return log_betas + digammas 
Author: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 25, Source: beta.py
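The same Beta KL formula as a framework-free sketch for spot-checking values (standard library only; digamma is again approximated by a central difference of `math.lgamma`, and the test parameters are illustrative):

```python
import math

def digamma(z, h=1e-5):
    # Central-difference approximation of psi(z) = d/dz lgamma(z).
    return (math.lgamma(z + h) - math.lgamma(z - h)) / (2 * h)

def kl_beta_beta(a1, b1, a2, b2):
    # ln(B(a', b') / B(a, b))
    log_betas = (math.lgamma(a2) + math.lgamma(b2) - math.lgamma(a2 + b2)
                 + math.lgamma(a1 + b1) - math.lgamma(a1) - math.lgamma(b1))
    # (a - a')*psi(a) + (b - b')*psi(b) + (a' - a + b' - b)*psi(a + b)
    digammas = ((a1 - a2) * digamma(a1)
                + (b1 - b2) * digamma(b1)
                + (a2 + b2 - a1 - b1) * digamma(a1 + b1))
    return log_betas + digammas

print(kl_beta_beta(2., 3., 2., 3.))  # 0.0 for identical distributions
print(kl_beta_beta(2., 3., 4., 5.))  # positive
```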

Example 6: _IgammaGrad

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import lgamma [as alias]
def _IgammaGrad(op, grad):
  """Returns gradient of igamma(a, x) with respect to x."""
  # TODO(ebrevdo): Perhaps add the derivative w.r.t. a
  a = op.inputs[0]
  x = op.inputs[1]
  sa = array_ops.shape(a)
  sx = array_ops.shape(x)
  # pylint: disable=protected-access
  unused_ra, rx = gen_array_ops._broadcast_gradient_args(sa, sx)
  # pylint: enable=protected-access

  # Perform operations in log space before summing, because Gamma(a)
  # and Gamma'(a) can grow large.
  partial_x = math_ops.exp(-x + (a - 1) * math_ops.log(x) - math_ops.lgamma(a))
  # TODO(b/36815900): Mark None return values as NotImplemented
  return (None,
          array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx)) 
Author: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 19, Source: math_grad.py
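A hedged numeric check that the log-space expression matches a finite difference of the regularized lower incomplete gamma P(a, x) (assumes SciPy; the values of a and x are illustrative):

```python
import math
from scipy.special import gammainc  # regularized lower incomplete gamma P(a, x)

a, x, h = 2.5, 1.3, 1e-6
partial_x = math.exp(-x + (a - 1) * math.log(x) - math.lgamma(a))
fd = (gammainc(a, x + h) - gammainc(a, x - h)) / (2 * h)
print(partial_x, fd)  # should agree to several decimal places
```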

Example 7: _log_normalization

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import lgamma [as alias]
def _log_normalization(self):
    return (math_ops.lgamma(self.concentration)
            - self.concentration * math_ops.log(self.rate)) 
Author: ryfeus, Project: lambda-packs, Lines: 5, Source: gamma.py
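This is the log partition function of the Gamma density, log Γ(α) − α·log β. A small standard-library sketch (illustrative parameters, not from the original source) confirming that the implied density integrates to roughly one:

```python
import math

conc, rate = 2.5, 1.5  # illustrative shape and rate
log_norm = math.lgamma(conc) - conc * math.log(rate)

def log_prob(x):
    return (conc - 1.) * math.log(x) - rate * x - log_norm

dx = 1e-3  # Riemann sum over (0, 40]; result should be ~1.0
print(sum(math.exp(log_prob(i * dx)) for i in range(1, 40001)) * dx)
```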

Example 8: _entropy

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import lgamma [as alias]
def _entropy(self):
    return (self.concentration
            - math_ops.log(self.rate)
            + math_ops.lgamma(self.concentration)
            + ((1. - self.concentration) *
               math_ops.digamma(self.concentration))) 
Author: ryfeus, Project: lambda-packs, Lines: 8, Source: gamma.py
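A hedged cross-check of the closed-form entropy α − log β + log Γ(α) + (1 − α)ψ(α) (assumes SciPy; note that SciPy's `gamma` is parameterized by scale, i.e. 1/rate, and the values are illustrative):

```python
import math
from scipy.special import digamma
from scipy.stats import gamma

conc, rate = 2.5, 1.5  # illustrative values
entropy = (conc - math.log(rate) + math.lgamma(conc)
           + (1. - conc) * digamma(conc))
print(entropy, gamma(conc, scale=1. / rate).entropy())  # should agree
```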

Example 9: log_combinations

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import lgamma [as alias]
def log_combinations(n, counts, name="log_combinations"):
  """Multinomial coefficient.

  Given `n` and `counts`, where `counts` has last dimension `k`, we compute
  the multinomial coefficient as:

  ```n! / prod_i n_i!```

  where `i` runs over all `k` classes.

  Args:
    n: Floating-point `Tensor` broadcastable with `counts`. This represents `n`
      outcomes.
    counts: Floating-point `Tensor` broadcastable with `n`. This represents
      counts in `k` classes, where `k` is the last dimension of the tensor.
    name: A name for this operation (optional).

  Returns:
    `Tensor` representing the multinomial coefficient between `n` and `counts`.
  """
  # First a bit about the number of ways counts could have come in:
  # E.g. if counts = [1, 2], then this is 3 choose 2.
  # In general, this is (sum counts)! / prod(counts!)
  # The sum should be along the last dimension of counts. This is the
  # "distribution" dimension. Here n a priori represents the sum of counts.
  with ops.name_scope(name, values=[n, counts]):
    n = ops.convert_to_tensor(n, name="n")
    counts = ops.convert_to_tensor(counts, name="counts")
    total_permutations = math_ops.lgamma(n + 1)
    counts_factorial = math_ops.lgamma(counts + 1)
    redundant_permutations = math_ops.reduce_sum(counts_factorial, axis=[-1])
    return total_permutations - redundant_permutations 
Author: ryfeus, Project: lambda-packs, Lines: 34, Source: util.py
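The same computation in plain Python, checked against an exact integer evaluation (illustrative counts; `math.prod` needs Python 3.8+):

```python
import math

counts = [1, 2, 4]
n = sum(counts)
# Log-space multinomial coefficient, as in log_combinations above.
log_coeff = math.lgamma(n + 1.) - sum(math.lgamma(c + 1.) for c in counts)
exact = math.factorial(n) // math.prod(math.factorial(c) for c in counts)
print(round(math.exp(log_coeff)), exact)  # both 105
```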

Example 10: _log_normalization

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import lgamma [as alias]
def _log_normalization(self):
    return (math_ops.log(math_ops.abs(self.scale)) +
            0.5 * math_ops.log(self.df) +
            0.5 * np.log(np.pi) +
            math_ops.lgamma(0.5 * self.df) -
            math_ops.lgamma(0.5 * (self.df + 1.))) 
Author: ryfeus, Project: lambda-packs, Lines: 8, Source: student_t.py
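At x == loc the Student-t kernel term vanishes, so the log density there equals the negative of this normalizer. A hedged check (assumes SciPy; df and scale are illustrative):

```python
import math
from scipy.stats import t

df, scale = 5.0, 2.0  # illustrative values
log_norm = (math.log(abs(scale)) + 0.5 * math.log(df)
            + 0.5 * math.log(math.pi)
            + math.lgamma(0.5 * df) - math.lgamma(0.5 * (df + 1.)))
print(-log_norm, t.logpdf(0.0, df, loc=0.0, scale=scale))  # should match
```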

Example 11: _IgammaGrad

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import lgamma [as alias]
def _IgammaGrad(op, grad):
  """Returns gradient of igamma(a, x) with respect to x."""
  # TODO(ebrevdo): Perhaps add the derivative w.r.t. a
  a = op.inputs[0]
  x = op.inputs[1]
  sa = array_ops.shape(a)
  sx = array_ops.shape(x)
  unused_ra, rx = gen_array_ops._broadcast_gradient_args(sa, sx)

  # Perform operations in log space before summing, because Gamma(a)
  # and Gamma'(a) can grow large.
  partial_x = math_ops.exp(-x + (a - 1) * math_ops.log(x) - math_ops.lgamma(a))
  # TODO(b/36815900): Mark None return values as NotImplemented
  return (None,
          array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx)) 
Author: ryfeus, Project: lambda-packs, Lines: 17, Source: math_grad.py

Example 12: _entropy

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import lgamma [as alias]
def _entropy(self):
    return (self.concentration
            + math_ops.log(self.rate)
            + math_ops.lgamma(self.concentration)
            - ((1. + self.concentration) *
               math_ops.digamma(self.concentration))) 
Author: ryfeus, Project: lambda-packs, Lines: 8, Source: inverse_gamma.py
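A hedged cross-check of the inverse-Gamma entropy α + log β + log Γ(α) − (1 + α)ψ(α) (assumes SciPy; the distribution's `rate` corresponds to SciPy's `scale` for the inverse Gamma, and the values are illustrative):

```python
import math
from scipy.special import digamma
from scipy.stats import invgamma

conc, rate = 3.0, 2.0  # illustrative values
entropy = (conc + math.log(rate) + math.lgamma(conc)
           - (1. + conc) * digamma(conc))
print(entropy, invgamma(conc, scale=rate).entropy())  # should agree
```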

Example 13: _log_normalization

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import lgamma [as alias]
def _log_normalization(self, counts):
    counts = self._maybe_assert_valid_sample(counts)
    return (math_ops.lgamma(1. + self.total_count - counts)
            + math_ops.lgamma(1. + counts)
            - math_ops.lgamma(1. + self.total_count)) 
Author: ryfeus, Project: lambda-packs, Lines: 7, Source: binomial.py
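This normalizer is −log C(n, k), the negative log binomial coefficient; a standard-library check with illustrative integer values (`math.comb` needs Python 3.8+):

```python
import math

total_count, counts = 10, 4  # illustrative values
log_norm = (math.lgamma(1. + total_count - counts)
            + math.lgamma(1. + counts)
            - math.lgamma(1. + total_count))
print(math.exp(-log_norm), math.comb(total_count, counts))  # 210.0 and 210
```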

Example 14: _log_normalization

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import lgamma [as alias]
def _log_normalization(self, positive_counts):
    if self.validate_args:
      positive_counts = distribution_util.embed_check_nonnegative_discrete(
          positive_counts, check_integer=True)
    return (-math_ops.lgamma(self.total_count + positive_counts)
            + math_ops.lgamma(positive_counts + 1.)
            + math_ops.lgamma(self.total_count)) 
Author: ryfeus, Project: lambda-packs, Lines: 9, Source: negative_binomial.py
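For integer `total_count` r this equals −log C(r + k − 1, k), the negative-binomial combinatorial factor; a standard-library check with illustrative values:

```python
import math

r, k = 5, 3  # illustrative total_count and observed count
log_norm = -math.lgamma(r + k) + math.lgamma(k + 1.) + math.lgamma(r)
print(math.exp(-log_norm), math.comb(r + k - 1, k))  # 35.0 and 35
```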

Example 15: _IgammaGrad

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import lgamma [as alias]
def _IgammaGrad(op, grad):
  """Returns gradient of igamma(a, x) with respect to a and x."""
  # TODO(ebrevdo): Perhaps add the derivative w.r.t. a
  a = op.inputs[0]
  x = op.inputs[1]
  sa = array_ops.shape(a)
  sx = array_ops.shape(x)
  unused_ra, rx = gen_array_ops._broadcast_gradient_args(sa, sx)

  # Perform operations in log space before summing, because Gamma(a)
  # and Gamma'(a) can grow large.
  partial_x = math_ops.exp(-x + (a - 1) * math_ops.log(x) - math_ops.lgamma(a))
  return (None,
          array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx)) 
Author: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 16, Source: math_grad.py


Note: the tensorflow.python.ops.math_ops.lgamma examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from community open-source projects; copyright in the source code remains with the original authors, and any use or redistribution must follow the corresponding project's license. Please do not repost without permission.