

Python math_ops.log1p Method Code Examples

This article collects typical usage examples of the Python method tensorflow.python.ops.math_ops.log1p. If you are wondering what exactly math_ops.log1p does, how it is called, or what it looks like in real code, the curated examples below may help. You can also browse further usage examples for the containing module, tensorflow.python.ops.math_ops.


The sections below show 15 code examples of math_ops.log1p, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
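
Before diving into the examples, it helps to recall why this code consistently prefers log1p(x) over log(1 + x): when x is close to zero, the sum 1 + x rounds away the low-order bits of x, so log(1 + x) can collapse to 0 while log1p(x) stays accurate. A minimal sketch of the effect (assuming TensorFlow 2.x eager execution; math_ops.log1p is the internal counterpart of the public tf.math.log1p):

# Illustrative check, not from the original source.
import numpy as np
from tensorflow.python.ops import math_ops

x = np.float32(1e-8)
naive = math_ops.log(np.float32(1.) + x)   # 1. + x rounds to exactly 1. in float32
stable = math_ops.log1p(x)                 # keeps the low-order bits of x
print(naive.numpy(), stable.numpy())       # 0.0 vs. ~1e-08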

Example 1: _sample_n

# Module to import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import log1p [as alias]
def _sample_n(self, n, seed=None):
    # Uniform variates must be sampled from the open-interval `(0, 1)` rather
    # than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
    # because it is the smallest, positive, "normal" number. A "normal" number
    # is such that the mantissa has an implicit leading 1. Normal, positive
    # numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
    # this case, a subnormal number (i.e., np.nextafter) can cause us to sample
    # 0.
    uniform = random_ops.random_uniform(
        shape=array_ops.concat([[n], self.batch_shape_tensor()], 0),
        minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
        maxval=1.,
        dtype=self.dtype,
        seed=seed)
    sampled = math_ops.log(uniform) - math_ops.log1p(-1. * uniform)
    return sampled * self.scale + self.loc 
Developer: ryfeus, Project: lambda-packs, Lines: 18, Source: logistic.py
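
Example 1 is inverse-CDF sampling of the Logistic(loc, scale) distribution: the CDF is F(x) = sigmoid((x - loc) / scale), so loc + scale * (log(u) - log1p(-u)) maps a uniform u in (0, 1) back to a logistic sample. A rough NumPy check of the same transform (the constants and sample size below are illustrative, not taken from the source):

# Illustrative check, not from the original source.
import numpy as np

rng = np.random.default_rng(0)
loc, scale, n = 2.0, 1.5, 200_000
u = rng.uniform(np.finfo(np.float64).tiny, 1.0, size=n)
samples = loc + scale * (np.log(u) - np.log1p(-u))
print(samples.mean())                           # ~loc
print(samples.var(), scale**2 * np.pi**2 / 3)   # empirical vs. theoretical variance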

Example 2: _sample_n

# Module to import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import log1p [as alias]
def _sample_n(self, n, seed=None):
    # Uniform variates must be sampled from the open-interval `(0, 1)` rather
    # than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
    # because it is the smallest, positive, "normal" number. A "normal" number
    # is such that the mantissa has an implicit leading 1. Normal, positive
    # numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
    # this case, a subnormal number (i.e., np.nextafter) can cause us to sample
    # 0.
    sampled = random_ops.random_uniform(
        array_ops.concat([[n], array_ops.shape(self._probs)], 0),
        minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
        maxval=1.,
        seed=seed,
        dtype=self.dtype)

    return math_ops.floor(
        math_ops.log(sampled) / math_ops.log1p(-self.probs)) 
Developer: ryfeus, Project: lambda-packs, Lines: 19, Source: geometric.py
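
Example 2 applies the same inverse-CDF idea to the Geometric distribution: with success probability p = self.probs, floor(log(u) / log1p(-p)) gives the number of failures before the first success, whose mean is (1 - p) / p. A quick NumPy check (the value of p and the sample size are illustrative):

# Illustrative check, not from the original source.
import numpy as np

rng = np.random.default_rng(0)
p, n = 0.3, 200_000
u = rng.uniform(np.finfo(np.float64).tiny, 1.0, size=n)
k = np.floor(np.log(u) / np.log1p(-p))
print(k.mean(), (1 - p) / p)   # empirical vs. theoretical mean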

Example 3: _sample_n

# Module to import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import log1p [as alias]
def _sample_n(self, n, seed=None):
    shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
    # Uniform variates must be sampled from the open-interval `(-1, 1)` rather
    # than `[-1, 1)`. In the case of `(0, 1)` we'd use
    # `np.finfo(self.dtype.as_numpy_dtype).tiny` because it is the smallest,
    # positive, "normal" number. However, the concept of subnormality exists
    # only at zero; here we need the smallest usable number larger than -1,
    # i.e., `-1 + eps/2`.
    uniform_samples = random_ops.random_uniform(
        shape=shape,
        minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                            self.dtype.as_numpy_dtype(0.)),
        maxval=1.,
        dtype=self.dtype,
        seed=seed)
    return (self.loc - self.scale * math_ops.sign(uniform_samples) *
            math_ops.log1p(-math_ops.abs(uniform_samples))) 
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 19, Source: laplace.py

Example 4: _log_unnormalized_prob

# Module to import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import log1p [as alias]
def _log_unnormalized_prob(self, x):
    x = self._maybe_assert_valid_sample(x)
    return ((self.concentration1 - 1.) * math_ops.log(x)
            + (self.concentration0 - 1.) * math_ops.log1p(-x)) 
Developer: ryfeus, Project: lambda-packs, Lines: 6, Source: beta.py
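
Example 4 is the unnormalized Beta(concentration1, concentration0) log-density, (a - 1) * log(x) + (b - 1) * log1p(-x); the log1p keeps the second term accurate when x is near zero. A small sanity check against SciPy (assuming SciPy is available; the parameter values are illustrative):

# Illustrative check, not from the original source.
import numpy as np
from scipy.special import betaln
from scipy.stats import beta

a, b, x = 2.5, 4.0, 0.3
unnormalized = (a - 1.) * np.log(x) + (b - 1.) * np.log1p(-x)
print(unnormalized - betaln(a, b))   # subtracting log B(a, b) normalizes it
print(beta.logpdf(x, a, b))          # should match the line above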

Example 5: _call_log_survival_function

# Module to import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import log1p [as alias]
def _call_log_survival_function(self, value, name, **kwargs):
    with self._name_scope(name, values=[value]):
      value = ops.convert_to_tensor(value, name="value")
      try:
        return self._log_survival_function(value, **kwargs)
      except NotImplementedError as original_exception:
        try:
          return math_ops.log1p(-self.cdf(value, **kwargs))
        except NotImplementedError:
          raise original_exception 
Developer: ryfeus, Project: lambda-packs, Lines: 12, Source: distribution.py

Example 6: _log_unnormalized_prob

# Module to import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import log1p [as alias]
def _log_unnormalized_prob(self, x):
    y = (x - self.loc) / self.scale  # Abs(scale) superfluous.
    return -0.5 * (self.df + 1.) * math_ops.log1p(y**2. / self.df) 
Developer: ryfeus, Project: lambda-packs, Lines: 5, Source: student_t.py
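
Example 6 computes the unnormalized Student-t log-density, -0.5 * (df + 1) * log1p(y**2 / df) with y = (x - loc) / scale. Adding back the log normalizer should recover SciPy's result; a sketch assuming SciPy is available (all parameter values below are illustrative):

# Illustrative check, not from the original source.
import numpy as np
from scipy.special import gammaln
from scipy.stats import t

df, loc, scale, x = 5.0, 1.0, 2.0, 2.5
y = (x - loc) / scale
unnormalized = -0.5 * (df + 1.) * np.log1p(y**2 / df)
log_norm = (gammaln((df + 1.) / 2.) - gammaln(df / 2.)
            - 0.5 * np.log(df * np.pi) - np.log(scale))
print(unnormalized + log_norm)                 # normalized log-density
print(t.logpdf(x, df, loc=loc, scale=scale))   # should match the line above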

Example 7: _cdf

# Module to import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import log1p [as alias]
def _cdf(self, counts):
    if self.validate_args:
      # We set `check_integer=False` since the CDF is defined on whole real
      # line.
      counts = math_ops.floor(
          distribution_util.embed_check_nonnegative_discrete(
              counts, check_integer=False))
    counts *= array_ops.ones_like(self.probs)
    return array_ops.where(
        counts < 0.,
        array_ops.zeros_like(counts),
        -math_ops.expm1(
            (counts + 1) * math_ops.log1p(-self.probs))) 
Developer: ryfeus, Project: lambda-packs, Lines: 15, Source: geometric.py
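
Example 7 evaluates the Geometric CDF, P(K <= k) = 1 - (1 - p)^(k + 1), as -expm1((k + 1) * log1p(-p)); the expm1/log1p pair avoids the cancellation that the direct formula suffers from when p is tiny. A minimal NumPy illustration (the values of p and k are illustrative):

# Illustrative check, not from the original source.
import numpy as np

p, k = 1e-9, 3
print(-np.expm1((k + 1) * np.log1p(-p)))   # ~4e-09, accurate
print(1. - (1. - p)**(k + 1))              # loses digits to cancellation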

Example 8: _log_prob

# Module to import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import log1p [as alias]
def _log_prob(self, counts):
    if self.validate_args:
      counts = distribution_util.embed_check_nonnegative_discrete(
          counts, check_integer=True)
    counts *= array_ops.ones_like(self.probs)
    probs = self.probs * array_ops.ones_like(counts)

    safe_domain = array_ops.where(
        math_ops.equal(counts, 0.),
        array_ops.zeros_like(probs),
        probs)
    return counts * math_ops.log1p(-safe_domain) + math_ops.log(probs) 
Developer: ryfeus, Project: lambda-packs, Lines: 14, Source: geometric.py

Example 9: _forward

# Module to import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import log1p [as alias]
def _forward(self, x):
    x = self._maybe_assert_valid_x(x)
    if self.power == 0.:
      return math_ops.exp(x)
    # If large x accuracy is an issue, consider using:
    # (1. + x * self.power)**(1. / self.power) when x >> 1.
    return math_ops.exp(math_ops.log1p(x * self.power) / self.power) 
Developer: ryfeus, Project: lambda-packs, Lines: 9, Source: power_transform_impl.py
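
Examples 9 and 15 implement the forward power (Box-Cox-style) transform: exp(log1p(x * c) / c) is algebraically (1 + c * x)**(1 / c), but the log1p form keeps precision when c * x is tiny. A quick check of the identity (the values of c and x are illustrative):

# Illustrative check, not from the original source.
import numpy as np

c, x = 0.5, 2.0
print(np.exp(np.log1p(x * c) / c))   # 4.0
print((1. + c * x)**(1. / c))        # 4.0, same value for moderate inputs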

Example 10: _inverse

# Module to import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import log1p [as alias]
def _inverse(self, y):
    return math_ops.log(y) - math_ops.log1p(-y) 
Developer: ryfeus, Project: lambda-packs, Lines: 4, Source: sigmoid_impl.py
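
Examples 10 and 11 come from the Sigmoid bijector: log(y) - log1p(-y) is the logit function (the inverse of sigmoid), and the next example's -log(y) - log1p(-y) is log|d logit(y)/dy|, its inverse log-det-Jacobian. A one-line round-trip check (assuming TensorFlow 2.x eager execution; the input value is illustrative):

# Illustrative check, not from the original source.
from tensorflow.python.ops import math_ops

x = 0.75
y = math_ops.sigmoid(x)
print((math_ops.log(y) - math_ops.log1p(-y)).numpy())   # ~0.75, recovers x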

Example 11: _inverse_log_det_jacobian

# Module to import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import log1p [as alias]
def _inverse_log_det_jacobian(self, y):
    return -math_ops.log(y) - math_ops.log1p(-y) 
Developer: ryfeus, Project: lambda-packs, Lines: 4, Source: sigmoid_impl.py

Example 12: _log_unnormalized_prob

# Module to import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import log1p [as alias]
def _log_unnormalized_prob(self, counts):
    counts = self._maybe_assert_valid_sample(counts)
    return (counts * math_ops.log(self.probs) +
            (self.total_count - counts) * math_ops.log1p(-self.probs)) 
Developer: ryfeus, Project: lambda-packs, Lines: 6, Source: binomial.py

Example 13: _log_unnormalized_prob

# Module to import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import log1p [as alias]
def _log_unnormalized_prob(self, positive_counts):
    if self.validate_args:
      positive_counts = distribution_util.embed_check_nonnegative_discrete(
          positive_counts, check_integer=True)
    return self.total_count * math_ops.log1p(
        -self.probs) + positive_counts * math_ops.log(self.probs) 
Developer: ryfeus, Project: lambda-packs, Lines: 8, Source: negative_binomial.py

Example 14: _log_unnormalized_prob

# Module to import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import log1p [as alias]
def _log_unnormalized_prob(self, x):
    y = (x - self.mu) / self.sigma  # Abs(sigma) superfluous.
    return -0.5 * (self.df + 1.) * math_ops.log1p(y**2. / self.df) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 5, Source: student_t.py

Example 15: _forward

# Module to import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import log1p [as alias]
def _forward(self, x):
    x = self._maybe_assert_valid_x(x)
    if self.power == 0.:
      return math_ops.exp(x)
    # TODO(jvdillon): If large x accuracy is an issue, consider using
    # (1. + x * self.power)**(1. / self.power) when x >> 1.
    return math_ops.exp(math_ops.log1p(x * self.power) / self.power) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 9, Source: bijector.py


Note: The tensorflow.python.ops.math_ops.log1p examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code belongs to the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce this compilation without permission.