

Python tensorflow.is_inf Method Code Examples

This article collects typical usage examples of the Python method tensorflow.is_inf. If you are wondering what exactly tensorflow.is_inf does, how to call it, or where to find usage examples, the curated code samples below may help. You can also explore further usage examples of the tensorflow module that provides this method.


The sections below present 10 code examples of the tensorflow.is_inf method, collected from open-source projects and ordered by popularity.
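Before diving into the collected examples, a minimal sketch of what tf.is_inf computes may be helpful. The snippet below assumes TensorFlow 1.x, where the op is exposed as tf.is_inf (in TensorFlow 2.x it lives under tf.math.is_inf); the input values are purely illustrative.

import numpy as np
import tensorflow as tf

# tf.is_inf returns an element-wise boolean tensor: True wherever the input
# is +inf or -inf, False for finite values and for NaN.
x = tf.constant([1.0, np.inf, -np.inf, np.nan], dtype=tf.float32)
mask = tf.is_inf(x)

with tf.Session() as sess:
    print(sess.run(mask))  # [False  True  True False]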

Example 1: Exponential

# Required import: import tensorflow [as alias]
# Or: from tensorflow import is_inf [as alias]
def Exponential(lambda_, name=None):
    X = tf.placeholder(config.dtype, name=name)

    Distribution.logp = tf.log(lambda_) - lambda_*X

    def integral(lower, upper):
        upper_integrand = tf.cond(
            tf.is_inf(tf.cast(upper, config.dtype)),
            lambda: tf.constant(1, dtype=config.dtype),
            lambda: tf.exp(-lambda_*upper)
        )

        lower_integrand = tf.cond(
            tf.is_inf(tf.cast(lower, config.dtype)),
            lambda: tf.constant(0, dtype=config.dtype),
            lambda: tf.exp(-lambda_*lower)
        )

        return lower_integrand - upper_integrand

    Distribution.integral = integral

    return X 
Developer: tensorprob, Project: tensorprob, Lines of code: 25, Source: exponential.py
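The tensorprob distribution snippets in this article (Exponential above, Uniform, UniformInt and Normal below) all use the same idiom: wrap the analytic antiderivative in tf.cond and use tf.is_inf to substitute the known limiting value whenever an integration bound is infinite. The standalone sketch below illustrates that idiom with a standard-normal CDF; the helper name and dtype choices are mine, and nothing here comes from the tensorprob package itself.

import numpy as np
import tensorflow as tf

def cdf_with_inf_guard(limit, mu=0.0, sigma=1.0):
    # Standard-normal CDF that short-circuits infinite limits via tf.is_inf,
    # mirroring the cond-on-is_inf pattern used in the snippets above/below.
    limit = tf.cast(limit, tf.float64)
    finite_cdf = 0.5 * (1.0 + tf.erf((limit - mu) / (sigma * np.sqrt(2.0))))
    return tf.cond(
        tf.is_inf(limit),
        lambda: tf.cast(limit > 0, tf.float64),  # +inf -> 1.0, -inf -> 0.0
        lambda: finite_cdf,
    )

# Probability mass of the standard normal on (-inf, 0]: expect 0.5.
with tf.Session() as sess:
    print(sess.run(cdf_with_inf_guard(0.0) - cdf_with_inf_guard(-np.inf)))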

Example 2: Uniform

# Required import: import tensorflow [as alias]
# Or: from tensorflow import is_inf [as alias]
def Uniform(name=None):
    X = tf.placeholder(config.dtype, name=name)

    Distribution.logp = tf.fill(tf.shape(X), config.dtype(0))

    def integral(lower, upper):
        return tf.cond(
            tf.logical_or(
                tf.is_inf(tf.cast(lower, config.dtype)),
                tf.is_inf(tf.cast(upper, config.dtype))
            ),
            lambda: tf.constant(1, dtype=config.dtype),
            lambda: tf.cast(upper, config.dtype) - tf.cast(lower, config.dtype),
        )

    Distribution.integral = integral

    return X 
Developer: tensorprob, Project: tensorprob, Lines of code: 20, Source: uniform.py

Example 3: UniformInt

# Required import: import tensorflow [as alias]
# Or: from tensorflow import is_inf [as alias]
def UniformInt(name=None):
    X = tf.placeholder(config.int_dtype, name=name)

    Distribution.logp = tf.fill(tf.shape(X), config.dtype(0))

    def integral(lower, upper):
        val = tf.cond(
            tf.logical_or(
                tf.is_inf(tf.ceil(tf.cast(lower, config.dtype))),
                tf.is_inf(tf.floor(tf.cast(upper, config.dtype)))
            ),
            lambda: tf.constant(1, dtype=config.dtype),
            lambda: tf.cast(upper, config.dtype) - tf.cast(lower, config.dtype),
        )
        return val

    Distribution.integral = integral

    return X 
Developer: tensorprob, Project: tensorprob, Lines of code: 21, Source: uniform.py

Example 4: get_cubic_root

# Required import: import tensorflow [as alias]
# Or: from tensorflow import is_inf [as alias]
def get_cubic_root(self):
    # We have the equation x^2 D^2 + (1-x)^4 * C / h_min^2
    # where x = sqrt(mu).
    # We substitute x, which is sqrt(mu), with x = y + 1.
    # It gives y^3 + py = q
    # where p = (D^2 h_min^2)/(2*C) and q = -p.
    # We use Vieta's substitution to compute the root.
    # There is only one real solution y (which is in [0, 1] ).
    # http://mathworld.wolfram.com/VietasSubstitution.html
    # assert_array = \
    #   [tf.Assert(tf.logical_not(tf.is_nan(self._dist_to_opt_avg) ), [self._dist_to_opt_avg,]), 
    #   tf.Assert(tf.logical_not(tf.is_nan(self._h_min) ), [self._h_min,]), 
    #   tf.Assert(tf.logical_not(tf.is_nan(self._grad_var) ), [self._grad_var,]),
    #   tf.Assert(tf.logical_not(tf.is_inf(self._dist_to_opt_avg) ), [self._dist_to_opt_avg,]), 
    #   tf.Assert(tf.logical_not(tf.is_inf(self._h_min) ), [self._h_min,]), 
    #   tf.Assert(tf.logical_not(tf.is_inf(self._grad_var) ), [self._grad_var,])]
    # with tf.control_dependencies(assert_array):
    # EPS in the numerator to prevent momentum being exactly one in case of 0 gradient
    p = (self._dist_to_opt_avg + EPS)**2 * (self._h_min + EPS)**2 / 2 / (self._grad_var + EPS)
    w3 = (-tf.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
    w = tf.sign(w3) * tf.pow(tf.abs(w3), 1.0/3.0)
    y = w - p / 3.0 / (w + EPS)
    x = y + 1
    return x 
Developer: JianGoForIt, Project: YellowFin, Lines of code: 26, Source: yellowfin.py
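The comment in get_cubic_root compresses several algebra steps. The following is a short reconstruction of my reading of it, assuming the quoted expression is being minimized over $x = \sqrt{\mu}$; it is a sketch, not a quotation from the YellowFin paper.

Setting the derivative of $f(x) = x^2 D^2 + (1-x)^4 C / h_{\min}^2$ to zero gives
\[
  2 x D^2 - 4 (1-x)^3 \frac{C}{h_{\min}^2} = 0 .
\]
Substituting $x = y + 1$ (so $1 - x = -y$) and multiplying through by $h_{\min}^2 / (4C)$ yields the depressed cubic
\[
  y^3 + p\,y = q , \qquad p = \frac{D^2 h_{\min}^2}{2C}, \qquad q = -p .
\]
Vieta's substitution $y = w - p/(3w)$ turns this into a quadratic in $w^3$; the code picks the root
\[
  w^3 = \frac{-p - \sqrt{\,p^2 + \tfrac{4}{27} p^3\,}}{2},
\]
takes the real cube root via $w = \operatorname{sign}(w^3)\,|w^3|^{1/3}$, and returns $x = y + 1 = w - p/(3w) + 1$, with EPS added to the denominators for numerical safety.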

Example 5: Normal

# Required import: import tensorflow [as alias]
# Or: from tensorflow import is_inf [as alias]
def Normal(mu, sigma, name=None):
    # TODO(chrisburr) Just use NormalN?
    X = tf.placeholder(config.dtype, name=name)

    Distribution.logp = _normal_logp(X, mu, sigma)

    def integral(lower, upper):
        upper_integrand = tf.cond(
            tf.is_inf(tf.cast(upper, config.dtype)),
            lambda: tf.constant(1, dtype=config.dtype),
            lambda: _normal_cdf(upper, mu, sigma)
        )

        lower_integrand = tf.cond(
            tf.is_inf(tf.cast(lower, config.dtype)),
            lambda: tf.constant(0, dtype=config.dtype),
            lambda: _normal_cdf(lower, mu, sigma)
        )

        return upper_integrand - lower_integrand

    Distribution.integral = integral

    return X


# @Distribution
# def NormalN(mus, sigmas, name=None):
#     X = tf.placeholder(config.dtype, name=name)

#     logps = [_normal_logp(X, mu, sigma) for mu, sigma in zip(mus, sigmas)]

#     def cdf(lim):
#         raise NotImplementedError

#     Distribution.logp = sum(logps)
#     Distribution.integral = lambda lower, upper: cdf(upper) - cdf(lower)

#     return X 
Developer: tensorprob, Project: tensorprob, Lines of code: 41, Source: normal.py

Example 6: _compare

# Required import: import tensorflow [as alias]
# Or: from tensorflow import is_inf [as alias]
def _compare(self, x, use_gpu):
    np_finite, np_inf, np_nan = np.isfinite(x), np.isinf(x), np.isnan(x)
    with self.test_session(use_gpu=use_gpu) as sess:
      inx = tf.convert_to_tensor(x)
      ofinite, oinf, onan = tf.is_finite(inx), tf.is_inf(
          inx), tf.is_nan(inx)
      tf_finite, tf_inf, tf_nan = sess.run([ofinite, oinf, onan])
    self.assertAllEqual(np_inf, tf_inf)
    self.assertAllEqual(np_nan, tf_nan)
    self.assertAllEqual(np_finite, tf_finite)
    self.assertShapeEqual(np_inf, oinf)
    self.assertShapeEqual(np_nan, onan)
    self.assertShapeEqual(np_finite, ofinite) 
Developer: tobegit3hub, Project: deep_image_model, Lines of code: 15, Source: cwise_ops_test.py

Example 7: tf_safe_log

# Required import: import tensorflow [as alias]
# Or: from tensorflow import is_inf [as alias]
def tf_safe_log(value, replacement_value=-100.0):
    log_value = tf.log(value + 1e-9)
    replace = tf.logical_or(tf.is_nan(log_value), tf.is_inf(log_value))
    log_value = tf.where(replace, replacement_value * tf.ones_like(log_value), log_value)
    return log_value 
Developer: e2crawfo, Project: auto_yolo, Lines of code: 7, Source: core.py
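For context, a tiny illustrative call to tf_safe_log (assuming the definition above is in scope and TensorFlow 1.x); the values are chosen to hit each branch:

import tensorflow as tf

# log(0.5 + 1e-9) is an ordinary finite log; log(0 + 1e-9) stays finite
# thanks to the 1e-9 offset; log(-1 + 1e-9) is NaN and gets replaced by
# replacement_value (-100 by default) through the is_nan/is_inf mask.
probs = tf.constant([0.5, 0.0, -1.0])
with tf.Session() as sess:
    print(sess.run(tf_safe_log(probs)))  # approx [-0.693, -20.72, -100.]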

Example 8: update_op

# Required import: import tensorflow [as alias]
# Or: from tensorflow import is_inf [as alias]
def update_op(self, has_nan, amax):
    def overflow_case():
      new_scale_val = tf.clip_by_value(self.scale / self.step_factor,
                                       self.scale_min, self.scale_max)
      scale_assign = tf.assign(self.scale, new_scale_val)
      overflow_iter_assign = tf.assign(self.last_overflow_iteration,
                                       self.iteration)
      with tf.control_dependencies([scale_assign, overflow_iter_assign]):
        return tf.identity(self.scale)

    def scale_case():
      since_overflow = self.iteration - self.last_overflow_iteration
      should_update = tf.equal(since_overflow % self.step_window, 0)
      def scale_update_fn():
        new_scale_val = tf.clip_by_value(self.scale * self.step_factor,
                                         self.scale_min, self.scale_max)
        return tf.assign(self.scale, new_scale_val)
      return tf.cond(should_update,
                     scale_update_fn,
                     lambda: self.scale)

    iter_update = tf.assign_add(self.iteration, 1)
    overflow = tf.logical_or(has_nan, tf.is_inf(amax))

    update_op = tf.cond(overflow,
                        overflow_case,
                        scale_case)
    with tf.control_dependencies([update_op]):
      return tf.identity(iter_update) 
Developer: NVIDIA, Project: OpenSeq2Seq, Lines of code: 31, Source: automatic_loss_scaler.py
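The two inputs of update_op are typically derived from the backward pass. The sketch below shows one common way to build them from a list of gradients; it is illustrative only, and the actual wiring inside OpenSeq2Seq may differ.

import tensorflow as tf

def overflow_signals(grads):
    # has_nan: True if any gradient element is NaN.
    # amax: the largest absolute gradient value, which update_op then checks
    #       with tf.is_inf to detect overflow.
    present = [g for g in grads if g is not None]
    has_nan = tf.reduce_any(
        tf.stack([tf.reduce_any(tf.is_nan(g)) for g in present]))
    amax = tf.reduce_max(
        tf.stack([tf.reduce_max(tf.abs(g)) for g in present]))
    return has_nan, amax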

Example 9: gradient_summaries

# Required import: import tensorflow [as alias]
# Or: from tensorflow import is_inf [as alias]
def gradient_summaries(gvs, suppress_inf_and_nans=False):
  """Creates summaries for norm, mean and var of gradients."""
  gs = [gv[0] for gv in gvs]
  grad_global_norm = tf.global_norm(gs, 'gradient_global_norm')

  if suppress_inf_and_nans:
    is_nan_or_inf = tf.logical_or(tf.is_nan(grad_global_norm),
                                  tf.is_inf(grad_global_norm))

    grad_global_norm = tf.where(is_nan_or_inf,
                                tf.zeros_like(grad_global_norm) - 1.,
                                grad_global_norm)

  grad_abs_max, grad_abs_mean, grad_mean, grad_var = [0.] * 4
  n_grads = 1e-8
  for g, _ in gvs:
    if isinstance(g, tf.IndexedSlices):
      g = g.values

    if g is not None:
      current_n_grads = np.prod(g.shape.as_list())
      abs_g = abs(g)
      mean, var = tf.nn.moments(g, list(range(len(g.shape))))
      grad_abs_max = tf.maximum(grad_abs_max, tf.reduce_max(abs_g))
      grad_abs_mean += tf.reduce_sum(abs_g)
      grad_mean += mean * current_n_grads
      grad_var += var
      n_grads += current_n_grads

  tf.summary.scalar('grad/abs_max', grad_abs_max)
  tf.summary.scalar('grad/abs_mean', grad_abs_mean / n_grads)
  tf.summary.scalar('grad/mean', grad_mean / n_grads)
  tf.summary.scalar('grad/var', grad_var / n_grads)

  return dict(grad_global_norm=grad_global_norm) 
Developer: akosiorek, Project: stacked_capsule_autoencoders, Lines of code: 37, Source: tools.py
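A hypothetical call site for gradient_summaries (the variable, loss, and optimizer below are placeholders, not taken from the stacked_capsule_autoencoders project; note that the function also relies on numpy being imported as np in its defining module):

import tensorflow as tf

w = tf.get_variable('w', shape=[3], initializer=tf.zeros_initializer())
loss = tf.reduce_sum(tf.square(w - 1.0))

opt = tf.train.AdamOptimizer(1e-3)
gvs = opt.compute_gradients(loss)
tensors = gradient_summaries(gvs, suppress_inf_and_nans=True)
train_op = opt.apply_gradients(gvs)
# tensors['grad_global_norm'] is replaced by -1 whenever the global gradient
# norm is NaN or Inf, so downstream logging never sees non-finite values.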

Example 10: test_forward_isinf

# Required import: import tensorflow [as alias]
# Or: from tensorflow import is_inf [as alias]
def test_forward_isinf():
    _verify_infiniteness_ops(tf.is_inf, "isinf") 
Developer: apache, Project: incubator-tvm, Lines of code: 4, Source: test_forward.py


Note: The tensorflow.is_inf examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors; for distribution and use, refer to the corresponding project's license. Do not reproduce without permission.