

Python nn_ops.softplus Function Code Examples

This article collects typical usage examples of the tensorflow.python.ops.nn_ops.softplus function in Python. If you have been wondering what softplus does, how to call it, or what real-world uses look like, the curated examples below should help.


The following 15 code examples of the softplus function are shown, sorted by popularity.
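Before the examples, a quick reminder of what softplus computes. The snippet below is a pure-NumPy reference for the underlying formula, softplus(x) = log(1 + exp(x)), written in a numerically stable form; it is illustrative only, not the TensorFlow kernel itself.

import numpy as np

def softplus(x):
    # log(1 + exp(x)), computed stably as max(x, 0) + log1p(exp(-|x|))
    # so that large positive x does not overflow exp().
    x = np.asarray(x, dtype=np.float64)
    return np.maximum(x, 0.0) + np.log1p(np.exp(-np.abs(x)))

print(softplus([-2.0, 0.0, 3.0]))  # ~[0.1269, 0.6931, 3.0486]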

Example 1: testGammaWithSoftplusAlphaBeta

 def testGammaWithSoftplusAlphaBeta(self):
   with self.test_session():
     alpha_v = constant_op.constant([0.0, -2.1], name="alpha")
     beta_v = constant_op.constant([1.0, -3.6], name="beta")
     gamma = gamma_lib.GammaWithSoftplusAlphaBeta(alpha=alpha_v, beta=beta_v)
     self.assertAllEqual(nn_ops.softplus(alpha_v).eval(), gamma.alpha.eval())
     self.assertAllEqual(nn_ops.softplus(beta_v).eval(), gamma.beta.eval())
Developer: AliMiraftab, Project: tensorflow, Lines: 7, Source: gamma_test.py
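All of the *WithSoftplus* distribution classes in these examples follow the same pattern: softplus maps unconstrained real parameters to strictly positive values, so the distribution's positivity constraints are satisfied automatically. The sketch below shows a roughly equivalent manual construction; it assumes the TF 1.x tensorflow.python.ops.distributions layout, and the exact keyword names (alpha/beta versus concentration/rate) depend on the release, as the later examples show.

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import gamma as gamma_lib

# Unconstrained parameters; softplus makes them strictly positive.
alpha_v = constant_op.constant([0.0, -2.1], name="alpha")
beta_v = constant_op.constant([1.0, -3.6], name="beta")
gamma = gamma_lib.Gamma(concentration=nn_ops.softplus(alpha_v),
                        rate=nn_ops.softplus(beta_v))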

Example 2: testBetaWithSoftplusConcentration

 def testBetaWithSoftplusConcentration(self):
   a, b = -4.2, -9.1
   dist = beta_lib.BetaWithSoftplusConcentration(a, b)
   self.assertAllClose(
       self.evaluate(nn_ops.softplus(a)), self.evaluate(dist.concentration1))
   self.assertAllClose(
       self.evaluate(nn_ops.softplus(b)), self.evaluate(dist.concentration0))
Developer: AnishShah, Project: tensorflow, Lines: 7, Source: beta_test.py

Example 3: testInverseGammaWithSoftplusAlphaBeta

 def testInverseGammaWithSoftplusAlphaBeta(self):
   with self.test_session():
     alpha = constant_op.constant([-0.1, -2.9], name="alpha")
     beta = constant_op.constant([1.0, -4.8], name="beta")
     inv_gamma = inverse_gamma.InverseGammaWithSoftplusAlphaBeta(
         alpha=alpha, beta=beta, validate_args=True)
     self.assertAllClose(nn_ops.softplus(alpha).eval(), inv_gamma.alpha.eval())
     self.assertAllClose(nn_ops.softplus(beta).eval(), inv_gamma.beta.eval())
Developer: AliMiraftab, Project: tensorflow, Lines: 8, Source: inverse_gamma_test.py

Example 4: testGammaWithSoftplusConcentrationRate

 def testGammaWithSoftplusConcentrationRate(self):
   with self.test_session():
     alpha_v = constant_op.constant([0.0, -2.1], name="alpha")
     beta_v = constant_op.constant([1.0, -3.6], name="beta")
     gamma = gamma_lib.GammaWithSoftplusConcentrationRate(
         concentration=alpha_v, rate=beta_v)
     self.assertAllEqual(nn_ops.softplus(alpha_v).eval(),
                         gamma.concentration.eval())
     self.assertAllEqual(nn_ops.softplus(beta_v).eval(),
                         gamma.rate.eval())
Developer: jzuern, Project: tensorflow, Lines: 10, Source: gamma_test.py

Example 5: testGammaWithSoftplusConcentrationRate

 def testGammaWithSoftplusConcentrationRate(self):
   alpha_v = constant_op.constant([0.0, -2.1], name="alpha")
   beta_v = constant_op.constant([1.0, -3.6], name="beta")
   gamma = gamma_lib.GammaWithSoftplusConcentrationRate(
       concentration=alpha_v, rate=beta_v)
   self.assertAllEqual(
       self.evaluate(nn_ops.softplus(alpha_v)),
       self.evaluate(gamma.concentration))
   self.assertAllEqual(
       self.evaluate(nn_ops.softplus(beta_v)), self.evaluate(gamma.rate))
Developer: AnishShah, Project: tensorflow, Lines: 10, Source: gamma_test.py

Example 6: testInverseGammaWithSoftplusConcentrationRate

 def testInverseGammaWithSoftplusConcentrationRate(self):
   with self.cached_session():
     alpha = constant_op.constant([-0.1, -2.9], name="alpha")
     beta = constant_op.constant([1.0, -4.8], name="beta")
     inv_gamma = inverse_gamma.InverseGammaWithSoftplusConcentrationRate(
         concentration=alpha, rate=beta, validate_args=True)
     self.assertAllClose(nn_ops.softplus(alpha).eval(),
                         inv_gamma.concentration.eval())
     self.assertAllClose(nn_ops.softplus(beta).eval(),
                         inv_gamma.rate.eval())
Developer: Ajaycs99, Project: tensorflow, Lines: 10, Source: inverse_gamma_test.py

Example 7: testNormalWithSoftplusScale

 def testNormalWithSoftplusScale(self):
   mu = array_ops.zeros((10, 3))
   rho = array_ops.ones((10, 3)) * -2.
   normal = normal_lib.NormalWithSoftplusScale(loc=mu, scale=rho)
   self.assertAllEqual(self.evaluate(mu), self.evaluate(normal.loc))
   self.assertAllEqual(
       self.evaluate(nn_ops.softplus(rho)), self.evaluate(normal.scale))
Developer: AnishShah, Project: tensorflow, Lines: 7, Source: normal_test.py

Example 8: testNormalWithSoftplusScale

 def testNormalWithSoftplusScale(self):
   with self.test_session():
     mu = array_ops.zeros((10, 3))
     rho = array_ops.ones((10, 3)) * -2.
     normal = normal_lib.NormalWithSoftplusScale(loc=mu, scale=rho)
     self.assertAllEqual(mu.eval(), normal.loc.eval())
     self.assertAllEqual(nn_ops.softplus(rho).eval(), normal.scale.eval())
Developer: Immexxx, Project: tensorflow, Lines: 7, Source: normal_test.py

Example 9: _testSoftplus

  def _testSoftplus(self, np_features, use_gpu=False):
    np_features = np.asarray(np_features)
    np_softplus = self._npSoftplus(np_features)
    with self.test_session(use_gpu=use_gpu) as sess:
      softplus = nn_ops.softplus(np_features)
      softplus_inverse = distribution_util.softplus_inverse(softplus)
      [tf_softplus, tf_softplus_inverse] = sess.run([
          softplus, softplus_inverse])
    self.assertAllCloseAccordingToType(np_softplus, tf_softplus)
    rtol = {"float16": 0.07, "float32": 0.003, "float64": 0.002}.get(
        str(np_features.dtype), 1e-6)
    # This will test that we correctly computed the inverse by verifying we
    # recovered the original input.
    self.assertAllCloseAccordingToType(
        np_features, tf_softplus_inverse,
        atol=0., rtol=rtol)
    self.assertAllEqual(np.ones_like(tf_softplus).astype(np.bool),
                        tf_softplus > 0)

    self.assertShapeEqual(np_softplus, softplus)
    self.assertShapeEqual(np_softplus, softplus_inverse)

    self.assertAllEqual(np.ones_like(tf_softplus).astype(np.bool),
                        np.isfinite(tf_softplus))
    self.assertAllEqual(np.ones_like(tf_softplus_inverse).astype(np.bool),
                        np.isfinite(tf_softplus_inverse))
Developer: AlbertXiebnu, Project: tensorflow, Lines: 26, Source: distribution_util_test.py
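The inverse that this test checks is easy to state: on y > 0, softplus_inverse(y) = log(exp(y) - 1), which rewrites as y + log(1 - exp(-y)) so that exp(y) never overflows for large y. The NumPy sketch below verifies the round trip; it is an illustrative reference, while distribution_util.softplus_inverse handles dtype-dependent precision more carefully (hence the per-dtype rtol values in the test above).

import numpy as np

def softplus_inverse(y):
    # log(exp(y) - 1), rewritten as y + log(1 - exp(-y)) for stability
    # at large y; valid for y > 0.
    return y + np.log(-np.expm1(-y))

x = np.array([-5.0, 0.0, 5.0])
y = np.log1p(np.exp(x))                      # softplus, fine for moderate |x|
print(np.allclose(softplus_inverse(y), x))   # True: the round trip recovers x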

Example 10: testNormalWithSoftplusSigma

 def testNormalWithSoftplusSigma(self):
   with self.test_session():
     mu = array_ops.zeros((10, 3))
     rho = array_ops.ones((10, 3)) * -2.
     normal = normal_lib.NormalWithSoftplusSigma(mu=mu, sigma=rho)
     self.assertAllEqual(mu.eval(), normal.mu.eval())
     self.assertAllEqual(nn_ops.softplus(rho).eval(), normal.sigma.eval())
Developer: ivankreso, Project: tensorflow, Lines: 7, Source: normal_test.py

Example 11: testLaplaceWithSoftplusScale

 def testLaplaceWithSoftplusScale(self):
   with self.test_session():
     loc_v = constant_op.constant([0.0, 1.0], name="loc")
     scale_v = constant_op.constant([-1.0, 2.0], name="scale")
     laplace = laplace_lib.LaplaceWithSoftplusScale(loc=loc_v, scale=scale_v)
     self.assertAllClose(nn_ops.softplus(scale_v).eval(), laplace.scale.eval())
     self.assertAllClose(loc_v.eval(), laplace.loc.eval())
Developer: AliMiraftab, Project: tensorflow, Lines: 7, Source: laplace_test.py

Example 12: testLaplaceWithSoftplusScale

 def testLaplaceWithSoftplusScale(self):
   loc_v = constant_op.constant([0.0, 1.0], name="loc")
   scale_v = constant_op.constant([-1.0, 2.0], name="scale")
   laplace = laplace_lib.LaplaceWithSoftplusScale(loc=loc_v, scale=scale_v)
   self.assertAllClose(
       self.evaluate(nn_ops.softplus(scale_v)), self.evaluate(laplace.scale))
   self.assertAllClose(self.evaluate(loc_v), self.evaluate(laplace.loc))
Developer: JonathanRaiman, Project: tensorflow, Lines: 7, Source: laplace_test.py

Example 13: _forward_log_det_jacobian

 def _forward_log_det_jacobian(self, x):
   if self._static_event_ndims == 0:
     return x - 2. * nn_ops.softplus(x)
   else:
     # This code is similar to nn_ops.log_softmax but different because we have
     # an implicit zero column to handle. I.e., instead of:
     #   reduce_sum(logits - reduce_sum(exp(logits), dim))
     # we must do:
     #   log_normalization = 1 + reduce_sum(exp(logits))
     #   -log_normalization + reduce_sum(logits - log_normalization)
     log_normalization = nn_ops.softplus(
         math_ops.reduce_logsumexp(x, axis=-1, keep_dims=True))
     fldj = (-log_normalization +
             math_ops.reduce_sum(x - log_normalization,
                                 axis=-1,
                                 keep_dims=True))
     return array_ops.squeeze(fldj, squeeze_dims=-1)
Developer: AbhinavJain13, Project: tensorflow, Lines: 17, Source: softmax_centered.py
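A note on the scalar branch above: with a single logit x and an implicit zero logit, softmax reduces to the logistic sigmoid s(x) = exp(x) / (1 + exp(x)), whose derivative is s(x) * (1 - s(x)); taking logs gives exactly x - 2 * softplus(x). The NumPy check below confirms the identity numerically (illustrative only, not part of the TF source).

import numpy as np

x = np.linspace(-4.0, 4.0, 9)
s = 1.0 / (1.0 + np.exp(-x))            # sigmoid = softmax over [x, 0]
lhs = x - 2.0 * np.log1p(np.exp(x))     # x - 2 * softplus(x)
rhs = np.log(s * (1.0 - s))             # log of the sigmoid's derivative
print(np.allclose(lhs, rhs))            # True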

Example 14: _testSoftplus

 def _testSoftplus(self, np_features, use_gpu=False):
   np_softplus = self._npSoftplus(np_features)
   with self.test_session(use_gpu=use_gpu):
     softplus = nn_ops.softplus(np_features)
     tf_softplus = softplus.eval()
   self.assertAllCloseAccordingToType(np_softplus, tf_softplus)
   self.assertTrue(np.all(tf_softplus > 0))
   self.assertShapeEqual(np_softplus, softplus)
Developer: ThunderQi, Project: tensorflow, Lines: 8, Source: softplus_op_test.py
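The helper _npSoftplus used by Examples 9 and 14 is not shown in the excerpts. One plausible NumPy reference implementation, consistent with what the tests assert, computes log(1 + exp(x)) via np.logaddexp; this reconstruction is an assumption, not the verbatim helper from the TF test files.

import numpy as np

def _npSoftplus(np_features):
    # softplus(x) = log(1 + exp(x)) = log(exp(0) + exp(x)) = logaddexp(0, x),
    # evaluated in the same dtype as the input features.
    np_features = np.asarray(np_features)
    zero = np.asarray(0).astype(np_features.dtype)
    return np.logaddexp(zero, np_features)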

Example 15: jensen_shannon

def jensen_shannon(logu, self_normalized=False, name=None):
  """The Jensen-Shannon Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  When `self_normalized = True`, the Jensen-Shannon Csiszar-function is:

  ```none
  f(u) = u log(u) - (1 + u) log(1 + u) + (u + 1) log(2)
  ```

  When `self_normalized = False` the `(u + 1) log(2)` term is omitted.

  Observe that as an f-Divergence, this Csiszar-function implies:

  ```none
  D_f[p, q] = KL[p, m] + KL[q, m]
  m(x) = 0.5 p(x) + 0.5 q(x)
  ```

  In a sense, this divergence is the "reverse" of the Arithmetic-Geometric
  f-Divergence.

  This Csiszar-function induces a symmetric f-Divergence, i.e.,
  `D_f[p, q] = D_f[q, p]`.

  Warning: this function makes non-log-space calculations and may therefore be
  numerically unstable for `|logu| >> 0`.

  For more information, see:
    Lin, J. "Divergence measures based on the Shannon entropy." IEEE Trans.
    Inf. Th., 37, 145-151, 1991.

  Args:
    logu: Floating-type `Tensor` representing `log(u)` from above.
    self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
      `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
      when `p, q` are unnormalized measures.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    jensen_shannon_of_u: Floating-type `Tensor` of the Csiszar-function
      evaluated at `u = exp(logu)`.
  """

  with ops.name_scope(name, "jensen_shannon", [logu]):
    logu = ops.convert_to_tensor(logu, name="logu")
    npdt = logu.dtype.as_numpy_dtype
    y = nn_ops.softplus(logu)
    if self_normalized:
      y -= np.log(2).astype(npdt)
    return math_ops.exp(logu) * logu - (1. + math_ops.exp(logu)) * y
Developer: Joetz, Project: tensorflow, Lines: 56, Source: csiszar_divergence_impl.py
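A NumPy sanity check of the closed form quoted in the docstring (illustrative, not part of the TF source): in the self-normalized case f(1) = 0, and f satisfies f(u) = u * f(1/u), which is what makes the induced f-Divergence symmetric, i.e. D_f[p, q] = D_f[q, p].

import numpy as np

def js_f(u, self_normalized=True):
    # f(u) = u log(u) - (1 + u) log(1 + u) [+ (u + 1) log(2) if normalized]
    f = u * np.log(u) - (1.0 + u) * np.log1p(u)
    if self_normalized:
        f = f + (u + 1.0) * np.log(2.0)
    return f

print(np.isclose(js_f(1.0), 0.0))                 # True: f(u=1) = 0
u = np.array([0.5, 1.0, 2.0])
print(np.allclose(js_f(u), u * js_f(1.0 / u)))    # True: f(u) = u * f(1/u)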


Note: The tensorflow.python.ops.nn_ops.softplus examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their respective authors; copyright remains with the original authors, and any redistribution or use should follow each project's License. Do not reproduce without permission.