

Python tensorflow.atanh Method Code Examples

This article collects typical usage examples of the tensorflow.atanh method in Python. If you are wondering what tensorflow.atanh does, how to call it, or where to find real-world examples, the curated code samples below may help. You can also explore further usage examples from the tensorflow module this method belongs to.


Below are 15 code examples of the tensorflow.atanh method, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
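
Before the individual examples, here is a minimal TF1-style sketch of what tf.atanh computes (illustrative only, not taken from any of the projects below). atanh is the element-wise inverse hyperbolic tangent; it is defined only on the open interval (-1, 1), which is why several examples below clip their inputs before calling it.

import numpy as np
import tensorflow as tf

x = tf.constant([-0.5, 0.0, 0.5], dtype=tf.float32)
y = tf.atanh(x)   # equivalent to 0.5 * log((1 + x) / (1 - x))
z = tf.tanh(y)    # tanh inverts atanh, so z recovers x

with tf.Session() as sess:
    y_val, z_val = sess.run([y, z])
    np.testing.assert_allclose(z_val, [-0.5, 0.0, 0.5], rtol=1e-6)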

Example 1: test_forward_unary

# Required import: import tensorflow [as alias]
# Or: from tensorflow import atanh [as alias]
def test_forward_unary():
    def _test_forward_unary(op, a_min=1, a_max=5, dtype=np.float32):
        """test unary operators"""
        np_data = np.random.uniform(a_min, a_max, size=(2, 3, 5)).astype(dtype)
        tf.reset_default_graph()
        with tf.Graph().as_default():
            in_data = tf.placeholder(dtype, (2, 3, 5), name="in_data")
            out = op(in_data)
            compare_tf_with_tvm([np_data], ['in_data:0'], out.name)

    _test_forward_unary(tf.acos, -1, 1)
    _test_forward_unary(tf.asin, -1, 1)
    _test_forward_unary(tf.atanh, -1, 1)
    _test_forward_unary(tf.sinh)
    _test_forward_unary(tf.cosh)
    _test_forward_unary(tf.acosh)
    _test_forward_unary(tf.asinh)
    _test_forward_unary(tf.atan)
    _test_forward_unary(tf.sin)
    _test_forward_unary(tf.cos)
    _test_forward_unary(tf.tan)
    _test_forward_unary(tf.tanh)
    _test_forward_unary(tf.erf)
    _test_forward_unary(tf.log)
    _test_forward_unary(tf.log1p) 
Developer: apache, Project: incubator-tvm, Lines: 27, Source: test_forward.py

Example 2: testDiscretizedMixLogisticLoss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import atanh [as alias]
def testDiscretizedMixLogisticLoss(self):
    batch = 2
    height = 4
    width = 4
    channels = 3
    num_mixtures = 5
    logits = tf.concat(  # assign all probability mass to first component
        [tf.ones([batch, height, width, 1]) * 1e8,
         tf.zeros([batch, height, width, num_mixtures - 1])],
        axis=-1)
    locs = tf.random_uniform([batch, height, width, num_mixtures * 3],
                             minval=-.9, maxval=.9)
    log_scales = tf.random_uniform([batch, height, width, num_mixtures * 3],
                                   minval=-1., maxval=1.)
    coeffs = tf.atanh(tf.zeros([batch, height, width, num_mixtures * 3]))
    pred = tf.concat([logits, locs, log_scales, coeffs], axis=-1)

    # Test labels that don't satisfy edge cases where 8-bit value is 0 or 255.
    labels = tf.random_uniform([batch, height, width, channels],
                               minval=-.9, maxval=.9)
    locs_0 = locs[..., :3]
    log_scales_0 = log_scales[..., :3]
    centered_labels = labels - locs_0
    inv_stdv = tf.exp(-log_scales_0)
    plus_in = inv_stdv * (centered_labels + 1. / 255.)
    min_in = inv_stdv * (centered_labels - 1. / 255.)
    cdf_plus = tf.nn.sigmoid(plus_in)
    cdf_min = tf.nn.sigmoid(min_in)
    expected_loss = -tf.reduce_sum(tf.log(cdf_plus - cdf_min), axis=-1)

    actual_loss = common_layers.discretized_mix_logistic_loss(
        pred=pred, labels=labels)
    with self.test_session() as session:
      actual_loss_val, expected_loss_val = session.run(
          [actual_loss, expected_loss])
    self.assertAllClose(actual_loss_val, expected_loss_val, rtol=1e-5) 
Developer: akzaidi, Project: fine-lm, Lines: 38, Source: common_layers_test.py
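
For a single pixel and channel, the expected loss in this test reduces to the negative log of the logistic probability mass assigned to the 2/255-wide bin around the label (one 8-bit step in [-1, 1] coordinates). A NumPy sketch of that arithmetic with illustrative values (not part of the original test):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

mu, log_s, x = 0.1, -0.5, 0.3          # location, log-scale, label in [-1, 1]
inv_s = np.exp(-log_s)
cdf_plus = sigmoid(inv_s * (x - mu + 1.0 / 255.0))
cdf_min = sigmoid(inv_s * (x - mu - 1.0 / 255.0))
nll = -np.log(cdf_plus - cdf_min)      # negative log-mass of the bin around the label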

Example 3: testSampleFromDiscretizedMixLogistic

# Required import: import tensorflow [as alias]
# Or: from tensorflow import atanh [as alias]
def testSampleFromDiscretizedMixLogistic(self):
    batch = 2
    height = 4
    width = 4
    num_mixtures = 5
    seed = 42
    logits = tf.concat(  # assign all probability mass to first component
        [tf.ones([batch, height, width, 1]) * 1e8,
         tf.zeros([batch, height, width, num_mixtures - 1])],
        axis=-1)
    locs = tf.random_uniform([batch, height, width, num_mixtures * 3],
                             minval=-.9, maxval=.9)
    log_scales = tf.ones([batch, height, width, num_mixtures * 3]) * -1e8
    coeffs = tf.atanh(tf.zeros([batch, height, width, num_mixtures * 3]))
    pred = tf.concat([logits, locs, log_scales, coeffs], axis=-1)

    locs_0 = locs[..., :3]
    expected_sample = tf.clip_by_value(locs_0, -1., 1.)

    actual_sample = common_layers.sample_from_discretized_mix_logistic(
        pred, seed=seed)
    with self.test_session() as session:
      actual_sample_val, expected_sample_val = session.run(
          [actual_sample, expected_sample])
    # Use a low tolerance: samples numerically differ, as the actual
    # implementation clips log-scales so they always contribute to sampling.
    self.assertAllClose(actual_sample_val, expected_sample_val, atol=1e-2) 
Developer: akzaidi, Project: fine-lm, Lines: 29, Source: common_layers_test.py

Example 4: _graph_fn_unsquash

# Required import: import tensorflow [as alias]
# Or: from tensorflow import atanh [as alias]
def _graph_fn_unsquash(self, values):
        """
        Reverse operation of _graph_fn_squash (using arctanh).

        Args:
            values (DataOp): The values to unsquash.

        Returns:
            The unsquashed values.
        """
        if get_backend() == "tf":
            return tf.atanh((values - self.low) / (self.high - self.low) * 2.0 - 1.0)
        elif get_backend() == "pytorch":
            return torch.atanh((values - self.low) / (self.high - self.low) * 2.0 - 1.0)
Developer: rlgraph, Project: rlgraph, Lines: 16, Source: squashed_normal.py
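
For context, the forward squash maps a tanh output from [-1, 1] into [low, high], and _graph_fn_unsquash inverts it. A round-trip sketch under the TF backend (the squash formula is inferred from the unsquash above rather than copied from the original class; low and high are illustrative):

import tensorflow as tf

low, high = -2.0, 2.0

def squash(x):
    # Inferred inverse of the unsquash above: map tanh(x) from [-1, 1] into [low, high].
    return (tf.tanh(x) + 1.0) / 2.0 * (high - low) + low

def unsquash(values):
    return tf.atanh((values - low) / (high - low) * 2.0 - 1.0)

x = tf.constant([0.5, -1.0])
roundtrip = unsquash(squash(x))   # recovers x up to numerical precision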

Example 5: testDiscretizedMixLogisticLoss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import atanh [as alias]
def testDiscretizedMixLogisticLoss(self):
    batch = 2
    height = 4
    width = 4
    channels = 3
    num_mixtures = 5
    logits = tf.concat(  # assign all probability mass to first component
        [tf.ones([batch, height, width, 1]) * 1e8,
         tf.zeros([batch, height, width, num_mixtures - 1])],
        axis=-1)
    locs = tf.random_uniform([batch, height, width, num_mixtures * 3],
                             minval=-.9, maxval=.9)
    log_scales = tf.random_uniform([batch, height, width, num_mixtures * 3],
                                   minval=-1., maxval=1.)
    coeffs = tf.atanh(tf.zeros([batch, height, width, num_mixtures * 3]))
    pred = tf.concat([logits, locs, log_scales, coeffs], axis=-1)

    # Test labels that don't satisfy edge cases where 8-bit value is 0 or 255.
    labels = tf.random_uniform([batch, height, width, channels],
                               minval=-.9, maxval=.9)
    locs_0 = locs[..., :3]
    log_scales_0 = log_scales[..., :3]
    centered_labels = labels - locs_0
    inv_stdv = tf.exp(-log_scales_0)
    plus_in = inv_stdv * (centered_labels + 1. / 255.)
    min_in = inv_stdv * (centered_labels - 1. / 255.)
    cdf_plus = tf.nn.sigmoid(plus_in)
    cdf_min = tf.nn.sigmoid(min_in)
    expected_loss = -tf.reduce_sum(tf.log(cdf_plus - cdf_min), axis=-1)

    actual_loss = common_layers.discretized_mix_logistic_loss(
        pred=pred, labels=labels)
    actual_loss_val, expected_loss_val = self.evaluate(
        [actual_loss, expected_loss])
    self.assertAllClose(actual_loss_val, expected_loss_val, rtol=1e-5) 
Developer: yyht, Project: BERT, Lines: 37, Source: common_layers_test.py

Example 6: testSampleFromDiscretizedMixLogistic

# Required import: import tensorflow [as alias]
# Or: from tensorflow import atanh [as alias]
def testSampleFromDiscretizedMixLogistic(self):
    batch = 2
    height = 4
    width = 4
    num_mixtures = 5
    seed = 42
    logits = tf.concat(  # assign all probability mass to first component
        [tf.ones([batch, height, width, 1]) * 1e8,
         tf.zeros([batch, height, width, num_mixtures - 1])],
        axis=-1)
    locs = tf.random_uniform([batch, height, width, num_mixtures * 3],
                             minval=-.9, maxval=.9)
    log_scales = tf.ones([batch, height, width, num_mixtures * 3]) * -1e8
    coeffs = tf.atanh(tf.zeros([batch, height, width, num_mixtures * 3]))
    pred = tf.concat([logits, locs, log_scales, coeffs], axis=-1)

    locs_0 = locs[..., :3]
    expected_sample = tf.clip_by_value(locs_0, -1., 1.)

    actual_sample = common_layers.sample_from_discretized_mix_logistic(
        pred, seed=seed)
    actual_sample_val, expected_sample_val = self.evaluate(
        [actual_sample, expected_sample])
    # Use a low tolerance: samples numerically differ, as the actual
    # implementation clips log-scales so they always contribute to sampling.
    self.assertAllClose(actual_sample_val, expected_sample_val, atol=1e-2) 
Developer: yyht, Project: BERT, Lines: 28, Source: common_layers_test.py

Example 7: artanh

# Required import: import tensorflow [as alias]
# Or: from tensorflow import atanh [as alias]
def artanh(x):
  eps = BALL_EPS[x.dtype]
  return tf.atanh(tf.minimum(tf.maximum(x, -1 + eps), 1 - eps)) 
Developer: tensorflow, Project: neural-structured-learning, Lines: 5, Source: hyperbolic.py

Example 8: squash_correction

# Required import: import tensorflow [as alias]
# Or: from tensorflow import atanh [as alias]
def squash_correction(actions, squashed=True):
    if squashed:
        actions = tf.atanh(actions)
    return tf.reduce_sum(tf.math.log(1 - tf.tanh(actions) ** 2 + EPS), axis=1) 
Developer: ying-wen, Project: malib, Lines: 6, Source: tf_utils.py
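
The value returned by squash_correction is the log-determinant of the Jacobian of the tanh squashing, so the log-density of a squashed action equals the pre-squash log-density minus this term. A self-contained sketch of that change of variables (the EPS constant here is illustrative; the original module defines its own):

import tensorflow as tf

EPS = 1e-6  # illustrative stabilizer; tf_utils.py defines its own EPS

u = tf.constant([[0.3, -1.2]])   # pre-squash Gaussian sample
a = tf.tanh(u)                   # squashed action in (-1, 1)

base = tf.distributions.Normal(loc=0.0, scale=1.0)
log_mu = tf.reduce_sum(base.log_prob(u), axis=1)

# log pi(a) = log mu(u) - sum_i log(1 - tanh(u_i)^2); since tanh(atanh(a)) == a,
# the correction below matches squash_correction(a, squashed=True).
correction = tf.reduce_sum(tf.math.log(1.0 - a ** 2 + EPS), axis=1)
log_pi = log_mu - correction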

Example 9: _inverse

# Required import: import tensorflow [as alias]
# Or: from tensorflow import atanh [as alias]
def _inverse(self, y):
        return tf.atanh(y) 
Developer: ying-wen, Project: malib, Lines: 4, Source: squash_bijector.py

Example 10: tf_atanh

# Required import: import tensorflow [as alias]
# Or: from tensorflow import atanh [as alias]
def tf_atanh(x):
    return tf.atanh(tf.minimum(x, 1. - EPS)) # Only works for positive real x.

# Real x, not vector! 
Developer: dalab, Project: hyperbolic_nn, Lines: 6, Source: util.py

Example 11: _inverse

# Required import: import tensorflow [as alias]
# Or: from tensorflow import atanh [as alias]
def _inverse(self, y):
    precision = 0.99999997
    clipped = tf.where(
        tf.less_equal(tf.abs(y), 1.),
        tf.clip_by_value(y, -precision, precision), y)
    # y = tf.stop_gradient(clipped) + y - tf.stop_gradient(y)
    return tf.atanh(clipped) 
Developer: google-research, Project: dreamer, Lines: 9, Source: tanh_normal.py

Example 12: _GetActvFn

# Required import: import tensorflow [as alias]
# Or: from tensorflow import atanh [as alias]
def _GetActvFn(name):
    '''
    Helper function for selecting an activation function
    name: The name of the activation function
    return: A handle for the tensorflow activation function
    '''
    return {'atanh': tf.atanh,          'elu': tf.nn.elu,
            'ident': tf.identity,
            'sig': tf.sigmoid,          'softplus': tf.nn.softplus, 
            'softsign': tf.nn.softsign, 'relu': tf.nn.relu,
            'relu6': tf.nn.relu6,       'tanh': tf.tanh}.get(name) 
Developer: nicholastoddsmith, Project: pythonml, Lines: 13, Source: TFANN.py
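
A short usage sketch of the selector (the surrounding tensors are illustrative and not taken from TFANN); note that tf.atanh expects its input to lie in (-1, 1):

import tensorflow as tf

act = _GetActvFn('atanh')                          # returns tf.atanh; unknown names give None
bounded = tf.placeholder(tf.float32, [None, 4])    # assumed to hold values in (-1, 1)
out = act(bounded)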

Example 13: log_pis_for

# Required import: import tensorflow [as alias]
# Or: from tensorflow import atanh [as alias]
def log_pis_for(self, actions):
        raw_actions = actions
        if self._squash:
            raw_actions = tf.atanh(actions)
            log_pis = self._distribution.log_prob(raw_actions)
            log_pis -= self._squash_correction(raw_actions)
            return log_pis
        return self._distribution.log_prob(raw_actions)
Developer: ml3705454, Project: mapr2, Lines: 10, Source: gaussian_policy.py

Example 14: _create_opponent_prior_update

# Required import: import tensorflow [as alias]
# Or: from tensorflow import atanh [as alias]
def _create_opponent_prior_update(self):
        prior = self._get_opponent_prior(self._recent_opponent_observations_ph)
        raw_actions = tf.atanh(self._recent_opponent_actions_pl)
        log_pis = prior.dist.log_prob(raw_actions)
        log_pis = log_pis - squash_correction(raw_actions)
        loss = -tf.reduce_mean(log_pis) + prior.reg_loss_t
        vars = U.scope_vars(self._opponent_prior_scope)
        with tf.variable_scope('opponent_prior_opt_agent_{}'.format(self._agent_id), reuse=tf.AUTO_REUSE):
            if self._train_policy:
                optimizer = tf.train.AdamOptimizer(self._policy_lr)
                prior_training_op = optimizer.minimize(
                        loss=loss,
                        var_list=vars)
                self._training_ops.append(prior_training_op) 
Developer: ml3705454, Project: mapr2, Lines: 16, Source: rommeo_ac.py

Example 15: _create_opponent_p_update

# Required import: import tensorflow [as alias]
# Or: from tensorflow import atanh [as alias]
def _create_opponent_p_update(self):
        opponent_actions, opponent_actions_log_pis, reg_loss = self.opponent_policy.actions_for(
            observations=self._observations_ph,
            reuse=tf.AUTO_REUSE, with_log_pis=True, return_reg=True)
        assert_shape(opponent_actions, [None, self._opponent_action_dim])

        prior = self._get_opponent_prior(self._observations_ph)
        raw_actions = tf.atanh(opponent_actions)
        prior_log_pis = prior.dist.log_prob(raw_actions)
        prior_log_pis = prior_log_pis - squash_correction(raw_actions)

        actions, agent_log_pis = self.policy.actions_for(observations=self._observations_ph,
                                                         reuse=tf.AUTO_REUSE,
                                                         with_log_pis=True,
                                                         opponent_actions=opponent_actions)

        q_values = self.joint_qf.output_for(
            self._observations_ph, actions, opponent_actions, reuse=True)

        opponent_p_loss = (tf.reduce_mean(opponent_actions_log_pis)
                           - tf.reduce_mean(prior_log_pis)
                           - tf.reduce_mean(q_values)
                           + self._annealing_pl * agent_log_pis)
        opponent_p_loss = opponent_p_loss + reg_loss
        with tf.variable_scope('opponent_policy_opt_agent_{}'.format(self._agent_id), reuse=tf.AUTO_REUSE):
            if self._train_policy:
                optimizer = tf.train.AdamOptimizer(self._policy_lr)
                om_training_op = optimizer.minimize(
                    loss=opponent_p_loss,
                    var_list=self.opponent_policy.get_params_internal())
                self._training_ops.append(om_training_op) 
Developer: ml3705454, Project: mapr2, Lines: 31, Source: rommeo_ac.py


Note: The tensorflow.atanh examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright; please consult each project's license before redistributing or using the code, and do not republish this article without permission.