

Python tensorflow.log1p Method Code Examples

This article collects typical usage examples of the Python method tensorflow.log1p from open-source projects. If you are unsure what tensorflow.log1p does, or how and where to use it, the curated code examples below may help. You can also explore further usage examples from the tensorflow module the method belongs to.


The sections below present 15 code examples of tensorflow.log1p, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
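
As a quick primer (not from any of the projects below): tf.log1p(x) computes log(1 + x) accurately even when x is very close to zero, where the naive tf.log(1.0 + x) loses all precision to float rounding. A minimal TF 1.x-style sketch:

import tensorflow as tf

x = tf.constant(1e-8, dtype=tf.float32)
with tf.Session() as sess:
    naive, stable = sess.run([tf.log(1.0 + x), tf.log1p(x)])
print(naive, stable)  # naive is 0.0 (1 + 1e-8 rounds to 1 in float32); stable is ~1e-8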

Example 1: input_fn

# Required import: import tensorflow [as alias]
# Or: from tensorflow import log1p [as alias]
def input_fn(partition, training, batch_size):
    """Generate an input function for the Estimator."""
    def _input_fn():
        if partition == "train":
            dataset = tf.data.Dataset.from_tensor_slices(({
                FEATURES_KEY: tf.log1p(x_train)
            }, tf.log1p(y_train)))
        else:
            dataset = tf.data.Dataset.from_tensor_slices(({
                FEATURES_KEY: tf.log1p(x_test)
            }, tf.log1p(y_test)))

        if training:
            dataset = dataset.shuffle(10 * batch_size, seed=RANDOM_SEED).repeat()

        dataset = dataset.batch(batch_size)
        iterator = dataset.make_one_shot_iterator()
        features, labels = iterator.get_next()
        return features, labels
    return _input_fn 
Author: wdxtub, Project: deep-learning-note, Lines: 22, Source: 1_simple_boston.py
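
For context, input_fn returns a closure in the form a TF 1.x Estimator expects. A hypothetical call site (the estimator, batch size, and step count below are placeholders, not from the original script) might look like:

estimator.train(input_fn=input_fn("train", training=True, batch_size=32), max_steps=1000)
estimator.evaluate(input_fn=input_fn("test", training=False, batch_size=32))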

Example 2: _sample

# Required import: import tensorflow [as alias]
# Or: from tensorflow import log1p [as alias]
def _sample(self, n_samples):
        # samples must be drawn from the open interval (-1, 1) rather than [-1, 1)
        loc, scale = self.loc, self.scale
        if not self.is_reparameterized:
            loc = tf.stop_gradient(loc)
            scale = tf.stop_gradient(scale)
        shape = tf.concat([[n_samples], self.batch_shape], 0)
        uniform_samples = tf.random_uniform(
            shape=shape,
            minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                                self.dtype.as_numpy_dtype(0.)),
            maxval=1.,
            dtype=self.dtype)
        samples = loc - scale * tf.sign(uniform_samples) * \
            tf.log1p(-tf.abs(uniform_samples))
        static_n_samples = n_samples if isinstance(n_samples, int) else None
        samples.set_shape(
            tf.TensorShape([static_n_samples]).concatenate(
                self.get_batch_shape()))
        return samples 
Author: thu-ml, Project: zhusuan, Lines: 22, Source: univariate.py
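
The expression loc - scale * sign(u) * log1p(-|u|) is the standard inverse-CDF sampler for the Laplace distribution, which is why u must stay inside the open interval (-1, 1). A quick NumPy sanity check (my own sketch, not from zhusuan):

import numpy as np

rng = np.random.default_rng(0)
u = rng.uniform(-1.0, 1.0, size=1_000_000)
samples = 2.0 - 0.5 * np.sign(u) * np.log1p(-np.abs(u))  # loc=2.0, scale=0.5
print(samples.mean(), samples.var())  # ~2.0 and ~2 * 0.5**2 = 0.5 for Laplace(2, 0.5)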

Example 3: calculate_latent_loss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import log1p [as alias]
def calculate_latent_loss(self, latent_weights):
        """ Calculate the latent loss in the form of KL divergence """
        for posterior in self.posteriors:
            # Minimize the chi squared divergence of the posterior 'f' from the prior 'g' (a
            # standard normal distribution). This amounts to minimizing the squared difference
            # between the first moment of f and the first moment of g, divided by the variance
            # of g (NOTE: mt_f denotes the t-th moment of the distribution f):
            #    min(chisq) = (m1_f - m1_g)^2 / sigma_g^2
            #
            # The chi squared divergence is useful here because it upper-bounds the
            # Kullback-Leibler divergence; the following inequality holds:
            #    KL(f||g) <= log(1 + Chi^2(f||g))
            #
            # So minimize this bound on the KL divergence rather than the KL divergence itself
            mean, _ = self.compute_moments(posterior)

            axes = tf.range(1, tf.rank(mean))
            chisq = tf.log1p(tf.square(mean - self.prior.mean()) / self.prior.variance())
            chisq = tf.reduce_sum(latent_weights * chisq, axes)
            tf.losses.add_loss(tf.reduce_mean(chisq, name='chisq')) 
Author: dojoteef, Project: glas, Lines: 22, Source: sample.py
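
The bound KL(f||g) <= log(1 + Chi^2(f||g)) invoked in the comments can be checked numerically. A small Monte Carlo sketch (my own, with f = N(1, 1) and g = N(0, 1), where analytically KL = 0.5 and log(1 + Chi^2) = 1):

import numpy as np
from scipy.stats import norm

rng = np.random.default_rng(0)
f, g = norm(loc=1.0), norm(loc=0.0)

x_f = f.rvs(size=1_000_000, random_state=rng)
kl = np.mean(f.logpdf(x_f) - g.logpdf(x_f))            # KL(f||g) ~ 0.5

x_g = g.rvs(size=1_000_000, random_state=rng)
chi2 = np.mean((f.pdf(x_g) / g.pdf(x_g)) ** 2) - 1.0   # Chi^2(f||g) ~ e - 1
print(kl, np.log1p(chi2))                              # ~0.5 <= ~1.0: the bound holds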

Example 4: __call__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import log1p [as alias]
def __call__(self, x):
    """Computes regularization given an ed.Normal random variable as input."""
    if not isinstance(x, ed.RandomVariable):
      raise ValueError('Input must be an ed.RandomVariable (for correct math, '
                       'an ed.Normal random variable).')
    # Clip magnitude of dropout rate, where we get the dropout rate alpha from
    # the additive parameterization (Molchanov et al., 2017): for weight ~
    # Normal(mu, sigma**2), the variance `sigma**2 = alpha * mu**2`.
    mean = x.distribution.mean()
    log_variance = tf.log(x.distribution.variance())
    log_alpha = log_variance - tf.log(tf.square(mean) +
                                      tf.keras.backend.epsilon())
    log_alpha = tf.clip_by_value(log_alpha, -8., 8.)

    # Set magic numbers for cubic polynomial approx. (Molchanov et al., 2017).
    k1 = 0.63576
    k2 = 1.8732
    k3 = 1.48695
    c = -k1
    output = tf.reduce_sum(k1 * tf.nn.sigmoid(k2 + k3 * log_alpha) +
                           -0.5 * tf.log1p(tf.exp(-log_alpha)) + c)
    return output 
Author: yyht, Project: BERT, Lines: 24, Source: regularizers.py
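
A side note on the -0.5 * tf.log1p(tf.exp(-log_alpha)) term: log1p(exp(z)) is exactly the softplus function, so this term equals -0.5 * softplus(-log_alpha). A tiny NumPy check of the identity:

import numpy as np

z = np.linspace(-8.0, 8.0, 5)
lhs = np.log1p(np.exp(z))
rhs = np.logaddexp(0.0, z)  # stable softplus: log(exp(0) + exp(z))
print(np.allclose(lhs, rhs))  # True

In TF 1.x the same term could therefore be written as -0.5 * tf.nn.softplus(-log_alpha), which also avoids overflow in tf.exp for very negative log_alpha (not an issue here, thanks to the clipping to [-8, 8]).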

Example 5: bottleneck

# Required import: import tensorflow [as alias]
# Or: from tensorflow import log1p [as alias]
def bottleneck(self, x):  # pylint: disable=arguments-differ
    hparams = self.hparams
    if hparams.unordered:
      return super(AutoencoderOrderedDiscrete, self).bottleneck(x)
    noise = hparams.bottleneck_noise
    hparams.bottleneck_noise = 0.0  # We'll add noise below.
    x, loss = discretization.parametrized_bottleneck(x, hparams)
    hparams.bottleneck_noise = noise
    if hparams.mode == tf.estimator.ModeKeys.TRAIN:
      # We want a number p such that p^bottleneck_bits = 1 - noise/2,
      # i.e. log(p) = log(1 - noise/2) / bottleneck_bits.
      log_p = tf.log1p(-float(noise) / 2) / float(hparams.bottleneck_bits)
      # Probabilities of flipping are p, p^2, p^3, ..., p^bottleneck_bits.
      noise_mask = 1.0 - tf.exp(tf.cumsum(tf.zeros_like(x) + log_p, axis=-1))
      # Having the no-noise mask, we can make noise just uniformly at random.
      ordered_noise = tf.random_uniform(tf.shape(x))
      # We want our noise to be 1s at the start and random {-1, 1} bits later.
      ordered_noise = tf.to_float(tf.less(noise_mask, ordered_noise))
      # Now we flip the bits of x on the noisy positions (ordered and normal).
      x *= 2.0 * ordered_noise - 1
    return x, loss 
Author: yyht, Project: BERT, Lines: 23, Source: autoencoders.py
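
To make the noise schedule concrete, here is a small NumPy illustration (my own numbers, not from the repo) of the cumulative flip probabilities computed in the TRAIN branch above:

import numpy as np

noise, bits = 0.1, 8
log_p = np.log1p(-noise / 2) / bits
flip_prob = 1.0 - np.exp(np.cumsum(np.full(bits, log_p)))
print(flip_prob)  # rises from ~0.0064 at the first bit to exactly noise/2 = 0.05 at the last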

Example 6: test_forward_unary

# Required import: import tensorflow [as alias]
# Or: from tensorflow import log1p [as alias]
def test_forward_unary():
    def _test_forward_unary(op, a_min=1, a_max=5, dtype=np.float32):
        """test unary operators"""
        np_data = np.random.uniform(a_min, a_max, size=(2, 3, 5)).astype(dtype)
        tf.reset_default_graph()
        with tf.Graph().as_default():
            in_data = tf.placeholder(dtype, (2, 3, 5), name="in_data")
            out = op(in_data)
            compare_tf_with_tvm([np_data], ['in_data:0'], out.name)

    _test_forward_unary(tf.acos, -1, 1)
    _test_forward_unary(tf.asin, -1, 1)
    _test_forward_unary(tf.atanh, -1, 1)
    _test_forward_unary(tf.sinh)
    _test_forward_unary(tf.cosh)
    _test_forward_unary(tf.acosh)
    _test_forward_unary(tf.asinh)
    _test_forward_unary(tf.atan)
    _test_forward_unary(tf.sin)
    _test_forward_unary(tf.cos)
    _test_forward_unary(tf.tan)
    _test_forward_unary(tf.tanh)
    _test_forward_unary(tf.erf)
    _test_forward_unary(tf.log)
    _test_forward_unary(tf.log1p) 
Author: apache, Project: incubator-tvm, Lines: 27, Source: test_forward.py

Example 7: mu_law_encode

# Required import: import tensorflow [as alias]
# Or: from tensorflow import log1p [as alias]
def mu_law_encode(audio, quantization_channels):
    '''Quantizes waveform amplitudes.'''
    with tf.name_scope('encode'):
        mu = tf.to_float(quantization_channels - 1)
        # Perform mu-law companding transformation (ITU-T, 1988).
        # Minimum operation is here to deal with rare large amplitudes caused
        # by resampling.
        safe_audio_abs = tf.minimum(tf.abs(audio), 1.0)
        magnitude = tf.log1p(mu * safe_audio_abs) / tf.log1p(mu)
        signal = tf.sign(audio) * magnitude
        # Quantize signal to the specified number of levels.
        return tf.to_int32((signal + 1) / 2 * mu + 0.5) 
Author: ibab, Project: tensorflow-wavenet, Lines: 14, Source: ops.py
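
This snippet only covers encoding; a matching decoder, sketched here under the same quantization convention (the real project may differ in details), inverts the companding with tf.expm1:

def mu_law_decode(output, quantization_channels):
    '''Recovers waveform amplitudes from mu-law quantized integers (sketch).'''
    with tf.name_scope('decode'):
        mu = tf.to_float(quantization_channels - 1)
        # Map integer levels [0, mu] back onto [-1, 1].
        signal = 2.0 * (tf.to_float(output) / mu) - 1.0
        # Invert the companding: |x| = ((1 + mu)^|y| - 1) / mu, via expm1/log1p.
        magnitude = tf.expm1(tf.abs(signal) * tf.log1p(mu)) / mu
        return tf.sign(signal) * magnitude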

Example 8: geo_mean

# Required import: import tensorflow [as alias]
# Or: from tensorflow import log1p [as alias]
def geo_mean(sname, true, model):
    with tf.name_scope(sname):
        waveform_loss = tf.exp(tf.reduce_mean(tf.log1p(
                                tf.abs(tf.subtract(true, model)))))
    tf.summary.scalar(sname, waveform_loss)
    return waveform_loss 
Author: jhetherly, Project: EnglishSpeechUpsampler, Lines: 8, Source: losses.py

Example 9: pt_dense

# Required import: import tensorflow [as alias]
# Or: from tensorflow import log1p [as alias]
def pt_dense(input_tensor, num_inputs, num_outputs, name, stochastic=True, with_bias=True, reuse=False):
    with tf.variable_scope(name) as scope:
        W = tf.get_variable('W', [num_inputs, num_outputs], initializer=tf.truncated_normal_initializer(1e-2),
                            dtype=tf.float32, trainable=True)
        log_alpha = tf.get_variable('log_alpha', [], initializer=tf.constant_initializer(-10.0), dtype=tf.float32,
                                    trainable=True)
        log_alpha = tf.clip_by_value(log_alpha, -20.0, 20.0)

        if not reuse:
            # computing reg
            k1, k2, k3 = 0.63576, 1.8732, 1.48695
            C = -k1
            mdkl = k1 * tf.nn.sigmoid(k2 + k3 * log_alpha) - 0.5 * tf.log1p(tf.exp(-log_alpha)) + C
            kl = -tf.reduce_sum(mdkl) * tf.reduce_prod(tf.cast(W.get_shape(), tf.float32))
            tf.add_to_collection('kl_loss', kl)

        # computing output
        mu = tf.matmul(input_tensor, W)
        si = tf.sqrt(tf.matmul(input_tensor * input_tensor, tf.exp(log_alpha) * W * W) + 1e-16)
        output = mu
        if stochastic:
            output += tf.random_normal(mu.shape, mean=0, stddev=1) * si
        if with_bias:
            biases = tf.get_variable('biases', num_outputs, tf.float32, tf.constant_initializer(0.0))
            output = tf.nn.bias_add(output, biases)

        # summaries
        if not reuse:
            if with_bias:
                error = 0.5*(1.0+tf.erf((-mu-biases)/tf.sqrt(2.0)/si))
            else:
                error = 0.5*(1.0+tf.erf((-mu)/tf.sqrt(2.0)/si))
            tf.summary.scalar('error', tf.reduce_sum(error))
            tf.summary.scalar('log_alpha', log_alpha)
            tf.add_to_collection('log_alpha', log_alpha)
    return output 
Author: da-molchanov, Project: variance-networks, Lines: 38, Source: layers.py

Example 10: pt_conv_2d

# Required import: import tensorflow [as alias]
# Or: from tensorflow import log1p [as alias]
def pt_conv_2d(input_tensor, filter_shape, input_channels, output_channels, padding, name, stochastic=True,
               with_bias=True, reuse=False):
    with tf.variable_scope(name) as scope:
        kernel = tf.get_variable('kernel', [filter_shape[0], filter_shape[1], input_channels, output_channels],
                                 initializer=tf.contrib.layers.xavier_initializer(seed=322), dtype=tf.float32,
                                 trainable=True)
        log_alpha = tf.get_variable('log_alpha', [], initializer=tf.constant_initializer(-10.0), dtype=tf.float32,
                                    trainable=True)
        log_alpha = tf.clip_by_value(log_alpha, -20.0, 20.0)

        if not reuse:
            # computing reg
            k1, k2, k3 = 0.63576, 1.8732, 1.48695
            C = -k1
            mdkl = k1 * tf.nn.sigmoid(k2 + k3 * log_alpha) - 0.5 * tf.log1p(tf.exp(-log_alpha)) + C
            kl = -tf.reduce_sum(mdkl) * tf.reduce_prod(tf.cast(kernel.get_shape(), tf.float32))
            tf.add_to_collection('kl_loss', kl)

        # computing output
        conved_mu = tf.nn.conv2d(input_tensor, kernel, [1, 1, 1, 1], padding=padding)
        conved_si = tf.sqrt(tf.nn.conv2d(input_tensor * input_tensor,
                                         tf.exp(log_alpha) * kernel * kernel,
                                         [1, 1, 1, 1], padding=padding)+1e-16)
        output = conved_mu
        if stochastic:
            output += tf.random_normal(conved_mu.shape, mean=0, stddev=1) * conved_si
        if with_bias:
            biases = tf.get_variable('biases', output_channels, tf.float32, tf.constant_initializer(0.0))
            output = tf.nn.bias_add(output, biases)

        # summaries
        if not reuse:
            if with_bias:
                error = 0.5*(1.0+tf.erf((-conved_mu-biases)/tf.sqrt(2.0)/conved_si))
            else:
                error = 0.5*(1.0+tf.erf((-conved_mu)/tf.sqrt(2.0)/conved_si))
            tf.summary.scalar('error', tf.reduce_sum(error))
            tf.summary.scalar('log_alpha', log_alpha)
            tf.add_to_collection('log_alpha', log_alpha)

    return output 
Author: da-molchanov, Project: variance-networks, Lines: 43, Source: layers.py

Example 11: norm

# Required import: import tensorflow [as alias]
# Or: from tensorflow import log1p [as alias]
def norm(magnitude):
    '''
    Log(1 + magnitude)
    :param magnitude: input magnitude tensor
    :return: log-compressed magnitude
    '''
    return tf.log1p(magnitude) 
Author: Veleslavia, Project: vimss, Lines: 9, Source: Input.py

Example 12: norm_with_noise

# Required import: import tensorflow [as alias]
# Or: from tensorflow import log1p [as alias]
def norm_with_noise(magnitude):
    '''
    Log(1 + magnitude) + Noise
    :param magnitude: input magnitude tensor
    :return: log-compressed magnitude with small uniform noise added
    '''
    return tf.log1p(magnitude) + tf.random_uniform(magnitude.shape, minval=1e-7, maxval=1e-5) 
Author: Veleslavia, Project: vimss, Lines: 9, Source: Input.py

Example 13: get_bounded_class_weight

# Required import: import tensorflow [as alias]
# Or: from tensorflow import log1p [as alias]
def get_bounded_class_weight(labels, weights, ub=None):
    if weights is None:
        return 1.0
    else:
        w = tf.gather(weights, labels)
        w = w / tf.reduce_min(w)
        w = tf.clip_by_value(1.0 + tf.log1p(w),
                             clip_value_min=1.0,
                             clip_value_max=ub if ub is not None else tf.cast(tf.shape(weights)[0], tf.float32) / 2.0)
    return w 
Author: hanxiao, Project: encoding-blocks, Lines: 12, Source: nn.py
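
A quick numeric illustration (made-up weights, my own sketch) of how the 1.0 + log1p damping compresses the weight range before clipping:

import numpy as np

w = np.array([1.0, 10.0, 100.0])  # hypothetical per-class weights
w = w / w.min()
print(1.0 + np.log1p(w))  # [1.69, 3.40, 5.62]: a 100x spread damped to ~3.3x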

Example 14: testFloatBasic

# Required import: import tensorflow [as alias]
# Or: from tensorflow import log1p [as alias]
def testFloatBasic(self):
    x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float32)
    y = (x + .5).astype(np.float32)     # no zero
    z = (x + 15.5).astype(np.float32)   # all positive
    k = np.arange(-0.90, 0.90, 0.25).astype(np.float32) # between -1 and 1

    self._compareBoth(x, np.abs, tf.abs)
    self._compareBoth(x, np.abs, _ABS)
    self._compareBoth(x, np.negative, tf.neg)
    self._compareBoth(x, np.negative, _NEG)
    self._compareBoth(y, self._inv, tf.inv)
    self._compareBoth(x, np.square, tf.square)
    self._compareBoth(z, np.sqrt, tf.sqrt)
    self._compareBoth(z, self._rsqrt, tf.rsqrt)
    self._compareBoth(x, np.exp, tf.exp)
    self._compareBoth(z, np.log, tf.log)
    self._compareBoth(z, np.log1p, tf.log1p)
    self._compareBoth(x, np.tanh, tf.tanh)
    self._compareBoth(x, self._sigmoid, tf.sigmoid)
    self._compareBoth(y, np.sign, tf.sign)
    self._compareBoth(x, np.sin, tf.sin)
    self._compareBoth(x, np.cos, tf.cos)
    self._compareBoth(k, np.arcsin, tf.asin)
    self._compareBoth(k, np.arccos, tf.acos)
    self._compareBoth(x, np.arctan, tf.atan)
    self._compareBoth(x, np.tan, tf.tan)
    self._compareBoth(
        y,
        np.vectorize(self._replace_domain_error_with_inf(math.lgamma)),
        tf.lgamma)
    self._compareBoth(x, np.vectorize(math.erf), tf.erf)
    self._compareBoth(x, np.vectorize(math.erfc), tf.erfc)

    self._compareBothSparse(x, np.abs, tf.abs)
    self._compareBothSparse(x, np.negative, tf.neg)
    self._compareBothSparse(x, np.square, tf.square)
    self._compareBothSparse(z, np.sqrt, tf.sqrt, tol=1e-3)
    self._compareBothSparse(x, np.tanh, tf.tanh)
    self._compareBothSparse(y, np.sign, tf.sign)
    self._compareBothSparse(x, np.vectorize(math.erf), tf.erf) 
Author: tobegit3hub, Project: deep_image_model, Lines: 42, Source: cwise_ops_test.py

Example 15: testFloatEmpty

# Required import: import tensorflow [as alias]
# Or: from tensorflow import log1p [as alias]
def testFloatEmpty(self):
    x = np.empty((2, 0, 5), dtype=np.float32)
    self._compareBoth(x, np.abs, tf.abs)
    self._compareBoth(x, np.abs, _ABS)
    self._compareBoth(x, np.negative, tf.neg)
    self._compareBoth(x, np.negative, _NEG)
    self._compareBoth(x, self._inv, tf.inv)
    self._compareBoth(x, np.square, tf.square)
    self._compareBoth(x, np.sqrt, tf.sqrt)
    self._compareBoth(x, self._rsqrt, tf.rsqrt)
    self._compareBoth(x, np.exp, tf.exp)
    self._compareBoth(x, np.log, tf.log)
    self._compareBoth(x, np.log1p, tf.log1p)
    self._compareBoth(x, np.tanh, tf.tanh)
    self._compareBoth(x, self._sigmoid, tf.sigmoid)
    self._compareBoth(x, np.sign, tf.sign)
    self._compareBoth(x, np.sin, tf.sin)
    self._compareBoth(x, np.cos, tf.cos)
    # Can't use vectorize below, so just use some arbitrary function
    self._compareBoth(x, np.sign, tf.lgamma)
    self._compareBoth(x, np.sign, tf.erf)
    self._compareBoth(x, np.sign, tf.erfc)
    self._compareBoth(x, np.tan, tf.tan)
    self._compareBoth(x, np.arcsin, tf.asin)
    self._compareBoth(x, np.arccos, tf.acos)
    self._compareBoth(x, np.arctan, tf.atan)

    self._compareBothSparse(x, np.abs, tf.abs)
    self._compareBothSparse(x, np.negative, tf.neg)
    self._compareBothSparse(x, np.square, tf.square)
    self._compareBothSparse(x, np.sqrt, tf.sqrt, tol=1e-3)
    self._compareBothSparse(x, np.tanh, tf.tanh)
    self._compareBothSparse(x, np.sign, tf.sign)
    self._compareBothSparse(x, np.sign, tf.erf) 
Author: tobegit3hub, Project: deep_image_model, Lines: 36, Source: cwise_ops_test.py


Note: The tensorflow.log1p examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; the copyright of the source code belongs to its original authors. Please refer to each project's license before redistributing or using the code. Do not reproduce this compilation without permission.