

Python tensorflow.expm1 Method Code Examples

This article collects typical usage examples of the Python tensorflow.expm1 method. If you are wondering what tensorflow.expm1 does, how to call it, or want to see it in real code, the hand-picked examples below should help. You can also explore further usage examples from the tensorflow module.


The following presents 7 code examples of tensorflow.expm1, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
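Before the examples, a quick reminder of what the method does: tf.expm1(x) computes exp(x) - 1, and it is more accurate than writing tf.exp(x) - 1.0 when x is close to zero, where the naive subtraction loses precision to cancellation. A minimal sketch, assuming a TensorFlow 1.x environment (the same API style the examples below use):

# Minimal comparison of tf.expm1 with the naive exp(x) - 1 formulation.
import tensorflow as tf

x = tf.constant([-1.0, 0.0, 1e-8, 1.0])
naive = tf.exp(x) - 1.0      # loses precision near zero
stable = tf.expm1(x)         # computed without the cancellation error

with tf.Session() as sess:
    print(sess.run([naive, stable]))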

Example 1: bottleneck

# Required import: import tensorflow [as alias]
# Or: from tensorflow import expm1 [as alias]
def bottleneck(self, x):
    hparams = self.hparams
    z_size = hparams.bottleneck_bits
    x_shape = common_layers.shape_list(x)
    with tf.variable_scope("vae"):
      mu = tf.layers.dense(x, z_size, name="mu")
      if hparams.mode != tf.estimator.ModeKeys.TRAIN:
        return mu, 0.0  # No sampling or kl loss on eval.
      log_sigma = tf.layers.dense(x, z_size, name="log_sigma")
      epsilon = tf.random_normal(x_shape[:-1] + [z_size])
      z = mu + tf.exp(log_sigma / 2) * epsilon
      kl = 0.5 * tf.reduce_mean(
          tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1)
      free_bits = z_size // 4
      kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
    return z, kl_loss * hparams.kl_beta 
Author: yyht, Project: BERT, Lines: 18, Source: autoencoders.py
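A note on why tf.expm1 shows up in the KL term: for a diagonal Gaussian posterior where log_sigma is the predicted log-variance, the closed-form KL divergence against a standard normal is 0.5 * (exp(log_sigma) + mu^2 - 1 - log_sigma) per dimension, and exp(log_sigma) - 1 is exactly expm1(log_sigma). A small NumPy check (not part of the BERT project above) that the two formulations agree:

# Hypothetical sanity check: the expm1 form of the Gaussian KL
# equals the textbook form.
import numpy as np

mu = np.array([0.3, -1.2])
log_sigma = np.array([-0.5, 0.1])  # interpreted as log-variance

textbook = 0.5 * (np.exp(log_sigma) + mu ** 2 - 1.0 - log_sigma)
expm1_form = 0.5 * (np.expm1(log_sigma) + mu ** 2 - log_sigma)
assert np.allclose(textbook, expm1_form)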

Example 2: vae

# Required import: import tensorflow [as alias]
# Or: from tensorflow import expm1 [as alias]
def vae(x, z_size, name=None):
  """Simple variational autoencoder without discretization.

  Args:
    x: Input to the discretization bottleneck.
    z_size: Number of bits, where discrete codes range from 1 to 2**z_size.
    name: Name for the bottleneck scope.

  Returns:
    Sampled latent z, KL loss, mu and log_sigma.
  """
  with tf.variable_scope(name, default_name="vae"):
    mu = tf.layers.dense(x, z_size, name="mu")
    log_sigma = tf.layers.dense(x, z_size, name="log_sigma")
    shape = common_layers.shape_list(x)
    epsilon = tf.random_normal([shape[0], shape[1], 1, z_size])
    z = mu + tf.exp(log_sigma / 2) * epsilon
    kl = 0.5 * tf.reduce_mean(
        tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1)
    free_bits = z_size // 4
    kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
    return z, kl_loss, mu, log_sigma 
Author: yyht, Project: BERT, Lines: 24, Source: discretization.py

Example 3: denorm

# Required import: import tensorflow [as alias]
# Or: from tensorflow import expm1 [as alias]
def denorm(logmagnitude):
    return tf.expm1(logmagnitude) 
Author: Veleslavia, Project: vimss, Lines: 4, Source: Input.py

Example 4: denorm

# Required import: import tensorflow [as alias]
# Or: from tensorflow import expm1 [as alias]
def denorm(logmagnitude):
    '''
    Exp(logmagnitude) - 1
    :param logmagnitude: Log-normalized magnitude spectrogram
    :return: Unnormalized magnitude spectrogram
    '''
    return tf.expm1(logmagnitude) 
Author: f90, Project: AdversarialAudioSeparation, Lines: 9, Source: Input.py
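For context, denorm undoes a log1p-style normalization of the magnitude spectrogram: if the forward step is log(1 + magnitude), then tf.expm1 inverts it exactly. The forward transform is not included in this excerpt; the norm helper below is an assumption for illustration, not code from the vimss or AdversarialAudioSeparation projects:

import tensorflow as tf

def norm(magnitude):
    # Hypothetical forward transform assumed to pair with denorm above:
    # log(1 + magnitude), which tf.expm1 reverses without approximation.
    return tf.log1p(magnitude)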

Example 5: signed_expm1

# Required import: import tensorflow [as alias]
# Or: from tensorflow import expm1 [as alias]
def signed_expm1(inputs):
  return tf.multiply(tf.sign(inputs), tf.expm1(tf.abs(inputs))) 
Author: DeepBlender, Project: DeepDenoiser, Lines: 4, Source: Utilities.py
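signed_expm1 extends expm1 to signed inputs by applying it to the absolute value and restoring the sign, which makes it the exact inverse of a signed log1p compression. The signed_log1p helper below is an assumption for illustration and is not part of the DeepDenoiser source shown here:

import tensorflow as tf

def signed_log1p(inputs):
    # Hypothetical forward transform: sign(x) * log(1 + |x|).
    return tf.multiply(tf.sign(inputs), tf.log1p(tf.abs(inputs)))

# signed_expm1(signed_log1p(x)) recovers x for any real-valued tensor x.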

Example 6: sonify

# Required import: import tensorflow [as alias]
# Or: from tensorflow import expm1 [as alias]
def sonify(spectrogram, samples, transform_op_fn, logscaled=True):
    graph = tf.Graph()
    with graph.as_default():

        noise = tf.Variable(tf.random_normal([samples], stddev=1e-6))

        x = transform_op_fn(noise)
        y = spectrogram

        if logscaled:
            x = tf.expm1(x)
            y = tf.expm1(y)

        x = tf.nn.l2_normalize(x)
        y = tf.nn.l2_normalize(y)
        tf.losses.mean_squared_error(x, y[-tf.shape(x)[0]:])

        optimizer = tf.contrib.opt.ScipyOptimizerInterface(
            loss=tf.losses.get_total_loss(),
            var_list=[noise],
            tol=1e-16,
            method='L-BFGS-B',
            options={
                'maxiter': 1000,
                'disp': True
            })

    with tf.Session(graph=graph) as session:
        session.run(tf.global_variables_initializer())
        optimizer.minimize(session)
        waveform = session.run(noise)

    return waveform 
Author: kastnerkyle, Project: representation_mixing, Lines: 35, Source: magrecnp.py
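A hedged usage sketch for sonify: transform_op_fn must map the 1-D noise waveform to the same kind of (optionally log-scaled) spectrogram that the target spectrogram was computed with. The STFT parameters and the use of tf.contrib.signal below are assumptions for illustration, not taken from the representation_mixing project:

import tensorflow as tf

def log_stft_transform(waveform):
    # Hypothetical transform: log1p magnitude STFT, matching logscaled=True.
    stft = tf.contrib.signal.stft(waveform, frame_length=512, frame_step=128)
    return tf.log1p(tf.abs(stft))

# waveform = sonify(target_log_spectrogram, samples=16000,
#                   transform_op_fn=log_stft_transform)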

Example 7: sonify

# Required import: import tensorflow [as alias]
# Or: from tensorflow import expm1 [as alias]
def sonify(spectrogram, samples, transform_op_fn, logscaled=True):
    graph = tf.Graph()
    with graph.as_default():

        noise = tf.Variable(tf.random_normal([samples], stddev=1e-6))

        x = transform_op_fn(noise)
        y = spectrogram

        if logscaled:
            x = tf.expm1(x)
            y = tf.expm1(y)

        # tf.nn.l2_normalize arguments changed between TF versions...
        def normalize(a):
            return a / tf.sqrt(tf.maximum(tf.reduce_sum(a ** 2, axis=0), 1E-12))

        x = normalize(x)
        y = normalize(y)
        tf.losses.mean_squared_error(x, y[-tf.shape(x)[0]:])

        optimizer = tf.contrib.opt.ScipyOptimizerInterface(
            loss=tf.losses.get_total_loss(),
            var_list=[noise],
            tol=1e-16,
            method='L-BFGS-B',
            options={
                'maxiter': sonify_steps,  # sonify_steps must be defined in the enclosing scope of the original script
                'disp': True
            })

    # THIS REALLY SHOULDN'T RUN ON GPU BUT SEEMS TO?
    config = tf.ConfigProto(
        device_count={'CPU' : 1, 'GPU' : 0},
        allow_soft_placement=True,
        log_device_placement=False
        )
    with tf.Session(config=config, graph=graph) as session:
        session.run(tf.global_variables_initializer())
        optimizer.minimize(session)
        waveform = session.run(noise)
    return waveform 
Author: kastnerkyle, Project: representation_mixing, Lines: 44, Source: sample_rnn_unaligned_speech_ljspeech.py


Note: The tensorflow.expm1 examples in this article were compiled by 純淨天空 from GitHub, MSDocs and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please follow the corresponding project's license when distributing or using the code; do not reproduce this article without permission.