

Python v1.log Method Code Examples

This article collects typical usage examples of the Python method tensorflow.compat.v1.log. If you are wondering what v1.log does, how to call it, or want to see it used in context, the curated code examples below should help. You can also explore other usage examples from the tensorflow.compat.v1 module.


The following presents 15 code examples of the v1.log method, sorted by popularity by default.
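Before the project examples, here is a minimal, self-contained sketch of the call itself (written for this article, not taken from any of the projects below). tf.compat.v1.log computes the element-wise natural logarithm; the sketch assumes TensorFlow 2.x with the v1 compatibility module, run in graph mode as the examples below are.

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # graph mode, matching the v1-style examples below

x = tf.constant([1.0, np.e, np.e ** 2])
y = tf.log(x)  # element-wise natural logarithm

with tf.Session() as sess:
    print(sess.run(y))  # approximately [0. 1. 2.]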

Example 1: get_optimizer

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import log [as alias]
def get_optimizer(params, learning_rate):
  """Returns the optimizer that should be used based on params."""
  if params.optimizer == 'momentum':
    mlperf.logger.log(key=mlperf.tags.OPT_NAME,
                      value=mlperf.tags.SGD_WITH_MOMENTUM)
    mlperf.logger.log(key=mlperf.tags.OPT_MOMENTUM, value=params.momentum)
    opt = tf.train.MomentumOptimizer(
        learning_rate, params.momentum, use_nesterov=True)
  elif params.optimizer == 'sgd':
    mlperf.logger.log(key=mlperf.tags.OPT_NAME, value=mlperf.tags.SGD)
    opt = tf.train.GradientDescentOptimizer(learning_rate)
  elif params.optimizer == 'rmsprop':
    opt = tf.train.RMSPropOptimizer(
        learning_rate,
        params.rmsprop_decay,
        momentum=params.rmsprop_momentum,
        epsilon=params.rmsprop_epsilon)
  elif params.optimizer == 'adam':
    opt = tf.train.AdamOptimizer(learning_rate, params.adam_beta1,
                                 params.adam_beta2, params.adam_epsilon)
  else:
    raise ValueError('Optimizer "{}" was not recognized'.
                     format(params.optimizer))
  return opt 
Developer: tensorflow, Project: benchmarks, Lines: 26, Source: benchmark_cnn.py

Example 2: testTwoClassLogLikelihoodVersusOldImplementation

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import log [as alias]
def testTwoClassLogLikelihoodVersusOldImplementation(self):
    def alt_two_class_log_likelihood_impl(predictions, labels):
      float_labels = tf.cast(labels, dtype=tf.float64)
      float_predictions = tf.cast(tf.squeeze(predictions), dtype=tf.float64)
      # likelihood should be just p for class 1, and 1 - p for class 0.
      # signs is 1 for class 1, and -1 for class 0
      signs = 2 * float_labels - tf.ones_like(float_labels)
      # constant_term is 1 for class 0, and 0 for class 1.
      constant_term = tf.ones_like(float_labels) - float_labels
      likelihoods = constant_term + signs * float_predictions
      log_likelihoods = tf.log(likelihoods)
      avg_log_likelihood = tf.reduce_mean(log_likelihoods)
      return avg_log_likelihood
    predictions = np.random.rand(1, 10, 1)
    targets = np.random.randint(2, size=10)
    with self.test_session() as session:
      new_log_likelihood, _ = metrics.two_class_log_likelihood(
          predictions, targets)
      alt_log_likelihood = alt_two_class_log_likelihood_impl(
          predictions, targets)
      new_impl, alt_impl = session.run([new_log_likelihood, alt_log_likelihood])
    self.assertAlmostEqual(new_impl, alt_impl) 
Developer: tensorflow, Project: tensor2tensor, Lines: 24, Source: metrics_test.py
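For intuition, the same quantity can be restated in a few lines of plain NumPy (a sketch written for this article; the function name is ours, not from tensor2tensor):

import numpy as np

def two_class_log_likelihood_np(predictions, labels):
    p = np.squeeze(predictions).astype(np.float64)  # P(class == 1)
    y = labels.astype(np.float64)
    likelihoods = np.where(y == 1, p, 1.0 - p)      # p for class 1, 1 - p for class 0
    return np.log(likelihoods).mean()

predictions = np.random.rand(1, 10, 1)
targets = np.random.randint(2, size=10)
print(two_class_log_likelihood_np(predictions, targets))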

Example 3: shuffle_layer

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import log [as alias]
def shuffle_layer(inputs, shuffle_fn=rol):
  """Shuffles the elements according to bitwise left or right rotation.

  Args:
    inputs: Tensor input from previous layer
    shuffle_fn: Shift function rol or ror

  Returns:
    tf.Tensor: Inputs shifted according to shuffle_fn
  """

  length = tf.shape(inputs)[1]
  n_bits = tf.log(tf.cast(length - 1, tf.float32)) / tf.log(2.0)
  n_bits = tf.cast(n_bits, tf.int32) + 1

  indices = tf.range(0, length)
  rev_indices = shuffle_fn(indices, n_bits)
  return tf.gather(inputs, rev_indices, axis=1) 
Developer: tensorflow, Project: tensor2tensor, Lines: 20, Source: shuffle_network.py
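Note that tf.log is the natural logarithm, so dividing by tf.log(2.0) converts it to base 2; floor(log2(length - 1)) + 1 is then the number of bits needed to index every position, i.e. ceil(log2(length)) for power-of-two lengths. A quick check in plain Python (a sketch written for this article):

import math

for length in [2, 4, 8, 16, 1024]:
    n_bits = int(math.log2(length - 1)) + 1       # mirrors the TF expression above
    assert n_bits == math.ceil(math.log2(length))
    print(length, n_bits)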

Example 4: uniform_binning_correction

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import log [as alias]
def uniform_binning_correction(x, n_bits=8):
  """Replaces x^i with q^i(x) = U(x, x + 1.0 / 256.0).

  Args:
    x: 4-D Tensor of shape (NHWC)
    n_bits: number of bits of quantization (default 8).
  Returns:
    x: x ~ U(x, x + 1.0 / 256)
    objective: Equivalent to -q(x)*log(q(x)).
  """
  n_bins = 2**n_bits
  batch_size, height, width, n_channels = common_layers.shape_list(x)
  hwc = float(height * width * n_channels)

  x = x + tf.random_uniform(
      shape=(batch_size, height, width, n_channels),
      minval=0.0, maxval=1.0/n_bins)
  objective = -np.log(n_bins) * hwc * tf.ones(batch_size)
  return x, objective 
Developer: tensorflow, Project: tensor2tensor, Lines: 21, Source: glow_ops.py
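The correction term follows because uniform noise of width 1/n_bins gives each of the H*W*C dimensions a constant density of n_bins, so log q(x) = hwc * log(n_bins) per image. A quick numeric restatement in nats (a sketch written for this article; the image shape is hypothetical):

import numpy as np

n_bits = 8
n_bins = 2 ** n_bits                    # 256 quantization bins
height, width, n_channels = 32, 32, 3   # hypothetical image shape
hwc = height * width * n_channels

objective_per_image = -np.log(n_bins) * hwc
print(objective_per_image)              # == -hwc * n_bits * log(2)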

Example 5: residual_shuffle_network

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import log [as alias]
def residual_shuffle_network(inputs, hparams):
  """Residual Shuffle-Exchange network with weight sharing.

  Args:
    inputs: inputs to the Shuffle-Exchange network. The sequence length
      should be a power of 2.
    hparams: Model configuration

  Returns:
    tf.Tensor: Outputs of the Shuffle-Exchange last layer
  """
  input_shape = tf.shape(inputs)
  n_bits = tf.log(tf.cast(input_shape[1] - 1, tf.float32)) / tf.log(2.0)
  n_bits = tf.cast(n_bits, tf.int32) + 1

  block_out = inputs

  for k in range(hparams.num_hidden_layers):
    with tf.variable_scope("benes_block_" + str(k), reuse=tf.AUTO_REUSE):
      forward_output = forward_part(block_out, hparams, n_bits)
      block_out = reverse_part(forward_output, hparams, n_bits)

  return RSU("last_layer", hparams.dropout, hparams.mode)(block_out) 
Developer: tensorflow, Project: tensor2tensor, Lines: 25, Source: residual_shuffle_exchange.py

Example 6: lossfn

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import log [as alias]
def lossfn(real_input, fake_input, compress, hparams, lsgan, name):
  """Loss function."""
  eps = 1e-12
  with tf.variable_scope(name):
    d1 = discriminator(real_input, compress, hparams, "discriminator")
    d2 = discriminator(fake_input, compress, hparams, "discriminator",
                       reuse=True)
    if lsgan:
      dloss = tf.reduce_mean(
          tf.squared_difference(d1, 0.9)) + tf.reduce_mean(tf.square(d2))
      gloss = tf.reduce_mean(tf.squared_difference(d2, 0.9))
      loss = (dloss + gloss)/2
    else:  # cross_entropy
      dloss = -tf.reduce_mean(
          tf.log(d1 + eps)) - tf.reduce_mean(tf.log1p(eps - d2))
      gloss = -tf.reduce_mean(tf.log(d2 + eps))
      loss = (dloss + gloss)/2
    return loss 
Developer: tensorflow, Project: tensor2tensor, Lines: 20, Source: cycle_gan.py
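A side note on the cross-entropy branch: tf.log1p(eps - d2) computes log(1 + eps - d2), and log1p stays accurate when its argument is tiny, which matters when the discriminator output d2 is close to 0. A NumPy illustration (a sketch written for this article):

import numpy as np

d2 = 1e-12
print(np.log1p(-d2))     # about -1e-12, accurate
print(np.log(1.0 - d2))  # suffers rounding in 1.0 - d2, slightly off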

Example 7: actnorm

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import log [as alias]
def actnorm(name, x, x_mask, inverse, init, logscale_factor=3.0):
  """Activation normalization, returns logabsdet of shape [B]."""
  eps = tf.keras.backend.epsilon()
  n_channels = common_layers.shape_list(x)[2]

  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    x_mean, x_var = gops.moments_over_bl(x, x_mask)
    b = gops.get_variable_ddi(
        "b", (n_channels), -x_mean, init, tf.zeros_initializer)
    log_w_init = -0.5 * tf.log(x_var + eps) / logscale_factor
    log_w = gops.get_variable_ddi(
        "log_w", (n_channels), log_w_init, init,
        tf.zeros_initializer) * logscale_factor

    if not inverse:
      x = (x + b) * tf.exp(log_w)
    else:
      x = x * tf.exp(-log_w) - b

    x_length = tf.reduce_sum(x_mask, -1)
    logabsdet = x_length * tf.reduce_sum(log_w)
    if inverse:
      logabsdet *= -1
    return x, logabsdet 
Developer: tensorflow, Project: tensor2tensor, Lines: 26, Source: transformer_glow_layers.py
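The forward and inverse branches are exact inverses of each other, which is what makes the logabsdet bookkeeping valid. A scalar sanity check (a sketch written for this article, with made-up values):

import numpy as np

x, b, log_w = 0.7, 0.1, -0.3
fwd = (x + b) * np.exp(log_w)   # forward branch
inv = fwd * np.exp(-log_w) - b  # inverse branch
print(np.isclose(inv, x))       # True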

Example 8: get_timing_signal

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import log [as alias]
def get_timing_signal(length,
                      min_timescale=1,
                      max_timescale=1e4,
                      num_timescales=16):
  """Create Tensor of sinusoids of different frequencies.

  Args:
    length: Length of the Tensor to create, i.e. Number of steps.
    min_timescale: a float
    max_timescale: a float
    num_timescales: an int

  Returns:
    Tensor of shape (length, 2*num_timescales)
  """
  positions = to_float(tf.range(length))
  log_timescale_increment = (
      math.log(max_timescale / min_timescale) / (num_timescales - 1))
  inv_timescales = min_timescale * tf.exp(
      to_float(tf.range(num_timescales)) * -log_timescale_increment)
  scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)
  return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) 
Developer: tensorflow, Project: tensor2tensor, Lines: 24, Source: common_layers.py
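The same computation is easy to mirror in plain NumPy to inspect the signal's shape and values (a sketch written for this article; the function name is ours, not from tensor2tensor):

import math
import numpy as np

def timing_signal_np(length, min_timescale=1.0, max_timescale=1e4,
                     num_timescales=16):
    positions = np.arange(length, dtype=np.float32)
    log_inc = math.log(max_timescale / min_timescale) / (num_timescales - 1)
    inv_timescales = min_timescale * np.exp(
        np.arange(num_timescales, dtype=np.float32) * -log_inc)
    scaled_time = positions[:, None] * inv_timescales[None, :]
    return np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)

print(timing_signal_np(50).shape)  # (50, 32): length x 2*num_timescales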

Example 9: stfts_to_specgrams

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import log [as alias]
def stfts_to_specgrams(self, stfts):
    """Converts stfts to specgrams.

    Args:
      stfts: Complex64 tensor of stft, shape [batch, time, freq, 1].

    Returns:
      specgrams: Tensor of log magnitudes and instantaneous frequencies,
        shape [batch, time, freq, 2].
    """
    stfts = stfts[:, :, :, 0]

    logmag = self._safe_log(tf.abs(stfts))

    phase_angle = tf.angle(stfts)
    if self._ifreq:
      p = spectral_ops.instantaneous_frequency(phase_angle)
    else:
      p = phase_angle / np.pi

    return tf.concat(
        [logmag[:, :, :, tf.newaxis], p[:, :, :, tf.newaxis]], axis=-1) 
Developer: magenta, Project: magenta, Lines: 24, Source: specgrams_helper.py

Example 10: specgrams_to_stfts

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import log [as alias]
def specgrams_to_stfts(self, specgrams):
    """Converts specgrams to stfts.

    Args:
      specgrams: Tensor of log magnitudes and instantaneous frequencies,
        shape [batch, time, freq, 2].

    Returns:
      stfts: Complex64 tensor of stft, shape [batch, time, freq, 1].
    """
    logmag = specgrams[:, :, :, 0]
    p = specgrams[:, :, :, 1]

    mag = tf.exp(logmag)

    if self._ifreq:
      phase_angle = tf.cumsum(p * np.pi, axis=-2)
    else:
      phase_angle = p * np.pi

    return spectral_ops.polar2rect(mag, phase_angle)[:, :, :, tf.newaxis] 
Developer: magenta, Project: magenta, Lines: 23, Source: specgrams_helper.py

Example 11: mu_law

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import log [as alias]
def mu_law(x, mu=255, int8=False):
  """A TF implementation of Mu-Law encoding.

  Args:
    x: The audio samples to encode.
    mu: The Mu to use in our Mu-Law.
    int8: Use int8 encoding.

  Returns:
    out: The Mu-Law encoded data (cast to int8 if `int8` is True).
  """
  out = tf.sign(x) * tf.log(1 + mu * tf.abs(x)) / np.log(1 + mu)
  out = tf.floor(out * 128)
  if int8:
    out = tf.cast(out, tf.int8)
  return out 
Developer: magenta, Project: magenta, Lines: 18, Source: utils.py
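A NumPy analogue of the encoder together with its inverse helps confirm the round trip; the decoder below is derived from the mu-law definition for this article and is not taken from magenta:

import numpy as np

def mu_law_np(x, mu=255):
    # log1p(mu*|x|) == log(1 + mu*|x|), mirroring the TF code above
    out = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)
    return np.floor(out * 128)

def inv_mu_law_np(out, mu=255):
    y = out / 128.0
    return np.sign(y) * np.expm1(np.abs(y) * np.log1p(mu)) / mu

x = np.linspace(-1.0, 1.0, 5)
encoded = mu_law_np(x)
print(encoded)                 # integers in [-128, 128]
print(inv_mu_law_np(encoded))  # approximately x, up to quantization error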

Example 12: calculate_softmax_and_summaries

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import log [as alias]
def calculate_softmax_and_summaries(logits, one_hot_labels, name):
  """Calculate the softmax cross entropy loss and associated summaries.

  Args:
    logits: Tensor of logits, first dimension is batch size.
    one_hot_labels: Tensor of one hot encoded categorical labels. First
      dimension is batch size.
    name: Name to use as prefix for summaries.

  Returns:
    loss: Dimensionless tensor representing the mean negative
      log-probability of the true class.
  """
  loss = tf.nn.softmax_cross_entropy_with_logits(
      logits=logits, labels=one_hot_labels)
  loss = tf.reduce_mean(loss)
  softmax_summaries(loss, logits, one_hot_labels, name)
  return loss 
Developer: magenta, Project: magenta, Lines: 20, Source: utils.py

Example 13: calculate_sparse_softmax_and_summaries

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import log [as alias]
def calculate_sparse_softmax_and_summaries(logits, labels, name):
  """Calculate the softmax cross entropy loss and associated summaries.

  Args:
    logits: Tensor of logits, first dimension is batch size.
    labels: Tensor of categorical labels [ints]. First
      dimension is batch size.
    name: Name to use as prefix for summaries.

  Returns:
    loss: Dimensionless tensor representing the mean negative
      log-probability of the true class.
  """
  loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=logits, labels=labels)
  loss = tf.reduce_mean(loss)
  softmax_summaries(loss, logits, labels, name)
  return loss 
Developer: magenta, Project: magenta, Lines: 20, Source: utils.py
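Both helpers are thin wrappers over the standard TensorFlow cross-entropy ops plus a summary call. Stripped of the summaries, the core computation looks like this (a sketch written for this article, with hypothetical tensors):

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

logits = tf.constant(np.random.randn(4, 10), dtype=tf.float32)  # batch of 4, 10 classes
labels = tf.constant([1, 7, 3, 0], dtype=tf.int32)              # integer class ids

loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))

with tf.Session() as sess:
    print(sess.run(loss))  # mean negative log-probability of the true class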

Example 14: frequency_weighted_cost_mask

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import log [as alias]
def frequency_weighted_cost_mask(peak=10.0, hz_flat=1000, sr=16000, n_fft=512):
  """Calculates a mask to weight lower frequencies higher.

  Piecewise linear approximation. Assumes magnitude is in log scale.
  Args:
    peak: Cost increase at 0 Hz.
    hz_flat: Hz at which cost increase is 0.
    sr: Sample rate.
    n_fft: FFT size.

  Returns:
    Constant tensor [1, N_freq, 1] of cost weighting.
  """
  n = int(n_fft / 2)
  cutoff = np.where(
      librosa.core.fft_frequencies(sr=sr, n_fft=n_fft) >= hz_flat)[0][0]
  mask = np.concatenate([np.linspace(peak, 1.0, cutoff), np.ones(n - cutoff)])
  return tf.constant(mask[np.newaxis, :, np.newaxis], dtype=tf.float32)


#---------------------------------------------------
# Neural Nets
#--------------------------------------------------- 
Developer: magenta, Project: magenta, Lines: 25, Source: utils.py
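With the default arguments the mask decays linearly from 10x to 1x over the bins below 1 kHz and is flat above it. A quick NumPy check of the cutoff and shape (a sketch written for this article; it assumes librosa is installed):

import librosa
import numpy as np

peak, hz_flat, sr, n_fft = 10.0, 1000, 16000, 512
n = n_fft // 2                                   # 256 frequency rows
cutoff = np.where(
    librosa.core.fft_frequencies(sr=sr, n_fft=n_fft) >= hz_flat)[0][0]
mask = np.concatenate([np.linspace(peak, 1.0, cutoff), np.ones(n - cutoff)])
print(cutoff, mask.shape)                        # 32 (256,)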

Example 15: _grow_alive

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import log [as alias]
def _grow_alive(curr_seq, curr_scores, curr_log_probs, curr_finished,
                batch_size, beam_size):
  """Given sequences and scores, will gather the top k=beam size sequences.

  Args:
    curr_seq: current topk sequence that has been grown by one position.
      [batch_size, beam_size, decode_length + 1]
    curr_scores: scores for each of these sequences. [batch_size, beam_size]
    curr_log_probs: log probs for each of these sequences.
      [batch_size, beam_size]
    curr_finished: Finished flags for each of these sequences.
      [batch_size, beam_size]
    batch_size: Integer specifying batch size.
    beam_size: Integer specifying beam size.
  Returns:
    Tuple of
      (Topk sequences based on scores,
       log probs of these sequences,
       Finished flags of these sequences)
  """
  # Set the scores of the finished seq in curr_seq to large negative
  # values
  curr_scores = _apply_negative_infinity_mask(curr_scores, curr_finished)
  return compute_topk_scores_and_seq(curr_seq, curr_scores, curr_log_probs,
                                     curr_finished, beam_size, batch_size) 
Developer: google-research, Project: language, Lines: 27, Source: beam_search.py


Note: the tensorflow.compat.v1.log method examples in this article were collected by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their authors, and copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or reusing the code; do not reproduce this article without permission.