Python v1.one_hot Method Code Examples

This article collects typical usage examples of the Python method tensorflow.compat.v1.one_hot. If you are wondering how exactly to use v1.one_hot, how to call it, or what it looks like in real code, the curated method examples below may help. You can also explore further usage examples from the tensorflow.compat.v1 module it belongs to.


The following 15 code examples of the v1.one_hot method are shown below, sorted by popularity by default.
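
Before diving into the examples, here is a minimal, self-contained sketch of tf.one_hot itself; the indices, depth, and session setup are illustrative assumptions and are not taken from any of the projects quoted below.

# Minimal tf.one_hot sketch (illustrative values only).
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

indices = tf.constant([0, 2, 1])            # class ids, shape [3]
one_hot = tf.one_hot(indices, depth=3)      # shape [3, 3], dtype float32
one_hot_int = tf.one_hot(indices, depth=3,
                         on_value=1, off_value=0,
                         dtype=tf.int32)    # integer variant

with tf.Session() as sess:
    print(sess.run(one_hot))
    # [[1. 0. 0.]
    #  [0. 0. 1.]
    #  [0. 1. 0.]]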

Example 1: two_class_log_likelihood

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import one_hot [as alias]
def two_class_log_likelihood(predictions, labels, weights_fn=None):
  """Log-likelihood for two class classification with 0/1 labels.

  Args:
    predictions: A float valued tensor of shape [`batch_size`].  Each
      component should be between 0 and 1.
    labels: An int valued tensor of shape [`batch_size`].  Each component
      should either be 0 or 1.
    weights_fn: unused.

  Returns:
    A pair, with the average log likelihood in the first component.
  """
  del weights_fn
  float_predictions = tf.cast(tf.squeeze(predictions), dtype=tf.float64)
  batch_probs = tf.stack([1. - float_predictions, float_predictions], axis=-1)
  int_labels = tf.cast(tf.squeeze(labels), dtype=tf.int32)
  onehot_targets = tf.cast(tf.one_hot(int_labels, 2), dtype=tf.float64)
  chosen_probs = tf.einsum(
      "ij,ij->i", batch_probs, onehot_targets, name="chosen_probs")
  avg_log_likelihood = tf.reduce_mean(tf.log(chosen_probs))
  return avg_log_likelihood, tf.constant(1.0) 
Developer: tensorflow | Project: tensor2tensor | Lines: 24 | Source: metrics.py

Example 2: set_precision

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import one_hot [as alias]
def set_precision(predictions, labels,
                  weights_fn=common_layers.weights_nonzero):
  """Precision of set predictions.

  Args:
    predictions : A Tensor of scores of shape [batch, nlabels].
    labels: A Tensor of int32s giving true set elements,
      of shape [batch, seq_length].
    weights_fn: A function to weight the elements.

  Returns:
    hits: A Tensor of shape [batch, nlabels].
    weights: A Tensor of shape [batch, nlabels].
  """
  with tf.variable_scope("set_precision", values=[predictions, labels]):
    labels = tf.squeeze(labels, [2, 3])
    weights = weights_fn(labels)
    labels = tf.one_hot(labels, predictions.shape[-1])
    labels = tf.reduce_max(labels, axis=1)
    labels = tf.cast(labels, tf.bool)
    return tf.to_float(tf.equal(labels, predictions)), weights 
Developer: tensorflow | Project: tensor2tensor | Lines: 23 | Source: metrics.py

Example 3: set_recall

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import one_hot [as alias]
def set_recall(predictions, labels, weights_fn=common_layers.weights_nonzero):
  """Recall of set predictions.

  Args:
    predictions : A Tensor of scores of shape [batch, nlabels].
    labels: A Tensor of int32s giving true set elements,
      of shape [batch, seq_length].
    weights_fn: A function to weight the elements.

  Returns:
    hits: A Tensor of shape [batch, nlabels].
    weights: A Tensor of shape [batch, nlabels].
  """
  with tf.variable_scope("set_recall", values=[predictions, labels]):
    labels = tf.squeeze(labels, [2, 3])
    weights = weights_fn(labels)
    labels = tf.one_hot(labels, predictions.shape[-1])
    labels = tf.reduce_max(labels, axis=1)
    labels = tf.cast(labels, tf.bool)
    return tf.to_float(tf.equal(labels, predictions)), weights 
Developer: tensorflow | Project: tensor2tensor | Lines: 22 | Source: metrics.py

Example 4: sigmoid_precision_one_hot

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import one_hot [as alias]
def sigmoid_precision_one_hot(logits, labels, weights_fn=None):
  """Calculate precision for a set, given one-hot labels and logits.

  Predictions are converted to one-hot,
  as predictions[example][arg-max(example)] = 1

  Args:
    logits: Tensor of size [batch-size, o=1, p=1, num-classes]
    labels: Tensor of size [batch-size, o=1, p=1, num-classes]
    weights_fn: Function that takes in labels and weighs examples (unused)
  Returns:
    precision (scalar), weights
  """
  with tf.variable_scope("sigmoid_precision_one_hot", values=[logits, labels]):
    del weights_fn
    num_classes = logits.shape[-1]
    predictions = tf.nn.sigmoid(logits)
    predictions = tf.argmax(predictions, -1)
    predictions = tf.one_hot(predictions, num_classes)
    _, precision = tf.metrics.precision(labels=labels, predictions=predictions)
    return precision, tf.constant(1.0) 
Developer: tensorflow | Project: tensor2tensor | Lines: 23 | Source: metrics.py

Example 5: sigmoid_recall_one_hot

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import one_hot [as alias]
def sigmoid_recall_one_hot(logits, labels, weights_fn=None):
  """Calculate recall for a set, given one-hot labels and logits.

  Predictions are converted to one-hot,
  as predictions[example][arg-max(example)] = 1

  Args:
    logits: Tensor of size [batch-size, o=1, p=1, num-classes]
    labels: Tensor of size [batch-size, o=1, p=1, num-classes]
    weights_fn: Function that takes in labels and weighs examples (unused)
  Returns:
    recall (scalar), weights
  """
  with tf.variable_scope("sigmoid_recall_one_hot", values=[logits, labels]):
    del weights_fn
    num_classes = logits.shape[-1]
    predictions = tf.nn.sigmoid(logits)
    predictions = tf.argmax(predictions, -1)
    predictions = tf.one_hot(predictions, num_classes)
    _, recall = tf.metrics.recall(labels=labels, predictions=predictions)
    return recall, tf.constant(1.0) 
Developer: tensorflow | Project: tensor2tensor | Lines: 23 | Source: metrics.py

Example 6: testAccuracyTopKMetric

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import one_hot [as alias]
def testAccuracyTopKMetric(self):
    predictions = np.random.randint(1, 5, size=(12, 12, 12, 1))
    targets = np.random.randint(1, 5, size=(12, 12, 12, 1))
    expected = np.mean((predictions == targets).astype(float))
    with self.test_session() as session:
      predicted = tf.one_hot(predictions, depth=5, dtype=tf.float32)
      scores1, _ = metrics.padded_accuracy_topk(
          predicted, tf.constant(targets, dtype=tf.int32), k=1)
      scores2, _ = metrics.padded_accuracy_topk(
          predicted, tf.constant(targets, dtype=tf.int32), k=7)
      a1 = tf.reduce_mean(scores1)
      a2 = tf.reduce_mean(scores2)
      session.run(tf.global_variables_initializer())
      actual1, actual2 = session.run([a1, a2])
    self.assertAlmostEqual(actual1, expected)
    self.assertAlmostEqual(actual2, 1.0) 
Developer: tensorflow | Project: tensor2tensor | Lines: 18 | Source: metrics_test.py

Example 7: testPrefixAccuracy

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import one_hot [as alias]
def testPrefixAccuracy(self):
    vocab_size = 10
    predictions = tf.one_hot(
        tf.constant([[[1], [2], [3], [4], [9], [6], [7], [8]],
                     [[1], [2], [3], [4], [5], [9], [7], [8]],
                     [[1], [2], [3], [4], [5], [9], [7], [0]]]),
        vocab_size)
    labels = tf.expand_dims(
        tf.constant([[[1], [2], [3], [4], [5], [6], [7], [8]],
                     [[1], [2], [3], [4], [5], [6], [7], [8]],
                     [[1], [2], [3], [4], [5], [6], [7], [0]]]),
        axis=-1)
    expected_accuracy = np.average([4.0 / 8.0,
                                    5.0 / 8.0,
                                    5.0 / 7.0])
    accuracy, _ = metrics.prefix_accuracy(predictions, labels)
    with self.test_session() as session:
      accuracy_value = session.run(accuracy)
      self.assertAlmostEqual(expected_accuracy, accuracy_value) 
Developer: tensorflow | Project: tensor2tensor | Lines: 21 | Source: metrics_test.py

Example 8: testSequenceEditDistanceMetric

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import one_hot [as alias]
def testSequenceEditDistanceMetric(self):
    predictions = np.array([[3, 4, 5, 1, 0, 0],
                            [2, 1, 3, 4, 0, 0],
                            [2, 1, 3, 4, 0, 0]])
    # Targets are just a bit different:
    #  - first sequence has a different prediction
    #  - second sequence has a different prediction and one extra step
    #  - third sequence is identical
    targets = np.array([[5, 4, 5, 1, 0, 0],
                        [2, 5, 3, 4, 1, 0],
                        [2, 1, 3, 4, 0, 0]])
    # Reshape to match expected input format by metric fns.
    predictions = np.reshape(predictions, [3, 6, 1, 1])
    targets = np.reshape(targets, [3, 6, 1, 1])
    with self.test_session() as session:
      scores, weight = metrics.sequence_edit_distance(
          tf.one_hot(predictions, depth=6, dtype=tf.float32),
          tf.constant(targets, dtype=tf.int32))
      session.run(tf.global_variables_initializer())
      actual_scores, actual_weight = session.run([scores, weight])
    self.assertAlmostEqual(actual_scores, 3.0 / 13)
    self.assertEqual(actual_weight, 13) 
Developer: tensorflow | Project: tensor2tensor | Lines: 24 | Source: metrics_test.py

Example 9: testNegativeLogPerplexityMaskedAssert

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import one_hot [as alias]
def testNegativeLogPerplexityMaskedAssert(self):
    predictions = np.random.randint(4, size=(12, 12, 12, 1))
    targets = np.random.randint(4, size=(12, 12, 12, 1))
    features = {}

    with self.assertRaisesRegexp(
        ValueError,
        'masked_neg_log_perplexity requires targets_mask feature'):
      with self.test_session() as session:
        scores, _ = metrics.padded_neg_log_perplexity_with_masking(
            tf.one_hot(predictions, depth=4, dtype=tf.float32),
            tf.constant(targets, dtype=tf.int32),
            features)
        a = tf.reduce_mean(scores)
        session.run(tf.global_variables_initializer())
        _ = session.run(a) 
Developer: tensorflow | Project: tensor2tensor | Lines: 18 | Source: metrics_test.py

Example 10: testMultilabelMatch3

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import one_hot [as alias]
def testMultilabelMatch3(self):
    predictions = np.random.randint(1, 5, size=(100, 1, 1, 1))
    targets = np.random.randint(1, 5, size=(100, 10, 1, 1))
    weights = np.random.randint(0, 2, size=(100, 1, 1, 1))
    targets *= weights

    predictions_repeat = np.repeat(predictions, 10, axis=1)
    expected = (predictions_repeat == targets).astype(float)
    expected = np.sum(expected, axis=(1, 2, 3))
    expected = np.minimum(expected / 3.0, 1.)
    expected = np.sum(expected * weights[:, 0, 0, 0]) / weights.shape[0]
    with self.test_session() as session:
      scores, weights_ = metrics.multilabel_accuracy_match3(
          tf.one_hot(predictions, depth=5, dtype=tf.float32),
          tf.constant(targets, dtype=tf.int32))
      a, a_op = tf.metrics.mean(scores, weights_)
      session.run(tf.local_variables_initializer())
      session.run(tf.global_variables_initializer())
      _ = session.run(a_op)
      actual = session.run(a)
    self.assertAlmostEqual(actual, expected, places=6) 
Developer: tensorflow | Project: tensor2tensor | Lines: 23 | Source: metrics_test.py

Example 11: initialize_write_strengths

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import one_hot [as alias]
def initialize_write_strengths(self, batch_size):
    """Initialize write strengths to write to the first memory address.

    This is exposed as its own function so that it can be overridden to provide
    alternate write addressing schemes.

    Args:
      batch_size: The size of the current batch.

    Returns:
      A tf.float32 tensor of shape [batch_size, num_write_heads, memory_size, 1]
      where the first element along the memory_size dimension is set to 1.0.
    """
    return tf.expand_dims(
        tf.one_hot([[0] * self._num_write_heads] * batch_size,
                   depth=self._memory_size, dtype=tf.float32), axis=3) 
Developer: tensorflow | Project: tensor2tensor | Lines: 18 | Source: neural_stack.py
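
To make the returned shape concrete, here is a standalone sketch of the same one_hot-plus-expand_dims pattern; the batch_size, num_write_heads, and memory_size values are made up for illustration.

# Shape check for the write-strength initialization pattern (assumed values).
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

batch_size, num_write_heads, memory_size = 2, 3, 5
strengths = tf.expand_dims(
    tf.one_hot([[0] * num_write_heads] * batch_size,
               depth=memory_size, dtype=tf.float32), axis=3)

with tf.Session() as sess:
    out = sess.run(strengths)
    print(out.shape)        # (2, 3, 5, 1)
    print(out[0, 0, :, 0])  # [1. 0. 0. 0. 0.]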

Example 12: vq_nearest_neighbor

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import one_hot [as alias]
def vq_nearest_neighbor(x, hparams):
  """Find the nearest element in means to elements in x."""
  bottleneck_size = 2**hparams.bottleneck_bits
  means = hparams.means
  x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
  means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
  scalar_prod = tf.matmul(x, means, transpose_b=True)
  dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod
  if hparams.bottleneck_kind == "em":
    x_means_idx = tf.multinomial(-dist, num_samples=hparams.num_samples)
    x_means_hot = tf.one_hot(
        x_means_idx, depth=bottleneck_size)
    x_means_hot = tf.reduce_mean(x_means_hot, axis=1)
  else:
    x_means_idx = tf.argmax(-dist, axis=-1)
    x_means_hot = tf.one_hot(x_means_idx, depth=bottleneck_size)
  x_means = tf.matmul(x_means_hot, means)
  e_loss = tf.reduce_mean(tf.squared_difference(x, tf.stop_gradient(x_means)))
  return x_means_hot, e_loss 
Developer: tensorflow | Project: tensor2tensor | Lines: 21 | Source: transformer_nat.py
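
As a hedged, self-contained illustration of the hard-assignment branch above (the tensors and codebook values here are made up and do not come from the tensor2tensor hparams object), one_hot over the argmax of the negated distance matrix selects one codebook vector per input row:

# Minimal nearest-neighbour lookup via one_hot (illustrative shapes and values).
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.constant([[0.1, 0.9], [1.0, 0.0]])                   # [batch, hidden]
means = tf.constant([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]])   # [codebook, hidden]

dist = (tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
        + tf.transpose(tf.reduce_sum(tf.square(means), axis=-1, keepdims=True))
        - 2 * tf.matmul(x, means, transpose_b=True))        # [batch, codebook]
x_means_hot = tf.one_hot(tf.argmax(-dist, axis=-1), depth=3)
x_means = tf.matmul(x_means_hot, means)                     # nearest codebook rows

with tf.Session() as sess:
    print(sess.run(x_means))  # [[0. 1.]
                              #  [1. 0.]]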

Example 13: fill_memory_slot

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import one_hot [as alias]
def fill_memory_slot(memory, value, index):
  """Fills the memory slot at a particular index with the given value.

  Args:
    memory: a 4-d tensor [memory_size, batch, length, channel] containing
      the state of all steps
    value: a 3-d tensor [batch, length, channel] as the state
    index: integer in [0, memory_size)

  Returns:
    filled memory

  """
  mask = tf.to_float(
      tf.one_hot(index,
                 tf.shape(memory)[0])[:, None, None, None])
  fill_memory = (1 - mask) * memory + mask * value[None, ...]
  return fill_memory 
Developer: tensorflow | Project: tensor2tensor | Lines: 20 | Source: universal_transformer_util.py
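
The following standalone sketch (with made-up shapes; not part of the tensor2tensor source) shows the masking trick in isolation: tf.one_hot(index, memory_size) yields a 0/1 vector over the memory dimension, and the broadcasted arithmetic overwrites exactly one slot.

# Standalone sketch of the one_hot masking pattern used by fill_memory_slot.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

memory = tf.zeros([4, 2, 3, 5])   # [memory_size, batch, length, channel]
value = tf.ones([2, 3, 5])        # [batch, length, channel]
index = 1
mask = tf.to_float(tf.one_hot(index, 4))[:, None, None, None]  # [4, 1, 1, 1]
filled = (1 - mask) * memory + mask * value[None, ...]

with tf.Session() as sess:
    out = sess.run(filled)
    print(out[1].min(), out[0].max())  # 1.0 0.0 -> only slot 1 was written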

Example 14: gumbel_sample

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import one_hot [as alias]
def gumbel_sample(self, reconstr_gan):
    hparams = self.hparams
    is_training = hparams.mode == tf.estimator.ModeKeys.TRAIN
    vocab_size = self._problem_hparams.vocab_size["targets"]
    if hasattr(self._hparams, "vocab_divisor"):
      vocab_size += (-vocab_size) % self._hparams.vocab_divisor
    reconstr_gan = tf.nn.log_softmax(reconstr_gan)
    if is_training and hparams.gumbel_temperature > 0.0:
      gumbel_samples = discretization.gumbel_sample(
          common_layers.shape_list(reconstr_gan))
      gumbel_samples *= hparams.gumbel_noise_factor
      reconstr_gan += gumbel_samples
      reconstr_sample = latent_layers.multinomial_sample(
          reconstr_gan, temperature=hparams.gumbel_temperature)
      reconstr_gan = tf.nn.softmax(reconstr_gan / hparams.gumbel_temperature)
    else:
      reconstr_sample = tf.argmax(reconstr_gan, axis=-1)
      reconstr_gan = tf.nn.softmax(reconstr_gan / 0.1)  # Sharpen a bit.
    # Use 1-hot forward, softmax backward.
    reconstr_hot = tf.one_hot(reconstr_sample, vocab_size)
    reconstr_gan += reconstr_hot - tf.stop_gradient(reconstr_gan)
    return reconstr_gan 
Developer: tensorflow | Project: tensor2tensor | Lines: 24 | Source: autoencoders.py

Example 15: lm_loss

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import one_hot [as alias]
def lm_loss(self):
        """
        :return: scalar language-model loss, averaged over non-padding target tokens
        """
        target_ids_flat = tf.reshape(self.target_ids, [-1])

        # 1 if it's valid and 0 otherwise.
        label_weights = tf.cast(tf.not_equal(target_ids_flat, self.pad_token_id), dtype=self.logits_flat.dtype)

        # [batch_size * seq_length, vocab_size]
        one_hot_labels = tf.one_hot(target_ids_flat,
                                    depth=self.config.vocab_size,
                                    dtype=self.logits_flat.dtype)

        # [batch_size * seq_length, vocab_size]
        logprobs_flat = tf.nn.log_softmax(self.logits_flat, axis=-1)

        per_example_loss = -tf.reduce_sum(logprobs_flat * one_hot_labels, axis=[-1])

        # per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits_flat, labels=target_ids_flat)

        numerator = tf.reduce_sum(label_weights * per_example_loss)
        denominator = tf.reduce_sum(label_weights) + 1e-5
        loss = numerator / denominator
        return loss 
Developer: imcaspar | Project: gpt2-ml | Lines: 27 | Source: modeling.py


Note: The tensorflow.compat.v1.one_hot method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by various developers; copyright in the source code remains with the original authors, and distribution or use should follow the corresponding project's license. Do not reproduce without permission.