

Python v1.constant Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.constant method in Python. If you are wondering what v1.constant does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the tensorflow.compat.v1 module.


The following 15 code examples of v1.constant are shown below, sorted by popularity by default.
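As a quick orientation before the collected examples, here is a minimal standalone sketch of typical tf.compat.v1.constant usage (a scalar, an explicit dtype, and a scalar value broadcast to a shape). It is not taken from any of the projects below.

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # graph mode, matching the examples below

scalar = tf.constant(1.0)                        # float32 scalar
vector = tf.constant([1, 2, 3], dtype=tf.int64)  # explicit dtype
matrix = tf.constant(7.0, shape=[2, 3])          # scalar broadcast to a 2x3 tensor

with tf.Session() as sess:
  print(sess.run([scalar, vector, matrix]))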

Example 1: normalize_image

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import constant [as alias]
def normalize_image(images):
  """Normalize image to zero mean and unit variance.

  Args:
    images: a tensor representing images, at least 3-D.
  Returns:
    images normalized by mean and stdev.
  """
  data_type = images.dtype
  mean = tf.constant(ssd_constants.NORMALIZATION_MEAN, data_type)
  std = tf.constant(ssd_constants.NORMALIZATION_STD, data_type)
  images = tf.divide(tf.subtract(images, mean), std)

  mlperf.logger.log(key=mlperf.tags.DATA_NORMALIZATION_MEAN,
                    value=ssd_constants.NORMALIZATION_MEAN)
  mlperf.logger.log(key=mlperf.tags.DATA_NORMALIZATION_STD,
                    value=ssd_constants.NORMALIZATION_STD)
  return images 
Author: tensorflow, Project: benchmarks, Lines: 20, Source: ssd_dataloader.py
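For context, a simplified, self-contained variant of the same pattern is sketched below. The mean/std values are common ImageNet statistics used here only as placeholders (they are not the actual ssd_constants values), and the MLPerf logging is omitted.

import tensorflow.compat.v1 as tf

def normalize_image_simple(images, mean=(0.485, 0.456, 0.406),
                           std=(0.229, 0.224, 0.225)):
  """Per-channel normalization using tf.constant for mean and std."""
  mean = tf.constant(mean, images.dtype)
  std = tf.constant(std, images.dtype)
  return tf.divide(tf.subtract(images, mean), std)

images = tf.random_uniform([4, 300, 300, 3])  # a dummy batch of images
normalized = normalize_image_simple(images)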

Example 2: testAppendGradientsWithLossScaleWithtNan

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import constant [as alias]
def testAppendGradientsWithLossScaleWithtNan(self):
    v = tf.Variable(0)
    training_ops = []
    get_apply_gradients_ops_func = lambda: [tf.assign(v, v + 1)]
    loss_scale_params = variable_mgr_util.AutoLossScaleParams(
        enable_auto_loss_scale=True,
        loss_scale=tf.Variable(4, dtype=tf.float32),
        loss_scale_normal_steps=tf.Variable(10),
        inc_loss_scale_every_n=10,
        is_chief=True)
    variable_mgr_util.append_gradients_with_loss_scale(
        training_ops,
        get_apply_gradients_ops_func,
        loss_scale_params,
        grad_has_inf_nan=tf.constant(True))

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(training_ops)
      self.assertEqual(sess.run(v), 0)  # Skip updating for v.
      # Halve loss_scale and reset loss_scale_normal_steps.
      self.assertEqual(sess.run(loss_scale_params.loss_scale), 2)
      self.assertEqual(sess.run(loss_scale_params.loss_scale_normal_steps), 0) 
Author: tensorflow, Project: benchmarks, Lines: 25, Source: variable_mgr_util_test.py

Example 3: two_class_log_likelihood

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import constant [as alias]
def two_class_log_likelihood(predictions, labels, weights_fn=None):
  """Log-likelihood for two class classification with 0/1 labels.

  Args:
    predictions: A float valued tensor of shape [`batch_size`].  Each
      component should be between 0 and 1.
    labels: An int valued tensor of shape [`batch_size`].  Each component
      should either be 0 or 1.
    weights_fn: unused.

  Returns:
    A pair, with the average log likelihood in the first component.
  """
  del weights_fn
  float_predictions = tf.cast(tf.squeeze(predictions), dtype=tf.float64)
  batch_probs = tf.stack([1. - float_predictions, float_predictions], axis=-1)
  int_labels = tf.cast(tf.squeeze(labels), dtype=tf.int32)
  onehot_targets = tf.cast(tf.one_hot(int_labels, 2), dtype=tf.float64)
  chosen_probs = tf.einsum(
      "ij,ij->i", batch_probs, onehot_targets, name="chosen_probs")
  avg_log_likelihood = tf.reduce_mean(tf.log(chosen_probs))
  return avg_log_likelihood, tf.constant(1.0) 
Author: tensorflow, Project: tensor2tensor, Lines: 24, Source: metrics.py

Example 4: sigmoid_accuracy_one_hot

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import constant [as alias]
def sigmoid_accuracy_one_hot(logits, labels, weights_fn=None):
  """Calculate accuracy for a set, given one-hot labels and logits.

  Args:
    logits: Tensor of size [batch-size, o=1, p=1, num-classes]
    labels: Tensor of size [batch-size, o=1, p=1, num-classes]
    weights_fn: Function that takes in labels and weighs examples (unused)
  Returns:
    accuracy (scalar), weights
  """
  with tf.variable_scope("sigmoid_accuracy_one_hot", values=[logits, labels]):
    del weights_fn
    predictions = tf.nn.sigmoid(logits)
    labels = tf.argmax(labels, -1)
    predictions = tf.argmax(predictions, -1)
    _, accuracy = tf.metrics.accuracy(labels=labels, predictions=predictions)
    return accuracy, tf.constant(1.0) 
Author: tensorflow, Project: tensor2tensor, Lines: 19, Source: metrics.py

Example 5: sigmoid_accuracy

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import constant [as alias]
def sigmoid_accuracy(logits, labels, weights_fn=None):
  """Calculate accuracy for a set, given integer labels and logits.

  Args:
    logits: Tensor of size [batch-size, o=1, p=1, num-classes]
    labels: Tensor of size [batch-size, o=1, p=1]
    weights_fn: Function that takes in labels and weighs examples (unused)
  Returns:
    accuracy (scalar), weights
  """
  with tf.variable_scope("sigmoid_accuracy", values=[logits, labels]):
    del weights_fn
    predictions = tf.nn.sigmoid(logits)
    predictions = tf.argmax(predictions, -1)
    _, accuracy = tf.metrics.accuracy(labels=labels, predictions=predictions)
    return accuracy, tf.constant(1.0) 
Author: tensorflow, Project: tensor2tensor, Lines: 18, Source: metrics.py

Example 6: sigmoid_precision_one_hot

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import constant [as alias]
def sigmoid_precision_one_hot(logits, labels, weights_fn=None):
  """Calculate precision for a set, given one-hot labels and logits.

  Predictions are converted to one-hot,
  as predictions[example][arg-max(example)] = 1

  Args:
    logits: Tensor of size [batch-size, o=1, p=1, num-classes]
    labels: Tensor of size [batch-size, o=1, p=1, num-classes]
    weights_fn: Function that takes in labels and weighs examples (unused)
  Returns:
    precision (scalar), weights
  """
  with tf.variable_scope("sigmoid_precision_one_hot", values=[logits, labels]):
    del weights_fn
    num_classes = logits.shape[-1]
    predictions = tf.nn.sigmoid(logits)
    predictions = tf.argmax(predictions, -1)
    predictions = tf.one_hot(predictions, num_classes)
    _, precision = tf.metrics.precision(labels=labels, predictions=predictions)
    return precision, tf.constant(1.0) 
Author: tensorflow, Project: tensor2tensor, Lines: 23, Source: metrics.py

Example 7: sigmoid_recall_one_hot

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import constant [as alias]
def sigmoid_recall_one_hot(logits, labels, weights_fn=None):
  """Calculate recall for a set, given one-hot labels and logits.

  Predictions are converted to one-hot,
  as predictions[example][arg-max(example)] = 1

  Args:
    logits: Tensor of size [batch-size, o=1, p=1, num-classes]
    labels: Tensor of size [batch-size, o=1, p=1, num-classes]
    weights_fn: Function that takes in labels and weighs examples (unused)
  Returns:
    recall (scalar), weights
  """
  with tf.variable_scope("sigmoid_recall_one_hot", values=[logits, labels]):
    del weights_fn
    num_classes = logits.shape[-1]
    predictions = tf.nn.sigmoid(logits)
    predictions = tf.argmax(predictions, -1)
    predictions = tf.one_hot(predictions, num_classes)
    _, recall = tf.metrics.recall(labels=labels, predictions=predictions)
    return recall, tf.constant(1.0) 
Author: tensorflow, Project: tensor2tensor, Lines: 23, Source: metrics.py

Example 8: sigmoid_cross_entropy_one_hot

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import constant [as alias]
def sigmoid_cross_entropy_one_hot(logits, labels, weights_fn=None):
  """Calculate sigmoid cross entropy for one-hot lanels and logits.

  Args:
    logits: Tensor of size [batch-size, o=1, p=1, num-classes]
    labels: Tensor of size [batch-size, o=1, p=1, num-classes]
    weights_fn: Function that takes in labels and weighs examples (unused)
  Returns:
    cross_entropy (scalar), weights
  """
  with tf.variable_scope("sigmoid_cross_entropy_one_hot",
                         values=[logits, labels]):
    del weights_fn
    cross_entropy = tf.losses.sigmoid_cross_entropy(
        multi_class_labels=labels, logits=logits)
    return cross_entropy, tf.constant(1.0) 
Author: tensorflow, Project: tensor2tensor, Lines: 18, Source: metrics.py

Example 9: pearson_correlation_coefficient

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import constant [as alias]
def pearson_correlation_coefficient(predictions, labels, weights_fn=None):
  """Calculate pearson correlation coefficient.

  Args:
    predictions: The raw predictions.
    labels: The actual labels.
    weights_fn: Weighting function.

  Returns:
    The pearson correlation coefficient.
  """
  del weights_fn
  _, pearson = contrib.metrics().streaming_pearson_correlation(
      predictions, labels)
  return pearson, tf.constant(1.0)

# Metrics are functions that take predictions and labels and return
# a tensor of metrics and a tensor of weights.
# If the function has "features" as an argument, it will receive the whole
# features dict as well.
# The results are passed to tf.metrics.mean to accumulate properly. 
Author: tensorflow, Project: tensor2tensor, Lines: 23, Source: metrics.py
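Following the convention described in the comment at the end of Example 9, a minimal sketch of a custom metric is shown below. The name mean_absolute_error_metric is hypothetical and not part of tensor2tensor; it simply illustrates returning a per-example metric tensor together with a weights tensor (here tf.constant(1.0)) so that downstream aggregation via tf.metrics.mean works as described.

import tensorflow.compat.v1 as tf

def mean_absolute_error_metric(predictions, labels, weights_fn=None):
  """Per-example absolute error; aggregated downstream with tf.metrics.mean."""
  del weights_fn  # unused, kept only for the common metric signature
  error = tf.abs(tf.to_float(predictions) - tf.to_float(labels))
  return error, tf.constant(1.0)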

Example 10: rouge_l_fscore

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import constant [as alias]
def rouge_l_fscore(predictions, labels, **unused_kwargs):
  """ROUGE scores computation between labels and predictions.

  This is an approximate ROUGE scoring method since we do not glue word pieces
  or decode the ids and tokenize the output.

  Args:
    predictions: tensor, model predictions
    labels: tensor, gold output.

  Returns:
    rouge_l_fscore: approx rouge-l f1 score.
  """
  outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
  # Convert the outputs and labels to a [batch_size, input_length] tensor.
  outputs = tf.squeeze(outputs, axis=[-1, -2])
  labels = tf.squeeze(labels, axis=[-1, -2])
  rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),
                               tf.float32)
  return rouge_l_f_score, tf.constant(1.0) 
Author: tensorflow, Project: tensor2tensor, Lines: 22, Source: rouge.py

Example 11: rouge_2_fscore

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import constant [as alias]
def rouge_2_fscore(predictions, labels, **unused_kwargs):
  """ROUGE-2 F1 score computation between labels and predictions.

  This is an approximate ROUGE scoring method since we do not glue word pieces
  or decode the ids and tokenize the output.

  Args:
    predictions: tensor, model predictions
    labels: tensor, gold output.

  Returns:
    rouge2_fscore: approx rouge-2 f1 score.
  """

  outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
  # Convert the outputs and labels to a [batch_size, input_length] tensor.
  outputs = tf.squeeze(outputs, axis=[-1, -2])
  labels = tf.squeeze(labels, axis=[-1, -2])
  rouge_2_f_score = tf.py_func(rouge_n, (outputs, labels), tf.float32)
  return rouge_2_f_score, tf.constant(1.0) 
Author: tensorflow, Project: tensor2tensor, Lines: 22, Source: rouge.py

Example 12: testPrefixAccuracy

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import constant [as alias]
def testPrefixAccuracy(self):
    vocab_size = 10
    predictions = tf.one_hot(
        tf.constant([[[1], [2], [3], [4], [9], [6], [7], [8]],
                     [[1], [2], [3], [4], [5], [9], [7], [8]],
                     [[1], [2], [3], [4], [5], [9], [7], [0]]]),
        vocab_size)
    labels = tf.expand_dims(
        tf.constant([[[1], [2], [3], [4], [5], [6], [7], [8]],
                     [[1], [2], [3], [4], [5], [6], [7], [8]],
                     [[1], [2], [3], [4], [5], [6], [7], [0]]]),
        axis=-1)
    expected_accuracy = np.average([4.0 / 8.0,
                                    5.0 / 8.0,
                                    5.0 / 7.0])
    accuracy, _ = metrics.prefix_accuracy(predictions, labels)
    with self.test_session() as session:
      accuracy_value = session.run(accuracy)
      self.assertAlmostEqual(expected_accuracy, accuracy_value) 
Author: tensorflow, Project: tensor2tensor, Lines: 21, Source: metrics_test.py

Example 13: testNegativeLogPerplexityMaskedAssert

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import constant [as alias]
def testNegativeLogPerplexityMaskedAssert(self):
    predictions = np.random.randint(4, size=(12, 12, 12, 1))
    targets = np.random.randint(4, size=(12, 12, 12, 1))
    features = {}

    with self.assertRaisesRegexp(
        ValueError,
        'masked_neg_log_perplexity requires targets_mask feature'):
      with self.test_session() as session:
        scores, _ = metrics.padded_neg_log_perplexity_with_masking(
            tf.one_hot(predictions, depth=4, dtype=tf.float32),
            tf.constant(targets, dtype=tf.int32),
            features)
        a = tf.reduce_mean(scores)
        session.run(tf.global_variables_initializer())
        _ = session.run(a) 
Author: tensorflow, Project: tensor2tensor, Lines: 18, Source: metrics_test.py

Example 14: testMultilabelMatch3

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import constant [as alias]
def testMultilabelMatch3(self):
    predictions = np.random.randint(1, 5, size=(100, 1, 1, 1))
    targets = np.random.randint(1, 5, size=(100, 10, 1, 1))
    weights = np.random.randint(0, 2, size=(100, 1, 1, 1))
    targets *= weights

    predictions_repeat = np.repeat(predictions, 10, axis=1)
    expected = (predictions_repeat == targets).astype(float)
    expected = np.sum(expected, axis=(1, 2, 3))
    expected = np.minimum(expected / 3.0, 1.)
    expected = np.sum(expected * weights[:, 0, 0, 0]) / weights.shape[0]
    with self.test_session() as session:
      scores, weights_ = metrics.multilabel_accuracy_match3(
          tf.one_hot(predictions, depth=5, dtype=tf.float32),
          tf.constant(targets, dtype=tf.int32))
      a, a_op = tf.metrics.mean(scores, weights_)
      session.run(tf.local_variables_initializer())
      session.run(tf.global_variables_initializer())
      _ = session.run(a_op)
      actual = session.run(a)
    self.assertAlmostEqual(actual, expected, places=6) 
Author: tensorflow, Project: tensor2tensor, Lines: 23, Source: metrics_test.py

Example 15: testShapes

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import constant [as alias]
def testShapes(self):
    batch_size = 2
    beam_size = 3
    vocab_size = 4
    decode_length = 10

    initial_ids = tf.constant([0, 0])  # GO

    def symbols_to_logits(_):
      # Just return random logits
      return tf.random_uniform((batch_size * beam_size, vocab_size))

    final_ids, final_probs, _ = beam_search.beam_search(
        symbols_to_logits, initial_ids, beam_size, decode_length, vocab_size,
        0.)

    self.assertEqual(final_ids.get_shape().as_list(), [None, beam_size, None])

    self.assertEqual(final_probs.get_shape().as_list(), [batch_size, beam_size]) 
Author: tensorflow, Project: tensor2tensor, Lines: 21, Source: beam_search_test.py


Note: The tensorflow.compat.v1.constant examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their respective authors, who retain copyright of the source code. Please consult the corresponding project's license before redistributing or using the code; do not reproduce without permission.