

Python tensorflow.one_hot Method Code Examples

This article collects typical usage examples of the Python method tensorflow.one_hot. If you are unsure what tensorflow.one_hot does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of this method within tensorflow.


The following presents 15 code examples of tensorflow.one_hot, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
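
As a quick orientation before the examples, here is a minimal standalone sketch of tf.one_hot itself (not taken from any of the projects below): it encodes integer indices along a new axis of length depth, and out-of-range indices such as -1 produce an all-zero row.

import tensorflow as tf

indices = tf.constant([0, 2, -1, 1])

# depth sets the size of the one-hot axis; on_value, off_value and axis are optional.
one_hot = tf.one_hot(indices, depth=3, on_value=1.0, off_value=0.0, axis=-1)
# one_hot ==
# [[1., 0., 0.],
#  [0., 0., 1.],
#  [0., 0., 0.],   # -1 is out of range, so its row stays all zeros
#  [0., 1., 0.]]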

Example 1: _build_input

# Required import: import tensorflow [as alias]
# Or: from tensorflow import one_hot [as alias]
def _build_input(self):
        self.tails = tf.placeholder(tf.int32, [None])
        self.heads = tf.placeholder(tf.int32, [None])
        self.targets = tf.one_hot(indices=self.heads, depth=self.num_entity)
            
        if not self.query_is_language:
            self.queries = tf.placeholder(tf.int32, [None, self.num_step])
            self.query_embedding_params = tf.Variable(self._random_uniform_unit(
                                                          self.num_query + 1, # <END> token 
                                                          self.query_embed_size), 
                                                      dtype=tf.float32)
        
            rnn_inputs = tf.nn.embedding_lookup(self.query_embedding_params, 
                                                self.queries)
        else:
            self.queries = tf.placeholder(tf.int32, [None, self.num_step, self.num_word])
            self.vocab_embedding_params = tf.Variable(self._random_uniform_unit(
                                                          self.num_vocab + 1, # <END> token
                                                          self.vocab_embed_size),
                                                      dtype=tf.float32)
            embedded_query = tf.nn.embedding_lookup(self.vocab_embedding_params, 
                                                    self.queries)
            rnn_inputs = tf.reduce_mean(embedded_query, axis=2)

        return rnn_inputs 
Author: fanyangxyz, Project: Neural-LP, Lines of code: 27, Source file: model.py

Example 2: label_smoothing_regularization

# Required import: import tensorflow [as alias]
# Or: from tensorflow import one_hot [as alias]
def label_smoothing_regularization(self, chars_labels, weight=0.1):
    """Applies a label smoothing regularization.

    Uses the same method as in https://arxiv.org/abs/1512.00567.

    Args:
      chars_labels: ground truth ids of characters,
        shape=[batch_size, seq_length];
      weight: label-smoothing regularization weight.

    Returns:
      A tensor of smoothed labels with shape [batch_size, seq_length, num_char_classes].
    """
    one_hot_labels = tf.one_hot(
        chars_labels, depth=self._params.num_char_classes, axis=-1)
    pos_weight = 1.0 - weight
    neg_weight = weight / self._params.num_char_classes
    return one_hot_labels * pos_weight + neg_weight 
Author: ringringyi, Project: DOTA_models, Lines of code: 20, Source file: model.py
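
To make the smoothing arithmetic concrete, the following is a minimal eager-mode sketch of the same computation with plain constants standing in for self._params (4 character classes, weight 0.1); it is an illustration, not part of the original project.

import tensorflow as tf

num_classes = 4            # stands in for self._params.num_char_classes
weight = 0.1

chars_labels = tf.constant([[2, 0]])                 # [batch_size=1, seq_length=2]
one_hot_labels = tf.one_hot(chars_labels, depth=num_classes, axis=-1)

pos_weight = 1.0 - weight                            # 0.9 kept on the true class
neg_weight = weight / num_classes                    # 0.025 spread over every class
smoothed = one_hot_labels * pos_weight + neg_weight
# smoothed[0, 0] == [0.025, 0.025, 0.925, 0.025]; each row still sums to 1.0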

Example 3: visit_count_fc

# Required import: import tensorflow [as alias]
# Or: from tensorflow import one_hot [as alias]
def visit_count_fc(visit_count, last_visit, embed_neurons, wt_decay, fc_dropout,
                   is_training=True):
  with tf.variable_scope('embed_visit_count'):
    visit_count = tf.reshape(visit_count, shape=[-1])
    last_visit = tf.reshape(last_visit, shape=[-1])
    
    visit_count = tf.clip_by_value(visit_count, clip_value_min=-1,
                                   clip_value_max=15)
    last_visit = tf.clip_by_value(last_visit, clip_value_min=-1,
                                   clip_value_max=15)
    visit_count = tf.one_hot(visit_count, depth=16, axis=1, dtype=tf.float32,
                             on_value=10., off_value=0.)
    last_visit = tf.one_hot(last_visit, depth=16, axis=1, dtype=tf.float32,
                             on_value=10., off_value=0.)
    f = tf.concat([visit_count, last_visit], 1)
    x, _ = tf_utils.fc_network(
        f, neurons=embed_neurons, wt_decay=wt_decay, name='visit_count_embed',
        offset=0, batch_norm_param=None, dropout_ratio=fc_dropout,
        is_training=is_training)
  return x 
Author: ringringyi, Project: DOTA_models, Lines of code: 21, Source file: vision_baseline_lstm.py
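
The clip-then-one-hot trick above can be seen in isolation in the short sketch below (toy values only, eager mode): counts are clipped into [-1, 15], the clipped value selects one of 16 buckets scaled by on_value=10, and a clipped value of -1 falls out of range and therefore maps to an all-zero row.

import tensorflow as tf

visit_count = tf.constant([-3, 0, 7, 40])
visit_count = tf.clip_by_value(visit_count, clip_value_min=-1, clip_value_max=15)

# depth=16 buckets; on_value=10 makes the active bucket a large constant instead of 1.
embedded = tf.one_hot(visit_count, depth=16, axis=1, dtype=tf.float32,
                      on_value=10., off_value=0.)
# Rows: all zeros (-1), 10 at bucket 0, 10 at bucket 7, 10 at bucket 15 (40 was clipped).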

Example 4: one_hot_encoding

# Required import: import tensorflow [as alias]
# Or: from tensorflow import one_hot [as alias]
def one_hot_encoding(labels, num_classes=None):
  """One-hot encodes the multiclass labels.

  Example usage:
    labels = tf.constant([1, 4], dtype=tf.int32)
    one_hot = one_hot_encoding(labels, num_classes=5)
    one_hot.eval()    # evaluates to [0, 1, 0, 0, 1]

  Args:
    labels: A tensor of shape [None] corresponding to the labels.
    num_classes: Number of classes in the dataset.
  Returns:
    onehot_labels: a tensor of shape [num_classes] corresponding to the one hot
      encoding of the labels.
  Raises:
    ValueError: if num_classes is not specified.
  """
  with tf.name_scope('OneHotEncoding', values=[labels]):
    if num_classes is None:
      raise ValueError('num_classes must be specified')

    labels = tf.one_hot(labels, num_classes, 1, 0)
    return tf.reduce_max(labels, 0) 
Author: ringringyi, Project: DOTA_models, Lines of code: 25, Source file: preprocessor.py
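
The one_hot plus reduce_max combination turns a list of class ids into a single multi-hot vector; a minimal eager-mode sketch of just that step, using the same toy labels as the docstring:

import tensorflow as tf

labels = tf.constant([1, 4], dtype=tf.int32)
num_classes = 5

per_label = tf.one_hot(labels, num_classes, 1, 0)    # shape [2, 5], one row per label
multi_hot = tf.reduce_max(per_label, 0)              # shape [5]
# multi_hot == [0, 1, 0, 0, 1]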

Example 5: log_prob_action

# Required import: import tensorflow [as alias]
# Or: from tensorflow import one_hot [as alias]
def log_prob_action(self, action, logits,
                      sampling_dim, act_dim, act_type):
    """Calculate log-prob of action sampled from distribution."""
    if self.env_spec.is_discrete(act_type):
      act_log_prob = tf.reduce_sum(
          tf.one_hot(action, act_dim) * tf.nn.log_softmax(logits), -1)
    elif self.env_spec.is_box(act_type):
      means = logits[:, :sampling_dim // 2]
      std = logits[:, sampling_dim // 2:]
      act_log_prob = (- 0.5 * tf.log(2 * np.pi * tf.square(std))
                      - 0.5 * tf.square(action - means) / tf.square(std))
      act_log_prob = tf.reduce_sum(act_log_prob, -1)
    else:
      assert False

    return act_log_prob 
Author: ringringyi, Project: DOTA_models, Lines of code: 18, Source file: policy.py
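
For the discrete branch, multiplying a one-hot action by the log-softmax of the logits and summing is just an indexed lookup of the chosen action's log-probability; a minimal eager-mode sketch with toy values (not from the original project):

import tensorflow as tf

logits = tf.constant([[2.0, 0.5, -1.0]])     # [batch=1, act_dim=3]
action = tf.constant([0])                    # index of the sampled action

# one_hot(action) selects the log-softmax entry of the chosen action.
act_log_prob = tf.reduce_sum(
    tf.one_hot(action, 3) * tf.nn.log_softmax(logits), -1)
# Same value as tf.nn.log_softmax(logits)[0, 0]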

Example 6: _testBuildDefaultModel

# Required import: import tensorflow [as alias]
# Or: from tensorflow import one_hot [as alias]
def _testBuildDefaultModel(self):
    images = tf.to_float(np.random.rand(32, 28, 28, 1))
    labels = {}
    labels['classes'] = tf.one_hot(
        tf.to_int32(np.random.randint(0, 9, (32))), 10)

    params = {
        'use_separation': True,
        'layers_to_regularize': 'fc3',
        'weight_decay': 0.0,
        'ps_tasks': 1,
        'domain_separation_startpoint': 1,
        'alpha_weight': 1,
        'beta_weight': 1,
        'gamma_weight': 1,
        'recon_loss_name': 'sum_of_squares',
        'decoder_name': 'small_decoder',
        'encoder_name': 'default_encoder',
    }
    return images, labels, params 
Author: ringringyi, Project: DOTA_models, Lines of code: 22, Source file: dsn_test.py

Example 7: set_precision

# Required import: import tensorflow [as alias]
# Or: from tensorflow import one_hot [as alias]
def set_precision(predictions, labels,
                  weights_fn=common_layers.weights_nonzero):
  """Precision of set predictions.

  Args:
    predictions: A Tensor of scores of shape [batch, nlabels].
    labels: A Tensor of int32s giving true set elements,
      of shape [batch, seq_length].
    weights_fn: A function to weight the elements.

  Returns:
    hits: A Tensor of shape [batch, nlabels].
    weights: A Tensor of shape [batch, nlabels].
  """
  with tf.variable_scope("set_precision", values=[predictions, labels]):
    labels = tf.squeeze(labels, [2, 3])
    weights = weights_fn(labels)
    labels = tf.one_hot(labels, predictions.shape[-1])
    labels = tf.reduce_max(labels, axis=1)
    labels = tf.cast(labels, tf.bool)
    return tf.to_float(tf.equal(labels, predictions)), weights 
Author: akzaidi, Project: fine-lm, Lines of code: 23, Source file: metrics.py
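
The label-handling part of set_precision (and of set_recall below) converts a padded sequence of label ids into a per-class membership indicator; a minimal eager-mode sketch of just that conversion, with toy values:

import tensorflow as tf

labels = tf.constant([[3, 1, 1, 0]])             # [batch=1, seq_length=4] of class ids
nlabels = 5

membership = tf.one_hot(labels, nlabels)         # [batch, seq_length, nlabels]
membership = tf.reduce_max(membership, axis=1)   # [batch, nlabels]: is the class present?
membership = tf.cast(membership, tf.bool)
# membership[0] == [True, True, False, True, False]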

Example 8: set_recall

# Required import: import tensorflow [as alias]
# Or: from tensorflow import one_hot [as alias]
def set_recall(predictions, labels, weights_fn=common_layers.weights_nonzero):
  """Recall of set predictions.

  Args:
    predictions: A Tensor of scores of shape [batch, nlabels].
    labels: A Tensor of int32s giving true set elements,
      of shape [batch, seq_length].
    weights_fn: A function to weight the elements.

  Returns:
    hits: A Tensor of shape [batch, nlabels].
    weights: A Tensor of shape [batch, nlabels].
  """
  with tf.variable_scope("set_recall", values=[predictions, labels]):
    labels = tf.squeeze(labels, [2, 3])
    weights = weights_fn(labels)
    labels = tf.one_hot(labels, predictions.shape[-1])
    labels = tf.reduce_max(labels, axis=1)
    labels = tf.cast(labels, tf.bool)
    return tf.to_float(tf.equal(labels, predictions)), weights 
Author: akzaidi, Project: fine-lm, Lines of code: 22, Source file: metrics.py

Example 9: sigmoid_precision_one_hot

# Required import: import tensorflow [as alias]
# Or: from tensorflow import one_hot [as alias]
def sigmoid_precision_one_hot(logits, labels, weights_fn=None):
  """Calculate precision for a set, given one-hot labels and logits.

  Predictions are converted to one-hot,
  as predictions[example][arg-max(example)] = 1

  Args:
    logits: Tensor of size [batch-size, o=1, p=1, num-classes]
    labels: Tensor of size [batch-size, o=1, p=1, num-classes]
    weights_fn: Function that takes in labels and weighs examples (unused)
  Returns:
    precision (scalar), weights
  """
  with tf.variable_scope("sigmoid_precision_one_hot", values=[logits, labels]):
    del weights_fn
    num_classes = logits.shape[-1]
    predictions = tf.nn.sigmoid(logits)
    predictions = tf.argmax(predictions, -1)
    predictions = tf.one_hot(predictions, num_classes)
    _, precision = tf.metrics.precision(labels=labels, predictions=predictions)
    return precision, tf.constant(1.0) 
Author: akzaidi, Project: fine-lm, Lines of code: 23, Source file: metrics.py

Example 10: sigmoid_recall_one_hot

# Required import: import tensorflow [as alias]
# Or: from tensorflow import one_hot [as alias]
def sigmoid_recall_one_hot(logits, labels, weights_fn=None):
  """Calculate recall for a set, given one-hot labels and logits.

  Predictions are converted to one-hot,
  as predictions[example][arg-max(example)] = 1

  Args:
    logits: Tensor of size [batch-size, o=1, p=1, num-classes]
    labels: Tensor of size [batch-size, o=1, p=1, num-classes]
    weights_fn: Function that takes in labels and weighs examples (unused)
  Returns:
    recall (scalar), weights
  """
  with tf.variable_scope("sigmoid_recall_one_hot", values=[logits, labels]):
    del weights_fn
    num_classes = logits.shape[-1]
    predictions = tf.nn.sigmoid(logits)
    predictions = tf.argmax(predictions, -1)
    predictions = tf.one_hot(predictions, num_classes)
    _, recall = tf.metrics.recall(labels=labels, predictions=predictions)
    return recall, tf.constant(1.0) 
Author: akzaidi, Project: fine-lm, Lines of code: 23, Source file: metrics.py

Example 11: testSequenceEditDistanceMetric

# Required import: import tensorflow [as alias]
# Or: from tensorflow import one_hot [as alias]
def testSequenceEditDistanceMetric(self):
    predictions = np.array([[3, 4, 5, 1, 0, 0],
                            [2, 1, 3, 4, 0, 0],
                            [2, 1, 3, 4, 0, 0]])
    # Targets are just a bit different:
    #  - first sequence has a different prediction
    #  - second sequence has a different prediction and one extra step
    #  - third sequence is identical
    targets = np.array([[5, 4, 5, 1, 0, 0],
                        [2, 5, 3, 4, 1, 0],
                        [2, 1, 3, 4, 0, 0]])
    # Reshape to match expected input format by metric fns.
    predictions = np.reshape(predictions, [3, 6, 1, 1])
    targets = np.reshape(targets, [3, 6, 1, 1])
    with self.test_session() as session:
      scores, weight = metrics.sequence_edit_distance(
          tf.one_hot(predictions, depth=6, dtype=tf.float32),
          tf.constant(targets, dtype=tf.int32))
      session.run(tf.global_variables_initializer())
      actual_scores, actual_weight = session.run([scores, weight])
    self.assertAlmostEqual(actual_scores, 3.0 / 13)
    self.assertEqual(actual_weight, 13) 
Author: akzaidi, Project: fine-lm, Lines of code: 24, Source file: metrics_test.py

Example 12: testMultilabelMatch3

# Required import: import tensorflow [as alias]
# Or: from tensorflow import one_hot [as alias]
def testMultilabelMatch3(self):
    predictions = np.random.randint(1, 5, size=(100, 1, 1, 1))
    targets = np.random.randint(1, 5, size=(100, 10, 1, 1))
    weights = np.random.randint(0, 2, size=(100, 1, 1, 1))
    targets *= weights

    predictions_repeat = np.repeat(predictions, 10, axis=1)
    expected = (predictions_repeat == targets).astype(float)
    expected = np.sum(expected, axis=(1, 2, 3))
    expected = np.minimum(expected / 3.0, 1.)
    expected = np.sum(expected * weights[:, 0, 0, 0]) / weights.shape[0]
    with self.test_session() as session:
      scores, weights_ = metrics.multilabel_accuracy_match3(
          tf.one_hot(predictions, depth=5, dtype=tf.float32),
          tf.constant(targets, dtype=tf.int32))
      a, a_op = tf.metrics.mean(scores, weights_)
      session.run(tf.local_variables_initializer())
      session.run(tf.global_variables_initializer())
      _ = session.run(a_op)
      actual = session.run(a)
    self.assertAlmostEqual(actual, expected, places=6) 
Author: akzaidi, Project: fine-lm, Lines of code: 23, Source file: metrics_test.py

Example 13: testRougeLMetricE2E

# Required import: import tensorflow [as alias]
# Or: from tensorflow import one_hot [as alias]
def testRougeLMetricE2E(self):
    vocab_size = 4
    batch_size = 12
    seq_length = 12
    predictions = tf.one_hot(
        np.random.randint(vocab_size, size=(batch_size, seq_length, 1, 1)),
        depth=4,
        dtype=tf.float32)
    targets = np.random.randint(4, size=(12, 12, 1, 1))
    with self.test_session() as session:
      scores, _ = rouge.rouge_l_fscore(
          predictions,
          tf.constant(targets, dtype=tf.int32))
      a = tf.reduce_mean(scores)
      session.run(tf.global_variables_initializer())
      session.run(a) 
Author: akzaidi, Project: fine-lm, Lines of code: 18, Source file: rouge_test.py

Example 14: vq_nearest_neighbor

# Required import: import tensorflow [as alias]
# Or: from tensorflow import one_hot [as alias]
def vq_nearest_neighbor(x, hparams):
  """Find the nearest element in means to elements in x."""
  bottleneck_size = 2**hparams.bottleneck_bits
  means = hparams.means
  x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
  means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
  scalar_prod = tf.matmul(x, means, transpose_b=True)
  dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod
  if hparams.bottleneck_kind == "em":
    x_means_idx = tf.multinomial(-dist, num_samples=hparams.num_samples)
    x_means_hot = tf.one_hot(
        x_means_idx, depth=bottleneck_size)
    x_means_hot = tf.reduce_mean(x_means_hot, axis=1)
  else:
    x_means_idx = tf.argmax(-dist, axis=-1)
    x_means_hot = tf.one_hot(x_means_idx, depth=bottleneck_size)
  x_means = tf.matmul(x_means_hot, means)
  e_loss = tf.reduce_mean(tf.square(x - tf.stop_gradient(x_means)))
  return x_means_hot, e_loss 
Author: akzaidi, Project: fine-lm, Lines of code: 21, Source file: transformer_nat.py
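
The hard-assignment path (the else branch) reduces to: squared distances to the codebook, argmax of the negated distances, a one-hot assignment, and a matmul back to the code vectors. A minimal eager-mode sketch with a toy 4-entry codebook, where plain constants stand in for hparams:

import tensorflow as tf

means = tf.constant([[0., 0.], [1., 0.], [0., 1.], [1., 1.]])   # 4 codes of dimension 2
x = tf.constant([[0.9, 0.1], [0.2, 0.8]])

x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * tf.matmul(x, means, transpose_b=True)

x_means_idx = tf.argmax(-dist, axis=-1)             # nearest code per row: [1, 2]
x_means_hot = tf.one_hot(x_means_idx, depth=4)      # hard one-hot assignment
x_means = tf.matmul(x_means_hot, means)             # quantized vectors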

Example 15: fill_memory_slot

# Required import: import tensorflow [as alias]
# Or: from tensorflow import one_hot [as alias]
def fill_memory_slot(memory, value, index):
  """Fills the memory slot at a particular index with the given value.

  Args:
    memory: a 4-d tensor [memory_size, batch, length, channel] containing
      the state of all steps
    value: a 3-d tensor [batch, length, channel] as the state
    index: integer in [0, memory_size)

  Returns:
    filled memory

  """
  mask = tf.to_float(
      tf.one_hot(index,
                 tf.shape(memory)[0])[:, None, None, None])
  fill_memory = (1 - mask) * memory + mask * value[None, ...]
  return fill_memory 
Author: akzaidi, Project: fine-lm, Lines of code: 20, Source file: universal_transformer_util.py
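
The one-hot index here acts as a broadcastable write mask over the memory-size axis; a minimal eager-mode adaptation with toy shapes (tf.cast is used in place of the TF 1.x tf.to_float):

import tensorflow as tf

memory = tf.zeros([3, 1, 2, 1])                # [memory_size, batch, length, channel]
value = tf.ones([1, 2, 1])                     # [batch, length, channel]
index = 1

mask = tf.cast(tf.one_hot(index, tf.shape(memory)[0]), tf.float32)[:, None, None, None]
filled = (1 - mask) * memory + mask * value[None, ...]
# Only memory[1] is overwritten with value; the other slots keep their contents.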


Note: The tensorflow.one_hot examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and the copyright of the source code belongs to its original authors. Please refer to the corresponding project's license before distributing or using the code; do not reproduce without permission.