

Python v1.scatter_nd Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.scatter_nd method in Python. If you are wondering how to call v1.scatter_nd, what its arguments mean, or what real-world uses of it look like, the curated code examples below may help. You can also explore further usage examples from the tensorflow.compat.v1 module that this method belongs to.


The following presents 6 code examples of the v1.scatter_nd method, sorted by popularity by default.
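
Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the values are made up) of what tf.scatter_nd itself does: it writes `updates` into a zero-initialized tensor of the requested `shape` at the positions given by `indices`.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

indices = tf.constant([[1], [3]])               # write into rows 1 and 3
updates = tf.constant([[10, 10], [30, 30]])     # one update row per index
result = tf.scatter_nd(indices, updates, shape=[5, 2])

with tf.Session() as sess:
  print(sess.run(result))
  # [[ 0  0]
  #  [10 10]
  #  [ 0  0]
  #  [30 30]
  #  [ 0  0]]

All six examples below build on this behavior: positions not named in `indices` stay zero, which makes scatter_nd a natural tool for padding, masking, and recombining partial tensors.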

Example 1: restore

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import scatter_nd [as alias]
def restore(self, x):
    """Add padding back to the given tensor.

    Args:
      x (tf.Tensor): of shape [dim_compressed,...]

    Returns:
      a tensor of shape [dim_origin,...] with dim_origin >= dim_compressed. The
      dim is restored from the original reference tensor.
    """
    with tf.name_scope("pad_reduce/restore"):
      x = tf.scatter_nd(
          indices=self.nonpad_ids,
          updates=x,
          shape=tf.concat([self.dim_origin, tf.shape(x)[1:]], axis=0),
      )
    return x 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 19, Source file: expert_utils.py
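
The same restore step in isolation, as a hedged standalone sketch with made-up values for nonpad_ids and dim_origin (in the original tensor2tensor class these are derived from the padding mask of a reference tensor):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

nonpad_ids = tf.constant([[0], [2], [3]])            # positions that were kept
dim_origin = tf.constant([5])                        # original (padded) length
x = tf.constant([[1., 1.], [2., 2.], [3., 3.]])      # compressed tensor

restored = tf.scatter_nd(
    indices=nonpad_ids,
    updates=x,
    shape=tf.concat([dim_origin, tf.shape(x)[1:]], axis=0))

with tf.Session() as sess:
  print(sess.run(restored))
  # rows 0, 2 and 3 hold the compressed values; rows 1 and 4 are zero padding again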

Example 2: set_final

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import scatter_nd [as alias]
def set_final(sequence, sequence_length, values, time_major=False):
  """Sets the final values in a batch of sequences, and clears those after."""
  sequence_batch_major = (
      sequence if not time_major else tf.transpose(sequence, [1, 0, 2]))
  final_index = _get_final_index(sequence_length, time_major=False)
  mask = tf.sequence_mask(
      tf.maximum(0, sequence_length - 1),
      maxlen=sequence_batch_major.shape[1],
      dtype=tf.float32)
  sequence_batch_major = (
      tf.expand_dims(mask, axis=-1) * sequence_batch_major +
      tf.scatter_nd(final_index, values, tf.shape(sequence_batch_major)))
  if time_major:
    return tf.transpose(sequence_batch_major, [1, 0, 2])
  return sequence_batch_major 
Developer ID: magenta, Project: magenta, Lines of code: 17, Source file: lstm_utils.py
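
A self-contained sketch of the mask-then-scatter trick used above, with a hand-built final_index (the original relies on a _get_final_index helper that is not shown here); the shapes and values are made up for illustration.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

sequence = tf.ones([2, 4, 3])                 # [batch, time, depth]
sequence_length = tf.constant([2, 4])
values = tf.fill([2, 3], 9.0)                 # value to place at each sequence end

# (batch index, last valid step) pairs, i.e. what _get_final_index computes
final_index = tf.stack([tf.range(2), sequence_length - 1], axis=1)

mask = tf.sequence_mask(
    tf.maximum(0, sequence_length - 1), maxlen=4, dtype=tf.float32)
result = (tf.expand_dims(mask, axis=-1) * sequence +
          tf.scatter_nd(final_index, values, tf.shape(sequence)))

with tf.Session() as sess:
  print(sess.run(result))
  # batch 0: step 0 keeps its value, step 1 becomes 9.0, steps 2-3 are cleared
  # batch 1: steps 0-2 keep their values, step 3 becomes 9.0

The mask zeroes out the final step and everything after it, and scatter_nd then drops `values` into exactly the final-step slots, leaving all other positions untouched.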

Example 3: __call__

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import scatter_nd [as alias]
def __call__(self, shape, dtype=None, partition_info=None):
    del partition_info  # unused
    assert len(shape) > 2, shape

    support = tuple(shape[:-2]) + (1, 1)
    indices = [[s // 2 for s in support]]
    updates = tf.constant([self.gain], dtype=dtype)
    kernel = tf.scatter_nd(indices, updates, support)

    assert shape[-2] == shape[-1], shape
    if shape[-1] != 1:
      kernel *= tf.eye(shape[-1], dtype=dtype)

    return kernel 
Developer ID: tensorflow, Project: compression, Lines of code: 16, Source file: initializers.py
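
A standalone sketch of what this initializer produces: a convolution kernel that is zero everywhere except for a single tap at the spatial centre (scaled by the gain), so the layer starts out as an identity-like mapping. The kernel shape and gain below are made up for illustration.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

shape = (5, 5, 1, 1)                      # [height, width, in_channels, out_channels]
gain = 1.0

support = tuple(shape[:-2]) + (1, 1)      # (5, 5, 1, 1)
indices = [[s // 2 for s in support]]     # single centre tap: [[2, 2, 0, 0]]
updates = tf.constant([gain], dtype=tf.float32)
kernel = tf.scatter_nd(indices, updates, support)

with tf.Session() as sess:
  print(sess.run(kernel)[:, :, 0, 0])     # 5x5 grid of zeros with 1.0 at (2, 2)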

Example 4: _pad_to_full_keypoint_dim

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import scatter_nd [as alias]
def _pad_to_full_keypoint_dim(keypoint_coords, keypoint_scores, keypoint_inds,
                              num_total_keypoints):
  """Scatter keypoint elements into tensors with full keypoints dimension.

  Args:
    keypoint_coords: a [batch_size, num_instances, num_keypoints, 2] float32
      tensor.
    keypoint_scores: a [batch_size, num_instances, num_keypoints] float32
      tensor.
    keypoint_inds: a list of integers that indicate the keypoint indices for
      this specific keypoint class. These indices are used to scatter into
      tensors that have a `num_total_keypoints` dimension.
    num_total_keypoints: The total number of keypoints that this model predicts.

  Returns:
    A tuple with
    keypoint_coords_padded: a
      [batch_size, num_instances, num_total_keypoints, 2] float32 tensor.
    keypoint_scores_padded: a [batch_size, num_instances, num_total_keypoints]
      float32 tensor.
  """
  batch_size, num_instances, _, _ = (
      shape_utils.combined_static_and_dynamic_shape(keypoint_coords))
  kpt_coords_transposed = tf.transpose(keypoint_coords, [2, 0, 1, 3])
  kpt_scores_transposed = tf.transpose(keypoint_scores, [2, 0, 1])
  kpt_inds_tensor = tf.expand_dims(keypoint_inds, axis=-1)
  kpt_coords_scattered = tf.scatter_nd(
      indices=kpt_inds_tensor,
      updates=kpt_coords_transposed,
      shape=[num_total_keypoints, batch_size, num_instances, 2])
  kpt_scores_scattered = tf.scatter_nd(
      indices=kpt_inds_tensor,
      updates=kpt_scores_transposed,
      shape=[num_total_keypoints, batch_size, num_instances])
  keypoint_coords_padded = tf.transpose(kpt_coords_scattered, [1, 2, 0, 3])
  keypoint_scores_padded = tf.transpose(kpt_scores_scattered, [1, 2, 0])
  return keypoint_coords_padded, keypoint_scores_padded 
Developer ID: tensorflow, Project: models, Lines of code: 39, Source file: center_net_meta_arch.py
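
A reduced sketch of the transpose, scatter_nd, transpose pattern used above, scattering a subset of keypoint classes into the full keypoint dimension; the shapes, coordinates and indices are made up for illustration.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# [batch_size=1, num_instances=2, num_keypoints=2, 2]: coordinates for keypoint
# classes 1 and 3 of a model that predicts 4 keypoints in total.
keypoint_coords = tf.constant(
    [[[[0.1, 0.1], [0.3, 0.3]],
      [[0.5, 0.5], [0.7, 0.7]]]])
keypoint_inds = tf.constant([1, 3])
num_total_keypoints = 4

coords_t = tf.transpose(keypoint_coords, [2, 0, 1, 3])   # keypoint axis first
scattered = tf.scatter_nd(
    indices=tf.expand_dims(keypoint_inds, axis=-1),
    updates=coords_t,
    shape=[num_total_keypoints, 1, 2, 2])
padded = tf.transpose(scattered, [1, 2, 0, 3])           # back to original layout

with tf.Session() as sess:
  print(sess.run(padded).shape)   # (1, 2, 4, 2); keypoints 0 and 2 are all zeros

Moving the target axis to the front is necessary because scatter_nd only scatters along the leading dimensions; Example 5 below applies the same trick along the instance dimension instead.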

Example 5: _pad_to_full_instance_dim

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import scatter_nd [as alias]
def _pad_to_full_instance_dim(keypoint_coords, keypoint_scores, instance_inds,
                              max_instances):
  """Scatter keypoint elements into tensors with full instance dimension.

  Args:
    keypoint_coords: a [batch_size, num_instances, num_keypoints, 2] float32
      tensor.
    keypoint_scores: a [batch_size, num_instances, num_keypoints] float32
      tensor.
    instance_inds: a list of integers that indicate the instance indices for
      these keypoints. These indices are used to scatter into tensors
      that have a `max_instances` dimension.
    max_instances: The maximum number of instances detected by the model.

  Returns:
    A tuple with
    keypoint_coords_padded: a [batch_size, max_instances, num_keypoints, 2]
      float32 tensor.
    keypoint_scores_padded: a [batch_size, max_instances, num_keypoints]
      float32 tensor.
  """
  batch_size, _, num_keypoints, _ = (
      shape_utils.combined_static_and_dynamic_shape(keypoint_coords))
  kpt_coords_transposed = tf.transpose(keypoint_coords, [1, 0, 2, 3])
  kpt_scores_transposed = tf.transpose(keypoint_scores, [1, 0, 2])
  instance_inds = tf.expand_dims(instance_inds, axis=-1)
  kpt_coords_scattered = tf.scatter_nd(
      indices=instance_inds,
      updates=kpt_coords_transposed,
      shape=[max_instances, batch_size, num_keypoints, 2])
  kpt_scores_scattered = tf.scatter_nd(
      indices=instance_inds,
      updates=kpt_scores_transposed,
      shape=[max_instances, batch_size, num_keypoints])
  keypoint_coords_padded = tf.transpose(kpt_coords_scattered, [1, 0, 2, 3])
  keypoint_scores_padded = tf.transpose(kpt_scores_scattered, [1, 0, 2])
  return keypoint_coords_padded, keypoint_scores_padded 
Developer ID: tensorflow, Project: models, Lines of code: 39, Source file: center_net_meta_arch.py

Example 6: next_inputs

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import scatter_nd [as alias]
def next_inputs(self, time, outputs, state, sample_ids, name=None):
    with tf.name_scope(name, "ScheduledOutputTrainingHelperNextInputs",
                       [time, outputs, state, sample_ids]):
      (finished, base_next_inputs, state) = (
          super(ScheduledOutputTrainingHelper, self).next_inputs(
              time=time,
              outputs=outputs,
              state=state,
              sample_ids=sample_ids,
              name=name))
      sample_ids = tf.cast(sample_ids, tf.bool)

      def maybe_sample():
        """Perform scheduled sampling."""

        def maybe_concatenate_auxiliary_inputs(outputs_, indices=None):
          """Concatenate outputs with auxiliary inputs, if they exist."""
          if self._auxiliary_input_tas is None:
            return outputs_

          next_time = time + 1
          auxiliary_inputs = tf.nest.map_structure(
              lambda ta: ta.read(next_time), self._auxiliary_input_tas)
          if indices is not None:
            auxiliary_inputs = tf.gather_nd(auxiliary_inputs, indices)
          return tf.nest.map_structure(
              lambda x, y: tf.concat((x, y), -1),
              outputs_, auxiliary_inputs)

        if self._next_inputs_fn is None:
          return tf.where(
              sample_ids, maybe_concatenate_auxiliary_inputs(outputs),
              base_next_inputs)

        where_sampling = tf.cast(
            tf.where(sample_ids), tf.int32)
        where_not_sampling = tf.cast(
            tf.where(tf.logical_not(sample_ids)), tf.int32)
        outputs_sampling = tf.gather_nd(outputs, where_sampling)
        inputs_not_sampling = tf.gather_nd(base_next_inputs,
                                           where_not_sampling)
        sampled_next_inputs = maybe_concatenate_auxiliary_inputs(
            self._next_inputs_fn(outputs_sampling), where_sampling)

        base_shape = tf.shape(base_next_inputs)
        return (tf.scatter_nd(indices=where_sampling,
                              updates=sampled_next_inputs,
                              shape=base_shape)
                + tf.scatter_nd(indices=where_not_sampling,
                                updates=inputs_not_sampling,
                                shape=base_shape))

      all_finished = tf.reduce_all(finished)
      no_samples = tf.logical_not(tf.reduce_any(sample_ids))
      next_inputs = tf.cond(
          tf.logical_or(all_finished, no_samples),
          lambda: base_next_inputs, maybe_sample)
      return (finished, next_inputs, state) 
Developer ID: magenta, Project: magenta, Lines of code: 60, Source file: seq2seq.py
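
A reduced sketch of the core recombination trick inside maybe_sample(): split a batch with a boolean mask via gather_nd, process the two groups separately, then reassemble them into one tensor with two complementary scatter_nd calls. The values, and the multiplication standing in for self._next_inputs_fn, are made up for illustration.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

base_next_inputs = tf.constant([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
sample_ids = tf.constant([True, False, True, False])

where_sampling = tf.cast(tf.where(sample_ids), tf.int32)
where_not_sampling = tf.cast(tf.where(tf.logical_not(sample_ids)), tf.int32)

# Stand-in for self._next_inputs_fn: only the sampled rows are transformed.
sampled_next_inputs = tf.gather_nd(base_next_inputs, where_sampling) * 10.
inputs_not_sampling = tf.gather_nd(base_next_inputs, where_not_sampling)

base_shape = tf.shape(base_next_inputs)
next_inputs = (tf.scatter_nd(where_sampling, sampled_next_inputs, base_shape) +
               tf.scatter_nd(where_not_sampling, inputs_not_sampling, base_shape))

with tf.Session() as sess:
  print(sess.run(next_inputs))
  # [[10. 10.]
  #  [ 2.  2.]
  #  [30. 30.]
  #  [ 4.  4.]]

Because the two index sets are disjoint and together cover every row, adding the two scatter results behaves like an element-wise select, while still allowing the sampled rows to be transformed separately first.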


Note: The tensorflow.compat.v1.scatter_nd method examples in this article were compiled by 純淨天空 from open-source code and documentation hosting platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors. Please consult the corresponding project's license before distributing or using the code, and do not repost without permission.