

Python v1.unstack Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.unstack method in Python, drawn from open-source projects. If you are asking what v1.unstack does, how to call it, or what real uses of it look like, the curated examples below should help. You can also explore further usage examples from the tensorflow.compat.v1 module.


Fifteen code examples of the v1.unstack method are shown below, sorted by popularity.
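Before diving into the examples, a quick primer on the op itself: tf.unstack splits a rank-R tensor into a Python list of rank-(R-1) tensors along the chosen axis. The minimal sketch below is our own illustration, not taken from any of the projects cited here:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

x = tf.constant([[1, 2, 3],
                 [4, 5, 6]])          # shape [2, 3]
rows = tf.unstack(x, axis=0)          # two tensors of shape [3]
cols = tf.unstack(x, axis=1)          # three tensors of shape [2]

with tf.Session() as sess:
    print(sess.run(rows))  # [array([1, 2, 3]), array([4, 5, 6])]
    print(sess.run(cols))  # [array([1, 4]), array([2, 5]), array([3, 6])]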

Example 1: __init__

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import unstack [as alias]
def __init__(self, num_experts, gates):
    """Create a SparseDispatcher.

    Args:
      num_experts: an integer.
      gates: a `Tensor` of shape `[batch_size, num_experts]`.

    Returns:
      a SparseDispatcher
    """
    self._gates = gates
    self._num_experts = num_experts

    where = tf.to_int32(tf.where(tf.transpose(gates) > 0))
    self._expert_index, self._batch_index = tf.unstack(where, num=2, axis=1)
    self._part_sizes_tensor = tf.reduce_sum(tf.to_int32(gates > 0), [0])
    self._nonzero_gates = tf.gather(
        tf.reshape(self._gates, [-1]),
        self._batch_index * num_experts + self._expert_index) 
Developer: tensorflow, Project: tensor2tensor, Lines: 21, Source: expert_utils.py
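The where/unstack idiom in this constructor is worth isolating: tf.where on a boolean matrix yields an [nnz, 2] tensor of coordinates, and tf.unstack with num=2, axis=1 splits it into the two coordinate vectors. A standalone sketch with made-up gate values (our illustration, not tensor2tensor code):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

gates = tf.constant([[0.7, 0.0],       # [batch_size=3, num_experts=2]
                     [0.0, 0.3],
                     [0.5, 0.5]])
# Transpose so rows index experts, then collect nonzero coordinates.
where = tf.to_int32(tf.where(tf.transpose(gates) > 0))   # shape [nnz, 2]
expert_index, batch_index = tf.unstack(where, num=2, axis=1)

with tf.Session() as sess:
    print(sess.run(expert_index))  # [0 0 1 1]
    print(sess.run(batch_index))   # [0 2 1 2]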

Example 2: combine

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import unstack [as alias]
def combine(self, expert_out, multiply_by_gates=True):
    """Sum together the expert output, multiplied by the corresponding gates.

    Args:
      expert_out: a list of `num_experts` `Tensor`s, each with shape
        `[expert_batch_size_i, <extra_output_dims>]`.
      multiply_by_gates: a boolean.

    Returns:
      a list of num_datashards `Tensor`s with shapes
        `[batch_size[d], <extra_output_dims>]`.
    """
    expert_part_sizes = tf.unstack(
        tf.stack([d.part_sizes for d in self._dispatchers]),
        num=self._ep.n,
        axis=1)
    # list of lists of shape [num_experts][num_datashards]
    expert_output_parts = self._ep(tf.split, expert_out, expert_part_sizes)
    expert_output_parts_t = transpose_list_of_lists(expert_output_parts)
    def my_combine(dispatcher, parts):
      return dispatcher.combine(
          common_layers.convert_gradient_to_tensor(tf.concat(parts, 0)),
          multiply_by_gates=multiply_by_gates)
    return self._dp(my_combine, self._dispatchers, expert_output_parts_t) 
Developer: tensorflow, Project: tensor2tensor, Lines: 26, Source: expert_utils.py

Example 3: get_center_coordinates_and_sizes

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import unstack [as alias]
def get_center_coordinates_and_sizes(self, scope=None):
    """Computes the center coordinates, height and width of the boxes.

    Args:
      scope: name scope of the function.

    Returns:
      a list of 4 1-D tensors [ycenter, xcenter, height, width].
    """
    with tf.name_scope(scope, 'get_center_coordinates_and_sizes'):
      box_corners = self.get()
      ymin, xmin, ymax, xmax = tf.unstack(tf.transpose(box_corners))
      width = xmax - xmin
      height = ymax - ymin
      ycenter = ymin + height / 2.
      xcenter = xmin + width / 2.
      return [ycenter, xcenter, height, width] 
Developer: JunweiLiang, Project: Object_Detection_Tracking, Lines: 19, Source: box_list.py
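The same corner-to-center conversion works on a raw corners tensor without the BoxList wrapper; the box values below are invented for illustration:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

boxes = tf.constant([[0.0, 0.0, 2.0, 4.0],    # [ymin, xmin, ymax, xmax]
                     [1.0, 1.0, 3.0, 5.0]])
ymin, xmin, ymax, xmax = tf.unstack(tf.transpose(boxes))
ycenter = ymin + (ymax - ymin) / 2.
xcenter = xmin + (xmax - xmin) / 2.

with tf.Session() as sess:
    print(sess.run([ycenter, xcenter]))  # [array([1., 2.]), array([2., 3.])]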

Example 4: ApplyPhotometricImageDistortionsCheap

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import unstack [as alias]
def ApplyPhotometricImageDistortionsCheap(images):
  """Apply photometric distortions to the input images.

  Args:
    images: Tensor of shape [batch_size, h, w, 3] containing a batch of images
      to apply the random photometric distortions to. Assumed to be normalized
      to range (0, 1), float32 encoding.
  Returns:
    images: Tensor of shape [batch_size, h, w, 3] containing a batch of images
      resulting from applying random photometric distortions to the inputs.
  """
  with tf.name_scope('photometric_distortion'):
    channels = tf.unstack(images, axis=-1)
    # Per-channel random gamma correction.
    # Lower gamma = brighter image, decreased contrast.
    # Higher gamma = darker image, increased contrast.
    gamma_corrected = [c**tf.random_uniform([], 0.5, 1.5) for c in channels]
    images = tf.stack(gamma_corrected, axis=-1)
    return images 
Developer: google-research, Project: tensor2robot, Lines: 22, Source: image_transformations.py

Example 5: preprocess

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import unstack [as alias]
def preprocess(self, inputs):
    """Converts a batch of unscaled images to a scale suitable for the model.

    This method normalizes the image using the given `channel_means` and
    `channels_stds` values at initialization time while optionally flipping
    the channel order if `bgr_ordering` is set.

    Args:
      inputs: a [batch, height, width, channels] float32 tensor

    Returns:
      outputs: a [batch, height, width, channels] float32 tensor

    """

    if self._bgr_ordering:
      red, green, blue = tf.unstack(inputs, axis=3)
      inputs = tf.stack([blue, green, red], axis=3)

    channel_means = tf.reshape(tf.constant(self._channel_means),
                               [1, 1, 1, -1])
    channel_stds = tf.reshape(tf.constant(self._channel_stds),
                              [1, 1, 1, -1])

    return (inputs - channel_means)/channel_stds 
Developer: tensorflow, Project: models, Lines: 27, Source: center_net_meta_arch.py
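The unstack/stack pair in preprocess is a generic way to reorder channels. A minimal RGB-to-BGR sketch on a dummy batch (our illustration):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

images = tf.random_uniform([2, 4, 4, 3])          # [batch, h, w, 3], RGB
red, green, blue = tf.unstack(images, axis=3)     # three [2, 4, 4] tensors
bgr = tf.stack([blue, green, red], axis=3)        # back to [2, 4, 4, 3], BGR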

Example 6: clip_to_window

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import unstack [as alias]
def clip_to_window(keypoints, window, scope=None):
  """Clips keypoints to a window.

  This op clips any input keypoints to a window.

  Args:
    keypoints: a tensor of shape [num_instances, num_keypoints, 2]
    window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
      window to which the op should clip the keypoints.
    scope: name scope.

  Returns:
    new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
  """
  with tf.name_scope(scope, 'ClipToWindow'):
    y, x = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
    y = tf.maximum(tf.minimum(y, win_y_max), win_y_min)
    x = tf.maximum(tf.minimum(x, win_x_max), win_x_min)
    new_keypoints = tf.concat([y, x], 2)
    return new_keypoints 
Developer: tensorflow, Project: models, Lines: 23, Source: keypoint_ops.py
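Assuming the clip_to_window function from Example 6 is in scope, a usage sketch with invented keypoints, one of which lies outside the unit window:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

keypoints = tf.constant([[[0.25, 0.50],
                          [1.40, -0.20]]])         # [1 instance, 2 keypoints, (y, x)]
window = tf.constant([0.0, 0.0, 1.0, 1.0])         # [y_min, x_min, y_max, x_max]
clipped = clip_to_window(keypoints, window)

with tf.Session() as sess:
    print(sess.run(clipped))  # [[[0.25 0.5 ]
                              #   [1.   0.  ]]]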

Example 7: clip_to_window

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import unstack [as alias]
def clip_to_window(dp_surface_coords, window, scope=None):
  """Clips DensePose points to a window.

  This op clips any input DensePose points to a window.

  Args:
    dp_surface_coords: a tensor of shape [num_instances, num_points, 4] with
      DensePose surface coordinates in (y, x, v, u) format.
    window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
      window to which the op should clip the keypoints.
    scope: name scope.

  Returns:
    new_dp_surface_coords: a tensor of shape [num_instances, num_points, 4].
  """
  with tf.name_scope(scope, 'DensePoseClipToWindow'):
    y, x, v, u = tf.split(value=dp_surface_coords, num_or_size_splits=4, axis=2)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
    y = tf.maximum(tf.minimum(y, win_y_max), win_y_min)
    x = tf.maximum(tf.minimum(x, win_x_max), win_x_min)
    new_dp_surface_coords = tf.concat([y, x, v, u], 2)
    return new_dp_surface_coords 
Developer: tensorflow, Project: models, Lines: 24, Source: densepose_ops.py

Example 8: test_tensor_array_unstack

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import unstack [as alias]
def test_tensor_array_unstack():
    def run(dtype_str, input_shape, infer_shape):
        if package_version.parse(tf.VERSION) >= package_version.parse('1.15.0'):
            pytest.skip("Needs fixing for tflite >= 1.15.0")

        with tf.Graph().as_default():
            dtype = tf_dtypes[dtype_str]
            t = tf.constant(np.random.choice([0, 1, 2, 3],
                                             size=input_shape).astype(dtype.name))
            ta1 = tf.TensorArray(dtype=dtype, infer_shape=infer_shape, size=input_shape[0])
            ta2 = ta1.unstack(t)
            out0 = ta2.size()   # creates the 'TensorArraySizeV3:0' node checked below
            out1 = ta2.read(0)  # creates the 'TensorArrayReadV3:0' node checked below
            compare_tf_with_tvm([], [], 'TensorArraySizeV3:0', mode='debug')
            compare_tf_with_tvm([], [], 'TensorArrayReadV3:0', mode='debug')
    for dtype in ["float32", "int8"]:
        run(dtype, (5,), False)
        run(dtype, (5, 5), True)
        run(dtype, (5, 5, 5), False)
        run(dtype, (5, 5, 5, 5), True)


Developer: apache, Project: incubator-tvm, Lines: 27, Source: test_forward.py

Example 9: _test_unstack

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import unstack [as alias]
def _test_unstack(ip_shape, axis, dtype):
    np_data = np.random.uniform(-5, 5, size=ip_shape).astype(dtype)

    tf.reset_default_graph()
    with tf.Graph().as_default():
        in_data = tf.placeholder(dtype, ip_shape, name="in_data")
        unstack = tf.unstack(in_data, axis=axis)

        compare_tf_with_tvm([np_data], ['in_data:0'], [n.name for n in unstack])

    tf.reset_default_graph()
    with tf.Graph().as_default():
        in_data = tf.placeholder(dtype, ip_shape, name="in_data")
        tf.stack(tf.unstack(in_data, axis=axis), axis=axis)

        compare_tf_with_tvm([np_data], ['in_data:0'], 'stack:0') 
Developer: apache, Project: incubator-tvm, Lines: 18, Source: test_forward.py

Example 10: _rowwise_unsorted_segment_sum

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import unstack [as alias]
def _rowwise_unsorted_segment_sum(values, indices, n):
  """UnsortedSegmentSum on each row.

  Args:
    values: a `Tensor` with shape `[batch_size, k]`.
    indices: an integer `Tensor` with shape `[batch_size, k]`.
    n: an integer.
  Returns:
    A `Tensor` with the same type as `values` and shape `[batch_size, n]`.
  """
  batch, k = tf.unstack(tf.shape(indices), num=2)
  indices_flat = tf.reshape(indices, [-1]) + tf.div(tf.range(batch * k), k) * n
  ret_flat = tf.unsorted_segment_sum(
      tf.reshape(values, [-1]), indices_flat, batch * n)
  return tf.reshape(ret_flat, [batch, n]) 
Developer: tensorflow, Project: tensor2tensor, Lines: 17, Source: expert_utils.py
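A small worked check of the helper above, assuming _rowwise_unsorted_segment_sum from Example 10 is in scope (values made up): each row sums its k values into n buckets chosen by indices, independently of the other rows.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

values = tf.constant([[1.0, 2.0],
                      [3.0, 4.0]])     # [batch_size=2, k=2]
indices = tf.constant([[0, 0],
                       [2, 1]])        # target bucket per value, each < n
result = _rowwise_unsorted_segment_sum(values, indices, 3)

with tf.Session() as sess:
    print(sess.run(result))
    # [[3. 0. 0.]   row 0: 1+2 land in bucket 0
    #  [0. 4. 3.]]  row 1: 3 -> bucket 2, 4 -> bucket 1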

Example 11: universal_transformer_basic

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import unstack [as alias]
def universal_transformer_basic(layer_inputs,
                                step, hparams,
                                ffn_unit,
                                attention_unit):
  """Basic Universal Transformer.

  This model is pretty similar to the vanilla transformer in which weights are
  shared between layers. For some tasks, this simple idea brings a
  generalization that is not achievable by playing with the size of the model
  or drop_out parameters in the vanilla transformer.

  Args:
    layer_inputs:
        - state: state
    step: indicates number of steps taken so far
    hparams: model hyper-parameters
    ffn_unit: feed-forward unit
    attention_unit: multi-head attention unit

  Returns:
    layer_output:
         new_state: new state
  """
  state, inputs, memory = tf.unstack(layer_inputs, num=None, axis=0,
                                     name="unstack")
  new_state = step_preprocess(state, step, hparams)

  for i in range(hparams.num_inrecurrence_layers):
    with tf.variable_scope("rec_layer_%d" % i):
      new_state = ffn_unit(attention_unit(new_state))

  return new_state, inputs, memory 
Developer: tensorflow, Project: tensor2tensor, Lines: 34, Source: universal_transformer_util.py

Example 12: visualize_predictions

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import unstack [as alias]
def visualize_predictions(self, real_frames, gen_frames, actions=None):

    def concat_on_y_axis(x):
      x = tf.unstack(x, axis=1)
      x = tf.concat(x, axis=1)
      return x
    frames_gd = common_video.swap_time_and_batch_axes(real_frames)
    frames_pd = common_video.swap_time_and_batch_axes(gen_frames)
    if actions is not None:
      actions = common_video.swap_time_and_batch_axes(actions)

    if self.is_per_pixel_softmax:
      frames_pd_shape = common_layers.shape_list(frames_pd)
      frames_pd = tf.reshape(frames_pd, [-1, 256])
      frames_pd = tf.to_float(tf.argmax(frames_pd, axis=-1))
      frames_pd = tf.reshape(frames_pd, frames_pd_shape[:-1] + [3])

    frames_gd = concat_on_y_axis(frames_gd)
    frames_pd = concat_on_y_axis(frames_pd)
    if actions is not None:
      actions = tf.clip_by_value(actions, 0, 1)
      summary("action_vid", tf.cast(actions * 255, tf.uint8))
      actions = concat_on_y_axis(actions)
      side_by_side_video = tf.concat([frames_gd, frames_pd, actions], axis=2)
    else:
      side_by_side_video = tf.concat([frames_gd, frames_pd], axis=2)
    tf.summary.image("full_video", side_by_side_video) 
Developer: tensorflow, Project: tensor2tensor, Lines: 29, Source: sv2p.py
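The concat_on_y_axis helper above follows a common unstack/concat pattern: unstack one axis into a list, then concatenate the list along another axis, folding the first dimension into the second. A shape-only sketch on a batch-major dummy video (our illustration; the tensors in the code above are time-major):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

video = tf.zeros([2, 5, 8, 8, 3])      # [batch, time, h, w, c]
frames = tf.unstack(video, axis=1)     # 5 tensors of shape [2, 8, 8, 3]
strip = tf.concat(frames, axis=1)      # time folded onto height: [2, 40, 8, 3]
print(strip.shape)                     # (2, 40, 8, 3)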

Example 13: get_extra_loss

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import unstack [as alias]
def get_extra_loss(self, latent_means=None, latent_stds=None,
                     true_frames=None, gen_frames=None):
    """Gets extra loss from VAE and GAN."""
    if not self.is_training:
      return 0.0

    vae_loss, d_vae_loss, d_gan_loss = 0.0, 0.0, 0.0
    # Use sv2p's KL divergence computation.
    if self.hparams.use_vae:
      vae_loss = super(NextFrameSavpBase, self).get_extra_loss(
          latent_means=latent_means, latent_stds=latent_stds)

    if self.hparams.use_gan:
      # Strip out the first context_frames for the true_frames
      # Strip out the first context_frames - 1 for the gen_frames
      context_frames = self.hparams.video_num_input_frames
      true_frames = tf.stack(
          tf.unstack(true_frames, axis=0)[context_frames:])

      # discriminator for VAE.
      if self.hparams.use_vae:
        gen_enc_frames = tf.stack(
            tf.unstack(gen_frames, axis=0)[context_frames-1:])
        d_vae_loss = self.get_gan_loss(true_frames, gen_enc_frames, name="vae")

      # discriminator for GAN.
      gen_prior_frames = tf.stack(
          tf.unstack(self.gen_prior_video, axis=0)[context_frames-1:])
      d_gan_loss = self.get_gan_loss(true_frames, gen_prior_frames, name="gan")

    return (
        vae_loss + self.hparams.gan_loss_multiplier * d_gan_loss +
        self.hparams.gan_vae_loss_multiplier * d_vae_loss) 
Developer: tensorflow, Project: tensor2tensor, Lines: 35, Source: savp.py

Example 14: video_features

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import unstack [as alias]
def video_features(
      self, all_frames, all_actions, all_rewards, all_raw_frames):
    """No video wide feature."""
    del all_actions, all_rewards, all_raw_frames
    # Concatenate x_{t-1} and x_{t} along depth and encode it to
    # produce the mean and standard deviation of z_{t-1}
    seq_len = len(all_frames)
    image_pairs = tf.concat([all_frames[:seq_len-1],
                             all_frames[1:seq_len]], axis=-1)
    z_mu, z_log_sigma_sq = self.encoder(image_pairs)
    # Unstack z_mu and z_log_sigma_sq along the time dimension.
    z_mu = tf.unstack(z_mu, axis=0)
    z_log_sigma_sq = tf.unstack(z_log_sigma_sq, axis=0)
    return [z_mu, z_log_sigma_sq] 
Developer: tensorflow, Project: tensor2tensor, Lines: 16, Source: savp.py

Example 15: body

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import unstack [as alias]
def body(self, features):
    self.has_actions = "input_action" in features
    self.has_rewards = "target_reward" in features
    self.has_policies = "target_policy" in features
    self.has_values = "target_value" in features
    hparams = self.hparams

    def merge(inputs, targets):
      """Split inputs and targets into lists."""
      inputs = tf.unstack(inputs, axis=1)
      targets = tf.unstack(targets, axis=1)
      assert len(inputs) == hparams.video_num_input_frames
      assert len(targets) == hparams.video_num_target_frames
      return inputs + targets

    frames = merge(features["inputs"], features["targets"])
    frames_raw = merge(features["inputs_raw"], features["targets_raw"])
    actions, rewards = None, None
    if self.has_actions:
      actions = merge(features["input_action"], features["target_action"])
    if self.has_rewards:
      rewards = merge(features["input_reward"], features["target_reward"])

    # Reset the internal states if the reset_internal_states has been
    # passed as a feature and has greater value than 0.
    if self.is_recurrent_model and self.internal_states is not None:
      def reset_func():
        reset_ops = flat_lists(self.reset_internal_states_ops())
        with tf.control_dependencies(reset_ops):
          return tf.no_op()
      if self.is_predicting and "reset_internal_states" in features:
        reset = features["reset_internal_states"]
        reset = tf.greater(tf.reduce_sum(reset), 0.5)
        reset_ops = tf.cond(reset, reset_func, tf.no_op)
      else:
        reset_ops = tf.no_op()
      with tf.control_dependencies([reset_ops]):
        frames[0] = tf.identity(frames[0])

    with tf.control_dependencies([frames[0]]):
      return self.__process(frames, actions, rewards, frames_raw) 
Developer: tensorflow, Project: tensor2tensor, Lines: 43, Source: base.py


Note: The tensorflow.compat.v1.unstack examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their respective authors, who retain copyright; consult each project's license before redistributing or using the code. Do not republish without permission.