

Python common_layers.shape_list Function Code Examples

This article collects typical usage examples of the Python function tensor2tensor.layers.common_layers.shape_list. If you are unsure what shape_list does, how to call it, or what real-world uses look like, the curated examples below should help.


Fifteen code examples of the shape_list function are shown below, ordered by popularity by default.
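
Before diving into the examples, it helps to know what shape_list actually returns: a Python list whose entries are plain ints for statically known dimensions and scalar Tensors for dynamic ones, so the result can be indexed, unpacked, and spliced into ops such as tf.reshape or tf.zeros. The following is a minimal sketch of that behavior (a paraphrase, not the verbatim tensor2tensor implementation):

import tensorflow as tf

def shape_list_sketch(x):
  """Return dims of x: Python ints where static, scalar Tensors where dynamic."""
  x = tf.convert_to_tensor(x)
  static = x.get_shape().as_list()  # e.g. [None, 64, 64, 3]
  dynamic = tf.shape(x)             # always available at run time
  return [dim if dim is not None else dynamic[i]
          for i, dim in enumerate(static)]

# Usage: the batch dimension is dynamic, the rest are static.
targets = tf.placeholder(tf.float32, [None, 64, 64, 3])
batch, height, width, channels = shape_list_sketch(targets)
# batch is a scalar Tensor; height, width, channels are the ints 64, 64, 3.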

Example 1: transformer_prepare_decoder

def transformer_prepare_decoder(targets, hparams, features=None):
  """Prepare one shard of the model for the decoder.

  Args:
    targets: a Tensor.
    hparams: run hyperparameters
    features: optionally pass the entire features dictionary as well.
      This is needed now for "packed" datasets.

  Returns:
    decoder_input: a Tensor, bottom of decoder stack
    decoder_self_attention_bias: a bias tensor for use in decoder self-attention
  """
  decoder_self_attention_bias = (
      common_attention.attention_bias_lower_triangle(
          common_layers.shape_list(targets)[1]))
  if features and "targets_segmentation" in features:
    # "Packed" dataset - keep the examples from seeing each other.
    targets_segmentation = features["targets_segmentation"]
    targets_position = features["targets_position"]
    decoder_self_attention_bias += common_attention.attention_bias_same_segment(
        targets_segmentation, targets_segmentation)
  else:
    targets_position = None
  if hparams.proximity_bias:
    decoder_self_attention_bias += common_attention.attention_bias_proximal(
        common_layers.shape_list(targets)[1])
  decoder_input = common_layers.shift_right_3d(targets)
  if hparams.pos == "timing":
    if targets_position is not None:
      decoder_input = common_attention.add_timing_signal_1d_given_position(
          decoder_input, targets_position)
    else:
      decoder_input = common_attention.add_timing_signal_1d(decoder_input)
  return (decoder_input, decoder_self_attention_bias)
Developer: zeyu-h | Project: tensor2tensor | Lines: 35 | Source: transformer.py
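
In the example above, common_layers.shape_list(targets)[1] extracts the (possibly dynamic) target length, which attention_bias_lower_triangle turns into a causal mask. A minimal usage sketch, with the placeholder shape invented for illustration:

import tensorflow as tf
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_layers

targets = tf.placeholder(tf.float32, [None, None, 512])  # dynamic length
length = common_layers.shape_list(targets)[1]            # a scalar Tensor here
bias = common_attention.attention_bias_lower_triangle(length)
# bias has shape [1, 1, length, length], with large negative values above
# the diagonal so each position only attends to earlier positions.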

Example 2: create_output

def create_output(decoder_output, rows, cols, targets, hparams):
  """Creates output from decoder output and vars.

  Args:
    decoder_output: Tensor of shape [batch, ...], where ... can be any rank such
      that the number of elements is batch * rows * cols * hparams.hidden_size.
    rows: Integer representing number of rows in a 2-D data point.
    cols: Integer representing number of columns in a 2-D data point.
    targets: Tensor of shape [batch, hparams.img_len, hparams.img_len,
      hparams.num_channels].
    hparams: tf.contrib.training.HParams set.

  Returns:
    Tensor of shape [batch, hparams.img_len, hparams.img_len,
    hparams.num_mixtures * 10] if hparams.likelihood is DMOL, otherwise
    [batch, hparams.img_len, hparams.img_len, hparams.num_channels, 256].
    In the special case of predict mode, it is a Tensor of rank 5.
  """
  decoded_image = postprocess_image(decoder_output, rows, cols, hparams)
  depth = common_layers.shape_list(decoded_image)[-1]
  batch, height, width, channels = common_layers.shape_list(targets)
  likelihood = getattr(hparams, "likelihood", DistributionType.CAT)
  if hparams.mode == tf.estimator.ModeKeys.PREDICT:
    y = tf.reshape(decoded_image, [batch, -1, 1, 1, depth])
    output = y[:, :height, :, :, :]
  elif likelihood == DistributionType.CAT:
    # Unpack the cols dimension of the Categorical.
    output = tf.reshape(decoded_image,
                        [batch, height, width, channels, depth])
  else:
    output = decoded_image
  return output
Developer: kltony | Project: tensor2tensor | Lines: 32 | Source: common_image_attention.py

Example 3: body

  def body(self, features):
    """Body of the model.

    Args:
      features: a dictionary with the tensors.

    Returns:
      A pair (predictions, losses) where predictions is the generated image
      and losses is a dictionary of losses (that get added for the final loss).
    """
    features["targets"] = features["inputs"]
    is_training = self.hparams.mode == tf.estimator.ModeKeys.TRAIN

    # Input images.
    inputs = tf.to_float(features["targets_raw"])

    # Noise vector.
    z = tf.random_uniform([self.hparams.batch_size,
                           self.hparams.bottleneck_bits],
                          minval=-1, maxval=1, name="z")

    # Generator output: fake images.
    out_shape = common_layers.shape_list(inputs)[1:4]
    g = self.generator(z, is_training, out_shape)

    losses = self.losses(inputs, g)  # pylint: disable=not-callable

    summary_g_image = tf.reshape(
        g[0, :], [1] + common_layers.shape_list(inputs)[1:])
    tf.summary.image("generated", summary_g_image, max_outputs=1)

    if is_training:  # Returns a dummy output and the losses dictionary.
      return tf.zeros_like(inputs), losses
    return tf.reshape(g, tf.shape(inputs)), losses
Developer: qixiuai | Project: tensor2tensor | Lines: 34 | Source: vanilla_gan.py

Example 4: infer

  def infer(self,
            features=None,
            decode_length=50,
            beam_size=1,
            top_beams=1,
            alpha=0.0,
            use_tpu=False):
    """Produce predictions from the model."""
    if not features:
      features = {}
    inputs_old = None
    if "inputs" in features and len(features["inputs"].shape) < 4:
      inputs_old = features["inputs"]
      features["inputs"] = tf.expand_dims(features["inputs"], 2)

    # Create an initial targets tensor.
    if "partial_targets" in features:
      initial_output = tf.convert_to_tensor(features["partial_targets"])
    else:
      batch_size = common_layers.shape_list(features["inputs"])[0]
      length = common_layers.shape_list(features["inputs"])[1]
      target_length = tf.to_int32(2.0 * tf.to_float(length))
      initial_output = tf.zeros((batch_size, target_length, 1, 1),
                                dtype=tf.int64)

    features["targets"] = initial_output
    logits, _ = self(features)  # pylint: disable=not-callable
    samples = tf.argmax(logits, axis=-1)
    if inputs_old is not None:  # Restore to not confuse Estimator.
      features["inputs"] = inputs_old
    return samples
Developer: qixiuai | Project: tensor2tensor | Lines: 31 | Source: transformer_nat.py

Example 5: infer

  def infer(self, features, *args, **kwargs):
    """Produce predictions from the model by running it."""
    del args, kwargs
    if "targets" not in features:
      if "infer_targets" in features:
        targets_shape = common_layers.shape_list(features["infer_targets"])
      elif "inputs" in features:
        targets_shape = common_layers.shape_list(features["inputs"])
        targets_shape[1] = self.hparams.video_num_target_frames
      else:
        raise ValueError("no inputs are given.")
      features["targets"] = tf.zeros(targets_shape, dtype=tf.float32)

    output, _ = self(features)  # pylint: disable=not-callable

    if not isinstance(output, dict):
      output = {"targets": output}

    x = output["targets"]
    if self.is_per_pixel_softmax:
      x_shape = common_layers.shape_list(x)
      x = tf.reshape(x, [-1, x_shape[-1]])
      x = tf.argmax(x, axis=-1)
      x = tf.reshape(x, x_shape[:-1])
    else:
      x = tf.squeeze(x, axis=-1)
      x = tf.to_int64(tf.round(x))
    output["targets"] = x
    if self.hparams.reward_prediction:
      output["target_reward"] = tf.argmax(output["target_reward"], axis=-1)

    # only required for decoding.
    output["outputs"] = output["targets"]
    output["scores"] = output["targets"]
    return output
Developer: qixiuai | Project: tensor2tensor | Lines: 35 | Source: sv2p.py
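
Note how this snippet mutates the list returned by shape_list (targets_shape[1] = ...) before building a zeros tensor; because the list freely mixes ints and scalar Tensors, tf.zeros accepts it directly. A small sketch with made-up video dimensions:

import tensorflow as tf
from tensor2tensor.layers import common_layers

inputs = tf.placeholder(tf.float32, [None, 4, 64, 64, 3])  # [batch, frames, H, W, C]
targets_shape = common_layers.shape_list(inputs)
targets_shape[1] = 10  # stand-in for hparams.video_num_target_frames
dummy_targets = tf.zeros(targets_shape, dtype=tf.float32)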

Example 6: padded_sequence_accuracy

def padded_sequence_accuracy(predictions,
                             labels,
                             weights_fn=common_layers.weights_nonzero):
  """Percentage of times that predictions matches labels everywhere (non-0)."""
  # If the last dimension is 1 then we're using L1/L2 loss.
  if common_layers.shape_list(predictions)[-1] == 1:
    return rounding_sequence_accuracy(
        predictions, labels, weights_fn=weights_fn)
  with tf.variable_scope(
      "padded_sequence_accuracy", values=[predictions, labels]):
    padded_predictions, padded_labels = common_layers.pad_with_zeros(
        predictions, labels)
    weights = weights_fn(padded_labels)

    # Flatten, keeping batch dim (and num_classes dim for predictions)
    # TPU argmax can only deal with a limited number of dimensions
    predictions_shape = common_layers.shape_list(padded_predictions)
    batch_size = predictions_shape[0]
    num_classes = predictions_shape[-1]
    flat_size = common_layers.list_product(
        common_layers.shape_list(padded_labels)[1:])
    padded_predictions = tf.reshape(
        padded_predictions,
        [batch_size, common_layers.list_product(predictions_shape[1:-1]),
         num_classes])
    padded_labels = tf.reshape(padded_labels, [batch_size, flat_size])
    weights = tf.reshape(weights, [batch_size, flat_size])

    outputs = tf.to_int32(tf.argmax(padded_predictions, axis=-1))
    padded_labels = tf.to_int32(padded_labels)
    not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights
    axis = list(range(1, len(outputs.get_shape())))
    correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
    return correct_seq, tf.constant(1.0)
Developer: qixiuai | Project: tensor2tensor | Lines: 34 | Source: metrics.py

Example 7: bottleneck

  def bottleneck(self, x):
    hparams = self.hparams
    b, _ = super(AutoencoderDualDiscrete, self).bottleneck(x)
    if hparams.mode == tf.estimator.ModeKeys.EVAL:
      return b, 0.0
    bt, bi = tf.split(b, 2, axis=0)
    if self.hparams.mode != tf.estimator.ModeKeys.TRAIN:
      return tf.concat([bi, bi], axis=0), 0.0
    # Share the first hparams.bottleneck_shared_bits.
    shared = (bt + bi) / 2  # -1 if both -1, 1 if both were 1, 0 if disagree.
    rand = tf.random_uniform(common_layers.shape_list(bt))
    br = tf.where(rand < 0.5, bt, bi)  # Break ties at random.
    bs = tf.where(tf.equal(shared, 0.0), br, shared)  # Random where they disagree.
    bs = tf.concat([bs, bs], axis=0)
    n = hparams.bottleneck_shared_bits
    step = tf.train.get_global_step()
    zero = tf.constant(0, dtype=tf.int64)
    if step is None:
      step = zero
    step = tf.maximum(zero, step - hparams.bottleneck_shared_bits_start_warmup)
    f = common_layers.inverse_lin_decay(
        hparams.bottleneck_shared_bits_stop_warmup, min_value=0.1, step=step)
    n = tf.where(step > 1, n * f, n)
    n = tf.cast(n, tf.int64)
    b_shape = common_layers.shape_list(b)
    b = tf.concat([bs[..., :n], b[..., n:]], axis=-1)
    b = tf.reshape(b, b_shape)
    return b, 0.0
Developer: qixiuai | Project: tensor2tensor | Lines: 28 | Source: autoencoders.py

Example 8: embed

  def embed(self, x):
    """Embedding function that takes discrete latent and returns embedding.

    Args:
        x: Input to the discretization bottleneck.
    Returns:
        Continuous embedding to be passed on to the decoder.

    Raises:
        ValueError: For unknown or missing arguments.
    """
    shape_x = common_layers.shape_list(x)
    x_flat = tf.reshape(x, [-1, 1])
    c = self.int_to_bit(x_flat, num_bits=self.hparams.z_size, base=2)
    shape = common_layers.shape_list(c)
    new_shape = shape
    new_shape.append(self.hparams.num_blocks)
    new_shape.append(int(self.hparams.z_size / self.hparams.num_blocks))
    c = tf.to_int32(tf.reshape(c, shape=new_shape))
    h1_shape = shape_x
    h1_shape.append(self.hparams.hidden_size)
    h1 = tf.zeros(dtype=tf.float32, shape=h1_shape)
    c_int = self.bit_to_int(
        c, num_bits=int(self.hparams.z_size / self.hparams.num_blocks), base=2)
    c_hot = tf.one_hot(c_int, depth=self.hparams.block_v_size, axis=-1)
    c_hot_flat = tf.reshape(
        c_hot, shape=[-1, self.hparams.num_blocks, self.hparams.block_v_size])
    h1 = tf.matmul(tf.transpose(c_hot_flat, perm=[1, 0, 2]), self.means)
    h1 = tf.transpose(h1, perm=[1, 0, 2])
    h1 = tf.reshape(h1, shape=h1_shape)
    h1_shape[0] = self.hparams.batch_size
    h2 = tf.layers.dense(tf.nn.relu(h1), self.hparams.filter_size, name="vch2")
    res = tf.layers.dense(
        tf.nn.relu(h2), self.hparams.hidden_size, name="vcfin")
    return res
Developer: qixiuai | Project: tensor2tensor | Lines: 35 | Source: vq_discrete.py

Example 9: symbols_to_logits_fn

    def symbols_to_logits_fn(ids):
      """Go from ids to logits."""
      ids = tf.expand_dims(tf.expand_dims(ids, axis=2), axis=3)
      ids = tf.pad(ids[:, 1:], [[0, 0], [0, 1], [0, 0], [0, 0]])
      if "partial_targets" in features:
        pt = features["partial_targets"]
        pt_length = common_layers.shape_list(pt)[1]
        pt = tf.tile(pt, [1, beam_size])
        pt = tf.reshape(pt, [batch_size * beam_size, pt_length, 1, 1])
        ids = tf.concat([pt, ids], axis=1)

      features["targets"] = ids
      self._coverage = None
      logits, _ = self(features)  # pylint: disable=not-callable
      # now self._coverage is a coverage tensor for the first datashard.
      # it has shape [batch_size] and contains floats between 0 and
      # source_length.
      if self._problem_hparams:
        modality = self._problem_hparams.target_modality
        if modality.top_is_pointwise:
          return tf.squeeze(logits, axis=[1, 2, 3])
      # -1 due to the pad above.
      current_output_position = common_layers.shape_list(ids)[1] - 1
      logits = logits[:, current_output_position, :, :]
      return tf.squeeze(logits, axis=[1, 2])
Developer: chqiwang | Project: tensor2tensor | Lines: 25 | Source: t2t_model.py

Example 10: postprocess_image

def postprocess_image(x, rows, cols, hparams):
  """Postprocessing after decoding."""
  batch = common_layers.shape_list(x)[0]
  channels = 256
  x = tf.reshape(x, [batch, rows, cols, hparams.hidden_size])
  # targets = common_layers.conv(x, 256, (1, 1), name="output_conv")
  targets = tf.layers.dense(x, 256, use_bias=True, activation=None,
                            name="output_conv")
  if hparams.mode == tf.contrib.learn.ModeKeys.INFER:
    y = targets
    y = tf.reshape(y, [batch, -1, hparams.img_len*3, channels])
    yshape = common_layers.shape_list(y)
    block_length = hparams.query_shape[0]
    block_width = hparams.query_shape[1]

    # Break into block row wise.
    y = tf.reshape(y,
                   [batch, yshape[1] // block_length,
                    block_length,
                    yshape[2], channels])
    yshape = common_layers.shape_list(y)
    # Break into blocks width wise.
    y_blocks = tf.reshape(y,
                          [batch, yshape[1], yshape[2],
                           yshape[3] // block_width,
                           block_width, channels])

    # Reshape targets as [batch_size, num_blocks_rows, num_block_cols,
    # block_length, block_width, channels]
    targets = tf.transpose(y_blocks, [0, 1, 3, 2, 4, 5])

  return targets
Developer: chqiwang | Project: tensor2tensor | Lines: 32 | Source: common_image_attention.py

Example 11: dae

def dae(x, hparams, name):
  with tf.variable_scope(name):
    m = tf.layers.dense(x, hparams.v_size, name="mask")
    if hparams.softmax_k > 0:
      m, kl = top_k_softmax(m, hparams.softmax_k)
      return m, m, 1.0 - tf.reduce_mean(kl)
    logsm = tf.nn.log_softmax(m)
    # Gumbel-softmax sample.
    gumbel_samples = gumbel_sample(common_layers.shape_list(m))
    steps = hparams.kl_warmup_steps
    gumbel_samples *= common_layers.inverse_exp_decay(steps // 5) * 0.5
    temperature = 1.2 - common_layers.inverse_lin_decay(steps)
    # 10% of the time keep reasonably high temperature to keep learning.
    temperature = tf.cond(tf.less(tf.random_uniform([]), 0.9),
                          lambda: temperature,
                          lambda: tf.random_uniform([], minval=0.5, maxval=1.0))
    s = tf.nn.softmax((logsm + gumbel_samples) / temperature)
    m = tf.nn.softmax(m)
    kl = - tf.reduce_max(logsm, axis=-1)
    if _DO_SUMMARIES:
      tf.summary.histogram("max-log", tf.reshape(kl, [-1]))
    # Calculate the argmax and construct hot vectors.
    maxvec = tf.reshape(tf.argmax(m, axis=-1), [-1])
    maxvhot = tf.stop_gradient(tf.one_hot(maxvec, hparams.v_size))
    # Add losses that prevent too few being used.
    distrib = tf.reshape(logsm, [-1, hparams.v_size]) * maxvhot
    d_mean = tf.reduce_mean(distrib, axis=[0], keep_dims=True)
    d_variance = tf.reduce_mean(tf.square(distrib - d_mean), axis=[0])
    d_dev = - tf.reduce_mean(d_variance)
    ret = s
    if hparams.mode != tf.contrib.learn.ModeKeys.TRAIN:
      ret = tf.reshape(maxvhot, common_layers.shape_list(s))  # Just hot @eval.
    return m, ret, d_dev * 5.0 + tf.reduce_mean(kl) * 0.002
Developer: AranKomat | Project: tensor2tensor | Lines: 33 | Source: transformer_vae.py

Example 12: vq_discrete_unbottleneck

def vq_discrete_unbottleneck(x, hidden_size):
  """Simple undiscretization from vector quantized representation."""
  x_shape = common_layers.shape_list(x)
  x = tf.to_float(x)
  bottleneck_size = common_layers.shape_list(x)[-1]
  means, _, _ = get_vq_bottleneck(bottleneck_size, hidden_size)
  result = tf.matmul(tf.reshape(x, [-1, x_shape[-1]]), means)
  return tf.reshape(result, x_shape[:-1] + [hidden_size])
Developer: kltony | Project: tensor2tensor | Lines: 8 | Source: discretization.py
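
This flatten-matmul-restore pattern is a common reason to prefer shape_list over tf.shape: the trailing dimensions stay static Python ints while the leading batch dimension stays dynamic, and both can be handed back to tf.reshape in one list. A minimal sketch with invented sizes (w_demo is a stand-in weight matrix, not part of tensor2tensor):

import tensorflow as tf
from tensor2tensor.layers import common_layers

x = tf.placeholder(tf.float32, [None, 10, 32])
w = tf.get_variable("w_demo", [32, 64])
x_shape = common_layers.shape_list(x)       # [<batch Tensor>, 10, 32]
y = tf.matmul(tf.reshape(x, [-1, 32]), w)   # [batch * 10, 64]
y = tf.reshape(y, x_shape[:-1] + [64])      # back to [batch, 10, 64]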

Example 13: prepare_decoder

def prepare_decoder(targets, hparams):
  """Prepare decoder for images."""
  targets_shape = common_layers.shape_list(targets)
  channels = hparams.num_channels
  curr_infer_length = None

  # during training, images are [batch, IMG_LEN, IMG_LEN, 3].
  # At inference, they are [batch, curr_infer_length, 1, 1]
  if hparams.mode == tf.contrib.learn.ModeKeys.INFER:
    curr_infer_length = targets_shape[1]
    if hparams.block_raster_scan:
      assert hparams.img_len*channels % hparams.query_shape[1] == 0
      assert hparams.img_len % hparams.query_shape[0] == 0
      total_block_width = hparams.img_len*channels
      # Decoding is in block raster scan order. We divide the image into
      # hparams.query_shape blocks and then decode each block in raster scan.
      # To make that compatible with our inference pipeline, pad the target so
      # that rows is a multiple of query_shape and columns is a multiple of
      # hparams.img_len*channels
      curr_infer_length = targets_shape[1]
      block_padding_factor = total_block_width * hparams.query_shape[0]
      targets = tf.pad(targets, [
          [0, 0], [0, -curr_infer_length % block_padding_factor],
          [0, 0], [0, 0]])

      num_blocks = total_block_width // hparams.query_shape[1]
      # Reshape the image to represent blocks
      target_blocks = tf.reshape(
          targets, [targets_shape[0], -1, num_blocks, hparams.query_shape[0],
                    hparams.query_shape[1]])
      # Transpose to read the image in 2D fashion.
      targets = tf.transpose(target_blocks, [0, 1, 3, 2, 4])
    else:
      # Add padding to make sure the size of targets is a multiple of img_height
      # times the number of channels. This is needed for positional encodings and
      # for doing the RGB lookup.
      padding_factor = channels * hparams.img_len
      targets = tf.pad(targets, [
          [0, 0], [0, -curr_infer_length % padding_factor], [0, 0], [0, 0]])
    targets = tf.reshape(targets,
                         [targets_shape[0], -1, hparams.img_len, channels])
  # Preprocess image
  x = prepare_image(targets, hparams, name="dec_channels")
  x_shape = common_layers.shape_list(x)
  if (hparams.dec_attention_type == AttentionType.LOCAL_2D or
      hparams.dec_attention_type == AttentionType.LOCAL_BLOCK):
    x = common_attention.right_shift_blockwise(x, hparams.query_shape)
    x = add_pos_signals(x, hparams, "dec_pos")
  else:
    # Add position signals
    x = tf.reshape(x, [targets_shape[0],
                       x_shape[1]*x_shape[2], hparams.hidden_size])
    x = common_layers.shift_right_3d(x)
    x = tf.reshape(x, [targets_shape[0],
                       x_shape[1], x_shape[2], hparams.hidden_size])
    x = add_pos_signals(x, hparams, "dec_pos")
  x = common_layers.cast_like(x, targets)
  return x, x_shape[1], x_shape[2]
Developer: kltony | Project: tensor2tensor | Lines: 58 | Source: common_image_attention.py

Example 14: logits_to_samples

  def logits_to_samples(logits):
    """Get samples from logits."""
    # If the last dimension is 1 then we're using L1/L2 loss.
    if common_layers.shape_list(logits)[-1] == 1:
      return tf.to_int32(tf.squeeze(logits, axis=-1))
    # Argmax in TF doesn't handle more than 5 dimensions yet.
    logits_shape = common_layers.shape_list(logits)
    argmax = tf.argmax(tf.reshape(logits, [-1, logits_shape[-1]]), axis=-1)
    return tf.reshape(argmax, logits_shape[:-1])
Developer: kltony | Project: tensor2tensor | Lines: 9 | Source: next_frame.py
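
The same reshape trick sidesteps the rank limit on tf.argmax mentioned in the comment: collapse everything except the class dimension, take the argmax in 2-D, then restore the leading dims from shape_list. A sketch with made-up rank-6 video logits:

import tensorflow as tf
from tensor2tensor.layers import common_layers

logits = tf.placeholder(tf.float32, [None, 4, 64, 64, 3, 256])
logits_shape = common_layers.shape_list(logits)
flat_argmax = tf.argmax(tf.reshape(logits, [-1, logits_shape[-1]]), axis=-1)
samples = tf.reshape(flat_argmax, logits_shape[:-1])  # rank 5 again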

Example 15: loss

  def loss(self, top_out, targets):
    predictions = top_out
    if (len(common_layers.shape_list(top_out)) != len(
        common_layers.shape_list(targets))):
      predictions = tf.squeeze(top_out, axis=[-1])
    with tf.name_scope("log_poisson"):
      weights = self.targets_weights_fn(targets)
      lp_loss = tf.nn.log_poisson_loss(targets, predictions)
      return tf.reduce_sum(lp_loss * weights), tf.reduce_sum(weights)
Developer: kltony | Project: tensor2tensor | Lines: 9 | Source: modalities.py
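
Here shape_list is used only for a rank check: len(shape_list(x)) is the static rank, which decides whether a trailing unit axis must be squeezed so that predictions and targets line up before tf.nn.log_poisson_loss. A short sketch with invented shapes:

import tensorflow as tf
from tensor2tensor.layers import common_layers

top_out = tf.placeholder(tf.float32, [None, 20, 1])  # predictions, extra unit axis
targets = tf.placeholder(tf.float32, [None, 20])
if len(common_layers.shape_list(top_out)) != len(common_layers.shape_list(targets)):
  top_out = tf.squeeze(top_out, axis=[-1])
loss = tf.nn.log_poisson_loss(targets, top_out)      # elementwise Poisson loss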


Note: The tensor2tensor.layers.common_layers.shape_list examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects and remain the copyright of their original authors; consult each project's license before redistributing or reusing the code, and do not reproduce this compilation without permission.