

Python v1.reshape Method Code Examples

This article collects typical usage examples of the Python method tensorflow.compat.v1.reshape. If you are wondering what v1.reshape does, how to call it, or what it looks like in real code, the curated method examples below may help. You can also explore further usage examples from the tensorflow.compat.v1 module.


The following shows 15 code examples of v1.reshape, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
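
Before the examples, here is a minimal, self-contained sketch of what tf.compat.v1.reshape itself does (the tensor values below are illustrative, not taken from any of the projects):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# A 1-D tensor of 6 elements; -1 asks TensorFlow to infer that dimension.
x = tf.constant([1, 2, 3, 4, 5, 6])
matrix = tf.reshape(x, [2, 3])     # shape (2, 3)
inferred = tf.reshape(x, [3, -1])  # shape (3, 2); -1 is inferred as 2

with tf.Session() as sess:
  print(sess.run(matrix))    # [[1 2 3] [4 5 6]]
  print(sess.run(inferred))  # [[1 2] [3 4] [5 6]]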

Example 1: unpack_grad_tuple

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reshape [as alias]
def unpack_grad_tuple(gv, gpt):
  """Unpack a previously packed collection of gradient tensors.

  Args:
    gv: A (grad, var) pair to be unpacked.
    gpt: A GradPackTuple describing the packing operation that produced gv.

  Returns:
    A list of (grad, var) pairs corresponding to the values that were
     originally packed into gv, maybe following subsequent operations like
     reduction.
  """
  elt_widths = [x.num_elements() for x in gpt.shapes]
  with tf.device(gv[0][0].device):
    with tf.name_scope('unpack'):
      splits = tf.split(gv[0], elt_widths)
      unpacked_gv = []
      for idx, s in enumerate(splits):
        unpacked_gv.append((tf.reshape(s, gpt.shapes[idx]), gpt.vars[idx]))
  return unpacked_gv 
Developer ID: tensorflow, Project: benchmarks, Lines: 22, Source: allreduce.py
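
For context, a minimal sketch of the packing side that unpack_grad_tuple reverses: flatten each gradient, concatenate, and keep the original shapes so split + reshape can restore them. The helper names below are hypothetical and assume fully static shapes; they are not part of the benchmarks code.

import tensorflow.compat.v1 as tf

def pack_grad_tensors(grads):
  """Hypothetical helper: flatten and concatenate gradient tensors."""
  shapes = [g.shape for g in grads]  # assumes fully static shapes
  flat = [tf.reshape(g, [-1]) for g in grads]
  return tf.concat(flat, axis=0), shapes

def unpack_grad_tensors(packed, shapes):
  """Hypothetical helper: invert pack_grad_tensors via split + reshape."""
  widths = [s.num_elements() for s in shapes]
  splits = tf.split(packed, widths)
  return [tf.reshape(s, shape) for s, shape in zip(splits, shapes)]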

Example 2: undo_maybe_concat_tensors

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reshape [as alias]
def undo_maybe_concat_tensors(self, concatenated_tensor):
    """Undo maybe_concat_tensors()."""
    if not self._num_splits:
      return concatenated_tensor

    if len(concatenated_tensor) != 1:
      raise RuntimeError(
          'undo_maybe_split_tensors() must be called before '
          'undo_maybe_concat_tensors when num_splits is greater than 1')
    concatenated_tensor = concatenated_tensor[0]

    tensors_with_sizes = tf.split(concatenated_tensor,
                                  self._orig_sizes)
    tensors_with_shapes = [
        tf.reshape(grad, shape) for grad, shape in zip(
            tensors_with_sizes, self._orig_shapes)
    ]
    return tensors_with_shapes 
Developer ID: tensorflow, Project: benchmarks, Lines: 20, Source: batch_allreduce.py

Example 3: padded_neg_log_perplexity_with_masking

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reshape [as alias]
def padded_neg_log_perplexity_with_masking(
    predictions,
    labels,
    features,
    weights_fn=None):
  """Average log-perplexity with custom targets_mask."""
  del weights_fn
  if "targets_mask" not in features:
    raise ValueError("masked_neg_log_perplexity requires targets_mask feature")

  # Features are 4 dimensional, so we need to reshape the targets_mask to match
  # the shape of the labels. A lot of models rely on these features being 4D,
  # so it's best to update the shape of the mask.
  extended_targets_mask_shape = common_layers.shape_list(
      features["targets_mask"])
  extended_targets_mask_shape.extend([1, 1])
  features["targets_mask"] = tf.reshape(features["targets_mask"],
                                        shape=extended_targets_mask_shape)

  mask_fn = lambda labels: features["targets_mask"]
  return padded_neg_log_perplexity(predictions, labels, mask_fn) 
Developer ID: tensorflow, Project: tensor2tensor, Lines: 23, Source: metrics.py
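
The reshape above just appends two singleton axes so a [batch, length] mask can broadcast against 4-D labels. A tiny sketch of the same idea (the shapes are illustrative):

import tensorflow.compat.v1 as tf

mask = tf.ones([2, 5])                    # [batch, length]
mask_4d = tf.reshape(mask, [2, 5, 1, 1])  # [batch, length, 1, 1]
# Equivalent without hard-coding the dimensions:
mask_4d = tf.expand_dims(tf.expand_dims(mask, -1), -1)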

Example 4: __init__

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reshape [as alias]
def __init__(self, pad_mask):
    """Compute and store the location of the padding.

    Args:
      pad_mask (tf.Tensor): Reference padding tensor of shape
        [batch_size, length] or [dim_origin] (dim_origin = batch_size * length)
        containing non-zero positive values to indicate padding locations.
    """
    self.nonpad_ids = None
    self.dim_origin = None

    with tf.name_scope("pad_reduce/get_ids"):
      pad_mask = tf.reshape(pad_mask, [-1])  # Flatten the batch
      # nonpad_ids contains coordinates of zero rows (as pad_mask is
      # float32, checking zero equality is done with |x| < epsilon, with
      # epsilon=1e-9 as standard; here pad_mask only contains positive values,
      # so tf.abs would be redundant)
      self.nonpad_ids = tf.to_int32(tf.where(pad_mask < 1e-9))
      self.dim_origin = tf.shape(pad_mask)[:1] 
Developer ID: tensorflow, Project: tensor2tensor, Lines: 21, Source: expert_utils.py
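
The core trick is locating zero entries of the flattened mask with tf.where. A runnable sketch (the mask values are illustrative; 1.0 marks padding):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

pad_mask = tf.constant([[0., 0., 1.], [0., 1., 1.]])  # 1.0 marks padding
flat = tf.reshape(pad_mask, [-1])                     # flatten the batch
nonpad_ids = tf.to_int32(tf.where(flat < 1e-9))       # coordinates of zeros

with tf.Session() as sess:
  print(sess.run(nonpad_ids))  # [[0] [1] [3]] -- the non-padded positions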

Example 5: combine

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reshape [as alias]
def combine(self, x):
    """Return the output from the experts.

    When one example goes to multiple experts, the outputs are summed.

    Args:
      x: a Tensor with shape [batch, num_experts, expert_capacity, depth]

    Returns:
      a `Tensor` with shape `[batch, length, depth]`
    """
    depth = tf.shape(x)[-1]
    x *= tf.expand_dims(self._nonpadding, -1)
    ret = tf.unsorted_segment_sum(
        x, self._flat_indices, num_segments=self._batch * self._length)
    ret = tf.reshape(ret, [self._batch, self._length, depth])
    return ret 
Developer ID: tensorflow, Project: tensor2tensor, Lines: 19, Source: expert_utils.py
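
A simplified sketch of this scatter-back step, with the expert dimensions already flattened (all sizes and routing indices below are illustrative assumptions, not the real 4-D call):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

batch, length, depth = 2, 3, 4
# Six expert outputs, each already routed to one flat (batch*length) slot;
# duplicate indices would be summed, matching the "outputs are summed" rule.
x = tf.ones([6, depth])
flat_indices = tf.constant([0, 1, 2, 3, 4, 5])
ret = tf.unsorted_segment_sum(x, flat_indices, num_segments=batch * length)
ret = tf.reshape(ret, [batch, length, depth])

with tf.Session() as sess:
  print(sess.run(tf.shape(ret)))  # [2 3 4]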

Example 6: compute_batch_indices

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reshape [as alias]
def compute_batch_indices(batch_size, beam_size):
  """Computes the i'th coordinate that contains the batch index for gathers.

  Batch pos is a tensor like [[0, 0, 0, 0], [1, 1, 1, 1], ...]. It says which
  batch the beam item is in. This will create the i of the (i, j) coordinate
  needed for the gather.

  Args:
    batch_size: Batch size.
    beam_size: Size of the beam.
  Returns:
    batch_pos: [batch_size, beam_size] tensor of ids
  """
  batch_pos = tf.range(batch_size * beam_size) // beam_size
  batch_pos = tf.reshape(batch_pos, [batch_size, beam_size])
  return batch_pos 
Developer ID: tensorflow, Project: tensor2tensor, Lines: 18, Source: beam_search.py
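
For instance, with batch_size=2 and beam_size=3 (illustrative values), the two lines above produce [[0 0 0] [1 1 1]]:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

batch_pos = tf.range(2 * 3) // 3           # [0, 0, 0, 1, 1, 1]
batch_pos = tf.reshape(batch_pos, [2, 3])  # [[0, 0, 0], [1, 1, 1]]

with tf.Session() as sess:
  print(sess.run(batch_pos))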

Example 7: decode_transformer

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reshape [as alias]
def decode_transformer(encoder_output, encoder_decoder_attention_bias, targets,
                       hparams, name):
  """Original Transformer decoder."""
  with tf.variable_scope(name):
    targets = common_layers.flatten4d3d(targets)

    decoder_input, decoder_self_bias = (
        transformer.transformer_prepare_decoder(targets, hparams))

    decoder_input = tf.nn.dropout(decoder_input,
                                  1.0 - hparams.layer_prepostprocess_dropout)

    decoder_output = transformer.transformer_decoder(
        decoder_input, encoder_output, decoder_self_bias,
        encoder_decoder_attention_bias, hparams)
    decoder_output = tf.expand_dims(decoder_output, axis=2)
    decoder_output_shape = common_layers.shape_list(decoder_output)
    decoder_output = tf.reshape(
        decoder_output, [decoder_output_shape[0], -1, 1, hparams.hidden_size])
    # Expand since t2t expects 4d tensors.
    return decoder_output 
Developer ID: tensorflow, Project: tensor2tensor, Lines: 23, Source: transformer_nat.py

Example 8: sample_q

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reshape [as alias]
def sample_q(
      self, targets, targets_mask, decoder_self_attention_bias, n_samples,
      temp, **kwargs):
    hparams = self._hparams
    batch_size, targets_max_length = common_layers.shape_list(targets_mask)[:2]
    q_params = ops.posterior("posterior", hparams, targets, targets_mask,
                             decoder_self_attention_bias, **kwargs)
    q_dist = gops.diagonal_normal(q_params, "posterior")
    loc, scale = q_dist.loc, q_dist.scale
    z_shape = [batch_size, targets_max_length, hparams.latent_size]
    iw_z_shape = [n_samples*batch_size, targets_max_length, hparams.latent_size]
    if n_samples == 1:
      noise = tf.random_normal(z_shape, stddev=temp)
      z_q = loc + scale * noise
      log_q_z = q_dist.log_prob(z_q)  # [B, L, C]
    else:
      noise = tf.random_normal([n_samples] + z_shape, stddev=temp)
      z_q = loc[tf.newaxis, ...] + scale[tf.newaxis, ...] * noise
      log_q_z = q_dist.log_prob(z_q)  # [K, B, L, C]
      z_q = tf.reshape(z_q, iw_z_shape)
      log_q_z = tf.reshape(log_q_z, iw_z_shape)
    return z_q, log_q_z, q_dist 
Developer ID: tensorflow, Project: tensor2tensor, Lines: 24, Source: transformer_vae_flow_prior.py

Example 9: expand_batch_coordinates

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reshape [as alias]
def expand_batch_coordinates(bc, length_factor):
  """Duplicate elements of bc by length_factor.

  Args:
    bc (tf.Tensor): int32 tensor of shape [1, length, 1]
    length_factor (int): number of times to duplicate each element.

  Returns:
    tf.Tensor: of shape [1, length*length_factor, 1] where every element has
      been duplicated length_factor times.
  """
  assert bc.get_shape().as_list() == [1, None, 1]
  # bc has shape [1, length, 1]
  bc *= tf.constant([[1] * length_factor])
  # bc has shape [1, length, length_factor]
  bc = tf.reshape(bc, [1, -1, 1])
  # bc has shape [1, length*length_factor, 1]
  return bc 
Developer ID: tensorflow, Project: tensor2tensor, Lines: 20, Source: attention_lm_moe.py
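
On concrete numbers, the broadcast-then-reshape trick interleaves the duplicates. A sketch with length=3 and length_factor=2 (values chosen for illustration):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

bc = tf.constant([[[1], [2], [3]]])  # shape [1, 3, 1]
bc = bc * tf.constant([[1, 1]])      # broadcasts to shape [1, 3, 2]
bc = tf.reshape(bc, [1, -1, 1])      # shape [1, 6, 1]

with tf.Session() as sess:
  print(sess.run(bc[:, :, 0]))  # [[1 1 2 2 3 3]]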

Example 10: attn

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reshape [as alias]
def attn(image_feat, query, hparams, name="attn"):
  """Attention on image feature with question as query."""
  with tf.variable_scope(name, "attn", values=[image_feat, query]):
    attn_dim = hparams.attn_dim
    num_glimps = hparams.num_glimps
    num_channels = common_layers.shape_list(image_feat)[-1]
    if len(common_layers.shape_list(image_feat)) == 4:
      image_feat = common_layers.flatten4d3d(image_feat)
    query = tf.expand_dims(query, 1)
    image_proj = common_attention.compute_attention_component(
        image_feat, attn_dim, name="image_proj")
    query_proj = common_attention.compute_attention_component(
        query, attn_dim, name="query_proj")
    h = tf.nn.relu(image_proj + query_proj)
    h_proj = common_attention.compute_attention_component(
        h, num_glimps, name="h_proj")
    p = tf.nn.softmax(h_proj, axis=1)
    image_ave = tf.matmul(image_feat, p, transpose_a=True)
    image_ave = tf.reshape(image_ave, [-1, num_channels*num_glimps])

    return image_ave 
Developer ID: tensorflow, Project: tensor2tensor, Lines: 23, Source: vqa_attention.py
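
The final matmul/reshape pair pools the image features under each glimpse's attention weights, then flattens the glimpses into the channel axis. A shape-only sketch (all sizes are illustrative):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

batch, positions, channels, glimpses = 2, 7, 16, 2
image_feat = tf.zeros([batch, positions, channels])
p = tf.zeros([batch, positions, glimpses])  # softmaxed attention weights

pooled = tf.matmul(image_feat, p, transpose_a=True)   # [batch, channels, glimpses]
flat = tf.reshape(pooled, [-1, channels * glimpses])  # [batch, channels*glimpses]

with tf.Session() as sess:
  print(sess.run(tf.shape(flat)))  # [2 32]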

Example 11: decoder

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reshape [as alias]
def decoder(name, latents, hparams, decoder_self_attention_bias, **kwargs):
  """Compute final hidden states for p(y|z,x)."""
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    decoder_input = drop_2d(latents, hparams.mode, hparams.decoder_2d_dropout)
    if hparams.pos_attn:
      decoder_input = gops.positional_attention(
          "pos_attn", decoder_input, decoder_self_attention_bias, hparams)
    else:
      decoder_input = common_attention.add_timing_signal_1d(decoder_input)
    if common_layers.shape_list(latents)[-1] != hparams.hidden_size:
      decoder_input = gops.dense("lat2hid", latents, hparams.hidden_size)
    decoder_output = transformer_decoder_layers(
        "block",
        n_layers=hparams.n_decoder_layers,
        decoder_input=decoder_input,
        hparams=hparams,
        decoder_self_attention_bias=decoder_self_attention_bias,
        **kwargs)
    batch_size, targets_length = common_layers.shape_list(decoder_output)[:2]
    decoder_output = tf.reshape(
        decoder_output, [batch_size, targets_length, 1, hparams.hidden_size])
    # Expand since t2t expects 4d tensors.
    return decoder_output 
Developer ID: tensorflow, Project: tensor2tensor, Lines: 25, Source: transformer_vae_flow_prior_ops.py

Example 12: body

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reshape [as alias]
def body(self, features):
    hparams = copy.copy(self._hparams)
    inputs = features["inputs"]
    targets = features["targets"]
    targets_shape = common_layers.shape_list(targets)
    if not (tf.get_variable_scope().reuse or
            hparams.mode == tf.estimator.ModeKeys.PREDICT):
      tf.summary.image("targets", targets, max_outputs=1)

    decoder_input, rows, cols = cia.prepare_decoder(
        targets, hparams)
    # Add class label to decoder input.
    if not hparams.unconditional:
      decoder_input += tf.reshape(inputs,
                                  [targets_shape[0], 1, 1, hparams.hidden_size])

    decoder_output = cia.transformer_decoder_layers(
        decoder_input, None,
        hparams.num_decoder_layers,
        hparams,
        attention_type=hparams.dec_attention_type,
        name="decoder")

    output = cia.create_output(decoder_output, rows, cols, targets, hparams)
    return output 
Developer ID: tensorflow, Project: tensor2tensor, Lines: 27, Source: image_transformer_2d.py
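
The class-label reshape works because a [batch, 1, 1, hidden] tensor broadcasts over every spatial position when added to the 4-D decoder input. A minimal sketch (the sizes are illustrative):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

batch, rows, cols, hidden = 2, 4, 4, 8
decoder_input = tf.zeros([batch, rows, cols, hidden])
class_emb = tf.ones([batch, hidden])  # one label embedding per example

# [batch, 1, 1, hidden] broadcasts across all rows and cols on addition.
decoder_input += tf.reshape(class_emb, [batch, 1, 1, hidden])

with tf.Session() as sess:
  print(sess.run(tf.shape(decoder_input)))  # [2 4 4 8]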

Example 13: body

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reshape [as alias]
def body(self, features):
    # TODO(lukaszkaiser): investigate this issue and repair.
    if self._hparams.initializer == "orthogonal":
      raise ValueError("LSTM models fail with orthogonal initializer.")
    train = self._hparams.mode == tf.estimator.ModeKeys.TRAIN
    # This is a temporary fix for varying-length sequences within a batch.
    # A more complete fix should pass a length tensor from outside so that
    # all the lstm variants can use it.
    input_shape = common_layers.shape_list(features["inputs_raw"])
    flat_input = tf.reshape(features["inputs_raw"],
                            [input_shape[0], input_shape[1]])
    inputs_length = tf.reduce_sum(tf.minimum(flat_input, 1), -1)
    target_shape = common_layers.shape_list(features["targets_raw"])
    flat_target = tf.reshape(features["targets_raw"],
                             [target_shape[0], target_shape[1]])
    targets_length = tf.reduce_sum(tf.minimum(flat_target, 1), -1)
    tf.logging.info(self._hparams)
    return lstm_seq2seq_internal_attention(
        features["inputs"], features["targets"], self._hparams, train,
        inputs_length, targets_length) 
Developer ID: tensorflow, Project: tensor2tensor, Lines: 22, Source: lstm.py
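
The length computation relies on the pad id being 0: clipping every token id at 1 and summing counts the non-pad positions. A small sketch (the token ids are illustrative):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

flat_input = tf.constant([[5, 2, 9, 0, 0],   # pad id is 0
                          [3, 0, 0, 0, 0]])
lengths = tf.reduce_sum(tf.minimum(flat_input, 1), -1)

with tf.Session() as sess:
  print(sess.run(lengths))  # [3 1]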

Example 14: encode_knowledge_bottom

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reshape [as alias]
def encode_knowledge_bottom(self, features):
    tf.logging.info("Encoding knowledge " + str(self.triple_num))
    # Make sure these are the embeddings for the triples.
    # <tf.float32>[batch_size, triple_num*max_triple_length, 1, emb_dim]
    fact_embedding = features["encoded_triples"]
    # [batch_size, triple_num*max_triple_length, emb_dim]
    fact_embedding = tf.squeeze(fact_embedding, 2)

    kb_shape = common_layers.shape_list(fact_embedding)
    batch_size = kb_shape[0]
    embed_dim = kb_shape[2]
    # <tf.float32>[batch_size*triple_num, max_triple_length, emb_dim]
    re_fact_embedding = tf.reshape(
        fact_embedding, [batch_size * self.triple_num, -1, embed_dim],
        name="reshape_fact_embedding")

    # <tf.int64>[batch_size, triple_num]
    input_fact_lengths = features["triple_lens"]
    # Stack the fact lengths.
    # <tf.int64>[batch_size*max_triple_num]
    re_fact_lengths = tf.reshape(
        input_fact_lengths, [batch_size * self.triple_num, 1],
        name="reshape_fact_lengths")

    return re_fact_embedding, re_fact_lengths 
Developer ID: tensorflow, Project: tensor2tensor, Lines: 27, Source: neural_assistant.py
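
The reshape here folds the triple dimension into the batch so that each triple is encoded independently. A shape-only sketch (sizes are illustrative):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

batch, triples, length, dim = 2, 3, 4, 5
fact_embedding = tf.zeros([batch, triples * length, dim])

# [batch, triples*length, dim] -> [batch*triples, length, dim]
re_fact = tf.reshape(fact_embedding, [batch * triples, -1, dim])

with tf.Session() as sess:
  print(sess.run(tf.shape(re_fact)))  # [6 4 5]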

Example 15: add_inference

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reshape [as alias]
def add_inference(self, cnn):
    # This model only supports 1x1 images with 1 channel
    assert cnn.top_layer.shape[1:] == (1, 1, 1)
    # Multiply by variable A.
    with tf.name_scope('mult_by_var_A'):
      cnn.conv(1, 1, 1, 1, 1, use_batch_norm=None, activation=None, bias=None,
               kernel_initializer=tf.constant_initializer(
                   self.VAR_A_INITIAL_VALUE))
    # Multiply by variable B.
    with tf.name_scope('mult_by_var_B'):
      cnn.conv(1, 1, 1, 1, 1, use_batch_norm=None, activation=None, bias=None,
               kernel_initializer=tf.constant_initializer(
                   self.VAR_B_INITIAL_VALUE))
    with tf.name_scope('reshape_to_scalar'):
      cnn.reshape([-1, 1]) 
Developer ID: tensorflow, Project: benchmarks, Lines: 17, Source: test_util.py


Note: The tensorflow.compat.v1.reshape method examples in this article were compiled by 純淨天空 from GitHub/MSDocs and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or reusing the code. Do not republish without permission.