

Python common_layers.layer_preprocess Method Code Examples

This article collects typical usage examples of the Python method tensor2tensor.layers.common_layers.layer_preprocess. If you are wondering how common_layers.layer_preprocess is used in practice or what calling it looks like, the curated code examples below may help. You can also explore further usage examples from its containing module, tensor2tensor.layers.common_layers.


The following presents 12 code examples of the common_layers.layer_preprocess method, sorted by popularity by default.
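
Before the examples, here is a minimal, self-contained sketch of calling layer_preprocess directly. It is not taken from any of the projects below; the hyperparameter values and tensor shapes are illustrative assumptions (TF1-style graph mode, as in the examples).

# A hedged usage sketch of common_layers.layer_preprocess; values are illustrative.
import tensorflow as tf
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_layers

hparams = common_hparams.basic_params1()
hparams.hidden_size = 64
hparams.layer_preprocess_sequence = "n"     # "n" = layer norm before each sublayer
hparams.layer_postprocess_sequence = "da"   # "d" = dropout, "a" = residual add
hparams.layer_prepostprocess_dropout = 0.1

x = tf.random.uniform([8, 16, hparams.hidden_size])  # [batch, length, hidden]
normed = common_layers.layer_preprocess(x, hparams)  # normalized input for a sublayer
# After the sublayer produces y, the residual is closed with:
# x = common_layers.layer_postprocess(x, y, hparams)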

Example 1: top

# Required module import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import layer_preprocess [as alias]
def top(self, body_output, _):
    with tf.variable_scope(self.name):
      hidden_dim = self._model_hparams.hidden_size
      img_len = self._model_hparams.img_len
      channels = self.num_channels  # RGB
      batch = common_layers.shape_list(body_output)[0]
      x = tf.layers.conv2d(
          body_output,
          hidden_dim * channels, (1, 1),
          strides=(1, 1),
          padding="VALID",
          activation=tf.nn.relu,
          name="decompress_conv")
      x = tf.reshape(x, [batch, img_len, img_len * channels, hidden_dim])
      x = common_layers.layer_preprocess(x, self._model_hparams)
      x = tf.layers.dense(
          x, 256, use_bias=True, activation=None, name="output_conv")
      x = tf.reshape(x,
                     [-1, img_len, img_len, channels, self.top_dimensionality])
      return x 
Developer ID: akzaidi, Project: fine-lm, Lines of code: 22, Source file: modalities.py

Example 2: attend

# Required module import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import layer_preprocess [as alias]
def attend(x, source, hparams, name):
  """Attend function."""
  with tf.variable_scope(name):
    # x = tf.squeeze(x, axis=2)
    x, xshape, _ = cia.maybe_reshape_4d_to_3d(x)
    if len(source.get_shape()) > 3:
      source = tf.squeeze(source, axis=2)
    source = common_attention.add_timing_signal_1d(source)
    y = common_attention.multihead_attention(
        common_layers.layer_preprocess(x, hparams),
        source,
        None,
        hparams.attention_key_channels or hparams.hidden_size,
        hparams.attention_value_channels or hparams.hidden_size,
        hparams.hidden_size, hparams.num_heads,
        hparams.attention_dropout)
    res = common_layers.layer_postprocess(x, y, hparams)
    return tf.reshape(res, xshape) 
Developer ID: akzaidi, Project: fine-lm, Lines of code: 20, Source file: latent_layers.py
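
As a hedged illustration of how this helper might be driven (not part of the fine-lm project), the hyperparameter set and tensor shapes below are assumptions:

# Hypothetical driver for attend(); transformer_tiny and the shapes are assumptions.
import tensorflow as tf
from tensor2tensor.models import transformer

hparams = transformer.transformer_tiny()
x = tf.random.uniform([2, 8, 8, hparams.hidden_size])      # 4-D feature grid (query side)
source = tf.random.uniform([2, 20, hparams.hidden_size])   # 3-D memory antecedent
out = attend(x, source, hparams, name="attend_to_source")  # reshaped back to x's shape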

Example 3: attend

# Required module import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import layer_preprocess [as alias]
def attend(x, source, hparams, name):
  """Self-attention layer with source as memory antecedent."""
  with tf.variable_scope(name):
    x = tf.squeeze(x, axis=2)
    if len(source.get_shape()) > 3:
      source = tf.squeeze(source, axis=2)
    source = common_attention.add_timing_signal_1d(source)
    y = common_attention.multihead_attention(
        common_layers.layer_preprocess(x, hparams), source, None,
        hparams.attention_key_channels or hparams.hidden_size,
        hparams.attention_value_channels or hparams.hidden_size,
        hparams.hidden_size, hparams.num_heads,
        hparams.attention_dropout)
    res = common_layers.layer_postprocess(x, y, hparams)
    return tf.expand_dims(res, axis=2) 
Developer ID: akzaidi, Project: fine-lm, Lines of code: 17, Source file: transformer_vae.py

Example 4: transformer_encoder_attention_unit

# Required module import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import layer_preprocess [as alias]
def transformer_encoder_attention_unit(x,
                                       hparams,
                                       encoder_self_attention_bias,
                                       attention_dropout_broadcast_dims,
                                       save_weights_to=None,
                                       make_image_summary=True):
  """Applies multihead attention function which is parametrised for encoding.

  Args:
    x: input
    hparams: model hyper-parameters
    encoder_self_attention_bias: a bias tensor for use in encoder self-attention
    attention_dropout_broadcast_dims: For noise broadcasting in the dropout
      layers to save memory during training
    save_weights_to: an optional dictionary to capture attention weights for
      visualization; the weights tensor will be appended there under a string
      key created from the variable scope (including name).
    make_image_summary: Whether to make an attention image summary.

  Returns:
    the output tensor

  """

  with tf.variable_scope("self_attention"):
    y = common_attention.multihead_attention(
        common_layers.layer_preprocess(x, hparams),
        None,
        encoder_self_attention_bias,
        hparams.attention_key_channels or hparams.hidden_size,
        hparams.attention_value_channels or hparams.hidden_size,
        hparams.hidden_size,
        hparams.num_heads,
        hparams.attention_dropout,
        attention_type=hparams.self_attention_type,
        save_weights_to=save_weights_to,
        max_relative_position=hparams.max_relative_position,
        make_image_summary=make_image_summary,
        dropout_broadcast_dims=attention_dropout_broadcast_dims)
    x = common_layers.layer_postprocess(x, y, hparams)
  return x 
Developer ID: akzaidi, Project: fine-lm, Lines of code: 43, Source file: universal_transformer_util.py
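
Examples 2 through 4 all follow the same pre-norm residual sandwich: normalize the input with layer_preprocess, run the sublayer, then close the residual with layer_postprocess. Below is a generic sketch of that pattern; my_sublayer is a hypothetical placeholder, not a tensor2tensor function.

# Pre-norm residual wrapper; my_sublayer stands in for attention, an FFN, etc.
from tensor2tensor.layers import common_layers

def residual_block(x, hparams, my_sublayer):
  # With layer_preprocess_sequence="n" this applies layer norm (and optional dropout).
  y = my_sublayer(common_layers.layer_preprocess(x, hparams))
  # With layer_postprocess_sequence="da" this applies dropout and adds the residual x.
  return common_layers.layer_postprocess(x, y, hparams)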

Example 5: attention_lm_decoder

# Required module import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import layer_preprocess [as alias]
def attention_lm_decoder(decoder_input,
                         decoder_self_attention_bias,
                         hparams,
                         name="decoder"):
  """A stack of attention_lm layers.

  Args:
    decoder_input: a Tensor
    decoder_self_attention_bias: bias Tensor for self-attention
      (see common_attention.attention_bias())
    hparams: hyperparameters for model
    name: a string

  Returns:
    y: a Tensor
  """
  x = decoder_input
  with tf.variable_scope(name):
    for layer in range(hparams.num_hidden_layers):
      with tf.variable_scope("layer_%d" % layer):
        with tf.variable_scope("self_attention"):
          y = common_attention.multihead_attention(
              common_layers.layer_preprocess(
                  x, hparams), None, decoder_self_attention_bias,
              hparams.attention_key_channels or hparams.hidden_size,
              hparams.attention_value_channels or hparams.hidden_size,
              hparams.hidden_size, hparams.num_heads, hparams.attention_dropout)
          x = common_layers.layer_postprocess(x, y, hparams)
        with tf.variable_scope("ffn"):
          y = common_layers.conv_hidden_relu(
              common_layers.layer_preprocess(x, hparams),
              hparams.filter_size,
              hparams.hidden_size,
              dropout=hparams.relu_dropout)
          x = common_layers.layer_postprocess(x, y, hparams)
    return common_layers.layer_preprocess(x, hparams) 
Developer ID: akzaidi, Project: fine-lm, Lines of code: 38, Source file: attention_lm.py
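
A hedged sketch of driving this decoder stack; attention_lm_base and the shapes below are illustrative assumptions, since the real model wires this up through the T2T problem/model machinery.

# Hypothetical driver for attention_lm_decoder(); values are illustrative.
import tensorflow as tf
from tensor2tensor.layers import common_attention
from tensor2tensor.models import attention_lm

hparams = attention_lm.attention_lm_base()
length = 12
decoder_input = tf.random.uniform([4, length, hparams.hidden_size])
bias = common_attention.attention_bias_lower_triangle(length)  # causal mask
out = attention_lm_decoder(decoder_input, bias, hparams)       # [4, length, hidden_size]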

Example 6: local_within_block_attention

# Required module import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import layer_preprocess [as alias]
def local_within_block_attention(x,
                                 self_attention_bias,
                                 hparams,
                                 attention_type="local_within_block_mask_right",
                                 q_padding="VALID",
                                 kv_padding="VALID"):
  """Local within block self attention."""
  x_new, x_shape, is_4d = maybe_reshape_4d_to_3d(x)
  with tf.variable_scope("local_within_block"):
    y = common_attention.multihead_attention(
        common_layers.layer_preprocess(x_new, hparams),
        None,
        self_attention_bias,
        hparams.attention_key_channels or hparams.hidden_size,
        hparams.attention_value_channels or hparams.hidden_size,
        hparams.hidden_size,
        hparams.num_heads,
        hparams.attention_dropout,
        attention_type=attention_type,
        block_width=hparams.block_width,
        block_length=hparams.block_length,
        q_padding=q_padding,
        kv_padding=kv_padding,
        q_filter_width=hparams.q_filter_width,
        kv_filter_width=hparams.kv_filter_width,
        name="local_within_block")
    if is_4d:
      y = tf.reshape(y, x_shape)
    return y 
Developer ID: akzaidi, Project: fine-lm, Lines of code: 31, Source file: common_image_attention.py

Example 7: transformer_encoder_layers

# Required module import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import layer_preprocess [as alias]
def transformer_encoder_layers(inputs,
                               num_layers,
                               hparams,
                               attention_type=AttentionType.GLOBAL,
                               self_attention_bias=None,
                               q_padding="VALID",
                               kv_padding="VALID",
                               name="transformer"):
  """Multi layer transformer encoder."""
  x = inputs
  x = tf.nn.dropout(x, 1.0 - hparams.layer_prepostprocess_dropout)

  for layer in range(num_layers):
    # attention layers + skip connections
    with tf.variable_scope("%s_layer_%d" % (name, layer)):
      if attention_type == AttentionType.LOCAL_2D:
        y = local_attention_2d(common_layers.layer_preprocess(x, hparams),
                               hparams,
                               attention_type="local_attention_2d")
      elif attention_type == AttentionType.LOCAL_1D:
        y = local_attention_1d(common_layers.layer_preprocess(x, hparams),
                               hparams,
                               attention_type="local_unmasked",
                               q_padding=q_padding, kv_padding=kv_padding)
      elif attention_type == AttentionType.GLOBAL:
        y = full_self_attention(common_layers.layer_preprocess(x, hparams),
                                self_attention_bias, hparams,
                                q_padding=q_padding, kv_padding=kv_padding)
      x = common_layers.layer_postprocess(x, y, hparams)
      # feed-fwd layer + skip connections
      y = ffn_layer(common_layers.layer_preprocess(x, hparams), hparams)
      x = common_layers.layer_postprocess(x, y, hparams)
  return common_layers.layer_preprocess(x, hparams) 
Developer ID: akzaidi, Project: fine-lm, Lines of code: 35, Source file: common_image_attention.py

Example 8: transformer_decoder_ffn_unit

# Required module import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import layer_preprocess [as alias]
def transformer_decoder_ffn_unit(x,
                                 hparams,
                                 nonpadding_mask=None):
  """Applies a feed-forward function which is parametrised for decoding.

  Args:
    x: input
    hparams: model hyper-parameters
    nonpadding_mask: optional Tensor with shape [batch_size, encoder_length]
      indicating what positions are not padding.  This is used to mask out
      padding in convolutional layers.  We generally only need this mask for
      "packed" datasets, because for ordinary datasets, no padding is ever
      followed by nonpadding.

  Returns:
    the output tensor

  """

  with tf.variable_scope("ffn"):
    if hparams.transformer_ffn_type == "fc":
      y = transformer.transformer_ffn_layer(
          common_layers.layer_preprocess(x, hparams),
          hparams,
          conv_padding="LEFT",
          nonpadding_mask=nonpadding_mask)

    if hparams.transformer_ffn_type == "sepconv":
      y = common_layers.sepconv_relu_sepconv(
          common_layers.layer_preprocess(x, hparams),
          filter_size=hparams.filter_size,
          output_size=hparams.hidden_size,
          first_kernel_size=(3, 1),
          second_kernel_size=(5, 1),
          padding="LEFT",
          nonpadding_mask=nonpadding_mask,
          dropout=hparams.relu_dropout)

    x = common_layers.layer_postprocess(x, y, hparams)

  return x 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 43, Source file: universal_transformer_util.py

Example 9: recurrent_transformer_decoder

# Required module import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import layer_preprocess [as alias]
def recurrent_transformer_decoder(
    decoder_input,
    encoder_output,
    decoder_self_attention_bias,
    encoder_decoder_attention_bias,
    hparams,
    name="decoder",
    nonpadding=None,
    save_weights_to=None,
    make_image_summary=True):
  """Recurrent decoder function."""
  x = decoder_input
  attention_dropout_broadcast_dims = (
      common_layers.comma_separated_string_to_integer_list(
          getattr(hparams, "attention_dropout_broadcast_dims", "")))
  with tf.variable_scope(name):
    ffn_unit = functools.partial(
        # use the encoder ffn, since the decoder ffn uses left padding
        universal_transformer_util.transformer_encoder_ffn_unit,
        hparams=hparams,
        nonpadding_mask=nonpadding)

    attention_unit = functools.partial(
        universal_transformer_util.transformer_decoder_attention_unit,
        hparams=hparams,
        encoder_output=encoder_output,
        decoder_self_attention_bias=decoder_self_attention_bias,
        encoder_decoder_attention_bias=encoder_decoder_attention_bias,
        attention_dropout_broadcast_dims=attention_dropout_broadcast_dims,
        save_weights_to=save_weights_to,
        make_image_summary=make_image_summary)

    x, extra_output = universal_transformer_util.universal_transformer_layer(
        x, hparams, ffn_unit, attention_unit)

    return common_layers.layer_preprocess(x, hparams), extra_output 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 38, Source file: vqa_recurrent_self_attention.py

Example 10: _apply_layer_norm

# Required module import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import layer_preprocess [as alias]
def _apply_layer_norm(input_tensor, nonpadding, hparams):
  """Applies Tensor2Tensor layer_norm to |input_tensor|."""
  input_depth = input_tensor.shape.as_list()[-1]
  if nonpadding is not None:
    nonpadding_input_tiled = tf.tile(
        tf.expand_dims(nonpadding, 2), [1, 1, input_depth])
    input_tensor *= nonpadding_input_tiled

  output_tensor = common_layers.layer_preprocess(input_tensor, hparams)
  if nonpadding is not None:
    output_tensor *= nonpadding_input_tiled

  return output_tensor 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 15, Source file: nas_model.py
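
A hedged usage sketch of the helper above; the mask and shapes are assumptions. The mask is multiplied in again after layer_preprocess so that padded positions stay exactly zero.

# Hypothetical call to _apply_layer_norm(); mask and shapes are illustrative.
import tensorflow as tf
from tensor2tensor.layers import common_hparams

hparams = common_hparams.basic_params1()
hparams.layer_preprocess_sequence = "n"  # apply layer norm
tokens = tf.random.uniform([2, 5, 32])   # [batch, length, depth]
nonpadding = tf.constant([[1., 1., 1., 0., 0.],
                          [1., 1., 1., 1., 0.]])  # 1 = real token, 0 = padding
normed = _apply_layer_norm(tokens, nonpadding, hparams)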

Example 11: image_channel_compress_top

# Required module import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import layer_preprocess [as alias]
def image_channel_compress_top(body_output, targets, model_hparams, vocab_size):
  """Transforms body output to return logits.

  Args:
    body_output: Tensor of shape [batch, img_len, img_len, depth].
    targets:
    model_hparams: HParams, model hyperparameters.
    vocab_size: int, vocabulary size.

  Returns:
    Tensor of shape [batch, img_len, img_len, channels, vocab_size].
  """
  del targets  # unused arg
  with tf.variable_scope("image_channel_compress_modality"):
    hidden_size = model_hparams.hidden_size
    img_len = model_hparams.img_len
    channels = 3  # RGB
    batch = common_layers.shape_list(body_output)[0]
    x = tf.layers.conv2d(
        body_output,
        hidden_size * channels,
        kernel_size=(1, 1),
        strides=(1, 1),
        padding="VALID",
        activation=tf.nn.relu,
        name="decompress_conv")
    x = tf.reshape(x, [batch, img_len, img_len * channels, hidden_size])
    x = common_layers.layer_preprocess(x, model_hparams)
    x = tf.layers.dense(x,
                        vocab_size,
                        use_bias=True,
                        activation=None,
                        name="output_conv")
    x = tf.reshape(
        x, [batch, img_len, img_len, channels, vocab_size])
    return x 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 38, Source file: modalities.py
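
A hedged call sketch for the modality top above; img_len, the vocabulary size, and the body_output shape are assumptions:

# Hypothetical call to image_channel_compress_top(); values are illustrative.
import tensorflow as tf
from tensor2tensor.layers import common_hparams

model_hparams = common_hparams.basic_params1()
model_hparams.hidden_size = 64
model_hparams.add_hparam("img_len", 8)            # assumed image side length
body_output = tf.random.uniform([2, 8, 8, 256])   # [batch, img_len, img_len, depth]
logits = image_channel_compress_top(body_output, None, model_hparams, vocab_size=256)
# logits: [2, 8, 8, 3, 256] = [batch, img_len, img_len, channels, vocab_size]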

Example 12: compress_self_attention_layer

# Required module import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import layer_preprocess [as alias]
def compress_self_attention_layer(x, hparams, name=None):
  """Attend function."""
  with tf.variable_scope(name, default_name="compress_self_attention"):
    x, xshape, _ = cia.maybe_reshape_4d_to_3d(x)
    y = common_attention.multihead_attention(
        common_layers.layer_preprocess(x, hparams),
        None,
        None,
        hparams.attention_key_channels or hparams.hidden_size,
        hparams.attention_value_channels or hparams.hidden_size,
        hparams.hidden_size, hparams.num_heads,
        hparams.attention_dropout)
    res = common_layers.layer_postprocess(x, y, hparams)
    return tf.reshape(res, xshape) 
Developer ID: yyht, Project: BERT, Lines of code: 16, Source file: latent_layers.py


Note: The tensor2tensor.layers.common_layers.layer_preprocess examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce without permission.