This article collects typical usage examples of the Python method tensor2tensor.models.transformer.transformer_prepare_encoder. If you are wondering what transformer.transformer_prepare_encoder does and how to use it, the curated code examples below may help. You can also explore further usage examples of the module it belongs to, tensor2tensor.models.transformer.
The following shows 10 code examples of the transformer.transformer_prepare_encoder method, sorted by popularity by default.
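Before the examples, here is a minimal sketch of the call pattern they all share. It assumes a TF1-era tensor2tensor install; the transformer_base() hparams set, the dummy input shape, and target_space=1 below are illustrative assumptions, not taken from the examples:

import tensorflow as tf
from tensor2tensor.layers import common_layers
from tensor2tensor.models import transformer

hparams = transformer.transformer_base()  # assumed hparams set, for illustration
# Dummy 4-D modality output: [batch, length, 1, hidden_size].
inputs = tf.zeros([8, 16, 1, hparams.hidden_size])
inputs = common_layers.flatten4d3d(inputs)  # -> [batch, length, hidden_size]
(encoder_input, encoder_self_attention_bias,
 encoder_decoder_attention_bias) = transformer.transformer_prepare_encoder(
     inputs, target_space=1, hparams=hparams)  # target_space=1 is an arbitrary space id
# Most examples below then apply dropout before running an encoder stack.
encoder_input = tf.nn.dropout(
    encoder_input, 1.0 - hparams.layer_prepostprocess_dropout)

The method returns the encoder input (augmented with positional and target-space signals) plus the self-attention and encoder-decoder attention bias tensors that mask padding; the examples differ mainly in which encoder stack consumes these outputs.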
Example 1: _prepare_encoder
# Required import: from tensor2tensor.models import transformer [as alias]
# Or: from tensor2tensor.models.transformer import transformer_prepare_encoder [as alias]
def _prepare_encoder(self, inputs, target_space):
  """Process the transformer encoder inputs."""
  inputs = common_layers.flatten4d3d(inputs)
  output = transformer.transformer_prepare_encoder(
      inputs,
      target_space,
      self._hparams,
      features=None,
  )
  enco_input, enco_self_att_bias, enco_deco_att_bias = output
  enco_input = tf.nn.dropout(
      enco_input, 1.0 - self._hparams.layer_prepostprocess_dropout)
  return enco_input, enco_self_att_bias, enco_deco_att_bias
Example 2: transformer_text_encoder
# Required import: from tensor2tensor.models import transformer [as alias]
# Or: from tensor2tensor.models.transformer import transformer_prepare_encoder [as alias]
def transformer_text_encoder(x,
                             space_id,
                             hparams,
                             name="transformer_text_encoder"):
  """Transformer text encoder over inputs with unmasked full attention.

  Args:
    x: Tensor of shape [batch, length, 1, hparams.hidden_size].
    space_id: int, id.
    hparams: tf.contrib.training.HParams.
    name: string, variable scope.

  Returns:
    encoder_output: Tensor of shape [batch, length, hparams.hidden_size].
    ed: Tensor of shape [batch, 1, 1, length]. Encoder-decoder attention bias
      for any padded tokens.
  """
  with tf.variable_scope(name):
    x = common_layers.flatten4d3d(x)
    (encoder_input, encoder_self_attention_bias,
     ed) = transformer.transformer_prepare_encoder(x, space_id, hparams)
    encoder_input = tf.nn.dropout(encoder_input, 1.0 - hparams.dropout)
    encoder_output = transformer.transformer_encoder(
        encoder_input, encoder_self_attention_bias, hparams)
    return encoder_output, ed
Example 3: encode
# Required import: from tensor2tensor.models import transformer [as alias]
# Or: from tensor2tensor.models.transformer import transformer_prepare_encoder [as alias]
def encode(self, features, input_key):
  """Encode features[input_key], then mean-pool over the time axis."""
  hparams = self._hparams
  inputs = common_layers.flatten4d3d(features[input_key])
  (encoder_input, encoder_self_attention_bias, _) = (
      transformer.transformer_prepare_encoder(inputs, problem.SpaceID.EN_TOK,
                                              hparams))
  encoder_input = tf.nn.dropout(encoder_input,
                                1.0 - hparams.layer_prepostprocess_dropout)
  encoder_output = transformer.transformer_encoder(
      encoder_input,
      encoder_self_attention_bias,
      hparams,
      nonpadding=transformer.features_to_nonpadding(features, input_key))
  # Mean-pool over the length dimension to get one vector per example.
  encoder_output = tf.reduce_mean(encoder_output, axis=1)
  return encoder_output
Example 4: universal_transformer_encoder
# Required import: from tensor2tensor.models import transformer [as alias]
# Or: from tensor2tensor.models.transformer import transformer_prepare_encoder [as alias]
def universal_transformer_encoder(inputs, target_space, hparams,
                                  features=None, make_image_summary=False):
  """Prepare inputs and run the Universal Transformer encoder."""
  encoder_input, self_attention_bias, encoder_decoder_attention_bias = (
      transformer.transformer_prepare_encoder(
          inputs, target_space, hparams, features=features))
  encoder_input = tf.nn.dropout(encoder_input,
                                1.0 - hparams.layer_prepostprocess_dropout)
  encoder_output, encoder_extra_output = (
      universal_transformer_util.universal_transformer_encoder(
          encoder_input,
          self_attention_bias,
          hparams,
          nonpadding=transformer.features_to_nonpadding(features, "inputs"),
          save_weights_to=None,
          make_image_summary=make_image_summary))
  # encoder_output = tf.expand_dims(encoder_output, 2)
  return encoder_output
Example 5: transformer_encoder
# Required import: from tensor2tensor.models import transformer [as alias]
# Or: from tensor2tensor.models.transformer import transformer_prepare_encoder [as alias]
def transformer_encoder(inputs, target_space, hparams, features=None,
                        losses=None):
  """Prepare inputs and run the standard Transformer encoder."""
  encoder_input, self_attention_bias, encoder_decoder_attention_bias = (
      transformer.transformer_prepare_encoder(
          inputs, target_space, hparams, features=features))
  encoder_input = tf.nn.dropout(encoder_input,
                                1.0 - hparams.layer_prepostprocess_dropout)
  encoder_output = transformer.transformer_encoder(
      encoder_input,
      self_attention_bias,
      hparams,
      nonpadding=transformer.features_to_nonpadding(features, "inputs"),
      save_weights_to=None,
      losses=losses)
  # encoder_output = tf.expand_dims(encoder_output, 2)
  return encoder_output
Example 6: body
# Required import: from tensor2tensor.models import transformer [as alias]
# Or: from tensor2tensor.models.transformer import transformer_prepare_encoder [as alias]
def body(self, features):
  hparams = self._hparams
  targets = features["targets"]
  inputs = features["inputs"]
  target_space = features["target_space_id"]
  inputs = common_layers.flatten4d3d(inputs)
  targets = common_layers.flatten4d3d(targets)
  (encoder_input, encoder_self_attention_bias,
   encoder_decoder_attention_bias) = (transformer.transformer_prepare_encoder(
       inputs, target_space, hparams))
  (decoder_input,
   decoder_self_attention_bias) = transformer.transformer_prepare_decoder(
       targets, hparams)
  encoder_input = tf.nn.dropout(encoder_input,
                                1.0 - hparams.layer_prepostprocess_dropout)
  decoder_input = tf.nn.dropout(decoder_input,
                                1.0 - hparams.layer_prepostprocess_dropout)
  encoder_output = transformer_revnet_encoder(
      encoder_input, encoder_self_attention_bias, hparams)
  decoder_output = transformer_revnet_decoder(
      decoder_input, encoder_output, decoder_self_attention_bias,
      encoder_decoder_attention_bias, hparams)
  decoder_output = tf.expand_dims(decoder_output, 2)
  return decoder_output
Example 7: encode
# Required import: from tensor2tensor.models import transformer [as alias]
# Or: from tensor2tensor.models.transformer import transformer_prepare_encoder [as alias]
def encode(x, x_space, hparams, name):
  """Transformer preparations and encoder."""
  with tf.variable_scope(name):
    (encoder_input, encoder_self_attention_bias,
     ed) = transformer.transformer_prepare_encoder(x, x_space, hparams)
    encoder_input = tf.nn.dropout(encoder_input, 1.0 - hparams.dropout)
    return transformer.transformer_encoder(
        encoder_input, encoder_self_attention_bias, hparams), ed
Example 8: encode
# Required import: from tensor2tensor.models import transformer [as alias]
# Or: from tensor2tensor.models.transformer import transformer_prepare_encoder [as alias]
def encode(self, inputs, target_space, hparams, features=None, losses=None):
  """Encode transformer inputs.

  Args:
    inputs: Transformer inputs [batch_size, input_length, input_height,
      hidden_dim] which will be flattened along the two spatial dimensions.
    target_space: scalar, target space ID.
    hparams: hyperparameters for model.
    features: optionally pass the entire features dictionary as well. This is
      needed now for "packed" datasets.
    losses: Unused.

  Returns:
    Tuple of:
      encoder_output: Encoder representation.
        [batch_size, input_length, hidden_dim]
      encoder_extra_output: extra encoder output used in some variants of the
        model (e.g. in ACT, to pass the ponder-time to body).
  """
  del losses
  inputs = common_layers.flatten4d3d(inputs)
  (encoder_input, self_attention_bias, _) = (
      transformer.transformer_prepare_encoder(inputs, target_space, hparams))
  encoder_input = tf.nn.dropout(encoder_input,
                                1.0 - hparams.layer_prepostprocess_dropout)
  (encoder_output, encoder_extra_output) = (
      universal_transformer_util.universal_transformer_encoder(
          encoder_input,
          self_attention_bias,
          hparams,
          nonpadding=transformer.features_to_nonpadding(features, "inputs"),
          save_weights_to=self.attention_weights))
  return encoder_output, encoder_extra_output
Example 9: encode
# Required import: from tensor2tensor.models import transformer [as alias]
# Or: from tensor2tensor.models.transformer import transformer_prepare_encoder [as alias]
def encode(self, inputs, target_space, hparams, features=None, losses=None):
  """Encode Universal Transformer inputs.

  It is similar to "transformer.encode", but it uses
  "universal_transformer_util.universal_transformer_encoder" instead of
  "transformer.transformer_encoder".

  Args:
    inputs: Transformer inputs [batch_size, input_length, input_height,
      hidden_dim] which will be flattened along the two spatial dimensions.
    target_space: scalar, target space ID.
    hparams: hyperparameters for model.
    features: optionally pass the entire features dictionary as well. This is
      needed now for "packed" datasets.
    losses: Unused.

  Returns:
    Tuple of:
      encoder_output: Encoder representation.
        [batch_size, input_length, hidden_dim]
      encoder_decoder_attention_bias: Bias and mask weights for
        encoder-decoder attention. [batch_size, input_length]
      encoder_extra_output: extra encoder output used in some variants of the
        model (e.g. in ACT, to pass the ponder-time to body).
  """
  del losses
  inputs = common_layers.flatten4d3d(inputs)
  encoder_input, self_attention_bias, encoder_decoder_attention_bias = (
      transformer.transformer_prepare_encoder(
          inputs, target_space, hparams, features=features))
  encoder_input = tf.nn.dropout(encoder_input,
                                1.0 - hparams.layer_prepostprocess_dropout)
  (encoder_output, encoder_extra_output) = (
      universal_transformer_util.universal_transformer_encoder(
          encoder_input,
          self_attention_bias,
          hparams,
          nonpadding=transformer.features_to_nonpadding(features, "inputs"),
          save_weights_to=self.attention_weights))
  return encoder_output, encoder_decoder_attention_bias, encoder_extra_output
Example 10: encode
# Required import: from tensor2tensor.models import transformer [as alias]
# Or: from tensor2tensor.models.transformer import transformer_prepare_encoder [as alias]
def encode(self, inputs, target_space, hparams, features=None, losses=None):
  """Encode inputs using _encoder().

  This performs the same way as transformer.Transformer.encode with the
  encoder portion replaced with _encoder().

  Args:
    inputs: Input [batch_size, input_length, input_height, hidden_dim] tensor
      which will be flattened along the two spatial dimensions.
    target_space: scalar, target space ID.
    hparams: Hyperparameters for model.
    features: Optionally pass the entire features dictionary as well. This is
      needed now for "packed" datasets.
    losses: Unused list of losses.

  Returns:
    Tuple of:
      encoder_output: Encoder representation.
        [batch_size, input_length, hidden_dim]
      encoder_decoder_attention_bias: Bias and mask weights for
        encoder-decoder attention. [batch_size, input_length]

  Raises:
    ValueError: If the encoder type is not found.
  """
  inputs = common_layers.flatten4d3d(inputs)
  encoder_input, self_attention_bias, encoder_decoder_attention_bias = (
      transformer.transformer_prepare_encoder(
          inputs, target_space, hparams, features=features))
  encoder_input = tf.nn.dropout(encoder_input,
                                1.0 - hparams.layer_prepostprocess_dropout)
  encoder_output = self._encoder(
      encoder_input,
      self_attention_bias,
      hparams,
      nonpadding=transformer.features_to_nonpadding(features, "inputs"),
      save_weights_to=self.attention_weights)
  return encoder_output, encoder_decoder_attention_bias