Python common_layers.length_from_embedding Method Code Examples

This article collects typical usage examples of the Python method tensor2tensor.layers.common_layers.length_from_embedding, gathered from open-source projects. If you are unsure how to use common_layers.length_from_embedding, the examples selected below may help; you can also explore other usages of tensor2tensor.layers.common_layers.


Eight code examples of common_layers.length_from_embedding are shown below, sorted by popularity by default.
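
Before diving into the examples, here is a minimal, hypothetical sketch (not taken from any of the projects below) of what the method computes. It assumes the tensor2tensor convention that all-zero embedding vectors mark padding, so length_from_embedding returns the number of non-padding time steps for each batch entry:

import tensorflow as tf
from tensor2tensor.layers import common_layers

# <float32> [batch_size=2, seq_len=3, 1, emb_size=2]; the second sequence
# is padded with all-zero embedding vectors after its first step.
emb = tf.constant([[[[1., 1.]], [[2., 2.]], [[3., 3.]]],
                   [[[4., 4.]], [[0., 0.]], [[0., 0.]]]])
lengths = common_layers.length_from_embedding(emb)
# lengths == [3, 1] after evaluation: an int32 Tensor of shape [batch_size].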

Example 1: lstm_seq2seq_internal_attention

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import length_from_embedding [as alias]
def lstm_seq2seq_internal_attention(inputs, targets, hparams, train):
  """LSTM seq2seq model with attention, main step used for training."""
  with tf.variable_scope("lstm_seq2seq_attention"):
    # This is a temporary fix for varying-length sequences within a batch.
    # A more complete fix should pass a length tensor from outside so that
    # all the lstm variants can use it.
    inputs_length = common_layers.length_from_embedding(inputs)
    # Flatten inputs.
    inputs = common_layers.flatten4d3d(inputs)

    # LSTM encoder.
    inputs = tf.reverse_sequence(inputs, inputs_length, seq_axis=1)
    encoder_outputs, final_encoder_state = lstm(
        inputs, inputs_length, hparams, train, "encoder")

    # LSTM decoder with attention.
    shifted_targets = common_layers.shift_right(targets)
    # Add 1 to account for the padding added to the left from shift_right
    targets_length = common_layers.length_from_embedding(shifted_targets) + 1
    decoder_outputs = lstm_attention_decoder(
        common_layers.flatten4d3d(shifted_targets), hparams, train, "decoder",
        final_encoder_state, encoder_outputs, inputs_length, targets_length)
    return tf.expand_dims(decoder_outputs, axis=2) 
Developer: akzaidi, Project: fine-lm, Lines: 25, Source: lstm.py
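
A note on the "+ 1" in the snippet above (our gloss, not stated in the source): shift_right prepends an all-zero pad embedding and drops the last time step, and length_from_embedding only counts non-zero steps, so the pad itself goes uncounted. For a padded batch entry:

# targets              = [t0, t1, t2, 0]   -> true length 3
# shift_right(targets) = [ 0, t0, t1, t2]  -> length_from_embedding yields 3
# + 1 counts the left pad, so the decoder runs for 4 steps (pad + 3 tokens).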

Example 2: lstm_seq2seq_internal_attention_bid_encoder

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import length_from_embedding [as alias]
def lstm_seq2seq_internal_attention_bid_encoder(inputs, targets, hparams,
                                                train):
  """LSTM seq2seq model with attention, main step used for training."""
  with tf.variable_scope("lstm_seq2seq_attention_bid_encoder"):
    inputs_length = common_layers.length_from_embedding(inputs)
    # Flatten inputs.
    inputs = common_layers.flatten4d3d(inputs)
    # LSTM encoder.
    encoder_outputs, final_encoder_state = lstm_bid_encoder(
        inputs, inputs_length, hparams, train, "encoder")
    # LSTM decoder with attention
    shifted_targets = common_layers.shift_right(targets)
    # Add 1 to account for the padding added to the left from shift_right
    targets_length = common_layers.length_from_embedding(shifted_targets) + 1
    hparams_decoder = copy.copy(hparams)
    hparams_decoder.hidden_size = 2 * hparams.hidden_size
    decoder_outputs = lstm_attention_decoder(
        common_layers.flatten4d3d(shifted_targets), hparams_decoder, train,
        "decoder", final_encoder_state, encoder_outputs,
        inputs_length, targets_length)
    return tf.expand_dims(decoder_outputs, axis=2) 
Developer: akzaidi, Project: fine-lm, Lines: 23, Source: lstm.py

Example 3: lstm_seq2seq_internal

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import length_from_embedding [as alias]
def lstm_seq2seq_internal(inputs, targets, hparams, train):
  """The basic LSTM seq2seq model, main step used for training."""
  with tf.variable_scope("lstm_seq2seq"):
    if inputs is not None:
      inputs_length = common_layers.length_from_embedding(inputs)
      # Flatten inputs.
      inputs = common_layers.flatten4d3d(inputs)

      # LSTM encoder.
      inputs = tf.reverse_sequence(inputs, inputs_length, seq_axis=1)
      _, final_encoder_state = lstm(inputs, inputs_length, hparams, train,
                                    "encoder")
    else:
      final_encoder_state = None

    # LSTM decoder.
    shifted_targets = common_layers.shift_right(targets)
    # Add 1 to account for the padding added to the left from shift_right
    targets_length = common_layers.length_from_embedding(shifted_targets) + 1
    decoder_outputs, _ = lstm(
        common_layers.flatten4d3d(shifted_targets),
        targets_length,
        hparams,
        train,
        "decoder",
        initial_state=final_encoder_state)
    return tf.expand_dims(decoder_outputs, axis=2) 
Developer: akzaidi, Project: fine-lm, Lines: 29, Source: lstm.py

Example 4: lstm_seq2seq_internal_bid_encoder

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import length_from_embedding [as alias]
def lstm_seq2seq_internal_bid_encoder(inputs, targets, hparams, train):
  """The basic LSTM seq2seq model with bidirectional encoder."""
  with tf.variable_scope("lstm_seq2seq_bid_encoder"):
    if inputs is not None:
      inputs_length = common_layers.length_from_embedding(inputs)
      # Flatten inputs.
      inputs = common_layers.flatten4d3d(inputs)
      # LSTM encoder.
      _, final_encoder_state = lstm_bid_encoder(
          inputs, inputs_length, hparams, train, "encoder")
    else:
      inputs_length = None
      final_encoder_state = None
    # LSTM decoder.
    shifted_targets = common_layers.shift_right(targets)
    # Add 1 to account for the padding added to the left from shift_right
    targets_length = common_layers.length_from_embedding(shifted_targets) + 1
    hparams_decoder = copy.copy(hparams)
    hparams_decoder.hidden_size = 2 * hparams.hidden_size
    decoder_outputs, _ = lstm(
        common_layers.flatten4d3d(shifted_targets),
        targets_length,
        hparams_decoder,
        train,
        "decoder",
        initial_state=final_encoder_state)
    return tf.expand_dims(decoder_outputs, axis=2) 
Developer: akzaidi, Project: fine-lm, Lines: 29, Source: lstm.py

Example 5: body

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import length_from_embedding [as alias]
def body(self, features):
    if self._hparams.initializer == "orthogonal":
      raise ValueError("LSTM models fail with orthogonal initializer.")
    train = self._hparams.mode == tf.estimator.ModeKeys.TRAIN
    inputs = features.get("inputs")
    inputs_length = common_layers.length_from_embedding(inputs)
    # Flatten inputs.
    inputs = common_layers.flatten4d3d(inputs)
    # LSTM encoder.
    inputs = tf.reverse_sequence(inputs, inputs_length, seq_axis=1)
    encoder_output, _ = lstm(inputs, inputs_length, self._hparams, train,
                             "encoder")
    return tf.expand_dims(encoder_output, axis=2) 
Developer: akzaidi, Project: fine-lm, Lines: 15, Source: lstm.py

Example 6: _build_inputs_and_targets

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import length_from_embedding [as alias]
def _build_inputs_and_targets(
      self, from_seqs=None, from_tags=None, to_seqs=None, to_tags=None):
    """Given from and to sequences and tags, construct inputs and targets."""
    del from_tags  # Unused.
    if from_seqs is not None:
      inputs = from_seqs
      inputs_length = common_layers.length_from_embedding(inputs)
      if to_tags is not None:
        # Add to-tags to the inputs and adjust lengths.
        # <float32> [batch_size, seq_len + 1, 1, emb_size].
        inputs = tf.concat([to_tags, inputs], axis=1)
        inputs_length = inputs_length + 1
      inputs = common_layers.flatten4d3d(inputs)
    else:
      inputs = None
      inputs_length = None

    if to_seqs is not None:
      # Shift to-sequences to form targets.
      # <float32> [batch_size, seq_len, 1, emb_size].
      targets = common_layers.shift_right(to_seqs)
      # Add 1 to account for the padding added to the left from shift_right.
      targets_length = common_layers.length_from_embedding(targets) + 1
      targets = common_layers.flatten4d3d(targets)
    else:
      targets = None
      targets_length = None

    return (inputs, inputs_length), (targets, targets_length) 
Developer: google-research, Project: language, Lines: 31, Source: agreement.py

Example 7: _build_lm_inputs

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import length_from_embedding [as alias]
def _build_lm_inputs(self, features):
    """Builds inputs and targets for LM training."""
    targets = features["targets"]
    target_tags = features["target_tags"]

    if self._hparams.mode == tf.estimator.ModeKeys.PREDICT:
      target_tags = tf.tile(target_tags, [self._hparams.beam_width, 1, 1, 1])

    # Construct LM inputs.
    inputs = common_layers.shift_right(targets, pad_value=target_tags)
    inputs_length = common_layers.length_from_embedding(targets) + 1
    inputs = common_layers.flatten4d3d(inputs)

    return inputs, inputs_length 
Developer: google-research, Project: language, Lines: 16, Source: agreement.py
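
Two details in Example 7 are easy to miss (our reading, not stated explicitly in the source): shift_right accepts a pad_value, so here the shifted inputs are padded on the left with the target-tag embedding rather than with zeros, which acts as a language-tag GO symbol; and because that pad is non-zero, the length is derived from the unshifted targets plus one for the prepended tag, rather than by calling length_from_embedding on the shifted inputs:

# targets = [t0, t1, t2, 0], target_tags = [tag]
# shift_right(targets, pad_value=target_tags) -> [tag, t0, t1, t2]
# length_from_embedding(targets) + 1 = 3 + 1 = 4   (tag + 3 real tokens)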

Example 8: _preprocess

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import length_from_embedding [as alias]
def _preprocess(self, features):
    """Preprocesses features for multilingual translation."""
    inputs = features["inputs"]
    targets = features["targets"]
    target_tags = features["target_tags"]

    # Expand target tags to beam width, if necessary.
    if self._hparams.mode == tf.estimator.ModeKeys.PREDICT:
      # <float32> [batch_size * beam_width, 1, 1, emb_size].
      beam_width = self._hparams.beam_width
      target_tags = tf.tile(target_tags, [beam_width, 1, 1, 1])

    # Add target tags to the input sequences.
    # <float32> [batch_size, seq_len + 1, 1, emb_size].
    inputs = tf.concat([target_tags, inputs], axis=1)

    # Compute length of the input sequences.
    inputs_length = common_layers.length_from_embedding(inputs)
    inputs = common_layers.flatten4d3d(inputs)

    # Preprocess targets.
    targets = common_layers.shift_right(targets)
    # Add 1 to account for the padding added to the left from shift_right.
    targets_length = common_layers.length_from_embedding(targets) + 1
    targets = common_layers.flatten4d3d(targets)

    return inputs, inputs_length, targets, targets_length 
Developer: google-research, Project: language, Lines: 29, Source: basic.py


Note: The tensor2tensor.layers.common_layers.length_from_embedding examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and any use or redistribution should follow the corresponding projects' licenses. Please do not reproduce without permission.