

Python common_layers.shift_right Method Code Examples

This article collects typical usage examples of the Python method tensor2tensor.layers.common_layers.shift_right. If you are unsure how to call common_layers.shift_right, or want to see how it is used in real code, the curated examples below may help. You can also browse further usage examples from its containing module, tensor2tensor.layers.common_layers.


The following presents 14 code examples of the common_layers.shift_right method, sorted by popularity by default.
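Before the examples, here is a minimal sketch of what shift_right does: it shifts the targets one step to the right along the time axis (axis 1 of a [batch, length, 1, depth] tensor), padding the first step with zeros and dropping the last step, so a decoder at step t only sees targets up to step t-1. This sketch is not taken from any of the projects below; it assumes the TF1 graph-mode setup these examples use and the zero-padding behavior demonstrated by the unit tests in Examples 7 and 10.

import numpy as np
import tensorflow as tf

from tensor2tensor.layers import common_layers

# Toy targets of shape [batch, length, 1, depth].
targets = np.arange(2 * 4 * 1 * 3, dtype=np.float32).reshape(2, 4, 1, 3)
shifted = common_layers.shift_right(tf.constant(targets))

with tf.Session() as sess:
  out = sess.run(shifted)

# The first time step is now all zeros and the original last step is dropped.
assert np.all(out[:, 0] == 0.0)
assert np.allclose(out[:, 1:], targets[:, :-1])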

Example 1: lstm_seq2seq_internal_attention

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import shift_right [as alias]
def lstm_seq2seq_internal_attention(inputs, targets, hparams, train):
  """LSTM seq2seq model with attention, main step used for training."""
  with tf.variable_scope("lstm_seq2seq_attention"):
    # This is a temporary fix for varying-length sequences within a batch.
    # A more complete fix should pass a length tensor from outside so that
    # all the lstm variants can use it.
    inputs_length = common_layers.length_from_embedding(inputs)
    # Flatten inputs.
    inputs = common_layers.flatten4d3d(inputs)

    # LSTM encoder.
    inputs = tf.reverse_sequence(inputs, inputs_length, seq_axis=1)
    encoder_outputs, final_encoder_state = lstm(
        inputs, inputs_length, hparams, train, "encoder")

    # LSTM decoder with attention.
    shifted_targets = common_layers.shift_right(targets)
    # Add 1 to account for the padding added to the left from shift_right
    targets_length = common_layers.length_from_embedding(shifted_targets) + 1
    decoder_outputs = lstm_attention_decoder(
        common_layers.flatten4d3d(shifted_targets), hparams, train, "decoder",
        final_encoder_state, encoder_outputs, inputs_length, targets_length)
    return tf.expand_dims(decoder_outputs, axis=2) 
Developer: akzaidi, Project: fine-lm, Lines of code: 25, Source: lstm.py

Example 2: lstm_seq2seq_internal_attention_bid_encoder

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import shift_right [as alias]
def lstm_seq2seq_internal_attention_bid_encoder(inputs, targets, hparams,
                                                train):
  """LSTM seq2seq model with attention, main step used for training."""
  with tf.variable_scope("lstm_seq2seq_attention_bid_encoder"):
    inputs_length = common_layers.length_from_embedding(inputs)
    # Flatten inputs.
    inputs = common_layers.flatten4d3d(inputs)
    # LSTM encoder.
    encoder_outputs, final_encoder_state = lstm_bid_encoder(
        inputs, inputs_length, hparams, train, "encoder")
    # LSTM decoder with attention
    shifted_targets = common_layers.shift_right(targets)
    # Add 1 to account for the padding added to the left from shift_right
    targets_length = common_layers.length_from_embedding(shifted_targets) + 1
    hparams_decoder = copy.copy(hparams)
    hparams_decoder.hidden_size = 2 * hparams.hidden_size
    decoder_outputs = lstm_attention_decoder(
        common_layers.flatten4d3d(shifted_targets), hparams_decoder, train,
        "decoder", final_encoder_state, encoder_outputs,
        inputs_length, targets_length)
    return tf.expand_dims(decoder_outputs, axis=2) 
Developer: akzaidi, Project: fine-lm, Lines of code: 23, Source: lstm.py

Example 3: bytenet_internal

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import shift_right [as alias]
def bytenet_internal(inputs, targets, hparams):
  """ByteNet, main step used for training."""
  with tf.variable_scope("bytenet"):
    # Flatten inputs and extend length by 50%.
    inputs = tf.expand_dims(common_layers.flatten4d3d(inputs), axis=2)
    extend_length = tf.to_int32(0.5 * tf.to_float(tf.shape(inputs)[1]))
    inputs_shape = inputs.shape.as_list()
    inputs = tf.pad(inputs, [[0, 0], [0, extend_length], [0, 0], [0, 0]])
    inputs_shape[1] = None
    inputs.set_shape(inputs_shape)  # Don't lose the other shapes when padding.
    # Pad inputs and targets to be the same length, divisible by 50.
    inputs, targets = common_layers.pad_to_same_length(
        inputs, targets, final_length_divisible_by=50)
    final_encoder = residual_dilated_conv(inputs, hparams.num_block_repeat,
                                          "SAME", "encoder", hparams)

    shifted_targets = common_layers.shift_right(targets)
    kernel = (hparams.kernel_height, hparams.kernel_width)
    decoder_start = common_layers.conv_block(
        tf.concat([final_encoder, shifted_targets], axis=3),
        hparams.hidden_size, [((1, 1), kernel)],
        padding="LEFT")

    return residual_dilated_conv(decoder_start, hparams.num_block_repeat,
                                 "LEFT", "decoder", hparams) 
Developer: akzaidi, Project: fine-lm, Lines of code: 27, Source: bytenet.py

Example 4: lstm_seq2seq_internal_attention

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import shift_right [as alias]
def lstm_seq2seq_internal_attention(inputs, targets, hparams, train,
                                    inputs_length, targets_length):
  """LSTM seq2seq model with attention, main step used for training."""
  with tf.variable_scope("lstm_seq2seq_attention"):
    # Flatten inputs.
    inputs = common_layers.flatten4d3d(inputs)

    # LSTM encoder.
    inputs = tf.reverse_sequence(inputs, inputs_length, seq_axis=1)
    encoder_outputs, final_encoder_state = lstm(
        inputs, inputs_length, hparams, train, "encoder")

    # LSTM decoder with attention.
    shifted_targets = common_layers.shift_right(targets)
    # Add 1 to account for the padding added to the left from shift_right
    targets_length = targets_length + 1
    decoder_outputs = lstm_attention_decoder(
        common_layers.flatten4d3d(shifted_targets), hparams, train, "decoder",
        final_encoder_state, encoder_outputs, inputs_length, targets_length)
    return tf.expand_dims(decoder_outputs, axis=2) 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 22, Source: lstm.py

Example 5: lstm_seq2seq_internal

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import shift_right [as alias]
def lstm_seq2seq_internal(inputs, targets, hparams, train):
  """The basic LSTM seq2seq model, main step used for training."""
  with tf.variable_scope("lstm_seq2seq"):
    if inputs is not None:
      inputs_length = common_layers.length_from_embedding(inputs)
      # Flatten inputs.
      inputs = common_layers.flatten4d3d(inputs)

      # LSTM encoder.
      inputs = tf.reverse_sequence(inputs, inputs_length, seq_axis=1)
      _, final_encoder_state = lstm(inputs, inputs_length, hparams, train,
                                    "encoder")
    else:
      final_encoder_state = None

    # LSTM decoder.
    shifted_targets = common_layers.shift_right(targets)
    # Add 1 to account for the padding added to the left from shift_right
    targets_length = common_layers.length_from_embedding(shifted_targets) + 1
    decoder_outputs, _ = lstm(
        common_layers.flatten4d3d(shifted_targets),
        targets_length,
        hparams,
        train,
        "decoder",
        initial_state=final_encoder_state)
    return tf.expand_dims(decoder_outputs, axis=2) 
Developer: akzaidi, Project: fine-lm, Lines of code: 29, Source: lstm.py

Example 6: lstm_seq2seq_internal_bid_encoder

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import shift_right [as alias]
def lstm_seq2seq_internal_bid_encoder(inputs, targets, hparams, train):
  """The basic LSTM seq2seq model with bidirectional encoder."""
  with tf.variable_scope("lstm_seq2seq_bid_encoder"):
    if inputs is not None:
      inputs_length = common_layers.length_from_embedding(inputs)
      # Flatten inputs.
      inputs = common_layers.flatten4d3d(inputs)
      # LSTM encoder.
      _, final_encoder_state = lstm_bid_encoder(
          inputs, inputs_length, hparams, train, "encoder")
    else:
      inputs_length = None
      final_encoder_state = None
    # LSTM decoder.
    shifted_targets = common_layers.shift_right(targets)
    # Add 1 to account for the padding added to the left from shift_right
    targets_length = common_layers.length_from_embedding(shifted_targets) + 1
    hparams_decoder = copy.copy(hparams)
    hparams_decoder.hidden_size = 2 * hparams.hidden_size
    decoder_outputs, _ = lstm(
        common_layers.flatten4d3d(shifted_targets),
        targets_length,
        hparams_decoder,
        train,
        "decoder",
        initial_state=final_encoder_state)
    return tf.expand_dims(decoder_outputs, axis=2) 
Developer: akzaidi, Project: fine-lm, Lines of code: 29, Source: lstm.py

Example 7: testShiftLeft

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import shift_right [as alias]
def testShiftLeft(self):
    x1 = np.zeros((5, 7, 1, 11))
    x1[:, 0, :] = np.ones_like(x1[:, 0, :])
    expected = np.zeros((5, 7, 1, 11))
    expected[:, 1, :] = np.ones_like(expected[:, 1, :])
    with self.test_session() as session:
      a = common_layers.shift_right(tf.constant(x1, dtype=tf.float32))
      actual = session.run(a)
    self.assertAllEqual(actual, expected) 
Developer: akzaidi, Project: fine-lm, Lines of code: 11, Source: common_layers_test.py

Example 8: body

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import shift_right [as alias]
def body(self, features):
    """Build the main body of the model.

    Args:
      features: A dict of "inputs" and "targets" which have already been passed
        through an embedding layer. Inputs should have shape
        [batch_size, max_seq_length, 1, embedding_size]. Targets should have
        shape [batch_size, max_seq_length, 1, 1]

    Returns:
      The logits which get passed to the top of the model for inference.
      A tensor of shape [batch_size, seq_length, 1, embedding_size]
    """
    inputs = features.get("inputs")
    targets = features["targets"]

    if inputs is not None:
      inputs = common_layers.flatten4d3d(inputs)
      _, final_encoder_state = self._rnn(tf.reverse(inputs, axis=[1]),
                                         "encoder")
    else:
      final_encoder_state = None

    shifted_targets = common_layers.shift_right(targets)
    decoder_outputs, _ = self._rnn(
        common_layers.flatten4d3d(shifted_targets),
        "decoder",
        initial_state=final_encoder_state)
    return decoder_outputs 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 31, Source: neural_stack.py

Example 9: slicenet_middle

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import shift_right [as alias]
def slicenet_middle(inputs_encoded, targets, target_space_emb, mask, hparams):
  """Middle part of slicenet, connecting encoder and decoder."""

  def norm_fn(x, name):
    with tf.variable_scope(name, default_name="norm"):
      return common_layers.apply_norm(x, hparams.norm_type, hparams.hidden_size,
                                      hparams.norm_epsilon)

  # Flatten targets and embed target_space_id.
  targets_flat = tf.expand_dims(common_layers.flatten4d3d(targets), axis=2)
  target_space_emb = tf.tile(target_space_emb,
                             [tf.shape(targets_flat)[0], 1, 1, 1])

  # Use attention from each target to look at input and retrieve.
  targets_shifted = common_layers.shift_right(
      targets_flat, pad_value=target_space_emb)
  if hparams.attention_type == "none":
    targets_with_attention = tf.zeros_like(targets_shifted)
  else:
    inputs_padding_bias = (1.0 - mask) * -1e9  # Bias to not attend to padding.
    targets_with_attention = attention(
        targets_shifted,
        inputs_encoded,
        norm_fn,
        hparams,
        bias=inputs_padding_bias)

  # Positional targets: merge attention and raw.
  kernel = (hparams.kernel_height, hparams.kernel_width)
  targets_merged = common_layers.subseparable_conv_block(
      tf.concat([targets_with_attention, targets_shifted], axis=3),
      hparams.hidden_size, [((1, 1), kernel)],
      normalizer_fn=norm_fn,
      padding="LEFT",
      separability=4,
      name="targets_merge")

  return targets_merged, 0.0 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 40, Source: slicenet.py
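Example 9 (and Example 13 further down) passes the optional pad_value argument, so the first decoder step receives a target-space or tag embedding instead of zero padding. The sketch below is not from either project; it assumes pad_value is a [batch, 1, 1, depth] tensor that shift_right prepends along the time axis before dropping the last step, which matches the shape of the tiled target_space_emb above.

import numpy as np
import tensorflow as tf

from tensor2tensor.layers import common_layers

targets = np.ones((2, 4, 1, 3), dtype=np.float32)       # [batch, length, 1, depth]
tag_emb = np.full((2, 1, 1, 3), 7.0, dtype=np.float32)  # one "tag" step per example

shifted = common_layers.shift_right(
    tf.constant(targets), pad_value=tf.constant(tag_emb))

with tf.Session() as sess:
  out = sess.run(shifted)

# Step 0 is the tag embedding; the remaining steps are the targets shifted right.
assert np.all(out[:, 0] == 7.0)
assert np.allclose(out[:, 1:], targets[:, :-1])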

Example 10: testShiftLeft

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import shift_right [as alias]
def testShiftLeft(self):
    x1 = np.zeros((5, 7, 1, 11))
    x1[:, 0, :] = np.ones_like(x1[:, 0, :])
    expected = np.zeros((5, 7, 1, 11))
    expected[:, 1, :] = np.ones_like(expected[:, 1, :])
    a = common_layers.shift_right(tf.constant(x1, dtype=tf.float32))
    actual = self.evaluate(a)
    self.assertAllEqual(actual, expected) 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 10, Source: common_layers_test.py

Example 11: lstm_seq2seq_internal_dynamic

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import shift_right [as alias]
def lstm_seq2seq_internal_dynamic(inputs, targets, hparams, train):
  '''The basic LSTM seq2seq model, main step used for training.'''
  with tf.variable_scope('lstm_seq2seq'):
    if inputs is not None:
      # Flatten inputs.
      inputs = common_layers.flatten4d3d(inputs)
      # LSTM encoder.
      _, final_encoder_state = lstm(
          tf.reverse(inputs, axis=[1]), hparams, train, 'encoder')

    else:
      final_encoder_state = None
    # LSTM decoder.
    shifted_targets = common_layers.shift_right(targets)
    decoder_outputs, _ = lstm(
        common_layers.flatten4d3d(shifted_targets),
        hparams,
        train,
        'decoder',
        initial_state=final_encoder_state)

    # Project the outputs.
    with tf.variable_scope('projection'):
      projected_outputs = tf.layers.dense(decoder_outputs,
                                          2048,
                                          activation=None,
                                          use_bias=False)
    return tf.expand_dims(projected_outputs, axis=2), final_encoder_state[0] 
Developer: ricsinaruto, Project: Seq2seqChatbots, Lines of code: 30, Source: gradient_checkpointed_seq2seq.py

Example 12: _build_inputs_and_targets

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import shift_right [as alias]
def _build_inputs_and_targets(
      self, from_seqs=None, from_tags=None, to_seqs=None, to_tags=None):
    """Given from and to sequences and tags, construct inputs and targets."""
    del from_tags  # Unused.
    if from_seqs is not None:
      inputs = from_seqs
      inputs_length = common_layers.length_from_embedding(inputs)
      if to_tags is not None:
        # Add to-tags to the inputs and adjust lengths.
        # <float32> [batch_size, seq_len + 1, 1, emb_size].
        inputs = tf.concat([to_tags, inputs], axis=1)
        inputs_length = inputs_length + 1
      inputs = common_layers.flatten4d3d(inputs)
    else:
      inputs = None
      inputs_length = None

    if to_seqs is not None:
      # Shift to-sequences to form targets.
      # <float32> [batch_size, seq_len, 1, emb_size].
      targets = common_layers.shift_right(to_seqs)
      # Add 1 to account for the padding added to the left from shift_right.
      targets_length = common_layers.length_from_embedding(targets) + 1
      targets = common_layers.flatten4d3d(targets)
    else:
      targets = None
      targets_length = None

    return (inputs, inputs_length), (targets, targets_length) 
Developer: google-research, Project: language, Lines of code: 31, Source: agreement.py

Example 13: _build_lm_inputs

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import shift_right [as alias]
def _build_lm_inputs(self, features):
    """Builds inputs and targets for LM training."""
    targets = features["targets"]
    target_tags = features["target_tags"]

    if self._hparams.mode == tf.estimator.ModeKeys.PREDICT:
      target_tags = tf.tile(target_tags, [self._hparams.beam_width, 1, 1, 1])

    # Construct LM inputs.
    inputs = common_layers.shift_right(targets, pad_value=target_tags)
    inputs_length = common_layers.length_from_embedding(targets) + 1
    inputs = common_layers.flatten4d3d(inputs)

    return inputs, inputs_length 
Developer: google-research, Project: language, Lines of code: 16, Source: agreement.py

Example 14: _preprocess

# Required import: from tensor2tensor.layers import common_layers [as alias]
# Or: from tensor2tensor.layers.common_layers import shift_right [as alias]
def _preprocess(self, features):
    """Preprocesses features for multilingual translation."""
    inputs = features["inputs"]
    targets = features["targets"]
    target_tags = features["target_tags"]

    # Expand target tags to beam width, if necessary.
    if self._hparams.mode == tf.estimator.ModeKeys.PREDICT:
      # <float32> [batch_size * beam_width, 1, 1, emb_size].
      beam_width = self._hparams.beam_width
      target_tags = tf.tile(target_tags, [beam_width, 1, 1, 1])

    # Add target tags to the input sequences.
    # <float32> [batch_size, seq_len + 1, 1, emb_size].
    inputs = tf.concat([target_tags, inputs], axis=1)

    # Compute length of the input sequences.
    inputs_length = common_layers.length_from_embedding(inputs)
    inputs = common_layers.flatten4d3d(inputs)

    # Preprocess targets.
    targets = common_layers.shift_right(targets)
    # Add 1 to account for the padding added to the left from shift_right.
    targets_length = common_layers.length_from_embedding(targets) + 1
    targets = common_layers.flatten4d3d(targets)

    return inputs, inputs_length, targets, targets_length 
Developer: google-research, Project: language, Lines of code: 29, Source: basic.py


Note: The tensor2tensor.layers.common_layers.shift_right examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are drawn from open-source projects contributed by their respective authors, and the source code copyright remains with those authors; for distribution and use, please refer to the corresponding project's License. Do not reproduce without permission.