

Python tensorflow.reverse_sequence Method Code Examples

This article collects typical usage examples of the tensorflow.reverse_sequence method in Python. If you are wondering what exactly tensorflow.reverse_sequence does, how to call it, or what it looks like in practice, the code examples selected here may help. You can also explore further usage examples for the tensorflow module that provides this method.


The 15 code examples of tensorflow.reverse_sequence shown below are sorted by popularity by default.
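
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) illustrating the basic semantics of tf.reverse_sequence: each batch entry is reversed only up to its own length, and any padded tail is left in place. It assumes a TensorFlow release that accepts the seq_axis/batch_axis keyword names; older releases spell them seq_dim/batch_dim, as several examples below do.

import tensorflow as tf

# Batch of 2 sequences padded to length 4; the true lengths are 3 and 2.
inputs = tf.constant([[1, 2, 3, 0],
                      [4, 5, 0, 0]])
lengths = tf.constant([3, 2])

# Reverse each row along the time axis, but only over its first `lengths[i]` steps.
reversed_inputs = tf.reverse_sequence(inputs, lengths, seq_axis=1, batch_axis=0)
# reversed_inputs:
# [[3, 2, 1, 0],
#  [5, 4, 0, 0]]
# (In TF 1.x graph mode, evaluate the tensor with a tf.Session to see the values.)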

Example 1: lstm_seq2seq_internal_attention

# Required module: import tensorflow [as alias]
# Or: from tensorflow import reverse_sequence [as alias]
def lstm_seq2seq_internal_attention(inputs, targets, hparams, train):
  """LSTM seq2seq model with attention, main step used for training."""
  with tf.variable_scope("lstm_seq2seq_attention"):
    # This is a temporary fix for varying-length sequences within a batch.
    # A more complete fix should pass a length tensor from outside so that
    # all the lstm variants can use it.
    inputs_length = common_layers.length_from_embedding(inputs)
    # Flatten inputs.
    inputs = common_layers.flatten4d3d(inputs)

    # LSTM encoder.
    inputs = tf.reverse_sequence(inputs, inputs_length, seq_axis=1)
    encoder_outputs, final_encoder_state = lstm(
        inputs, inputs_length, hparams, train, "encoder")

    # LSTM decoder with attention.
    shifted_targets = common_layers.shift_right(targets)
    # Add 1 to account for the padding added to the left from shift_right
    targets_length = common_layers.length_from_embedding(shifted_targets) + 1
    decoder_outputs = lstm_attention_decoder(
        common_layers.flatten4d3d(shifted_targets), hparams, train, "decoder",
        final_encoder_state, encoder_outputs, inputs_length, targets_length)
    return tf.expand_dims(decoder_outputs, axis=2) 
Developer: akzaidi, Project: fine-lm, Lines: 25, Source: lstm.py

Example 2: __call__

# Required module: import tensorflow [as alias]
# Or: from tensorflow import reverse_sequence [as alias]
def __call__(self, inputs, seq_len, keep_prob=1.0, is_train=None, concat_layers=True):
        outputs = [tf.transpose(inputs, [1, 0, 2])]
        for layer in range(self.num_layers):
            gru_fw, gru_bw = self.grus[layer]
            init_fw, init_bw = self.inits[layer]
            mask_fw, mask_bw = self.dropout_mask[layer]
            with tf.variable_scope("fw_{}".format(layer)):
                out_fw, _ = gru_fw(
                    outputs[-1] * mask_fw, initial_state=(init_fw, ))
            with tf.variable_scope("bw_{}".format(layer)):
                inputs_bw = tf.reverse_sequence(
                    outputs[-1] * mask_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1)
                out_bw, _ = gru_bw(inputs_bw, initial_state=(init_bw, ))
                out_bw = tf.reverse_sequence(
                    out_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1)
            outputs.append(tf.concat([out_fw, out_bw], axis=2))
        if concat_layers:
            res = tf.concat(outputs[1:], axis=2)
        else:
            res = outputs[-1]
        res = tf.transpose(res, [1, 0, 2])
        return res 
Developer: HKUST-KnowComp, Project: R-Net, Lines: 24, Source: func.py

Example 3: lstm_seq2seq_internal_attention

# Required module: import tensorflow [as alias]
# Or: from tensorflow import reverse_sequence [as alias]
def lstm_seq2seq_internal_attention(inputs, targets, hparams, train,
                                    inputs_length, targets_length):
  """LSTM seq2seq model with attention, main step used for training."""
  with tf.variable_scope("lstm_seq2seq_attention"):
    # Flatten inputs.
    inputs = common_layers.flatten4d3d(inputs)

    # LSTM encoder.
    inputs = tf.reverse_sequence(inputs, inputs_length, seq_axis=1)
    encoder_outputs, final_encoder_state = lstm(
        inputs, inputs_length, hparams, train, "encoder")

    # LSTM decoder with attention.
    shifted_targets = common_layers.shift_right(targets)
    # Add 1 to account for the padding added to the left from shift_right
    targets_length = targets_length + 1
    decoder_outputs = lstm_attention_decoder(
        common_layers.flatten4d3d(shifted_targets), hparams, train, "decoder",
        final_encoder_state, encoder_outputs, inputs_length, targets_length)
    return tf.expand_dims(decoder_outputs, axis=2) 
Developer: yyht, Project: BERT, Lines: 22, Source: lstm.py

Example 4: bw_dynamic_rnn

# Required module: import tensorflow [as alias]
# Or: from tensorflow import reverse_sequence [as alias]
def bw_dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
                   dtype=None, parallel_iterations=None, swap_memory=False,
                   time_major=False, scope=None):
    assert not time_major  # TODO : to be implemented later!

    flat_inputs = flatten(inputs, 2)  # [-1, J, d]
    flat_len = None if sequence_length is None else tf.cast(flatten(sequence_length, 0), 'int64')

    flat_inputs = tf.reverse(flat_inputs, [1]) if sequence_length is None \
        else tf.reverse_sequence(flat_inputs, sequence_length, 1)
    flat_outputs, final_state = tf.nn.dynamic_rnn(cell, flat_inputs, sequence_length=flat_len,
                                             initial_state=initial_state, dtype=dtype,
                                             parallel_iterations=parallel_iterations, swap_memory=swap_memory,
                                             time_major=time_major, scope=scope)
    flat_outputs = tf.reverse(flat_outputs, [1]) if sequence_length is None \
        else tf.reverse_sequence(flat_outputs, sequence_length, 1)

    outputs = reconstruct(flat_outputs, inputs, 2)
    return outputs, final_state 
Developer: yyht, Project: BERT, Lines: 21, Source: rnn.py

Example 5: bw_dynamic_rnn

# Required module: import tensorflow [as alias]
# Or: from tensorflow import reverse_sequence [as alias]
def bw_dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
                   dtype=None, parallel_iterations=None, swap_memory=False,
                   time_major=False, scope=None):
    assert not time_major  # TODO : to be implemented later!

    flat_inputs = flatten(inputs, 2)  # [-1, J, d]
    flat_len = None if sequence_length is None else tf.cast(flatten(sequence_length, 0), 'int64')

    flat_inputs = tf.reverse(flat_inputs, 1) if sequence_length is None \
        else tf.reverse_sequence(flat_inputs, sequence_length, 1)
    flat_outputs, final_state = tf.nn.dynamic_rnn(cell, flat_inputs, sequence_length=flat_len,
                                             initial_state=initial_state, dtype=dtype,
                                             parallel_iterations=parallel_iterations, swap_memory=swap_memory,
                                             time_major=time_major, scope=scope)
    flat_outputs = tf.reverse(flat_outputs, 1) if sequence_length is None \
        else tf.reverse_sequence(flat_outputs, sequence_length, 1)

    outputs = reconstruct(flat_outputs, inputs, 2)
    return outputs, final_state 
Developer: uclnlp, Project: inferbeddings, Lines: 21, Source: rnn.py

Example 6: BiRNN

# Required module: import tensorflow [as alias]
# Or: from tensorflow import reverse_sequence [as alias]
def BiRNN(layer, recur_size, seq_lengths, recur_cell=LSTM, bilin=False, **kwargs):
  """"""

  locations = tf.expand_dims(tf.one_hot(seq_lengths-1, tf.shape(layer)[1]), -1)
  with tf.variable_scope('RNN_FW'):
    fw_hidden, fw_cell = recur_cell(layer, recur_size, seq_lengths, **kwargs)
  rev_layer = tf.reverse_sequence(layer, seq_lengths, batch_axis=0, seq_axis=1)
  with tf.variable_scope('RNN_BW'):
    bw_hidden, bw_cell = recur_cell(rev_layer, recur_size, seq_lengths, **kwargs)
  rev_bw_hidden = tf.reverse_sequence(bw_hidden, seq_lengths, batch_axis=0, seq_axis=1)
  rev_bw_cell = tf.reverse_sequence(bw_cell, seq_lengths, batch_axis=0, seq_axis=1)
  if bilin:
    layer = tf.concat([fw_hidden*rev_bw_hidden, fw_hidden, rev_bw_hidden], 2)
  else:
    layer = tf.concat([fw_hidden, rev_bw_hidden], 2)
  if recur_cell == RNN:
    # `hidden` was undefined in the original source; the concatenated forward/backward
    # hidden states in `layer` appear to be the intended operand.
    final_states = tf.squeeze(tf.matmul(layer, locations, transpose_a=True), -1)
  else:
    final_fw_hidden = tf.squeeze(tf.matmul(fw_hidden, locations, transpose_a=True), -1)
    final_fw_cell = tf.squeeze(tf.matmul(fw_cell, locations, transpose_a=True), -1)
    final_rev_bw_hidden = tf.squeeze(tf.matmul(rev_bw_hidden, locations, transpose_a=True), -1)
    final_rev_bw_cell = tf.squeeze(tf.matmul(rev_bw_cell, locations, transpose_a=True), -1)
    final_states = tf.concat([final_fw_hidden, final_rev_bw_hidden, final_fw_cell, final_rev_bw_cell], 1)
  return layer, final_states 
Developer: tdozat, Project: Parser-v3, Lines: 26, Source: recurrent.py

Example 7: transform

# Required module: import tensorflow [as alias]
# Or: from tensorflow import reverse_sequence [as alias]
def transform(self, z_fwd, z_rvs, mask_fwd, mask_rvs, sequence_lengths):
        h_fwd = []
        h = z_fwd

        for layer in self.rnn.layers:
            h = layer(h, mask=mask_fwd)
            h = self.dropout(h)
            h_fwd.append(h)

        h_rvs = []
        h = z_rvs
        for layer in self.rnn.layers:
            h = layer(h, mask=mask_rvs)
            h = self.dropout(h)
            h_rvs.append(
                tf.reverse_sequence(h, sequence_lengths - 1, seq_axis=1))

        return h_fwd, h_rvs 
Developer: songlab-cal, Project: tape-neurips2019, Lines: 20, Source: BeplerModel.py

Example 8: embed_and_split

# Required module: import tensorflow [as alias]
# Or: from tensorflow import reverse_sequence [as alias]
def embed_and_split(self, x, sequence_lengths, pad=False):
        if pad:
            # Add one to each sequence element
            if not self._use_pfam_alphabet:
                x = x + 1
                mask = rk.utils.convert_sequence_length_to_sequence_mask(x, sequence_lengths)
                x = x * tf.cast(mask, x.dtype)

            x = tf.pad(x, [[0, 0], [1, 1]])  # pad x
            sequence_lengths += 2

        mask = rk.utils.convert_sequence_length_to_sequence_mask(x, sequence_lengths)

        z = self.embed(x)
        z_fwd = z[:, :-1]
        mask_fwd = mask[:, :-1]

        z_rvs = tf.reverse_sequence(z, sequence_lengths, seq_axis=1)[:, :-1]
        mask_rvs = tf.reverse_sequence(mask, sequence_lengths, seq_axis=1)[:, :-1]

        return z_fwd, z_rvs, mask_fwd, mask_rvs, sequence_lengths 
Developer: songlab-cal, Project: tape-neurips2019, Lines: 23, Source: BeplerModel.py

Example 9: call

# Required module: import tensorflow [as alias]
# Or: from tensorflow import reverse_sequence [as alias]
def call(self, inputs):
        sequence = inputs['primary']
        protein_length = inputs['protein_length']

        sequence = self.embedding(sequence)
        tf.add_to_collection('checkpoints', sequence)

        forward_output = self.forward_lstm(sequence)
        tf.add_to_collection('checkpoints', forward_output)

        reversed_sequence = tf.reverse_sequence(sequence, protein_length, seq_axis=1)
        reverse_output = self.reverse_lstm(reversed_sequence)
        reverse_output = tf.reverse_sequence(reverse_output, protein_length, seq_axis=1)
        tf.add_to_collection('checkpoints', reverse_output)

        encoder_output = tf.concat((forward_output, reverse_output), -1)

        encoder_output = self.dropout(encoder_output)

        inputs['encoder_output'] = encoder_output
        return inputs 
Developer: songlab-cal, Project: tape-neurips2019, Lines: 23, Source: BidirectionalLSTM.py

Example 10: __call__

# Required module: import tensorflow [as alias]
# Or: from tensorflow import reverse_sequence [as alias]
def __call__(self, inputs, seq_len, keep_prob=1.0, is_train=None, concat_layers=True):
        # The cuDNN GRU expects the tensor dimensions to be swapped (time-major),
        # presumably to simplify computation; see the sketch after this example.
        outputs = [tf.transpose(inputs, [1, 0, 2])]
        for layer in range(self.num_layers):
            gru_fw, gru_bw = self.grus[layer]
            init_fw, init_bw = self.inits[layer]
            mask_fw, mask_bw = self.dropout_mask[layer]
            with tf.variable_scope("fw_{}".format(layer)):
                out_fw, _ = gru_fw(
                    outputs[-1] * mask_fw, initial_state=(init_fw, ))
            with tf.variable_scope("bw_{}".format(layer)):
                inputs_bw = tf.reverse_sequence(
                    outputs[-1] * mask_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1)
                out_bw, _ = gru_bw(inputs_bw, initial_state=(init_bw, ))
                out_bw = tf.reverse_sequence(
                    out_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1)
            outputs.append(tf.concat([out_fw, out_bw], axis=2))
        if concat_layers:
            res = tf.concat(outputs[1:], axis=2)
        else:
            res = outputs[-1]
        res = tf.transpose(res, [1, 0, 2])
        return res 
Developer: yuhaitao1994, Project: AIchallenger2018_MachineReadingComprehension, Lines: 25, Source: nn_func.py
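
The comment in Example 10 notes that the cuDNN GRU works on time-major tensors. As a quick illustrative sketch (not part of the original project, assuming TensorFlow 1.x where the seq_dim/batch_dim keywords are available), reversing a time-major tensor with seq_dim=0, batch_dim=1 gives the same result as transposing to batch-major, reversing along the time axis, and transposing back:

import tensorflow as tf

# [time, batch, dim] tensor with per-batch lengths 3 and 2.
time_major = tf.reshape(tf.range(12, dtype=tf.float32), [3, 2, 2])
seq_len = tf.constant([3, 2])

# Reverse directly in time-major layout, as the cuDNN GRU wrappers above do.
rev_time_major = tf.reverse_sequence(time_major, seq_len, seq_dim=0, batch_dim=1)

# Same result via batch-major layout.
batch_major = tf.transpose(time_major, [1, 0, 2])
rev_batch_major = tf.reverse_sequence(batch_major, seq_len, seq_dim=1, batch_dim=0)
rev_roundtrip = tf.transpose(rev_batch_major, [1, 0, 2])
# rev_time_major and rev_roundtrip hold identical values.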

Example 11: __call__

# Required module: import tensorflow [as alias]
# Or: from tensorflow import reverse_sequence [as alias]
def __call__(self, inputs, seq_len, keep_prob=1.0, is_train=None, concat_layers=True):
        # The cuDNN GRU expects the tensor dimensions to be swapped (time-major),
        # presumably to simplify computation.
        outputs = [tf.transpose(inputs, [1, 0, 2])]
        with tf.variable_scope(self.scope):
            for layer in range(self.num_layers):
                gru_fw, gru_bw = self.grus[layer]
                init_fw, init_bw = self.inits[layer]
                mask_fw, mask_bw = self.dropout_mask[layer]
                with tf.variable_scope("fw_{}".format(layer)):
                    out_fw, _ = gru_fw(
                        outputs[-1] * mask_fw, initial_state=(init_fw, ))
                with tf.variable_scope("bw_{}".format(layer)):
                    inputs_bw = tf.reverse_sequence(
                        outputs[-1] * mask_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1)
                    out_bw, _ = gru_bw(
                        inputs_bw, initial_state=(init_bw, ))
                    out_bw = tf.reverse_sequence(
                        out_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1)
                outputs.append(tf.concat([out_fw, out_bw], axis=2))
        if concat_layers:
            res = tf.concat(outputs[1:], axis=2)
        else:
            res = outputs[-1]
        res = tf.transpose(res, [1, 0, 2])
        return res 
Developer: yuhaitao1994, Project: AIchallenger2018_MachineReadingComprehension, Lines: 27, Source: nn_func.py

Example 12: bw_dynamic_rnn

# Required module: import tensorflow [as alias]
# Or: from tensorflow import reverse_sequence [as alias]
def bw_dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
                   dtype=None, parallel_iterations=None, swap_memory=False,
                   time_major=False, scope=None):
    assert not time_major  # TODO : to be implemented later!

    flat_inputs = flatten(inputs, 2)  # [-1, J, d]
    flat_len = None if sequence_length is None else tf.cast(flatten(sequence_length, 0), 'int64')

    flat_inputs = tf.reverse(flat_inputs, 1) if sequence_length is None \
        else tf.reverse_sequence(flat_inputs, sequence_length, 1)
    flat_outputs, final_state = _dynamic_rnn(cell, flat_inputs, sequence_length=flat_len,
                                             initial_state=initial_state, dtype=dtype,
                                             parallel_iterations=parallel_iterations, swap_memory=swap_memory,
                                             time_major=time_major, scope=scope)
    flat_outputs = tf.reverse(flat_outputs, 1) if sequence_length is None \
        else tf.reverse_sequence(flat_outputs, sequence_length, 1)

    outputs = reconstruct(flat_outputs, inputs, 2)
    return outputs, final_state 
Developer: IsaacChanghau, Project: AmusingPythonCodes, Lines: 21, Source: rnn.py

Example 13: __call__

# Required module: import tensorflow [as alias]
# Or: from tensorflow import reverse_sequence [as alias]
def __call__(self, inputs, seq_len, keep_prob=1.0, is_train=None, concat_layers=True):
        outputs = [tf.transpose(inputs, [1, 0, 2])]
        for layer in range(self.num_layers):
            gru_fw, gru_bw = self.grus[layer]
            param_fw, param_bw = self.params[layer]
            init_fw, init_bw = self.inits[layer]
            mask_fw, mask_bw = self.dropout_mask[layer]
            with tf.variable_scope("fw"):
                out_fw, _ = gru_fw(outputs[-1] * mask_fw, init_fw, param_fw)
            with tf.variable_scope("bw"):
                inputs_bw = tf.reverse_sequence(
                    outputs[-1] * mask_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1)
                out_bw, _ = gru_bw(inputs_bw, init_bw, param_bw)
                out_bw = tf.reverse_sequence(
                    out_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1)
            outputs.append(tf.concat([out_fw, out_bw], axis=2))
        if concat_layers:
            res = tf.concat(outputs[1:], axis=2)
        else:
            res = outputs[-1]
        res = tf.transpose(res, [1, 0, 2])
        return res

###### self attention part code 
Developer: IsaacChanghau, Project: AmusingPythonCodes, Lines: 26, Source: rnn.py

Example 14: __call__

# Required module: import tensorflow [as alias]
# Or: from tensorflow import reverse_sequence [as alias]
def __call__(self, inputs, seq_len, keep_prob=1.0, is_train=None, concat_layers=True):
        outputs = [tf.transpose(inputs, [1, 0, 2])]
        for layer in range(self.num_layers):
            gru_fw, gru_bw = self.grus[layer]
            param_fw, param_bw = self.params[layer]
            init_fw, init_bw = self.inits[layer]
            mask_fw, mask_bw = self.dropout_mask[layer]
            with tf.variable_scope("fw"):
                out_fw, _ = gru_fw(outputs[-1] * mask_fw, init_fw, param_fw)
            with tf.variable_scope("bw"):
                inputs_bw = tf.reverse_sequence(outputs[-1] * mask_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1)
                out_bw, _ = gru_bw(inputs_bw, init_bw, param_bw)
                out_bw = tf.reverse_sequence(out_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1)
            outputs.append(tf.concat([out_fw, out_bw], axis=2))
        if concat_layers:
            res = tf.concat(outputs[1:], axis=2)
        else:
            res = outputs[-1]
        res = tf.transpose(res, [1, 0, 2])
        return res 
Developer: IsaacChanghau, Project: AmusingPythonCodes, Lines: 22, Source: func.py

Example 15: __call__

# Required module: import tensorflow [as alias]
# Or: from tensorflow import reverse_sequence [as alias]
def __call__(self, inputs, seq_len, keep_prob=1.0, is_train=None, concat_layers=True):
        outputs = [inputs]
        with tf.variable_scope(self.scope):
            for layer in range(self.num_layers):
                gru_fw, gru_bw = self.grus[layer]
                init_fw, init_bw = self.inits[layer]
                mask_fw, mask_bw = self.dropout_mask[layer]
                with tf.variable_scope("fw_{}".format(layer)):
                    out_fw, _ = tf.nn.dynamic_rnn(
                        gru_fw, outputs[-1] * mask_fw, seq_len,
                        initial_state=init_fw, dtype=tf.float32)
                with tf.variable_scope("bw_{}".format(layer)):
                    inputs_bw = tf.reverse_sequence(
                        outputs[-1] * mask_bw, seq_lengths=seq_len, seq_dim=1, batch_dim=0)
                    out_bw, _ = tf.nn.dynamic_rnn(
                        gru_bw, inputs_bw, seq_len, initial_state=init_bw, dtype=tf.float32)
                    out_bw = tf.reverse_sequence(
                        out_bw, seq_lengths=seq_len, seq_dim=1, batch_dim=0)
                outputs.append(tf.concat([out_fw, out_bw], axis=2))
        if concat_layers:
            res = tf.concat(outputs[1:], axis=2)
        else:
            res = outputs[-1]
        return res 
Developer: vanzytay, Project: EMNLP2018_NLI, Lines: 27, Source: func.py


Note: The tensorflow.reverse_sequence examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution and use should follow each project's license. Do not reproduce without permission.