

Python seq2seq.LuongAttention Method Code Examples

This article collects typical usage examples of the Python method tensorflow.contrib.seq2seq.LuongAttention. If you are wondering how seq2seq.LuongAttention is used in practice, the curated code examples below may help. You can also explore further usage examples from the containing module, tensorflow.contrib.seq2seq.


Five code examples of the seq2seq.LuongAttention method are shown below, sorted by popularity by default.
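Before the collected examples, here is a minimal, self-contained sketch of how LuongAttention is typically wired into a decoder cell with the TF 1.x contrib API (the tensor names and sizes below are hypothetical, and the contrib module was removed in TensorFlow 2.x):

import tensorflow as tf
from tensorflow.contrib import rnn, seq2seq

num_units = 128
# Hypothetical encoder outputs [batch, max_time, num_units] and source lengths [batch]
encoder_outputs = tf.placeholder(tf.float32, [None, None, num_units])
source_lengths = tf.placeholder(tf.int32, [None])

# Luong (multiplicative) attention scores decoder queries against the encoder memory
attention_mechanism = seq2seq.LuongAttention(
    num_units=num_units,
    memory=encoder_outputs,
    memory_sequence_length=source_lengths)

# Wrap a plain RNN cell so that each decoding step attends over the encoder memory
decoder_cell = seq2seq.AttentionWrapper(
    rnn.BasicLSTMCell(num_units),
    attention_mechanism,
    attention_layer_size=num_units)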

Example 1: _create_cell

# Required import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import LuongAttention [as alias]
def _create_cell(self, rnn_enc_tensor, src_len, hsz, pdrop, rnntype='lstm', layers=1, vdrop=False, **kwargs):
        cell = multi_rnn_cell_w_dropout(hsz, pdrop, rnntype, layers, variational=vdrop, training=TRAIN_FLAG())
        if self.beam_width > 1:
            # Expand the encoded tensor for all beam entries
            rnn_enc_tensor = tf.contrib.seq2seq.tile_batch(rnn_enc_tensor, multiplier=self.beam_width)
            src_len = tf.contrib.seq2seq.tile_batch(src_len, multiplier=self.beam_width)
        GlobalAttention = tf.contrib.seq2seq.LuongAttention if self.attn_type == 'luong' else tf.contrib.seq2seq.BahdanauAttention
        attn_mech = GlobalAttention(hsz, rnn_enc_tensor, src_len)
        return tf.contrib.seq2seq.AttentionWrapper(cell, attn_mech, self.hsz, name='dyn_attn_cell') 
Developer ID: dpressel, Project: mead-baseline, Code lines: 11, Source file: v1.py
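As a note on the beam-search branch in Example 1: tile_batch repeats each batch entry beam_width times along the batch dimension, so every beam hypothesis attends over its own copy of the encoder memory. A small illustrative sketch (shapes are hypothetical):

import tensorflow as tf
from tensorflow.contrib import seq2seq

beam_width = 3
enc_outputs = tf.placeholder(tf.float32, [None, None, 64])  # [batch, time, units]
src_lengths = tf.placeholder(tf.int32, [None])              # [batch]

# After tiling, the leading dimension becomes batch * beam_width,
# which matches the batch size expected by a BeamSearchDecoder.
tiled_outputs = seq2seq.tile_batch(enc_outputs, multiplier=beam_width)
tiled_lengths = seq2seq.tile_batch(src_lengths, multiplier=beam_width)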

Example 2: _create_decoder_cell

# Required import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import LuongAttention [as alias]
def _create_decoder_cell(self):
        enc_outputs, enc_states, enc_seq_len = self.enc_outputs, self.enc_states, self.enc_seq_len
        batch_size = self.batch_size * self.cfg.beam_size if self.use_beam_search else self.batch_size
        with tf.variable_scope("attention"):
            if self.cfg.attention == "luong":  # Luong attention mechanism
                attention_mechanism = LuongAttention(num_units=self.cfg.num_units, memory=enc_outputs,
                                                     memory_sequence_length=enc_seq_len)
            else:  # default using Bahdanau attention mechanism
                attention_mechanism = BahdanauAttention(num_units=self.cfg.num_units, memory=enc_outputs,
                                                        memory_sequence_length=enc_seq_len)

        def cell_input_fn(inputs, attention):  # define cell input function to keep input/output dimension same
            # reference: https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/AttentionWrapper
            if not self.cfg.use_attention_input_feeding:
                return inputs
            input_project = tf.layers.Dense(self.cfg.num_units, dtype=tf.float32, name='attn_input_feeding')
            return input_project(tf.concat([inputs, attention], axis=-1))

        if self.cfg.top_attention:  # apply attention mechanism only on the top decoder layer
            cells = [self._create_rnn_cell() for _ in range(self.cfg.num_layers)]
            cells[-1] = AttentionWrapper(cells[-1], attention_mechanism=attention_mechanism, name="Attention_Wrapper",
                                         attention_layer_size=self.cfg.num_units, initial_cell_state=enc_states[-1],
                                         cell_input_fn=cell_input_fn)
            initial_state = [state for state in enc_states]
            initial_state[-1] = cells[-1].zero_state(batch_size=batch_size, dtype=tf.float32)
            dec_init_states = tuple(initial_state)
            cells = MultiRNNCell(cells)
        else:
            cells = MultiRNNCell([self._create_rnn_cell() for _ in range(self.cfg.num_layers)])
            cells = AttentionWrapper(cells, attention_mechanism=attention_mechanism, name="Attention_Wrapper",
                                     attention_layer_size=self.cfg.num_units, initial_cell_state=enc_states,
                                     cell_input_fn=cell_input_fn)
            dec_init_states = cells.zero_state(batch_size=batch_size, dtype=tf.float32).clone(cell_state=enc_states)
        return cells, dec_init_states 
Developer ID: IsaacChanghau, Project: AmusingPythonCodes, Code lines: 36, Source file: seq2seq_model.py
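The cell and initial state returned by Example 2 would typically be driven by a decoder object. The following training-time sketch is not part of the original project; the embedded target inputs, target lengths, and vocabulary size are hypothetical stand-ins, while cells and dec_init_states are assumed to be the return values of _create_decoder_cell():

import tensorflow as tf
from tensorflow.contrib import seq2seq

vocab_size = 10000
# Hypothetical embedded target inputs [batch, dec_time, embed_dim] and their lengths [batch]
decoder_inputs_embedded = tf.placeholder(tf.float32, [None, None, 300])
decoder_lengths = tf.placeholder(tf.int32, [None])

helper = seq2seq.TrainingHelper(inputs=decoder_inputs_embedded,
                                sequence_length=decoder_lengths)
decoder = seq2seq.BasicDecoder(cell=cells,
                               helper=helper,
                               initial_state=dec_init_states,
                               output_layer=tf.layers.Dense(vocab_size))
outputs, final_state, _ = seq2seq.dynamic_decode(decoder)
logits = outputs.rnn_output  # [batch, dec_time, vocab_size]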

Example 3: get

# Required import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import LuongAttention [as alias]
def get(attention_type, num_units, memory, memory_sequence_length,
        scope=None, reuse=None):
  """Returns attention mechanism according to the specified type."""
  with tf.variable_scope(scope, reuse=reuse):
    if attention_type == U.ATT_LUONG:
      attention_mechanism = contrib_seq2seq.LuongAttention(
          num_units=num_units,
          memory=memory,
          memory_sequence_length=memory_sequence_length)
    elif attention_type == U.ATT_LUONG_SCALED:
      attention_mechanism = contrib_seq2seq.LuongAttention(
          num_units=num_units,
          memory=memory,
          memory_sequence_length=memory_sequence_length,
          scale=True)
    elif attention_type == U.ATT_BAHDANAU:
      attention_mechanism = contrib_seq2seq.BahdanauAttention(
          num_units=num_units,
          memory=memory,
          memory_sequence_length=memory_sequence_length)
    elif attention_type == U.ATT_BAHDANAU_NORM:
      attention_mechanism = contrib_seq2seq.BahdanauAttention(
          num_units=num_units,
          memory=memory,
          memory_sequence_length=memory_sequence_length,
          normalize=True)
    else:
      raise ValueError("Unknown attention type: %s" % attention_type)
  return attention_mechanism 
Developer ID: google-research, Project: language, Code lines: 31, Source file: attention_mechanisms.py
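A hypothetical call site for the factory in Example 3 (U.ATT_LUONG is assumed to be the project's string constant for unscaled Luong attention; the memory tensors are placeholders for illustration):

import tensorflow as tf

memory = tf.placeholder(tf.float32, [None, None, 256])  # encoder outputs
memory_lengths = tf.placeholder(tf.int32, [None])        # valid length per batch entry

attention_mechanism = get(
    attention_type=U.ATT_LUONG,
    num_units=256,
    memory=memory,
    memory_sequence_length=memory_lengths,
    scope="decoder_attention")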

Example 4: build_attention_mechanism

# Required import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import LuongAttention [as alias]
def build_attention_mechanism(self):
        if self.hparams.attention_type == 'luong':
            attention_mechanism = seq2seq.LuongAttention(
                self.hparams.hidden_units, self.feedforward_inputs, self.feedforward_inputs_length)
        elif self.hparams.attention_type == 'bahdanau':
            attention_mechanism = seq2seq.BahdanauAttention(
                self.hparams.hidden_units, self.feedforward_inputs, self.feedforward_inputs_length,)
        else:
            raise ValueError(
                "Currently, the only supported attention types are 'luong' and 'bahdanau'.")
        return attention_mechanism
Developer ID: microsoft, Project: icecaps, Code lines: 12, Source file: seq2seq_decoder_estimator.py

Example 5: _make_decoder

# Required import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import LuongAttention [as alias]
def _make_decoder(self, encoder_outputs, encoder_final_state, beam_search=False, reuse=False):
        """Create decoder"""
        with tf.variable_scope('decode', reuse=reuse):
            # Create decoder cells
            cells = [self._make_cell() for _ in range(self.depth)]

            if beam_search:
                # Tile inputs as needed for beam search
                encoder_outputs = seq2seq.tile_batch(
                    encoder_outputs, multiplier=self.beam_width)
                encoder_final_state = nest.map_structure(
                    lambda s: seq2seq.tile_batch(s, multiplier=self.beam_width),
                    encoder_final_state)
                sequence_length = seq2seq.tile_batch(
                    self.sequence_length, multiplier=self.beam_width)
            else:
                sequence_length = self.sequence_length

            # Prepare attention mechanism;
            # add only to last cell
            attention_mechanism = seq2seq.LuongAttention(
                num_units=self.hidden_size, memory=encoder_outputs,
                memory_sequence_length=sequence_length, name='attn')
            cells[-1] = seq2seq.AttentionWrapper(
                cells[-1], attention_mechanism, attention_layer_size=self.hidden_size,
                initial_cell_state=encoder_final_state[-1],
                cell_input_fn=lambda inp, attn: tf.layers.dense(tf.concat([inp, attn], -1), self.hidden_size),
                name='attnwrap'
            )

            # Copy encoder final state as decoder initial state
            decoder_initial_state = [s for s in encoder_final_state]

            # Set last initial state to be AttentionWrapperState
            batch_size = self.batch_size
            if beam_search: batch_size = self.batch_size * self.beam_width
            decoder_initial_state[-1] = cells[-1].zero_state(
                dtype=tf.float32, batch_size=batch_size)

            # Wrap up the cells
            cell = rnn.MultiRNNCell(cells)

            # Return initial state as a tuple
            # (required by tensorflow)
            return cell, tuple(decoder_initial_state) 
Developer ID: frnsys, Project: retrosynthesis_planner, Code lines: 47, Source file: seq2seq.py
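At inference time, the cell and tuple state returned by Example 5 with beam_search=True can be wrapped in a BeamSearchDecoder. The sketch below is not from the original project; the vocabulary size, embedding, token ids, and batch size are hypothetical, and cell / decoder_initial_state are assumed to be the return values of _make_decoder:

import tensorflow as tf
from tensorflow.contrib import seq2seq

vocab_size, embed_dim, beam_width, batch_size = 5000, 256, 5, 32
start_token_id, end_token_id = 1, 2

embedding = tf.get_variable('dec_embedding', [vocab_size, embed_dim])
start_tokens = tf.fill([batch_size], start_token_id)

decoder = seq2seq.BeamSearchDecoder(
    cell=cell,
    embedding=embedding,
    start_tokens=start_tokens,
    end_token=end_token_id,
    initial_state=decoder_initial_state,
    beam_width=beam_width,
    output_layer=tf.layers.Dense(vocab_size))

# predicted_ids holds the beam hypotheses: [batch, max_time, beam_width]
outputs, _, _ = seq2seq.dynamic_decode(decoder, maximum_iterations=100)
predicted_ids = outputs.predicted_ids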


Note: The tensorflow.contrib.seq2seq.LuongAttention examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please refer to the corresponding project's license before distributing or using the code, and do not republish this article without permission.