

Python seq2seq.BahdanauAttention Method Code Examples

This article collects typical usage examples of the tensorflow.contrib.seq2seq.BahdanauAttention method in Python. If you are wondering what seq2seq.BahdanauAttention does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also browse further usage examples for the containing module, tensorflow.contrib.seq2seq.


The following presents 10 code examples of the seq2seq.BahdanauAttention method, sorted by popularity by default.
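
Before the individual examples, here is a minimal, self-contained sketch of the pattern most of them share: construct a seq2seq.BahdanauAttention over the encoder outputs, then wrap the decoder cell in a seq2seq.AttentionWrapper. The tensor shapes and unit sizes below are illustrative placeholders, not taken from any of the projects listed here.

import tensorflow as tf
from tensorflow.contrib import seq2seq

# Illustrative encoder outputs: [batch, max_time, encoder_units]
encoder_outputs = tf.placeholder(tf.float32, [None, None, 256])
encoder_lengths = tf.placeholder(tf.int32, [None])

attention_mechanism = seq2seq.BahdanauAttention(
    num_units=128,                           # size of the attention projection
    memory=encoder_outputs,                  # what the decoder attends over
    memory_sequence_length=encoder_lengths)  # masks padded time steps

decoder_cell = seq2seq.AttentionWrapper(
    tf.nn.rnn_cell.LSTMCell(128),            # decoder cell; any RNNCell works here
    attention_mechanism,
    attention_layer_size=128)                # project cell output + context to this size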

Example 1: _build_attention_mechanism

# Required import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import BahdanauAttention [as alias]
def _build_attention_mechanism(self, feature_maps):
    """Build (possibly multiple) attention mechanisms."""
    def _build_single_attention_mechanism(memory):
      if not self._is_training:
        memory = seq2seq.tile_batch(memory, multiplier=self._beam_width)
      return seq2seq.BahdanauAttention(
        self._num_attention_units,
        memory,
        memory_sequence_length=None
      )
    
    feature_sequences = [tf.squeeze(feature_map, axis=1) for feature_map in feature_maps]
    if self._multi_attention:
      attention_mechanism = []
      for i, feature_sequence in enumerate(feature_sequences):
        memory = feature_sequence
        attention_mechanism.append(_build_single_attention_mechanism(memory))
    else:
      memory = tf.concat(feature_sequences, axis=1)
      attention_mechanism = _build_single_attention_mechanism(memory)
    return attention_mechanism 
Developer: bgshih, Project: aster, Lines: 23, Source: attention_predictor.py
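
As a possible follow-up (this sketch is not part of the aster project), note that tf.contrib.seq2seq.AttentionWrapper accepts either a single attention mechanism or a list of them, so both branches of _build_attention_mechanism plug in directly; when a list is passed, attention_layer_size should be a matching list or None. The names predictor and feature_maps below are hypothetical.

# Hypothetical consumer of the value returned above.
attention_mechanism = predictor._build_attention_mechanism(feature_maps)
attention_cell = tf.contrib.seq2seq.AttentionWrapper(
    tf.nn.rnn_cell.LSTMCell(256),   # hypothetical decoder cell
    attention_mechanism,            # a single mechanism or a list of mechanisms
    attention_layer_size=256)       # pass a list of sizes when a list of mechanisms is used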

Example 2: __init__

# Required import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import BahdanauAttention [as alias]
def __init__(self,
                 encoder_outputs,
                 base_cell,
                 state_size,
                 vocab_size,
                 embed_size,
                 attention_mechanism='BahdanauAttention',
                 dropout_prob=1.0,
                 num_layers=1,
                 temperature=0.0,
                 max_seq_len=10):
        """We need to explicitly call the constructor now, so we can:
           - Specify we need the state wrapped in AttentionWrapperState.
           - Specify our attention mechanism (will allow customization soon).
        """

        super(AttentionDecoder, self).__init__(
            encoder_outputs=encoder_outputs,
            base_cell=base_cell,
            state_size=state_size,
            vocab_size=vocab_size,
            embed_size=embed_size,
            dropout_prob=dropout_prob,
            num_layers=num_layers,
            temperature=temperature,
            max_seq_len=max_seq_len,
            state_wrapper=AttentionWrapperState)

        _mechanism = getattr(tf.contrib.seq2seq, attention_mechanism)
        self.attention_mechanism = _mechanism(num_units=state_size,
                                              memory=encoder_outputs)
        self.output_attention = True 
Developer: mckinziebrandon, Project: DeepChatModels, Lines: 34, Source: decoders.py

Example 3: _create_cell

# Required import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import BahdanauAttention [as alias]
def _create_cell(self, rnn_enc_tensor, src_len, hsz, pdrop, rnntype='lstm', layers=1, vdrop=False, **kwargs):
        cell = multi_rnn_cell_w_dropout(hsz, pdrop, rnntype, layers, variational=vdrop, training=TRAIN_FLAG())
        if self.beam_width > 1:
            # Expand the encoded tensor for all beam entries
            rnn_enc_tensor = tf.contrib.seq2seq.tile_batch(rnn_enc_tensor, multiplier=self.beam_width)
            src_len = tf.contrib.seq2seq.tile_batch(src_len, multiplier=self.beam_width)
        GlobalAttention = tf.contrib.seq2seq.LuongAttention if self.attn_type == 'luong' else tf.contrib.seq2seq.BahdanauAttention
        attn_mech = GlobalAttention(hsz, rnn_enc_tensor, src_len)
        return tf.contrib.seq2seq.AttentionWrapper(cell, attn_mech, self.hsz, name='dyn_attn_cell') 
Developer: dpressel, Project: mead-baseline, Lines: 11, Source: v1.py
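
Note the beam-search handling above: the memory fed to the attention mechanism and its sequence lengths are both expanded with tf.contrib.seq2seq.tile_batch before the mechanism is built, and the decoder's initial state then has to be created for a batch of batch_size * beam_width so that it lines up with the tiled memory inside the AttentionWrapper.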

Example 4: create_attention_mechanisms

# Required import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import BahdanauAttention [as alias]
def create_attention_mechanisms(num_units, attention_types, mode, dtype, beam_search=False, beam_width=None, memory=None, memory_len=None, fusion_type=None):
    r"""
    Creates a list of attention mechanisms (e.g. seq2seq.BahdanauAttention)
    and also a list of ints holding the attention projection layer size
    Args:
        beam_search: `bool`, whether the beam-search decoding algorithm is used or not
    """
    mechanisms = []
    output_attention = None

    if beam_search is True:
        memory = seq2seq.tile_batch(
            memory, multiplier=beam_width)

        memory_len = seq2seq.tile_batch(
            memory_len, multiplier=beam_width)

    for attention_type in attention_types:
        attention, output_attention = create_attention_mechanism(
            num_units=num_units,  # has to match decoder's state(query) size
            memory=memory,
            memory_sequence_length=memory_len,
            attention_type=attention_type,
            mode=mode,
            dtype=dtype,
        )
        mechanisms.append(attention)

    N = len(attention_types)
    if fusion_type == 'deep_fusion':
        attention_layer_sizes = None
        attention_layers = [AttentionLayers(units=num_units, dtype=dtype) for _ in range(N)]
    elif fusion_type == 'linear_fusion':
        attention_layer_sizes = [num_units, ] * N
        attention_layers = None
    else:
        raise Exception('Unknown fusion type')

    return mechanisms, attention_layers, attention_layer_sizes, output_attention 
Developer: georgesterpu, Project: avsr-tf1, Lines: 41, Source: attention.py

Example 5: _create_decoder_cell

# Required import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import BahdanauAttention [as alias]
def _create_decoder_cell(self):
        enc_outputs, enc_states, enc_seq_len = self.enc_outputs, self.enc_states, self.enc_seq_len
        batch_size = self.batch_size * self.cfg.beam_size if self.use_beam_search else self.batch_size
        with tf.variable_scope("attention"):
            if self.cfg.attention == "luong":  # Luong attention mechanism
                attention_mechanism = LuongAttention(num_units=self.cfg.num_units, memory=enc_outputs,
                                                     memory_sequence_length=enc_seq_len)
            else:  # default using Bahdanau attention mechanism
                attention_mechanism = BahdanauAttention(num_units=self.cfg.num_units, memory=enc_outputs,
                                                        memory_sequence_length=enc_seq_len)

        def cell_input_fn(inputs, attention):  # define cell input function to keep input/output dimension same
            # reference: https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/AttentionWrapper
            if not self.cfg.use_attention_input_feeding:
                return inputs
            input_project = tf.layers.Dense(self.cfg.num_units, dtype=tf.float32, name='attn_input_feeding')
            return input_project(tf.concat([inputs, attention], axis=-1))

        if self.cfg.top_attention:  # apply attention mechanism only on the top decoder layer
            cells = [self._create_rnn_cell() for _ in range(self.cfg.num_layers)]
            cells[-1] = AttentionWrapper(cells[-1], attention_mechanism=attention_mechanism, name="Attention_Wrapper",
                                         attention_layer_size=self.cfg.num_units, initial_cell_state=enc_states[-1],
                                         cell_input_fn=cell_input_fn)
            initial_state = [state for state in enc_states]
            initial_state[-1] = cells[-1].zero_state(batch_size=batch_size, dtype=tf.float32)
            dec_init_states = tuple(initial_state)
            cells = MultiRNNCell(cells)
        else:
            cells = MultiRNNCell([self._create_rnn_cell() for _ in range(self.cfg.num_layers)])
            cells = AttentionWrapper(cells, attention_mechanism=attention_mechanism, name="Attention_Wrapper",
                                     attention_layer_size=self.cfg.num_units, initial_cell_state=enc_states,
                                     cell_input_fn=cell_input_fn)
            dec_init_states = cells.zero_state(batch_size=batch_size, dtype=tf.float32).clone(cell_state=enc_states)
        return cells, dec_init_states 
Developer: IsaacChanghau, Project: AmusingPythonCodes, Lines: 36, Source: seq2seq_model.py

Example 6: AttentionRNNV1

# Required import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import BahdanauAttention [as alias]
def AttentionRNNV1(num_units, prenets: Tuple[PreNet],
                   memory, memory_sequence_length, gru_impl=GRUImpl.GRUCell, dtype=None):
    rnn_cell = gru_cell_factory(gru_impl, num_units)
    attention_mechanism = BahdanauAttention(num_units, memory, memory_sequence_length, dtype=dtype)
    return AttentionRNN(rnn_cell, prenets, attention_mechanism, dtype=dtype) 
Developer: nii-yamagishilab, Project: tacotron2, Lines: 7, Source: tacotron_v1.py

Example 7: get

# Required import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import BahdanauAttention [as alias]
def get(attention_type, num_units, memory, memory_sequence_length,
        scope=None, reuse=None):
  """Returns attention mechanism according to the specified type."""
  with tf.variable_scope(scope, reuse=reuse):
    if attention_type == U.ATT_LUONG:
      attention_mechanism = contrib_seq2seq.LuongAttention(
          num_units=num_units,
          memory=memory,
          memory_sequence_length=memory_sequence_length)
    elif attention_type == U.ATT_LUONG_SCALED:
      attention_mechanism = contrib_seq2seq.LuongAttention(
          num_units=num_units,
          memory=memory,
          memory_sequence_length=memory_sequence_length,
          scale=True)
    elif attention_type == U.ATT_BAHDANAU:
      attention_mechanism = contrib_seq2seq.BahdanauAttention(
          num_units=num_units,
          memory=memory,
          memory_sequence_length=memory_sequence_length)
    elif attention_type == U.ATT_BAHDANAU_NORM:
      attention_mechanism = contrib_seq2seq.BahdanauAttention(
          num_units=num_units,
          memory=memory,
          memory_sequence_length=memory_sequence_length,
          normalize=True)
    else:
      raise ValueError("Unknown attention type: %s" % attention_type)
  return attention_mechanism 
Developer: google-research, Project: language, Lines: 31, Source: attention_mechanisms.py
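
For reference, normalize=True on BahdanauAttention (the U.ATT_BAHDANAU_NORM branch) selects the weight-normalized form of additive attention, while scale=True on LuongAttention (the U.ATT_LUONG_SCALED branch) adds a learned scaling factor to the multiplicative score; neither option changes the remaining constructor arguments.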

Example 8: build_attention_mechanism

# Required import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import BahdanauAttention [as alias]
def build_attention_mechanism(self):
        if self.hparams.attention_type == 'luong':
            attention_mechanism = seq2seq.LuongAttention(
                self.hparams.hidden_units, self.feedforward_inputs, self.feedforward_inputs_length)
        elif self.hparams.attention_type == 'bahdanau':
            attention_mechanism = seq2seq.BahdanauAttention(
                self.hparams.hidden_units, self.feedforward_inputs, self.feedforward_inputs_length,)
        else:
            raise ValueError(
                "Currently, the only supported attention types are 'luong' and 'bahdanau'.") 
Developer: microsoft, Project: icecaps, Lines: 12, Source: seq2seq_decoder_estimator.py

Example 9: __init__

# Required import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import BahdanauAttention [as alias]
def __init__(self,
                 num_units,
                 memory,
                 memory_sequence_length,
                 teacher_alignments,
                 name="BahdanauAttention"):
        super(TeacherForcingAdditiveAttention, self).__init__(
            num_units=num_units,
            memory=memory,
            memory_sequence_length=memory_sequence_length,
            probability_fn=None,
            name=name)
        self.teacher_alignments = teacher_alignments 
Developer: nii-yamagishilab, Project: self-attention-tacotron, Lines: 15, Source: teacher_forcing_attention.py

Example 10: attention_mechanism_factory

# Required import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import BahdanauAttention [as alias]
def attention_mechanism_factory(options: AttentionOptions):
    def attention_fn(memory, memory_sequence_length, teacher_alignments=None):
        if options.attention == "forward":
            mechanism = ForwardAttention(num_units=options.num_units,
                                         memory=memory,
                                         memory_sequence_length=memory_sequence_length,
                                         attention_kernel=options.attention_kernel,
                                         attention_filters=options.attention_filters,
                                         use_transition_agent=options.use_transition_agent,
                                         cumulative_weights=options.cumulative_weights)
        elif options.attention == "location_sensitive":
            mechanism = LocationSensitiveAttention(num_units=options.num_units,
                                                   memory=memory,
                                                   memory_sequence_length=memory_sequence_length,
                                                   attention_kernel=options.attention_kernel,
                                                   attention_filters=options.attention_filters,
                                                   smoothing=options.smoothing,
                                                   cumulative_weights=options.cumulative_weights)
        elif options.attention == "teacher_forcing_forward":
            mechanism = TeacherForcingForwardAttention(num_units=options.num_units,
                                                       memory=memory,
                                                       memory_sequence_length=memory_sequence_length,
                                                       teacher_alignments=teacher_alignments)
        elif options.attention == "teacher_forcing_additive":
            mechanism = TeacherForcingAdditiveAttention(num_units=options.num_units,
                                                        memory=memory,
                                                        memory_sequence_length=memory_sequence_length,
                                                        teacher_alignments=teacher_alignments)
        elif options.attention == "additive":
            mechanism = BahdanauAttention(num_units=options.num_units,
                                          memory=memory,
                                          memory_sequence_length=memory_sequence_length,
                                          dtype=memory.dtype)
        else:
            raise ValueError(f"Unknown attention mechanism: {options.attention}")
        return mechanism

    return attention_fn 
Developer: nii-yamagishilab, Project: self-attention-tacotron, Lines: 40, Source: attentions.py


Note: The tensorflow.contrib.seq2seq.BahdanauAttention examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers, and the copyright of the source code belongs to the original authors. For distribution and use, please follow the license of the corresponding project; do not reproduce without permission.