

Python seq2seq.BeamSearchDecoder Method Code Examples

This article collects typical usage examples of the Python method tensorflow.contrib.seq2seq.BeamSearchDecoder. If you are unsure what seq2seq.BeamSearchDecoder does, how to call it, or simply want working examples, the curated snippets below should help. You can also explore further usage examples from the containing module, tensorflow.contrib.seq2seq.


Below are 4 code examples of the seq2seq.BeamSearchDecoder method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
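Before diving into the examples, here is a minimal sketch of the decoding pattern they all share: tile the initial state beam_width times, construct the BeamSearchDecoder, and run it through dynamic_decode. All names and sizes (embedding_matrix, GO_ID, and so on) are placeholders for this sketch, not taken from any of the projects below.

import tensorflow as tf
from tensorflow.contrib import rnn, seq2seq

batch_size, beam_width = 32, 5
vocab_size, hidden_size = 1000, 256
GO_ID, EOS_ID = 1, 2

embedding_matrix = tf.get_variable('embeddings', [vocab_size, hidden_size])
cell = rnn.LSTMCell(hidden_size)

# BeamSearchDecoder expects every state tensor replicated beam_width times
# along the batch axis, so the (stand-in) initial state is tiled first
encoder_state = cell.zero_state(batch_size, tf.float32)
tiled_state = seq2seq.tile_batch(encoder_state, multiplier=beam_width)

decoder = seq2seq.BeamSearchDecoder(
    cell=cell,
    embedding=embedding_matrix,  # an embedding matrix or a callable
    start_tokens=tf.fill([batch_size], GO_ID),
    end_token=EOS_ID,
    initial_state=tiled_state,
    beam_width=beam_width,
    output_layer=tf.layers.Dense(vocab_size, use_bias=False))

# all four examples pass impute_finished=False; the beam decoder tracks
# finished hypotheses itself
outputs, final_state, lengths = seq2seq.dynamic_decode(
    decoder, impute_finished=False, maximum_iterations=100)
predicted_ids = outputs.predicted_ids  # [batch_size, T, beam_width]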

Example 1: _make_predict

# Required import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import BeamSearchDecoder [as alias]
def _make_predict(self, decoder_cell, decoder_initial_state):
        # Access embeddings directly
        with tf.variable_scope('embed', reuse=True):
            embeddings = tf.get_variable('embeddings')

        # Assume 0 is the START token
        start_tokens = tf.zeros((self.batch_size,), dtype=tf.int32)

        # For predictions, we use beam search to return multiple results
        with tf.variable_scope('decode', reuse=True):
            # output_proj maps decoder outputs to vocabulary logits;
            # input_proj maps the embedding matrix to the decoder's hidden size
            out_proj = tf.layers.Dense(self.vocab_size, name='output_proj')
            embeddings = tf.layers.dense(embeddings, self.hidden_size, name='input_proj')

            decoder = seq2seq.BeamSearchDecoder(
                cell=decoder_cell,
                embedding=embeddings,
                start_tokens=start_tokens,
                end_token=END,
                initial_state=decoder_initial_state,
                beam_width=self.beam_width,
                output_layer=out_proj
            )

            final_outputs, final_state, final_sequence_lengths = seq2seq.dynamic_decode(
                decoder=decoder, impute_finished=False, maximum_iterations=self.max_decode_iter)

        # Swap axes so each row is one output sequence,
        # i.e. the result is [batch_size, beam_width, T]
        return tf.transpose(final_outputs.predicted_ids, [0, 2, 1])
Author: frnsys | Project: retrosynthesis_planner | Source file: seq2seq.py
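Two details are worth noting in this example. The input_proj trick projects the whole embedding matrix once, before handing it to BeamSearchDecoder; because a dense layer commutes with row selection, this is equivalent to looking tokens up and then projecting each vector at every step, just cheaper. Also, _make_predict assumes decoder_initial_state already has its batch dimension tiled for beam search; a hedged sketch of the caller-side preparation (names hypothetical, not from the repository):

# hypothetical caller; the project's actual wiring may differ
tiled_state = seq2seq.tile_batch(
    encoder_final_state, multiplier=self.beam_width)
predicted = self._make_predict(decoder_cell, tiled_state)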

Example 2: _build_decoder

# Required import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import BeamSearchDecoder [as alias]
def _build_decoder(self, decoder_cell, batch_size):
    embedding_fn = functools.partial(tf.one_hot, depth=self.num_classes)
    output_layer = tf.layers.Dense(
      self.num_classes,
      activation=None,
      use_bias=True,
      kernel_initializer=tf.variance_scaling_initializer(),
      bias_initializer=tf.zeros_initializer())
    if self._is_training:
      train_helper = seq2seq.TrainingHelper(
        embedding_fn(self._groundtruth_dict['decoder_inputs']),
        sequence_length=self._groundtruth_dict['decoder_lengths'],
        time_major=False)
      decoder = seq2seq.BasicDecoder(
        cell=decoder_cell,
        helper=train_helper,
        initial_state=decoder_cell.zero_state(batch_size, tf.float32),
        output_layer=output_layer)
    else:
      decoder = seq2seq.BeamSearchDecoder(
        cell=decoder_cell,
        embedding=embedding_fn,
        start_tokens=tf.fill([batch_size], self.start_label),
        end_token=self.end_label,
        initial_state=decoder_cell.zero_state(batch_size * self._beam_width, tf.float32),
        beam_width=self._beam_width,
        output_layer=output_layer,
        length_penalty_weight=0.0)
    return decoder 
Author: bgshih | Project: aster | Source file: attention_predictor.py
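This example differs from the others in two ways. First, embedding is a callable rather than a matrix: BeamSearchDecoder accepts either a [vocab, dim] tensor (used via embedding_lookup) or a function mapping id tensors to vectors, and the functools.partial(tf.one_hot, ...) here feeds the decoder raw one-hot class vectors. Second, because decoding starts from an all-zero state, there is nothing to tile: calling zero_state directly with batch_size * beam_width yields the same tensors tile_batch would produce. Both accepted forms for embedding, with hypothetical names:

embedding = tf.get_variable('emb', [num_classes, emb_dim])    # lookup matrix
embedding = functools.partial(tf.one_hot, depth=num_classes)  # callable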

Example 3: _build_decoder_test_beam_search

# Required import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import BeamSearchDecoder [as alias]
def _build_decoder_test_beam_search(self):
        r"""
        Builds a beam search test decoder
        """
        if self._hparams.enable_attention is True:
            cells, initial_state = add_attention(
                cells=self._decoder_cells,
                attention_types=self._hparams.attention_type[1],
                num_units=self._hparams.decoder_units_per_layer[-1],
                memory=self._encoder_memory,
                memory_len=self._encoder_features_len,
                beam_search=True,
                batch_size=self._batch_size,
                beam_width=self._hparams.beam_width,
                initial_state=self._decoder_initial_state,
                mode=self._mode,
                dtype=self._hparams.dtype,
                fusion_type='linear_fusion',
                write_attention_alignment=self._hparams.write_attention_alignment)
        else:
            cells = self._decoder_cells

            # BeamSearchDecoder requires its initial state to be tiled
            # beam_width times, so the non-attentive path needs tile_batch too
            initial_state = seq2seq.tile_batch(
                self._decoder_initial_state, multiplier=self._hparams.beam_width)

        self._decoder_inference = seq2seq.BeamSearchDecoder(
            cell=cells,
            embedding=self._embedding_matrix,
            start_tokens=array_ops.fill([self._batch_size], self._GO_ID),
            end_token=self._EOS_ID,
            initial_state=initial_state,
            beam_width=self._hparams.beam_width,
            output_layer=self._dense_layer,
            length_penalty_weight=0.6,
        )

        outputs, states, lengths = seq2seq.dynamic_decode(
            self._decoder_inference,
            impute_finished=False,
            maximum_iterations=self._hparams.max_label_length,
            swap_memory=False)

        if self._hparams.write_attention_alignment is True:
            self.attention_summary, self.attention_alignment = self._create_attention_alignments_summary(states)

        self.inference_outputs = outputs.beam_search_decoder_output
        self.inference_predicted_ids = outputs.predicted_ids[:, :, 0]  # return the first beam
        self.inference_predicted_beam = outputs.predicted_ids
        self.beam_search_output = outputs.beam_search_decoder_output 
Author: georgesterpu | Project: avsr-tf1 | Source file: decoder_unimodal.py
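A note on length_penalty_weight=0.6: tf.contrib.seq2seq normalizes beam scores with the GNMT length penalty of Wu et al. (2016), dividing each hypothesis score by ((5 + |Y|) / 6) ** alpha. A small sketch of the published formula (reproducing the paper, not the library source):

def length_penalty(sequence_length, alpha):
    # GNMT length normalization: ((5 + |Y|) / 6) ** alpha
    return ((5.0 + float(sequence_length)) / 6.0) ** alpha

# with alpha = 0.6, a 20-token hypothesis is divided by ~2.35,
# offsetting beam search's bias toward short sequences
print(length_penalty(20, 0.6))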

Example 4: _build_decoder_beam_search

# Required import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import BeamSearchDecoder [as alias]
def _build_decoder_beam_search(self):

        batch_size, _ = tf.unstack(tf.shape(self._labels))

        attention_mechanisms, layer_sizes = self._create_attention_mechanisms(beam_search=True)

        decoder_initial_state_tiled = seq2seq.tile_batch(
            self._decoder_initial_state, multiplier=self._hparams.beam_width)

        if self._hparams.enable_attention is True:

            attention_cells = seq2seq.AttentionWrapper(
                cell=self._decoder_cells,
                attention_mechanism=attention_mechanisms,
                attention_layer_size=layer_sizes,
                initial_cell_state=decoder_initial_state_tiled,
                alignment_history=self._hparams.write_attention_alignment,
                output_attention=self._output_attention)

            initial_state = attention_cells.zero_state(
                dtype=self._hparams.dtype, batch_size=batch_size * self._hparams.beam_width)

            initial_state = initial_state.clone(
                cell_state=decoder_initial_state_tiled)

            cells = attention_cells
        else:
            cells = self._decoder_cells
            initial_state = decoder_initial_state_tiled

        self._decoder_inference = seq2seq.BeamSearchDecoder(
            cell=cells,
            embedding=self._embedding_matrix,
            start_tokens=array_ops.fill([batch_size], self._GO_ID),
            end_token=self._EOS_ID,
            initial_state=initial_state,
            beam_width=self._hparams.beam_width,
            output_layer=self._dense_layer,
            length_penalty_weight=0.5,
        )

        outputs, states, lengths = seq2seq.dynamic_decode(
            self._decoder_inference,
            impute_finished=False,
            maximum_iterations=self._hparams.max_label_length,
            swap_memory=False)

        if self._hparams.write_attention_alignment is True:
            self.attention_summary = self._create_attention_alignments_summary(states)

        self.inference_outputs = outputs.beam_search_decoder_output
        self.inference_predicted_ids = outputs.predicted_ids[:, :, 0]  # return the first beam
        self.inference_predicted_beam = outputs.predicted_ids
        self.beam_search_output = outputs.beam_search_decoder_output 
Author: georgesterpu | Project: avsr-tf1 | Source file: decoder_bimodal.py
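The zero_state(...).clone(cell_state=...) step above is the pattern the contrib documentation recommends for combining AttentionWrapper with beam search: the wrapper's own state (attention vector, alignment history, time step) must be created fresh for batch_size * beam_width, and only the inner cell_state is replaced with the tiled encoder state. A hedged usage sketch for the resulting tensors; model and the session setup are hypothetical:

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    best, beams = sess.run([model.inference_predicted_ids,
                            model.inference_predicted_beam])
    # best:  [batch, T] -- beams are sorted by score, index 0 is the best
    # beams: [batch, T, beam_width]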


Note: the tensorflow.contrib.seq2seq.BeamSearchDecoder examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs; the snippets are drawn from open-source projects contributed by their respective developers. Copyright of the source code remains with the original authors; consult each project's license before redistributing or using it. Do not reproduce this article without permission.