

Python seq2seq.tile_batch Method Code Examples

This article collects typical usage examples of the tensorflow.contrib.seq2seq.tile_batch method in Python. If you are wondering what seq2seq.tile_batch does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from its containing module, tensorflow.contrib.seq2seq.


The following presents 13 code examples of the seq2seq.tile_batch method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
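
Before the examples, here is a minimal sketch of the pattern nearly all of them share. It is an illustration written for this article, not taken from any of the projects below, and all sizes and variable names are assumptions: for beam search decoding, the encoder memory, its sequence lengths, and the decoder's initial state are each repeated beam_width times along the batch dimension with tile_batch, and the AttentionWrapper state is then built for batch_size * beam_width.

import tensorflow as tf
from tensorflow.contrib import seq2seq

# Illustrative sizes -- assumptions made for this sketch only.
batch_size, max_time, hidden_size, beam_width = 8, 20, 64, 5

# Stand-ins for a real encoder's outputs, sequence lengths, and final state.
encoder_outputs = tf.random_normal([batch_size, max_time, hidden_size])
encoder_lengths = tf.fill([batch_size], max_time)
encoder_final_state = tf.zeros([batch_size, hidden_size])

# Everything the beam search decoder reads must be repeated beam_width times
# along the batch dimension.
tiled_memory = seq2seq.tile_batch(encoder_outputs, multiplier=beam_width)
tiled_lengths = seq2seq.tile_batch(encoder_lengths, multiplier=beam_width)
# tile_batch also accepts nested structures (e.g. LSTM or MultiRNNCell states).
tiled_state = seq2seq.tile_batch(encoder_final_state, multiplier=beam_width)

# Attention is built over the tiled memory; the decoder cell is then wrapped.
attention = seq2seq.LuongAttention(
    num_units=hidden_size,
    memory=tiled_memory,
    memory_sequence_length=tiled_lengths)
decoder_cell = tf.nn.rnn_cell.GRUCell(hidden_size)
attn_cell = seq2seq.AttentionWrapper(
    decoder_cell, attention, attention_layer_size=hidden_size)

# The wrapper's zero state is created for batch_size * beam_width and cloned
# so that its cell_state becomes the tiled encoder state.
initial_state = attn_cell.zero_state(batch_size * beam_width, tf.float32)
initial_state = initial_state.clone(cell_state=tiled_state)

From here, the tiled initial_state and the wrapped cell would be handed to seq2seq.BeamSearchDecoder, as Examples 10 and 13 show in full.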

Example 1: test_basic_rnn_decoder_given_initial_state

# Required module import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import tile_batch [as alias]
def test_basic_rnn_decoder_given_initial_state(self):
        """Tests beam search with BasicRNNDecoder given initial state.
        """
        hparams = {
            "rnn_cell": {
                "kwargs": {"num_units": self._cell_dim}
            }
        }
        decoder = tx.modules.BasicRNNDecoder(
            vocab_size=self._vocab_size,
            hparams=hparams)

        # (zhiting): The beam search decoder does not generate max-length
        # samples if only one cell_state is created. Perhaps due to
        # random seed or bugs?
        cell_state = decoder.cell.zero_state(self._batch_size, tf.float32)

        self._test_beam_search(decoder, initial_state=cell_state)

        tiled_cell_state = tile_batch(cell_state, multiplier=self._beam_width)
        self._test_beam_search(
            decoder, tiled_initial_state=tiled_cell_state, initiated=True) 
Developer: qkaren | Project: Counterfactual-StoryRW | Lines of code: 24 | Source: beam_search_decode_test.py

Example 2: _build_attention_mechanism

# Required module import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import tile_batch [as alias]
def _build_attention_mechanism(self, feature_maps):
    """Build (possibly multiple) attention mechanisms."""
    def _build_single_attention_mechanism(memory):
      if not self._is_training:
        memory = seq2seq.tile_batch(memory, multiplier=self._beam_width)
      return seq2seq.BahdanauAttention(
        self._num_attention_units,
        memory,
        memory_sequence_length=None
      )
    
    feature_sequences = [tf.squeeze(fmap, axis=1) for fmap in feature_maps]
    if self._multi_attention:
      attention_mechanism = []
      for i, feature_sequence in enumerate(feature_sequences):
        memory = feature_sequence
        attention_mechanism.append(_build_single_attention_mechanism(memory))
    else:
      memory = tf.concat(feature_sequences, axis=1)
      attention_mechanism = _build_single_attention_mechanism(memory)
    return attention_mechanism 
Developer: bgshih | Project: aster | Lines of code: 23 | Source: attention_predictor.py

Example 3: build_attention_wrapper

# Required module import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import tile_batch [as alias]
def build_attention_wrapper(self, final_cell):
        self.feedforward_inputs = tf.cond(
            self.beam_search_decoding, lambda: seq2seq.tile_batch(
                self.features["inputs"], multiplier=self.hparams.beam_width),
            lambda: self.features["inputs"])
        self.feedforward_inputs_length = tf.cond(
            self.beam_search_decoding, lambda: seq2seq.tile_batch(
                self.features["length"], multiplier=self.hparams.beam_width),
            lambda: self.features["length"])
        attention_mechanism = self.build_attention_mechanism()
        return AttentionWrapper(
            cell=final_cell,
            attention_mechanism=attention_mechanism,
            attention_layer_size=self.hparams.hidden_units,
            cell_input_fn=self._attention_input_feeding,
            initial_cell_state=self.initial_state[-1] if self.hparams.depth > 1 else self.initial_state) 
Developer: microsoft | Project: icecaps | Lines of code: 18 | Source: seq2seq_decoder_estimator.py

Example 4: build_rnn

# Required module import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import tile_batch [as alias]
def build_rnn(self):
        self.initial_state = tf.cond(
            self.beam_search_decoding, lambda: seq2seq.tile_batch(
                self.features["state"], self.hparams.beam_width),
            lambda: self.features["state"], name="initial_state")
        self.build_embeddings()
        cell_list = self.build_deep_cell(return_raw_list=True)
        if self.hparams.use_attention:
            cell_list[-1] = self.build_attention(cell_list[-1])
            if self.hparams.depth > 1:
                self.initial_state[-1] = cell_list[-1].zero_state(batch_size=self.batch_size, dtype=tf.float32)
            else:
                self.initial_state = cell_list[-1].zero_state(batch_size=self.batch_size, dtype=tf.float32)
        with tf.name_scope('rnn_cell'):
            self.cell = self.build_deep_cell(cell_list)
        self.output_layer = Dense(self.vocab.size(), name='output_layer') 
Developer: microsoft | Project: icecaps | Lines of code: 18 | Source: seq2seq_decoder_estimator.py

Example 5: _get_beam_search_cell

# Required module import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import tile_batch [as alias]
def _get_beam_search_cell(self, beam_width):
        """Returns the RNN cell for beam search decoding.
        """
        with tf.variable_scope(self.variable_scope, reuse=True):
            attn_kwargs = copy.copy(self._attn_kwargs)

            memory = attn_kwargs['memory']
            attn_kwargs['memory'] = tile_batch(memory, multiplier=beam_width)

            memory_seq_length = attn_kwargs['memory_sequence_length']
            if memory_seq_length is not None:
                attn_kwargs['memory_sequence_length'] = tile_batch(
                    memory_seq_length, beam_width)

            attn_modules = ['tensorflow.contrib.seq2seq', 'texar.custom']
            bs_attention_mechanism = utils.check_or_get_instance(
                self._hparams.attention.type, attn_kwargs, attn_modules,
                classtype=tf.contrib.seq2seq.AttentionMechanism)

            bs_attn_cell = AttentionWrapper(
                self._cell._cell,
                bs_attention_mechanism,
                cell_input_fn=self._cell_input_fn,
                **self._attn_cell_kwargs)

            self._beam_search_cell = bs_attn_cell

            return bs_attn_cell 
Developer: qkaren | Project: Counterfactual-StoryRW | Lines of code: 30 | Source: rnn_decoders.py

Example 6: test_attention_decoder_given_initial_state

# Required module import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import tile_batch [as alias]
def test_attention_decoder_given_initial_state(self):
        """Tests beam search with RNNAttentionDecoder given initial state.
        """
        seq_length = np.random.randint(
            self._max_time, size=[self._batch_size]) + 1
        encoder_values_length = tf.constant(seq_length)
        hparams = {
            "attention": {
                "kwargs": {"num_units": self._attention_dim}
            },
            "rnn_cell": {
                "kwargs": {"num_units": self._cell_dim}
            }
        }
        decoder = tx.modules.AttentionRNNDecoder(
            vocab_size=self._vocab_size,
            memory=self._encoder_output,
            memory_sequence_length=encoder_values_length,
            hparams=hparams)

        state = decoder.cell.zero_state(self._batch_size, tf.float32)

        cell_state = state.cell_state
        self._test_beam_search(decoder, initial_state=cell_state)

        tiled_cell_state = tile_batch(cell_state, multiplier=self._beam_width)
        self._test_beam_search(
            decoder, tiled_initial_state=tiled_cell_state, initiated=True) 
Developer: qkaren | Project: Counterfactual-StoryRW | Lines of code: 30 | Source: beam_search_decode_test.py

Example 7: create_attention_mechanisms

# Required module import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import tile_batch [as alias]
def create_attention_mechanisms(num_units, attention_types, mode, dtype, beam_search=False, beam_width=None, memory=None, memory_len=None, fusion_type=None):
    r"""
    Creates a list of attention mechanisms (e.g. seq2seq.BahdanauAttention)
    and also a list of ints holding the attention projection layer size
    Args:
        beam_search: `bool`, whether the beam-search decoding algorithm is used or not
    """
    mechanisms = []
    output_attention = None

    if beam_search is True:
        memory = seq2seq.tile_batch(
            memory, multiplier=beam_width)

        memory_len = seq2seq.tile_batch(
            memory_len, multiplier=beam_width)

    for attention_type in attention_types:
        attention, output_attention = create_attention_mechanism(
            num_units=num_units,  # has to match decoder's state(query) size
            memory=memory,
            memory_sequence_length=memory_len,
            attention_type=attention_type,
            mode=mode,
            dtype=dtype,
        )
        mechanisms.append(attention)

    N = len(attention_types)
    if fusion_type == 'deep_fusion':
        attention_layer_sizes = None
        attention_layers = [AttentionLayers(units=num_units, dtype=dtype) for _ in range(N)]
    elif fusion_type == 'linear_fusion':
        attention_layer_sizes = [num_units, ] * N
        attention_layers = None
    else:
        raise Exception('Unknown fusion type')

    return mechanisms, attention_layers, attention_layer_sizes, output_attention 
Developer: georgesterpu | Project: avsr-tf1 | Lines of code: 41 | Source: attention.py

Example 8: _get_beam_search_cell

# Required module import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import tile_batch [as alias]
def _get_beam_search_cell(self, beam_width):
        """Returns the RNN cell for beam search decoding.
        """
        with tf.variable_scope(self.variable_scope, reuse=True):
            attn_kwargs = copy.copy(self._attn_kwargs)

            memory = attn_kwargs['memory']
            attn_kwargs['memory'] = tile_batch(memory, multiplier=beam_width)

            memory_seq_length = attn_kwargs['memory_sequence_length']
            if memory_seq_length is not None:
                attn_kwargs['memory_sequence_length'] = tile_batch(
                    memory_seq_length, beam_width)

            attn_modules = ['tensorflow.contrib.seq2seq', 'texar.tf.custom']
            bs_attention_mechanism = utils.check_or_get_instance(
                self._hparams.attention.type, attn_kwargs, attn_modules,
                classtype=tf.contrib.seq2seq.AttentionMechanism)

            bs_attn_cell = AttentionWrapper(
                self._cell._cell,
                bs_attention_mechanism,
                cell_input_fn=self._cell_input_fn,
                **self._attn_cell_kwargs)

            self._beam_search_cell = bs_attn_cell

            return bs_attn_cell 
Developer: asyml | Project: texar | Lines of code: 30 | Source: rnn_decoders.py

Example 9: _make_decoder

# Required module import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import tile_batch [as alias]
def _make_decoder(self, encoder_outputs, encoder_final_state, beam_search=False, reuse=False):
        """Create decoder"""
        with tf.variable_scope('decode', reuse=reuse):
            # Create decoder cells
            cells = [self._make_cell() for _ in range(self.depth)]

            if beam_search:
                # Tile inputs as needed for beam search
                encoder_outputs = seq2seq.tile_batch(
                    encoder_outputs, multiplier=self.beam_width)
                encoder_final_state = nest.map_structure(
                    lambda s: seq2seq.tile_batch(s, multiplier=self.beam_width),
                    encoder_final_state)
                sequence_length = seq2seq.tile_batch(
                    self.sequence_length, multiplier=self.beam_width)
            else:
                sequence_length = self.sequence_length

            # Prepare attention mechanism;
            # add only to last cell
            attention_mechanism = seq2seq.LuongAttention(
                num_units=self.hidden_size, memory=encoder_outputs,
                memory_sequence_length=sequence_length, name='attn')
            cells[-1] = seq2seq.AttentionWrapper(
                cells[-1], attention_mechanism, attention_layer_size=self.hidden_size,
                initial_cell_state=encoder_final_state[-1],
                cell_input_fn=lambda inp, attn: tf.layers.dense(tf.concat([inp, attn], -1), self.hidden_size),
                name='attnwrap'
            )

            # Copy encoder final state as decoder initial state
            decoder_initial_state = [s for s in encoder_final_state]

            # Set last initial state to be AttentionWrapperState
            batch_size = self.batch_size
            if beam_search: batch_size = self.batch_size * self.beam_width
            decoder_initial_state[-1] = cells[-1].zero_state(
                dtype=tf.float32, batch_size=batch_size)

            # Wrap up the cells
            cell = rnn.MultiRNNCell(cells)

            # Return initial state as a tuple
            # (required by tensorflow)
            return cell, tuple(decoder_initial_state) 
Developer: frnsys | Project: retrosynthesis_planner | Lines of code: 47 | Source: seq2seq.py
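
A note on Example 9: nest.map_structure is used there to tile each tensor of the nested encoder_final_state individually. Since tile_batch itself accepts a nested structure of tensors, the same tiling can also be written in one call (a minor variation on the example's own code, not taken from the repository):

                encoder_final_state = seq2seq.tile_batch(
                    encoder_final_state, multiplier=self.beam_width)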

Example 10: _build_decoder_test_beam_search

# Required module import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import tile_batch [as alias]
def _build_decoder_test_beam_search(self):
        r"""
        Builds a beam search test decoder
        """
        if self._hparams.enable_attention is True:
            cells, initial_state = add_attention(
                cells=self._decoder_cells,
                attention_types=self._hparams.attention_type[1],
                num_units=self._hparams.decoder_units_per_layer[-1],
                memory=self._encoder_memory,
                memory_len=self._encoder_features_len,
                beam_search=True,
                batch_size=self._batch_size,
                beam_width=self._hparams.beam_width,
                initial_state=self._decoder_initial_state,
                mode=self._mode,
                dtype=self._hparams.dtype,
                fusion_type='linear_fusion',
                write_attention_alignment=self._hparams.write_attention_alignment)
        else:  # does the non-attentive beam decoder need tile_batch ?
            cells = self._decoder_cells

            decoder_initial_state_tiled = seq2seq.tile_batch(  # guess so ? it compiles without it too
                self._decoder_initial_state, multiplier=self._hparams.beam_width)
            initial_state = decoder_initial_state_tiled

        self._decoder_inference = seq2seq.BeamSearchDecoder(
            cell=cells,
            embedding=self._embedding_matrix,
            start_tokens=array_ops.fill([self._batch_size], self._GO_ID),
            end_token=self._EOS_ID,
            initial_state=initial_state,
            beam_width=self._hparams.beam_width,
            output_layer=self._dense_layer,
            length_penalty_weight=0.6,
        )

        outputs, states, lengths = seq2seq.dynamic_decode(
            self._decoder_inference,
            impute_finished=False,
            maximum_iterations=self._hparams.max_label_length,
            swap_memory=False)

        if self._hparams.write_attention_alignment is True:
            self.attention_summary, self.attention_alignment = self._create_attention_alignments_summary(states)

        self.inference_outputs = outputs.beam_search_decoder_output
        self.inference_predicted_ids = outputs.predicted_ids[:, :, 0]  # return the first beam
        self.inference_predicted_beam = outputs.predicted_ids
        self.beam_search_output = outputs.beam_search_decoder_output 
Developer: georgesterpu | Project: avsr-tf1 | Lines of code: 52 | Source: decoder_unimodal.py

Example 11: add_attention

# Required module import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import tile_batch [as alias]
def add_attention(
        cells,
        attention_types,
        num_units,
        memory,
        memory_len,
        mode,
        batch_size,
        dtype,
        beam_search=False,
        beam_width=None,
        initial_state=None,
        write_attention_alignment=False,
        fusion_type='linear_fusion',
):
    r"""
    Wraps the decoder_cells with an AttentionWrapper
    Args:
        cells: instances of `RNNCell`
        beam_search: `bool` flag for beam search decoders
        batch_size: `Tensor` containing the batch size. Necessary to the initialisation of the initial state

    Returns:
        attention_cells: the Attention wrapped decoder cells
        initial_state: a proper initial state to be used with the returned cells
    """
    attention_mechanisms, attention_layers, attention_layer_sizes, output_attention = create_attention_mechanisms(
        beam_search=beam_search,
        beam_width=beam_width,
        memory=memory,
        memory_len=memory_len,
        num_units=num_units,
        attention_types=attention_types,
        fusion_type=fusion_type,
        mode=mode,
        dtype=dtype)

    if beam_search is True:
        initial_state = seq2seq.tile_batch(
            initial_state, multiplier=beam_width)

    attention_cells = seq2seq.AttentionWrapper(
        cell=cells,
        attention_mechanism=attention_mechanisms,
        attention_layer_size=attention_layer_sizes,
        # initial_cell_state=decoder_initial_state,
        alignment_history=write_attention_alignment,
        output_attention=output_attention,
        attention_layer=attention_layers,
    )

    attn_zero = attention_cells.zero_state(
        dtype=dtype,
        batch_size=batch_size * beam_width if beam_search is True else batch_size)

    if initial_state is not None:
        initial_state = attn_zero.clone(
            cell_state=initial_state)

    return attention_cells, initial_state 
Developer: georgesterpu | Project: avsr-tf1 | Lines of code: 62 | Source: attention.py

Example 12: _create_attention_mechanisms

# Required module import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import tile_batch [as alias]
def _create_attention_mechanisms(self, beam_search=False):

        mechanisms = []
        layer_sizes = []

        if self._video_memory is not None:

            if beam_search is True:
                #  TODO potentially broken, please re-check
                self._video_memory = seq2seq.tile_batch(
                    self._video_memory, multiplier=self._hparams.beam_width)

                self._video_features_len = seq2seq.tile_batch(
                    self._video_features_len, multiplier=self._hparams.beam_width)

            for attention_type in self._hparams.attention_type[0]:

                attention_video = self._create_attention_mechanism(
                    num_units=self._hparams.decoder_units_per_layer[-1],
                    memory=self._video_memory,
                    memory_sequence_length=self._video_features_len,
                    attention_type=attention_type
                )
                mechanisms.append(attention_video)
                layer_sizes.append(self._hparams.decoder_units_per_layer[-1])

        if self._audio_memory is not None:

            if beam_search is True:
                #  TODO potentially broken, please re-check
                self._audio_memory = seq2seq.tile_batch(
                    self._audio_memory, multiplier=self._hparams.beam_width)

                self._audio_features_len = seq2seq.tile_batch(
                    self._audio_features_len, multiplier=self._hparams.beam_width)

            for attention_type in self._hparams.attention_type[1]:
                attention_audio = self._create_attention_mechanism(
                    num_units=self._hparams.decoder_units_per_layer[-1],
                    memory=self._audio_memory,
                    memory_sequence_length=self._audio_features_len,
                    attention_type=attention_type
                )
                mechanisms.append(attention_audio)
                layer_sizes.append(self._hparams.decoder_units_per_layer[-1])

        return mechanisms, layer_sizes 
Developer: georgesterpu | Project: avsr-tf1 | Lines of code: 49 | Source: decoder_bimodal.py

Example 13: _build_decoder_beam_search

# Required module import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import tile_batch [as alias]
def _build_decoder_beam_search(self):

        batch_size, _ = tf.unstack(tf.shape(self._labels))

        attention_mechanisms, layer_sizes = self._create_attention_mechanisms(beam_search=True)

        decoder_initial_state_tiled = seq2seq.tile_batch(
            self._decoder_initial_state, multiplier=self._hparams.beam_width)

        if self._hparams.enable_attention is True:

            attention_cells = seq2seq.AttentionWrapper(
                cell=self._decoder_cells,
                attention_mechanism=attention_mechanisms,
                attention_layer_size=layer_sizes,
                initial_cell_state=decoder_initial_state_tiled,
                alignment_history=self._hparams.write_attention_alignment,
                output_attention=self._output_attention)

            initial_state = attention_cells.zero_state(
                dtype=self._hparams.dtype, batch_size=batch_size * self._hparams.beam_width)

            initial_state = initial_state.clone(
                cell_state=decoder_initial_state_tiled)

            cells = attention_cells
        else:
            cells = self._decoder_cells
            initial_state = decoder_initial_state_tiled

        self._decoder_inference = seq2seq.BeamSearchDecoder(
            cell=cells,
            embedding=self._embedding_matrix,
            start_tokens=array_ops.fill([batch_size], self._GO_ID),
            end_token=self._EOS_ID,
            initial_state=initial_state,
            beam_width=self._hparams.beam_width,
            output_layer=self._dense_layer,
            length_penalty_weight=0.5,
        )

        outputs, states, lengths = seq2seq.dynamic_decode(
            self._decoder_inference,
            impute_finished=False,
            maximum_iterations=self._hparams.max_label_length,
            swap_memory=False)

        if self._hparams.write_attention_alignment is True:
            self.attention_summary = self._create_attention_alignments_summary(states)

        self.inference_outputs = outputs.beam_search_decoder_output
        self.inference_predicted_ids = outputs.predicted_ids[:, :, 0]  # return the first beam
        self.inference_predicted_beam = outputs.predicted_ids
        self.beam_search_output = outputs.beam_search_decoder_output 
Developer: georgesterpu | Project: avsr-tf1 | Lines of code: 56 | Source: decoder_bimodal.py


Note: The tensorflow.contrib.seq2seq.tile_batch method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code. Do not reproduce without permission.