

Python rnn.stack_bidirectional_dynamic_rnn Method Code Examples

This article collects typical usage examples of the Python method tensorflow.contrib.rnn.python.ops.rnn.stack_bidirectional_dynamic_rnn. If you are wondering what rnn.stack_bidirectional_dynamic_rnn does or how to use it, the curated code examples below may help. You can also explore further usage examples from the tensorflow.contrib.rnn.python.ops.rnn module.


The following presents 6 code examples of the rnn.stack_bidirectional_dynamic_rnn method, sorted by popularity by default.
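Before diving into the examples, a minimal self-contained sketch of the call itself may help orient readers. Everything below (shapes, placeholder names, layer sizes) is an illustrative assumption based on the TensorFlow 1.x contrib API, not code from the projects that follow.

# A minimal sketch: a 2-layer stacked bidirectional dynamic RNN (TensorFlow 1.x).
import tensorflow as tf
from tensorflow.contrib.rnn.python.ops import rnn

batch_size, max_time, input_dim, num_units = 4, 10, 8, 16
inputs = tf.placeholder(tf.float32, [batch_size, max_time, input_dim])
seq_len = tf.placeholder(tf.int32, [batch_size])

# One cell per layer and direction; the op stacks the layers and concatenates
# forward and backward outputs between layers.
cells_fw = [tf.contrib.rnn.BasicLSTMCell(num_units) for _ in range(2)]
cells_bw = [tf.contrib.rnn.BasicLSTMCell(num_units) for _ in range(2)]

outputs, state_fw, state_bw = rnn.stack_bidirectional_dynamic_rnn(
    cells_fw=cells_fw,
    cells_bw=cells_bw,
    inputs=inputs,
    dtype=tf.float32,
    sequence_length=seq_len)
# outputs: [batch_size, max_time, 2 * num_units] -- the last layer's fw/bw concatenation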

Example 1: encode

# Required import: from tensorflow.contrib.rnn.python.ops import rnn [as alias]
# Or: from tensorflow.contrib.rnn.python.ops.rnn import stack_bidirectional_dynamic_rnn [as alias]
def encode(self, inputs, sequence_length, **kwargs):
    cell_fw = training_utils.get_rnn_cell(**self.params["rnn_cell"])
    cell_bw = training_utils.get_rnn_cell(**self.params["rnn_cell"])

    cells_fw = _unpack_cell(cell_fw)
    cells_bw = _unpack_cell(cell_bw)

    result = rnn.stack_bidirectional_dynamic_rnn(
        cells_fw=cells_fw,
        cells_bw=cells_bw,
        inputs=inputs,
        dtype=tf.float32,
        sequence_length=sequence_length,
        **kwargs)
    outputs_concat, _output_state_fw, _output_state_bw = result
    final_state = (_output_state_fw, _output_state_bw)
    return EncoderOutput(
        outputs=outputs_concat,
        final_state=final_state,
        attention_values=outputs_concat,
        attention_values_length=sequence_length) 
Developer: pandegroup, Project: reaction_prediction_seq2seq, Lines of code: 23, Source file: rnn_encoder.py

Example 2: build_net_aux

# Required import: from tensorflow.contrib.rnn.python.ops import rnn [as alias]
# Or: from tensorflow.contrib.rnn.python.ops.rnn import stack_bidirectional_dynamic_rnn [as alias]
def build_net_aux(self, inputs, lengths):
        outputs = tf.reshape(inputs, [self._config.batch_size, -1, self._config.input_size])
        # BLSTM layer
        with tf.variable_scope('blstm_aux'):
            def lstm_cell():
                if not self._infer and self._config.keep_prob < 1.0:
                    return tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.BasicLSTMCell(self._config.aux_hidden_size), output_keep_prob=self._config.keep_prob)
                else:
                    return tf.contrib.rnn.BasicLSTMCell(self._config.aux_hidden_size)

            # in TF r1.12 this lives at tf.nn.rnn_cell.MultiRNNCell
            lstm_fw_cell = tf.contrib.rnn.MultiRNNCell([lstm_cell() for _ in range(self._config.rnn_num_layers)], state_is_tuple=True)
            lstm_bw_cell = tf.contrib.rnn.MultiRNNCell([lstm_cell() for _ in range(self._config.rnn_num_layers)], state_is_tuple=True)
            lstm_fw_cell = self._unpack_cell(lstm_fw_cell)
            lstm_bw_cell = self._unpack_cell(lstm_bw_cell)
            outputs, fw_final_states, bw_final_states = rnn.stack_bidirectional_dynamic_rnn(
                cells_fw=lstm_fw_cell, cells_bw=lstm_bw_cell, inputs=outputs,
                dtype=tf.float32, sequence_length=lengths)
            outputs = tf.reshape(outputs, [-1, 2*self._config.aux_hidden_size]) # reshape BLSTM outputs to the dense layers' expected input size

        with tf.variable_scope('layer2_aux'):
            weights2, biases2 = self._weight_and_bias(2*self._config.aux_hidden_size, self._config.aux_hidden_size)
            outputs = tf.nn.relu(tf.matmul(outputs, weights2) + biases2)

        with tf.variable_scope('layer3_aux'):
            weights3, biases3 = self._weight_and_bias(self._config.aux_hidden_size, self._config.aux_output_size)
            outputs = tf.matmul(outputs, weights3) + biases3
            outputs = tf.reshape(outputs, [self._config.batch_size, -1, self._config.aux_output_size])
            # average over the frames to get the speaker embedding
            spk_embed = tf.reduce_sum(outputs, 1)/tf.reshape(tf.to_float(self._lengths_aux), (-1,1))

        return spk_embed 
Developer: xuchenglin28, Project: speaker_extraction, Lines of code: 33, Source file: model.py
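The final averaging step in this example divides a sum over frames by the utterance length to obtain one embedding per speaker. As a hedged, standalone illustration of the same idea (all names and shapes below are assumptions, not from the project), a sequence mask can keep zero-padded frames out of the sum:

import tensorflow as tf

# outputs: [batch, time, dim] frame-level features; lengths: [batch] valid-frame counts
outputs = tf.placeholder(tf.float32, [None, None, 30])
lengths = tf.placeholder(tf.int32, [None])

# Zero out padded frames, then divide by the true length to get a mean embedding.
mask = tf.expand_dims(
    tf.sequence_mask(lengths, maxlen=tf.shape(outputs)[1], dtype=tf.float32), -1)
spk_embed = tf.reduce_sum(outputs * mask, axis=1) / tf.expand_dims(tf.to_float(lengths), -1)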

Example 3: _unpack_cell

# Required import: from tensorflow.contrib.rnn.python.ops import rnn [as alias]
# Or: from tensorflow.contrib.rnn.python.ops.rnn import stack_bidirectional_dynamic_rnn [as alias]
def _unpack_cell(cell):
  """Unpack the cells because the stack_bidirectional_dynamic_rnn
  expects a list of cells, one per layer."""
  if isinstance(cell, tf.contrib.rnn.MultiRNNCell):
    return cell._cells  #pylint: disable=W0212
  else:
    return [cell] 
Developer: akanimax, Project: natural-language-summary-generation-from-structured-data, Lines of code: 9, Source file: rnn_encoder.py
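stack_bidirectional_dynamic_rnn takes one cell per layer and direction, while higher-level code often builds a single MultiRNNCell; this helper bridges the two. A hedged usage sketch (cell sizes are illustrative assumptions):

import tensorflow as tf

multi_cell = tf.contrib.rnn.MultiRNNCell(
    [tf.contrib.rnn.BasicLSTMCell(32) for _ in range(3)])
cells = _unpack_cell(multi_cell)                          # list of the 3 inner cells
single = _unpack_cell(tf.contrib.rnn.BasicLSTMCell(32))   # one-element list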

Example 4: encode

# Required import: from tensorflow.contrib.rnn.python.ops import rnn [as alias]
# Or: from tensorflow.contrib.rnn.python.ops.rnn import stack_bidirectional_dynamic_rnn [as alias]
def encode(self, inputs, sequence_length, **kwargs):
    scope = tf.get_variable_scope()
    scope.set_initializer(tf.random_uniform_initializer(
        -self.params["init_scale"],
        self.params["init_scale"]))

    cell_fw = training_utils.get_rnn_cell(**self.params["rnn_cell"])
    cell_bw = training_utils.get_rnn_cell(**self.params["rnn_cell"])

    cells_fw = _unpack_cell(cell_fw)
    cells_bw = _unpack_cell(cell_bw)

    result = rnn.stack_bidirectional_dynamic_rnn(
        cells_fw=cells_fw,
        cells_bw=cells_bw,
        inputs=inputs,
        dtype=tf.float32,
        sequence_length=sequence_length,
        **kwargs)
    outputs_concat, _output_state_fw, _output_state_bw = result
    final_state = (_output_state_fw, _output_state_bw)
    return EncoderOutput(
        outputs=outputs_concat,
        final_state=final_state,
        attention_values=outputs_concat,
        attention_values_length=sequence_length) 
Developer: akanimax, Project: natural-language-summary-generation-from-structured-data, Lines of code: 28, Source file: rnn_encoder.py

Example 5: _build_model_op

# Required import: from tensorflow.contrib.rnn.python.ops import rnn [as alias]
# Or: from tensorflow.contrib.rnn.python.ops.rnn import stack_bidirectional_dynamic_rnn [as alias]
def _build_model_op(self):
        with tf.variable_scope("bi_directional_rnn"):
            cell_fw = self._create_rnn_cell()
            cell_bw = self._create_rnn_cell()
            if self.cfg["use_stack_rnn"]:
                rnn_outs, *_ = stack_bidirectional_dynamic_rnn(cell_fw, cell_bw, self.word_emb, dtype=tf.float32,
                                                               sequence_length=self.seq_len)
            else:
                rnn_outs, *_ = bidirectional_dynamic_rnn(cell_fw, cell_bw, self.word_emb, sequence_length=self.seq_len,
                                                         dtype=tf.float32)
            rnn_outs = tf.concat(rnn_outs, axis=-1)
            rnn_outs = tf.layers.dropout(rnn_outs, rate=self.drop_rate, training=self.is_train)
            if self.cfg["use_residual"]:
                word_project = tf.layers.dense(self.word_emb, units=2 * self.cfg["num_units"], use_bias=False)
                rnn_outs = rnn_outs + word_project
            outputs = layer_normalize(rnn_outs) if self.cfg["use_layer_norm"] else rnn_outs
            print("rnn output shape: {}".format(outputs.get_shape().as_list()))

        if self.cfg["use_attention"] == "self_attention":
            with tf.variable_scope("self_attention"):
                attn_outs = multi_head_attention(outputs, outputs, self.cfg["num_heads"], self.cfg["attention_size"],
                                                 drop_rate=self.drop_rate, is_train=self.is_train)
                if self.cfg["use_residual"]:
                    attn_outs = attn_outs + outputs
                outputs = layer_normalize(attn_outs) if self.cfg["use_layer_norm"] else attn_outs
                print("self-attention output shape: {}".format(outputs.get_shape().as_list()))

        elif self.cfg["use_attention"] == "normal_attention":
            with tf.variable_scope("normal_attention"):
                context = tf.transpose(outputs, [1, 0, 2])
                p_context = tf.layers.dense(outputs, units=2 * self.cfg["num_units"], use_bias=False)
                p_context = tf.transpose(p_context, [1, 0, 2])
                attn_cell = AttentionCell(self.cfg["num_units"], context, p_context)  # operates on time-major inputs
                attn_outs, _ = dynamic_rnn(attn_cell, context, sequence_length=self.seq_len, time_major=True,
                                           dtype=tf.float32)
                outputs = tf.transpose(attn_outs, [1, 0, 2])
                print("attention output shape: {}".format(outputs.get_shape().as_list()))

        with tf.variable_scope("project"):
            self.logits = tf.layers.dense(outputs, units=self.tag_vocab_size, use_bias=True)
            print("logits shape: {}".format(self.logits.get_shape().as_list())) 
Developer: IsaacChanghau, Project: neural_sequence_labeling, Lines of code: 43, Source file: blstm_cnn_crf_model.py

Example 6: __call__

# Required import: from tensorflow.contrib.rnn.python.ops import rnn [as alias]
# Or: from tensorflow.contrib.rnn.python.ops.rnn import stack_bidirectional_dynamic_rnn [as alias]
def __call__(self, inputs, seq_len):
        with tf.variable_scope(self.scope):
            output, *_ = stack_bidirectional_dynamic_rnn(self.cells_fw, self.cells_bw, inputs, sequence_length=seq_len,
                                                         dtype=tf.float32)
        return output 
Developer: IsaacChanghau, Project: Dense_BiLSTM, Lines of code: 7, Source file: nns.py
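The surrounding class is not shown in this snippet. A hypothetical minimal container (constructor, attribute names, and cell type are all assumptions, not the project's actual class) that would make the __call__ above work might look like this:

import tensorflow as tf
from tensorflow.contrib.rnn.python.ops.rnn import stack_bidirectional_dynamic_rnn

class StackedBiRNN(object):
    """Hypothetical wrapper holding per-layer cell lists for stacking."""
    def __init__(self, num_layers, num_units, scope="stacked_bi_rnn"):
        self.cells_fw = [tf.contrib.rnn.LSTMCell(num_units) for _ in range(num_layers)]
        self.cells_bw = [tf.contrib.rnn.LSTMCell(num_units) for _ in range(num_layers)]
        self.scope = scope

    def __call__(self, inputs, seq_len):
        with tf.variable_scope(self.scope):
            output, *_ = stack_bidirectional_dynamic_rnn(
                self.cells_fw, self.cells_bw, inputs,
                sequence_length=seq_len, dtype=tf.float32)
        return output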


Note: The tensorflow.contrib.rnn.python.ops.rnn.stack_bidirectional_dynamic_rnn method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects, and copyright of the source code belongs to the original authors; refer to each project's license before distributing or using the code, and do not reproduce without permission.