

Python rnn.static_bidirectional_rnn Method Code Examples

This article collects typical usage examples of the tensorflow.contrib.rnn.static_bidirectional_rnn method in Python. If you are wondering how exactly rnn.static_bidirectional_rnn is used, or looking for working examples of it, the curated code samples below should help. You can also explore further usage examples of the enclosing tensorflow.contrib.rnn module.


The sections below present 4 code examples of the rnn.static_bidirectional_rnn method, sorted by popularity by default.
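
Before diving into the examples, here is a minimal, self-contained sketch of the call itself. It targets TensorFlow 1.x (where tf.contrib still exists); the batch size, sequence length, and layer sizes are illustrative assumptions, not values taken from the examples below.

import tensorflow as tf
from tensorflow.contrib import rnn

batch_size, timesteps, num_input, num_hidden = 32, 10, 28, 64

x = tf.placeholder(tf.float32, [batch_size, timesteps, num_input])
# static_bidirectional_rnn expects a Python list of 'timesteps' tensors,
# each of shape [batch_size, num_input]
inputs = tf.unstack(x, timesteps, axis=1)

fw_cell = rnn.BasicLSTMCell(num_hidden)
bw_cell = rnn.BasicLSTMCell(num_hidden)

# outputs is a list of 'timesteps' tensors of shape [batch_size, 2 * num_hidden];
# forward and backward outputs are concatenated along the feature axis
outputs, state_fw, state_bw = rnn.static_bidirectional_rnn(
    fw_cell, bw_cell, inputs, dtype=tf.float32)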

Example 1: BiRNN

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import static_bidirectional_rnn [as alias]
def BiRNN(x, weights, biases, timesteps, num_hidden):
    # Prepare data shape to match the `rnn` function requirements
    # Current data input shape: (batch_size, timesteps, num_input)
    # Required shape: list of 'timesteps' tensors of shape (batch_size, num_input)

    # Unstack to get a list of 'timesteps' tensors of shape (batch_size, num_input)
    x = tf.unstack(x, timesteps, 1)

    # Define lstm cells with tensorflow
    # Forward direction cell
    lstm_fw_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)
    # Backward direction cell
    lstm_bw_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)

    # Get BiRNN cell output
    outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                                 dtype=tf.float32)

    # Linear activation, using rnn inner loop last output
    return tf.matmul(outputs[-1], weights) + biases 
Developer: easy-tensorflow, Project: easy-tensorflow, Lines of code: 22, Source file: ops.py
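
For context, a hypothetical call site for BiRNN (the shapes, class count, and variable names here are assumptions for an MNIST-style task, not part of the original snippet):

timesteps, num_input, num_hidden, num_classes = 28, 28, 128, 10

x = tf.placeholder(tf.float32, [None, timesteps, num_input])
# Forward and backward outputs are concatenated, hence 2 * num_hidden rows
weights = tf.Variable(tf.random_normal([2 * num_hidden, num_classes]))
biases = tf.Variable(tf.random_normal([num_classes]))

logits = BiRNN(x, weights, biases, timesteps, num_hidden)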

Example 2: build

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import static_bidirectional_rnn [as alias]
def build(self):
		self._define_input()
		
		output = self.input_seq
		output = embedding(output, self.vocab.size, self.embedding_dim, name='layer_embedding')
		input_dim = self.embedding_dim
		
		# Prepare data shape to match rnn function requirements
		# Current data input shape: [batch_size, num_steps, input_dim]
		# Required shape: 'num_steps' tensors list of shape [batch_size, input_dim]
		output = tf.transpose(output, [1, 0, 2])
		output = tf.reshape(output, [-1, input_dim])
		output = tf.split(output, self.num_steps, 0)
		
		if self.bidirectional:
			# 'num_steps' tensors list of shape [batch_size, rnn_units * 2]
			fw_cell = build_cell(self.rnn_units, self.cell_type, self.rnn_layers)
			bw_cell = build_cell(self.rnn_units, self.cell_type, self.rnn_layers)
			output, state_fw, state_bw = rnn.static_bidirectional_rnn(
				fw_cell, bw_cell, output, dtype=tf.float32, sequence_length=self.seq_len, scope='encoder')
			
			if isinstance(state_fw, tf.contrib.rnn.LSTMStateTuple):
				encoder_state_c = tf.concat([state_fw.c, state_bw.c], axis=1, name='bidirectional_concat_c')
				encoder_state_h = tf.concat([state_fw.h, state_bw.h], axis=1, name='bidirectional_concat_h')
				state = tf.contrib.rnn.LSTMStateTuple(c=encoder_state_c, h=encoder_state_h)
			elif isinstance(state_fw, tf.Tensor):
				state = tf.concat([state_fw, state_bw], axis=1, name='bidirectional_concat')
			else:
				raise ValueError('unsupported encoder state type: %s' % type(state_fw))
		else:
			# 'num_steps' tensors list of shape [batch_size, rnn_units]
			cell = build_cell(self.rnn_units, self.cell_type, self.rnn_layers)
			output, state = rnn.static_rnn(cell, output, dtype=tf.float32, sequence_length=self.seq_len,
										   scope='encoder')
		
		output = tf.stack(output, axis=0)  # [num_steps, batch_size, rnn_units] (rnn_units * 2 if bidirectional)
		output = tf.transpose(output, [1, 0, 2])  # [batch_size, num_steps, rnn_units] (or rnn_units * 2)
		self.encoder_output = output
		self.encoder_state = state
		return output, state 
Developer: han-cai, Project: EAS, Lines of code: 42, Source file: base_controller.py
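
The snippet depends on project-specific helpers (embedding, build_cell) that are not shown here. A plausible sketch of build_cell, under the assumption that it maps a cell-type name to a possibly stacked tf.contrib cell (the real implementation in the EAS project may differ):

def build_cell(units, cell_type='lstm', layers=1):
    if cell_type == 'lstm':
        make = lambda: tf.contrib.rnn.BasicLSTMCell(units)
    elif cell_type == 'gru':
        make = lambda: tf.contrib.rnn.GRUCell(units)
    else:
        raise ValueError('unknown cell type: %s' % cell_type)
    if layers > 1:
        # Note: a MultiRNNCell returns a tuple of per-layer states, which the
        # isinstance checks in build() above would reject with ValueError
        return tf.contrib.rnn.MultiRNNCell([make() for _ in range(layers)])
    return make()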

Example 3: BiRNN

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import static_bidirectional_rnn [as alias]
def BiRNN(x, weights, biases):
    # Reshape [batch_size, n_steps, n_input] into a list of 'n_steps'
    # tensors of shape [batch_size, n_input], as the static RNN API expects
    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, n_input])
    x = tf.split(axis=0, num_or_size_splits=n_steps, value=x)
    # Forward and backward direction LSTM cells
    lstm_fw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    lstm_bw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    try:
        outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                                     dtype=tf.float32)
    except Exception:  # Older TensorFlow versions return only outputs, not states
        outputs = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                               dtype=tf.float32)
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
Developer: PacktPublishing, Project: Deep-Learning-with-TensorFlow, Lines of code: 15, Source file: bidirectional_RNN_1.py
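
This snippet reads n_input, n_steps, n_hidden, weights, and biases from module scope. Plausible definitions for an MNIST-style setup (the values are assumptions, not taken from the book's source):

n_input = 28    # features per timestep (assumed)
n_steps = 28    # number of timesteps (assumed)
n_hidden = 128  # LSTM units per direction (assumed)
n_classes = 10  # output classes (assumed)

# 2 * n_hidden rows because forward and backward outputs are concatenated
weights = {'out': tf.Variable(tf.random_normal([2 * n_hidden, n_classes]))}
biases = {'out': tf.Variable(tf.random_normal([n_classes]))}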

Example 4: generate_rnn_output

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import static_bidirectional_rnn [as alias]
def generate_rnn_output(self):
    """
    Generate RNN state outputs with word embeddings as inputs
    """
    with tf.variable_scope("generate_seq_output"):
        if self.bidirectional_rnn:
            embedding = tf.get_variable("embedding",
                                        [self.source_vocab_size,
                                         self.word_embedding_size])
            encoder_emb_inputs = [tf.nn.embedding_lookup(embedding, encoder_input)
                                  for encoder_input in self.encoder_inputs]
            rnn_outputs = static_bidirectional_rnn(self.cell_fw,
                                                   self.cell_bw,
                                                   encoder_emb_inputs,
                                                   sequence_length=self.sequence_length,
                                                   dtype=tf.float32)
            encoder_outputs, encoder_state_fw, encoder_state_bw = rnn_outputs
            # With state_is_tuple=True and num_layers > 1, we simply use the
            # state from the last layer as the encoder state
            state_fw = encoder_state_fw[-1]
            state_bw = encoder_state_bw[-1]
            encoder_state = tf.concat([tf.concat(state_fw, 1),
                                       tf.concat(state_bw, 1)], 1)
            top_states = [tf.reshape(e, [-1, 1, self.cell_fw.output_size
                                         + self.cell_bw.output_size])
                          for e in encoder_outputs]
            attention_states = tf.concat(top_states, 1)
        else:
            embedding = tf.get_variable("embedding",
                                        [self.source_vocab_size,
                                         self.word_embedding_size])
            encoder_emb_inputs = [tf.nn.embedding_lookup(embedding, encoder_input)
                                  for encoder_input in self.encoder_inputs]
            rnn_outputs = static_rnn(self.cell_fw,
                                     encoder_emb_inputs,
                                     sequence_length=self.sequence_length,
                                     dtype=tf.float32)
            encoder_outputs, encoder_state = rnn_outputs
            # With state_is_tuple=True and num_layers > 1, we use the
            # state from the last layer as the encoder state
            state = encoder_state[-1]
            encoder_state = tf.concat(state, 1)
            top_states = [tf.reshape(e, [-1, 1, self.cell_fw.output_size])
                          for e in encoder_outputs]
            attention_states = tf.concat(top_states, 1)
        return encoder_outputs, encoder_state, attention_states
Developer: sliderSun, Project: pynlp, Lines of code: 48, Source file: multi_task_model.py
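
The method assumes self.cell_fw and self.cell_bw were built elsewhere with state_is_tuple=True. A plausible construction (names and sizes are assumptions, not from the original project) that explains the encoder_state_fw[-1] indexing above:

num_layers, hidden_size = 2, 128

def make_cell():
    return tf.contrib.rnn.BasicLSTMCell(hidden_size, state_is_tuple=True)

cell_fw = tf.contrib.rnn.MultiRNNCell([make_cell() for _ in range(num_layers)],
                                      state_is_tuple=True)
cell_bw = tf.contrib.rnn.MultiRNNCell([make_cell() for _ in range(num_layers)],
                                      state_is_tuple=True)
# The returned state is then a tuple with one LSTMStateTuple per layer,
# so encoder_state_fw[-1] picks the last layer's (c, h) pair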


Note: The tensorflow.contrib.rnn.static_bidirectional_rnn examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Do not reproduce without permission.