This article collects typical usage examples of the Python method tensorflow.contrib.rnn.static_bidirectional_rnn. If you are wondering what rnn.static_bidirectional_rnn does, how to call it, or how others use it, the curated examples below may help. You can also read more about the containing module, tensorflow.contrib.rnn.
The following shows 4 code examples of rnn.static_bidirectional_rnn, sorted by popularity by default. You can vote up the examples you like or find useful; your votes help the system recommend better Python examples.
Example 1: BiRNN
# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import static_bidirectional_rnn [as alias]
def BiRNN(x, weights, biases, timesteps, num_hidden):
    # Prepare data shape to match `rnn` function requirements.
    # Current data input shape: (batch_size, timesteps, num_input)
    # Required shape: list of 'timesteps' tensors of shape (batch_size, num_input)
    x = tf.unstack(x, timesteps, 1)

    # Define LSTM cells with TensorFlow
    # Forward direction cell
    lstm_fw_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)
    # Backward direction cell
    lstm_bw_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)

    # Get BiRNN cell output
    outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                                 dtype=tf.float32)

    # Linear activation, using the RNN's last output
    return tf.matmul(outputs[-1], weights) + biases
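To exercise this function end to end, the classifier head just needs a weight matrix sized for the concatenated forward and backward outputs. A minimal sketch, assuming TensorFlow 1.x; the MNIST-style shapes below are illustrative assumptions, not part of the original example:

import tensorflow as tf
from tensorflow.contrib import rnn

timesteps, num_input, num_hidden, num_classes = 28, 28, 128, 10

# A batch of sequences: (batch_size, timesteps, num_input)
X = tf.placeholder(tf.float32, [None, timesteps, num_input])

# Forward and backward outputs are concatenated per step,
# hence 2 * num_hidden input rows in the projection.
weights = tf.Variable(tf.random_normal([2 * num_hidden, num_classes]))
biases = tf.Variable(tf.random_normal([num_classes]))

logits = BiRNN(X, weights, biases, timesteps, num_hidden)  # (batch_size, num_classes)

The factor of two matters: static_bidirectional_rnn returns, at each timestep, the forward and backward outputs concatenated along the feature axis.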
Example 2: build
# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import static_bidirectional_rnn [as alias]
def build(self):
    self._define_input()
    output = self.input_seq
    output = embedding(output, self.vocab.size, self.embedding_dim, name='layer_embedding')
    input_dim = self.embedding_dim

    # Prepare data shape to match rnn function requirements.
    # Current data input shape: [batch_size, num_steps, input_dim]
    # Required shape: list of 'num_steps' tensors of shape [batch_size, input_dim]
    output = tf.transpose(output, [1, 0, 2])
    output = tf.reshape(output, [-1, input_dim])
    output = tf.split(output, self.num_steps, 0)

    if self.bidirectional:
        # List of 'num_steps' tensors of shape [batch_size, rnn_units * 2]
        fw_cell = build_cell(self.rnn_units, self.cell_type, self.rnn_layers)
        bw_cell = build_cell(self.rnn_units, self.cell_type, self.rnn_layers)
        output, state_fw, state_bw = rnn.static_bidirectional_rnn(
            fw_cell, bw_cell, output, dtype=tf.float32,
            sequence_length=self.seq_len, scope='encoder')
        if isinstance(state_fw, tf.contrib.rnn.LSTMStateTuple):
            encoder_state_c = tf.concat([state_fw.c, state_bw.c], axis=1, name='bidirectional_concat_c')
            encoder_state_h = tf.concat([state_fw.h, state_bw.h], axis=1, name='bidirectional_concat_h')
            state = tf.contrib.rnn.LSTMStateTuple(c=encoder_state_c, h=encoder_state_h)
        elif isinstance(state_fw, tf.Tensor):
            state = tf.concat([state_fw, state_bw], axis=1, name='bidirectional_concat')
        else:
            raise ValueError('unexpected encoder state type: %r' % type(state_fw))
    else:
        # List of 'num_steps' tensors of shape [batch_size, rnn_units]
        cell = build_cell(self.rnn_units, self.cell_type, self.rnn_layers)
        output, state = rnn.static_rnn(cell, output, dtype=tf.float32,
                                       sequence_length=self.seq_len, scope='encoder')

    output = tf.stack(output, axis=0)         # [num_steps, batch_size, rnn_units]
    output = tf.transpose(output, [1, 0, 2])  # [batch_size, num_steps, rnn_units]
    self.encoder_output = output
    self.encoder_state = state
    return output, state
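The state handling above is the key pattern: LSTM cells return an LSTMStateTuple, so the forward and backward halves must be concatenated component-wise (c with c, h with h), while plain cells such as GRU return a single tensor. A minimal sketch of the shapes involved, assuming TensorFlow 1.x; the build_cell stand-in here is a hypothetical single-layer simplification of the example's helper:

import tensorflow as tf
from tensorflow.contrib import rnn

def build_cell(rnn_units, cell_type='lstm', rnn_layers=1):
    # Hypothetical stand-in for the example's build_cell: one LSTM layer.
    return rnn.BasicLSTMCell(rnn_units)

batch_size, num_steps, input_dim, rnn_units = 4, 5, 8, 16
inputs = tf.unstack(tf.zeros([batch_size, num_steps, input_dim]), num_steps, 1)

outputs, state_fw, state_bw = rnn.static_bidirectional_rnn(
    build_cell(rnn_units), build_cell(rnn_units), inputs, dtype=tf.float32)

print(outputs[0].shape)  # (4, 32): forward and backward outputs concatenated
print(state_fw.c.shape)  # (4, 16): each direction keeps its own rnn_units

A decoder initialized from the concatenated state therefore needs 2 * rnn_units units.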
Example 3: BiRNN
# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import static_bidirectional_rnn [as alias]
def BiRNN(x, weights, biases):
    # Reshape (batch_size, n_steps, n_input) into a list of
    # 'n_steps' tensors of shape (batch_size, n_input).
    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, n_input])
    x = tf.split(axis=0, num_or_size_splits=n_steps, value=x)

    # Forward and backward LSTM cells
    lstm_fw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    lstm_bw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)

    try:
        outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                                     dtype=tf.float32)
    except Exception:  # old TensorFlow versions return only outputs, not states
        outputs = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                               dtype=tf.float32)
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
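The transpose/reshape/split preamble is an older idiom equivalent to the single tf.unstack call in Example 1; both yield a list of n_steps tensors of shape (batch_size, n_input). A minimal sketch of the equivalence, with illustrative sizes:

import tensorflow as tf

n_steps, n_input = 5, 3
x = tf.placeholder(tf.float32, [None, n_steps, n_input])

# Route A (this example): transpose -> reshape -> split
a = tf.transpose(x, [1, 0, 2])
a = tf.reshape(a, [-1, n_input])
a = tf.split(axis=0, num_or_size_splits=n_steps, value=a)

# Route B (Example 1): a single unstack along the time axis
b = tf.unstack(x, n_steps, 1)

assert len(a) == len(b) == n_steps  # both: n_steps tensors of (batch, n_input)

Note that this variant reads n_input, n_steps, and n_hidden from module-level globals and takes its weights and biases as dictionaries keyed by 'out'.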
Example 4: generate_rnn_output
# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import static_bidirectional_rnn [as alias]
def generate_rnn_output(self):
    """
    Generate RNN state outputs with word embeddings as inputs.
    """
    with tf.variable_scope("generate_seq_output"):
        if self.bidirectional_rnn:
            embedding = tf.get_variable("embedding",
                                        [self.source_vocab_size,
                                         self.word_embedding_size])
            encoder_emb_inputs = [tf.nn.embedding_lookup(embedding, encoder_input)
                                  for encoder_input in self.encoder_inputs]
            rnn_outputs = static_bidirectional_rnn(self.cell_fw,
                                                   self.cell_bw,
                                                   encoder_emb_inputs,
                                                   sequence_length=self.sequence_length,
                                                   dtype=tf.float32)
            encoder_outputs, encoder_state_fw, encoder_state_bw = rnn_outputs
            # With state_is_tuple=True and num_layers > 1, simply use
            # the state from the last layer as the encoder state.
            state_fw = encoder_state_fw[-1]
            state_bw = encoder_state_bw[-1]
            encoder_state = tf.concat([tf.concat(state_fw, 1),
                                       tf.concat(state_bw, 1)], 1)
            top_states = [tf.reshape(e, [-1, 1, self.cell_fw.output_size
                                         + self.cell_bw.output_size])
                          for e in encoder_outputs]
            attention_states = tf.concat(top_states, 1)
        else:
            embedding = tf.get_variable("embedding",
                                        [self.source_vocab_size,
                                         self.word_embedding_size])
            encoder_emb_inputs = [tf.nn.embedding_lookup(embedding, encoder_input)
                                  for encoder_input in self.encoder_inputs]
            rnn_outputs = static_rnn(self.cell_fw,
                                     encoder_emb_inputs,
                                     sequence_length=self.sequence_length,
                                     dtype=tf.float32)
            encoder_outputs, encoder_state = rnn_outputs
            # With state_is_tuple=True and num_layers > 1, use
            # the state from the last layer as the encoder state.
            state = encoder_state[-1]
            encoder_state = tf.concat(state, 1)
            top_states = [tf.reshape(e, [-1, 1, self.cell_fw.output_size])
                          for e in encoder_outputs]
            attention_states = tf.concat(top_states, 1)
        return encoder_outputs, encoder_state, attention_states
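Each per-step output is reshaped to [batch_size, 1, output_size] and concatenated along axis 1, so attention_states ends up as [batch_size, num_steps, fw_size + bw_size], the layout consumed by the tf.contrib.legacy_seq2seq attention decoders. A minimal stand-alone sketch of the bidirectional branch, assuming TensorFlow 1.x; the vocabulary, embedding, and cell sizes are assumptions, and the original's multi-layer cell construction is omitted:

import tensorflow as tf
from tensorflow.contrib.rnn import BasicLSTMCell, static_bidirectional_rnn

source_vocab_size, word_embedding_size, num_units, num_steps = 100, 32, 64, 10

# One int32 placeholder per timestep, seq2seq-tutorial style.
encoder_inputs = [tf.placeholder(tf.int32, [None]) for _ in range(num_steps)]
sequence_length = tf.placeholder(tf.int32, [None])

embedding = tf.get_variable("embedding", [source_vocab_size, word_embedding_size])
emb_inputs = [tf.nn.embedding_lookup(embedding, i) for i in encoder_inputs]

cell_fw, cell_bw = BasicLSTMCell(num_units), BasicLSTMCell(num_units)
outputs, state_fw, state_bw = static_bidirectional_rnn(
    cell_fw, cell_bw, emb_inputs, sequence_length=sequence_length, dtype=tf.float32)

top_states = [tf.reshape(e, [-1, 1, cell_fw.output_size + cell_bw.output_size])
              for e in outputs]
attention_states = tf.concat(top_states, 1)
print(attention_states.shape)  # (?, 10, 128): [batch, num_steps, fw + bw units]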