本文整理匯總了Python中tensorflow.python.ops.rnn.static_rnn方法的典型用法代碼示例。如果您正苦於以下問題:Python rnn.static_rnn方法的具體用法?Python rnn.static_rnn怎麽用?Python rnn.static_rnn使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類tensorflow.python.ops.rnn
的用法示例。
在下文中一共展示了rnn.static_rnn方法的6個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: __call__
# 需要導入模塊: from tensorflow.python.ops import rnn [as 別名]
# 或者: from tensorflow.python.ops.rnn import static_rnn [as 別名]
def __call__(self,
             inputs,
             initial_state=None,
             dtype=None,
             sequence_length=None,
             scope=None):
    """Run the wrapped cell over `inputs` with either dynamic or static RNN.

    Args:
      inputs: either a list of time-step tensors or a single time-major
        tensor; the output is returned in the same form as the input.
      initial_state: optional initial state passed through to the RNN.
      dtype: optional dtype for the initial state when none is given.
      sequence_length: optional per-example sequence lengths.
      scope: optional VariableScope for the created subgraph.

    Returns:
      A `(outputs, state)` pair, with `outputs` matching the container
      type (list vs. tensor) of `inputs`.
    """
    inputs_are_list = isinstance(inputs, list)

    if self._use_dynamic_rnn:
        # dynamic_rnn wants a single time-major tensor, so pack a list.
        if inputs_are_list:
            inputs = array_ops.stack(inputs)
        outputs, state = rnn.dynamic_rnn(
            self._cell,
            inputs,
            sequence_length=sequence_length,
            initial_state=initial_state,
            dtype=dtype,
            time_major=True,
            scope=scope)
        if inputs_are_list:
            # Hand back a per-time-step list, mirroring the input form.
            outputs = array_ops.unstack(outputs)
        return outputs, state

    # Static path: static_rnn wants a list of per-time-step tensors.
    if not inputs_are_list:
        inputs = array_ops.unstack(inputs)
    outputs, state = rnn.static_rnn(
        self._cell,
        inputs,
        initial_state=initial_state,
        dtype=dtype,
        sequence_length=sequence_length,
        scope=scope)
    if not inputs_are_list:
        # Re-pack into a single tensor, mirroring the input form.
        outputs = array_ops.stack(outputs)
    return outputs, state
示例2: basic_rnn_seq2seq
# 需要導入模塊: from tensorflow.python.ops import rnn [as 別名]
# 或者: from tensorflow.python.ops.rnn import static_rnn [as 別名]
def basic_rnn_seq2seq(encoder_inputs,
                      decoder_inputs,
                      cell,
                      dtype=dtypes.float32,
                      scope=None):
    """Basic RNN sequence-to-sequence model.

    Encodes `encoder_inputs` into a final state, then decodes
    `decoder_inputs` starting from that state. Encoder and decoder use
    the same cell architecture but do not share parameters.

    Args:
      encoder_inputs: A list of 2D Tensors [batch_size x input_size].
      decoder_inputs: A list of 2D Tensors [batch_size x input_size].
      cell: tf.nn.rnn_cell.RNNCell defining the cell function and size.
      dtype: The dtype of the initial state of the RNN cell
        (default: tf.float32).
      scope: VariableScope for the created subgraph;
        default: "basic_rnn_seq2seq".

    Returns:
      A tuple of the form (outputs, state), where:
        outputs: A list of the same length as decoder_inputs of 2D Tensors
          with shape [batch_size x output_size] containing the generated
          outputs.
        state: The state of each decoder cell in the final time-step.
          It is a 2D Tensor of shape [batch_size x cell.state_size].
    """
    with variable_scope.variable_scope(scope or "basic_rnn_seq2seq"):
        # Deep-copy the cell so the encoder gets its own variables while
        # keeping the same architecture as the decoder's cell.
        encoder_cell = copy.deepcopy(cell)
        _, encoder_state = rnn.static_rnn(
            encoder_cell, encoder_inputs, dtype=dtype)
        return rnn_decoder(decoder_inputs, encoder_state, cell)
示例3: tied_rnn_seq2seq
# 需要導入模塊: from tensorflow.python.ops import rnn [as 別名]
# 或者: from tensorflow.python.ops.rnn import static_rnn [as 別名]
def tied_rnn_seq2seq(encoder_inputs,
                     decoder_inputs,
                     cell,
                     loop_function=None,
                     dtype=dtypes.float32,
                     scope=None):
    """RNN sequence-to-sequence model with tied encoder/decoder parameters.

    Runs the encoder over `encoder_inputs`, then reuses the *same*
    variables (via `reuse_variables`) to decode `decoder_inputs` from the
    final encoder state.

    Args:
      encoder_inputs: A list of 2D Tensors [batch_size x input_size].
      decoder_inputs: A list of 2D Tensors [batch_size x input_size].
      cell: tf.nn.rnn_cell.RNNCell defining the cell function and size.
      loop_function: If not None, this function is applied to the i-th
        output to generate the (i+1)-th input, and decoder_inputs are
        ignored except for the first element ("GO" symbol); see
        rnn_decoder for details.
      dtype: The dtype of the initial state of the rnn cell
        (default: tf.float32).
      scope: VariableScope for the created subgraph;
        default: "tied_rnn_seq2seq".

    Returns:
      A tuple of the form (outputs, state), where:
        outputs: A list of the same length as decoder_inputs of 2D Tensors
          with shape [batch_size x output_size] containing the generated
          outputs.
        state: The state of each decoder cell in each time-step; a list of
          length len(decoder_inputs), each a 2D Tensor of shape
          [batch_size x cell.state_size].
    """
    with variable_scope.variable_scope("combined_tied_rnn_seq2seq"):
        scope = scope or "tied_rnn_seq2seq"
        # Encoder pass: creates the shared variables under `scope`.
        _, encoder_state = rnn.static_rnn(
            cell, encoder_inputs, dtype=dtype, scope=scope)
        # Flip the enclosing scope to reuse so the decoder below binds to
        # the variables the encoder just created (parameter tying).
        variable_scope.get_variable_scope().reuse_variables()
        return rnn_decoder(
            decoder_inputs,
            encoder_state,
            cell,
            loop_function=loop_function,
            scope=scope)
示例4: __call__
# 需要導入模塊: from tensorflow.python.ops import rnn [as 別名]
# 或者: from tensorflow.python.ops.rnn import static_rnn [as 別名]
def __call__(self, inputs, seq_len=None):
    """Run an LSTM over `inputs` and return dynamic_rnn's (outputs, state).

    Args:
      inputs: batch of input sequences fed to `rnn.dynamic_rnn`
        (assumes batch-major [batch, time, dim] — TODO confirm against
        callers).
      seq_len: optional per-example sequence lengths.

    Returns:
      The `(outputs, state)` pair from `rnn.dynamic_rnn`.
    """
    if self.call_cnt == 0:
        # Lazily build the cell on first call so the initializer can be
        # constructed with the actual input dtype.
        self.cell = LSTMCell(self.output_dim,
                             initializer=self.initializer(dtype=inputs.dtype))
    with tf.variable_scope(self.scope):
        # Fix: the original called rnn.dynamic_rnn twice in the same
        # scope (a leftover debug duplicate with Python-2 `print`
        # statements around it); the second call builds the graph again
        # and raises a variable-reuse error. Build it exactly once.
        return rnn.dynamic_rnn(self.cell, inputs, seq_len,
                               dtype=inputs.dtype)
示例5: __call__
# 需要導入模塊: from tensorflow.python.ops import rnn [as 別名]
# 或者: from tensorflow.python.ops.rnn import static_rnn [as 別名]
def __call__(self, inputs, seq_len=None):
    """Run an LSTM over `inputs` via dynamic_rnn and return its result.

    Args:
      inputs: batch of input sequences fed to `rnn.dynamic_rnn`.
      seq_len: optional per-example sequence lengths.

    Returns:
      The `(outputs, state)` pair from `rnn.dynamic_rnn`.
    """
    first_call = (self.call_cnt == 0)
    if first_call:
        # Build the cell lazily so the initializer sees the input dtype.
        self.cell = LSTMCell(self.output_dim,
                             initializer=self.initializer(dtype=inputs.dtype))
    with tf.variable_scope(self.scope) as scope:
        self.check_reuse(scope)
        return rnn.dynamic_rnn(self.cell, inputs, seq_len,
                               dtype=inputs.dtype)
示例6: basic_rnn_seq2seq
# 需要導入模塊: from tensorflow.python.ops import rnn [as 別名]
# 或者: from tensorflow.python.ops.rnn import static_rnn [as 別名]
def basic_rnn_seq2seq(encoder_inputs,
                      decoder_inputs,
                      cell,
                      dtype=dtypes.float32,
                      scope=None):
    """Basic RNN sequence-to-sequence model (shallow-copy encoder cell).

    Encodes `encoder_inputs` into a final state, then decodes
    `decoder_inputs` starting from that state. Encoder and decoder use
    the same cell type but don't share parameters.

    Args:
      encoder_inputs: A list of 2D Tensors [batch_size x input_size].
      decoder_inputs: A list of 2D Tensors [batch_size x input_size].
      cell: tf.nn.rnn_cell.RNNCell defining the cell function and size.
      dtype: The dtype of the initial state of the RNN cell
        (default: tf.float32).
      scope: VariableScope for the created subgraph;
        default: "basic_rnn_seq2seq".

    Returns:
      A tuple of the form (outputs, state), where:
        outputs: A list of the same length as decoder_inputs of 2D Tensors
          with shape [batch_size x output_size] containing the generated
          outputs.
        state: The state of each decoder cell in the final time-step.
          It is a 2D Tensor of shape [batch_size x cell.state_size].
    """
    with variable_scope.variable_scope(scope or "basic_rnn_seq2seq"):
        # NOTE(review): this variant deliberately uses a *shallow* copy
        # (the deep copy was disabled here) — confirm the cell tolerates
        # shallow copying before reusing this snippet.
        encoder_cell = copy.copy(cell)
        _, encoder_state = rnn.static_rnn(
            encoder_cell, encoder_inputs, dtype=dtype)
        return rnn_decoder(decoder_inputs, encoder_state, cell)