This page collects typical usage examples of tensorflow.python.ops.rnn_cell_impl.RNNCell in Python. If you are unsure how rnn_cell_impl.RNNCell is used in practice, or are simply looking for working examples, the curated code samples below may help. You can also explore further usage examples from the module it lives in, tensorflow.python.ops.rnn_cell_impl.
The following lists 12 code examples of rnn_cell_impl.RNNCell, sorted by popularity by default.
Example 1: __init__

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import RNNCell [as alias]
def __init__(self, cell, output_size, activation=None, reuse=None):
  """Create a cell with output projection.

  Args:
    cell: an RNNCell, a projection to output_size is added to it.
    output_size: integer, the size of the output after projection.
    activation: (optional) an optional activation function.
    reuse: (optional) Python boolean describing whether to reuse variables
      in an existing scope. If not `True`, and the existing scope already has
      the given variables, an error is raised.

  Raises:
    TypeError: if cell is not an RNNCell.
    ValueError: if output_size is not positive.
  """
  super(OutputProjectionWrapper, self).__init__(_reuse=reuse)
  if not _like_rnncell(cell):
    raise TypeError("The parameter cell is not RNNCell.")
  if output_size < 1:
    raise ValueError("Parameter output_size must be > 0: %d." % output_size)
  self._cell = cell
  self._output_size = output_size
  self._activation = activation
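
For orientation, here is a minimal usage sketch of this wrapper, assuming a TensorFlow 1.x environment where the class is available under its public alias tf.contrib.rnn.OutputProjectionWrapper; the shapes and sizes below are illustrative only:

import tensorflow as tf  # TF 1.x assumed

# Wrap a 128-unit GRU cell so every per-step output is projected down to 10 units.
base_cell = tf.nn.rnn_cell.GRUCell(num_units=128)
projected_cell = tf.contrib.rnn.OutputProjectionWrapper(base_cell, output_size=10)

inputs = tf.placeholder(tf.float32, shape=[None, 20, 32])  # [batch, time, features]
outputs, state = tf.nn.dynamic_rnn(projected_cell, inputs, dtype=tf.float32)
# outputs: [batch, time, 10]; state keeps the wrapped cell's state size (128).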
Example 2: __init__

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import RNNCell [as alias]
def __init__(self, cell, output_size, activation=None, reuse=None):
  """Create a cell with output projection.

  Args:
    cell: an RNNCell, a projection to output_size is added to it.
    output_size: integer, the size of the output after projection.
    activation: (optional) an optional activation function.
    reuse: (optional) Python boolean describing whether to reuse variables
      in an existing scope. If not `True`, and the existing scope already has
      the given variables, an error is raised.

  Raises:
    TypeError: if cell is not an RNNCell.
    ValueError: if output_size is not positive.
  """
  super(OutputProjectionWrapper, self).__init__(_reuse=reuse)
  if not _like_rnncell(cell):
    raise TypeError("The parameter cell is not RNNCell.")
  if output_size < 1:
    raise ValueError("Parameter output_size must be > 0: %d." % output_size)
  self._cell = cell
  self._output_size = output_size
  self._activation = activation
  self._linear = None
Example 3: __init__

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import RNNCell [as alias]
def __init__(self, cell, output_size, reuse=None):
  """Create a cell with output projection.

  Args:
    cell: an RNNCell, a projection to output_size is added to it.
    output_size: integer, the size of the output after projection.
    reuse: (optional) Python boolean describing whether to reuse variables
      in an existing scope. If not `True`, and the existing scope already has
      the given variables, an error is raised.

  Raises:
    TypeError: if cell is not an RNNCell.
    ValueError: if output_size is not positive.
  """
  if not isinstance(cell, RNNCell):
    raise TypeError("The parameter cell is not RNNCell.")
  if output_size < 1:
    raise ValueError("Parameter output_size must be > 0: %d." % output_size)
  self._cell = cell
  self._output_size = output_size
  self._reuse = reuse
Example 4: __init__

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import RNNCell [as alias]
def __init__(self, cell,
             couple_carry_transform_gates=True,
             carry_bias_init=1.0):
  """Constructs a `HighwayWrapper` for `cell`.

  Args:
    cell: An instance of `RNNCell`.
    couple_carry_transform_gates: boolean, should the Carry and Transform
      gates be coupled.
    carry_bias_init: float, carry gates bias initialization.
  """
  self._cell = cell
  self._couple_carry_transform_gates = couple_carry_transform_gates
  self._carry_bias_init = carry_bias_init
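
As a rough usage sketch, assuming a TensorFlow 1.x install where this wrapper is exposed as tf.contrib.rnn.HighwayWrapper; the sizes are illustrative, and the highway skip connection requires the input depth to match the cell's output size:

import tensorflow as tf  # TF 1.x assumed

base_cell = tf.nn.rnn_cell.GRUCell(num_units=64)
highway_cell = tf.contrib.rnn.HighwayWrapper(
    base_cell, couple_carry_transform_gates=True, carry_bias_init=1.0)

# Input depth equals the cell's output size (64) so the carry connection
# y = c * x + t * h is well-defined.
inputs = tf.placeholder(tf.float32, shape=[None, 20, 64])
outputs, state = tf.nn.dynamic_rnn(highway_cell, inputs, dtype=tf.float32)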
Example 5: basic_rnn_seq2seq

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import RNNCell [as alias]
def basic_rnn_seq2seq(encoder_inputs,
                      decoder_inputs,
                      cell,
                      dtype=dtypes.float32,
                      scope=None):
  """Basic RNN sequence-to-sequence model.

  This model first runs an RNN to encode encoder_inputs into a state vector,
  then runs a decoder, initialized with the last encoder state, on
  decoder_inputs. Encoder and decoder use the same RNN cell type, but don't
  share parameters.

  Args:
    encoder_inputs: A list of 2D Tensors [batch_size x input_size].
    decoder_inputs: A list of 2D Tensors [batch_size x input_size].
    cell: tf.nn.rnn_cell.RNNCell defining the cell function and size.
    dtype: The dtype of the initial state of the RNN cell (default: tf.float32).
    scope: VariableScope for the created subgraph; default: "basic_rnn_seq2seq".

  Returns:
    A tuple of the form (outputs, state), where:
      outputs: A list of the same length as decoder_inputs of 2D Tensors with
        shape [batch_size x output_size] containing the generated outputs.
      state: The state of each decoder cell in the final time-step.
        It is a 2D Tensor of shape [batch_size x cell.state_size].
  """
  with variable_scope.variable_scope(scope or "basic_rnn_seq2seq"):
    enc_cell = copy.deepcopy(cell)
    _, enc_state = rnn.static_rnn(enc_cell, encoder_inputs, dtype=dtype)
    return rnn_decoder(decoder_inputs, enc_state, cell)
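
A hedged end-to-end sketch of calling this function, assuming it is available as tf.contrib.legacy_seq2seq.basic_rnn_seq2seq in TensorFlow 1.x; the batch size, feature size, and sequence lengths are made up:

import tensorflow as tf  # TF 1.x assumed

batch_size, input_size, enc_steps, dec_steps = 32, 16, 5, 4
cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=64)

# The legacy seq2seq API takes Python lists of per-time-step 2-D tensors.
encoder_inputs = [tf.placeholder(tf.float32, [batch_size, input_size])
                  for _ in range(enc_steps)]
decoder_inputs = [tf.placeholder(tf.float32, [batch_size, input_size])
                  for _ in range(dec_steps)]

outputs, state = tf.contrib.legacy_seq2seq.basic_rnn_seq2seq(
    encoder_inputs, decoder_inputs, cell)
# outputs is a list of dec_steps tensors, each of shape [batch_size, 64].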
Example 6: tied_rnn_seq2seq

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import RNNCell [as alias]
def tied_rnn_seq2seq(encoder_inputs,
                     decoder_inputs,
                     cell,
                     loop_function=None,
                     dtype=dtypes.float32,
                     scope=None):
  """RNN sequence-to-sequence model with tied encoder and decoder parameters.

  This model first runs an RNN to encode encoder_inputs into a state vector,
  and then runs a decoder, initialized with the last encoder state, on
  decoder_inputs. Encoder and decoder use the same RNN cell and share
  parameters.

  Args:
    encoder_inputs: A list of 2D Tensors [batch_size x input_size].
    decoder_inputs: A list of 2D Tensors [batch_size x input_size].
    cell: tf.nn.rnn_cell.RNNCell defining the cell function and size.
    loop_function: If not None, this function will be applied to the i-th
      output in order to generate the (i+1)-th input, and decoder_inputs will
      be ignored, except for the first element ("GO" symbol); see rnn_decoder
      for details.
    dtype: The dtype of the initial state of the RNN cell (default: tf.float32).
    scope: VariableScope for the created subgraph; default: "tied_rnn_seq2seq".

  Returns:
    A tuple of the form (outputs, state), where:
      outputs: A list of the same length as decoder_inputs of 2D Tensors with
        shape [batch_size x output_size] containing the generated outputs.
      state: The state of each decoder cell in each time-step. This is a list
        with length len(decoder_inputs) -- one item for each time-step.
        It is a 2D Tensor of shape [batch_size x cell.state_size].
  """
  with variable_scope.variable_scope("combined_tied_rnn_seq2seq"):
    scope = scope or "tied_rnn_seq2seq"
    _, enc_state = rnn.static_rnn(
        cell, encoder_inputs, dtype=dtype, scope=scope)
    variable_scope.get_variable_scope().reuse_variables()
    return rnn_decoder(
        decoder_inputs,
        enc_state,
        cell,
        loop_function=loop_function,
        scope=scope)
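
The sketch below shows the loop_function hook, assuming TensorFlow 1.x with the function exposed as tf.contrib.legacy_seq2seq.tied_rnn_seq2seq and illustrative sizes; because encoder and decoder share variables, both input lists must have the same depth:

import tensorflow as tf  # TF 1.x assumed

num_units, batch_size, steps = 32, 8, 5
cell = tf.nn.rnn_cell.GRUCell(num_units)

encoder_inputs = [tf.placeholder(tf.float32, [batch_size, num_units])
                  for _ in range(steps)]
decoder_inputs = [tf.placeholder(tf.float32, [batch_size, num_units])
                  for _ in range(steps)]

def loop_function(prev, i):
  # Feed the previous decoder output back as the next input (no teacher
  # forcing); only the first decoder input (the "GO" symbol) is consumed.
  return prev

outputs, state = tf.contrib.legacy_seq2seq.tied_rnn_seq2seq(
    encoder_inputs, decoder_inputs, cell, loop_function=loop_function)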
Example 7: __init__

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import RNNCell [as alias]
def __init__(self, cell,
             couple_carry_transform_gates=True,
             carry_bias_init=1.0):
  """Constructs a `HighwayWrapper` for `cell`.

  Args:
    cell: An instance of `RNNCell`.
    couple_carry_transform_gates: boolean, should the Carry and Transform
      gates be coupled.
    carry_bias_init: float, carry gates bias initialization.
  """
  self._cell = cell
  self._couple_carry_transform_gates = couple_carry_transform_gates
  self._carry_bias_init = carry_bias_init
Example 8: create_cell

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import RNNCell [as alias]
def create_cell(cell_classname, cell_params):
  """Creates an RNN cell.

  Args:
    cell_classname: The name of the cell class,
      e.g. "LSTMCell", "GRUCell" and so on.
    cell_params: A dictionary of parameters to pass
      to the cell constructor.

  Returns:
    A `tf.contrib.rnn.RNNCell` object.
  """
  cell_params = cell_params.copy()
  # Find the cell class; use the in-house implemented LSTMCell & GRUCell.
  cell_class = eval(cell_classname)  # look up among the cell names imported from tf.contrib.rnn
  # Make sure the additional arguments are valid.
  cell_args = set(inspect.getargspec(cell_class.__init__).args[1:])
  new_cell_params = {}
  for key in cell_params.keys():
    if key not in cell_args:
      # raise ValueError(
      tf.logging.info(
          """{} is not a valid argument for {} class. Available arguments
          are: {}""".format(key, cell_class.__name__, cell_args))
    else:
      new_cell_params[key] = cell_params[key]
  # Create the cell.
  return cell_class(**new_cell_params)
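
A hypothetical call, assuming this create_cell helper and its imports (tf, inspect) are in scope and that the cell classes have been imported into the same namespace (e.g. from tf.contrib.rnn) so that eval("LSTMCell") resolves:

cell = create_cell("LSTMCell",
                   {"num_units": 256, "forget_bias": 1.0, "bogus_arg": 123})
# "bogus_arg" is not an argument of LSTMCell.__init__, so it is logged and
# dropped; the cell is constructed from the remaining parameters.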
Example 9: get_multilayer_rnn_cells

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import RNNCell [as alias]
def get_multilayer_rnn_cells(cell_class,
                             cell_params,
                             num_layers=1,
                             dropout_input_keep_prob=1.0,
                             dropout_state_keep_prob=1.0):
  """Creates a stacked multi-layer RNN cell.

  Args:
    cell_class: The name of the cell class, e.g. "LSTMCell".
    cell_params: A dictionary of parameters to pass to
      the cell constructor.
    num_layers: The number of layers. The cell will be
      wrapped with `tf.contrib.rnn.MultiRNNCell`.
    dropout_input_keep_prob: The keep probability for input
      dropout.
    dropout_state_keep_prob: The keep probability for output
      dropout.

  Returns:
    A `tf.contrib.rnn.RNNCell` object.
  """
  cells = []
  for _ in range(num_layers):
    cell = create_cell(cell_class, cell_params)
    if dropout_input_keep_prob < 1.0 or dropout_state_keep_prob < 1.0:
      cell = tf.contrib.rnn.DropoutWrapper(
          cell,
          input_keep_prob=dropout_input_keep_prob,
          state_keep_prob=dropout_state_keep_prob)
    cells.append(cell)
  # Use a stacked cell even when there is only one layer, for bridge computation.
  return StackedRNNCell(cells=cells)
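
A hypothetical call, assuming the create_cell helper and the project-specific StackedRNNCell wrapper shown in these examples are in scope; the sizes and keep probabilities are illustrative:

stacked_cell = get_multilayer_rnn_cells(
    cell_class="GRUCell",
    cell_params={"num_units": 128},
    num_layers=2,
    dropout_input_keep_prob=0.8,
    dropout_state_keep_prob=1.0)
# Each of the two GRU layers is wrapped in a DropoutWrapper because the input
# keep probability is below 1.0; the result behaves as a single stacked RNNCell.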
Example 10: get_condr_rnn_cell

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import RNNCell [as alias]
def get_condr_rnn_cell(cell_class,
                       cell_params,
                       num_layers=1,
                       dropout_input_keep_prob=1.0,
                       dropout_state_keep_prob=1.0):
  """Creates RNN cells according to the CondAttentionDecoder architecture.

  Args:
    cell_class: The name of the cell class, e.g. "LSTMCell".
    cell_params: A dictionary of parameters to pass to
      the cell constructor.
    num_layers: The number of layers. The cell will be
      wrapped with `tf.contrib.rnn.MultiRNNCell`.
    dropout_input_keep_prob: The keep probability for input
      dropout.
    dropout_state_keep_prob: The keep probability for output
      dropout.

  Returns:
    A tuple of `tf.contrib.rnn.RNNCell` objects,
    `(cond_cell, r_cells)`.
  """
  cond_cell = create_cell(cell_class, cell_params)
  if dropout_input_keep_prob < 1.0 or dropout_state_keep_prob < 1.0:
    cond_cell = tf.contrib.rnn.DropoutWrapper(
        cond_cell,
        input_keep_prob=dropout_input_keep_prob,
        state_keep_prob=dropout_state_keep_prob)
  r_cells = []
  for _ in range(num_layers):
    cell = create_cell(cell_class, cell_params)
    if dropout_input_keep_prob < 1.0 or dropout_state_keep_prob < 1.0:
      cell = tf.contrib.rnn.DropoutWrapper(
          cell,
          input_keep_prob=dropout_input_keep_prob,
          state_keep_prob=dropout_state_keep_prob)
    r_cells.append(cell)
  # Use a stacked cell as a wrapper to deal with tuple-typed hidden states.
  r_cells = StackedRNNCell(cells=r_cells, name="stacked_r_rnn_cell")
  return cond_cell, r_cells
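
A hypothetical call in the same spirit, again assuming create_cell and StackedRNNCell are in scope; parameter values are illustrative:

cond_cell, r_cells = get_condr_rnn_cell(
    cell_class="LSTMCell",
    cell_params={"num_units": 256},
    num_layers=2,
    dropout_input_keep_prob=1.0,
    dropout_state_keep_prob=0.9)
# cond_cell is a single (dropout-wrapped) cell for the conditional step, and
# r_cells stacks the remaining layers under the name "stacked_r_rnn_cell".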
Example 11: __init__

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import RNNCell [as alias]
def __init__(self, cell, embedding, use_context=True):
  super(WeanWrapper, self).__init__()
  if not _like_rnncell(cell):
    raise TypeError('The parameter cell is not RNNCell.')
  self._cell = cell
  self._embedding = embedding
  self._use_context = use_context
  self._linear = None
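
A minimal, hypothetical construction sketch, assuming the WeanWrapper class above is in scope and a TensorFlow 1.x environment; the vocabulary and embedding sizes are made up:

import tensorflow as tf  # TF 1.x assumed

vocab_size, embed_dim = 10000, 256
embedding = tf.get_variable("embedding", [vocab_size, embed_dim])

decoder_cell = tf.nn.rnn_cell.GRUCell(num_units=512)
# Hand the embedding matrix to the wrapper so it can be reused on the output
# side; use_context=True keeps the default behavior.
wean_cell = WeanWrapper(decoder_cell, embedding, use_context=True)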
Example 12: basic_rnn_seq2seq

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import RNNCell [as alias]
def basic_rnn_seq2seq(encoder_inputs,
                      decoder_inputs,
                      cell,
                      dtype=dtypes.float32,
                      scope=None):
  """Basic RNN sequence-to-sequence model.

  This model first runs an RNN to encode encoder_inputs into a state vector,
  then runs a decoder, initialized with the last encoder state, on
  decoder_inputs. Encoder and decoder use the same RNN cell type, but don't
  share parameters.

  Args:
    encoder_inputs: A list of 2D Tensors [batch_size x input_size].
    decoder_inputs: A list of 2D Tensors [batch_size x input_size].
    cell: tf.nn.rnn_cell.RNNCell defining the cell function and size.
    dtype: The dtype of the initial state of the RNN cell (default: tf.float32).
    scope: VariableScope for the created subgraph; default: "basic_rnn_seq2seq".

  Returns:
    A tuple of the form (outputs, state), where:
      outputs: A list of the same length as decoder_inputs of 2D Tensors with
        shape [batch_size x output_size] containing the generated outputs.
      state: The state of each decoder cell in the final time-step.
        It is a 2D Tensor of shape [batch_size x cell.state_size].
  """
  with variable_scope.variable_scope(scope or "basic_rnn_seq2seq"):
    # enc_cell = copy.deepcopy(cell)
    enc_cell = copy.copy(cell)  # shallow copy instead of deepcopy
    _, enc_state = rnn.static_rnn(enc_cell, encoder_inputs, dtype=dtype)
    return rnn_decoder(decoder_inputs, enc_state, cell)