This article collects typical usage examples of tensorflow.python.ops.rnn_cell.LSTMStateTuple in Python. If you are wondering what rnn_cell.LSTMStateTuple does or how to use it, the curated code examples below may help. You can also look further into the containing module, tensorflow.python.ops.rnn_cell, for related usage examples.
The following presents 9 code examples of rnn_cell.LSTMStateTuple, sorted by popularity by default.
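As a quick orientation before the examples: LSTMStateTuple is a namedtuple with the fields c (memory cell) and h (hidden state). Below is a minimal sketch of constructing and unpacking one, assuming a pre-1.0 TensorFlow where the class is still exposed under tensorflow.python.ops.rnn_cell; it is not taken from any of the examples.

import tensorflow as tf
from tensorflow.python.ops import rnn_cell

batch_size, num_units = 32, 128
c = tf.zeros([batch_size, num_units])     # memory cell
h = tf.zeros([batch_size, num_units])     # hidden state

state = rnn_cell.LSTMStateTuple(c, h)     # namedtuple with fields .c and .h
assert state.c is c and state.h is h
c_again, h_again = state                  # plain tuple unpacking also works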
Example 1: __call__
# Required import: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import LSTMStateTuple [as alias]
def __call__(self, inputs, state, k_size=3, scope=None):
  """Convolutional Long short-term memory cell (ConvLSTM)."""
  with vs.variable_scope(scope or type(self).__name__):  # "ConvLSTMCell"
    if self._state_is_tuple:
      c, h = state
    else:
      c, h = array_ops.split(3, 2, state)
    # batch_size * height * width * channel
    concat = _conv([inputs, h], 4 * self._num_units, k_size, True)
    # i = input_gate, j = new_input, f = forget_gate, o = output_gate
    i, j, f, o = array_ops.split(3, 4, concat)
    new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
             self._activation(j))
    new_h = self._activation(new_c) * sigmoid(o)
    if self._state_is_tuple:
      new_state = LSTMStateTuple(new_c, new_h)
    else:
      new_state = array_ops.concat(3, [new_c, new_h])
    return new_h, new_state
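The cell above operates on 4-D feature maps, so its state tensors are shaped [batch, height, width, num_units] rather than the usual 2-D layout. Below is a minimal sketch of the two state layouts that the state_is_tuple branch switches between, assuming the pre-1.0 split/concat argument order used in the example; the shapes are illustrative, not taken from the example's repository.

import tensorflow as tf
from tensorflow.python.ops import rnn_cell

batch, height, width, num_units = 8, 16, 16, 32
c = tf.zeros([batch, height, width, num_units])  # memory cell feature map
h = tf.zeros([batch, height, width, num_units])  # hidden state feature map

# state_is_tuple=True: the state is an LSTMStateTuple of the two maps.
tuple_state = rnn_cell.LSTMStateTuple(c, h)

# state_is_tuple=False: the state is packed along the channel axis (axis 3),
# which is what array_ops.split(3, 2, state) undoes inside the cell.
packed_state = tf.concat(3, [c, h])            # shape [8, 16, 16, 64]
c_back, h_back = tf.split(3, 2, packed_state)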
Example 2: __call__
# Required import: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import LSTMStateTuple [as alias]
def __call__(self, inputs, state, scope=None):
  """Convolutional Long short-term memory cell (ConvLSTM)."""
  with vs.variable_scope(scope or type(self).__name__):  # "ConvLSTMCell"
    if self._state_is_tuple:
      c, h = state
    else:
      c, h = array_ops.split(3, 2, state)
    # batch_size * height * width * channel
    concat = _conv([inputs, h], 4 * self._num_units, self._k_size, True, initializer=self._initializer)
    # i = input_gate, j = new_input, f = forget_gate, o = output_gate
    i, j, f, o = array_ops.split(3, 4, concat)
    new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
             self._activation(j))
    new_h = self._activation(new_c) * sigmoid(o)
    if self._state_is_tuple:
      new_state = LSTMStateTuple(new_c, new_h)
    else:
      new_state = array_ops.concat(3, [new_c, new_h])
    return new_h, new_state
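This variant has the same cell body as Example 1; the only difference is that the convolution kernel size and the weight initializer are stored on the cell at construction time (self._k_size, self._initializer) instead of being passed on every call, so the per-step signature matches the standard RNNCell interface.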
Example 3: __call__
# Required import: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import LSTMStateTuple [as alias]
def __call__(self, inputs, state, scope=None):
  """Run the cell with the declared zoneouts."""
  # compute output and new state as before
  output, new_state = self._cell(inputs, state, scope)
  # if either hidden state or memory cell zoneout is applied, split the state and process it
  if self._has_hidden_state_zoneout or self._has_memory_cell_zoneout:
    # split state
    c_old, m_old = state
    c_new, m_new = new_state
    # apply zoneout to the memory cell and the hidden state
    c_and_m = []
    for s_old, s_new, p, has_zoneout in [
        (c_old, c_new, self._memory_cell_keep_prob, self._has_memory_cell_zoneout),
        (m_old, m_new, self._hidden_state_keep_prob, self._has_hidden_state_zoneout)]:
      if has_zoneout:
        if self._is_training:
          # this should really use random ops directly; see the dropout code for how
          mask = nn_ops.dropout(array_ops.ones_like(s_new), p, seed=self._seed) * p
          s = ((1. - mask) * s_old) + (mask * s_new)
        else:
          s = ((1. - p) * s_old) + (p * s_new)
      else:
        s = s_new
      c_and_m.append(s)
    # package the final results
    new_state = LSTMStateTuple(*c_and_m)
    output = new_state.h
  return output, new_state
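The loop above implements zoneout: during training, a Bernoulli mask (built here by applying dropout to a tensor of ones and rescaling by p) decides per unit whether to keep the old state value or accept the new one; at test time the two are blended deterministically with weight p. Here is a small NumPy sketch of the same arithmetic, separate from the TensorFlow code above:

import numpy as np

p = 0.9                        # keep probability for the new value
s_old = np.array([1.0, 2.0, 3.0, 4.0])
s_new = np.array([5.0, 6.0, 7.0, 8.0])

# Training: a Bernoulli(p) mask per unit; 1 keeps the new value, 0 zones out
# to the old one. (The TF code builds this mask via nn_ops.dropout on ones.)
mask = (np.random.rand(4) < p).astype(np.float64)
s_train = (1.0 - mask) * s_old + mask * s_new

# Inference: the expectation of the training rule.
s_eval = (1.0 - p) * s_old + p * s_new   # i.e. 0.1 * s_old + 0.9 * s_new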
Example 4: state_size
# Required import: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import LSTMStateTuple [as alias]
def state_size(self):
  return (LSTMStateTuple(self._num_units, self._num_units)
          if self._state_is_tuple else 2 * self._num_units)
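state_size is what generic RNN helpers consult when allocating an initial state, so returning an LSTMStateTuple here makes zero states come back as tuples too. A short sketch of the effect, using the stock BasicLSTMCell for illustration (the cells in these examples are custom classes not shown here), assuming the usual zero_state behavior:

import tensorflow as tf
from tensorflow.python.ops import rnn_cell

cell = rnn_cell.BasicLSTMCell(128, state_is_tuple=True)
print(cell.state_size)                 # LSTMStateTuple(c=128, h=128)
init = cell.zero_state(batch_size=32, dtype=tf.float32)
# init is an LSTMStateTuple of two [32, 128] zero tensors; with
# state_is_tuple=False it would be a single [32, 256] tensor instead.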
Example 5: state_size
# Required import: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import LSTMStateTuple [as alias]
def state_size(self):
  return rnn_cell.LSTMStateTuple(self._num_units, self._num_units)
Example 6: __call__
# Required import: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import LSTMStateTuple [as alias]
def __call__(self, inputs, state, scope=None):
  """LSTM cell with layer normalization and recurrent dropout."""
  with vs.variable_scope(scope or type(self).__name__) as scope:  # LayerNormBasicLSTMCell  # pylint: disable=unused-variables
    c, h = state
    args = array_ops.concat(1, [inputs, h])
    concat = self._linear(args)
    i, j, f, o = array_ops.split(1, 4, concat)
    if self._layer_norm:
      i = self._norm(i, "input")
      j = self._norm(j, "transform")
      f = self._norm(f, "forget")
      o = self._norm(o, "output")
    g = self._activation(j)
    if (not isinstance(self._keep_prob, float)) or self._keep_prob < 1:
      g = nn_ops.dropout(g, self._keep_prob, seed=self._seed)
    new_c = (c * math_ops.sigmoid(f + self._forget_bias)
             + math_ops.sigmoid(i) * g)
    if self._layer_norm:
      new_c = self._norm(new_c, "state")
    new_h = self._activation(new_c) * math_ops.sigmoid(o)
    new_state = rnn_cell.LSTMStateTuple(new_c, new_h)
    return new_h, new_state
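The _norm helper used above is not shown in this example. A typical layer-normalization step over the gate pre-activations looks roughly like the sketch below (an assumption for illustration, not the example's actual helper): each row is normalized to zero mean and unit variance, then a learned per-unit gain and shift are applied.

import tensorflow as tf

def layer_norm_sketch(x, scope, gain=1.0, shift=0.0, epsilon=1e-6):
  # Normalize each row of a [batch, units] tensor, then rescale and re-center
  # with trainable parameters created under the given scope.
  num_units = x.get_shape()[-1].value
  with tf.variable_scope(scope):
    mean, variance = tf.nn.moments(x, [1], keep_dims=True)
    g = tf.get_variable("gain", shape=[num_units],
                        initializer=tf.constant_initializer(gain))
    s = tf.get_variable("shift", shape=[num_units],
                        initializer=tf.constant_initializer(shift))
    return g * (x - mean) / tf.sqrt(variance + epsilon) + s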
Example 7: state_size
# Required import: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import LSTMStateTuple [as alias]
def state_size(self):
  return (rnn_cell.LSTMStateTuple(self._num_units, self._num_units)
          if self._state_is_tuple else 2 * self._num_units)
Example 8: call
# Required import: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import LSTMStateTuple [as alias]
def call(self, inputs, state):
  """Long short-term memory cell (LSTM).

  Args:
    inputs: `2-D` tensor with shape `[batch_size x input_size]`.
    state: An `LSTMStateTuple` of state tensors, each shaped
      `[batch_size x self.state_size]`, if `state_is_tuple` has been set to
      `True`. Otherwise, a `Tensor` shaped
      `[batch_size x 2 * self.state_size]`.

  Returns:
    A pair containing the new hidden state, and the new state (either a
      `LSTMStateTuple` or a concatenated state, depending on
      `state_is_tuple`).
  """
  sigmoid = math_ops.sigmoid
  # Parameters of gates are concatenated into one multiply for efficiency.
  if self._state_is_tuple:
    c, h = state
  else:
    c, h = array_ops.split(value=state, num_or_size_splits=2, axis=1)
  # concat = _linear([inputs, h], 4 * self._num_units, True)
  concat = _linear(inputs, 4 * self._num_units, True)
  # i = input_gate, j = new_input, f = forget_gate, o = output_gate
  i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)
  new_c = (
      c * sigmoid(f + self._forget_bias) + sigmoid(i) * self._activation(j))
  new_h = self._activation(new_c) * sigmoid(o)
  if self._state_is_tuple:
    new_state = rnn_cell.LSTMStateTuple(new_c, new_h)
  else:
    new_state = array_ops.concat([new_c, new_h], 1)
  return new_h, new_state
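Note that the commented-out line is the conventional formulation, in which the previous hidden state h is fed through _linear together with the inputs. The active line passes only inputs, so in this variant the gate pre-activations do not see h at all and the recurrence flows solely through the cell state c.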
Example 9: __init__
# Required import: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import LSTMStateTuple [as alias]
def __init__(self, num_units, use_peepholes=False,
             initializer=None, num_proj=None, proj_clip=None,
             num_unit_shards=1, num_proj_shards=1,
             forget_bias=1.0, state_is_tuple=False,
             activation=math_ops.tanh):
  """Initialize the parameters for an LSTM cell.

  Args:
    num_units: int, The number of units in the LSTM cell.
    use_peepholes: bool, set True to enable diagonal/peephole connections.
    initializer: (optional) The initializer to use for the weight and
      projection matrices.
    num_proj: (optional) int, The output dimensionality for the projection
      matrices. If None, no projection is performed.
    proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
      provided, then the projected values are clipped elementwise to within
      `[-proj_clip, proj_clip]`.
    num_unit_shards: How to split the weight matrix. If >1, the weight
      matrix is stored across num_unit_shards.
    num_proj_shards: How to split the projection matrix. If >1, the
      projection matrix is stored across num_proj_shards.
    forget_bias: Biases of the forget gate are initialized by default to 1
      in order to reduce the scale of forgetting at the beginning of
      the training.
    state_is_tuple: If True, accepted and returned states are 2-tuples of
      the `c_state` and `m_state`. By default (False), they are concatenated
      along the column axis. This default behavior will soon be deprecated.
    activation: Activation function of the inner states.
  """
  if not state_is_tuple:
    logging.warn(
        "%s: Using a concatenated state is slower and will soon be "
        "deprecated. Use state_is_tuple=True." % self)
  self._num_units = num_units
  self._use_peepholes = use_peepholes
  self._initializer = initializer
  self._num_proj = num_proj
  self._proj_clip = proj_clip
  self._num_unit_shards = num_unit_shards
  self._num_proj_shards = num_proj_shards
  self._forget_bias = forget_bias
  self._state_is_tuple = state_is_tuple
  self._activation = activation
  if num_proj:
    self._state_size = (
        rnn_cell.LSTMStateTuple(num_units, num_proj)
        if state_is_tuple else num_units + num_proj)
    self._output_size = num_proj
  else:
    self._state_size = (
        rnn_cell.LSTMStateTuple(num_units, num_units)
        if state_is_tuple else 2 * num_units)
    self._output_size = num_units
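The two branches at the end determine both the state layout and the cell's output width: with a projection layer, the hidden/output part shrinks to num_proj while the memory cell stays at num_units. Here is a minimal sketch of the resulting sizes, using plain values rather than a cell instance (the enclosing class is not shown above):

from tensorflow.python.ops import rnn_cell

num_units, num_proj = 256, 128

# num_proj set, state_is_tuple=True: c keeps num_units, h shrinks to num_proj.
state_size = rnn_cell.LSTMStateTuple(num_units, num_proj)   # (c=256, h=128)
output_size = num_proj                                       # 128

# num_proj unset, state_is_tuple=False: c and h are concatenated instead.
state_size_flat = 2 * num_units                              # 512
output_size_flat = num_units                                 # 256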