

Python rnn_cell_impl.LSTMStateTuple code examples

This article collects typical usage examples of tensorflow.python.ops.rnn_cell_impl.LSTMStateTuple in Python. If you are unsure what rnn_cell_impl.LSTMStateTuple does or how to use it, the curated code examples below should help. You can also explore other usage examples from the tensorflow.python.ops.rnn_cell_impl module.


The following presents 15 code examples of the rnn_cell_impl.LSTMStateTuple method, sorted by popularity by default.
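
Before diving into the examples, a quick orientation: LSTMStateTuple is a namedtuple with two fields, c (the memory cell state) and h (the hidden/output state). The minimal sketch below is illustrative only; batch size and unit count are made up.

import tensorflow as tf
from tensorflow.python.ops import rnn_cell_impl

batch_size, num_units = 8, 128  # illustrative values
state = rnn_cell_impl.LSTMStateTuple(
    c=tf.zeros([batch_size, num_units]),  # memory cell state
    h=tf.zeros([batch_size, num_units]))  # hidden (output) state
c, h = state                              # unpacks like a plain tuple
print(state.c.shape)                      # (8, 128)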

Example 1: _init_attention

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import LSTMStateTuple [as alias]
def _init_attention(encoder_state):
  """Initialize attention. Handling both LSTM and GRU.

  Args:
    encoder_state: The encoded state to initialize the `dynamic_rnn_decoder`.

  Returns:
    attn: initial zero attention vector.
  """

  # Multi- vs single-layer
  # TODO(thangluong): is this the best way to check?
  if isinstance(encoder_state, tuple):
    top_state = encoder_state[-1]
  else:
    top_state = encoder_state

  # LSTM vs GRU
  if isinstance(top_state, rnn_cell_impl.LSTMStateTuple):
    attn = array_ops.zeros_like(top_state.h)
  else:
    attn = array_ops.zeros_like(top_state)

  return attn 
Author: kepei1106, Project: SentenceFunction, Lines: 26, Source: my_attention_decoder_fn.py
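
A hedged usage sketch of _init_attention above (shapes are illustrative, and the snippet's own imports such as array_ops are assumed to be in place): the attention vector is zero-initialized from h when the top encoder state is an LSTMStateTuple, and from the state tensor itself otherwise (e.g. for a GRU).

import tensorflow as tf
from tensorflow.python.ops import rnn_cell_impl

lstm_state = rnn_cell_impl.LSTMStateTuple(c=tf.zeros([32, 256]),
                                          h=tf.zeros([32, 256]))
gru_state = tf.zeros([32, 256])

attn_from_lstm = _init_attention(lstm_state)  # zeros shaped like lstm_state.h
attn_from_gru = _init_attention(gru_state)    # zeros shaped like gru_state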

Example 2: call

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import LSTMStateTuple [as alias]
def call(self, inputs, state):
    """LSTM cell with layer normalization and recurrent dropout."""
    c, h = state
    args = array_ops.concat([inputs, h], 1)
    concat = self._linear(args)

    i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)
    if self._layer_norm:
      i = self._norm(i, "input")
      j = self._norm(j, "transform")
      f = self._norm(f, "forget")
      o = self._norm(o, "output")

    g = self._activation(j)
    if (not isinstance(self._keep_prob, float)) or self._keep_prob < 1:
      g = nn_ops.dropout(g, self._keep_prob, seed=self._seed)

    new_c = (c * math_ops.sigmoid(f + self._forget_bias)
             + math_ops.sigmoid(i) * g)
    if self._layer_norm:
      new_c = self._norm(new_c, "state")
    new_h = self._activation(new_c) * math_ops.sigmoid(o)

    new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_h)
    return new_h, new_state 
Author: ryfeus, Project: lambda-packs, Lines: 27, Source: rnn_cell.py
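
The call() body above appears to match tf.contrib.rnn.LayerNormBasicLSTMCell in TF 1.x; the following is a hedged sketch of driving such a cell with an explicit zero LSTMStateTuple (sizes and keep probability are illustrative):

import tensorflow as tf

cell = tf.contrib.rnn.LayerNormBasicLSTMCell(num_units=64,
                                             layer_norm=True,
                                             dropout_keep_prob=0.9)
inputs = tf.zeros([8, 32])                    # [batch, input_dim]
zero_state = cell.zero_state(8, tf.float32)   # an LSTMStateTuple of zeros
output, new_state = cell(inputs, zero_state)  # new_state is also an LSTMStateTuple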

Example 3: __init__

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import LSTMStateTuple [as alias]
def __init__(self, num_units, num_proj=None,
               use_biases=False, reuse=None):
    """Initialize the parameters for a NAS cell.
    Args:
      num_units: int, The number of units in the NAS cell
      num_proj: (optional) int, The output dimensionality for the projection
        matrices.  If None, no projection is performed.
      use_biases: (optional) bool, If True then use biases within the cell. This
        is False by default.
      reuse: (optional) Python boolean describing whether to reuse variables
        in an existing scope.  If not `True`, and the existing scope already has
        the given variables, an error is raised.
    """
    super(NASCell, self).__init__(_reuse=reuse)
    self._num_units = num_units
    self._num_proj = num_proj
    self._use_biases = use_biases
    self._reuse = reuse

    if num_proj is not None:
      self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_proj)
      self._output_size = num_proj
    else:
      self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_units)
      self._output_size = num_units 
Author: shaohua0116, Project: Multiview2Novelview, Lines: 27, Source: rnn_cell.py
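
As the constructor shows, num_proj only changes the h slot of the state size. A minimal sketch, assuming the NASCell class above is in scope (values are illustrative; private attributes are read only to show what __init__ stored):

cell = NASCell(num_units=64, num_proj=32)
print(cell._state_size)   # LSTMStateTuple(c=64, h=32)
print(cell._output_size)  # 32

cell_no_proj = NASCell(num_units=64)
print(cell_no_proj._state_size)  # LSTMStateTuple(c=64, h=64)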

Example 4: _init_attention

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import LSTMStateTuple [as alias]
def _init_attention(encoder_state):
    """Initialize attention. Handling both LSTM and GRU.
    Args:
        encoder_state: The encoded state to initialize the `dynamic_rnn_decoder`.
    Returns:
        attn: initial zero attention vector.
    """

    # Multi- vs single-layer
    # TODO(thangluong): is this the best way to check?
    if isinstance(encoder_state, tuple):
        top_state = encoder_state[-1]
    else:
        top_state = encoder_state

    # LSTM vs GRU
    if isinstance(top_state, rnn_cell_impl.LSTMStateTuple):
        attn = array_ops.zeros_like(top_state.h)
    else:
        attn = array_ops.zeros_like(top_state)

    return attn 
Author: thu-coai, Project: ccm, Lines: 24, Source: attention_decoder.py

Example 5: _init_attention

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import LSTMStateTuple [as alias]
def _init_attention(encoder_state):
    """Initialize attention. Handling both LSTM and GRU.
    Args:
        encoder_state: The encoded state to initialize the `dynamic_rnn_decoder`.
    Returns:
        attn: initial zero attention vector.
    """

    # Multi- vs single-layer
    if isinstance(encoder_state, tuple):
        top_state = encoder_state[-1]
    else:
        top_state = encoder_state

    # LSTM vs GRU
    if isinstance(top_state, rnn_cell_impl.LSTMStateTuple):
        attn = array_ops.zeros_like(top_state.h)
    else:
        attn = array_ops.zeros_like(top_state)

    return attn 
Author: siat-nlp, Project: TransDG, Lines: 23, Source: attention_decoder.py

Example 6: __call__

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import LSTMStateTuple [as alias]
def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM)."""
    with tf.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
      # Parameters of gates are concatenated into one multiply for efficiency.
      if self._state_is_tuple:
        c, h = state
      else:
        c, h = tf.split(axis=3, num_or_size_splits=2, value=state)
      concat = _conv_linear([inputs, h], self.filter_size, self.num_features * 4, True)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = tf.split(axis=3, num_or_size_splits=4, value=concat)

      new_c = (c * tf.nn.sigmoid(f + self._forget_bias) + tf.nn.sigmoid(i) *
               self._activation(j))
      new_h = self._activation(new_c) * tf.nn.sigmoid(o)

      if self._state_is_tuple:
        new_state = LSTMStateTuple(new_c, new_h)
      else:
        new_state = tf.concat(axis=3, values=[new_c, new_h])
      return new_h, new_state 
Author: kuleshov, Project: audio-super-res, Lines: 24, Source: convrnn.py

Example 7: call

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import LSTMStateTuple [as alias]
def call(self, inputs, state, scope=None):
    cell, hidden = state
    new_hidden = _conv([inputs, hidden],
                       self._kernel_shape,
                       4*self._output_channels,
                       self._use_bias)
    gates = array_ops.split(value=new_hidden,
                            num_or_size_splits=4,
                            axis=self._conv_ndims+1)

    input_gate, new_input, forget_gate, output_gate = gates
    new_cell = math_ops.sigmoid(forget_gate + self._forget_bias) * cell
    new_cell += math_ops.sigmoid(input_gate) * math_ops.tanh(new_input)
    output = math_ops.tanh(new_cell) * math_ops.sigmoid(output_gate)

    if self._skip_connection:
      output = array_ops.concat([output, inputs], axis=-1)
    new_state = rnn_cell_impl.LSTMStateTuple(new_cell, output)
    return output, new_state 
Author: kuleshov, Project: audio-super-res, Lines: 21, Source: convrnn.py

Example 8: _create

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import LSTMStateTuple [as alias]
def _create(self, encoder_output, decoder_state_size, **kwargs):
        """ Creates decoder's initial RNN states according to
        `decoder_state_size`.

        If `decoder_state_size` is int/LSTMStateTuple(int, int), return Tensor
        with shape [batch_size, int] or LSTMStateTuple([batch_size, int], [batch_size, int]).
        If `decoder_state_size` is a tuple of int/LSTMStateTuple, return a tuple
        whose elements' structure match the `decoder_state_size` respectively.
        Args:
            encoder_output: An instance of `collections.namedtuple`
              from `Encoder.encode()`.
            decoder_state_size: RNN decoder state size.
            **kwargs:

        Returns: The decoder states with the structure determined
          by `decoder_state_size`.
        """
        batch_size = tf.shape(encoder_output.attention_length)[0]
        return rnn_cell_impl._zero_state_tensors(
            decoder_state_size, batch_size, tf.float32) 
Author: zhaocq-nlp, Project: NJUNMT-tf, Lines: 22, Source: bridges.py
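
A hedged sketch of what the private TF 1.x helper rnn_cell_impl._zero_state_tensors returns: it mirrors the structure of decoder_state_size, so an LSTMStateTuple of ints becomes an LSTMStateTuple of zero tensors (sizes below are illustrative):

import tensorflow as tf
from tensorflow.python.ops import rnn_cell_impl

decoder_state_size = rnn_cell_impl.LSTMStateTuple(256, 256)
zeros = rnn_cell_impl._zero_state_tensors(decoder_state_size,
                                          batch_size=8,
                                          dtype=tf.float32)
# zeros is an LSTMStateTuple whose c and h are [8, 256] float32 zero tensors.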

Example 9: state_size

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import LSTMStateTuple [as alias]
def state_size(self):
    return rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units) 
Author: ryfeus, Project: lambda-packs, Lines: 4, Source: lstm_ops.py

Example 10: __call__

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import LSTMStateTuple [as alias]
def __call__(self, x, states_prev, scope=None):
    """Long short-term memory cell (LSTM)."""
    with vs.variable_scope(scope or self._names["scope"]):
      x_shape = x.get_shape().with_rank(2)
      if not x_shape[1].value:
        raise ValueError("Expecting x_shape[1] to be set: %s" % str(x_shape))
      if len(states_prev) != 2:
        raise ValueError("Expecting states_prev to be a tuple with length 2.")
      input_size = x_shape[1].value
      w = vs.get_variable(self._names["W"], [input_size + self._num_units,
                                             self._num_units * 4])
      b = vs.get_variable(
          self._names["b"], [w.get_shape().with_rank(2)[1].value],
          initializer=init_ops.constant_initializer(0.0))
      if self._use_peephole:
        wci = vs.get_variable(self._names["wci"], [self._num_units])
        wco = vs.get_variable(self._names["wco"], [self._num_units])
        wcf = vs.get_variable(self._names["wcf"], [self._num_units])
      else:
        wci = wco = wcf = array_ops.zeros([self._num_units])
      (cs_prev, h_prev) = states_prev
      (_, cs, _, _, _, _, h) = _lstm_block_cell(
          x,
          cs_prev,
          h_prev,
          w,
          b,
          wci=wci,
          wco=wco,
          wcf=wcf,
          forget_bias=self._forget_bias,
          use_peephole=self._use_peephole)

      new_state = rnn_cell_impl.LSTMStateTuple(cs, h)
      return h, new_state 
Author: ryfeus, Project: lambda-packs, Lines: 37, Source: lstm_ops.py

Example 11: call

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import LSTMStateTuple [as alias]
def call(self, inputs, state):
    """LSTM cell with layer normalization and recurrent dropout."""
    c, h = state
    args = array_ops.concat([inputs, h], 1)
    concat = self._linear(args)
    dtype = args.dtype

    i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)
    if self._layer_norm:
      i = self._norm(i, "input", dtype=dtype)
      j = self._norm(j, "transform", dtype=dtype)
      f = self._norm(f, "forget", dtype=dtype)
      o = self._norm(o, "output", dtype=dtype)

    g = self._activation(j)
    if (not isinstance(self._keep_prob, float)) or self._keep_prob < 1:
      g = nn_ops.dropout(g, self._keep_prob, seed=self._seed)

    new_c = (c * math_ops.sigmoid(f + self._forget_bias)
             + math_ops.sigmoid(i) * g)
    if self._layer_norm:
      new_c = self._norm(new_c, "state", dtype=dtype)
    new_h = self._activation(new_c) * math_ops.sigmoid(o)

    new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_h)
    return new_h, new_state 
Author: shaohua0116, Project: Multiview2Novelview, Lines: 28, Source: rnn_cell.py

Example 12: call

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import LSTMStateTuple [as alias]
def call(self, inputs, state):
        """2D Convolutional LSTM cell with (optional) normalization and recurrent dropout."""
        c, h = state
        tile_concat = isinstance(inputs, (list, tuple))
        if tile_concat:
            inputs, inputs_non_spatial = inputs
        args = array_ops.concat([inputs, h], -1)
        concat = self._conv2d(args)
        if tile_concat:
            concat = concat + self._dense(inputs_non_spatial)[:, None, None, :]

        if self._normalizer_fn and not self._separate_norms:
            concat = self._norm(concat, "input_transform_forget_output")
        i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=-1)
        if self._normalizer_fn and self._separate_norms:
            i = self._norm(i, "input")
            j = self._norm(j, "transform")
            f = self._norm(f, "forget")
            o = self._norm(o, "output")

        g = self._activation_fn(j)
        if (not isinstance(self._keep_prob, float)) or self._keep_prob < 1:
            g = nn_ops.dropout(g, self._keep_prob, seed=self._seed)

        new_c = (c * math_ops.sigmoid(f + self._forget_bias)
                 + math_ops.sigmoid(i) * g)
        if self._normalizer_fn:
            new_c = self._norm(new_c, "state")
        new_h = self._activation_fn(new_c) * math_ops.sigmoid(o)

        if self._skip_connection:
            new_h = array_ops.concat([new_h, inputs], axis=-1)

        new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_h)
        return new_h, new_state 
Author: alexlee-gk, Project: video_prediction, Lines: 37, Source: rnn_ops.py

Example 13: state_size

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import LSTMStateTuple [as alias]
def state_size(self):
    return (LSTMStateTuple(self._num_units, self._num_units)
            if self._state_is_tuple else 2 * self._num_units) 
Author: kuleshov, Project: audio-super-res, Lines: 5, Source: convrnn.py

Example 14: _final_state

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import LSTMStateTuple [as alias]
def _final_state(x, direction):
    """ Acquires final states.

    Args:
        x: A Tensor/LSTMStateTuple or a dictionary of Tensors/LSTMStateTuples.
        direction: The key for `x` if `x` is a dictionary.

    Returns: A Tensor or a LSTMStateTuple, according to x.

    Raises:
        ValueError: if the type of x is not mentioned above, or if `direction`
          is not a valid key of `x`, when `x` is a dictionary.
    """
    if isinstance(x, tf.Tensor) or isinstance(x, rnn_cell_impl.LSTMStateTuple):
        return x
    elif isinstance(x, dict):
        try:
            ret = x[direction]
        except KeyError:
            raise ValueError(
                "Unrecognized type of direction: {}".format(direction))
        return ret
    else:
        raise ValueError(
            "Unrecognized type of direction: {} "
            "or unknow type of final_states: {}".format(direction, type(x))) 
Author: zhaocq-nlp, Project: NJUNMT-tf, Lines: 28, Source: bridges.py
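
A hedged usage sketch of _final_state with a direction-keyed dictionary, e.g. as produced by a bidirectional encoder (names and shapes are illustrative):

import tensorflow as tf
from tensorflow.python.ops import rnn_cell_impl

final_states = {
    "forward": rnn_cell_impl.LSTMStateTuple(c=tf.zeros([8, 128]),
                                            h=tf.zeros([8, 128])),
    "backward": rnn_cell_impl.LSTMStateTuple(c=tf.zeros([8, 128]),
                                             h=tf.zeros([8, 128])),
}
fw_state = _final_state(final_states, "forward")  # the forward LSTMStateTuple
passthrough = _final_state(fw_state, None)        # Tensors/LSTMStateTuples pass through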

Example 15: _default_dropout_state_filter_visitor

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import LSTMStateTuple [as alias]
def _default_dropout_state_filter_visitor(substate):
  if isinstance(substate, LSTMStateTuple):
    # Do not perform dropout on the memory state.
    return LSTMStateTuple(c=False, h=True)
  elif isinstance(substate, tensor_array_ops.TensorArray):
    return False
  return True 
Author: gkahn13, Project: GtS, Lines: 9, Source: rnn_dropout.py
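
A hedged sketch of wiring this visitor into TF 1.x's tf.nn.rnn_cell.DropoutWrapper, whose dropout_state_filter_visitor argument decides which parts of the state receive dropout; with the visitor above, the memory state c is left untouched while h gets state dropout (cell size and keep probability are illustrative):

import tensorflow as tf

base_cell = tf.nn.rnn_cell.LSTMCell(num_units=128)
cell = tf.nn.rnn_cell.DropoutWrapper(
    base_cell,
    state_keep_prob=0.9,
    dropout_state_filter_visitor=_default_dropout_state_filter_visitor)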


Note: The tensorflow.python.ops.rnn_cell_impl.LSTMStateTuple examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their respective authors, and copyright remains with those authors; please follow each project's license when distributing or reusing the code, and do not republish without permission.