

Python rnn_cell_impl._zero_state_tensors Method Code Examples

This article collects typical usage examples of the Python method tensorflow.python.ops.rnn_cell_impl._zero_state_tensors. If you are wondering what rnn_cell_impl._zero_state_tensors does, how to call it, or where it is used, the curated examples below should help. You can also explore further usage examples from the tensorflow.python.ops.rnn_cell_impl module.


The 15 code examples of rnn_cell_impl._zero_state_tensors shown below are sorted by popularity by default.
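
For orientation, here is a minimal sketch of what the helper does. Note that `_zero_state_tensors` is a private TensorFlow 1.x helper (the leading underscore marks it as non-public API), so the signature below is the one used by the examples on this page and may change between versions:

import tensorflow as tf
from tensorflow.python.ops import rnn_cell_impl

# _zero_state_tensors(state_size, batch_size, dtype) maps over the
# (possibly nested) state_size structure and returns a matching
# structure of zero tensors shaped [batch_size] + state_size.
batch_size = tf.constant(32)
zeros = rnn_cell_impl._zero_state_tensors(128, batch_size, tf.float32)
# zeros is a [32, 128] float32 tensor of zeros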

Example 1: initial_alignments

# Required import: from tensorflow.python.ops import rnn_cell_impl
# Or: from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors
def initial_alignments(self, batch_size, dtype):
    """Creates the initial alignment values for the `AttentionWrapper` class.

    This is important for AttentionMechanisms that use the previous alignment
    to calculate the alignment at the next time step (e.g. monotonic attention).

    The default behavior is to return a tensor of all zeros.

    Args:
      batch_size: `int32` scalar, the batch_size.
      dtype: The `dtype`.

    Returns:
      A `dtype` tensor shaped `[batch_size, alignments_size]`
      (`alignments_size` is the values' `max_time`).
    """
    max_time = self._alignments_size
    return _zero_state_tensors(max_time, batch_size, dtype) 
Author: ryfeus | Project: lambda-packs | Lines: 20 | Source: attention_wrapper.py
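
As a quick check of the shape contract in Example 1's docstring: the call delegates to `_zero_state_tensors`, so the result is simply a zero tensor of shape `[batch_size, max_time]`. A standalone sketch of the equivalent call (assuming TF 1.x; `max_time` stands in for `self._alignments_size`):

import tensorflow as tf
from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors

max_time = 50
alignments = _zero_state_tensors(max_time, tf.constant(4), tf.float32)
# alignments: a [4, 50] float32 tensor of zeros, one row per batch entry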

Example 2: initial_alignments

# Required import: from tensorflow.python.ops import rnn_cell_impl
# Or: from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors
def initial_alignments(self, batch_size, dtype):
        """Creates the initial alignment values for the `AttentionWrapper` class.

        This is important for AttentionMechanisms that use the previous alignment
        to calculate the alignment at the next time step (e.g. monotonic attention).

        The default behavior is to return a tensor of all zeros.

        Args:
          batch_size: `int32` scalar, the batch_size.
          dtype: The `dtype`.

        Returns:
          A `dtype` tensor shaped `[batch_size, alignments_size]`
          (`alignments_size` is the values' `max_time`).
        """
        max_time = self._alignments_size
        return _zero_state_tensors(max_time, batch_size, dtype) 
Author: HareeshBahuleyan | Project: tf-var-attention | Lines: 20 | Source: attention_wrapper.py

Example 3: _create

# Required import: from tensorflow.python.ops import rnn_cell_impl
# Or: from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors
def _create(self, encoder_output, decoder_state_size, **kwargs):
        """ Creates decoder's initial RNN states according to
        `decoder_state_size`.

        If `decoder_state_size` is an int or an `LSTMStateTuple(int, int)`,
        returns a Tensor with shape `[batch_size, int]` or an
        `LSTMStateTuple([batch_size, int], [batch_size, int])`. If
        `decoder_state_size` is a tuple of ints/`LSTMStateTuple`s, returns a
        tuple whose elements match the structure of `decoder_state_size`.

        Args:
            encoder_output: An instance of `collections.namedtuple`
              from `Encoder.encode()`.
            decoder_state_size: RNN decoder state size.
            **kwargs:

        Returns: The decoder states with the structure determined
          by `decoder_state_size`.
        """
        batch_size = tf.shape(encoder_output.attention_length)[0]
        return rnn_cell_impl._zero_state_tensors(
            decoder_state_size, batch_size, tf.float32) 
Author: zhaocq-nlp | Project: NJUNMT-tf | Lines: 22 | Source: bridges.py
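
Example 3 leans on the fact that `_zero_state_tensors` maps over nested state structures, which is what lets it accept an int, an `LSTMStateTuple`, or a tuple of either. A short sketch of that behavior (TF 1.x; `LSTMStateTuple` also lives in `rnn_cell_impl`):

import tensorflow as tf
from tensorflow.python.ops import rnn_cell_impl

batch_size = tf.constant(16)
state_size = rnn_cell_impl.LSTMStateTuple(256, 256)
state = rnn_cell_impl._zero_state_tensors(state_size, batch_size, tf.float32)
# state is an LSTMStateTuple; state.c and state.h are each a
# [16, 256] zero tensor, mirroring the structure of state_size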

Example 4: initial_alignments

# Required import: from tensorflow.python.ops import rnn_cell_impl
# Or: from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors
def initial_alignments(self, batch_size, dtype):
        '''Returns initial alignments with all probability mass on the first block.'''
        max_time = self._alignments_size
        alignments = _zero_state_tensors(max_time - 1, batch_size, dtype)
        return tf.concat([tf.fill([batch_size, 1], 1.0), alignments], 1) 
Author: vagrawal | Project: deepsphinx | Lines: 7 | Source: attention.py
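
The result of Example 4 can be read off directly: the `tf.concat` places probability 1.0 in the first column and zeros everywhere else, so each row starts as `[1, 0, ..., 0]`. A self-contained sketch of the same construction (illustrative sizes):

import tensorflow as tf
from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors

batch_size, max_time = tf.constant(4), 10
rest = _zero_state_tensors(max_time - 1, batch_size, tf.float32)  # [4, 9] zeros
alignments = tf.concat([tf.fill([batch_size, 1], 1.0), rest], 1)  # [4, 10]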

Example 5: zero_state

# Required import: from tensorflow.python.ops import rnn_cell_impl
# Or: from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors
def zero_state(self, batch_size, dtype):
    with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
      if self._initial_cell_state is not None:
        cell_state = self._initial_cell_state
      else:
        cell_state = self._cell.zero_state(batch_size, dtype)
      error_message = (
          "When calling zero_state of AttentionWrapper %s: " % self._base_name +
          "Non-matching batch sizes between the memory "
          "(encoder output) and the requested batch size.  Are you using "
          "the BeamSearchDecoder?  If so, make sure your encoder output has "
          "been tiled to beam_width via tf.contrib.seq2seq.tile_batch, and "
          "the batch_size= argument passed to zero_state is "
          "batch_size * beam_width.")
      with ops.control_dependencies(
          [check_ops.assert_equal(batch_size,
                                  self._attention_mechanism.batch_size,
                                  message=error_message)]):
        cell_state = nest.map_structure(
            lambda s: array_ops.identity(s, name="checked_cell_state"),
            cell_state)
      if self._alignment_history:
        alignment_history = tensor_array_ops.TensorArray(
            dtype=dtype, size=0, dynamic_size=True)
      else:
        alignment_history = ()
      return AttentionWrapperState(
          cell_state=cell_state,
          time=array_ops.zeros([], dtype=dtypes.int32),
          attention=_zero_state_tensors(self._attention_size, batch_size,
                                        dtype),
          alignments=self._attention_mechanism.initial_alignments(
              batch_size, dtype),
          alignment_history=alignment_history) 
Author: ryfeus | Project: lambda-packs | Lines: 36 | Source: attention_wrapper.py
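
The long error message in Example 5 encodes a real how-to: when decoding with `BeamSearchDecoder`, the attention memory must be tiled to the beam width and the batch size scaled accordingly. A hedged, self-contained sketch of that setup (TF 1.x contrib API; all sizes and the choice of `LuongAttention`/`GRUCell` are illustrative):

import tensorflow as tf

batch_size, max_time, num_units, beam_width = 2, 7, 32, 5
encoder_outputs = tf.zeros([batch_size, max_time, num_units])  # toy memory
tiled_memory = tf.contrib.seq2seq.tile_batch(encoder_outputs,
                                             multiplier=beam_width)
mechanism = tf.contrib.seq2seq.LuongAttention(num_units, memory=tiled_memory)
cell = tf.contrib.seq2seq.AttentionWrapper(
    tf.nn.rnn_cell.GRUCell(num_units), mechanism)
# Note the scaled batch size; passing plain batch_size here would trip
# the assert_equal check shown above.
initial_state = cell.zero_state(batch_size * beam_width, tf.float32)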

Example 6: zero_state

# Required import: from tensorflow.python.ops import rnn_cell_impl
# Or: from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors
def zero_state(self, batch_size, dtype):
		"""Return an initial (zero) state tuple for this `AttentionWrapper`.
		
		Args:
		  batch_size: `0D` integer tensor: the batch size.
		  dtype: The internal state data type.
		Returns:
		  A `TacotronDecoderCellState` tuple containing zeroed out tensors and,
		  possibly, empty `TensorArray` objects.
		Raises:
		  ValueError: (or, possibly at runtime, InvalidArgument), if
			`batch_size` does not match the output size of the encoder passed
			to the wrapper object at initialization time.
		"""
		with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
			cell_state = self._cell._cell.zero_state(batch_size, dtype)
			error_message = (
				"When calling zero_state of TacotronDecoderCell %s: " % self._base_name +
				"Non-matching batch sizes between the memory "
				"(encoder output) and the requested batch size.")
			with ops.control_dependencies(
				self._batch_size_checks(batch_size, error_message)):
				cell_state = nest.map_structure(
					lambda s: array_ops.identity(s, name="checked_cell_state"),
					cell_state)
			return TacotronDecoderCellState(
				cell_state=cell_state,
				time=array_ops.zeros([], dtype=tf.int32),
				attention=_zero_state_tensors(self._attention_layer_size, batch_size,
				  dtype),
				alignments=self._attention_mechanism.initial_alignments(batch_size, dtype),
				alignment_history=tensor_array_ops.TensorArray(dtype=dtype, size=0,
				dynamic_size=True),
				finished=tf.reshape(tf.tile([0.0], [batch_size]), [-1, 1])) 
Author: rishikksh20 | Project: vae_tacotron2 | Lines: 36 | Source: Architecture_wrappers.py

Example 7: _create_zero_outputs

# Required import: from tensorflow.python.ops import rnn_cell_impl
# Or: from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors
def _create_zero_outputs(size, dtype, batch_size):
  """Create a zero outputs Tensor structure."""
  def _create(s, d):
    return _zero_state_tensors(s, batch_size, d)

  return tf.contrib.framework.nest.map_structure(_create, size, dtype) 
Author: mlperf | Project: training_results_v0.5 | Lines: 8 | Source: decoder.py
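
Example 7 composes `_zero_state_tensors` with `nest.map_structure` so that `size` and `dtype` may themselves be matching nested structures. A sketch of calling it with a hypothetical two-field output structure (assumes the function above and its imports are in scope):

# Hypothetical structure: per-step logits plus a sampled-id tensor.
zero_outputs = _create_zero_outputs(
    size=(100, 1),                  # depth of each output field
    dtype=(tf.float32, tf.int32),   # matching dtypes, same structure
    batch_size=tf.constant(8))
# zero_outputs is a tuple: ([8, 100] float32 zeros, [8, 1] int32 zeros)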

Example 8: zero_state

# Required import: from tensorflow.python.ops import rnn_cell_impl
# Or: from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors
def zero_state(self, batch_size, dtype):
        with tf.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
            if self._initial_cell_state is not None:
                cell_state = self._initial_cell_state
            else:
                cell_state = self._cell.zero_state(batch_size, dtype)
            error_message = (
                "zero_state of AttentionWrapper %s: " % self._base_name +
                "Non-matching batch sizes between the memory "
                "(encoder output) and the requested batch size.")
            with tf.control_dependencies(
                [tf.assert_equal(batch_size,
                    self._attention_mechanism.batch_size,
                    message=error_message)]):
                cell_state = nest.map_structure(
                    lambda s: tf.identity(s, name="checked_cell_state"),
                    cell_state)
            alignment_history = ()

            _zero_state_tensors = rnn_cell_impl._zero_state_tensors
            return AttentionWrapperState(
                cell_state=cell_state,
                time=tf.zeros([], dtype=tf.int32),
                attention=_zero_state_tensors(self._attention_size, batch_size,
                dtype),
                alignments=self._attention_mechanism.initial_alignments(
                    batch_size, dtype),
                alignment_history=alignment_history) 
Author: mckinziebrandon | Project: DeepChatModels | Lines: 30 | Source: _rnn.py

Example 9: zero_state

# Required import: from tensorflow.python.ops import rnn_cell_impl
# Or: from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors
def zero_state(self, batch_size, dtype):
    """Return an initial (zero) state tuple for this `AttentionWrapper`.

    Args:
      batch_size: `0D` integer tensor: the batch size.
      dtype: The internal state data type.
    Returns:
      A `TacotronDecoderCellState` tuple containing zeroed out tensors and,
      possibly, empty `TensorArray` objects.
    Raises:
      ValueError: (or, possibly at runtime, InvalidArgument), if
      `batch_size` does not match the output size of the encoder passed
      to the wrapper object at initialization time.
    """
    with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
      cell_state = self._cell.zero_state(batch_size, dtype)
      error_message = (
        "When calling zero_state of TacotronDecoderCell %s: " % self._base_name +
        "Non-matching batch sizes between the memory "
        "(encoder output) and the requested batch size.")
      with ops.control_dependencies(
        self._batch_size_checks(batch_size, error_message)):
        cell_state = nest.map_structure(
          lambda s: array_ops.identity(s, name="checked_cell_state"),
          cell_state)
      return TacotronDecoderCellState(
        cell_state=cell_state,
        time=array_ops.zeros([], dtype=tf.int32),
        attention=rnn_cell_impl._zero_state_tensors(self._attention_layer_size, batch_size, dtype),
        alignments=self._attention_mechanism.initial_alignments(batch_size, dtype),
        alignment_history=tensor_array_ops.TensorArray(dtype=dtype, size=0,
        dynamic_size=True)) 
Author: youssefsharief | Project: arabic-tacotron-tts | Lines: 34 | Source: rnn_wrappers.py

Example 10: zero_state

# Required import: from tensorflow.python.ops import rnn_cell_impl
# Or: from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors
def zero_state(self, batch_size, dtype):
		"""Return an initial (zero) state tuple for this `AttentionWrapper`.

		Args:
		  batch_size: `0D` integer tensor: the batch size.
		  dtype: The internal state data type.
		Returns:
		  A `TacotronDecoderCellState` tuple containing zeroed out tensors and,
		  possibly, empty `TensorArray` objects.
		Raises:
		  ValueError: (or, possibly at runtime, InvalidArgument), if
			`batch_size` does not match the output size of the encoder passed
			to the wrapper object at initialization time.
		"""
		with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
			cell_state = self._cell._cell.zero_state(batch_size, dtype)
			error_message = (
				"When calling zero_state of TacotronDecoderCell %s: " % self._base_name +
				"Non-matching batch sizes between the memory "
				"(encoder output) and the requested batch size.")
			with ops.control_dependencies(
				self._batch_size_checks(batch_size, error_message)):
				cell_state = nest.map_structure(
					lambda s: array_ops.identity(s, name="checked_cell_state"),
					cell_state)
			return TacotronDecoderCellState(
				cell_state=cell_state,
				time=array_ops.zeros([], dtype=tf.int32),
				attention=_zero_state_tensors(self._attention_layer_size, batch_size,
				  dtype),
				alignments=self._attention_mechanism.initial_alignments(batch_size, dtype),
				alignment_history=tensor_array_ops.TensorArray(dtype=dtype, size=0,
				dynamic_size=True),
				max_attentions=tf.zeros((batch_size, ), dtype=tf.int32)) 
Author: Rayhane-mamah | Project: Tacotron-2 | Lines: 36 | Source: Architecture_wrappers.py

Example 11: initial_alignments

# Required import: from tensorflow.python.ops import rnn_cell_impl
# Or: from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors
def initial_alignments(self, batch_size, dtype):
        max_time = self._alignments_size
        return _zero_state_tensors(max_time, batch_size, dtype) 
Author: hccho2 | Project: Tacotron-Wavenet-Vocoder-Korean | Lines: 5 | Source: rnn_wrappers.py

Example 12: initial_state

# Required import: from tensorflow.python.ops import rnn_cell_impl
# Or: from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors
def initial_state(self, batch_size, dtype):
        state_size_ = self.state_size
        return _zero_state_tensors(state_size_, batch_size, dtype) 
Author: hccho2 | Project: Tacotron-Wavenet-Vocoder-Korean | Lines: 5 | Source: rnn_wrappers.py

Example 13: zero_state

# Required import: from tensorflow.python.ops import rnn_cell_impl
# Or: from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors
def zero_state(self, batch_size, dtype):
        with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
            if self._initial_cell_state is not None:
                cell_state = self._initial_cell_state
            else:
                cell_state = self._cell.zero_state(batch_size, dtype)
            error_message = (
                    "When calling zero_state of AttentionWrapper %s: " % self._base_name +
                    "Non-matching batch sizes between the memory "
                    "(encoder output) and the requested batch size.  Are you using "
                    "the BeamSearchDecoder?  If so, make sure your encoder output has "
                    "been tiled to beam_width via tf.contrib.seq2seq.tile_batch, and "
                    "the batch_size= argument passed to zero_state is "
                    "batch_size * beam_width.")
            with ops.control_dependencies(
                    self._batch_size_checks(batch_size, error_message)):
                cell_state = nest.map_structure(
                        lambda s: array_ops.identity(s, name="checked_cell_state"),
                        cell_state)

            state = PointerWrapperState(
                cell_state=cell_state,
                time=array_ops.zeros([], dtype=dtypes.int32),
                attention=_zero_state_tensors(self._attention_layer_size,
                    batch_size, dtype),
                alignment_history=self._item_or_tuple(
                    tensor_array_ops.TensorArray(dtype=dtype, size=0,
                    dynamic_size=True)
                    if self._alignment_history else ()
                        for _ in self._attention_mechanisms),
                p_gen_history=self._item_or_tuple(
                    tensor_array_ops.TensorArray(dtype=dtype, size=0,
                        dynamic_size=True) for _ in self._attention_mechanisms),
                vocab_dist_history=tensor_array_ops.TensorArray(
                    dtype=tf.float32, size=0, dynamic_size=True),
                copy_dist_history=tensor_array_ops.TensorArray(
                    dtype=tf.float32, size=0, dynamic_size=True),
                final_dist_history=tensor_array_ops.TensorArray(
                    dtype=tf.float32, size=0, dynamic_size=True))

            return state 
Author: ZhangShiyue | Project: QGforQA | Lines: 43 | Source: hierarchical_ptr_wrapper.py

Example 14: zero_state

# Required import: from tensorflow.python.ops import rnn_cell_impl
# Or: from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors
def zero_state(self, batch_size, dtype):
    with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
      if self._initial_cell_state is not None:
        cell_state = self._initial_cell_state
      else:
        cell_state = self._cell.zero_state(batch_size, dtype)
      error_message = (
          "When calling zero_state of AttentionWrapper %s: " % self._base_name +
          "Non-matching batch sizes between the memory "
          "(encoder output) and the requested batch size.  Are you using "
          "the BeamSearchDecoder?  If so, make sure your encoder output has "
          "been tiled to beam_width via tf.contrib.seq2seq.tile_batch, and "
          "the batch_size= argument passed to zero_state is "
          "batch_size * beam_width.")
      with ops.control_dependencies(
          self._batch_size_checks(batch_size, error_message)):
        cell_state = nest.map_structure(
            lambda s: array_ops.identity(s, name="checked_cell_state"),
            cell_state)
      return AttentionWrapperState(
          cell_state=cell_state,
          time=array_ops.zeros([], dtype=dtypes.int32),
          attention=_zero_state_tensors(self._attention_layer_size, batch_size,
                                        dtype),
          alignments=self._item_or_tuple(
              attention_mechanism.initial_alignments(batch_size, dtype)
              for attention_mechanism in self._attention_mechanisms),
          alignment_history=self._item_or_tuple(
              tensor_array_ops.TensorArray(dtype=dtype, size=0,
                                           dynamic_size=True)
              if self._alignment_history else ()
              for _ in self._attention_mechanisms)) 
Author: ZhangShiyue | Project: QGforQA | Lines: 34 | Source: attention_wrapper.py

Example 15: _create_zero_outputs

# Required import: from tensorflow.python.ops import rnn_cell_impl
# Or: from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors
def _create_zero_outputs(size, dtype, batch_size):
    """Create a zero outputs Tensor structure."""

    def _create(s, d):
        return _zero_state_tensors(s, batch_size, d)

    return nest.map_structure(_create, size, dtype) 
Author: microsoft | Project: icecaps | Lines: 9 | Source: dynamic_decoder_custom.py


Note: The tensorflow.python.ops.rnn_cell_impl._zero_state_tensors examples on this page were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by many developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.