

Python rnn_cell_impl._zero_state_tensors Method Code Examples

This article collects typical usage examples of the Python method tensorflow.python.ops.rnn_cell_impl._zero_state_tensors. If you have been wondering what rnn_cell_impl._zero_state_tensors does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore other usage examples from the tensorflow.python.ops.rnn_cell_impl module.


The following presents 15 code examples of the rnn_cell_impl._zero_state_tensors method, sorted by popularity by default.
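
Before the examples, here is a minimal sketch of what this private helper does: given a (possibly nested) state-size structure, it returns zero tensors with a leading batch dimension. The snippet is illustrative only and assumes a TF 1.x environment where the helper is importable:

import tensorflow as tf
from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors

# For an int state size, the helper returns a single zero tensor
# of shape [batch_size, state_size].
zeros = _zero_state_tensors(128, 32, tf.float32)  # shape: [32, 128]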

Example 1: initial_alignments

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors [as alias]
def initial_alignments(self, batch_size, dtype):
    """Creates the initial alignment values for the `AttentionWrapper` class.

    This is important for AttentionMechanisms that use the previous alignment
    to calculate the alignment at the next time step (e.g. monotonic attention).

    The default behavior is to return a tensor of all zeros.

    Args:
      batch_size: `int32` scalar, the batch_size.
      dtype: The `dtype`.

    Returns:
      A `dtype` tensor shaped `[batch_size, alignments_size]`
      (`alignments_size` is the values' `max_time`).
    """
    max_time = self._alignments_size
    return _zero_state_tensors(max_time, batch_size, dtype) 
Author: ryfeus | Project: lambda-packs | Lines: 20 | Source: attention_wrapper.py

Example 2: initial_alignments

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors [as alias]
def initial_alignments(self, batch_size, dtype):
    """Creates the initial alignment values for the `AttentionWrapper` class.

    This is important for AttentionMechanisms that use the previous alignment
    to calculate the alignment at the next time step (e.g. monotonic attention).

    The default behavior is to return a tensor of all zeros.

    Args:
      batch_size: `int32` scalar, the batch_size.
      dtype: The `dtype`.

    Returns:
      A `dtype` tensor shaped `[batch_size, alignments_size]`
      (`alignments_size` is the values' `max_time`).
    """
    max_time = self._alignments_size
    return _zero_state_tensors(max_time, batch_size, dtype)
Author: HareeshBahuleyan | Project: tf-var-attention | Lines: 20 | Source: attention_wrapper.py

Example 3: _create

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors [as alias]
def _create(self, encoder_output, decoder_state_size, **kwargs):
    """Creates the decoder's initial RNN states according to
    `decoder_state_size`.

    If `decoder_state_size` is an int or an LSTMStateTuple(int, int), returns
    a Tensor of shape [batch_size, int] or an
    LSTMStateTuple([batch_size, int], [batch_size, int]).
    If `decoder_state_size` is a tuple of ints/LSTMStateTuples, returns a
    tuple whose elements match the corresponding structure of
    `decoder_state_size`.

    Args:
        encoder_output: An instance of `collections.namedtuple`
          from `Encoder.encode()`.
        decoder_state_size: RNN decoder state size.
        **kwargs:

    Returns: The decoder states with the structure determined
      by `decoder_state_size`.
    """
    batch_size = tf.shape(encoder_output.attention_length)[0]
    return rnn_cell_impl._zero_state_tensors(
        decoder_state_size, batch_size, tf.float32)
Author: zhaocq-nlp | Project: NJUNMT-tf | Lines: 22 | Source: bridges.py
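
As the docstring above notes, nested `decoder_state_size` structures are matched element-wise. A hypothetical illustration for a two-layer LSTM decoder (the sizes here are invented):

import tensorflow as tf
from tensorflow.python.ops import rnn_cell_impl

# A two-layer LSTM decoder's state size: a tuple of LSTMStateTuples.
decoder_state_size = (
    tf.nn.rnn_cell.LSTMStateTuple(64, 64),
    tf.nn.rnn_cell.LSTMStateTuple(64, 64),
)
# Returns a matching tuple of LSTMStateTuples of [32, 64] zero tensors.
states = rnn_cell_impl._zero_state_tensors(decoder_state_size, 32, tf.float32)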

Example 4: initial_alignments

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors [as alias]
def initial_alignments(self, batch_size, dtype):
    '''Returns initial alignments with all probability mass on the first block.'''
    max_time = self._alignments_size
    alignments = _zero_state_tensors(max_time - 1, batch_size, dtype)
    return tf.concat([tf.fill([batch_size, 1], 1.0), alignments], 1)
Author: vagrawal | Project: deepsphinx | Lines: 7 | Source: attention.py
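
For intuition, this variant produces a one-hot-style initial alignment: probability 1.0 on the first block, 0.0 elsewhere. A small standalone check, assuming `max_time = 4`:

import tensorflow as tf

batch_size, max_time = 2, 4
alignments = tf.concat(
    [tf.fill([batch_size, 1], 1.0),
     tf.zeros([batch_size, max_time - 1])], 1)
# Evaluates to [[1., 0., 0., 0.],
#               [1., 0., 0., 0.]]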

Example 5: zero_state

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors [as alias]
def zero_state(self, batch_size, dtype):
    with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
      if self._initial_cell_state is not None:
        cell_state = self._initial_cell_state
      else:
        cell_state = self._cell.zero_state(batch_size, dtype)
      error_message = (
          "When calling zero_state of AttentionWrapper %s: " % self._base_name +
          "Non-matching batch sizes between the memory "
          "(encoder output) and the requested batch size.  Are you using "
          "the BeamSearchDecoder?  If so, make sure your encoder output has "
          "been tiled to beam_width via tf.contrib.seq2seq.tile_batch, and "
          "the batch_size= argument passed to zero_state is "
          "batch_size * beam_width.")
      with ops.control_dependencies(
          [check_ops.assert_equal(batch_size,
                                  self._attention_mechanism.batch_size,
                                  message=error_message)]):
        cell_state = nest.map_structure(
            lambda s: array_ops.identity(s, name="checked_cell_state"),
            cell_state)
      if self._alignment_history:
        alignment_history = tensor_array_ops.TensorArray(
            dtype=dtype, size=0, dynamic_size=True)
      else:
        alignment_history = ()
      return AttentionWrapperState(
          cell_state=cell_state,
          time=array_ops.zeros([], dtype=dtypes.int32),
          attention=_zero_state_tensors(self._attention_size, batch_size,
                                        dtype),
          alignments=self._attention_mechanism.initial_alignments(
              batch_size, dtype),
          alignment_history=alignment_history) 
Author: ryfeus | Project: lambda-packs | Lines: 36 | Source: attention_wrapper.py
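
The error message in this example points at a common pitfall when combining `AttentionWrapper` with `BeamSearchDecoder`. A hedged sketch of the expected call pattern, using stand-in shapes and a hypothetical Luong-attention setup on the TF 1.x contrib API:

import tensorflow as tf

batch_size, beam_width, max_time, units = 8, 5, 20, 64
encoder_outputs = tf.zeros([batch_size, max_time, units])  # stand-in encoder output

# Tile the memory so every beam hypothesis sees its own copy.
tiled_memory = tf.contrib.seq2seq.tile_batch(encoder_outputs, multiplier=beam_width)

attention = tf.contrib.seq2seq.LuongAttention(units, tiled_memory)
cell = tf.contrib.seq2seq.AttentionWrapper(tf.nn.rnn_cell.GRUCell(units), attention)

# zero_state must then be requested with batch_size * beam_width.
initial_state = cell.zero_state(batch_size * beam_width, tf.float32)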

Example 6: zero_state

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors [as alias]
def zero_state(self, batch_size, dtype):
    """Return an initial (zero) state tuple for this `AttentionWrapper`.

    Args:
      batch_size: `0D` integer tensor: the batch size.
      dtype: The internal state data type.
    Returns:
      A `TacotronDecoderCellState` tuple containing zeroed-out tensors and,
      possibly, empty `TensorArray` objects.
    Raises:
      ValueError: (or, possibly at runtime, InvalidArgument), if
        `batch_size` does not match the output size of the encoder passed
        to the wrapper object at initialization time.
    """
    with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
        cell_state = self._cell._cell.zero_state(batch_size, dtype)
        error_message = (
            "When calling zero_state of TacotronDecoderCell %s: " % self._base_name +
            "Non-matching batch sizes between the memory "
            "(encoder output) and the requested batch size.")
        with ops.control_dependencies(
                self._batch_size_checks(batch_size, error_message)):
            cell_state = nest.map_structure(
                lambda s: array_ops.identity(s, name="checked_cell_state"),
                cell_state)
        return TacotronDecoderCellState(
            cell_state=cell_state,
            time=array_ops.zeros([], dtype=tf.int32),
            attention=_zero_state_tensors(self._attention_layer_size, batch_size,
                                          dtype),
            alignments=self._attention_mechanism.initial_alignments(batch_size, dtype),
            alignment_history=tensor_array_ops.TensorArray(dtype=dtype, size=0,
                                                           dynamic_size=True),
            finished=tf.reshape(tf.tile([0.0], [batch_size]), [-1, 1]))
Author: rishikksh20 | Project: vae_tacotron2 | Lines: 36 | Source: Architecture_wrappers.py

Example 7: _create_zero_outputs

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors [as alias]
def _create_zero_outputs(size, dtype, batch_size):
  """Create a zero outputs Tensor structure."""
  def _create(s, d):
    return _zero_state_tensors(s, batch_size, d)

  return tf.contrib.framework.nest.map_structure(_create, size, dtype) 
Author: mlperf | Project: training_results_v0.5 | Lines: 8 | Source: decoder.py
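
Because `nest.map_structure` walks two parallel structures, `_create_zero_outputs` handles namedtuple output structures as well as plain sizes. A hypothetical illustration (the `DecoderOutput` namedtuple is invented for this example, mirroring `BasicDecoderOutput`):

import collections
import tensorflow as tf
from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors

DecoderOutput = collections.namedtuple("DecoderOutput", ["rnn_output", "sample_id"])

size = DecoderOutput(rnn_output=256, sample_id=tf.TensorShape([]))
dtype = DecoderOutput(rnn_output=tf.float32, sample_id=tf.int32)

zero_outputs = tf.contrib.framework.nest.map_structure(
    lambda s, d: _zero_state_tensors(s, 16, d), size, dtype)
# zero_outputs.rnn_output: [16, 256] float32 zeros
# zero_outputs.sample_id:  [16] int32 zeros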

Example 8: zero_state

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors [as alias]
def zero_state(self, batch_size, dtype):
    with tf.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
        if self._initial_cell_state is not None:
            cell_state = self._initial_cell_state
        else:
            cell_state = self._cell.zero_state(batch_size, dtype)
        error_message = (
            "zero_state of AttentionWrapper %s: " % self._base_name +
            "Non-matching batch sizes between the memory "
            "(encoder output) and the requested batch size.")
        with tf.control_dependencies(
                [tf.assert_equal(batch_size,
                                 self._attention_mechanism.batch_size,
                                 message=error_message)]):
            cell_state = nest.map_structure(
                lambda s: tf.identity(s, name="checked_cell_state"),
                cell_state)
        alignment_history = ()

        _zero_state_tensors = rnn_cell_impl._zero_state_tensors
        return AttentionWrapperState(
            cell_state=cell_state,
            time=tf.zeros([], dtype=tf.int32),
            attention=_zero_state_tensors(self._attention_size, batch_size,
                                          dtype),
            alignments=self._attention_mechanism.initial_alignments(
                batch_size, dtype),
            alignment_history=alignment_history)
Author: mckinziebrandon | Project: DeepChatModels | Lines: 30 | Source: _rnn.py

Example 9: zero_state

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors [as alias]
def zero_state(self, batch_size, dtype):
    """Return an initial (zero) state tuple for this `AttentionWrapper`.

    Args:
      batch_size: `0D` integer tensor: the batch size.
      dtype: The internal state data type.
    Returns:
      A `TacotronDecoderCellState` tuple containing zeroed-out tensors and,
      possibly, empty `TensorArray` objects.
    Raises:
      ValueError: (or, possibly at runtime, InvalidArgument), if
      `batch_size` does not match the output size of the encoder passed
      to the wrapper object at initialization time.
    """
    with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
      cell_state = self._cell.zero_state(batch_size, dtype)
      error_message = (
        "When calling zero_state of TacotronDecoderCell %s: " % self._base_name +
        "Non-matching batch sizes between the memory "
        "(encoder output) and the requested batch size.")
      with ops.control_dependencies(
        self._batch_size_checks(batch_size, error_message)):
        cell_state = nest.map_structure(
          lambda s: array_ops.identity(s, name="checked_cell_state"),
          cell_state)
      return TacotronDecoderCellState(
        cell_state=cell_state,
        time=array_ops.zeros([], dtype=tf.int32),
        attention=rnn_cell_impl._zero_state_tensors(self._attention_layer_size, batch_size, dtype),
        alignments=self._attention_mechanism.initial_alignments(batch_size, dtype),
        alignment_history=tensor_array_ops.TensorArray(dtype=dtype, size=0,
        dynamic_size=True)) 
Author: youssefsharief | Project: arabic-tacotron-tts | Lines: 34 | Source: rnn_wrappers.py

Example 10: zero_state

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors [as alias]
def zero_state(self, batch_size, dtype):
    """Return an initial (zero) state tuple for this `AttentionWrapper`.

    Args:
      batch_size: `0D` integer tensor: the batch size.
      dtype: The internal state data type.
    Returns:
      A `TacotronDecoderCellState` tuple containing zeroed-out tensors and,
      possibly, empty `TensorArray` objects.
    Raises:
      ValueError: (or, possibly at runtime, InvalidArgument), if
        `batch_size` does not match the output size of the encoder passed
        to the wrapper object at initialization time.
    """
    with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
        cell_state = self._cell._cell.zero_state(batch_size, dtype)
        error_message = (
            "When calling zero_state of TacotronDecoderCell %s: " % self._base_name +
            "Non-matching batch sizes between the memory "
            "(encoder output) and the requested batch size.")
        with ops.control_dependencies(
                self._batch_size_checks(batch_size, error_message)):
            cell_state = nest.map_structure(
                lambda s: array_ops.identity(s, name="checked_cell_state"),
                cell_state)
        return TacotronDecoderCellState(
            cell_state=cell_state,
            time=array_ops.zeros([], dtype=tf.int32),
            attention=_zero_state_tensors(self._attention_layer_size, batch_size,
                                          dtype),
            alignments=self._attention_mechanism.initial_alignments(batch_size, dtype),
            alignment_history=tensor_array_ops.TensorArray(dtype=dtype, size=0,
                                                           dynamic_size=True),
            max_attentions=tf.zeros((batch_size, ), dtype=tf.int32))
Author: Rayhane-mamah | Project: Tacotron-2 | Lines: 36 | Source: Architecture_wrappers.py

Example 11: initial_alignments

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors [as alias]
def initial_alignments(self, batch_size, dtype):
    max_time = self._alignments_size
    return _zero_state_tensors(max_time, batch_size, dtype)
Author: hccho2 | Project: Tacotron-Wavenet-Vocoder-Korean | Lines: 5 | Source: rnn_wrappers.py

Example 12: initial_state

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors [as alias]
def initial_state(self, batch_size, dtype):
    state_size_ = self.state_size
    return _zero_state_tensors(state_size_, batch_size, dtype)
Author: hccho2 | Project: Tacotron-Wavenet-Vocoder-Korean | Lines: 5 | Source: rnn_wrappers.py

Example 13: zero_state

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors [as alias]
def zero_state(self, batch_size, dtype):
    with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
        if self._initial_cell_state is not None:
            cell_state = self._initial_cell_state
        else:
            cell_state = self._cell.zero_state(batch_size, dtype)
        error_message = (
            "When calling zero_state of AttentionWrapper %s: " % self._base_name +
            "Non-matching batch sizes between the memory "
            "(encoder output) and the requested batch size.  Are you using "
            "the BeamSearchDecoder?  If so, make sure your encoder output has "
            "been tiled to beam_width via tf.contrib.seq2seq.tile_batch, and "
            "the batch_size= argument passed to zero_state is "
            "batch_size * beam_width.")
        with ops.control_dependencies(
                self._batch_size_checks(batch_size, error_message)):
            cell_state = nest.map_structure(
                lambda s: array_ops.identity(s, name="checked_cell_state"),
                cell_state)

        state = PointerWrapperState(
            cell_state=cell_state,
            time=array_ops.zeros([], dtype=dtypes.int32),
            attention=_zero_state_tensors(self._attention_layer_size,
                                          batch_size, dtype),
            alignment_history=self._item_or_tuple(
                tensor_array_ops.TensorArray(dtype=dtype, size=0,
                                             dynamic_size=True)
                if self._alignment_history else ()
                for _ in self._attention_mechanisms),
            p_gen_history=self._item_or_tuple(
                tensor_array_ops.TensorArray(dtype=dtype, size=0,
                                             dynamic_size=True)
                for _ in self._attention_mechanisms),
            vocab_dist_history=tensor_array_ops.TensorArray(
                dtype=tf.float32, size=0, dynamic_size=True),
            copy_dist_history=tensor_array_ops.TensorArray(
                dtype=tf.float32, size=0, dynamic_size=True),
            final_dist_history=tensor_array_ops.TensorArray(
                dtype=tf.float32, size=0, dynamic_size=True))

        return state
Author: ZhangShiyue | Project: QGforQA | Lines: 43 | Source: hierarchical_ptr_wrapper.py

Example 14: zero_state

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors [as alias]
def zero_state(self, batch_size, dtype):
    with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
      if self._initial_cell_state is not None:
        cell_state = self._initial_cell_state
      else:
        cell_state = self._cell.zero_state(batch_size, dtype)
      error_message = (
          "When calling zero_state of AttentionWrapper %s: " % self._base_name +
          "Non-matching batch sizes between the memory "
          "(encoder output) and the requested batch size.  Are you using "
          "the BeamSearchDecoder?  If so, make sure your encoder output has "
          "been tiled to beam_width via tf.contrib.seq2seq.tile_batch, and "
          "the batch_size= argument passed to zero_state is "
          "batch_size * beam_width.")
      with ops.control_dependencies(
          self._batch_size_checks(batch_size, error_message)):
        cell_state = nest.map_structure(
            lambda s: array_ops.identity(s, name="checked_cell_state"),
            cell_state)
      return AttentionWrapperState(
          cell_state=cell_state,
          time=array_ops.zeros([], dtype=dtypes.int32),
          attention=_zero_state_tensors(self._attention_layer_size, batch_size,
                                        dtype),
          alignments=self._item_or_tuple(
              attention_mechanism.initial_alignments(batch_size, dtype)
              for attention_mechanism in self._attention_mechanisms),
          alignment_history=self._item_or_tuple(
              tensor_array_ops.TensorArray(dtype=dtype, size=0,
                                           dynamic_size=True)
              if self._alignment_history else ()
              for _ in self._attention_mechanisms)) 
Author: ZhangShiyue | Project: QGforQA | Lines: 34 | Source: attention_wrapper.py

Example 15: _create_zero_outputs

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors [as alias]
def _create_zero_outputs(size, dtype, batch_size):
    """Create a zero outputs Tensor structure."""

    def _create(s, d):
        return _zero_state_tensors(s, batch_size, d)

    return nest.map_structure(_create, size, dtype) 
Author: microsoft | Project: icecaps | Lines: 9 | Source: dynamic_decoder_custom.py


Note: The tensorflow.python.ops.rnn_cell_impl._zero_state_tensors method examples in this article were collected by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from community-contributed open-source projects; copyright of the source code belongs to the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce without permission.