

Python rnn.dynamic_rnn Method Code Examples

This article collects typical usage examples of the Python method tensorflow.python.ops.rnn.dynamic_rnn. If you are wondering what rnn.dynamic_rnn does, how to call it, or what real usage looks like, the curated examples below may help. You can also explore other usage examples from the tensorflow.python.ops.rnn module.


Below are 15 code examples of the rnn.dynamic_rnn method, sorted by popularity by default.
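
Before diving into the examples, here is a minimal, self-contained sketch of the basic call pattern (a hedged illustration assuming TensorFlow 1.x; the cell type, shapes, and sizes are placeholders rather than values taken from any example below):

import tensorflow as tf
from tensorflow.python.ops import rnn

# Batch-major inputs: [batch_size, max_time, input_depth].
inputs = tf.random_normal([32, 10, 8])
cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=16)
# dtype must be supplied when no initial_state is given.
outputs, final_state = rnn.dynamic_rnn(cell, inputs, dtype=tf.float32)
# outputs: [32, 10, 16]; final_state: an LSTMStateTuple of [32, 16] tensors.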

Example 1: testBuildAndTrain

# Required import: from tensorflow.python.ops import rnn [as alias]
# Or: from tensorflow.python.ops.rnn import dynamic_rnn [as alias]
def testBuildAndTrain(self):
    inputs = tf.random_normal([TIME_STEPS, BATCH_SIZE, INPUT_SIZE])

    output, _ = rnn.dynamic_rnn(
        cell=self.module,
        inputs=inputs,
        initial_state=self.initial_state,
        time_major=True)

    targets = np.random.rand(TIME_STEPS, BATCH_SIZE, NUM_READS, WORD_SIZE)
    loss = tf.reduce_mean(tf.square(output - targets))
    train_op = tf.train.GradientDescentOptimizer(1).minimize(loss)
    init = tf.global_variables_initializer()

    with self.test_session():
      init.run()
      train_op.run() 
Developer: deepmind | Project: dnc | Lines of code: 19 | Source file: access_test.py

Example 2: _build_model_op

# Required import: from tensorflow.python.ops import rnn [as alias]
# Or: from tensorflow.python.ops.rnn import dynamic_rnn [as alias]
def _build_model_op(self):
        with tf.variable_scope("densely_connected_bi_rnn"):
            dense_bi_rnn = DenselyConnectedBiRNN(self.cfg["num_layers"], self.cfg["num_units_list"],
                                                 cell_type=self.cfg["cell_type"])
            context = dense_bi_rnn(self.word_emb, seq_len=self.seq_len)
            print("densely connected bi_rnn output shape: {}".format(context.get_shape().as_list()))

        with tf.variable_scope("attention"):
            p_context = tf.layers.dense(context, units=2 * self.cfg["num_units_list"][-1], use_bias=True,
                                        bias_initializer=tf.constant_initializer(0.0))
            context = tf.transpose(context, [1, 0, 2])
            p_context = tf.transpose(p_context, [1, 0, 2])
            attn_cell = AttentionCell(self.cfg["num_units_list"][-1], context, p_context)
            attn_outs, _ = dynamic_rnn(attn_cell, context[1:, :, :], sequence_length=self.seq_len - 1, dtype=tf.float32,
                                       time_major=True)
            attn_outs = tf.transpose(attn_outs, [1, 0, 2])
            print("attention output shape: {}".format(attn_outs.get_shape().as_list()))

        with tf.variable_scope("project"):
            self.logits = tf.layers.dense(attn_outs, units=self.tag_vocab_size, use_bias=True,
                                          bias_initializer=tf.constant_initializer(0.0))
            print("logits shape: {}".format(self.logits.get_shape().as_list())) 
Developer: IsaacChanghau | Project: neural_sequence_labeling | Lines of code: 24 | Source file: punct_attentive_model.py

Example 3: testDebugTrainingDynamicRNNWorks

# Required import: from tensorflow.python.ops import rnn [as alias]
# Or: from tensorflow.python.ops.rnn import dynamic_rnn [as alias]
def testDebugTrainingDynamicRNNWorks(self):
    with session.Session() as sess:
      input_size = 3
      state_size = 2
      time_steps = 4
      batch_size = 2

      input_values = np.random.randn(time_steps, batch_size, input_size)
      sequence_length = np.random.randint(0, time_steps, size=batch_size)
      concat_inputs = array_ops.placeholder(
          dtypes.float32, shape=(time_steps, batch_size, input_size))

      outputs_dynamic, _ = rnn.dynamic_rnn(
          _RNNCellForTest(input_size, state_size),
          inputs=concat_inputs,
          sequence_length=sequence_length,
          time_major=True,
          dtype=dtypes.float32)
      toy_loss = math_ops.reduce_sum(outputs_dynamic * outputs_dynamic)
      train_op = gradient_descent.GradientDescentOptimizer(
          learning_rate=0.1).minimize(toy_loss, name="train_op")

      sess.run(variables.global_variables_initializer())

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph_with_blacklists(
          run_options,
          sess.graph,
          node_name_regex_blacklist="(.*rnn/while/.*|.*TensorArray.*)",
          debug_urls=self._debug_urls())
      # b/36870549: Nodes with these name patterns need to be excluded from
      # tfdbg in order to prevent MSAN warnings of uninitialized Tensors
      # under both file:// and grpc:// debug URL schemes.

      run_metadata = config_pb2.RunMetadata()
      sess.run(train_op, feed_dict={concat_inputs: input_values},
               options=run_options, run_metadata=run_metadata)

      debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs) 
Developer: ryfeus | Project: lambda-packs | Lines of code: 42 | Source file: session_debug_testlib.py

Example 4: __call__

# Required import: from tensorflow.python.ops import rnn [as alias]
# Or: from tensorflow.python.ops.rnn import dynamic_rnn [as alias]
def __call__(self,
               inputs,
               initial_state=None,
               dtype=None,
               sequence_length=None,
               scope=None):
    is_list = isinstance(inputs, list)
    if self._use_dynamic_rnn:
      if is_list:
        inputs = array_ops.stack(inputs)
      outputs, state = rnn.dynamic_rnn(
          self._cell,
          inputs,
          sequence_length=sequence_length,
          initial_state=initial_state,
          dtype=dtype,
          time_major=True,
          scope=scope)
      if is_list:
        # Convert outputs back to list
        outputs = array_ops.unstack(outputs)
    else:  # non-dynamic rnn
      if not is_list:
        inputs = array_ops.unstack(inputs)
      outputs, state = rnn.static_rnn(
          self._cell,
          inputs,
          initial_state=initial_state,
          dtype=dtype,
          sequence_length=sequence_length,
          scope=scope)
      if not is_list:
        # Convert outputs back to tensor
        outputs = array_ops.stack(outputs)

    return outputs, state 
Developer: ryfeus | Project: lambda-packs | Lines of code: 38 | Source file: fused_rnn_cell.py
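
This __call__ wraps an ordinary cell behind a single time-major interface that dispatches to either dynamic_rnn or static_rnn. A hedged usage sketch, assuming the enclosing class is tf.contrib.rnn's FusedRNNCellAdaptor (the constructor arguments below are an assumption based on that class, not shown in the snippet):

import tensorflow as tf

cell = tf.nn.rnn_cell.BasicLSTMCell(16)
fused = tf.contrib.rnn.FusedRNNCellAdaptor(cell, use_dynamic_rnn=True)
inputs = tf.random_normal([10, 32, 8])  # time-major: [max_time, batch_size, depth]
outputs, state = fused(inputs, dtype=tf.float32)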

Example 5: crf_log_norm

# Required import: from tensorflow.python.ops import rnn [as alias]
# Or: from tensorflow.python.ops.rnn import dynamic_rnn [as alias]
def crf_log_norm(inputs, sequence_lengths, transition_params):
  """Computes the normalization for a CRF.

  Args:
    inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials
        to use as input to the CRF layer.
    sequence_lengths: A [batch_size] vector of true sequence lengths.
    transition_params: A [num_tags, num_tags] transition matrix.
  Returns:
    log_norm: A [batch_size] vector of normalizers for a CRF.
  """
  # Split up the first and rest of the inputs in preparation for the forward
  # algorithm.
  first_input = array_ops.slice(inputs, [0, 0, 0], [-1, 1, -1])
  first_input = array_ops.squeeze(first_input, [1])
  rest_of_input = array_ops.slice(inputs, [0, 1, 0], [-1, -1, -1])

  # Compute the alpha values in the forward algorithm in order to get the
  # partition function.
  forward_cell = CrfForwardRnnCell(transition_params)
  _, alphas = rnn.dynamic_rnn(
      cell=forward_cell,
      inputs=rest_of_input,
      sequence_length=sequence_lengths - 1,
      initial_state=first_input,
      dtype=dtypes.float32)
  log_norm = math_ops.reduce_logsumexp(alphas, [1])
  return log_norm 
Developer: ryfeus | Project: lambda-packs | Lines of code: 30 | Source file: crf.py
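
crf_log_norm computes the log partition function, which is only half of the CRF training objective. A hedged sketch of how it is typically combined with a per-sequence score to form the log-likelihood (crf_sequence_score is the companion function defined in the same crf.py; the exact wiring here is illustrative):

# log p(tags | inputs) = score(inputs, tags) - log Z(inputs)
sequence_scores = crf_sequence_score(inputs, tag_indices, sequence_lengths,
                                     transition_params)
log_norm = crf_log_norm(inputs, sequence_lengths, transition_params)
log_likelihood = sequence_scores - log_norm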

Example 6: ndlstm_base_dynamic

# Required import: from tensorflow.python.ops import rnn [as alias]
# Or: from tensorflow.python.ops.rnn import dynamic_rnn [as alias]
def ndlstm_base_dynamic(inputs, noutput, scope=None, reverse=False):
  """Run an LSTM, either forward or backward.

  This is a 1D LSTM implementation using dynamic_rnn and
  the TensorFlow LSTM op.

  Args:
    inputs: input sequence (length, batch_size, ninput)
    noutput: depth of output
    scope: optional scope name
    reverse: run LSTM in reverse

  Returns:
    Output sequence (length, batch_size, noutput)
  """
  with variable_scope.variable_scope(scope, "SeqLstm", [inputs]):
    # TODO(tmb) make batch size, sequence_length dynamic
    # example: sequence_length = tf.shape(inputs)[0]
    _, batch_size, _ = _shape(inputs)
    lstm_cell = rnn_cell.BasicLSTMCell(noutput, state_is_tuple=False)
    state = array_ops.zeros([batch_size, lstm_cell.state_size])
    sequence_length = int(inputs.get_shape()[0])
    sequence_lengths = math_ops.to_int64(
        array_ops.fill([batch_size], sequence_length))
    if reverse:
      inputs = array_ops.reverse_v2(inputs, [0])
    outputs, _ = rnn.dynamic_rnn(
        lstm_cell, inputs, sequence_lengths, state, time_major=True)
    if reverse:
      outputs = array_ops.reverse_v2(outputs, [0])
    return outputs 
Developer: ryfeus | Project: lambda-packs | Lines of code: 33 | Source file: lstm1d.py
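
A hypothetical call, assuming the module's private helpers (_shape and the imports used above) are in scope; the shapes are illustrative:

inputs = tf.random_normal([20, 4, 8])   # time-major: (length, batch_size, ninput)
outputs = ndlstm_base_dynamic(inputs, noutput=16)
# outputs: (20, 4, 16) -- same length and batch_size, depth noutput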

Example 7: ndlstm_base

# Required import: from tensorflow.python.ops import rnn [as alias]
# Or: from tensorflow.python.ops.rnn import dynamic_rnn [as alias]
def ndlstm_base(inputs, noutput, scope=None, reverse=False, dynamic=True):
  """Implements a 1D LSTM, either forward or backward.

  This is a base case for multidimensional LSTM implementations, which
  tend to be used differently from sequence-to-sequence
  implementations.  For general 1D sequence to sequence
  transformations, you may want to consider another implementation
  from TF slim.

  Args:
    inputs: input sequence (length, batch_size, ninput)
    noutput: depth of output
    scope: optional scope name
    reverse: run LSTM in reverse
    dynamic: use dynamic_rnn

  Returns:
    Output sequence (length, batch_size, noutput)

  """
  # TODO(tmb) maybe add option for other LSTM implementations, like
  # slim.rnn.basic_lstm_cell
  if dynamic:
    return ndlstm_base_dynamic(inputs, noutput, scope=scope, reverse=reverse)
  else:
    return ndlstm_base_unrolled(inputs, noutput, scope=scope, reverse=reverse) 
Developer: ryfeus | Project: lambda-packs | Lines of code: 28 | Source file: lstm1d.py

Example 8: __call__

# Required import: from tensorflow.python.ops import rnn [as alias]
# Or: from tensorflow.python.ops.rnn import dynamic_rnn [as alias]
def __call__(self,
               inputs,
               initial_state=None,
               dtype=None,
               sequence_length=None,
               scope=None):
    is_list = isinstance(inputs, list)
    if self._use_dynamic_rnn:
      if is_list:
        inputs = array_ops.stack(inputs)
      outputs, state = rnn.dynamic_rnn(
          self._cell,
          inputs,
          sequence_length=sequence_length,
          initial_state=initial_state,
          dtype=dtype,
          time_major=True,
          scope=scope)
      if is_list:
        # Convert outputs back to list
        outputs = array_ops.unstack(outputs)
    else:  # non-dynamic rnn
      if not is_list:
        inputs = array_ops.unstack(inputs)
      outputs, state = contrib_rnn.static_rnn(self._cell,
                                              inputs,
                                              initial_state=initial_state,
                                              dtype=dtype,
                                              sequence_length=sequence_length,
                                              scope=scope)
      if not is_list:
        # Convert outputs back to tensor
        outputs = array_ops.stack(outputs)

    return outputs, state 
Developer: abhisuri97 | Project: auto-alt-text-lambda-api | Lines of code: 37 | Source file: fused_rnn_cell.py

Example 9: ndlstm_base_dynamic

# Required import: from tensorflow.python.ops import rnn [as alias]
# Or: from tensorflow.python.ops.rnn import dynamic_rnn [as alias]
def ndlstm_base_dynamic(inputs, noutput, scope=None, reverse=False):
  """Run an LSTM, either forward or backward.

  This is a 1D LSTM implementation using dynamic_rnn and
  the TensorFlow LSTM op.

  Args:
    inputs: input sequence (length, batch_size, ninput)
    noutput: depth of output
    scope: optional scope name
    reverse: run LSTM in reverse

  Returns:
    Output sequence (length, batch_size, noutput)
  """
  with variable_scope.variable_scope(scope, "SeqLstm", [inputs]):
    # TODO(tmb) make batch size, sequence_length dynamic
    # example: sequence_length = tf.shape(inputs)[0]
    _, batch_size, _ = _shape(inputs)
    lstm_cell = core_rnn_cell_impl.BasicLSTMCell(noutput, state_is_tuple=False)
    state = array_ops.zeros([batch_size, lstm_cell.state_size])
    sequence_length = int(inputs.get_shape()[0])
    sequence_lengths = math_ops.to_int64(
        array_ops.fill([batch_size], sequence_length))
    if reverse:
      inputs = array_ops.reverse_v2(inputs, [0])
    outputs, _ = rnn.dynamic_rnn(
        lstm_cell, inputs, sequence_lengths, state, time_major=True)
    if reverse:
      outputs = array_ops.reverse_v2(outputs, [0])
    return outputs 
Developer: abhisuri97 | Project: auto-alt-text-lambda-api | Lines of code: 33 | Source file: lstm1d.py

Example 10: __call__

# Required import: from tensorflow.python.ops import rnn [as alias]
# Or: from tensorflow.python.ops.rnn import dynamic_rnn [as alias]
def __call__(self, inputs, seq_len=None):
        if self.call_cnt == 0:
            self.cell = LSTMCell(self.output_dim, initializer=self.initializer(dtype=inputs.dtype))
        
        with tf.variable_scope(self.scope) as scope:
            #self.check_reuse(scope)
            #if self.call_cnt ==0:
                #self.cell = LSTMCell(self.output_dim,initializer = self.initializer)
                #cell = BasicLSTMCell(self.output_dim)
print(scope.reuse)
            rnn.dynamic_rnn(self.cell, inputs, seq_len, dtype=inputs.dtype)
            print(scope.reuse)
            return rnn.dynamic_rnn(self.cell, inputs, seq_len, dtype=inputs.dtype)
            
            #return rnn.static_rnn(self.cell,inputs.as_list(),dtype = inputs.dtype) 
Developer: sanmusunrise | Project: NPNs | Lines of code: 17 | Source file: test.py

Example 11: __call__

# Required import: from tensorflow.python.ops import rnn [as alias]
# Or: from tensorflow.python.ops.rnn import dynamic_rnn [as alias]
def __call__(self, inputs, seq_len=None):
        if self.call_cnt == 0:
            self.cell = LSTMCell(self.output_dim, initializer=self.initializer(dtype=inputs.dtype))
        
        with tf.variable_scope(self.scope) as scope:
            self.check_reuse(scope)
            #if self.call_cnt ==0:
                #self.cell = LSTMCell(self.output_dim,initializer = self.initializer)
                #cell = BasicLSTMCell(self.output_dim)
return rnn.dynamic_rnn(self.cell, inputs, seq_len, dtype=inputs.dtype)
            
            #return rnn.static_rnn(self.cell,inputs.as_list(),dtype = inputs.dtype) 
Developer: sanmusunrise | Project: NPNs | Lines of code: 14 | Source file: LSTMLayer.py

Example 12: LSTM_Model

# Required import: from tensorflow.python.ops import rnn [as alias]
# Or: from tensorflow.python.ops.rnn import dynamic_rnn [as alias]
def LSTM_Model():
        """
        :param x: inputs of size [T, batch_size, input_size]
        :param W: matrix of fully-connected output layer weights
        :param b: vector of fully-connected output layer biases
        """
        cell = rnn_cell.BasicLSTMCell(hidden_dim)
        outputs, states = rnn.dynamic_rnn(cell, x, dtype=tf.float32)
        num_examples = tf.shape(x)[0]
        W_repeated = tf.tile(tf.expand_dims(W_out, 0), [num_examples, 1, 1])
        out = tf.matmul(outputs, W_repeated) + b_out
        out = tf.squeeze(out)
        return out 
Developer: PacktPublishing | Project: Deep-Learning-with-TensorFlow-Second-Edition | Lines of code: 15 | Source file: TimeSeriesPredictor.py
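
Note that LSTM_Model reads x, W_out, b_out, and hidden_dim from its enclosing scope rather than taking parameters. A hedged sketch of the surrounding setup (the dimensions below are illustrative assumptions, not values from the book's project):

import tensorflow as tf

hidden_dim = 100
seq_size, input_dim = 10, 1   # assumed problem dimensions
x = tf.placeholder(tf.float32, [None, seq_size, input_dim])
W_out = tf.Variable(tf.random_normal([hidden_dim, 1]), name="W_out")
b_out = tf.Variable(tf.random_normal([1]), name="b_out")
out = LSTM_Model()            # -> [batch_size, seq_size] after the squeeze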

Example 13: crf_log_norm

# Required import: from tensorflow.python.ops import rnn [as alias]
# Or: from tensorflow.python.ops.rnn import dynamic_rnn [as alias]
def crf_log_norm(inputs, sequence_lengths, transition_params):
  """Computes the normalization for a CRF.

  Args:
    inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials
        to use as input to the CRF layer.
    sequence_lengths: A [batch_size] vector of true sequence lengths.
    transition_params: A [num_tags, num_tags] transition matrix.
  Returns:
    log_norm: A [batch_size] vector of normalizers for a CRF.
  """
  # Split up the first and rest of the inputs in preparation for the forward
  # algorithm.
  first_input = array_ops.slice(inputs, [0, 0, 0], [-1, 1, -1])
  first_input = array_ops.squeeze(first_input, [1])
  rest_of_input = array_ops.slice(inputs, [0, 1, 0], [-1, -1, -1])

  # Compute the alpha values in the forward algorithm in order to get the
  # partition function.
  forward_cell = CrfForwardRnnCell(transition_params)
  _, alphas = rnn.dynamic_rnn(
      cell=forward_cell,
      inputs=rest_of_input,
      sequence_length=sequence_lengths - 1,
      initial_state=first_input,
      dtype=dtypes.float32)
  log_norm = math_ops.reduce_logsumexp(alphas, [1])
  return log_norm

# log-likelihood
Developer: koala-ai | Project: tensorflow_nlp | Lines of code: 32 | Source file: crf.py

Example 14: __call__

# Required import: from tensorflow.python.ops import rnn [as alias]
# Or: from tensorflow.python.ops.rnn import dynamic_rnn [as alias]
def __call__(self,
               inputs,
               initial_state=None,
               dtype=None,
               sequence_length=None,
               scope=None):
    is_list = isinstance(inputs, list)
    if self._use_dynamic_rnn:
      if is_list:
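        # pack/unpack (and rnn.rnn below) are pre-TF-1.0 names for stack/unstack and static_rnn.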
        inputs = array_ops.pack(inputs)
      outputs, state = rnn.dynamic_rnn(
          self._cell,
          inputs,
          sequence_length=sequence_length,
          initial_state=initial_state,
          dtype=dtype,
          time_major=True,
          scope=scope)
      if is_list:
        # Convert outputs back to list
        outputs = array_ops.unpack(outputs)
    else:  # non-dynamic rnn
      if not is_list:
        inputs = array_ops.unpack(inputs)
      outputs, state = rnn.rnn(self._cell,
                               inputs,
                               initial_state=initial_state,
                               dtype=dtype,
                               sequence_length=sequence_length,
                               scope=scope)
      if not is_list:
        # Convert outputs back to tensor
        outputs = array_ops.pack(outputs)

    return outputs, state 
Developer: tobegit3hub | Project: deep_image_model | Lines of code: 37 | Source file: fused_rnn_cell.py

Example 15: _construct_rnn

# Required import: from tensorflow.python.ops import rnn [as alias]
# Or: from tensorflow.python.ops.rnn import dynamic_rnn [as alias]
def _construct_rnn(self, initial_state, sequence_input):
    """Apply an RNN to `features`.

    The `features` dict must contain `self._inputs_key`, and the corresponding
    input should be a `Tensor` of shape `[batch_size, padded_length, k]`
    where `k` is the dimension of the input for each element of a sequence.

    `activations` has shape `[batch_size, sequence_length, n]` where `n` is
    `self._target_column.num_label_columns`. In the case of a multiclass
    classifier, `n` is the number of classes.

    `final_state` has shape determined by `self._cell` and its dtype must match
    `self._dtype`.

    Args:
initial_state: the initial state to pass to the RNN. If `None`, the
        default starting state for `self._cell` is used.
      sequence_input: a `Tensor` with shape `[batch_size, padded_length, d]`
        that will be passed as input to the RNN.

    Returns:
      activations: the output of the RNN, projected to the appropriate number of
        dimensions.
      final_state: the final state output by the RNN.
    """
    with ops.name_scope('RNN'):
      rnn_outputs, final_state = rnn.dynamic_rnn(
          cell=self._cell,
          inputs=sequence_input,
          initial_state=initial_state,
          dtype=self._dtype,
          parallel_iterations=self._parallel_iterations,
          swap_memory=self._swap_memory,
          time_major=False)
      activations = layers.fully_connected(
          inputs=rnn_outputs,
          num_outputs=self._target_column.num_label_columns,
          activation_fn=None,
          trainable=True)
      return activations, final_state 
Developer: tobegit3hub | Project: deep_image_model | Lines of code: 42 | Source file: dynamic_rnn_estimator.py


Note: The tensorflow.python.ops.rnn.dynamic_rnn examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers; copyright remains with the original authors. Please consult each project's license before distributing or using the code, and do not reproduce this article without permission.