

Python rnn.rnn Function Code Examples

This article collects typical usage examples of the Python function tensorflow.models.rnn.rnn.rnn. If you have been struggling with questions such as what exactly rnn does, how to call it, or what real uses of rnn look like, the hand-picked code examples below should help.


The sections below present 15 code examples of the rnn function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
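
Before the examples, here is a minimal sketch of the typical calling pattern. It assumes the legacy TensorFlow 0.x module layout (tensorflow.models.rnn); the sizes n_steps, batch_size, input_size, and n_hidden are illustrative and not taken from any example below:

import tensorflow as tf
from tensorflow.models.rnn import rnn, rnn_cell

n_steps, batch_size, input_size, n_hidden = 10, 32, 8, 64  # illustrative sizes

# rnn.rnn expects a Python list with one (batch_size, input_size) tensor per time step.
inputs = [tf.placeholder(tf.float32, shape=(batch_size, input_size))
          for _ in range(n_steps)]
cell = rnn_cell.BasicLSTMCell(n_hidden)

# Returns one output tensor per time step plus the cell states;
# several examples below read the final state as states[-1].
outputs, states = rnn.rnn(cell, inputs, dtype=tf.float32)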

Example 1: testSharingWeightsWithReuse

  def testSharingWeightsWithReuse(self):
    num_units = 3
    input_size = 5
    batch_size = 2
    num_proj = 4
    with self.test_session(graph=tf.Graph()) as sess:
      initializer = tf.random_uniform_initializer(-1, 1, seed=self._seed)
      inputs = 10 * [
          tf.placeholder(tf.float32, shape=(None, input_size))]
      cell = rnn_cell.LSTMCell(
          num_units, input_size, use_peepholes=True,
          num_proj=num_proj, initializer=initializer)

      with tf.variable_scope("share_scope"):
        outputs0, _ = rnn.rnn(cell, inputs, dtype=tf.float32)
      with tf.variable_scope("share_scope", reuse=True):
        outputs1, _ = rnn.rnn(cell, inputs, dtype=tf.float32)
      with tf.variable_scope("diff_scope"):
        outputs2, _ = rnn.rnn(cell, inputs, dtype=tf.float32)

      tf.initialize_all_variables().run()
      input_value = np.random.randn(batch_size, input_size)
      output_values = sess.run(
          outputs0 + outputs1 + outputs2, feed_dict={inputs[0]: input_value})
      outputs0_values = output_values[:10]
      outputs1_values = output_values[10:20]
      outputs2_values = output_values[20:]
      self.assertEqual(len(outputs0_values), len(outputs1_values))
      self.assertEqual(len(outputs0_values), len(outputs2_values))
      for o1, o2, o3 in zip(outputs0_values, outputs1_values, outputs2_values):
        # Same weights used by both RNNs so outputs should be the same.
        self.assertAllEqual(o1, o2)
        # Different weights used so outputs should be different.
        self.assertTrue(np.linalg.norm(o1-o3) > 1e-6)
Developer: adam-erickson, Project: tensorflow, Lines: 34, Source: rnn_test.py

Example 2: testDropout

  def testDropout(self):
    cell = Plus1RNNCell()
    full_dropout_cell = rnn_cell.DropoutWrapper(
        cell, input_keep_prob=1e-12, seed=0)
    batch_size = 2
    inputs = [tf.placeholder(tf.float32, shape=(batch_size, 5))] * 10
    with tf.variable_scope("share_scope"):
      outputs, states = rnn.rnn(cell, inputs, dtype=tf.float32)
    with tf.variable_scope("drop_scope"):
      dropped_outputs, _ = rnn.rnn(full_dropout_cell, inputs, dtype=tf.float32)
    self.assertEqual(len(outputs), len(inputs))
    for out, inp in zip(outputs, inputs):
      self.assertEqual(out.get_shape().as_list(), inp.get_shape().as_list())
      self.assertEqual(out.dtype, inp.dtype)

    with self.test_session(use_gpu=False) as sess:
      input_value = np.random.randn(batch_size, 5)
      values = sess.run(outputs + [states[-1]],
                        feed_dict={inputs[0]: input_value})
      full_dropout_values = sess.run(dropped_outputs,
                                     feed_dict={inputs[0]: input_value})

      for v in values[:-1]:
        self.assertAllClose(v, input_value + 1.0)
      for d_v in full_dropout_values[:-1]:  # Add 1.0 to dropped_out (all zeros)
        self.assertAllClose(d_v, np.ones_like(input_value))
Developer: adam-erickson, Project: tensorflow, Lines: 26, Source: rnn_test.py
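
The test above pushes input_keep_prob to nearly zero so the dropped outputs become deterministic (inputs zeroed, then incremented by Plus1RNNCell). In ordinary training the wrapper takes a moderate keep probability; a minimal sketch under the same legacy layout, with illustrative sizes:

import tensorflow as tf
from tensorflow.models.rnn import rnn, rnn_cell

batch_size, input_size, n_hidden = 32, 8, 64  # illustrative sizes

inputs = [tf.placeholder(tf.float32, shape=(batch_size, input_size))
          for _ in range(10)]
# Keep 80% of input and output activations during training.
cell = rnn_cell.DropoutWrapper(rnn_cell.BasicLSTMCell(n_hidden),
                               input_keep_prob=0.8, output_keep_prob=0.8)
outputs, states = rnn.rnn(cell, inputs, dtype=tf.float32)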

Example 3: testSharingWeightsWithDifferentNamescope

  def testSharingWeightsWithDifferentNamescope(self):
    num_units = 3
    input_size = 5
    batch_size = 2
    num_proj = 4
    with self.test_session(graph=tf.Graph()) as sess:
      initializer = tf.random_uniform_initializer(-1, 1, seed=self._seed)
      inputs = 10 * [
          tf.placeholder(tf.float32, shape=(None, input_size))]
      cell = rnn_cell.LSTMCell(
          num_units, input_size, use_peepholes=True,
          num_proj=num_proj, initializer=initializer)

      with tf.name_scope("scope0"):
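        # Weight sharing is keyed on variable_scope, not name_scope: "share_scope"
        # here and under "scope1" below resolves to the same variables.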
        with tf.variable_scope("share_scope"):
          outputs0, _ = rnn.rnn(cell, inputs, dtype=tf.float32)
      with tf.name_scope("scope1"):
        with tf.variable_scope("share_scope", reuse=True):
          outputs1, _ = rnn.rnn(cell, inputs, dtype=tf.float32)

      tf.initialize_all_variables().run()
      input_value = np.random.randn(batch_size, input_size)
      output_values = sess.run(
          outputs0 + outputs1, feed_dict={inputs[0]: input_value})
      outputs0_values = output_values[:10]
      outputs1_values = output_values[10:]
      self.assertEqual(len(outputs0_values), len(outputs1_values))
      for out0, out1 in zip(outputs0_values, outputs1_values):
        self.assertAllEqual(out0, out1)
Developer: adam-erickson, Project: tensorflow, Lines: 29, Source: rnn_test.py

Example 4: rnn_estimator

 def rnn_estimator(X, y):
     """RNN estimator with target predictor function on top."""
     X = input_op_fn(X)
     if cell_type == 'rnn':
         cell_fn = rnn_cell.BasicRNNCell
     elif cell_type == 'gru':
         cell_fn = rnn_cell.GRUCell
     elif cell_type == 'lstm':
         cell_fn = rnn_cell.BasicLSTMCell
     else:
         raise ValueError("cell_type {} is not supported. ".format(cell_type))
     if bidirection:
         # forward direction cell
         rnn_fw_cell = rnn_cell.MultiRNNCell([cell_fn(rnn_size)] * num_layers)
         # backward direction cell
         rnn_bw_cell = rnn_cell.MultiRNNCell([cell_fn(rnn_size)] * num_layers)
         # pylint: disable=unexpected-keyword-arg, no-value-for-parameter
         # The inputs X must be passed explicitly; bidirectional_rnn takes
         # separate initial states for the forward and backward cells.
         encoding = rnn.bidirectional_rnn(rnn_fw_cell, rnn_bw_cell, X,
                                          dtype=tf.float32,
                                          sequence_length=sequence_length,
                                          initial_state_fw=initial_state,
                                          initial_state_bw=initial_state)
     else:
         cell = rnn_cell.MultiRNNCell([cell_fn(rnn_size)] * num_layers)
         _, encoding = rnn.rnn(cell, X, dtype=tf.float32,
                               sequence_length=sequence_length,
                               initial_state=initial_state)
     return target_predictor_fn(encoding[-1], y)
Developer: Rmanso, Project: skflow, Lines: 26, Source: models.py
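
Several examples on this page stack layers with MultiRNNCell, as example 4 does. A minimal sketch of the stacking idiom under the same legacy layout (sizes illustrative):

import tensorflow as tf
from tensorflow.models.rnn import rnn, rnn_cell

batch_size, input_size, n_hidden, num_layers = 32, 8, 64, 2  # illustrative

inputs = [tf.placeholder(tf.float32, shape=(batch_size, input_size))
          for _ in range(10)]
# List multiplication instantiates one cell definition per layer.
cell = rnn_cell.MultiRNNCell([rnn_cell.GRUCell(n_hidden)] * num_layers)
outputs, states = rnn.rnn(cell, inputs, dtype=tf.float32)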

Example 5: RNN

def RNN(_X, _istate, _weights, _biases):

    # input shape: (batch_size, n_steps, n_input)
    _X = tf.transpose(_X, [1, 0, 2])  # permute n_steps and batch_size => (n_steps, batch_size, n_input)
    # Reshape to prepare input to hidden activation
    _X = tf.reshape(_X, [-1, n_input]) # (n_steps*batch_size, n_input): 28*128 row vectors with 28 features each
    # Linear activation
    _X = tf.matmul(_X, _weights['hidden']) + _biases['hidden'] # (n_steps*batch_size=28*128, n_hidden=128)

    # Define a lstm cell with tensorflow
    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)

    #lstm_cell_drop = rnn_cell.DropoutWrapper(lstm_cell)

    #multi_cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * 2)

    # Split data because rnn cell needs a list of inputs for the RNN inner loop
    _X = tf.split(0, n_steps, _X) # n_steps * (batch_size, n_hidden) => step1 (batch_size=128,n_hidden=128)..step28 (batch_size=128,n_hidden=128)
    # It means that RNN receives list with element (batch_size,n_hidden) for each time step

    # Get lstm cell output
    outputs, states = rnn.rnn(lstm_cell, _X, initial_state=_istate)
    # outputs is a list with one (batch_size, n_hidden) tensor per time step
    #for output in outputs:
    #    print(output)
    #exit(0)

    # Linear activation
    # Get inner loop last output
    return tf.matmul(outputs[-1], _weights['out']) + _biases['out']
Developer: paintception, Project: ZeroAccuracySystems, Lines: 30, Source: rnn_mnist.py
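
The transpose → reshape → split sequence above is easier to follow with concrete shapes. Here is a small NumPy sketch of the same transformation (shapes only; the MNIST-style sizes are illustrative):

import numpy as np

batch_size, n_steps, n_input = 128, 28, 28  # each MNIST row is one time step

x = np.zeros((batch_size, n_steps, n_input))  # (batch, steps, features)
x = np.transpose(x, (1, 0, 2))                # (steps, batch, features)
x = np.reshape(x, (-1, n_input))              # (steps*batch, features)
steps = np.split(x, n_steps, axis=0)          # n_steps arrays of (batch, features)
print len(steps), steps[0].shape              # 28 (128, 28)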

Example 6: build_lstm_model

 def build_lstm_model(self):
     # r = rnn_cell.LSTMCell(tf.split(0, self.batch_size, self.inputs), self.input_size,
     #                       initializer=tf.contrib.layers.xavier_initializer())
     r = rnn_cell.BasicLSTMCell(self.input_size)
     istate = r.zero_state(1, dtype=tf.float32)
     o, s = rnn.rnn(r, tf.split(0, self.batch_size, self.inputs), istate)
     return o[-1]
Developer: jramapuram, Project: dynamic_learning, Lines: 7, Source: dynamic_learning.py

Example 7: myRNN

def myRNN(_x, _istate, _weights, _biases):
    ''' input shape: (batch_size, n_steps, n_input) '''
    _x = tf.transpose(_x, [1, 0, 2])  # permute n_steps and batch_size
    ''' _x: (n_steps,batch_size, n_input) '''
    ''' All first row in all batches are aggregate together
        [[all first rows (2d matrix)],
         [all second rows (2d matrix)]
         [all third rows (2d matrix)],
         ....
         ....
         [all 28-th rows (2d matrix)]]

         Take first 2d matrix as example
         [[first row of no.1 image (vector)],
          [first row of no.2 image (vector)],
          .....
          .....
          [first row of no.batch_size image (vector)]]
    '''
    ''' Reshape to prepare input to hidden activation '''
    _x = tf.reshape(_x, [-1, n_input]) # (n_steps*batch_size, n_input)
    ''' Linear activation '''
    _x = tf.matmul(_x, _weights['hidden']) + _biases['hidden'] # (n_steps*batch_size, n_hidden)
    ''' Define a lstm cell with tensorflow '''
    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
    ''' Split data because rnn cell needs a list of inputs for the RNN inner loop '''
    _x = tf.split(0, n_steps, _x) # n_steps * (batch_size, n_hidden)
    ''' Get lstm cell output '''
    outputs, states = rnn.rnn(lstm_cell, _x, initial_state=_istate)
    '''
    Linear activation
    Get inner loop last output
    '''
    return tf.matmul(outputs[-1], _weights['out']) + _biases['out']
Developer: kkboy123, Project: Tensorflow001, Lines: 34, Source: rnn_mnist_multi_input.py

Example 8: RNN

def RNN(x, weights, biases, init_state):

    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (batch_size, n_steps, n_input)
    # Permuting batch_size and n_steps
    x = tf.transpose(x, [1, 0, 2])  #(n_steps , batch_size,  n_input)
    # Reshaping to (n_steps*batch_size, n_input)
    x = tf.reshape(x, [-1, n_input])
    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_hidden)
    # This input shape is required by `rnn` function
    x = tf.split(0, n_steps, x)

    '''
    The three lines above are arguably the hardest part to understand; a concrete
    reshape demo can be found in basic_op under 1_Introduction. In the end, the
    first row of every image is gathered into one matrix, which is exactly the
    [batch_size, cell.input_size] format required; the per-step processing logic
    lives inside the rnn.rnn function.
    '''

    # Define a lstm cell with tensorflow
    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)

    # Get lstm cell output
    outputs, states = rnn.rnn(lstm_cell, x, initial_state=init_state, dtype=tf.float32)

    # Linear activation, using rnn inner loop last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out'], lstm_cell.state_size
Developer: Jangol, Project: TensorFlow-Examples-ZH, Lines: 26, Source: 3_lstm.py

Example 9: build_model

  def build_model(self):
    # Representation Generator
    self.inputs = tf.placeholder(tf.int32, [self.batch_size, self.seq_length])

    embed = tf.get_variable("embed", [self.vocab_size, self.embed_dim])
    word_embeds = tf.nn.embedding_lookup(embed, self.inputs)

    self.cell = rnn_cell.BasicLSTMCell(self.rnn_size)
    self.stacked_cell = rnn_cell.MultiRNNCell([self.cell] * self.layer_depth)

    outputs, _ = rnn.rnn(self.cell,
                         [tf.squeeze(embed_t, [1]) for embed_t in tf.split(1, self.seq_length, word_embeds)],
                         dtype=tf.float32)

    output_embed = tf.pack(outputs)
    mean_pool = tf.nn.relu(tf.reduce_mean(output_embed, 1))

    self.num_action = 4
    self.object_size = 4

    # Action scorer. no bias in paper
    self.pred_action = rnn_cell.linear(mean_pool, self.num_action, 0, "action")
    self.object_ = rnn_cell.linear(mean_pool, self.object_size, 0, "object")

    self.true_action = tf.placeholder(tf.int32, [self.batch_size, self.num_action])
Developer: stozpark, Project: text-based-game-rl-tensorflow, Lines: 25, Source: lstmdqn.py

Example 10: __init__

  def __init__(self, vocabularySize, config_param):
    self.vocabularySize = vocabularySize
    self.config = config_param

    self._inputX = tf.placeholder(tf.int32, [self.config.batch_size, self.config.sequence_size], "InputsX")
    self._inputTargetsY = tf.placeholder(tf.int32, [self.config.batch_size, self.config.sequence_size], "InputTargetsY")


    #Converting Input in an Embedded form
    with tf.device("/cpu:0"): #Pin the embedding lookup to the CPU ("/cpu:0" names the CPU device, not a GPU)
      embedding = tf.get_variable("embedding", [self.vocabularySize, self.config.embeddingSize])
      embeddingLookedUp = tf.nn.embedding_lookup(embedding, self._inputX)
      inputs = tf.split(1, self.config.sequence_size, embeddingLookedUp)
      inputTensorsAsList = [tf.squeeze(input_, [1]) for input_ in inputs]


    #Define Tensor RNN
    singleRNNCell = rnn_cell.BasicRNNCell(self.config.hidden_size)
    self.multilayerRNN =  rnn_cell.MultiRNNCell([singleRNNCell] * self.config.num_layers)
    self._initial_state = self.multilayerRNN.zero_state(self.config.batch_size, tf.float32)

    #Defining Logits
    hidden_layer_output, states = rnn.rnn(self.multilayerRNN, inputTensorsAsList, initial_state=self._initial_state)
    hidden_layer_output = tf.reshape(tf.concat(1, hidden_layer_output), [-1, self.config.hidden_size])
    self._logits = tf.nn.xw_plus_b(hidden_layer_output, tf.get_variable("softmax_w", [self.config.hidden_size, self.vocabularySize]), tf.get_variable("softmax_b", [self.vocabularySize]))
    self._predictionSoftmax = tf.nn.softmax(self._logits)

    #Define the loss
    loss = seq2seq.sequence_loss_by_example([self._logits], [tf.reshape(self._inputTargetsY, [-1])], [tf.ones([self.config.batch_size * self.config.sequence_size])], self.vocabularySize)
    self._cost = tf.div(tf.reduce_sum(loss), self.config.batch_size)

    self._final_state = states[-1]
Developer: jwjohnson314, Project: TrumpBSQuoteRNNGenerator, Lines: 32, Source: RNN_Model.py

Example 11: RNN

def RNN(X, num_words_in_X, hidden_size, max_input_size):
  # Reshape `X` as a vector. -1 means "set this dimension automatically".
  X_as_vector = tf.reshape(X, [-1])

  # Create another vector containing zeroes to pad `X` to (MAX_INPUT_LENGTH * WORD_VECTOR_LENGTH) elements.
  zero_padding = tf.zeros([max_input_size * WORD_VECTOR_LENGTH] - tf.shape(X_as_vector), dtype=X.dtype)

  # Concatenate `X_as_vector` with the padding.
  X_padded_as_vector = tf.concat(0, [X_as_vector, zero_padding])

  # Reshape the padded vector to the desired shape.
  X_padded = tf.reshape(X_padded_as_vector, [max_input_size, WORD_VECTOR_LENGTH])

  # Split X into a list of tensors of length MAX_INPUT_LENGTH where each tensor is a 1xWORD_VECTOR_LENGTH vector
  # of the word vectors
  # TODO change input to be a list of tensors of length MAX_INPUT_LENGTH where each tensor is a BATCH_SIZExWORD_VECTOR_LENGTH vector
  X = tf.split(0, max_input_size, X_padded)

  print "Length X: {}".format(len(X))

  gru_cell = rnn_cell.GRUCell(num_units=hidden_size, input_size=WORD_VECTOR_LENGTH)

  output, state = rnn.rnn(gru_cell, X, sequence_length=(num_words_in_X), dtype=tf.float32)

  print "State: {}".format(state)

  return output, state, X_padded
Developer: pchase8818, Project: dynamic-memory-networks, Lines: 27, Source: rnn_baseline.py

Example 12: build_model

    def build_model(self):
        video = tf.placeholder(tf.float32, [self.batch_size, self.n_lstm_steps, self.dim_image])
        video_mask = tf.placeholder(tf.float32, [self.batch_size, self.n_lstm_steps])

        HLness = tf.placeholder(tf.int32, [self.batch_size, self.n_lstm_steps])
        HLness_mask = tf.placeholder(tf.float32, [self.batch_size, self.n_lstm_steps])

        video_flat = tf.reshape(video, [-1, self.dim_image])
        image_emb = tf.nn.xw_plus_b( video_flat, self.encode_image_W, self.encode_image_b) # (batch_size*n_lstm_steps, dim_hidden)
        image_emb = tf.reshape(image_emb, [self.batch_size, self.n_lstm_steps, self.dim_hidden])
        image_emb = tf.transpose(image_emb, [1,0,2]) # n x b x h

        state2 = tf.zeros([self.batch_size, self.lstm2.state_size])

        loss_HL = 0.0
        _X = tf.reshape(image_emb, [-1, self.dim_hidden]) # (n x b) x h
        _X = tf.split(0, self.n_lstm_steps, _X) # n x (b x h)
        [output2, state2] = rnn.rnn(self.lstm_HL_net, _X, dtype=tf.float32) # n x (b x h)
        output2 = tf.transpose(tf.pack(output2), [1,0,2]) # b x n x h
        onehot_labels = []
        logit_words = []
        indices = tf.expand_dims(tf.range(0, self.n_lstm_steps, 1), 1) # n x 1
        for ii in xrange(10):
            labels = tf.expand_dims(HLness[ii,:], 1) # n x 1
            concated = tf.concat(1, [indices, labels]) # n x 2
            onehot_labels = tf.sparse_to_dense(concated, tf.pack([self.n_lstm_steps, 2]), 1.0, 0.0) # n x 2
            logit_words = tf.nn.xw_plus_b(output2[ii,:,:], self.embed_HL_W, self.embed_HL_b) # n x 2
            cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logit_words, onehot_labels) # n x 1
            cross_entropy = tf.mul(cross_entropy, HLness_mask[ii,:]) # n x 1
            loss_HL += tf.reduce_sum(cross_entropy) # 1

        loss_HL = loss_HL / tf.reduce_sum(HLness_mask)
        loss = loss_HL
        return loss, video, video_mask, HLness, HLness_mask
Developer: KuoHaoZeng, Project: VH, Lines: 34, Source: HL.py

Example 13: tied_rnn_seq2seq

def tied_rnn_seq2seq(encoder_inputs, decoder_inputs, cell,
                     loop_function=None, dtype=tf.float32, scope=None):
  """RNN sequence-to-sequence model with tied encoder and decoder parameters.

  This model first runs an RNN to encode encoder_inputs into a state vector,
  then runs a decoder, initialized with the last encoder state, on
  decoder_inputs. Encoder and decoder use the same RNN cell and share parameters.

  Args:
    encoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
    decoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
    cell: rnn_cell.RNNCell defining the cell function and size.
    loop_function: if not None, this function will be applied to i-th output
      in order to generate i+1-th input, and decoder_inputs will be ignored,
      except for the first element ("GO" symbol), see rnn_decoder for details.
    dtype: The dtype of the initial state of the rnn cell (default: tf.float32).
    scope: VariableScope for the created subgraph; default: "tied_rnn_seq2seq".

  Returns:
    outputs: A list of the same length as decoder_inputs of 2D Tensors with
      shape [batch_size x cell.output_size] containing the generated outputs.
    states: The state of each decoder cell in each time-step. This is a list
      with length len(decoder_inputs) -- one item for each time-step.
      Each item is a 2D Tensor of shape [batch_size x cell.state_size].
  """
  with tf.variable_scope("combined_tied_rnn_seq2seq"):
    scope = scope or "tied_rnn_seq2seq"
    _, enc_states = rnn.rnn(
        cell, encoder_inputs, dtype=dtype, scope=scope)
    tf.get_variable_scope().reuse_variables()
    return rnn_decoder(decoder_inputs, enc_states[-1], cell,
                       loop_function=loop_function, scope=scope)
Developer: adeelzaman, Project: tensorflow, Lines: 32, Source: seq2seq.py

Example 14: create_model

    def create_model(self):
        print "Setting up model",
        sys.stdout.flush()
        # placeholders for data + targets
        self._input_data = tf.placeholder(tf.int32, shape=(self.batch_size, self.num_steps))
        self._targets = tf.placeholder(tf.int32, [self.batch_size, self.num_steps])

        # set up lookup function
        self.embedding = tf.constant(self.saved_embedding,name="embedding")
        self.inputs = tf.nn.embedding_lookup(self.embedding, self._input_data)
        # lstm model
        self.lstm_cell = rnn_cell.BasicLSTMCell(self.lstm_size)
        self.cell = rnn_cell.MultiRNNCell([self.lstm_cell] * self.num_layers)


        self._initial_state = self.cell.zero_state(self.batch_size, tf.float32)

        from tensorflow.models.rnn import rnn
        self.inputs = [tf.squeeze(input_, [1]) for input_ in tf.split(1, self.num_steps, self.inputs)]
        self.outputs, self.states = rnn.rnn(self.cell, self.inputs, initial_state=self._initial_state)

        self.output = tf.reshape(tf.concat(1, self.outputs), [-1, self.lstm_size])
        self.softmax_w = tf.get_variable("softmax_w", [self.lstm_size, self.vocab_size])
        self.softmax_b = tf.get_variable("softmax_b", [self.vocab_size])
        self.logits = tf.matmul(self.output, self.softmax_w) + self.softmax_b
        #print  "self.states.get_shape():",self.states.get_shape()
        #print  "tf.shape(self.states)",tf.shape(self.states)
        self._final_state = self.states
        self.saver = tf.train.Saver()
        
        #delete data to save memory if network is used for sampling only
        if self.only_for_sampling:
            del self.data
            
        print "done"
Developer: wohnjayne, Project: what-will-you-publish-next, Lines: 35, Source: lstm.py

Example 15: basic_rnn_seq2seq

def basic_rnn_seq2seq(
    encoder_inputs, decoder_inputs, cell, dtype=tf.float32, scope=None):
  """Basic RNN sequence-to-sequence model.

  This model first runs an RNN to encode encoder_inputs into a state vector,
  then runs a decoder, initialized with the last encoder state, on
  decoder_inputs. Encoder and decoder use the same RNN cell type, but don't
  share parameters.

  Args:
    encoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
    decoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
    cell: rnn_cell.RNNCell defining the cell function and size.
    dtype: The dtype of the initial state of the RNN cell (default: tf.float32).
    scope: VariableScope for the created subgraph; default: "basic_rnn_seq2seq".

  Returns:
    outputs: A list of the same length as decoder_inputs of 2D Tensors with
      shape [batch_size x cell.output_size] containing the generated outputs.
    states: The state of each decoder cell in each time-step. This is a list
      with length len(decoder_inputs) -- one item for each time-step.
      Each item is a 2D Tensor of shape [batch_size x cell.state_size].
  """
  with tf.variable_scope(scope or "basic_rnn_seq2seq"):
    _, enc_states = rnn.rnn(cell, encoder_inputs, dtype=dtype)
    return rnn_decoder(decoder_inputs, enc_states[-1], cell)
Developer: adeelzaman, Project: tensorflow, Lines: 25, Source: seq2seq.py
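
To see how these seq2seq helpers are driven, here is a hypothetical call to basic_rnn_seq2seq under the same legacy layout (sequence lengths and sizes are illustrative):

import tensorflow as tf
from tensorflow.models.rnn import rnn_cell, seq2seq

batch_size, input_size, num_units = 32, 8, 64  # illustrative sizes

encoder_inputs = [tf.placeholder(tf.float32, shape=(batch_size, input_size))
                  for _ in range(5)]
decoder_inputs = [tf.placeholder(tf.float32, shape=(batch_size, input_size))
                  for _ in range(5)]
cell = rnn_cell.BasicRNNCell(num_units)

# The decoder starts from the final encoder state; tied_rnn_seq2seq (example 13)
# would additionally share the cell weights between encoder and decoder.
outputs, states = seq2seq.basic_rnn_seq2seq(encoder_inputs, decoder_inputs, cell)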


Note: The tensorflow.models.rnn.rnn.rnn function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many programmers; copyright of the source code belongs to the original authors. Please refer to each project's License before distributing or using the code, and do not reproduce this article without permission.