

Python rnn.static_rnn Function Code Examples

This article collects typical usage examples of the Python function tensorflow.contrib.rnn.static_rnn. If you are wondering what static_rnn does, how to call it, or what real-world usage looks like, the curated examples below may help.


The following presents 15 code examples of the static_rnn function, sorted by popularity by default.
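Before the examples, here is a minimal sketch of the typical static_rnn call pattern. It is only a sketch, assuming TensorFlow 1.x (where tf.contrib.rnn is still available); the sizes n_steps, n_input, and n_hidden are illustrative. static_rnn expects a Python list of per-time-step tensors, so a 3-D batch is first unstacked along the time axis.

import tensorflow as tf
from tensorflow.contrib import rnn

n_steps, n_input, n_hidden = 28, 28, 128                   # illustrative sizes
x = tf.placeholder(tf.float32, [None, n_steps, n_input])   # (batch, time, features)
x_steps = tf.unstack(x, n_steps, axis=1)                    # list of n_steps tensors of shape (batch, features)

cell = rnn.BasicLSTMCell(n_hidden)
outputs, final_state = rnn.static_rnn(cell, x_steps, dtype=tf.float32)
# outputs is a list with one tensor per time step; outputs[-1] is the last step's output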

Example 1: _basic_rnn_seq2seq

 def _basic_rnn_seq2seq(encoder_inputs,
                       decoder_inputs,
                       cell,
                       feed_previous,
                       dtype=dtypes.float32,
                       scope=None):
   """Basic RNN sequence-to-sequence model.
   This model first runs an RNN to encode encoder_inputs into a state vector,
   then runs decoder, initialized with the last encoder state, on decoder_inputs.
   Encoder and decoder use the same RNN cell type, but don't share parameters.
   Args:
     encoder_inputs: A list of 2D Tensors [batch_size x input_size].
     decoder_inputs: A list of 2D Tensors [batch_size x input_size].
     cell: tf.nn.rnn_cell.RNNCell defining the cell function and size.
     feed_previous: Boolean; if True, only the first of decoder_inputs will be
       used (the "GO" symbol), all other inputs will be generated by the previous 
       decoder output using _loop_function below. If False, decoder_inputs are used 
       as given (the standard decoder case).
     dtype: The dtype of the initial state of the RNN cell (default: tf.float32).
     scope: VariableScope for the created subgraph; default: "basic_rnn_seq2seq".
   Returns:
     A tuple of the form (outputs, state), where:
       outputs: A list of the same length as decoder_inputs of 2D Tensors with
         shape [batch_size x output_size] containing the generated outputs.
       state: The state of each decoder cell in the final time-step.
         It is a 2D Tensor of shape [batch_size x cell.state_size].
   """
   with variable_scope.variable_scope(scope or "basic_rnn_seq2seq"):
     enc_cell = copy.deepcopy(cell)
     _, enc_state = rnn.static_rnn(enc_cell, encoder_inputs, dtype=dtype)
     if feed_previous:
         return _rnn_decoder(decoder_inputs, enc_state, cell, _loop_function)
     else:
         return _rnn_decoder(decoder_inputs, enc_state, cell)
Author: zeyu-h, Project: Multivariate-Time-Series-forecast-using-seq2seq-in-TensorFlow, Lines: 33, Source: build_model_multi_variate.py
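The docstring above describes the encoder/decoder flow. Below is a hypothetical invocation sketch, assuming TensorFlow 1.x and that the project's _rnn_decoder and _loop_function helpers are defined as in the source file; batch_size, input_size, num_units, and steps are illustrative.

import tensorflow as tf

batch_size, input_size, num_units, steps = 32, 10, 64, 8   # illustrative sizes
encoder_inputs = [tf.placeholder(tf.float32, [batch_size, input_size])
                  for _ in range(steps)]                    # one tensor per encoder time step
decoder_inputs = [tf.placeholder(tf.float32, [batch_size, input_size])
                  for _ in range(steps)]                    # one tensor per decoder time step
cell = tf.nn.rnn_cell.GRUCell(num_units)
# feed_previous=False: the decoder consumes decoder_inputs as given (the training case)
outputs, state = _basic_rnn_seq2seq(encoder_inputs, decoder_inputs, cell,
                                    feed_previous=False)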

Example 2: prune_model

def prune_model(model,batchsize = 50,ckpt="model_pruned"):
    weights = model.get_weights()
    W = weights[0]
    U = weights[1]
    bias = weights[2]
    W_out = weights[3]
    bias_out = weights[4]
    GRU = PrunableGRU(W,U,bias)
    Logits = PrunableLogits(W_out,bias_out)
    X = tf.placeholder("float", [40, batchsize, 2])
    Y = tf.placeholder("float", [None, W_out.shape[1]])
    x = tf.unstack(X,axis=0)
    outputs, states = static_rnn(GRU, x, dtype=tf.float32)
    logits = Logits(outputs[-1])
    prediction = tf.nn.softmax(logits)
    loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
    train_op = optimizer.minimize(loss_op)
    correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    init = tf.global_variables_initializer()
    dataset = build_dataset()
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(init)
        for i in range(1000):
            batch_x, batch_y = get_batch(dataset,batchsize=batchsize,batchtype="train")
            batch_x = np.swapaxes(batch_x,1,0)
            sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
            loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,Y: batch_y})
            if i % 100 == 0: saver.save(sess, ckpt)  # save a checkpoint every 100 steps
            print(loss)
Author: arjo129, Project: uSpeech, Lines: 32, Source: prunableGRU.py

Example 3: build

    def build(self, input_number, sequence_length, layers_number, units_number, output_number):
        self.x = tf.placeholder("float", [None, sequence_length, input_number])
        self.y = tf.placeholder("float", [None, output_number])
        self.sequence_length = sequence_length

        self.weights = {
            'out': tf.Variable(tf.random_normal([units_number, output_number]))
        }
        self.biases = {
            'out': tf.Variable(tf.random_normal([output_number]))
        }

        x = tf.transpose(self.x, [1, 0, 2])
        x = tf.reshape(x, [-1, input_number])
        x = tf.split(x, sequence_length, 0)

        lstm_layers = []
        for i in range(0, layers_number):
            lstm_layer = rnn.BasicLSTMCell(units_number)
            lstm_layers.append(lstm_layer)
        deep_lstm = rnn.MultiRNNCell(lstm_layers)

        self.outputs, states = rnn.static_rnn(deep_lstm, x, dtype=tf.float32)

        print("Build model with input_number: {}, sequence_length: {}, layers_number: {}, "
              "units_number: {}, output_number: {}".format(input_number, sequence_length,
                                                           layers_number, units_number, output_number))

        self.save(input_number, sequence_length, layers_number, units_number, output_number)
Author: Zumbalamambo, Project: Deep-Lyrics, Lines: 29, Source: Model.py

Example 4: __init__

    def __init__(self, config, is_training=False):
        self.config = config
        self.batch_size = batch_size = config.batch_size
        self.num_steps = num_steps = config.num_steps
        self.hidden_size = hidden_size = config.hidden_size
        self.num_layers = 1
        vocab_size = config.vocab_size
        self.max_grad_norm = config.max_grad_norm
        self.use_lstm = config.use_lstm

        # Placeholders for inputs.
        self.input_data = tf.placeholder(tf.int32, [batch_size, num_steps])
        self.targets = tf.placeholder(tf.int32, [batch_size, num_steps])
        self.initial_state = array_ops.zeros(tf.stack([self.batch_size, self.num_steps]),
                 dtype=tf.float32).set_shape([None, self.num_steps])

        embedding = tf.get_variable('embedding', [self.config.vocab_size, self.config.hidden_size])

        # Set up ACT cell and inner rnn-type cell for use inside the ACT cell.
        with tf.variable_scope("rnn"):
            if self.use_lstm:
                inner_cell = BasicLSTMCell(self.config.hidden_size)
            else:
                inner_cell = GRUCell(self.config.hidden_size)

        with tf.variable_scope("ACT"):

            act = ACTCell(self.config.hidden_size, inner_cell, config.epsilon,
                          max_computation=config.max_computation, batch_size=self.batch_size)

        inputs = tf.nn.embedding_lookup(embedding, self.input_data)

        inputs = [tf.squeeze(single_input, [1]) for single_input in tf.split(inputs, self.config.num_steps, 1)]

        self.outputs, final_state = static_rnn(act, inputs, dtype = tf.float32)

        # Softmax to get probability distribution over vocab.
        output = tf.reshape(tf.concat(self.outputs, 1), [-1, hidden_size])
        softmax_w = tf.get_variable("softmax_w", [hidden_size, vocab_size])
        softmax_b = tf.get_variable("softmax_b", [vocab_size])
        self.logits = tf.matmul(output, softmax_w) + softmax_b   # dim (numsteps*batchsize, vocabsize)

        loss = sequence_loss_by_example(
                [self.logits],
                [tf.reshape(self.targets, [-1])],
                [tf.ones([batch_size * num_steps])],
                vocab_size)

        # Add up loss and retrieve batch-normalised ponder cost: sum N + sum Remainder.
        ponder_cost = act.calculate_ponder_cost(time_penalty=self.config.ponder_time_penalty)
        self.cost = (tf.reduce_sum(loss) / batch_size) + ponder_cost
        self.final_state = self.outputs[-1]

        if is_training:
            self.lr = tf.Variable(0.0, trainable=False)
            tvars = tf.trainable_variables()
            grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars), self.max_grad_norm)
            optimizer = tf.train.AdamOptimizer(self.config.learning_rate)
            self.train_op = optimizer.apply_gradients(zip(grads, tvars))
Author: DeNeutoy, Project: act-tensorflow, Lines: 59, Source: adaptive_computation_time.py

Example 5: RNN

def RNN(x, weights, biases):
	x = tf.reshape(x, [-1, n_input])

	x = tf.split(x,n_input,1)

	rnn_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(n_hidden),rnn.BasicLSTMCell(n_hidden)])
	outputs, states = rnn.static_rnn(rnn_cell, x, dtype=tf.float32)

	return tf.matmul(outputs[-1], weights['out']) + biases['out']
Author: PatrickCelentano, Project: MxM, Lines: 9, Source: lstm.py

Example 6: _lstm_model

 def _lstm_model(X, y):
     stacked_lstm = rnn.MultiRNNCell(lstm_cells(rnn_layers), state_is_tuple=True)
     x_ = tf.unstack(X, axis=1, num=num_units)
     output, layers = rnn.static_rnn(stacked_lstm, x_, dtype=dtypes.float32)
     output = dnn_layers(output[-1], dense_layers)
     prediction, loss = tflearn.models.linear_regression(output, y)
     train_op = tf.contrib.layers.optimize_loss(
         loss, tf.contrib.framework.get_global_step(), optimizer=optimizer,
         learning_rate=learning_rate)
     return prediction, loss, train_op
Author: zhaolei, Project: stoneLearn, Lines: 10, Source: lstm.py

Example 7: rnn_estimator

 def rnn_estimator(x, y):
   """RNN estimator with target predictor function on top."""
   x = input_op_fn(x)
   if cell_type == 'rnn':
     cell_fn = contrib_rnn.BasicRNNCell
   elif cell_type == 'gru':
     cell_fn = contrib_rnn.GRUCell
   elif cell_type == 'lstm':
     cell_fn = functools.partial(
         contrib_rnn.BasicLSTMCell, state_is_tuple=False)
   else:
     raise ValueError('cell_type {} is not supported. '.format(cell_type))
   # TODO(ipolosukhin): state_is_tuple=False is deprecated
   if bidirectional:
     # forward direction cell
     fw_cell = cell_fn(rnn_size)
     bw_cell = cell_fn(rnn_size)
     # attach attention cells if specified
     if attn_length is not None:
       fw_cell = contrib_rnn.AttentionCellWrapper(
           fw_cell, attn_length=attn_length, attn_size=attn_size,
           attn_vec_size=attn_vec_size, state_is_tuple=False)
       bw_cell = contrib_rnn.AttentionCellWrapper(
           bw_cell, attn_length=attn_length, attn_size=attn_size,
           attn_vec_size=attn_vec_size, state_is_tuple=False)
     rnn_fw_cell = contrib_rnn.MultiRNNCell([fw_cell] * num_layers,
                                            state_is_tuple=False)
     # backward direction cell
     rnn_bw_cell = contrib_rnn.MultiRNNCell([bw_cell] * num_layers,
                                            state_is_tuple=False)
     # pylint: disable=unexpected-keyword-arg, no-value-for-parameter
     _, encoding = bidirectional_rnn(rnn_fw_cell,
                                     rnn_bw_cell,
                                     x,
                                     dtype=dtypes.float32,
                                     sequence_length=sequence_length,
                                     initial_state_fw=initial_state,
                                     initial_state_bw=initial_state)
   else:
     rnn_cell = cell_fn(rnn_size)
     if attn_length is not None:
       rnn_cell = contrib_rnn.AttentionCellWrapper(
           rnn_cell, attn_length=attn_length, attn_size=attn_size,
           attn_vec_size=attn_vec_size, state_is_tuple=False)
     cell = contrib_rnn.MultiRNNCell([rnn_cell] * num_layers,
                                     state_is_tuple=False)
     _, encoding = contrib_rnn.static_rnn(cell,
                                          x,
                                          dtype=dtypes.float32,
                                          sequence_length=sequence_length,
                                          initial_state=initial_state)
   return target_predictor_fn(encoding, y)
Author: kdavis-mozilla, Project: tensorflow, Lines: 52, Source: models.py

Example 8: __init__

    def __init__(self, args, deterministic=False):
        self.args = args

        if args.model == 'rnn':
            cell_fn = rnn.BasicRNNCell
        elif args.model == 'gru':
            cell_fn = rnn.GRUCell
        elif args.model == 'lstm':
            cell_fn = rnn.BasicLSTMCell
        elif args.model == 'bn-lstm':
            cell_fn = BatchNormLSTMCell
        else:
            raise Exception('model type not supported: {}'.format(args.model))

        if args.model == 'bn-lstm':
            cell = cell_fn(args.rnn_size, self.is_training)
        else:
            cell = cell_fn(args.rnn_size)

        self.cell = cell = rnn.MultiRNNCell([cell] * args.num_layers)

        self.input_data = tf.placeholder(tf.int64, [None, args.seq_length])
        # self.targets = tf.placeholder(tf.int64, [None, args.seq_length])  # seq2seq model
        self.targets = tf.placeholder(tf.int64, [None, ])  # target is class label

        with tf.variable_scope('embeddingLayer'):
            with tf.device('/cpu:0'):
                W = tf.get_variable('W', [args.vocab_size, args.rnn_size])
                embedded = tf.nn.embedding_lookup(W, self.input_data)

                # shape: (batch_size, seq_length, cell.input_size) => (seq_length, batch_size, cell.input_size)
                inputs = tf.split(embedded, args.seq_length, 1)
                inputs = [tf.squeeze(input_, [1]) for input_ in inputs]

        outputs, last_state = rnn.static_rnn(self.cell, inputs, dtype=tf.float32, scope='rnnLayer')

        with tf.variable_scope('softmaxLayer'):
            softmax_w = tf.get_variable('w', [args.rnn_size, args.label_size])
            softmax_b = tf.get_variable('b', [args.label_size])
            logits = tf.matmul(outputs[-1], softmax_w) + softmax_b
            self.probs = tf.nn.softmax(logits)

        # self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=self.targets))  # Softmax loss
        self.cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=self.targets))  # Softmax loss
        self.final_state = last_state
        self.lr = tf.Variable(0.0, trainable=False)
        self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(self.cost)  # Adam Optimizer

        self.correct_pred = tf.equal(tf.argmax(self.probs, 1), self.targets)
        self.correct_num = tf.reduce_sum(tf.cast(self.correct_pred, tf.float32))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))
Author: AlexYoung757, Project: RNN-Classification, Lines: 51, Source: model.py

Example 9: recurrent_neural_network

def recurrent_neural_network(x):
    layer = {'weights':tf.Variable(tf.random_normal([rnn_size, n_classes])),
             'biases':tf.Variable(tf.random_normal([n_classes]))}

    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, chunk_size])
    x = tf.split(x, n_chunks, 0)

    lstm_cell = rnn.BasicLSTMCell(rnn_size)
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)

    output = tf.matmul(outputs[-1], layer['weights']) + layer['biases']

    return output
Author: hbdhj, Project: python, Lines: 14, Source: 55.+RNN+Example+in+Tensorflow.py

Example 10: simple_rnn

def simple_rnn(features, labels, mode):
    # 0. Reformat input shape to become a sequence
    x = tf.split(features[TIMESERIES_COL], N_INPUTS, 1)

    # 1. Configure the RNN
    lstm_cell = rnn.BasicLSTMCell(LSTM_SIZE, forget_bias = 1.0)
    outputs, _ = rnn.static_rnn(lstm_cell, x, dtype = tf.float32)

    # Slice to keep only the last cell of the RNN
    outputs = outputs[-1]
    #print('last outputs={}'.format(outputs))

    # Output is result of linear activation of last layer of RNN
    weight = tf.get_variable("weight", initializer=tf.initializers.random_normal, 
			     shape=[LSTM_SIZE, N_OUTPUTS])
    bias = tf.get_variable("bias", initializer=tf.initializers.random_normal, 
			   shape=[N_OUTPUTS])
    predictions = tf.matmul(outputs, weight) + bias
    
    # 2. Loss function, training/eval ops
    if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:
        loss = tf.losses.mean_squared_error(labels, predictions)
        train_op = tf.contrib.layers.optimize_loss(
            loss = loss,
            global_step = tf.train.get_global_step(),
            learning_rate = 0.01,
            optimizer = "SGD")
        eval_metric_ops = {
            "rmse": tf.metrics.root_mean_squared_error(labels, predictions)
        }
    else:
        loss = None
        train_op = None
        eval_metric_ops = None
  
    # 3. Create predictions
    predictions_dict = {"predicted": predictions}
    
    # 4. Create export outputs
    export_outputs = {"predict_export_outputs": tf.estimator.export.PredictOutput(outputs = predictions)}

    # 5. Return EstimatorSpec
    return tf.estimator.EstimatorSpec(
        mode = mode,
        predictions = predictions_dict,
        loss = loss,
        train_op = train_op,
        eval_metric_ops = eval_metric_ops,
        export_outputs = export_outputs)
Author: GoogleCloudPlatform, Project: training-data-analyst, Lines: 49, Source: model.py

Example 11: generate_rnn_output

 def generate_rnn_output(self):
   """
   Generate RNN state outputs with word embeddings as inputs
   """
   with tf.variable_scope("generate_seq_output"):
     if self.bidirectional_rnn:
       embedding = tf.get_variable("embedding",
                                   [self.source_vocab_size,
                                    self.word_embedding_size])
       encoder_emb_inputs = list()
       encoder_emb_inputs = [tf.nn.embedding_lookup(embedding, encoder_input)\
                               for encoder_input in self.encoder_inputs]
       rnn_outputs = static_bidirectional_rnn(self.cell_fw,
                                              self.cell_bw, 
                                              encoder_emb_inputs, 
                                              sequence_length=self.sequence_length,
                                              dtype=tf.float32)
       encoder_outputs, encoder_state_fw, encoder_state_bw = rnn_outputs
       # with state_is_tuple = True, if num_layers > 1, 
       # here we simply use the state from last layer as the encoder state
       state_fw = encoder_state_fw[-1]
       state_bw = encoder_state_bw[-1]
       encoder_state = tf.concat([tf.concat(state_fw, 1),
                                  tf.concat(state_bw, 1)], 1)
       top_states = [tf.reshape(e, [-1, 1, self.cell_fw.output_size \
                                 + self.cell_bw.output_size])
                     for e in encoder_outputs]
       attention_states = tf.concat(top_states, 1)
     else:
       embedding = tf.get_variable("embedding", 
                                   [self.source_vocab_size,
                                    self.word_embedding_size])
       encoder_emb_inputs = list()
       encoder_emb_inputs = [tf.nn.embedding_lookup(embedding, encoder_input)\
                             for encoder_input in self.encoder_inputs] 
       rnn_outputs = static_rnn(self.cell_fw,
                                encoder_emb_inputs,
                                sequence_length=self.sequence_length,
                                dtype=tf.float32)
       encoder_outputs, encoder_state = rnn_outputs
       # with state_is_tuple = True, if num_layers > 1, 
       # here we use the state from last layer as the encoder state
       state = encoder_state[-1]
       encoder_state = tf.concat(state, 1)
       top_states = [tf.reshape(e, [-1, 1, self.cell_fw.output_size])
                     for e in encoder_outputs]
       attention_states = tf.concat(top_states, 1)
     return encoder_outputs, encoder_state, attention_states
Author: HadoopIt, Project: rnn-nlu, Lines: 48, Source: multi_task_model.py

Example 12: RNN

def RNN(x, weights, biases):

    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (batch_size, n_steps, n_input)
    # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)

    # Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_input)
    x = tf.unstack(x, n_steps, 1)

    # Define a lstm cell with tensorflow
    lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)

    # Get lstm cell output
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)

    # Linear activation, using rnn inner loop last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
Author: ChillarAnand, Project: 01, Lines: 17, Source: tf_lstm_mnist.py

Example 13: recurrent_neural_network

def recurrent_neural_network(x):
    layer = {'weights': tf.Variable(tf.random_normal([rnn_size, n_classes])),
             'biases': tf.Variable(tf.random_normal([n_classes]))}

    # transpose permutes the tensor's axes; here each image is rearranged so
    # that every row becomes one time-step input
    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, chunk_size])
    x = tf.split(axis=0, num_or_size_splits=n_chunks, value=x)

    # define the basic cell that will be looped over
    lstm_cell = BasicLSTMCell(rnn_size)
    # pick a model that chains the cells together over the time steps
    outputs, states = static_rnn(lstm_cell, x, dtype=tf.float32)
    # use a fully connected layer to produce the prediction
    output = tf.matmul(outputs[-1], layer['weights']) + layer['biases']
    return output
Author: Xavier-Pan, Project: Hello-Github, Lines: 18, Source: untitled0.py

Example 14: simple_rnn

def simple_rnn(features, targets, mode):
  # 0. Reformat input shape to become a sequence
  x = tf.split(features[TIMESERIES_COL], N_INPUTS, 1)
  #print 'x={}'.format(x)
    
  # 1. configure the RNN
  lstm_cell = rnn.BasicLSTMCell(LSTM_SIZE, forget_bias=1.0)
  outputs, _ = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)

  # slice to keep only the last cell of the RNN
  outputs = outputs[-1]
  #print 'last outputs={}'.format(outputs)
  
  # output is result of linear activation of last layer of RNN
  weight = tf.Variable(tf.random_normal([LSTM_SIZE, N_OUTPUTS]))
  bias = tf.Variable(tf.random_normal([N_OUTPUTS]))
  predictions = tf.matmul(outputs, weight) + bias
    
  # 2. loss function, training/eval ops
  if mode == tf.contrib.learn.ModeKeys.TRAIN or mode == tf.contrib.learn.ModeKeys.EVAL:
     loss = tf.losses.mean_squared_error(targets, predictions)
     train_op = tf.contrib.layers.optimize_loss(
         loss=loss,
         global_step=tf.contrib.framework.get_global_step(),
         learning_rate=0.01,
         optimizer="SGD")
     eval_metric_ops = {
      "rmse": tf.metrics.root_mean_squared_error(targets, predictions)
     }
  else:
     loss = None
     train_op = None
     eval_metric_ops = None
  
  # 3. Create predictions
  predictions_dict = {"predicted": predictions}
  
  # 4. return ModelFnOps
  return tflearn.ModelFnOps(
      mode=mode,
      predictions=predictions_dict,
      loss=loss,
      train_op=train_op,
      eval_metric_ops=eval_metric_ops)
Author: GoogleCloudPlatform, Project: training-data-analyst, Lines: 44, Source: model.py

Example 15: _half_seq_len_vs_unroll_half_rnn_benchmark

def _half_seq_len_vs_unroll_half_rnn_benchmark(inputs_list_t, sequence_length):
  (_, input_size) = inputs_list_t[0].get_shape().as_list()
  initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
  cell = contrib_rnn.LSTMCell(
      num_units=input_size,
      use_peepholes=True,
      initializer=initializer,
      state_is_tuple=False)
  outputs, final_state = contrib_rnn.static_rnn(
      cell,
      inputs_list_t,
      sequence_length=sequence_length,
      dtype=dtypes.float32)

  trainable_variables = ops_lib.get_collection(
      ops_lib.GraphKeys.TRAINABLE_VARIABLES)
  gradients = gradients_impl.gradients(outputs + [final_state],
                                       trainable_variables)

  return control_flow_ops.group(final_state, *(gradients + outputs))
Author: DjangoPeng, Project: tensorflow, Lines: 20, Source: rnn_test.py


Note: The tensorflow.contrib.rnn.static_rnn examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, who retain the copyright; for redistribution and use, please refer to each project's license. Do not reproduce without permission.