

Python rnn.rnn Method Code Examples

This article collects typical usage examples of the Python method tensorflow.models.rnn.rnn.rnn. If you are unsure what rnn.rnn does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples from the containing module, tensorflow.models.rnn.rnn.


The following presents 11 code examples of the rnn.rnn method, ordered by popularity by default.
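Before the examples, a minimal sketch of the calling convention may help. This assumes the legacy TF 0.x API, where tf.split takes the split dimension first and rnn.rnn consumes a Python list of per-step tensors; the sizes are illustrative:

import tensorflow as tf
from tensorflow.models.rnn import rnn, rnn_cell

batch_size, n_steps, n_input, n_hidden = 32, 10, 8, 64

x = tf.placeholder(tf.float32, [batch_size, n_steps, n_input])
# rnn.rnn expects a length-n_steps list of (batch_size, n_input) tensors.
x_steps = [tf.squeeze(t, squeeze_dims=[1]) for t in tf.split(1, n_steps, x)]

cell = rnn_cell.BasicLSTMCell(n_hidden)
outputs, state = rnn.rnn(cell, x_steps, dtype=tf.float32)
# outputs is a list of n_steps tensors, each of shape (batch_size, n_hidden).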

Example 1: create_rnn

# Required import: from tensorflow.models.rnn import rnn [as alias]
# Or: from tensorflow.models.rnn.rnn import rnn [as alias]
def create_rnn(config, x, scope='rnn'):
    with tf.variable_scope(scope):
        memory = config['rnn_size']
        cell = rnn_cell.BasicLSTMCell(memory)
        state = cell.zero_state(batch_size=config['batch_size'], dtype=tf.float32)
        # rnn.rnn takes a list of per-step inputs; here a single step is run.
        x, state = rnn.rnn(cell, [tf.cast(x, tf.float32)], initial_state=state, dtype=tf.float32)
        x = x[-1]
        #w = tf.get_variable('w', [hc.get('rnn_size'), 4])
        #b = tf.get_variable('b', [4])
        #x = tf.nn.xw_plus_b(x, w, b)
        x = tf.sign(x)
        return x, state

# At each step of the graph we have:
# x is [BATCH_SIZE, 4], where each row is a one-hot binary vector of the form:
# [start_token end_token a b]
#
# y is [BATCH_SIZE, 4], a binary vector giving the chance that each character is correct
#
Author: 255BITS, Project: hyperchamber, Lines: 21, Source: rnn.py
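As a concrete illustration of the one-hot input format described in the comments above, here is a small NumPy helper. The four-symbol alphabet comes from the source comments; the helper itself is hypothetical and purely illustrative:

import numpy as np

# Hypothetical helper: build a batch of one-hot rows over the alphabet
# [start_token, end_token, a, b] described in Example 1.
SYMBOLS = ['start_token', 'end_token', 'a', 'b']

def one_hot_batch(symbols):
    batch = np.zeros((len(symbols), len(SYMBOLS)), dtype=np.float32)
    for i, s in enumerate(symbols):
        batch[i, SYMBOLS.index(s)] = 1.0
    return batch

x = one_hot_batch(['start_token', 'a', 'b', 'end_token'])  # shape (4, 4)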

Example 2: rnn_model

# Required import: from tensorflow.models.rnn import rnn [as alias]
# Or: from tensorflow.models.rnn.rnn import rnn [as alias]
def rnn_model(X, y):
    """Recurrent neural network model to predict from a sequence of words
    to a class."""
    # Convert word indexes into embeddings.
    # This creates an embeddings matrix of [n_words, EMBEDDING_SIZE] and then
    # maps the word indexes of the sequence into [batch_size, sequence_length,
    # EMBEDDING_SIZE].
    word_vectors = skflow.ops.categorical_variable(X, n_classes=n_words,
        embedding_size=EMBEDDING_SIZE, name='words')
    # Split into a list of embeddings, one per word, removing the doc-length dim.
    # word_list ends up as a list of tensors of shape [batch_size, EMBEDDING_SIZE].
    word_list = skflow.ops.split_squeeze(1, MAX_DOCUMENT_LENGTH, word_vectors)
    # Create a Gated Recurrent Unit cell with hidden size EMBEDDING_SIZE.
    cell = rnn_cell.GRUCell(EMBEDDING_SIZE)
    # Unroll the recurrent network to MAX_DOCUMENT_LENGTH steps, passing
    # word_list as the per-step inputs.
    _, encoding = rnn.rnn(cell, word_list, dtype=tf.float32)
    # Take the encoding of the last step (i.e., the final hidden state) and
    # pass it as features to a logistic regression over the output classes.
    return skflow.models.logistic_regression(encoding, y)
Author: lixiaosi33, Project: -Classification-on-Chinese-Magazine-, Lines: 23, Source: text_classification.py
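skflow.ops.split_squeeze is a convenience wrapper; in terms of the raw TF 0.x ops it is roughly equivalent to the sketch below (an approximation under the old tf.split(dim, num, value) signature, not the library's actual implementation):

import tensorflow as tf

# Rough sketch of skflow.ops.split_squeeze(dim, num_split, tensor):
# split along the given axis, then drop that axis from each piece.
def split_squeeze_sketch(dim, num_split, tensor):
    return [tf.squeeze(t, squeeze_dims=[dim])
            for t in tf.split(dim, num_split, tensor)]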

Example 3: __init__

# Required import: from tensorflow.models.rnn import rnn [as alias]
# Or: from tensorflow.models.rnn.rnn import rnn [as alias]
def __init__(self, is_training, config):
        self.batch_size = batch_size = config.batch_size
        self.num_steps = num_steps = config.num_steps
        size = config.hidden_size

        self._input_data = tf.placeholder(tf.float32, [batch_size, num_steps])
        self._targets = tf.placeholder(tf.float32, [batch_size, num_steps])

        lstm_cell = rnn_cell.BasicLSTMCell(size, forget_bias=0.0)
        if is_training and config.keep_prob < 1:
            lstm_cell = rnn_cell.DropoutWrapper(lstm_cell, output_keep_prob=config.keep_prob)
        cell = rnn_cell.MultiRNNCell([lstm_cell] * config.num_layers)

        self._initial_state = cell.zero_state(batch_size, tf.float32)

        iw = tf.get_variable("input_w", [1, size])
        ib = tf.get_variable("input_b", [size])
        inputs = [tf.nn.xw_plus_b(i_, iw, ib) for i_ in tf.split(1, num_steps, self._input_data)]
        if is_training and config.keep_prob < 1:
            inputs = [tf.nn.dropout(input_, config.keep_prob) for input_ in inputs]

        outputs, states = rnn.rnn(cell, inputs, initial_state=self._initial_state)
        rnn_output = tf.reshape(tf.concat(1, outputs), [-1, size])

        self._output = output = tf.nn.xw_plus_b(rnn_output,
                                 tf.get_variable("out_w", [size, 1]),
                                 tf.get_variable("out_b", [1]))

        self._cost = cost = tf.reduce_mean(tf.square(output - tf.reshape(self._targets, [-1])))
        self._final_state = states[-1]

        if not is_training:
            return

        self._lr = tf.Variable(0.0, trainable=False)
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), config.max_grad_norm)
        #optimizer = tf.train.GradientDescentOptimizer(self._lr)
        optimizer = tf.train.AdamOptimizer(self._lr)
        self._train_op = optimizer.apply_gradients(zip(grads, tvars)) 
Author: eric574, Project: Stocks-LSTM, Lines: 42, Source: TrainStockLSTM.py

Example 4: RNN

# Required import: from tensorflow.models.rnn import rnn [as alias]
# Or: from tensorflow.models.rnn.rnn import rnn [as alias]
def RNN(_X, _istate, _weights, _biases):

    # input shape: (batch_size, n_steps, n_input)
    _X = tf.transpose(_X, [1, 0, 2])  # permute n_steps and batch_size
    # Reshape to prepare input to hidden activation
    _X = tf.reshape(_X, [-1, n_input]) # (n_steps*batch_size, n_input)
    # Linear activation
    # _X = tf.matmul(_X, _weights['hidden']) + _biases['hidden']

    # Define a lstm cell with tensorflow
    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
    # Split because the rnn cell needs a list of inputs for the RNN inner loop
    _X = tf.split(0, n_steps, _X) # n_steps * (batch_size, n_input)

    # Get lstm cell output
    outputs, states = rnn.rnn(lstm_cell, _X, initial_state=_istate)

    # Linear activation on the inner loop's last output
    # return tf.matmul(outputs[-1], _weights['out']) + _biases['out']
    # output_w and output_b are module-level variables in the source project
    return tf.matmul(outputs[-1], output_w) + output_b
Author: x75, Project: seqrnns, Lines: 23, Source: recurrent_network_regression.py
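The transpose/reshape/split sequence in Examples 4 through 7 is purely a shape transformation. A small NumPy analogue (with illustrative sizes) makes the axis movement concrete:

import numpy as np

batch_size, n_steps, n_input = 2, 3, 4
X = np.arange(batch_size * n_steps * n_input).reshape(batch_size, n_steps, n_input)

X_t = X.transpose(1, 0, 2)            # (n_steps, batch_size, n_input)
X_flat = X_t.reshape(-1, n_input)     # (n_steps*batch_size, n_input)
steps = np.split(X_flat, n_steps, 0)  # n_steps arrays of (batch_size, n_input)

assert steps[1].shape == (batch_size, n_input)
assert (steps[1] == X[:, 1, :]).all()  # entry t holds every batch row at time t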

Example 5: RNN

# Required import: from tensorflow.models.rnn import rnn [as alias]
# Or: from tensorflow.models.rnn.rnn import rnn [as alias]
def RNN(_X, _istate, _weights, _biases):

    # input shape: (batch_size, n_steps, n_input)
    _X = tf.transpose(_X, [1, 0, 2])  # permute n_steps and batch_size
    # Reshape to prepare input to hidden activation
    _X = tf.reshape(_X, [-1, n_input]) # (n_steps*batch_size, n_input)
    # Linear activation
    _X = tf.matmul(_X, _weights['hidden']) + _biases['hidden']

    # Define a lstm cell with tensorflow
    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
    # Split data because rnn cell needs a list of inputs for the RNN inner loop
    _X = tf.split(0, n_steps, _X) # n_steps * (batch_size, n_hidden)

    # Get lstm cell output
    outputs, states = rnn.rnn(lstm_cell, _X, initial_state=_istate)

    # Linear activation
    # Get inner loop last output
    return tf.matmul(outputs[-1], _weights['out']) + _biases['out'], states[-1] 
Author: x75, Project: seqrnns, Lines: 22, Source: recurrent_network_4a.py

Example 6: RNN

# Required import: from tensorflow.models.rnn import rnn [as alias]
# Or: from tensorflow.models.rnn.rnn import rnn [as alias]
def RNN(_X, _istate, _weights, _biases):

    # input shape: (batch_size, n_steps, n_input)
    _X = tf.transpose(_X, [1, 0, 2])  # permute n_steps and batch_size
    # Reshape to prepare input to hidden activation
    _X = tf.reshape(_X, [-1, n_input]) # (n_steps*batch_size, n_input)
    # Linear activation
    _X = tf.matmul(_X, _weights['hidden']) + _biases['hidden']

    # Define a lstm cell with tensorflow
    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
    # Split data because rnn cell needs a list of inputs for the RNN inner loop
    _X = tf.split(0, n_steps, _X) # n_steps * (batch_size, n_hidden)

    # Get lstm cell output
    outputs, states = rnn.rnn(lstm_cell, _X, initial_state=_istate)

    # Linear activation
    # Get inner loop last output
    return tf.matmul(outputs[-1], _weights['out']) + _biases['out'] 
Author: x75, Project: seqrnns, Lines: 22, Source: recurrent_network_4.py

Example 7: RNN

# Required import: from tensorflow.models.rnn import rnn [as alias]
# Or: from tensorflow.models.rnn.rnn import rnn [as alias]
def RNN(x, weights, biases, init_state):

    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (batch_size, n_steps, n_input)
    # Permuting batch_size and n_steps
    x = tf.transpose(x, [1, 0, 2])  #(n_steps , batch_size,  n_input)
    # Reshaping to (n_steps*batch_size, n_input)
    x = tf.reshape(x, [-1, n_input])
    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input).
    # This input shape is required by the `rnn` function.
    x = tf.split(0, n_steps, x)

    '''
    If the transpose/reshape above is unclear, see the reshape demo in
    1_Introduction/basic_op. The goal is to convert the input into a list
    of n_steps tensors of shape [batch_size, cell.input_size], which is
    the input format the rnn.rnn function requires.
    '''

    # Define a lstm cell with tensorflow
    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)

    # Get lstm cell output
    outputs, states = rnn.rnn(lstm_cell, x, initial_state=init_state, dtype=tf.float32)

    # Linear activation, using rnn inner loop last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out'], lstm_cell.state_size 
Author: JinmingZhao, Project: TensorFlow-Examples-ZH, Lines: 28, Source: 3_lstm.py

Example 8: process_sentence_pair

# Required import: from tensorflow.models.rnn import rnn [as alias]
# Or: from tensorflow.models.rnn.rnn import rnn [as alias]
def process_sentence_pair(self, lsentence_raw, rsentence_raw, session, prev_state = None):
    """ TODO: this is very inefficient, especially without symbolic
        compiling; it should really be batched.
        Input sentences are plain strings."""
    # convert sentence into word vector array
    lsentence = convert_sentence_to_glove_vectors(lsentence_raw, self.vocabulary, self.glove_word_vectors, self.word_vec_size)
    rsentence = convert_sentence_to_glove_vectors(rsentence_raw, self.vocabulary, self.glove_word_vectors, self.word_vec_size)

    # 5 x 300
    _left_inputs = tf.placeholder(tf.float32, [len(lsentence), self.config.word_vec_size])
    _right_inputs = tf.placeholder(tf.float32, [len(rsentence), self.config.word_vec_size])

    # _targets = tf.placeholder(tf.int32)

    # Apply dropout filter
    # if self.is_training and self.config.keep_prob < 1:
    #   left_inputs = [tf.nn.dropout(input_, self.config.keep_prob) for input_ in left_inputs]
    #   right_inputs = [tf.nn.dropout(input_, self.config.keep_prob) for input_ in right_inputs]

    linputs = [ tf.reshape(i, (1, self.config.word_vec_size)) for i in tf.split(0, len(lsentence), _left_inputs)]
    rinputs = [ tf.reshape(i, (1, self.config.word_vec_size)) for i in tf.split(0, len(rsentence), _right_inputs)]

    if prev_state is None:
      prev_state = self.left_lstm_cell.zero_state(1, tf.float32)

    with tf.variable_scope("LeftLSTM"):
      loutputs, lstates = rnn.rnn(self.left_lstm_cell, linputs, initial_state=prev_state, sequence_length=len(lsentence))
    with tf.variable_scope("RightLSTM"):
      routputs, rstates = rnn.rnn(self.right_lstm_cell, rinputs, initial_state=prev_state, sequence_length=len(rsentence))

    iop = tf.initialize_all_variables()
    session.run(iop)

    # TODO: the actual loss function and relatedness softmax layer
    louts = session.run(loutputs, feed_dict = {_left_inputs : lsentence, _right_inputs : rsentence })


    # outputs at each timestep of the sentence (i.e. each word)
    print(louts)
    print(len(louts))
    # print(routs) 
Author: wolfhu, Project: LSTMRelatedness, Lines: 43, Source: relatedness.py

Example 9: __call__

# Required import: from tensorflow.models.rnn import rnn [as alias]
# Or: from tensorflow.models.rnn.rnn import rnn [as alias]
def __call__(self, inputs, start_state, scope=None):
        """Run this RNN cell on inputs, starting from the given state.
        Args:
          inputs: list of 2D Tensors with shape [batch_size x self.input_size].
          start_state: 2D Tensor with shape [batch_size x self.state_size].
          scope: VariableScope for the created subgraph; defaults to class name.
        Returns:
          A pair containing:
          - Outputs: list of 2D Tensors with shape [batch_size x self.output_size]
          - States: list of 2D Tensors with shape [batch_size x self.state_size].
        """
        with vs.variable_scope(scope or "Encoder"):
            return rnn_encoder_factory(self.cell, inputs, start_state) 
Author: sheffieldnlp, Project: stance-conditional, Lines: 15, Source: rnn.py
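rnn_encoder_factory is the project's own helper and is not shown in the snippet. A minimal hypothetical sketch consistent with the docstring above, simply unrolling the cell over the inputs with the legacy rnn.rnn, might look like this (the real implementation in stance-conditional may differ):

from tensorflow.models.rnn import rnn

# Hypothetical sketch of rnn_encoder_factory: unroll `cell` over the input
# list starting from start_state and return the outputs and states.
def rnn_encoder_factory(cell, inputs, start_state):
    outputs, states = rnn.rnn(cell, inputs, initial_state=start_state)
    return outputs, states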

Example 10: RNN

# Required import: from tensorflow.models.rnn import rnn [as alias]
# Or: from tensorflow.models.rnn.rnn import rnn [as alias]
def RNN(parameters, input, model, initial_state):
    # The model is:
    # 1. input
    # 2. linear layer
    # 3 - n. LSTM layers
    # n+1. linear layer
    # n+2. output
    input = tf.verify_tensor_all_finite(input, "Input not finite!")
    # input shape: (batch_size, n_steps, n_input)
    input = tf.transpose(input, [1, 0, 2])  # permute n_steps and batch_size
    input = tf.verify_tensor_all_finite(input, "Input not finite2!")
    
    # Reshape to prepare input to the linear layer
    input = tf.reshape(input, [-1, parameters['n_input']]) # (n_steps*batch_size, n_input)
    input = tf.verify_tensor_all_finite(input, "Input not finite3!")
    
    # First layer: linear activation for each batch and step.
    if 'input_weights' in model:
        input = tf.matmul(input, model['input_weights']) + model['input_bias']
        # input = tf.nn.dropout(input, model['keep_prob'])

    # Split data because rnn cell needs a list of inputs for the RNN inner loop,
    # that is, a n_steps length list of tensors shaped: (batch_size, n_inputs)
    # This is not well documented, but check for yourself here: https://goo.gl/NzA5pX
    input = tf.split(0, parameters['n_steps'], input) # n_steps * (batch_size, :)

    initial_state = tf.verify_tensor_all_finite(initial_state, "Initial state not finite!")
    # Note: States is shaped: batch_size x cell.state_size
    outputs, states = rnn.rnn(model['rnn_cell'], input, initial_state=initial_state)
    #outputs[-1] = tf.Print(outputs[-1], [outputs[-1]], "LSTM Output: ", summarize = 100)
    lastOutput = tf.verify_tensor_all_finite(outputs[-1], "LSTM Outputs not finite!")
    #lastOutput = tf.nn.dropout(lastOutput, model['keep_prob'])
    # Only the last output is interesting for error back propagation and prediction.
    # Note that all batches are handled together here.

    raw_output = tf.matmul(lastOutput, model['output_weights']) + model['output_bias']
    raw_output = tf.verify_tensor_all_finite(raw_output, "Raw output not finite!")
    
    n_mixtures = parameters['n_mixtures']
    batch_size = parameters['batch_size']
    # And now, instead of just outputting the expected value, we output mixture distributions.
    # The number of mixtures is intuitively the number of possible actions the target can take.
    # The output is divided into triplets of n_mixtures mixture parameters for the 2 absolute position coordinates.
    output = softmax_mixtures(raw_output, n_mixtures, batch_size)
    #output = tf.Print(output, [output], "Output: ", summarize = 100)
    output = tf.verify_tensor_all_finite(output, "Final output not finite!")

    return (output, states)

# Returns the generative LSTM stack created based on the parameters.
# Processes one input at a time.
# Input shape is: 1 x (parameters['n_input'])
# State shape is: 1 x (parameters['n_input']) 
Author: cybercom-finland, Project: location_tracking_ml, Lines: 55, Source: model.py
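softmax_mixtures is the author's own helper and is not included in the snippet. A hypothetical minimal sketch of the idea the comments describe, splitting the raw output into per-mixture parameters and normalizing the mixture weights with a softmax, could look like this (the project's actual parameterization may differ):

import tensorflow as tf

# Hypothetical sketch: treat the raw output as (weight, mean, scale)
# parameter groups for n_mixtures mixtures over 2 position coordinates,
# normalize the weights with a softmax and keep the scales positive.
def softmax_mixtures_sketch(raw_output, n_mixtures, batch_size):
    # raw_output assumed shaped (batch_size, 3 * 2 * n_mixtures)
    pi, mu, sigma = tf.split(1, 3, raw_output)
    pi = tf.nn.softmax(pi)   # mixture weights sum to 1
    sigma = tf.exp(sigma)    # scales must stay positive
    return tf.concat(1, [pi, mu, sigma])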

Example 11: Lstm_training

# Required import: from tensorflow.models.rnn import rnn [as alias]
# Or: from tensorflow.models.rnn.rnn import rnn [as alias]
def Lstm_training(self):
    # define placeholders
    data_in = tf.placeholder(tf.float32, shape=[None, n_step, n_input], name='indat')
    data_out = tf.placeholder(tf.float32, shape=[None], name='outdat')
    istate = tf.placeholder(tf.float32, [None, 2*self.size])

    # calculate the input to the hidden layer
    # note: data_in is rebound to its flattened 2-D view; the feed_dict
    # below feeds this flattened tensor directly
    data_in = tf.reshape(data_in, [-1, n_input])
    inputs = tf.matmul(data_in, self.iw) + self.ib

    # lstm cell
    lstm_cell = rnn_cell.BasicLSTMCell(self.size, forget_bias=0.0)

    # split the hidden-layer input because the rnn cell needs a list of inputs
    inputs = tf.split(0, n_step, inputs)

    # get the lstm cell output
    outputs, states = rnn.rnn(lstm_cell, inputs, initial_state=istate)

    # output layer
    data_o = tf.matmul(outputs[-1], self.ow) + self.ob
    #data_o = tf.transpose(data_o)

    # loss function with L2 regularization on the weights and biases
    loss = tf.reduce_mean(tf.square(data_o - tf.reshape(data_out, [-1])))
    regularizer = tf.nn.l2_loss(self.iw) + tf.nn.l2_loss(self.ib) \
        + tf.nn.l2_loss(self.ow) + tf.nn.l2_loss(self.ob)
    loss += 2e-3 * regularizer
    # optimization
    optimizer = tf.train.GradientDescentOptimizer(2e-3).minimize(loss)

    sess.run(tf.initialize_all_variables())
    for i in range(5000):
        print("iteration number: %d" % i)
        offset = i % 60
        batch_x = self.tr[batch_size*offset:batch_size*(offset+1), :]
        batch_y = self.expect[batch_size*offset+30:batch_size*(offset+1)+30]
        optimizer.run(feed_dict={data_in: batch_x, data_out: batch_y,
                                 istate: np.zeros((batch_size, 2*self.size))})
        if i % 10 == 0:
            loss_pr = loss.eval(feed_dict={
                data_in: batch_x, data_out: batch_y,
                istate: np.zeros((batch_size, 2*self.size))})
            print("step %d, training loss %g" % (i, loss_pr))
Author: longfeng-li, Project: stock_price_trend_prediction, Lines: 46, Source: rnn.py


Note: The tensorflow.models.rnn.rnn.rnn method examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from community-contributed open-source projects, and copyright in the source code remains with the original authors; consult each project's License before redistributing or reusing the code. Please do not reproduce this article without permission.