

Python rnn.BasicLSTMCell Method Code Examples

This article collects and summarizes typical usage examples of the Python method tensorflow.contrib.rnn.BasicLSTMCell. If you are wondering what rnn.BasicLSTMCell does, how to use it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from its parent module, tensorflow.contrib.rnn.


The following presents 10 code examples of rnn.BasicLSTMCell, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.

Example 1: RNN

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import BasicLSTMCell [as alias]
def RNN(x, weights, biases):

    # reshape input to [-1, n_input]
    x = tf.reshape(x, [-1, n_input])

    # Generate an n_input-element sequence of inputs
    # (e.g. [had] [a] [general] -> [20] [6] [33])
    x = tf.split(x, n_input, 1)

    # 2-layer LSTM, each layer has n_hidden units.
    # Average Accuracy= 95.20% at 50k iter
    rnn_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(n_hidden), rnn.BasicLSTMCell(n_hidden)])

    # 1-layer LSTM with n_hidden units but with lower accuracy.
    # Average Accuracy= 90.60% at 50k iter
    # Uncomment line below to test but comment out the 2-layer rnn.MultiRNNCell above
    # rnn_cell = rnn.BasicLSTMCell(n_hidden)

    # generate prediction
    outputs, states = rnn.static_rnn(rnn_cell, x, dtype=tf.float32)

    # there are n_input outputs but
    # we only want the last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out'] 
Author: Hironsan, Project: tensorflow-nlp-examples, Lines: 26, Source: word_rnn.py
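The function reads n_input, n_hidden, weights, and biases from module scope. Below is a minimal sketch of how they might be wired up before calling RNN; every value (including vocab_size) is an illustrative assumption, not taken from the original project.

import tensorflow as tf
from tensorflow.contrib import rnn

n_input = 3        # words fed in per step (assumed)
n_hidden = 512     # LSTM units per layer (assumed)
vocab_size = 112   # dictionary size (assumed)

# Each input word is represented by a single integer id.
x = tf.placeholder(tf.float32, [None, n_input, 1])
weights = {'out': tf.Variable(tf.random_normal([n_hidden, vocab_size]))}
biases = {'out': tf.Variable(tf.random_normal([vocab_size]))}

pred = RNN(x, weights, biases)  # logits over the vocabulary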

Example 2: __init__

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import BasicLSTMCell [as alias]
def __init__(self,
               num_units,
               forget_bias=1,
               state_is_tuple=True,
               output_is_tuple=True):
    def cell_fn(n):
      return rnn.BasicLSTMCell(num_units=n, forget_bias=forget_bias)
    super(Grid1BasicLSTMCell, self).__init__(
        num_units=num_units,
        num_dims=1,
        input_dims=0,
        output_dims=0,
        priority_dims=0,
        tied=False,
        cell_fn=cell_fn,
        state_is_tuple=state_is_tuple,
        output_is_tuple=output_is_tuple) 
Author: ryfeus, Project: lambda-packs, Lines: 19, Source: grid_rnn_cell.py
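Grid1BasicLSTMCell wraps rnn.BasicLSTMCell through the cell_fn factory and behaves like any other RNNCell. A minimal usage sketch, assuming the class is exposed as tf.contrib.grid_rnn.Grid1BasicLSTMCell (as in the TF 1.x contrib package); the batch and feature sizes are illustrative.

import tensorflow as tf
from tensorflow.contrib import grid_rnn

cell = grid_rnn.Grid1BasicLSTMCell(num_units=16)
x = tf.placeholder(tf.float32, [4, 8])  # one time step: (batch, features), sizes assumed
state = cell.zero_state(4, tf.float32)  # nested tuple state (state_is_tuple=True)
output, new_state = cell(x, state)      # output is a one-element tuple (output_is_tuple=True)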

Example 3: bidirectionnal_rnn

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import BasicLSTMCell [as alias]
def bidirectionnal_rnn(self, input_tensor, input_sequence_length):
    lstm_num_units = self.config.lstm_num_units
    print("rnn input tensor ===> ", input_tensor)
    with tf.variable_scope('lstm_layers'):
      # Name cells by layer index; naming them by unit count ('%d' % nh)
      # would produce duplicate names, since both layers share lstm_num_units.
      fw_cell_list = [rnn.BasicLSTMCell(nh, forget_bias=1.0, name='fw_cell_%d' % i)
                      for i, nh in enumerate([lstm_num_units] * 2)]
      bw_cell_list = [rnn.BasicLSTMCell(nh, forget_bias=1.0, name='bw_cell_%d' % i)
                      for i, nh in enumerate([lstm_num_units] * 2)]

      stack_lstm_layer, _, _ = rnn.stack_bidirectional_dynamic_rnn(
          cells_fw=fw_cell_list, 
          cells_bw=bw_cell_list, 
          inputs=input_tensor, 
          sequence_length=input_sequence_length, 
          dtype=tf.float32)
      hidden_num = lstm_num_units * 2
      rnn_reshaped = tf.nn.dropout(stack_lstm_layer, keep_prob=self.keep_prob)
      w = tf.get_variable(initializer=tf.truncated_normal([hidden_num, self.num_classes], stddev=0.02), name="w")
      w_t = tf.tile(tf.expand_dims(w, 0),[self.batch_size,1,1])
      logits = tf.matmul(rnn_reshaped, w_t, name="nn_logits")
      self.logits = tf.identity(tf.transpose(logits, (1, 0, 2)), name='logits')
      return logits 
Author: rockyzhengwu, Project: document-ocr, Lines: 22, Source: model.py
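The method depends on instance attributes (self.config, self.keep_prob, self.batch_size, self.num_classes). Here is a stripped-down, standalone sketch of the same stacked bidirectional pattern; all sizes are invented for illustration.

import tensorflow as tf
from tensorflow.contrib import rnn

units, layers = 256, 2
inputs = tf.placeholder(tf.float32, [None, None, 64])  # (batch, time, features)
seq_len = tf.placeholder(tf.int32, [None])

fw_cells = [rnn.BasicLSTMCell(units, forget_bias=1.0) for _ in range(layers)]
bw_cells = [rnn.BasicLSTMCell(units, forget_bias=1.0) for _ in range(layers)]
outputs, _, _ = rnn.stack_bidirectional_dynamic_rnn(
    cells_fw=fw_cells,
    cells_bw=bw_cells,
    inputs=inputs,
    sequence_length=seq_len,
    dtype=tf.float32)
# outputs has shape (batch, time, 2 * units): the top layer's forward and
# backward activations are concatenated on the last axis.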

Example 4: build_lstm

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import BasicLSTMCell [as alias]
def build_lstm(self):
        def build_cell():
            cell = rnn.BasicLSTMCell(self._hidden_size, forget_bias=1.0, state_is_tuple=True)
            cell = rnn.DropoutWrapper(cell, output_keep_prob=self._keep_prob)
            return cell
        mul_cell = rnn.MultiRNNCell([build_cell() for _ in range(self._num_layer)], 
                                    state_is_tuple=True)
        self._init_state = mul_cell.zero_state(self._num_seq, dtype=tf.float32)
        outputs, self._final_state = tf.nn.dynamic_rnn(mul_cell, self._inputs, 
                                                       initial_state=self._init_state)
        outputs = tf.reshape(outputs, [-1, self._hidden_size])
        W = tf.Variable(tf.truncated_normal([self._hidden_size, self._corpus.word_num],
                                            stddev=0.1, dtype=tf.float32))
        bias = tf.Variable(tf.zeros([1, self._corpus.word_num],
                                    dtype=tf.float32), dtype=tf.float32)
        self._prediction = tf.nn.softmax(tf.matmul(outputs, W) + bias)
Author: yhswjtuILMARE, Project: Machine-Learning-Study-Notes, Lines: 18, Source: model.py
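Outside the class, the same DropoutWrapper-plus-MultiRNNCell stack takes only a few lines. A minimal sketch with assumed values (hidden_size, num_layers, keep_prob, and the input shape are all illustrative):

import tensorflow as tf
from tensorflow.contrib import rnn

hidden_size, num_layers, keep_prob = 128, 2, 0.5  # assumed hyperparameters

def make_cell():
    cell = rnn.BasicLSTMCell(hidden_size, forget_bias=1.0, state_is_tuple=True)
    return rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)

stack = rnn.MultiRNNCell([make_cell() for _ in range(num_layers)],
                         state_is_tuple=True)
inputs = tf.placeholder(tf.float32, [32, 50, 128])  # (batch, time, embedding)
init_state = stack.zero_state(32, tf.float32)
outputs, final_state = tf.nn.dynamic_rnn(stack, inputs,
                                         initial_state=init_state)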

Example 5: RNN

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import BasicLSTMCell [as alias]
def RNN(x, weights, biases):

    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (batch_size, timesteps, n_input)
    # Required shape: 'timesteps' tensors list of shape (batch_size, n_input)

    # Unstack to get a list of 'timesteps' tensors of shape (batch_size, n_input)
    with tf.name_scope('RNN'):
        x = tf.unstack(x, timesteps, 1)
        variable_summaries(x)
        # Define an LSTM cell with TensorFlow
        lstm_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)
        #variable_summaries(lstm_cell)
        # Get lstm cell output
        outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
        #variable_summaries(outputs)
        #variable_summaries(states)
    # Linear activation, using rnn inner loop last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out'] 
Author: PacktPublishing, Project: Artificial-Intelligence-By-Example, Lines: 22, Source: LSTM.py
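Example 5 reads timesteps and num_hidden from module scope and calls a variable_summaries helper that must be defined elsewhere in the project. A sketch of how such a graph is typically fed, with MNIST-like shapes assumed:

import tensorflow as tf
from tensorflow.contrib import rnn

timesteps, num_input, num_hidden, num_classes = 28, 28, 128, 10  # assumed

X = tf.placeholder(tf.float32, [None, timesteps, num_input])
weights = {'out': tf.Variable(tf.random_normal([num_hidden, num_classes]))}
biases = {'out': tf.Variable(tf.random_normal([num_classes]))}

# The function from Example 5; variable_summaries must be defined
# (or stubbed out) for it to run.
logits = RNN(X, weights, biases)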

Example 6: LSTMs

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import BasicLSTMCell [as alias]
def LSTMs(x, weights, biases, timesteps, num_hidden):
    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (batch_size, timesteps, n_input)
    # Required shape: 'timesteps' tensors list of shape (batch_size, n_input)

    # Unstack to get a list of 'timesteps' tensors of shape (batch_size, n_input)
    x = tf.unstack(x, timesteps, 1)

    # Define an LSTM cell with TensorFlow
    lstm_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)

    # Get lstm cell output
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)

    # Linear activation, using rnn inner loop last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out'] 
Author: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 18, Source: tensorflow_LSTMs.py
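Unlike Examples 5 and 8-10, this version takes timesteps and num_hidden as explicit arguments instead of reading globals, so it can be called directly, for example (values assumed):

logits = LSTMs(x, weights, biases, timesteps=28, num_hidden=128)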

Example 7: RNN

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import BasicLSTMCell [as alias]
def RNN(x, weights, biases, timesteps, num_hidden):
    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (batch_size, timesteps, n_input)
    # Required shape: 'timesteps' tensors list of shape (batch_size, n_input)

    # Unstack to get a list of 'timesteps' tensors of shape (batch_size, n_input)
    x = tf.unstack(x, timesteps, 1)

    # Define an LSTM cell with TensorFlow
    lstm_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)

    # Get lstm cell output
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)

    # Linear activation, using rnn inner loop last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out'] 
Author: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 18, Source: tf_use_TF_LSTM.py

Example 8: RNN

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import BasicLSTMCell [as alias]
def RNN(x, weights, biases):
    timesteps = 28    # number of time steps
    num_hidden = 128  # number of hidden units


    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (batch_size, timesteps, n_input)
    # Required shape: 'timesteps' tensors list of shape (batch_size, n_input)

    # Unstack to get a list of 'timesteps' tensors of shape (batch_size, n_input)
    x = tf.unstack(x, timesteps, 1)

    # Define an LSTM cell with TensorFlow
    lstm_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)

    # Get lstm cell output
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)

    # Linear activation, using rnn inner loop last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out'] 
Author: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 22, Source: test_use_studyTF_LSTM_demo_me.1.py

Example 9: RNN

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import BasicLSTMCell [as alias]
def RNN(x, weights, biases):

    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (batch_size, timesteps, n_input)
    # Required shape: 'timesteps' tensors list of shape (batch_size, n_input)

    # Unstack to get a list of 'timesteps' tensors of shape (batch_size, n_input)
    x = tf.unstack(x, timesteps, 1)

    # Define an LSTM cell with TensorFlow
    lstm_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)

    # Get lstm cell output
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)

    # Linear activation, using rnn inner loop last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out'] 
Author: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 19, Source: use_studyTF_LSTM_demo_me.py

Example 10: RNN

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import BasicLSTMCell [as alias]
def RNN(x, weights, biases):
    timesteps = 83    # number of time steps
    num_hidden = 128  # number of hidden units


    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (batch_size, timesteps, n_input)
    # Required shape: 'timesteps' tensors list of shape (batch_size, n_input)

    # Unstack to get a list of 'timesteps' tensors of shape (batch_size, n_input)
    x = tf.unstack(x, timesteps, 1)

    # Define an LSTM cell with TensorFlow
    lstm_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)

    # Get lstm cell output
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)

    # Linear activation, using rnn inner loop last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out'] 
Author: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 22, Source: train_TF_LSTM_demo_me.py


Note: the tensorflow.contrib.rnn.BasicLSTMCell examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. Please consult the corresponding project's license before distributing or using the code, and do not repost without permission.