This article collects typical usage examples of tensorflow.python.ops.rnn_cell.BasicLSTMCell in Python. If you are unsure what rnn_cell.BasicLSTMCell does, how to call it, or want to see it in context, the curated code samples below may help. You can also explore the other members of its containing module, tensorflow.python.ops.rnn_cell.
The following presents 13 code examples of rnn_cell.BasicLSTMCell, sorted by popularity by default.
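Before the collected examples, here is a minimal self-contained sketch of the cell in use. It assumes a legacy TensorFlow 0.x/1.x environment where tensorflow.python.ops.rnn_cell is still importable; the shapes and hidden size are illustrative, not taken from any example below:

import tensorflow as tf
from tensorflow.python.ops import rnn_cell

x = tf.placeholder(tf.float32, [None, 20, 8])            # batch x time x features
cell = rnn_cell.BasicLSTMCell(32, state_is_tuple=True)   # LSTM with 32 hidden units
outputs, final_state = tf.nn.dynamic_rnn(cell, x, dtype=tf.float32)
# outputs: batch x time x 32; final_state: an LSTMStateTuple (c, h)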
Example 1: baseline_forward
# Required import: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import BasicLSTMCell [as alias]
def baseline_forward(self, X, size, n_class):
    shape = X.get_shape()
    # batch_size x sentence_length x word_length -> sentence_length x batch_size x word_length
    _X = tf.transpose(X, [1, 0, 2])
    _X = tf.reshape(_X, [-1, int(shape[2])])  # (sentence_length * batch_size) x word_length
    seq = tf.split(0, int(shape[1]), _X)  # sentence_length tensors of shape batch_size x word_length
    with tf.name_scope("LSTM"):
        lstm_cell = rnn_cell.BasicLSTMCell(size, forget_bias=1.0)
        outputs, states = rnn.rnn(lstm_cell, seq, dtype=tf.float32)
    with tf.name_scope("LSTM-Classifier"):
        W = tf.Variable(tf.random_normal([size, n_class]), name="W")
        b = tf.Variable(tf.random_normal([n_class]), name="b")
        output = tf.matmul(outputs[-1], W) + b
    return output
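A hypothetical call of this method (the batch size, sentence length, word-vector size, class count, and the enclosing model object are illustrative; the input's static shape must be fully known because the code calls int(shape[1]) and int(shape[2])):

X = tf.placeholder(tf.float32, [32, 30, 50])  # batch x sentence_length x word_length
logits = model.baseline_forward(X, size=128, n_class=2)  # model: hypothetical owner object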
Example 2: initialize_weights
# Required import: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import BasicLSTMCell [as alias]
def initialize_weights(self):
    cell_size = self.lw_cell_size
    self.dense_weighting_Q = weight_variable('dense_weighting_Q', [cell_size + cell_size, 1])
    self.dense_weighting_A = weight_variable('dense_weighting_A', [cell_size + cell_size, 1])
    with tf.variable_scope('lstm_cell_weighting_Q_fw'):
        self.lstm_cell_weighting_Q_fw = rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)
    with tf.variable_scope('lstm_cell_weighting_Q_bw'):
        self.lstm_cell_weighting_Q_bw = rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)
    with tf.variable_scope('lstm_cell_weighting_A_fw'):
        self.lstm_cell_weighting_A_fw = rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)
    with tf.variable_scope('lstm_cell_weighting_A_bw'):
        self.lstm_cell_weighting_A_bw = rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)
Example 3: initialize_weights
# Required import: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import BasicLSTMCell [as alias]
def initialize_weights(self):
    cell_size = self.lstm_pooling_cell_size
    self.mul_Q = weight_variable('mul_Q', [cell_size * 2, cell_size * 2])
    self.reduction_Q = weight_variable('reduction_Q', [cell_size * 2, 1])
    self.mul_A = weight_variable('mul_A', [cell_size * 2, cell_size * 2])
    self.reduction_A = weight_variable('reduction_A', [cell_size * 2, 1])
    with tf.variable_scope('lstm_cell_weighting_Q_fw'):
        self.lstm_cell_weighting_Q_fw = rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)
    with tf.variable_scope('lstm_cell_weighting_Q_bw'):
        self.lstm_cell_weighting_Q_bw = rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)
    with tf.variable_scope('lstm_cell_weighting_A_fw'):
        self.lstm_cell_weighting_A_fw = rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)
    with tf.variable_scope('lstm_cell_weighting_A_bw'):
        self.lstm_cell_weighting_A_bw = rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)
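Examples 2 and 3 both call a weight_variable helper that the snippets do not define. A common definition that would satisfy these call sites (an assumption, not necessarily the original authors' code):

def weight_variable(name, shape):
    # Trainable weight matrix with a small truncated-normal initialization.
    return tf.get_variable(name, shape,
                           initializer=tf.truncated_normal_initializer(stddev=0.1))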
Example 4: ndlstm_base_unrolled
# Required import: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import BasicLSTMCell [as alias]
def ndlstm_base_unrolled(inputs, noutput, scope=None, reverse=False):
    """Run an LSTM, either forward or backward.

    This is a 1D LSTM implementation using unrolling and the TensorFlow
    LSTM op.

    Args:
        inputs: input sequence (length, batch_size, ninput)
        noutput: depth of output
        scope: optional scope name
        reverse: run LSTM in reverse

    Returns:
        Output sequence (length, batch_size, noutput)
    """
    with variable_scope.variable_scope(scope, "SeqLstmUnrolled", [inputs]):
        length, batch_size, _ = _shape(inputs)
        lstm_cell = rnn_cell.BasicLSTMCell(noutput, state_is_tuple=False)
        state = array_ops.zeros([batch_size, lstm_cell.state_size])
        output_u = []
        inputs_u = array_ops.unstack(inputs)
        if reverse:
            inputs_u = list(reversed(inputs_u))
        for i in xrange(length):
            if i > 0:
                variable_scope.get_variable_scope().reuse_variables()
            output, state = lstm_cell(inputs_u[i], state)
            output_u += [output]
        if reverse:
            output_u = list(reversed(output_u))
        outputs = array_ops.stack(output_u)
        return outputs
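Examples 4 through 6 call a _shape helper that is not shown. These functions appear to come from TensorFlow's contrib lstm1d.py, where the helper is essentially the static-shape accessor below (reproduced here as an assumption):

def _shape(tensor):
    # Static shape of a tensor as a plain Python list of ints.
    return tensor.get_shape().as_list()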
Example 5: ndlstm_base_dynamic
# Required import: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import BasicLSTMCell [as alias]
def ndlstm_base_dynamic(inputs, noutput, scope=None, reverse=False):
    """Run an LSTM, either forward or backward.

    This is a 1D LSTM implementation using dynamic_rnn and
    the TensorFlow LSTM op.

    Args:
        inputs: input sequence (length, batch_size, ninput)
        noutput: depth of output
        scope: optional scope name
        reverse: run LSTM in reverse

    Returns:
        Output sequence (length, batch_size, noutput)
    """
    with variable_scope.variable_scope(scope, "SeqLstm", [inputs]):
        # TODO(tmb) make batch_size, sequence_length dynamic
        # example: sequence_length = tf.shape(inputs)[0]
        _, batch_size, _ = _shape(inputs)
        lstm_cell = rnn_cell.BasicLSTMCell(noutput, state_is_tuple=False)
        state = array_ops.zeros([batch_size, lstm_cell.state_size])
        sequence_length = int(inputs.get_shape()[0])
        sequence_lengths = math_ops.to_int64(
            array_ops.fill([batch_size], sequence_length))
        if reverse:
            inputs = array_ops.reverse_v2(inputs, [0])
        outputs, _ = rnn.dynamic_rnn(
            lstm_cell, inputs, sequence_lengths, state, time_major=True)
        if reverse:
            outputs = array_ops.reverse_v2(outputs, [0])
        return outputs
Example 6: sequence_to_final
# Required import: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import BasicLSTMCell [as alias]
def sequence_to_final(inputs, noutput, scope=None, name=None, reverse=False):
    """Run an LSTM across all steps and return only the final state.

    Args:
        inputs: (length, batch_size, depth) tensor
        noutput: size of output vector
        scope: optional scope name
        name: optional name for output tensor
        reverse: run in reverse

    Returns:
        Batch of size (batch_size, noutput).
    """
    with variable_scope.variable_scope(scope, "SequenceToFinal", [inputs]):
        length, batch_size, _ = _shape(inputs)
        lstm = rnn_cell.BasicLSTMCell(noutput, state_is_tuple=False)
        state = array_ops.zeros([batch_size, lstm.state_size])
        inputs_u = array_ops.unstack(inputs)
        if reverse:
            inputs_u = list(reversed(inputs_u))
        for i in xrange(length):
            if i > 0:
                variable_scope.get_variable_scope().reuse_variables()
            output, state = lstm(inputs_u[i], state)
        outputs = array_ops.reshape(output, [batch_size, noutput], name=name)
        return outputs
Example 7: initialize_weights
# Required import: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import BasicLSTMCell [as alias]
def initialize_weights(self):
    """Global initialization of weights for the representation layer."""
    with tf.variable_scope('lstm_cell_fw'):
        self.lstm_cell_forward = rnn_cell.BasicLSTMCell(self.lstm_cell_size, state_is_tuple=True)
    with tf.variable_scope('lstm_cell_bw'):
        self.lstm_cell_backward = rnn_cell.BasicLSTMCell(self.lstm_cell_size, state_is_tuple=True)
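A hypothetical way such forward/backward cells are typically consumed, using the legacy bidirectional dynamic RNN (embedded stands in for the layer's input tensor, which the snippet does not show):

outputs, states = tf.nn.bidirectional_dynamic_rnn(
    self.lstm_cell_forward, self.lstm_cell_backward,
    embedded, dtype=tf.float32)
# outputs is a (forward, backward) pair, each of shape batch x time x lstm_cell_size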
Example 8: LSTM_Model
# Required import: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import BasicLSTMCell [as alias]
def LSTM_Model():
    """Build the LSTM regression model.

    Uses module-level names rather than parameters:
        x: inputs of size [batch_size, T, input_size]
        W_out: matrix of fully-connected output layer weights
        b_out: vector of fully-connected output layer biases
    """
    cell = rnn_cell.BasicLSTMCell(hidden_dim)
    outputs, states = rnn.dynamic_rnn(cell, x, dtype=tf.float32)
    num_examples = tf.shape(x)[0]
    W_repeated = tf.tile(tf.expand_dims(W_out, 0), [num_examples, 1, 1])
    out = tf.matmul(outputs, W_repeated) + b_out
    out = tf.squeeze(out)
    return out
Developer: PacktPublishing · Project: Deep-Learning-with-TensorFlow-Second-Edition · Lines: 15 · Source file: TimeSeriesPredictor.py
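LSTM_Model reads several module-level names (x, hidden_dim, W_out, b_out) defined elsewhere in TimeSeriesPredictor.py. A plausible reconstruction of that surrounding setup (the sizes are illustrative, not necessarily the book's exact values):

input_dim, seq_size, hidden_dim = 1, 5, 100
x = tf.placeholder(tf.float32, [None, seq_size, input_dim])  # batch x T x input_size
W_out = tf.Variable(tf.random_normal([hidden_dim, 1]), name='W_out')
b_out = tf.Variable(tf.random_normal([1]), name='b_out')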
Example 9: __init__
# Required import: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import BasicLSTMCell [as alias]
def __init__(self, num_units, forget_bias=1):
    super(Grid1BasicLSTMCell, self).__init__(
        num_units=num_units, num_dims=1,
        input_dims=0, output_dims=0, priority_dims=0, tied=False,
        cell_fn=lambda n, i: rnn_cell.BasicLSTMCell(
            num_units=n,
            forget_bias=forget_bias, input_size=i,
            state_is_tuple=False))
Example 10: rnn_model
# Required import: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import BasicLSTMCell [as alias]
def rnn_model(x, weights, biases):
    """Build an RNN model for image data."""
    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, n_input])
    x = tf.split(0, n_steps, x)
    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
    outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)
    return tf.matmul(outputs[-1], weights) + biases
Example 11: rnn_model
# Required import: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import BasicLSTMCell [as alias]
def rnn_model(x, weights, biases):
    """RNN (LSTM or GRU) model for image data."""
    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, n_input])
    x = tf.split(0, n_steps, x)
    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
    outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)
    return tf.matmul(outputs[-1], weights) + biases
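Both rnn_model variants rely on module-level n_input, n_steps, n_hidden plus externally created weights and biases. A hypothetical MNIST-style setup that matches the shapes the functions expect (values are illustrative):

n_input, n_steps, n_hidden, n_classes = 28, 28, 128, 10  # 28x28 images, one row per step
x = tf.placeholder(tf.float32, [None, n_steps, n_input])
weights = tf.Variable(tf.random_normal([n_hidden, n_classes]))
biases = tf.Variable(tf.random_normal([n_classes]))
pred = rnn_model(x, weights, biases)  # batch x n_classes logits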
Example 12: build_graph
# Required import: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import BasicLSTMCell [as alias]
def build_graph(self):
    config = self.config
    self.reader = utils.DataReader(seq_len=config.seq_length, batch_size=config.batch_size, data_filename=config.data_filename)
    self.cell = rnn_cell.BasicLSTMCell(config.rnn_size, state_is_tuple=True)
    self.input_data = tf.placeholder(tf.int32, [None, config.input_length])
    self.targets = tf.placeholder(tf.int32, [None, 1])
    self.initial_state = self.cell.zero_state(tf.shape(self.targets)[0], tf.float32)
    with tf.variable_scope("input_embedding"):
        embedding = tf.get_variable("embedding", [config.vocab_size, config.rnn_size])
        inputs = tf.split(1, config.input_length, tf.nn.embedding_lookup(embedding, self.input_data))
        inputs = [tf.squeeze(input_, [1]) for input_ in inputs]
    with tf.variable_scope("send_to_rnn"):
        state = self.initial_state
        output = None
        for i, input_ in enumerate(inputs):
            if i > 0:
                tf.get_variable_scope().reuse_variables()
            output, state = self.cell(input_, state)
    with tf.variable_scope("softmax"):
        softmax_w = tf.get_variable("softmax_w", [config.rnn_size, config.vocab_size])
        softmax_b = tf.get_variable("softmax_b", [config.vocab_size])
        self.logits = tf.matmul(output, softmax_w) + softmax_b
        self.probs = tf.nn.softmax(self.logits)
    self.output = tf.cast(tf.reshape(tf.arg_max(self.probs, 1), [-1, 1]), tf.int32)
    self.accuracy = tf.reduce_mean(tf.cast(tf.equal(self.output, self.targets), tf.float32))
    loss = seq2seq.sequence_loss_by_example([self.logits],
                                            [tf.reshape(self.targets, [-1])],
                                            [tf.ones([config.batch_size])],
                                            config.vocab_size)
    self.cost = tf.reduce_mean(loss)
    self.final_state = state
    # self.lr = tf.Variable(0.001, trainable=False)
    tvars = tf.trainable_variables()
    grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars),
                                      config.grad_clip)
    optimizer = tf.train.AdamOptimizer()  # could pass self.lr here
    self.train_op = optimizer.apply_gradients(zip(grads, tvars))
    self.summary_accuracy = tf.scalar_summary('accuracy', self.accuracy)
    tf.scalar_summary('cost', self.cost)
    self.summary_all = tf.merge_all_summaries()
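build_graph pulls its hyperparameters from self.config. A hypothetical config object exposing every attribute the method reads (the values are illustrative, not from the original project):

class Config(object):
    seq_length = 20       # consumed by utils.DataReader
    batch_size = 32
    data_filename = 'data.txt'
    rnn_size = 128        # LSTM hidden size and embedding width
    input_length = 20     # number of unrolled steps
    vocab_size = 5000
    grad_clip = 5.0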
Example 13: create_model
# Required import: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import BasicLSTMCell [as alias]
def create_model(max_word_id, is_test=False):
    GO_VALUE = max_word_id + 1
    network = tflearn.input_data(shape=[None, max_seq_len + max_seq_len], dtype=tf.int32, name="XY")
    encoder_inputs = tf.slice(network, [0, 0], [-1, max_seq_len], name="enc_in")
    encoder_inputs = tf.unpack(encoder_inputs, axis=1)
    decoder_inputs = tf.slice(network, [0, max_seq_len], [-1, max_seq_len], name="dec_in")
    decoder_inputs = tf.unpack(decoder_inputs, axis=1)
    go_input = tf.mul(tf.ones_like(decoder_inputs[0], dtype=tf.int32), GO_VALUE)
    decoder_inputs = [go_input] + decoder_inputs[:max_seq_len - 1]
    num_encoder_symbols = max_word_id + 1  # IDs start at 0
    num_decoder_symbols = max_word_id + 2  # includes GO
    cell = rnn_cell.BasicLSTMCell(16 * max_seq_len, state_is_tuple=True)
    model_outputs, states = seq2seq.embedding_rnn_seq2seq(
        encoder_inputs,
        decoder_inputs,
        cell,
        num_encoder_symbols=num_encoder_symbols,
        num_decoder_symbols=num_decoder_symbols,
        embedding_size=max_word_id,
        feed_previous=is_test)
    network = tf.pack(model_outputs, axis=1)
    targetY = tf.placeholder(shape=[None, max_seq_len], dtype=tf.float32, name="Y")
    network = tflearn.regression(
        network,
        placeholder=targetY,
        optimizer='adam',
        learning_rate=learning_rate,
        loss=sequence_loss,
        metric=accuracy,
        name="Y")
    print "begin create DNN model"
    model = tflearn.DNN(network, tensorboard_verbose=0, checkpoint_path=None)
    print "create DNN model finish"
    return model
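A hypothetical training call for this model, assuming max_word_id and the packed trainXY / trainY arrays (encoder and decoder sequences concatenated along axis 1, as the input layer expects) are prepared elsewhere; the epoch count and batch size are illustrative:

model = create_model(max_word_id)
model.fit(trainXY, trainY, n_epoch=10, batch_size=32, show_metric=True)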