This article collects typical usage examples of the Python method tensorflow.models.rnn.rnn_cell.MultiRNNCell. If you are wondering how to use rnn_cell.MultiRNNCell, what it is for, or what real code that calls it looks like, the hand-picked examples below should help. You can also explore further usage examples from the containing module, tensorflow.models.rnn.rnn_cell.
The following shows 4 code examples of the rnn_cell.MultiRNNCell method, sorted by popularity by default.
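Before the examples, here is a minimal sketch of the basic pattern: MultiRNNCell takes a list of cells and stacks them into a single composite cell. It assumes the legacy tensorflow.models.rnn API (TF 0.x), and the layer count and sizes are illustrative only.

import tensorflow as tf
from tensorflow.models.rnn import rnn_cell

# One LSTM layer definition, repeated three times to form a stacked cell.
lstm = rnn_cell.BasicLSTMCell(128, forget_bias=0.0)
stacked = rnn_cell.MultiRNNCell([lstm] * 3)
# The stacked cell behaves like a single cell: it exposes zero_state and is callable.
state = stacked.zero_state(32, tf.float32)   # zero initial state for batch size 32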
Example 1: build_lstm_inner
# Required import: from tensorflow.models.rnn import rnn_cell
# Or: from tensorflow.models.rnn.rnn_cell import MultiRNNCell
def build_lstm_inner(H, lstm_input):
    '''
    build lstm decoder
    '''
    lstm_cell = rnn_cell.BasicLSTMCell(H['lstm_size'], forget_bias=0.0)
    if H['num_lstm_layers'] > 1:
        # Stack identical LSTM layers into a single cell.
        lstm = rnn_cell.MultiRNNCell([lstm_cell] * H['num_lstm_layers'])
    else:
        lstm = lstm_cell

    # Each grid cell of every image is treated as an independent sequence in the batch.
    batch_size = H['batch_size'] * H['grid_height'] * H['grid_width']
    state = tf.zeros([batch_size, lstm.state_size])
    outputs = []
    with tf.variable_scope('RNN', initializer=tf.random_uniform_initializer(-0.1, 0.1)):
        for time_step in range(H['rnn_len']):
            if time_step > 0:
                tf.get_variable_scope().reuse_variables()
            # The same input is fed at every step; only the recurrent state evolves.
            output, state = lstm(lstm_input, state)
            outputs.append(output)
    return outputs
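A hypothetical call might look like the following. The hyperparameter dictionary H is an assumption modeled on the keys the function reads, and the 512-dimensional zero tensor stands in for real per-grid-cell features; neither comes from the original project.

H = {'lstm_size': 256, 'num_lstm_layers': 2, 'rnn_len': 5,
     'batch_size': 8, 'grid_height': 15, 'grid_width': 20}
# One feature vector per grid cell, flattened over the batch and the grid.
lstm_input = tf.zeros([H['batch_size'] * H['grid_height'] * H['grid_width'], 512])
outputs = build_lstm_inner(H, lstm_input)   # list of H['rnn_len'] output tensors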
Example 2: __init__
# Required import: from tensorflow.models.rnn import rnn_cell
# Or: from tensorflow.models.rnn.rnn_cell import MultiRNNCell
# (this example also uses: from tensorflow.models.rnn import rnn)
def __init__(self, is_training, config):
    self.batch_size = batch_size = config.batch_size
    self.num_steps = num_steps = config.num_steps
    size = config.hidden_size

    # Inputs and targets are sequences of scalars (a regression variant of the PTB model).
    self._input_data = tf.placeholder(tf.float32, [batch_size, num_steps])
    self._targets = tf.placeholder(tf.float32, [batch_size, num_steps])

    lstm_cell = rnn_cell.BasicLSTMCell(size, forget_bias=0.0)
    if is_training and config.keep_prob < 1:
        lstm_cell = rnn_cell.DropoutWrapper(lstm_cell, output_keep_prob=config.keep_prob)
    cell = rnn_cell.MultiRNNCell([lstm_cell] * config.num_layers)

    self._initial_state = cell.zero_state(batch_size, tf.float32)

    # Project each scalar input up to the hidden size before feeding the RNN.
    iw = tf.get_variable("input_w", [1, size])
    ib = tf.get_variable("input_b", [size])
    inputs = [tf.nn.xw_plus_b(i_, iw, ib) for i_ in tf.split(1, num_steps, self._input_data)]
    if is_training and config.keep_prob < 1:
        inputs = [tf.nn.dropout(input_, config.keep_prob) for input_ in inputs]

    outputs, states = rnn.rnn(cell, inputs, initial_state=self._initial_state)
    rnn_output = tf.reshape(tf.concat(1, outputs), [-1, size])

    # Project the RNN outputs back down to one scalar per step.
    self._output = output = tf.nn.xw_plus_b(rnn_output,
                                            tf.get_variable("out_w", [size, 1]),
                                            tf.get_variable("out_b", [1]))
    # Flatten the prediction as well, so the subtraction is element-wise instead of
    # broadcasting [N, 1] against [N].
    self._cost = cost = tf.reduce_mean(
        tf.square(tf.reshape(output, [-1]) - tf.reshape(self._targets, [-1])))
    self._final_state = states[-1]

    if not is_training:
        return

    self._lr = tf.Variable(0.0, trainable=False)
    tvars = tf.trainable_variables()
    grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), config.max_grad_norm)
    #optimizer = tf.train.GradientDescentOptimizer(self._lr)
    optimizer = tf.train.AdamOptimizer(self._lr)
    self._train_op = optimizer.apply_gradients(zip(grads, tvars))
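The learning-rate variable _lr is created with value 0.0 and is not trainable, so the training loop has to set it explicitly. A helper along these lines is a common pattern in PTB-style models; it is not part of the excerpt above, just a sketch of how the rate would be assigned:

    def assign_lr(self, session, lr_value):
        # _lr is a non-trainable variable, so update it with an explicit assign op.
        session.run(tf.assign(self._lr, lr_value))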
Example 3: __init__
# Required import: from tensorflow.models.rnn import rnn_cell
# Or: from tensorflow.models.rnn.rnn_cell import MultiRNNCell
def __init__(self, rnn_size, rnn_layer, batch_size, input_embedding_size, dim_image,
             dim_hidden, max_words_q, vocabulary_size, drop_out_rate):
    self.rnn_size = rnn_size
    self.rnn_layer = rnn_layer
    self.batch_size = batch_size
    self.input_embedding_size = input_embedding_size
    self.dim_image = dim_image
    self.dim_hidden = dim_hidden
    self.max_words_q = max_words_q
    self.vocabulary_size = vocabulary_size
    self.drop_out_rate = drop_out_rate

    # question-embedding
    self.embed_ques_W = tf.Variable(
        tf.random_uniform([self.vocabulary_size, self.input_embedding_size], -0.08, 0.08),
        name='embed_ques_W')

    # encoder: RNN body (two peephole LSTM layers with dropout, stacked by MultiRNNCell)
    self.lstm_1 = rnn_cell.LSTMCell(rnn_size, input_embedding_size, use_peepholes=True)
    self.lstm_dropout_1 = rnn_cell.DropoutWrapper(self.lstm_1, output_keep_prob=1 - self.drop_out_rate)
    self.lstm_2 = rnn_cell.LSTMCell(rnn_size, rnn_size, use_peepholes=True)
    self.lstm_dropout_2 = rnn_cell.DropoutWrapper(self.lstm_2, output_keep_prob=1 - self.drop_out_rate)
    self.stacked_lstm = rnn_cell.MultiRNNCell([self.lstm_dropout_1, self.lstm_dropout_2])

    # state-embedding
    self.embed_state_W = tf.Variable(
        tf.random_uniform([2 * rnn_size * rnn_layer, self.dim_hidden], -0.08, 0.08), name='embed_state_W')
    self.embed_state_b = tf.Variable(tf.random_uniform([self.dim_hidden], -0.08, 0.08), name='embed_state_b')
    # image-embedding
    self.embed_image_W = tf.Variable(tf.random_uniform([dim_image, self.dim_hidden], -0.08, 0.08), name='embed_image_W')
    self.embed_image_b = tf.Variable(tf.random_uniform([dim_hidden], -0.08, 0.08), name='embed_image_b')
    # score-embedding (num_output is a module-level constant defined outside this snippet,
    # e.g. the number of candidate answers)
    self.embed_scor_W = tf.Variable(tf.random_uniform([dim_hidden, num_output], -0.08, 0.08), name='embed_scor_W')
    self.embed_scor_b = tf.Variable(tf.random_uniform([num_output], -0.08, 0.08), name='embed_scor_b')
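The constructor above only creates the cells and embedding parameters; it does not show how the stacked LSTM is run over the question. The method below is a rough sketch of such a forward pass. The method name, the question tensor, and the tanh projection are assumptions for illustration rather than the original model code; the only property it relies on from the snippet is that the concatenated LSTM state has size 2*rnn_size*rnn_layer, matching embed_state_W.

    def build_question_encoder(self, question):
        # question: int32 tensor of shape [batch_size, max_words_q]
        state = self.stacked_lstm.zero_state(self.batch_size, tf.float32)
        with tf.variable_scope('question_rnn'):
            for i in range(self.max_words_q):
                if i > 0:
                    tf.get_variable_scope().reuse_variables()
                # Look up the embedding of the i-th word of every question in the batch.
                word_emb = tf.nn.embedding_lookup(self.embed_ques_W, question[:, i])
                output, state = self.stacked_lstm(word_emb, state)
        # Project the final concatenated state (size 2*rnn_size*rnn_layer) into the
        # joint hidden space shared with the image embedding.
        return tf.tanh(tf.nn.xw_plus_b(state, self.embed_state_W, self.embed_state_b))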
Example 4: __init__
# Required import: from tensorflow.models.rnn import rnn_cell
# Or: from tensorflow.models.rnn.rnn_cell import MultiRNNCell
# (this example also uses: from tensorflow.models.rnn import seq2seq)
def __init__(self, args, infer=False):
    self.args = args
    if infer:
        # At sampling time, feed one symbol at a time.
        args.batch_size = 1
        args.seq_length = 1

    if args.model == 'rnn':
        cell_fn = rnn_cell.BasicRNNCell
    elif args.model == 'gru':
        cell_fn = rnn_cell.GRUCell
    elif args.model == 'lstm':
        cell_fn = rnn_cell.BasicLSTMCell
    else:
        raise Exception("model type not supported: {}".format(args.model))

    cell = cell_fn(args.rnn_size)
    self.cell = cell = rnn_cell.MultiRNNCell([cell] * args.num_layers)

    self.input_data = tf.placeholder(tf.int32, [args.batch_size, args.seq_length])
    self.targets = tf.placeholder(tf.int32, [args.batch_size, args.seq_length])
    self.initial_state = cell.zero_state(args.batch_size, tf.float32)

    with tf.variable_scope('rnnlm'):
        softmax_w = tf.get_variable("softmax_w", [args.rnn_size, args.vocab_size])
        softmax_b = tf.get_variable("softmax_b", [args.vocab_size])
        with tf.device("/cpu:0"):
            embedding = tf.get_variable("embedding", [args.vocab_size, args.rnn_size])
            inputs = tf.split(1, args.seq_length, tf.nn.embedding_lookup(embedding, self.input_data))
            inputs = [tf.squeeze(input_, [1]) for input_ in inputs]

    def loop(prev, _):
        # At inference time, feed the previous step's argmax prediction back in as input.
        prev = tf.matmul(prev, softmax_w) + softmax_b
        prev_symbol = tf.stop_gradient(tf.argmax(prev, 1))
        return tf.nn.embedding_lookup(embedding, prev_symbol)

    outputs, last_state = seq2seq.rnn_decoder(inputs, self.initial_state, cell,
                                              loop_function=loop if infer else None, scope='rnnlm')
    output = tf.reshape(tf.concat(1, outputs), [-1, args.rnn_size])
    self.logits = tf.matmul(output, softmax_w) + softmax_b
    self.probs = tf.nn.softmax(self.logits)

    loss = seq2seq.sequence_loss_by_example([self.logits],
                                            [tf.reshape(self.targets, [-1])],
                                            [tf.ones([args.batch_size * args.seq_length])],
                                            args.vocab_size)
    self.cost = tf.reduce_sum(loss) / args.batch_size / args.seq_length
    self.final_state = last_state

    self.lr = tf.Variable(0.0, trainable=False)
    tvars = tf.trainable_variables()
    grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars),
                                      args.grad_clip)
    optimizer = tf.train.AdamOptimizer(self.lr)
    self.train_op = optimizer.apply_gradients(zip(grads, tvars))
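For sampling, the model is typically rebuilt (in a fresh graph or process) with infer=True, which forces batch_size and seq_length to 1 so the loop function can feed each argmax prediction back in as the next input. In the sketch below, the class name Model and the hyperparameter values are assumptions for illustration; the snippet does not show the enclosing class or how args is constructed.

import argparse

args = argparse.Namespace(model='lstm', rnn_size=128, num_layers=2,
                          batch_size=50, seq_length=50, vocab_size=65,
                          grad_clip=5.0)
model = Model(args, infer=True)   # batch_size and seq_length are forced to 1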