This article collects typical usage examples of the Python method tensorflow.contrib.legacy_seq2seq.sequence_loss_by_example. If you are unsure what legacy_seq2seq.sequence_loss_by_example does or how to use it, the curated code samples below may help. You can also explore the containing module, tensorflow.contrib.legacy_seq2seq, for related functionality.
Four code examples of legacy_seq2seq.sequence_loss_by_example are shown below, sorted by popularity by default.
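Before the examples, a minimal sketch of the method's contract may help (TensorFlow 1.x with tf.contrib available; the toy shapes are illustrative and not taken from any example below). sequence_loss_by_example takes parallel per-time-step lists of logits, targets, and weights, and returns a 1-D batch-sized tensor holding each example's weighted cross-entropy (log-perplexity):

import tensorflow as tf
from tensorflow.contrib import legacy_seq2seq

batch_size, num_steps, vocab_size = 2, 3, 5

# One [batch_size, vocab_size] logits tensor and one [batch_size] int target
# tensor per time step; the per-step weights are typically used to mask padding.
logits = [tf.random_normal([batch_size, vocab_size]) for _ in range(num_steps)]
targets = [tf.zeros([batch_size], dtype=tf.int32) for _ in range(num_steps)]
weights = [tf.ones([batch_size]) for _ in range(num_steps)]

# Shape [batch_size]: per-example cross-entropy, averaged across time steps
# by default (average_across_timesteps=True).
loss = legacy_seq2seq.sequence_loss_by_example(logits, targets, weights)

with tf.Session() as sess:
    print(sess.run(loss))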
Example 1: generate_sequence_output
# Required import: from tensorflow.contrib import legacy_seq2seq [as an alias]
# Or: from tensorflow.contrib.legacy_seq2seq import sequence_loss_by_example [as an alias]
def generate_sequence_output(num_encoder_symbols,
                             encoder_outputs,
                             encoder_state,
                             targets,
                             sequence_length,
                             num_decoder_symbols,
                             weights,
                             buckets,
                             softmax_loss_function=None,
                             per_example_loss=False,
                             name=None,
                             use_attention=False):
  if len(targets) < buckets[-1][1]:
    raise ValueError("Length of targets (%d) must be at least that of last "
                     "bucket (%d)." % (len(targets), buckets[-1][1]))

  all_inputs = encoder_outputs + targets + weights
  with tf.name_scope(name, "model_with_buckets", all_inputs):
    with tf.variable_scope("decoder_sequence_output", reuse=None):
      # attention_RNN is defined elsewhere in the source module.
      logits, attention_weights = attention_RNN(encoder_outputs,
                                                encoder_state,
                                                num_decoder_symbols,
                                                sequence_length,
                                                use_attention=use_attention)
      if per_example_loss:
        assert len(logits) == len(targets)
        # We need to make targets int64 tensors and flatten them.
        bucket_target = [tf.reshape(tf.to_int64(x), [-1]) for x in targets]
        crossent = sequence_loss_by_example(
            logits, bucket_target, weights,
            softmax_loss_function=softmax_loss_function)
      else:
        assert len(logits) == len(targets)
        bucket_target = [tf.reshape(tf.to_int64(x), [-1]) for x in targets]
        crossent = sequence_loss(
            logits, bucket_target, weights,
            softmax_loss_function=softmax_loss_function)

  return logits, crossent
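Note the per_example_loss switch: sequence_loss_by_example returns a 1-D batch-sized tensor with one loss value per example, while sequence_loss (also from legacy_seq2seq) collapses the same quantity into a single scalar averaged across the batch. tf, attention_RNN, and sequence_loss are assumed to be imported or defined elsewhere in the source module.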
Example 2: __init__
# Required import: from tensorflow.contrib import legacy_seq2seq [as an alias]
# Or: from tensorflow.contrib.legacy_seq2seq import sequence_loss_by_example [as an alias]
def __init__(self, args, data, infer=False):
    if infer:
        args.batch_size = 1
        args.seq_length = 1

    with tf.name_scope('inputs'):
        self.input_data = tf.placeholder(
            tf.int32, [args.batch_size, args.seq_length])
        self.target_data = tf.placeholder(
            tf.int32, [args.batch_size, args.seq_length])

    with tf.name_scope('model'):
        self.cell = []
        for n in range(args.num_layers):
            self.cell.append(tf.nn.rnn_cell.LSTMCell(args.state_size))
        self.cell = tf.nn.rnn_cell.MultiRNNCell(self.cell)
        self.initial_state = self.cell.zero_state(
            args.batch_size, tf.float32)
        with tf.variable_scope('rnnlm'):
            w = tf.get_variable(
                'softmax_w', [args.state_size, data.vocab_size])
            b = tf.get_variable('softmax_b', [data.vocab_size])
            with tf.device("/cpu:0"):
                embedding = tf.get_variable(
                    'embedding', [data.vocab_size, args.state_size])
                inputs = tf.nn.embedding_lookup(embedding, self.input_data)
        outputs, last_state = tf.nn.dynamic_rnn(
            self.cell, inputs, initial_state=self.initial_state)

    with tf.name_scope('loss'):
        output = tf.reshape(outputs, [-1, args.state_size])
        self.logits = tf.matmul(output, w) + b
        self.probs = tf.nn.softmax(self.logits)
        self.last_state = last_state

        targets = tf.reshape(self.target_data, [-1])
        # seq2seq is the alias under which legacy_seq2seq is imported.
        loss = seq2seq.sequence_loss_by_example(
            [self.logits],
            [targets],
            [tf.ones_like(targets, dtype=tf.float32)])
        self.cost = tf.reduce_sum(loss) / args.batch_size
        tf.summary.scalar('loss', self.cost)

    with tf.name_scope('optimize'):
        self.lr = tf.placeholder(tf.float32, [])
        tf.summary.scalar('learning_rate', self.lr)

        optimizer = tf.train.AdamOptimizer(self.lr)
        tvars = tf.trainable_variables()
        grads = tf.gradients(self.cost, tvars)
        for g in grads:
            tf.summary.histogram(g.name, g)
        grads, _ = tf.clip_by_global_norm(grads, args.grad_clip)

        self.train_op = optimizer.apply_gradients(zip(grads, tvars))
        self.merged_op = tf.summary.merge_all()
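Assuming the class above is named Model and that the data object exposes a next_batch() helper (both names are illustrative, not taken from the example), a training step could look like this:

model = Model(args, data)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    x, y = data.next_batch()  # assumed helper returning [batch_size, seq_length] int arrays
    feed = {model.input_data: x, model.target_data: y, model.lr: 0.001}
    _, cost, summary = sess.run([model.train_op, model.cost, model.merged_op],
                                feed_dict=feed)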
Example 3: build
# Required import: from tensorflow.contrib import legacy_seq2seq [as an alias]
# Or: from tensorflow.contrib.legacy_seq2seq import sequence_loss_by_example [as an alias]
def build(self):
    self.inputs = tf.placeholder(tf.int32, [self.batch_size, None])
    self.targets = tf.placeholder(tf.int32, [self.batch_size, None])
    self.keep_prob = tf.placeholder(tf.float32)
    self.seq_len = tf.placeholder(tf.int32, [self.batch_size])
    self.learning_rate = tf.placeholder(tf.float64)

    with tf.variable_scope('rnn'):
        w = tf.get_variable("softmax_w", [self.num_units, self.data.words_size])
        b = tf.get_variable("softmax_b", [self.data.words_size])

        embedding = tf.get_variable("embedding", [self.data.words_size, self.num_units])
        inputs = tf.nn.embedding_lookup(embedding, self.inputs)

    self.cell = tf.nn.rnn_cell.MultiRNNCell([self.unit() for _ in range(self.num_layer)])
    self.init_state = self.cell.zero_state(self.batch_size, dtype=tf.float32)

    output, self.final_state = tf.nn.dynamic_rnn(self.cell,
                                                 inputs=inputs,
                                                 sequence_length=self.seq_len,
                                                 initial_state=self.init_state,
                                                 scope='rnn')

    with tf.name_scope('fc'):
        y = tf.reshape(output, [-1, self.num_units])
        logits = tf.matmul(y, w) + b

    with tf.name_scope('softmax'):
        prob = tf.nn.softmax(logits)
        self.prob = tf.reshape(prob, [self.batch_size, -1])
        pre = tf.argmax(prob, 1)
        self.pre = tf.reshape(pre, [self.batch_size, -1])

    targets = tf.reshape(self.targets, [-1])

    with tf.name_scope('loss'):
        # seq2seq is the alias under which legacy_seq2seq is imported.
        loss = seq2seq.sequence_loss_by_example(
            [logits],
            [targets],
            [tf.ones_like(targets, dtype=tf.float32)])
        self.loss = tf.reduce_mean(loss)

    with tf.name_scope('summary'):
        tf.summary.scalar('loss', self.loss)
        self.merged_summary = tf.summary.merge_all()

    with tf.name_scope('optimizer'):
        optimizer = tf.train.AdamOptimizer(self.learning_rate)
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(self.loss, tvars), 5)
        self.train_op = optimizer.apply_gradients(zip(grads, tvars))
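Because the logits and targets are flattened to [batch_size * seq_length] before the call, the returned loss here has one entry per token, so tf.reduce_mean averages over both batch and time; Example 2's tf.reduce_sum(loss) / batch_size instead yields the per-sequence loss summed over time steps, averaged across the batch.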
Example 4: __init__
# Required import: from tensorflow.contrib import legacy_seq2seq [as an alias]
# Or: from tensorflow.contrib.legacy_seq2seq import sequence_loss_by_example [as an alias]
def __init__(self, args, training=True):
    """Initialize the RNN model."""
    self.args = args
    if not training:
        args.batch_size = 1
        args.seq_length = 1

    # rnn is assumed to be tensorflow.contrib.rnn.
    cell_fn = rnn.GRUCell
    cells = [cell_fn(args.rnn_size) for _ in range(args.num_layers)]
    self.cell = cell = rnn.MultiRNNCell(cells, state_is_tuple=True)

    self.input_data = tf.placeholder(tf.int32, [args.batch_size, args.seq_length])
    self.targets = tf.placeholder(tf.int32, [args.batch_size, args.seq_length])
    self.initial_state = cell.zero_state(args.batch_size, tf.float32)

    with tf.variable_scope('rnn'):
        softmax_w = tf.get_variable("softmax_w", [args.rnn_size, args.vocab_size])
        softmax_b = tf.get_variable("softmax_b", [args.vocab_size])
        embedding = tf.get_variable("embedding", [args.vocab_size, args.rnn_size])
        inputs = tf.nn.embedding_lookup(embedding, self.input_data)

    inputs = tf.split(inputs, args.seq_length, 1)
    inputs = [tf.squeeze(input_, [1]) for input_ in inputs]

    def loop(prev, _):
        # During sampling, feed the previous step's argmax back in as input.
        prev = tf.matmul(prev, softmax_w) + softmax_b
        prev_symbol = tf.stop_gradient(tf.argmax(prev, 1))
        return tf.nn.embedding_lookup(embedding, prev_symbol)

    outputs, last_state = legacy_seq2seq.rnn_decoder(
        inputs, self.initial_state, cell,
        loop_function=loop if not training else None, scope='rnnlm')
    output = tf.reshape(tf.concat(outputs, 1), [-1, args.rnn_size])

    self.logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b)
    self.probs = tf.nn.softmax(self.logits)
    loss = legacy_seq2seq.sequence_loss_by_example(
        [self.logits],
        [tf.reshape(self.targets, [-1])],
        [tf.ones([args.batch_size * args.seq_length])])

    with tf.name_scope('loss'):
        self.cost = tf.reduce_sum(loss) / args.batch_size / args.seq_length
    self.final_state = last_state
    self.learning_rate = tf.Variable(0.0, trainable=False)
    tvars = tf.trainable_variables()
    grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars), args.grad_clip)

    with tf.name_scope('optimization'):
        optimizer = tf.train.AdamOptimizer(self.learning_rate)
        self.train_op = optimizer.apply_gradients(zip(grads, tvars))
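tf.contrib was removed in TensorFlow 2.x. A rough modern equivalent is tfa.seq2seq.sequence_loss from TensorFlow Addons, which takes dense [batch, time, vocab] logits instead of per-step lists (a sketch under that assumption; requires pip install tensorflow-addons):

import tensorflow as tf
import tensorflow_addons as tfa

batch_size, num_steps, vocab_size = 2, 3, 5
logits = tf.random.normal([batch_size, num_steps, vocab_size])
targets = tf.zeros([batch_size, num_steps], dtype=tf.int32)
weights = tf.ones([batch_size, num_steps])

# With average_across_batch=False the result is a [batch_size] tensor of
# per-example losses, analogous to sequence_loss_by_example.
loss = tfa.seq2seq.sequence_loss(logits, targets, weights,
                                 average_across_timesteps=True,
                                 average_across_batch=False)
print(loss.shape)  # (2,)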