This article collects typical usage examples of the Python method tensorflow.contrib.seq2seq.BasicDecoder. If you are wondering what exactly seq2seq.BasicDecoder does and how to use it, the curated code examples below should help. You can also explore other usage of the containing module, tensorflow.contrib.seq2seq.
The following shows 11 code examples of seq2seq.BasicDecoder, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
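Before the individual examples, here is a minimal, self-contained sketch (TF 1.x, with made-up tensor sizes) of the pattern every example below follows: build a helper, wrap it together with an RNN cell in seq2seq.BasicDecoder, and unroll the graph with seq2seq.dynamic_decode.

# Minimal sketch of the common pattern. All sizes here are arbitrary.
import tensorflow as tf
from tensorflow.contrib import seq2seq
from tensorflow.contrib.rnn import LSTMCell

batch_size, max_time, vocab_size, num_units = 16, 20, 1000, 128

decoder_inputs = tf.random_normal([batch_size, max_time, num_units])  # embedded targets
lengths = tf.fill([batch_size], max_time)                             # target lengths

cell = LSTMCell(num_units)
projection = tf.layers.Dense(vocab_size, use_bias=False)

# A helper decides what the decoder reads at each step (here: teacher forcing).
helper = seq2seq.TrainingHelper(decoder_inputs, sequence_length=lengths)
decoder = seq2seq.BasicDecoder(
    cell=cell,
    helper=helper,
    initial_state=cell.zero_state(batch_size, tf.float32),
    output_layer=projection)

# dynamic_decode unrolls the decoder until the helper signals completion.
outputs, final_state, final_lengths = seq2seq.dynamic_decode(
    decoder, impute_finished=True, maximum_iterations=max_time)
logits = outputs.rnn_output      # [batch, time, vocab_size]
predictions = outputs.sample_id  # [batch, time]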
Example 1: build_train_decoder
# Required import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import BasicDecoder [as alias]
def build_train_decoder(self):
    with tf.name_scope('train_decoder'):
        training_helper = TrainingHelper(inputs=self.inputs_dense,
                                         sequence_length=self.inputs_length,
                                         time_major=False,
                                         name='training_helper')
        with tf.name_scope('basic_decoder'):
            training_decoder = BasicDecoder(cell=self.cell,
                                            helper=training_helper,
                                            initial_state=self.initial_state,
                                            output_layer=self.output_layer)
        with tf.name_scope('dynamic_decode'):
            (outputs, self.last_state, self.outputs_length) = (seq2seq.dynamic_decode(
                decoder=training_decoder,
                output_time_major=False,
                impute_finished=True,
                maximum_iterations=self.inputs_max_length))
            self.logits = tf.identity(outputs.rnn_output)
            self.log_probs = tf.nn.log_softmax(self.logits)
            self.gs_hypotheses = tf.argmax(self.log_probs, -1)
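Example 1 only builds the decoder graph. In practice a masked sequence loss is attached to self.logits; the sketch below is one way to do that and is not part of the original snippet (the targets tensor is hypothetical).

# Hypothetical continuation of Example 1: masked cross-entropy on the logits.
# `targets` (target ids, shape [batch, time]) is assumed, not defined above.
mask = tf.sequence_mask(self.inputs_length,
                        maxlen=tf.shape(self.logits)[1],
                        dtype=tf.float32)
loss = seq2seq.sequence_loss(logits=self.logits, targets=targets, weights=mask)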
Example 2: _build_decoder_train
# Required import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import BasicDecoder [as alias]
def _build_decoder_train(self):
    self._decoder_train_inputs = tf.nn.embedding_lookup(self._embedding_matrix, self._labels_padded_GO)

    if self._mode == 'train':
        sampler = seq2seq.ScheduledEmbeddingTrainingHelper(
            inputs=self._decoder_train_inputs,
            sequence_length=self._labels_length,
            embedding=self._embedding_matrix,
            sampling_probability=self._sampling_probability_outputs,
        )
    else:
        sampler = seq2seq.TrainingHelper(
            inputs=self._decoder_train_inputs,
            sequence_length=self._labels_length,
        )

    cells = self._decoder_cells

    decoder_train = seq2seq.BasicDecoder(
        cell=cells,
        helper=sampler,
        initial_state=self._decoder_initial_state,
        output_layer=self._dense_layer,
    )

    outputs, _, _ = seq2seq.dynamic_decode(
        decoder_train,
        output_time_major=False,
        impute_finished=True,
        swap_memory=False,
    )

    logits = outputs.rnn_output
    self.decoder_train_outputs = logits
    self.average_log_likelihoods = self._compute_likelihood(logits)

    print('')
Example 3: _build_decoder
# Required import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import BasicDecoder [as alias]
def _build_decoder(self, decoder_cell, batch_size):
    embedding_fn = functools.partial(tf.one_hot, depth=self.num_classes)
    output_layer = tf.layers.Dense(
        self.num_classes,
        activation=None,
        use_bias=True,
        kernel_initializer=tf.variance_scaling_initializer(),
        bias_initializer=tf.zeros_initializer())
    if self._is_training:
        train_helper = seq2seq.TrainingHelper(
            embedding_fn(self._groundtruth_dict['decoder_inputs']),
            sequence_length=self._groundtruth_dict['decoder_lengths'],
            time_major=False)
        decoder = seq2seq.BasicDecoder(
            cell=decoder_cell,
            helper=train_helper,
            initial_state=decoder_cell.zero_state(batch_size, tf.float32),
            output_layer=output_layer)
    else:
        decoder = seq2seq.BeamSearchDecoder(
            cell=decoder_cell,
            embedding=embedding_fn,
            start_tokens=tf.fill([batch_size], self.start_label),
            end_token=self.end_label,
            initial_state=decoder_cell.zero_state(batch_size * self._beam_width, tf.float32),
            beam_width=self._beam_width,
            output_layer=output_layer,
            length_penalty_weight=0.0)
    return decoder
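The two branches produce different output structures once passed to dynamic_decode: the training BasicDecoder yields a BasicDecoderOutput with rnn_output and sample_id, while the BeamSearchDecoder yields a FinalBeamSearchDecoderOutput with predicted_ids. A hedged usage sketch (max_steps is an assumed variable, not part of the example):

# Hypothetical usage of the decoder returned above (max_steps is assumed).
decoder = self._build_decoder(decoder_cell, batch_size)
outputs, final_state, lengths = seq2seq.dynamic_decode(
    decoder, maximum_iterations=max_steps)
if self._is_training:
    logits = outputs.rnn_output                 # [batch, time, num_classes]
else:
    best_ids = outputs.predicted_ids[:, :, 0]   # top beam, [batch, time]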
Example 4: _build_decoder
# Required import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import BasicDecoder [as alias]
def _build_decoder(self, helper, rnn_cell, initial_state, mode, hparams):
    """Builds a decoder instance."""
    del mode  # Unused.
    del hparams  # Unused.
    decoder = contrib_seq2seq.BasicDecoder(
        cell=rnn_cell, helper=helper, initial_state=initial_state)
    return decoder
Example 5: _make_train
# Required import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import BasicDecoder [as alias]
def _make_train(self, decoder_cell, decoder_initial_state):
    # Assume 0 is the START token
    start_tokens = tf.zeros((self.batch_size,), dtype=tf.int32)
    y = tf.concat([tf.expand_dims(start_tokens, 1), self.y], 1)
    output_lengths = tf.reduce_sum(tf.to_int32(tf.not_equal(y, 1)), 1)

    # Reuse encoding embeddings
    inputs = layers.embed_sequence(
        y,
        vocab_size=self.vocab_size,
        embed_dim=self.embed_dim,
        scope='embed', reuse=True)

    # Prepare the decoder with the attention cell
    with tf.variable_scope('decode'):
        # Project to correct dimensions
        out_proj = tf.layers.Dense(self.vocab_size, name='output_proj')
        inputs = tf.layers.dense(inputs, self.hidden_size, name='input_proj')

        helper = seq2seq.TrainingHelper(inputs, output_lengths)
        decoder = seq2seq.BasicDecoder(
            cell=decoder_cell, helper=helper,
            initial_state=decoder_initial_state,
            output_layer=out_proj)
        max_len = tf.reduce_max(output_lengths)
        final_outputs, final_state, final_sequence_lengths = seq2seq.dynamic_decode(
            decoder=decoder, impute_finished=True, maximum_iterations=max_len)
        logits = final_outputs.rnn_output

    # Set valid timesteps to 1 and padded steps to 0,
    # so we only look at the actual sequence without the padding
    mask = tf.sequence_mask(output_lengths, maxlen=max_len, dtype=tf.float32)

    # Prioritize examples that the model was wrong on,
    # by setting weight=1 to any example where the prediction was not 1,
    # i.e. incorrect
    # weights = tf.to_float(tf.not_equal(y[:, :-1], 1))

    # Training and loss ops, with gradient clipping (see [4])
    loss_op = seq2seq.sequence_loss(logits, self.y, weights=mask)
    optimizer = tf.train.AdamOptimizer(self.learning_rate)
    gradients, variables = zip(*optimizer.compute_gradients(loss_op))
    gradients, _ = tf.clip_by_global_norm(gradients, self.max_grad_norm)
    train_op = optimizer.apply_gradients(zip(gradients, variables))

    # Compute accuracy: use the mask from before so we only compare
    # the relevant sequence lengths for each example
    pred = tf.argmax(logits, axis=2, output_type=tf.int32)
    pred = tf.boolean_mask(pred, mask)
    true = tf.boolean_mask(self.y, mask)
    accs = tf.cast(tf.equal(pred, true), tf.float32)
    accuracy_op = tf.reduce_mean(accs, name='acc')

    return loss_op, train_op, accuracy_op
Example 6: _build_decoder_test_greedy
# Required import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import BasicDecoder [as alias]
def _build_decoder_test_greedy(self):
    r"""
    Builds the greedy test decoder, which feeds the most likely decoded symbol
    as input for the next timestep.
    """
    self._helper_greedy = seq2seq.GreedyEmbeddingHelper(
        embedding=self._embedding_matrix,
        start_tokens=tf.tile([self._GO_ID], [self._batch_size]),
        end_token=self._EOS_ID)

    if self._hparams.enable_attention is True:
        cells, initial_state = add_attention(
            cells=self._decoder_cells,
            attention_types=self._hparams.attention_type[1],
            num_units=self._hparams.decoder_units_per_layer[-1],
            memory=self._encoder_memory,
            memory_len=self._encoder_features_len,
            beam_search=False,
            batch_size=self._batch_size,
            initial_state=self._decoder_initial_state,
            mode=self._mode,
            dtype=self._hparams.dtype,
            fusion_type='linear_fusion',
            write_attention_alignment=self._hparams.write_attention_alignment)
    else:
        cells = self._decoder_cells
        initial_state = self._decoder_initial_state

    self._decoder_inference = seq2seq.BasicDecoder(
        cell=cells,
        helper=self._helper_greedy,
        initial_state=initial_state,
        output_layer=self._dense_layer)

    outputs, states, lengths = seq2seq.dynamic_decode(
        self._decoder_inference,
        impute_finished=True,
        swap_memory=False,
        maximum_iterations=self._hparams.max_label_length)

    self.inference_outputs = outputs.rnn_output
    self.inference_predicted_ids = outputs.sample_id

    if self._hparams.write_attention_alignment is True:
        self.attention_summary = self._create_attention_alignments_summary(states)
Example 7: _basic_decoder_train
# Required import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import BasicDecoder [as alias]
def _basic_decoder_train(self):
    r"""
    Builds the standard teacher-forcing training decoder with sampling from previous predictions.
    """
    helper_train = seq2seq.ScheduledEmbeddingTrainingHelper(
        inputs=self._decoder_train_inputs,
        sequence_length=self._labels_len,
        embedding=self._embedding_matrix,
        sampling_probability=self._sampling_probability_outputs,
    )

    # christian_fun = lambda logits: tf.math.top_k(logits, 3).indices
    #
    # helper_train = seq2seq.ScheduledOutputTrainingHelper(
    #     inputs=self._decoder_train_inputs,
    #     sequence_length=self._labels_len,
    #     sampling_probability=self._sampling_probability_outputs,
    # )

    if self._hparams.enable_attention is True:
        cells, initial_state = add_attention(
            cells=self._decoder_cells,
            attention_types=self._hparams.attention_type[1],
            num_units=self._hparams.decoder_units_per_layer[-1],
            memory=self._encoder_memory,
            memory_len=self._encoder_features_len,
            initial_state=self._decoder_initial_state,
            batch_size=self._batch_size,
            mode=self._mode,
            dtype=self._hparams.dtype,
            fusion_type='linear_fusion',
            write_attention_alignment=False,  # we are in train mode
        )
    else:
        cells = self._decoder_cells
        initial_state = self._decoder_initial_state

    decoder_train = seq2seq.BasicDecoder(
        cell=cells,
        helper=helper_train,
        initial_state=initial_state,
        output_layer=self._dense_layer,
    )

    outputs, fstate, fseqlen = seq2seq.dynamic_decode(
        decoder_train,
        output_time_major=False,
        impute_finished=True,
        swap_memory=False,
    )

    return outputs, fstate, fseqlen
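The sampling_probability passed to ScheduledEmbeddingTrainingHelper is usually a scalar tensor that is annealed during training rather than a constant. One illustrative way to build such a tensor (the schedule values below are arbitrary and not part of the example):

# Illustrative schedule: ramp the sampling probability from 0 to 0.5
# over the first 100k steps (values chosen arbitrarily).
global_step = tf.train.get_or_create_global_step()
sampling_probability = tf.minimum(
    0.5, tf.cast(global_step, tf.float32) / 100000.0)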
Example 8: _build_decoder_train
# Required import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import BasicDecoder [as alias]
def _build_decoder_train(self):
    self._labels_embedded = tf.nn.embedding_lookup(self._embedding_matrix, self._labels_padded_GO)

    self._helper_train = seq2seq.ScheduledEmbeddingTrainingHelper(
        inputs=self._labels_embedded,
        sequence_length=self._labels_len,
        embedding=self._embedding_matrix,
        sampling_probability=self._sampling_probability_outputs,
    )

    if self._hparams.enable_attention is True:
        attention_mechanisms, layer_sizes = self._create_attention_mechanisms()

        attention_cells = seq2seq.AttentionWrapper(
            cell=self._decoder_cells,
            attention_mechanism=attention_mechanisms,
            attention_layer_size=layer_sizes,
            initial_cell_state=self._decoder_initial_state,
            alignment_history=False,
            output_attention=self._output_attention,
        )

        batch_size, _ = tf.unstack(tf.shape(self._labels))
        attn_zero = attention_cells.zero_state(
            dtype=self._hparams.dtype, batch_size=batch_size
        )
        initial_state = attn_zero.clone(
            cell_state=self._decoder_initial_state
        )
        cells = attention_cells
    else:
        cells = self._decoder_cells
        initial_state = self._decoder_initial_state

    self._decoder_train = seq2seq.BasicDecoder(
        cell=cells,
        helper=self._helper_train,
        initial_state=initial_state,
        output_layer=self._dense_layer,
    )

    self._basic_decoder_train_outputs, self._final_states, self._final_seq_lens = seq2seq.dynamic_decode(
        self._decoder_train,
        output_time_major=False,
        impute_finished=True,
        swap_memory=False,
    )

    self._logits = self._basic_decoder_train_outputs.rnn_output
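_create_attention_mechanisms is project-specific and not shown here. AttentionWrapper accepts a list of attention mechanisms together with a matching list of attention layer sizes, so a plausible sketch of that helper could look like the following (the attribute names are assumptions carried over from the example above):

# Hypothetical sketch of _create_attention_mechanisms (attribute names assumed).
def _create_attention_mechanisms(self):
    mechanisms = [
        seq2seq.BahdanauAttention(
            num_units=self._hparams.decoder_units_per_layer[-1],
            memory=self._encoder_memory,
            memory_sequence_length=self._encoder_features_len),
    ]
    layer_sizes = [self._hparams.decoder_units_per_layer[-1]]
    return mechanisms, layer_sizes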
Example 9: _build_decoder_greedy
# Required import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import BasicDecoder [as alias]
def _build_decoder_greedy(self):
    batch_size, _ = tf.unstack(tf.shape(self._labels))
    self._helper_greedy = seq2seq.GreedyEmbeddingHelper(
        embedding=self._embedding_matrix,
        start_tokens=tf.tile([self._GO_ID], [batch_size]),
        end_token=self._EOS_ID)

    if self._hparams.enable_attention is True:
        attention_mechanisms, layer_sizes = self._create_attention_mechanisms()
        attention_cells = seq2seq.AttentionWrapper(
            cell=self._decoder_cells,
            attention_mechanism=attention_mechanisms,
            attention_layer_size=layer_sizes,
            initial_cell_state=self._decoder_initial_state,
            alignment_history=self._hparams.write_attention_alignment,
            output_attention=self._output_attention
        )

        attn_zero = attention_cells.zero_state(
            dtype=self._hparams.dtype, batch_size=batch_size
        )
        initial_state = attn_zero.clone(
            cell_state=self._decoder_initial_state
        )
        cells = attention_cells
    else:
        cells = self._decoder_cells
        initial_state = self._decoder_initial_state

    self._decoder_inference = seq2seq.BasicDecoder(
        cell=cells,
        helper=self._helper_greedy,
        initial_state=initial_state,
        output_layer=self._dense_layer)

    outputs, states, lengths = seq2seq.dynamic_decode(
        self._decoder_inference,
        impute_finished=True,
        swap_memory=False,
        maximum_iterations=self._hparams.max_label_length)

    # self._result = outputs, states, lengths
    self.inference_outputs = outputs.rnn_output
    self.inference_predicted_ids = outputs.sample_id

    if self._hparams.write_attention_alignment is True:
        self.attention_summary = self._create_attention_alignments_summary(states)
Example 10: _build_model
# Required import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import BasicDecoder [as alias]
def _build_model(self):
    with tf.variable_scope("embeddings"):
        self.source_embs = tf.get_variable(name="source_embs", shape=[self.cfg.source_vocab_size, self.cfg.emb_dim],
                                           dtype=tf.float32, trainable=True)
        self.target_embs = tf.get_variable(name="embeddings", shape=[self.cfg.vocab_size, self.cfg.emb_dim],
                                           dtype=tf.float32, trainable=True)
        source_emb = tf.nn.embedding_lookup(self.source_embs, self.enc_source)
        target_emb = tf.nn.embedding_lookup(self.target_embs, self.dec_target_in)
        print("source embedding shape: {}".format(source_emb.get_shape().as_list()))
        print("target input embedding shape: {}".format(target_emb.get_shape().as_list()))

    with tf.variable_scope("encoder"):
        if self.cfg.use_bi_rnn:
            with tf.variable_scope("bi-directional_rnn"):
                cell_fw = GRUCell(self.cfg.num_units) if self.cfg.cell_type == "gru" else \
                    LSTMCell(self.cfg.num_units)
                cell_bw = GRUCell(self.cfg.num_units) if self.cfg.cell_type == "gru" else \
                    LSTMCell(self.cfg.num_units)
                bi_outputs, _ = bidirectional_dynamic_rnn(cell_fw, cell_bw, source_emb, dtype=tf.float32,
                                                          sequence_length=self.enc_seq_len)
                source_emb = tf.concat(bi_outputs, axis=-1)
                print("bi-directional rnn output shape: {}".format(source_emb.get_shape().as_list()))
        input_project = tf.layers.Dense(units=self.cfg.num_units, dtype=tf.float32, name="input_projection")
        source_emb = input_project(source_emb)
        print("encoder input projection shape: {}".format(source_emb.get_shape().as_list()))
        enc_cells = self._create_encoder_cell()
        self.enc_outputs, self.enc_states = dynamic_rnn(enc_cells, source_emb, sequence_length=self.enc_seq_len,
                                                        dtype=tf.float32)
        print("encoder output shape: {}".format(self.enc_outputs.get_shape().as_list()))

    with tf.variable_scope("decoder"):
        self.max_dec_seq_len = tf.reduce_max(self.dec_seq_len, name="max_dec_seq_len")
        self.dec_cells, self.dec_init_states = self._create_decoder_cell()
        # define input and output projection layer
        input_project = tf.layers.Dense(units=self.cfg.num_units, name="input_projection")
        self.dense_layer = tf.layers.Dense(units=self.cfg.vocab_size, name="output_projection")

        if self.mode == "train":  # either "train" or "decode"
            # for training
            target_emb = input_project(target_emb)
            train_helper = TrainingHelper(target_emb, sequence_length=self.dec_seq_len, name="train_helper")
            train_decoder = BasicDecoder(self.dec_cells, helper=train_helper, output_layer=self.dense_layer,
                                         initial_state=self.dec_init_states)
            self.dec_output, _, _ = dynamic_decode(train_decoder, impute_finished=True,
                                                   maximum_iterations=self.max_dec_seq_len)
            print("decoder output shape: {} (vocab size)".format(self.dec_output.rnn_output.get_shape().as_list()))

        # for decode
        start_token = tf.ones(shape=[self.batch_size, ], dtype=tf.int32) * self.cfg.target_dict[GO]
        end_token = self.cfg.target_dict[EOS]

        def inputs_project(inputs):
            return input_project(tf.nn.embedding_lookup(self.target_embs, inputs))

        dec_helper = GreedyEmbeddingHelper(embedding=inputs_project, start_tokens=start_token,
                                           end_token=end_token)
        infer_decoder = BasicDecoder(self.dec_cells, helper=dec_helper, initial_state=self.dec_init_states,
                                     output_layer=self.dense_layer)
        infer_dec_output, _, _ = dynamic_decode(infer_decoder, maximum_iterations=self.cfg.maximum_iterations)
        self.dec_predicts = infer_dec_output.sample_id
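The train branch stops at self.dec_output; a loss would normally be computed from its rnn_output with padding masked via dec_seq_len. A hedged sketch follows (self.dec_target_out and self.cfg.lr are assumptions, not defined in the example):

# Hypothetical training loss for Example 10 (self.dec_target_out: target ids
# shifted left by one, shape [batch, time]; self.cfg.lr: learning rate).
mask = tf.sequence_mask(self.dec_seq_len, maxlen=self.max_dec_seq_len, dtype=tf.float32)
self.loss = tf.contrib.seq2seq.sequence_loss(logits=self.dec_output.rnn_output,
                                             targets=self.dec_target_out,
                                             weights=mask)
self.train_op = tf.train.AdamOptimizer(self.cfg.lr).minimize(self.loss)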
Example 11: _build_helper
# Required import: from tensorflow.contrib import seq2seq [as alias]
# Or: from tensorflow.contrib.seq2seq import BasicDecoder [as alias]
def _build_helper(self, batch_size, embeddings, inputs, inputs_length,
                  mode, hparams, decoder_hparams):
    """Builds a helper instance for BasicDecoder."""
    # Auxiliary decoding mode at training time.
    if decoder_hparams.auxiliary:
        start_tokens = tf.fill([batch_size], text_encoder.PAD_ID)
        # helper = helpers.FixedContinuousEmbeddingHelper(
        #     embedding=embeddings,
        #     start_tokens=start_tokens,
        #     end_token=text_encoder.EOS_ID,
        #     num_steps=hparams.aux_decode_length)
        helper = contrib_seq2seq.SampleEmbeddingHelper(
            embedding=embeddings,
            start_tokens=start_tokens,
            end_token=text_encoder.EOS_ID,
            softmax_temperature=None)
    # Continuous decoding.
    elif hparams.decoder_continuous:
        # Scheduled mixing.
        if mode == tf.estimator.ModeKeys.TRAIN and hparams.scheduled_training:
            helper = helpers.ScheduledContinuousEmbeddingTrainingHelper(
                inputs=inputs,
                sequence_length=inputs_length,
                mixing_concentration=hparams.scheduled_mixing_concentration)
        # Pure continuous decoding (hard to train!).
        elif mode == tf.estimator.ModeKeys.TRAIN:
            helper = helpers.ContinuousEmbeddingTrainingHelper(
                inputs=inputs,
                sequence_length=inputs_length)
        # EVAL and PREDICT expect teacher forcing behavior.
        else:
            helper = contrib_seq2seq.TrainingHelper(
                inputs=inputs, sequence_length=inputs_length)
    # Standard decoding.
    else:
        # Scheduled sampling.
        if mode == tf.estimator.ModeKeys.TRAIN and hparams.scheduled_training:
            helper = contrib_seq2seq.ScheduledEmbeddingTrainingHelper(
                inputs=inputs,
                sequence_length=inputs_length,
                embedding=embeddings,
                sampling_probability=hparams.scheduled_sampling_probability)
        # Teacher forcing (also for EVAL and PREDICT).
        else:
            helper = contrib_seq2seq.TrainingHelper(
                inputs=inputs, sequence_length=inputs_length)
    return helper
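The returned helper is meant to be consumed by a BasicDecoder such as the one built in Example 4. A hedged wiring sketch (hparams.max_decode_length is an assumption, not part of either example):

# Hypothetical wiring of _build_helper with the _build_decoder from Example 4.
helper = self._build_helper(batch_size, embeddings, inputs, inputs_length,
                            mode, hparams, decoder_hparams)
decoder = self._build_decoder(helper, rnn_cell, initial_state, mode, hparams)
outputs, final_state, lengths = contrib_seq2seq.dynamic_decode(
    decoder, maximum_iterations=hparams.max_decode_length)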