This article collects typical usage examples of the Python method data.Ids2Words. If you are wondering what data.Ids2Words does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore the wider usage of the data module it belongs to.
The following shows 4 code examples of data.Ids2Words, ordered by popularity by default.
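data.Ids2Words itself is not reproduced on this page. Judging from the call sites in the examples below, it takes a list of integer word ids plus a vocabulary object and returns the corresponding word strings. A minimal sketch, assuming the vocabulary exposes an IdToWord lookup (an assumption, not confirmed here):

def Ids2Words(ids_list, vocab):
  """Maps a list of integer ids to their word strings via `vocab` (sketch)."""
  assert isinstance(ids_list, list), '%s is not a list' % ids_list
  return [vocab.IdToWord(i) for i in ids_list]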
Example 1: _DecodeBatch
# Required module: import data [as alias]
# Or: from data import Ids2Words [as alias]
def _DecodeBatch(self, article, abstract, output_ids):
  """Converts ids to words and writes the results.

  Args:
    article: The original article string.
    abstract: The human (correct) abstract string.
    output_ids: The abstract word ids output by the machine.
  """
  decoded_output = ' '.join(data.Ids2Words(output_ids, self._vocab))
  # Truncate the decoded text at the first sentence-end marker.
  end_p = decoded_output.find(data.SENTENCE_END, 0)
  if end_p != -1:
    decoded_output = decoded_output[:end_p]
  tf.logging.info('article: %s', article)
  tf.logging.info('abstract: %s', abstract)
  tf.logging.info('decoded: %s', decoded_output)
  self._decode_io.Write(abstract, decoded_output.strip())
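A quick worked illustration of the truncation step above, with made-up tokens (this assumes data.SENTENCE_END is the literal '</s>' marker, which the page does not confirm):

SENTENCE_END = '</s>'  # assumed value of data.SENTENCE_END
decoded_output = 'the cat sat </s> <PAD> <PAD>'
end_p = decoded_output.find(SENTENCE_END, 0)
if end_p != -1:
  decoded_output = decoded_output[:end_p]  # drop the end marker and any padding
assert decoded_output.strip() == 'the cat sat'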
Example 2: _Eval
# Required module: import data [as alias]
# Or: from data import Ids2Words [as alias]
def _Eval(model, data_batcher, vocab=None):
  """Runs model eval."""
  model.build_graph()
  saver = tf.train.Saver()
  summary_writer = tf.summary.FileWriter(FLAGS.eval_dir)
  sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
  running_avg_loss = 0
  step = 0
  while True:
    time.sleep(FLAGS.eval_interval_secs)
    try:
      ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root)
    except tf.errors.OutOfRangeError as e:
      tf.logging.error('Cannot restore checkpoint: %s', e)
      continue
    if not (ckpt_state and ckpt_state.model_checkpoint_path):
      tf.logging.info('No model to eval yet at %s', FLAGS.train_dir)
      continue
    tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)
    saver.restore(sess, ckpt_state.model_checkpoint_path)

    (article_batch, abstract_batch, targets, article_lens, abstract_lens,
     loss_weights, _, _) = data_batcher.NextBatch()
    (summaries, loss, train_step) = model.run_eval_step(
        sess, article_batch, abstract_batch, targets, article_lens,
        abstract_lens, loss_weights)
    tf.logging.info(
        'article: %s',
        ' '.join(data.Ids2Words(article_batch[0][:].tolist(), vocab)))
    tf.logging.info(
        'abstract: %s',
        ' '.join(data.Ids2Words(abstract_batch[0][:].tolist(), vocab)))

    summary_writer.add_summary(summaries, train_step)
    running_avg_loss = _RunningAvgLoss(
        running_avg_loss, loss, summary_writer, train_step)
    step += 1  # advance the local counter so the periodic flush below fires every 100 eval iterations
    if step % 100 == 0:
      summary_writer.flush()
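The helper _RunningAvgLoss is not shown on this page. From its call site it folds the current eval loss into a running average, writes it to the summary, and returns the new average. A sketch under that assumption (the exponential decay constant and the summary tag are illustrative choices, not confirmed by the page):

def _RunningAvgLoss(running_avg_loss, loss, summary_writer, step, decay=0.99):
  """Folds `loss` into an exponentially decayed running average (sketch)."""
  if running_avg_loss == 0:
    running_avg_loss = loss  # first step: seed the average with the raw loss
  else:
    running_avg_loss = running_avg_loss * decay + (1 - decay) * loss
  loss_sum = tf.Summary()
  loss_sum.value.add(tag='running_avg_loss', simple_value=running_avg_loss)
  summary_writer.add_summary(loss_sum, step)
  tf.logging.info('running_avg_loss: %f', running_avg_loss)
  return running_avg_loss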
Example 3: _Eval (identical to Example 2 except that it uses the pre-1.0 tf.train.SummaryWriter API where Example 2 uses tf.summary.FileWriter)
# Required module: import data [as alias]
# Or: from data import Ids2Words [as alias]
def _Eval(model, data_batcher, vocab=None):
  """Runs model eval."""
  model.build_graph()
  saver = tf.train.Saver()
  summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir)
  sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
  running_avg_loss = 0
  step = 0
  while True:
    time.sleep(FLAGS.eval_interval_secs)
    try:
      ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root)
    except tf.errors.OutOfRangeError as e:
      tf.logging.error('Cannot restore checkpoint: %s', e)
      continue
    if not (ckpt_state and ckpt_state.model_checkpoint_path):
      tf.logging.info('No model to eval yet at %s', FLAGS.train_dir)
      continue
    tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)
    saver.restore(sess, ckpt_state.model_checkpoint_path)

    (article_batch, abstract_batch, targets, article_lens, abstract_lens,
     loss_weights, _, _) = data_batcher.NextBatch()
    (summaries, loss, train_step) = model.run_eval_step(
        sess, article_batch, abstract_batch, targets, article_lens,
        abstract_lens, loss_weights)
    tf.logging.info(
        'article: %s',
        ' '.join(data.Ids2Words(article_batch[0][:].tolist(), vocab)))
    tf.logging.info(
        'abstract: %s',
        ' '.join(data.Ids2Words(abstract_batch[0][:].tolist(), vocab)))

    summary_writer.add_summary(summaries, train_step)
    running_avg_loss = _RunningAvgLoss(
        running_avg_loss, loss, summary_writer, train_step)
    step += 1  # advance the local counter so the periodic flush below fires every 100 eval iterations
    if step % 100 == 0:
      summary_writer.flush()
Example 4: _DecodeBatch
# Required module: import data [as alias]
# Or: from data import Ids2Words [as alias]
def _DecodeBatch(self, source, targets, dec_outputs):
  """Converts ids to words and writes the results.

  Args:
    source: The original source string.
    targets: The human (correct) target strings.
    dec_outputs: The target word ids output by the machine.

  Returns:
    List of metric scores for this batch.
  """
  output = ['None'] * len(dec_outputs)
  source_words = source.split()
  for i in range(len(dec_outputs)):
    if dec_outputs[i] < 0:  # negative id: copied from the source (copier)
      position = -1 - dec_outputs[i]
      if position < len(source_words):
        output[i] = source_words[position]
      else:
        output[i] = '<out_of_bound>'
    else:  # non-negative id: from the generator vocabulary (0 is UNK)
      output[i] = data.Ids2Words([dec_outputs[i]], self._output_vocab)[0]

  # Strip the sentence-boundary markers before scoring.
  source = source.replace(data.SENTENCE_START + ' ', '').replace(
      ' ' + data.SENTENCE_END, '')
  targets = [
      x.replace(data.SENTENCE_START + ' ', '').replace(
          ' ' + data.SENTENCE_END, '') for x in targets
  ]
  decoded = ' '.join(output)
  end_p = decoded.find(data.SENTENCE_END, 0)
  if end_p != -1:
    decoded = decoded[:end_p].strip()

  bleu_score = metrics.get_bleu(decoded, targets)
  f1_score = metrics.get_f1(decoded, targets)
  exact_score = metrics.get_exact(decoded, targets)
  self._decode_io.Write(source, targets, decoded, bleu_score,
                        f1_score, exact_score)
  return bleu_score, f1_score, exact_score
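The negative-id convention in Example 4 deserves a standalone illustration: non-negative ids come from the generator vocabulary, while "copy the source word at position p" is encoded as the id -1 - p. A small hypothetical round-trip (the helper names are illustrative, not from the page):

def encode_copy(position):
  """Encodes 'copy the source word at `position`' as a negative id."""
  return -1 - position

def decode_copy(dec_id, source_words):
  """Resolves a copier id back to a source word, mirroring Example 4."""
  position = -1 - dec_id
  return source_words[position] if position < len(source_words) else '<out_of_bound>'

source_words = 'the quick brown fox'.split()
assert encode_copy(2) == -3                               # id -3 means: copy word 2
assert decode_copy(-3, source_words) == 'brown'
assert decode_copy(-9, source_words) == '<out_of_bound>'  # points past the source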