This page collects typical usage examples of Python's nltk.translate.bleu_score.corpus_bleu. If you have been wondering how exactly bleu_score.corpus_bleu is used, or what it looks like in real code, the curated snippets below should help. You can also read further about its containing module, nltk.translate.bleu_score.
The following shows 15 code examples of bleu_score.corpus_bleu, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
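Before diving in, it helps to see the input shapes corpus_bleu expects: a list of reference lists (each hypothesis gets its own list of tokenized references, so multiple references per hypothesis are allowed) and a flat list of tokenized hypotheses. A minimal sketch with made-up sentences:

from nltk.translate.bleu_score import corpus_bleu, SmoothingFunction

# One entry per hypothesis; each entry is a list of tokenized references.
references = [
    [['the', 'cat', 'sat', 'on', 'the', 'mat'],
     ['a', 'cat', 'was', 'on', 'the', 'mat']],   # two references for hypothesis 1
    [['hello', 'world']],                        # one reference for hypothesis 2
]
hypotheses = [
    ['the', 'cat', 'sat', 'on', 'the', 'mat'],
    ['hello', 'world'],
]

# Smoothing avoids hard zeros when some n-gram order has no overlap.
bleu = corpus_bleu(references, hypotheses,
                   smoothing_function=SmoothingFunction().method1)
print('corpus BLEU: %.4f' % bleu)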
Example 1: get_report
# Required module import: from nltk.translate import bleu_score [as alias]
# Or: from nltk.translate.bleu_score import corpus_bleu [as alias]
def get_report(self):
    tokenize = get_tokenize()
    print('Generate report for {} samples'.format(len(self.hyps)))
    refs, hyps = [], []
    for label, hyp in zip(self.labels, self.hyps):
        # label = label.replace(EOS, '')
        # hyp = hyp.replace(EOS, '')
        # ref_tokens = tokenize(label)[1:]
        # hyp_tokens = tokenize(hyp)[1:]
        ref_tokens = tokenize(label)
        hyp_tokens = tokenize(hyp)
        refs.append([ref_tokens])
        hyps.append(hyp_tokens)
    bleu = corpus_bleu(refs, hyps, smoothing_function=SmoothingFunction().method1)
    report = '\n===== BLEU = %f =====\n' % (bleu,)
    return '\n===== REPORT FOR DATASET {} ====={}'.format(self.data_name, report)
Example 2: quora_read
# Required module import: from nltk.translate import bleu_score [as alias]
# Or: from nltk.translate.bleu_score import corpus_bleu [as alias]
def quora_read(file_path, bleu_baseline=False):
    """Read the quora dataset"""
    print("Reading quora raw data .. ")
    print(" data path: %s" % file_path)
    with open(file_path) as fd:
        lines = fd.readlines()
    sentence_sets = []
    for l in tqdm(lines):
        p0, p1 = l[:-1].lower().split("\t")
        sentence_sets.append([word_tokenize(p0), word_tokenize(p1)])

    if bleu_baseline:
        print("calculating bleu ... ")
        hypothesis = [s[0] for s in sentence_sets]
        references = [s[1:] for s in sentence_sets]
        bleu = corpus_bleu(references, hypothesis)
        print("bleu on the training set: %.4f" % bleu)
    return sentence_sets
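quora_read expects one tab-separated paraphrase pair per line. A hypothetical round-trip, purely for illustration (the file name and contents are invented):

# Write a tiny two-line TSV file and read it back.
with open('quora_sample.tsv', 'w') as f:
    f.write("How do I learn Python?\tWhat is the best way to learn Python?\n")
    f.write("Is the earth flat?\tIs our planet flat?\n")

pairs = quora_read('quora_sample.tsv', bleu_baseline=True)
print(len(pairs), 'pairs')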
Example 3: __call__
# Required module import: from nltk.translate import bleu_score [as alias]
# Or: from nltk.translate.bleu_score import corpus_bleu [as alias]
def __call__(self, trainer):
    device = self.device

    with chainer.no_backprop_mode():
        references = []
        hypotheses = []
        for i in range(0, len(self.test_data), self.batch):
            sources, targets = zip(*self.test_data[i:i + self.batch])
            references.extend([[t.tolist()] for t in targets])

            sources = [device.send(x) for x in sources]
            ys = [y.tolist()
                  for y in self.model.translate(sources, self.max_length)]
            hypotheses.extend(ys)

    bleu = bleu_score.corpus_bleu(
        references, hypotheses,
        smoothing_function=bleu_score.SmoothingFunction().method1)
    chainer.report({self.key: bleu})
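This __call__ is written as a Chainer trainer extension. A hedged wiring sketch follows; the class name CalculateBleu and its constructor signature are assumptions here (they are not shown in the snippet), while trainer.extend and the trigger syntax are standard Chainer API:

# Assumed constructor: CalculateBleu(model, test_data, key, batch, device, max_length)
bleu_eval = CalculateBleu(model, test_data, 'validation/main/bleu',
                          batch=100, device=device, max_length=100)
trainer.extend(bleu_eval, trigger=(1, 'epoch'))  # report BLEU once per epoch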
Example 4: __call__
# Required module import: from nltk.translate import bleu_score [as alias]
# Or: from nltk.translate.bleu_score import corpus_bleu [as alias]
def __call__(self, trainer):
    with chainer.no_backprop_mode():
        references = []
        hypotheses = []
        for i in range(0, len(self.test_data), self.batch):
            sources, targets = zip(*self.test_data[i:i + self.batch])
            references.extend([[t.tolist()] for t in targets])

            sources = [
                chainer.dataset.to_device(self.device, x) for x in sources]
            ys = [y.tolist()
                  for y in self.model.translate(sources, self.max_length)]
            hypotheses.extend(ys)

    bleu = bleu_score.corpus_bleu(
        references, hypotheses,
        smoothing_function=bleu_score.SmoothingFunction().method1)
    reporter.report({self.key: bleu})
Example 5: __call__
# Required module import: from nltk.translate import bleu_score [as alias]
# Or: from nltk.translate.bleu_score import corpus_bleu [as alias]
def __call__(self, trainer):
    print('## Calculate BLEU')
    with chainer.no_backprop_mode():
        with chainer.using_config('train', False):
            references = []
            hypotheses = []
            for i in range(0, len(self.test_data), self.batch):
                sources, targets = zip(*self.test_data[i:i + self.batch])
                references.extend([[t.tolist()] for t in targets])

                sources = [
                    chainer.dataset.to_device(self.device, x) for x in sources]
                ys = [y.tolist()
                      for y in self.model.translate(sources, self.max_length)]
                hypotheses.extend(ys)

    bleu = bleu_score.corpus_bleu(
        references, hypotheses,
        smoothing_function=bleu_score.SmoothingFunction().method1) * 100
    print('BLEU:', bleu)
    reporter.report({self.key: bleu})
Example 6: forward
# Required module import: from nltk.translate import bleu_score [as alias]
# Or: from nltk.translate.bleu_score import corpus_bleu [as alias]
def forward(self, trainer):
    with chainer.no_backprop_mode():
        references = []
        hypotheses = []
        for i in range(0, len(self.test_data), self.batch):
            sources, targets = zip(*self.test_data[i:i + self.batch])
            references.extend([[t.tolist()] for t in targets])

            sources = [
                chainer.dataset.to_device(self.device, x) for x in sources]
            ys = [y.tolist()
                  for y in self.model.translate(sources, self.max_length)]
            hypotheses.extend(ys)

    bleu = bleu_score.corpus_bleu(
        references, hypotheses,
        smoothing_function=bleu_score.SmoothingFunction().method1)
    chainer.report({self.key: bleu})
Example 7: calculate_bleu_scores
# Required module import: from nltk.translate import bleu_score [as alias]
# Or: from nltk.translate.bleu_score import corpus_bleu [as alias]
def calculate_bleu_scores(references, hypotheses):
    """
    Calculates BLEU 1-4 scores based on NLTK functionality

    Args:
        references: List of reference sentences
        hypotheses: List of generated sentences

    Returns:
        bleu_1, bleu_2, bleu_3, bleu_4: BLEU scores
    """
    bleu_1 = np.round(100 * corpus_bleu(references, hypotheses, weights=(1.0, 0., 0., 0.)), decimals=2)
    bleu_2 = np.round(100 * corpus_bleu(references, hypotheses, weights=(0.50, 0.50, 0., 0.)), decimals=2)
    bleu_3 = np.round(100 * corpus_bleu(references, hypotheses, weights=(0.34, 0.33, 0.33, 0.)), decimals=2)
    bleu_4 = np.round(100 * corpus_bleu(references, hypotheses, weights=(0.25, 0.25, 0.25, 0.25)), decimals=2)
    return bleu_1, bleu_2, bleu_3, bleu_4
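A quick usage sketch for calculate_bleu_scores, with made-up tokens; note that each hypothesis is paired with a list of references, exactly the nesting corpus_bleu requires:

refs = [[['the', 'cat', 'sat', 'on', 'the', 'mat']],
        [['there', 'is', 'a', 'cat', 'on', 'the', 'mat']]]
hyps = [['the', 'cat', 'sat', 'on', 'the', 'mat'],
        ['a', 'cat', 'is', 'on', 'the', 'mat']]

print(calculate_bleu_scores(refs, hyps))  # four rounded percentages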
Example 8: computeGroupBLEU
# Required module import: from nltk.translate import bleu_score [as alias]
# Or: from nltk.translate.bleu_score import corpus_bleu [as alias]
def computeGroupBLEU(outputs, targets, tokenizer=None, bra=10, maxmaxlen=80):
    if tokenizer is None:
        tokenizer = revtok.tokenize

    outputs = [tokenizer(o) for o in outputs]
    targets = [tokenizer(t) for t in targets]
    maxlens = max([len(t) for t in targets])
    print(maxlens)
    maxlens = min([maxlens, maxmaxlen])
    nums = int(np.ceil(maxlens / bra))
    outputs_buckets = [[] for _ in range(nums)]
    targets_buckets = [[] for _ in range(nums)]
    # Bucket sentence pairs by output length, `bra` tokens per bucket.
    for o, t in zip(outputs, targets):
        idx = len(o) // bra
        if idx >= len(outputs_buckets):
            idx = -1
        outputs_buckets[idx] += [o]
        targets_buckets[idx] += [t]

    # NOTE: the emulate_multibleu flag only exists in older NLTK releases.
    for k in range(nums):
        print(corpus_bleu([[t] for t in targets_buckets[k]],
                          [o for o in outputs_buckets[k]],
                          emulate_multibleu=True))

# load the dataset + reversible tokenization
Example 9: oracle_bleu
# Required module import: from nltk.translate import bleu_score [as alias]
# Or: from nltk.translate.bleu_score import corpus_bleu [as alias]
def oracle_bleu(hyp_list, ref, n_process=4):
    assert len(set([len(h) for h in hyp_list])) == 1

    all_hyp_sentence_bleu_list = [get_sent_bleu_list(hyp, ref, n_process=n_process)
                                  for hyp in hyp_list]

    if n_process > len(hyp_list[0]):
        n_process = len(hyp_list[0])

    with Pool(n_process) as pool:
        max_hyp_index_list = list(tqdm(pool.imap(np.argmax, zip(*all_hyp_sentence_bleu_list)),
                                       total=len(all_hyp_sentence_bleu_list)))

    best_hyp_list = []
    for i, max_hyp_index in enumerate(max_hyp_index_list):
        best_hyp = hyp_list[max_hyp_index][i]
        best_hyp_list.append(best_hyp)

    return corpus_bleu([[r] for r in ref], best_hyp_list, smoothing_function=cm.method2)
Example 10: bleu_scorer
# Required module import: from nltk.translate import bleu_score [as alias]
# Or: from nltk.translate.bleu_score import corpus_bleu [as alias]
def bleu_scorer(ref, hyp, script='default'):
    refsend = []
    for i in range(len(ref)):
        refsi = []
        for j in range(len(ref[i])):
            refsi.append(ref[i][j].split())
        refsend.append(refsi)

    gensend = []
    for i in range(len(hyp)):
        gensend.append(hyp[i].split())

    if script == 'nltk':
        metrics = corpus_bleu(refsend, gensend)
        return [metrics]

    metrics = compute_bleu(refsend, gensend)
    return metrics
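Unlike most of the examples above, bleu_scorer tokenizes internally by whitespace, so it takes raw strings: ref is a list of reference lists and hyp a flat list. An illustrative call (sentences invented):

refs = [["the cat sat on the mat", "a cat was on the mat"]]
hyps = ["the cat sat on the mat"]
print(bleu_scorer(refs, hyps, script='nltk'))  # [1.0] for an exact match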
Example 11: bleu_so_far
# Required module import: from nltk.translate import bleu_score [as alias]
# Or: from nltk.translate.bleu_score import corpus_bleu [as alias]
def bleu_so_far(refs, preds):
    Ba = corpus_bleu(refs, preds)
    B1 = corpus_bleu(refs, preds, weights=(1, 0, 0, 0))
    B2 = corpus_bleu(refs, preds, weights=(0, 1, 0, 0))
    B3 = corpus_bleu(refs, preds, weights=(0, 0, 1, 0))
    B4 = corpus_bleu(refs, preds, weights=(0, 0, 0, 1))

    Ba = round(Ba * 100, 2)
    B1 = round(B1 * 100, 2)
    B2 = round(B2 * 100, 2)
    B3 = round(B3 * 100, 2)
    B4 = round(B4 * 100, 2)

    ret = ''
    ret += ('for %s functions\n' % (len(preds)))
    ret += ('Ba %s\n' % (Ba))
    ret += ('B1 %s\n' % (B1))
    ret += ('B2 %s\n' % (B2))
    ret += ('B3 %s\n' % (B3))
    ret += ('B4 %s\n' % (B4))
    return ret
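Note the weighting convention: weights=(0, 1, 0, 0) isolates the 2-gram precision component (an "individual" BLEU-2), whereas the cumulative BLEU-2 used in Example 7 is weights=(0.5, 0.5, 0, 0). A small comparison with invented tokens:

refs = [[['the', 'cat', 'sat', 'on', 'the', 'mat']]]
preds = [['the', 'cat', 'sat', 'on', 'mat']]

individual_2 = corpus_bleu(refs, preds, weights=(0, 1, 0, 0))
cumulative_2 = corpus_bleu(refs, preds, weights=(0.5, 0.5, 0, 0))
print(individual_2, cumulative_2)  # the two conventions generally differ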
Example 12: calc_metrics
# Required module import: from nltk.translate import bleu_score [as alias]
# Or: from nltk.translate.bleu_score import corpus_bleu [as alias]
def calc_metrics(refs, hyps, language, metric="all", meteor_jar=None):
    metrics = dict()
    metrics["count"] = len(hyps)
    metrics["ref_example"] = refs[-1]
    metrics["hyp_example"] = hyps[-1]
    # Wrap bare references so corpus_bleu always sees a list of references
    # per hypothesis.
    many_refs = [[r] if not isinstance(r, list) else r for r in refs]
    if metric in ("bleu", "all"):
        metrics["bleu"] = corpus_bleu(many_refs, hyps)
    if metric in ("rouge", "all"):
        rouge = Rouge()
        scores = rouge.get_scores(hyps, refs, avg=True)
        metrics.update(scores)
    if metric in ("meteor", "all") and meteor_jar is not None and os.path.exists(meteor_jar):
        meteor = Meteor(meteor_jar, language=language)
        metrics["meteor"] = meteor.compute_score(hyps, many_refs)
    if metric in ("duplicate_ngrams", "all"):
        metrics["duplicate_ngrams"] = dict()
        metrics["duplicate_ngrams"].update(calc_duplicate_n_grams_rate(hyps))
    return metrics
Example 13: test_corpus_bleu_with_bad_sentence
# Required module import: from nltk.translate import bleu_score [as alias]
# Or: from nltk.translate.bleu_score import corpus_bleu [as alias]
def test_corpus_bleu_with_bad_sentence(self):
    hyp = "Teo S yb , oe uNb , R , T t , , t Tue Ar saln S , , 5istsi l , 5oe R ulO sae oR R"
    ref = str(
        "Their tasks include changing a pump on the faulty stokehold ."
        "Likewise , two species that are very similar in morphology "
        "were distinguished using genetics ."
    )
    references = [[ref.split()]]
    hypotheses = [hyp.split()]
    try:  # Check that the warning is raised since no. of 2-grams < 0.
        with self.assertWarns(UserWarning):
            # Verify that the BLEU output is undesired since no. of 2-grams < 0.
            self.assertAlmostEqual(
                corpus_bleu(references, hypotheses), 0.0, places=4
            )
    except AttributeError:
        # unittest.TestCase.assertWarns is only supported in Python >= 3.2.
        self.assertAlmostEqual(corpus_bleu(references, hypotheses), 0.0, places=4)
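A practical takeaway from this test: when a hypothesis shares too few higher-order n-grams with its references, corpus_bleu emits a UserWarning and the score collapses towards zero; passing a smoothing function, as several examples above do, is the usual remedy. A minimal sketch:

from nltk.translate.bleu_score import corpus_bleu, SmoothingFunction

references = [[['the', 'cat', 'sat']]]
hypotheses = [['cat', 'the', 'sat']]  # full unigram overlap, no bigram overlap

plain = corpus_bleu(references, hypotheses)  # warns; essentially 0.0
smooth = corpus_bleu(references, hypotheses,
                     smoothing_function=SmoothingFunction().method1)
print(plain, smooth)  # the smoothed score is small but nonzero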
Example 14: evaluate_model
# Required module import: from nltk.translate import bleu_score [as alias]
# Or: from nltk.translate.bleu_score import corpus_bleu [as alias]
def evaluate_model(model, images, captions, tokenizer, max_length):
    actual, predicted = list(), list()
    for image_id, caption_list in tqdm(captions.items()):
        yhat = generate_caption(model, tokenizer, images[image_id], max_length)
        ground_truth = [caption.split() for caption in caption_list]
        actual.append(ground_truth)
        predicted.append(yhat.split())

    print('BLEU Scores :')
    print('A perfect match results in a score of 1.0, whereas a perfect mismatch results in a score of 0.0.')
    print('BLEU-1: %f' % corpus_bleu(actual, predicted, weights=(1.0, 0, 0, 0)))
    print('BLEU-2: %f' % corpus_bleu(actual, predicted, weights=(0.5, 0.5, 0, 0)))
    print('BLEU-3: %f' % corpus_bleu(actual, predicted, weights=(0.3, 0.3, 0.3, 0)))
    print('BLEU-4: %f' % corpus_bleu(actual, predicted, weights=(0.25, 0.25, 0.25, 0.25)))
Example 15: evaluate_model_beam_search
# Required module import: from nltk.translate import bleu_score [as alias]
# Or: from nltk.translate.bleu_score import corpus_bleu [as alias]
def evaluate_model_beam_search(model, images, captions, tokenizer, max_length, beam_index=3):
    actual, predicted = list(), list()
    for image_id, caption_list in tqdm(captions.items()):
        yhat = generate_caption_beam_search(model, tokenizer, images[image_id], max_length, beam_index=beam_index)
        ground_truth = [caption.split() for caption in caption_list]
        actual.append(ground_truth)
        predicted.append(yhat.split())

    print('BLEU Scores :')
    print('A perfect match results in a score of 1.0, whereas a perfect mismatch results in a score of 0.0.')
    print('BLEU-1: %f' % corpus_bleu(actual, predicted, weights=(1.0, 0, 0, 0)))
    print('BLEU-2: %f' % corpus_bleu(actual, predicted, weights=(0.5, 0.5, 0, 0)))
    print('BLEU-3: %f' % corpus_bleu(actual, predicted, weights=(0.3, 0.3, 0.3, 0)))
    print('BLEU-4: %f' % corpus_bleu(actual, predicted, weights=(0.25, 0.25, 0.25, 0.25)))