This article collects typical usage examples of the Python method pycocoevalcap.rouge.rouge.Rouge. If you are wondering how rouge.Rouge is used in practice, what a concrete call looks like, or where to find real-world examples, the curated snippets below should help. You can also explore the containing module, pycocoevalcap.rouge.rouge, for further usage.
The 12 code examples below all exercise the rouge.Rouge method, each taken from an open-source project.
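Before the examples, here is a minimal sketch of driving Rouge directly, assuming the standard coco-caption style interface: compute_score(gts, res) takes two dictionaries keyed by the same ids, the reference dict mapping each id to a list of sentence strings and the hypothesis dict mapping each id to a single-element list. The ids and sentences below are made up for illustration.

# A minimal sketch, assuming the usual coco-caption interface of pycocoevalcap:
# compute_score(references, hypotheses) with dicts keyed by the same ids,
# values being lists of sentence strings (exactly one hypothesis per id).
from pycocoevalcap.rouge.rouge import Rouge

refs = {0: ["a man is riding a horse"], 1: ["a dog runs across a field"]}
hyps = {0: ["a man rides a horse"], 1: ["a dog is running in a field"]}

rouge_scorer = Rouge()
avg_rouge, per_id_rouge = rouge_scorer.compute_score(refs, hyps)
print("ROUGE_L (corpus average):", avg_rouge)
print("ROUGE_L (per id):", per_id_rouge)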
Example 1: score
# Required import: from pycocoevalcap.rouge import rouge [as alias]
# Or: from pycocoevalcap.rouge.rouge import Rouge [as alias]
def score(ref, sample):
    # ref and sample are both dicts mapping an id to a list of sentences
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Rouge(), "ROUGE_L"),
        (Cider(), "CIDEr")
    ]
    final_scores = {}
    for scorer, method in scorers:
        print('computing %s score with COCO-EVAL...' % (scorer.method()))
        score, scores = scorer.compute_score(ref, sample)
        if type(score) == list:
            for m, s in zip(method, score):
                final_scores[m] = s
        else:
            final_scores[method] = score
    return final_scores
Example 2: calc_scores
# Required import: from pycocoevalcap.rouge import rouge [as alias]
# Or: from pycocoevalcap.rouge.rouge import Rouge [as alias]
def calc_scores(ref, hypo):
    """
    ref, dictionary of reference sentences (id, sentence)
    hypo, dictionary of hypothesis sentences (id, sentence)
    score, dictionary of scores
    """
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L"),
        (Cider(), "CIDEr")
    ]
    final_scores = {}
    for scorer, method in scorers:
        score, scores = scorer.compute_score(ref, hypo)
        if type(score) == list:
            for m, s in zip(method, score):
                final_scores[m] = s
        else:
            final_scores[method] = score
    return final_scores
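As a concrete illustration of the (id, sentence) dictionaries described in the docstring, a call to calc_scores might look like the sketch below. The ids and sentences are made up; each value is a list of sentence strings with exactly one hypothesis per id, and METEOR shells out to a bundled Java jar, so a Java runtime must be available.

# Hypothetical inputs for calc_scores from Example 2; ids and sentences are
# invented for illustration. Each value is a list of sentence strings.
ref = {
    "img1": ["a cat sits on the mat", "there is a cat on a mat"],
    "img2": ["two people walk along the beach"],
}
hypo = {
    "img1": ["a cat is sitting on a mat"],
    "img2": ["people are walking on the beach"],
}
scores = calc_scores(ref, hypo)
for name, value in scores.items():
    print("%s: %.4f" % (name, value))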
Example 3: score_all
# Required import: from pycocoevalcap.rouge import rouge [as alias]
# Or: from pycocoevalcap.rouge.rouge import Rouge [as alias]
def score_all(ref, hypo):
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L"),
        (Cider(), "CIDEr")
    ]
    final_scores = {}
    for scorer, method in scorers:
        score, scores = scorer.compute_score(ref, hypo)
        if type(score) == list:
            for m, s in zip(method, score):
                final_scores[m] = s
        else:
            final_scores[method] = score
    return final_scores
Example 4: score
# Required import: from pycocoevalcap.rouge import rouge [as alias]
# Or: from pycocoevalcap.rouge.rouge import Rouge [as alias]
def score(ref, hypo):
    """
    ref, dictionary of reference sentences (id, sentence)
    hypo, dictionary of hypothesis sentences (id, sentence)
    score, dictionary of scores
    """
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L"),
        (Cider(), "CIDEr")
    ]
    final_scores = {}
    for scorer, method in scorers:
        score, scores = scorer.compute_score(ref, hypo)
        if type(score) == list:
            for m, s in zip(method, score):
                final_scores[m] = s
        else:
            final_scores[method] = score
    return final_scores
Example 5: test
# Required import: from pycocoevalcap.rouge import rouge [as alias]
# Or: from pycocoevalcap.rouge.rouge import Rouge [as alias]
def test(model, dataloader, args):
    scorer = Bleu(4)
    m_scorer = Meteor()
    r_scorer = Rouge()
    hyp = []
    ref = []
    model.eval()
    gold_file = open('tmp_gold.txt', 'w')
    pred_file = open('tmp_pred.txt', 'w')
    with tqdm(dataloader, desc='Test ', mininterval=1) as tq:
        for batch in tq:
            with torch.no_grad():
                seq = model(batch, beam_size=args.beam_size)
            r = write_txt(batch, batch['tgt_text'], gold_file, args)
            h = write_txt(batch, seq, pred_file, args)
            hyp.extend(h)
            ref.extend(r)
    # build the id -> sentence-list dicts expected by the scorers
    hyp = dict(zip(range(len(hyp)), hyp))
    ref = dict(zip(range(len(ref)), ref))
    print(hyp[0], ref[0])
    print('BLEU INP', len(hyp), len(ref))
    print('BLEU', scorer.compute_score(ref, hyp)[0])
    print('METEOR', m_scorer.compute_score(ref, hyp)[0])
    print('ROUGE_L', r_scorer.compute_score(ref, hyp)[0])
    gold_file.close()
    pred_file.close()
Example 6: __init__
# Required import: from pycocoevalcap.rouge import rouge [as alias]
# Or: from pycocoevalcap.rouge.rouge import Rouge [as alias]
def __init__(self):
    self.scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        # (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L")
    ]  # , (Cider(), "CIDEr")
Example 7: __init__
# Required import: from pycocoevalcap.rouge import rouge [as alias]
# Or: from pycocoevalcap.rouge.rouge import Rouge [as alias]
def __init__(self):
    self.scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L")
    ]  # , (Cider(), "CIDEr")
Example 8: get_dcc_scores
# Required import: from pycocoevalcap.rouge import rouge [as alias]
# Or: from pycocoevalcap.rouge.rouge import Rouge [as alias]
def get_dcc_scores(self):
    imgIds = self.params['image_id']
    # imgIds = self.coco.getImgIds()
    gts = {}
    res = {}
    for imgId in imgIds:
        gts[imgId] = self.coco.imgToAnns[imgId]
        res[imgId] = self.cocoRes.imgToAnns[imgId]
    tokenizer = PTBTokenizer()
    gts = tokenizer.tokenize(gts)
    res = tokenizer.tokenize(res)
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L"),
        (Cider(df='noc_test_freq'), "CIDEr"),
        (Spice(), "SPICE")
    ]
    score_dict = {}
    for scorer, method in scorers:
        print('computing %s score...' % (scorer.method()))
        score, scores = scorer.compute_score(gts, res)
        if type(method) == list:
            for sc, scs, m in zip(score, scores, method):
                score_dict[m] = sc
                print("%s: %0.3f" % (m, sc))
        else:
            score_dict[method] = score
            print("%s: %0.3f" % (method, score))
    return score_dict
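Example 8 tokenizes the raw annotations before scoring. In coco-caption, PTBTokenizer.tokenize takes a dict mapping each id to a list of {'caption': ...} dicts and returns a dict mapping the same ids to lists of lowercased, punctuation-stripped sentence strings (it invokes the bundled Stanford tokenizer, so it also needs Java). A small sketch of that round trip, with made-up ids and captions:

# Sketch of the tokenization step, assuming the usual coco-caption interface:
# dict of id -> list of {'caption': ...} in, dict of id -> list of strings out.
from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer

raw_gts = {42: [{"caption": "A man, riding a horse."},
                {"caption": "Someone rides a brown horse!"}]}
tokenizer = PTBTokenizer()
gts = tokenizer.tokenize(raw_gts)
print(gts)  # e.g. {42: ['a man riding a horse', 'someone rides a brown horse']}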
Example 9: __init__
# Required import: from pycocoevalcap.rouge import rouge [as alias]
# Or: from pycocoevalcap.rouge.rouge import Rouge [as alias]
def __init__(self):
    implementation = rouge.Rouge()
    super(ROUGE, self).__init__('rouge', implementation)
Example 10: __init__
# Required import: from pycocoevalcap.rouge import rouge [as alias]
# Or: from pycocoevalcap.rouge.rouge import Rouge [as alias]
def __init__(self, ground_truth_filenames=None, prediction_filename=None,
             tious=None, max_proposals=1000,
             prediction_fields=PREDICTION_FIELDS, verbose=False):
    # Check that the gt and submission files exist and load them
    if len(tious) == 0:
        raise IOError('Please input a valid tIoU.')
    if not ground_truth_filenames:
        raise IOError('Please input a valid ground truth file.')
    if not prediction_filename:
        raise IOError('Please input a valid prediction file.')
    self.verbose = verbose
    self.tious = tious
    self.max_proposals = max_proposals
    self.pred_fields = prediction_fields
    self.ground_truths = self.import_ground_truths(ground_truth_filenames)
    self.prediction = self.import_prediction(prediction_filename)
    self.tokenizer = PTBTokenizer()
    # Set up scorers; if not verbose, we only use the one we're
    # testing on: METEOR
    if self.verbose:
        self.scorers = [
            (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
            (Meteor(), "METEOR"),
            (Rouge(), "ROUGE_L"),
            (Cider(), "CIDEr")
        ]
    else:
        self.scorers = [(Meteor(), "METEOR")]
Example 11: CocoScore
# Required import: from pycocoevalcap.rouge import rouge [as alias]
# Or: from pycocoevalcap.rouge.rouge import Rouge [as alias]
def CocoScore(ref, hyp, metrics_list=None, language='en'):
    """
    Obtains the COCO scores from the references and hypotheses.
    :param ref: Dictionary of reference sentences (id, sentence)
    :param hyp: Dictionary of hypothesis sentences (id, sentence)
    :param metrics_list: List of metrics to evaluate on
    :param language: Language of the sentences (for METEOR)
    :return: Dictionary of scores
    """
    if metrics_list is None:
        metrics_list = ['bleu', 'ter', 'meteor', 'rouge_l', 'cider']
    # Materialise as a list: in Python 3 a bare map() is a single-use iterator,
    # so the repeated membership tests below would silently skip metrics.
    metrics_list = [metric.lower() for metric in metrics_list]
    scorers = []
    if 'bleu' in metrics_list:
        scorers.append((Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]))
    if 'meteor' in metrics_list:
        scorers.append((Meteor(language), "METEOR"))
    if 'ter' in metrics_list:
        scorers.append((Ter(), "TER"))
    if 'rouge_l' in metrics_list or 'rouge' in metrics_list:
        scorers.append((Rouge(), "ROUGE_L"))
    if 'cider' in metrics_list:
        scorers.append((Cider(), "CIDEr"))
    final_scores = {}
    for scorer, method in scorers:
        score, _ = scorer.compute_score(ref, hyp)
        if isinstance(score, list):
            for m, s in zip(method, score):
                final_scores[m] = s
        else:
            final_scores[method] = score
    return final_scores
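A hypothetical call to CocoScore, restricted to BLEU and ROUGE-L; the ids and sentences below are made up for illustration.

# Hypothetical usage of CocoScore from Example 11; values are lists of
# sentence strings, one hypothesis per id.
ref = {0: ["the cat sat on the mat"], 1: ["a man is riding a horse"]}
hyp = {0: ["a cat is sitting on the mat"], 1: ["a man rides a horse"]}
print(CocoScore(ref, hyp, metrics_list=['bleu', 'rouge_l']))
# e.g. {'Bleu_1': ..., 'Bleu_2': ..., 'Bleu_3': ..., 'Bleu_4': ..., 'ROUGE_L': ...}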
Example 12: score
# Required import: from pycocoevalcap.rouge import rouge [as alias]
# Or: from pycocoevalcap.rouge.rouge import Rouge [as alias]
def score(self, GT, RES, IDs):
    self.eval = {}
    self.imgToEval = {}
    gts = {}
    res = {}
    for ID in IDs:
        gts[ID] = GT[ID]
        res[ID] = RES[ID]
    print('tokenization...')
    tokenizer = PTBTokenizer()
    gts = tokenizer.tokenize(gts)
    res = tokenizer.tokenize(res)
    # =================================================
    # Set up scorers
    # =================================================
    print('setting up scorers...')
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L"),
        (Cider(), "CIDEr")
    ]
    # =================================================
    # Compute scores
    # =================================================
    for scorer, method in scorers:
        print('computing %s score...' % (scorer.method()))
        score, scores = scorer.compute_score(gts, res)
        if type(method) == list:
            for sc, scs, m in zip(score, scores, method):
                self.setEval(sc, m)
                self.setImgToEvalImgs(scs, IDs, m)
                # print("%s: %0.3f" % (m, sc))
        else:
            self.setEval(score, method)
            self.setImgToEvalImgs(scores, IDs, method)
            # print("%s: %0.3f" % (method, score))
    for metric, score in self.eval.items():
        print('%s: %.3f' % (metric, score))
    return self.eval
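The setEval and setImgToEvalImgs helpers are not shown in this snippet. In coco-caption-style evaluators they usually just record the corpus-level score and the per-image scores for each metric; the sketch below is an assumption about that common pattern, not the repository's actual code.

# Sketch of the helpers assumed by Example 12 (an illustration, not the
# repository's implementation).
def setEval(self, score, method):
    # corpus-level score for one metric, e.g. self.eval['ROUGE_L'] = 0.53
    self.eval[method] = score

def setImgToEvalImgs(self, scores, img_ids, method):
    # per-image scores for one metric, keyed by image id
    for img_id, score in zip(img_ids, scores):
        self.imgToEval.setdefault(img_id, {"image_id": img_id})[method] = score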