This article collects typical usage examples of pycocoevalcap.bleu.bleu.Bleu in Python. If you are unsure what bleu.Bleu does or how to use it, the curated code examples below may help; you can also explore further usage of pycocoevalcap.bleu.bleu, the module this class lives in.
Fifteen code examples of bleu.Bleu are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
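Before the examples, here is a minimal, self-contained sketch (not taken from any of the examples below; the ids and sentences are illustrative) of the input and output format that Bleu.compute_score expects:

from pycocoevalcap.bleu.bleu import Bleu

# Both arguments map an example id to a list of tokenized sentence strings;
# the hypothesis dict must hold exactly one sentence per id.
gts = {0: ['a cat sits on the mat'], 1: ['a dog runs in the park']}
res = {0: ['a cat is sitting on the mat'], 1: ['a dog running in a park']}

score, scores = Bleu(4).compute_score(gts, res)
# score  -> [Bleu_1, Bleu_2, Bleu_3, Bleu_4] corpus-level values
# scores -> four lists with the corresponding per-example values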
Example 1: score
# Required import: from pycocoevalcap.bleu import bleu [as alias]
# Or: from pycocoevalcap.bleu.bleu import Bleu [as alias]
def score(ref, sample):
    # ref and sample are both dicts mapping an id to a list of sentences
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Rouge(), "ROUGE_L"),
        (Cider(), "CIDEr")
    ]
    final_scores = {}
    for scorer, method in scorers:
        print('computing %s score with COCO-EVAL...' % (scorer.method()))
        score, scores = scorer.compute_score(ref, sample)
        if type(score) == list:
            for m, s in zip(method, score):
                final_scores[m] = s
        else:
            final_scores[method] = score
    return final_scores
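A hypothetical invocation of the function above (the ids and sentences are illustrative; Rouge and Cider must be importable from pycocoevalcap, as in the other examples):

ref = {0: ['a man is riding a horse'], 1: ['two dogs play in the snow']}
sample = {0: ['a man rides a horse'], 1: ['dogs playing in snow']}
print(score(ref, sample))
# -> {'Bleu_1': ..., 'Bleu_2': ..., 'Bleu_3': ..., 'Bleu_4': ..., 'ROUGE_L': ..., 'CIDEr': ...}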
Example 2: calc_scores
# Required import: from pycocoevalcap.bleu import bleu [as alias]
# Or: from pycocoevalcap.bleu.bleu import Bleu [as alias]
def calc_scores(ref, hypo):
    """
    ref, dictionary of reference sentences (id, sentence)
    hypo, dictionary of hypothesis sentences (id, sentence)
    score, dictionary of scores
    """
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L"),
        (Cider(), "CIDEr")
    ]
    final_scores = {}
    for scorer, method in scorers:
        score, scores = scorer.compute_score(ref, hypo)
        if type(score) == list:
            for m, s in zip(method, score):
                final_scores[m] = s
        else:
            final_scores[method] = score
    return final_scores
Example 3: score_all
# Required import: from pycocoevalcap.bleu import bleu [as alias]
# Or: from pycocoevalcap.bleu.bleu import Bleu [as alias]
def score_all(ref, hypo):
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L"),
        (Cider(), "CIDEr")
    ]
    final_scores = {}
    for scorer, method in scorers:
        score, scores = scorer.compute_score(ref, hypo)
        if type(score) == list:
            for m, s in zip(method, score):
                final_scores[m] = s
        else:
            final_scores[method] = score
    return final_scores
Example 4: score
# Required import: from pycocoevalcap.bleu import bleu [as alias]
# Or: from pycocoevalcap.bleu.bleu import Bleu [as alias]
def score(ref, hypo):
    """
    ref, dictionary of reference sentences (id, sentence)
    hypo, dictionary of hypothesis sentences (id, sentence)
    score, dictionary of scores
    """
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L"),
        (Cider(), "CIDEr")
    ]
    final_scores = {}
    for scorer, method in scorers:
        score, scores = scorer.compute_score(ref, hypo)
        if type(score) == list:
            for m, s in zip(method, score):
                final_scores[m] = s
        else:
            final_scores[method] = score
    return final_scores
Example 5: test
# Required import: from pycocoevalcap.bleu import bleu [as alias]
# Or: from pycocoevalcap.bleu.bleu import Bleu [as alias]
def test(model, dataloader, args):
    scorer = Bleu(4)
    m_scorer = Meteor()
    r_scorer = Rouge()
    hyp = []
    ref = []
    model.eval()
    gold_file = open('tmp_gold.txt', 'w')
    pred_file = open('tmp_pred.txt', 'w')
    with tqdm(dataloader, desc='Test ', mininterval=1) as tq:
        for batch in tq:
            with torch.no_grad():
                seq = model(batch, beam_size=args.beam_size)
            r = write_txt(batch, batch['tgt_text'], gold_file, args)
            h = write_txt(batch, seq, pred_file, args)
            hyp.extend(h)
            ref.extend(r)
    # the pycocoevalcap scorers expect dicts keyed by example id
    hyp = dict(zip(range(len(hyp)), hyp))
    ref = dict(zip(range(len(ref)), ref))
    print(hyp[0], ref[0])
    print('BLEU INP', len(hyp), len(ref))
    print('BLEU', scorer.compute_score(ref, hyp)[0])
    print('METEOR', m_scorer.compute_score(ref, hyp)[0])
    print('ROUGE_L', r_scorer.compute_score(ref, hyp)[0])
    gold_file.close()
    pred_file.close()
Example 6: score
# Required import: from pycocoevalcap.bleu import bleu [as alias]
# Or: from pycocoevalcap.bleu.bleu import Bleu [as alias]
def score(ref, hypo):
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"])
    ]
    final_scores = {}
    for scorer, method in scorers:
        score, scores = scorer.compute_score(ref, hypo)
        if type(score) == list:
            for m, s in zip(method, score):
                final_scores[m] = s
        else:
            final_scores[method] = score
    return final_scores
Example 7: init_scorer
# Required import: from pycocoevalcap.bleu import bleu [as alias]
# Or: from pycocoevalcap.bleu.bleu import Bleu [as alias]
def init_scorer(cached_tokens):
    global CiderD_scorer
    CiderD_scorer = CiderD_scorer or CiderD(df=cached_tokens)
    global Bleu_scorer
    Bleu_scorer = Bleu_scorer or Bleu(4)
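The snippet above relies on module-level globals that are not shown. A minimal sketch of the assumed surrounding module state (the CiderD import path varies between repositories and is an assumption here, as is the df name):

from pycocoevalcap.bleu.bleu import Bleu
# assumption: CiderD ships with the ciderD evaluation package bundled in many
# self-critical captioning repositories; adjust the import to your setup
from ciderD.ciderD import CiderD

CiderD_scorer = None   # populated lazily by init_scorer
Bleu_scorer = None

init_scorer('coco-train-idxs')  # 'coco-train-idxs' is an illustrative df name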
Example 8: __init__
# Required import: from pycocoevalcap.bleu import bleu [as alias]
# Or: from pycocoevalcap.bleu.bleu import Bleu [as alias]
def __init__(self):
    self.scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        # (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L")
    ]  # , (Cider(), "CIDEr")
Example 9: __init__
# Required import: from pycocoevalcap.bleu import bleu [as alias]
# Or: from pycocoevalcap.bleu.bleu import Bleu [as alias]
def __init__(self):
    self.scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L")
    ]  # , (Cider(), "CIDEr")
Example 10: get_self_critical_reward
# Required import: from pycocoevalcap.bleu import bleu [as alias]
# Or: from pycocoevalcap.bleu.bleu import Bleu [as alias]
def get_self_critical_reward(greedy_res, data_gts, gen_result, opt):
    batch_size = gen_result.size(0)  # batch_size = sample_size * seq_per_img
    seq_per_img = batch_size // len(data_gts)
    res = OrderedDict()
    gen_result = gen_result.data.cpu().numpy()
    greedy_res = greedy_res.data.cpu().numpy()
    # first batch_size entries: sampled captions; next batch_size: greedy captions
    for i in range(batch_size):
        res[i] = [array_to_str(gen_result[i])]
    for i in range(batch_size):
        res[batch_size + i] = [array_to_str(greedy_res[i])]
    gts = OrderedDict()
    for i in range(len(data_gts)):
        gts[i] = [array_to_str(data_gts[i][j]) for j in range(len(data_gts[i]))]
    res_ = [{'image_id': i, 'caption': res[i]} for i in range(2 * batch_size)]
    res__ = {i: res[i] for i in range(2 * batch_size)}
    gts = {i: gts[i % batch_size // seq_per_img] for i in range(2 * batch_size)}
    if opt.cider_reward_weight > 0:
        _, cider_scores = CiderD_scorer.compute_score(gts, res_)
        print('Cider scores:', _)
    else:
        cider_scores = 0
    if opt.bleu_reward_weight > 0:
        _, bleu_scores = Bleu_scorer.compute_score(gts, res__)
        bleu_scores = np.array(bleu_scores[3])  # keep only the BLEU-4 column
        print('Bleu scores:', _[3])
    else:
        bleu_scores = 0
    scores = opt.cider_reward_weight * cider_scores + opt.bleu_reward_weight * bleu_scores
    scores = scores[:batch_size] - scores[batch_size:]  # sampled minus greedy baseline
    rewards = np.repeat(scores[:, np.newaxis], gen_result.shape[1], 1)
    return rewards
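For context, a hedged sketch (not part of the original example) of how the returned reward matrix is commonly consumed in self-critical sequence training; the logprobs tensor, gen_result token ids, and pad_idx are assumptions:

import torch

def rl_loss(logprobs, gen_result, rewards, pad_idx=0):
    # logprobs: (batch, seq_len) log-probabilities of the sampled tokens
    # rewards:  (batch, seq_len) numpy array from get_self_critical_reward
    rewards = torch.from_numpy(rewards).float().to(logprobs.device)
    mask = (gen_result != pad_idx).float()  # ignore padded positions
    return -(logprobs * rewards * mask).sum() / mask.sum()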
Example 11: get_dcc_scores
# Required import: from pycocoevalcap.bleu import bleu [as alias]
# Or: from pycocoevalcap.bleu.bleu import Bleu [as alias]
def get_dcc_scores(self):
    imgIds = self.params['image_id']
    # imgIds = self.coco.getImgIds()
    gts = {}
    res = {}
    for imgId in imgIds:
        gts[imgId] = self.coco.imgToAnns[imgId]
        res[imgId] = self.cocoRes.imgToAnns[imgId]
    tokenizer = PTBTokenizer()
    gts = tokenizer.tokenize(gts)
    res = tokenizer.tokenize(res)
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L"),
        (Cider(df='noc_test_freq'), "CIDEr"),
        (Spice(), "SPICE")
    ]
    score_dict = {}
    for scorer, method in scorers:
        print('computing %s score...' % (scorer.method()))
        score, scores = scorer.compute_score(gts, res)
        if type(method) == list:
            for sc, scs, m in zip(score, scores, method):
                score_dict[m] = sc
                print("%s: %0.3f" % (m, sc))
        else:
            score_dict[method] = score
            print("%s: %0.3f" % (method, score))
    return score_dict
Example 12: __init__
# Required import: from pycocoevalcap.bleu import bleu [as alias]
# Or: from pycocoevalcap.bleu.bleu import Bleu [as alias]
def __init__(self, n=4):
    implementation = bleu.Bleu(n)
    super(BLEU, self).__init__('bleu', implementation)
    self._n = n
Example 13: cal_BLEU_4
# Required import: from pycocoevalcap.bleu import bleu [as alias]
# Or: from pycocoevalcap.bleu.bleu import Bleu [as alias]
def cal_BLEU_4(generated, reference, is_corpus=False):
    BLEUscore = [0.0, 0.0, 0.0, 0.0]
    for idx, g in enumerate(generated):
        if is_corpus:
            score, scores = Bleu(4).compute_score(reference, {0: [g]})
        else:
            score, scores = Bleu(4).compute_score({0: [reference[0][idx]]},
                                                  {0: [g]})
        for i, s in zip([0, 1, 2, 3], score):
            BLEUscore[i] += s
    # average the accumulated BLEU-1..4 over all generated sentences
    BLEUscore[0] = BLEUscore[0] / len(generated)
    BLEUscore[1] = BLEUscore[1] / len(generated)
    BLEUscore[2] = BLEUscore[2] / len(generated)
    BLEUscore[3] = BLEUscore[3] / len(generated)
    return BLEUscore
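A hypothetical call of the function above in sentence-level mode (the sentences are illustrative; reference maps key 0 to one reference per generated sentence):

generated = ['a dog runs in the park', 'a cat sits on the mat']
reference = {0: ['a dog is running in a park', 'a cat sat on a mat']}
bleu1, bleu2, bleu3, bleu4 = cal_BLEU_4(generated, reference)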
Example 14: __init__
# Required import: from pycocoevalcap.bleu import bleu [as alias]
# Or: from pycocoevalcap.bleu.bleu import Bleu [as alias]
def __init__(self, ground_truth_filenames=None, prediction_filename=None,
             tious=None, max_proposals=1000,
             prediction_fields=PREDICTION_FIELDS, verbose=False):
    # Check that the gt and submission files exist and load them
    if len(tious) == 0:
        raise IOError('Please input a valid tIoU.')
    if not ground_truth_filenames:
        raise IOError('Please input a valid ground truth file.')
    if not prediction_filename:
        raise IOError('Please input a valid prediction file.')
    self.verbose = verbose
    self.tious = tious
    self.max_proposals = max_proposals
    self.pred_fields = prediction_fields
    self.ground_truths = self.import_ground_truths(ground_truth_filenames)
    self.prediction = self.import_prediction(prediction_filename)
    self.tokenizer = PTBTokenizer()
    # Set up scorers; if not verbose, we only use the one we're testing on: METEOR
    if self.verbose:
        self.scorers = [
            (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
            (Meteor(), "METEOR"),
            (Rouge(), "ROUGE_L"),
            (Cider(), "CIDEr")
        ]
    else:
        self.scorers = [(Meteor(), "METEOR")]
Example 15: get_scores
# Required import: from pycocoevalcap.bleu import bleu [as alias]
# Or: from pycocoevalcap.bleu.bleu import Bleu [as alias]
def get_scores(data_gts, gen_result, opt):
    batch_size = gen_result.size(0)  # batch_size = sample_size * seq_per_img
    seq_per_img = batch_size // len(data_gts)
    res = OrderedDict()
    gen_result = gen_result.data.cpu().numpy()
    for i in range(batch_size):
        res[i] = [array_to_str(gen_result[i])]
    gts = OrderedDict()
    for i in range(len(data_gts)):
        gts[i] = [array_to_str(data_gts[i][j]) for j in range(len(data_gts[i]))]
    res_ = [{'image_id': i, 'caption': res[i]} for i in range(batch_size)]
    res__ = {i: res[i] for i in range(batch_size)}
    gts = {i: gts[i // seq_per_img] for i in range(batch_size)}
    if opt.cider_reward_weight > 0:
        _, cider_scores = CiderD_scorer.compute_score(gts, res_)
        print('Cider scores:', _)
    else:
        cider_scores = 0
    if opt.bleu_reward_weight > 0:
        _, bleu_scores = Bleu_scorer.compute_score(gts, res__)
        bleu_scores = np.array(bleu_scores[3])  # keep only the BLEU-4 column
        print('Bleu scores:', _[3])
    else:
        bleu_scores = 0
    scores = opt.cider_reward_weight * cider_scores + opt.bleu_reward_weight * bleu_scores
    return scores