This article collects typical code examples of the Python method pycocoevalcap.cider.cider.Cider. If you are wondering what cider.Cider does, how to call it, or what real usage looks like, the curated code examples below may help. You can also explore further usage examples of the containing module pycocoevalcap.cider.cider.
The following presents 11 code examples of the cider.Cider method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
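Before the collected examples, here is a minimal sketch of how the scorer itself is called. The image ids and captions below are made up for illustration; both dictionaries map an id to a list of caption strings and are assumed to be tokenized already (pycocoevalcap's PTBTokenizer is typically used for that step).
from pycocoevalcap.cider.cider import Cider

# Hypothetical, already-tokenized captions: id -> list of caption strings
gts = {"img1": ["a dog runs on the grass", "a dog is running outside"],
       "img2": ["a man rides a bicycle"]}
res = {"img1": ["a dog running on grass"],
       "img2": ["a man on a bike"]}

scorer = Cider()
score, scores = scorer.compute_score(gts, res)
print("corpus CIDEr:", score)      # one float for the whole set
print("per-image CIDEr:", scores)  # one value per image id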
Example 1: score
# Required import: from pycocoevalcap.cider import cider [as alias]
# Or: from pycocoevalcap.cider.cider import Cider [as alias]
def score(ref, sample):
    # ref and sample are both dict
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Rouge(), "ROUGE_L"),
        (Cider(), "CIDEr")
    ]
    final_scores = {}
    for scorer, method in scorers:
        print('computing %s score with COCO-EVAL...' % (scorer.method()))
        score, scores = scorer.compute_score(ref, sample)
        if type(score) == list:
            for m, s in zip(method, score):
                final_scores[m] = s
        else:
            final_scores[method] = score
    return final_scores
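A call to this helper might look like the sketch below; the ids and captions are hypothetical, and the Bleu/Rouge imports shown are the standard pycocoevalcap modules that the snippet assumes.
from pycocoevalcap.bleu.bleu import Bleu
from pycocoevalcap.rouge.rouge import Rouge
from pycocoevalcap.cider.cider import Cider

# Hypothetical example: each id maps to a list of tokenized captions
ref = {1: ["a cat sits on a mat", "a cat is sitting on the mat"]}
sample = {1: ["a cat on a mat"]}
print(score(ref, sample))
# -> {'Bleu_1': ..., 'Bleu_2': ..., 'Bleu_3': ..., 'Bleu_4': ..., 'ROUGE_L': ..., 'CIDEr': ...}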
Example 2: calc_scores
# Required import: from pycocoevalcap.cider import cider [as alias]
# Or: from pycocoevalcap.cider.cider import Cider [as alias]
def calc_scores(ref, hypo):
    """
    ref, dictionary of reference sentences (id, sentence)
    hypo, dictionary of hypothesis sentences (id, sentence)
    score, dictionary of scores
    """
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L"),
        (Cider(), "CIDEr")
    ]
    final_scores = {}
    for scorer, method in scorers:
        score, scores = scorer.compute_score(ref, hypo)
        if type(score) == list:
            for m, s in zip(method, score):
                final_scores[m] = s
        else:
            final_scores[method] = score
    return final_scores
Example 3: score_all
# Required import: from pycocoevalcap.cider import cider [as alias]
# Or: from pycocoevalcap.cider.cider import Cider [as alias]
def score_all(ref, hypo):
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L"),
        (Cider(), "CIDEr")
    ]
    final_scores = {}
    for scorer, method in scorers:
        score, scores = scorer.compute_score(ref, hypo)
        if type(score) == list:
            for m, s in zip(method, score):
                final_scores[m] = s
        else:
            final_scores[method] = score
    return final_scores
Example 4: __init__
# Required import: from pycocoevalcap.cider import cider [as alias]
# Or: from pycocoevalcap.cider.cider import Cider [as alias]
def __init__(self, args, task):
    super().__init__(args, task)
    self.task = task
    self.generator = SimpleSequenceGenerator(beam=args.scst_beam,
                                             penalty=args.scst_penalty,
                                             max_pos=args.max_target_positions,
                                             eos_index=task.target_dictionary.eos_index)
    # Needed for decoding model output to string
    self.conf_tokenizer = encoders.build_tokenizer(args)
    self.conf_decoder = encoders.build_bpe(args)
    self.captions_dict = task.target_dictionary
    # Tokenizer needed for computing CIDEr scores
    self.tokenizer = PTBTokenizer()
    self.scorer = Cider()
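The criterion above only constructs the tokenizer and scorer; the actual reward computation lives elsewhere in that task. As a rough sketch of how such a PTBTokenizer/Cider pair is typically combined (the captions below are hypothetical, and PTBTokenizer shells out to the Stanford tokenizer, so a local Java installation is required):
from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
from pycocoevalcap.cider.cider import Cider

tokenizer = PTBTokenizer()
scorer = Cider()
# PTBTokenizer expects id -> list of {'caption': ...} dicts and returns id -> list of strings
gts = tokenizer.tokenize({0: [{'caption': 'A brown dog plays with a ball.'}]})
res = tokenizer.tokenize({0: [{'caption': 'A dog playing with a ball.'}]})
reward, per_image = scorer.compute_score(gts, res)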
Example 5: score
# Required import: from pycocoevalcap.cider import cider [as alias]
# Or: from pycocoevalcap.cider.cider import Cider [as alias]
def score(ref, hypo):
    """
    ref, dictionary of reference sentences (id, sentence)
    hypo, dictionary of hypothesis sentences (id, sentence)
    score, dictionary of scores
    """
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L"),
        (Cider(), "CIDEr")
    ]
    final_scores = {}
    for scorer, method in scorers:
        score, scores = scorer.compute_score(ref, hypo)
        if type(score) == list:
            for m, s in zip(method, score):
                final_scores[m] = s
        else:
            final_scores[method] = score
    return final_scores
Example 6: get_dcc_scores
# Required import: from pycocoevalcap.cider import cider [as alias]
# Or: from pycocoevalcap.cider.cider import Cider [as alias]
def get_dcc_scores(self):
    imgIds = self.params['image_id']
    # imgIds = self.coco.getImgIds()
    gts = {}
    res = {}
    for imgId in imgIds:
        gts[imgId] = self.coco.imgToAnns[imgId]
        res[imgId] = self.cocoRes.imgToAnns[imgId]
    tokenizer = PTBTokenizer()
    gts = tokenizer.tokenize(gts)
    res = tokenizer.tokenize(res)
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L"),
        # Note: the df argument selects a precomputed document-frequency file;
        # the stock pycocoevalcap Cider constructor does not take df, so this
        # presumably relies on a modified scorer shipped with the DCC evaluation code.
        (Cider(df='noc_test_freq'), "CIDEr"),
        (Spice(), "SPICE")
    ]
    score_dict = {}
    for scorer, method in scorers:
        print('computing %s score...' % (scorer.method()))
        score, scores = scorer.compute_score(gts, res)
        if type(method) == list:
            for sc, scs, m in zip(score, scores, method):
                score_dict[m] = sc
                print("%s: %0.3f" % (m, sc))
        else:
            score_dict[method] = score
            print("%s: %0.3f" % (method, score))
    return score_dict
Example 7: __init__
# Required import: from pycocoevalcap.cider import cider [as alias]
# Or: from pycocoevalcap.cider.cider import Cider [as alias]
def __init__(self):
    implementation = cider.Cider()
    super(CIDEr, self).__init__('cider', implementation)
Example 8: __init__
# Required import: from pycocoevalcap.cider import cider [as alias]
# Or: from pycocoevalcap.cider.cider import Cider [as alias]
def __init__(self, ground_truth_filenames=None, prediction_filename=None,
             tious=None, max_proposals=1000,
             prediction_fields=PREDICTION_FIELDS, verbose=False):
    # Check that the gt and submission files exist and load them
    if not tious:
        raise IOError('Please input a valid tIoU.')
    if not ground_truth_filenames:
        raise IOError('Please input a valid ground truth file.')
    if not prediction_filename:
        raise IOError('Please input a valid prediction file.')
    self.verbose = verbose
    self.tious = tious
    self.max_proposals = max_proposals
    self.pred_fields = prediction_fields
    self.ground_truths = self.import_ground_truths(ground_truth_filenames)
    self.prediction = self.import_prediction(prediction_filename)
    self.tokenizer = PTBTokenizer()
    # Set up scorers; if not verbose, we only use the one we're
    # testing on: METEOR
    if self.verbose:
        self.scorers = [
            (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
            (Meteor(), "METEOR"),
            (Rouge(), "ROUGE_L"),
            (Cider(), "CIDEr")
        ]
    else:
        self.scorers = [(Meteor(), "METEOR")]
Example 9: get_self_critical_reward
# Required import: from pycocoevalcap.cider import cider [as alias]
# Or: from pycocoevalcap.cider.cider import Cider [as alias]
def get_self_critical_reward(greedy_res, gt_ids, gen_result, batch_size):
    greedy_res = greedy_res.data.cpu().numpy()
    gen_result = gen_result.data.cpu().numpy()
    gt_ids = gt_ids.data.cpu().numpy()
    res = OrderedDict()
    for i in range(batch_size):
        res[i] = [array_to_str(gen_result[i])]
    for i in range(batch_size):
        res[batch_size + i] = [array_to_str(greedy_res[i])]
    gts = OrderedDict()
    for i in range(batch_size):
        gts[i] = [array_to_str(gt_ids[i])]
    for i in range(batch_size):
        gts[batch_size + i] = [array_to_str(gt_ids[i])]
    cider_reward_weight = 1
    # print(gts, res)
    _, cider_scores = CiderD_scorer.compute_score(gts, res)
    print('Cider scores:', _)
    scores = cider_reward_weight * cider_scores
    scores = scores[:batch_size] - scores[batch_size:]
    rewards = np.repeat(scores[:, np.newaxis], gen_result.shape[1], 1)
    return rewards
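This function assumes two module-level names that are not shown here: array_to_str, which turns a row of token ids into a space-separated string, and CiderD_scorer, a pre-built scorer instance. A sketch of what they usually look like in self-critical training code follows; the exact document-frequency setup varies between repositories, so treat this as an assumption rather than the original definitions.
import numpy as np
from collections import OrderedDict
from pycocoevalcap.cider.cider import Cider

def array_to_str(arr):
    # Convert a sequence of token ids into a space-separated string,
    # truncating at the first 0 (assumed end-of-sequence / padding id).
    out = []
    for token in arr:
        if token == 0:
            break
        out.append(str(token))
    return ' '.join(out)

# Many repositories use a CIDEr-D variant initialised with a precomputed
# document-frequency file; a plain Cider instance is used here as a stand-in.
CiderD_scorer = Cider()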
Example 10: CocoScore
# Required import: from pycocoevalcap.cider import cider [as alias]
# Or: from pycocoevalcap.cider.cider import Cider [as alias]
def CocoScore(ref, hyp, metrics_list=None, language='en'):
    """
    Obtains the COCO scores from the references and hypotheses.
    :param ref: Dictionary of reference sentences (id, sentence)
    :param hyp: Dictionary of hypothesis sentences (id, sentence)
    :param metrics_list: List of metrics to evaluate on
    :param language: Language of the sentences (for METEOR)
    :return: dictionary of scores
    """
    if metrics_list is None:
        metrics_list = ['bleu', 'ter', 'meteor', 'rouge_l', 'cider']
    # Use a list (not a map iterator) so the repeated membership tests below work in Python 3
    metrics_list = [metric.lower() for metric in metrics_list]
    scorers = []
    if 'bleu' in metrics_list:
        scorers.append((Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]))
    if 'meteor' in metrics_list:
        scorers.append((Meteor(language), "METEOR"))
    if 'ter' in metrics_list:
        scorers.append((Ter(), "TER"))
    if 'rouge_l' in metrics_list or 'rouge' in metrics_list:
        scorers.append((Rouge(), "ROUGE_L"))
    if 'cider' in metrics_list:
        scorers.append((Cider(), "CIDEr"))
    final_scores = {}
    for scorer, method in scorers:
        score, _ = scorer.compute_score(ref, hyp)
        if isinstance(score, list):
            for m, s in zip(method, score):
                final_scores[m] = s
        else:
            final_scores[method] = score
    return final_scores
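A hypothetical call restricting evaluation to BLEU and CIDEr might look like this (the sentences are invented for illustration):
ref = {0: ['a man is playing a guitar'], 1: ['two children play soccer in a park']}
hyp = {0: ['a man plays guitar'], 1: ['kids playing football']}
print(CocoScore(ref, hyp, metrics_list=['bleu', 'cider']))
# -> {'Bleu_1': ..., 'Bleu_2': ..., 'Bleu_3': ..., 'Bleu_4': ..., 'CIDEr': ...}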
Example 11: score
# Required import: from pycocoevalcap.cider import cider [as alias]
# Or: from pycocoevalcap.cider.cider import Cider [as alias]
def score(self, GT, RES, IDs):
    self.eval = {}
    self.imgToEval = {}
    gts = {}
    res = {}
    for ID in IDs:
        gts[ID] = GT[ID]
        res[ID] = RES[ID]
    print('tokenization...')
    tokenizer = PTBTokenizer()
    gts = tokenizer.tokenize(gts)
    res = tokenizer.tokenize(res)
    # =================================================
    # Set up scorers
    # =================================================
    print('setting up scorers...')
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L"),
        (Cider(), "CIDEr")
    ]
    # =================================================
    # Compute scores
    # =================================================
    for scorer, method in scorers:
        print('computing %s score...' % (scorer.method()))
        score, scores = scorer.compute_score(gts, res)
        if type(method) == list:
            for sc, scs, m in zip(score, scores, method):
                self.setEval(sc, m)
                self.setImgToEvalImgs(scs, IDs, m)
                # print("%s: %0.3f" % (m, sc))
        else:
            self.setEval(score, method)
            self.setImgToEvalImgs(scores, IDs, method)
            # print("%s: %0.3f" % (method, score))
    for metric, score in self.eval.items():
        print('%s: %.3f' % (metric, score))
    return self.eval