This article collects typical usage examples of pycocoevalcap.eval.COCOEvalCap in Python. If you are wondering what COCOEvalCap is for, how to call it, or what real-world uses look like, the curated code examples below should help. Note that COCOEvalCap is a class defined in the pycocoevalcap.eval module; you can also explore further usage examples of that module.
Fifteen code examples of eval.COCOEvalCap are shown below, sorted by popularity by default.
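All fifteen examples are variations on the same core pattern: load the ground-truth annotations, load the generated captions as a results file, construct COCOEvalCap, and read the scores. As a quick orientation, here is a minimal sketch of that pattern (the file paths are placeholders):

    from pycocotools.coco import COCO
    from pycocoevalcap.eval import COCOEvalCap

    coco = COCO('annotations/captions_val2014.json')   # ground-truth captions (placeholder path)
    cocoRes = coco.loadRes('results.json')             # generated captions (placeholder path)
    cocoEval = COCOEvalCap(coco, cocoRes)
    cocoEval.params['image_id'] = cocoRes.getImgIds()  # score only images that have results
    cocoEval.evaluate()

    # cocoEval.eval maps metric names (Bleu_1..Bleu_4, METEOR, ROUGE_L, CIDEr,
    # plus SPICE depending on the installed version) to corpus-level scores;
    # cocoEval.imgToEval holds the same metrics per image.
    print(cocoEval.eval)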
Example 1: language_eval
# Required import: from pycocoevalcap import eval [as alias]
# Or: from pycocoevalcap.eval import COCOEvalCap [as alias]
def language_eval(preds, model_id, split):
    import sys
    import os    # os and json are module-level imports in the source repo
    import json
    sys.path.append("coco-caption")
    annFile = 'coco-caption/annotations/captions_val2014.json'
    from pycocotools.coco import COCO
    from pycocoevalcap.eval import COCOEvalCap

    if not os.path.isdir('eval_results'):
        os.mkdir('eval_results')
    cache_path = os.path.join('eval_results/', model_id + '_' + split + '.json')

    coco = COCO(annFile)
    valids = coco.getImgIds()

    # filter results to only those in the MSCOCO validation set (will be about a third)
    preds_filt = [p for p in preds if p['image_id'] in valids]
    print('using %d/%d predictions' % (len(preds_filt), len(preds)))
    json.dump(preds_filt, open(cache_path, 'w'))  # serialize to temporary json file. Sigh, COCO API...

    cocoRes = coco.loadRes(cache_path)
    cocoEval = COCOEvalCap(coco, cocoRes)
    cocoEval.params['image_id'] = cocoRes.getImgIds()
    cocoEval.evaluate()

    # collect corpus-level scores into the output dictionary
    out = {}
    for metric, score in cocoEval.eval.items():
        out[metric] = score

    # attach each generated caption to its per-image metrics
    imgToEval = cocoEval.imgToEval
    for p in preds_filt:
        image_id, caption = p['image_id'], p['caption']
        imgToEval[image_id]['caption'] = caption
    with open(cache_path, 'w') as outfile:
        json.dump({'overall': out, 'imgToEval': imgToEval}, outfile)

    return out
Example 2: language_eval
# Required import: from pycocoevalcap import eval [as alias]
# Or: from pycocoevalcap.eval import COCOEvalCap [as alias]
def language_eval(dataset, preds, model_id, split):
    import sys
    import os    # os, json, and encoder are module-level imports in the source repo
    import json
    from json import encoder
    sys.path.append("coco-caption")
    annFile = 'coco-caption/annotations/captions_val2014.json'
    from pycocotools.coco import COCO
    from pycocoevalcap.eval import COCOEvalCap
    # note: FLOAT_REPR only affects the json encoder on Python 2;
    # Python 3's json module ignores it
    encoder.FLOAT_REPR = lambda o: format(o, '.3f')

    if not os.path.isdir('eval_results'):
        os.mkdir('eval_results')
    cache_path = os.path.join('eval_results/', model_id + '_' + split + '.json')

    coco = COCO(annFile)
    valids = coco.getImgIds()

    # filter results to only those in the MSCOCO validation set (will be about a third)
    preds_filt = [p for p in preds if p['image_id'] in valids]
    print('using %d/%d predictions' % (len(preds_filt), len(preds)))
    json.dump(preds_filt, open(cache_path, 'w'))  # serialize to temporary json file. Sigh, COCO API...

    cocoRes = coco.loadRes(cache_path)
    cocoEval = COCOEvalCap(coco, cocoRes)
    cocoEval.params['image_id'] = cocoRes.getImgIds()
    cocoEval.evaluate()

    # create output dictionary
    out = {}
    for metric, score in cocoEval.eval.items():
        out[metric] = score

    imgToEval = cocoEval.imgToEval
    for p in preds_filt:
        image_id, caption = p['image_id'], p['caption']
        imgToEval[image_id]['caption'] = caption
    with open(cache_path, 'w') as outfile:
        json.dump({'overall': out, 'imgToEval': imgToEval}, outfile)

    return out
Example 3: score_generation
# Required import: from pycocoevalcap import eval [as alias]
# Or: from pycocoevalcap.eval import COCOEvalCap [as alias]
def score_generation(gt_filename=None, generation_result=None):
    coco = COCO(gt_filename)
    generation_coco = coco.loadRes(generation_result)
    # note: the third argument comes from a forked COCOEvalCap; the upstream
    # constructor takes only (coco, cocoRes)
    coco_evaluator = COCOEvalCap(coco, generation_coco, 'noc_test_freq')
    coco_evaluator.evaluate()
Example 4: main
# Required import: from pycocoevalcap import eval [as alias]
# Or: from pycocoevalcap.eval import COCOEvalCap [as alias]
def main(reference_caption_file: str, system_caption_file: str):
    coco = COCO(reference_caption_file)
    coco_system_captions = coco.loadRes(system_caption_file)
    coco_eval = COCOEvalCap(coco, coco_system_captions)
    coco_eval.params['image_id'] = coco_system_captions.getImgIds()
    coco_eval.evaluate()

    print('\nScores:')
    print('=======')
    for metric, score in coco_eval.eval.items():
        print('{}: {:.3f}'.format(metric, score))
Example 5: eval_coco
# Required import: from pycocoevalcap import eval [as alias]
# Or: from pycocoevalcap.eval import COCOEvalCap [as alias]
def eval_coco(annFile, resFile):
    coco = COCO(annFile)
    cocoRes = coco.loadRes(resFile)
    cocoEval = COCOEvalCap(coco, cocoRes)
    cocoEval.evaluate()
    # combine the four headline metrics into a single selection score
    Bleu_4 = cocoEval.eval['Bleu_4']
    METEOR = cocoEval.eval['METEOR']
    ROUGE_L = cocoEval.eval['ROUGE_L']
    CIDEr = cocoEval.eval['CIDEr']
    total = Bleu_4 + METEOR + ROUGE_L + CIDEr
    score = {'Bleu_4': Bleu_4, 'METEOR': METEOR, 'ROUGE_L': ROUGE_L, 'CIDEr': CIDEr, 'total': total}
    return score
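Every example in this article ultimately hands loadRes a JSON file of the same shape: a list of objects, each with an image_id that exists in the annotation file and a single caption string. A minimal sketch of producing such a file (the ids and captions here are made-up placeholders):

    import json

    results = [
        {'image_id': 391895, 'caption': 'a man riding a motorcycle on a dirt road'},
        {'image_id': 522418, 'caption': 'a woman cutting a cake with a knife'},
    ]  # placeholder ids; each must appear in the annotation file passed to COCO()
    with open('results.json', 'w') as f:
        json.dump(results, f)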
Example 6: run
# Required import: from pycocoevalcap import eval [as alias]
# Or: from pycocoevalcap.eval import COCOEvalCap [as alias]
def run(inp):
    # assumes module-level: os, json, tf (TensorFlow 1.x), FLAGS, COCO_PATH,
    # parse_image, Infer, COCO, COCOEvalCap
    out = FLAGS.job_dir + '/val_%s.json' % inp
    if not os.path.exists(out):
        with open(COCO_PATH + '/annotations/captions_val2014.json') as g:
            caption_data = json.load(g)
        name_to_id = [(x['file_name'], x['id']) for x in caption_data['images']]
        name_to_id = dict(name_to_id)

        ret = []
        with tf.Graph().as_default(), tf.Session() as sess:
            example = tf.placeholder(tf.string, [])
            name_op, class_op, _ = parse_image(example)
            infer = Infer(job_dir='%s/model.ckpt-%s' % (FLAGS.job_dir, inp))
            for i in tf.io.tf_record_iterator('data/image_val.tfrec'):
                name, classes = sess.run([name_op, class_op], feed_dict={example: i})
                sentences = infer.infer(classes[::-1])
                cur = {}
                cur['image_id'] = name_to_id[name]
                cur['caption'] = sentences[0][0]
                ret.append(cur)
        with open(out, 'w') as g:
            json.dump(ret, g)

    coco = COCO(COCO_PATH + '/annotations/captions_val2014.json')
    cocoRes = coco.loadRes(out)
    # create cocoEval object by taking coco and cocoRes
    cocoEval = COCOEvalCap(coco, cocoRes)
    # evaluate on a subset of images by setting
    # cocoEval.params['image_id'] = cocoRes.getImgIds()
    # please remove this line when evaluating the full validation set
    cocoEval.params['image_id'] = cocoRes.getImgIds()
    # evaluate results
    cocoEval.evaluate()
    return (inp, cocoEval.eval['CIDEr'], cocoEval.eval['METEOR'],
            cocoEval.eval['Bleu_4'], cocoEval.eval['Bleu_3'],
            cocoEval.eval['Bleu_2'])
Example 7: run
# Required import: from pycocoevalcap import eval [as alias]
# Or: from pycocoevalcap.eval import COCOEvalCap [as alias]
def run(inp):
    # assumes module-level: os, json, tf (TensorFlow 1.x), no_gpu, FLAGS,
    # COCO_PATH, Infer, COCO, COCOEvalCap
    if no_gpu:
        return
    out = FLAGS.job_dir + '/val_%s.json' % inp
    if not os.path.exists(out):
        with open(COCO_PATH + '/annotations/captions_val2014.json') as g:
            caption_data = json.load(g)
        name_to_id = [(x['file_name'], x['id']) for x in caption_data['images']]
        name_to_id = dict(name_to_id)

        ret = []
        with tf.Graph().as_default():
            infer = Infer(job_dir='%s/model.ckpt-%s' % (FLAGS.job_dir, inp))
            with open('data/coco_val.txt', 'r') as g:
                for name in g:
                    name = name.strip()
                    sentences = infer.infer(name)
                    cur = {}
                    cur['image_id'] = name_to_id[name]
                    cur['caption'] = sentences[0][0]
                    ret.append(cur)
        with open(out, 'w') as g:
            json.dump(ret, g)

    coco = COCO(COCO_PATH + '/annotations/captions_val2014.json')
    cocoRes = coco.loadRes(out)
    # create cocoEval object by taking coco and cocoRes
    cocoEval = COCOEvalCap(coco, cocoRes)
    # evaluate on a subset of images by setting
    # cocoEval.params['image_id'] = cocoRes.getImgIds()
    # please remove this line when evaluating the full validation set
    cocoEval.params['image_id'] = cocoRes.getImgIds()
    # evaluate results
    cocoEval.evaluate()
    return (inp, cocoEval.eval['CIDEr'], cocoEval.eval['METEOR'],
            cocoEval.eval['Bleu_4'], cocoEval.eval['Bleu_3'],
            cocoEval.eval['Bleu_2'])
Example 8: eval
# Required import: from pycocoevalcap import eval [as alias]
# Or: from pycocoevalcap.eval import COCOEvalCap [as alias]
# This is a method of an evaluator class; self.coco holds a preloaded
# pycocotools COCO instance (see the sketch after this example).
def eval(self, captions, checkpoint_path, score_metric='CIDEr'):
    # TODO: Make strings variables
    captions_path = checkpoint_path + "-val-captions.json"
    with open(captions_path, 'w') as f:
        json.dump(captions, f)
    cocoRes = self.coco.loadRes(captions_path)
    cocoEval = COCOEvalCap(self.coco, cocoRes)
    cocoEval.evaluate()
    # persist per-image and overall metrics next to the checkpoint
    json.dump(cocoEval.evalImgs, open(checkpoint_path + "-val-metrics-imgs.json", 'w'))
    json.dump(cocoEval.eval, open(checkpoint_path + "-val-metrics-overall.json", 'w'))
    print(cocoEval.eval.items())
    return cocoEval.eval[score_metric]
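Example 8 is a method rather than a free function: it assumes an enclosing evaluator object that loads the ground-truth COCO instance once and reuses it across checkpoints. A minimal sketch of such a wrapper (the class name is hypothetical, not from the source repo):

    from pycocotools.coco import COCO

    class CheckpointEvaluator:  # hypothetical name
        def __init__(self, ann_file):
            self.coco = COCO(ann_file)  # loaded once, reused by eval() above

        # the eval(...) method from Example 8 would be defined here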
Example 9: language_eval
# Required import: from pycocoevalcap import eval [as alias]
# Or: from pycocoevalcap.eval import COCOEvalCap [as alias]
def language_eval(dataset, preds, model_id, split):
    import sys
    import os    # os and json are module-level imports in the source repo
    import json
    sys.path.append("coco-caption")
    if 'coco' in dataset:
        annFile = 'coco-caption/annotations/captions_val2014.json'
    elif 'flickr30k' in dataset or 'f30k' in dataset:
        annFile = 'coco-caption/f30k_captions4eval.json'
    from pycocotools.coco import COCO
    from pycocoevalcap.eval import COCOEvalCap
    # encoder.FLOAT_REPR = lambda o: format(o, '.3f')

    if not os.path.isdir('eval_results'):
        os.mkdir('eval_results')
    cache_path = os.path.join('eval_results/', '.cache_' + model_id + '_' + split + '.json')

    coco = COCO(annFile)
    valids = coco.getImgIds()

    # filter results to only those in the MSCOCO validation set (will be about a third)
    preds_filt = [p for p in preds if p['image_id'] in valids]
    print('using %d/%d predictions' % (len(preds_filt), len(preds)))
    json.dump(preds_filt, open(cache_path, 'w'))  # serialize to temporary json file. Sigh, COCO API...

    cocoRes = coco.loadRes(cache_path)
    cocoEval = COCOEvalCap(coco, cocoRes)
    cocoEval.params['image_id'] = cocoRes.getImgIds()
    cocoEval.evaluate()

    # create output dictionary
    out = {}
    for metric, score in cocoEval.eval.items():
        out[metric] = score

    imgToEval = cocoEval.imgToEval
    for p in preds_filt:
        image_id, caption = p['image_id'], p['caption']
        imgToEval[image_id]['caption'] = caption
    # count_bad is a helper from the source repo that flags degenerate captions
    out['bad_count_rate'] = sum([count_bad(_['caption']) for _ in preds_filt]) / float(len(preds_filt))

    outfile_path = os.path.join('eval_results/', model_id + '_' + split + '.json')
    with open(outfile_path, 'w') as outfile:
        json.dump({'overall': out, 'imgToEval': imgToEval}, outfile)

    return out
Example 10: language_eval
# Required import: from pycocoevalcap import eval [as alias]
# Or: from pycocoevalcap.eval import COCOEvalCap [as alias]
def language_eval(dataset, preds, model_id, split, opt):
    # opt is accepted but unused in this excerpt
    import sys
    import os    # os and json are module-level imports in the source repo
    import json
    sys.path.append("tools/coco-caption")
    if dataset == 'coco':
        annFile = 'tools/coco-caption/annotations/captions_val2014.json'
    elif dataset == 'flickr30k':
        annFile = 'tools/coco-caption/annotations/caption_flickr30k.json'
    from pycocotools.coco import COCO
    from pycocoevalcap.eval import COCOEvalCap

    if not os.path.isdir('eval_results'):
        os.mkdir('eval_results')
    cache_path = os.path.join('eval_results/', model_id + '_' + split + '.json')

    coco = COCO(annFile)
    valids = coco.getImgIds()

    # filter results to only those in the MSCOCO validation set (will be about a third)
    preds_filt = [p for p in preds if p['image_id'] in valids]
    print('using %d/%d predictions' % (len(preds_filt), len(preds)))
    json.dump(preds_filt, open(cache_path, 'w'))  # serialize to temporary json file. Sigh, COCO API...

    cocoRes = coco.loadRes(cache_path)
    # the third argument ('corpus') is accepted by a forked COCOEvalCap; the
    # upstream constructor takes only (coco, cocoRes)
    cocoEval = COCOEvalCap(coco, cocoRes, 'corpus')
    cocoEval.params['image_id'] = cocoRes.getImgIds()
    cocoEval.evaluate()

    # create output dictionary
    out = {}
    for metric, score in cocoEval.eval.items():
        out[metric] = score

    imgToEval = cocoEval.imgToEval
    for p in preds_filt:
        image_id, caption = p['image_id'], p['caption']
        imgToEval[image_id]['caption'] = caption
    with open(cache_path, 'w') as outfile:
        json.dump({'overall': out, 'imgToEval': imgToEval}, outfile)

    return out
Example 11: language_eval
# Required import: from pycocoevalcap import eval [as alias]
# Or: from pycocoevalcap.eval import COCOEvalCap [as alias]
def language_eval(dataset, preds, model_id, split):
    import sys
    import os    # os and json are module-level imports in the source repo
    import json
    sys.path.append("coco-caption")
    if dataset == 'coco':
        annFile = 'coco-caption/annotations/captions_val2014.json'
    elif dataset == 'flickr30k':
        annFile = 'coco-caption/annotations/caption_flickr30k.json'
    elif dataset == 'cc':
        annFile = 'coco-caption/annotations/caption_cc_val.json'
    from pycocotools.coco import COCO
    from pycocoevalcap.eval import COCOEvalCap

    if not os.path.isdir('eval_results'):
        os.mkdir('eval_results')
    cache_path = os.path.join('eval_results/', model_id + '_' + split + '.json')

    coco = COCO(annFile)
    valids = coco.getImgIds()
    # valids = json.load(open('/mnt/dat/CC/annotations/cc_valid_jpgs.json'))
    # valids = {int(i[:-4]):int(i[:-4]) for i,j in valids.items()}

    # filter results to only those in the MSCOCO validation set (will be about a third)
    preds_filt = [p for p in preds if p['image_id'] in valids]
    print('using %d/%d predictions' % (len(preds_filt), len(preds)))
    # print(preds_filt)
    json.dump(preds_filt, open(cache_path, 'w'))  # serialize to temporary json file. Sigh, COCO API...

    cocoRes = coco.loadRes(cache_path)
    # 'corpus' again targets a forked COCOEvalCap constructor (see Example 10)
    cocoEval = COCOEvalCap(coco, cocoRes, 'corpus')
    cocoEval.params['image_id'] = cocoRes.getImgIds()
    cocoEval.evaluate()

    # create output dictionary
    out = {}
    for metric, score in cocoEval.eval.items():
        out[metric] = score

    imgToEval = cocoEval.imgToEval
    for p in preds_filt:
        image_id, caption = p['image_id'], p['caption']
        imgToEval[image_id]['caption'] = caption
    with open(cache_path, 'w') as outfile:
        json.dump({'overall': out, 'imgToEval': imgToEval}, outfile)

    return out
Example 12: eval_oracle
# Required import: from pycocoevalcap import eval [as alias]
# Or: from pycocoevalcap.eval import COCOEvalCap [as alias]
def eval_oracle(preds_n, model_id, split):
    # assumes module-level: os, json, np (numpy), annFile, COCO, COCOEvalCap
    cache_path = os.path.join('eval_results/', model_id + '_' + split + '_n.json')

    coco = COCO(annFile)
    valids = coco.getImgIds()

    # group the n sampled captions by image id
    capsById = {}
    for d in preds_n:
        capsById[d['image_id']] = capsById.get(d['image_id'], []) + [d]

    # evaluate the i-th sample of every image as one results file;
    # list(...) added for Python 3 (the original indexed keys() directly,
    # which only works on Python 2)
    for i in range(len(capsById[list(capsById.keys())[0]])):
        preds = [_[i] for _ in capsById.values()]

        json.dump(preds, open(cache_path, 'w'))  # serialize to temporary json file. Sigh, COCO API...

        cocoRes = coco.loadRes(cache_path)
        cocoEval = COCOEvalCap(coco, cocoRes)
        cocoEval.params['image_id'] = cocoRes.getImgIds()
        cocoEval.evaluate()

        imgToEval = cocoEval.imgToEval
        for img_id in capsById.keys():
            tmp = imgToEval[img_id]
            # flatten the nested SPICE breakdown into SPICE_<category> F-scores
            for k in tmp['SPICE'].keys():
                if k != 'All':
                    tmp['SPICE_'+k] = tmp['SPICE'][k]['f']
                    if tmp['SPICE_'+k] != tmp['SPICE_'+k]:  # nan
                        tmp['SPICE_'+k] = -100
            tmp['SPICE'] = tmp['SPICE']['All']['f']
            if tmp['SPICE'] != tmp['SPICE']: tmp['SPICE'] = -100
            capsById[img_id][i]['scores'] = imgToEval[img_id]

    # oracle score: the best of the n samples per image, averaged over images
    out = {'overall': {}, 'ImgToEval': {}}
    for img_id in capsById.keys():
        out['ImgToEval'][img_id] = {}
        for metric in capsById[img_id][0]['scores'].keys():
            out['ImgToEval'][img_id][metric] = max([_['scores'][metric] for _ in capsById[img_id]])
    for metric in list(out['ImgToEval'].values())[0].keys():  # list(...) added for Python 3
        tmp = np.array([_[metric] for _ in out['ImgToEval'].values()])
        tmp = tmp[tmp != -100]  # drop the NaN sentinels before averaging
        out['overall']['oracle_'+metric] = tmp.mean()

    return out
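Examples 12 and 13 both guard against NaN SPICE scores with a self-inequality test and a -100 sentinel that is filtered out before averaging. The test works because NaN is the only float value that compares unequal to itself:

    import math

    x = float('nan')
    assert x != x          # true only for NaN; the idiom used above
    assert math.isnan(x)   # the more explicit spelling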
Example 13: eval_oracle
# Required import: from pycocoevalcap import eval [as alias]
# Or: from pycocoevalcap.eval import COCOEvalCap [as alias]
def eval_oracle(dataset, preds_n, model_id, split):
    # assumes module-level: os, json, np (numpy), getCOCO, COCOEvalCap
    cache_path = os.path.join('eval_results/', model_id + '_' + split + '_n.json')

    coco = getCOCO(dataset)
    valids = coco.getImgIds()

    # group the n sampled captions by image id
    capsById = {}
    for d in preds_n:
        capsById[d['image_id']] = capsById.get(d['image_id'], []) + [d]

    sample_n = capsById[list(capsById.keys())[0]]
    for i in range(len(capsById[list(capsById.keys())[0]])):
        preds = [_[i] for _ in capsById.values()]

        json.dump(preds, open(cache_path, 'w'))  # serialize to temporary json file. Sigh, COCO API...

        cocoRes = coco.loadRes(cache_path)
        cocoEval = COCOEvalCap(coco, cocoRes)
        cocoEval.params['image_id'] = cocoRes.getImgIds()
        cocoEval.evaluate()

        imgToEval = cocoEval.imgToEval
        for img_id in capsById.keys():
            tmp = imgToEval[img_id]
            # flatten the nested SPICE breakdown into SPICE_<category> F-scores
            for k in tmp['SPICE'].keys():
                if k != 'All':
                    tmp['SPICE_'+k] = tmp['SPICE'][k]['f']
                    if tmp['SPICE_'+k] != tmp['SPICE_'+k]:  # nan
                        tmp['SPICE_'+k] = -100
            tmp['SPICE'] = tmp['SPICE']['All']['f']
            if tmp['SPICE'] != tmp['SPICE']: tmp['SPICE'] = -100
            capsById[img_id][i]['scores'] = imgToEval[img_id]

    # record both the oracle (max) and the average over the n samples per image
    out = {'overall': {}, 'ImgToEval': {}}
    for img_id in capsById.keys():
        out['ImgToEval'][img_id] = {}
        for metric in capsById[img_id][0]['scores'].keys():
            if metric == 'image_id': continue
            out['ImgToEval'][img_id]['oracle_'+metric] = max([_['scores'][metric] for _ in capsById[img_id]])
            out['ImgToEval'][img_id]['avg_'+metric] = sum([_['scores'][metric] for _ in capsById[img_id]]) / len(capsById[img_id])
        out['ImgToEval'][img_id]['captions'] = capsById[img_id]
    for metric in list(out['ImgToEval'].values())[0].keys():
        if metric == 'captions':
            continue
        tmp = np.array([_[metric] for _ in out['ImgToEval'].values()])
        tmp = tmp[tmp != -100]  # drop the NaN sentinels before averaging
        out['overall'][metric] = tmp.mean()

    return out
Example 14: main
# Required import: from pycocoevalcap import eval [as alias]
# Or: from pycocoevalcap.eval import COCOEvalCap [as alias]
def main(_):
    # assumes module-level: os, json, tf (TensorFlow 1.x), tqdm, FLAGS,
    # COCO_PATH, parse_image, Infer, COCO, COCOEvalCap
    infer = Infer()
    with open(COCO_PATH + '/annotations/captions_val2014.json') as g:
        caption_data = json.load(g)
    name_to_id = [(x['file_name'], x['id']) for x in caption_data['images']]
    name_to_id = dict(name_to_id)

    ret = []
    with tf.Graph().as_default(), tf.Session() as sess:
        example = tf.placeholder(tf.string, [])
        name_op, class_op, _ = parse_image(example)
        for i in tqdm(tf.io.tf_record_iterator('data/image_test.tfrec'),
                      total=5000):
            name, classes = sess.run([name_op, class_op], feed_dict={example: i})
            sentences = infer.infer(classes[::-1])
            cur = {}
            cur['image_id'] = name_to_id[name]
            cur['caption'] = sentences[0][0]
            ret.append(cur)

    if os.path.isdir(FLAGS.job_dir):
        out_dir = FLAGS.job_dir
    else:
        out_dir = os.path.split(FLAGS.job_dir)[0]
    out = out_dir + '/test.json'
    with open(out, 'w') as g:
        json.dump(ret, g)

    coco = COCO(COCO_PATH + '/annotations/captions_val2014.json')
    cocoRes = coco.loadRes(out)
    # create cocoEval object by taking coco and cocoRes
    cocoEval = COCOEvalCap(coco, cocoRes)
    # evaluate on a subset of images by setting
    # cocoEval.params['image_id'] = cocoRes.getImgIds()
    # please remove this line when evaluating the full validation set
    cocoEval.params['image_id'] = cocoRes.getImgIds()
    # evaluate results
    cocoEval.evaluate()
    # print output evaluation scores
    for metric, score in cocoEval.eval.items():
        print('%s: %.3f' % (metric, score))
Example 15: main
# Required import: from pycocoevalcap import eval [as alias]
# Or: from pycocoevalcap.eval import COCOEvalCap [as alias]
def main(_):
    # assumes module-level: os, json, cv2, tqdm, FLAGS, COCO_PATH, Infer,
    # COCO, COCOEvalCap
    infer = Infer()
    with open(COCO_PATH + '/annotations/captions_val2014.json') as g:
        caption_data = json.load(g)
    name_to_id = [(x['file_name'], x['id']) for x in caption_data['images']]
    name_to_id = dict(name_to_id)

    with open('data/coco_test.txt', 'r') as g:
        ret = []
        for name in tqdm(g, total=5000):
            name = name.strip()
            sentences = infer.infer(name)
            cur = {}
            cur['image_id'] = name_to_id[name]
            cur['caption'] = sentences[0][0]
            ret.append(cur)
            if FLAGS.vis:
                # optionally show the image with its caption; Esc quits
                im = cv2.imread(FLAGS.data_dir + name)
                print(sentences[0][0])
                cv2.imshow('a', im)
                k = cv2.waitKey()
                if k & 0xff == 27:
                    return

    if os.path.isdir(FLAGS.job_dir):
        out_dir = FLAGS.job_dir
    else:
        out_dir = os.path.split(FLAGS.job_dir)[0]
    out = out_dir + '/test.json'
    with open(out, 'w') as g:
        json.dump(ret, g)

    coco = COCO(COCO_PATH + '/annotations/captions_val2014.json')
    cocoRes = coco.loadRes(out)
    # create cocoEval object by taking coco and cocoRes
    cocoEval = COCOEvalCap(coco, cocoRes)
    # evaluate on a subset of images by setting
    # cocoEval.params['image_id'] = cocoRes.getImgIds()
    # please remove this line when evaluating the full validation set
    cocoEval.params['image_id'] = cocoRes.getImgIds()
    # evaluate results
    cocoEval.evaluate()
    # print output evaluation scores
    for metric, score in cocoEval.eval.items():
        print('%s: %.3f' % (metric, score))