本文整理汇总了Python中pycocotools.coco.COCO类的典型用法代码示例。如果您正苦于以下问题:Python COCO类的具体用法?Python COCO怎么用?Python COCO使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了COCO类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: evaluate
def evaluate():
    """Run COCO bounding-box detection evaluation and print the summary table.

    Reads the ground truth from 'annotations.json' and the detection results
    from 'detections.json' in the current directory.
    """
    ground_truth = COCO('annotations.json')
    detections = ground_truth.loadRes('detections.json')
    evaluator = COCOeval(ground_truth, detections, 'bbox')
    evaluator.evaluate()
    evaluator.accumulate()
    evaluator.summarize()
示例2: coco_eval
def coco_eval(ann_fn, json_fn, save_fn):
    """Score captioning results against COCO ground-truth annotations.

    :param ann_fn: path to the ground-truth annotation json.
    :param json_fn: path to the result json to score.
    :param save_fn: destination passed through to the evaluator.
    """
    gt = COCO(ann_fn)
    results = gt.loadRes(json_fn)
    evaluator = COCOEvalCap(gt, results)
    # Restrict scoring to the images present in the result file; comment out
    # the next line to evaluate the full validation or testing set.
    evaluator.params['image_id'] = results.getImgIds()
    evaluator.evaluate(save_fn)
示例3: ablate
def ablate(imgIds=None, mode='destroy', out_path="tmp", coco=coco, ct=None, **args):
    """Ablation entry point 2.0.

    Created to accommodate background-destroying ablation.  Dispatches the
    old ablations (gaussian, blackout & median) to gen_ablation and handles
    'destroy' / 'median_bg' itself.

    :param imgIds: COCO-Text image ids to ablate; when None or empty, one
        random legible training image is chosen.
    :param mode: one of 'gaussian', 'blackout', 'median', 'destroy', 'median_bg'.
    :param out_path: output directory (joined under CD/..) for ablated images.
    :param coco: COCO instance.  NOTE(review): the default binds the
        module-level `coco` at definition time — confirm this is intended.
    :param ct: COCO_Text instance; loaded from COCO_Text.json when None.
    :raises ValueError: on an unknown mode (previously this crashed later
        with NameError on `ablt`).
    :return: list of (image id, original path, output path) tuples.
    """
    if ct is None:
        ct = coco_text.COCO_Text(os.path.join(CD, 'COCO_Text.json'))
    if not imgIds:
        # No ids supplied (was a mutable-default `imgIds=[]`): pick one
        # random legible training image.
        imgIds = ct.getImgIds(imgIds=ct.train, catIds=[('legibility', 'legible')])
        imgIds = [imgIds[np.random.randint(0, len(imgIds))]]
    # Dispatch to old ablation entry point.
    if mode in ('gaussian', 'blackout', 'median'):
        return gen_ablation(imgIds, mode, ct, out_path=out_path, **args)
    if mode not in ('destroy', 'median_bg'):
        raise ValueError("unknown ablation mode: %r" % (mode,))
    # Else do destroy_bg / median_bg.
    if coco is None:
        coco = COCO('%s/annotations/instances_%s.json' % (DATA_PATH, DATA_TYPE))
    imgs = coco.loadImgs(imgIds)
    results = []
    for idx, img in enumerate(imgs):
        print("Ablating image {}/{} with id {} ".format(idx + 1, len(imgIds), img['id']))
        ori_file_name = os.path.join(CD, DATA_PATH, DATA_TYPE, img['file_name'])
        orig = io.imread(ori_file_name)
        if mode == 'destroy':
            ablt = destroy_bg(orig, img['id'], coco, **args)
        else:  # mode == 'median_bg'
            ablt = median_bg(orig, img['id'], coco, **args)
        out_file_name = os.path.join(CD, "..", out_path, "%s_%s" % (mode, img['file_name']))
        io.imsave(out_file_name, ablt)
        results.append((img['id'], ori_file_name, out_file_name))
    return results
示例4: main
def main(argv):
    """Parse -i/-o/-a command-line options and run COCO detection evaluation.

    :param argv: argument list (typically sys.argv[1:]).
    """
    in_path = ''
    out_path = ''
    ann_path = ''
    try:
        opts, args = getopt.getopt(argv, "hi:o:a:", ["in=", "out=", "annotation="])
    except getopt.GetoptError:
        # Was a Python 2 print statement; use the function form as the
        # rest of this function already does.
        print('test.py -i <inputfile> -o <outputfile> -a <annotationfile>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('test.py -i <inputfile> -o <outputfile> -a <annotationfile>')
            sys.exit()
        elif opt in ("-i", "--in"):
            in_path = arg
        elif opt in ("-o", "--out"):
            out_path = arg
        elif opt in ("-a", "--annotation"):
            ann_path = arg
    print('Performing evaluation using Coco Python API...')
    _COCO = COCO(ann_path)
    _cats = _COCO.loadCats(_COCO.getCatIds())
    # Index 0 is reserved for background, followed by the COCO category names.
    _classes = tuple(['__background__'] + [c['name'] for c in _cats])
    _do_eval(in_path, out_path, _COCO, _classes)
示例5: language_eval
def language_eval(dataset, preds):
    """Score caption predictions with the COCO caption metrics.

    :param dataset: dataset name; 'coco' selects MSCOCO val2014 annotations,
        anything else selects the Flickr30k annotation file.
    :param preds: list of prediction dicts, each with an 'image_id' key.
    :return: dict mapping metric name to score.
    """
    import sys
    if 'coco' in dataset:
        sys.path.append("coco-caption")
        annFile = 'coco-caption/annotations/captions_val2014.json'
    else:
        sys.path.append("f30k-caption")
        annFile = 'f30k-caption/annotations/dataset_flickr30k.json'
    from pycocotools.coco import COCO
    from pycocoevalcap.eval import COCOEvalCap
    # NOTE(review): `encoder` is presumably json.encoder imported elsewhere
    # in this module — confirm.  Forces 3-decimal floats in dumped json.
    encoder.FLOAT_REPR = lambda o: format(o, '.3f')
    coco = COCO(annFile)
    valids = coco.getImgIds()
    # filter results to only those in MSCOCO validation set (will be about a third)
    preds_filt = [p for p in preds if p['image_id'] in valids]
    print('using %d/%d predictions' % (len(preds_filt), len(preds)))
    # serialize to temporary json file (and close it). Sigh, COCO API...
    resFile = 'tmp.json'
    with open(resFile, 'w') as f:
        json.dump(preds_filt, f)
    cocoRes = coco.loadRes(resFile)
    cocoEval = COCOEvalCap(coco, cocoRes)
    cocoEval.params['image_id'] = cocoRes.getImgIds()
    cocoEval.evaluate()
    # create output dictionary
    out = {metric: score for metric, score in cocoEval.eval.items()}
    return out
示例6: __init__
def __init__(self, annotation_file=None):
    """Construct the SALICON helper for reading and visualizing annotations.

    :param annotation_file (str): location of the annotation file.
    """
    # All loading and index-building is delegated to the COCO base class.
    COCO.__init__(self, annotation_file=annotation_file)
示例7: language_eval
def language_eval(input_data, savedir, split):
    """Evaluate caption predictions with the COCO caption metrics.

    :param input_data: path to a prediction json file, or the prediction
        list itself.
    :param savedir: directory where the filtered result json is written.
    :param split: split name embedded in the result file name.
    :raises TypeError: if input_data is neither a filename nor a list
        (previously this fell through to a NameError on `preds`).
    :return: (metric->score dict, per-image evaluation list).
    """
    if isinstance(input_data, str):  # Filename given.
        with open(input_data, 'r') as f:
            preds = json.load(f)
    elif isinstance(input_data, list):  # Direct predictions given.
        preds = input_data
    else:
        raise TypeError("input_data must be a filename or a list of predictions")
    annFile = 'third_party/coco-caption/annotations/captions_val2014.json'
    coco = COCO(annFile)
    valids = coco.getImgIds()
    # Filter results to only those in MSCOCO validation set (will be about a third)
    preds_filt = [p for p in preds if p['image_id'] in valids]
    print('Using %d/%d predictions' % (len(preds_filt), len(preds)))
    resFile = osp.join(savedir, 'result_%s.json' % (split,))
    # Serialize to a json file (and close it). Sigh, COCO API...
    with open(resFile, 'w') as f:
        json.dump(preds_filt, f)
    cocoRes = coco.loadRes(resFile)
    cocoEval = COCOEvalCap(coco, cocoRes)
    cocoEval.params['image_id'] = cocoRes.getImgIds()
    cocoEval.evaluate()
    # Create output dictionary.
    out = {metric: score for metric, score in cocoEval.eval.items()}
    # Return aggregate and per image score.
    return out, cocoEval.evalImgs
示例8: main
def main():
    """Write hard2.csv: 1000 random COCO val2014 captions, each paired with
    the described image's URL plus three distractor URLs drawn from images
    sharing at least one category."""
    random.seed(123)
    data_dir = '/home/gchrupala/repos/coco'
    data_type = 'val2014'
    cap = COCO('%s/annotations/captions_%s.json' % (data_dir, data_type))
    coco = COCO('%s/annotations/instances_%s.json' % (data_dir, data_type))
    # Map each image id to the set of category ids it contains.
    img_cat = {}
    for cat, cat_imgs in coco.catToImgs.items():
        for image_id in cat_imgs:
            img_cat.setdefault(image_id, set()).add(cat)
    with open('hard2.csv', 'w') as out:  # renamed from `file` (shadowed the builtin)
        writer = csv.writer(out)
        writer.writerow(["desc", "url_1", "url_2", "url_3", "url_4"])
        img_ids = random.sample(coco.getImgIds(), 1000)
        for img in coco.loadImgs(img_ids):
            if img['id'] not in img_cat:
                continue
            cats = img_cat[img['id']]
            desc = random.sample(cap.imgToAnns[img['id']], 1)[0]
            # Pool of images sharing a category, then pick three distractors.
            pool = sum([coco.getImgIds(catIds=[cat]) for cat in cats], [])
            distractors = coco.loadImgs(random.sample(pool, 3))
            urls = [img['coco_url']] + [d['coco_url'] for d in distractors]
            random.shuffle(urls)
            writer.writerow([desc['caption']] + urls)
示例9: coco_eval
def coco_eval(candidates_file, references_file):
    """
    Given the candidates and references, the coco-caption module is used to
    calculate various metrics.  Returns a list of dictionaries containing:
    BLEU, ROUGE, METEOR and CIDEr.
    """
    # Suppress coco-eval's verbose printing by pointing stdout at /dev/null.
    saved_stdout = sys.stdout
    sys.stdout = open(os.devnull, "w")
    try:
        # Derived from example code in the coco-captions repo.
        gt = COCO(references_file)
        res = gt.loadRes(candidates_file)
        scorer = COCOEvalCap(gt, res)
        scorer.evaluate()
    finally:
        # Restore standard output even if evaluation failed.
        sys.stdout.close()
        sys.stdout = saved_stdout
    return scorer.evalImgs
示例10: main
def main(argv):
    """Score val predictions in results/<name> against COCO val2014 captions
    and dump the metric scores to <input_json>_out.json (read from Lua).

    NOTE(review): the `argv` parameter is ignored; the result file name is
    taken from sys.argv[1] directly — confirm callers rely on that before
    changing it.
    """
    input_json = 'results/' + sys.argv[1]
    annFile = 'annotations/captions_val2014.json'
    coco = COCO(annFile)
    valids = coco.getImgIds()
    with open(input_json, 'r') as f:
        checkpoint = json.load(f)
    preds = checkpoint['val_predictions']
    # filter results to only those in MSCOCO validation set (will be about a third)
    preds_filt = [p for p in preds if p['image_id'] in valids]
    print('using %d/%d predictions' % (len(preds_filt), len(preds)))
    # serialize to temporary json file (and close it). Sigh, COCO API...
    resFile = 'tmp.json'
    with open(resFile, 'w') as f:
        json.dump(preds_filt, f)
    cocoRes = coco.loadRes(resFile)
    cocoEval = COCOEvalCap(coco, cocoRes)
    cocoEval.params['image_id'] = cocoRes.getImgIds()
    cocoEval.evaluate()
    # create output dictionary
    out = {metric: score for metric, score in cocoEval.eval.items()}
    # serialize to file, to be read from Lua
    with open(input_json + '_out.json', 'w') as f:
        json.dump(out, f)
示例11: main
def main():
    """Score model-generated sentences against reference captions with the
    COCO caption metrics and print every metric's score."""
    HASH_IMG_NAME = True
    pylab.rcParams['figure.figsize'] = (10.0, 8.0)
    # Render floats with three decimals in any json the evaluator dumps.
    json.encoder.FLOAT_REPR = lambda o: format(o, '.3f')
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--inputfile", type=str, required=True,
                        help='File containing model-generated/hypothesis sentences.')
    parser.add_argument("-r", "--references", type=str, required=True,
                        help='JSON File containing references/groundtruth sentences.')
    args = parser.parse_args()
    prediction_file = args.inputfile
    reference_file = args.references
    json_predictions_file = '{0}.json'.format(prediction_file)
    # Convert the raw prediction file into COCO result-json format.
    crf = CocoResFormat()
    crf.read_file(prediction_file, HASH_IMG_NAME)
    crf.dump_json(json_predictions_file)
    # create coco object and cocoRes object.
    coco = COCO(reference_file)
    cocoRes = coco.loadRes(json_predictions_file)
    # create cocoEval object.
    cocoEval = COCOEvalCap(coco, cocoRes)
    # evaluate results
    cocoEval.evaluate()
    # print output evaluation scores (was a Python 2 print statement)
    for metric, score in cocoEval.eval.items():
        print('%s: %.3f' % (metric, score))
示例12: __init__
def __init__(self, root_dir, data_dir, anno_file):
    """Load the COCO annotation file and keep the handles the dataset needs.

    :param root_dir: dataset root directory containing the annotation file.
    :param data_dir: sub-directory under the root holding the image data.
    :param anno_file: annotation file name relative to root_dir.
    """
    coco_api = COCO(os.path.join(root_dir, anno_file))
    self.coco = coco_api
    # Materialize every annotation record up front.
    self.anns = coco_api.loadAnns(coco_api.getAnnIds())
    self.vocab = None  # Later set from outside
    self.coco_root = root_dir
    self.coco_data = data_dir
示例13: cocoval
def cocoval(detected_json):
    """Run COCO bbox evaluation of *detected_json* against the ground truth
    configured in config.eval_json and print the summary table."""
    gt = COCO(config.eval_json)
    dt = gt.loadRes(detected_json)
    evaluator = COCOeval(gt, dt, iouType='bbox')
    # evaluator.params.imgIds = gt.getImgIds()
    evaluator.evaluate()
    evaluator.accumulate()
    evaluator.summarize()
示例14: score_generation
def score_generation(gt_filename=None, generation_result=None):
    """Evaluate generated captions against ground truth.

    :param gt_filename: path to the ground-truth annotation json.
    :param generation_result: path to the generated-caption result json.
    :return: evaluation results from COCOEvalCap.
    """
    result_records = read_json(generation_result)
    gt_coco = COCO(gt_filename)
    res_coco = gt_coco.loadRes(generation_result)
    evaluator = COCOEvalCap(gt_coco, res_coco)
    # Score only the image ids that appear in the result file.
    evaluator.params['image_id'] = [rec['image_id'] for rec in result_records]
    return evaluator.evaluate(return_results=True)
示例15: _load_gt_roidb
def _load_gt_roidb(self):
    """Build the ground-truth roidb: one record per image in the annotation file.

    Maps COCO category ids onto this dataset's contiguous class indices;
    self.classes[0] is treated as background and skipped.
    """
    coco_api = COCO(self._anno_file)
    # COCO category id <-> category name, then name -> contiguous index.
    cat_ids = coco_api.getCatIds()
    cat_names = [cat['name'] for cat in coco_api.loadCats(cat_ids)]
    name_to_coco_id = dict(zip(cat_names, cat_ids))
    name_to_ind = dict(zip(self.classes, range(self.num_classes)))
    coco_id_to_ind = {name_to_coco_id[cls]: name_to_ind[cls]
                      for cls in self.classes[1:]}
    return [self._load_annotation(coco_api, coco_id_to_ind, index)
            for index in coco_api.getImgIds()]