

Python COCO.loadRes Method Code Examples

This article collects typical usage examples of the Python method pycocotools.coco.COCO.loadRes. If you have been wondering what COCO.loadRes does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also browse further examples of the containing class, pycocotools.coco.COCO.


Below are 15 code examples of COCO.loadRes, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
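
Before the individual examples, here is a minimal sketch of the pattern most of them share for caption evaluation: build a COCO object from a ground-truth annotation file, pass a results JSON file (or a list of result dicts) to loadRes, and hand both objects to an evaluator. The file paths below are placeholders, and pycocoevalcap is assumed to be installed; the detection examples use pycocotools.cocoeval.COCOeval instead.

from pycocotools.coco import COCO
from pycocoevalcap.eval import COCOEvalCap

# Placeholder paths -- substitute your own annotation and result files.
ann_file = 'annotations/captions_val2014.json'   # ground-truth captions
res_file = 'results/my_captions.json'            # [{"image_id": ..., "caption": ...}, ...]

coco = COCO(ann_file)             # index the ground truth
cocoRes = coco.loadRes(res_file)  # returns a second COCO object holding the predictions

cocoEval = COCOEvalCap(coco, cocoRes)
cocoEval.params['image_id'] = cocoRes.getImgIds()  # score only images that have predictions
cocoEval.evaluate()

for metric, score in cocoEval.eval.items():
    print('%s: %.3f' % (metric, score))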

Example 1: main

# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import loadRes [as alias]
def main(argv):
    input_json = 'results/' + sys.argv[1]

    annFile = 'annotations/captions_val2014.json'
    coco = COCO(annFile)
    valids = coco.getImgIds()

    checkpoint = json.load(open(input_json, 'r'))
    preds = checkpoint['val_predictions']

    # filter results to only those in MSCOCO validation set (will be about a third)
    preds_filt = [p for p in preds if p['image_id'] in valids]
    print('using %d/%d predictions' % (len(preds_filt), len(preds)))
    json.dump(preds_filt, open('tmp.json', 'w')) # serialize to temporary json file. Sigh, COCO API...

    resFile = 'tmp.json'
    cocoRes = coco.loadRes(resFile)
    cocoEval = COCOEvalCap(coco, cocoRes)
    cocoEval.params['image_id'] = cocoRes.getImgIds()
    cocoEval.evaluate()

    # create output dictionary
    out = {}
    for metric, score in cocoEval.eval.items():
        out[metric] = score
    # serialize to file, to be read from Lua
    json.dump(out, open(input_json + '_out.json', 'w'))
Developer: telin0411, Project: CS231A_Project, Lines: 29, Source: myeval.py

Example 2: main

# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import loadRes [as alias]
def main():
  HASH_IMG_NAME = True
  pylab.rcParams['figure.figsize'] = (10.0, 8.0)
  json.encoder.FLOAT_REPR = lambda o: format(o, '.3f')

  parser = argparse.ArgumentParser()
  parser.add_argument("-i", "--inputfile", type=str, required=True,
      help='File containing model-generated/hypothesis sentences.')
  parser.add_argument("-r", "--references", type=str, required=True,
      help='JSON File containing references/groundtruth sentences.')
  args = parser.parse_args()
  prediction_file = args.inputfile
  reference_file = args.references
  json_predictions_file = '{0}.json'.format(prediction_file)
  
  crf = CocoResFormat()
  crf.read_file(prediction_file, HASH_IMG_NAME)
  crf.dump_json(json_predictions_file)
   
  # create coco object and cocoRes object.
  coco = COCO(reference_file)
  cocoRes = coco.loadRes(json_predictions_file)
  
  # create cocoEval object.
  cocoEval = COCOEvalCap(coco, cocoRes)
  
  # evaluate results
  cocoEval.evaluate()
  
  # print output evaluation scores
  for metric, score in cocoEval.eval.items():
    print('%s: %.3f' % (metric, score))
Developer: meteora9479, Project: caption-eval, Lines: 34, Source: run_evaluations.py

Example 3: language_eval

# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import loadRes [as alias]
def language_eval(input_data, savedir, split):
  if type(input_data) == str: # Filename given.
    checkpoint = json.load(open(input_data, 'r'))
    preds = checkpoint
  elif type(input_data) == list: # Direct predictions given.
    preds = input_data

  annFile = 'third_party/coco-caption/annotations/captions_val2014.json'
  coco = COCO(annFile)
  valids = coco.getImgIds()

  # Filter results to only those in MSCOCO validation set (will be about a third)
  preds_filt = [p for p in preds if p['image_id'] in valids]
  print('Using %d/%d predictions' % (len(preds_filt), len(preds)))
  resFile = osp.join(savedir, 'result_%s.json' % (split))
  json.dump(preds_filt, open(resFile, 'w')) # Serialize to temporary json file. Sigh, COCO API...

  cocoRes = coco.loadRes(resFile)
  cocoEval = COCOEvalCap(coco, cocoRes)
  cocoEval.params['image_id'] = cocoRes.getImgIds()
  cocoEval.evaluate()

  # Create output dictionary.
  out = {}
  for metric, score in cocoEval.eval.items():
    out[metric] = score

  # Return aggregate and per image score.
  return out, cocoEval.evalImgs
Developer: reem94, Project: convcap, Lines: 31, Source: evaluate.py

Example 4: language_eval

# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import loadRes [as alias]
def language_eval(dataset, preds):
    import sys
    if 'coco' in dataset:
        sys.path.append("coco-caption")
        annFile = 'coco-caption/annotations/captions_val2014.json'
    else:
        sys.path.append("f30k-caption")
        annFile = 'f30k-caption/annotations/dataset_flickr30k.json'
    from pycocotools.coco import COCO
    from pycocoevalcap.eval import COCOEvalCap

    encoder.FLOAT_REPR = lambda o: format(o, '.3f')

    coco = COCO(annFile)
    valids = coco.getImgIds()

    # filter results to only those in MSCOCO validation set (will be about a third)
    preds_filt = [p for p in preds if p['image_id'] in valids]
    print('using %d/%d predictions' % (len(preds_filt), len(preds)))
    json.dump(preds_filt, open('tmp.json', 'w')) # serialize to temporary json file. Sigh, COCO API...

    resFile = 'tmp.json'
    cocoRes = coco.loadRes(resFile)
    cocoEval = COCOEvalCap(coco, cocoRes)
    cocoEval.params['image_id'] = cocoRes.getImgIds()
    cocoEval.evaluate()

    # create output dictionary
    out = {}
    for metric, score in cocoEval.eval.items():
        out[metric] = score

    return out
Developer: ruotianluo, Project: neuraltalk2-tensorflow, Lines: 35, Source: eval_utils.py

Example 5: coco_eval

# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import loadRes [as alias]
def coco_eval(candidates_file, references_file):
  """
    Given the candidates and references, the coco-caption module is 
    used to calculate various metrics. Returns a list of dictionaries containing:
    -BLEU
    -ROUGE
    -METEOR
    -CIDEr
  """

  # This is used to suppress the output of coco-eval:
  old_stdout = sys.stdout
  sys.stdout = open(os.devnull, "w")
  try:
    # Derived from example code in coco-captions repo
    coco    = COCO( references_file )
    cocoRes = coco.loadRes( candidates_file )
  
    cocoEval = COCOEvalCap(coco, cocoRes)

    cocoEval.evaluate()
  finally:
    # Change back to standard output
    sys.stdout.close()
    sys.stdout = old_stdout
  
  return cocoEval.evalImgs
Developer: text-machine-lab, Project: MUTT, Lines: 29, Source: metrics.py

Example 6: evaluate

# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import loadRes [as alias]
def evaluate():
    cocoGt = COCO('annotations.json')
    cocoDt = cocoGt.loadRes('detections.json')
    cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
Developer: cyberCBM, Project: DetectO, Lines: 9, Source: face_detector_accuracy.py
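
The detection-oriented examples (such as Example 6 above and Examples 8, 14 and 15 below) pass a results file like detections.json to loadRes. As a minimal sketch, such a file is simply a JSON list with one entry per detection; the image and category IDs below are purely illustrative and must match IDs present in the ground-truth annotations.

import json

# Each detection needs an "image_id", a "category_id", a [x, y, width, height]
# "bbox", and a confidence "score"; the values here are made up for illustration.
detections = [
    {"image_id": 397133, "category_id": 1,  "bbox": [102.5, 43.0, 180.2, 320.7], "score": 0.92},
    {"image_id": 397133, "category_id": 18, "bbox": [10.0, 5.5, 64.0, 48.0],     "score": 0.41},
]

with open('detections.json', 'w') as f:
    json.dump(detections, f)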

Example 7: coco_eval

# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import loadRes [as alias]
def coco_eval(ann_fn, json_fn, save_fn):
    coco = COCO(ann_fn)
    coco_res = coco.loadRes(json_fn)
    coco_evaluator = COCOEvalCap(coco, coco_res)
    # Comment out the line below to evaluate the full validation or testing set.
    coco_evaluator.params['image_id'] = coco_res.getImgIds()
    coco_evaluator.evaluate(save_fn)
Developer: qyouurcs, Project: seq_style, Lines: 9, Source: caption_nil_training_dp_eval.py

Example 8: cocoval

# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import loadRes [as alias]
def cocoval(detected_json):
    eval_json = config.eval_json
    eval_gt = COCO(eval_json)

    eval_dt = eval_gt.loadRes(detected_json)
    cocoEval = COCOeval(eval_gt, eval_dt, iouType='bbox')

    # cocoEval.params.imgIds = eval_gt.getImgIds()
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
Developer: Zumbalamambo, Project: light_head_rcnn, Lines: 13, Source: cocoval.py

Example 9: score_generation

# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import loadRes [as alias]
def score_generation(gt_filename=None, generation_result=None):

  coco_dict = read_json(generation_result)
  coco = COCO(gt_filename)
  generation_coco = coco.loadRes(generation_result)
  coco_evaluator = COCOEvalCap(coco, generation_coco)
  #coco_image_ids = [self.sg.image_path_to_id[image_path]
  #                  for image_path in self.images]
  coco_image_ids = [j['image_id'] for j in coco_dict]
  coco_evaluator.params['image_id'] = coco_image_ids
  results = coco_evaluator.evaluate(return_results=True)
  return results
Developer: luukhoavn, Project: DCC, Lines: 14, Source: eval_sentences.py

Example 10: language_eval

# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import loadRes [as alias]
def language_eval(dataset, preds, model_id, split):
    import sys
    if 'coco' in dataset:
        sys.path.append("coco-caption")
        annFile = 'coco-caption/annotations/captions_val2014.json'
    elif 'msvd' in dataset:
        sys.path.append('coco-caption')
        annFile = 'coco-caption/annotations/coco_ref_msvd.json'
    elif 'kuaishou' in dataset:
        sys.path.append('coco-caption')
        annFile = 'coco-caption/annotations/coco_ref_kuaishou.json'
    else:
        sys.path.append("f30k-caption")
        annFile = 'f30k-caption/annotations/dataset_flickr30k.json'
    from pycocotools.coco import COCO
    from pycocoevalcap.eval import COCOEvalCap

    encoder.FLOAT_REPR = lambda o: format(o, '.3f')

    if not os.path.isdir('eval_results'):
        os.mkdir('eval_results')
    cache_path = os.path.join('eval_results/', model_id + '_' + split + '.json')

    coco = COCO(annFile)
    valids = coco.getImgIds()

    # filter results to only those in MSCOCO validation set (will be about a third)
    preds_filt = [p for p in preds if p['image_id'] in valids]
    print('using %d/%d predictions' % (len(preds_filt), len(preds)))
    json.dump(preds_filt, open(cache_path, 'w')) # serialize to temporary json file. Sigh, COCO API...

    cocoRes = coco.loadRes(cache_path)
    cocoEval = COCOEvalCap(coco, cocoRes)
    cocoEval.params['image_id'] = cocoRes.getImgIds()
    cocoEval.evaluate()

    # create output dictionary
    out = {}
    for metric, score in cocoEval.eval.items():
        out[metric] = score

    imgToEval = cocoEval.imgToEval
    for p in preds_filt:
        image_id, caption = p['image_id'], p['caption']
        imgToEval[image_id]['caption'] = caption
    with open(cache_path, 'w') as outfile:
        json.dump({'overall': out, 'imgToEval': imgToEval}, outfile)

    return out
Developer: nagizeroiw, Project: ImageCaptioning.pytorch, Lines: 51, Source: eval_utils.py

Example 11: eval_init

# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import loadRes [as alias]
def eval_init(cfg):
    dataset = cfg.dataset
    dataset_phase = cfg.dataset_phase
    dataset_ann = cfg.dataset_ann
    threshold = 0

    # initialize cocoGT api
    annFile = '%s/annotations/%s_%s.json' % (dataset, dataset_ann, dataset_phase)
    cocoGT = COCO(annFile)

    # initialize cocoPred api
    inFile = "predictions_with_segm.json"
    predFile = apply_threhsold(inFile, threshold)
    cocoPred = cocoGT.loadRes(predFile)

    return cocoGT, cocoPred
Developer: bhuWenDongchao, Project: pose-tensorflow, Lines: 18, Source: eval_mscoco.py

Example 12: coco_val_eval

# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import loadRes [as alias]
def coco_val_eval(self, pred_path, result_path):
  """Evaluate the predicted sentences on MS COCO validation."""
  sys.path.append('./external/coco-caption')
  from pycocotools.coco import COCO
  from pycocoevalcap.eval import COCOEvalCap

  coco = COCO('./external/coco-caption/annotations/captions_val2014.json')
  cocoRes = coco.loadRes(pred_path)

  cocoEval = COCOEvalCap(coco, cocoRes)
  cocoEval.params['image_id'] = cocoRes.getImgIds()
  cocoEval.evaluate()

  with open(result_path, 'w') as fout:
    for metric, score in cocoEval.eval.items():
      print('%s: %.3f' % (metric, score), file=fout)
Developer: Sxq2004123, Project: TF-mRNN, Lines: 18, Source: common_utils.py

Example 13: run

# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import loadRes [as alias]
def run(dataset,algName,outDir):

    pylab.rcParams['figure.figsize'] = (10.0, 8.0)

    import json
    from json import encoder
    encoder.FLOAT_REPR = lambda o: format(o, '.3f')

    # set up file names and paths
    # dataDir='./data/'+dataset
    # dataDir= '/media/SSD/projects/NeuralTalkAnimator'
    dataType='val'

    # annFile='%s/annotations/captions_%s.json'%(dataDir,dataType)
    # annFile='/media/SSD/projects/NeuralTalkAnimator/data/youtube2text/captions_val2014.json'
    dataDir = 'data/'+dataset
    annFile='%s/captions_%s.json'%(dataDir,dataType)
    subtypes=['results', 'evalImgs', 'eval']
    [resFile, evalImgsFile, evalFile]= \
    ['%s/captions_%s_%s_%s.json'%(outDir,dataType,algName,subtype) for subtype in subtypes]

    coco = COCO(annFile)
    cocoRes = coco.loadRes(resFile)

    # create cocoEval object by taking coco and cocoRes
    cocoEval = COCOEvalCap(coco, cocoRes)

    # evaluate on a subset of images by setting
    # cocoEval.params['image_id'] = cocoRes.getImgIds()
    # please remove this line when evaluating the full validation set
    cocoEval.params['image_id'] = cocoRes.getImgIds()

    # evaluate results
    cocoEval.evaluate()

    # print output evaluation scores
    scores = list()
    for metric, score in cocoEval.eval.items():
        print('%s: %.3f' % (metric, score))
        scores.append(score)

    print('inside metrics')
    return scores
Developer: olivernina, Project: neuralvideo, Lines: 45, Source: metrics.py

Example 14: print_evaluation_scores

# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import loadRes [as alias]
def print_evaluation_scores(json_file):
    ret = {}
    assert config.BASEDIR and os.path.isdir(config.BASEDIR)
    annofile = os.path.join(
        config.BASEDIR, 'annotations',
        'instances_{}.json'.format(config.VAL_DATASET))
    coco = COCO(annofile)
    cocoDt = coco.loadRes(json_file)
    cocoEval = COCOeval(coco, cocoDt, 'bbox')
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
    ret['mAP(bbox)'] = cocoEval.stats[0]

    if config.MODE_MASK:
        cocoEval = COCOeval(coco, cocoDt, 'segm')
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        ret['mAP(segm)'] = cocoEval.stats[0]
    return ret
Developer: caserzer, Project: tensorpack, Lines: 23, Source: eval.py

Example 15: print_evaluation_scores

# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import loadRes [as alias]
def print_evaluation_scores(json_file):
    ret = {}
    assert config.BASEDIR and os.path.isdir(config.BASEDIR)
    annofile = os.path.join(
        config.BASEDIR, 'annotations',
        'instances_{}.json'.format(config.VAL_DATASET))
    coco = COCO(annofile)
    cocoDt = coco.loadRes(json_file)
    cocoEval = COCOeval(coco, cocoDt, 'bbox')
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
    fields = ['IoU=0.5:0.95', 'IoU=0.5', 'IoU=0.75', 'small', 'medium', 'large']
    for k in range(6):
        ret['mAP(bbox)/' + fields[k]] = cocoEval.stats[k]

    if config.MODE_MASK:
        cocoEval = COCOeval(coco, cocoDt, 'segm')
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        for k in range(6):
            ret['mAP(segm)/' + fields[k]] = cocoEval.stats[k]
    return ret
Developer: wu-yy, Project: tensorpack, Lines: 26, Source: eval.py


Note: The pycocotools.coco.COCO.loadRes examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors, and copyright of the source code remains with them. Please consult the corresponding project licenses before distributing or using the code; do not reproduce this article without permission.