本文整理汇总了Python中pycocotools.coco.COCO.getImgIds方法的典型用法代码示例。如果您正苦于以下问题:Python COCO.getImgIds方法的具体用法?Python COCO.getImgIds怎么用?Python COCO.getImgIds使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类pycocotools.coco.COCO
的用法示例。
在下文中一共展示了COCO.getImgIds方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main
# 需要导入模块: from pycocotools.coco import COCO [as 别名]
# 或者: from pycocotools.coco.COCO import getImgIds [as 别名]
def main():
    """Sample 1000 val2014 images and emit a CSV of caption/URL quadruples.

    For every sampled image that has instance annotations, one of its
    captions is paired with its own URL plus the URLs of three images
    sharing at least one of its categories, shuffled together.
    """
    random.seed(123)
    data_dir = '/home/gchrupala/repos/coco'
    data_type = 'val2014'
    cap = COCO('%s/annotations/captions_%s.json' % (data_dir, data_type))
    coco = COCO('%s/annotations/instances_%s.json' % (data_dir, data_type))
    # Invert catToImgs into image-id -> set of category ids.
    img_cat = {}
    for cat, img_ids in coco.catToImgs.items():
        for img_id in img_ids:
            img_cat.setdefault(img_id, set()).add(cat)
    with open('hard2.csv', 'w') as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["desc", "url_1", "url_2", "url_3", "url_4"])
        sampled_ids = random.sample(coco.getImgIds(), 1000)
        for img in coco.loadImgs(sampled_ids):
            if img['id'] not in img_cat:
                continue  # no instance annotations for this image
            cats = img_cat[img['id']]
            desc = random.sample(cap.imgToAnns[img['id']], 1)[0]
            # Pool every image that shares one of this image's categories.
            pool = sum([coco.getImgIds(catIds=[cat]) for cat in cats], [])
            distractors = coco.loadImgs(random.sample(pool, 3))
            urls = [img['coco_url']] + [d['coco_url'] for d in distractors]
            random.shuffle(urls)
            writer.writerow([desc['caption']] + urls)
示例2: language_eval
# 需要导入模块: from pycocotools.coco import COCO [as 别名]
# 或者: from pycocotools.coco.COCO import getImgIds [as 别名]
def language_eval(input_data, savedir, split):
    """Score caption predictions against the MSCOCO val2014 references.

    input_data: either a path to a JSON file of predictions, or the list
        of prediction dicts itself (each with 'image_id' and 'caption').
    savedir: directory where the filtered result JSON is written.
    split: split name used to build the result filename.

    Returns (overall_metrics_dict, per_image_eval_list).
    Raises ValueError if input_data is neither a filename nor a list
    (the original silently hit a NameError in that case).
    """
    if isinstance(input_data, str):  # filename given
        # Context manager closes the handle (original leaked it).
        with open(input_data, 'r') as f:
            preds = json.load(f)
    elif isinstance(input_data, list):  # direct predictions given
        preds = input_data
    else:
        raise ValueError('input_data must be a filename or a list of predictions')
    annFile = 'third_party/coco-caption/annotations/captions_val2014.json'
    coco = COCO(annFile)
    valids = set(coco.getImgIds())  # set for O(1) membership tests
    # Filter results to only those in MSCOCO validation set (will be about a third)
    preds_filt = [p for p in preds if p['image_id'] in valids]
    print('Using %d/%d predictions' % (len(preds_filt), len(preds)))
    resFile = osp.join(savedir, 'result_%s.json' % (split))
    # COCO's loadRes only accepts a file path, so serialize first.
    with open(resFile, 'w') as f:
        json.dump(preds_filt, f)
    cocoRes = coco.loadRes(resFile)
    cocoEval = COCOEvalCap(coco, cocoRes)
    cocoEval.params['image_id'] = cocoRes.getImgIds()
    cocoEval.evaluate()
    # Collect aggregate scores; return per-image scores alongside.
    out = {metric: score for metric, score in cocoEval.eval.items()}
    return out, cocoEval.evalImgs
示例3: main
# 需要导入模块: from pycocotools.coco import COCO [as 别名]
# 或者: from pycocotools.coco.COCO import getImgIds [as 别名]
def main(argv):
    """Score a result file against MSCOCO val2014 captions.

    argv: unused; the result filename is read from sys.argv[1]
        (kept as-is to preserve the original behavior).
        NOTE(review): presumably argv was meant to be used — confirm.
    Writes '<input_json>_out.json' with metric -> score, for the Lua caller.
    """
    input_json = 'results/' + sys.argv[1]
    annFile = 'annotations/captions_val2014.json'
    coco = COCO(annFile)
    valids = set(coco.getImgIds())  # set for O(1) membership tests
    # Context manager closes the handle (original leaked it).
    with open(input_json, 'r') as f:
        checkpoint = json.load(f)
    preds = checkpoint['val_predictions']
    # filter results to only those in MSCOCO validation set (will be about a third)
    preds_filt = [p for p in preds if p['image_id'] in valids]
    print('using %d/%d predictions' % (len(preds_filt), len(preds)))
    resFile = 'tmp.json'
    # COCO's loadRes only accepts a file path, so serialize first.
    with open(resFile, 'w') as f:
        json.dump(preds_filt, f)
    cocoRes = coco.loadRes(resFile)
    cocoEval = COCOEvalCap(coco, cocoRes)
    cocoEval.params['image_id'] = cocoRes.getImgIds()
    cocoEval.evaluate()
    # create output dictionary and serialize to file, to be read from Lua
    out = {metric: score for metric, score in cocoEval.eval.items()}
    with open(input_json + '_out.json', 'w') as f:
        json.dump(out, f)
示例4: language_eval
# 需要导入模块: from pycocotools.coco import COCO [as 别名]
# 或者: from pycocotools.coco.COCO import getImgIds [as 别名]
def language_eval(dataset, preds):
    """Evaluate caption predictions with the COCO caption metrics.

    dataset: dataset name; anything containing 'coco' uses the MSCOCO
        val2014 annotations, otherwise the Flickr30k annotations.
    preds: list of prediction dicts with 'image_id' and 'caption'.

    Returns a dict mapping metric name -> score.
    """
    import sys
    if 'coco' in dataset:
        sys.path.append("coco-caption")
        annFile = 'coco-caption/annotations/captions_val2014.json'
    else:
        sys.path.append("f30k-caption")
        annFile = 'f30k-caption/annotations/dataset_flickr30k.json'
    from pycocotools.coco import COCO
    from pycocoevalcap.eval import COCOEvalCap
    # Render floats with 3 decimals when the scores are serialized.
    encoder.FLOAT_REPR = lambda o: format(o, '.3f')
    coco = COCO(annFile)
    valids = set(coco.getImgIds())  # set for O(1) membership tests
    # filter results to only those in MSCOCO validation set (will be about a third)
    preds_filt = [p for p in preds if p['image_id'] in valids]
    print('using %d/%d predictions' % (len(preds_filt), len(preds)))
    resFile = 'tmp.json'
    # COCO's loadRes only accepts a file path, so serialize first
    # (context manager closes the handle; the original leaked it).
    with open(resFile, 'w') as f:
        json.dump(preds_filt, f)
    cocoRes = coco.loadRes(resFile)
    cocoEval = COCOEvalCap(coco, cocoRes)
    cocoEval.params['image_id'] = cocoRes.getImgIds()
    cocoEval.evaluate()
    # create output dictionary
    return {metric: score for metric, score in cocoEval.eval.items()}
示例5: getImgIds
# 需要导入模块: from pycocotools.coco import COCO [as 别名]
# 或者: from pycocotools.coco.COCO import getImgIds [as 别名]
def getImgIds(self, imgIds=[], catIds=[]):
    """Return image ids, delegating to the base COCO implementation.

    Category filtering is not supported by this subclass: any non-empty
    catIds yields an empty result instead of delegating.
    NOTE: the mutable default arguments mirror the pycocotools API
    signature and are never mutated here.
    """
    if catIds:  # category filtering not supported
        return []
    return COCO.getImgIds(self, imgIds, catIds)
示例6: load_coco
# 需要导入模块: from pycocotools.coco import COCO [as 别名]
# 或者: from pycocotools.coco.COCO import getImgIds [as 别名]
def load_coco(self, dataset_dir, subset, class_ids=None,
              class_map=None, return_coco=False):
    """Load a subset of the COCO dataset.

    dataset_dir: The root directory of the COCO dataset.
    subset: What to load (train, val, minival, val35k).
    class_ids: If provided, only loads images that have the given classes.
    class_map: TODO: Not implemented yet. Supports mapping classes from
        different datasets to the same class ID.
    return_coco: If True, returns the COCO object.
    """
    # Images live under train2014/ for "train"; every other subset
    # (val, minival, val35k) draws from the val2014 images.
    image_dir = os.path.join(dataset_dir, "train2014" if subset == "train"
                             else "val2014")
    # Create COCO object from the subset's annotation file.
    json_path_dict = {
        "train": "annotations/instances_train2014.json",
        "val": "annotations/instances_val2014.json",
        "minival": "annotations/instances_minival2014.json",
        "val35k": "annotations/instances_valminusminival2014.json",
    }
    coco = COCO(os.path.join(dataset_dir, json_path_dict[subset]))
    # Default to every category when none were requested.
    if not class_ids:
        class_ids = sorted(coco.getCatIds())
    if class_ids:
        # Collect image ids per class, de-duplicated. (Loop variable
        # renamed: the original shadowed the builtin `id`.)
        image_ids = []
        for class_id in class_ids:
            image_ids.extend(list(coco.getImgIds(catIds=[class_id])))
        # Remove duplicates
        image_ids = list(set(image_ids))
    else:
        # All images (only reachable if the annotation file has no categories).
        image_ids = list(coco.imgs.keys())
    # Register classes.
    for class_id in class_ids:
        self.add_class("coco", class_id, coco.loadCats(class_id)[0]["name"])
    # Register images together with their per-image annotations.
    for image_id in image_ids:
        self.add_image(
            "coco", image_id=image_id,
            path=os.path.join(image_dir, coco.imgs[image_id]['file_name']),
            width=coco.imgs[image_id]["width"],
            height=coco.imgs[image_id]["height"],
            annotations=coco.loadAnns(coco.getAnnIds(
                imgIds=[image_id], catIds=class_ids, iscrowd=None)))
    if return_coco:
        return coco
示例7: _load_gt_roidb
# 需要导入模块: from pycocotools.coco import COCO [as 别名]
# 或者: from pycocotools.coco.COCO import getImgIds [as 别名]
def _load_gt_roidb(self):
    """Build the ground-truth roidb from the COCO annotation file.

    Maps COCO category ids onto this dataset's contiguous class indices
    (the background class at index 0 is excluded from the mapping), then
    loads one annotation record per image.
    """
    coco = COCO(self._anno_file)
    # deal with class names
    cat_ids = coco.getCatIds()
    cat_names = [cat['name'] for cat in coco.loadCats(cat_ids)]
    # class name -> COCO category id
    name_to_coco_id = dict(zip(cat_names, coco.getCatIds()))
    # class name -> contiguous class index
    name_to_index = dict(zip(self.classes, range(self.num_classes)))
    # COCO category id -> contiguous class index, skipping background
    coco_id_to_index = dict(
        (name_to_coco_id[cls], name_to_index[cls])
        for cls in self.classes[1:])
    return [self._load_annotation(coco, coco_id_to_index, img_id)
            for img_id in coco.getImgIds()]
示例8: load_coco
# 需要导入模块: from pycocotools.coco import COCO [as 别名]
# 或者: from pycocotools.coco.COCO import getImgIds [as 别名]
def load_coco(self, dataset_dir, subset, year=DEFAULT_DATASET_YEAR, class_ids=None,
              class_map=None, return_coco=False, auto_download=False):
    """Load a subset of the COCO dataset.

    dataset_dir: The root directory of the COCO dataset.
    subset: What to load (train, val, minival, valminusminival).
    year: What dataset year to load (2014, 2017) as a string, not an integer.
    class_ids: If provided, only loads images that have the given classes.
    class_map: TODO: Not implemented yet. Supports mapping classes from
        different datasets to the same class ID.
    return_coco: If True, returns the COCO object.
    auto_download: Automatically download and unzip MS-COCO images and annotations.
    """
    if auto_download is True:
        self.auto_download(dataset_dir, subset, year)
    coco = COCO("{}/annotations/instances_{}{}.json".format(dataset_dir, subset, year))
    # minival / valminusminival share the plain val image directory.
    if subset == "minival" or subset == "valminusminival":
        subset = "val"
    image_dir = "{}/{}{}".format(dataset_dir, subset, year)
    # Default to every category when none were requested.
    if not class_ids:
        class_ids = sorted(coco.getCatIds())
    if class_ids:
        # Collect image ids per class, de-duplicated. (Loop variable
        # renamed: the original shadowed the builtin `id`.)
        image_ids = []
        for class_id in class_ids:
            image_ids.extend(list(coco.getImgIds(catIds=[class_id])))
        # Remove duplicates
        image_ids = list(set(image_ids))
    else:
        # All images (only reachable if the annotation file has no categories).
        image_ids = list(coco.imgs.keys())
    # Register classes.
    for class_id in class_ids:
        self.add_class("coco", class_id, coco.loadCats(class_id)[0]["name"])
    # Register images together with their per-image annotations.
    for image_id in image_ids:
        self.add_image(
            "coco", image_id=image_id,
            path=os.path.join(image_dir, coco.imgs[image_id]['file_name']),
            width=coco.imgs[image_id]["width"],
            height=coco.imgs[image_id]["height"],
            annotations=coco.loadAnns(coco.getAnnIds(
                imgIds=[image_id], catIds=class_ids, iscrowd=None)))
    if return_coco:
        return coco
示例9: creat_catids
# 需要导入模块: from pycocotools.coco import COCO [as 别名]
# 或者: from pycocotools.coco.COCO import getImgIds [as 别名]
def _collect_catids(coco):
    """Return [(img_id, [category_id, ...])] for every image in `coco`,
    with an empty category list for images that have no annotations."""
    catids = []
    for img_id in coco.getImgIds():
        anns = coco.imgToAnns[img_id] if img_id in coco.imgToAnns else []
        catids.append((img_id, [ann['category_id'] for ann in anns]))
    return catids


def creat_catids(data_folder=DATA_FOLDER):
    """Dump (image_id, [category_ids]) pairs for the train and valid
    object annotation sets to <data_folder>/preprocessed/catids.json.

    The original duplicated the collection loop for the two annotation
    sets; both now share _collect_catids. Output order is unchanged:
    all train images first, then all validation images.
    """
    catids = _collect_catids(COCO(COCO_TRAIN_OBJ_FILE))
    catids += _collect_catids(COCO(COCO_VALID_OBJ_FILE))
    # Context manager guarantees the file is closed (the original leaked
    # the handle if json.dump raised).
    with open(data_folder + '/preprocessed/catids.json', 'w') as f:
        json.dump(catids, f)
示例10: language_eval
# 需要导入模块: from pycocotools.coco import COCO [as 别名]
# 或者: from pycocotools.coco.COCO import getImgIds [as 别名]
def language_eval(dataset, preds, model_id, split):
    """Evaluate caption predictions and cache results under eval_results/.

    dataset: dataset name selecting the reference annotations
        ('coco', 'msvd', 'kuaishou', otherwise Flickr30k).
    preds: list of prediction dicts with 'image_id' and 'caption'.
    model_id, split: combined to build the cache filename.

    Returns a dict mapping metric name -> score; also writes
    {'overall': ..., 'imgToEval': ...} to the cache file.
    """
    import sys
    if 'coco' in dataset:
        sys.path.append("coco-caption")
        annFile = 'coco-caption/annotations/captions_val2014.json'
    elif 'msvd' in dataset:
        sys.path.append('coco-caption')
        annFile = 'coco-caption/annotations/coco_ref_msvd.json'
    elif 'kuaishou' in dataset:
        sys.path.append('coco-caption')
        annFile = 'coco-caption/annotations/coco_ref_kuaishou.json'
    else:
        sys.path.append("f30k-caption")
        annFile = 'f30k-caption/annotations/dataset_flickr30k.json'
    from pycocotools.coco import COCO
    from pycocoevalcap.eval import COCOEvalCap
    # Render floats with 3 decimals when the scores are serialized.
    encoder.FLOAT_REPR = lambda o: format(o, '.3f')
    if not os.path.isdir('eval_results'):
        os.mkdir('eval_results')
    cache_path = os.path.join('eval_results/', model_id + '_' + split + '.json')
    coco = COCO(annFile)
    valids = set(coco.getImgIds())  # set for O(1) membership tests
    # filter results to only those in MSCOCO validation set (will be about a third)
    preds_filt = [p for p in preds if p['image_id'] in valids]
    print('using %d/%d predictions' % (len(preds_filt), len(preds)))
    # COCO's loadRes only accepts a file path, so serialize first
    # (context manager closes the handle; the original leaked it).
    with open(cache_path, 'w') as f:
        json.dump(preds_filt, f)
    cocoRes = coco.loadRes(cache_path)
    cocoEval = COCOEvalCap(coco, cocoRes)
    cocoEval.params['image_id'] = cocoRes.getImgIds()
    cocoEval.evaluate()
    # create output dictionary
    out = {metric: score for metric, score in cocoEval.eval.items()}
    # Attach each generated caption to its per-image scores.
    # NOTE(review): assumes every filtered prediction was evaluated; a
    # missing image_id here would raise KeyError — confirm upstream.
    imgToEval = cocoEval.imgToEval
    for p in preds_filt:
        imgToEval[p['image_id']]['caption'] = p['caption']
    with open(cache_path, 'w') as outfile:
        json.dump({'overall': out, 'imgToEval': imgToEval}, outfile)
    return out
示例11: split_valid
# 需要导入模块: from pycocotools.coco import COCO [as 别名]
# 或者: from pycocotools.coco.COCO import getImgIds [as 别名]
def split_valid(data_folder=DATA_FOLDER):
    """Split the COCO validation images into validation and test halves.

    The shuffle is seeded, so the split is reproducible. Writes the two
    id lists to <data_folder>/preprocessed/{valimgids,tesimgids}.json.
    """
    cap = COCO(COCO_VALID_CAP_FILE)
    imgIds = cap.getImgIds()
    random.seed(0)
    random.shuffle(imgIds)
    # BUG FIX: floor division — `len(...) / 2` is a float on Python 3,
    # and floats are not valid slice indices.
    mid = len(imgIds) // 2
    vimgids, timgids = imgIds[:mid], imgIds[mid:]
    # Context managers close the handles (original leaked on error).
    with open(data_folder + '/preprocessed/valimgids.json', 'w') as f:
        json.dump(vimgids, f)
    with open(data_folder + '/preprocessed/tesimgids.json', 'w') as f:
        json.dump(timgids, f)
示例12: _load_all
# 需要导入模块: from pycocotools.coco import COCO [as 别名]
# 或者: from pycocotools.coco.COCO import getImgIds [as 别名]
def _load_all(self, anno_file, shuffle):
    """
    initialize all entries given annotation json file

    Parameters:
    ----------
    anno_file: str
        annotation json file
    shuffle: bool
        whether to shuffle image list
    """
    image_set_index = []
    labels = []
    coco = COCO(anno_file)
    for img_id in coco.getImgIds():
        # filename; COCO2014 names look like COCO_<subdir>_<id>.jpg and the
        # image is stored under that subdir (e.g. train2014/).
        image_info = coco.loadImgs(img_id)[0]
        filename = image_info["file_name"]
        subdir = filename.split('_')[1]
        height = image_info["height"]
        width = image_info["width"]
        # label: rows of [cat_id, xmin, ymin, xmax, ymax, 0] with
        # coordinates normalized to [0, 1].
        anno_ids = coco.getAnnIds(imgIds=img_id)
        annos = coco.loadAnns(anno_ids)
        label = []
        for anno in annos:
            cat_id = int(anno["category_id"])
            bbox = anno["bbox"]
            assert len(bbox) == 4
            xmin = float(bbox[0]) / width
            ymin = float(bbox[1]) / height
            xmax = xmin + float(bbox[2]) / width
            ymax = ymin + float(bbox[3]) / height
            label.append([cat_id, xmin, ymin, xmax, ymax, 0])
        # Images with no annotations are skipped entirely.
        if label:
            labels.append(np.array(label))
            image_set_index.append(os.path.join(subdir, filename))
    if shuffle:
        import random
        # BUG FIX: range() is immutable on Python 3; materialize a list
        # before shuffling.
        indices = list(range(len(image_set_index)))
        random.shuffle(indices)
        image_set_index = [image_set_index[i] for i in indices]
        labels = [labels[i] for i in indices]
    # store the results
    self.image_set_index = image_set_index
    self.labels = labels
示例13: Resize_Image
# 需要导入模块: from pycocotools.coco import COCO [as 别名]
# 或者: from pycocotools.coco.COCO import getImgIds [as 别名]
class Resize_Image():
    """Resize COCO val2014 images to 255x255, keyed by category name."""

    def __init__(self, imgeDir, resizeImageDir):
        self.ImageDir = imgeDir
        self.ResizeImageDir = resizeImageDir
        self.dataDir = APP_ROOT + "/Data/"
        self.dataType = 'val2014'
        self.annFile = '%s/annotations/instances_%s.json'\
            % (self.dataDir, self.dataType)
        # initialize COCO api for instance annotations
        self.coco = COCO(self.annFile)
        # cache category metadata (names and ids in matching order)
        self.cats = self.coco.loadCats(self.coco.getCatIds())
        self.names = [cat['name'] for cat in self.cats]
        self.ids = [cat['id'] for cat in self.cats]
        # category name -> category id
        self.name_ids = {}
        # image file name -> first category name seen for that image
        self.img_dict = {}

    def resize_image(self):
        """Build the name->id map, then resize every categorized image."""
        for name, cat_id in zip(self.names, self.ids):
            # BUG FIX: the original tested `self.ids[i] not in self.name_ids`,
            # comparing a category id against name keys (always true).
            if name not in self.name_ids:
                self.name_ids[name] = cat_id
        # BUG FIX: the original called this inside the loop above, which
        # re-resized and re-saved every image once per category.
        self.__image_dict_update()

    def __image_dict_update(self):
        """Map each image file name to the first category containing it,
        then trigger the resize pass."""
        for name in self.names:
            catIds = self.coco.getCatIds(catNms=[name])
            for img_id in self.coco.getImgIds(catIds=catIds):
                img = self.coco.loadImgs(img_id)[0]
                if img["file_name"] not in self.img_dict:
                    self.img_dict[img["file_name"]] = name
        self.__output_resize_images()

    def __output_resize_images(self):
        """Resize each collected image to 255x255 and save it, printing
        the saved path and the image's category id."""
        for file_name, cat_name in sorted(self.img_dict.items(),
                                          key=lambda x: x[0]):
            src_path = '%s/%s' % (self.ImageDir, file_name)
            resized = Image.open(src_path).resize((255, 255))
            save_image = '%s/%s' % (self.ResizeImageDir, file_name)
            resized.save(save_image)
            print(save_image + " " + str(self.name_ids[cat_name]))
示例14: main
# 需要导入模块: from pycocotools.coco import COCO [as 别名]
# 或者: from pycocotools.coco.COCO import getImgIds [as 别名]
def main():
    """Convert COCO instance annotations to PASCAL-VOC-style XML files.

    Usage: python convert_to_pascalformat.py <coco_dataDir> <coco_dataType>
    Writes one XML per image under ./annotations_pascalformat/.
    """
    if len(sys.argv) != 3:
        # Parenthesized print works on both Python 2 and 3 (the original
        # print statements were Python-2-only).
        print('usage: python convert_to_pascalformat.py coco_dataDir coco_dataType')
        print('for example: python convert_to_pascalformat.py \'./\' \'val2014\'')
        sys.exit(1)
    dataDir = sys.argv[1]
    dataType = sys.argv[2]
    from pycocotools.coco import COCO
    import os
    annFile = '%s/annotations/instances_%s.json' % (dataDir, dataType)
    coco = COCO(annFile)
    imgIds = coco.getImgIds()
    directory = './annotations_pascalformat/'
    if not os.path.exists(directory):
        os.makedirs(directory)
    # range() replaces the Python-2-only xrange().
    for n in range(len(imgIds)):
        img = coco.loadImgs(imgIds[n])[0]
        annIds = coco.getAnnIds(imgIds=img['id'], iscrowd=None)
        anns = coco.loadAnns(annIds)
        # Build the XML by concatenation, matching the original
        # newline-heavy layout expected by downstream consumers.
        xml = '<annotation>\n<folder>\nCOCO2014pascalformat\n</folder>\n<filename>\n'
        xml += img['file_name'] + '\n</filename>\n<source>\n<database>\nCOCO2014pascalformat\n</database>\n</source>\n<size>\n'
        xml += '<width>\n' + str(img['width']) + '\n</width>\n' + '<height>\n' + str(img['height']) + '\n</height>\n'
        xml += '<depth>\n3\n</depth>\n</size>\n<segmented>\n0\n</segmented>\n'
        for ann in anns:
            bbox = ann['bbox']  # COCO bbox is [x, y, width, height]
            xml += '<object>\n<name>\n' + str(ann['category_id']) + '\n</name>\n'
            xml += '<bndbox>\n<xmin>\n' + str(int(round(bbox[0]))) + '\n</xmin>\n'
            xml += '<ymin>\n' + str(int(round(bbox[1]))) + '\n</ymin>\n'
            # xmax/ymax derived from origin plus width/height
            xml += '<xmax>\n' + str(int(round(bbox[0] + bbox[2]))) + '\n</xmax>\n'
            xml += '<ymax>\n' + str(int(round(bbox[1] + bbox[3]))) + '\n</ymax>\n</bndbox>\n'
            xml += '<truncated>\n0\n</truncated>\n<difficult>\n0\n</difficult>\n</object>\n'
        xml += '</annotation>'
        # Context manager closes the file (the original leaked handles).
        with open(directory + img['file_name'].split('.jpg')[0] + '.xml', 'w') as f_xml:
            f_xml.write(xml)
        print(str(n) + ' out of ' + str(len(imgIds)))
示例15: process_coco
# 需要导入模块: from pycocotools.coco import COCO [as 别名]
# 或者: from pycocotools.coco.COCO import getImgIds [as 别名]
def process_coco(data_dir, out_dir, num_shards, is_train=True):
    """Shard COCO person-keypoint images into tfrecord files.

    data_dir: COCO root containing annotations/ and images/.
    out_dir: destination directory for the tfrecord shards.
    num_shards: people-count threshold that closes one shard.
        NOTE(review): despite the name, this caps the number of people
        written per shard, not the number of shards — confirm intent.
    is_train: selects train2014 vs val2014 and the output name pattern.
    """
    if is_train:
        data_type = 'train2014'
        out_path = join(out_dir, 'train_%04d_wmeta.tfrecord')
    else:
        data_type = 'val2014'
        out_path = join(out_dir, 'val_%04d_wmeta.tfrecord')
    anno_file = join(data_dir,
                     'annotations/person_keypoints_%s.json' % data_type)
    img_dir = join(data_dir, 'images', data_type)
    # initialize COCO api for person keypoints annotations
    coco = COCO(anno_file)
    catIds = coco.getCatIds(catNms=['person'])
    img_inds = coco.getImgIds(catIds=catIds)
    # Only run on 'single person's
    coder = ImageCoder()
    i = 0
    fidx = 0            # shard index
    total_num_ppl = 0   # people written across all shards
    while i < len(img_inds):
        tf_filename = out_path % fidx
        print('Starting tfrecord file %s' % tf_filename)
        with tf.python_io.TFRecordWriter(tf_filename) as writer:
            # Count people written into the current shard.
            num_ppl = 0
            while i < len(img_inds) and num_ppl < num_shards:
                if i % 100 == 0:
                    print('Reading img %d/%d' % (i, len(img_inds)))
                num_ppl += add_to_tfrecord(coco, img_inds[i], img_dir, coder,
                                           writer, is_train)
                i += 1
        total_num_ppl += num_ppl
        fidx += 1
    # BUG FIX: after the loop, fidx equals the number of shards written;
    # the original reported `fidx - 1`, undercounting by one.
    print('Made %d shards, with total # of people: %d' %
          (fidx, total_num_ppl))