This article collects typical usage examples of the COCO.getAnnIds method from pycocotools.coco in Python. If you are unsure what COCO.getAnnIds does, how to call it, or what working code looks like, the curated examples below should help. You may also want to look at the containing class, pycocotools.coco.COCO, for more context.
The following 15 code examples of COCO.getAnnIds are shown, ordered roughly by popularity.
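Before the individual examples, the basic call pattern is worth showing on its own: getAnnIds filters annotation IDs by image, category, area range and crowd flag, and loadAnns then resolves those IDs into annotation dicts. This is a minimal sketch, assuming a local COCO-style annotation file at a placeholder path:

from pycocotools.coco import COCO

# Placeholder path -- point this at any COCO-style instance annotation file.
coco = COCO("annotations/instances_val2017.json")

# All annotation IDs for one image (crowd and non-crowd alike).
img_id = coco.getImgIds()[0]
ann_ids = coco.getAnnIds(imgIds=[img_id], iscrowd=None)
anns = coco.loadAnns(ann_ids)

# Restrict the query to a single category, e.g. "person".
person_cat_ids = coco.getCatIds(catNms=["person"])
person_ann_ids = coco.getAnnIds(imgIds=[img_id], catIds=person_cat_ids, iscrowd=None)
print(len(ann_ids), len(person_ann_ids))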
Example 1: __init__
# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import getAnnIds [as alias]
def __init__(self, root_dir, data_dir, anno_file):
    coco = COCO(os.path.join(root_dir, anno_file))
    # With no filters, getAnnIds returns every annotation ID in the file.
    anns = coco.loadAnns(coco.getAnnIds())
    self.coco = coco
    self.anns = anns
    self.vocab = None  # Set later from outside
    self.coco_root = root_dir
    self.coco_data = data_dir
Example 2: load_coco
# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import getAnnIds [as alias]
def load_coco(self, dataset_dir, subset, class_ids=None,
              class_map=None, return_coco=False):
    """Load a subset of the COCO dataset.
    dataset_dir: The root directory of the COCO dataset.
    subset: What to load (train, val, minival, val35k)
    class_ids: If provided, only loads images that have the given classes.
    class_map: TODO: Not implemented yet. Supports mapping classes from
        different datasets to the same class ID.
    return_coco: If True, returns the COCO object.
    """
    # Path
    image_dir = os.path.join(dataset_dir, "train2014" if subset == "train"
                             else "val2014")
    # Create COCO object
    json_path_dict = {
        "train": "annotations/instances_train2014.json",
        "val": "annotations/instances_val2014.json",
        "minival": "annotations/instances_minival2014.json",
        "val35k": "annotations/instances_valminusminival2014.json",
    }
    coco = COCO(os.path.join(dataset_dir, json_path_dict[subset]))

    # Load all classes or a subset?
    if not class_ids:
        # All classes
        class_ids = sorted(coco.getCatIds())

    # All images or a subset?
    if class_ids:
        image_ids = []
        for id in class_ids:
            image_ids.extend(list(coco.getImgIds(catIds=[id])))
        # Remove duplicates
        image_ids = list(set(image_ids))
    else:
        # All images
        image_ids = list(coco.imgs.keys())

    # Add classes
    for i in class_ids:
        self.add_class("coco", i, coco.loadCats(i)[0]["name"])

    # Add images
    for i in image_ids:
        self.add_image(
            "coco", image_id=i,
            path=os.path.join(image_dir, coco.imgs[i]['file_name']),
            width=coco.imgs[i]["width"],
            height=coco.imgs[i]["height"],
            annotations=coco.loadAnns(coco.getAnnIds(
                imgIds=[i], catIds=class_ids, iscrowd=None)))
    if return_coco:
        return coco
Example 3: load_coco
# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import getAnnIds [as alias]
def load_coco(self, dataset_dir, subset, year=DEFAULT_DATASET_YEAR, class_ids=None,
              class_map=None, return_coco=False, auto_download=False):
    """Load a subset of the COCO dataset.
    dataset_dir: The root directory of the COCO dataset.
    subset: What to load (train, val, minival, valminusminival)
    year: What dataset year to load (2014, 2017) as a string, not an integer
    class_ids: If provided, only loads images that have the given classes.
    class_map: TODO: Not implemented yet. Supports mapping classes from
        different datasets to the same class ID.
    return_coco: If True, returns the COCO object.
    auto_download: Automatically download and unzip MS-COCO images and annotations
    """
    if auto_download is True:
        self.auto_download(dataset_dir, subset, year)

    coco = COCO("{}/annotations/instances_{}{}.json".format(dataset_dir, subset, year))
    if subset == "minival" or subset == "valminusminival":
        subset = "val"
    image_dir = "{}/{}{}".format(dataset_dir, subset, year)

    # Load all classes or a subset?
    if not class_ids:
        # All classes
        class_ids = sorted(coco.getCatIds())

    # All images or a subset?
    if class_ids:
        image_ids = []
        for id in class_ids:
            image_ids.extend(list(coco.getImgIds(catIds=[id])))
        # Remove duplicates
        image_ids = list(set(image_ids))
    else:
        # All images
        image_ids = list(coco.imgs.keys())

    # Add classes
    for i in class_ids:
        self.add_class("coco", i, coco.loadCats(i)[0]["name"])

    # Add images
    for i in image_ids:
        self.add_image(
            "coco", image_id=i,
            path=os.path.join(image_dir, coco.imgs[i]['file_name']),
            width=coco.imgs[i]["width"],
            height=coco.imgs[i]["height"],
            annotations=coco.loadAnns(coco.getAnnIds(
                imgIds=[i], catIds=class_ids, iscrowd=None)))
    if return_coco:
        return coco
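In both loaders above, the `annotations` list stored per image is exactly what loadAnns returns for the getAnnIds query, so a later step can turn it into per-instance masks with annToMask. A short sketch of that follow-up step, using a placeholder 2017 annotation path:

from pycocotools.coco import COCO

coco = COCO("annotations/instances_val2017.json")  # placeholder path
img_id = coco.getImgIds(catIds=coco.getCatIds(catNms=["person"]))[0]
anns = coco.loadAnns(coco.getAnnIds(imgIds=[img_id], iscrowd=None))

# One binary mask per non-crowd annotation.
masks = [coco.annToMask(a) for a in anns if not a.get("iscrowd", 0)]
print(len(masks), masks[0].shape if masks else None)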
Example 4: create_tokcap
# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import getAnnIds [as alias]
def create_tokcap(data_folder=DATA_FOLDER):
    cap = COCO(COCO_TRAIN_CAP_FILE)
    listedCapMap = {}
    for i in cap.loadAnns(cap.getAnnIds()):
        listedCapMap[i['id']] = [dict([('caption', i['caption']), ('image_id', i['image_id'])])]
    tokenizedListedCapMap = PTBTokenizer().tokenize(listedCapMap)
    # Map caption ids to a dict of the tokenized caption and its image id.
    tokcap = []
    for i, j in tokenizedListedCapMap.iteritems():  # Python 2; use .items() on Python 3
        tokcap += [(i, dict([('caption', j[0]), ('image_id', listedCapMap[i][0]['image_id'])]))]
    f = open(data_folder + '/preprocessed/tokcap.json', 'w')
    json.dump(tokcap, f)
    f.close()
Example 5: _load_all
# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import getAnnIds [as alias]
def _load_all(self, anno_file, shuffle):
    """
    Initialize all entries given an annotation json file.

    Parameters:
    ----------
    anno_file : str
        annotation json file
    shuffle : bool
        whether to shuffle the image list
    """
    image_set_index = []
    labels = []
    coco = COCO(anno_file)
    img_ids = coco.getImgIds()
    for img_id in img_ids:
        # filename
        image_info = coco.loadImgs(img_id)[0]
        filename = image_info["file_name"]
        subdir = filename.split('_')[1]
        height = image_info["height"]
        width = image_info["width"]
        # label
        anno_ids = coco.getAnnIds(imgIds=img_id)
        annos = coco.loadAnns(anno_ids)
        label = []
        for anno in annos:
            cat_id = int(anno["category_id"])
            bbox = anno["bbox"]
            assert len(bbox) == 4
            # COCO boxes are [x, y, width, height]; convert to normalized corners.
            xmin = float(bbox[0]) / width
            ymin = float(bbox[1]) / height
            xmax = xmin + float(bbox[2]) / width
            ymax = ymin + float(bbox[3]) / height
            label.append([cat_id, xmin, ymin, xmax, ymax, 0])
        if label:
            labels.append(np.array(label))
            image_set_index.append(os.path.join(subdir, filename))

    if shuffle:
        import random
        indices = list(range(len(image_set_index)))  # list() needed on Python 3
        random.shuffle(indices)
        image_set_index = [image_set_index[i] for i in indices]
        labels = [labels[i] for i in indices]
    # store the results
    self.image_set_index = image_set_index
    self.labels = labels
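The bbox arithmetic in the inner loop is the part most worth isolating: COCO stores boxes as [x, y, width, height] in pixels, while this loader wants normalized [xmin, ymin, xmax, ymax] corners. Here is the same conversion as a standalone sketch (the function name is mine, not from the example):

def coco_bbox_to_normalized_corners(bbox, img_width, img_height):
    """Convert a COCO [x, y, w, h] pixel box to normalized [xmin, ymin, xmax, ymax]."""
    x, y, w, h = map(float, bbox)
    return [x / img_width, y / img_height,
            (x + w) / img_width, (y + h) / img_height]

# A 50x20 box at (10, 30) in a 200x100 image:
print(coco_bbox_to_normalized_corners([10, 30, 50, 20], 200, 100))
# -> [0.05, 0.3, 0.3, 0.5]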
Example 6: main
# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import getAnnIds [as alias]
# Note: this example is written for Python 2 (print statements, xrange).
def main():
    if len(sys.argv) != 3:
        print 'usage: python convert_to_pascalformat.py coco_dataDir coco_dataType'
        print 'for example: python convert_to_pascalformat.py \'./\' \'val2014\''
        sys.exit(1)
    dataDir = sys.argv[1]
    dataType = sys.argv[2]

    from pycocotools.coco import COCO
    import os
    annFile = '%s/annotations/instances_%s.json' % (dataDir, dataType)
    coco = COCO(annFile)

    cats = coco.loadCats(coco.getCatIds())
    nms = [cat['name'] for cat in cats]
    imgIds = coco.getImgIds()

    directory = './annotations_pascalformat/'
    if not os.path.exists(directory):
        os.makedirs(directory)

    for n in xrange(len(imgIds)):
        img = coco.loadImgs(imgIds[n])[0]
        annIds = coco.getAnnIds(imgIds=img['id'], iscrowd=None)
        anns = coco.loadAnns(annIds)

        xml = '<annotation>\n<folder>\nCOCO2014pascalformat\n</folder>\n<filename>\n'
        xml += img['file_name'] + '\n</filename>\n<source>\n<database>\nCOCO2014pascalformat\n</database>\n</source>\n<size>\n'
        xml += '<width>\n' + str(img['width']) + '\n</width>\n' + '<height>\n' + str(img['height']) + '\n</height>\n'
        xml += '<depth>\n3\n</depth>\n</size>\n<segmented>\n0\n</segmented>\n'

        for i in xrange(len(anns)):
            bbox = anns[i]['bbox']
            xml += '<object>\n<name>\n' + str(anns[i]['category_id']) + '\n</name>\n'
            xml += '<bndbox>\n<xmin>\n' + str(int(round(bbox[0]))) + '\n</xmin>\n'
            xml += '<ymin>\n' + str(int(round(bbox[1]))) + '\n</ymin>\n'
            xml += '<xmax>\n' + str(int(round(bbox[0] + bbox[2]))) + '\n</xmax>\n'
            xml += '<ymax>\n' + str(int(round(bbox[1] + bbox[3]))) + '\n</ymax>\n</bndbox>\n'
            xml += '<truncated>\n0\n</truncated>\n<difficult>\n0\n</difficult>\n</object>\n'
        xml += '</annotation>'

        f_xml = open(directory + img['file_name'].split('.jpg')[0] + '.xml', 'w')
        f_xml.write(xml)
        f_xml.close()
        print str(n) + ' out of ' + str(len(imgIds))
Example 7: loadFeaturesTargets
# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import getAnnIds [as alias]
# dataDir, getImageId, getCaption and shuffle are defined elsewhere in the source module.
def loadFeaturesTargets(fns, dataType, n_captions=1):
    """
    Note: filenames should all come from the same dataType;
    filenames from val2014, for example, should have dataType val2014.

    Parameters
    ----------
    fns: filenames, strings
    dataType: string folder, i.e. train2014, val2014
    n_captions: int, number of captions to load for each image

    Returns
    -------
    X: list of im_vects
        1st list length = len(fns)
        vectors are shape (4096, )
    Y: list of lists of captions
        1st list length = len(fns)
        sublist length = n_captions
    """
    annFile = '%s/annotations/captions_%s.json' % (dataDir, dataType)
    caps = COCO(annFile)

    X = []
    Y = []
    for fn in fns:
        # Features
        x = np.load('%s/features/%s/%s' % (dataDir, dataType, fn))

        # Targets
        annIds = caps.getAnnIds(imgIds=getImageId(fn))
        anns = caps.loadAnns(annIds)

        # sample n_captions per image
        anns = shuffle(anns)
        captions = [getCaption(anns[i]) for i in range(n_captions)]

        X.append(x)
        Y.append(captions)
    return X, Y
Example 8: CocoUtils
# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import getAnnIds [as alias]
class CocoUtils(object):
    def __init__(self, data_dir, data_type):
        ann_file = '%s/annotations/instances_%s.json' % (data_dir, data_type)
        # initialize COCO api for instance annotations
        self.coco = COCO(ann_file)

    def get_img_annotations(self, pic_id):
        ann_ids = self.coco.getAnnIds(imgIds=pic_id, iscrowd=None)
        return self.coco.loadAnns(ann_ids)

    def get_mask_array_and_image(self, annotation, img_width, img_height, fill_color):
        seg = annotation['segmentation']
        raster_img = Image.new('L', (img_width, img_height), 0)
        for polyg in seg:
            ImageDraw.Draw(raster_img).polygon(polyg, outline=fill_color, fill=fill_color)
        return np.array(raster_img), raster_img

    def get_annotation_mask(self, annotation, img_width, img_height):
        seg_mask, seg_img = self.get_mask_array_and_image(annotation, img_width, img_height, 1)
        return seg_mask

    # mask trues are 1, but image trues are 128 -- otherwise the mask is pretty much invisible
    def get_annotation_image(self, annotation, img_width, img_height):
        seg_mask, seg_img = self.get_mask_array_and_image(annotation, img_width, img_height, mask_pic_true_color)
        return seg_img

    def are_legal_anotations(self, annotations):
        # unfortunately, only polygon segmentations work for now (RLE mask type decoding causes a python crash)
        polygon_segmentations = ['segmentation' in ann and type(ann['segmentation']) == list for ann in annotations]
        return all(polygon_segmentations)

    def show_annotations(self, pic_path, annotations):
        if self.are_legal_anotations(annotations):
            pylab.rcParams['figure.figsize'] = (10.0, 8.0)
            read_img = io.imread(pic_path)
            plt.figure()
            plt.imshow(read_img)
            self.coco.showAnns(annotations)
        else:
            print 'cannot show invalid annotation'

    def get_images_data(self):
        # each item of coco.imgs.items() is (image_id, image_info dict); keep only the dicts
        return [pic_data[1] for pic_data in self.coco.imgs.items()]
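CocoUtils is mostly a thin wrapper over three pycocotools calls: getAnnIds, loadAnns and showAnns. For comparison, the same visualization without the wrapper class looks roughly like this (paths are placeholders):

import matplotlib.pyplot as plt
import skimage.io as io
from pycocotools.coco import COCO

coco = COCO("annotations/instances_val2017.json")   # placeholder annotation file
img = coco.loadImgs(coco.getImgIds()[0])[0]
anns = coco.loadAnns(coco.getAnnIds(imgIds=img['id'], iscrowd=None))

image = io.imread("val2017/" + img['file_name'])     # placeholder image directory
plt.imshow(image)
plt.axis('off')
coco.showAnns(anns)                                  # draws the polygons on the current axes
plt.show()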
Example 9: __init__
# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import getAnnIds [as alias]
def __init__(self, annFile, text_field, transform=None):
    from pycocotools.coco import COCO
    coco = COCO(annFile)
    ids = list(coco.imgs.keys())
    transform = transform
    field = [("text", text_field)]
    examples = []
    max_seq_len = 0
    for i in ids:
        ann_ids = coco.getAnnIds(imgIds=i)
        anns = coco.loadAnns(ann_ids)
        for ann in anns:
            caption = ann['caption']
            if transform is not None:
                caption = transform(caption)
            if len(caption) > max_seq_len:
                max_seq_len = len(caption)
            examples.append(Example.fromlist([caption], field))
    self.max_seq_len = max_seq_len + 2  # one for <sos> and one for <eos>
    super().__init__(examples=examples, fields=field)
Example 10: coco2kitti
# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import getAnnIds [as alias]
def coco2kitti(catNms, annFile):
    # initialize COCO api for instance annotations
    coco = COCO(annFile)

    # Create an index for the category names
    cats = coco.loadCats(coco.getCatIds())
    cat_idx = {}
    for c in cats:
        cat_idx[c['id']] = c['name']

    for img in coco.imgs:
        # Get all annotation IDs for the image
        catIds = coco.getCatIds(catNms=catNms)
        annIds = coco.getAnnIds(imgIds=[img], catIds=catIds)

        # If there are annotations, create a label file
        if len(annIds) > 0:
            # Get image filename
            img_fname = coco.imgs[img]['file_name']
            # open text file
            with open('./labels/' + img_fname.split('.')[0] + '.txt', 'w') as label_file:
                anns = coco.loadAnns(annIds)
                for a in anns:
                    bbox = a['bbox']
                    # Convert COCO bbox coords [x, y, w, h] to KITTI corners [x1, y1, x2, y2]
                    bbox = [bbox[0], bbox[1], bbox[2] + bbox[0], bbox[3] + bbox[1]]
                    bbox = [str(b) for b in bbox]
                    catname = cat_idx[a['category_id']]
                    # Format line in label file
                    # Note: all whitespace will be removed from class names
                    out_str = [catname.replace(" ", "")
                               + ' ' + ' '.join(['0'] * 3)
                               + ' ' + ' '.join([b for b in bbox])
                               + ' ' + ' '.join(['0'] * 8)
                               + '\n']
                    label_file.write(out_str[0])
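A possible way to drive coco2kitti, shown only as a hedged usage sketch: the category list and annotation path are placeholders, and the function writes into a ./labels directory, which is created here first.

import os

if __name__ == '__main__':
    os.makedirs('./labels', exist_ok=True)
    # Only annotations belonging to these categories end up in the KITTI label files.
    coco2kitti(catNms=['person', 'car', 'bicycle'],
               annFile='annotations/instances_val2017.json')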
Example 11: coco
# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import getAnnIds [as alias]
# Note: this example is written for Python 2 (print statements, cPickle).
# ... (the beginning of this class is omitted on the source page) ...

        # Sanity check  (tail of a method whose beginning is omitted)
        im_ann = self._COCO.loadImgs(index)[0]
        width = im_ann['width']
        height = im_ann['height']
        ds_utils.validate_boxes(boxes, width=width, height=height)
        return self.create_roidb_from_box_list(box_list, gt_roidb)

    def gt_roidb(self):
        """
        Return the database of ground-truth regions of interest.
        This function loads/saves from/to a cache file to speed up future calls.
        """
        cache_file = osp.join(self.cache_path, self.name + '_gt_roidb.pkl')
        if osp.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            print '{} gt roidb loaded from {}'.format(self.name, cache_file)
            return roidb

        gt_roidb = [self._load_coco_annotation(index)
                    for index in self._image_index]

        with open(cache_file, 'wb') as fid:
            cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print 'wrote gt roidb to {}'.format(cache_file)
        return gt_roidb

    def _load_coco_annotation(self, index):
        """
        Loads COCO bounding-box instance annotations. Crowd instances are
        handled by marking their overlaps (with all categories) to -1. This
        overlap value means that crowd "instances" are excluded from training.
        """
        annIds = self._COCO.getAnnIds(imgIds=index, iscrowd=None)
        objs = self._COCO.loadAnns(annIds)
        objs = [obj for obj in objs if obj['area'] > 0]
        num_objs = len(objs)

        im_ann = self._COCO.loadImgs(index)[0]
        width = im_ann['width']
        height = im_ann['height']

        boxes = np.zeros((num_objs, 4), dtype=np.uint16)
        gt_classes = np.zeros((num_objs), dtype=np.int32)
        overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
        seg_areas = np.zeros((num_objs), dtype=np.float32)

        # Lookup table to map from COCO category ids to our internal class
        # indices
        coco_cat_id_to_class_ind = dict([(self._class_to_coco_cat_id[cls],
                                          self._class_to_ind[cls])
                                         for cls in self._classes[1:]])

        for ix, obj in enumerate(objs):
            x1 = obj['bbox'][0]
            y1 = obj['bbox'][1]
            x2 = np.min((width - 1, x1 + np.max((0, obj['bbox'][2] - 1))))
            y2 = np.min((height - 1, y1 + np.max((0, obj['bbox'][3] - 1))))
            cls = coco_cat_id_to_class_ind[obj['category_id']]
            boxes[ix, :] = [x1, y1, x2, y2]
            gt_classes[ix] = cls
            seg_areas[ix] = obj['area']
            if obj['iscrowd']:
                # Set overlap to -1 for all classes for crowd objects
                # so they will be excluded during training
                overlaps[ix, :] = -1.0
Example 12: COCODetection
# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import getAnnIds [as alias]
# COCO_API, IMAGES, ANNOTATIONS and INSTANCES_SET are module-level constants in the source project.
class COCODetection(data.Dataset):
    """`MS Coco Detection <http://mscoco.org/dataset/#detections-challenge2016>`_ Dataset.

    Args:
        root (string): Root directory where images are downloaded to.
        set_name (string): Name of the specific set of COCO images.
        transform (callable, optional): A function/transform that augments the
            raw images.
        target_transform (callable, optional): A function/transform that takes
            in the target (bbox) and transforms it.
    """

    def __init__(self, root, image_set='trainval35k', transform=None,
                 target_transform=COCOAnnotationTransform(), dataset_name='MS COCO'):
        sys.path.append(osp.join(root, COCO_API))
        from pycocotools.coco import COCO
        self.root = osp.join(root, IMAGES, image_set)
        self.coco = COCO(osp.join(root, ANNOTATIONS,
                                  INSTANCES_SET.format(image_set)))
        self.ids = list(self.coco.imgToAnns.keys())
        self.transform = transform
        self.target_transform = target_transform
        self.name = dataset_name

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: Tuple (image, target).
            target is the object returned by ``coco.loadAnns``.
        """
        im, gt, h, w = self.pull_item(index)
        return im, gt

    def __len__(self):
        return len(self.ids)

    def pull_item(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: Tuple (image, target, height, width).
            target is the object returned by ``coco.loadAnns``.
        """
        img_id = self.ids[index]
        target = self.coco.imgToAnns[img_id]
        ann_ids = self.coco.getAnnIds(imgIds=img_id)
        target = self.coco.loadAnns(ann_ids)
        path = osp.join(self.root, self.coco.loadImgs(img_id)[0]['file_name'])
        assert osp.exists(path), 'Image path does not exist: {}'.format(path)
        img = cv2.imread(path)  # path is already joined with self.root above
        height, width, _ = img.shape
        if self.target_transform is not None:
            target = self.target_transform(target, width, height)
        if self.transform is not None:
            target = np.array(target)
            img, boxes, labels = self.transform(img, target[:, :4],
                                                target[:, 4])
            # to rgb
            img = img[:, :, (2, 1, 0)]
            target = np.hstack((boxes, np.expand_dims(labels, axis=1)))
        return torch.from_numpy(img).permute(2, 0, 1), target, height, width

    def pull_image(self, index):
        '''Returns the original image object at index in PIL form

        Note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.

        Argument:
            index (int): index of img to show
        Return:
            cv2 img
        '''
        img_id = self.ids[index]
        path = self.coco.loadImgs(img_id)[0]['file_name']
        return cv2.imread(osp.join(self.root, path), cv2.IMREAD_COLOR)

    def pull_anno(self, index):
        '''Returns the original annotation of image at index

        Note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.

        Argument:
            index (int): index of img to get annotation of
        Return:
            list: [img_id, [(label, bbox coords),...]]
                eg: ('001718', [('dog', (96, 13, 438, 332))])
        '''
        img_id = self.ids[index]
        ann_ids = self.coco.getAnnIds(imgIds=img_id)
        return self.coco.loadAnns(ann_ids)

    def __repr__(self):
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += '    Number of datapoints: {}\n'.format(self.__len__())
        # ... (the rest of this method is omitted on the source page) ...
Example 13: coco
# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import getAnnIds [as alias]
# ... (the beginning of this class is omitted on the source page) ...

            'Path does not exist: {}'.format(image_path)  # (tail of an assert from an omitted method)
        return image_path

    def gt_roidb(self):
        """
        Return the database of ground-truth regions of interest.
        This function loads/saves from/to a cache file to speed up future calls.
        """
        cache_file = osp.join(self.cache_path, self.name + '_gt_roidb.pkl')
        if osp.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = pickle.load(fid)
            print('{} gt roidb loaded from {}'.format(self.name, cache_file))
            return roidb

        gt_roidb = [self._load_coco_annotation(index)
                    for index in self._image_index]

        with open(cache_file, 'wb') as fid:
            pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
        print('wrote gt roidb to {}'.format(cache_file))
        return gt_roidb

    def _load_coco_annotation(self, index):
        """
        Loads COCO bounding-box instance annotations. Crowd instances are
        handled by marking their overlaps (with all categories) to -1. This
        overlap value means that crowd "instances" are excluded from training.
        """
        im_ann = self._COCO.loadImgs(index)[0]
        width = im_ann['width']
        height = im_ann['height']

        annIds = self._COCO.getAnnIds(imgIds=index, iscrowd=None)
        objs = self._COCO.loadAnns(annIds)

        # Sanitize bboxes -- some are invalid
        valid_objs = []
        for obj in objs:
            x1 = np.max((0, obj['bbox'][0]))
            y1 = np.max((0, obj['bbox'][1]))
            x2 = np.min((width - 1, x1 + np.max((0, obj['bbox'][2] - 1))))
            y2 = np.min((height - 1, y1 + np.max((0, obj['bbox'][3] - 1))))
            if obj['area'] > 0 and x2 >= x1 and y2 >= y1:
                obj['clean_bbox'] = [x1, y1, x2, y2]
                valid_objs.append(obj)
        objs = valid_objs
        num_objs = len(objs)

        boxes = np.zeros((num_objs, 4), dtype=np.uint16)
        gt_classes = np.zeros((num_objs), dtype=np.int32)
        overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
        seg_areas = np.zeros((num_objs), dtype=np.float32)

        # Lookup table to map from COCO category ids to our internal class
        # indices
        coco_cat_id_to_class_ind = dict([(self._class_to_coco_cat_id[cls],
                                          self._class_to_ind[cls])
                                         for cls in self._classes[1:]])

        for ix, obj in enumerate(objs):
            cls = coco_cat_id_to_class_ind[obj['category_id']]
            boxes[ix, :] = obj['clean_bbox']
            gt_classes[ix] = cls
            seg_areas[ix] = obj['area']
            if obj['iscrowd']:
                # Set overlap to -1 for all classes for crowd objects
Example 14: set
# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import getAnnIds [as alias]
# Note: Python 2 script fragment; cats, indoorCatIds, coco, dataDir and dataType
# are defined earlier in the source script.
nms = set([cat['supercategory'] for cat in cats])
print 'COCO supercategories: \n', ' '.join(nms)

indoorCats = coco.loadCats(indoorCatIds)
nms = [cat['name'] for cat in indoorCats]
print 'COCO selected indoor categories: \n', ' '.join(nms)

outdoorSupCat = ['outdoor', 'sports', 'vehicle']  # supercategory indices [0, 4, 7]
outdoorAnimalCat = [16, 19, 20, 21, 22, 23, 24, 25]
outdoorCatIds = coco.getCatIds(supNms=outdoorSupCat)
outdoorCatIds += outdoorAnimalCat

## for debug ###
imgIds = coco.getImgIds()
img = coco.loadImgs(imgIds[0])[0]
I = io.imread('%s/images/%s/%s' % (dataDir, dataType, img['file_name']))
plt.figure(); plt.imshow(I); plt.imsave('a.png', I)
annIds = coco.getAnnIds(imgIds=img['id'], iscrowd=None)
anns = coco.loadAnns(annIds)
coco.showAnns(anns)
plt.savefig('b.png')
## ###

indoorImgIds = set()
for catId in indoorCatIds:
    indoorImgIds |= set(coco.getImgIds(catIds=catId))
    print 'cat id: %d, image number: %d\n' % (catId, len(indoorImgIds))

for catId in outdoorCatIds:
    outdoorIds = coco.getImgIds(imgIds=list(imgIds), catIds=catId)
    indoorAndOutdoor = indoorImgIds & set(outdoorIds)
    indoorImgIds -= indoorAndOutdoor
    print 'cat id: %d, image number: %d\n' % (catId, len(indoorImgIds))

indoorImgIds = list(indoorImgIds)
Example 15: COCO
# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import getAnnIds [as alias]
mode = "val"  # "train" - training set, "val" - validation set
dataset_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'dataset'))

val_anno_path = os.path.join(dataset_dir, "annotations/person_keypoints_%s2017.json" % mode)
val_images_dir = os.path.join(dataset_dir, "%s2017" % mode)
val_masks_dir = os.path.join(dataset_dir, "%smask2017" % mode)

if not os.path.exists(val_masks_dir):
    os.makedirs(val_masks_dir)

coco = COCO(val_anno_path)
ids = list(coco.imgs.keys())

for i, img_id in enumerate(ids):
    ann_ids = coco.getAnnIds(imgIds=img_id)
    img_anns = coco.loadAnns(ann_ids)

    img_path = os.path.join(val_images_dir, "%012d.jpg" % img_id)
    mask_miss_path = os.path.join(val_masks_dir, "mask_miss_%012d.png" % img_id)
    mask_all_path = os.path.join(val_masks_dir, "mask_all_%012d.png" % img_id)

    img = cv2.imread(img_path)
    h, w, c = img.shape

    mask_all = np.zeros((h, w), dtype=np.uint8)
    mask_miss = np.zeros((h, w), dtype=np.uint8)

    flag = 0
    for p in img_anns:
        seg = p["segmentation"]