

Python COCO.loadAnns Method Code Examples

This article collects typical usage examples of the Python method pycocotools.coco.COCO.loadAnns. If you are wondering what COCO.loadAnns does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples of the pycocotools.coco.COCO class that this method belongs to.


The section below presents 15 code examples of the COCO.loadAnns method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
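Before the individual examples, here is a minimal orientation sketch of the getAnnIds/loadAnns pattern that all of them build on. The annotation file path and the choice of image are placeholder assumptions, not values taken from any particular example.

from pycocotools.coco import COCO

ann_file = 'annotations/instances_val2017.json'  # placeholder; point this at a real COCO annotation file
coco = COCO(ann_file)

# Typical pattern: look up annotation ids for an image, then load the full annotation records.
img_id = coco.getImgIds()[0]
ann_ids = coco.getAnnIds(imgIds=[img_id], iscrowd=None)
anns = coco.loadAnns(ann_ids)  # list of dicts with 'bbox', 'category_id', 'segmentation', ...
print(len(anns), 'annotations for image', img_id)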

Example 1: load_coco

# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import loadAnns [as alias]
    def load_coco(self, dataset_dir, subset, class_ids=None,
                  class_map=None, return_coco=False):
        """Load a subset of the COCO dataset.
        dataset_dir: The root directory of the COCO dataset.
        subset: What to load (train, val, minival, val35k)
        class_ids: If provided, only loads images that have the given classes.
        class_map: TODO: Not implemented yet. Supports mapping classes from
            different datasets to the same class ID.
        return_coco: If True, returns the COCO object.
        """
        # Path
        image_dir = os.path.join(dataset_dir, "train2014" if subset == "train"
                                 else "val2014")

        # Create COCO object
        json_path_dict = {
            "train": "annotations/instances_train2014.json",
            "val": "annotations/instances_val2014.json",
            "minival": "annotations/instances_minival2014.json",
            "val35k": "annotations/instances_valminusminival2014.json",
        }
        coco = COCO(os.path.join(dataset_dir, json_path_dict[subset]))

        # Load all classes or a subset?
        if not class_ids:
            # All classes
            class_ids = sorted(coco.getCatIds())

        # All images or a subset?
        if class_ids:
            image_ids = []
            for id in class_ids:
                image_ids.extend(list(coco.getImgIds(catIds=[id])))
            # Remove duplicates
            image_ids = list(set(image_ids))
        else:
            # All images
            image_ids = list(coco.imgs.keys())

        # Add classes
        for i in class_ids:
            self.add_class("coco", i, coco.loadCats(i)[0]["name"])

        # Add images
        for i in image_ids:
            self.add_image(
                "coco", image_id=i,
                path=os.path.join(image_dir, coco.imgs[i]['file_name']),
                width=coco.imgs[i]["width"],
                height=coco.imgs[i]["height"],
                annotations=coco.loadAnns(coco.getAnnIds(
                    imgIds=[i], catIds=class_ids, iscrowd=None)))
        if return_coco:
            return coco
Developer: huanglizhi, Project: Pytorch_Mask_RCNN, Lines: 56, Source: data_center.py

Example 2: __init__

# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import loadAnns [as alias]
    def __init__(self, root_dir, data_dir, anno_file):
        coco = COCO(os.path.join(root_dir, anno_file))
        anns = coco.loadAnns(coco.getAnnIds())

        self.coco = coco
        self.anns = anns
        self.vocab = None  # Later set from outside
        self.coco_root = root_dir
        self.coco_data = data_dir
Developer: Fhrozen, Project: chainer, Lines: 11, Source: datasets.py

Example 3: load_coco

# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import loadAnns [as alias]
    def load_coco(self, dataset_dir, subset, year=DEFAULT_DATASET_YEAR, class_ids=None,
                  class_map=None, return_coco=False, auto_download=False):
        """Load a subset of the COCO dataset.
        dataset_dir: The root directory of the COCO dataset.
        subset: What to load (train, val, minival, valminusminival)
        year: What dataset year to load (2014, 2017) as a string, not an integer
        class_ids: If provided, only loads images that have the given classes.
        class_map: TODO: Not implemented yet. Supports mapping classes from
            different datasets to the same class ID.
        return_coco: If True, returns the COCO object.
        auto_download: Automatically download and unzip MS-COCO images and annotations
        """

        if auto_download is True:
            self.auto_download(dataset_dir, subset, year)

        coco = COCO("{}/annotations/instances_{}{}.json".format(dataset_dir, subset, year))
        if subset == "minival" or subset == "valminusminival":
            subset = "val"
        image_dir = "{}/{}{}".format(dataset_dir, subset, year)

        # Load all classes or a subset?
        if not class_ids:
            # All classes
            class_ids = sorted(coco.getCatIds())

        # All images or a subset?
        if class_ids:
            image_ids = []
            for id in class_ids:
                image_ids.extend(list(coco.getImgIds(catIds=[id])))
            # Remove duplicates
            image_ids = list(set(image_ids))
        else:
            # All images
            image_ids = list(coco.imgs.keys())

        # Add classes
        for i in class_ids:
            self.add_class("coco", i, coco.loadCats(i)[0]["name"])

        # Add images
        for i in image_ids:
            self.add_image(
                "coco", image_id=i,
                path=os.path.join(image_dir, coco.imgs[i]['file_name']),
                width=coco.imgs[i]["width"],
                height=coco.imgs[i]["height"],
                annotations=coco.loadAnns(coco.getAnnIds(
                    imgIds=[i], catIds=class_ids, iscrowd=None)))
        if return_coco:
            return coco
Developer: RubensZimbres, Project: Mask_RCNN, Lines: 54, Source: coco.py
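As a usage sketch for the load_coco method above: in Matterport-style Mask_RCNN code this method is defined on a CocoDataset class that derives from utils.Dataset, so the CocoDataset name, the dataset path, and the prepare() call below are assumptions based on that project rather than part of the snippet itself.

dataset = CocoDataset()  # hypothetical wrapper class assumed from the Mask_RCNN project
coco = dataset.load_coco('/path/to/coco', 'train', year='2014', return_coco=True)
dataset.prepare()        # assumed utils.Dataset step that builds the internal class/image indices
# The returned COCO object can be kept for later use, e.g. evaluation with pycocotools.cocoeval.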

Example 4: create_tokcap

# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import loadAnns [as alias]
def create_tokcap(data_folder=DATA_FOLDER):
    cap = COCO(COCO_TRAIN_CAP_FILE)
    
    listedCapMap = {}
    for i in cap.loadAnns(cap.getAnnIds()):
        listedCapMap[i['id']] = [dict([('caption',i['caption']), ('image_id', i['image_id'])])]
    tokenizedListedCapMap = PTBTokenizer().tokenize(listedCapMap)
    
    tokcap = [] #map caption ids to a map of its tokenized caption and image id
    for i, j in tokenizedListedCapMap.items():  # .items() instead of the Python 2-only .iteritems()
        tokcap += [(i, dict([('caption', j[0]), ('image_id', listedCapMap[i][0]['image_id'])]))]
    
    f = open(data_folder + '/preprocessed/tokcap.json', 'w')
    json.dump(tokcap, f)
    f.close()
Developer: duchesneaumathieu, Project: Image-Captioning, Lines: 17, Source: create_files.py

Example 5: _load_all

# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import loadAnns [as alias]
    def _load_all(self, anno_file, shuffle):
        """
        initialize all entries given annotation json file

        Parameters:
        ----------
        anno_file: str
            annotation json file
        shuffle: bool
            whether to shuffle image list
        """
        image_set_index = []
        labels = []
        coco = COCO(anno_file)
        img_ids = coco.getImgIds()
        for img_id in img_ids:
            # filename
            image_info = coco.loadImgs(img_id)[0]
            filename = image_info["file_name"]
            subdir = filename.split('_')[1]
            height = image_info["height"]
            width = image_info["width"]
            # label
            anno_ids = coco.getAnnIds(imgIds=img_id)
            annos = coco.loadAnns(anno_ids)
            label = []
            for anno in annos:
                cat_id = int(anno["category_id"])
                bbox = anno["bbox"]
                assert len(bbox) == 4
                xmin = float(bbox[0]) / width
                ymin = float(bbox[1]) / height
                xmax = xmin + float(bbox[2]) / width
                ymax = ymin + float(bbox[3]) / height
                label.append([cat_id, xmin, ymin, xmax, ymax, 0])
            if label:
                labels.append(np.array(label))
                image_set_index.append(os.path.join(subdir, filename))

        if shuffle:
            import random
            indices = list(range(len(image_set_index)))  # list() so random.shuffle works on Python 3
            random.shuffle(indices)
            image_set_index = [image_set_index[i] for i in indices]
            labels = [labels[i] for i in indices]
        # store the results
        self.image_set_index = image_set_index
        self.labels = labels
Developer: Piyush3dB, Project: mxnet, Lines: 50, Source: mscoco.py

Example 6: main

# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import loadAnns [as alias]
def main():
    import sys  # used below for argv and exit; not imported elsewhere in this snippet

    if len(sys.argv) != 3:
        print('usage: python convert_to_pascalformat.py coco_dataDir coco_dataType')
        print("for example: python convert_to_pascalformat.py './' 'val2014'")
        sys.exit(1)

    dataDir = sys.argv[1]
    dataType = sys.argv[2]

    from pycocotools.coco import COCO
    import os

    annFile = '%s/annotations/instances_%s.json' % (dataDir, dataType)

    coco = COCO(annFile)
    cats = coco.loadCats(coco.getCatIds())
    nms = [cat['name'] for cat in cats]

    imgIds = coco.getImgIds()

    directory = './annotations_pascalformat/'
    if not os.path.exists(directory):
        os.makedirs(directory)

    for n in range(len(imgIds)):
        img = coco.loadImgs(imgIds[n])[0]
        annIds = coco.getAnnIds(imgIds=img['id'], iscrowd=None)
        anns = coco.loadAnns(annIds)

        xml = '<annotation>\n<folder>\nCOCO2014pascalformat\n</folder>\n<filename>\n'
        xml += img['file_name'] + '\n</filename>\n<source>\n<database>\nCOCO2014pascalformat\n</database>\n</source>\n<size>\n'
        xml += '<width>\n' + str(img['width']) + '\n</width>\n' + '<height>\n' + str(img['height']) + '\n</height>\n'
        xml += '<depth>\n3\n</depth>\n</size>\n<segmented>\n0\n</segmented>\n'

        for i in range(len(anns)):
            bbox = anns[i]['bbox']
            xml += '<object>\n<name>\n' + str(anns[i]['category_id']) + '\n</name>\n'
            xml += '<bndbox>\n<xmin>\n' + str(int(round(bbox[0]))) + '\n</xmin>\n'
            xml += '<ymin>\n' + str(int(round(bbox[1]))) + '\n</ymin>\n'
            xml += '<xmax>\n' + str(int(round(bbox[0] + bbox[2]))) + '\n</xmax>\n'
            xml += '<ymax>\n' + str(int(round(bbox[1] + bbox[3]))) + '\n</ymax>\n</bndbox>\n'
            xml += '<truncated>\n0\n</truncated>\n<difficult>\n0\n</difficult>\n</object>\n'
        xml += '</annotation>'
        f_xml = open(directory + img['file_name'].split('.jpg')[0] + '.xml', 'w')
        f_xml.write(xml)
        f_xml.close()
        print(str(n) + ' out of ' + str(len(imgIds)))
Developer: caomw, Project: coco-dpm, Lines: 49, Source: convert_to_pascalformat.py

Example 7: loadFeaturesTargets

# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import loadAnns [as alias]
def loadFeaturesTargets(fns, dataType, n_captions=1):
    """
    Note: filenames should come from the same type of dataType.

    filenames from val2014, for example, should have dataType val2014
    Parameters
    ----------
    fns: filenames, strings

    dataType: string folder, i.e. train2014, val2014

    n_captions: int, number of captions for each image to load

    Returns
    -------
    X: list of im_vects
        1st list length = len(fns)
        vectors are shape (4096, )

    Y: list of list of captions.
        1st list length = len(fns)
        sublist length = n_captions
    """
    annFile = '%s/annotations/captions_%s.json'%(dataDir,dataType)
    caps=COCO(annFile)

    X = []
    Y = []

    for fn in fns:
        # Features
        x = np.load('%s/features/%s/%s'%(dataDir, dataType, fn))

        # Targets
        annIds = caps.getAnnIds(imgIds=getImageId(fn));
        anns = caps.loadAnns(annIds)

        # sample n_captions per image
        anns = shuffle(anns)
        captions = [getCaption(anns[i]) for i in range(n_captions)]

        X.append(x)
        Y.append(captions)

    return X, Y
Developer: youralien, Project: MLFun, Lines: 47, Source: dataset.py
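A brief usage sketch for loadFeaturesTargets above. The function relies on module-level names not shown in the snippet (dataDir, getImageId, getCaption, shuffle), and the feature filename below is a placeholder, not a file from the original project.

fns = ['COCO_val2014_000000000042.npy']  # placeholder feature file produced elsewhere in the project
X, Y = loadFeaturesTargets(fns, 'val2014', n_captions=3)
print(len(X), X[0].shape)  # one 4096-dim feature vector per filename
print(Y[0])                # three sampled captions for the first image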

Example 8: CocoUtils

# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import loadAnns [as alias]
class CocoUtils(object):
    def __init__(self, data_dir, data_type):
        ann_file = '%s/annotations/instances_%s.json' % (data_dir, data_type)
        # initialize COCO api for instance annotations
        self.coco = COCO(ann_file)

    def get_img_annotations(self, pic_id):
        ann_ids = self.coco.getAnnIds(imgIds=pic_id, iscrowd=None)
        return self.coco.loadAnns(ann_ids)

    def get_mask_array_and_image(self, annotation, img_width, img_height, fill_color):
        seg = annotation['segmentation']
        raster_img = Image.new('L', (img_width, img_height), 0)
        for polyg in seg:
            ImageDraw.Draw(raster_img).polygon(polyg, outline=fill_color, fill=fill_color)
        return np.array(raster_img), raster_img

    def get_annotation_mask(self, annotation, img_width, img_height):
        seg_mask, seg_img = self.get_mask_array_and_image(annotation, img_width, img_height, 1)
        return seg_mask

    # mask True values are 1, but image True values are 128; otherwise the mask is nearly invisible
    def get_annotation_image(self, annotation, img_width, img_height):
        seg_mask, seg_img = self.get_mask_array_and_image(annotation, img_width, img_height, mask_pic_true_color)
        return seg_img

    def are_legal_anotations(self, annotations):
        # unfortunately, only polygon segmentations work for now (RLE mask type decoding causes a python crash)
        polygon_segmentations = ['segmentation' in ann and type(ann['segmentation']) == list for ann in annotations]
        return all(polygon_segmentations)

    def show_annotations(self, pic_path, annotations):
        if self.are_legal_anotations(annotations):
            pylab.rcParams['figure.figsize'] = (10.0, 8.0)
            read_img = io.imread(pic_path)
            plt.figure()
            plt.imshow(read_img)
            self.coco.showAnns(annotations)
        else:
            print('cannot show invalid annotation')

    def get_images_data(self):
        # each item is image_id, image_file_name
        return [pic_data[1] for pic_data in self.coco.imgs.items()]
Developer: BenJamesbabala, Project: NNProject_DeepMask, Lines: 46, Source: CocoUtils.py
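A brief usage sketch for the CocoUtils class above. The dataset directory and data type strings are placeholder assumptions; adjust them to your local COCO layout.

utils = CocoUtils('/data/coco', 'train2014')
images = utils.get_images_data()  # image records from coco.imgs (id, file_name, width, height, ...)
first = images[0]
anns = utils.get_img_annotations(first['id'])
if anns and utils.are_legal_anotations(anns):
    seg_mask = utils.get_annotation_mask(anns[0], first['width'], first['height'])
    print(seg_mask.shape, seg_mask.sum())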

Example 9: __init__

# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import loadAnns [as alias]
	def __init__(self,annFile,text_field,transform=None):
		from pycocotools.coco import COCO
		coco = COCO(annFile)
		ids = list(coco.imgs.keys())
		transform = transform
		field = [("text",text_field)]
		examples = []
		max_seq_len = 0
		for i in ids:
			ann_ids = coco.getAnnIds(imgIds=i)
			anns = coco.loadAnns(ann_ids)
			for ann in anns:
				caption = ann['caption']
				if transform is not None:
					caption = transform(caption)
				if len(caption) > max_seq_len:
					max_seq_len = len(caption)
				examples.append(Example.fromlist([caption],field))
		self.max_seq_len = max_seq_len + 2 # one for <sos> and one for <eos>
		super().__init__(examples=examples,fields=field)
Developer: mhattingpete, Project: GenerativeAdversarialNetworks, Lines: 22, Source: cococaptions.py

Example 10: coco2kitti

# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import loadAnns [as alias]
def coco2kitti(catNms, annFile):

    # initialize COCO api for instance annotations
    coco = COCO(annFile)

    # Create an index for the category names
    cats = coco.loadCats(coco.getCatIds())
    cat_idx = {}
    for c in cats:
        cat_idx[c['id']] = c['name']

    for img in coco.imgs:

        # Get all annotation IDs for the image
        catIds = coco.getCatIds(catNms=catNms)
        annIds = coco.getAnnIds(imgIds=[img], catIds=catIds)

        # If there are annotations, create a label file
        if len(annIds) > 0:
            # Get image filename
            img_fname = coco.imgs[img]['file_name']
            # open text file
            with open('./labels/' + img_fname.split('.')[0] + '.txt','w') as label_file:
                anns = coco.loadAnns(annIds)
                for a in anns:
                    bbox = a['bbox']
                    # Convert COCO bbox coords to Kitti ones
                    bbox = [bbox[0], bbox[1], bbox[2] + bbox[0], bbox[3] + bbox[1]]
                    bbox = [str(b) for b in bbox]
                    catname = cat_idx[a['category_id']]
                    # Format line in label file
                    # Note: all whitespace will be removed from class names
                    out_str = [catname.replace(" ","")
                               + ' ' + ' '.join(['0']*3)
                               + ' ' + ' '.join([b for b in bbox])
                               + ' ' + ' '.join(['0']*8)
                               +'\n']
                    label_file.write(out_str[0])
Developer: XJTUeducation, Project: jetson-inference, Lines: 40, Source: coco2kitti.py
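A short usage sketch for coco2kitti above. The category names and annotation path are placeholder assumptions, and the function writes into a ./labels/ directory that must already exist.

import os

os.makedirs('./labels', exist_ok=True)  # coco2kitti opens './labels/<image>.txt' for writing
coco2kitti(catNms=['person', 'car'], annFile='annotations/instances_val2017.json')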

Example 11: COCO

# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import loadAnns [as alias]
mode = "val"  # "train" for the training set, "val" for the validation set

dataset_dir = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'dataset'))

val_anno_path = os.path.join(dataset_dir, "annotations/person_keypoints_%s2017.json" % mode)
val_images_dir = os.path.join(dataset_dir, "%s2017" % mode)
val_masks_dir = os.path.join(dataset_dir, "%smask2017" % mode)

if not os.path.exists(val_masks_dir):
    os.makedirs(val_masks_dir)

coco = COCO(val_anno_path)
ids = list(coco.imgs.keys())
for i, img_id in enumerate(ids):
    ann_ids = coco.getAnnIds(imgIds=img_id)
    img_anns = coco.loadAnns(ann_ids)

    img_path = os.path.join(val_images_dir, "%012d.jpg" % img_id)
    mask_miss_path = os.path.join(val_masks_dir, "mask_miss_%012d.png" % img_id)
    mask_all_path = os.path.join(val_masks_dir, "mask_all_%012d.png" % img_id)

    img = cv2.imread(img_path)
    h, w, c = img.shape

    mask_all = np.zeros((h, w), dtype=np.uint8)
    mask_miss = np.zeros((h, w), dtype=np.uint8)
    flag = 0
    for p in img_anns:
        seg = p["segmentation"]

        if p["iscrowd"] == 1:
Developer: Da-He, Project: keras_Realtime_Multi-Person_Pose_Estimation, Lines: 33, Source: generate_masks.py

Example 12: COCOSegmentation

# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import loadAnns [as alias]
class COCOSegmentation(SegmentationDataset):
    """COCO Semantic Segmentation Dataset for VOC Pre-training.

    Parameters
    ----------
    root : string
        Path to COCO folder. Default is '$(HOME)/.mxnet/datasets/coco'
    split: string
        'train', 'val' or 'test'
    transform : callable, optional
        A function that transforms the image

    Examples
    --------
    >>> from mxnet.gluon.data.vision import transforms
    >>> # Transforms for Normalization
    >>> input_transform = transforms.Compose([
    >>>     transforms.ToTensor(),
    >>>     transforms.Normalize([.485, .456, .406], [.229, .224, .225]),
    >>> ])
    >>> # Create Dataset
    >>> trainset = gluoncv.data.COCOSegmentation(split='train', transform=input_transform)
    >>> # Create Training Loader
    >>> train_data = gluon.data.DataLoader(
    >>>     trainset, 4, shuffle=True, last_batch='rollover',
    >>>     num_workers=4)
    """
    CAT_LIST = [0, 5, 2, 16, 9, 44, 6, 3, 17, 62, 21, 67, 18, 19, 4,
                1, 64, 20, 63, 7, 72]
    NUM_CLASS = 21
    def __init__(self, root=os.path.expanduser('~/.mxnet/datasets/coco'),
                 split='train', mode=None, transform=None, **kwargs):
        super(COCOSegmentation, self).__init__(root, split, mode, transform, **kwargs)
        # lazy import pycocotools
        try_import_pycocotools()
        from pycocotools.coco import COCO
        from pycocotools import mask
        if split == 'train':
            print('train set')
            ann_file = os.path.join(root, 'annotations/instances_train2017.json')
            ids_file = os.path.join(root, 'annotations/train_ids.mx')
            self.root = os.path.join(root, 'train2017')
        else:
            print('val set')
            ann_file = os.path.join(root, 'annotations/instances_val2017.json')
            ids_file = os.path.join(root, 'annotations/val_ids.mx')
            self.root = os.path.join(root, 'val2017')
        self.coco = COCO(ann_file)
        self.coco_mask = mask
        if os.path.exists(ids_file):
            with open(ids_file, 'rb') as f:
                self.ids = pickle.load(f)
        else:
            ids = list(self.coco.imgs.keys())
            self.ids = self._preprocess(ids, ids_file)
        self.transform = transform

    def __getitem__(self, index):
        coco = self.coco
        img_id = self.ids[index]
        img_metadata = coco.loadImgs(img_id)[0]
        path = img_metadata['file_name']
        img = Image.open(os.path.join(self.root, path)).convert('RGB')
        cocotarget = coco.loadAnns(coco.getAnnIds(imgIds=img_id))
        mask = Image.fromarray(self._gen_seg_mask(
            cocotarget, img_metadata['height'], img_metadata['width']))
        # synchrosized transform
        if self.mode == 'train':
            img, mask = self._sync_transform(img, mask)
        elif self.mode == 'val':
            img, mask = self._val_sync_transform(img, mask)
        else:
            assert self.mode == 'testval'
            img, mask = self._img_transform(img), self._mask_transform(mask)
        # general resize, normalize and toTensor
        if self.transform is not None:
            img = self.transform(img)
        return img, mask

    def __len__(self):
        return len(self.ids)

    def _gen_seg_mask(self, target, h, w):
        mask = np.zeros((h, w), dtype=np.uint8)
        coco_mask = self.coco_mask
        for instance in target:
            rle = coco_mask.frPyObjects(instance['segmentation'], h, w)
            m = coco_mask.decode(rle)
            cat = instance['category_id']
            if cat in self.CAT_LIST:
                c = self.CAT_LIST.index(cat)
            else:
                continue
            if len(m.shape) < 3:
                mask[:, :] += (mask == 0) * (m * c)
            else:
                mask[:, :] += (mask == 0) * (((np.sum(m, axis=2)) > 0) * c).astype(np.uint8)
        return mask

    def _preprocess(self, ids, ids_file):
#......... part of the code is omitted here .........
Developer: xiayongtao, Project: gluon-cv, Lines: 103, Source: segmentation.py

Example 13: enumerate

# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import loadAnns [as alias]
boxes_test


# In[834]:


import pandas as pd
box_cat = []
for i,id in enumerate(img_list_train):
    if i%100 == 0:
        print(i)
        
    img = coco_train.loadImgs([id])[0]
    annIds = coco_train.getAnnIds(imgIds=img['id'],  iscrowd=None)
    anns = coco_train.loadAnns(annIds)

    
    ## Get true boxes
    true_bboxes = []
    true_bboxes_cat = []
    
    for ann in anns:
        x, y, w, h = ann['bbox']
        true_bboxes += [ann['bbox']]
        true_bboxes_cat.append(ann['category_id'])

    bboxes = boxes_train[1][i]
    bboxes_cat = []
    
    if bboxes is not None:
Developer: EmilyYanW, Project: Machine_Learning, Lines: 32, Source: 8.Computer_Vision_Image_Detection_and_Retrieval.py

Example 14: COCOSegmentation

# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import loadAnns [as alias]
class COCOSegmentation(Dataset):
    NUM_CLASSES = 21
    CAT_LIST = [0, 5, 2, 16, 9, 44, 6, 3, 17, 62, 21, 67, 18, 19, 4,
        1, 64, 20, 63, 7, 72]

    def __init__(self,
                 args,
                 base_dir=Path.db_root_dir('coco'),
                 split='train',
                 year='2017'):
        super().__init__()
        ann_file = os.path.join(base_dir, 'annotations/instances_{}{}.json'.format(split, year))
        ids_file = os.path.join(base_dir, 'annotations/{}_ids_{}.pth'.format(split, year))
        self.img_dir = os.path.join(base_dir, 'images/{}{}'.format(split, year))
        self.split = split
        self.coco = COCO(ann_file)
        self.coco_mask = mask
        if os.path.exists(ids_file):
            self.ids = torch.load(ids_file)
        else:
            ids = list(self.coco.imgs.keys())
            self.ids = self._preprocess(ids, ids_file)
        self.args = args

    def __getitem__(self, index):
        _img, _target = self._make_img_gt_point_pair(index)
        sample = {'image': _img, 'label': _target}

        if self.split == "train":
            return self.transform_tr(sample)
        elif self.split == 'val':
            return self.transform_val(sample)

    def _make_img_gt_point_pair(self, index):
        coco = self.coco
        img_id = self.ids[index]
        img_metadata = coco.loadImgs(img_id)[0]
        path = img_metadata['file_name']
        _img = Image.open(os.path.join(self.img_dir, path)).convert('RGB')
        cocotarget = coco.loadAnns(coco.getAnnIds(imgIds=img_id))
        _target = Image.fromarray(self._gen_seg_mask(
            cocotarget, img_metadata['height'], img_metadata['width']))

        return _img, _target

    def _preprocess(self, ids, ids_file):
        print("Preprocessing mask, this will take a while. " + \
              "But don't worry, it only run once for each split.")
        tbar = trange(len(ids))
        new_ids = []
        for i in tbar:
            img_id = ids[i]
            cocotarget = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_id))
            img_metadata = self.coco.loadImgs(img_id)[0]
            mask = self._gen_seg_mask(cocotarget, img_metadata['height'],
                                      img_metadata['width'])
            # more than 1k pixels
            if (mask > 0).sum() > 1000:
                new_ids.append(img_id)
            tbar.set_description('Doing: {}/{}, got {} qualified images'. \
                                 format(i, len(ids), len(new_ids)))
        print('Found number of qualified images: ', len(new_ids))
        torch.save(new_ids, ids_file)
        return new_ids

    def _gen_seg_mask(self, target, h, w):
        mask = np.zeros((h, w), dtype=np.uint8)
        coco_mask = self.coco_mask
        for instance in target:
            rle = coco_mask.frPyObjects(instance['segmentation'], h, w)
            m = coco_mask.decode(rle)
            cat = instance['category_id']
            if cat in self.CAT_LIST:
                c = self.CAT_LIST.index(cat)
            else:
                continue
            if len(m.shape) < 3:
                mask[:, :] += (mask == 0) * (m * c)
            else:
                mask[:, :] += (mask == 0) * (((np.sum(m, axis=2)) > 0) * c).astype(np.uint8)
        return mask

    def transform_tr(self, sample):
        composed_transforms = transforms.Compose([
            tr.RandomHorizontalFlip(),
            tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size),
            tr.RandomGaussianBlur(),
            tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
            tr.ToTensor()])

        return composed_transforms(sample)

    def transform_val(self, sample):

        composed_transforms = transforms.Compose([
            tr.FixScaleCrop(crop_size=self.args.crop_size),
            tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
            tr.ToTensor()])

        return composed_transforms(sample)
#......... part of the code is omitted here .........
Developer: WenmuZhou, Project: pytorch-deeplab-xception, Lines: 103, Source: coco.py

Example 15: MSCOCO

# Required import: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import loadAnns [as alias]
class MSCOCO(PoseDataset):
    def __init__(self, cfg):
        cfg.all_joints = [[0], [2, 1], [4, 3], [6, 5], [8, 7],[10, 9], [12, 11], [14, 13], [16, 15]]
        cfg.all_joints_names = ["nose", 'eye', 'ear', 'shoulder', 'elbow', 'hand', 'hip', 'knee', 'foot']
        cfg.num_joints = 17
        super().__init__(cfg)

    def load_dataset(self):
        dataset  = self.cfg.dataset
        dataset_phase = self.cfg.dataset_phase
        dataset_ann = self.cfg.dataset_ann

        # initialize COCO api
        annFile = '%s/annotations/%s_%s.json'%(dataset,dataset_ann,dataset_phase)
        self.coco = COCO(annFile)

        imgIds = self.coco.getImgIds()

        data = []

        # loop through each image
        for imgId in imgIds:
            item = DataItem()

            img = self.coco.loadImgs(imgId)[0]
            item.im_path = "%s/images/%s/%s"%(dataset, dataset_phase, img["file_name"])
            item.im_size = [3, img["height"], img["width"]]
            item.coco_id = imgId
            annIds = self.coco.getAnnIds(imgIds=img['id'], iscrowd=False)
            anns = self.coco.loadAnns(annIds)

            all_person_keypoints = []
            masked_persons_RLE = []
            visible_persons_RLE = []
            all_visibilities = []

            # Consider only images with people
            has_people = len(anns) > 0
            if not has_people and self.cfg.coco_only_images_with_people:
                continue

            for ann in anns: # loop through each person
                person_keypoints = []
                visibilities = []
                if ann["num_keypoints"] != 0:
                    for i in range(self.cfg.num_joints):
                        x_coord = ann["keypoints"][3 * i]
                        y_coord = ann["keypoints"][3 * i + 1]
                        visibility = ann["keypoints"][3 * i + 2]
                        visibilities.append(visibility)
                        if visibility != 0: # i.e. if labeled
                            person_keypoints.append([i, x_coord, y_coord])
                    all_person_keypoints.append(np.array(person_keypoints))
                    visible_persons_RLE.append(maskUtils.decode(self.coco.annToRLE(ann)))
                    all_visibilities.append(visibilities)
                if ann["num_keypoints"] == 0:
                    masked_persons_RLE.append(self.coco.annToRLE(ann))

            item.joints = all_person_keypoints
            item.im_neg_mask = maskUtils.merge(masked_persons_RLE)
            if self.cfg.use_gt_segm:
                item.gt_segm = np.moveaxis(np.array(visible_persons_RLE), 0, -1)
                item.visibilities = all_visibilities
            data.append(item)

        self.has_gt = self.cfg.dataset != "image_info"  # compare by value, not identity
        return data


    def compute_scmap_weights(self, scmap_shape, joint_id, data_item):
        size = scmap_shape[0:2]
        scmask = np.ones(size)
        m = maskUtils.decode(data_item.im_neg_mask)
        if m.size:
            scmask = 1.0 - imresize(m, size)
        scmask = np.stack([scmask] * self.cfg.num_joints, axis=-1)
        return scmask

    def get_pose_segments(self):
       return [[0, 1], [0, 2], [1, 3], [2, 4], [5, 7], [6, 8], [7, 9], [8, 10], [11, 13], [12, 14], [13, 15], [14, 16]]

    def visualize_coco(self, coco_img_results, visibilities):
        inFile = "tmp.json"
        with open(inFile, 'w') as outfile:
            json.dump(coco_img_results, outfile)
        get_gt_visibilities(inFile, visibilities)

        # initialize cocoPred api
        cocoPred = self.coco.loadRes(inFile)
        os.remove(inFile)

        imgIds = [coco_img_results[0]["image_id"]]

        for imgId in imgIds:
            img = cocoPred.loadImgs(imgId)[0]
            im_path = "%s/images/%s/%s" % (self.cfg.dataset, self.cfg.dataset_phase, img["file_name"])
            I = io.imread(im_path)

            fig = plt.figure()
            a = fig.add_subplot(2, 2, 1)
#......... part of the code is omitted here .........
Developer: PJunhyuk, Project: people-counting-pose, Lines: 103, Source: mscoco.py


Note: The pycocotools.coco.COCO.loadAnns method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and the copyright of the source code belongs to the original authors. Before distributing or using this code, please consult the corresponding project's License; do not reproduce without permission.