This article collects typical usage examples of the Python method pycocotools.coco.COCO.showAnns. If you have been wondering what COCO.showAnns does, how to call it, or what real-world usage looks like, the hand-picked code examples below may help. You can also explore the containing class pycocotools.coco.COCO for further usage examples.
The following presents 9 code examples of COCO.showAnns, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
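Before the examples, here is a minimal sketch of the typical showAnns workflow: load an annotation file, pick an image, display it with matplotlib, and let showAnns draw the annotations onto the current axes. The dataDir and dataType values below are placeholders, not taken from any of the examples.
import skimage.io as io
import matplotlib.pyplot as plt
from pycocotools.coco import COCO

dataDir, dataType = '/path/to/coco', 'val2017'  # placeholder paths - adjust to your layout
coco = COCO('%s/annotations/instances_%s.json' % (dataDir, dataType))
img = coco.loadImgs(coco.getImgIds()[0])[0]  # pick any image
I = io.imread('%s/images/%s/%s' % (dataDir, dataType, img['file_name']))
plt.imshow(I)
plt.axis('off')
anns = coco.loadAnns(coco.getAnnIds(imgIds=img['id']))  # all annotations for this image
coco.showAnns(anns)  # draws segmentation masks / keypoints onto the current axes
plt.show()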
Example 1: CocoUtils
# Required module: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import showAnns [as alias]
class CocoUtils(object):
def __init__(self, data_dir, data_type):
ann_file = '%s/annotations/instances_%s.json' % (data_dir, data_type)
# initialize COCO api for instance annotations
self.coco = COCO(ann_file)
def get_img_annotations(self, pic_id):
ann_ids = self.coco.getAnnIds(imgIds=pic_id, iscrowd=None)
return self.coco.loadAnns(ann_ids)
def get_mask_array_and_image(self, annotation, img_width, img_height, fill_color):
seg = annotation['segmentation']
raster_img = Image.new('L', (img_width, img_height), 0)
for polyg in seg:
ImageDraw.Draw(raster_img).polygon(polyg, outline=fill_color, fill=fill_color)
return np.array(raster_img), raster_img
def get_annotation_mask(self, annotation, img_width, img_height):
seg_mask, seg_img = self.get_mask_array_and_image(annotation, img_width, img_height, 1)
return seg_mask
# mask True values are 1, but image True values are 128 - at a value of 1 the mask is practically invisible in the image
def get_annotation_image(self, annotation, img_width, img_height):
seg_mask, seg_img = self.get_mask_array_and_image(annotation, img_width, img_height, mask_pic_true_color)
return seg_img
def are_legal_anotations(self, annotations):
# unfortunately, only polygon segmentations work for now (RLE mask type decoding causes a python crash)
polygon_segmentations = ['segmentation' in ann and type(ann['segmentation']) == list for ann in annotations]
return all(polygon_segmentations)
def show_annotations(self, pic_path, annotations):
if self.are_legal_anotations(annotations):
pylab.rcParams['figure.figsize'] = (10.0, 8.0)
read_img = io.imread(pic_path)
plt.figure()
plt.imshow(read_img)
self.coco.showAnns(annotations)
else:
print('cannot show invalid annotation')
def get_images_data(self):
# each item is image_id, image_file_name
return [pic_data[1] for pic_data in self.coco.imgs.items()]
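A possible way to exercise the class above, assuming the usual imports the snippet elides (numpy, PIL, pylab, skimage.io, matplotlib) and an image that has at least one polygon annotation; the paths are hypothetical:
utils = CocoUtils('/path/to/coco', 'val2014')  # hypothetical directory layout
pic_id = utils.coco.getImgIds()[0]  # any image id
annotations = utils.get_img_annotations(pic_id)
img_info = utils.coco.loadImgs(pic_id)[0]
# binary 0/1 mask for the first annotation of the image
mask = utils.get_annotation_mask(annotations[0], img_info['width'], img_info['height'])
# overlay all annotations on the image (needs the image file on disk)
utils.show_annotations('/path/to/coco/images/val2014/%s' % img_info['file_name'], annotations)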
Example 2: print
# Required module: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import showAnns [as alias]
for imgId in imgIds:
imgIds = coco.getImgIds(imgIds = [2018019951]) # 555705, cat
img = coco.loadImgs(imgIds[np.random.randint(0,len(imgIds))])[0]
# load and display image
# I = io.imread('%s/images/%s/%s'%(dataDir,dataType,img['file_name']))
# use url to load image
I = io.imread(imgDir + img['file_name'])
# load and display instance annotations
plt.imshow(I);
plt.axis('off')
annIds = coco.getAnnIds(imgIds=img['id'], catIds=catIds, iscrowd=None)
anns = coco.loadAnns(annIds)
print(anns)
# display mask
coco.showAnns(anns)
plt.show()
# display bbox
# img = cv2.imread(imgDir + img['file_name'])
# bbox = anns[1]['bbox']
# print(bbox)
# cv2.rectangle(img, (int(bbox[0]), int(bbox[1])), (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3])), (255, 0, 0))
# # cv2.rectangle(img, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (255, 0, 0), 3)
# cv2.imshow('demo', img)
# cv2.waitKey(8000)
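The commented-out cv2 block above draws a box from anns[1]['bbox']; COCO boxes are stored as [x, y, width, height]. A matplotlib-only alternative sketch, reusing I and anns from the snippet above:
import matplotlib.patches as patches

fig, ax = plt.subplots()
ax.imshow(I)
for ann in anns:
    x, y, w, h = ann['bbox']  # COCO bbox format: [x, y, width, height]
    ax.add_patch(patches.Rectangle((x, y), w, h, fill=False, edgecolor='red', linewidth=2))
plt.axis('off')
plt.show()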
Example 3: set
# Required module: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import showAnns [as alias]
nms = set([cat['supercategory'] for cat in cats]) #test=[cat['supercategory'] for cat in cats]
print('COCO supercategories:\n', ' '.join(nms))
# get all images of some given category
catIds = train.getCatIds(catNms=['person', 'dog', 'skateboard'])
imgIds = train.getImgIds(catIds=catIds)
# load and display image
for i in range(0, 5):
img = train.loadImgs(imgIds[np.random.randint(0, len(imgIds))])[0]
I = io.imread('%s/images/%s/%s'%(dataDir, trainDataType, img['file_name']))
print('%s/images/%s/%s' % (dataDir, trainDataType, img['file_name']))
plt.figure()
plt.imshow(I)
plt.show()
plt.imshow(I)
annIds = train.getAnnIds(imgIds=img['id'], catIds=catIds, iscrowd=None)
anns = train.loadAnns(annIds)
train.showAnns(anns)
# for caption annotations
annFile = '%s/annotations/captions_%s.json'%(dataDir, trainDataType)
caps = COCO(annFile)
annIds = caps.getAnnIds(imgIds=img['id'])
anns = caps.loadAnns(annIds)
caps.showAnns(anns)
plt.imshow(I)
plt.show()
Example 4: print
# Required module: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import showAnns [as alias]
subject_id = interaction["subject_id"]
subject_anns = coco.loadAnns(subject_id)[0]
object_id = interaction["object_id"]
object_anns = coco.loadAnns(object_id)[0]
object_cat = coco.cats[object_anns["category_id"]]["name"]
v_actions = interaction["visual_actions"]
v_adverbs = interaction["visual_adverbs"]
print("Image ID: [{0}]".format(image_id))
print("Subject ID:[{0}]".format(subject_id))
print("Object ID: [{0}], Category: [{1}]".format(object_id, object_cat))
print("\nVisual Actions:")
for va_id in v_actions:
va = [x for x in visual_actions if x["id"] == va_id][0]
print(" - id:[{0}], name:[{1}]".format(va["id"], va["name"]))
print("\nVisual Adverbs:")
for va_id in v_adverbs:
va = [x for x in visual_adverbs if x["id"] == va_id][0]
print(" - id:[{0}], name:[{1}]".format(va["id"], va["name"]))
img = coco.loadImgs(image_id)[0]
I = io.imread("{0}/{1}/{2}".format(COCO_IMG_DIR, "train2014", img["file_name"]))
plt.figure(figsize=(12, 8))
plt.imshow(I)
coco.showAnns([subject_anns, object_anns])
plt.show()
Example 5: MSCOCO
# Required module: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import showAnns [as alias]
#.........part of the code omitted here.........
data = []
# loop through each image
for imgId in imgIds:
item = DataItem()
img = self.coco.loadImgs(imgId)[0]
item.im_path = "%s/images/%s/%s"%(dataset, dataset_phase, img["file_name"])
item.im_size = [3, img["height"], img["width"]]
item.coco_id = imgId
annIds = self.coco.getAnnIds(imgIds=img['id'], iscrowd=False)
anns = self.coco.loadAnns(annIds)
all_person_keypoints = []
masked_persons_RLE = []
visible_persons_RLE = []
all_visibilities = []
# Consider only images with people
has_people = len(anns) > 0
if not has_people and self.cfg.coco_only_images_with_people:
continue
for ann in anns: # loop through each person
person_keypoints = []
visibilities = []
if ann["num_keypoints"] != 0:
for i in range(self.cfg.num_joints):
x_coord = ann["keypoints"][3 * i]
y_coord = ann["keypoints"][3 * i + 1]
visibility = ann["keypoints"][3 * i + 2]
visibilities.append(visibility)
if visibility != 0: # i.e. if labeled
person_keypoints.append([i, x_coord, y_coord])
all_person_keypoints.append(np.array(person_keypoints))
visible_persons_RLE.append(maskUtils.decode(self.coco.annToRLE(ann)))
all_visibilities.append(visibilities)
if ann["num_keypoints"] == 0:
masked_persons_RLE.append(self.coco.annToRLE(ann))
item.joints = all_person_keypoints
item.im_neg_mask = maskUtils.merge(masked_persons_RLE)
if self.cfg.use_gt_segm:
item.gt_segm = np.moveaxis(np.array(visible_persons_RLE), 0, -1)
item.visibilities = all_visibilities
data.append(item)
self.has_gt = self.cfg.dataset != "image_info"
return data
def compute_scmap_weights(self, scmap_shape, joint_id, data_item):
size = scmap_shape[0:2]
scmask = np.ones(size)
m = maskUtils.decode(data_item.im_neg_mask)
if m.size:
scmask = 1.0 - imresize(m, size)
scmask = np.stack([scmask] * self.cfg.num_joints, axis=-1)
return scmask
def get_pose_segments(self):
return [[0, 1], [0, 2], [1, 3], [2, 4], [5, 7], [6, 8], [7, 9], [8, 10], [11, 13], [12, 14], [13, 15], [14, 16]]
def visualize_coco(self, coco_img_results, visibilities):
inFile = "tmp.json"
with open(inFile, 'w') as outfile:
json.dump(coco_img_results, outfile)
get_gt_visibilities(inFile, visibilities)
# initialize cocoPred api
cocoPred = self.coco.loadRes(inFile)
os.remove(inFile)
imgIds = [coco_img_results[0]["image_id"]]
for imgId in imgIds:
img = cocoPred.loadImgs(imgId)[0]
im_path = "%s/images/%s/%s" % (self.cfg.dataset, self.cfg.dataset_phase, img["file_name"])
I = io.imread(im_path)
fig = plt.figure()
a = fig.add_subplot(2, 2, 1)
plt.imshow(I)
a.set_title('Initial Image')
a = fig.add_subplot(2, 2, 2)
plt.imshow(I)
a.set_title('Predicted Keypoints')
annIds = cocoPred.getAnnIds(imgIds=img['id'])
anns = cocoPred.loadAnns(annIds)
cocoPred.showAnns(anns)
a = fig.add_subplot(2, 2, 3)
plt.imshow(I)
a.set_title('GT Keypoints')
annIds = self.coco.getAnnIds(imgIds=img['id'])
anns = self.coco.loadAnns(annIds)
self.coco.showAnns(anns)
plt.show()
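The class above converts annotations to masks by hand via annToRLE plus maskUtils.decode; for a single annotation, COCO.annToMask wraps the same two steps. A minimal sketch, assuming coco is a loaded COCO instance and ann is one annotation dict:
mask = coco.annToMask(ann)  # annToRLE + maskUtils.decode in one call; HxW array of 0/1
print(mask.shape, mask.dtype, mask.sum(), 'foreground pixels')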
Example 6:
# Required module: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import showAnns [as alias]
# In[6]:
# load and display image
I = io.imread('%s/images/%s/%s'%(dataDir,dataType,img['file_name']))
plt.figure()
plt.imshow(I)
# In[7]:
# load and display instance annotations
plt.imshow(I)
annIds = coco.getAnnIds(imgIds=img['id'], catIds=catIds, iscrowd=None)
anns = coco.loadAnns(annIds)
coco.showAnns(anns)
# In[8]:
# initialize COCO api for caption annotations
annFile = '%s/annotations/captions_%s.json'%(dataDir,dataType)
caps=COCO(annFile)
# In[9]:
# load and display caption annotations
annIds = caps.getAnnIds(imgIds=img['id']);
anns = caps.loadAnns(annIds)
caps.showAnns(anns)
Example 7: COCO
# Required module: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import showAnns [as alias]
# load and display instance annotations
plt.imshow(I)
plt.axis('off')
annIds = coco.getAnnIds(imgIds=img['id'], catIds=catIds, iscrowd=None) # iscrowd: anns for given crowd label
print(annIds)
anns = coco.loadAnns(annIds) # list, len = 6
# [u'segmentation',
# u'area',
# u'iscrowd',
# u'image_id',
# u'bbox',
# u'category_id',
# u'id']
print(anns)
coco.showAnns(anns)
plt.show()
# 2) show person keypoints
annFile = '{}/annotations/person_keypoints_{}.json'.format(dataDir, dataType)
coco_kps = COCO(annFile)
# load and display keypoints
plt.imshow(I)
plt.axis('off')
ax = plt.gca()
annIds = coco_kps.getAnnIds(imgIds=img['id'], catIds=catIds, iscrowd=None)
anns = coco_kps.loadAnns(annIds) # list, len = 6, dict
# [u'segmentation',
# u'num_keypoints', # new
# u'area',
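The keypoint annotations that coco_kps.showAnns renders are stored as a flat [x1, y1, v1, x2, y2, v2, ...] list, where v is a visibility flag (0 = not labeled, 1 = labeled but occluded, 2 = labeled and visible). A sketch that scatters the labeled joints by hand, assuming I, anns, np, and plt from the snippet above:
plt.imshow(I)
plt.axis('off')
for ann in anns:
    kps = np.array(ann['keypoints']).reshape(-1, 3)  # one row per joint: x, y, visibility
    labeled = kps[kps[:, 2] > 0]  # keep v=1 (occluded) and v=2 (visible) joints
    plt.scatter(labeled[:, 0], labeled[:, 1], s=20, c='lime')
plt.show()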
Example 8: __init__
# Required module: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import showAnns [as alias]
class CaptionSaliency:
def __init__(self,dataType,usingSet,dataDir,savefileDir):
#setpath
self.dataType = dataType
self.usingSet = usingSet
self.dataDir = dataDir
self.savefileDir = savefileDir
self.InsFile='%s/annotations/instances_%s.json'%(dataDir,dataType)
self.CapFile='%s/annotations/captions_%s.json'%(dataDir,dataType)
self.SALICON = pickle.load(open('%s/%s.p'%(savefileDir,usingSet),'rb'))
self.Ins_ID = pickle.load(open('%s/Ins_ID_%s.p'%(savefileDir,usingSet),'rb'))
self.category = pickle.load(open('%s/category.p'%savefileDir,'rb'))
self.category_idx = pickle.load(open('%s/cat_dict_idx.p'%savefileDir,'rb'))#eg., person -- 1
self.category_supercategory_idx = pickle.load(open('%s/cat_dict_supercat.p'%savefileDir,'rb')) #eg., person--human
self.supercategory_idx = pickle.load(open('%s/supercate_id.p'%savefileDir,'rb'))#eg., food--1
self.imsal_dict = pickle.load(open('%s/imsal_dict_%s.p'%(savefileDir,usingSet),'rb'))
self.Ins_coco = COCO(self.InsFile)
self.Cap_coco = COCO(self.CapFile)
self.cat_list = self.Ins_coco.cats#category list (official)
wordmat = sio.loadmat('%s/word_mat_%s.mat'%(savefileDir,usingSet))
wordmat = wordmat['word_mat']
self.wordmat = wordmat[:,0]
self.correction_list = ['men','man','kid','boy','baby']
self.nounlist = []
self.nounID = []
self.Cardi_Noun = []
self.Seque_Noun = []
self.size_norm = float(640*480)
self.loc_norm = float(math.sqrt(640**2+480**2))
self.saliencydict_c = {}
self.saliencydict_s = {}
#******************10-03-2016 update***********************
self.saliencydict_i = {}
self.transformer = TfidfTransformer()
#******************^^^^^^^10-03-2016 update^^^^^^^^^^***********************
def show_im(self,image_id):
if image_id == None:
raise NameError('no image ID')
I = io.imread('%s/images/%s/%s'%(self.dataDir,self.dataType,self.SALICON['SALICON_filename'][image_id]))
plt.imshow(I)
def show_ann(self,image_id):
if image_id == None:
raise NameError('no image ID')
blankim = np.zeros((480,640,3),np.uint8)
plt.imshow(blankim)
annIds = self.Ins_coco.getAnnIds(self.SALICON['SALICON_id'][image_id])
anns = self.Ins_coco.loadAnns(annIds)
self.Ins_coco.showAnns(anns)
def show_cap(self,image_id):
if image_id == None:
raise NameError('no image ID')
annIds = self.Cap_coco.getAnnIds(self.SALICON['SALICON_id'][image_id])
anns = self.Cap_coco.loadAnns(annIds)
self.Cap_coco.showAnns(anns)
def findID(self,word,im_idd):
if word in self.category:
return self.category_idx[word]
else:
temp_idlist={}
for item in self.category_idx.keys():
for item1 in wn.synsets(item, wn.NOUN):
for word1 in wn.synsets(word, wn.NOUN):
dist = item1.wup_similarity(word1)
if item not in temp_idlist.keys():
temp_idlist[self.category_idx[item]] = dist
continue
if dist > temp_idlist[self.category_idx[item]]:
temp_idlist[self.category_idx[item]] = dist
temp_idlist = sorted(temp_idlist.items(), key=lambda d: d[1], reverse=True)
temp_idlist = temp_idlist[0:1]
for n in temp_idlist:
if n[0] in self.Ins_ID[im_idd]:
return n[0]
return 0
#.........part of the code omitted here.........
Example 9: len
# Required module: from pycocotools.coco import COCO [as alias]
# Or: from pycocotools.coco.COCO import showAnns [as alias]
I = io.imread('%s/val2014/%s'%(dataDir,img['file_name']))
if len(I.shape) < 3:
I = np.tile(I[:, :, np.newaxis], (1, 1, 3))
mask = np.zeros(I.shape, dtype=np.uint8)
for seg in ann['segmentation']:
poly = np.array(seg).reshape((len(seg) // 2, 2))
rr, cc = polygon(poly[:, 1], poly[:, 0])
mask[rr, cc] = [64, 0, 0]
io.imsave(writeDir + '/images/' + img['file_name'], I)
io.imsave(writeDir + '/segmentations/' + img['file_name'].replace('jpg', 'png'), mask)
print('Wrote file: ', writeDir + '/' + img['file_name'])
filenames.close()
imgId = imgIds[np.random.randint(100)]
img = cocoGt.loadImgs(imgId)[0]
I = io.imread('%s/train2014/%s'%(dataDir,img['file_name']))
# visialuze gt and dt side by side
fig = plt.figure(figsize=[15,10])
# ground truth
plt.subplot(121)
plt.imshow(I); plt.axis('off'); plt.title('ground truth')
annIds = cocoGt.getAnnIds(imgIds=imgId, catIds=[17])
anns = cocoGt.loadAnns(annIds)
cocoGt.showAnns(anns)
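The snippet ends after the ground-truth subplot; the detection half of the "gt and dt side by side" figure would typically come from COCO.loadRes on a results JSON, followed by the same showAnns call. A hedged sketch, where resFile is a placeholder for a COCO-format results file and cocoGt, I, imgId come from the snippet above:
cocoDt = cocoGt.loadRes(resFile)  # resFile: placeholder path to a COCO-format results JSON
plt.subplot(122)
plt.imshow(I); plt.axis('off'); plt.title('detections')
dtIds = cocoDt.getAnnIds(imgIds=imgId, catIds=[17])
dts = cocoDt.loadAnns(dtIds)
cocoDt.showAnns(dts)
plt.show()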