This article collects typical usage examples of the Python class maskrcnn_benchmark.structures.segmentation_mask.SegmentationMask. If you are wondering what segmentation_mask.SegmentationMask does, how to call it, or what real-world uses look like, the curated code examples below should help. You can also explore the containing module, maskrcnn_benchmark.structures.segmentation_mask, for further usage.
The following lists 14 code examples of segmentation_mask.SegmentationMask, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
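Before the dataset examples, here is a minimal standalone sketch of how SegmentationMask is typically constructed and manipulated. It assumes maskrcnn_benchmark is installed; the polygon coordinates, image size, crop box, and resize target are illustrative values, not taken from any of the examples that follow.
# Minimal sketch (assumes maskrcnn_benchmark is installed); polygon, size,
# crop box, and resize target are made-up illustrative values.
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask

# One instance made of a single polygon: a 100x100 axis-aligned square.
polygons = [[[100.0, 100.0, 200.0, 100.0, 200.0, 200.0, 100.0, 200.0]]]
image_size = (640, 480)  # (width, height)

masks = SegmentationMask(polygons, image_size, mode="poly")
binary = masks.convert("mask")            # per-pixel binary-mask representation

# Geometric ops mirror BoxList, so masks can follow the image through transforms.
flipped = masks.transpose(0)              # 0 == FLIP_LEFT_RIGHT
cropped = masks.crop([50, 50, 250, 250])  # xyxy crop box
resized = masks.resize((320, 240))        # (width, height)

print(len(masks), len(binary))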
Example 1: __getitem__
# Required import: from maskrcnn_benchmark.structures import segmentation_mask [as alias]
# Or: from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask [as alias]
def __getitem__(self, idx):
    img, anno = super(COCODataset, self).__getitem__(idx)
    # filter crowd annotations
    # TODO might be better to add an extra field
    anno = [obj for obj in anno if obj["iscrowd"] == 0]
    boxes = [obj["bbox"] for obj in anno]
    boxes = torch.as_tensor(boxes).reshape(-1, 4)  # guard against no boxes
    target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")
    classes = [obj["category_id"] for obj in anno]
    classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
    classes = torch.tensor(classes)
    target.add_field("labels", classes)
    masks = [obj["segmentation"] for obj in anno]
    masks = SegmentationMask(masks, img.size)
    target.add_field("masks", masks)
    if anno and "keypoints" in anno[0]:
        keypoints = [obj["keypoints"] for obj in anno]
        keypoints = PersonKeypoints(keypoints, img.size)
        target.add_field("keypoints", keypoints)
    target = target.clip_to_image(remove_empty=True)
    if self.transforms is not None:
        img, target = self.transforms(img, target)
    return img, target, idx
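The BoxList returned above carries the segmentation masks as an extra field, and geometric operations on the target propagate to them. A minimal sketch of consuming that output follows; the dataset variable is an assumption standing in for a COCODataset instance constructed elsewhere.
# Minimal consumption sketch; `dataset` is assumed to be a COCODataset built elsewhere.
img, target, idx = dataset[0]
labels = target.get_field("labels")    # int64 tensor, one label per box
masks = target.get_field("masks")      # SegmentationMask aligned with target.bbox
print(target.bbox.shape, labels.shape, len(masks))

# Fields that implement transpose/crop/resize follow the boxes, e.g. a horizontal flip:
flipped_target = target.transpose(0)   # 0 == FLIP_LEFT_RIGHT
flipped_masks = flipped_target.get_field("masks")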
Example 2: __getitem__
# Required import: from maskrcnn_benchmark.structures import segmentation_mask [as alias]
# Or: from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask [as alias]
def __getitem__(self, idx):
    img, anno = super(ModaNetDataset, self).__getitem__(idx)
    # filter crowd annotations
    # TODO might be better to add an extra field
    anno = [obj for obj in anno if obj["iscrowd"] == 0]
    boxes = [obj["bbox"] for obj in anno]
    boxes = torch.as_tensor(boxes).reshape(-1, 4)  # guard against no boxes
    target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")
    classes = [obj["category_id"] + 1 for obj in anno]
    # print(classes, 'old')
    classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
    # print(classes)
    classes = torch.tensor(classes)
    target.add_field("labels", classes)
    # masks = [obj["segmentation"] for obj in anno]
    # masks = SegmentationMask(masks, img.size, mode='poly')
    # target.add_field("masks", masks)
    # if anno and "keypoints" in anno[0]:
    #     keypoints = [obj["keypoints"] for obj in anno]
    #     keypoints = PersonKeypoints(keypoints, img.size)
    #     target.add_field("keypoints", keypoints)
    target = target.clip_to_image(remove_empty=True)
    if self.transforms is not None:
        img, target = self.transforms(img, target)
    return img, target, idx
Example 3: __getitem__
# Required import: from maskrcnn_benchmark.structures import segmentation_mask [as alias]
# Or: from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask [as alias]
def __getitem__(self, idx):
    img, anno = super(COCODataset, self).__getitem__(idx)
    # filter crowd annotations
    # TODO might be better to add an extra field
    anno = [obj for obj in anno if obj["iscrowd"] == 0]
    boxes = [obj["bbox"] for obj in anno]
    boxes = torch.as_tensor(boxes).reshape(-1, 4)  # guard against no boxes
    target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")
    classes = [obj["category_id"] for obj in anno]
    classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
    classes = torch.tensor(classes)
    target.add_field("labels", classes)
    if anno and "segmentation" in anno[0]:
        masks = [obj["segmentation"] for obj in anno]
        masks = SegmentationMask(masks, img.size, mode='poly')
        target.add_field("masks", masks)
    if anno and "keypoints" in anno[0]:
        keypoints = [obj["keypoints"] for obj in anno]
        keypoints = PersonKeypoints(keypoints, img.size)
        target.add_field("keypoints", keypoints)
    target = target.clip_to_image(remove_empty=True)
    if self._transforms is not None:
        img, target = self._transforms(img, target)
    return img, target, idx
Example 4: __getitem__
# Required import: from maskrcnn_benchmark.structures import segmentation_mask [as alias]
# Or: from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask [as alias]
def __getitem__(self, idx):
    img, anno = super(DeepFashion2Dataset, self).__getitem__(idx)
    # filter crowd annotations
    # TODO might be better to add an extra field
    anno = [obj for obj in anno if obj["iscrowd"] == 0]
    boxes = [obj["bbox"] for obj in anno]
    boxes = torch.as_tensor(boxes).reshape(-1, 4)  # guard against no boxes
    target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")
    classes = [obj["category_id"] for obj in anno]
    # print(classes)
    classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
    classes = torch.tensor(classes)
    target.add_field("labels", classes)
    # masks = [obj["segmentation"] for obj in anno]
    # masks = SegmentationMask(masks, img.size, mode='poly')
    # target.add_field("masks", masks)
    # if anno and "keypoints" in anno[0]:
    #     keypoints = [obj["keypoints"] for obj in anno]
    #     keypoints = PersonKeypoints(keypoints, img.size)
    #     target.add_field("keypoints", keypoints)
    target = target.clip_to_image(remove_empty=True)
    if self.transforms is not None:
        img, target = self.transforms(img, target)
    return img, target, idx
Example 5: __init__
# Required import: from maskrcnn_benchmark.structures import segmentation_mask [as alias]
# Or: from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask [as alias]
def __init__(self, method_name='runTest'):
    super(TestSegmentationMask, self).__init__(method_name)
    poly = [[[423.0, 306.5, 406.5, 277.0, 400.0, 271.5, 389.5, 277.0,
              387.5, 292.0, 384.5, 295.0, 374.5, 220.0, 378.5, 210.0,
              391.0, 200.5, 404.0, 199.5, 414.0, 203.5, 425.5, 221.0,
              438.5, 297.0, 423.0, 306.5],
             [100, 100, 200, 100, 200, 200, 100, 200],
             ]]
    width = 640
    height = 480
    size = width, height
    self.P = SegmentationMask(poly, size, 'poly')
    self.M = SegmentationMask(poly, size, 'poly').convert('mask')
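These fixtures make it natural to check that the polygon ('poly') and binary-mask ('mask') representations stay consistent under geometric operations. A hedged sketch of such a check follows; the chosen operations and tolerance are assumptions rather than part of the original test, and it assumes SegmentationMask exposes get_mask_tensor() as in the upstream structure.
# Hypothetical consistency check over the fixtures above; the ops and tolerance
# are illustrative assumptions, and get_mask_tensor() is assumed to be available.
def assert_representations_agree(P, M, atol=1.0):
    for op in (lambda s: s.transpose(0),                 # horizontal flip
               lambda s: s.crop([100, 100, 400, 400]),   # xyxy crop box
               lambda s: s.resize((320, 240))):          # (width, height)
        from_poly = op(P).convert("mask").get_mask_tensor().float()
        direct = op(M).get_mask_tensor().float()
        assert (from_poly - direct).abs().mean().item() <= atol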
Example 6: __getitem__
# Required import: from maskrcnn_benchmark.structures import segmentation_mask [as alias]
# Or: from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask [as alias]
def __getitem__(self, idx):
    img, anno = super(COCODataset, self).__getitem__(idx)
    # filter crowd annotations
    # TODO might be better to add an extra field
    anno = [obj for obj in anno if obj["iscrowd"] == 0]
    boxes = [obj["bbox"] for obj in anno]
    boxes = torch.as_tensor(boxes).reshape(-1, 4)  # guard against no boxes
    target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")
    classes = [obj["category_id"] for obj in anno]
    classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
    classes = torch.tensor(classes)
    target.add_field("labels", classes)
    masks = [obj["segmentation"] for obj in anno]
    masks = SegmentationMask(masks, img.size)
    target.add_field("masks", masks)
    target = target.clip_to_image(remove_empty=True)
    if self.transforms is not None:
        img, target = self.transforms(img, target)
    return img, target, idx
Example 7: __getitem__
# Required import: from maskrcnn_benchmark.structures import segmentation_mask [as alias]
# Or: from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask [as alias]
def __getitem__(self, idx):
    img, anno = super(COCODataset, self).__getitem__(idx)
    # filter crowd annotations
    # TODO might be better to add an extra field
    anno = [obj for obj in anno if obj["iscrowd"] == 0]
    boxes = [obj["bbox"] for obj in anno]
    # boxes = torch.as_tensor(boxes).reshape(-1, 4)  # guard against no boxes
    boxes = torch.as_tensor(boxes).reshape(-1, 5)  # clw modify: rotated boxes carry a fifth (angle) value
    # target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")
    target = BoxList(boxes, img.size, mode="xywh_angle")  # clw modify
    classes = [obj["category_id"] for obj in anno]
    classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
    classes = torch.tensor(classes)
    target.add_field("labels", classes)
    masks = [obj["segmentation"] for obj in anno]
    masks = SegmentationMask(masks, img.size)
    target.add_field("masks", masks)
    target = target.clip_to_image(remove_empty=True)
    if self.transforms is not None:
        img, target = self.transforms(img, target)
    return img, target, idx
Example 8: __getitem__
# Required import: from maskrcnn_benchmark.structures import segmentation_mask [as alias]
# Or: from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask [as alias]
def __getitem__(self, idx):
    # img, anno = super(COCODataset, self).__getitem__(idx)
    # replaces super().__getitem__ so the image can be read with zipreader
    coco = self.coco
    img_id = self.ids[idx]
    ann_ids = coco.getAnnIds(imgIds=img_id)
    anno = coco.loadAnns(ann_ids)
    path = coco.loadImgs(img_id)[0]['file_name']
    # On the philly cluster, use zipreader instead of Image.open
    # img = Image.open(os.path.join(self.root, path)).convert('RGB')
    img = zipreader.imread(os.path.join(self.root, path),
                           cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
    img = Image.fromarray(img)
    # filter crowd annotations
    # TODO might be better to add an extra field
    anno = [obj for obj in anno if obj["iscrowd"] == 0]
    boxes = [obj["bbox"] for obj in anno]
    boxes = torch.as_tensor(boxes).reshape(-1, 4)  # guard against no boxes
    target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")
    classes = [obj["category_id"] for obj in anno]
    classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
    classes = torch.tensor(classes)
    target.add_field("labels", classes)
    masks = [obj["segmentation"] for obj in anno]
    masks = SegmentationMask(masks, img.size)
    target.add_field("masks", masks)
    target = target.clip_to_image(remove_empty=True)
    if self.transforms is not None:
        img, target = self.transforms(img, target)
    return img, target, idx
Example 9: __getitem__
# Required import: from maskrcnn_benchmark.structures import segmentation_mask [as alias]
# Or: from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask [as alias]
def __getitem__(self, idx):
    img_path = self.img_paths[idx]
    ann_path = self.ann_paths[idx]
    if self.mode == "mask":
        ann = torch.from_numpy(np.asarray(Image.open(ann_path)))
        # masks are represented with tensors
        boxes, segmentations, labels = self._processBinayMasks(ann)
    else:
        with open(ann_path, "r") as ann_file:
            ann = json.load(ann_file)
        # masks are represented with polygons
        boxes, segmentations, labels = self._processPolygons(ann)
    boxes, segmentations, labels = self._filterGT(boxes, segmentations, labels)
    if len(segmentations) == 0:
        empty_ann_path = self.get_img_info(idx)["ann_path"]
        print("EMPTY ENTRY:", empty_ann_path)
        # self.img_paths.pop(idx)
        # self.ann_paths.pop(idx)
        # just override this image with the next one
        img, target, _ = self[(idx + 1) % len(self)]
        return img, target, idx
    img = Image.open(img_path)
    # Compose all into a BoxList instance
    target = BoxList(boxes, img.size, mode="xyxy")
    target.add_field("labels", torch.tensor(labels))
    masks = SegmentationMask(segmentations, img.size, mode=self.mode)
    target.add_field("masks", masks)
    if self.transforms is not None:
        img, target = self.transforms(img, target)
    return img, target, idx
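Example 9 is the only one that forwards a mode argument, so when self.mode is "mask" the SegmentationMask is built directly from per-pixel binary masks rather than polygons. A minimal sketch of that path follows; the tensor shapes and values are illustrative assumptions, and it assumes the upstream structure accepts an (N, H, W) binary mask tensor in 'mask' mode.
# Minimal sketch of the 'mask'-mode path; shapes and values are made up, and an
# (N, H, W) uint8 tensor is assumed to be an accepted input for mode="mask".
import torch
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask

instance_masks = torch.zeros((2, 480, 640), dtype=torch.uint8)  # two 480x640 instances
instance_masks[0, 100:200, 100:200] = 1
instance_masks[1, 250:300, 300:420] = 1

masks = SegmentationMask(instance_masks, (640, 480), mode="mask")  # size is (width, height)
print(len(masks))  # 2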
Example 10: __getitem__
# Required import: from maskrcnn_benchmark.structures import segmentation_mask [as alias]
# Or: from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask [as alias]
def __getitem__(self, idx):
    img, anno = super(COCODataset, self).__getitem__(idx)
    # filter crowd annotations
    # TODO might be better to add an extra field
    anno = [obj for obj in anno if obj["iscrowd"] == 0]
    boxes = [obj["bbox"] for obj in anno]
    boxes = torch.as_tensor(boxes).reshape(-1, 4)  # guard against no boxes
    target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")
    classes = [obj["category_id"] for obj in anno]
    classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
    classes = torch.tensor(classes)
    target.add_field("labels", classes)
    # masks = [obj["segmentation"] for obj in anno]
    # masks = SegmentationMask(masks, img.size)
    # target.add_field("masks", masks)
    target = target.clip_to_image(remove_empty=True)
    if self.transforms is not None:
        img, target = self.transforms(img, target)
    return img, target, idx
Example 11: __getitem__
# Required import: from maskrcnn_benchmark.structures import segmentation_mask [as alias]
# Or: from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask [as alias]
def __getitem__(self, idx):
    img, anno = super(COCODataset, self).__getitem__(idx)
    # filter crowd annotations
    # TODO might be better to add an extra field
    anno = [obj for obj in anno if obj["iscrowd"] == 0]
    boxes = [obj["bbox"] for obj in anno]
    boxes = torch.as_tensor(boxes).reshape(-1, 4)  # guard against no boxes
    target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")
    classes = [obj["category_id"] for obj in anno]
    classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
    classes = torch.tensor(classes)
    target.add_field("labels", classes)
    # masks = [obj["segmentation"] for obj in anno]
    # masks = SegmentationMask(masks, img.size, mode='poly')
    # target.add_field("masks", masks)
    if anno and "keypoints" in anno[0]:
        keypoints = [obj["keypoints"] for obj in anno]
        keypoints = PersonKeypoints(keypoints, img.size)
        target.add_field("keypoints", keypoints)
    target = target.clip_to_image(remove_empty=True)
    if self.transforms is not None:
        img, target = self.transforms(img, target)
    return img, target, idx
Example 12: __getitem__
# Required import: from maskrcnn_benchmark.structures import segmentation_mask [as alias]
# Or: from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask [as alias]
def __getitem__(self, idx):
    img, anno = super(COCODataset, self).__getitem__(idx)
    # ########################## add by hui ########################################
    img_info = self.get_img_info(idx)
    if 'corner' in img_info:
        img = img.crop(img_info['corner'])
    # ##############################################################################
    # filter crowd annotations
    # TODO might be better to add an extra field
    anno = [obj for obj in anno if obj["iscrowd"] == 0]
    # ######################### add by hui ####################################
    if self.filter_ignore and anno and "ignore" in anno[0]:  # filter out ignored annotations
        anno = [obj for obj in anno if not obj["ignore"]]
    # #########################################################################
    boxes = [obj["bbox"] for obj in anno]
    boxes = torch.as_tensor(boxes).reshape(-1, 4)  # guard against no boxes
    target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")
    classes = [obj["category_id"] for obj in anno]
    classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
    classes = torch.tensor(classes)
    target.add_field("labels", classes)
    masks = [obj["segmentation"] for obj in anno]
    masks = SegmentationMask(masks, img.size)
    target.add_field("masks", masks)
    if anno and "keypoints" in anno[0]:
        keypoints = [obj["keypoints"] for obj in anno]
        keypoints = PersonKeypoints(keypoints, img.size)
        target.add_field("keypoints", keypoints)
    target = target.clip_to_image(remove_empty=True)
    if self.transforms is not None:
        img, target = self.transforms(img, target)
    return img, target, idx
Example 13: __getitem__
# Required import: from maskrcnn_benchmark.structures import segmentation_mask [as alias]
# Or: from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask [as alias]
def __getitem__(self, idx):
    # img, anno = super(COCODataset, self).__getitem__(idx)
    coco = self.coco
    img_id = self.ids[idx]
    ann_ids = coco.getAnnIds(imgIds=img_id)
    anno = coco.loadAnns(ann_ids)
    path = coco.loadImgs(img_id)[0]['file_name']
    if isinstance(self.root, list):
        root = [r for r in self.root if path.split('_')[1] in r][0]
    else:
        root = self.root
    img = Image.open(os.path.join(root, path)).convert('RGB')
    if self.transform is not None:
        img = self.transform(img)
    if self.target_transform is not None:
        anno = self.target_transform(anno)
    # filter crowd annotations
    # TODO might be better to add an extra field
    anno = [obj for obj in anno if obj["iscrowd"] == 0]
    boxes = [obj["bbox"] for obj in anno]
    boxes = torch.as_tensor(boxes).reshape(-1, 4)  # guard against no boxes
    target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")
    classes = [obj["category_id"] for obj in anno]
    classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
    classes = torch.tensor(classes)
    target.add_field("labels", classes)
    if anno and "segmentation" in anno[0]:
        masks = [obj["segmentation"] for obj in anno]
        masks = SegmentationMask(masks, img.size, mode='poly')
        target.add_field("masks", masks)
    if anno and "keypoints" in anno[0]:
        keypoints = [obj["keypoints"] for obj in anno]
        keypoints = PersonKeypoints(keypoints, img.size)
        target.add_field("keypoints", keypoints)
    target = target.clip_to_image(remove_empty=True)
    if self._transforms is not None:
        img, target = self._transforms(img, target)
    return img, target, idx
Example 14: __getitem__
# Required import: from maskrcnn_benchmark.structures import segmentation_mask [as alias]
# Or: from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask [as alias]
def __getitem__(self, idx):
    if self.mode == 0:
        img, anno = super(COCODataset, self).__getitem__(idx)
        # filter crowd annotations
        # TODO might be better to add an extra field
        anno = [obj for obj in anno if obj["iscrowd"] == 0]
        boxes = [obj["bbox"] for obj in anno]
        boxes = torch.as_tensor(boxes).reshape(-1, 4)  # guard against no boxes
        target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")
        classes = [obj["category_id"] for obj in anno]
        classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
        classes = torch.tensor(classes)
        target.add_field("labels", classes)
        masks = [obj["segmentation"] for obj in anno]
        masks = SegmentationMask(masks, img.size)
        target.add_field("masks", masks)
        target = target.clip_to_image(remove_empty=True)
        if self.transforms is not None:
            img, target = self.transforms(img, target)
        return img, target, idx
    elif self.mode == 1:
        img_id = self.ids[idx]
        feature_list = torch.load(self._split_feature % (self.backbone, self.resolution, img_id),
                                  map_location=torch.device("cpu"))
        if self.special_deal:
            label = torch.load(self._split_label % (self.backbone, self.resolution, img_id),
                               map_location=torch.device("cpu"))
            reg = torch.load(self._split_reg % (self.backbone, self.resolution, img_id),
                             map_location=torch.device("cpu"))
            return feature_list, label, reg, idx
        else:
            target = torch.load(self._split_target % (self.backbone, self.resolution, img_id),
                                map_location=torch.device("cpu"))
            return feature_list, target, idx
    else:
        raise ValueError("Mode {} is not supported".format(self.mode))