This article collects typical usage examples of Python's PIL.ImageOps.mirror method. If you have been wondering what exactly ImageOps.mirror does and how to use it, the curated examples below may help. You can also explore further usage of its containing module, PIL.ImageOps.
The following presents 15 code examples of ImageOps.mirror, ordered by popularity.
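Before the examples, a minimal, self-contained sketch of the method itself may help; ImageOps.mirror returns a new image flipped horizontally (left to right), and the file names below are placeholders:

from PIL import Image, ImageOps

# ImageOps.mirror(image) returns a copy flipped left-to-right;
# the original image object is left untouched.
image = Image.open('photo.jpg')  # placeholder path
mirrored = ImageOps.mirror(image)
assert mirrored.size == image.size
mirrored.save('photo_mirrored.jpg')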
Example 1: __getitem__
# Required import: from PIL import ImageOps [as alias]
# Or: from PIL.ImageOps import mirror [as alias]
def __getitem__(self, index: int) -> Tuple[str, Tensor, Tensor, Tensor, Tensor]:
    image_id = self._image_ids[index]
    annotation = self._image_id_to_annotation_dict[image_id]
    bboxes = [obj.bbox.tolist() for obj in annotation.objects if not obj.difficult]
    labels = [VOC2007.CATEGORY_TO_LABEL_DICT[obj.name] for obj in annotation.objects if not obj.difficult]
    bboxes = torch.tensor(bboxes, dtype=torch.float)
    labels = torch.tensor(labels, dtype=torch.long)
    image = Image.open(os.path.join(self._path_to_jpeg_images_dir, annotation.filename))
    # random flip in training mode only
    if self._mode == VOC2007.Mode.TRAIN and random.random() > 0.5:
        image = ImageOps.mirror(image)
        bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # indices 0 and 2 are `left` and `right` respectively
    image, scale = VOC2007.preprocess(image, self._image_min_side, self._image_max_side)
    scale = torch.tensor(scale, dtype=torch.float)
    bboxes *= scale
    return image_id, image, scale, bboxes, labels
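A quick note on the box-flip line above, which recurs in most of the examples below: after a horizontal flip, a box's new left edge is the image width minus its old right edge, and symmetrically for the new right edge, which is why columns 0 and 2 are swapped and subtracted in one step. A toy check with made-up numbers:

import torch

width = 100
bboxes = torch.tensor([[10., 20., 30., 40.]])  # [left, top, right, bottom]
bboxes[:, [0, 2]] = width - bboxes[:, [2, 0]]  # left' = width - right, right' = width - left
print(bboxes)  # tensor([[70., 20., 90., 40.]])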
Example 2: __getitem__
# Required import: from PIL import ImageOps [as alias]
# Or: from PIL.ImageOps import mirror [as alias]
def __getitem__(self, index: int) -> Tuple[str, Tensor, Tensor, Tensor, Tensor]:
    image_id = self._image_ids[index]
    annotation = self._image_id_to_annotation_dict[image_id]
    bboxes = [obj.bbox.tolist() for obj in annotation.objects]
    labels = [COCO2017Animal.CATEGORY_TO_LABEL_DICT[COCO2017.LABEL_TO_CATEGORY_DICT[obj.label]] for obj in annotation.objects]  # map from the original `COCO2017` dataset
    bboxes = torch.tensor(bboxes, dtype=torch.float)
    labels = torch.tensor(labels, dtype=torch.long)
    image = Image.open(annotation.filename).convert('RGB')  # some images are grayscale
    # random flip in training mode only
    if self._mode == COCO2017Animal.Mode.TRAIN and random.random() > 0.5:
        image = ImageOps.mirror(image)
        bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # indices 0 and 2 are `left` and `right` respectively
    image, scale = COCO2017Animal.preprocess(image, self._image_min_side, self._image_max_side)
    scale = torch.tensor(scale, dtype=torch.float)
    bboxes *= scale
    return image_id, image, scale, bboxes, labels
Example 3: __getitem__
# Required import: from PIL import ImageOps [as alias]
# Or: from PIL.ImageOps import mirror [as alias]
def __getitem__(self, index: int) -> Tuple[str, Tensor, Tensor, Tensor, Tensor]:
    image_id = self._image_ids[index]
    annotation = self._image_id_to_annotation_dict[image_id]
    bboxes = [obj.bbox.tolist() for obj in annotation.objects]
    labels = [COCO2017Person.CATEGORY_TO_LABEL_DICT[COCO2017.LABEL_TO_CATEGORY_DICT[obj.label]] for obj in annotation.objects]  # map from the original `COCO2017` dataset
    bboxes = torch.tensor(bboxes, dtype=torch.float)
    labels = torch.tensor(labels, dtype=torch.long)
    image = Image.open(annotation.filename).convert('RGB')  # some images are grayscale
    # random flip in training mode only
    if self._mode == COCO2017Person.Mode.TRAIN and random.random() > 0.5:
        image = ImageOps.mirror(image)
        bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # indices 0 and 2 are `left` and `right` respectively
    image, scale = COCO2017Person.preprocess(image, self._image_min_side, self._image_max_side)
    scale = torch.tensor(scale, dtype=torch.float)
    bboxes *= scale
    return image_id, image, scale, bboxes, labels
Example 4: __getitem__
# Required import: from PIL import ImageOps [as alias]
# Or: from PIL.ImageOps import mirror [as alias]
def __getitem__(self, index: int) -> Tuple[str, Tensor, Tensor, Tensor, Tensor]:
    image_id = self._image_ids[index]
    annotation = self._image_id_to_annotation_dict[image_id]
    bboxes = [obj.bbox.tolist() for obj in annotation.objects]
    labels = [obj.label for obj in annotation.objects]
    bboxes = torch.tensor(bboxes, dtype=torch.float)
    labels = torch.tensor(labels, dtype=torch.long)
    image = Image.open(annotation.filename).convert('RGB')  # some images are grayscale
    # random flip in training mode only
    if self._mode == COCO2017.Mode.TRAIN and random.random() > 0.5:
        image = ImageOps.mirror(image)
        bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # indices 0 and 2 are `left` and `right` respectively
    image, scale = COCO2017.preprocess(image, self._image_min_side, self._image_max_side)
    scale = torch.tensor(scale, dtype=torch.float)
    bboxes *= scale
    return image_id, image, scale, bboxes, labels
Example 5: __getitem__
# Required import: from PIL import ImageOps [as alias]
# Or: from PIL.ImageOps import mirror [as alias]
def __getitem__(self, index: int) -> Tuple[str, Tensor, Tensor, Tensor, Tensor]:
    image_id = self._image_ids[index]
    annotation = self._image_id_to_annotation_dict[image_id]
    bboxes = [obj.bbox.tolist() for obj in annotation.objects]
    labels = [VOC2007CatDog.CATEGORY_TO_LABEL_DICT[obj.name] for obj in annotation.objects]
    bboxes = torch.tensor(bboxes, dtype=torch.float)
    labels = torch.tensor(labels, dtype=torch.long)
    image = Image.open(os.path.join(self._path_to_jpeg_images_dir, annotation.filename))
    # random flip in training mode only
    if self._mode == VOC2007CatDog.Mode.TRAIN and random.random() > 0.5:
        image = ImageOps.mirror(image)
        bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # indices 0 and 2 are `left` and `right` respectively
    image, scale = VOC2007CatDog.preprocess(image, self._image_min_side, self._image_max_side)
    scale = torch.tensor(scale, dtype=torch.float)
    bboxes *= scale
    return image_id, image, scale, bboxes, labels
Example 6: __getitem__
# Required import: from PIL import ImageOps [as alias]
# Or: from PIL.ImageOps import mirror [as alias]
def __getitem__(self, index: int) -> Tuple[str, Tensor, Tensor, Tensor, Tensor]:
    bboxes = torch.tensor(self.bboxes[index], dtype=torch.float)
    labels = torch.tensor(self.labels[index], dtype=torch.long)
    # print(int(self.image_position[index].split('/')[1]))
    # image = Image.open(self.path_to_keyframe + '/' + image_index[index].split('/')[0] + '/' + str(int(image_index[index].split('/')[1])) + ".jpg")
    image = Image.open(self.path_to_keyframe + '/' + self.image_position[index] + ".jpg")
    # random flip in training mode only (disabled in this variant)
    # if self._mode == VOC2007.Mode.TRAIN and random.random() > 0.5:
    #     image = ImageOps.mirror(image)
    #     bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # indices 0 and 2 are `left` and `right` respectively
    self._image_min_side = 600
    self._image_max_side = 1000
    image, scale = self.preprocess(image, self._image_min_side, self._image_max_side)
    scale = torch.tensor(scale, dtype=torch.float)
    bboxes *= scale
    return self.image_position[index], image, scale, bboxes, labels
Example 7: __getitem__
# Required import: from PIL import ImageOps [as alias]
# Or: from PIL.ImageOps import mirror [as alias]
def __getitem__(self, index: int) -> Tuple[str, Tensor, Tensor, Tensor, Tensor]:
    image_id = self._image_ids[index]
    annotation = self._image_id_to_annotation_dict[image_id]
    bboxes = [obj.bbox.tolist() for obj in annotation.objects]
    labels = [COCO2017Car.CATEGORY_TO_LABEL_DICT[COCO2017.LABEL_TO_CATEGORY_DICT[obj.label]] for obj in annotation.objects]  # map from the original `COCO2017` dataset
    bboxes = torch.tensor(bboxes, dtype=torch.float)
    labels = torch.tensor(labels, dtype=torch.long)
    image = Image.open(annotation.filename).convert('RGB')  # some images are grayscale
    # random flip in training mode only
    if self._mode == COCO2017Car.Mode.TRAIN and random.random() > 0.5:
        image = ImageOps.mirror(image)
        bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # indices 0 and 2 are `left` and `right` respectively
    image, scale = COCO2017Car.preprocess(image, self._image_min_side, self._image_max_side)
    scale = torch.tensor(scale, dtype=torch.float)
    bboxes *= scale
    return image_id, image, scale, bboxes, labels
Example 8: augment
# Required import: from PIL import ImageOps [as alias]
# Or: from PIL.ImageOps import mirror [as alias]
def augment(img_in, img_tar, img_tar_l, flip_h=True, rot=True):
    # Note: despite the key names, ImageOps.flip is a top-bottom flip
    # and ImageOps.mirror is a left-right flip.
    info_aug = {'flip_h': False, 'flip_v': False, 'trans': False}
    if random.random() < 0.5 and flip_h:
        img_in = [ImageOps.flip(j) for j in img_in]
        img_tar = [ImageOps.flip(j) for j in img_tar]
        img_tar_l = ImageOps.flip(img_tar_l)
        info_aug['flip_h'] = True
    if rot:
        if random.random() < 0.5:
            img_in = [ImageOps.mirror(j) for j in img_in]
            img_tar = [ImageOps.mirror(j) for j in img_tar]
            img_tar_l = ImageOps.mirror(img_tar_l)
            info_aug['flip_v'] = True
        if random.random() < 0.5:
            img_in = [j.rotate(180) for j in img_in]
            img_tar = [j.rotate(180) for j in img_tar]
            img_tar_l = img_tar_l.rotate(180)
            info_aug['trans'] = True
    return img_in, img_tar, img_tar_l, info_aug
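A hedged usage sketch for the augment above (it assumes that function is in scope; the sizes, frame counts, and blank images are arbitrary placeholders):

from PIL import Image

frames_in = [Image.new('RGB', (32, 32)) for _ in range(3)]
frames_tar = [Image.new('RGB', (64, 64)) for _ in range(3)]
target_l = Image.new('RGB', (64, 64))
frames_in, frames_tar, target_l, info = augment(frames_in, frames_tar, target_l)
print(info)  # e.g. {'flip_h': True, 'flip_v': False, 'trans': True}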
Example 9: augment
# Required import: from PIL import ImageOps [as alias]
# Or: from PIL.ImageOps import mirror [as alias]
def augment(img_in, img_tar, img_nn, flip_h=True, rot=True):
    # Note: as in Example 8, ImageOps.flip flips top-bottom and
    # ImageOps.mirror flips left-right.
    info_aug = {'flip_h': False, 'flip_v': False, 'trans': False}
    if random.random() < 0.5 and flip_h:
        img_in = ImageOps.flip(img_in)
        img_tar = ImageOps.flip(img_tar)
        img_nn = [ImageOps.flip(j) for j in img_nn]
        info_aug['flip_h'] = True
    if rot:
        if random.random() < 0.5:
            img_in = ImageOps.mirror(img_in)
            img_tar = ImageOps.mirror(img_tar)
            img_nn = [ImageOps.mirror(j) for j in img_nn]
            info_aug['flip_v'] = True
        if random.random() < 0.5:
            img_in = img_in.rotate(180)
            img_tar = img_tar.rotate(180)
            img_nn = [j.rotate(180) for j in img_nn]
            info_aug['trans'] = True
    return img_in, img_tar, img_nn, info_aug
Example 10: random_flip
# Required import: from PIL import ImageOps [as alias]
# Or: from PIL.ImageOps import mirror [as alias]
def random_flip(image, calib, objects):
    if random.random() < 0.5:
        return image, calib, objects
    # Flip the image
    image = ImageOps.mirror(image)
    # Modify the calibration matrix accordingly
    width, _ = image.size
    calib[0, 2] = width - calib[0, 2]  # cx' = w - cx
    calib[0, 3] = width * calib[2, 3] - calib[0, 3]  # tx' = w*tz - tx
    # Flip object x-positions and headings
    flipped_objects = list()
    for obj in objects:
        position = [-obj.position[0]] + obj.position[1:]
        angle = math.atan2(math.sin(obj.angle), -math.cos(obj.angle))
        flipped_objects.append(utils.ObjectData(
            obj.classname, position, obj.dimensions, angle, obj.score
        ))
    return image, calib, flipped_objects
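Why the calibration update in this example works (assuming a KITTI-style 3x4 projection matrix whose first row is [fx, 0, cx, tx] and whose third row is [0, 0, 1, tz]): a point at lateral offset x and depth z projects to u = (fx*x + cx*z + tx) / (z + tz). Mirroring the image maps u to w - u while the flipped object sits at x' = -x; substituting both into the projection shows it holds again exactly when cx' = w - cx and tx' = w*tz - tx, which are the two assignments above.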
Example 11: __getitem__
# Required import: from PIL import ImageOps [as alias]
# Or: from PIL.ImageOps import mirror [as alias]
def __getitem__(self, index: int) -> Tuple[str, Tensor, float, Tensor, Tensor]:
    image_id = self._image_ids[index]
    annotation = self._image_id_to_annotation_dict[image_id]
    bboxes = [obj.bbox.tolist() for obj in annotation.objects if not obj.difficult]
    labels = [VOC2007.CATEGORY_TO_LABEL_DICT[obj.name] for obj in annotation.objects if not obj.difficult]
    bboxes = torch.tensor(bboxes, dtype=torch.float)
    labels = torch.tensor(labels, dtype=torch.long)
    image = Image.open(os.path.join(self._path_to_jpeg_images_dir, annotation.filename))
    # random flip in training mode only
    if self._mode == VOC2007.Mode.TRAIN and random.random() > 0.5:
        image = ImageOps.mirror(image)
        bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # indices 0 and 2 are `left` and `right` respectively
    image, scale = VOC2007.preprocess(image, self._image_min_side, self._image_max_side)
    bboxes *= scale
    return image_id, image, scale, bboxes, labels
Example 12: __getitem__
# Required import: from PIL import ImageOps [as alias]
# Or: from PIL.ImageOps import mirror [as alias]
def __getitem__(self, index: int) -> Tuple[str, Tensor, float, Tensor, Tensor]:
    image_id = self._image_ids[index]
    annotation = self._image_id_to_annotation_dict[image_id]
    bboxes = [obj.bbox.tolist() for obj in annotation.objects]
    labels = [COCO2017Animal.CATEGORY_TO_LABEL_DICT[COCO2017.LABEL_TO_CATEGORY_DICT[obj.label]] for obj in annotation.objects]  # map from the original `COCO2017` dataset
    bboxes = torch.tensor(bboxes, dtype=torch.float)
    labels = torch.tensor(labels, dtype=torch.long)
    image = Image.open(annotation.filename).convert('RGB')  # some images are grayscale
    # random flip in training mode only
    if self._mode == COCO2017Animal.Mode.TRAIN and random.random() > 0.5:
        image = ImageOps.mirror(image)
        bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # indices 0 and 2 are `left` and `right` respectively
    image, scale = COCO2017Animal.preprocess(image, self._image_min_side, self._image_max_side)
    bboxes *= scale
    return image_id, image, scale, bboxes, labels
Example 13: __getitem__
# Required import: from PIL import ImageOps [as alias]
# Or: from PIL.ImageOps import mirror [as alias]
def __getitem__(self, index: int) -> Tuple[str, Tensor, float, Tensor, Tensor]:
    image_id = self._image_ids[index]
    annotation = self._image_id_to_annotation_dict[image_id]
    bboxes = [obj.bbox.tolist() for obj in annotation.objects]
    labels = [COCO2017Person.CATEGORY_TO_LABEL_DICT[COCO2017.LABEL_TO_CATEGORY_DICT[obj.label]] for obj in annotation.objects]  # map from the original `COCO2017` dataset
    bboxes = torch.tensor(bboxes, dtype=torch.float)
    labels = torch.tensor(labels, dtype=torch.long)
    image = Image.open(annotation.filename).convert('RGB')  # some images are grayscale
    # random flip in training mode only
    if self._mode == COCO2017Person.Mode.TRAIN and random.random() > 0.5:
        image = ImageOps.mirror(image)
        bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # indices 0 and 2 are `left` and `right` respectively
    image, scale = COCO2017Person.preprocess(image, self._image_min_side, self._image_max_side)
    bboxes *= scale
    return image_id, image, scale, bboxes, labels
Example 14: __getitem__
# Required import: from PIL import ImageOps [as alias]
# Or: from PIL.ImageOps import mirror [as alias]
def __getitem__(self, index: int) -> Tuple[str, Tensor, float, Tensor, Tensor]:
    image_id = self._image_ids[index]
    annotation = self._image_id_to_annotation_dict[image_id]
    bboxes = [obj.bbox.tolist() for obj in annotation.objects]
    labels = [obj.label for obj in annotation.objects]
    bboxes = torch.tensor(bboxes, dtype=torch.float)
    labels = torch.tensor(labels, dtype=torch.long)
    image = Image.open(annotation.filename).convert('RGB')  # some images are grayscale
    # random flip in training mode only
    if self._mode == COCO2017.Mode.TRAIN and random.random() > 0.5:
        image = ImageOps.mirror(image)
        bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # indices 0 and 2 are `left` and `right` respectively
    image, scale = COCO2017.preprocess(image, self._image_min_side, self._image_max_side)
    bboxes *= scale
    return image_id, image, scale, bboxes, labels
Example 15: __getitem__
# Required import: from PIL import ImageOps [as alias]
# Or: from PIL.ImageOps import mirror [as alias]
def __getitem__(self, index: int) -> Tuple[str, Tensor, float, Tensor, Tensor]:
    image_id = self._image_ids[index]
    annotation = self._image_id_to_annotation_dict[image_id]
    bboxes = [obj.bbox.tolist() for obj in annotation.objects]
    labels = [VOC2007CatDog.CATEGORY_TO_LABEL_DICT[obj.name] for obj in annotation.objects]
    bboxes = torch.tensor(bboxes, dtype=torch.float)
    labels = torch.tensor(labels, dtype=torch.long)
    image = Image.open(os.path.join(self._path_to_jpeg_images_dir, annotation.filename))
    # random flip in training mode only
    if self._mode == VOC2007CatDog.Mode.TRAIN and random.random() > 0.5:
        image = ImageOps.mirror(image)
        bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # indices 0 and 2 are `left` and `right` respectively
    image, scale = VOC2007CatDog.preprocess(image, self._image_min_side, self._image_max_side)
    bboxes *= scale
    return image_id, image, scale, bboxes, labels