本文整理汇总了Python中albumentations.ShiftScaleRotate方法的典型用法代码示例。如果您正苦于以下问题:Python albumentations.ShiftScaleRotate方法的具体用法?Python albumentations.ShiftScaleRotate怎么用?Python albumentations.ShiftScaleRotate使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块albumentations的用法示例。
在下文中一共展示了albumentations.ShiftScaleRotate方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_training_augmentation
# 需要导入模块: import albumentations [as 别名]
# 或者: from albumentations import ShiftScaleRotate [as 别名]
def get_training_augmentation(resize_to=(320, 640), crop_size=(288, 576)):
    """Build the training-time augmentation pipeline.

    Args:
        resize_to: (height, width) the image is resized to after the
            geometric transforms.
        crop_size: (height, width) of the random crop taken after resizing.

    Returns:
        An ``albu.Compose`` applying flips, shift/scale/rotate, grid
        distortion, resize + random crop, color perturbations, and
        finally normalization.
    """
    print('[get_training_augmentation] crop_size:', crop_size, ', resize_to:', resize_to)
    # Geometric transforms first, then photometric ones; Normalize last.
    pipeline = albu.Compose([
        albu.HorizontalFlip(p=0.5),
        albu.VerticalFlip(p=0.5),
        albu.ShiftScaleRotate(scale_limit=0.20, rotate_limit=10, shift_limit=0.1,
                              p=0.5, border_mode=cv2.BORDER_CONSTANT, value=0),
        albu.GridDistortion(p=0.5),
        albu.Resize(*resize_to),
        albu.RandomCrop(*crop_size),
        albu.ChannelShuffle(),
        albu.InvertImg(),
        albu.ToGray(),
        albu.Normalize(),
    ])
    return pipeline
示例2: augment
# 需要导入模块: import albumentations [as 别名]
# 或者: from albumentations import ShiftScaleRotate [as 别名]
def augment(image, boxes):
    """Apply augmentation to an image and its bounding boxes.

    Args:
        image: HxWxC image array.
        boxes: array of shape (N, 5) — column 0 is the class label, columns
            1-4 are normalized center-format boxes (x_c, y_c, w, h).
            NOTE(review): coordinates are scaled by the image *height* only
            — presumably the network input is square; confirm with callers.

    Returns:
        (image, boxes) — augmented, or the originals if augmentation
        dropped every box.
    """
    h, w, _ = image.shape
    labels = boxes[:, 0].tolist()
    # Back to pixel coordinates (un-normalize).
    coords = boxes[:, 1:] * h
    # Center format -> top-left format; clip so x_min / y_min stay valid.
    coords[:, 0] = np.clip(coords[:, 0] - coords[:, 2] / 2, a_min=0, a_max=None)
    coords[:, 1] = np.clip(coords[:, 1] - coords[:, 3] / 2, a_min=0, a_max=None)
    coords = coords.tolist()  # [x_min, y_min, width, height] (coco format)

    # Augmentations are configured here.
    aug = A.Compose([
        A.HorizontalFlip(p=0.5),
        # A.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=10, val_shift_limit=10, p=0.5),
        # A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=5, border_mode=0, p=0.5)
    ], bbox_params={'format': 'coco', 'label_fields': ['category_id']})
    augmented = aug(image=image, bboxes=coords, category_id=labels)

    # If augmentation removed every box, fall through and return the inputs.
    if augmented['bboxes']:
        image = augmented['image']
        coords = np.array(augmented['bboxes'])
        # Top-left format -> center format.
        coords[:, 0] = coords[:, 0] + coords[:, 2] / 2
        coords[:, 1] = coords[:, 1] + coords[:, 3] / 2
        coords = coords / h  # re-normalize
        labels = np.array(augmented['category_id'])[:, None]
        boxes = np.concatenate((labels, coords), 1)
    return image, boxes
示例3: main
# 需要导入模块: import albumentations [as 别名]
# 或者: from albumentations import ShiftScaleRotate [as 别名]
def main():
    """Demo: detect corners, box them, and display ten augmented variants."""

    def _show(img):
        # Render one image with matplotlib (converting OpenCV channel order).
        plt.figure(figsize=(10, 10))
        plt.imshow(cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
        plt.tight_layout()
        plt.show()

    image = cv2.imread("images/image_1.jpg")
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    keypoints = cv2.goodFeaturesToTrack(
        gray, maxCorners=100, qualityLevel=0.5, minDistance=5
    ).squeeze(1)
    # A 20x20 box centered on each keypoint, in pascal_voc
    # (x_min, y_min, x_max, y_max) format.
    bboxes = [(x - 10, y - 10, x + 10, y + 10) for x, y in keypoints]

    _show(visualize(image, keypoints, bboxes))

    aug = A.Compose(
        [A.ShiftScaleRotate(scale_limit=0.1, shift_limit=0.2, rotate_limit=10, always_apply=True)],
        bbox_params=A.BboxParams(format="pascal_voc", label_fields=["bbox_labels"]),
        keypoint_params=A.KeypointParams(format="xy"),
    )
    for _i in range(10):
        data = aug(image=image, keypoints=keypoints, bboxes=bboxes, bbox_labels=np.ones(len(bboxes)))
        _show(visualize(data["image"], data["keypoints"], data["bboxes"]))
示例4: test_shift_scale_rotate_interpolation
# 需要导入模块: import albumentations [as 别名]
# 或者: from albumentations import ShiftScaleRotate [as 别名]
def test_shift_scale_rotate_interpolation(interpolation):
    """ShiftScaleRotate must apply the requested interpolation to the image
    while always using nearest-neighbor for the mask."""
    img = np.random.randint(low=0, high=256, size=(100, 100, 3), dtype=np.uint8)
    msk = np.random.randint(low=0, high=2, size=(100, 100), dtype=np.uint8)

    transform = A.ShiftScaleRotate(
        shift_limit=(0.2, 0.2), scale_limit=(1.1, 1.1), rotate_limit=(45, 45),
        interpolation=interpolation, p=1
    )
    result = transform(image=img, mask=msk)

    # scale_limit of 1.1 means an applied scale factor of 1 + 1.1 = 2.1.
    geo = dict(angle=45, scale=2.1, dx=0.2, dy=0.2, border_mode=cv2.BORDER_REFLECT_101)
    expected_img = F.shift_scale_rotate(img, interpolation=interpolation, **geo)
    expected_msk = F.shift_scale_rotate(msk, interpolation=cv2.INTER_NEAREST, **geo)

    assert np.array_equal(result["image"], expected_img)
    assert np.array_equal(result["mask"], expected_msk)