This article collects typical usage examples of the Python method albumentations.HorizontalFlip. If you have been wondering what albumentations.HorizontalFlip does and how to use it, the curated code examples below may help. You can also explore other members of the albumentations module.
The following presents 14 code examples of albumentations.HorizontalFlip, sorted by popularity by default.
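Before the collected examples, here is a minimal self-contained sketch (not taken from any of them) of the call pattern they all share: construct the transform, call it with the image as a keyword argument, and read the result back from the returned dict. The random input image is only a placeholder.

import numpy as np
import albumentations as A

image = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)  # stand-in RGB image
flip = A.HorizontalFlip(p=1.0)  # p=1.0 forces the flip so the effect is deterministic
flipped = flip(image=image)['image']
assert flipped.shape == image.shape  # geometry preserved, only left/right is mirrored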
Example 1: get_training_augmentation
# Required import: import albumentations [as alias]
# Or: from albumentations import HorizontalFlip [as alias]
def get_training_augmentation(resize_to=(320, 640), crop_size=(288, 576)):
    print('[get_training_augmentation] crop_size:', crop_size, ', resize_to:', resize_to)
    train_transform = [
        albu.HorizontalFlip(p=0.5),
        albu.VerticalFlip(p=0.5),
        albu.ShiftScaleRotate(scale_limit=0.20, rotate_limit=10, shift_limit=0.1, p=0.5,
                              border_mode=cv2.BORDER_CONSTANT, value=0),
        albu.GridDistortion(p=0.5),
        albu.Resize(*resize_to),
        albu.RandomCrop(*crop_size),
        albu.ChannelShuffle(),
        albu.InvertImg(),
        albu.ToGray(),
        albu.Normalize(),
    ]
    return albu.Compose(train_transform)
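A possible way to call the pipeline above (a sketch with dummy data, assuming `import albumentations as albu`, `import cv2`, and `import numpy as np`; not part of the original example):

aug = get_training_augmentation(resize_to=(320, 640), crop_size=(288, 576))
sample = aug(image=np.zeros((480, 960, 3), dtype=np.uint8),
             mask=np.zeros((480, 960), dtype=np.uint8))
image, mask = sample['image'], sample['mask']  # both end up 288x576 after Resize + RandomCrop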
Example 2: __init__
# Required import: import albumentations [as alias]
# Or: from albumentations import HorizontalFlip [as alias]
def __init__(
    self,
    input_key: str = "image",
    output_key: str = "rotation_factor",
    targets_key: str = None,
    rotate_probability: float = 1.0,
    hflip_probability: float = 0.5,
    one_hot_classes: int = None,
):
    """
    Args:
        input_key (str): input key to use from annotation dict
        output_key (str): output key to use to store the result
        targets_key (str): optional targets key to use from annotation dict
        rotate_probability (float): probability to apply the random 90-degree rotation
        hflip_probability (float): probability to apply the horizontal flip
        one_hot_classes (int): number of classes for one-hot encoding
            (stored multiplied by 8 to cover the rotation/flip combinations)
    """
    self.input_key = input_key
    self.output_key = output_key
    self.targets_key = targets_key
    self.rotate_probability = rotate_probability
    self.hflip_probability = hflip_probability
    self.rotate = albu.RandomRotate90()
    self.hflip = albu.HorizontalFlip()
    self.one_hot_classes = (
        one_hot_classes * 8 if one_hot_classes is not None else None
    )
Example 3: test_transform_pipeline_serialization
# Required import: import albumentations [as alias]
# Or: from albumentations import HorizontalFlip [as alias]
def test_transform_pipeline_serialization(seed, image, mask):
    aug = A.Compose(
        [
            A.OneOrOther(
                A.Compose(
                    [
                        A.Resize(1024, 1024),
                        A.RandomSizedCrop(min_max_height=(256, 1024), height=512, width=512, p=1),
                        A.OneOf(
                            [
                                A.RandomSizedCrop(min_max_height=(256, 512), height=384, width=384, p=0.5),
                                A.RandomSizedCrop(min_max_height=(256, 512), height=512, width=512, p=0.5),
                            ]
                        ),
                    ]
                ),
                A.Compose(
                    [
                        A.Resize(1024, 1024),
                        A.RandomSizedCrop(min_max_height=(256, 1025), height=256, width=256, p=1),
                        A.OneOf([A.HueSaturationValue(p=0.5), A.RGBShift(p=0.7)], p=1),
                    ]
                ),
            ),
            A.HorizontalFlip(p=1),
            A.RandomBrightnessContrast(p=0.5),
        ]
    )
    serialized_aug = A.to_dict(aug)
    deserialized_aug = A.from_dict(serialized_aug)
    set_seed(seed)
    aug_data = aug(image=image, mask=mask)
    set_seed(seed)
    deserialized_aug_data = deserialized_aug(image=image, mask=mask)
    assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
    assert np.array_equal(aug_data["mask"], deserialized_aug_data["mask"])
Example 4: test_transform_pipeline_serialization_with_bboxes
# Required import: import albumentations [as alias]
# Or: from albumentations import HorizontalFlip [as alias]
def test_transform_pipeline_serialization_with_bboxes(seed, image, bboxes, bbox_format, labels):
    aug = A.Compose(
        [
            A.OneOrOther(
                A.Compose([A.RandomRotate90(), A.OneOf([A.HorizontalFlip(p=0.5), A.VerticalFlip(p=0.5)])]),
                A.Compose([A.Rotate(p=0.5), A.OneOf([A.HueSaturationValue(p=0.5), A.RGBShift(p=0.7)], p=1)]),
            ),
            A.HorizontalFlip(p=1),
            A.RandomBrightnessContrast(p=0.5),
        ],
        bbox_params={"format": bbox_format, "label_fields": ["labels"]},
    )
    serialized_aug = A.to_dict(aug)
    deserialized_aug = A.from_dict(serialized_aug)
    set_seed(seed)
    aug_data = aug(image=image, bboxes=bboxes, labels=labels)
    set_seed(seed)
    deserialized_aug_data = deserialized_aug(image=image, bboxes=bboxes, labels=labels)
    assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
    assert np.array_equal(aug_data["bboxes"], deserialized_aug_data["bboxes"])
Example 5: test_transform_pipeline_serialization_with_keypoints
# Required import: import albumentations [as alias]
# Or: from albumentations import HorizontalFlip [as alias]
def test_transform_pipeline_serialization_with_keypoints(seed, image, keypoints, keypoint_format, labels):
    aug = A.Compose(
        [
            A.OneOrOther(
                A.Compose([A.RandomRotate90(), A.OneOf([A.HorizontalFlip(p=0.5), A.VerticalFlip(p=0.5)])]),
                A.Compose([A.Rotate(p=0.5), A.OneOf([A.HueSaturationValue(p=0.5), A.RGBShift(p=0.7)], p=1)]),
            ),
            A.HorizontalFlip(p=1),
            A.RandomBrightnessContrast(p=0.5),
        ],
        keypoint_params={"format": keypoint_format, "label_fields": ["labels"]},
    )
    serialized_aug = A.to_dict(aug)
    deserialized_aug = A.from_dict(serialized_aug)
    set_seed(seed)
    aug_data = aug(image=image, keypoints=keypoints, labels=labels)
    set_seed(seed)
    deserialized_aug_data = deserialized_aug(image=image, keypoints=keypoints, labels=labels)
    assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
    assert np.array_equal(aug_data["keypoints"], deserialized_aug_data["keypoints"])
Example 6: augment
# Required import: import albumentations [as alias]
# Or: from albumentations import HorizontalFlip [as alias]
def augment(image, boxes):
    h, w, _ = image.shape
    labels, boxes_coord = boxes[:, 0], boxes[:, 1:]
    labels = labels.tolist()
    boxes_coord = boxes_coord * h  # convert to coordinates at the original image scale (un-normalized)
    boxes_coord[:, 0] = np.clip(boxes_coord[:, 0] - boxes_coord[:, 2] / 2, a_min=0, a_max=None)  # make sure x_min and y_min are valid
    boxes_coord[:, 1] = np.clip(boxes_coord[:, 1] - boxes_coord[:, 3] / 2, a_min=0, a_max=None)
    boxes_coord = boxes_coord.tolist()  # [x_min, y_min, width, height]
    # configure the augmentations here
    aug = A.Compose([
        A.HorizontalFlip(p=0.5),
        # A.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=10, val_shift_limit=10, p=0.5),
        # A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=5, border_mode=0, p=0.5)
    ], bbox_params={'format': 'coco', 'label_fields': ['category_id']})
    augmented = aug(image=image, bboxes=boxes_coord, category_id=labels)
    # if the augmentation removed every box, return the original image unchanged
    if augmented['bboxes']:
        image = augmented['image']
        boxes_coord = np.array(augmented['bboxes'])  # [x_min, y_min, w, h] -> [x_center, y_center, w, h]
        boxes_coord[:, 0] = boxes_coord[:, 0] + boxes_coord[:, 2] / 2
        boxes_coord[:, 1] = boxes_coord[:, 1] + boxes_coord[:, 3] / 2
        boxes_coord = boxes_coord / h
        labels = np.array(augmented['category_id'])[:, None]
        boxes = np.concatenate((labels, boxes_coord), 1)
    return image, boxes
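For illustration, a hypothetical call to augment with dummy data. Since the function rescales the normalized coordinates by the image height only, this sketch assumes a square (letterboxed) input and boxes given as [class, x_center, y_center, w, h] with values in [0, 1]:

img = np.zeros((416, 416, 3), dtype=np.uint8)   # square dummy image
boxes = np.array([[0.0, 0.5, 0.5, 0.2, 0.3]])   # one box: class 0, centered, 0.2 x 0.3
img_aug, boxes_aug = augment(img, boxes)         # boxes_aug keeps the [class, x, y, w, h] layout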
Example 7: test_rotate
# Required import: import albumentations [as alias]
# Or: from albumentations import HorizontalFlip [as alias]
def test_rotate(self):
    image = cv2.imread('/input/tests/data/dot.png')
    aug = HorizontalFlip(p=1)
    image_rotated = aug(image=image)['image']
Example 8: parse_albu_short
# Required import: import albumentations [as alias]
# Or: from albumentations import HorizontalFlip [as alias]
def parse_albu_short(config, always_apply=False):
    if isinstance(config, str):
        if config == 'hflip':
            return A.HorizontalFlip(always_apply=always_apply)
        if config == 'vflip':
            return A.VerticalFlip(always_apply=always_apply)
        if config == 'transpose':
            return A.Transpose(always_apply=always_apply)
        raise Exception(f'Unknown augmentation {config}')
    assert type(config) == dict
    return parse_albu([config])
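For illustration, the string shortcuts could be used as below (the dict branch defers to a parse_albu helper defined elsewhere in that project):

hflip = parse_albu_short('hflip')                             # A.HorizontalFlip with its default probability
hflip_forced = parse_albu_short('hflip', always_apply=True)   # always applied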
Example 9: inverse
# Required import: import albumentations [as alias]
# Or: from albumentations import HorizontalFlip [as alias]
def inverse(self, a: np.array):
    last_dim = len(a.shape) - 1
    for t in self.tfms:
        if isinstance(t, A.HorizontalFlip):
            a = flip(a, last_dim)
        elif isinstance(t, A.VerticalFlip):
            a = flip(a, last_dim - 1)
        elif isinstance(t, A.Transpose):
            axis = (0, 1, 3, 2) if len(a.shape) == 4 else (0, 2, 1)
            a = a.permute(*axis)
    return a
Example 10: test_force_apply
# Required import: import albumentations [as alias]
# Or: from albumentations import HorizontalFlip [as alias]
def test_force_apply():
    """
    Unit test for https://github.com/albumentations-team/albumentations/issues/189
    """
    aug = A.Compose(
        [
            A.OneOrOther(
                A.Compose(
                    [
                        A.RandomSizedCrop(min_max_height=(256, 1025), height=512, width=512, p=1),
                        A.OneOf(
                            [
                                A.RandomSizedCrop(min_max_height=(256, 512), height=384, width=384, p=0.5),
                                A.RandomSizedCrop(min_max_height=(256, 512), height=512, width=512, p=0.5),
                            ]
                        ),
                    ]
                ),
                A.Compose(
                    [
                        A.RandomSizedCrop(min_max_height=(256, 1025), height=256, width=256, p=1),
                        A.OneOf([A.HueSaturationValue(p=0.5), A.RGBShift(p=0.7)], p=1),
                    ]
                ),
            ),
            A.HorizontalFlip(p=1),
            A.RandomBrightnessContrast(p=0.5),
        ]
    )
    res = aug(image=np.zeros((1248, 1248, 3), dtype=np.uint8))
    assert res["image"].shape[0] in (256, 384, 512)
    assert res["image"].shape[1] in (256, 384, 512)
Example 11: empty_aug1
# Required import: import albumentations [as alias]
# Or: from albumentations import HorizontalFlip [as alias]
def empty_aug1():
    return [
        HorizontalFlip(p=0.001),
        # IAAPiecewiseAffine(p=1.0),
        OneOf(
            [
                # OpticalDistortion(p=0.1),
                # GridDistortion(p=0.1),
                # IAAPerspective(p=1.0),
                # IAAAffine(p=1.0),
                IAAPiecewiseAffine(p=1.0)
            ],
            p=0.0,
        ),
    ]
Example 12: empty_aug3
# Required import: import albumentations [as alias]
# Or: from albumentations import HorizontalFlip [as alias]
def empty_aug3():
    return [
        # HorizontalFlip(p=0.001),
        # IAAPiecewiseAffine(p=1.0),
        OneOf(
            [
                OpticalDistortion(p=0.1),
                GridDistortion(p=0.1),
                # IAAPerspective(p=1.0),
                # IAAAffine(p=1.0),
                # IAAPiecewiseAffine(p=1.0),
            ],
            p=0.0,
        )
    ]
Example 13: get_augumentation
# Required import: import albumentations [as alias]
# Or: from albumentations import HorizontalFlip [as alias]
def get_augumentation(phase, width=512, height=512, min_area=0., min_visibility=0.):
    list_transforms = []
    if phase == 'train':
        list_transforms.extend([
            albu.augmentations.transforms.LongestMaxSize(
                max_size=width, always_apply=True),
            albu.PadIfNeeded(min_height=height, min_width=width,
                             always_apply=True, border_mode=0, value=[0, 0, 0]),
            albu.augmentations.transforms.RandomResizedCrop(
                height=height,
                width=width, p=0.3),
            albu.augmentations.transforms.Flip(),
            albu.augmentations.transforms.Transpose(),
            albu.OneOf([
                albu.RandomBrightnessContrast(brightness_limit=0.5,
                                              contrast_limit=0.4),
                albu.RandomGamma(gamma_limit=(50, 150)),
                albu.NoOp()
            ]),
            albu.OneOf([
                albu.RGBShift(r_shift_limit=20, b_shift_limit=15,
                              g_shift_limit=15),
                albu.HueSaturationValue(hue_shift_limit=5,
                                        sat_shift_limit=5),
                albu.NoOp()
            ]),
            albu.CLAHE(p=0.8),
            albu.HorizontalFlip(p=0.5),
            albu.VerticalFlip(p=0.5),
        ])
    if phase == 'test' or phase == 'valid':
        list_transforms.extend([
            albu.Resize(height=height, width=width)
        ])
    list_transforms.extend([
        albu.Normalize(mean=(0.485, 0.456, 0.406),
                       std=(0.229, 0.224, 0.225), p=1),
        ToTensor()
    ])
    if phase == 'test':
        return albu.Compose(list_transforms)
    return albu.Compose(list_transforms,
                        bbox_params=albu.BboxParams(format='pascal_voc', min_area=min_area,
                                                    min_visibility=min_visibility,
                                                    label_fields=['category_id']))
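A hypothetical training-phase call (a sketch assuming `import albumentations as albu`, a ToTensor transform such as the one from albumentations.pytorch, and dummy data; box coordinates are pascal_voc [x_min, y_min, x_max, y_max]):

train_tfms = get_augumentation('train', width=512, height=512)
sample = train_tfms(image=np.zeros((480, 640, 3), dtype=np.uint8),
                    bboxes=[[50, 60, 200, 220]],
                    category_id=[1])
img_tensor = sample['image']  # normalized tensor; 'bboxes' and 'category_id' are transformed alongside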
Example 14: __init__
# Required import: import albumentations [as alias]
# Or: from albumentations import HorizontalFlip [as alias]
def __init__(self,
             base_dir='../../data/apolloscape',
             road_record_list=[{'road': 'road02_seg', 'record': [22, 23, 24, 25, 26]},
                               {'road': 'road03_seg', 'record': [7, 8, 9, 10, 11, 12]}],
             split='train',
             ignore_index=255,
             debug=False):
    self.debug = debug
    self.base_dir = Path(base_dir)
    self.ignore_index = ignore_index
    self.split = split
    self.img_paths = []
    self.lbl_paths = []
    for road_record in road_record_list:
        self.road_dir = self.base_dir / Path(road_record['road'])
        self.record_list = road_record['record']
        for record in self.record_list:
            img_paths_tmp = self.road_dir.glob(f'ColorImage/Record{record:03}/Camera 5/*.jpg')
            lbl_paths_tmp = self.road_dir.glob(f'Label/Record{record:03}/Camera 5/*.png')
            img_paths_basenames = {Path(img_path.name).stem for img_path in img_paths_tmp}
            lbl_paths_basenames = {Path(lbl_path.name).stem.replace('_bin', '') for lbl_path in lbl_paths_tmp}
            intersection_basenames = img_paths_basenames & lbl_paths_basenames
            img_paths_intersection = [self.road_dir / Path(f'ColorImage/Record{record:03}/Camera 5/{intersection_basename}.jpg')
                                      for intersection_basename in intersection_basenames]
            lbl_paths_intersection = [self.road_dir / Path(f'Label/Record{record:03}/Camera 5/{intersection_basename}_bin.png')
                                      for intersection_basename in intersection_basenames]
            self.img_paths += img_paths_intersection
            self.lbl_paths += lbl_paths_intersection
    self.img_paths.sort()
    self.lbl_paths.sort()
    print(len(self.img_paths), len(self.lbl_paths))
    assert len(self.img_paths) == len(self.lbl_paths)
    self.resizer = albu.Resize(height=512, width=1024)
    self.augmenter = albu.Compose([albu.HorizontalFlip(p=0.5),
                                   # albu.RandomRotate90(p=0.5),
                                   albu.Rotate(limit=10, p=0.5),
                                   # albu.CLAHE(p=0.2),
                                   # albu.RandomContrast(p=0.2),
                                   # albu.RandomBrightness(p=0.2),
                                   # albu.RandomGamma(p=0.2),
                                   # albu.GaussNoise(p=0.2),
                                   # albu.Cutout(p=0.2)
                                   ])
    self.img_transformer = transforms.Compose([transforms.ToTensor(),
                                               transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                                    std=[0.229, 0.224, 0.225])])
    self.lbl_transformer = torch.LongTensor