

Python albumentations.Resize Method Code Examples

This article collects and summarizes typical usage examples of the albumentations.Resize method in Python. If you are wondering how albumentations.Resize is used in practice, what it is for, or what real-world code that calls it looks like, the curated examples below may help. You can also explore further usage examples from the albumentations package that the method belongs to.


The following presents 8 code examples of the albumentations.Resize method, sorted by popularity by default.
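Before the examples, here is a minimal, self-contained sketch of how albu.Resize is typically constructed and applied. The image and mask arrays and the target size are arbitrary placeholders, not taken from any of the projects below.

import albumentations as albu
import numpy as np

# Placeholder inputs: a random RGB image and a single-channel mask.
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
mask = np.random.randint(0, 2, (480, 640), dtype=np.uint8)

# Resize both image and mask to height=320, width=640.
transform = albu.Compose([albu.Resize(height=320, width=640)])
result = transform(image=image, mask=mask)
print(result["image"].shape)  # (320, 640, 3)
print(result["mask"].shape)   # (320, 640)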

Example 1: get_training_augmentation

# Required module: import albumentations [as alias]
# Or: from albumentations import Resize [as alias]
def get_training_augmentation(resize_to=(320,640), crop_size=(288,576)):
    print('[get_training_augmentation] crop_size:', crop_size, ', resize_to:', resize_to) 

    train_transform = [
        albu.HorizontalFlip(p=0.5),
        albu.VerticalFlip(p=0.5),
        albu.ShiftScaleRotate(scale_limit=0.20, rotate_limit=10, shift_limit=0.1, p=0.5, border_mode=cv2.BORDER_CONSTANT, value=0),
        albu.GridDistortion(p=0.5),
        albu.Resize(*resize_to),
        albu.RandomCrop(*crop_size),
        albu.ChannelShuffle(),
        albu.InvertImg(),
        albu.ToGray(),
        albu.Normalize(),
    ]

    return albu.Compose(train_transform) 
Developer ID: pudae, Project: kaggle-understanding-clouds, Lines of code: 19, Source file: cloud_transform.py
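A hypothetical usage note (not part of the original repository): the returned albu.Compose object is applied by calling it with keyword arguments; image and mask below are placeholder NumPy arrays.

# Hypothetical usage of the pipeline above; `image` and `mask` are placeholder arrays.
aug = get_training_augmentation(resize_to=(320, 640), crop_size=(288, 576))
augmented = aug(image=image, mask=mask)
image_aug, mask_aug = augmented["image"], augmented["mask"]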

Example 2: test_resize_keypoints

# Required module: import albumentations [as alias]
# Or: from albumentations import Resize [as alias]
def test_resize_keypoints():
    img = np.random.randint(0, 256, [50, 10], np.uint8)
    keypoints = [(9, 5, 0, 0)]

    aug = A.Resize(height=100, width=5, p=1)
    result = aug(image=img, keypoints=keypoints)
    assert result["keypoints"] == [(4.5, 10, 0, 0)]

    aug = A.Resize(height=50, width=10, p=1)
    result = aug(image=img, keypoints=keypoints)
    assert result["keypoints"] == [(9, 5, 0, 0)] 
Developer ID: albumentations-team, Project: albumentations, Lines of code: 13, Source file: test_transforms.py
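For clarity, the expected keypoint in the first assertion above follows from the per-axis scale factors; the numbers below simply mirror the test.

# Resize scales keypoint x by new_width / old_width and y by new_height / old_height.
scale_x = 5 / 10     # 0.5
scale_y = 100 / 50   # 2.0
assert (9 * scale_x, 5 * scale_y) == (4.5, 10)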

Example 3: test_transform_pipeline_serialization

# Required module: import albumentations [as alias]
# Or: from albumentations import Resize [as alias]
def test_transform_pipeline_serialization(seed, image, mask):
    aug = A.Compose(
        [
            A.OneOrOther(
                A.Compose(
                    [
                        A.Resize(1024, 1024),
                        A.RandomSizedCrop(min_max_height=(256, 1024), height=512, width=512, p=1),
                        A.OneOf(
                            [
                                A.RandomSizedCrop(min_max_height=(256, 512), height=384, width=384, p=0.5),
                                A.RandomSizedCrop(min_max_height=(256, 512), height=512, width=512, p=0.5),
                            ]
                        ),
                    ]
                ),
                A.Compose(
                    [
                        A.Resize(1024, 1024),
                        A.RandomSizedCrop(min_max_height=(256, 1025), height=256, width=256, p=1),
                        A.OneOf([A.HueSaturationValue(p=0.5), A.RGBShift(p=0.7)], p=1),
                    ]
                ),
            ),
            A.HorizontalFlip(p=1),
            A.RandomBrightnessContrast(p=0.5),
        ]
    )
    serialized_aug = A.to_dict(aug)
    deserialized_aug = A.from_dict(serialized_aug)
    set_seed(seed)
    aug_data = aug(image=image, mask=mask)
    set_seed(seed)
    deserialized_aug_data = deserialized_aug(image=image, mask=mask)
    assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
    assert np.array_equal(aug_data["mask"], deserialized_aug_data["mask"]) 
Developer ID: albumentations-team, Project: albumentations, Lines of code: 38, Source file: test_serialization.py
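Since A.to_dict produces a plain dictionary, the serialized pipeline can also be written to disk. This is a minimal sketch using the standard json module, with an arbitrary pipeline and a hypothetical file path.

import json
import albumentations as A

# An arbitrary pipeline; the transforms and sizes are placeholders.
aug = A.Compose([A.Resize(256, 256), A.HorizontalFlip(p=0.5)])

# Serialize to a plain dict, save it as JSON, and rebuild the pipeline later.
with open("augmentation.json", "w") as f:   # hypothetical path
    json.dump(A.to_dict(aug), f)
with open("augmentation.json") as f:
    restored = A.from_dict(json.load(f))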

Example 4: get_test_augmentation

# Required module: import albumentations [as alias]
# Or: from albumentations import Resize [as alias]
def get_test_augmentation(resize_to=(320,640)):
    """Add paddings to make image shape divisible by 32"""
    test_transform = [
        albu.Resize(*resize_to),
        albu.Normalize(),
    ]
    return albu.Compose(test_transform) 
Developer ID: pudae, Project: kaggle-understanding-clouds, Lines of code: 9, Source file: cloud_transform.py

Example 5: tta_transform

# Required module: import albumentations [as alias]
# Or: from albumentations import Resize [as alias]
def tta_transform(split,
                  size=512,
                  num_tta=4,
                  per_image_norm=False,
                  **_):
    resize = Resize(height=size, width=size, always_apply=True)
    means = np.array([127.5, 127.5, 127.5, 127.5])
    stds = np.array([255.0, 255.0, 255.0, 255.0])

    def transform(image):
        if size != image.shape[0]:
            image = resize(image=image)['image']
        image = image.astype(np.float32)

        if per_image_norm:
            mean = np.mean(image.reshape(-1, 4), axis=0)
            std = np.std(image.reshape(-1, 4), axis=0)
            image -= mean
            image /= (std + 0.0000001)
        else:
            image -= means
            image /= stds

        assert num_tta == 4 or num_tta == 8
        images = [image]
        images.append(np.fliplr(image))
        images.append(np.flipud(image))
        images.append(np.fliplr(images[-1]))
        if num_tta == 8:
            images.append(np.transpose(image, (1,0,2)))
            images.append(np.flipud(images[-1]))
            images.append(np.fliplr(images[-2]))
            images.append(np.flipud(images[-1]))

        images = np.stack(images, axis=0)
        images = np.transpose(images, (0, 3, 1, 2))
        assert images.shape == (num_tta, 4, size, size), 'shape: {}'.format(images.shape)

        return images

    return transform 
Developer ID: pudae, Project: kaggle-hpa, Lines of code: 43, Source file: tta_transform.py
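A hypothetical call (the input image is a placeholder): the returned closure turns a single 4-channel image into a stack of test-time-augmented views.

# Hypothetical usage; a random 4-channel image stands in for an HPA sample.
transform = tta_transform('test', size=512, num_tta=4)
image = np.random.randint(0, 256, (512, 512, 4), dtype=np.uint8)
batch = transform(image)
print(batch.shape)  # (4, 4, 512, 512) -> (num_tta, channels, height, width)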

Example 6: get_augumentation

# Required module: import albumentations [as alias]
# Or: from albumentations import Resize [as alias]
def get_augumentation(phase, width=512, height=512, min_area=0., min_visibility=0.):
    list_transforms = []
    if phase == 'train':
        list_transforms.extend([
            albu.augmentations.transforms.LongestMaxSize(
                max_size=width, always_apply=True),
            albu.PadIfNeeded(min_height=height, min_width=width,
                             always_apply=True, border_mode=0, value=[0, 0, 0]),
            albu.augmentations.transforms.RandomResizedCrop(
                height=height,
                width=width, p=0.3),
            albu.augmentations.transforms.Flip(),
            albu.augmentations.transforms.Transpose(),
            albu.OneOf([
                albu.RandomBrightnessContrast(brightness_limit=0.5,
                                              contrast_limit=0.4),
                albu.RandomGamma(gamma_limit=(50, 150)),
                albu.NoOp()
            ]),
            albu.OneOf([
                albu.RGBShift(r_shift_limit=20, b_shift_limit=15,
                              g_shift_limit=15),
                albu.HueSaturationValue(hue_shift_limit=5,
                                        sat_shift_limit=5),
                albu.NoOp()
            ]),
            albu.CLAHE(p=0.8),
            albu.HorizontalFlip(p=0.5),
            albu.VerticalFlip(p=0.5),
        ])
    if(phase == 'test' or phase == 'valid'):
        list_transforms.extend([
            albu.Resize(height=height, width=width)
        ])
    list_transforms.extend([
        albu.Normalize(mean=(0.485, 0.456, 0.406),
                       std=(0.229, 0.224, 0.225), p=1),
        ToTensor()
    ])
    if(phase == 'test'):
        return albu.Compose(list_transforms)
    return albu.Compose(list_transforms, bbox_params=albu.BboxParams(format='pascal_voc', min_area=min_area,
                                                                     min_visibility=min_visibility, label_fields=['category_id'])) 
Developer ID: toandaominh1997, Project: EfficientDet.Pytorch, Lines of code: 45, Source file: augmentation.py
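A hypothetical call to the pipeline above with bounding boxes; the box coordinates and label are placeholders, not values from the original project. Because bbox_params is set, boxes and their label_fields entry must be passed alongside the image.

# Hypothetical usage with pascal_voc boxes; `image` is a placeholder HxWx3 uint8 array.
aug = get_augumentation('train', width=512, height=512)
sample = aug(
    image=image,
    bboxes=[[25, 40, 200, 180]],   # [x_min, y_min, x_max, y_max]
    category_id=[1],               # one label per box
)
image_t, boxes_t = sample['image'], sample['bboxes']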

Example 7: policy_transform

# Required module: import albumentations [as alias]
# Or: from albumentations import Resize [as alias]
def policy_transform(split,
                     policies=None,
                     size=512,
                     per_image_norm=False,
                     mean_std=None,
                     **kwargs):
  means = np.array([127.5, 127.5, 127.5, 127.5])
  stds = np.array([255.0, 255.0, 255.0, 255.0])

  base_aug = Compose([
    RandomRotate90(),
    Flip(),
    Transpose(),
  ])

  if policies is None:
    policies = []

  if isinstance(policies, str):
    with open(policies, 'r') as fid:
      policies = eval(fid.read())
      policies = itertools.chain.from_iterable(policies)

  aug_list = []
  for policy in policies:
    op_1, params_1 = policy[0]
    op_2, params_2 = policy[1]
    aug = Compose([
      globals().get(op_1)(**params_1),
      globals().get(op_2)(**params_2),
    ])
    aug_list.append(aug)

  print('len(aug_list):', len(aug_list))
  resize = Resize(height=size, width=size, always_apply=True)

  def transform(image):
    if split == 'train':
      image = base_aug(image=image)['image']
      if len(aug_list) > 0:
        aug = random.choice(aug_list)
        image = aug(image=image)['image']
      image = resize(image=image)['image']
    else:
      if size != image.shape[0]:
        image = resize(image=image)['image']

    image = image.astype(np.float32)
    if per_image_norm:
        mean = np.mean(image.reshape(-1, 4), axis=0)
        std = np.std(image.reshape(-1, 4), axis=0)
        image -= mean
        image /= (std + 0.0000001)
    else:
        image -= means
        image /= stds
    image = np.transpose(image, (2, 0, 1))

    return image

  return transform 
Developer ID: pudae, Project: kaggle-hpa, Lines of code: 63, Source file: policy_transform.py
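When policies is given as a path, the file is read with eval(), so it must contain a Python literal: a nested list that itertools.chain.from_iterable flattens into individual policies, each being two (transform_name, params) pairs resolved through globals(). The contents below are a hypothetical illustration and assume the named transforms are available in that module's namespace (e.g. via from albumentations import *).

# Hypothetical contents of a policies file (a Python literal, read with eval()).
[
    [   # one group of policies; groups are flattened by chain.from_iterable
        [('RandomBrightnessContrast', {'p': 1.0}), ('Blur', {'blur_limit': 3, 'p': 1.0})],
        [('RandomGamma', {'gamma_limit': (80, 120), 'p': 1.0}), ('GaussNoise', {'p': 1.0})],
    ],
]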

Example 8: __init__

# Required module: import albumentations [as alias]
# Or: from albumentations import Resize [as alias]
def __init__(self,
                 base_dir='../../data/apolloscape',
                 road_record_list=[{'road':'road02_seg','record':[22, 23, 24, 25, 26]}, {'road':'road03_seg', 'record':[7, 8, 9, 10, 11, 12]}],
                 split='train',
                 ignore_index=255,
                 debug=False):
        self.debug = debug
        self.base_dir = Path(base_dir)
        self.ignore_index = ignore_index
        self.split = split
        self.img_paths = []
        self.lbl_paths = []

        for road_record in road_record_list:
          self.road_dir = self.base_dir / Path(road_record['road'])
          self.record_list = road_record['record']

          for record in self.record_list:
            img_paths_tmp = self.road_dir.glob(f'ColorImage/Record{record:03}/Camera 5/*.jpg')
            lbl_paths_tmp = self.road_dir.glob(f'Label/Record{record:03}/Camera 5/*.png')

            img_paths_basenames = {Path(img_path.name).stem for img_path in img_paths_tmp}
            lbl_paths_basenames = {Path(lbl_path.name).stem.replace('_bin', '') for lbl_path in lbl_paths_tmp}

            intersection_basenames = img_paths_basenames & lbl_paths_basenames

            img_paths_intersection = [self.road_dir / Path(f'ColorImage/Record{record:03}/Camera 5/{intersection_basename}.jpg')
                                      for intersection_basename in intersection_basenames]
            lbl_paths_intersection = [self.road_dir / Path(f'Label/Record{record:03}/Camera 5/{intersection_basename}_bin.png')
                                      for intersection_basename in intersection_basenames]

            self.img_paths += img_paths_intersection
            self.lbl_paths += lbl_paths_intersection

        self.img_paths.sort()
        self.lbl_paths.sort()
        print(len(self.img_paths), len(self.lbl_paths))
        assert len(self.img_paths) == len(self.lbl_paths)

        self.resizer = albu.Resize(height=512, width=1024)
        self.augmenter = albu.Compose([albu.HorizontalFlip(p=0.5),
                                       # albu.RandomRotate90(p=0.5),
                                       albu.Rotate(limit=10, p=0.5),
                                       # albu.CLAHE(p=0.2),
                                       # albu.RandomContrast(p=0.2),
                                       # albu.RandomBrightness(p=0.2),
                                       # albu.RandomGamma(p=0.2),
                                       # albu.GaussNoise(p=0.2),
                                       # albu.Cutout(p=0.2)
                                       ])
        self.img_transformer = transforms.Compose([transforms.ToTensor(),
                                                   transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                                        std=[0.229, 0.224, 0.225])])
        self.lbl_transformer = torch.LongTensor 
Developer ID: nyoki-mtl, Project: pytorch-segmentation, Lines of code: 56, Source file: apolloscape.py
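For context, a plausible __getitem__ for this dataset could combine the resizer, augmenter, and transformers defined above. This is an illustrative sketch only, not the original repository's implementation; it assumes PIL.Image and NumPy are imported.

# Illustrative only -- not the original repository's __getitem__.
def __getitem__(self, index):
    img = np.array(Image.open(self.img_paths[index]))
    lbl = np.array(Image.open(self.lbl_paths[index]))

    resized = self.resizer(image=img, mask=lbl)
    img, lbl = resized['image'], resized['mask']

    if self.split == 'train':
        augmented = self.augmenter(image=img, mask=lbl)
        img, lbl = augmented['image'], augmented['mask']

    return self.img_transformer(img), self.lbl_transformer(lbl)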


Note: The albumentations.Resize method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please refer to the corresponding project's License before distributing or using the code, and do not reproduce this article without permission.