

Python functional.hflip Method Code Examples

This article collects and summarizes typical usage examples of the torchvision.transforms.functional.hflip method in Python. If you are wondering how functional.hflip is used in practice, or are looking for concrete examples of calling it, the curated code samples below may help. You can also explore further usage examples for the containing module, torchvision.transforms.functional.


The following presents 15 code examples of the functional.hflip method, sorted by popularity by default.
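Before the examples, here is a minimal sketch of the basic call, assuming a PIL image as input (recent torchvision releases also accept tensors); the file names are placeholders for illustration only:

from PIL import Image
import torchvision.transforms.functional as F

# Open any image (placeholder path) and mirror it left-to-right.
img = Image.open("example.jpg")
flipped = F.hflip(img)   # returns a new, horizontally flipped PIL Image
flipped.save("example_flipped.jpg")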

Example 1: flip

# Required module: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import hflip [as alias]
def flip(image, boxes):
    """
    Flip image horizontally.

    :param image: image, a PIL Image
    :param boxes: bounding boxes in boundary coordinates, a tensor of dimensions (n_objects, 4)
    :return: flipped image, updated bounding box coordinates
    """
    # Flip image
    new_image = FT.hflip(image)

    # Flip boxes
    new_boxes = boxes
    new_boxes[:, 0] = image.width - boxes[:, 0] - 1
    new_boxes[:, 2] = image.width - boxes[:, 2] - 1
    new_boxes = new_boxes[:, [2, 1, 0, 3]]

    return new_image, new_boxes 
Developer: zzzDavid, Project: ICDAR-2019-SROIE, Lines: 20, Source: utils.py
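As a usage note, a minimal sketch of calling the flip helper above with made-up values (the image size and box coordinates are illustrative only; flip() and its FT import are assumed to be defined as in the example). Note that the column assignments inside flip() also modify the caller's boxes tensor in place:

import torch
from PIL import Image

# A blank 640x480 image and one box in boundary coordinates (x_min, y_min, x_max, y_max).
image = Image.new("RGB", (640, 480))
boxes = torch.FloatTensor([[100., 50., 200., 150.]])

new_image, new_boxes = flip(image, boxes)  # flip() as defined above
print(new_boxes)  # x-coordinates are mirrored: tensor([[439.,  50., 539., 150.]])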

Example 2: cv_transform

# Required module: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import hflip [as alias]
def cv_transform(img):
    # img = resize(img, size=(100, 300))
    # img = to_tensor(img)
    # img = normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    # img = pad(img, padding=(10, 10, 20, 20), fill=(255, 255, 255), padding_mode='constant')
    # img = pad(img, padding=(100, 100, 100, 100), fill=5, padding_mode='symmetric')
    # img = crop(img, -40, -20, 1000, 1000)
    # img = center_crop(img, (310, 300))
    # img = resized_crop(img, -10.3, -20, 330, 220, (500, 500))
    # img = hflip(img)
    # img = vflip(img)
    # tl, tr, bl, br, center = five_crop(img, 100)
    # img = adjust_brightness(img, 2.1)
    # img = adjust_contrast(img, 1.5)
    # img = adjust_saturation(img, 2.3)
    # img = adjust_hue(img, 0.5)
    # img = adjust_gamma(img, gamma=3, gain=0.1)
    # img = rotate(img, 10, resample='BILINEAR', expand=True, center=None)
    # img = to_grayscale(img, 3)
    # img = affine(img, 10, (0, 0), 1, 0, resample='BICUBIC', fillcolor=(255,255,0))
    # img = gaussion_noise(img)
    # img = poisson_noise(img)
    img = salt_and_pepper(img)
    return to_tensor(img) 
Developer: YU-Zhiyang, Project: opencv_transforms_torchvision, Lines: 26, Source: cvfunctional.py

Example 3: pil_transform

# Required module: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import hflip [as alias]
def pil_transform(img):
    # img = functional.resize(img, size=(100, 300))
    # img = functional.to_tensor(img)
    # img = functional.normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    # img = functional.pad(img, padding=(10, 10, 20, 20), fill=(255, 255, 255), padding_mode='constant')
    # img = functional.pad(img, padding=(100, 100, 100, 100), padding_mode='symmetric')
    # img = functional.crop(img, -40, -20, 1000, 1000)
    # img = functional.center_crop(img, (310, 300))
    # img = functional.resized_crop(img, -10.3, -20, 330, 220, (500, 500))
    # img = functional.hflip(img)
    # img = functional.vflip(img)
    # tl, tr, bl, br, center = functional.five_crop(img, 100)
    # img = functional.adjust_brightness(img, 2.1)
    # img = functional.adjust_contrast(img, 1.5)
    # img = functional.adjust_saturation(img, 2.3)
    # img = functional.adjust_hue(img, 0.5)
    # img = functional.adjust_gamma(img, gamma=3, gain=0.1)
    # img = functional.rotate(img, 10, resample=PIL.Image.BILINEAR, expand=True, center=None)
    # img = functional.to_grayscale(img, 3)
    # img = functional.affine(img, 10, (0, 0), 1, 0, resample=PIL.Image.BICUBIC, fillcolor=(255,255,0))

    return functional.to_tensor(img) 
Developer: YU-Zhiyang, Project: opencv_transforms_torchvision, Lines: 24, Source: cvfunctional.py

Example 4: getbyIdAndclass

# Required module: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import hflip [as alias]
def getbyIdAndclass(self, imgid, cls, hflip=0):
        if (imgid not in self.imgId2idx) or (cls == 'bg'):
            maskTotal = np.zeros((128,128))
        else:
            index= self.imgId2idx[imgid]
            catId = self.dataset.getCatIds(cls)
            maskTotal = np.zeros((self.imgSizes[index][0], self.imgSizes[index][1]))
            if len(self.catsInImg[index]) and (catId[0] in self.catsInImg[index]) and (cls in self.imgToCatToAnns[imgid]):
                # Randomly sample an annotation
                for annIndex in self.imgToCatToAnns[imgid][cls]:
                    ann = self.mRCNN_results[annIndex]
                    cm = self.dataset.annToMask(ann)
                    maskTotal[:cm.shape[0],:cm.shape[1]] += cm
            if hflip:
                maskTotal = maskTotal[:,::-1]

        mask = torch.FloatTensor(np.asarray(self.transform(Image.fromarray(np.clip(maskTotal,0,1)))))[None,::]

        return mask 
Developer: rakshithShetty, Project: adversarial-object-removal, Lines: 21, Source: data_loader_stargan.py

Example 5: __call__

# Required module: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import hflip [as alias]
def __call__(self, sample):
        image, label = sample['image'], sample['label']
        if self.rand_flip_index is None or self.image_mode:
            self.rand_flip_index = random.randint(-1,2)
        # -1: no flip, 0: horizontal flip, 1: vertical flip, 2: horizontal and vertical flip
        if self.rand_flip_index == 0:
            image = F.hflip(image)
            label = F.hflip(label)
        elif self.rand_flip_index == 1:
            image = F.vflip(image)
            label = F.vflip(label)
        elif self.rand_flip_index == 2:
            image = F.vflip(F.hflip(image))
            label = F.vflip(F.hflip(label))
        sample['image'], sample['label'] = image, label
        return sample 
Developer: Kinpzz, Project: RCRNet-Pytorch, Lines: 18, Source: transforms.py

Example 6: center_crop_with_flip

# Required module: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import hflip [as alias]
def center_crop_with_flip(img, size, vertical_flip=False):
    crop_h, crop_w = size
    first_crop = F.center_crop(img, (crop_h, crop_w))
    if vertical_flip:
        img = F.vflip(img)
    else:
        img = F.hflip(img)
    second_crop = F.center_crop(img, (crop_h, crop_w))
    return (first_crop, second_crop) 
Developer: jiangtaoxie, Project: fast-MPN-COV, Lines: 11, Source: imagepreprocess.py

Example 7: __call__

# Required module: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import hflip [as alias]
def __call__(self, image, target):
        if random.random() < self.prob:
            image = F.hflip(image)
            target = target.transpose(0)
        return image, target 
Developer: Res2Net, Project: Res2Net-maskrcnn, Lines: 7, Source: transforms.py

Example 8: __call__

# Required module: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import hflip [as alias]
def __call__(self, image, mask):
        # transforming to PIL image
        image, mask = F.to_pil_image(image), F.to_pil_image(mask)

        # random crop
        if self.crop:
            i, j, h, w = T.RandomCrop.get_params(image, self.crop)
            image, mask = F.crop(image, i, j, h, w), F.crop(mask, i, j, h, w)

        if np.random.rand() < self.p_flip:
            image, mask = F.hflip(image), F.hflip(mask)

        # color transforms || ONLY ON IMAGE
        if self.color_jitter_params:
            image = self.color_tf(image)

        # random affine transform
        if np.random.rand() < self.p_random_affine:
            affine_params = T.RandomAffine(180).get_params((-90, 90), (1, 1), (2, 2), (-45, 45), self.crop)
            image, mask = F.affine(image, *affine_params), F.affine(mask, *affine_params)

        # transforming to tensor
        image = F.to_tensor(image)
        if not self.long_mask:
            mask = F.to_tensor(mask)
        else:
            mask = to_long_tensor(mask)

        return image, mask 
Developer: cosmic-cortex, Project: pytorch-UNet, Lines: 31, Source: dataset.py

Example 9: __call__

# Required module: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import hflip [as alias]
def __call__(self, image, target):
        if random.random() < self.prob:
            image = F.hflip(image)
            target = target.transpose(0, self.left_right)
        return image, target 
Developer: soeaver, Project: Parsing-R-CNN, Lines: 7, Source: transforms.py

Example 10: __call__

# Required module: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import hflip [as alias]
def __call__(self, image, target):
        if random.random() < self.flip_prob:
            image = F.hflip(image)
            target = F.hflip(target)
        return image, target 
Developer: paperswithcode, Project: torchbench, Lines: 7, Source: transforms.py

Example 11: _instance_process

# Required module: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import hflip [as alias]
def _instance_process(self, img, flip_flag):
        """
        Args:
            img (PIL Image): Image to be flipped.
        Returns:
            PIL Image: Randomly flipped image.
        """
        if flip_flag:
            img.img = F.hflip(img.img)
            if img.x is not None:
                img.x = ImageOps.invert(img.x)
        return img 
Developer: yolomax, Project: person-reid-lib, Lines: 14, Source: transforms.py

Example 12: __call__

# Required module: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import hflip [as alias]
def __call__(self, img, pt):
        if self.p > np.random.rand():
            w, _ = img.size
            img = tf.hflip(img)
            pt_new = np.zeros_like(pt)
            pt_mask = pt.sum(axis=1) > 0
            pt_new[pt_mask] = np.vstack((w - 1 - pt[pt_mask][:, 0], pt[pt_mask][:, 1])).T
            return img, pt_new
        return img, pt 
Developer: svip-lab, Project: PPGNet, Lines: 11, Source: transforms.py

Example 13: build_transforms

# Required module: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import hflip [as alias]
def build_transforms(height, width, is_train, data_augment, **kwargs):
    """Build transforms

    Args:
    - height (int): target image height.
    - width (int): target image width.
    - is_train (bool): train or test phase.
    - data_augment (str)
    """

    # use imagenet mean and std as default
    imagenet_mean = [0.485, 0.456, 0.406]
    imagenet_std = [0.229, 0.224, 0.225]
    normalize = Normalize(mean=imagenet_mean, std=imagenet_std)

    transforms = []

    if is_train:
        transforms = build_training_transforms(height, width, data_augment)
    else:
        transforms += [Resize((height, width))]

        if kwargs.get('flip', False):
            transforms += [Lambda(lambda img: TF.hflip(img))]

        transforms += [ToTensor()]
        transforms += [normalize]

    transforms = Compose(transforms)
    if is_train:
        print('Using transform:', transforms)

    return transforms 
Developer: TAMU-VITA, Project: ABD-Net, Lines: 35, Source: transforms.py
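For context, a minimal sketch of a test-time call, assuming Resize, ToTensor, Normalize, Lambda, and Compose behave like their torchvision.transforms counterparts and that build_training_transforms comes from the same module; the sizes and the data_augment value are illustrative only:

# Test-time pipeline with horizontal flip enabled via the 'flip' keyword argument.
test_tf = build_transforms(height=256, width=128, is_train=False,
                           data_augment='none', flip=True)
# img_tensor = test_tf(pil_image)  # resize -> hflip -> to-tensor -> normalize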

Example 14: __call__

# Required module: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import hflip [as alias]
def __call__(self, img):
        """
        Args:
            img (PIL.Image): Image to be flipped.
        Returns:
            PIL.Image: Randomly flipped image.
        """
        if self.random_p < self.p:
            return F.hflip(img)
        return img 
Developer: kenshohara, Project: 3D-ResNets-PyTorch, Lines: 12, Source: spatial_transforms.py

Example 15: __call__

# Required module: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import hflip [as alias]
def __call__(self, data):
        """
        Args:
            img (PIL Image): Image to be flipped.
        Returns:
            PIL Image: Randomly flipped image.
        """
        hr, lr = data
        if random.random() < 0.5:
            return F.hflip(hr), F.hflip(lr)
        return hr, lr 
Developer: jacobgil, Project: pytorch-zssr, Lines: 13, Source: source_target_transforms.py


Note: The torchvision.transforms.functional.hflip examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by their original authors, who retain copyright to the source code; please consult each project's license before distributing or using it. Do not reproduce without permission.