

Python functional.center_crop Method Code Examples

This article collects typical usage examples of the Python method torchvision.transforms.functional.center_crop. If you are wondering what functional.center_crop does, how to call it, or what working examples look like, the curated code examples below may help. You can also explore further usage examples from the module it belongs to, torchvision.transforms.functional.


The following presents 5 code examples of the functional.center_crop method, sorted by popularity by default.
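Before the examples, here is a minimal standalone sketch of how functional.center_crop is typically called; the image size and crop size are arbitrary and chosen only for illustration:

from PIL import Image
from torchvision.transforms import functional as F

# Create a dummy 200x300 (width x height) grayscale image.
img = Image.new('L', (200, 300))

# center_crop takes the desired output size as (height, width) and crops
# around the image center.
out = F.center_crop(img, (100, 150))
print(out.size)  # PIL reports (width, height), so this prints (150, 100)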

Example 1: __call__

# Required import: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import center_crop [as alias]
def __call__(self, sample):
        rdict = {}
        input_data = sample['input']

        # PIL reports (width, height); self.size is (height, width).
        w, h = input_data.size
        th, tw = self.size
        # Top-left corner of the centered crop window.
        fh = int(round((h - th) / 2.))
        fw = int(round((w - tw) / 2.))

        # Record the crop offsets and original size in the sample metadata.
        params = (fh, fw, w, h)
        self.propagate_params(sample, params)

        input_data = F.center_crop(input_data, self.size)
        rdict['input'] = input_data

        if self.labeled:
            gt_data = sample['gt']
            gt_metadata = sample['gt_metadata']
            gt_data = F.center_crop(gt_data, self.size)
            gt_metadata["__centercrop"] = (fh, fw, w, h)
            rdict['gt'] = gt_data

        sample.update(rdict)
        return sample 
Author: perone, Project: medicaltorch, Lines of code: 27, Source file: transforms.py
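The point of Example 1 is that the input image and its ground-truth mask are cropped with the same parameters so they stay spatially aligned. A minimal standalone sketch of that idea, independent of the medicaltorch class above and using hypothetical sizes:

from PIL import Image
from torchvision.transforms import functional as F

img = Image.new('L', (128, 160))   # input slice
gt = Image.new('L', (128, 160))    # segmentation mask
size = (96, 96)                    # (height, width)

# Cropping both with the same size keeps image and mask aligned.
img_c = F.center_crop(img, size)
gt_c = F.center_crop(gt, size)
assert img_c.size == gt_c.size == (96, 96)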

Example 2: cv_transform

# Required import: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import center_crop [as alias]
def cv_transform(img):
    # img = resize(img, size=(100, 300))
    # img = to_tensor(img)
    # img = normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    # img = pad(img, padding=(10, 10, 20, 20), fill=(255, 255, 255), padding_mode='constant')
    # img = pad(img, padding=(100, 100, 100, 100), fill=5, padding_mode='symmetric')
    # img = crop(img, -40, -20, 1000, 1000)
    # img = center_crop(img, (310, 300))
    # img = resized_crop(img, -10.3, -20, 330, 220, (500, 500))
    # img = hflip(img)
    # img = vflip(img)
    # tl, tr, bl, br, center = five_crop(img, 100)
    # img = adjust_brightness(img, 2.1)
    # img = adjust_contrast(img, 1.5)
    # img = adjust_saturation(img, 2.3)
    # img = adjust_hue(img, 0.5)
    # img = adjust_gamma(img, gamma=3, gain=0.1)
    # img = rotate(img, 10, resample='BILINEAR', expand=True, center=None)
    # img = to_grayscale(img, 3)
    # img = affine(img, 10, (0, 0), 1, 0, resample='BICUBIC', fillcolor=(255,255,0))
    # img = gaussion_noise(img)
    # img = poisson_noise(img)
    img = salt_and_pepper(img)
    return to_tensor(img) 
Author: YU-Zhiyang, Project: opencv_transforms_torchvision, Lines of code: 26, Source file: cvfunctional.py

Example 3: pil_transform

# Required import: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import center_crop [as alias]
def pil_transform(img):
    # img = functional.resize(img, size=(100, 300))
    # img = functional.to_tensor(img)
    # img = functional.normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    # img = functional.pad(img, padding=(10, 10, 20, 20), fill=(255, 255, 255), padding_mode='constant')
    # img = functional.pad(img, padding=(100, 100, 100, 100), padding_mode='symmetric')
    # img = functional.crop(img, -40, -20, 1000, 1000)
    # img = functional.center_crop(img, (310, 300))
    # img = functional.resized_crop(img, -10.3, -20, 330, 220, (500, 500))
    # img = functional.hflip(img)
    # img = functional.vflip(img)
    # tl, tr, bl, br, center = functional.five_crop(img, 100)
    # img = functional.adjust_brightness(img, 2.1)
    # img = functional.adjust_contrast(img, 1.5)
    # img = functional.adjust_saturation(img, 2.3)
    # img = functional.adjust_hue(img, 0.5)
    # img = functional.adjust_gamma(img, gamma=3, gain=0.1)
    # img = functional.rotate(img, 10, resample=PIL.Image.BILINEAR, expand=True, center=None)
    # img = functional.to_grayscale(img, 3)
    # img = functional.affine(img, 10, (0, 0), 1, 0, resample=PIL.Image.BICUBIC, fillcolor=(255,255,0))

    return functional.to_tensor(img) 
Author: YU-Zhiyang, Project: opencv_transforms_torchvision, Lines of code: 24, Source file: cvfunctional.py

Example 4: __call__

# Required import: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import center_crop [as alias]
def __call__(self, img_dict):
        keys = ['rgb', 'ir', 'depth']
        for k in keys:
            img = img_dict[k]
            w, h = img.size
            crop_h, crop_w = self.size
            if crop_w > w or crop_h > h:
                raise ValueError("Requested crop size {} is bigger than input size {}".format(self.size,
                                                                                              (h, w)))
            # crop_index selects one of five crops: 0 = center, 1 to 4 = the four corners.
            if self.crop_index == 0:
                img_dict[k] = F.center_crop(img, (crop_h, crop_w))
            elif self.crop_index == 1:
                img_dict[k] = img.crop((0, 0, crop_w, crop_h))
            elif self.crop_index == 2:
                img_dict[k] = img.crop((w - crop_w, 0, w, crop_h))
            elif self.crop_index == 3:
                img_dict[k] = img.crop((0, h - crop_h, crop_w, h))
            elif self.crop_index == 4:
                img_dict[k] = img.crop((w - crop_w, h - crop_h, w, h))
            else:
                raise ValueError("Requested crop index is not in range(5)")
        return img_dict 
Author: AlexanderParkin, Project: ChaLearn_liveness_challenge, Lines of code: 24, Source file: transforms.py
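Example 4 reimplements the five standard crops (center plus the four corners) with an explicit crop_index so that the rgb, ir, and depth images of one sample all receive the same crop. For a single image, torchvision's functional.five_crop returns the same five crops in one call; a short sketch with arbitrary sizes:

from PIL import Image
from torchvision.transforms import functional as F

img = Image.new('RGB', (64, 48))
# five_crop returns the four corner crops plus the center crop.
tl, tr, bl, br, center = F.five_crop(img, (32, 32))
assert center.size == (32, 32)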

Example 5: __call__

# Required import: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import center_crop [as alias]
def __call__(self, img1, img2):
        img1 = tvF.resize(img1, self.size, interpolation=Image.LANCZOS)
        img2 = tvF.resize(img2, self.size, interpolation=Image.LANCZOS)
        if random.random() < 0.5:
            img1 = tvF.hflip(img1)
            img2 = tvF.hflip(img2)
        if random.random() < 0.5:
            # Rotate both images by the same random angle, then center-crop with
            # a ratio that removes the borders introduced by the rotation.
            rot = random.uniform(-10, 10)
            crop_ratio = rot_crop(rot)
            img1 = tvF.rotate(img1, rot, resample=Image.BILINEAR)
            img2 = tvF.rotate(img2, rot, resample=Image.BILINEAR)
            img1 = tvF.center_crop(img1, int(img1.size[0] * crop_ratio))
            img2 = tvF.center_crop(img2, int(img2.size[0] * crop_ratio))

        i, j, h, w = self.get_params(img1, self.scale, self.ratio)

        # Apply the same random resized crop to both images.
        return (tvF.resized_crop(img1, i, j, h, w, self.size, self.interpolation),
                tvF.resized_crop(img2, i, j, h, w, self.size, self.interpolation)) 
Author: blandocs, Project: Tag2Pix, Lines of code: 21, Source file: dataloader.py
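In Example 5, rot_crop is a helper defined elsewhere in the Tag2Pix repository; judging from how it is used, it returns the crop ratio that removes the empty borders a rotation leaves behind. A plausible sketch of such a helper for square images is shown below; this is an assumption for illustration, not the project's actual implementation:

import math

def rot_crop_ratio(angle_deg):
    # Hypothetical helper: for a square image of side S rotated by angle_deg
    # (|angle_deg| <= 45), the largest centered axis-aligned square that avoids
    # the empty corners has side S / (cos a + sin a), so the crop ratio is
    # 1 / (cos a + sin a).
    a = math.radians(abs(angle_deg))
    return 1.0 / (math.cos(a) + math.sin(a))

print(rot_crop_ratio(10))  # about 0.86 for a 10 degree rotation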


Note: The torchvision.transforms.functional.center_crop examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers, and the copyright of the source code belongs to the original authors. Please consult each project's license before redistributing or using the code, and do not reproduce this article without permission.