

Python transforms.RandomVerticalFlip Method Code Examples

This article collects typical usage examples of the Python method torchvision.transforms.RandomVerticalFlip. If you are wondering what transforms.RandomVerticalFlip does, how to use it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the torchvision.transforms module.


The following presents 11 code examples of the transforms.RandomVerticalFlip method, sorted by popularity by default.
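
Before the project-specific examples, here is a minimal, self-contained sketch of the method's basic behavior. The dummy image and the 0.5/1.0 probabilities are only illustrative and are not taken from any example below.

from PIL import Image
from torchvision import transforms

# RandomVerticalFlip(p) flips its input top-to-bottom with probability p (default 0.5).
# It accepts a PIL Image (and, in recent torchvision releases, a tensor of shape (..., H, W)).
augment = transforms.Compose([
    transforms.RandomVerticalFlip(p=0.5),  # flip roughly half of the inputs
    transforms.ToTensor(),
])

img = Image.new('RGB', (64, 64))           # placeholder image for illustration
tensor = augment(img)                      # FloatTensor of shape (3, 64, 64)

# With p=1.0 the flip is applied every time; several examples below use this
# to build deterministic augmentation or test-time-augmentation variants.
flipped = transforms.RandomVerticalFlip(p=1.0)(img)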

Example 1: __init__

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import RandomVerticalFlip [as alias]
def __init__(self, patches, use_cache, augment_data):
        super(PatchDataset, self).__init__()
        self.patches = patches
        self.crop = CenterCrop(config.CROP_SIZE)

        if augment_data:
            self.random_transforms = [RandomRotation((90, 90)), RandomVerticalFlip(1.0), RandomHorizontalFlip(1.0),
                                      (lambda x: x)]
            self.get_aug_transform = (lambda: random.sample(self.random_transforms, 1)[0])
        else:
            # Transform does nothing. Not sure if horrible or very elegant...
            self.get_aug_transform = (lambda: (lambda x: x))

        if use_cache:
            self.load_patch = data_manager.load_cached_patch
        else:
            self.load_patch = data_manager.load_patch

        print('Dataset ready with {} tuples.'.format(len(patches))) 
Author: martkartasev, Project: sepconv, Lines: 21, Source: dataset.py
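
The pattern above relies on every transform being deterministic (a fixed 90-degree rotation, flips with p=1.0) while random.sample picks exactly one of them per call. A plausible reason, not shown in the snippet, is that the chosen transform can then be applied identically to every frame of a patch tuple; the sketch below illustrates that idea with a hypothetical augment_tuple helper.

import random
from torchvision.transforms import RandomRotation, RandomVerticalFlip, RandomHorizontalFlip

# Each transform is deterministic, so drawing one with random.sample and reusing it
# keeps all frames of a training tuple augmented consistently.
random_transforms = [RandomRotation((90, 90)), RandomVerticalFlip(1.0),
                     RandomHorizontalFlip(1.0), (lambda x: x)]
get_aug_transform = lambda: random.sample(random_transforms, 1)[0]

def augment_tuple(frames):           # frames: list of PIL Images (hypothetical helper)
    aug = get_aug_transform()        # drawn once per tuple...
    return [aug(f) for f in frames]  # ...and applied identically to each frame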

Example 2: __init__

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import RandomVerticalFlip [as alias]
def __init__(self, noisy_dir, crop_size, upscale_factor=4, cropped=False, flips=False, rotations=False, **kwargs):
        super(TrainDataset, self).__init__()
        # get all directories used for training
        if isinstance(noisy_dir, str):
            noisy_dir = [noisy_dir]
        self.files = []
        for n_dir in noisy_dir:
            self.files += [join(n_dir, x) for x in listdir(n_dir) if utils.is_image_file(x)]
        # initialize image transformations and variables
        self.input_transform = T.Compose([
            T.RandomVerticalFlip(0.5 if flips else 0.0),
            T.RandomHorizontalFlip(0.5 if flips else 0.0),
            T.RandomCrop(crop_size)
        ])
        self.crop_transform = T.RandomCrop(crop_size // upscale_factor)
        self.upscale_factor = upscale_factor
        self.cropped = cropped
        self.rotations = rotations 
Author: ManuelFritsche, Project: real-world-sr, Lines: 20, Source: data_loader.py

Example 3: get_data_transforms

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import RandomVerticalFlip [as alias]
def get_data_transforms():
	
	data_transforms = {
	    'train': transforms.Compose([
	        transforms.CenterCrop(config.patch_size),
	        transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.2),
	        transforms.RandomHorizontalFlip(),
	        transforms.RandomVerticalFlip(),
	        Random90Rotation(),
	        transforms.ToTensor(),
	        transforms.Normalize([0.7, 0.6, 0.7], [0.15, 0.15, 0.15]) #mean and standard deviations for lung adenocarcinoma resection slides
	    ]),
	    'val': transforms.Compose([
	        transforms.CenterCrop(config.patch_size),
	        transforms.ToTensor(),
	        transforms.Normalize([0.7, 0.6, 0.7], [0.15, 0.15, 0.15])
	    ]),
	    'unnormalize': transforms.Compose([
	        transforms.Normalize([1/0.15, 1/0.15, 1/0.15], [1/0.15, 1/0.15, 1/0.15])
	    ]),
	}

	return data_transforms

#printing the model 
Author: BMIRDS, Project: HistoGAN, Lines: 27, Source: utils_model.py

Example 4: data_transforms

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import RandomVerticalFlip [as alias]
def data_transforms(dataset, cutout_length):
    dataset = dataset.lower()
    if dataset == 'cifar10':
        MEAN = [0.49139968, 0.48215827, 0.44653124]
        STD = [0.24703233, 0.24348505, 0.26158768]
        transf = [
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip()
        ]
    elif dataset == 'mnist':
        MEAN = [0.13066051707548254]
        STD = [0.30810780244715075]
        transf = [
            transforms.RandomAffine(degrees=15, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=0.1)
        ]
    elif dataset == 'fashionmnist':
        MEAN = [0.28604063146254594]
        STD = [0.35302426207299326]
        transf = [
            transforms.RandomAffine(degrees=15, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=0.1),
            transforms.RandomVerticalFlip()
        ]
    else:
        raise ValueError('not expected dataset = {}'.format(dataset))

    normalize = [
        transforms.ToTensor(),
        transforms.Normalize(MEAN, STD)
    ]

    train_transform = transforms.Compose(transf + normalize)
    valid_transform = transforms.Compose(normalize)

    if cutout_length > 0:
        train_transform.transforms.append(Cutout(cutout_length))

    return train_transform, valid_transform 
Author: khanrc, Project: pt.darts, Lines: 39, Source: preproc.py

Example 5: __init__

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import RandomVerticalFlip [as alias]
def __init__(self, folder_path):
        self.files = sorted(glob.glob('%s/*.*' % folder_path))
        self.transform = transforms.Compose([
            transforms.RandomCrop(128),
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            transforms.ToTensor()
        ]) 
Author: JasonZHM, Project: CAE-ADMM, Lines: 10, Source: utils.py

Example 6: get_tta_transforms

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import RandomVerticalFlip [as alias]
def get_tta_transforms(index, pad_mode):
    tta_transforms = {
        0: [],
        1: [transforms.RandomHorizontalFlip(p=2.)],
        2: [transforms.RandomVerticalFlip(p=2.)],
        3: [transforms.RandomHorizontalFlip(p=2.), transforms.RandomVerticalFlip(p=2.)]
    }
    if pad_mode == 'resize':
        return transforms.Compose([transforms.Resize((H, W)), *(tta_transforms[index]), *img_transforms])
    else:
        return transforms.Compose([*(tta_transforms[index]), *img_transforms]) 
Author: microsoft, Project: nni, Lines: 13, Source: loader.py
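
A note on the p=2. arguments: torchvision applies the flip whenever a uniform sample in [0, 1) falls below p, so any p >= 1 flips deterministically, and indices 0-3 enumerate the four flip combinations for test-time augmentation (H, W and img_transforms are defined elsewhere in loader.py and are not shown). The sketch below applies the same idea with placeholder names (model, pil_image, the 224x224 size) that are not part of the original code.

import torch
from torchvision import transforms

# Minimal TTA sketch: p=1.0 makes the "random" flips deterministic, so the four
# variants are {identity, h-flip, v-flip, both flips}.
tta_variants = [
    [],
    [transforms.RandomHorizontalFlip(p=1.0)],
    [transforms.RandomVerticalFlip(p=1.0)],
    [transforms.RandomHorizontalFlip(p=1.0), transforms.RandomVerticalFlip(p=1.0)],
]
base = [transforms.Resize((224, 224)), transforms.ToTensor()]

def predict_with_tta(model, pil_image):
    preds = []
    with torch.no_grad():
        for extra in tta_variants:
            x = transforms.Compose(extra + base)(pil_image).unsqueeze(0)
            preds.append(model(x))
    return torch.stack(preds).mean(dim=0)  # average predictions over the 4 variants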

Example 7: __init__

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import RandomVerticalFlip [as alias]
def __init__(self, data_dir, scale_factor, patch_size=0, mode='train'):

        assert patch_size % scale_factor == 0
        assert (mode == 'train' and patch_size != 0) or mode == 'eval'

        if isinstance(data_dir, str):
            data_dir = Path(data_dir)

        self.filenames = [f for f in data_dir.glob('*') if is_image(f)]
        self.scale_factor = scale_factor

        if mode == 'train':
            self.transforms = transforms.Compose([
                transforms.RandomCrop(
                    patch_size, pad_if_needed=True, padding_mode='reflect'),
                transforms.RandomApply([
                    functools.partial(TF.rotate, angle=0),
                    functools.partial(TF.rotate, angle=90),
                    functools.partial(TF.rotate, angle=180),
                    functools.partial(TF.rotate, angle=270),
                ]),
                transforms.RandomHorizontalFlip(),
                transforms.RandomVerticalFlip(),
            ])
        elif mode == 'eval':
            self.filenames.sort()
            if patch_size > 0:
                self.transforms = transforms.Compose([
                    transforms.CenterCrop(patch_size)
                ])
            else:
                self.transforms = transforms.Compose([
                    functools.partial(pad, scale=scale_factor)
                ])
        else:
            raise NotImplementedError 
Author: S-aiueo32, Project: srntt-pytorch, Lines: 38, Source: basic_dataset.py

Example 8: __init__

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import RandomVerticalFlip [as alias]
def __init__(self, opts, scale=0.875, random_crop=False, random_hflip=False, random_vflip=False):
        if type(opts) == dict:
            opts = munchify(opts)
        self.input_size = opts.input_size
        self.input_space = opts.input_space
        self.input_range = opts.input_range
        self.mean = opts.mean
        self.std = opts.std

        # https://github.com/tensorflow/models/blob/master/research/inception/inception/image_processing.py#L294
        self.scale = scale
        self.random_crop = random_crop
        self.random_hflip = random_hflip
        self.random_vflip = random_vflip

        tfs = []
        tfs.append(transforms.Resize(int(math.floor(max(self.input_size)/self.scale))))

        if random_crop:
            tfs.append(transforms.RandomCrop(max(self.input_size)))
        else:
            tfs.append(transforms.CenterCrop(max(self.input_size)))

        if random_hflip:
            tfs.append(transforms.RandomHorizontalFlip())

        if random_vflip:
            tfs.append(transforms.RandomVerticalFlip())

        tfs.append(transforms.ToTensor())
        tfs.append(ToSpaceBGR(self.input_space=='BGR'))
        tfs.append(ToRange255(max(self.input_range)==255))
        tfs.append(transforms.Normalize(mean=self.mean, std=self.std))

        self.tf = transforms.Compose(tfs) 
Author: CeLuigi, Project: models-comparison.pytorch, Lines: 37, Source: utils.py

Example 9: __init__

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import RandomVerticalFlip [as alias]
def __init__(self, opts, scale=0.875, random_crop=False,
                 random_hflip=False, random_vflip=False,
                 preserve_aspect_ratio=True):
        if type(opts) == dict:
            opts = munchify(opts)
        self.input_size = opts.input_size
        self.input_space = opts.input_space
        self.input_range = opts.input_range
        self.mean = opts.mean
        self.std = opts.std

        # https://github.com/tensorflow/models/blob/master/research/inception/inception/image_processing.py#L294
        self.scale = scale
        self.random_crop = random_crop
        self.random_hflip = random_hflip
        self.random_vflip = random_vflip

        tfs = []
        if preserve_aspect_ratio:
            tfs.append(transforms.Resize(int(math.floor(max(self.input_size)/self.scale))))
        else:
            height = int(self.input_size[1] / self.scale)
            width = int(self.input_size[2] / self.scale)
            tfs.append(transforms.Resize((height, width)))

        if random_crop:
            tfs.append(transforms.RandomCrop(max(self.input_size)))
        else:
            tfs.append(transforms.CenterCrop(max(self.input_size)))

        if random_hflip:
            tfs.append(transforms.RandomHorizontalFlip())

        if random_vflip:
            tfs.append(transforms.RandomVerticalFlip())

        tfs.append(transforms.ToTensor())
        tfs.append(ToSpaceBGR(self.input_space=='BGR'))
        tfs.append(ToRange255(max(self.input_range)==255))
        tfs.append(transforms.Normalize(mean=self.mean, std=self.std))

        self.tf = transforms.Compose(tfs) 
Author: alexandonian, Project: pretorched-x, Lines: 44, Source: utils.py

Example 10: data_transforms

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import RandomVerticalFlip [as alias]
def data_transforms(dataset, cutout_length):
    dataset = dataset.lower()
    if dataset == 'cifar10' or dataset == 'cifar100':
        MEAN = [0.49139968, 0.48215827, 0.44653124]
        STD = [0.24703233, 0.24348505, 0.26158768]
        transf_train = [
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip()
        ]
        transf_val = []
    elif dataset == 'mnist':
        MEAN = [0.13066051707548254]
        STD = [0.30810780244715075]
        transf_train = [
            transforms.RandomAffine(degrees=15, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=0.1)
        ]
        transf_val=[]
    elif dataset == 'fashionmnist':
        MEAN = [0.28604063146254594]
        STD = [0.35302426207299326]
        transf_train = [
            transforms.RandomAffine(degrees=15, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=0.1),
            transforms.RandomVerticalFlip()
        ]
        transf_val = []
    #Same preprocessing for ImageNet, Sport8 and MIT67
    elif dataset in utils.LARGE_DATASETS:
        MEAN = [0.485, 0.456, 0.406]
        STD = [0.229, 0.224, 0.225]
        transf_train = [
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(
                brightness=0.4,
                contrast=0.4,
                saturation=0.4,
                hue=0.2)
        ]
        transf_val = [
            transforms.Resize(256),
            transforms.CenterCrop(224),
        ]
    else:
        raise ValueError('not expected dataset = {}'.format(dataset))

    normalize = [
        transforms.ToTensor(),
        transforms.Normalize(MEAN, STD)
    ]
    
    train_transform = transforms.Compose(transf_train + normalize)
    valid_transform = transforms.Compose(transf_val + normalize)  # FIXME validation is not set to square proportions, is this an issue?

    if cutout_length > 0:
        train_transform.transforms.append(Cutout(cutout_length))

    return train_transform, valid_transform 
Author: antoyang, Project: NAS-Benchmark, Lines: 59, Source: preproc.py

Example 11: image_transform

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import RandomVerticalFlip [as alias]
def image_transform(
    image_size: Union[int, List[int]],
    augmentation: dict = {},
    mean: List[float] = [0.485, 0.456, 0.406],
    std: List[float] = [0.229, 0.224, 0.225]) -> Callable:
    """Image transforms.
    """

    if isinstance(image_size, int):
        image_size = (image_size, image_size)
    else:
        image_size = tuple(image_size)

    # data augmentations
    horizontal_flip = augmentation.pop('horizontal_flip', None)
    if horizontal_flip is not None:
        assert isinstance(horizontal_flip, float) and 0 <= horizontal_flip <= 1

    vertical_flip = augmentation.pop('vertical_flip', None)
    if vertical_flip is not None:
        assert isinstance(vertical_flip, float) and 0 <= vertical_flip <= 1

    random_crop = augmentation.pop('random_crop', None)
    if random_crop is not None:
        assert isinstance(random_crop, dict)

    center_crop = augmentation.pop('center_crop', None)
    if center_crop is not None:
        assert isinstance(center_crop, (int, list))

    if len(augmentation) > 0:
        raise NotImplementedError('Invalid augmentation options: %s.' % ', '.join(augmentation.keys()))
    
    t = [
        transforms.Resize(image_size) if random_crop is None else transforms.RandomResizedCrop(image_size[0], **random_crop),
        transforms.CenterCrop(center_crop) if center_crop is not None else None,
        transforms.RandomHorizontalFlip(horizontal_flip) if horizontal_flip is not None else None,
        transforms.RandomVerticalFlip(vertical_flip) if vertical_flip is not None else None,
        transforms.ToTensor(),
        transforms.Normalize(mean, std)]
    
    return transforms.Compose([v for v in t if v is not None]) 
Author: chuchienshu, Project: ultra-thin-PRM, Lines: 44, Source: datasets.py
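
A hedged usage sketch for the function above: the dictionary keys are exactly the ones the function pops, while the concrete values and the 256-pixel size are made up for illustration.

# Hypothetical call illustrating the augmentation dict consumed by image_transform.
train_tf = image_transform(
    image_size=256,
    augmentation={
        'horizontal_flip': 0.5,                # -> RandomHorizontalFlip(0.5)
        'vertical_flip': 0.5,                  # -> RandomVerticalFlip(0.5)
        'random_crop': {'scale': (0.8, 1.0)},  # kwargs forwarded to RandomResizedCrop
    },
)
val_tf = image_transform(image_size=256)       # no augmentation: Resize + ToTensor + Normalize

# Both calls return a transforms.Compose that maps a PIL Image to a normalized tensor.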


Note: The torchvision.transforms.RandomVerticalFlip examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and the copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.