

Python transforms.RandomSizedCrop Method Code Examples

This article collects and summarizes typical usage examples of the torchvision.transforms.RandomSizedCrop method in Python. If you are wondering what transforms.RandomSizedCrop does, how to call it, or what its usage looks like in real projects, the curated code examples below may help. You can also explore further usage examples of its containing module, torchvision.transforms.


The following presents 15 code examples of the transforms.RandomSizedCrop method, sorted by popularity by default.
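
Note that RandomSizedCrop is the legacy name: torchvision 0.2 renamed it to RandomResizedCrop, and later releases removed the old alias, which is why several examples below (e.g. Example 1) branch on the torchvision version. As a minimal sketch (not taken from any project below), a version-tolerant ImageNet-style training transform might look like this:

from torchvision import transforms

# Prefer the modern name when it exists; fall back to the legacy alias otherwise.
if hasattr(transforms, "RandomResizedCrop"):
    _random_resized_crop = transforms.RandomResizedCrop   # torchvision >= 0.2
else:
    _random_resized_crop = transforms.RandomSizedCrop     # legacy torchvision

train_transform = transforms.Compose([
    _random_resized_crop(224),                  # random crop, resized to 224x224
    transforms.RandomHorizontalFlip(),          # flip with probability 0.5
    transforms.ToTensor(),                      # PIL image -> float tensor in [0, 1]
    transforms.Normalize(mean=[0.485, 0.456, 0.406],   # ImageNet statistics
                         std=[0.229, 0.224, 0.225]),
])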

Example 1: init_transformations

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import RandomSizedCrop [as alias]
def init_transformations(self):

        if self.hps.torchvision_version_major == 0 and self.hps.torchvision_version_minor < 2:
            _resize = transforms.Scale
            _rnd_resize_crop = transforms.RandomSizedCrop
        else:
            _resize = transforms.Resize
            _rnd_resize_crop = transforms.RandomResizedCrop

        self.train_transform = transforms.Compose([
            _resize([264, 264]),
            _rnd_resize_crop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=self.hps.img_mean, std=self.hps.img_std)
        ])

        # Test
        self.test_transform = transforms.Compose([
            _resize([224, 224]),
            transforms.ToTensor(),
            transforms.Normalize(mean=self.hps.img_mean, std=self.hps.img_std)
        ])

Author: ok1zjf | Project: AMNet | Lines: 27 | Source: amnet.py

Example 2: _get_label

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import RandomSizedCrop [as alias]
def _get_label(self, train_dir):
		# Normalize on RGB Value
		normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
										 std=[0.229, 0.224, 0.225])
		# Train -> Preprocessing -> Tensor
		train_dataset = datasets.ImageFolder(
			train_dir,
			transforms.Compose([
				transforms.RandomSizedCrop(self._size[0]), #224 , 299
				transforms.RandomHorizontalFlip(),
				transforms.ToTensor(),
				normalize,
			]))

		# Return the list of class labels
		return train_dataset.classes 
Author: floydhub | Project: imagenet | Lines: 18 | Source: imagenet_models.py

Example 3: getTrainLoader

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import RandomSizedCrop [as alias]
def getTrainLoader(self, batch_size, shuffle=True, num_workers=4):

        # first we define the training transform we will apply to the dataset
        list_of_transforms = []
        list_of_transforms.append(vision_transforms.RandomSizedCrop(self.size_images))
        list_of_transforms.append(vision_transforms.RandomHorizontalFlip())

        if self.type_of_data_augmentation == 'extended':
            list_of_transforms.append(vision_transforms.ColorJitter(brightness=0.4,
                                                                             contrast=0.4,
                                                                             saturation=0.4))
        list_of_transforms.append(vision_transforms.ToTensor())
        if self.type_of_data_augmentation == 'extended':
            list_of_transforms.append(vision_transforms_extension.Lighting(alphastd=0.1,
                                                                          eigval=self.pca['eigval'],
                                                                          eigvec=self.pca['eigvec']))

        list_of_transforms.append(vision_transforms.Normalize(mean=self.meanstd['mean'],
                                                             std=self.meanstd['std']))
        train_transform = vision_transforms.Compose(list_of_transforms)
        train_set = torchvision.datasets.ImageFolder(self.trainFolder, train_transform)
        train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=shuffle,
                                        num_workers=num_workers, pin_memory=self.pin_memory)

        return train_loader 
Author: antspy | Project: quantized_distillation | Lines: 27 | Source: ImageNet12.py

Example 4: get_transform

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import RandomSizedCrop [as alias]
def get_transform(data_name, split_name, opt):
    normalizer = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                      std=[0.229, 0.224, 0.225])
    t_list = []
    if split_name == "train":
        t_list = [transforms.RandomSizedCrop(opt.crop_size),
                  transforms.RandomHorizontalFlip()]
    elif split_name == "val":
        t_list = [transforms.Resize(256), transforms.CenterCrop(224)]
        #t_list = [transforms.Resize((224, 224))]
    elif split_name == "test":
        t_list = [transforms.Resize(256), transforms.CenterCrop(224)]
        #t_list = [transforms.Resize((224, 224))]

    """if "CUHK" in data_name:
        t_end = [transforms.ToTensor()]
    else:"""
    t_end = [transforms.ToTensor(), normalizer]
    
    transform = transforms.Compose(t_list + t_end)
    return transform 
Author: ZihaoWang-CV | Project: CAMP_iccv19 | Lines: 23 | Source: data.py

Example 5: test_f30k_dataloader

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import RandomSizedCrop [as alias]
def test_f30k_dataloader():
    data_name = "f30k"
    data_path = "./data/f30k"
    vocab_path = "./vocab/"

    vocab = pickle.load(open(os.path.join(vocab_path,
            '%s_vocab.pkl' % data_name), 'rb'))
    roots, ids = data.get_paths(data_path, data_name, False)
    transform = transforms.Compose([transforms.RandomSizedCrop(224),
                                    transforms.ToTensor()])
    print (roots, ids)
    train_loader = data.get_loader_single(data_name, "train", # !!!
                                     roots["train"]["img"],
                                     roots["train"]["cap"],
                                     vocab, transform, ids=ids["train"],
                                     batch_size=16, shuffle=False,
                                     num_workers=1,
                                     collate_fn=data.collate_fn,
                                     distributed=False)
    print ("f30k dataloader output:", train_loader.dataset.img_num)
    #for (id, x) in enumerate(train_loader):
        #if id > 0 : break
        #print (id, x) 
Author: ZihaoWang-CV | Project: CAMP_iccv19 | Lines: 25 | Source: test_modules.py

Example 6: get_transform

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import RandomSizedCrop [as alias]
def get_transform(data_name, split_name, opt):
    normalizer = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                      std=[0.229, 0.224, 0.225])
    t_list = []
    if split_name == 'train':
        t_list = [transforms.RandomSizedCrop(opt.crop_size),
                  transforms.RandomHorizontalFlip()]
    elif split_name == 'val':
        t_list = [transforms.Scale(256), transforms.CenterCrop(224)]
    elif split_name == 'test':
        t_list = [transforms.Scale(256), transforms.CenterCrop(224)]

    t_end = [transforms.ToTensor(), normalizer]
    transform = transforms.Compose(t_list + t_end)
    return transform 
Author: ExplorerFreda | Project: VSE-C | Lines: 17 | Source: data.py

Example 7: __init__

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import RandomSizedCrop [as alias]
def __init__(self, args, train=True):
        self.root_dir = args.data
        root_dir = self.root_dir
        if train:
            self.data_set_list = os.path.join(root_dir,
                                              args.trainset_image_list)
        else:
            self.data_set_list = os.path.join(root_dir, args.testset_image_list)

        self.categ_dict = get_class_names(
            os.path.join(root_dir, 'ClassName.txt'))

        self.data_set_list = parse_file(self.data_set_list, self.categ_dict)

        self.args = args
        self.read_features = args.read_features

        self.features_dir = args.features_dir
        if train:
            self.transform = transforms.Compose([
                transforms.RandomSizedCrop(args.image_size),
                transforms.RandomHorizontalFlip(),
                transforms.Scale((args.image_size, args.image_size)),
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225]),
            ])
        else:
            self.transform = transforms.Compose([
                transforms.Scale((args.image_size, args.image_size)),
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225]),
            ]) 
Author: ehsanik | Project: dogTorch | Lines: 36 | Source: sun_dataset.py

Example 8: inception_preproccess

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import RandomSizedCrop [as alias]
def inception_preproccess(input_size, normalize=__imagenet_stats):
    return transforms.Compose([
        transforms.RandomSizedCrop(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**normalize)
    ]) 
Author: JiaRenChang | Project: PSMNet | Lines: 9 | Source: preprocess.py

Example 9: inception_color_preproccess

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import RandomSizedCrop [as alias]
def inception_color_preproccess(input_size, normalize=__imagenet_stats):
    return transforms.Compose([
        #transforms.RandomSizedCrop(input_size),
        #transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        ColorJitter(
            brightness=0.4,
            contrast=0.4,
            saturation=0.4,
        ),
        Lighting(0.1, __imagenet_pca['eigval'], __imagenet_pca['eigvec']),
        transforms.Normalize(**normalize)
    ]) 
Author: JiaRenChang | Project: PSMNet | Lines: 15 | Source: preprocess.py

Example 10: Imagenet_train

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import RandomSizedCrop [as alias]
def Imagenet_train():
    return transforms.Compose([
                transforms.Scale(256),              # resize to size=(w, h) or (size, size)
                transforms.RandomSizedCrop(224),    # randomly crop, then resize to the given size
                transforms.RandomHorizontalFlip(),  # random horizontal flip with probability 0.5
                transforms.ToTensor(),              # convert to a tensor
                ColorJitter(Jitter=0.4, group=1, same_group=False),
                Lighting(alphastd=0.1, group=1, same_group=False),
                Normalize_Imagenet(), 
                ]) 
Author: wyf2017 | Project: DSMnet | Lines: 12 | Source: __init__.py

Example 11: test_input_block

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import RandomSizedCrop [as alias]
def test_input_block():
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    dataset = datasets.ImageFolder('/sequoia/data1/yhasson/datasets/test-dataset',
            transforms.Compose([
            transforms.RandomSizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))

    densenet = torchvision.models.densenet121(pretrained=True)
    features = densenet.features
    seq2d = torch.nn.Sequential(
        features.conv0, features.norm0, features.relu0, features.pool0)
    seq3d = torch.nn.Sequential(
        inflate.inflate_conv(features.conv0, 3),
        inflate.inflate_batch_norm(features.norm0),
        features.relu0,
        inflate.inflate_pool(features.pool0, 1))

    loader = torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=False)
    frame_nb = 4
    for i, (input_2d, target) in enumerate(loader):
        target = target.cuda()
        target_var = torch.autograd.Variable(target)
        input_2d_var = torch.autograd.Variable(input_2d)
        out2d = seq2d(input_2d_var)
        time_pad = torch.nn.ReplicationPad3d((0, 0, 0, 0, 1, 1))
        input_3d = input_2d.unsqueeze(2).repeat(1, 1, frame_nb, 1, 1)
        input_3d_var = time_pad(input_3d) 
        out3d = seq3d(input_3d_var)
        expected_out_3d = out2d.data.unsqueeze(2).repeat(1, 1, frame_nb, 1, 1)
        out_diff = expected_out_3d - out3d.data
        print(out_diff.max())
        assert(out_diff.max() < 0.0001) 
Author: hassony2 | Project: kinetics_i3d_pytorch | Lines: 38 | Source: test_first_block.py

Example 12: scale_crop

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import RandomSizedCrop [as alias]
def scale_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    t_list = [
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ]
    #if scale_size != input_size:
    #t_list = [transforms.Scale((960,540))] + t_list

    return transforms.Compose(t_list)


# def scale_random_crop(input_size, scale_size=None, normalize=__imagenet_stats):
#     t_list = [
#         transforms.RandomCrop(input_size),
#         transforms.ToTensor(),
#         transforms.Normalize(**normalize),
#     ]
#     if scale_size != input_size:
#         t_list = [transforms.Scale(scale_size)] + t_list
# 
#     transforms.Compose(t_list)
# 
# 
# def pad_random_crop(input_size, scale_size=None, normalize=__imagenet_stats):
#     padding = int((scale_size - input_size) / 2)
#     return transforms.Compose([
#         transforms.RandomCrop(input_size, padding=padding),
#         transforms.RandomHorizontalFlip(),
#         transforms.ToTensor(),
#         transforms.Normalize(**normalize),
#     ])
# 
# 
# def inception_preproccess(input_size, normalize=__imagenet_stats):
#     return transforms.Compose([
#         transforms.RandomSizedCrop(input_size),
#         transforms.RandomHorizontalFlip(),
#         transforms.ToTensor(),
#         transforms.Normalize(**normalize)
#     ]) 
Author: albert100121 | Project: 360SD-Net | Lines: 42 | Source: preprocess.py

Example 13: inception_color_preproccess

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import RandomSizedCrop [as alias]
def inception_color_preproccess(input_size, normalize=__imagenet_stats):
    return transforms.Compose([
        # transforms.RandomSizedCrop(input_size),
        # transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        ColorJitter(
            brightness=0.4,
            contrast=0.4,
            saturation=0.4,
        ),
        Lighting(0.1, __imagenet_pca['eigval'], __imagenet_pca['eigvec']),
        transforms.Normalize(**normalize)
    ]) 
Author: meteorshowers | Project: StereoNet-ActiveStereoNet | Lines: 15 | Source: preprocess.py

Example 14: inception_color_preproccess

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import RandomSizedCrop [as alias]
def inception_color_preproccess(input_size, normalize=__imagenet_stats):
    return transforms.Compose([
        transforms.RandomSizedCrop(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        ColorJitter(
            brightness=0.4,
            contrast=0.4,
            saturation=0.4,
        ),
        Lighting(0.1, __imagenet_pca['eigval'], __imagenet_pca['eigvec']),
        transforms.Normalize(**normalize)
    ]) 
Author: eladhoffer | Project: bigBatch | Lines: 15 | Source: preprocess.py

Example 15: get_reproducible_rand_transform

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import RandomSizedCrop [as alias]
def get_reproducible_rand_transform(opt):
    """ Image data and side info can be transformed identically. """
    return [
        reproducible_transforms.RandomSizedCrop(opt.image_size),
        reproducible_transforms.RandomHorizontalFlip(),
        reproducible_transforms.ToTensor(),
        reproducible_transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                          std=[0.229, 0.224, 0.225])
    ] 
Author: johnwlambert | Project: dlupi-heteroscedastic-dropout | Lines: 11 | Source: modular_transforms.py


Note: The torchvision.transforms.RandomSizedCrop examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright of the source code. Refer to the corresponding project's license before redistributing or using the code; do not republish without permission.