

Python transforms.Scale Method Code Examples

This article collects typical code examples of the torchvision.transforms.Scale method in Python. If you are wondering what transforms.Scale does, how to call it, or what working uses of it look like, the curated examples below should help. You can also explore further usage examples from its containing module, torchvision.transforms.


The following presents 15 code examples of the transforms.Scale method, sorted by popularity by default.
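
One note on API status before the examples: transforms.Scale is the legacy name for this operation. It was deprecated in favor of transforms.Resize from torchvision 0.2 onward and removed in later releases, so the snippets below run as-is only on older torchvision versions. A minimal sketch of the basic contract (the image path is a placeholder):

from PIL import Image
from torchvision import transforms

img = Image.open('sample.jpg')  # placeholder path

# Legacy API: an int resizes the shorter edge to 256, keeping aspect ratio;
# an (h, w) tuple resizes to that exact size, as several examples below do.
scale = transforms.Scale(256)

# Drop-in replacement on torchvision >= 0.2, where Scale is deprecated:
# scale = transforms.Resize(256)

print(scale(img).size)  # shorter edge is now 256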

Example 1: __init__

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Scale [as alias]
def __init__(self, args, train=True):
        self.root_dir = args.data

        if train:
            self.data_set_list = train_set_list
        elif args.use_test_for_val:
            self.data_set_list = test_set_list
        else:
            self.data_set_list = val_set_list

        self.data_set_list = ['%06d.png' % (x) for x in self.data_set_list]
        self.args = args
        self.read_features = args.read_features

        self.features_dir = args.features_dir
        self.transform = transforms.Compose([
            transforms.Scale((args.image_size, args.image_size)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])
        self.transform_segmentation = transforms.Compose([
            transforms.Scale((args.segmentation_size, args.segmentation_size)),
            transforms.ToTensor(),
        ]) 
Author: ehsanik, Project: dogTorch, Lines: 27, Source: nyu_walkable_surface_dataset.py

Example 2: get_transform

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Scale [as alias]
def get_transform(opt):
    transform_list = []
    if opt.resize_or_crop == 'resize_and_crop':
        osize = [opt.loadSize, opt.loadSize]
        transform_list.append(transforms.Scale(osize, Image.BICUBIC))
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'crop':
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'scale_width':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.fineSize)))
    elif opt.resize_or_crop == 'scale_width_and_crop':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.loadSize)))
        transform_list.append(transforms.RandomCrop(opt.fineSize))

    if opt.isTrain and not opt.no_flip:
        transform_list.append(transforms.RandomHorizontalFlip())

    transform_list += [transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list) 
Author: aayushbansal, Project: Recycle-GAN, Lines: 25, Source: base_dataset.py
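
A hedged usage sketch for get_transform: the option names below are the ones the function reads, but the values are illustrative assumptions rather than Recycle-GAN's actual defaults.

from argparse import Namespace
from PIL import Image

# Illustrative options; the attribute names mirror the function body above.
opt = Namespace(resize_or_crop='resize_and_crop', loadSize=286,
                fineSize=256, isTrain=True, no_flip=False)

transform = get_transform(opt)
img = Image.open('frame.png')   # placeholder path
tensor = transform(img)         # shape (3, 256, 256), values in [-1, 1]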

Example 3: transform

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Scale [as alias]
def transform(is_train=True, normalize=True):
    """
    Returns a transform object
    """
    filters = []
    filters.append(Scale(256))

    if is_train:
        filters.append(RandomCrop(224))
    else:
        filters.append(CenterCrop(224))

    if is_train:
        filters.append(RandomHorizontalFlip())

    filters.append(ToTensor())
    if normalize:
        filters.append(Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]))
    return Compose(filters) 
Author: uwnlp, Project: verb-attributes, Lines: 22, Source: imsitu_loader.py
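
A brief usage sketch, assuming a PIL image as input (the path is a placeholder):

from PIL import Image

train_tf = transform(is_train=True)   # Scale(256) -> RandomCrop(224) -> flip
val_tf = transform(is_train=False)    # Scale(256) -> CenterCrop(224)

img = Image.open('example.jpg')       # placeholder path
x = train_tf(img)                     # normalized tensor of shape (3, 224, 224)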

Example 4: get_loader

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Scale [as alias]
def get_loader(config):
    """Builds and returns Dataloader for MNIST and SVHN dataset."""
    
    transform = transforms.Compose([
                    transforms.Scale(config.image_size),
                    transforms.ToTensor(),
                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    
    svhn = datasets.SVHN(root=config.svhn_path, download=True, transform=transform)
    mnist = datasets.MNIST(root=config.mnist_path, download=True, transform=transform)

    svhn_loader = torch.utils.data.DataLoader(dataset=svhn,
                                              batch_size=config.batch_size,
                                              shuffle=True,
                                              num_workers=config.num_workers)

    mnist_loader = torch.utils.data.DataLoader(dataset=mnist,
                                               batch_size=config.batch_size,
                                               shuffle=True,
                                               num_workers=config.num_workers)
    return svhn_loader, mnist_loader 
Author: yunjey, Project: mnist-svhn-transfer, Lines: 23, Source: data_loader.py
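
A usage sketch for get_loader; the config class is a hypothetical stand-in whose attribute names mirror those the function reads, with assumed values.

# Hypothetical config; attribute names come from get_loader above.
class Config:
    image_size = 32
    svhn_path = './data/svhn'
    mnist_path = './data/mnist'
    batch_size = 64
    num_workers = 2

svhn_loader, mnist_loader = get_loader(Config())
svhn_imgs, _ = next(iter(svhn_loader))    # (64, 3, 32, 32)
mnist_imgs, _ = next(iter(mnist_loader))  # (64, 1, 32, 32), MNIST is grayscale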

Example 5: test_getitem

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Scale [as alias]
def test_getitem(self):
        import torchvision.transforms as t
        from reid.datasets.viper import VIPeR
        from reid.utils.data.preprocessor import Preprocessor

        root, split_id, num_val = '/tmp/open-reid/viper', 0, 100
        dataset = VIPeR(root, split_id=split_id, num_val=num_val, download=True)

        preproc = Preprocessor(dataset.train, root=dataset.images_dir,
                               transform=t.Compose([
                                   t.Scale(256),
                                   t.CenterCrop(224),
                                   t.ToTensor(),
                                   t.Normalize(mean=[0.485, 0.456, 0.406],
                                               std=[0.229, 0.224, 0.225])
                               ]))
        self.assertEquals(len(preproc), len(dataset.train))
        img, pid, camid = preproc[0]
        self.assertEquals(img.size(), (3, 224, 224)) 
Author: Cysu, Project: open-reid, Lines: 21, Source: test_preprocessor.py

Example 6: __init__

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Scale [as alias]
def __init__(self, cuda=False, model='resnet-18', layer='default', layer_output_size=512):
        """ Img2Vec
        :param cuda: If set to True, will run forward pass on GPU
        :param model: String name of requested model
        :param layer: String or Int depending on model.  See more docs: https://github.com/christiansafka/img2vec.git
        :param layer_output_size: Int depicting the output size of the requested layer
        """
        self.device = torch.device("cuda" if cuda else "cpu")
        self.layer_output_size = layer_output_size
        self.model_name = model
        
        self.model, self.extraction_layer = self._get_model_and_layer(model, layer)

        self.model = self.model.to(self.device)

        self.model.eval()

        self.scaler = transforms.Scale((224, 224))
        self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                              std=[0.229, 0.224, 0.225])
        self.to_tensor = transforms.ToTensor() 
Author: christiansafka, Project: img2vec, Lines: 23, Source: img_to_vec.py
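
The class stores its transforms as separate attributes; a sketch of how a get_vec-style method would plausibly chain them (the image path is a placeholder, and the chaining is inferred from the attributes above rather than quoted from the project):

from PIL import Image

img2vec = Img2Vec(cuda=False)
img = Image.open('cat.jpg')  # placeholder path

# Scale to 224x224, convert to a tensor, normalize, then batch and move to device.
x = img2vec.normalize(img2vec.to_tensor(img2vec.scaler(img)))
x = x.unsqueeze(0).to(img2vec.device)  # shape (1, 3, 224, 224)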

Example 7: get_transform

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Scale [as alias]
def get_transform(opt):
    transform_list = []
    if opt.resize_or_crop == 'resize_and_crop':
        osize = [opt.loadSize, opt.loadSize]
        transform_list.append(transforms.Scale(osize, Image.BICUBIC))
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'crop':
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'scale_width':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.fineSize)))
    elif opt.resize_or_crop == 'scale_width_and_crop':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.loadSize)))
        transform_list.append(transforms.RandomCrop(opt.fineSize))

    # if opt.isTrain and not opt.no_flip:
    #     transform_list.append(transforms.RandomHorizontalFlip())

    transform_list += [transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list) 
Author: jessemelpolio, Project: non-stationary_texture_syn, Lines: 25, Source: base_dataset.py

Example 8: __init__

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Scale [as alias]
def __init__(self, model='inception', layer='default', layer_output_size=2048, data="top10", transform=None):
        """ Img2Vec
        :param model: String name of requested model
        :param layer: String or Int depending on model.  See more docs: https://github.com/christiansafka/img2vec.git
        :param layer_output_size: Int depicting the output size of the requested layer
        """
        cuda = True if torch.cuda.is_available() else False

        self.device = torch.device("cuda" if cuda else "cpu")
        self.layer_output_size = layer_output_size
        # self.model_path = '/dccstor/alfassy/saved_models/inception_traincocoInceptionT10Half2018.9.1.9:30epoch:71'
        # self.model_path = '/dccstor/alfassy/saved_models/inception_trainCocoIncHalf2018.10.3.13:39best'
        # self.model_path = '/dccstor/alfassy/saved_models/inception_trainCocoIncHalf2018.10.8.12:46best'
        self.model_path = '/dccstor/alfassy/saved_models/inception_trainCocoIncHalf642018.10.9.13:44epoch:30'
        self.model, self.extraction_layer = self._get_model_and_layer(model, layer, data)
        self.model = self.model.to(self.device)
        self.model.eval()
        #self.scaler = transforms.Resize(224, 224)
        #self.scaler = transforms.Scale((224, 224))
        self.transform = transform
        self.model_name = model 
Author: leokarlin, Project: LaSO, Lines: 23, Source: img_to_vec.py

Example 9: __call__

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Scale [as alias]
def __call__(self, img):
        for attempt in range(10):
            area = img.size[0] * img.size[1]
            target_area = random.uniform(0.9, 1.) * area
            aspect_ratio = random.uniform(7. / 8, 8. / 7)

            w = int(round(math.sqrt(target_area * aspect_ratio)))
            h = int(round(math.sqrt(target_area / aspect_ratio)))

            if random.random() < 0.5:
                w, h = h, w

            if w <= img.size[0] and h <= img.size[1]:
                x1 = random.randint(0, img.size[0] - w)
                y1 = random.randint(0, img.size[1] - h)

                img = img.crop((x1, y1, x1 + w, y1 + h))
                assert (img.size == (w, h))

                return img.resize((self.size, self.size), self.interpolation)

        # Fallback
        scale = Scale(self.size, interpolation=self.interpolation)
        crop = CenterCrop(self.size)
        return crop(scale(img)) 
Author: orashi, Project: AlacGAN, Lines: 27, Source: train.py
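
For context, a sketch of the constructor this __call__ plausibly sits on: the method reads self.size and self.interpolation, so the host is a RandomSizedCrop-style transform. The class name and default interpolation here are hypothetical.

from PIL import Image

class RandomNearSquareCrop:
    """Hypothetical host class for the __call__ above: it crops a near-square
    region covering at least 90% of the image, resizes it to size x size, and
    falls back to Scale followed by CenterCrop after 10 failed attempts."""
    def __init__(self, size, interpolation=Image.BILINEAR):
        self.size = size
        self.interpolation = interpolation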

Example 10: __init__

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Scale [as alias]
def __init__(self, root, scale_size, data_type, skip_pix2pix_processing=False):
        self.root = root
        if not os.path.exists(self.root):
            raise Exception("[!] {} not exists.".format(root))

        self.name = os.path.basename(root)
        if self.name in PIX2PIX_DATASETS and not skip_pix2pix_processing:
            pix2pix_split_images(self.root)

        self.paths = glob(os.path.join(self.root, '{}/*'.format(data_type)))
        if len(self.paths) == 0:
            raise Exception("No images are found in {}".format(self.root))
        self.shape = list(Image.open(self.paths[0]).size) + [3]

        self.transform = transforms.Compose([
            transforms.Scale(scale_size), 
            transforms.ToTensor(), 
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ]) 
Author: BMIRDS, Project: HistoGAN, Lines: 21, Source: data_loader.py

Example 11: __init__

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Scale [as alias]
def __init__(self, split='train'):
        self.split = split
        assert(split=='train' or split=='val')
        self.name = 'ImageNet_Split_' + split

        print('Loading ImageNet dataset - split {0}'.format(split))
        transforms_list = []
        transforms_list.append(transforms.Scale(256))
        transforms_list.append(transforms.CenterCrop(224))
        transforms_list.append(lambda x: np.asarray(x))
        transforms_list.append(transforms.ToTensor())
        mean_pix = [0.485, 0.456, 0.406]
        std_pix = [0.229, 0.224, 0.225]
        transforms_list.append(transforms.Normalize(mean=mean_pix, std=std_pix))
        self.transform = transforms.Compose(transforms_list)

        traindir = os.path.join(_IMAGENET_DATASET_DIR, 'train')
        valdir = os.path.join(_IMAGENET_DATASET_DIR, 'val')
        self.data = datasets.ImageFolder(
            traindir if split=='train' else valdir, self.transform)
        self.labels = [item[1] for item in self.data.imgs] 
Author: gidariss, Project: FewShotWithoutForgetting, Lines: 23, Source: dataloader.py

Example 12: get_imgs

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Scale [as alias]
def get_imgs(imageIndex, imsize, file_name,transform=None, normalize=None):

    f = h5py.File(file_name,'r')
    images = f['images']
    img = images[imageIndex]
    # rotate axis to (256,256,3)
    img = np.moveaxis(img, 0, -1)
    # convert to PIL Image
    img = Image.fromarray(img, 'RGB')

    if transform is not None:
        img = transform(img)

    ret = []
    for i in range(cfg.TREE.BRANCH_NUM):
        if i < (cfg.TREE.BRANCH_NUM - 1):
            re_img = transforms.Scale(imsize[i])(img)
        else:
            re_img = img
        ret.append(normalize(re_img))
        
    rec_id = f['recIDs'][imageIndex]
    img_id = f['imagesIDs'][imageIndex]

    return ret, rec_id, img_id 
Author: netanelyo, Project: Recipe2ImageGAN, Lines: 27, Source: datasets.py

Example 13: initialize

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Scale [as alias]
def initialize(self, source, target, batch_size1, batch_size2, scale=32):
        transform = transforms.Compose([
            transforms.Scale(scale),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
        dataset_source = Dataset(source['imgs'], source['labels'], transform=transform)
        dataset_target = Dataset(target['imgs'], target['labels'], transform=transform)
        # dataset_source = tnt.dataset.TensorDataset([source['imgs'], source['labels']])
        # dataset_target = tnt.dataset.TensorDataset([target['imgs'], target['labels']])
        data_loader_s = torch.utils.data.DataLoader(
            dataset_source,
            batch_size=batch_size1,
            shuffle=True,
            num_workers=4)

        data_loader_t = torch.utils.data.DataLoader(
            dataset_target,
            batch_size=batch_size2,
            shuffle=True,
            num_workers=4)
        self.dataset_s = dataset_source
        self.dataset_t = dataset_target
        self.paired_data = PairedData(data_loader_s, data_loader_t,
                                      float("inf")) 
Author: mil-tokyo, Project: MCD_DA, Lines: 27, Source: unaligned_data_loader.py

Example 14: _init_transform

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Scale [as alias]
def _init_transform(self):
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]

        if self.split == 'train':
            self.img_transform = transforms.Compose([
                transforms.Scale(int(self.imgSize * 1.2)),
                transforms.RandomCrop(self.imgSize),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize(mean, std)])
        else:
            self.img_transform = transforms.Compose([
                transforms.Scale(self.imgSize),
                transforms.CenterCrop(self.imgSize),
                transforms.ToTensor(),
                transforms.Normalize(mean, std)]) 
Author: hangzhaomit, Project: Sound-of-Pixels, Lines: 19, Source: base.py

Example 15: feed_interpolated_input

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Scale [as alias]
def feed_interpolated_input(self, x):
        if self.phase == 'gtrns' and floor(self.resl)>2 and floor(self.resl)<=self.max_resl:
            alpha = self.complete['gen']/100.0
            transform = transforms.Compose( [   transforms.ToPILImage(),
                                                transforms.Scale(size=int(pow(2,floor(self.resl)-1)), interpolation=0),      # 0: nearest
                                                transforms.Scale(size=int(pow(2,floor(self.resl))), interpolation=0),      # 0: nearest
                                                transforms.ToTensor(),
                                            ] )
            x_low = x.clone().add(1).mul(0.5)
            for i in range(x_low.size(0)):
                x_low[i] = transform(x_low[i]).mul(2).add(-1)
            x = torch.add(x.mul(alpha), x_low.mul(1-alpha)) # interpolated_x

        if self.use_cuda:
            return x.cuda()
        else:
            return x 
Author: nashory, Project: pggan-pytorch, Lines: 19, Source: trainer.py
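
The two chained Scale calls implement progressive growing's resolution fade-in: each image is nearest-neighbor downsampled one resolution step, upsampled back, and blended with the original as alpha*x + (1-alpha)*x_low. A standalone sketch of the same idea on a batched tensor (sizes are illustrative):

import torch
import torch.nn.functional as F

def fade_in(x, alpha):
    # alpha in [0, 1]: 0 means fully low-res, 1 means fully high-res.
    low = F.interpolate(x, scale_factor=0.5, mode='nearest')
    low = F.interpolate(low, scale_factor=2.0, mode='nearest')
    return alpha * x + (1 - alpha) * low

x = torch.randn(4, 3, 64, 64)  # illustrative batch at resolution 2**6
y = fade_in(x, alpha=0.3)      # partially faded-in batch, same shape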


Note: The torchvision.transforms.Scale method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by their respective developers, and copyright remains with the original authors; for distribution and use, refer to each project's License. Do not reproduce without permission.