

Python transforms.Scale Method Code Examples

This article collects typical usage examples of the torchvision.transforms.Scale method in Python. If you are wondering what transforms.Scale does, how to use it, or what it looks like in practice, the hand-picked code examples below should help. You can also explore other usage examples from its parent module, torchvision.transforms.


The following presents 15 code examples of the transforms.Scale method, sorted by popularity by default.
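
Note before diving in: transforms.Scale is a legacy API. Newer torchvision releases first deprecated it as an alias of transforms.Resize and later removed it, so code copied from the examples below may need a one-line substitution. A minimal sketch of the drop-in replacement (not taken from any of the listed projects):

from torchvision import transforms

# transforms.Scale((h, w)) in old torchvision behaves like transforms.Resize((h, w))
# in current torchvision; the rest of the pipeline is unchanged.
preprocess = transforms.Compose([
    transforms.Resize((224, 224)),  # formerly transforms.Scale((224, 224))
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])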

Example 1: __init__

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Scale [as alias]
def __init__(self, args, train=True):
        self.root_dir = args.data

        if train:
            self.data_set_list = train_set_list
        elif args.use_test_for_val:
            self.data_set_list = test_set_list
        else:
            self.data_set_list = val_set_list

        self.data_set_list = ['%06d.png' % (x) for x in self.data_set_list]
        self.args = args
        self.read_features = args.read_features

        self.features_dir = args.features_dir
        self.transform = transforms.Compose([
            transforms.Scale((args.image_size, args.image_size)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])
        self.transform_segmentation = transforms.Compose([
            transforms.Scale((args.segmentation_size, args.segmentation_size)),
            transforms.ToTensor(),
        ]) 
Author: ehsanik, Project: dogTorch, Lines: 27, Source: nyu_walkable_surface_dataset.py

Example 2: get_transform

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Scale [as alias]
def get_transform(opt):
    transform_list = []
    if opt.resize_or_crop == 'resize_and_crop':
        osize = [opt.loadSize, opt.loadSize]
        transform_list.append(transforms.Scale(osize, Image.BICUBIC))
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'crop':
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'scale_width':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.fineSize)))
    elif opt.resize_or_crop == 'scale_width_and_crop':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.loadSize)))
        transform_list.append(transforms.RandomCrop(opt.fineSize))

    if opt.isTrain and not opt.no_flip:
        transform_list.append(transforms.RandomHorizontalFlip())

    transform_list += [transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list) 
Author: aayushbansal, Project: Recycle-GAN, Lines: 25, Source: base_dataset.py
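
For reference, a minimal sketch of driving get_transform with an ad-hoc options object; the Namespace fields below simply mirror the attributes the excerpt reads and are not part of the Recycle-GAN code:

from argparse import Namespace
from PIL import Image

# Hypothetical options object exposing only the attributes get_transform uses.
opt = Namespace(resize_or_crop='resize_and_crop', loadSize=286,
                fineSize=256, isTrain=True, no_flip=False)
transform = get_transform(opt)
img = Image.open('example.jpg').convert('RGB')  # any RGB input image
tensor = transform(img)  # FloatTensor of shape (3, 256, 256), scaled to [-1, 1]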

Example 3: transform

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Scale [as alias]
def transform(is_train=True, normalize=True):
    """
    Returns a transform object
    """
    filters = []
    filters.append(Scale(256))

    if is_train:
        filters.append(RandomCrop(224))
    else:
        filters.append(CenterCrop(224))

    if is_train:
        filters.append(RandomHorizontalFlip())

    filters.append(ToTensor())
    if normalize:
        filters.append(Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]))
    return Compose(filters) 
Author: uwnlp, Project: verb-attributes, Lines: 22, Source: imsitu_loader.py

Example 4: get_loader

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Scale [as alias]
def get_loader(config):
    """Builds and returns Dataloader for MNIST and SVHN dataset."""
    
    transform = transforms.Compose([
                    transforms.Scale(config.image_size),
                    transforms.ToTensor(),
                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    
    svhn = datasets.SVHN(root=config.svhn_path, download=True, transform=transform)
    mnist = datasets.MNIST(root=config.mnist_path, download=True, transform=transform)

    svhn_loader = torch.utils.data.DataLoader(dataset=svhn,
                                              batch_size=config.batch_size,
                                              shuffle=True,
                                              num_workers=config.num_workers)

    mnist_loader = torch.utils.data.DataLoader(dataset=mnist,
                                               batch_size=config.batch_size,
                                               shuffle=True,
                                               num_workers=config.num_workers)
    return svhn_loader, mnist_loader 
Author: yunjey, Project: mnist-svhn-transfer, Lines: 23, Source: data_loader.py

Example 5: test_getitem

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Scale [as alias]
def test_getitem(self):
        import torchvision.transforms as t
        from reid.datasets.viper import VIPeR
        from reid.utils.data.preprocessor import Preprocessor

        root, split_id, num_val = '/tmp/open-reid/viper', 0, 100
        dataset = VIPeR(root, split_id=split_id, num_val=num_val, download=True)

        preproc = Preprocessor(dataset.train, root=dataset.images_dir,
                               transform=t.Compose([
                                   t.Scale(256),
                                   t.CenterCrop(224),
                                   t.ToTensor(),
                                   t.Normalize(mean=[0.485, 0.456, 0.406],
                                               std=[0.229, 0.224, 0.225])
                               ]))
        self.assertEqual(len(preproc), len(dataset.train))
        img, pid, camid = preproc[0]
        self.assertEqual(img.size(), (3, 224, 224)) 
Author: Cysu, Project: open-reid, Lines: 21, Source: test_preprocessor.py

Example 6: __init__

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Scale [as alias]
def __init__(self, cuda=False, model='resnet-18', layer='default', layer_output_size=512):
        """ Img2Vec
        :param cuda: If set to True, will run forward pass on GPU
        :param model: String name of requested model
        :param layer: String or Int depending on model.  See more docs: https://github.com/christiansafka/img2vec.git
        :param layer_output_size: Int depicting the output size of the requested layer
        """
        self.device = torch.device("cuda" if cuda else "cpu")
        self.layer_output_size = layer_output_size
        self.model_name = model
        
        self.model, self.extraction_layer = self._get_model_and_layer(model, layer)

        self.model = self.model.to(self.device)

        self.model.eval()

        self.scaler = transforms.Scale((224, 224))
        self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                              std=[0.229, 0.224, 0.225])
        self.to_tensor = transforms.ToTensor() 
Author: christiansafka, Project: img2vec, Lines: 23, Source: img_to_vec.py

Example 7: get_transform

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Scale [as alias]
def get_transform(opt):
    transform_list = []
    if opt.resize_or_crop == 'resize_and_crop':
        osize = [opt.loadSize, opt.loadSize]
        transform_list.append(transforms.Scale(osize, Image.BICUBIC))
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'crop':
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'scale_width':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.fineSize)))
    elif opt.resize_or_crop == 'scale_width_and_crop':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.loadSize)))
        transform_list.append(transforms.RandomCrop(opt.fineSize))

    # if opt.isTrain and not opt.no_flip:
    #     transform_list.append(transforms.RandomHorizontalFlip())

    transform_list += [transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list) 
Author: jessemelpolio, Project: non-stationary_texture_syn, Lines: 25, Source: base_dataset.py

Example 8: __init__

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Scale [as alias]
def __init__(self, model='inception', layer='default', layer_output_size=2048, data="top10", transform=None):
        """ Img2Vec
        :param model: String name of requested model
        :param layer: String or Int depending on model.  See more docs: https://github.com/christiansafka/img2vec.git
        :param layer_output_size: Int depicting the output size of the requested layer
        """
        cuda = True if torch.cuda.is_available() else False

        self.device = torch.device("cuda" if cuda else "cpu")
        self.layer_output_size = layer_output_size
        # self.model_path = '/dccstor/alfassy/saved_models/inception_traincocoInceptionT10Half2018.9.1.9:30epoch:71'
        # self.model_path = '/dccstor/alfassy/saved_models/inception_trainCocoIncHalf2018.10.3.13:39best'
        # self.model_path = '/dccstor/alfassy/saved_models/inception_trainCocoIncHalf2018.10.8.12:46best'
        self.model_path = '/dccstor/alfassy/saved_models/inception_trainCocoIncHalf642018.10.9.13:44epoch:30'
        self.model, self.extraction_layer = self._get_model_and_layer(model, layer, data)
        self.model = self.model.to(self.device)
        self.model.eval()
        #self.scaler = transforms.Resize(224, 224)
        #self.scaler = transforms.Scale((224, 224))
        self.transform = transform
        self.model_name = model 
Author: leokarlin, Project: LaSO, Lines: 23, Source: img_to_vec.py

Example 9: __call__

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Scale [as alias]
def __call__(self, img):
        for attempt in range(10):
            area = img.size[0] * img.size[1]
            target_area = random.uniform(0.9, 1.) * area
            aspect_ratio = random.uniform(7. / 8, 8. / 7)

            w = int(round(math.sqrt(target_area * aspect_ratio)))
            h = int(round(math.sqrt(target_area / aspect_ratio)))

            if random.random() < 0.5:
                w, h = h, w

            if w <= img.size[0] and h <= img.size[1]:
                x1 = random.randint(0, img.size[0] - w)
                y1 = random.randint(0, img.size[1] - h)

                img = img.crop((x1, y1, x1 + w, y1 + h))
                assert (img.size == (w, h))

                return img.resize((self.size, self.size), self.interpolation)

        # Fallback
        scale = Scale(self.size, interpolation=self.interpolation)
        crop = CenterCrop(self.size)
        return crop(scale(img)) 
Author: orashi, Project: AlacGAN, Lines: 27, Source: train.py
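
Note: this __call__ implements an Inception-style random resized crop: it samples a target area between 90% and 100% of the source image and an aspect ratio in [7/8, 8/7], retries up to ten times for a crop that fits, and otherwise falls back to Scale followed by CenterCrop.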

Example 10: __init__

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Scale [as alias]
def __init__(self, root, scale_size, data_type, skip_pix2pix_processing=False):
        self.root = root
        if not os.path.exists(self.root):
            raise Exception("[!] {} not exists.".format(root))

        self.name = os.path.basename(root)
        if self.name in PIX2PIX_DATASETS and not skip_pix2pix_processing:
            pix2pix_split_images(self.root)

        self.paths = glob(os.path.join(self.root, '{}/*'.format(data_type)))
        if len(self.paths) == 0:
            raise Exception("No images are found in {}".format(self.root))
        self.shape = list(Image.open(self.paths[0]).size) + [3]

        self.transform = transforms.Compose([
            transforms.Scale(scale_size), 
            transforms.ToTensor(), 
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ]) 
Author: BMIRDS, Project: HistoGAN, Lines: 21, Source: data_loader.py

Example 11: __init__

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Scale [as alias]
def __init__(self, split='train'):
        self.split = split
        assert(split=='train' or split=='val')
        self.name = 'ImageNet_Split_' + split

        print('Loading ImageNet dataset - split {0}'.format(split))
        transforms_list = []
        transforms_list.append(transforms.Scale(256))
        transforms_list.append(transforms.CenterCrop(224))
        transforms_list.append(lambda x: np.asarray(x))
        transforms_list.append(transforms.ToTensor())
        mean_pix = [0.485, 0.456, 0.406]
        std_pix = [0.229, 0.224, 0.225]
        transforms_list.append(transforms.Normalize(mean=mean_pix, std=std_pix))
        self.transform = transforms.Compose(transforms_list)

        traindir = os.path.join(_IMAGENET_DATASET_DIR, 'train')
        valdir = os.path.join(_IMAGENET_DATASET_DIR, 'val')
        self.data = datasets.ImageFolder(
            traindir if split=='train' else valdir, self.transform)
        self.labels = [item[1] for item in self.data.imgs] 
Author: gidariss, Project: FewShotWithoutForgetting, Lines: 23, Source: dataloader.py

Example 12: get_imgs

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Scale [as alias]
def get_imgs(imageIndex, imsize, file_name, transform=None, normalize=None):

    f = h5py.File(file_name, 'r')
    images = f['images']
    img = images[imageIndex]
    # rotate axis to (256,256,3)
    img = np.moveaxis(img, 0, -1)
    # convert to PIL Image
    img = Image.fromarray(img, 'RGB')

    if transform is not None:
        img = transform(img)

    ret = []
    for i in range(cfg.TREE.BRANCH_NUM):
        if i < (cfg.TREE.BRANCH_NUM - 1):
            re_img = transforms.Scale(imsize[i])(img)
        else:
            re_img = img
        ret.append(normalize(re_img))
        
    rec_id = f['recIDs'][imageIndex]
    img_id = f['imagesIDs'][imageIndex]
    f.close()  # close the HDF5 file once all arrays have been read into memory

    return ret, rec_id, img_id 
Author: netanelyo, Project: Recipe2ImageGAN, Lines: 27, Source: datasets.py

Example 13: initialize

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Scale [as alias]
def initialize(self, source, target, batch_size1, batch_size2, scale=32):
        transform = transforms.Compose([
            transforms.Scale(scale),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
        dataset_source = Dataset(source['imgs'], source['labels'], transform=transform)
        dataset_target = Dataset(target['imgs'], target['labels'], transform=transform)
        # dataset_source = tnt.dataset.TensorDataset([source['imgs'], source['labels']])
        # dataset_target = tnt.dataset.TensorDataset([target['imgs'], target['labels']])
        data_loader_s = torch.utils.data.DataLoader(
            dataset_source,
            batch_size=batch_size1,
            shuffle=True,
            num_workers=4)

        data_loader_t = torch.utils.data.DataLoader(
            dataset_target,
            batch_size=batch_size2,
            shuffle=True,
            num_workers=4)
        self.dataset_s = dataset_source
        self.dataset_t = dataset_target
        self.paired_data = PairedData(data_loader_s, data_loader_t,
                                      float("inf")) 
Author: mil-tokyo, Project: MCD_DA, Lines: 27, Source: unaligned_data_loader.py

Example 14: _init_transform

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Scale [as alias]
def _init_transform(self):
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]

        if self.split == 'train':
            self.img_transform = transforms.Compose([
                transforms.Scale(int(self.imgSize * 1.2)),
                transforms.RandomCrop(self.imgSize),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize(mean, std)])
        else:
            self.img_transform = transforms.Compose([
                transforms.Scale(self.imgSize),
                transforms.CenterCrop(self.imgSize),
                transforms.ToTensor(),
                transforms.Normalize(mean, std)]) 
Author: hangzhaomit, Project: Sound-of-Pixels, Lines: 19, Source: base.py

Example 15: feed_interpolated_input

# Required import: from torchvision import transforms [as alias]
# Or: from torchvision.transforms import Scale [as alias]
def feed_interpolated_input(self, x):
        if self.phase == 'gtrns' and floor(self.resl)>2 and floor(self.resl)<=self.max_resl:
            alpha = self.complete['gen']/100.0
            transform = transforms.Compose([
                transforms.ToPILImage(),
                transforms.Scale(size=int(pow(2, floor(self.resl) - 1)), interpolation=0),  # 0: nearest
                transforms.Scale(size=int(pow(2, floor(self.resl))), interpolation=0),      # 0: nearest
                transforms.ToTensor(),
            ])
            x_low = x.clone().add(1).mul(0.5)
            for i in range(x_low.size(0)):
                x_low[i] = transform(x_low[i]).mul(2).add(-1)
            x = torch.add(x.mul(alpha), x_low.mul(1-alpha)) # interpolated_x

        if self.use_cuda:
            return x.cuda()
        else:
            return x 
Author: nashory, Project: pggan-pytorch, Lines: 19, Source: trainer.py
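
Note: the two chained Scale calls first shrink the batch to the previous resolution (2^(floor(resl)-1)) and then enlarge it back to 2^floor(resl) with nearest-neighbor interpolation, producing a coarse copy x_low; the blend alpha*x + (1-alpha)*x_low implements the smooth resolution transition used in progressive growing.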


Note: The torchvision.transforms.Scale method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.