

Python torchvision.transforms Code Examples

This article collects typical usage examples of torchvision.transforms in Python. If you are unsure how to use torchvision.transforms in practice, the curated code examples below may help. You can also explore further usage examples from the torchvision package, to which this module belongs.


The following presents 15 code examples of torchvision.transforms, sorted by popularity by default.
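Before the examples, here is the common pattern they all share: build a preprocessing pipeline with transforms.Compose and apply it to an image or pass it to a dataset. This is a minimal sketch for orientation only; the image path is a placeholder and the normalization values are the usual ImageNet statistics, not values taken from the examples below.

import torchvision.transforms as transforms
from PIL import Image

# A typical preprocessing pipeline: resize, crop, convert to tensor, normalize.
preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),                            # PIL image -> float tensor in [0, 1]
    transforms.Normalize(mean=[0.485, 0.456, 0.406],  # ImageNet channel means
                         std=[0.229, 0.224, 0.225]),  # ImageNet channel stds
])

img = Image.open('example.jpg')   # placeholder path
x = preprocess(img)               # tensor of shape (3, 224, 224)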

Example 1: main

# Required imports: import torchvision [as alias]
# Or: from torchvision import transforms [as alias]
def main():
    best_acc = 0

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    print('==> Preparing data..')
    transforms_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])

    dataset_train = CIFAR10(root='../data', train=True, download=True, 
                            transform=transforms_train)

    train_loader = DataLoader(dataset_train, batch_size=args.batch_size, 
                              shuffle=True, num_workers=args.num_worker)

    # there are 10 classes so the dataset name is cifar-10
    classes = ('plane', 'car', 'bird', 'cat', 'deer', 
               'dog', 'frog', 'horse', 'ship', 'truck')

    print('==> Making model..')

    net = pyramidnet()
    net = nn.DataParallel(net)
    net = net.to(device)
    num_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
    print('The number of parameters of model is', num_params)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=args.lr)
    # optimizer = optim.SGD(net.parameters(), lr=args.lr, 
    #                       momentum=0.9, weight_decay=1e-4)
    
    train(net, criterion, optimizer, train_loader, device) 
Author: dnddnjs, Project: pytorch-multigpu, Lines: 38, Source: train.py
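Note that args in this example (and in Example 14 from the same project) comes from an argparse parser defined elsewhere in train.py and not shown here. Below is a hypothetical minimal parser covering only the fields the snippet reads; the argument names match the attribute accesses, but the defaults are assumptions, not the project's actual values.

import argparse

# Hypothetical parser: only the arguments referenced as args.* above.
parser = argparse.ArgumentParser(description='CIFAR-10 training (illustrative only)')
parser.add_argument('--batch_size', type=int, default=128)  # used as args.batch_size
parser.add_argument('--num_worker', type=int, default=4)    # used as args.num_worker
parser.add_argument('--lr', type=float, default=1e-3)       # used as args.lr
args = parser.parse_args()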

Example 2: test

# Required imports: import torchvision [as alias]
# Or: from torchvision import transforms [as alias]
def test(model, test_loader, epoch, test_list, save_dir):
    model.eval()
    if not isdir(save_dir):
        os.makedirs(save_dir)
    for idx, image in enumerate(test_loader):
        image = image.cuda()
        # rescale image to [0, 255] and then subtract the mean
        # https://github.com/pytorch/vision/blob/c74b79c83fc99d0b163d8381f7aa1296e4cb23d0/torchvision/transforms/functional.py#L51
        _, _, H, W = image.shape
        results = model(image)
        result = torch.squeeze(results[-1].detach()).cpu().numpy()
        results_all = torch.zeros((len(results), 1, H, W))
        for i in range(len(results)):
            results_all[i, 0, :, :] = results[i]
        filename = splitext(test_list[idx])[0]
        torchvision.utils.save_image(results_all, join(save_dir, "%s.jpg" % filename))
        result_b = Image.fromarray(((1 - result) * 255).astype(np.uint8))
        result = Image.fromarray((result * 255).astype(np.uint8))
        result.save(join(save_dir, "%s.png" % filename))
        result_b.save(join(save_dir, "%s.jpg" % filename))
        print("Running test [%d/%d]" % (idx + 1, len(test_loader))) 
Author: meteorshowers, Project: hed-pytorch, Lines: 23, Source: train_hed.py

Example 3: __init__

# Required imports: import torchvision [as alias]
# Or: from torchvision import transforms [as alias]
def __init__(self, root=os.path.expanduser('~/.encoding/data/citys/'), split='train',
                 mode=None, transform=None, target_transform=None, **kwargs):
        super(CitySegmentation, self).__init__(
            root, split, mode, transform, target_transform, **kwargs)
        #self.root = os.path.join(root, self.BASE_DIR)
        self.images, self.mask_paths = get_city_pairs(self.root, self.split)
        assert (len(self.images) == len(self.mask_paths))
        if len(self.images) == 0:
            raise RuntimeError("Found 0 images in subfolders of: " + self.root + "\n")
        self._indices = np.array(range(-1, 19))
        self._classes = np.array([0, 7, 8, 11, 12, 13, 17, 19, 20, 21, 22,
                                  23, 24, 25, 26, 27, 28, 31, 32, 33])
        self._key = np.array([-1, -1, -1, -1, -1, -1,
                              -1, -1,  0,  1, -1, -1, 
                              2,   3,  4, -1, -1, -1,
                              5,  -1,  6,  7,  8,  9,
                              10, 11, 12, 13, 14, 15,
                              -1, -1, 16, 17, 18])
        self._mapping = np.array(range(-1, len(self._key)-1)).astype('int32') 
Author: zhanghang1989, Project: PyTorch-Encoding, Lines: 22, Source: cityscapes.py
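The _key and _mapping arrays above are set up to remap raw Cityscapes label IDs onto contiguous training IDs (ignored classes become -1), but Example 3 does not show how they are consumed. The following is a sketch of the usual np.digitize pattern for such a mapping, assuming numpy is imported as np as in the class above; it may differ from the project's actual method.

def _class_to_index(self, mask):
    # mask holds raw Cityscapes IDs; every value must appear in self._mapping.
    values = np.unique(mask)
    for value in values:
        assert value in self._mapping
    # digitize finds each pixel's position in _mapping; _key converts it to a train ID.
    index = np.digitize(mask.ravel(), self._mapping, right=True)
    return self._key[index].reshape(mask.shape)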

Example 4: __getitem__

# Required imports: import torchvision [as alias]
# Or: from torchvision import transforms [as alias]
def __getitem__(self, index):
        img = Image.open(self.images[index]).convert('RGB')
        if self.mode == 'test':
            if self.transform is not None:
                img = self.transform(img)
            return img, os.path.basename(self.images[index])
        #mask = self.masks[index]
        mask = Image.open(self.mask_paths[index])
        # synchrosized transform
        if self.mode == 'train':
            img, mask = self._sync_transform(img, mask)
        elif self.mode == 'val':
            img, mask = self._val_sync_transform(img, mask)
        else:
            assert self.mode == 'testval'
            mask = self._mask_transform(mask)
        # general resize, normalize and toTensor
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            mask = self.target_transform(mask)
        return img, mask 
Author: zhanghang1989, Project: PyTorch-Encoding, Lines: 24, Source: cityscapes.py

Example 5: __init__

# Required imports: import torchvision [as alias]
# Or: from torchvision import transforms [as alias]
def __init__(self, root_dir, transform=None, loader = pil_loader):
        """
        Args:
            csv_file (string): Path to the csv file with annotations.
            root_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        if transform is None:
            transform = torchvision.transforms.Compose([torchvision.transforms.Resize(224),
                                                        torchvision.transforms.RandomHorizontalFlip(p=0.5),
                                                        torchvision.transforms.RandomVerticalFlip(p=0.5),
                                                        torchvision.transforms.ToTensor()])
        self.root_dir = root_dir
        self.transform = transform
        self.loader = loader

        self.images = os.listdir(os.path.join(self.root_dir))

        self.image_class = np.array(pd.read_csv('val_details.txt', sep='\t')[['mage','class']]).astype('str')
        self.class_dic = {}
        for i in self.image_class :
            self.class_dic[i[0]]=i[1] 
Author: SathwikTejaswi, Project: deep-ranking, Lines: 25, Source: test_embedding.py
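Example 5 shows only the constructor; a dataset like this also needs __len__ and __getitem__ to be usable with a DataLoader. Below is a hypothetical sketch of how the stored images, loader, transform, and class_dic might be used; it reuses the os import from the snippet above and is not the deep-ranking project's actual code.

def __len__(self):
    return len(self.images)

def __getitem__(self, idx):
    name = self.images[idx]
    img = self.loader(os.path.join(self.root_dir, name))  # pil_loader by default
    img = self.transform(img)
    label = self.class_dic[name]  # class string looked up by file name; real code may map it to an int
    return img, label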

Example 6: get_stage_dataset_from_config

# Required imports: import torchvision [as alias]
# Or: from torchvision import transforms [as alias]
def get_stage_dataset_from_config(self, stage):
        assert(stage in ['train', 'valid', 'test'])
        root_dir  = self.config['dataset']['root_dir']
        modal_num = self.config['dataset']['modal_num']
        if(stage == "train" or stage == "valid"):
            transform_names = self.config['dataset']['train_transform']
        elif(stage == "test"):
            transform_names = self.config['dataset']['test_transform']
        else:
            raise ValueError("Incorrect value for stage: {0:}".format(stage))

        self.transform_list = [get_transform(name, self.config['dataset']) \
                    for name in transform_names ]    
        csv_file = self.config['dataset'].get(stage + '_csv', None)
        dataset  = NiftyDataset(root_dir=root_dir,
                                csv_file  = csv_file,
                                modal_num = modal_num,
                                with_label= not (stage == 'test'),
                                transform = transforms.Compose(self.transform_list))
        return dataset 
Author: HiLab-git, Project: PyMIC, Lines: 22, Source: net_run.py
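Example 6 reads everything from a nested configuration dictionary. The following hypothetical config illustrates the keys the function accesses; the paths and transform names are placeholders, not PyMIC defaults.

config = {
    'dataset': {
        'root_dir':        '/path/to/data',           # placeholder
        'modal_num':       1,
        'train_transform': ['RandomCrop', 'RandomFlip', 'NormalizeWithMeanStd'],  # assumed names
        'test_transform':  ['NormalizeWithMeanStd'],
        'train_csv':       'config/data_train.csv',   # read via .get(stage + '_csv', None)
        'valid_csv':       'config/data_valid.csv',
        'test_csv':        'config/data_test.csv',
    }
}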

Example 7: main

# Required imports: import torchvision [as alias]
# Or: from torchvision import transforms [as alias]
def main():

    print("==> Loading data and model...")
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    test_set = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
    testloader = torch.utils.data.DataLoader(test_set, batch_size=1, shuffle=True, num_workers=2)

    class_names = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
    assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
    checkpoint = torch.load('./checkpoint/%s.t7' % args.model)
    net = checkpoint['net']
    net.cuda()
    cudnn.benchmark = True

    print("==> Starting attack...")

    results = attack_all(net, testloader, pixels=args.pixels, targeted=args.targeted, maxiter=args.maxiter, popsize=args.popsize, verbose=args.verbose)
    print("Final success rate: %.4f" % results) 
Author: DebangLi, Project: one-pixel-attack-pytorch, Lines: 23, Source: attack.py

Example 8: _lazy_import_torch

# Required imports: import torchvision [as alias]
# Or: from torchvision import transforms [as alias]
def _lazy_import_torch(self):
        try:
            import torch
        except ImportError:
            raise ImportError('Need to install Pytorch: go to pytorch.org')
        import torchvision
        import torchvision.transforms as transforms
        import torch.nn as nn

        self.use_cuda = not self.opt.get('no_cuda', False) and torch.cuda.is_available()
        if self.use_cuda:
            logging.debug(f'Using CUDA')
            torch.cuda.set_device(self.opt.get('gpu', -1))
        self.torch = torch
        self.torchvision = torchvision
        self.transforms = transforms
        self.nn = nn 
Author: facebookresearch, Project: ParlAI, Lines: 19, Source: image_featurizers.py

Example 9: __init__

# Required imports: import torchvision [as alias]
# Or: from torchvision import transforms [as alias]
def __init__(self, N_train = 8192, N_valid = 1024, **kwargs):
		super().__init__(**kwargs)

		batch_size = 64

		# Load the MNIST Data here
		train_dataset = torchvision.datasets.MNIST(root='../../data', train=True, transform=transforms.ToTensor(), download=True)
		test_dataset = torchvision.datasets.MNIST(root='../../data', train=False, transform=transforms.ToTensor())
		
		train_sampler = torch.utils.data.sampler.SubsetRandomSampler(range(N_train))
		validation_sampler = torch.utils.data.sampler.SubsetRandomSampler(range(N_train, N_train+N_valid))

		
		self.train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, sampler=train_sampler)
		self.validation_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=1024, sampler=validation_sampler)

		self.test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=1024, shuffle=False) 
Author: automl, Project: HpBandSter, Lines: 19, Source: example_5_pytorch_worker.py

Example 10: __init__

# Required imports: import torchvision [as alias]
# Or: from torchvision import transforms [as alias]
def __init__(self, transform=None, transform_label=None):
		self.root = KITTI_2015_TRAIN_PATH_IMAGE
		self.root_label = KITTI_2015_TRAIN_PATH_LABEL
		self.camera = [
			'image_2/',
			'image_3/'
		]

		if transform is None:
			self.transform = transforms.Compose(
				[
					transforms.ToTensor()
				]
			)
		else:
			self.transform = transform

		self.transform_label = transform_label

		return 
Author: rairyuu, Project: SCVNet, Lines: 22, Source: SCVNet.py

Example 11: get_loaders

# Required imports: import torchvision [as alias]
# Or: from torchvision import transforms [as alias]
def get_loaders(traindir, valdir, sz, bs, fp16=True, val_bs=None, workers=8, rect_val=False, min_scale=0.08, distributed=False):
    val_bs = val_bs or bs
    train_tfms = [
            transforms.RandomResizedCrop(sz, scale=(min_scale, 1.0)),
            transforms.RandomHorizontalFlip()
    ]
    train_dataset = datasets.ImageFolder(traindir, transforms.Compose(train_tfms))
    train_sampler = (DistributedSampler(train_dataset, num_replicas=env_world_size(), rank=env_rank()) if distributed else None)

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=bs, shuffle=(train_sampler is None),
        num_workers=workers, pin_memory=True, collate_fn=fast_collate, 
        sampler=train_sampler)

    val_dataset, val_sampler = create_validation_set(valdir, val_bs, sz, rect_val=rect_val, distributed=distributed)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        num_workers=workers, pin_memory=True, collate_fn=fast_collate, 
        batch_sampler=val_sampler)

    train_loader = BatchTransformDataLoader(train_loader, fp16=fp16)
    val_loader = BatchTransformDataLoader(val_loader, fp16=fp16)

    return train_loader, val_loader, train_sampler, val_sampler 
Author: cybertronai, Project: imagenet18_old, Lines: 26, Source: dataloader.py
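A hypothetical call to get_loaders, assuming an ImageNet-style folder layout; the paths are placeholders and the sizes are common choices rather than values from the project.

train_loader, val_loader, train_sampler, val_sampler = get_loaders(
    traindir='/data/imagenet/train',  # placeholder path
    valdir='/data/imagenet/val',      # placeholder path
    sz=224,                           # crop size passed to RandomResizedCrop
    bs=256,                           # per-process batch size
    workers=8,
    fp16=True,
    distributed=False)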

Example 12: __init__

# Required imports: import torchvision [as alias]
# Or: from torchvision import transforms [as alias]
def __init__(self):
        self.resize = ResizeClip(resize_shape = [2,2])
        self.crop = CropClip(0,0,0,0, crop_shape=[2,2])
        self.rand_crop = RandomCropClip(crop_shape=[2,2])
        self.cent_crop = CenterCropClip(crop_shape=[2,2])
        self.rand_flip_h = RandomFlipClip(direction='h', p=1.0)
        self.rand_flip_v = RandomFlipClip(direction='v', p=1.0)
        self.rand_rot = RandomRotateClip(angles=[90])
        self.rand_trans = RandomTranslateClip(translate=(0.5,0.5))
        self.rand_zoom  = RandomZoomClip(scale=(1.25,1.25)) 
        self.sub_mean = SubtractMeanClip(clip_mean=np.zeros(1))
        self.applypil = ApplyToPIL(transform=torchvision.transforms.ColorJitter, class_kwargs=dict(brightness=1))
        self.applypil2 = ApplyToPIL(transform=torchvision.transforms.FiveCrop, class_kwargs=dict(size=(64,64)))
        self.applytensor = ApplyToTensor(transform=torchvision.transforms.Normalize, class_kwargs=dict(mean=torch.tensor([0.,0.,0.]), std=torch.tensor([1.,1.,1.])))
        self.applycv = ApplyOpenCV(transform=cv2.threshold, class_kwargs=dict(thresh=100, maxval=100, type=cv2.THRESH_TRUNC))
        self.preproc = PreprocTransform() 
Author: MichiganCOG, Project: ViP, Lines: 18, Source: preprocessing_transforms.py

Example 13: _lazy_import_torch

# Required imports: import torchvision [as alias]
# Or: from torchvision import transforms [as alias]
def _lazy_import_torch(self):
        try:
            import torch
        except ImportError:
            raise ImportError('Need to install Pytorch: go to pytorch.org')
        import torchvision
        import torchvision.transforms as transforms
        import torch.nn as nn

        self.use_cuda = not self.opt.get('no_cuda', False) and torch.cuda.is_available()
        if self.use_cuda:
            print('[ Using CUDA ]')
            torch.cuda.set_device(self.opt.get('gpu', -1))
        self.torch = torch
        self.torchvision = torchvision
        self.transforms = transforms
        self.nn = nn 
Author: natashamjaques, Project: neural_chat, Lines: 19, Source: image_featurizers.py

Example 14: main

# Required imports: import torchvision [as alias]
# Or: from torchvision import transforms [as alias]
def main():
    best_acc = 0

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    print('==> Preparing data..')
    transforms_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])

    dataset_train = CIFAR10(root='../data', train=True, download=True, 
                            transform=transforms_train)

    train_loader = DataLoader(dataset_train, batch_size=args.batch_size, 
                              shuffle=True, num_workers=args.num_worker)

    # there are 10 classes so the dataset name is cifar-10
    classes = ('plane', 'car', 'bird', 'cat', 'deer', 
               'dog', 'frog', 'horse', 'ship', 'truck')

    print('==> Making model..')

    net = pyramidnet()
    net = net.to(device)
    num_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
    print('The number of parameters of model is', num_params)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=args.lr, 
                          momentum=0.9, weight_decay=1e-4)
    
    train(net, criterion, optimizer, train_loader, device) 
Author: dnddnjs, Project: pytorch-multigpu, Lines: 36, Source: train.py

Example 15: __init__

# Required imports: import torchvision [as alias]
# Or: from torchvision import transforms [as alias]
def __init__(self, transforms):
        self.transforms = transforms 
Author: Res2Net, Project: Res2Net-maskrcnn, Lines: 4, Source: transforms.py
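Example 15 shows only the constructor of a Compose-style wrapper that stores a list of transforms. In maskrcnn-benchmark-style codebases the companion __call__ usually applies each transform to the image and its target in turn; the sketch below follows that convention and may differ from the Res2Net-maskrcnn source.

class Compose(object):
    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, target):
        # Apply each transform to both the image and its annotations.
        for t in self.transforms:
            image, target = t(image, target)
        return image, target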


Note: The torchvision.transforms examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers, and copyright remains with the original authors; please follow each project's license when distributing or using the code, and do not reproduce this article without permission.