當前位置: 首頁>>代碼示例>>Python>>正文


Python torchvision.utils方法代碼示例

本文整理匯總了Python中torchvision.utils方法的典型用法代碼示例。如果您正苦於以下問題:Python torchvision.utils方法的具體用法?Python torchvision.utils怎麽用?Python torchvision.utils使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在torchvision的用法示例。


在下文中一共展示了torchvision.utils方法的4個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Python代碼示例。

示例1: get_test_imgs

# 需要導入模塊: import torchvision [as 別名]
# 或者: from torchvision import utils [as 別名]
def get_test_imgs(args):
    """Return one test batch (up to 64 images) from each domain.

    Reads the image lists ``testA.txt`` and ``testB.txt`` under
    ``args.root``, resizes every image to ``args.resize`` x ``args.resize``
    and normalizes it to roughly [-1, 1], then takes the first batch of
    each loader (moved to the GPU when CUDA is available) reshaped to
    ``(-1, 3, resize, resize)``.
    """
    transform = torchvision.transforms.Compose([
        torchvision.transforms.Resize((args.resize, args.resize)),
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])

    a_set = CustomDataset(os.path.join(args.root, 'testA.txt'), transform=transform)
    b_set = CustomDataset(os.path.join(args.root, 'testB.txt'), transform=transform)

    a_loader = torch.utils.data.DataLoader(a_set, batch_size=64,
                                           shuffle=False, num_workers=0)
    b_loader = torch.utils.data.DataLoader(b_set, batch_size=64,
                                           shuffle=False, num_workers=0)

    use_cuda = torch.cuda.is_available()

    # Only the first batch of each loader is needed.
    for a_batch in a_loader:
        if use_cuda:
            a_batch = a_batch.cuda()
        a_batch = a_batch.view((-1, 3, args.resize, args.resize))
        break

    for b_batch in b_loader:
        if use_cuda:
            b_batch = b_batch.cuda()
        b_batch = b_batch.view((-1, 3, args.resize, args.resize))
        break

    return a_batch, b_batch
開發者ID:rmokady,項目名稱:mbu-content-tansfer,代碼行數:34,代碼來源:mask_utils.py

示例2: cluster_acc

# 需要導入模塊: import torchvision [as 別名]
# 或者: from torchvision import utils [as 別名]
def cluster_acc(Y_pred, Y):
  """Compute unsupervised clustering accuracy.

  Builds the contingency matrix between predicted cluster ids and
  ground-truth labels, finds the best one-to-one label mapping with the
  Hungarian algorithm, and returns the fraction of samples that match
  under that mapping.

  Args:
    Y_pred: 1-D numpy array of predicted cluster labels (non-negative ints).
    Y: 1-D numpy array of ground-truth labels, same length as Y_pred.

  Returns:
    Tuple ``(accuracy, w)``: accuracy in [0, 1] as a float, and the
    D x D contingency matrix ``w`` where ``w[i, j]`` counts samples
    predicted as cluster ``i`` with true label ``j``.
  """
  # sklearn.utils.linear_assignment_ was removed in scikit-learn 0.23;
  # scipy.optimize.linear_sum_assignment is the maintained equivalent.
  from scipy.optimize import linear_sum_assignment
  assert Y_pred.size == Y.size
  D = max(Y_pred.max(), Y.max()) + 1
  w = np.zeros((D, D), dtype=np.int64)
  for i in range(Y_pred.size):
    w[Y_pred[i], Y[i]] += 1
  # Hungarian solver minimizes cost, so maximize matches by minimizing
  # (max count - count).
  row_ind, col_ind = linear_sum_assignment(w.max() - w)
  return w[row_ind, col_ind].sum() * 1.0 / Y_pred.size, w
開發者ID:eelxpeng,項目名稱:UnsupervisedDeepLearning-Pytorch,代碼行數:11,代碼來源:vade.py

示例3: removal

# 需要導入模塊: import torchvision [as 別名]
# 或者: from torchvision import utils [as 別名]
def removal(args, e1, e2, d_a, d_b):
    """Run content removal on domain-A test images and save comparison grids.

    For each test image: encode with the common encoder ``e1`` and the
    separate encoder ``e2``, decode the concatenated code with ``d_a``,
    then with ``d_b`` (which yields the content-removed image and a mask),
    and save an [input | output] image to ``args.out``.  Processing stops
    after ``args.amount`` images have been written.

    Images are taken from ``args.eval_folder`` when given, otherwise from
    the ``testA.txt`` list under ``args.root``.
    """
    transform = torchvision.transforms.Compose([
        torchvision.transforms.Resize((args.resize, args.resize)),
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])

    if args.eval_folder != '':

        class Faces(data.Dataset):
            """Flat folder of images filtered by file extension."""

            def __init__(self, root_dir, transform, size, ext):
                self.root_dir = root_dir
                self.transform = transform
                self.size = size  # number of images exposed to the loader
                self.ext = ext
                self.files = [f for f in os.listdir(root_dir) if f.endswith(ext)]

            def __len__(self):
                return self.size  # number of images

            def __getitem__(self, idx):
                img_name = os.path.join(self.root_dir, self.files[idx])
                image = Image.open(img_name)
                sample = self.transform(image)
                return sample

        test_data = Faces(args.eval_folder, transform, args.amount, args.ext)
        domA_test_loader = torch.utils.data.DataLoader(dataset=test_data, batch_size=args.bs, shuffle=False)
    else:
        domA_test = CustomDataset(os.path.join(args.root, 'testA.txt'), transform=transform)
        domA_test_loader = torch.utils.data.DataLoader(domA_test, batch_size=args.bs, shuffle=False)

    cnt = 0
    for test_domA in domA_test_loader:
        if torch.cuda.is_available():
            test_domA = test_domA.cuda()

        test_domA = test_domA.view((-1, 3, args.resize, args.resize))
        # Iterate over the actual batch size: the final batch may hold
        # fewer than args.bs images, and a fixed range(args.bs) would
        # raise IndexError on it.
        for i in range(test_domA.size(0)):
            separate_A = e2(test_domA[i].unsqueeze(0))
            common_A = e1(test_domA[i].unsqueeze(0))
            A_encoding = torch.cat([common_A, separate_A], dim=1)
            A_decoding = d_a(A_encoding)
            BA_decoding, mask = d_b(A_encoding, test_domA[i], A_decoding, args.threshold)

            exps = torch.cat([test_domA[i].unsqueeze(0), BA_decoding], 0)
            vutils.save_image(exps, '%s/%0d.png' % (args.out, cnt), normalize=True)
            print(cnt)
            cnt += 1
            if cnt == args.amount:
                # return rather than break: a bare break only exits the
                # inner loop, and the outer loop would keep writing
                # images beyond args.amount.
                return
開發者ID:rmokady,項目名稱:mbu-content-tansfer,代碼行數:58,代碼來源:mask_utils.py

示例4: __init__

# 需要導入模塊: import torchvision [as 別名]
# 或者: from torchvision import utils [as 別名]
def __init__(self):
    """Build the DCGAN discriminator as one sequential conv stack.

    Layout: a strided-conv start block, repeated strided-conv middle
    blocks that halve the spatial size and double the channel count until
    the feature map reaches 4x4, then a final 4x4 conv producing a single
    scalar score per image.  Spectral norm, SELU, Tanh and batch-norm
    variants are chosen via the module-level ``param`` settings.
    """
    super(DCGAN_D, self).__init__()
    layers = torch.nn.Sequential()

    def conv(in_ch, out_ch, kernel, stride, pad):
        # Bias-free conv, wrapped in spectral normalization when enabled.
        c = torch.nn.Conv2d(in_ch, out_ch, kernel_size=kernel,
                            stride=stride, padding=pad, bias=False)
        return torch.nn.utils.spectral_norm(c) if param.spectral else c

    ### Start block
    # Size = n_colors x image_size x image_size
    if param.spectral:
        layers.add_module('Start-SpectralConv2d', conv(param.n_colors, param.D_h_size, 4, 2, 1))
    else:
        layers.add_module('Start-Conv2d', conv(param.n_colors, param.D_h_size, 4, 2, 1))
    if param.SELU:
        layers.add_module('Start-SELU', torch.nn.SELU(inplace=True))
    elif param.Tanh_GD:
        layers.add_module('Start-Tanh', torch.nn.Tanh())
    else:
        layers.add_module('Start-LeakyReLU', Activation())
    cur_size = param.image_size // 2
    # Size = D_h_size x image_size/2 x image_size/2

    ### Middle blocks (repeat until the map is ? x 4 x 4)
    mult = 1
    idx = 0
    while cur_size > 4:
        ch_in = param.D_h_size * mult
        ch_out = param.D_h_size * (2 * mult)
        if param.spectral:
            layers.add_module('Middle-SpectralConv2d [%d]' % idx, conv(ch_in, ch_out, 4, 2, 1))
        else:
            layers.add_module('Middle-Conv2d [%d]' % idx, conv(ch_in, ch_out, 4, 2, 1))
        if param.SELU:
            layers.add_module('Middle-SELU [%d]' % idx, torch.nn.SELU(inplace=True))
        else:
            if not param.no_batch_norm_D and not param.spectral:
                layers.add_module('Middle-BatchNorm2d [%d]' % idx, torch.nn.BatchNorm2d(ch_out))
            if param.Tanh_GD:
                # NOTE: label reads 'Start-Tanh' even in the middle blocks;
                # kept byte-identical for checkpoint-name compatibility.
                layers.add_module('Start-Tanh [%d]' % idx, torch.nn.Tanh())
            else:
                layers.add_module('Middle-LeakyReLU [%d]' % idx, Activation())
        # Size = (D_h_size*(2*i)) x image_size/(2*i) x image_size/(2*i)
        cur_size //= 2
        mult *= 2
        idx += 1

    ### End block
    # Size = (D_h_size * mult) x 4 x 4  ->  1 x 1 x 1 (real-or-fake score)
    if param.spectral:
        layers.add_module('End-SpectralConv2d', conv(param.D_h_size * mult, 1, 4, 1, 0))
    else:
        layers.add_module('End-Conv2d', conv(param.D_h_size * mult, 1, 4, 1, 0))
    self.main = layers
開發者ID:AlexiaJM,項目名稱:MaximumMarginGANs,代碼行數:52,代碼來源:GAN.py


注:本文中的torchvision.utils方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。