

Python torchvision.utils Code Examples

This article collects typical usage examples of torchvision.utils in Python. If you are wondering how exactly to use torchvision.utils, or are looking for real-world examples of it in code, the hand-picked examples below may help. You can also explore further usage examples from the torchvision package it belongs to.


Four code examples of torchvision.utils are shown below, drawn from open-source projects and ordered roughly by popularity.
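
Before the project-specific examples, here is a minimal sketch (our own illustration, not taken from any of the projects below) of the two most commonly used helpers in torchvision.utils: make_grid tiles a batch of image tensors into a single image, and save_image writes a batch or grid to disk (Example 3 below uses save_image).

import torch
import torchvision

batch = torch.rand(16, 3, 64, 64)  # placeholder batch of 16 RGB images
grid = torchvision.utils.make_grid(batch, nrow=4, normalize=True)         # single (3, H, W) tiled image
torchvision.utils.save_image(batch, 'grid.png', nrow=4, normalize=True)   # same tiling, written to disk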

Example 1: get_test_imgs

# Required module import: import torchvision [as alias]
# Or: from torchvision import utils [as alias]
import os

import torch
import torchvision

# CustomDataset is defined elsewhere in the mbu-content-tansfer project.

def get_test_imgs(args):
    # Resize test images to a square and normalize them to [-1, 1].
    transform = torchvision.transforms.Compose([
        torchvision.transforms.Resize((args.resize, args.resize)),
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])

    domA_test = CustomDataset(os.path.join(args.root, 'testA.txt'), transform=transform)
    domB_test = CustomDataset(os.path.join(args.root, 'testB.txt'), transform=transform)

    domA_test_loader = torch.utils.data.DataLoader(domA_test, batch_size=64,
                                                   shuffle=False, num_workers=0)
    domB_test_loader = torch.utils.data.DataLoader(domB_test, batch_size=64,
                                                   shuffle=False, num_workers=0)

    # Take only the first batch from each domain.
    for domA_img in domA_test_loader:
        if torch.cuda.is_available():
            domA_img = domA_img.cuda()
        domA_img = domA_img.view((-1, 3, args.resize, args.resize))
        break

    for domB_img in domB_test_loader:
        if torch.cuda.is_available():
            domB_img = domB_img.cuda()
        domB_img = domB_img.view((-1, 3, args.resize, args.resize))
        break

    return domA_img, domB_img
Author: rmokady | Project: mbu-content-tansfer | Lines: 34 | Source file: mask_utils.py
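
A hypothetical usage sketch for get_test_imgs (the argument values and output paths are placeholders, not taken from the original project): the returned batches are [-1, 1] normalized tensors of shape (N, 3, resize, resize), so save_image with normalize=True gives a quick visual check.

import argparse
import torchvision

args = argparse.Namespace(root='datasets/faces', resize=128)   # hypothetical settings
domA_img, domB_img = get_test_imgs(args)
torchvision.utils.save_image(domA_img[:8], 'domA_preview.png', normalize=True)
torchvision.utils.save_image(domB_img[:8], 'domB_preview.png', normalize=True)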

Example 2: cluster_acc

# Required module import: import torchvision [as alias]
# Or: from torchvision import utils [as alias]
import numpy as np

def cluster_acc(Y_pred, Y):
    # The sklearn linear_assignment_ module was removed in scikit-learn 0.23;
    # see the scipy-based sketch below for an alternative.
    from sklearn.utils.linear_assignment_ import linear_assignment
    assert Y_pred.size == Y.size
    D = max(Y_pred.max(), Y.max()) + 1
    w = np.zeros((D, D), dtype=np.int64)   # confusion matrix of predicted vs. true cluster labels
    for i in range(Y_pred.size):
        w[Y_pred[i], Y[i]] += 1
    ind = linear_assignment(w.max() - w)   # Hungarian algorithm: best matching of cluster labels
    return sum([w[i, j] for i, j in ind]) * 1.0 / Y_pred.size, w
Author: eelxpeng | Project: UnsupervisedDeepLearning-Pytorch | Lines: 11 | Source file: vade.py
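
The sklearn.utils.linear_assignment_ module used above was deprecated and removed in scikit-learn 0.23. A minimal equivalent sketch using scipy's linear_sum_assignment (our suggestion, not part of the original project) computes the same clustering accuracy:

import numpy as np
from scipy.optimize import linear_sum_assignment

def cluster_acc_scipy(Y_pred, Y):
    assert Y_pred.size == Y.size
    D = max(Y_pred.max(), Y.max()) + 1
    w = np.zeros((D, D), dtype=np.int64)                     # confusion matrix between cluster labels
    for i in range(Y_pred.size):
        w[Y_pred[i], Y[i]] += 1
    row_ind, col_ind = linear_sum_assignment(w.max() - w)    # Hungarian algorithm on the cost matrix
    return w[row_ind, col_ind].sum() / Y_pred.size, w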

Example 3: removal

# Required module import: import torchvision [as alias]
# Or: from torchvision import utils [as alias]
import os

import torch
import torchvision
import torchvision.utils as vutils
from PIL import Image
from torch.utils import data

# CustomDataset is defined elsewhere in the mbu-content-tansfer project; e1/e2 are the
# common and domain-specific encoders, d_a/d_b the domain decoders passed in by the caller.

def removal(args, e1, e2, d_a, d_b):

    transform = torchvision.transforms.Compose([
        torchvision.transforms.Resize((args.resize, args.resize)),
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])

    if args.eval_folder != '':

        class Faces(data.Dataset):
            """Face images read directly from a folder."""

            def __init__(self, root_dir, transform, size, ext):
                self.root_dir = root_dir
                self.transform = transform
                self.size = size
                self.ext = ext
                self.files = [f for f in os.listdir(root_dir) if f.endswith(ext)]

            def __len__(self):
                return self.size  # number of images

            def __getitem__(self, idx):
                img_name = os.path.join(self.root_dir, self.files[idx])
                image = Image.open(img_name)
                sample = self.transform(image)
                return sample

        test_data = Faces(args.eval_folder, transform, args.amount, args.ext)
        domA_test_loader = torch.utils.data.DataLoader(dataset=test_data, batch_size=args.bs, shuffle=False)
    else:
        domA_test = CustomDataset(os.path.join(args.root, 'testA.txt'), transform=transform)
        domA_test_loader = torch.utils.data.DataLoader(domA_test, batch_size=args.bs, shuffle=False)

    cnt = 0
    for test_domA in domA_test_loader:
        if torch.cuda.is_available():
            test_domA = test_domA.cuda()

        test_domA = test_domA.view((-1, 3, args.resize, args.resize))
        for i in range(args.bs):
            # Encode the image into its common (content) and separate (attribute) parts.
            separate_A = e2(test_domA[i].unsqueeze(0))
            common_A = e1(test_domA[i].unsqueeze(0))
            A_encoding = torch.cat([common_A, separate_A], dim=1)
            A_decoding = d_a(A_encoding)
            # Decode through the other domain's decoder, with a mask thresholded by args.threshold.
            BA_decoding, mask = d_b(A_encoding, test_domA[i], A_decoding, args.threshold)

            # Save the input and its translated output side by side.
            exps = torch.cat([test_domA[i].unsqueeze(0), BA_decoding], 0)
            vutils.save_image(exps, '%s/%0d.png' % (args.out, cnt), normalize=True)
            print(cnt)
            cnt += 1
            if cnt == args.amount:
                break
Author: rmokady | Project: mbu-content-tansfer | Lines: 58 | Source file: mask_utils.py
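
The visualization at the end of removal() follows a common pattern: the input image and its translated output are stacked along the batch dimension and written with save_image, whose normalize=True rescales the [-1, 1] tensors (produced by the Normalize transform) back to a viewable range. A minimal sketch with placeholder tensors:

import torch
import torchvision.utils as vutils

original = torch.rand(1, 3, 128, 128) * 2 - 1    # placeholder in [-1, 1], like the normalized input
translated = torch.rand(1, 3, 128, 128) * 2 - 1  # placeholder standing in for BA_decoding
pair = torch.cat([original, translated], 0)      # shape (2, 3, 128, 128)
vutils.save_image(pair, 'comparison.png', normalize=True)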

Example 4: __init__

# Required module import: import torchvision [as alias]
# Or: from torchvision import utils [as alias]
import torch

# This is a method of the DCGAN_D discriminator class in GAN.py; `param` holds the parsed
# hyper-parameters and Activation() is defined at module level in the same file.

def __init__(self):
    super(DCGAN_D, self).__init__()
    main = torch.nn.Sequential()

    ### Start block
    # Size = n_colors x image_size x image_size
    if param.spectral:
        main.add_module('Start-SpectralConv2d', torch.nn.utils.spectral_norm(torch.nn.Conv2d(param.n_colors, param.D_h_size, kernel_size=4, stride=2, padding=1, bias=False)))
    else:
        main.add_module('Start-Conv2d', torch.nn.Conv2d(param.n_colors, param.D_h_size, kernel_size=4, stride=2, padding=1, bias=False))
    if param.SELU:
        main.add_module('Start-SELU', torch.nn.SELU(inplace=True))
    else:
        if param.Tanh_GD:
            main.add_module('Start-Tanh', torch.nn.Tanh())
        else:
            main.add_module('Start-LeakyReLU', Activation())
    image_size_new = param.image_size // 2
    # Size = D_h_size x image_size/2 x image_size/2

    ### Middle block (repeated until the feature map reaches ? x 4 x 4)
    mult = 1
    ii = 0
    while image_size_new > 4:
        if param.spectral:
            main.add_module('Middle-SpectralConv2d [%d]' % ii, torch.nn.utils.spectral_norm(torch.nn.Conv2d(param.D_h_size * mult, param.D_h_size * (2*mult), kernel_size=4, stride=2, padding=1, bias=False)))
        else:
            main.add_module('Middle-Conv2d [%d]' % ii, torch.nn.Conv2d(param.D_h_size * mult, param.D_h_size * (2*mult), kernel_size=4, stride=2, padding=1, bias=False))
        if param.SELU:
            main.add_module('Middle-SELU [%d]' % ii, torch.nn.SELU(inplace=True))
        else:
            if not param.no_batch_norm_D and not param.spectral:
                main.add_module('Middle-BatchNorm2d [%d]' % ii, torch.nn.BatchNorm2d(param.D_h_size * (2*mult)))
            if param.Tanh_GD:
                main.add_module('Middle-Tanh [%d]' % ii, torch.nn.Tanh())
            else:
                main.add_module('Middle-LeakyReLU [%d]' % ii, Activation())
        # Size = (D_h_size*(2*i)) x image_size/(2*i) x image_size/(2*i)
        image_size_new = image_size_new // 2
        mult *= 2
        ii += 1

    ### End block
    # Size = (D_h_size * mult) x 4 x 4
    if param.spectral:
        main.add_module('End-SpectralConv2d', torch.nn.utils.spectral_norm(torch.nn.Conv2d(param.D_h_size * mult, 1, kernel_size=4, stride=1, padding=0, bias=False)))
    else:
        main.add_module('End-Conv2d', torch.nn.Conv2d(param.D_h_size * mult, 1, kernel_size=4, stride=1, padding=0, bias=False))
    # Size = 1 x 1 x 1 (Is a real cat or not?)
    self.main = main
Author: AlexiaJM | Project: MaximumMarginGANs | Lines: 52 | Source file: GAN.py
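
The spectral-norm branches above wrap each convolution with torch.nn.utils.spectral_norm, which re-parameterizes the layer so that its weight has spectral norm close to 1, a common stabilization trick for GAN discriminators. A minimal standalone sketch (not taken from the original repository):

import torch

conv = torch.nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1, bias=False)
conv = torch.nn.utils.spectral_norm(conv)   # weight is now normalized by its largest singular value
x = torch.rand(8, 3, 64, 64)
print(conv(x).shape)                        # torch.Size([8, 64, 32, 32])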


Note: The torchvision.utils examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects; copyright remains with their original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce without permission.