当前位置: 首页>>代码示例>>Python>>正文


Python transforms.ToPILImage方法代码示例

本文整理汇总了Python中torchvision.transforms.ToPILImage方法的典型用法代码示例。如果您正苦于以下问题:Python transforms.ToPILImage方法的具体用法?Python transforms.ToPILImage怎么用?Python transforms.ToPILImage使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在torchvision.transforms的用法示例。


在下文中一共展示了transforms.ToPILImage方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: regenerate_cache

# 需要导入模块: from torchvision import transforms [as 别名]
# 或者: from torchvision.transforms import ToPILImage [as 别名]
def regenerate_cache(self):
        """
        Resample the cached random mask pattern and reset the counter of
        the total number of elements consumed from the returned masks.

        Builds a low-resolution uniform-noise image, upscales it to
        ``max_size`` with bicubic interpolation, and thresholds it at
        ``self.density`` to obtain a binary byte mask pattern.
        """
        # Side of the coarse noise grid before upscaling.
        low_size = int(self.resolution * self.max_size)
        # Uniform noise scaled to [0, 255] so it behaves like an 8-bit image.
        low_pattern = self.rng.uniform(0, 1, size=(low_size, low_size)) * 255
        low_pattern = torch.from_numpy(low_pattern.astype('float32'))
        # ToPILImage expects a CHW tensor, hence the added leading channel
        # dim; ToTensor rescales back to [0, 1], so density thresholds there.
        pattern = transforms.Compose([
                        transforms.ToPILImage(),
                        transforms.Resize(self.max_size, Image.BICUBIC),
                        transforms.ToTensor(),
        ])(low_pattern[None])[0]
        # torch.lt(...).byte() already yields a byte tensor; the original's
        # second .byte() conversion was redundant and has been folded away.
        self.pattern = torch.lt(pattern, self.density).byte()
        self.points_used = 0
开发者ID:tigvarts,项目名称:vaeac,代码行数:18,代码来源:mask_generators.py

示例2: visualize_output

# 需要导入模块: from torchvision import transforms [as 别名]
# 或者: from torchvision.transforms import ToPILImage [as 别名]
def visualize_output(img, output, templates, proc, prob_thresh=0.55, nms_thresh=0.1):
    """Render the detector's class/regression heatmaps for one batch image.

    Denormalizes the first image in the batch (ImageNet statistics,
    in place), converts it to PIL, splits the raw network output into
    score and regression maps, and hands everything to
    ``proc.visualize_heatmaps``. Prompts on stdin; answering 'n'
    terminates the process.
    """
    to_pil = transforms.ToPILImage()

    # Undo ImageNet normalization in place, channel by channel.
    imagenet_mean = [0.485, 0.456, 0.406]
    imagenet_std = [0.229, 0.224, 0.225]
    for channel, m, s in zip(img[0], imagenet_mean, imagenet_std):
        channel.mul_(s).add_(m)

    image = to_pil(img[0])  # only the first image of the batch is visualized

    num_templates = templates.shape[0]
    # The first num_templates channels are per-template classification scores.
    scores = nnfunc.sigmoid(output[:, 0:num_templates, :, :])
    cls_map = scores.data.cpu().numpy().transpose((0, 2, 3, 1))[0, :, :, :]
    # The remaining channels hold bounding-box regression offsets.
    regressions = output[:, num_templates:, :, :]
    reg_map = regressions.data.cpu().numpy().transpose((0, 2, 3, 1))[0, :, :, :]

    print(np.sort(np.unique(cls_map))[::-1])
    proc.visualize_heatmaps(image, cls_map, reg_map, templates,
                            prob_thresh=prob_thresh, nms_thresh=nms_thresh)

    answer = input("Continue? [Yn]")
    if answer.lower().strip() == 'n':
        exit(0)
开发者ID:varunagrawal,项目名称:tiny-faces-pytorch,代码行数:24,代码来源:trainer.py

示例3: __getitem__

# 需要导入模块: from torchvision import transforms [as 别名]
# 或者: from torchvision.transforms import ToPILImage [as 别名]
def __getitem__(self, index):
        """
        Fetch the ``index``-th sample from every wrapped dataset and
        concatenate the results into one list.

        Counts served samples so the datasets can be reshuffled after each
        full epoch; for aligned single-output datasets (super-resolution),
        a downscaled copy of the target is prepended as the input.
        """

        outputs = []

        for i, d in enumerate(self.datasets):

            # NOTE(review): get_item presumably returns a list of tensors in
            # [-1, 1] (see the rescaling below); flips are precomputed per
            # epoch — confirm against the dataset implementation.
            outputs += d.get_item(self.indices[i][index], self.flips[i][index])

        self.counter += 1

        # Shuffle datasets after each epoch
        if self.counter == len(self):
            if self.phase == 'train': self.shuffle()
            self.counter = 0

        if len(outputs) == 1 and self.aligned:

            # Super resolution: map [-1, 1] -> [0, 1] for the PIL round-trip,
            # downscale to build the LR input, then map both back to [-1, 1].
            outputs[0] = ToPILImage()((outputs[0] + 1) / 2)
            outputs.insert(0, self.down(outputs[0]))
            for i, o in enumerate(outputs):
                outputs[i] = ToTensor()(o) * 2 - 1

        return outputs
开发者ID:egorzakharov,项目名称:PerceptualGAN,代码行数:26,代码来源:dataset.py

示例4: test

# 需要导入模块: from torchvision import transforms [as 别名]
# 或者: from torchvision.transforms import ToPILImage [as 别名]
def test(model, img, sr_factor):
    """Run zero-shot super-resolution inference on a single PIL image.

    Bicubically upscales ``img`` by ``sr_factor`` (saved as low_res.png),
    feeds it through the residual ``model`` on the GPU, clamps the result
    to the valid [0, 1] range, and saves it as zssr.png.

    :param model: residual SR network (input + model(input) = output).
    :param img: PIL image to super-resolve.
    :param sr_factor: upscaling factor applied to both dimensions.
    """
    model.eval()

    # Bicubic upscale is the baseline; the network only predicts the residual.
    img = img.resize((int(img.size[0] * sr_factor),
                      int(img.size[1] * sr_factor)), resample=PIL.Image.BICUBIC)
    img.save('low_res.png')

    img = transforms.ToTensor()(img)
    img = torch.unsqueeze(img, 0)  # add batch dimension
    # Renamed from `input`, which shadowed the builtin.
    net_input = Variable(img.cuda())
    residual = model(net_input)
    output = net_input + residual

    # Clamp to [0, 1] directly in torch instead of the original
    # numpy round-trip with two np.where masks — same result.
    output = output.cpu().data[0, :, :, :].clamp(0, 1)
    output = transforms.ToPILImage()(output)
    output.save('zssr.png')
开发者ID:jacobgil,项目名称:pytorch-zssr,代码行数:22,代码来源:train.py

示例5: get_imgs

# 需要导入模块: from torchvision import transforms [as 别名]
# 或者: from torchvision.transforms import ToPILImage [as 别名]
def get_imgs(img_path, imsize, bbox=None,
             transform=None, normalize=None):
    """Load an RGB image, crop it, and return per-branch normalized copies.

    For every GAN tree branch except the deepest, the image is resized to
    that branch's resolution; the last branch keeps the full-size image.
    In DCGAN mode a single normalized image is returned instead.
    """
    img = Image.open(img_path).convert('RGB')
    if transform is not None:
        img = transform(img)

    img, bbox_scaled = crop_imgs(img, bbox)

    if cfg.GAN.B_DCGAN:
        ret = [normalize(img)]
    else:
        ret = []
        last_branch = cfg.TREE.BRANCH_NUM - 1
        for branch in range(cfg.TREE.BRANCH_NUM):
            pil_img = transforms.ToPILImage()(img)
            # Only non-final branches get downscaled to the branch resolution.
            if branch < last_branch:
                pil_img = transforms.Resize((imsize[branch], imsize[branch]))(pil_img)
            ret.append(normalize(pil_img))

    return ret, bbox_scaled
开发者ID:tohinz,项目名称:multiple-objects-gan,代码行数:24,代码来源:datasets.py

示例6: image

# 需要导入模块: from torchvision import transforms [as 别名]
# 或者: from torchvision.transforms import ToPILImage [as 别名]
def image(self, img_tensors: torch.Tensor, global_step: int, tag: str = "Train/input",
              grid_size: Union[list, tuple] = (3, 1), shuffle=True, save_file=False):
        """
        Log a grid of sampled batch images to TensorBoard and optionally disk.

        :param img_tensors: batch of images with shape (N, C, H, W).
        :param global_step: training step used as the TensorBoard x-axis.
        :param tag: TensorBoard tag; also names the plot subdirectory.
        :param grid_size: (rows, columns) of the sample grid.
        :param shuffle: whether to sample random images from the batch.
        :param save_file: if True, also save the grid as a PNG under
            ``<logdir>/plots/<tag>/``.
        :raises TypeError: if ``img_tensors`` is not a rank-4 tensor.
        """
        if len(img_tensors.size()) != 4:
            raise TypeError("img_tensors rank should be 4, got %d instead" % len(img_tensors.size()))
        self._build_dir(os.path.join(self.logdir, "plots", tag))
        rows, columns = grid_size[0], grid_size[1]
        batch_size = len(img_tensors)  # img_tensors =>(batchsize, 3, 256, 256)
        # Never sample more images than the batch actually contains.
        num_samples: int = min(batch_size, rows * columns)
        sampled_tensor = self._sample(img_tensors, num_samples, shuffle).detach().cpu()
        # (sample_num, 3, 32,32)  tensors
        # sampled_images = map(transforms.Normalize(mean, std), sampled_tensor)  # (sample_num, 3, 32,32) images
        # make_grid rescales each image to [0, 1] (normalize + scale_each),
        # so no explicit de-normalization is needed before logging.
        sampled_images: torch.Tensor = make_grid(sampled_tensor, nrow=rows, normalize=True, scale_each=True)
        self.writer.add_image(tag, sampled_images, global_step)

        if save_file:
            img = transforms.ToPILImage()(sampled_images)
            filename = "%s/plots/%s/E%03d.png" % (self.logdir, tag, global_step)
            img.save(filename)
开发者ID:dingguanglei,项目名称:jdit,代码行数:21,代码来源:super.py

示例7: test_segmentation_pipeline

# 需要导入模块: from torchvision import transforms [as 别名]
# 或者: from torchvision.transforms import ToPILImage [as 别名]
def test_segmentation_pipeline(self):
        """
        Check that file-based segmentation targets survive the transform
        pipeline: the drawn square must come out as a binary {0, 1} mask
        with the same spatial size as the transformed input image.
        """
        class DrawSquare:
            """Paste the input into the top-left side x side corner of a canvas."""
            def __init__(self, side):
                self.side = side

            def __call__(self, x, **kwargs):
                x, canvas = x  # x is a [int, ndarray]
                canvas[:self.side, :self.side] = x
                return canvas

        # Target pipeline: blank canvas -> square -> PIL -> nearest-neighbour
        # resize and rotation (interpolation/resample 0 keeps labels discrete)
        # -> long tensor suitable as a class-index target.
        target_trans = BaaLCompose(
            [GetCanvas(), DrawSquare(3), ToPILImage(mode=None), Resize(60, interpolation=0),
             RandomRotation(10, resample=NEAREST, fill=0.0), PILToLongTensor()])
        file_dataset = FileDataset(self.paths, [1] * len(self.paths), self.transform, target_trans)

        x, y = file_dataset[0]
        # Labels must stay binary and spatially aligned with the image.
        assert np.allclose(np.unique(y), [0, 1])
        assert y.shape[1:] == x.shape[1:]
开发者ID:ElementAI,项目名称:baal,代码行数:20,代码来源:file_dataset_test.py

示例8: __init__

# 需要导入模块: from torchvision import transforms [as 别名]
# 或者: from torchvision.transforms import ToPILImage [as 别名]
def __init__(self, model, loss, resume, config, train_loader, val_loader=None, train_logger=None, prefetch=True):
        """
        Segmentation trainer: wires up the logging cadence, the transforms
        used to visualize predictions, and (optionally) CUDA prefetching on
        top of the generic base trainer.

        :param model: segmentation network to train.
        :param loss: loss function forwarded to the base trainer.
        :param resume: checkpoint path (or falsy) to resume from.
        :param config: experiment configuration dict.
        :param train_loader: training DataLoader; must expose MEAN/STD and a
            dataset with ``num_classes``.
        :param val_loader: optional validation DataLoader.
        :param train_logger: optional logger forwarded to the base trainer.
        :param prefetch: if True (and not running on CPU), wrap both loaders
            in DataPrefetcher for asynchronous host-to-device copies.
        """
        super(Trainer, self).__init__(model, loss, resume, config, train_loader, val_loader, train_logger)
        
        self.wrt_mode, self.wrt_step = 'train_', 0
        # Default cadence: log roughly once every sqrt(batch_size) iterations.
        self.log_step = config['trainer'].get('log_per_iter', int(np.sqrt(self.train_loader.batch_size)))
        # When logging per iteration, convert the step from samples to batches.
        if config['trainer']['log_per_iter']: self.log_step = int(self.log_step / self.train_loader.batch_size) + 1

        self.num_classes = self.train_loader.dataset.num_classes

        # TRANSFORMS FOR VISUALIZATION: undo dataset normalization, then
        # convert to PIL; viz_transform standardizes display size.
        self.restore_transform = transforms.Compose([
            local_transforms.DeNormalize(self.train_loader.MEAN, self.train_loader.STD),
            transforms.ToPILImage()])
        self.viz_transform = transforms.Compose([
            transforms.Resize((400, 400)),
            transforms.ToTensor()])
        
        # Prefetching is CUDA-specific; disable it on CPU.
        if self.device ==  torch.device('cpu'): prefetch = False
        if prefetch:
            self.train_loader = DataPrefetcher(train_loader, device=self.device)
            self.val_loader = DataPrefetcher(val_loader, device=self.device)

        # NOTE(review): benchmark mode speeds up fixed-size inputs; it is a
        # process-wide cuDNN setting, not scoped to this trainer instance.
        torch.backends.cudnn.benchmark = True
开发者ID:yassouali,项目名称:pytorch_segmentation,代码行数:25,代码来源:trainer.py

示例9: im_show

# 需要导入模块: from torchvision import transforms [as 别名]
# 或者: from torchvision.transforms import ToPILImage [as 别名]
def im_show(img_list):
    """
    Plot up to nine images side by side in a single matplotlib row.

    :param img_list: list of images convertible by ``transforms.ToPILImage``.
    :raises ValueError: if ten or more images are passed — a single subplot
        row addressed per-image can only hold nine panels.
    """
    to_pil = transforms.ToPILImage()
    if len(img_list) >= 10:
        # ValueError is more specific than the original bare Exception and
        # is still caught by any existing `except Exception` handler.
        raise ValueError("len(img_list) must be smaller than 10")

    for idx, img in enumerate(img_list):
        img = np.array(to_pil(img))
        # Explicit 3-argument form, equivalent to the original 3-digit
        # shorthand 100 + 10 * len(img_list) + (idx + 1).
        plt.subplot(1, len(img_list), idx + 1)
        fig = plt.imshow(img)
        # Hide axis ticks for a cleaner image strip.
        fig.axes.get_xaxis().set_visible(False)
        fig.axes.get_yaxis().set_visible(False)

    plt.show()
开发者ID:asanakoy,项目名称:kaggle_carvana_segmentation,代码行数:20,代码来源:dataset.py

示例10: get_imgs

# 需要导入模块: from torchvision import transforms [as 别名]
# 或者: from torchvision.transforms import ToPILImage [as 别名]
def get_imgs(img_path, imsize, max_objects, bbox=None, transform=None, normalize=None):
    """Read an RGB image, crop around its boxes, and build one normalized
    copy per GAN branch (resized for every branch except the deepest).

    In DCGAN mode a single normalized full-size image is returned.
    """
    img = Image.open(img_path).convert('RGB')
    if transform is not None:
        img = transform(img)

    img, bbox_scaled = crop_imgs(img, bbox, max_objects=max_objects)

    if cfg.GAN.B_DCGAN:
        return [normalize(img)], bbox_scaled

    ret = []
    for branch_idx in range(cfg.TREE.BRANCH_NUM):
        branch_img = transforms.ToPILImage()(img)
        # Every branch but the deepest one uses a reduced resolution.
        if branch_idx != cfg.TREE.BRANCH_NUM - 1:
            size = (imsize[branch_idx], imsize[branch_idx])
            branch_img = transforms.Resize(size)(branch_img)
        ret.append(normalize(branch_img))

    return ret, bbox_scaled
开发者ID:tohinz,项目名称:semantic-object-accuracy-for-generative-text-to-image-synthesis,代码行数:23,代码来源:datasets.py

示例11: get_dataloader

# 需要导入模块: from torchvision import transforms [as 别名]
# 或者: from torchvision.transforms import ToPILImage [as 别名]
def get_dataloader(batch_size, root="data/cifar10"):
    """
    Build CIFAR-10 train/test DataLoaders with images resized to 224x224.

    :param batch_size: samples per batch for both loaders.
    :param root: dataset directory; created (with parents) if missing.
    :return: (train_loader, test_loader) tuple.
    """
    root = Path(root).expanduser()
    # parents=True so the default "data/cifar10" works even when "data" does
    # not exist yet (plain mkdir() raised FileNotFoundError); exist_ok also
    # guards against a concurrent-creation race.
    root.mkdir(parents=True, exist_ok=True)
    root = str(root)

    # The ToTensor -> ToPILImage round-trip lets Resize operate on a PIL
    # image before the final tensor conversion and CIFAR-10 normalization.
    to_normalized_tensor = [transforms.ToTensor(),
                            transforms.ToPILImage(),
                            transforms.Resize((224, 224)),
                            transforms.ToTensor(),
                            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))]
    data_augmentation = [transforms.RandomHorizontalFlip(),]

    train_loader = DataLoader(
        datasets.CIFAR10(root, train=True, download=True,
                         transform=transforms.Compose(data_augmentation + to_normalized_tensor)),
        batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(
        datasets.CIFAR10(root, train=False, transform=transforms.Compose(to_normalized_tensor)),
        batch_size=batch_size, shuffle=True)
    return train_loader, test_loader
开发者ID:zhouyuangan,项目名称:SE_DenseNet,代码行数:23,代码来源:cifar10.py

示例12: feed_interpolated_input

# 需要导入模块: from torchvision import transforms [as 别名]
# 或者: from torchvision.transforms import ToPILImage [as 别名]
def feed_interpolated_input(self, x):
        """
        During the generator-transition phase of progressive growing, blend
        ``x`` with a pixelated (down-then-up nearest-neighbour scaled) copy
        of itself so training fades smoothly into the new resolution.

        Outside the transition phase the input is returned unchanged
        (moved to GPU when ``self.use_cuda`` is set).

        :param x: batch of images, presumably in [-1, 1] — the code maps via
            add(1).mul(0.5) to [0, 1] for the PIL round-trip; confirm upstream.
        :return: (possibly interpolated) batch, on GPU if enabled.
        """
        if self.phase == 'gtrns' and floor(self.resl)>2 and floor(self.resl)<=self.max_resl:
            # Fraction of the transition completed, in [0, 1].
            alpha = self.complete['gen']/100.0
            # NOTE(review): transforms.Scale is deprecated in modern
            # torchvision (replaced by Resize); kept as-is for compatibility.
            transform = transforms.Compose( [   transforms.ToPILImage(),
                                                transforms.Scale(size=int(pow(2,floor(self.resl)-1)), interpolation=0),      # 0: nearest
                                                transforms.Scale(size=int(pow(2,floor(self.resl))), interpolation=0),      # 0: nearest
                                                transforms.ToTensor(),
                                            ] )
            # Map [-1, 1] -> [0, 1] for the PIL round-trip, then back per image.
            x_low = x.clone().add(1).mul(0.5)
            for i in range(x_low.size(0)):
                x_low[i] = transform(x_low[i]).mul(2).add(-1)
            x = torch.add(x.mul(alpha), x_low.mul(1-alpha)) # interpolated_x

        if self.use_cuda:
            return x.cuda()
        else:
            return x
开发者ID:nashory,项目名称:pggan-pytorch,代码行数:19,代码来源:trainer.py

示例13: build_transform

# 需要导入模块: from torchvision import transforms [as 别名]
# 或者: from torchvision.transforms import ToPILImage [as 别名]
def build_transform(self):
        """
        Creates a basic transformation that was used to train the models
        """
        cfg = self.cfg

        # Images arrive from OpenCV already in BGR order, so "converting" to
        # BGR255 only requires scaling to [0, 255]; producing RGB in [0, 1]
        # instead requires flipping the channel axis.
        if cfg.INPUT.TO_BGR255:
            channel_transform = T.Lambda(lambda x: x * 255)
        else:
            channel_transform = T.Lambda(lambda x: x[[2, 1, 0]])

        return T.Compose([
            T.ToPILImage(),
            T.Resize(self.min_image_size),
            T.ToTensor(),
            channel_transform,
            T.Normalize(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD),
        ])
开发者ID:Res2Net,项目名称:Res2Net-maskrcnn,代码行数:31,代码来源:predictor.py

示例14: add_to_confMatrix

# 需要导入模块: from torchvision import transforms [as 别名]
# 或者: from torchvision.transforms import ToPILImage [as 别名]
def add_to_confMatrix(prediction, groundtruth, confMatrix, perImageStats, nbPixels):
    """Accumulate one batch of segmentation outputs into the IoU stats.

    ``confMatrix`` and ``perImageStats`` are updated in place by evalIoU;
    the pixel count is returned because rebinding an int parameter cannot
    propagate to the caller.

    :param prediction: output tensor, or list of per-GPU output tensors.
    :param groundtruth: batch of label tensors.
    :return: ``nbPixels`` plus the pixels evaluated in this batch.
    """
    if isinstance(prediction, list):   # merge multi-gpu tensors
        outputs_cpu = prediction[0].cpu()
        # BUG FIX: the original iterated range(1, len(outputs)) where
        # `outputs` was undefined (NameError); it must be `prediction`.
        for i in range(1, len(prediction)):
            outputs_cpu = torch.cat((outputs_cpu, prediction[i].cpu()), 0)
    else:
        outputs_cpu = prediction.cpu()
    for i in range(0, outputs_cpu.size(0)):   # evaluate IoU for each image in the batch
        # argmax over classes -> single-channel byte label image.
        prediction = ToPILImage()(outputs_cpu[i].max(0)[1].data.unsqueeze(0).byte())
        groundtruth_image = ToPILImage()(groundtruth[i].cpu().byte())
        nbPixels += evalIoU.evaluatePairPytorch(prediction, groundtruth_image, confMatrix, perImageStats, evalIoU.args)
    # Return the accumulated count so callers can actually use it.
    return nbPixels
开发者ID:mapleneverfade,项目名称:pytorch-semantic-segmentation,代码行数:13,代码来源:eval.py

示例15: img2label

# 需要导入模块: from torchvision import transforms [as 别名]
# 或者: from torchvision.transforms import ToPILImage [as 别名]
def img2label(img, label, count):
    """Black out image pixels whose label is 0 and save the result.

    :param img: H x W x C image (array-like).
    :param label: H x W label map; zeros mark background pixels.
    :param count: running index; the file is saved as imglabel_<count+1>.jpg.
        NOTE(review): the increment is local and does not affect the caller's
        counter — callers must advance their own count.
    """
    count += 1
    img = np.array(img)
    label = np.array(label)
    # Vectorized boolean mask replaces the original O(H*W) Python double
    # loop; zeroes all channels wherever label == 0, same result.
    img[label == 0] = 0
    image = ToPILImage()(img)
    image.save('./results/imglabel_' + str(count) + '.jpg')
开发者ID:mapleneverfade,项目名称:pytorch-semantic-segmentation,代码行数:12,代码来源:transform.py


注:本文中的torchvision.transforms.ToPILImage方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。