

Python functional.to_pil_image Method Code Examples

This article collects typical usage examples of the to_pil_image method from Python's torchvision.transforms.functional. If you are wondering what functional.to_pil_image does and how to use it, the curated code examples below may help. You can also explore further usage examples from its parent module, torchvision.transforms.functional.


The following presents 15 code examples of the functional.to_pil_image method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
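Before the examples, here is a minimal sketch of the method itself, assuming a recent torchvision; the tensor shapes and values are purely illustrative. to_pil_image accepts a CHW torch.Tensor or an HWC numpy.ndarray and returns a PIL.Image:

import torch
import numpy as np
from torchvision.transforms.functional import to_pil_image

# A float tensor in [0, 1] with shape (C, H, W) becomes an RGB PIL image.
tensor_img = torch.rand(3, 64, 64)
pil_from_tensor = to_pil_image(tensor_img)

# A uint8 ndarray with shape (H, W, C) is accepted as well.
array_img = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
pil_from_array = to_pil_image(array_img)

# An explicit PIL mode can be passed for ambiguous inputs,
# e.g. 'L' for a single-channel uint8 tensor.
gray = to_pil_image(torch.randint(0, 256, (1, 64, 64), dtype=torch.uint8), mode='L')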

Example 1: process

# Required module: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import to_pil_image [as alias]
def process(eval_img, device='cpu'):
    (img, origin, unpadder), file_name = eval_img
    with torch.no_grad():
        out = model(img.to(device))

    prob = torch.sigmoid(out)
    mask = prob > 0.5
    mask = torch.nn.MaxPool2d(kernel_size=(3, 3), padding=(1, 1), stride=1)(mask.float()).byte()
    mask = unpadder(mask)
    mask = mask.float().cpu()

    save_image(mask, file_name + ' _mask.jpg')
    origin_np = np.array(to_pil_image(origin[0]))
    mask_np = to_pil_image(mask[0]).convert("L")
    mask_np = np.array(mask_np, dtype='uint8')
    mask_np = draw_bounding_box(origin_np, mask_np, 500)
    mask_ = Image.fromarray(mask_np)
    mask_.save(file_name + "_contour.jpg")
    # ret, mask_np = cv2.threshold(mask_np, 127, 255, 0)
    # dst = cv2.inpaint(origin_np, mask_np, 1, cv2.INPAINT_NS)
    # out = Image.fromarray(dst)
    # out.save(file_name + ' _box.jpg') 
Developer: yu45020, Project: Text_Segmentation_Image_Inpainting, Lines: 24, Source: demo_segmentation.py

Example 2: test_transform_goturn

# Required module: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import to_pil_image [as alias]
def test_transform_goturn(self):
        base_dataset = VOT(self.vot_dir, return_rect=True, download=True)
        transform = TransformGOTURN()
        dataset = Pairwise(
            base_dataset, transform, pairs_per_video=1,
            frame_range=1, causal=True)
        self.assertGreater(len(dataset), 0)

        for crop_z, crop_x, labels in dataset:
            self.assertEqual(crop_z.size(), crop_x.size())

        if self.visualize:
            for t in range(10):
                crop_z, crop_x, labels = random.choice(dataset)
                mean_color = torch.tensor(
                    transform.mean_color).float().view(3, 1, 1)
                crop_z = F.to_pil_image((crop_z + mean_color) / 255.0)
                crop_x = F.to_pil_image((crop_x + mean_color) / 255.0)
                labels = labels.cpu().numpy()
                labels *= transform.out_size / transform.label_scale_factor

                bndbox = np.concatenate([
                    labels[:2], labels[2:] - labels[:2]])
                show_frame(crop_x, bndbox, fig_n=1, pause=1) 
Developer: huanglianghua, Project: open-vot, Lines: 26, Source: test_transform_goturn.py

Example 3: inference

# Required module: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import to_pil_image [as alias]
def inference():
    model.eval()
    print("Begin inference on {} {}.".format(args.dataset, args.split))
    for data in tqdm(dataloader):
        images = [frame['image'].to(device) for frame in data]
        with torch.no_grad():
            preds = model(images)
            preds = [torch.sigmoid(pred) for pred in preds]
        # save predicted saliency maps
        for i, pred_ in enumerate(preds):
            for j, pred in enumerate(pred_.detach().cpu()):
                dataset = data[i]['dataset'][j]
                image_id = data[i]['image_id'][j]
                height = data[i]['height'].item()
                width = data[i]['width'].item()
                result_path = os.path.join(args.results_folder, "{}/{}.png".format(dataset, image_id))

                result = TF.to_pil_image(pred)
                result = result.resize((height, width))
                dirname = os.path.dirname(result_path)
                if not os.path.exists(dirname):
                    os.makedirs(dirname)
                result.save(result_path) 
Developer: Kinpzz, Project: RCRNet-Pytorch, Lines: 25, Source: inference.py

Example 4: test_tta

# Required module: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import to_pil_image [as alias]
def test_tta():
    img_f = os.path.join(settings.TEST_IMG_DIR, '0c2637aa9.jpg')
    img = Image.open(img_f)
    img = img.convert('RGB')

    tta_index = 7
    trans1 = TTATransform(tta_index)
    img = trans1(img)
    #img.show()

    img_np = np.array(img)
    img_np = np.expand_dims(img_np, 0)
    print(img_np.shape)
    img_np = tta_back_mask_np(img_np, tta_index)
    img_np = np.reshape(img_np, (768, 768, 3))
    img_back = F.to_pil_image(img_np)
    img_back.show() 
Developer: microsoft, Project: nni, Lines: 19, Source: augmentation.py

Example 5: plot_samples

# Required module: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import to_pil_image [as alias]
def plot_samples(images, targets, ignore_index=None):
    # Unnormalize image
    nb_samples = 4
    _, axes = plt.subplots(2, nb_samples, figsize=(20, 5))
    for idx in range(nb_samples):
        img = images[idx]
        img *= torch.tensor([0.229, 0.224, 0.225]).view(-1, 1, 1)
        img += torch.tensor([0.485, 0.456, 0.406]).view(-1, 1, 1)
        img = F.to_pil_image(img)
        target = targets[idx]
        if isinstance(ignore_index, int):
            target[target == ignore_index] = 0

        axes[0][idx].imshow(img)
        axes[0][idx].axis('off')
        axes[1][idx].imshow(target)
        axes[1][idx].axis('off')
    plt.show() 
Developer: frgfm, Project: Holocron, Lines: 20, Source: train.py
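The two lines that undo the ImageNet normalization recur across several of these examples (see also Examples 6 and 15). A minimal standalone sketch of that pattern, assuming a CHW tensor normalized with the standard ImageNet statistics:

import torch
from torchvision.transforms.functional import to_pil_image

IMAGENET_STD = torch.tensor([0.229, 0.224, 0.225]).view(-1, 1, 1)
IMAGENET_MEAN = torch.tensor([0.485, 0.456, 0.406]).view(-1, 1, 1)

def unnormalize_to_pil(img):
    # Invert Normalize(mean, std): x_norm * std + mean, then convert to PIL.
    return to_pil_image(img * IMAGENET_STD + IMAGENET_MEAN)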

Example 6: plot_samples

# Required module: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import to_pil_image [as alias]
def plot_samples(images, targets):
    # Unnormalize image
    nb_samples = 4
    _, axes = plt.subplots(1, nb_samples, figsize=(20, 5))
    for idx in range(nb_samples):
        img = images[idx]
        img *= torch.tensor([0.229, 0.224, 0.225]).view(-1, 1, 1)
        img += torch.tensor([0.485, 0.456, 0.406]).view(-1, 1, 1)
        img = F.to_pil_image(img)

        axes[idx].imshow(img)
        axes[idx].axis('off')
        for box, label in zip(targets[idx]['boxes'], targets[idx]['labels']):
            xmin = int(box[0] * images[idx].shape[-1])
            ymin = int(box[1] * images[idx].shape[-2])
            xmax = int(box[2] * images[idx].shape[-1])
            ymax = int(box[3] * images[idx].shape[-2])

            rect = Rectangle((xmin, ymin), xmax - xmin, ymax - ymin,
                             linewidth=2, edgecolor='lime', facecolor='none')
            axes[idx].add_patch(rect)
            axes[idx].text(xmin, ymin, classes[label.item()], color='lime', fontsize=12)

    plt.show() 
Developer: frgfm, Project: Holocron, Lines: 26, Source: train.py

Example 7: __call__

# Required module: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import to_pil_image [as alias]
def __call__(self, image, mask):
        # transforming to PIL image
        image, mask = F.to_pil_image(image), F.to_pil_image(mask)

        # random crop
        if self.crop:
            i, j, h, w = T.RandomCrop.get_params(image, self.crop)
            image, mask = F.crop(image, i, j, h, w), F.crop(mask, i, j, h, w)

        if np.random.rand() < self.p_flip:
            image, mask = F.hflip(image), F.hflip(mask)

        # color transforms || ONLY ON IMAGE
        if self.color_jitter_params:
            image = self.color_tf(image)

        # random affine transform
        if np.random.rand() < self.p_random_affine:
            affine_params = T.RandomAffine(180).get_params((-90, 90), (1, 1), (2, 2), (-45, 45), self.crop)
            image, mask = F.affine(image, *affine_params), F.affine(mask, *affine_params)

        # transforming to tensor
        image = F.to_tensor(image)
        if not self.long_mask:
            mask = F.to_tensor(mask)
        else:
            mask = to_long_tensor(mask)

        return image, mask 
Developer: cosmic-cortex, Project: pytorch-UNet, Lines: 31, Source: dataset.py

Example 8: __getitem__

# Required module: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import to_pil_image [as alias]
def __getitem__(self, index):
        name = self.img_names[index]

        # LOAD IMG, POINT, and ROI
        image = imread(os.path.join(self.path, name + ".jpg"))
        points = imread(os.path.join(self.path, name + "dots.png"))[:,:,:1].clip(0,1)
        roi = loadmat(os.path.join(self.path, name + "mask.mat"))["BW"][:,:,np.newaxis]
        
        # LOAD IMG AND POINT
        image = image * roi
        image = hu.shrink2roi(image, roi)
        points = hu.shrink2roi(points, roi).astype("uint8")

        counts = torch.LongTensor(np.array([int(points.sum())]))   
        
        collection = list(map(FT.to_pil_image, [image, points]))
        image, points = transformers.apply_transform(self.split, image, points, 
                   transform_name=self.exp_dict['dataset']['transform'])
            
        return {"images":image, 
                "points":points.squeeze(), 
                "counts":counts, 
                'meta':{"index":index}} 
Developer: ElementAI, Project: LCFCN, Lines: 25, Source: trancos.py

Example 9: __getitem__

# Required module: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import to_pil_image [as alias]
def __getitem__(self, index):        
        name = self.img_names[index]
      
        # LOAD IMG, POINT, and ROI
        image = imread(os.path.join(self.path, "images", name))
        if image.ndim == 2:
            image = image[:,:,None].repeat(3,2)
        pointList = hu.load_mat(os.path.join(self.path, 
                        "ground-truth", 
          "GT_" + name.replace(".jpg", "") +".mat"))
        pointList = pointList["image_info"][0][0][0][0][0] 
        
        points = np.zeros(image.shape[:2], "uint8")[:,:,None]
        H, W = image.shape[:2]
        for x, y in pointList:
            points[min(int(y), H-1), min(int(x), W-1)] = 1

        counts = torch.LongTensor(np.array([pointList.shape[0]]))

        collection = list(map(FT.to_pil_image, [image, points]))
        image, points = transformers.apply_transform(self.split, image, points, 
                   transform_name=self.exp_dict['dataset']['transform'])
            
        return {"images":image, 
                "points":points.squeeze(), 
                "counts":counts, 
                'meta':{"index":index}} 
Developer: ElementAI, Project: LCFCN, Lines: 29, Source: shanghai.py

Example 10: apply

# Required module: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import to_pil_image [as alias]
def apply(image_path, model_name, model_path):
  transformer = ut.ComposeJoint(
                    [
                         [transforms.ToTensor(), None],
                         [transforms.Normalize(*ut.mean_std), None],
                         [None,  ut.ToLong() ]
                    ])  

  # Load best model
  model = model_dict[model_name](n_classes=2).cuda()
  model.load_state_dict(torch.load(model_path))

  # Read Image
  image_raw = imread(image_path)
  collection = list(map(FT.to_pil_image, [image_raw, image_raw]))
  image, _ = transformer(collection)

  batch = {"images":image[None]}
  
  # Make predictions
  pred_blobs = model.predict(batch, method="blobs").squeeze()
  pred_counts = int(model.predict(batch, method="counts").ravel()[0])

  # Save Output
  save_path = image_path + "_blobs_count:{}.png".format(pred_counts)

  imsave(save_path, ut.combine_image_blobs(image_raw, pred_blobs))
  print("| Counts: {}\n| Output saved in: {}".format(pred_counts, save_path)) 
Developer: ElementAI, Project: LCFCN, Lines: 30, Source: test_on_image.py

Example 11: __getitem__

# Required module: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import to_pil_image [as alias]
def __getitem__(self, index):
        """Retrieves image from folder and corrupts it."""

        # Use converged image, if requested
        if self.clean_targets:
            target = self.reference
        else:
            target_fname = self.imgs[index].replace('render', 'target')
            file_ext = '.exr' if self.hdr_targets else '.png'
            target_fname = os.path.splitext(target_fname)[0] + file_ext
            target_path = os.path.join(self.root_dir, 'target', target_fname)
            if self.hdr_targets:
                target = tvF.to_pil_image(load_hdr_as_tensor(target_path))
            else:
                target = Image.open(target_path).convert('RGB')

        # Get buffers
        render_path = os.path.join(self.root_dir, 'render', self.imgs[index])
        albedo_path = os.path.join(self.root_dir, 'albedo', self.albedos[index])
        normal_path =  os.path.join(self.root_dir, 'normal', self.normals[index])

        if self.hdr_buffers:
            render = tvF.to_pil_image(load_hdr_as_tensor(render_path))
            albedo = tvF.to_pil_image(load_hdr_as_tensor(albedo_path))
            normal = tvF.to_pil_image(load_hdr_as_tensor(normal_path))
        else:
            render = Image.open(render_path).convert('RGB')
            albedo = Image.open(albedo_path).convert('RGB')
            normal = Image.open(normal_path).convert('RGB')

        # Crop
        if self.crop_size != 0:
            buffers = [render, albedo, normal, target]
            buffers = [tvF.to_tensor(b) for b in self._random_crop(buffers)]
        else:
            # Without cropping, still convert to tensors so the cat below works.
            buffers = [tvF.to_tensor(b) for b in (render, albedo, normal, target)]

        # Stack buffers to create input volume
        source = torch.cat(buffers[:3], dim=0)
        target = buffers[3]

        return source, target 
Developer: joeylitalien, Project: noise2noise-pytorch, Lines: 42, Source: datasets.py

Example 12: create_montage

# Required module: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import to_pil_image [as alias]
def create_montage(img_name, noise_type, save_path, source_t, denoised_t, clean_t, show):
    """Creates montage for easy comparison."""

    fig, ax = plt.subplots(1, 3, figsize=(9, 3))
    fig.canvas.set_window_title(img_name.capitalize()[:-4])

    # Bring tensors to CPU
    source_t = source_t.cpu().narrow(0, 0, 3)
    denoised_t = denoised_t.cpu()
    clean_t = clean_t.cpu()
    
    source = tvF.to_pil_image(source_t)
    denoised = tvF.to_pil_image(torch.clamp(denoised_t, 0, 1))
    clean = tvF.to_pil_image(clean_t)

    # Build image montage
    psnr_vals = [psnr(source_t, clean_t), psnr(denoised_t, clean_t)]
    titles = ['Input: {:.2f} dB'.format(psnr_vals[0]),
              'Denoised: {:.2f} dB'.format(psnr_vals[1]),
              'Ground truth']
    zipped = zip(titles, [source, denoised, clean])
    for j, (title, img) in enumerate(zipped):
        ax[j].imshow(img)
        ax[j].set_title(title)
        ax[j].axis('off')

    # Open pop up window, if requested
    if show > 0:
        plt.show()

    # Save to files
    fname = os.path.splitext(img_name)[0]
    source.save(os.path.join(save_path, f'{fname}-{noise_type}-noisy.png'))
    denoised.save(os.path.join(save_path, f'{fname}-{noise_type}-denoised.png'))
    fig.savefig(os.path.join(save_path, f'{fname}-{noise_type}-montage.png'), bbox_inches='tight') 
Developer: joeylitalien, Project: noise2noise-pytorch, Lines: 37, Source: utils.py

Example 13: _instance_process

# Required module: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import to_pil_image [as alias]
def _instance_process(self, pic_list, params):
        if isinstance(pic_list, np.ndarray):
            if pic_list.ndim == 3:
                return self.to_pil_image(pic_list)
            elif pic_list.ndim == 4:
                # Index into the batch; passing the bare index would fail.
                return [self.to_pil_image(pic_list[i]) for i in range(pic_list.shape[0])]
            else:
                raise TypeError
        raise TypeError 
Developer: yolomax, Project: person-reid-lib, Lines: 11, Source: transforms.py

Example 14: to_pil_image

# Required module: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import to_pil_image [as alias]
def to_pil_image(self, pic):
        if pic.shape[2] == 3:
            return ImageData(F.to_pil_image(pic, self.mode))
        elif pic.shape[2] == 1:
            return ImageData(F.to_pil_image(pic))
        elif pic.shape[2] == 5:
            if self.use_flow:
                pic_rgb = F.to_pil_image(pic[..., :3], self.mode)
                pic_x = F.to_pil_image(pic[..., 3:4])
                pic_y = F.to_pil_image(pic[..., 4:5])
                return ImageData(pic_rgb, pic_x, pic_y)
            else:
                return ImageData(F.to_pil_image(pic[..., :3], self.mode))
        else:
            raise ValueError 
Developer: yolomax, Project: person-reid-lib, Lines: 17, Source: transforms.py

Example 15: convert_cell_to_img

# Required module: from torchvision.transforms import functional [as alias]
# Or: from torchvision.transforms.functional import to_pil_image [as alias]
def convert_cell_to_img(t, padding=16):
    """Converts pytorch tensor into a Pillow Image. The padding will be removed
    from the resulting image"""
    std = torch.Tensor([0.229, 0.224, 0.225]).reshape(-1, 1, 1)
    mu = torch.Tensor([0.485, 0.456, 0.406]).reshape(-1, 1, 1)
    output = t.mul(std)
    output.add_(mu)
    img = to_pil_image(output)
    w, h = img.size
    return img.crop((padding, padding, w - padding, h - padding)) 
Developer: skorch-dev, Project: skorch, Lines: 12, Source: utils.py
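A possible usage sketch for the helper above; the image path is a placeholder, and the preprocessing mirrors the ImageNet normalization that convert_cell_to_img undoes:

from PIL import Image
import torchvision.transforms as T

raw = Image.open('sample.jpg').convert('RGB')  # hypothetical input image

# Normalize with the same ImageNet statistics the helper inverts.
preprocess = T.Compose([
    T.ToTensor(),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
cell = preprocess(raw)

# Round-trip back to a PIL image, cropping 16 px of padding on each side.
restored = convert_cell_to_img(cell, padding=16)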


Note: The torchvision.transforms.functional.to_pil_image examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers, and the copyright of the source code belongs to the original authors; please consult each project's license before distributing or using the code. Do not reproduce this article without permission.