

Python utils.make_grid Method Code Examples

This article collects and organizes typical usage examples of the Python method torchvision.utils.make_grid. If you are wondering what exactly utils.make_grid does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the torchvision.utils module it belongs to.


The following presents 15 code examples of utils.make_grid, ordered by popularity.
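
Before the repository examples, here is a minimal sketch (not taken from any of the projects below) of the basic call that all of them build on: make_grid takes a batch tensor of shape (B, C, H, W) and returns a single (3, H', W') tensor that tiles the images into a grid.

import torch
from torchvision.utils import make_grid

batch = torch.rand(16, 3, 64, 64)           # fake batch of 16 RGB images
grid = make_grid(batch, nrow=4, padding=2)  # tile 4 images per row into one tensor
print(grid.shape)                           # torch.Size([3, 266, 266])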

Example 1: tensor2img

# Required import: from torchvision import utils [as alias]
# Or: from torchvision.utils import make_grid [as alias]
def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
    '''
    Converts a torch Tensor into an image Numpy array of BGR channel order
    Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
    Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
    '''
    tensor = tensor.squeeze().float().cpu().clamp_(*min_max)  # squeeze first, then clamp
    tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0])  # to range [0,1]
    n_dim = tensor.dim()
    if n_dim == 4:
        n_img = len(tensor)
        img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy()
        img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
    elif n_dim == 3:
        img_np = tensor.numpy()
        img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
    elif n_dim == 2:
        img_np = tensor.numpy()
    else:
        raise TypeError(
            'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))
    if out_type == np.uint8:
        img_np = (img_np * 255.0).round()
        # Important. Unlike MATLAB, numpy.uint8() will NOT round by default.
    return img_np.astype(out_type) 
Developer: cszn, Project: KAIR, Lines of code: 27, Source file: utils_image.py
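
A hypothetical call to the tensor2img helper above, assuming the imports implied by the header comments (math, numpy as np, torch and make_grid) are in scope; the batch shape is arbitrary.

import math
import numpy as np
import torch
from torchvision.utils import make_grid

fake_batch = torch.rand(4, 3, 32, 32)  # values already in the default (0, 1) range
bgr_grid = tensor2img(fake_batch)      # 4D input -> a single HWC uint8 BGR grid
print(bgr_grid.shape, bgr_grid.dtype)  # (70, 70, 3) uint8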

Example 2: plot_images_grid

# Required import: from torchvision import utils [as alias]
# Or: from torchvision.utils import make_grid [as alias]
def plot_images_grid(x: torch.Tensor, export_img, title: str = '', nrow=8, padding=2, normalize=False, pad_value=0):
    """Plot 4D Tensor of images of shape (B x C x H x W) as a grid."""

    grid = make_grid(x, nrow=nrow, padding=padding, normalize=normalize, pad_value=pad_value)
    npgrid = grid.cpu().numpy()

    plt.imshow(np.transpose(npgrid, (1, 2, 0)), interpolation='nearest')

    ax = plt.gca()
    ax.xaxis.set_visible(False)
    ax.yaxis.set_visible(False)

    if not (title == ''):
        plt.title(title)

    plt.savefig(export_img, bbox_inches='tight', pad_inches=0.1)
    plt.clf() 
Developer: lukasruff, Project: Deep-SAD-PyTorch, Lines of code: 19, Source file: plot_images_grid.py
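
A hypothetical call with a random batch; 'grid.png' is just a placeholder output path, and matplotlib, numpy and torch are assumed to be imported as in the source file.

import torch
import numpy as np
import matplotlib.pyplot as plt
from torchvision.utils import make_grid

batch = torch.rand(32, 3, 28, 28)  # fake batch of 32 images
plot_images_grid(batch, export_img='grid.png', title='Random batch', nrow=8)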

Example 3: get_image

# Required import: from torchvision import utils [as alias]
# Or: from torchvision.utils import make_grid [as alias]
def get_image(gen, point):
    """
    obtain an All-resolution grid of images from the given point
    :param gen: the generator object
    :param point: random latent point for generation
    :return: img => generated image
    """
    images = list(map(lambda x: x.detach(), gen(point)))[1:]
    images = [adjust_dynamic_range(image) for image in images]
    images = progressive_upscaling(images)
    images = list(map(lambda x: x.squeeze(dim=0), images))
    image = make_grid(
        images,
        nrow=int(ceil(sqrt(len(images))))
    )
    return image.cpu().numpy().transpose(1, 2, 0) 
Developer: akanimax, Project: BMSG-GAN, Lines of code: 18, Source file: demo.py

Example 4: train

# Required import: from torchvision import utils [as alias]
# Or: from torchvision.utils import make_grid [as alias]
def train(self, data_loader, epochs, save_training_gif=True):
        if save_training_gif:
            # Fix latents to see how image generation improves during training
            fixed_latents = Variable(self.G.sample_latent(64))
            if self.use_cuda:
                fixed_latents = fixed_latents.cuda()
            training_progress_images = []

        for epoch in range(epochs):
            print("\nEpoch {}".format(epoch + 1))
            self._train_epoch(data_loader)

            if save_training_gif:
                # Generate batch of images and convert to grid
                img_grid = make_grid(self.G(fixed_latents).cpu().data)
                # Convert to numpy and transpose axes to fit imageio convention
                # i.e. (height, width, channels)
                img_grid = np.transpose(img_grid.numpy(), (1, 2, 0))
                # Add image grid to training progress
                training_progress_images.append(img_grid)

        if save_training_gif:
            imageio.mimsave('./training_{}_epochs.gif'.format(epochs),
                            training_progress_images) 
Developer: vandit15, Project: Self-Supervised-Gans-Pytorch, Lines of code: 26, Source file: training.py

Example 5: tensor2img

# Required import: from torchvision import utils [as alias]
# Or: from torchvision.utils import make_grid [as alias]
def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
    '''
    Converts a torch Tensor into an image Numpy array
    Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
    Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
    '''
    tensor = tensor.squeeze().float().cpu().clamp_(*min_max)  # clamp
    tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0])  # to range [0,1]
    n_dim = tensor.dim()
    if n_dim == 4:
        n_img = len(tensor)
        img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy()
        img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
    elif n_dim == 3:
        img_np = tensor.numpy()
        img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
    elif n_dim == 2:
        img_np = tensor.numpy()
    else:
        raise TypeError(
            'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))
    if out_type == np.uint8:
        img_np = (img_np * 255.0).round()
        # Important. Unlike MATLAB, numpy.uint8() will NOT round by default.
    return img_np.astype(out_type) 
Developer: xinntao, Project: BasicSR, Lines of code: 27, Source file: util.py

Example 6: batch_lab2rgb_transpose_mc

# Required import: from torchvision import utils [as alias]
# Or: from torchvision.utils import make_grid [as alias]
def batch_lab2rgb_transpose_mc(img_l_mc, img_ab_mc):
    if isinstance(img_l_mc, Variable):
        img_l_mc = img_l_mc.data.cpu()
    if isinstance(img_ab_mc, Variable):
        img_ab_mc = img_ab_mc.data.cpu()

    if img_l_mc.is_cuda:
        img_l_mc = img_l_mc.cpu()
    if img_ab_mc.is_cuda:
        img_ab_mc = img_ab_mc.cpu()

    assert img_l_mc.dim()==4 and img_ab_mc.dim()==4, 'only for batch input'

    img_l = img_l_mc*l_norm + l_mean
    img_ab = img_ab_mc*ab_norm + ab_mean
    pred_lab = torch.cat((img_l, img_ab), dim=1)
    grid_lab = vutils.make_grid(pred_lab).numpy().astype('float64')
    grid_rgb = (np.clip(color.lab2rgb(grid_lab.transpose((1, 2, 0))), 0, 1)*255).astype('uint8')
    return grid_rgb 
Developer: msracver, Project: Deep-Exemplar-based-Colorization, Lines of code: 21, Source file: util.py
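
A hypothetical call to batch_lab2rgb_transpose_mc above. The function relies on module-level constants l_norm, l_mean, ab_norm and ab_mean; the values below are placeholders chosen so that (0, 1)-ranged inputs land in a plausible Lab range, not necessarily the values used in the original repository.

import numpy as np
import torch
from skimage import color
from torch.autograd import Variable
from torchvision import utils as vutils

l_norm, l_mean = 100.0, 0.0             # placeholder normalization constants
ab_norm, ab_mean = 110.0, 0.0

fake_l = torch.rand(4, 1, 64, 64)          # normalized L channel
fake_ab = torch.rand(4, 2, 64, 64) * 2 - 1 # normalized ab channels
rgb_grid = batch_lab2rgb_transpose_mc(fake_l, fake_ab)
print(rgb_grid.shape, rgb_grid.dtype)      # (H, W, 3) uint8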

Example 7: show_landmarks_batch

# Required import: from torchvision import utils [as alias]
# Or: from torchvision.utils import make_grid [as alias]
def show_landmarks_batch(sample_batched):
    images_batch, landmarks_batch = sample_batched['image'], sample_batched['landmarks']
    batch_size = len(images_batch)
    im_size = images_batch.size(2)
    grid_border_size = 2

    grid = utils.make_grid(images_batch)
    plt.imshow(grid.numpy().transpose(1, 2, 0))

    for i in range(batch_size):
        plt.scatter(landmarks_batch[i, :, 0].numpy() + i * im_size + (i + 1) * grid_border_size,
                    landmarks_batch[i, :, 1].numpy() + grid_border_size,
                    s=10,
                    marker='.',
                    c='r')
        plt.title('Batch from dataloader') 
Developer: gutouyu, Project: ML_CIA, Lines of code: 18, Source file: 5_Data Loading And Processing.py
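
A hypothetical call with a fabricated batch dict that matches the keys the function expects; the landmark count (68) and image size are arbitrary.

import torch
import matplotlib.pyplot as plt
from torchvision import utils

sample_batched = {
    'image': torch.rand(4, 3, 224, 224),      # fake batch of 4 images
    'landmarks': torch.rand(4, 68, 2) * 224,  # fake (x, y) landmarks per image
}
show_landmarks_batch(sample_batched)
plt.show()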

Example 8: sample_images

# Required import: from torchvision import utils [as alias]
# Or: from torchvision.utils import make_grid [as alias]
def sample_images(batches_done):
    """Saves a generated sample from the test set"""
    imgs = next(iter(val_dataloader))
    G_AB.eval()
    G_BA.eval()
    real_A = Variable(imgs["A"].type(Tensor))
    fake_B = G_AB(real_A)
    real_B = Variable(imgs["B"].type(Tensor))
    fake_A = G_BA(real_B)
    # Arrange images along x-axis
    real_A = make_grid(real_A, nrow=5, normalize=True)
    real_B = make_grid(real_B, nrow=5, normalize=True)
    fake_A = make_grid(fake_A, nrow=5, normalize=True)
    fake_B = make_grid(fake_B, nrow=5, normalize=True)
    # Arrange images along y-axis
    image_grid = torch.cat((real_A, fake_B, real_B, fake_A), 1)
    save_image(image_grid, "images/%s/%s.png" % (opt.dataset_name, batches_done), normalize=False)


# ----------
#  Training
# ---------- 
Developer: eriklindernoren, Project: PyTorch-GAN, Lines of code: 24, Source file: cyclegan.py

Example 9: save_images

# Required import: from torchvision import utils [as alias]
# Or: from torchvision.utils import make_grid [as alias]
def save_images(logger, mode_tag, images_dict, global_step):
    images_dict = tensor2numpy(images_dict)
    for tag, values in images_dict.items():
        if not isinstance(values, list) and not isinstance(values, tuple):
            values = [values]
        for idx, value in enumerate(values):
            if len(value.shape) == 3:
                value = value[:, np.newaxis, :, :]
            value = value[:1]
            value = torch.from_numpy(value)

            image_name = '{}/{}'.format(mode_tag, tag)
            if len(values) > 1:
                image_name = image_name + "_" + str(idx)
            logger.add_image(image_name, vutils.make_grid(value, padding=0, nrow=1, normalize=True, scale_each=True),
                             global_step) 
Developer: xy-guo, Project: GwcNet, Lines of code: 18, Source file: experiment.py

Example 10: plot_spikes

# Required import: from torchvision import utils [as alias]
# Or: from torchvision.utils import make_grid [as alias]
def plot_spikes(
        self,
        spike_record: Dict[str, torch.Tensor],
        tag: str = "spike",
        step: int = None,
    ) -> None:
        # language=rst
        """
        Plots all spike records inside of ``spike_record``. Keeps unique
        plots for all unique tags that are given.

        :param spike_record: Dictionary of spikes to be rasterized.
        :param tag: A unique tag to associate the data with.
        :param step: The step of the pipeline.
        """
        for k, spikes in spike_record.items():
            # Reshape spikes into 1 x 1 x #neurons x T
            spikes = spikes.view(1, 1, -1, spikes.shape[-1]).float()
            spike_grid_img = make_grid(spikes, nrow=1, pad_value=0.5)

            self.writer.add_image(tag + "_" + str(k), spike_grid_img, step) 
Developer: BindsNET, Project: bindsnet, Lines of code: 23, Source file: pipeline_analysis.py

Example 11: plot_voltages

# Required import: from torchvision import utils [as alias]
# Or: from torchvision.utils import make_grid [as alias]
def plot_voltages(
        self,
        voltage_record: Dict[str, torch.Tensor],
        thresholds: Optional[Dict[str, torch.Tensor]] = None,
        tag: str = "voltage",
        step: int = None,
    ) -> None:
        # language=rst
        """
        Plots all voltage records and given thresholds. Keeps unique
        plots for all unique tags that are given.

        :param voltage_record: Dictionary of voltages for neurons inside of networks
                               organized by the layer they correspond to.
        :param thresholds: Optional dictionary of threshold values for neurons.
        :param tag: A unique tag to associate the data with.
        :param step: The step of the pipeline.
        """
        for k, v in voltage_record.items():
            # Reshape voltages into 1 x 1 x #neurons x T
            v = v.view(1, 1, -1, v.shape[-1])
            voltage_grid_img = make_grid(v, nrow=1, pad_value=0)

            self.writer.add_image(tag + "_" + str(k), voltage_grid_img, step) 
Developer: BindsNET, Project: bindsnet, Lines of code: 26, Source file: pipeline_analysis.py
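
Examples 10 and 11 rely on the same trick: a 2-D record of shape (#neurons, T) is viewed as a 1 x 1 x #neurons x T batch so that make_grid turns it into a single raster image for TensorBoard. A standalone sketch of just that step, with made-up data:

import torch
from torchvision.utils import make_grid

spikes = (torch.rand(100, 250) > 0.9).float()  # fake record: 100 neurons, 250 timesteps
raster = make_grid(spikes.view(1, 1, *spikes.shape), nrow=1, pad_value=0.5)
print(raster.shape)                            # (3, H, W) image tensor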

Example 12: image

# Required import: from torchvision import utils [as alias]
# Or: from torchvision.utils import make_grid [as alias]
def image(self, img_tensors: torch.Tensor, global_step: int, tag: str = "Train/input",
              grid_size: Union[list, tuple] = (3, 1), shuffle=True, save_file=False):

        if len(img_tensors.size()) != 4:
            raise TypeError("img_tensors rank should be 4, got %d instead" % len(img_tensors.size()))
        self._build_dir(os.path.join(self.logdir, "plots", tag))
        rows, columns = grid_size[0], grid_size[1]
        batch_size = len(img_tensors)  # img_tensors =>(batchsize, 3, 256, 256)
        num_samples: int = min(batch_size, rows * columns)
        sampled_tensor = self._sample(img_tensors, num_samples, shuffle).detach().cpu()
        # (sample_num, 3, 32,32)  tensors
        # sampled_images = map(transforms.Normalize(mean, std), sampled_tensor)  # (sample_num, 3, 32,32) images
        sampled_images: torch.Tensor = make_grid(sampled_tensor, nrow=rows, normalize=True, scale_each=True)
        self.writer.add_image(tag, sampled_images, global_step)

        if save_file:
            img = transforms.ToPILImage()(sampled_images)
            filename = "%s/plots/%s/E%03d.png" % (self.logdir, tag, global_step)
            img.save(filename) 
Developer: dingguanglei, Project: jdit, Lines of code: 21, Source file: super.py

Example 13: show_segmentation

# Required import: from torchvision import utils [as alias]
# Or: from torchvision.utils import make_grid [as alias]
def show_segmentation(img, gt, pred, mean, std, colormap):
    colormap = colormap.to(img.device)
    gt = F.embedding(gt, colormap).permute(2, 0, 1).div(255)
    pred = F.embedding(pred, colormap).permute(2, 0, 1).div(255)
    mean = torch.as_tensor(mean, dtype=torch.float32, device=img.device)
    std = torch.as_tensor(std, dtype=torch.float32, device=img.device)
    img = img * std[:, None, None] + mean[:, None, None]
    grid = torch.stack([img, gt, pred], 0)
    grid = make_grid(grid, nrow=3)
    grid = (
        grid.mul_(255)
        .add_(0.5)
        .clamp_(0, 255)
        .permute(1, 2, 0)
        .to('cpu', torch.uint8)
        .numpy()
    )
    img = Image.fromarray(grid)

    return img 
Developer: rosinality, Project: ocr-pytorch, Lines of code: 22, Source file: util.py
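
A hypothetical call to show_segmentation above with fabricated inputs: a "normalized" random image, integer label maps, ImageNet-style mean/std and a random 0-255 colour map with one RGB row per class. Everything here is made up for illustration.

import torch

num_classes = 5
img = torch.rand(3, 64, 64)                # pretend this is a normalized image
gt = torch.randint(num_classes, (64, 64))  # ground-truth class indices
pred = torch.randint(num_classes, (64, 64))
mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
colormap = torch.randint(0, 256, (num_classes, 3)).float()
out = show_segmentation(img, gt, pred, mean, std, colormap)
out.save('segmentation_grid.png')          # returns a PIL Image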

Example 14: sample

# Required import: from torchvision import utils [as alias]
# Or: from torchvision.utils import make_grid [as alias]
def sample(self, z, y_onehot, eps_std=0.5):
        """
        Sample image

        :param z: latent feature vector
        :type z: torch.Tensor or None
        :param y_onehot: one-hot vector of label
        :type y_onehot: torch.Tensor or None
        :param eps_std: standard deviation of eps
        :type eps_std: float
        :return: generated image
        :rtype: torch.Tensor
        """
        with torch.no_grad():
            # generate sample from model
            img = self.graph(z=z, y_onehot=y_onehot, eps_std=eps_std, reverse=True)

            # create image grid
            grid = make_grid(img)

            return grid 
Developer: corenel, Project: pytorch-glow, Lines of code: 23, Source file: inferer.py

Example 15: get_filter_images

# Required import: from torchvision import utils [as alias]
# Or: from torchvision.utils import make_grid [as alias]
def get_filter_images(self):
        """
        Generate a grid of images representing the convolution layer weights
        :return: list of images
        """
        images = []
        x = 0
        for mod in self.conv:
            if type(mod) == nn.modules.conv.Conv2d:
                orig_shape = mod.weight.data.shape
                weights = mod.weight.data.view(
                    [orig_shape[0] * orig_shape[1], orig_shape[2], orig_shape[3]]).unsqueeze(1)
                rows = 2 ** math.ceil(math.sqrt(math.sqrt(weights.shape[0])))
                images.append(("CNN.{}".format(x),
                               vutils.make_grid(weights, nrow=rows, padding=1, normalize=True, scale_each=True)))
            x += 1
        return images 
Developer: ryanleary, Project: patter, Lines of code: 19, Source file: deepspeech.py
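
A standalone sketch of the weight-flattening trick used above, outside the class: a convolution weight of shape (out_ch, in_ch, kH, kW) is reshaped to (out_ch * in_ch, 1, kH, kW) so every 2-D kernel becomes one tile of the grid. The layer here is made up.

import math
import torch.nn as nn
from torchvision import utils as vutils

conv = nn.Conv2d(3, 16, kernel_size=5)   # hypothetical conv layer
w = conv.weight.data                     # shape (16, 3, 5, 5)
tiles = w.view(w.shape[0] * w.shape[1], 1, w.shape[2], w.shape[3])
rows = 2 ** math.ceil(math.sqrt(math.sqrt(tiles.shape[0])))
filter_grid = vutils.make_grid(tiles, nrow=rows, padding=1, normalize=True, scale_each=True)
print(filter_grid.shape)                 # single (3, H, W) image of all kernels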


Note: The torchvision.utils.make_grid examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.