

Python cv2.INTER_NEAREST Attribute Code Examples

This article collects typical code examples of the cv2.INTER_NEAREST attribute in Python. If you are wondering what cv2.INTER_NEAREST is for or how to use it, the selected examples below should help; you can also explore other usage examples from the cv2 module.


The following shows 15 code examples of the cv2.INTER_NEAREST attribute, sorted by popularity by default.
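Before the individual examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the tiny label map is made-up data) of why INTER_NEAREST is the usual choice when resizing label or mask images: nearest-neighbor resampling only ever copies existing pixel values, so no new class ids appear, whereas bilinear interpolation blends neighboring ids into intermediate values that correspond to no real class.

import cv2
import numpy as np

# A tiny 2x2 "label map" containing only the class ids 0 and 3 (hypothetical data).
labels = np.array([[0, 3],
                   [3, 0]], dtype=np.uint8)

# Nearest-neighbor upsampling preserves the original set of class ids.
up_nearest = cv2.resize(labels, (4, 4), interpolation=cv2.INTER_NEAREST)
print(np.unique(up_nearest))   # [0 3]

# Bilinear upsampling blends the ids and produces values such as 1 or 2,
# which do not correspond to any class in the input.
up_linear = cv2.resize(labels, (4, 4), interpolation=cv2.INTER_LINEAR)
print(np.unique(up_linear))    # includes intermediate values

This is the pattern that recurs throughout the examples below: images are resized with INTER_LINEAR or INTER_CUBIC, while the corresponding masks, label maps, or parsing maps are resized with INTER_NEAREST.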

Example 1: resize

# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_NEAREST [as alias]
def resize(video, size, interpolation):
  if interpolation == 'bilinear':
    inter = cv2.INTER_LINEAR
  elif interpolation == 'nearest':
    inter = cv2.INTER_NEAREST
  else:
    raise NotImplementedError

  shape = video.shape[:-3]
  video = video.reshape((-1, *video.shape[-3:]))
  resized_video = np.zeros((video.shape[0], size[1], size[0], video.shape[-1]))
  for i in range(video.shape[0]):
    # pass interpolation by keyword: the third positional argument of cv2.resize is dst
    img = cv2.resize(video[i], size, interpolation=inter)
    if len(img.shape) == 2:
      img = img[:, :, np.newaxis]
    resized_video[i] = img
  return resized_video.reshape((*shape, size[1], size[0], video.shape[-1])) 
Author: jthsieh, Project: DDPAE-video-prediction, Lines: 19, Source: video_transforms.py

Example 2: parsing_on_boxes

# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_NEAREST [as alias]
def parsing_on_boxes(parsing, rois, heatmap_size):
    device = rois.device
    rois = rois.to(torch.device("cpu"))
    parsing_list = []
    for i in range(rois.shape[0]):
        parsing_ins = parsing[i].cpu().numpy()
        xmin, ymin, xmax, ymax = torch.round(rois[i]).int()
        cropped_parsing = parsing_ins[ymin:ymax, xmin:xmax]
        resized_parsing = cv2.resize(
            cropped_parsing,
            (heatmap_size[1], heatmap_size[0]),
            interpolation=cv2.INTER_NEAREST
        )
        parsing_list.append(torch.from_numpy(resized_parsing))

    if len(parsing_list) == 0:
        return torch.empty(0, dtype=torch.int64, device=device)
    return torch.stack(parsing_list, dim=0).to(device, dtype=torch.int64) 
Author: soeaver, Project: Parsing-R-CNN, Lines: 20, Source: parsing.py

Example 3: grid_batch_images

# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_NEAREST [as alias]
def grid_batch_images(self, images):
        n, h, w, c = images.shape
        a = int(math.floor(np.sqrt(n)))
        # images = (((images - images.min()) * 255) / (images.max() - images.min())).astype(np.uint8)
        images = images.astype(np.uint8)
        images_in_square = np.reshape(images[:a * a], (a, a, h, w, c))
        new_img = np.zeros((h * a, w * a, c), dtype=np.uint8)
        for col_i, col_images in enumerate(images_in_square):
            for row_i, image in enumerate(col_images):
                new_img[col_i * h: (1 + col_i) * h, row_i * w: (1 + row_i) * w] = image
        resolution = self.cfg.resolution
        if self.cfg.resolution != h:
            scale = resolution / h
            new_img = cv2.resize(new_img, None, fx=scale, fy=scale,
                                 interpolation=cv2.INTER_NEAREST)
        return new_img 
Author: preritj, Project: progressive_growing_of_GANs, Lines: 18, Source: utils.py

Example 4: spline_transform_multi

# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_NEAREST [as alias]
def spline_transform_multi(img, mask):
    bimask=mask>0
    M,N=np.where(bimask)
    w=np.ptp(N)+1
    h=np.ptp(M)+1
    kernel=cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    bound=cv2.dilate(bimask.astype('uint8'),kernel)-bimask
    y,x=np.where(bound>0)

    if x.size>4:
        newxy=thin_plate_transform(x,y,w,h,mask.shape[:2],num_points=5)

        new_img=cv2.remap(img,newxy,None,cv2.INTER_LINEAR)
        new_msk=cv2.remap(mask,newxy,None,cv2.INTER_NEAREST)
    else:  # too few boundary points for a spline; return inputs unchanged (also covers x.size == 0)
        new_img=img
        new_msk=mask
    return new_img,new_msk 
Author: yelantingfeng, Project: pyLucid, Lines: 20, Source: lucidDream.py

Example 5: __call__

# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_NEAREST [as alias]
def __call__(self, sample):

        # Fixed range of scales
        sc = self.scales[random.randint(0, len(self.scales) - 1)]

        for elem in sample.keys():
            if 'fname' in elem:
                continue
            tmp = sample[elem]

            if tmp.ndim == 2:
                flagval = cv2.INTER_NEAREST
            else:
                flagval = cv2.INTER_CUBIC

            tmp = cv2.resize(tmp, None, fx=sc, fy=sc, interpolation=flagval)

            sample[elem] = tmp

        return sample 
Author: omkar13, Project: MaskTrack, Lines: 22, Source: custom_transforms.py

Example 6: __getitem__

# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_NEAREST [as alias]
def __getitem__(self, index):
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
        size = image.shape
        name = datafiles["name"]
        if self.f_scale != 1:
            image = cv2.resize(image, None, fx=self.f_scale, fy=self.f_scale, interpolation=cv2.INTER_LINEAR)
            label = cv2.resize(label, None, fx=self.f_scale, fy=self.f_scale, interpolation = cv2.INTER_NEAREST)

        label[label == 11] = self.ignore_label

        image = np.asarray(image, np.float32)

        if self.rgb:
            image = image[:, :, ::-1]  ## BGR -> RGB
            image /= 255  ## using pytorch pretrained models

        image -= self.mean
        image /= self.vars

        image = image.transpose((2, 0, 1))  # HWC -> CHW

        # print('image.shape:',image.shape)
        return image.copy(), label.copy(), np.array(size), name 
Author: lxtGH, Project: Fast_Seg, Lines: 27, Source: camvid.py

Example 7: parsing_on_boxes

# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_NEAREST [as alias]
def parsing_on_boxes(parsing, rois, heatmap_size):
    device = rois.device
    rois = rois.to(torch.device("cpu"))
    parsing_list = []
    for i in range(rois.shape[0]):
        parsing_ins = parsing[i].cpu().numpy()
        xmin, ymin, xmax, ymax = torch.round(rois[i]).int()
        cropped_parsing = parsing_ins[max(0, ymin):ymax, max(0, xmin):xmax]
        resized_parsing = cv2.resize(
            cropped_parsing, (heatmap_size[1], heatmap_size[0]), interpolation=cv2.INTER_NEAREST
        )
        parsing_list.append(torch.from_numpy(resized_parsing))

    if len(parsing_list) == 0:
        return torch.empty(0, dtype=torch.int64, device=device)
    return torch.stack(parsing_list, dim=0).to(device, dtype=torch.int64) 
Author: soeaver, Project: Parsing-R-CNN, Lines: 18, Source: loss.py

Example 8: fixed_resize

# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_NEAREST [as alias]
def fixed_resize(sample, resolution, flagval=None):

    if flagval is None:
        if ((sample == 0) | (sample == 1)).all():
            flagval = cv2.INTER_NEAREST
        else:
            flagval = cv2.INTER_CUBIC

    if isinstance(resolution, int):
        tmp = [resolution, resolution]
        tmp[np.argmax(sample.shape[:2])] = int(round(float(resolution)/np.min(sample.shape[:2])*np.max(sample.shape[:2])))
        resolution = tuple(tmp)

    if sample.ndim == 2 or (sample.ndim == 3 and sample.shape[2] == 3):
        sample = cv2.resize(sample, resolution[::-1], interpolation=flagval)
    else:
        tmp = sample
        sample = np.zeros(np.append(resolution, tmp.shape[2]), dtype=np.float32)
        for ii in range(sample.shape[2]):
            sample[:, :, ii] = cv2.resize(tmp[:, :, ii], resolution[::-1], interpolation=flagval)
    return sample 
Author: scaelles, Project: DEXTR-KerasTensorflow, Lines: 23, Source: helpers.py

Example 9: fixInterpolation

# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_NEAREST [as alias]
def fixInterpolation(interpolation):
    """Converts interpolation given as string to cv2 interpolation object
    
    Arguments:
        interpolation (str or object): interpolation string or cv2 object
    
    Returns:
        object: cv2 interpolation type
    """
    
    if interpolation == 'nn' or interpolation is None or interpolation == cv2.INTER_NEAREST:
        interpolation = cv2.INTER_NEAREST;
    else:
        interpolation = cv2.INTER_LINEAR;
        
    return interpolation; 
Author: ChristophKirst, Project: ClearMap, Lines: 18, Source: Resampling.py

Example 10: load_label

# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_NEAREST [as alias]
def load_label(self, idx):
        """
        Load label image as 1 x height x width integer array of label indices.
        The leading singleton dimension is required by the loss.
        """
        im = Image.open(self.data_root + self.label_lst[idx])
        label = np.array(im) / 255#cv2.imread(self.data_root + self.label_lst[idx], 0) / 255
        #if self.scales != None:
        #    label = cv2.resize(label, None, None, fx=self.scales[self.scale_ind], fy=self.scales[self.scale_ind], \
        #            interpolation=cv2.INTER_NEAREST)
        #height, width = label.shape[:2]
        #h_off = self.crop_size - height
        #w_off = self.crop_size - width
        #label = cv2.copyMakeBorder(label, 0, max(0, h_off), 0, max(0, w_off), cv2.BORDER_CONSTANT, value=[-1,])
        #label = label[self.h_off:self.h_off+self.height, self.w_off:self.w_off+self.width]
        label = label[np.newaxis, ...]
        if self.flip == 1:
            label = label[:,:,::-1]
        return label 
Author: Andrew-Qibin, Project: DSS, Lines: 21, Source: sal_data_layer.py

Example 11: load_region

# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_NEAREST [as alias]
def load_region(self, idx):
        """
        Load label image as 1 x height x width integer array of label indices.
        The leading singleton dimension is required by the loss.
        """
        im = Image.open(self.data_root + self.region_lst[idx])
        region = np.array(im, dtype=np.float32) / 15.0
        #print np.unique(region)
        #if self.scales != None:
        #    label = cv2.resize(label, None, None, fx=self.scales[self.scale_ind], fy=self.scales[self.scale_ind], \
        #            interpolation=cv2.INTER_NEAREST)
        #height, width = label.shape[:2]
        #h_off = self.crop_size - height
        #w_off = self.crop_size - width
        #label = cv2.copyMakeBorder(label, 0, max(0, h_off), 0, max(0, w_off), cv2.BORDER_CONSTANT, value=[-1,])
        region = region[np.newaxis, ...]
        if self.flip == 1:
            region = region[:,:,::-1]
        return region 
Author: Andrew-Qibin, Project: DSS, Lines: 21, Source: sal_data_layer.py

Example 12: _resize_cv2

# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_NEAREST [as alias]
def _resize_cv2(img, size, interpolation):
    img = img.transpose((1, 2, 0))
    if interpolation == PIL.Image.NEAREST:
        cv_interpolation = cv2.INTER_NEAREST
    elif interpolation == PIL.Image.BILINEAR:
        cv_interpolation = cv2.INTER_LINEAR
    elif interpolation == PIL.Image.BICUBIC:
        cv_interpolation = cv2.INTER_CUBIC
    elif interpolation == PIL.Image.LANCZOS:
        cv_interpolation = cv2.INTER_LANCZOS4
    else:
        # fail explicitly for unsupported interpolation values instead of
        # hitting an UnboundLocalError below
        raise ValueError('unsupported interpolation: {}'.format(interpolation))
    H, W = size
    img = cv2.resize(img, dsize=(W, H), interpolation=cv_interpolation)

    # If input is a grayscale image, cv2 returns a two-dimentional array.
    if len(img.shape) == 2:
        img = img[:, :, np.newaxis]
    return img.transpose((2, 0, 1)) 
Author: pfnet-research, Project: chainer-compiler, Lines: 19, Source: resize.py

Example 13: write_data

# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_NEAREST [as alias]
def write_data(h5py_file, mode, x_paths, y_paths):
    num_data = len(x_paths)

    uint8_dt = h5py.special_dtype(vlen=np.uint8)
    string_dt = h5py.special_dtype(vlen=str)

    group = h5py_file.create_group(mode)
    h5_name = group.create_dataset('name', shape=(num_data,), dtype=string_dt)
    h5_image = group.create_dataset('image', shape=(num_data,), dtype=uint8_dt)
    h5_label = group.create_dataset('label', shape=(num_data,), dtype=uint8_dt)

    h5_image.attrs['size'] = [256,512,3]
    h5_label.attrs['size'] = [256,512,1]

    for i in range(num_data):
        x_img = cv2.imread(x_paths[i], 1)
        y_img = cv2.imread(y_paths[i], 0)
        x_img = cv2.resize(x_img, None, fx=0.25, fy=0.25, interpolation=cv2.INTER_LINEAR)
        y_img = cv2.resize(y_img, None, fx=0.25, fy=0.25, interpolation=cv2.INTER_NEAREST)

        h5_image[i] = x_img.flatten()
        h5_label[i] = y_img.flatten()
        h5_name[i] = os.path.basename(x_paths[i])

        # break 
Author: dhkim0225, Project: keras-image-segmentation, Lines: 27, Source: h5_test.py

Example 14: image_copy_to_dir

# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_NEAREST [as alias]
def image_copy_to_dir(mode, x_paths, y_paths):
    target_path = '/run/media/tkwoo/myWorkspace/workspace/01.dataset/03.Mask_data/cityscape'
    target_path = os.path.join(target_path, mode)

    for idx in trange(len(x_paths)):
        image = cv2.imread(x_paths[idx], 1)
        mask = cv2.imread(y_paths[idx], 0)

        image = cv2.resize(image, None, fx=0.25, fy=0.25, interpolation=cv2.INTER_LINEAR)
        mask = cv2.resize(mask, None, fx=0.25, fy=0.25, interpolation=cv2.INTER_NEAREST)

        cv2.imwrite(os.path.join(target_path, 'image', os.path.basename(x_paths[idx])), image)
        cv2.imwrite(os.path.join(target_path, 'mask', os.path.basename(y_paths[idx])), mask)

        # show = image.copy()
        # mask = (mask.astype(np.float32)*255/33).astype(np.uint8)
        # mask_color = cv2.applyColorMap(mask, cv2.COLORMAP_JET)
        # show = cv2.addWeighted(show, 0.5, mask_color, 0.5, 0.0)
        # cv2.imshow('show', show)
        # key = cv2.waitKey(1)
        # if key == 27:
        #     return 
Author: dhkim0225, Project: keras-image-segmentation, Lines: 24, Source: h5_test.py

Example 15: __getitem__

# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_NEAREST [as alias]
def __getitem__(self, index):
        path, label, h_label = self.imgs[index]
        path = os.path.join(self.dir_path, path)
        img = cv2.imread(path).astype(np.float32)
        img = img[:,:,:3]
        img = cv2.resize(img, (self.width, self.height))
        img -= [104, 117, 123]
        img = img.transpose(2, 0, 1)
        gt = cv2.imread(label,-1)
        gt = cv2.resize(gt, (self.label_width, self.label_height), interpolation = cv2.INTER_NEAREST)  
        if len(gt.shape) == 3:
            gt = gt[:,:,0]
        thining_gt = cv2.imread(h_label,-1)
        gt_num_list = list(np.unique(gt))
        gt_num_list.remove(0)
        target_ins = np.zeros((4, gt.shape[0],gt.shape[1])).astype('uint8')
        for index, ins in enumerate(gt_num_list):
            target_ins[index,:,:] += (gt==ins)
        return img, target_ins, len(gt_num_list), thining_gt 
Author: dingmyu, Project: Pytorch-Instance-Lane-Segmentation, Lines: 21, Source: dataloader_thinning.py


Note: The cv2.INTER_NEAREST attribute examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets are taken from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution or use should follow the corresponding project's License. Do not reproduce without permission.