

Python cv2.INTER_LINEAR attribute code examples

This article collects typical usage examples of the cv2.INTER_LINEAR attribute in Python. If you are wondering what cv2.INTER_LINEAR is for, or how to use it, the selected code examples below may help. You can also explore further usage examples of the cv2 module that this attribute belongs to.


The sections below show 15 code examples of the cv2.INTER_LINEAR attribute, sorted by popularity by default.
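Before the collected examples, here is a minimal, self-contained sketch of the most common way cv2.INTER_LINEAR is used: as the interpolation flag of cv2.resize. The synthetic image and the target sizes below are placeholders chosen purely for illustration.

import cv2
import numpy as np

# A small synthetic BGR image stands in for a real photo.
img = np.random.randint(0, 256, (120, 160, 3), dtype=np.uint8)

# Upscale to an explicit (width, height) target; bilinear interpolation is a common default.
up = cv2.resize(img, (320, 240), interpolation=cv2.INTER_LINEAR)

# The same flag also works when resizing by scale factors instead of a fixed size.
half = cv2.resize(img, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_LINEAR)

print(up.shape, half.shape)  # (240, 320, 3) (60, 80, 3)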

Example 1: resize

# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_LINEAR [as alias]
def resize(video, size, interpolation):
  if interpolation == 'bilinear':
    inter = cv2.INTER_LINEAR
  elif interpolation == 'nearest':
    inter = cv2.INTER_NEAREST
  else:
    raise NotImplementedError

  shape = video.shape[:-3]
  video = video.reshape((-1, *video.shape[-3:]))
  resized_video = np.zeros((video.shape[0], size[1], size[0], video.shape[-1]))
  for i in range(video.shape[0]):
    img = cv2.resize(video[i], size, interpolation=inter)
    if len(img.shape) == 2:
      img = img[:, :, np.newaxis]
    resized_video[i] = img
  return resized_video.reshape((*shape, size[1], size[0], video.shape[-1])) 
Developer: jthsieh, Project: DDPAE-video-prediction, Lines: 19, Source: video_transforms.py
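A minimal usage sketch of the resize helper above, assuming a dummy 5-D video array; the shapes are illustrative only, and the function's cv2 and numpy imports come from the snippet above.

import numpy as np

# 2 clips of 4 frames, each 32x48 RGB, as float32 so cv2.resize accepts them.
video = np.random.rand(2, 4, 32, 48, 3).astype(np.float32)

# Resize every frame to width=64, height=96 with bilinear interpolation.
out = resize(video, size=(64, 96), interpolation='bilinear')
print(out.shape)  # (2, 4, 96, 64, 3)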

Example 2: _elastic

# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_LINEAR [as alias]
def _elastic(image, p, alpha=None, sigma=None, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).
    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
         Convolutional Neural Networks applied to Visual Document Analysis", in
         Proc. of the International Conference on Document Analysis and
         Recognition, 2003.
     Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
     From: 
     https://www.kaggle.com/bguberfain/elastic-transform-for-data-augmentation
    """
    if random.random() > p:
        return image
    if alpha is None:
        alpha = image.shape[0] * random.uniform(0.5, 2)
    if sigma is None:
        sigma = int(image.shape[0] * random.uniform(0.5, 1))
    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape[:2]
    
    # sigma|1 forces an odd kernel size, which cv2.GaussianBlur requires.
    dx, dy = [cv2.GaussianBlur((random_state.rand(*shape) * 2 - 1) * alpha, (sigma|1, sigma|1), 0) for _ in range(2)]
    x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))
    x, y = np.clip(x+dx, 0, shape[1]-1).astype(np.float32), np.clip(y+dy, 0, shape[0]-1).astype(np.float32)
    return cv2.remap(image, x, y, interpolation=cv2.INTER_LINEAR, borderValue= 0, borderMode=cv2.BORDER_REFLECT) 
Developer: ShuangXieIrene, Project: ssds.pytorch, Lines: 27, Source: data_augment.py
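A short usage sketch for _elastic, assuming a random uint8 image; p=1.0 forces the deformation to be applied, and the function's cv2, numpy, and random imports come from the snippet above.

import numpy as np

img = (np.random.rand(96, 96, 3) * 255).astype(np.uint8)

# With p=1.0 the random.random() gate never skips the augmentation.
warped = _elastic(img, p=1.0)
print(warped.shape, warped.dtype)  # (96, 96, 3) uint8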

Example 3: step

# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_LINEAR [as alias]
def step(self, amt=1):
        image = self._capFrame()

        if self.crop:
            image = image[self._cropY + self.yoff:self._ih - self._cropY +
                          self.yoff, self._cropX + self.xoff:self._iw - self._cropX + self.xoff]
        else:
            t, b, l, r = self._pad
            image = cv2.copyMakeBorder(
                image, t, b, l, r, cv2.BORDER_CONSTANT, value=[0, 0, 0])

        resized = cv2.resize(image, (self.width, self.height),
                             interpolation=cv2.INTER_LINEAR)
        if self.mirror:
            resized = cv2.flip(resized, 1)

        for y in range(self.height):
            for x in range(self.width):
                self.layout.set(x, y, tuple(resized[y, x][0:3])) 
Developer: ManiacalLabs, Project: BiblioPixelAnimations, Lines: 21, Source: ScreenGrab.py

Example 4: detect

# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_LINEAR [as alias]
def detect(self, img):
        """
        img: rgb 3 channel
        """
        minsize = 20  # minimum size of face
        threshold = [0.6, 0.7, 0.7]  # three steps's threshold
        factor = 0.709  # scale factor

        bounding_boxes, _ = FaceDet.detect_face(
                img, minsize, self.pnet, self.rnet, self.onet, threshold, factor)
        area = (bounding_boxes[:, 2] - bounding_boxes[:, 0]) * (bounding_boxes[:, 3] - bounding_boxes[:, 1])
        face_idx = area.argmax()
        bbox = bounding_boxes[face_idx][:4]  # xy,xy

        margin = 32
        x0 = np.maximum(bbox[0] - margin // 2, 0)
        y0 = np.maximum(bbox[1] - margin // 2, 0)
        x1 = np.minimum(bbox[2] + margin // 2, img.shape[1])
        y1 = np.minimum(bbox[3] + margin // 2, img.shape[0])
        x0, y0, x1, y1 = bbox = [int(k + 0.5) for k in [x0, y0, x1, y1]]
        cropped = img[y0:y1, x0:x1, :]
        scaled = cv2.resize(cropped, (160, 160), interpolation=cv2.INTER_LINEAR)
        return scaled, bbox 
Developer: ppwwyyxx, Project: Adversarial-Face-Attack, Lines: 25, Source: face_attack.py

Example 5: resize

# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_LINEAR [as alias]
def resize(im, short, max_size):
    """
    only resize input image to target size and return scale
    :param im: BGR image input by opencv
    :param short: one dimensional size (the short side)
    :param max_size: one dimensional max size (the long side)
    :return: resized image (NDArray) and scale (float)
    """
    im_shape = im.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    im_scale = float(short) / float(im_size_min)
    # prevent bigger axis from being more than max_size:
    if np.round(im_scale * im_size_max) > max_size:
        im_scale = float(max_size) / float(im_size_max)
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
    return im, im_scale 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 19, Source: image.py
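A quick usage sketch of the resize helper above with a dummy BGR frame; the 600/1000 values mirror common detection settings and are only illustrative here.

import numpy as np

im = np.zeros((480, 640, 3), dtype=np.uint8)  # 480x640 dummy BGR image

# Scale the short side to 600 unless that would push the long side past 1000.
resized, scale = resize(im, short=600, max_size=1000)
print(resized.shape, scale)  # (600, 800, 3) 1.25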

Example 6: resize

# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_LINEAR [as alias]
def resize(src, size, interpolation=cv2.INTER_LINEAR):
    """Decode image from str buffer.
    Wrapper for cv2.imresize that uses mx.nd.NDArray

    Parameters
    ----------
    src : NDArray
        image in (width, height, channels)
    size : tuple
        target size in (width, height)
    interpolation : int
        same as interpolation for cv2.resize

    Returns
    -------
    img : NDArray
        resized image
    """
    hdl = NDArrayHandle()
    check_call(_LIB.MXCVResize(src.handle, mx_uint(size[0]), mx_uint(size[1]),
                               interpolation, ctypes.byref(hdl)))
    return mx.nd.NDArray(hdl) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 24, Source: opencv.py

Example 7: resize_maps

# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_LINEAR [as alias]
def resize_maps(map, map_scales, resize_method):
  scaled_maps = []
  for i, sc in enumerate(map_scales):
    if resize_method == 'antialiasing':
      # Resize using open cv so that we can compute the size.
      # Use PIL resize to use anti aliasing feature.
      map_ = cv2.resize(map*1, None, None, fx=sc, fy=sc, interpolation=cv2.INTER_LINEAR)
      w = map_.shape[1]; h = map_.shape[0]

      map_img = PIL.Image.fromarray((map*255).astype(np.uint8))
      map__img = map_img.resize((w,h), PIL.Image.ANTIALIAS)
      map_ = np.asarray(map__img).astype(np.float32)
      map_ = map_/255.
      map_ = np.minimum(map_, 1.0)
      map_ = np.maximum(map_, 0.0)
    elif resize_method == 'linear_noantialiasing':
      map_ = cv2.resize(map*1, None, None, fx=sc, fy=sc, interpolation=cv2.INTER_LINEAR)
    else:
      logging.error('Unknown resizing method %s', resize_method)
      raise ValueError('Unknown resizing method: ' + resize_method)
    scaled_maps.append(map_)
  return scaled_maps 
Developer: ringringyi, Project: DOTA_models, Lines: 23, Source: map_utils.py

Example 8: generate_egocentric_maps

# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_LINEAR [as alias]
def generate_egocentric_maps(scaled_maps, map_scales, map_crop_sizes, loc,
                             x_axis, y_axis, theta):
  maps = []
  for i, (map_, sc, map_crop_size) in enumerate(zip(scaled_maps, map_scales, map_crop_sizes)):
    maps_i = np.array(get_map_to_predict(loc*sc, x_axis, y_axis, map_,
                                         map_crop_size,
                                         interpolation=cv2.INTER_LINEAR)[0])
    maps_i[np.isnan(maps_i)] = 0
    maps.append(maps_i)
  return maps 
Developer: ringringyi, Project: DOTA_models, Lines: 12, Source: map_utils.py

Example 9: __next__

# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_LINEAR [as alias]
def __next__(self):
        self.count += 1
        img0 = self.imgs.copy()
        if cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration

        # Letterbox
        img = [letterbox(x, new_shape=self.img_size, interp=cv2.INTER_LINEAR)[0] for x in img0]

        # Stack
        img = np.stack(img, 0)

        # Normalize RGB
        img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, BHWC to BCHW
        img = np.ascontiguousarray(img, dtype=np.float16 if self.half else np.float32)  # uint8 to fp16/fp32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0

        return self.sources, img, img0, None 
Developer: zbyuan, Project: pruning_yolov3, Lines: 21, Source: datasets.py

Example 10: load_image

# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_LINEAR [as alias]
def load_image(self, index):
    # loads 1 image from dataset
    img = self.imgs[index]
    if img is None:
        img_path = self.img_files[index]
        img = cv2.imread(img_path)  # BGR
        assert img is not None, 'Image Not Found ' + img_path
        r = self.img_size / max(img.shape)  # size ratio
        if self.augment and r < 1:  # if training (NOT testing), downsize to inference shape
            h, w, _ = img.shape
            img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_LINEAR)  # _LINEAR fastest

    # Augment colorspace
    if self.augment:
        augment_hsv(img, hgain=self.hyp['hsv_h'], sgain=self.hyp['hsv_s'], vgain=self.hyp['hsv_v'])

    return img 
Developer: zbyuan, Project: pruning_yolov3, Lines: 19, Source: datasets.py

Example 11: prep_im_for_blob

# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_LINEAR [as alias]
def prep_im_for_blob(im, pixel_means, pixel_stds, target_size, max_size):
    """Mean subtract and scale an image for use in a blob."""
    
    im = im.astype(np.float32, copy=False)
    im /= 255.0
    im -= pixel_means
    im /= pixel_stds
    # im = im[:, :, ::-1]
    im_shape = im.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    im_scale = float(target_size) / float(im_size_min)
    # Prevent the biggest axis from being more than MAX_SIZE
    # if np.round(im_scale * im_size_max) > max_size:
    #     im_scale = float(max_size) / float(im_size_max)
    # im = imresize(im, im_scale)
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
                    interpolation=cv2.INTER_LINEAR)

    return im, im_scale 
Developer: guoruoqian, Project: cascade-rcnn_Pytorch, Lines: 22, Source: blob.py

Example 12: resize_image

# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_LINEAR [as alias]
def resize_image(img):
    img_size = img.shape
    im_size_min = np.min(img_size[0:2])
    im_size_max = np.max(img_size[0:2])

    im_scale = float(600) / float(im_size_min)
    if np.round(im_scale * im_size_max) > 1200:
        im_scale = float(1200) / float(im_size_max)
    new_h = int(img_size[0] * im_scale)
    new_w = int(img_size[1] * im_scale)

    # Pad the resized dimensions up to the nearest multiple of 16.
    new_h = new_h if new_h % 16 == 0 else (new_h // 16 + 1) * 16
    new_w = new_w if new_w % 16 == 0 else (new_w // 16 + 1) * 16

    re_im = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
    return re_im, (new_h / img_size[0], new_w / img_size[1]) 
Developer: zzzDavid, Project: ICDAR-2019-SROIE, Lines: 18, Source: demo.py
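A quick check of the behaviour above, assuming a dummy image; with the modulo condition, both output dimensions are padded up to multiples of 16, which stride-16 text-detection backbones typically expect.

import numpy as np

img = np.zeros((500, 375, 3), dtype=np.uint8)
re_im, (rh, rw) = resize_image(img)
print(re_im.shape)  # (800, 608, 3): both spatial dimensions divisible by 16
assert re_im.shape[0] % 16 == 0 and re_im.shape[1] % 16 == 0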

Example 13: multiscale_single_test

# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_LINEAR [as alias]
def multiscale_single_test(image, input_scales, predictor):
    '''
    Predict image semantic segmentation labeling using multi-scale inputs.
    Inputs:
    image: numpy array, [height, width, channel], channel = 3.
    input_scales: list of scale factors, e.g., [0.5, 1.0, 1.5].
    predictor: prediction function that takes one scaled image as input and outputs its semantic segmentation logits.
    Returns:
    Averaged predicted logits of multi-scale inputs
    '''
    image_height_raw = image.shape[0]
    image_width_raw = image.shape[1]
    multiscale_outputs = []
    for input_scale in input_scales:
        image_height_scaled = round(image_height_raw * input_scale)
        image_width_scaled = round(image_width_raw * input_scale)
        image_scaled = cv2.resize(image, (image_width_scaled, image_height_scaled), interpolation=cv2.INTER_LINEAR)
        output = predictor(inputs=[image_scaled], target_height=image_height_raw, target_width=image_width_raw)[0]
        multiscale_outputs.append(output)

    output_mean = np.mean(multiscale_outputs, axis=0)

    return output_mean 
Developer: leimao, Project: DeepLab_v3, Lines: 25, Source: utils.py
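A minimal usage sketch of multiscale_single_test with a stand-in predictor; the callable below simply returns zero logits with an assumed 21-class output and exists only to show the expected interface.

import numpy as np

def dummy_predictor(inputs, target_height, target_width):
    # A real predictor would run the network; here we return zero logits per input.
    return [np.zeros((target_height, target_width, 21)) for _ in inputs]

image = np.random.randint(0, 256, (90, 120, 3), dtype=np.uint8)
logits = multiscale_single_test(image, input_scales=[0.5, 1.0, 1.5], predictor=dummy_predictor)
print(logits.shape)  # (90, 120, 21)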

Example 14: multiscale_single_validate

# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_LINEAR [as alias]
def multiscale_single_validate(image, label, input_scales, validator):

    image_height_raw = image.shape[0]
    image_width_raw = image.shape[1]
    multiscale_outputs = []
    multiscale_losses = []
    for input_scale in input_scales:
        image_height_scaled = round(image_height_raw * input_scale)
        image_width_scaled = round(image_width_raw * input_scale)
        image_scaled = cv2.resize(image, (image_width_scaled, image_height_scaled), interpolation=cv2.INTER_LINEAR)
        output, loss = validator(inputs=[image_scaled], target_height=image_height_raw, target_width=image_width_raw, labels=[label])
        multiscale_outputs.append(output[0])
        multiscale_losses.append(loss)

    output_mean = np.mean(multiscale_outputs, axis=0)
    loss_mean = np.mean(multiscale_losses)

    return output_mean, loss_mean 
Developer: leimao, Project: DeepLab_v3, Lines: 20, Source: utils.py

Example 15: __getitem__

# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_LINEAR [as alias]
def __getitem__(self, index):
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        image = cv2.resize(image, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_LINEAR)
        size = image.shape
        name = osp.splitext(osp.basename(datafiles["img"]))[0]
        image = np.asarray(image, np.float32)
        image = (image - image.min()) / (image.max() - image.min())
        
        img_h, img_w, _ = image.shape
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            image = cv2.copyMakeBorder(image, 0, pad_h, 0, 
                pad_w, cv2.BORDER_CONSTANT, 
                value=(0.0, 0.0, 0.0))
        image = image.transpose((2, 0, 1))
        return image, np.array(size), name 
Developer: speedinghzl, Project: pytorch-segmentation-toolbox, Lines: 20, Source: datasets.py


Note: The cv2.INTER_LINEAR attribute examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright over the source code. Consult each project's license before redistributing or reusing the code, and do not republish without permission.