This article collects typical usage examples of the Python attribute cv2.INTER_LINEAR. If you are wondering what cv2.INTER_LINEAR is for, or how it is used in practice, the curated examples below may help. You can also explore further usage examples of cv2, the module in which this attribute is defined.
Below are 15 code examples of cv2.INTER_LINEAR, sorted by popularity by default.
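Before the collected examples, here is a minimal self-contained sketch of the attribute in action; the random array is only a stand-in for a real image:

import cv2
import numpy as np

img = np.random.randint(0, 255, size=(240, 320, 3), dtype=np.uint8)  # stand-in for a real BGR image
# INTER_LINEAR selects bilinear interpolation when resizing to a fixed (width, height)
up = cv2.resize(img, (640, 480), interpolation=cv2.INTER_LINEAR)
print(up.shape)  # (480, 640, 3)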
Example 1: resize
# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_LINEAR [as alias]
def resize(video, size, interpolation):
    if interpolation == 'bilinear':
        inter = cv2.INTER_LINEAR
    elif interpolation == 'nearest':
        inter = cv2.INTER_NEAREST
    else:
        raise NotImplementedError
    shape = video.shape[:-3]
    # Flatten all leading dimensions into one batch dimension.
    video = video.reshape((-1, *video.shape[-3:]))
    resized_video = np.zeros((video.shape[0], size[1], size[0], video.shape[-1]))
    for i in range(video.shape[0]):
        img = cv2.resize(video[i], size, interpolation=inter)
        if len(img.shape) == 2:
            img = img[:, :, np.newaxis]
        resized_video[i] = img
    return resized_video.reshape((*shape, size[1], size[0], video.shape[-1]))
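A minimal usage sketch for the function above; the array shape and target size are illustrative assumptions:

import numpy as np
video = np.random.randint(0, 255, size=(2, 4, 32, 48, 3), dtype=np.uint8)  # (clips, frames, H, W, C)
out = resize(video, (64, 40), 'bilinear')  # size is given as (width, height)
print(out.shape)  # (2, 4, 40, 64, 3)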
Example 2: _elastic
# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_LINEAR [as alias]
def _elastic(image, p, alpha=None, sigma=None, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).
    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and
       Recognition, 2003.
    Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
    From:
    https://www.kaggle.com/bguberfain/elastic-transform-for-data-augmentation
    """
    if random.random() > p:
        return image
    if alpha is None:
        alpha = image.shape[0] * random.uniform(0.5, 2)
    if sigma is None:
        sigma = int(image.shape[0] * random.uniform(0.5, 1))
    if random_state is None:
        random_state = np.random.RandomState(None)
    shape = image.shape[:2]
    # sigma | 1 forces an odd kernel size, as required by cv2.GaussianBlur.
    dx, dy = [cv2.GaussianBlur((random_state.rand(*shape) * 2 - 1) * alpha,
                               (sigma | 1, sigma | 1), 0) for _ in range(2)]
    x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))
    x, y = np.clip(x + dx, 0, shape[1] - 1).astype(np.float32), np.clip(y + dy, 0, shape[0] - 1).astype(np.float32)
    return cv2.remap(image, x, y, interpolation=cv2.INTER_LINEAR, borderValue=0, borderMode=cv2.BORDER_REFLECT)
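A minimal usage sketch, assuming random, numpy and cv2 are imported as the example requires; the image size is an illustrative assumption:

import random
import numpy as np
import cv2

img = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)  # stand-in for a real image
warped = _elastic(img, p=1.0)  # p=1.0 forces the elastic warp to be applied
print(warped.shape)  # (64, 64, 3)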
Example 3: step
# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_LINEAR [as alias]
def step(self, amt=1):
    image = self._capFrame()
    if self.crop:
        image = image[self._cropY + self.yoff:self._ih - self._cropY + self.yoff,
                      self._cropX + self.xoff:self._iw - self._cropX + self.xoff]
    else:
        t, b, l, r = self._pad
        image = cv2.copyMakeBorder(
            image, t, b, l, r, cv2.BORDER_CONSTANT, value=[0, 0, 0])
    resized = cv2.resize(image, (self.width, self.height),
                         interpolation=cv2.INTER_LINEAR)
    if self.mirror:
        resized = cv2.flip(resized, 1)
    for y in range(self.height):
        for x in range(self.width):
            self.layout.set(x, y, tuple(resized[y, x][0:3]))
Example 4: detect
# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_LINEAR [as alias]
def detect(self, img):
    """
    img: rgb 3 channel
    """
    minsize = 20  # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # threshold for each of the three stages
    factor = 0.709  # scale factor
    bounding_boxes, _ = FaceDet.detect_face(
        img, minsize, self.pnet, self.rnet, self.onet, threshold, factor)
    area = (bounding_boxes[:, 2] - bounding_boxes[:, 0]) * (bounding_boxes[:, 3] - bounding_boxes[:, 1])
    face_idx = area.argmax()
    bbox = bounding_boxes[face_idx][:4]  # xy,xy
    margin = 32
    x0 = np.maximum(bbox[0] - margin // 2, 0)
    y0 = np.maximum(bbox[1] - margin // 2, 0)
    x1 = np.minimum(bbox[2] + margin // 2, img.shape[1])
    y1 = np.minimum(bbox[3] + margin // 2, img.shape[0])
    x0, y0, x1, y1 = bbox = [int(k + 0.5) for k in [x0, y0, x1, y1]]
    cropped = img[y0:y1, x0:x1, :]
    scaled = cv2.resize(cropped, (160, 160), interpolation=cv2.INTER_LINEAR)
    return scaled, bbox
Example 5: resize
# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_LINEAR [as alias]
def resize(im, short, max_size):
    """
    Only resize input image to target size and return scale.
    :param im: BGR image input by opencv
    :param short: one dimensional size (the short side)
    :param max_size: one dimensional max size (the long side)
    :return: resized image (NDArray) and scale (float)
    """
    im_shape = im.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    im_scale = float(short) / float(im_size_min)
    # prevent bigger axis from being more than max_size:
    if np.round(im_scale * im_size_max) > max_size:
        im_scale = float(max_size) / float(im_size_max)
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
    return im, im_scale
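A minimal usage sketch; the 600/1000 sizes follow common detection presets but are only assumptions here:

import numpy as np
import cv2

im = np.zeros((480, 640, 3), dtype=np.uint8)  # dummy 640x480 BGR frame
resized, scale = resize(im, short=600, max_size=1000)
# short side 480 -> 600 gives scale 1.25; 640 * 1.25 = 800 <= 1000, so the max_size cap does not kick in
print(resized.shape, scale)  # (600, 800, 3) 1.25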
Example 6: resize
# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_LINEAR [as alias]
def resize(src, size, interpolation=cv2.INTER_LINEAR):
    """Resize an image.
    Wrapper for cv2.imresize that uses mx.nd.NDArray.
    Parameters
    ----------
    src : NDArray
        image in (width, height, channels)
    size : tuple
        target size in (width, height)
    interpolation : int
        same as interpolation for cv2.imresize
    Returns
    -------
    img : NDArray
        resized image
    """
    hdl = NDArrayHandle()
    check_call(_LIB.MXCVResize(src.handle, mx_uint(size[0]), mx_uint(size[1]),
                               interpolation, ctypes.byref(hdl)))
    return mx.nd.NDArray(hdl)
Example 7: resize_maps
# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_LINEAR [as alias]
def resize_maps(map, map_scales, resize_method):
    scaled_maps = []
    for i, sc in enumerate(map_scales):
        if resize_method == 'antialiasing':
            # Resize using OpenCV so that we can compute the size.
            # Use PIL resize to get the anti-aliasing feature.
            map_ = cv2.resize(map*1, None, None, fx=sc, fy=sc, interpolation=cv2.INTER_LINEAR)
            w = map_.shape[1]; h = map_.shape[0]
            map_img = PIL.Image.fromarray((map*255).astype(np.uint8))
            map__img = map_img.resize((w, h), PIL.Image.ANTIALIAS)
            map_ = np.asarray(map__img).astype(np.float32)
            map_ = map_/255.
            map_ = np.minimum(map_, 1.0)
            map_ = np.maximum(map_, 0.0)
        elif resize_method == 'linear_noantialiasing':
            map_ = cv2.resize(map*1, None, None, fx=sc, fy=sc, interpolation=cv2.INTER_LINEAR)
        else:
            logging.error('Unknown resizing method')
        scaled_maps.append(map_)
    return scaled_maps
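A minimal usage sketch for the non-antialiased branch; the dummy occupancy map is an assumption, and the antialiasing branch would additionally require PIL:

import numpy as np
import cv2

occupancy = (np.random.rand(128, 128) > 0.5).astype(np.float32)  # dummy binary map
maps = resize_maps(occupancy, map_scales=[0.25, 0.5], resize_method='linear_noantialiasing')
print([m.shape for m in maps])  # [(32, 32), (64, 64)]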
Example 8: generate_egocentric_maps
# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_LINEAR [as alias]
def generate_egocentric_maps(scaled_maps, map_scales, map_crop_sizes, loc,
                             x_axis, y_axis, theta):
    maps = []
    for i, (map_, sc, map_crop_size) in enumerate(zip(scaled_maps, map_scales, map_crop_sizes)):
        maps_i = np.array(get_map_to_predict(loc*sc, x_axis, y_axis, map_,
                                             map_crop_size,
                                             interpolation=cv2.INTER_LINEAR)[0])
        maps_i[np.isnan(maps_i)] = 0
        maps.append(maps_i)
    return maps
Example 9: __next__
# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_LINEAR [as alias]
def __next__(self):
    self.count += 1
    img0 = self.imgs.copy()
    if cv2.waitKey(1) == ord('q'):  # q to quit
        cv2.destroyAllWindows()
        raise StopIteration
    # Letterbox
    img = [letterbox(x, new_shape=self.img_size, interp=cv2.INTER_LINEAR)[0] for x in img0]
    # Stack
    img = np.stack(img, 0)
    # Normalize RGB
    img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB
    img = np.ascontiguousarray(img, dtype=np.float16 if self.half else np.float32)  # uint8 to fp16/fp32
    img /= 255.0  # 0 - 255 to 0.0 - 1.0
    return self.sources, img, img0, None
Example 10: load_image
# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_LINEAR [as alias]
def load_image(self, index):
    # loads 1 image from dataset
    img = self.imgs[index]
    if img is None:
        img_path = self.img_files[index]
        img = cv2.imread(img_path)  # BGR
        assert img is not None, 'Image Not Found ' + img_path
        r = self.img_size / max(img.shape)  # size ratio
        if self.augment and r < 1:  # if training (NOT testing), downsize to inference shape
            h, w, _ = img.shape
            img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_LINEAR)  # _LINEAR fastest
    # Augment colorspace
    if self.augment:
        augment_hsv(img, hgain=self.hyp['hsv_h'], sgain=self.hyp['hsv_s'], vgain=self.hyp['hsv_v'])
    return img
Example 11: prep_im_for_blob
# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_LINEAR [as alias]
def prep_im_for_blob(im, pixel_means, pixel_stds, target_size, max_size):
    """Mean subtract and scale an image for use in a blob."""
    im = im.astype(np.float32, copy=False)
    im /= 255.0
    im -= pixel_means
    im /= pixel_stds
    # im = im[:, :, ::-1]
    im_shape = im.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    im_scale = float(target_size) / float(im_size_min)
    # Prevent the biggest axis from being more than MAX_SIZE
    # if np.round(im_scale * im_size_max) > max_size:
    #     im_scale = float(max_size) / float(im_size_max)
    # im = imresize(im, im_scale)
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
                    interpolation=cv2.INTER_LINEAR)
    return im, im_scale
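A minimal usage sketch; the normalization statistics below are ImageNet-style placeholders, not values from the source project:

import numpy as np
import cv2

im = np.zeros((480, 640, 3), dtype=np.uint8)    # dummy BGR image
pixel_means = np.array([0.485, 0.456, 0.406])   # placeholder channel means
pixel_stds = np.array([0.229, 0.224, 0.225])    # placeholder channel stds
blob_im, im_scale = prep_im_for_blob(im, pixel_means, pixel_stds, target_size=600, max_size=1000)
print(blob_im.shape, im_scale)  # (600, 800, 3) 1.25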
Example 12: resize_image
# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_LINEAR [as alias]
def resize_image(img):
    img_size = img.shape
    im_size_min = np.min(img_size[0:2])
    im_size_max = np.max(img_size[0:2])
    im_scale = float(600) / float(im_size_min)
    if np.round(im_scale * im_size_max) > 1200:
        im_scale = float(1200) / float(im_size_max)
    new_h = int(img_size[0] * im_scale)
    new_w = int(img_size[1] * im_scale)
    # Round each side up to the nearest multiple of 16.
    new_h = new_h if new_h % 16 == 0 else (new_h // 16 + 1) * 16
    new_w = new_w if new_w % 16 == 0 else (new_w // 16 + 1) * 16
    re_im = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
    return re_im, (new_h / img_size[0], new_w / img_size[1])
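A minimal usage sketch showing the short-side scaling and the rounding up to multiples of 16; the input size is an assumption:

import numpy as np
import cv2

img = np.zeros((480, 640, 3), dtype=np.uint8)  # dummy 640x480 image
re_im, (scale_h, scale_w) = resize_image(img)
# 480 -> 600, rounded up to 608; 640 -> 800, already a multiple of 16
print(re_im.shape)  # (608, 800, 3)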
Example 13: multiscale_single_test
# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_LINEAR [as alias]
def multiscale_single_test(image, input_scales, predictor):
    '''
    Predict image semantic segmentation labeling using multi-scale inputs.
    Inputs:
        image: numpy array, [height, width, channel], channel = 3.
        input_scales: list of scale factors, e.g., [0.5, 1.0, 1.5].
        predictor: prediction function which takes one scaled image as input and outputs its semantic segmentation labelings.
    Returns:
        Averaged predicted logits of multi-scale inputs
    '''
    image_height_raw = image.shape[0]
    image_width_raw = image.shape[1]
    multiscale_outputs = []
    for input_scale in input_scales:
        image_height_scaled = round(image_height_raw * input_scale)
        image_width_scaled = round(image_width_raw * input_scale)
        image_scaled = cv2.resize(image, (image_width_scaled, image_height_scaled), interpolation=cv2.INTER_LINEAR)
        output = predictor(inputs=[image_scaled], target_height=image_height_raw, target_width=image_width_raw)[0]
        multiscale_outputs.append(output)
    output_mean = np.mean(multiscale_outputs, axis=0)
    return output_mean
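A minimal usage sketch with a stand-in predictor; the zero-logit function and the 21-class count are assumptions for illustration only:

import numpy as np
import cv2

def fake_predictor(inputs, target_height, target_width):
    # Stand-in for a real segmentation model: zero logits for 21 classes at the original resolution.
    return [np.zeros((target_height, target_width, 21), dtype=np.float32)]

image = np.random.randint(0, 255, size=(96, 128, 3), dtype=np.uint8)
logits = multiscale_single_test(image, [0.5, 1.0, 1.5], fake_predictor)
print(logits.shape)  # (96, 128, 21)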
Example 14: multiscale_single_validate
# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_LINEAR [as alias]
def multiscale_single_validate(image, label, input_scales, validator):
    image_height_raw = image.shape[0]
    image_width_raw = image.shape[1]
    multiscale_outputs = []
    multiscale_losses = []
    for input_scale in input_scales:
        image_height_scaled = round(image_height_raw * input_scale)
        image_width_scaled = round(image_width_raw * input_scale)
        image_scaled = cv2.resize(image, (image_width_scaled, image_height_scaled), interpolation=cv2.INTER_LINEAR)
        output, loss = validator(inputs=[image_scaled], target_height=image_height_raw, target_width=image_width_raw, labels=[label])
        multiscale_outputs.append(output[0])
        multiscale_losses.append(loss)
    output_mean = np.mean(multiscale_outputs, axis=0)
    loss_mean = np.mean(multiscale_losses)
    return output_mean, loss_mean
Example 15: __getitem__
# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_LINEAR [as alias]
def __getitem__(self, index):
    datafiles = self.files[index]
    image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
    image = cv2.resize(image, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_LINEAR)
    size = image.shape
    name = osp.splitext(osp.basename(datafiles["img"]))[0]
    image = np.asarray(image, np.float32)
    # Min-max normalize to [0, 1].
    image = (image - image.min()) / (image.max() - image.min())
    img_h, img_w, _ = image.shape
    pad_h = max(self.crop_h - img_h, 0)
    pad_w = max(self.crop_w - img_w, 0)
    if pad_h > 0 or pad_w > 0:
        image = cv2.copyMakeBorder(image, 0, pad_h, 0,
                                   pad_w, cv2.BORDER_CONSTANT,
                                   value=(0.0, 0.0, 0.0))
    image = image.transpose((2, 0, 1))
    return image, np.array(size), name