This article collects typical usage examples of the cv2.INTER_CUBIC attribute in Python. If you are wondering what cv2.INTER_CUBIC is for, how to use it, or what real code that uses it looks like, the curated attribute examples below may help. You can also explore further usage examples from the cv2 module, where this attribute lives.
The following presents 15 code examples of the cv2.INTER_CUBIC attribute, sorted by popularity by default.
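Before the individual examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the two common ways to pass cv2.INTER_CUBIC to cv2.resize: with an explicit target size, or with scale factors.

import cv2
import numpy as np

# Synthetic 100x200 BGR test image (placeholder data only).
img = np.random.randint(0, 256, (100, 200, 3), dtype=np.uint8)

# Explicit target size; note that cv2.resize takes dsize as (width, height).
up = cv2.resize(img, (400, 200), interpolation=cv2.INTER_CUBIC)

# Or scale factors with dsize=None.
up2 = cv2.resize(img, None, fx=2.0, fy=2.0, interpolation=cv2.INTER_CUBIC)

print(up.shape, up2.shape)  # (200, 400, 3) (200, 400, 3)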
Example 1: format_img
# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_CUBIC [as alias]
def format_img(img, C):
    img_min_side = float(C.im_size)
    (height, width, _) = img.shape

    if width <= height:
        f = img_min_side / width
        new_height = int(f * height)
        new_width = int(img_min_side)
    else:
        f = img_min_side / height
        new_width = int(f * width)
        new_height = int(img_min_side)
    fx = width / float(new_width)
    fy = height / float(new_height)
    img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)
    img = img[:, :, (2, 1, 0)]
    img = img.astype(np.float32)
    img[:, :, 0] -= C.img_channel_mean[0]
    img[:, :, 1] -= C.img_channel_mean[1]
    img[:, :, 2] -= C.img_channel_mean[2]
    img /= C.img_scaling_factor
    img = np.transpose(img, (2, 0, 1))
    img = np.expand_dims(img, axis=0)
    return img, fx, fy
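The fx and fy ratios returned above are intended for mapping coordinates predicted on the resized image back to the original image. A hedged sketch of such a mapping (get_real_coordinates is a hypothetical helper name, not part of the snippet above):

def get_real_coordinates(fx, fy, x1, y1, x2, y2):
    # Hypothetical helper: scale box corners from resized-image space back to
    # original-image space using the ratios returned by format_img.
    return (int(round(x1 * fx)), int(round(y1 * fy)),
            int(round(x2 * fx)), int(round(y2 * fy)))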
Example 2: format_img
# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_CUBIC [as alias]
def format_img(img, C):
    img_min_side = float(C.im_size)
    (height, width, _) = img.shape

    if width <= height:
        f = img_min_side / width
        new_height = int(f * height)
        new_width = int(img_min_side)
    else:
        f = img_min_side / height
        new_width = int(f * width)
        new_height = int(img_min_side)
    img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)
    img = img[:, :, (2, 1, 0)]
    img = img.astype(np.float32)
    img[:, :, 0] -= C.img_channel_mean[0]
    img[:, :, 1] -= C.img_channel_mean[1]
    img[:, :, 2] -= C.img_channel_mean[2]
    img /= C.img_scaling_factor
    img = np.transpose(img, (2, 0, 1))
    img = np.expand_dims(img, axis=0)
    return img
Example 3: __call__
# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_CUBIC [as alias]
def __call__(self, sample):
    # Fixed range of scales
    sc = self.scales[random.randint(0, len(self.scales) - 1)]

    for elem in sample.keys():
        if 'fname' in elem:
            continue
        tmp = sample[elem]

        if tmp.ndim == 2:
            flagval = cv2.INTER_NEAREST
        else:
            flagval = cv2.INTER_CUBIC

        tmp = cv2.resize(tmp, None, fx=sc, fy=sc, interpolation=flagval)
        sample[elem] = tmp

    return sample
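The same interpolation dispatch can be exercised outside the transform class. A minimal standalone sketch (random_scale_sample and the scale list are invented for illustration):

import random

import cv2
import numpy as np

def random_scale_sample(sample, scales=(0.75, 1.0, 1.25)):
    # One random scale per sample; nearest-neighbour for 2-D label maps so class
    # ids are not blended, bicubic for image-like arrays.
    sc = random.choice(scales)
    return {k: cv2.resize(v, None, fx=sc, fy=sc,
                          interpolation=cv2.INTER_NEAREST if v.ndim == 2 else cv2.INTER_CUBIC)
            for k, v in sample.items()}

sample = {'image': np.random.rand(120, 160, 3).astype(np.float32),
          'gt': np.zeros((120, 160), dtype=np.float32)}
print({k: v.shape for k, v in random_scale_sample(sample).items()})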
Example 4: resized_crop
# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_CUBIC [as alias]
def resized_crop(img, i, j, h, w, size, interpolation=cv2.INTER_LINEAR):
    """Crop the given numpy ndarray and resize it to the desired size.

    Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.

    Args:
        img (numpy ndarray): Image to be cropped.
        i: Upper pixel coordinate.
        j: Left pixel coordinate.
        h: Height of the cropped image.
        w: Width of the cropped image.
        size (sequence or int): Desired output size. Same semantics as ``scale``.
        interpolation (int, optional): Desired interpolation. Default is
            ``cv2.INTER_LINEAR``.

    Returns:
        numpy ndarray: Cropped and resized image.
    """
    assert _is_numpy_image(img), 'img should be numpy image'
    img = crop(img, i, j, h, w)
    img = resize(img, size, interpolation=interpolation)
    return img
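Since the crop, resize and _is_numpy_image helpers are not shown here, a fully runnable equivalent of one resized_crop call can be sketched inline (coordinates, sizes and image data are invented for illustration):

import cv2
import numpy as np

img = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # placeholder HxWxC image

# Equivalent of resized_crop(img, i=50, j=100, h=200, w=300, size=(224, 224),
# interpolation=cv2.INTER_CUBIC), with the crop and resize steps written inline.
patch = img[50:50 + 200, 100:100 + 300]
patch = cv2.resize(patch, (224, 224), interpolation=cv2.INTER_CUBIC)
print(patch.shape)  # (224, 224, 3)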
Example 5: get_data
# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_CUBIC [as alias]
def get_data(name, batch):
    isTrain = name == 'train'
    image_shape = 224

    if isTrain:
        augmentors = [
            # use lighter augs if model is too small
            GoogleNetResize(crop_area_fraction=0.49 if args.width_ratio < 1 else 0.08,
                            target_shape=image_shape),
            imgaug.RandomOrderAug(
                [imgaug.BrightnessScale((0.6, 1.4), clip=False),
                 imgaug.Contrast((0.6, 1.4), clip=False),
                 imgaug.Saturation(0.4, rgb=False),
                 ]),
            imgaug.Flip(horiz=True),
        ]
    else:
        augmentors = [
            imgaug.ResizeShortestEdge(int(image_shape * 256 / 224), cv2.INTER_CUBIC),
            imgaug.CenterCrop((image_shape, image_shape)),
        ]
    return get_imagenet_dataflow(args.data_dir, name, batch, augmentors,
                                 meta_dir=args.meta_dir)
Example 6: _augment
# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_CUBIC [as alias]
def _augment(self, img, _):
    h, w = img.shape[:2]
    area = h * w
    for _ in range(10):
        targetArea = self.rng.uniform(self.crop_area_fraction, 1.0) * area
        aspectR = self.rng.uniform(self.aspect_ratio_low, self.aspect_ratio_high)
        ww = int(np.sqrt(targetArea * aspectR) + 0.5)
        hh = int(np.sqrt(targetArea / aspectR) + 0.5)
        if self.rng.uniform() < 0.5:
            ww, hh = hh, ww
        if hh <= h and ww <= w:
            x1 = 0 if w == ww else self.rng.randint(0, w - ww)
            y1 = 0 if h == hh else self.rng.randint(0, h - hh)
            out = img[y1:y1 + hh, x1:x1 + ww]
            out = cv2.resize(out, (self.target_shape, self.target_shape), interpolation=cv2.INTER_CUBIC)
            return out
    # Fallback: if no valid random crop was found in 10 tries, resize the
    # shortest edge and take a center crop instead.
    out = imgaug.ResizeShortestEdge(self.target_shape, interp=cv2.INTER_CUBIC).augment(img)
    out = imgaug.CenterCrop(self.target_shape).augment(out)
    return out
Example 7: _resize_image
# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_CUBIC [as alias]
def _resize_image(img):
    dst_width = CFG.ARCH.INPUT_SIZE[0]
    dst_height = CFG.ARCH.INPUT_SIZE[1]
    h_old, w_old, _ = img.shape
    height = dst_height
    width = int(w_old * height / h_old)
    if width < dst_width:
        # Keep the aspect ratio and pad the left/right borders with white
        # until the target width is reached.
        left_padding = int((dst_width - width) / 2)
        right_padding = dst_width - width - left_padding
        resized_img = cv2.resize(img, (width, height), interpolation=cv2.INTER_CUBIC)
        resized_img = cv2.copyMakeBorder(resized_img, 0, 0, left_padding, right_padding,
                                         cv2.BORDER_CONSTANT, value=[255, 255, 255])
    else:
        resized_img = cv2.resize(img, (dst_width, height), interpolation=cv2.INTER_CUBIC)

    return resized_img
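The global CFG is not defined in the snippet. One hedged way to exercise _resize_image is to stand in a minimal config, assuming the function above and this stand-in live in the same script; the (100, 32) input size is a CRNN-style assumption, not taken from the project:

from types import SimpleNamespace

import cv2
import numpy as np

# Hypothetical stand-in for the project's global CFG; INPUT_SIZE is (width, height).
CFG = SimpleNamespace(ARCH=SimpleNamespace(INPUT_SIZE=(100, 32)))

text_line = np.full((48, 96, 3), 200, dtype=np.uint8)  # fake 48x96 text-line crop
out = _resize_image(text_line)
print(out.shape)  # (32, 100, 3): scaled to height 32, padded with white to width 100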
Example 8: fixed_resize
# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_CUBIC [as alias]
def fixed_resize(sample, resolution, flagval=None):
    if flagval is None:
        if ((sample == 0) | (sample == 1)).all():
            flagval = cv2.INTER_NEAREST
        else:
            flagval = cv2.INTER_CUBIC

    if isinstance(resolution, int):
        tmp = [resolution, resolution]
        tmp[np.argmax(sample.shape[:2])] = int(
            round(float(resolution) / np.min(sample.shape[:2]) * np.max(sample.shape[:2])))
        resolution = tuple(tmp)

    if sample.ndim == 2 or (sample.ndim == 3 and sample.shape[2] == 3):
        sample = cv2.resize(sample, resolution[::-1], interpolation=flagval)
    else:
        tmp = sample
        sample = np.zeros(np.append(resolution, tmp.shape[2]), dtype=np.float32)
        for ii in range(sample.shape[2]):
            sample[:, :, ii] = cv2.resize(tmp[:, :, ii], resolution[::-1], interpolation=flagval)
    return sample
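A hedged usage sketch showing how flagval is chosen automatically: a continuous-valued image gets bicubic interpolation, while a binary {0, 1} mask falls back to nearest-neighbour (shapes and data are invented for illustration):

import cv2
import numpy as np

rgb = np.random.rand(240, 320, 3).astype(np.float32)        # continuous-valued image
mask = (np.random.rand(240, 320) > 0.5).astype(np.float32)  # binary {0, 1} mask

rgb_resized = fixed_resize(rgb, 256)           # int resolution: shorter side becomes 256
mask_resized = fixed_resize(mask, (256, 256))  # explicit (h, w) resolution
print(rgb_resized.shape, mask_resized.shape)   # (256, 341, 3) (256, 256)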
Example 9: get_mnist_data
# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_CUBIC [as alias]
def get_mnist_data(is_train, image_size, batchsize):
    ds = MNISTCh('train' if is_train else 'test', shuffle=True)

    if is_train:
        augs = [
            imgaug.RandomApplyAug(imgaug.RandomResize((0.8, 1.2), (0.8, 1.2)), 0.3),
            imgaug.RandomApplyAug(imgaug.RotationAndCropValid(15), 0.5),
            imgaug.RandomApplyAug(imgaug.SaltPepperNoise(white_prob=0.01, black_prob=0.01), 0.25),
            imgaug.Resize((224, 224), cv2.INTER_AREA)
        ]
        ds = AugmentImageComponent(ds, augs)
        ds = PrefetchData(ds, 128 * 10, multiprocessing.cpu_count())
        ds = BatchData(ds, batchsize)
        ds = PrefetchData(ds, 256, 4)
    else:
        # no augmentation, only resizing
        augs = [
            imgaug.Resize((image_size, image_size), cv2.INTER_CUBIC),
        ]
        ds = AugmentImageComponent(ds, augs)
        ds = BatchData(ds, batchsize)
        ds = PrefetchData(ds, 20, 2)
    return ds
Example 10: update_vis
# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_CUBIC [as alias]
def update_vis(self):
    ims = self.opt_engine.get_images(self.frame_id)

    if ims is not None:
        self.ims = ims
    if self.ims is None:
        return

    ims_show = []
    n_imgs = self.ims.shape[0]
    for n in range(n_imgs):
        # im = ims[n]
        im_s = cv2.resize(self.ims[n], (self.width, self.width), interpolation=cv2.INTER_CUBIC)
        if n == self.select_id and self.topK > 1:
            t = 3  # thickness
            cv2.rectangle(im_s, (t, t), (self.width - t, self.width - t), (0, 255, 0), t)
        im_s = im_s[np.newaxis, ...]
        ims_show.append(im_s)

    if ims_show:
        ims_show = np.concatenate(ims_show, axis=0)
        g_tmp = utils.grid_vis(ims_show, self.grid_size[1], self.grid_size[0])  # (nh, nw)
        self.vis_results = g_tmp.copy()
        self.update()
Example 11: format_img
# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_CUBIC [as alias]
def format_img(self, img, box_2d):
    # Should this happen? Or does normalize take care of it? YOLO doesn't like
    # img = img.astype(np.float) / 255

    # torch transforms
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    process = transforms.Compose([
        transforms.ToTensor(),
        normalize
    ])

    # crop image
    pt1 = box_2d[0]
    pt2 = box_2d[1]
    crop = img[pt1[1]:pt2[1] + 1, pt1[0]:pt2[0] + 1]
    crop = cv2.resize(src=crop, dsize=(224, 224), interpolation=cv2.INTER_CUBIC)

    # recolor, reformat
    batch = process(crop)

    return batch
Example 12: decode_pose
# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_CUBIC [as alias]
def decode_pose(img_orig, heatmaps, pafs):
    param = {'thre1': 0.1, 'thre2': 0.05, 'thre3': 0.5}

    # Bottom-up approach:
    # Step 1: find all joints in the image (organized by joint type: [0]=nose, [1]=neck...)
    joint_list_per_joint_type = NMS(param, heatmaps, img_orig.shape[0] / float(heatmaps.shape[0]))
    # joint_list is an unravel'd version of joint_list_per_joint, where we add
    # a 5th column to indicate the joint_type (0=nose, 1=neck...)
    joint_list = np.array([tuple(peak) + (joint_type,)
                           for joint_type, joint_peaks in enumerate(joint_list_per_joint_type)
                           for peak in joint_peaks])

    # Step 2: find which joints go together to form limbs (which wrists go with which elbows)
    paf_upsamp = cv2.resize(pafs, (img_orig.shape[1], img_orig.shape[0]), interpolation=cv2.INTER_CUBIC)
    connected_limbs = find_connected_joints(param, paf_upsamp, joint_list_per_joint_type)

    # Step 3: associate limbs that belong to the same person
    person_to_joint_assoc = group_limbs_of_same_person(connected_limbs, joint_list)

    # (Step 4): plot results
    to_plot, canvas = plot_pose(img_orig, joint_list, person_to_joint_assoc)

    return to_plot, canvas, joint_list, person_to_joint_assoc
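A minimal sketch of the Step 2 upsampling in isolation: PAF maps produced at heatmap resolution are resized back to the input resolution with bicubic interpolation before limb scoring. The 368x368 input size and 38 PAF channels are assumptions for illustration, not values taken from the snippet:

import cv2
import numpy as np

img_orig = np.zeros((368, 368, 3), dtype=np.uint8)    # assumed network input size
pafs = np.random.rand(46, 46, 38).astype(np.float32)  # assumed PAFs at stride 8

paf_upsamp = cv2.resize(pafs, (img_orig.shape[1], img_orig.shape[0]),
                        interpolation=cv2.INTER_CUBIC)
print(paf_upsamp.shape)  # (368, 368, 38)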
Example 13: get_img
# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_CUBIC [as alias]
def get_img(self):
    while True:
        img_name = self.image_files[self.index]
        label_name = img_name.replace('.jpg', '.png')
        img = cv2.imread(img_name)
        if img is None:
            print("load img failed:", img_name)
            self.next_img()
        else:
            break
    if self.birdeye:
        # Warp to a bird's-eye view first, then resize to the target (cols, rows).
        warped_img = cv2.warpPerspective(img, self.M, (4000, 4000), flags=cv2.INTER_CUBIC)
        img = cv2.resize(warped_img, (self.cols, self.rows), interpolation=cv2.INTER_CUBIC)
    else:
        img = cv2.resize(img, (self.cols, self.rows), interpolation=cv2.INTER_CUBIC)
    img = img.transpose((2, 0, 1))
    return img, label_name
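The homography self.M used above is assumed to be computed elsewhere. A hypothetical construction with cv2.getPerspectiveTransform (the source and destination points are invented for illustration):

import cv2
import numpy as np

src = np.float32([[560, 450], [720, 450], [1180, 700], [100, 700]])   # made-up road corners
dst = np.float32([[1000, 0], [3000, 0], [3000, 4000], [1000, 4000]])  # bird's-eye target
M = cv2.getPerspectiveTransform(src, dst)  # would play the role of self.M above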
Example 14: _resize_cv2
# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_CUBIC [as alias]
def _resize_cv2(img, size, interpolation):
    img = img.transpose((1, 2, 0))
    if interpolation == PIL.Image.NEAREST:
        cv_interpolation = cv2.INTER_NEAREST
    elif interpolation == PIL.Image.BILINEAR:
        cv_interpolation = cv2.INTER_LINEAR
    elif interpolation == PIL.Image.BICUBIC:
        cv_interpolation = cv2.INTER_CUBIC
    elif interpolation == PIL.Image.LANCZOS:
        cv_interpolation = cv2.INTER_LANCZOS4
    H, W = size
    img = cv2.resize(img, dsize=(W, H), interpolation=cv_interpolation)

    # If the input is a grayscale image, cv2 returns a two-dimensional array.
    if len(img.shape) == 2:
        img = img[:, :, np.newaxis]
    return img.transpose((2, 0, 1))
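A hedged usage sketch: the function expects a CHW array and a PIL-style interpolation constant, and maps the latter to the corresponding cv2 flag (shapes and data invented for illustration):

import cv2
import numpy as np
import PIL.Image

chw = np.random.rand(3, 120, 160).astype(np.float32)  # CHW float image
out = _resize_cv2(chw, size=(240, 320), interpolation=PIL.Image.BICUBIC)
print(out.shape)  # (3, 240, 320); PIL.Image.BICUBIC maps to cv2.INTER_CUBIC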
Example 15: step
# Required module: import cv2 [as alias]
# Or: from cv2 import INTER_CUBIC [as alias]
def step(self, amt=1):
    ret, frame = self._vid.read()

    image = cv2.cvtColor(frame, cv2.COLOR_RGB2BGRA)

    if self.crop:
        image = image[self._cropY + self.yoff:self._ih - self._cropY + self.yoff,
                      self._cropX + self.xoff:self._iw - self._cropX + self.xoff]
    else:
        t, b, l, r = self._pad
        image = cv2.copyMakeBorder(
            image, t, b, l, r, cv2.BORDER_CONSTANT, value=[0, 0, 0])

    resized = cv2.resize(image, (self.width, self.height),
                         interpolation=cv2.INTER_CUBIC)
    if self.mirror:
        resized = cv2.flip(resized, 1)

    for y in range(self.height):
        for x in range(self.width):
            self.layout.set(x, y, tuple(resized[y, x][0:3]))

    if not isinstance(self.videoSource, int):
        self._frameCount += 1
        if self._frameCount >= self._frameTotal:
            self._vid.set(1, 0)  # CV_CAP_PROP_POS_FRAMES
            self._frameCount = 0
            self.animComplete = True