This article collects typical usage examples of the Python attribute cv2.NORM_MINMAX. If you have been wondering what cv2.NORM_MINMAX does or how to use it, the curated examples below may help. You can also explore further usage examples of the cv2 module to which this attribute belongs.
The following presents 15 code examples of cv2.NORM_MINMAX, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
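Before diving into the examples, here is a minimal sketch of the basic pattern they all share: cv2.normalize with norm_type=cv2.NORM_MINMAX rescales an array so its minimum maps to alpha and its maximum to beta. The file name below is a placeholder, not taken from any example on this page.
import cv2
import numpy as np

# Minimal sketch of min-max rescaling with cv2.NORM_MINMAX.
# 'input.jpg' is a placeholder path.
img = cv2.imread('input.jpg', cv2.IMREAD_GRAYSCALE)

# Stretch the intensity range so the darkest pixel becomes 0 and the
# brightest becomes 255 (useful before display or further processing).
stretched = cv2.normalize(img, None, alpha=0, beta=255,
                          norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)

# The same call also rescales arbitrary float arrays, e.g. into [0, 1].
scores = np.random.rand(4, 4).astype(np.float32) * 10
scaled = cv2.normalize(scores, None, alpha=0, beta=1,
                       norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)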
Example 1: _update_mean_shift_bookkeeping
# Required module: import cv2 [as alias]
# Or: from cv2 import NORM_MINMAX [as alias]
def _update_mean_shift_bookkeeping(self, frame, box_grouped):
    """Preprocess all valid bounding boxes for mean-shift tracking

    This method preprocesses all relevant bounding boxes (those that
    have been detected by both mean-shift tracking and saliency) for
    the next mean-shift step.

    :param frame: current RGB input frame
    :param box_grouped: list of bounding boxes
    """
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    self.object_roi = []
    self.object_box = []
    for box in box_grouped:
        (x, y, w, h) = box
        hsv_roi = hsv[y:y + h, x:x + w]
        mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)),
                           np.array((180., 255., 255.)))
        roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
        cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

        self.object_roi.append(roi_hist)
        self.object_box.append(box)
Example 2: calculate_roi_hist
# Required module: import cv2 [as alias]
# Or: from cv2 import NORM_MINMAX [as alias]
def calculate_roi_hist(self, frame):
    """Calculates region of interest histogram.

    Args:
      frame: The np.array image frame to calculate ROI histogram for.
    """
    (x, y, w, h) = self.box
    roi = frame[y:y + h, x:x + w]

    hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)),
                       np.array((180., 255., 255.)))
    roi_hist = cv2.calcHist([hsv_roi], [0, 1], mask, [180, 255],
                            [0, 180, 0, 255])
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    self.roi_hist = roi_hist
# Run this every frame
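The trailing comment suggests the stored histogram is re-used on every frame. Below is a minimal sketch of what that per-frame step typically looks like, assuming a tracker object that exposes self.box and self.roi_hist as above; the method name update and the termination criteria are illustrative, not taken from the original project.
def update(self, frame):
    """Hypothetical per-frame step: back-project the stored ROI histogram
    and let mean-shift move self.box toward the best colour match."""
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Use the same channels and ranges as calculate_roi_hist above.
    back_proj = cv2.calcBackProject([hsv], [0, 1], self.roi_hist,
                                    [0, 180, 0, 255], scale=1)
    # Stop after 10 iterations or when the window moves by less than 1 px.
    term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
    _, self.box = cv2.meanShift(back_proj, self.box, term_crit)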
Example 3: load_frames
# Required module: import cv2 [as alias]
# Or: from cv2 import NORM_MINMAX [as alias]
def load_frames(file_path, resize_to=224.0):
    # Saved numpy files should be read in with format (time, height, width, channel)
    frames = np.load(file_path)
    t, h, w, c = frames.shape

    # Resize and scale images for the network structure
    # TODO: maybe use opencv to normalize the image
    # frames = cv.normalize(frames, None, alpha=0, beta=1, norm_type=cv.NORM_MINMAX, dtype=cv.CV_32F)
    frames_out = []
    need_resize = False
    if w < resize_to or h < resize_to:
        d = resize_to - min(w, h)
        sc = 1 + d / min(w, h)
        need_resize = True
    for i in range(t):
        img = frames[i, :, :, :]
        if need_resize:
            img = cv.resize(img, dsize=(0, 0), fx=sc, fy=sc)
        img = (img / 255.) * 2 - 1
        frames_out.append(img)
    return np.asarray(frames_out, dtype=np.float32)
Example 4: proc_oflow
# Required module: import cv2 [as alias]
# Or: from cv2 import NORM_MINMAX [as alias]
def proc_oflow(images):
    h, w = images.shape[-3:-1]

    processed_images = []
    for image in images:
        hsv = np.zeros((h, w, 3), dtype=np.uint8)
        hsv[:, :, 0] = 255
        hsv[:, :, 1] = 255

        mag, ang = cv2.cartToPolar(image[..., 0], image[..., 1])
        hsv[..., 0] = ang * 180 / np.pi / 2
        hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)

        processed_image = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        processed_images.append(processed_image)

    return np.stack(processed_images)
Example 5: compute_dense_optical_flow
# Required module: import cv2 [as alias]
# Or: from cv2 import NORM_MINMAX [as alias]
def compute_dense_optical_flow(prev_image, current_image):
    old_shape = current_image.shape
    prev_image_gray = cv2.cvtColor(prev_image, cv2.COLOR_BGR2GRAY)
    current_image_gray = cv2.cvtColor(current_image, cv2.COLOR_BGR2GRAY)
    assert current_image.shape == old_shape

    hsv = np.zeros_like(prev_image)
    hsv[..., 1] = 255
    flow = None
    flow = cv2.calcOpticalFlowFarneback(prev=prev_image_gray,
                                        next=current_image_gray, flow=flow,
                                        pyr_scale=0.8, levels=15, winsize=5,
                                        iterations=10, poly_n=5, poly_sigma=0,
                                        flags=10)
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv[..., 0] = ang * 180 / np.pi / 2
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
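For context, a minimal usage sketch of the function above on two consecutive frames of a video; the video path is a placeholder, not part of the original project.
# Sketch: visualize dense flow between two consecutive frames ('video.mp4' is hypothetical).
cap = cv2.VideoCapture('video.mp4')
ok1, frame1 = cap.read()
ok2, frame2 = cap.read()
if ok1 and ok2:
    flow_bgr = compute_dense_optical_flow(frame1, frame2)
    cv2.imshow('dense optical flow', flow_bgr)
    cv2.waitKey(0)
cap.release()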
Example 6: main
# Required module: import cv2 [as alias]
# Or: from cv2 import NORM_MINMAX [as alias]
def main():
    src = cv2.imread('src.jpg', cv2.IMREAD_GRAYSCALE)
    tpl = cv2.imread('tpl.jpg', cv2.IMREAD_GRAYSCALE)
    result = cv2.matchTemplate(src, tpl, cv2.TM_CCOEFF_NORMED)
    result = cv2.normalize(result, dst=None, alpha=0, beta=1,
                           norm_type=cv2.NORM_MINMAX, dtype=-1)
    minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(result)
    matchLoc = maxLoc

    draw1 = cv2.rectangle(
        src, matchLoc, (matchLoc[0] + tpl.shape[1], matchLoc[1] + tpl.shape[0]), 0, 2, 8, 0)
    draw2 = cv2.rectangle(
        result, matchLoc, (matchLoc[0] + tpl.shape[1], matchLoc[1] + tpl.shape[0]), 0, 2, 8, 0)

    cv2.imshow('draw1', draw1)
    cv2.imshow('draw2', draw2)
    cv2.waitKey(0)
    print(src.shape)
    print(tpl.shape)
    print(result.shape)
    print(matchLoc)
Example 7: capture_histogram
# Required module: import cv2 [as alias]
# Or: from cv2 import NORM_MINMAX [as alias]
def capture_histogram(path_of_sample):
    # read the image
    color = cv2.imread(path_of_sample)

    # convert to HSV
    color_hsv = cv2.cvtColor(color, cv2.COLOR_BGR2HSV)

    # compute the histogram
    object_hist = cv2.calcHist([color_hsv],      # image
                               [0, 1],           # channels
                               None,             # no mask
                               [180, 256],       # size of histogram
                               [0, 180, 0, 256]  # channel values
                               )

    # min max normalization
    cv2.normalize(object_hist, object_hist, 0, 255, cv2.NORM_MINMAX)
    return object_hist
Developer: PacktPublishing, Project: Hands-On-Machine-Learning-with-OpenCV-4, Lines of code: 22, Source file: object_detection_using_color.py
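A hedged follow-up sketch: back-project the captured colour histogram onto a scene image to highlight pixels whose colour matches the sample. The file names are placeholders, not from the Packt project.
# Sketch: locate the sampled object's colour in a scene ('hand_sample.jpg' and 'scene.jpg' are hypothetical).
hist = capture_histogram('hand_sample.jpg')
scene = cv2.imread('scene.jpg')
scene_hsv = cv2.cvtColor(scene, cv2.COLOR_BGR2HSV)
# Same channels and ranges as the histogram computed above.
back_proj = cv2.calcBackProject([scene_hsv], [0, 1], hist,
                                [0, 180, 0, 256], scale=1)
cv2.imshow('back projection', back_proj)
cv2.waitKey(0)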
Example 8: generate_target
# Required module: import cv2 [as alias]
# Or: from cv2 import NORM_MINMAX [as alias]
def generate_target(object_file, target_name):
    border = 20
    size = [960, 720]
    foreground = cv2.imread(object_file, cv2.IMREAD_UNCHANGED)
    if foreground is None:
        return False
    cv2.normalize(foreground, foreground, 0, 255, cv2.NORM_MINMAX)
    foreground = foreground.astype(numpy.uint8)
    ratio = numpy.amin(numpy.divide(
        numpy.subtract(size, [2 * border, 2 * border]), foreground.shape[0:2]))
    forground_size = numpy.floor(numpy.multiply(foreground.shape[0:2], ratio)).astype(int)
    foreground = cv2.resize(foreground, (forground_size[1], forground_size[0]))
    foreground = image_fill(foreground, size, [0, 0, 0, 0])
    cv2.imwrite(target_name, foreground)
Example 9: __init__
# Required module: import cv2 [as alias]
# Or: from cv2 import NORM_MINMAX [as alias]
def __init__(self, parent, capture, fps=24):
    wx.Panel.__init__(self, parent)

    self.capture = capture
    ret, frame = self.capture.read()
    sal = mr_sal.saliency(frame)
    sal = cv2.resize(sal, (320, 240)).astype(sp.uint8)
    sal = cv2.normalize(sal, None, 0, 255, cv2.NORM_MINMAX)
    outsal = cv2.applyColorMap(sal, cv2.COLORMAP_HSV)
    self.bmp = wx.BitmapFromBuffer(320, 240, outsal.astype(sp.uint8))

    self.timer = wx.Timer(self)
    self.timer.Start(1000. / fps)

    self.Bind(wx.EVT_PAINT, self.OnPaint)
    self.Bind(wx.EVT_TIMER, self.NextFrame)
Example 10: get_blur_im
# Required module: import cv2 [as alias]
# Or: from cv2 import NORM_MINMAX [as alias]
def get_blur_im(self):
    """downscale and blur the image"""
    # preprocess image
    dwnscl_factor = 4   # Hydra images' shape is divisible by 4
    blr_sigma = 17      # blur the image a bit, seems to work better
    new_shape = (self.img.shape[1] // dwnscl_factor,  # as x,y, not row,columns
                 self.img.shape[0] // dwnscl_factor)

    try:
        dwn_gray_im = cv2.resize(self.img, new_shape)
    except:
        pdb.set_trace()
    # apply blurring
    blur_im = cv2.GaussianBlur(dwn_gray_im, (blr_sigma, blr_sigma), 0)
    # normalise between 0 and 255
    blur_im = cv2.normalize(blur_im, None, alpha=0, beta=255,
                            norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
    return blur_im
Example 11: updateROIs
# Required module: import cv2 [as alias]
# Or: from cv2 import NORM_MINMAX [as alias]
def updateROIs(self):
    # useful for resizing events
    if self.Ifull.size == 0:
        self.twoViews.cleanCanvas()
    else:
        cur = self.ui.tabWidget.currentIndex()
        if cur == self.tab_keys['mask']:
            I1, I2 = self.Ifull, self.Imask
        elif cur == self.tab_keys['bgnd']:
            I1 = self.Ifull
            I2 = np.zeros_like(self.IsubtrB)
            cv2.normalize(self.IsubtrB, I2, 0, 255, cv2.NORM_MINMAX)
        else:
            I1, I2 = self.Ifull, self.Ifull

        qimage_roi1 = self._numpy2qimage(I1)
        qimage_roi2 = self._numpy2qimage(I2)
        self.twoViews.setPixmap(qimage_roi1, qimage_roi2)
Example 12: flow2colorimage
# Required module: import cv2 [as alias]
# Or: from cv2 import NORM_MINMAX [as alias]
def flow2colorimage(ar_f_Flow: np.ndarray) -> np.ndarray:
    """Translate one optical flow field (values from -1.0 to 1.0) to a colorful image."""
    h, w, c = ar_f_Flow.shape
    if not isinstance(ar_f_Flow[0, 0, 0], np.float32):
        warnings.warn("Need to convert flows to float32")
        ar_f_Flow = ar_f_Flow.astype(np.float32)

    ar_n_hsv = np.zeros((h, w, 3), dtype=np.uint8)
    ar_n_hsv[..., 1] = 255

    # get colors
    mag, ang = cv2.cartToPolar(ar_f_Flow[..., 0], ar_f_Flow[..., 1])
    ar_n_hsv[..., 0] = ang * 180 / np.pi / 2
    ar_n_hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)

    ar_n_bgr = cv2.cvtColor(ar_n_hsv, cv2.COLOR_HSV2BGR)
    return ar_n_bgr
Example 13: get_state
# Required module: import cv2 [as alias]
# Or: from cv2 import NORM_MINMAX [as alias]
def get_state(self):
    responses1 = self.client.simGetImages([
        airsim.ImageRequest("1", airsim.ImageType.Scene, False,
                            False)])  # scene vision image in uncompressed RGB array
    response = responses1[0]
    img1d = np.frombuffer(response.image_data_uint8, dtype=np.uint8)  # get numpy array
    img_rgba = img1d.reshape(response.height, response.width, 3)
    img = Image.fromarray(img_rgba)
    img_rgb = img.convert('RGB')
    self.iter = self.iter + 1
    state = np.asarray(img_rgb)

    state = cv2.resize(state, (self.input_size, self.input_size),
                       interpolation=cv2.INTER_LINEAR)
    state = cv2.normalize(state, state, 0, 1, cv2.NORM_MINMAX, cv2.CV_32F)
    state_rgb = []
    state_rgb.append(state[:, :, 0:3])
    state_rgb = np.array(state_rgb)
    state_rgb = state_rgb.astype('float32')
    return state_rgb
Example 14: weights_img_func
# Required module: import cv2 [as alias]
# Or: from cv2 import NORM_MINMAX [as alias]
def weights_img_func(key, entry, viewer):
    data = entry['weights']
    assert data.ndim == 4

    img_cnt_max = viewer.img_cnt_max[key]
    res_data = list()

    # accumulate to 3 channels image
    for i in six.moves.range(min(data.shape[0], img_cnt_max)):
        img_shape = (3,) + data.shape[2:4]
        accum = np.zeros(img_shape, dtype=data.dtype)
        for ch in six.moves.range(data.shape[1]):
            accum[ch % 3] += data[i][ch]

        # normalize
        img = np.transpose(accum, (1, 2, 0))
        img = cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX)

        width = img.shape[0] * 15
        res_data.append({'img': img, 'width': width})

    return res_data
# ========================= Loss Graph (In a tab page) ========================
Example 15: resize_and_contrast
# Required module: import cv2 [as alias]
# Or: from cv2 import NORM_MINMAX [as alias]
def resize_and_contrast(in_dir, out_dir, target_size):
    check_and_mkdir(out_dir)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    for subdir, dirs, files in os.walk(in_dir):
        for f in files:
            file_path = subdir + os.sep + f
            if is_image(f):
                img = cv2.imread(file_path, 0)
                resized_img = cv2.resize(img, (target_size, target_size), interpolation=cv2.INTER_CUBIC)
                class_dir = out_dir + os.sep + file_path.split("/")[-2]
                check_and_mkdir(class_dir)
                file_name = class_dir + os.sep + file_path.split("/")[-1]
                print(file_name)
                norm_image = cv2.normalize(resized_img, None, alpha=0, beta=1,
                                           norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F) * 256
                # norm_image = clahe.apply(resized_img)
                cv2.imwrite(file_name, norm_image)

# count the direct one-step sub directories (which will represent the class name)