This article collects typical usage examples of the cv2.CV_64F attribute in Python. If you are wondering what cv2.CV_64F is for, how to use it, or what real-world code that uses it looks like, the hand-picked examples below may help. You can also explore further usage examples of the cv2 module that this attribute belongs to.
The following shows 15 code examples of the cv2.CV_64F attribute, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
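Before the examples, a minimal sketch (with a hypothetical file name "input.png") of why cv2.CV_64F is the usual output depth for gradient filters: an 8-bit output would clip negative filter responses to zero and lose edges on one side, whereas a 64-bit float output keeps the sign, so the absolute value can be taken afterwards.
import cv2
import numpy as np

gray = cv2.imread("input.png", cv2.IMREAD_GRAYSCALE)  # hypothetical file name
# CV_64F preserves negative gradient values that a CV_8U output depth would clip to 0.
sobelx_64f = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
sobelx_8u = np.uint8(np.clip(np.absolute(sobelx_64f), 0, 255))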
Example 1: _lapulaseDetection
# Required module: import cv2 [as alias]
# Or: from cv2 import CV_64F [as alias]
def _lapulaseDetection(self, imgName):
"""
:param strdir: 文件所在的目录
:param name: 文件名称
:return: 检测模糊后的分数
"""
# step 1: preprocessing
img2gray, reImg = self.preImgOps(imgName)
# step 2: Laplacian operator, compute the score
resLap = cv2.Laplacian(img2gray, cv2.CV_64F)
score = resLap.var()
print("Laplacian %s score of given image is %s", str(score))
# strp3: 绘制图片并保存 不应该写在这里 抽象出来 这是共有的部分
newImg = self._drawImgFonts(reImg, str(score))
newDir = self.strDir + "/_lapulaseDetection_/"
if not os.path.exists(newDir):
os.makedirs(newDir)
newPath = newDir + imgName
# display
cv2.imwrite(newPath, newImg)  # save the image
cv2.imshow(imgName, newImg)
cv2.waitKey(0)
# step 4: return the score
return score
Example 2: color_grid_thresh
# Required module: import cv2 [as alias]
# Or: from cv2 import CV_64F [as alias]
def color_grid_thresh(img, s_thresh=(170,255), sx_thresh=(20, 100)):
img = np.copy(img)
# Convert to HLS color space and separate the L and S channels
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
l_channel = hls[:,:,1]
s_channel = hls[:,:,2]
# Sobel x
sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0) # Take the derivative in x
abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines
scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
# Threshold x gradient
sxbinary = np.zeros_like(scaled_sobel)
sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1
# Threshold color channel
s_binary = np.zeros_like(s_channel)
s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
# combine the two binary
binary = sxbinary | s_binary
# Stack each channel (to visually check the pixel source)
# color_binary = np.dstack((np.zeros_like(sxbinary), sxbinary,s_binary)) * 255
return binary
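A minimal usage sketch for color_grid_thresh, assuming it is called from the same module and a hypothetical road image "road.jpg": cv2.imread returns BGR, so convert to RGB first because the function uses cv2.COLOR_RGB2HLS, and scale the 0/1 mask to 0/255 before saving.
import cv2

bgr = cv2.imread("road.jpg")  # hypothetical file name
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
binary = color_grid_thresh(rgb, s_thresh=(170, 255), sx_thresh=(20, 100))
cv2.imwrite("binary.png", binary * 255)  # scale the 0/1 mask for viewing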
Example 3: _create_derivative
# Required module: import cv2 [as alias]
# Or: from cv2 import CV_64F [as alias]
def _create_derivative(cls, filepath):
img = cv2.imread(filepath,0)
edges = cv2.Canny(img, 175, 320, apertureSize=3)
# Create gradient map using Sobel
sobelx64f = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=-1)
sobely64f = cv2.Sobel(img,cv2.CV_64F,0,1,ksize=-1)
theta = np.arctan2(sobely64f, sobelx64f)
if diagnostics:
cv2.imwrite('edges.jpg',edges)
cv2.imwrite('sobelx64f.jpg', np.absolute(sobelx64f))
cv2.imwrite('sobely64f.jpg', np.absolute(sobely64f))
# amplify theta for visual inspection
theta_visible = (theta + np.pi)*255/(2*np.pi)
cv2.imwrite('theta.jpg', theta_visible)
return (edges, sobelx64f, sobely64f, theta)
Example 4: _find_edges_laplacian
# Required module: import cv2 [as alias]
# Or: from cv2 import CV_64F [as alias]
def _find_edges_laplacian(image, edge_multiplier, from_colorspace):
image_gray = colorlib.change_colorspace_(np.copy(image),
to_colorspace=colorlib.CSPACE_GRAY,
from_colorspace=from_colorspace)
image_gray = image_gray[..., 0]
edges_f = cv2.Laplacian(_normalize_cv2_input_arr_(image_gray / 255.0),
cv2.CV_64F)
edges_f = np.abs(edges_f)
edges_f = edges_f ** 2
vmax = np.percentile(edges_f, min(int(90 * (1/edge_multiplier)), 99))
edges_f = np.clip(edges_f, 0.0, vmax) / vmax
edges_uint8 = np.clip(np.round(edges_f * 255), 0, 255.0).astype(np.uint8)
edges_uint8 = _blur_median(edges_uint8, 3)
edges_uint8 = _threshold(edges_uint8, 50)
return edges_uint8
# Added in 0.4.0.
Example 5: laplacian
# Required module: import cv2 [as alias]
# Or: from cv2 import CV_64F [as alias]
def laplacian(mask):
'''
Get 2nd order gradients using the Laplacian
'''
# blur
mask = cv2.GaussianBlur(mask, (5, 5), 0)
# edges with laplacian
laplacian = cv2.Laplacian(mask, cv2.CV_64F, ksize=5)
# stretch
laplacian = contrast_stretch(laplacian)
# cast
laplacian = np.uint8(laplacian)
return laplacian
Example 6: gradients
# Required module: import cv2 [as alias]
# Or: from cv2 import CV_64F [as alias]
def gradients(mask, direction='x'):
'''
Get gradients using sobel operator
'''
mask = cv2.GaussianBlur(mask, (5, 5), 0)
if direction == 'x':
# grad x
sobel = cv2.Sobel(mask, cv2.CV_64F, 1, 0, ksize=7)
elif direction == 'y':
# grad y
sobel = cv2.Sobel(mask, cv2.CV_64F, 0, 1, ksize=7)
else:
print("Invalid gradient direction. Must be x or y")
quit()
# sobel = np.absolute(sobel)
sobel = contrast_stretch(sobel) # expand contrast
sobel = np.uint8(sobel)
return sobel
Example 7: compute_energy_matrix
# Required module: import cv2 [as alias]
# Or: from cv2 import CV_64F [as alias]
def compute_energy_matrix(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Compute X derivative of the image
sobel_x = cv2.Sobel(gray,cv2.CV_64F, 1, 0, ksize=3)
# Compute Y derivative of the image
sobel_y = cv2.Sobel(gray,cv2.CV_64F, 0, 1, ksize=3)
abs_sobel_x = cv2.convertScaleAbs(sobel_x)
abs_sobel_y = cv2.convertScaleAbs(sobel_y)
# Return weighted summation of the two images i.e. 0.5*X + 0.5*Y
return cv2.addWeighted(abs_sobel_x, 0.5, abs_sobel_y, 0.5, 0)
# Find vertical seam in the input image
Developer: PacktPublishing, Project: OpenCV-3-x-with-Python-By-Example, Lines of code: 18, Source file: reduce_image_by_seam_carving.py
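A minimal usage sketch for compute_energy_matrix, assuming it runs in the same module and a hypothetical input image "beach.jpg"; the seam-finding step that consumes the energy matrix is not part of the snippet above.
import cv2

img = cv2.imread("beach.jpg")  # hypothetical file name
energy = compute_energy_matrix(img)
cv2.imwrite("energy.png", energy)  # already uint8 after convertScaleAbs/addWeighted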
Example 8: abs_sobel_thresh
# Required module: import cv2 [as alias]
# Or: from cv2 import CV_64F [as alias]
def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):
# Apply the following steps to img
# 1) Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# 2) Take the derivative in x or y given orient = 'x' or 'y'
# 3) Take the absolute value of the derivative or gradient
if orient == 'x':
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
if orient == 'y':
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
# 4) Scale to 8-bit (0 - 255) then convert to type = np.uint8
scaled_sobel = np.uint8(255.*abs_sobel/np.max(abs_sobel))
# 5) Create a mask of 1's where the scaled gradient magnitude
# is > thresh_min and < thresh_max
binary_output = np.zeros_like(scaled_sobel)
binary_output[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
return binary_output
Example 9: mag_thresh
# Required module: import cv2 [as alias]
# Or: from cv2 import CV_64F [as alias]
def mag_thresh(img, sobel_kernel=3, thresh=(0, 255)):
# Apply the following steps to img
# 1) Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# 2) Take the gradient in x and y separately
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# 3) Calculate the magnitude
gradmag = np.sqrt(sobelx**2 + sobely**2)
# 4) Scale to 8-bit (0 - 255) and convert to type = np.uint8
scale_factor = np.max(gradmag)/255
gradmag = (gradmag/scale_factor).astype(np.uint8)
# 5) Create a binary mask where mag thresholds are met
binary_output = np.zeros_like(gradmag)
binary_output[(gradmag >= thresh[0]) & (gradmag <= thresh[1])] = 1
return binary_output
Example 10: get_laplace_points
# Required module: import cv2 [as alias]
# Or: from cv2 import CV_64F [as alias]
def get_laplace_points(self, image: np.ndarray, num_points=500) -> np.ndarray:
if num_points <= 0:
return np.zeros((0, 2), dtype=np.uint8)
image = cv2.GaussianBlur(image, (15, 15), 0)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
image = np.uint8(np.absolute(cv2.Laplacian(image, cv2.CV_64F, ksize=19)))
image = cv2.GaussianBlur(image, (15, 15), 0)
image = (image * (255 / image.max())).astype(np.uint8)
image = image.astype(np.float32) / image.sum()
if self.options['visualize_laplace']:
self.visualize_image(image, 'laplace')
weights = np.ravel(image)
coordinates = np.arange(0, weights.size, dtype=np.uint32)
choices = np.random.choice(coordinates, size=num_points, replace=False, p=weights)
raw_points = np.unravel_index(choices, image.shape)
points = np.stack(raw_points, axis=-1)[..., ::-1]
return points
Example 11: get_best_images
# Required module: import cv2 [as alias]
# Or: from cv2 import CV_64F [as alias]
def get_best_images(plate_images, num_img_return):
"""
Get the top num_img_return quality images (with the least blur).
Laplacian function returns a value which indicates how blur the image is.
The lower the value, the more blur the image have
"""
# first, keep the images with the largest area, because a bigger image means bigger characters on the plate
if len(plate_images) > (num_img_return + 2):
plate_images = sorted(plate_images, key=lambda x : x[0].shape[0]*x[0].shape[1], reverse=True)[:(num_img_return+2)]
# secondly, pick the images with the least blur
if len(plate_images) > num_img_return:
plate_images = sorted(plate_images, key=lambda img : cv2.Laplacian(img[0], cv2.CV_64F).var(), reverse=True)[:num_img_return]
# img[0] because plate_images = [plate image, char on plate]
return plate_images
Example 12: variance_of_laplacian
# Required module: import cv2 [as alias]
# Or: from cv2 import CV_64F [as alias]
def variance_of_laplacian(image):
# compute the Laplacian of the image and then return the focus
# measure, which is simply the variance of the Laplacian
return cv2.Laplacian(image, cv2.CV_64F).var()
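A minimal usage sketch of the focus measure above, with a hypothetical file name and a hypothetical threshold of 100.0 (the threshold depends on image resolution and content and must be tuned):
import cv2

gray = cv2.imread("photo.jpg", cv2.IMREAD_GRAYSCALE)  # hypothetical file name
fm = variance_of_laplacian(gray)
label = "blurry" if fm < 100.0 else "sharp"  # hypothetical threshold
print("%s (focus measure = %.2f)" % (label, fm))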
Example 13: compute_inital_corner_likelihood
# Required module: import cv2 [as alias]
# Or: from cv2 import CV_64F [as alias]
def compute_inital_corner_likelihood(image):
likelihoods = []
for prototype in ck.CORNER_KERNEL_PROTOTYPES:
filter_responses = [cv2.filter2D(image, ddepth=cv2.CV_64F, kernel=kernel) for kernel in prototype]
fA, fB, fC, fD = filter_responses
mean_response = (fA + fB + fC + fD) / 4.
minAB = np.minimum(fA, fB)
minCD = np.minimum(fC, fD)
diff1 = minAB - mean_response
diff2 = minCD - mean_response
# For an ideal corner, the response of {A,B} should be greater than the mean response of {A,B,C,D},
# while the response of {C,D} should be smaller, and vice versa for flipped corners.
likelihood1 = np.minimum(diff1, -diff2)
likelihood2 = np.minimum(-diff1, diff2) # flipped case
likelihoods.append(likelihood1)
likelihoods.append(likelihood2)
corner_likelihood = np.max(likelihoods, axis=0)
return corner_likelihood
Example 14: dir_threshold
# Required module: import cv2 [as alias]
# Or: from cv2 import CV_64F [as alias]
def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):
""" threshold according to the direction of the gradient
:param img:
:param sobel_kernel:
:param thresh:
:return:
"""
# Apply the following steps to img
# 1) Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# 2) Take the gradient in x and y separately
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# 3) Take the absolute value of the x and y gradients
# 4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient
absgraddir = np.arctan2(np.absolute(sobely), np.absolute(sobelx))
# 5) Create a binary mask where direction thresholds are met
binary_output = np.zeros_like(absgraddir)
binary_output[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 1
return binary_output
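A minimal sketch (hypothetical file name and threshold values) that combines mag_thresh from Example 9 with dir_threshold above, a common way to keep mostly near-vertical lane-line edges:
import cv2
import numpy as np

rgb = cv2.cvtColor(cv2.imread("road.jpg"), cv2.COLOR_BGR2RGB)  # hypothetical file name
mag_binary = mag_thresh(rgb, sobel_kernel=9, thresh=(30, 100))
dir_binary = dir_threshold(rgb, sobel_kernel=15, thresh=(0.7, 1.3))
combined = np.zeros_like(mag_binary)
combined[(mag_binary == 1) & (dir_binary == 1)] = 1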
Example 15: color_grid_thresh_dynamic
# Required module: import cv2 [as alias]
# Or: from cv2 import CV_64F [as alias]
def color_grid_thresh_dynamic(img, s_thresh=(170,255), sx_thresh=(20, 100)):
img = np.copy(img)
height = img.shape[0]
width = img.shape[1]
# Convert to HLS color space and separate the L and S channels
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
l_channel = hls[:,:,1]
s_channel = hls[:,:,2]
# Sobel x
sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0) # Take the derivative in x
abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines
scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
# Threshold x gradient
sxbinary = np.zeros_like(scaled_sobel)
sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1
# Threshold color channel
s_binary = np.zeros_like(s_channel)
s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
sxbinary[:, :width//2] = 0 # zero out the left half: keep the gradient threshold only on the right side
s_binary[:,width//2:] = 0 # zero out the right half: keep the color threshold only on the left side
# combine the two binary
binary = sxbinary | s_binary
# Stack each channel (to visually check the pixel source)
# color_binary = np.dstack((np.zeros_like(sxbinary), sxbinary,s_binary)) * 255
return binary