This article collects typical usage examples of the cv2.CV_32F attribute in Python. If you are unsure what cv2.CV_32F is for or how to use it, the curated examples below may help. You can also explore further usage examples of the cv2 module to which this attribute belongs.
The following shows 15 code examples of the cv2.CV_32F attribute, sorted by popularity by default.
Example 1: preprocess_hog
# Required module: import cv2 [as alias]
# Or: from cv2 import CV_32F [as alias]
# (this snippet also uses numpy as np and norm from numpy.linalg)
def preprocess_hog(digits):
    samples = []
    for img in digits:
        gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
        gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
        mag, ang = cv2.cartToPolar(gx, gy)
        bin_n = 16
        bin = np.int32(bin_n*ang/(2*np.pi))
        bin_cells = bin[:10,:10], bin[10:,:10], bin[:10,10:], bin[10:,10:]
        mag_cells = mag[:10,:10], mag[10:,:10], mag[:10,10:], mag[10:,10:]
        hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
        hist = np.hstack(hists)

        # transform to Hellinger kernel
        eps = 1e-7
        hist /= hist.sum() + eps
        hist = np.sqrt(hist)
        hist /= norm(hist) + eps

        samples.append(hist)
    return np.float32(samples)
# not guaranteed to cover all provinces
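A minimal call sketch for the function above, assuming preprocess_hog and its imports (cv2, numpy as np, norm from numpy.linalg) are already in scope; the random input below is purely illustrative (the 10-pixel cell slicing implies 20x20 digit patches):

import numpy as np

# two synthetic 20x20 grayscale "digits"
digits = [np.random.randint(0, 256, (20, 20), dtype=np.uint8) for _ in range(2)]
features = preprocess_hog(digits)
print(features.shape)  # (2, 64): 4 cells x 16 orientation bins each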
Example 2: load_frames
# Required module: import cv2 [as alias]
# Or: from cv2 import CV_32F [as alias]
# (this snippet refers to cv2 under the alias cv and also uses numpy as np)
def load_frames(file_path, resize_to=224.0):
    # Saved numpy files should be read in with format (time, height, width, channel)
    frames = np.load(file_path)
    t, h, w, c = frames.shape

    # Resize and scale images for the network structure
    # TODO: maybe use opencv to normalize the image
    # frames = cv.normalize(frames, None, alpha=0, beta=1, norm_type=cv.NORM_MINMAX, dtype=cv.CV_32F)
    frames_out = []
    need_resize = False
    if w < resize_to or h < resize_to:
        d = resize_to - min(w, h)
        sc = 1 + d / min(w, h)
        need_resize = True
    for i in range(t):
        img = frames[i, :, :, :]
        if need_resize:
            img = cv.resize(img, dsize=(0, 0), fx=sc, fy=sc)
        img = (img / 255.) * 2 - 1
        frames_out.append(img)
    return np.asarray(frames_out, dtype=np.float32)
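A quick way to exercise load_frames, assuming the function above is in scope together with `import cv2 as cv` and `import numpy as np`; the file name clip.npy is just a placeholder:

import numpy as np

# save a small fake clip (8 frames of 112x112 RGB) and reload it through load_frames
fake_clip = np.random.randint(0, 256, (8, 112, 112, 3), dtype=np.uint8)
np.save('clip.npy', fake_clip)
clip = load_frames('clip.npy', resize_to=224.0)
print(clip.shape, clip.min(), clip.max())  # frames upscaled to at least 224 px, values in [-1, 1]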
Example 3: preprocess_hog
# Required module: import cv2 [as alias]
# Or: from cv2 import CV_32F [as alias]
def preprocess_hog(digits):
    samples = []
    for img in digits:
        gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
        gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
        mag, ang = cv2.cartToPolar(gx, gy)
        bin_n = 16
        bin = np.int32(bin_n*ang/(2*np.pi))
        bin_cells = bin[:10,:10], bin[10:,:10], bin[:10,10:], bin[10:,10:]
        mag_cells = mag[:10,:10], mag[10:,:10], mag[:10,10:], mag[10:,10:]
        hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
        hist = np.hstack(hists)

        # transform to Hellinger kernel
        eps = 1e-7
        hist /= hist.sum() + eps
        hist = np.sqrt(hist)
        hist /= norm(hist) + eps

        samples.append(hist)
    return np.float32(samples)
Example 4: coherence_filter
# Required module: import cv2 [as alias]
# Or: from cv2 import CV_32F [as alias]
def coherence_filter(img, sigma=11, str_sigma=11, blend=0.5, iter_n=4):
    h, w = img.shape[:2]

    for i in range(iter_n):
        print(i)

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        eigen = cv2.cornerEigenValsAndVecs(gray, str_sigma, 3)
        eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
        x, y = eigen[:,:,1,0], eigen[:,:,1,1]

        gxx = cv2.Sobel(gray, cv2.CV_32F, 2, 0, ksize=sigma)
        gxy = cv2.Sobel(gray, cv2.CV_32F, 1, 1, ksize=sigma)
        gyy = cv2.Sobel(gray, cv2.CV_32F, 0, 2, ksize=sigma)
        gvv = x*x*gxx + 2*x*y*gxy + y*y*gyy
        m = gvv < 0

        ero = cv2.erode(img, None)
        dil = cv2.dilate(img, None)
        img1 = ero
        img1[m] = dil[m]
        img = np.uint8(img*(1.0 - blend) + img1*blend)
    print('done')
    return img
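A minimal driver for the filter above, assuming coherence_filter plus `import cv2` and `import numpy as np` are in scope; 'input.jpg' is a placeholder path for any 3-channel BGR image:

import cv2

img = cv2.imread('input.jpg')  # placeholder path
if img is not None:
    result = coherence_filter(img, iter_n=2)
    cv2.imwrite('coherence.jpg', result)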
Example 5: __init__
# Required module: import cv2 [as alias]
# Or: from cv2 import CV_32F [as alias]
def __init__(self, frame, rect):
    # rnd_warp, preprocess, update_kernel and update are defined elsewhere in the
    # original tracker class/module (not shown in this excerpt)
    x1, y1, x2, y2 = rect
    w, h = map(cv2.getOptimalDFTSize, [x2-x1, y2-y1])
    x1, y1 = (x1+x2-w)//2, (y1+y2-h)//2
    self.pos = x, y = x1+0.5*(w-1), y1+0.5*(h-1)
    self.size = w, h
    img = cv2.getRectSubPix(frame, (w, h), (x, y))

    self.win = cv2.createHanningWindow((w, h), cv2.CV_32F)
    g = np.zeros((h, w), np.float32)
    g[h//2, w//2] = 1
    g = cv2.GaussianBlur(g, (-1, -1), 2.0)
    g /= g.max()
    self.G = cv2.dft(g, flags=cv2.DFT_COMPLEX_OUTPUT)
    self.H1 = np.zeros_like(self.G)
    self.H2 = np.zeros_like(self.G)
    for i in range(128):
        a = self.preprocess(rnd_warp(img))
        A = cv2.dft(a, flags=cv2.DFT_COMPLEX_OUTPUT)
        self.H1 += cv2.mulSpectrums(self.G, A, 0, conjB=True)
        self.H2 += cv2.mulSpectrums(     A, A, 0, conjB=True)
    self.update_kernel()
    self.update(frame)
Example 6: preprocess
# Required module: import cv2 [as alias]
# Or: from cv2 import CV_32F [as alias]
def preprocess(image):
    # load the image (note: re-reads from the path in the global `args` dict,
    # ignoring the `image` argument)
    image = cv2.imread(args["image"])
    # resize image
    image = cv2.resize(image, None, fx=0.7, fy=0.7, interpolation=cv2.INTER_CUBIC)
    # convert to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # calculate x & y gradient
    gradX = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)
    gradY = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=0, dy=1, ksize=-1)
    # subtract the y-gradient from the x-gradient
    gradient = cv2.subtract(gradX, gradY)
    gradient = cv2.convertScaleAbs(gradient)
    # blur the image
    blurred = cv2.blur(gradient, (3, 3))
    # threshold the image
    (_, thresh) = cv2.threshold(blurred, 225, 255, cv2.THRESH_BINARY)
    # note: this overwrites the thresholded result with a grayscale of the resized image
    thresh = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return thresh
Example 7: preprocess_hog
# Required module: import cv2 [as alias]
# Or: from cv2 import CV_32F [as alias]
def preprocess_hog(digits):
    samples = []
    for img in digits:
        gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
        gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
        mag, ang = cv2.cartToPolar(gx, gy)
        bin_n = 16
        bin = np.int32(bin_n*ang/(2*np.pi))
        bin_cells = bin[:10,:10], bin[10:,:10], bin[:10,10:], bin[10:,10:]
        mag_cells = mag[:10,:10], mag[10:,:10], mag[:10,10:], mag[10:,10:]
        hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
        hist = np.hstack(hists)

        # transform to Hellinger kernel
        eps = 1e-7
        hist /= hist.sum() + eps
        hist = np.sqrt(hist)
        hist /= norm(hist) + eps

        samples.append(hist)
    return np.float32(samples)
Example 8: renderEnvLuminosityNoise
# Required module: import cv2 [as alias]
# Or: from cv2 import CV_32F [as alias]
def renderEnvLuminosityNoise(self, origin_image, noise_var=0.1, in_RGB=False, out_RGB=False):
    """
    Render the image under a randomly varied environment luminosity.
    """
    # vary luminosity and color by scaling each LAB channel with Gaussian noise
    origin_image_LAB = cv2.cvtColor(
        origin_image, cv2.COLOR_RGB2LAB if in_RGB else cv2.COLOR_BGR2LAB, cv2.CV_32F)
    origin_image_LAB[:, :, 0] = origin_image_LAB[:, :, 0] * (np.random.randn() * noise_var + 1.0)
    origin_image_LAB[:, :, 1] = origin_image_LAB[:, :, 1] * (np.random.randn() * noise_var + 1.0)
    origin_image_LAB[:, :, 2] = origin_image_LAB[:, :, 2] * (np.random.randn() * noise_var + 1.0)
    out_image = cv2.cvtColor(
        origin_image_LAB, cv2.COLOR_LAB2RGB if out_RGB else cv2.COLOR_LAB2BGR, cv2.CV_8UC3)
    return out_image
Example 9: get_mag_avg
# Required module: import cv2 [as alias]
# Or: from cv2 import CV_32F [as alias]
def get_mag_avg(img):
    img = np.sqrt(img)
    kernels = get_kernels()  # get_kernels() is defined elsewhere in the source project
    mag = np.zeros(img.shape, dtype='float32')
    for kernel_filter in kernels:
        gx = cv2.filter2D(np.float32(img), cv2.CV_32F, kernel_filter[1], borderType=cv2.BORDER_REFLECT)
        gy = cv2.filter2D(np.float32(img), cv2.CV_32F, kernel_filter[0], borderType=cv2.BORDER_REFLECT)
        mag += cv2.magnitude(gx, gy)
    mag /= len(kernels)
    return np.uint8(mag)
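get_kernels() is not part of this excerpt. For experimentation, a hypothetical stand-in that returns a single pair of 3x3 Sobel-style derivative kernels could look like the sketch below (an assumption, not the original project's kernel set), assuming get_mag_avg plus `import cv2` and `import numpy as np` are in scope:

import numpy as np

def get_kernels():
    # hypothetical stand-in: one (ky, kx) pair of derivative kernels
    kx = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], dtype=np.float32)
    ky = kx.T.copy()
    return [(ky, kx)]

img = np.random.rand(64, 64).astype(np.float32)  # toy non-negative input
print(get_mag_avg(img).shape)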
Example 10: get_mag_ang
# Required module: import cv2 [as alias]
# Or: from cv2 import CV_32F [as alias]
def get_mag_ang(img):
    """
    Gets image gradient (magnitude) and orientation (angle)

    Args:
        img

    Returns:
        Gradient, orientation
    """
    img = np.sqrt(img)

    gx = cv2.Sobel(np.float32(img), cv2.CV_32F, 1, 0)
    gy = cv2.Sobel(np.float32(img), cv2.CV_32F, 0, 1)

    mag, ang = cv2.cartToPolar(gx, gy)

    return mag, ang, gx, gy
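A small smoke test for get_mag_ang, assuming the function above plus `import cv2` and `import numpy as np` are in scope; the random input is illustrative only:

import numpy as np

img = np.random.rand(32, 32).astype(np.float32)  # non-negative values, since the function takes sqrt first
mag, ang, gx, gy = get_mag_ang(img)
print(mag.shape, ang.min(), ang.max())  # cartToPolar returns angles in radians, in [0, 2*pi)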
Example 11: get_state
# Required module: import cv2 [as alias]
# Or: from cv2 import CV_32F [as alias]
def get_state(self):
    responses1 = self.client.simGetImages([  # depth visualization image
        airsim.ImageRequest("1", airsim.ImageType.Scene, False,
                            False)])  # scene vision image in uncompressed RGBA array
    response = responses1[0]
    img1d = np.fromstring(response.image_data_uint8, dtype=np.uint8)  # get numpy array
    img_rgba = img1d.reshape(response.height, response.width, 3)
    img = Image.fromarray(img_rgba)
    img_rgb = img.convert('RGB')
    self.iter = self.iter + 1
    state = np.asarray(img_rgb)

    state = cv2.resize(state, (self.input_size, self.input_size), cv2.INTER_LINEAR)
    state = cv2.normalize(state, state, 0, 1, cv2.NORM_MINMAX, cv2.CV_32F)
    state_rgb = []
    state_rgb.append(state[:, :, 0:3])
    state_rgb = np.array(state_rgb)
    state_rgb = state_rgb.astype('float32')
    return state_rgb
Example 12: get_init_process_img
# Required module: import cv2 [as alias]
# Or: from cv2 import CV_32F [as alias]
def get_init_process_img(roi_img):
    """
    Apply the initial processing steps to the image: gradient, Gaussian blur,
    binarization, erosion, dilation and edge detection.
    :param roi_img: ndarray
    :return: ndarray
    """
    h = cv2.Sobel(roi_img, cv2.CV_32F, 0, 1, -1)
    v = cv2.Sobel(roi_img, cv2.CV_32F, 1, 0, -1)
    img = cv2.add(h, v)
    img = cv2.convertScaleAbs(img)
    img = cv2.GaussianBlur(img, (3, 3), 0)
    ret, img = cv2.threshold(img, 120, 255, cv2.THRESH_BINARY)
    kernel = np.ones((1, 1), np.uint8)
    img = cv2.erode(img, kernel, iterations=1)
    img = cv2.dilate(img, kernel, iterations=2)
    img = cv2.erode(img, kernel, iterations=1)
    img = cv2.dilate(img, kernel, iterations=2)
    img = auto_canny(img)  # auto_canny is defined elsewhere in the source project
    return img
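auto_canny comes from elsewhere in the source project. A commonly used median-based implementation can serve as a stand-in for a quick test (an assumption, not necessarily the original version), assuming get_init_process_img plus `import cv2` and `import numpy as np` are in scope:

import cv2
import numpy as np

def auto_canny(image, sigma=0.33):
    # hypothetical stand-in: pick Canny thresholds around the median intensity
    v = np.median(image)
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    return cv2.Canny(image, lower, upper)

roi = np.random.randint(0, 256, (100, 200), dtype=np.uint8)  # toy grayscale ROI
edges = get_init_process_img(roi)
print(edges.shape, edges.dtype)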
Example 13: resize_and_contrast
# Required module: import cv2 [as alias]
# Or: from cv2 import CV_32F [as alias]
def resize_and_contrast(in_dir, out_dir, target_size):
    # check_and_mkdir and is_image are helpers defined elsewhere in the source project
    check_and_mkdir(out_dir)

    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    for subdir, dirs, files in os.walk(in_dir):
        for f in files:
            file_path = subdir + os.sep + f
            if is_image(f):
                img = cv2.imread(file_path, 0)
                resized_img = cv2.resize(img, (target_size, target_size), interpolation=cv2.INTER_CUBIC)
                class_dir = out_dir + os.sep + file_path.split("/")[-2]
                check_and_mkdir(class_dir)
                file_name = class_dir + os.sep + file_path.split("/")[-1]
                print(file_name)
                norm_image = cv2.normalize(resized_img, None, alpha=0, beta=1,
                                           norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F) * 256
                # norm_image = clahe.apply(resized_img)
                cv2.imwrite(file_name, norm_image)
# count the direct one-step sub directories (which will represent the class name)
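check_and_mkdir and is_image are not shown in this excerpt. Hypothetical stand-ins (assumptions, not the project's originals) make the function runnable for a quick test; the directory names below are placeholders:

import os

def check_and_mkdir(path):
    # hypothetical helper: create the directory if it does not already exist
    os.makedirs(path, exist_ok=True)

def is_image(filename):
    # hypothetical helper: crude extension check
    return filename.lower().endswith(('.jpg', '.jpeg', '.png', '.bmp'))

resize_and_contrast('raw_images', 'normalized_images', 224)  # placeholder directories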
Example 14: coherence_filter
# Required module: import cv2 [as alias]
# Or: from cv2 import CV_32F [as alias]
def coherence_filter(img, sigma=11, str_sigma=11, blend=0.5, iter_n=4):
    h, w = img.shape[:2]

    for i in range(iter_n):
        print(i, end=' ')

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        eigen = cv2.cornerEigenValsAndVecs(gray, str_sigma, 3)
        eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
        x, y = eigen[:,:,1,0], eigen[:,:,1,1]

        gxx = cv2.Sobel(gray, cv2.CV_32F, 2, 0, ksize=sigma)
        gxy = cv2.Sobel(gray, cv2.CV_32F, 1, 1, ksize=sigma)
        gyy = cv2.Sobel(gray, cv2.CV_32F, 0, 2, ksize=sigma)
        gvv = x*x*gxx + 2*x*y*gxy + y*y*gyy
        m = gvv < 0

        ero = cv2.erode(img, None)
        dil = cv2.dilate(img, None)
        img1 = ero
        img1[m] = dil[m]
        img = np.uint8(img*(1.0 - blend) + img1*blend)
    print('done')
    return img
Example 15: gradient_and_binary
# Required module: import cv2 [as alias]
# Or: from cv2 import CV_32F [as alias]
def gradient_and_binary(img_blurred, image_name='1.jpg', save_path='./'):  # binarize the grayscale image; the last two parameters are for debugging
    """
    Compute the gradient and binarize the image.
    :param img_blurred: the filtered image
    :param image_name: image name, for testing
    :param save_path: save path, for testing
    :return: the binarized image
    """
    gradX = cv2.Sobel(img_blurred, ddepth=cv2.CV_32F, dx=1, dy=0)
    gradY = cv2.Sobel(img_blurred, ddepth=cv2.CV_32F, dx=0, dy=1)
    img_gradient = cv2.subtract(gradX, gradY)
    img_gradient = cv2.convertScaleAbs(img_gradient)  # Sobel operator computes the gradient; a Canny operator could be used instead

    # switched to adaptive thresholding here; it does not seem to make much difference
    img_thresh = cv2.adaptiveThreshold(img_gradient, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 3, -3)
    # cv2.imwrite(os.path.join(save_path, img_name + '_binary.jpg'), img_thresh)  # binarization; threshold not tuned yet

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    img_closed = cv2.morphologyEx(img_thresh, cv2.MORPH_CLOSE, kernel)
    img_closed = cv2.morphologyEx(img_closed, cv2.MORPH_OPEN, kernel)
    img_closed = cv2.erode(img_closed, None, iterations=9)
    img_closed = cv2.dilate(img_closed, None, iterations=9)  # erosion and dilation
    # shrinking the kernel and increasing the erode/dilate iterations greatly reduced the failure rate
    return img_closed
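A minimal end-to-end call, assuming gradient_and_binary and `import cv2` are in scope; 'plate.jpg' is a placeholder path for a grayscale source image:

import cv2

gray = cv2.imread('plate.jpg', cv2.IMREAD_GRAYSCALE)  # placeholder path
if gray is not None:
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    binary = gradient_and_binary(blurred)
    cv2.imwrite('plate_binary.jpg', binary)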