

Python cv2.CV_32F Attribute Code Examples

This article collects typical usage examples of the cv2.CV_32F attribute in Python. If you are wondering what cv2.CV_32F does, how it is used, or what real-world examples look like, the selected code examples below may help. You can also explore further usage examples from the cv2 module.


The following presents 15 code examples of the cv2.CV_32F attribute, sorted by popularity by default.
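Before the examples, a brief orientation (not part of the collected snippets): cv2.CV_32F is the OpenCV depth constant for 32-bit floating point. It is typically passed as the ddepth/dtype argument of functions such as cv2.Sobel, cv2.filter2D and cv2.normalize, so the output keeps negative values and fractional precision instead of being clipped to 8-bit integers. The sketch below only illustrates that idea; the file name "input.png" is a placeholder and is not taken from any of the examples that follow.

import cv2

# Placeholder input image, read as single-channel grayscale.
img = cv2.imread("input.png", cv2.IMREAD_GRAYSCALE)

# Requesting CV_32F as the output depth keeps negative gradient values
# that an 8-bit output depth would clip to zero.
gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
mag, ang = cv2.cartToPolar(gx, gy)

# CV_32F can also be requested as the output dtype of cv2.normalize.
img_01 = cv2.normalize(img, None, alpha=0, beta=1,
                       norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
print(gx.dtype, img_01.dtype)  # float32 float32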

Example 1: preprocess_hog

# Required module: import cv2 [as alias]
# Or: from cv2 import CV_32F [as alias]
# (the snippet below also uses numpy as np and a norm() helper, presumably numpy.linalg.norm)
def preprocess_hog(digits):
	samples = []
	for img in digits:
		gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
		gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
		mag, ang = cv2.cartToPolar(gx, gy)
		bin_n = 16
		bin = np.int32(bin_n*ang/(2*np.pi))
		bin_cells = bin[:10,:10], bin[10:,:10], bin[:10,10:], bin[10:,10:]
		mag_cells = mag[:10,:10], mag[10:,:10], mag[:10,10:], mag[10:,10:]
		hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
		hist = np.hstack(hists)
		
		# transform to Hellinger kernel
		eps = 1e-7
		hist /= hist.sum() + eps
		hist = np.sqrt(hist)
		hist /= norm(hist) + eps
		
		samples.append(hist)
	return np.float32(samples)
# Not guaranteed to cover all provinces 
Developer: wzh191920, Project: License-Plate-Recognition, Lines: 24, Source: predict.py

Example 2: load_frames

# Required module: import cv2 [as alias]
# Or: from cv2 import CV_32F [as alias]
def load_frames(file_path, resize_to=224.0):
    # Saved numpy files should be read in with format (time, height, width, channel)
    frames = np.load(file_path)
    t, h, w, c = frames.shape

    # Resize and scale images for the network structure
    #TODO: maybe use opencv to normalize the image
    #frames = cv.normalize(frames, None, alpha=0, beta=1, norm_type=cv.NORM_MINMAX, dtype=cv.CV_32F)
    frames_out = []
    need_resize = False
    if w < resize_to or h < resize_to:
        d = resize_to - min(w, h)
        sc = 1 + d / min(w, h)
        need_resize = True
    for i in range(t):
        img = frames[i, :, :, :]
        if need_resize:
            img = cv.resize(img, dsize=(0, 0), fx=sc, fy=sc)
        img = (img / 255.) * 2 - 1
        frames_out.append(img)
    return np.asarray(frames_out, dtype=np.float32) 
Developer: CMU-CREATE-Lab, Project: deep-smoke-machine, Lines: 23, Source: smoke_video_dataset_cp.py

Example 3: preprocess_hog

# Required module: import cv2 [as alias]
# Or: from cv2 import CV_32F [as alias]
# (the snippet below also uses numpy as np and a norm() helper, presumably numpy.linalg.norm)
def preprocess_hog(digits):
    samples = []
    for img in digits:
        gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
        gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
        mag, ang = cv2.cartToPolar(gx, gy)
        bin_n = 16
        bin = np.int32(bin_n*ang/(2*np.pi))
        bin_cells = bin[:10,:10], bin[10:,:10], bin[:10,10:], bin[10:,10:]
        mag_cells = mag[:10,:10], mag[10:,:10], mag[:10,10:], mag[10:,10:]
        hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
        hist = np.hstack(hists)

        # transform to Hellinger kernel
        eps = 1e-7
        hist /= hist.sum() + eps
        hist = np.sqrt(hist)
        hist /= norm(hist) + eps

        samples.append(hist)
    return np.float32(samples) 
Developer: makelove, Project: OpenCV-Python-Tutorial, Lines: 23, Source: digits.py

Example 4: coherence_filter

# Required module: import cv2 [as alias]
# Or: from cv2 import CV_32F [as alias]
def coherence_filter(img, sigma = 11, str_sigma = 11, blend = 0.5, iter_n = 4):
    h, w = img.shape[:2]

    for i in xrange(iter_n):
        print(i)

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        eigen = cv2.cornerEigenValsAndVecs(gray, str_sigma, 3)
        eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
        x, y = eigen[:,:,1,0], eigen[:,:,1,1]

        gxx = cv2.Sobel(gray, cv2.CV_32F, 2, 0, ksize=sigma)
        gxy = cv2.Sobel(gray, cv2.CV_32F, 1, 1, ksize=sigma)
        gyy = cv2.Sobel(gray, cv2.CV_32F, 0, 2, ksize=sigma)
        gvv = x*x*gxx + 2*x*y*gxy + y*y*gyy
        m = gvv < 0

        ero = cv2.erode(img, None)
        dil = cv2.dilate(img, None)
        img1 = ero
        img1[m] = dil[m]
        img = np.uint8(img*(1.0 - blend) + img1*blend)
    print('done')
    return img 
Developer: makelove, Project: OpenCV-Python-Tutorial, Lines: 26, Source: coherence.py

Example 5: __init__

# Required module: import cv2 [as alias]
# Or: from cv2 import CV_32F [as alias]
def __init__(self, frame, rect):
        x1, y1, x2, y2 = rect
        w, h = map(cv2.getOptimalDFTSize, [x2-x1, y2-y1])
        x1, y1 = (x1+x2-w)//2, (y1+y2-h)//2
        self.pos = x, y = x1+0.5*(w-1), y1+0.5*(h-1)
        self.size = w, h
        img = cv2.getRectSubPix(frame, (w, h), (x, y))

        self.win = cv2.createHanningWindow((w, h), cv2.CV_32F)
        g = np.zeros((h, w), np.float32)
        g[h//2, w//2] = 1
        g = cv2.GaussianBlur(g, (-1, -1), 2.0)
        g /= g.max()

        self.G = cv2.dft(g, flags=cv2.DFT_COMPLEX_OUTPUT)
        self.H1 = np.zeros_like(self.G)
        self.H2 = np.zeros_like(self.G)
        for i in xrange(128):
            a = self.preprocess(rnd_warp(img))
            A = cv2.dft(a, flags=cv2.DFT_COMPLEX_OUTPUT)
            self.H1 += cv2.mulSpectrums(self.G, A, 0, conjB=True)
            self.H2 += cv2.mulSpectrums(     A, A, 0, conjB=True)
        self.update_kernel()
        self.update(frame) 
Developer: makelove, Project: OpenCV-Python-Tutorial, Lines: 26, Source: mosse.py

Example 6: preprocess

# Required module: import cv2 [as alias]
# Or: from cv2 import CV_32F [as alias]
def preprocess(image):
	# load the image
	image = cv2.imread(args["image"])

	#resize image
	image = cv2.resize(image,None,fx=0.7, fy=0.7, interpolation = cv2.INTER_CUBIC)

	#convert to grayscale
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

	#calculate x & y gradient
	gradX = cv2.Sobel(gray, ddepth = cv2.CV_32F, dx = 1, dy = 0, ksize = -1)
	gradY = cv2.Sobel(gray, ddepth = cv2.CV_32F, dx = 0, dy = 1, ksize = -1)

	# subtract the y-gradient from the x-gradient
	gradient = cv2.subtract(gradX, gradY)
	gradient = cv2.convertScaleAbs(gradient)

	# blur the image
	blurred = cv2.blur(gradient, (3, 3))

	# threshold the image
	(_, thresh) = cv2.threshold(blurred, 225, 255, cv2.THRESH_BINARY)
	thresh = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	return thresh 
Developer: pyxploiter, Project: Barcode-Detection-and-Decoding, Lines: 27, Source: barcodeD&D_zbar.py

Example 7: preprocess_hog

# Required module: import cv2 [as alias]
# Or: from cv2 import CV_32F [as alias]
# (the snippet below also uses numpy as np and a norm() helper, presumably numpy.linalg.norm)
def preprocess_hog(digits):
	samples = []
	for img in digits:
		gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
		gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
		mag, ang = cv2.cartToPolar(gx, gy)
		bin_n = 16
		bin = np.int32(bin_n*ang/(2*np.pi))
		bin_cells = bin[:10,:10], bin[10:,:10], bin[:10,10:], bin[10:,10:]
		mag_cells = mag[:10,:10], mag[10:,:10], mag[:10,10:], mag[10:,10:]
		hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
		hist = np.hstack(hists)
		
		# transform to Hellinger kernel
		eps = 1e-7
		hist /= hist.sum() + eps
		hist = np.sqrt(hist)
		hist /= norm(hist) + eps
		
		samples.append(hist)
	return np.float32(samples) 
Developer: DataXujing, Project: vehicle-license-plate-recognition, Lines: 23, Source: svm_train.py

Example 8: renderEnvLuminosityNoise

# Required module: import cv2 [as alias]
# Or: from cv2 import CV_32F [as alias]
def renderEnvLuminosityNoise(self, origin_image, noise_var=0.1, in_RGB=False, out_RGB=False):
        """
        render the different environment luminosity
        """
        # variate luminosity and color
        origin_image_LAB = cv2.cvtColor(
            origin_image, cv2.COLOR_RGB2LAB if in_RGB else cv2.COLOR_BGR2LAB, cv2.CV_32F)
        origin_image_LAB[:, :, 0] = origin_image_LAB[:,
                                                     :, 0] * (np.random.randn() * noise_var + 1.0)
        origin_image_LAB[:, :, 1] = origin_image_LAB[:,
                                                     :, 1] * (np.random.randn() * noise_var + 1.0)
        origin_image_LAB[:, :, 2] = origin_image_LAB[:,
                                                     :, 2] * (np.random.randn() * noise_var + 1.0)
        out_image = cv2.cvtColor(
            origin_image_LAB, cv2.COLOR_LAB2RGB if out_RGB else cv2.COLOR_LAB2BGR, cv2.CV_8UC3)
        return out_image 
Developer: araffin, Project: robotics-rl-srl, Lines: 18, Source: omnirobot_simulator_server.py

Example 9: get_mag_avg

# Required module: import cv2 [as alias]
# Or: from cv2 import CV_32F [as alias]
def get_mag_avg(img):

    img = np.sqrt(img)

    kernels = get_kernels()

    mag = np.zeros(img.shape, dtype='float32')

    for kernel_filter in kernels:

        gx = cv2.filter2D(np.float32(img), cv2.CV_32F, kernel_filter[1], borderType=cv2.BORDER_REFLECT)
        gy = cv2.filter2D(np.float32(img), cv2.CV_32F, kernel_filter[0], borderType=cv2.BORDER_REFLECT)

        mag += cv2.magnitude(gx, gy)

    mag /= len(kernels)

    return np.uint8(mag) 
Developer: jgrss, Project: spfeas, Lines: 20, Source: spfunctions.py

Example 10: get_mag_ang

# Required module: import cv2 [as alias]
# Or: from cv2 import CV_32F [as alias]
def get_mag_ang(img):

    """
    Gets image gradient (magnitude) and orientation (angle)

    Args:
        img

    Returns:
        Gradient, orientation
    """

    img = np.sqrt(img)

    gx = cv2.Sobel(np.float32(img), cv2.CV_32F, 1, 0)
    gy = cv2.Sobel(np.float32(img), cv2.CV_32F, 0, 1)

    mag, ang = cv2.cartToPolar(gx, gy)

    return mag, ang, gx, gy 
Developer: jgrss, Project: spfeas, Lines: 22, Source: spfunctions.py

Example 11: get_state

# Required module: import cv2 [as alias]
# Or: from cv2 import CV_32F [as alias]
def get_state(self):
        responses1 = self.client.simGetImages([  # depth visualization image
            airsim.ImageRequest("1", airsim.ImageType.Scene, False,
                                False)])  # scene vision image in uncompressed RGBA array

        response = responses1[0]
        img1d = np.fromstring(response.image_data_uint8, dtype=np.uint8)  # get numpy array
        img_rgba = img1d.reshape(response.height, response.width, 3)
        img = Image.fromarray(img_rgba)
        img_rgb = img.convert('RGB')
        self.iter = self.iter+1
        state = np.asarray(img_rgb)

        state = cv2.resize(state, (self.input_size, self.input_size), cv2.INTER_LINEAR)
        state = cv2.normalize(state, state, 0, 1, cv2.NORM_MINMAX, cv2.CV_32F)
        state_rgb = []
        state_rgb.append(state[:, :, 0:3])
        state_rgb = np.array(state_rgb)
        state_rgb = state_rgb.astype('float32')

        return state_rgb 
Developer: aqeelanwar, Project: DRLwithTL, Lines: 23, Source: agent.py

Example 12: get_init_process_img

# Required module: import cv2 [as alias]
# Or: from cv2 import CV_32F [as alias]
def get_init_process_img(roi_img):
    """
    Initial processing of the image: gradient, Gaussian blur, binarization, erosion, dilation and edge detection
    :param roi_img: ndarray
    :return: ndarray
    """
    h = cv2.Sobel(roi_img, cv2.CV_32F, 0, 1, -1)
    v = cv2.Sobel(roi_img, cv2.CV_32F, 1, 0, -1)
    img = cv2.add(h, v)
    img = cv2.convertScaleAbs(img)
    img = cv2.GaussianBlur(img, (3, 3), 0)
    ret, img = cv2.threshold(img, 120, 255, cv2.THRESH_BINARY)
    kernel = np.ones((1, 1), np.uint8)
    img = cv2.erode(img, kernel, iterations=1)
    img = cv2.dilate(img, kernel, iterations=2)
    img = cv2.erode(img, kernel, iterations=1)
    img = cv2.dilate(img, kernel, iterations=2)
    img = auto_canny(img)
    return img 
Developer: inuyasha2012, Project: answer-sheet-scan, Lines: 21, Source: utils.py

Example 13: resize_and_contrast

# Required module: import cv2 [as alias]
# Or: from cv2 import CV_32F [as alias]
def resize_and_contrast(in_dir, out_dir, target_size):
    check_and_mkdir(out_dir)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))

    for subdir, dirs, files in os.walk(in_dir):
        for f in files:
            file_path = subdir + os.sep + f
            if (is_image(f)):
                img = cv2.imread(file_path, 0)
                resized_img = cv2.resize(img, (target_size, target_size), interpolation = cv2.INTER_CUBIC)
                class_dir = out_dir + os.sep + file_path.split("/")[-2]
                check_and_mkdir(class_dir)

                file_name = class_dir + os.sep + file_path.split("/")[-1]
                print(file_name)

                norm_image = cv2.normalize(resized_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F) * 256
                # norm_image = clahe.apply(resized_img)
                cv2.imwrite(file_name, norm_image)

# count the direct one-step sub directories (which will represent the class name) 
Developer: meliketoy, Project: gradcam.pytorch, Lines: 23, Source: file_function.py

Example 14: coherence_filter

# Required module: import cv2 [as alias]
# Or: from cv2 import CV_32F [as alias]
def coherence_filter(img, sigma = 11, str_sigma = 11, blend = 0.5, iter_n = 4):
    h, w = img.shape[:2]

    for i in xrange(iter_n):
        print i,

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        eigen = cv2.cornerEigenValsAndVecs(gray, str_sigma, 3)
        eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
        x, y = eigen[:,:,1,0], eigen[:,:,1,1]

        gxx = cv2.Sobel(gray, cv2.CV_32F, 2, 0, ksize=sigma)
        gxy = cv2.Sobel(gray, cv2.CV_32F, 1, 1, ksize=sigma)
        gyy = cv2.Sobel(gray, cv2.CV_32F, 0, 2, ksize=sigma)
        gvv = x*x*gxx + 2*x*y*gxy + y*y*gyy
        m = gvv < 0

        ero = cv2.erode(img, None)
        dil = cv2.dilate(img, None)
        img1 = ero
        img1[m] = dil[m]
        img = np.uint8(img*(1.0 - blend) + img1*blend)
    print 'done'
    return img 
Developer: fatcloud, Project: PyCV-time, Lines: 26, Source: coherence.py

Example 15: gradient_and_binary

# Required module: import cv2 [as alias]
# Or: from cv2 import CV_32F [as alias]
def gradient_and_binary(img_blurred, image_name='1.jpg', save_path='./'):  # binarize the grayscale image; the last two parameters are for debugging
    """
    Compute the gradient and binarize.
    :param img_blurred: the filtered (blurred) image
    :param image_name: image name, used for testing
    :param save_path: save path, used for testing
    :return: the binarized image
    """
    gradX = cv2.Sobel(img_blurred, ddepth=cv2.CV_32F, dx=1, dy=0)
    gradY = cv2.Sobel(img_blurred, ddepth=cv2.CV_32F, dx=0, dy=1)
    img_gradient = cv2.subtract(gradX, gradY)
    img_gradient = cv2.convertScaleAbs(img_gradient)  # Sobel operator computes the gradient; a Canny operator could also be used instead

    # Changed to adaptive thresholding here; it does not seem to make much difference
    img_thresh = cv2.adaptiveThreshold(img_gradient, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 3, -3)
    # cv2.imwrite(os.path.join(save_path, img_name + '_binary.jpg'), img_thresh)  # binarized image; threshold not tuned yet

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    img_closed = cv2.morphologyEx(img_thresh, cv2.MORPH_CLOSE, kernel)
    img_closed = cv2.morphologyEx(img_closed, cv2.MORPH_OPEN, kernel)
    img_closed = cv2.erode(img_closed, None, iterations=9)
    img_closed = cv2.dilate(img_closed, None, iterations=9)  # erosion and dilation
    # After reducing the kernel size and increasing the erode/dilate iterations, the error rate dropped significantly

    return img_closed 
Developer: Mingtzge, Project: 2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement, Lines: 27, Source: cut_part.py


Note: The cv2.CV_32F attribute examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce without permission.