

Python cv2.blur Method Code Examples

This article collects typical usage examples of the Python method cv2.blur. If you are struggling with questions such as: What exactly does cv2.blur do? How is it called? Are there examples of cv2.blur in practice? Then the curated code samples below may help. You can also explore further usage examples of the cv2 module that this method belongs to.


The following presents 15 code examples of the cv2.blur method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
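
Before turning to the project samples, here is a minimal, self-contained sketch of the basic call: cv2.blur applies a normalized box filter, averaging each pixel over the given kernel neighborhood. The file names and the (5, 5) kernel size below are placeholder choices for illustration, not taken from any of the projects listed.

import cv2

img = cv2.imread("input.jpg")            # placeholder path; imread returns None if the file is missing
if img is not None:
    smoothed = cv2.blur(img, (5, 5))     # average each pixel over a 5x5 neighborhood
    cv2.imwrite("blurred.jpg", smoothed)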

Example 1: movement

# Required module: import cv2 [as alias]
# Or: from cv2 import blur [as alias]
def movement(mat_1,mat_2):
    # blur1, blur2 and erodeval are module-level tuning parameters in picam.py;
    # numpy is expected to be imported as np
    mat_1_gray     = cv2.cvtColor(mat_1.copy(),cv2.COLOR_BGR2GRAY)
    mat_1_gray     = cv2.blur(mat_1_gray,(blur1,blur1))
    _,mat_1_gray   = cv2.threshold(mat_1_gray,100,255,0)
    mat_2_gray     = cv2.cvtColor(mat_2.copy(),cv2.COLOR_BGR2GRAY)
    mat_2_gray     = cv2.blur(mat_2_gray,(blur1,blur1))
    _,mat_2_gray   = cv2.threshold(mat_2_gray,100,255,0)
    mat_2_gray     = cv2.bitwise_xor(mat_1_gray,mat_2_gray)   # pixels that changed between frames
    mat_2_gray     = cv2.blur(mat_2_gray,(blur2,blur2))
    _,mat_2_gray   = cv2.threshold(mat_2_gray,70,255,0)
    mat_2_gray     = cv2.erode(mat_2_gray,np.ones((erodeval,erodeval)))
    mat_2_gray     = cv2.dilate(mat_2_gray,np.ones((4,4)))
    # the three-value unpacking of findContours targets OpenCV 3.x
    _, contours,__ = cv2.findContours(mat_2_gray,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 0:
        return True   # movement detected
    return False      # no movement


#Pedestrian Recognition Thread 
Author: PiSimo, Project: PiCamNN, Lines: 20, Source: picam.py

Example 2: __init__

# Required module: import cv2 [as alias]
# Or: from cv2 import blur [as alias]
def __init__(self, img, use_numpy_fft=True, gauss_kernel=(5, 5)):
        """Constructor

            This method initializes the saliency algorithm.

            :param img: an RGB input image
            :param use_numpy_fft: flag whether to use NumPy's FFT (True) or
                                  OpenCV's FFT (False)
            :param gauss_kernel: Kernel size for Gaussian blur
        """
        self.use_numpy_fft = use_numpy_fft
        self.gauss_kernel = gauss_kernel
        self.frame_orig = img

        # downsample image for processing
        self.small_shape = (64, 64)
        self.frame_small = cv2.resize(img, self.small_shape[1::-1])

        # whether we need to do the math (True) or it has already
        # been done (False)
        self.need_saliency_map = True 
Author: PacktPublishing, Project: OpenCV-Computer-Vision-Projects-with-Python, Lines: 23, Source: saliency.py

Example 3: merge_img

# Required module: import cv2 [as alias]
# Or: from cv2 import blur [as alias]
def merge_img(src_img, dst_img, dst_matrix, dst_points, blur_detail_x=None, blur_detail_y=None, mat_multiple=None):
    face_mask = np.zeros(src_img.shape, dtype=src_img.dtype)

    for group in core.OVERLAY_POINTS:
        cv2.fillConvexPoly(face_mask, cv2.convexHull(dst_matrix[group]), (255, 255, 255))

    r = cv2.boundingRect(np.float32([dst_points[:core.FACE_END]]))

    center = (r[0] + int(r[2] / 2), r[1] + int(r[3] / 2))

    if mat_multiple:
        mat = cv2.getRotationMatrix2D(center, 0, mat_multiple)
        face_mask = cv2.warpAffine(face_mask, mat, (face_mask.shape[1], face_mask.shape[0]))

    if blur_detail_x and blur_detail_y:
        face_mask = cv2.blur(face_mask, (blur_detail_x, blur_detail_y), center)

    return cv2.seamlessClone(np.uint8(dst_img), src_img, face_mask, center, cv2.NORMAL_CLONE) 
Author: gyp03, Project: yry, Lines: 20, Source: morpher.py

Example 4: blend_non_transparent

# Required module: import cv2 [as alias]
# Or: from cv2 import blur [as alias]
def blend_non_transparent(sprite, background_img):
    gray_overlay = cv2.cvtColor(background_img, cv2.COLOR_BGR2GRAY)
    overlay_mask = cv2.threshold(gray_overlay, 1, 255, cv2.THRESH_BINARY)[1]

    overlay_mask = cv2.erode(overlay_mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
    overlay_mask = cv2.blur(overlay_mask, (3, 3))

    background_mask = 255 - overlay_mask

    overlay_mask = cv2.cvtColor(overlay_mask, cv2.COLOR_GRAY2BGR)
    background_mask = cv2.cvtColor(background_mask, cv2.COLOR_GRAY2BGR)

    sprite_part = (sprite * (1 / 255.0)) * (background_mask * (1 / 255.0))
    overlay_part = (background_img * (1 / 255.0)) * (overlay_mask * (1 / 255.0))

    return np.uint8(cv2.addWeighted(sprite_part, 255.0, overlay_part, 255.0, 0.0)) 
Author: guille0, Project: hazymaze, Lines: 18, Source: helpers.py

Example 5: sobel

# Required module: import cv2 [as alias]
# Or: from cv2 import blur [as alias]
def sobel(filepathname):
    v = cv2.imread(filepathname)
    s = cv2.cvtColor(v,cv2.COLOR_BGR2GRAY)
    x, y = cv2.Sobel(s,cv2.CV_16S,1,0), cv2.Sobel(s,cv2.CV_16S,0,1)
    s = cv2.convertScaleAbs(cv2.subtract(x,y))
    s = cv2.blur(s,(9,9))
    cv2.imshow('nier',s)
    return s

    # ret, binary = cv2.threshold(s,40,255,cv2.THRESH_BINARY)
    # contours, hierarchy = cv2.findContours(binary,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    # for c in contours:
    #     x,y,w,h = cv2.boundingRect(c)
    #     if w>5 and h>10:
    #         cv2.rectangle(v,(x,y),(x+w,y+h),(155,155,0),1)
    # cv2.imshow('nier2',v)

    # cv2.waitKey()
    # cv2.destroyAllWindows() 
Author: cilame, Project: vrequest, Lines: 21, Source: pycv2.py

Example 6: pivot_smooth

# Required module: import cv2 [as alias]
# Or: from cv2 import blur [as alias]
def pivot_smooth(img, shape, wd, flags):
    pivot_m1 = img[:, 1280 - wd:1279 + wd, :]
    pivot_l = img[:, :wd, :]
    pivot_r = img[:, 2560 - wd:, :]
    pivot_m2 = np.append(pivot_r, pivot_l, axis=1)
    if flags:
        pivot_m1 = cv2.GaussianBlur(pivot_m1, shape, 0)
        pivot_m2 = cv2.GaussianBlur(pivot_m2, shape, 0)
    else:
        pivot_m1 = cv2.blur(pivot_m1, shape)
        pivot_m2 = cv2.blur(pivot_m2, shape)

    result = np.copy(img)
    result[:, 1280 - wd:1279 + wd, :] = pivot_m1
    result[:, :wd, :] = pivot_m2[:, wd:, :]
    result[:, 2560 - wd:, :] = pivot_m2[:, :wd, :]
    return result 
Author: cynricfu, Project: dual-fisheye-video-stitching, Lines: 19, Source: .demo.py

Example 7: threshold_video

# Required module: import cv2 [as alias]
# Or: from cv2 import blur [as alias]
def threshold_video(lower_color, upper_color, blur):
    # `blur` is the already-blurred BGR frame passed in by the caller

    # Convert BGR to HSV
    hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)

    # Threshold the HSV image to keep only pixels within the given color range
    mask = cv2.inRange(hsv, lower_color, upper_color)

    # Return the masked image
    return mask



# Finds the tape targets from the masked image and displays them on the original stream + network tables 
Author: team3997, Project: ChickenVision, Lines: 18, Source: ChickenVision.py

Example 8: __init__

# Required module: import cv2 [as alias]
# Or: from cv2 import blur [as alias]
def __init__(self, sigma=(0.0, 3.0),
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        super(GaussianBlur, self).__init__(
            seed=seed, name=name,
            random_state=random_state, deterministic=deterministic)

        self.sigma = iap.handle_continuous_param(
            sigma, "sigma", value_range=(0, None), tuple_to_uniform=True,
            list_to_choice=True)

        # epsilon value to estimate whether sigma is sufficiently above 0 to
        # apply the blur
        self.eps = 1e-3

    # Added in 0.4.0. 
Author: aleju, Project: imgaug, Lines: 18, Source: blur.py

Example 9: _augment_images

# Required module: import cv2 [as alias]
# Or: from cv2 import blur [as alias]
def _augment_images(self, images, random_state, parents, hooks):
        result = images
        nb_images = len(images)
        if self.mode == "single":
            samples = self.k.draw_samples((nb_images,), random_state=random_state)
            samples = (samples, samples)
        else:
            samples = (
                self.k[0].draw_samples((nb_images,), random_state=random_state),
                self.k[1].draw_samples((nb_images,), random_state=random_state),
            )
        for i in range(nb_images):
            kh, kw = samples[0][i], samples[1][i]
            #print(images.shape, result.shape, result[i].shape)
            kernel_impossible = (kh == 0 or kw == 0)
            kernel_does_nothing = (kh == 1 and kw == 1)
            if not kernel_impossible and not kernel_does_nothing:
                image_aug = cv2.blur(result[i], (kh, kw))
                # cv2.blur() removes channel axis for single-channel images
                if image_aug.ndim == 2:
                    image_aug = image_aug[..., np.newaxis]
                result[i] = image_aug
        return result 
Author: liuguiyangnwpu, Project: DL.EyeSight, Lines: 25, Source: blur.py
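
As a side note on the comment inside the loop above: the short sketch below (not part of DL.EyeSight, written here for illustration) demonstrates that cv2.blur() drops the trailing channel axis of a single-channel image, which is why the augmenter restores it with np.newaxis.

import cv2
import numpy as np

single_channel = np.random.randint(0, 256, size=(32, 32, 1), dtype=np.uint8)
blurred = cv2.blur(single_channel, (3, 3))
print(single_channel.shape, blurred.shape)   # (32, 32, 1) (32, 32)
blurred = blurred[..., np.newaxis]           # restore the channel axis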

Example 10: preprocess

# Required module: import cv2 [as alias]
# Or: from cv2 import blur [as alias]
def preprocess(image):
	# load the image (re-read from the module-level `args` mapping, so the
	# `image` argument passed in is effectively ignored)
	image = cv2.imread(args["image"])

	#resize image
	image = cv2.resize(image,None,fx=0.7, fy=0.7, interpolation = cv2.INTER_CUBIC)

	#convert to grayscale
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

	#calculate x & y gradient
	gradX = cv2.Sobel(gray, ddepth = cv2.CV_32F, dx = 1, dy = 0, ksize = -1)
	gradY = cv2.Sobel(gray, ddepth = cv2.CV_32F, dx = 0, dy = 1, ksize = -1)

	# subtract the y-gradient from the x-gradient
	gradient = cv2.subtract(gradX, gradY)
	gradient = cv2.convertScaleAbs(gradient)

	# blur the image
	blurred = cv2.blur(gradient, (3, 3))

	# threshold the blurred gradient image and return the binary mask
	(_, thresh) = cv2.threshold(blurred, 225, 255, cv2.THRESH_BINARY)
	return thresh 
Author: pyxploiter, Project: Barcode-Detection-and-Decoding, Lines: 27, Source: barcodeD&D_zbar.py

Example 11: blend_non_transparent

# Required module: import cv2 [as alias]
# Or: from cv2 import blur [as alias]
def blend_non_transparent(face_img, overlay_img):
    # Let's find a mask covering all the non-black (foreground) pixels
    # NB: We need to do this on grayscale version of the image
    gray_overlay = cv2.cvtColor(overlay_img, cv2.COLOR_BGR2GRAY)
    overlay_mask = cv2.threshold(gray_overlay, 1, 255, cv2.THRESH_BINARY)[1]

    # Let's shrink and blur it a little to make the transitions smoother...
    overlay_mask = cv2.erode(overlay_mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
    overlay_mask = cv2.blur(overlay_mask, (3, 3))

    # And the inverse mask, that covers all the black (background) pixels
    background_mask = 255 - overlay_mask

    # Turn the masks into three channel, so we can use them as weights
    overlay_mask = cv2.cvtColor(overlay_mask, cv2.COLOR_GRAY2BGR)
    background_mask = cv2.cvtColor(background_mask, cv2.COLOR_GRAY2BGR)

    # Create a masked out face image, and masked out overlay
    # We convert the images to floating point in range 0.0 - 1.0
    face_part = (face_img * (1 / 255.0)) * (background_mask * (1 / 255.0))
    overlay_part = (overlay_img * (1 / 255.0)) * (overlay_mask * (1 / 255.0))

    # And finally just add them together, and rescale it back to an 8bit integer image
    return np.uint8(cv2.addWeighted(face_part, 255.0, overlay_part, 255.0, 0.0)) 
Author: guille0, Project: songoku, Lines: 26, Source: helpers.py

Example 12: downsample

# Required module: import cv2 [as alias]
# Or: from cv2 import blur [as alias]
def downsample(img, scale=None, output_wh=None, max_side=None, min_side=None, block_size=None, mode=None):
    if max_side is not None:
        cur_max_side = max(img.shape[:2])
        scale = max_side / cur_max_side
    if min_side is not None:
        cur_min_side = min(img.shape[:2])
        scale = min_side / cur_min_side
    if scale is not None:
        output_wh = (int(np.round(img.shape[1]*scale)),
                     int(np.round(img.shape[0]*scale)))
    if block_size is not None:
        output_wh = (img.shape[1]//block_size, img.shape[0]//block_size)
    else:
        block_size = img.shape[1]//output_wh[0]
    if block_size > 1:
        img = cv2.blur(img, (block_size, block_size))
    return cv2.resize(img, output_wh, interpolation=cv2.INTER_AREA if mode is None else mode) 
Author: kylemcdonald, Project: python-utils, Lines: 19, Source: imutil.py

Example 13: data_augment

# Required module: import cv2 [as alias]
# Or: from cv2 import blur [as alias]
def data_augment(xb,yb):
    if np.random.random() < 0.25:
        xb,yb = rotate(xb,yb,90)
    if np.random.random() < 0.25:
        xb,yb = rotate(xb,yb,180)
    if np.random.random() < 0.25:
        xb,yb = rotate(xb,yb,270)
    if np.random.random() < 0.25:
        xb = cv2.flip(xb, 1)  # flipcode > 0: flip around the y-axis
        yb = cv2.flip(yb, 1)

    # apply blurring to the original image
    if np.random.random() < 0.25:
        xb = random_gamma_transform(xb,1.0)
        
    if np.random.random() < 0.25:
        xb = blur(xb)
    
    if np.random.random() < 0.2:
        xb = add_noise(xb)
        
    return xb,yb

# create the data 
Author: 1044197988, Project: Semantic-segmentation-of-remote-sensing-images, Lines: 26, Source: 生成數據並增強.py

Example 14: unwarp

# Required module: import cv2 [as alias]
# Or: from cv2 import blur [as alias]
def unwarp(img, bm):
    w,h=img.shape[0],img.shape[1]
    bm = bm.transpose(1, 2).transpose(2, 3).detach().cpu().numpy()[0,:,:,:]
    bm0=cv2.blur(bm[:,:,0],(3,3))
    bm1=cv2.blur(bm[:,:,1],(3,3))
    bm0=cv2.resize(bm0,(h,w))
    bm1=cv2.resize(bm1,(h,w))
    bm=np.stack([bm0,bm1],axis=-1)
    bm=np.expand_dims(bm,0)
    bm=torch.from_numpy(bm).double()

    img = img.astype(float) / 255.0
    img = img.transpose((2, 0, 1))
    img = np.expand_dims(img, 0)
    img = torch.from_numpy(img).double()

    res = F.grid_sample(input=img, grid=bm)
    res = res[0].numpy().transpose((1, 2, 0))

    return res 
Author: cvlab-stonybrook, Project: DewarpNet, Lines: 22, Source: infer.py

Example 15: set_blur_and_threshold

# Required module: import cv2 [as alias]
# Or: from cv2 import blur [as alias]
def set_blur_and_threshold(self,
                               blur_kernel=0, blur_type="gaussian", blur_passes=1, threshold=0):
        """ Set the internal blur kernel and threshold amount for returned masks

        Parameters
        ----------
        blur_kernel: int, optional
            The kernel size, in pixels, of the gaussian blur to apply to the mask. Set to 0 for no
            blurring. Should be odd; if an even number (other than 0) is passed in, it is
            rounded up to the next odd number. Default: 0
        blur_type: ["gaussian", "normalized"], optional
            The blur type to use. ``gaussian`` or ``normalized`` box filter. Default: ``gaussian``
        blur_passes: int, optional
            The number of passes to perform when blurring. Default: 1
        threshold: int, optional
            The threshold amount to minimize/maximize mask values to 0 and 100. Percentage value.
            Default: 0
        """
        logger.trace("blur_kernel: %s, threshold: %s", blur_kernel, threshold)
        if blur_type is not None:
            blur_kernel += 0 if blur_kernel == 0 or blur_kernel % 2 == 1 else 1
            self._blur["kernel"] = blur_kernel
            self._blur["type"] = blur_type
            self._blur["passes"] = blur_passes
        self._threshold = (threshold / 100.0) * 255.0 
Author: deepfakes, Project: faceswap, Lines: 27, Source: faces_detect.py
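
As a supplement to the docstring above, the short sketch below (not part of faceswap, written here for illustration) contrasts the two blur types it names: "gaussian" (cv2.GaussianBlur) versus the "normalized" box filter (cv2.blur). The 64x64 mask and the kernel size of 5 are arbitrary example values.

import cv2
import numpy as np

mask = np.zeros((64, 64), dtype=np.float32)
mask[16:48, 16:48] = 255.0                  # a hard-edged square mask

gauss = cv2.GaussianBlur(mask, (5, 5), 0)   # Gaussian-weighted neighborhood average
box = cv2.blur(mask, (5, 5))                # plain (normalized box) average
print(float(gauss[16, 16]), float(box[16, 16]))   # the two filters soften the edge differently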


Note: The cv2.blur method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please refer to the License of the corresponding project before distributing or using the code. Do not reproduce without permission.