

Python cv2.dilate Method Code Examples

This article collects typical code examples of the cv2.dilate method in Python. If you have been wondering how cv2.dilate works, how to call it, or what it looks like in real code, the curated examples below may help. You can also browse further usage examples from the cv2 module.


The sections below show 15 code examples of the cv2.dilate method, sorted by popularity by default. Each snippet is quoted verbatim from an open-source project, so project-specific constants and helper functions may appear without being defined here.
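
Before the project examples, here is a minimal, self-contained sketch of the cv2.dilate call itself; the tiny test image, kernel size, and iteration count are arbitrary choices for illustration and do not come from any of the projects below.

import cv2
import numpy as np

# Tiny binary image with a single white pixel in the centre.
img = np.zeros((7, 7), dtype=np.uint8)
img[3, 3] = 255

# 3x3 rectangular structuring element; dilation grows white regions.
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
dilated = cv2.dilate(img, kernel, iterations=1)

print(np.count_nonzero(img), 'white pixels before,',
      np.count_nonzero(dilated), 'after')  # 1 before, 9 after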

Example 1: prediction

# Required module: import cv2 [as alias]
# Or: from cv2 import dilate [as alias]
def prediction(self, image):
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image = cv2.GaussianBlur(image, (21, 21), 0)
        if self.avg is None:
            self.avg = image.copy().astype(float)
        cv2.accumulateWeighted(image, self.avg, 0.5)
        frameDelta = cv2.absdiff(image, cv2.convertScaleAbs(self.avg))
        thresh = cv2.threshold(
                frameDelta, DELTA_THRESH, 255,
                cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts = cv2.findContours(
                thresh.copy(), cv2.RETR_EXTERNAL,
                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        self.avg = image.copy().astype(float)
        return cnts 
Developer: cristianpb, Project: object-detection, Lines: 19, Source: motion.py

Example 2: movement

# Required module: import cv2 [as alias]
# Or: from cv2 import dilate [as alias]
def movement(mat_1,mat_2):
    mat_1_gray     = cv2.cvtColor(mat_1.copy(),cv2.COLOR_BGR2GRAY)
    mat_1_gray     = cv2.blur(mat_1_gray,(blur1,blur1))
    _,mat_1_gray   = cv2.threshold(mat_1_gray,100,255,0)
    mat_2_gray     = cv2.cvtColor(mat_2.copy(),cv2.COLOR_BGR2GRAY)
    mat_2_gray     = cv2.blur(mat_2_gray,(blur1,blur1))
    _,mat_2_gray   = cv2.threshold(mat_2_gray,100,255,0)
    mat_2_gray     = cv2.bitwise_xor(mat_1_gray,mat_2_gray)
    mat_2_gray     = cv2.blur(mat_2_gray,(blur2,blur2))
    _,mat_2_gray   = cv2.threshold(mat_2_gray,70,255,0)
    mat_2_gray     = cv2.erode(mat_2_gray,np.ones((erodeval,erodeval)))
    mat_2_gray     = cv2.dilate(mat_2_gray,np.ones((4,4)))
    _, contours,__ = cv2.findContours(mat_2_gray,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 0:return True #If there were any movements
    return  False                    #if not


#Pedestrian Recognition Thread 
Developer: PiSimo, Project: PiCamNN, Lines: 20, Source: picam.py
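
The movement() function above relies on module-level constants blur1, blur2, and erodeval that picam.py defines elsewhere, and its three-value cv2.findContours unpacking assumes the OpenCV 3.x API. A minimal usage sketch, with assumed values for those constants and frames read from a hypothetical camera, might look like this:

import cv2

# Assumed tuning constants; picam.py defines its own values elsewhere.
blur1, blur2, erodeval = 11, 20, 7

cap = cv2.VideoCapture(0)   # hypothetical camera index
ok1, frame_1 = cap.read()
ok2, frame_2 = cap.read()
cap.release()

if ok1 and ok2:
    # movement() is the function from Example 2 above.
    print('movement detected' if movement(frame_1, frame_2) else 'no movement')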

Example 3: find_squares

# Required module: import cv2 [as alias]
# Or: from cv2 import dilate [as alias]
def find_squares(img):
    img = cv2.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv2.split(img):
        for thrs in xrange(0, 255, 26):
            if thrs == 0:
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
                if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])
                    if max_cos < 0.1:
                        squares.append(cnt)
    return squares 
Developer: makelove, Project: OpenCV-Python-Tutorial, Lines: 22, Source: squares.py

Example 4: find_components

# Required module: import cv2 [as alias]
# Or: from cv2 import dilate [as alias]
def find_components(im, max_components=16):
    """Dilate the image until there are just a few connected components.
    Returns contours for these components."""
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 10))
    dilation = dilate(im, kernel, 6)

    count = 21
    n = 0
    sigma = 0.000

    while count > max_components:
        n += 1
        sigma += 0.005
        result = cv2.findContours(dilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        if len(result) == 3:
            _, contours, hierarchy = result
        elif len(result) == 2:
            contours, hierarchy = result
        possible = find_likely_rectangles(contours, sigma)
        count = len(possible)

    return (dilation, possible, n) 
Developer: jlsutherland, Project: doc2text, Lines: 24, Source: page.py

Example 5: make_edge_smooth

# Required module: import cv2 [as alias]
# Or: from cv2 import dilate [as alias]
def make_edge_smooth(dataset_name, img_size) :
    check_folder('./dataset/{}/{}'.format(dataset_name, 'trainB_smooth'))

    file_list = glob('./dataset/{}/{}/*.*'.format(dataset_name, 'trainB'))
    save_dir = './dataset/{}/trainB_smooth'.format(dataset_name)

    kernel_size = 5
    kernel = np.ones((kernel_size, kernel_size), np.uint8)
    gauss = cv2.getGaussianKernel(kernel_size, 0)
    gauss = gauss * gauss.transpose(1, 0)

    for f in tqdm(file_list) :
        file_name = os.path.basename(f)

        bgr_img = cv2.imread(f)
        gray_img = cv2.imread(f, 0)

        bgr_img = cv2.resize(bgr_img, (img_size, img_size))
        pad_img = np.pad(bgr_img, ((2, 2), (2, 2), (0, 0)), mode='reflect')
        gray_img = cv2.resize(gray_img, (img_size, img_size))

        edges = cv2.Canny(gray_img, 100, 200)
        dilation = cv2.dilate(edges, kernel)

        gauss_img = np.copy(bgr_img)
        idx = np.where(dilation != 0)
        for i in range(np.sum(dilation != 0)):
            gauss_img[idx[0][i], idx[1][i], 0] = np.sum(
                np.multiply(pad_img[idx[0][i]:idx[0][i] + kernel_size, idx[1][i]:idx[1][i] + kernel_size, 0], gauss))
            gauss_img[idx[0][i], idx[1][i], 1] = np.sum(
                np.multiply(pad_img[idx[0][i]:idx[0][i] + kernel_size, idx[1][i]:idx[1][i] + kernel_size, 1], gauss))
            gauss_img[idx[0][i], idx[1][i], 2] = np.sum(
                np.multiply(pad_img[idx[0][i]:idx[0][i] + kernel_size, idx[1][i]:idx[1][i] + kernel_size, 2], gauss))

        cv2.imwrite(os.path.join(save_dir, file_name), gauss_img) 
Developer: taki0112, Project: CartoonGAN-Tensorflow, Lines: 37, Source: edge_smooth.py

Example 6: spline_transform_multi

# Required module: import cv2 [as alias]
# Or: from cv2 import dilate [as alias]
def spline_transform_multi(img, mask):
    bimask=mask>0
    M,N=np.where(bimask)
    w=np.ptp(N)+1
    h=np.ptp(M)+1
    kernel=cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    bound=cv2.dilate(bimask.astype('uint8'),kernel)-bimask
    y,x=np.where(bound>0)

    if x.size>4:
        newxy=thin_plate_transform(x,y,w,h,mask.shape[:2],num_points=5)

        new_img=cv2.remap(img,newxy,None,cv2.INTER_LINEAR)
        new_msk=cv2.remap(mask,newxy,None,cv2.INTER_NEAREST)
    elif x.size>0:
        new_img=img
        new_msk=mask
    return new_img,new_msk 
Developer: yelantingfeng, Project: pyLucid, Lines: 20, Source: lucidDream.py

Example 7: SeamlessClone_trimap

# Required module: import cv2 [as alias]
# Or: from cv2 import dilate [as alias]
def SeamlessClone_trimap(srcIm,dstIm,imMask,offX,offY):
    dstIm=dstIm.copy()
    bimsk=imMask>0

    new_msk=np.zeros(dstIm.shape[:2],dtype='uint8')
    new_msk[offY:offY+imMask.shape[0],offX:offX+imMask.shape[1]]=imMask

    dstIm[new_msk>0]=srcIm[imMask>0]

    kernel=cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    bimsk=bimsk.astype('uint8')
    bdmsk=cv2.dilate(bimsk,kernel)-cv2.erode(bimsk,kernel)
    mask255=bdmsk>0
    mask255=(mask255*255).astype('uint8')

    offCenter=(int(offX+imMask.shape[1]/2),int(offY+imMask.shape[0]/2))

    if np.any(bdmsk>0):
        outputIm=cv2.seamlessClone(srcIm,dstIm,mask255,offCenter,cv2.MIXED_CLONE)
    else:
        outputIm=dstIm
        # when an object has very few pixels, bdmsk will be all zeros, which would cause a segmentation fault.

    return outputIm,new_msk 
Developer: yelantingfeng, Project: pyLucid, Lines: 26, Source: lucidDream.py

Example 8: __getitem__

# Required module: import cv2 [as alias]
# Or: from cv2 import dilate [as alias]
def __getitem__(self, idx):
        '''

        :param idx: Index of the image file
        :return: returns the image and corresponding label file.
        '''
        image_name = self.imList[idx]
        label_name = self.labelList[idx]
        image = cv2.imread(image_name)
        label = cv2.imread(label_name, 0)
        label_bool = 255 * ((label > 200).astype(np.uint8))

        if self.transform:
            [image, label] = self.transform(image, label_bool)
        if self.edge:
            np_label = 255 * label.data.numpy().astype(np.uint8)
            kernel = np.ones((self.kernel_size , self.kernel_size ), np.uint8)
            erosion = cv2.erode(np_label, kernel, iterations=1)
            dilation = cv2.dilate(np_label, kernel, iterations=1)
            boundary = dilation - erosion
            edgemap = 255 * torch.ones_like(label)
            edgemap[torch.from_numpy(boundary) > 0] = label[torch.from_numpy(boundary) > 0]
            return (image, label, edgemap)
        else:
            return (image, label) 
Developer: clovaai, Project: ext_portrait_segmentation, Lines: 27, Source: DataSet.py

Example 9: process_images

# Required module: import cv2 [as alias]
# Or: from cv2 import dilate [as alias]
def process_images(self, clean, mask):
        i, j, h, w = RandomResizedCrop.get_params(clean, scale=(0.5, 2.0), ratio=(3. / 4., 4. / 3.))
        clean_img = resized_crop(clean, i, j, h, w, size=self.img_size, interpolation=Image.BICUBIC)
        mask = resized_crop(mask, i, j, h, w, self.img_size, interpolation=Image.BICUBIC)

        # get mask before further image augment
        # mask = self.get_mask(raw_img, clean_img)

        if self.add_random_masks:
            mask = random_masks(mask.copy(), size=self.img_size[0], offset=10)
        mask = np.where(np.array(mask) > brightness_difference * 255, np.uint8(255), np.uint8(0))
        mask = cv2.dilate(mask, np.ones((10, 10), np.uint8), iterations=1)

        mask = np.expand_dims(mask, -1)
        mask_t = to_tensor(mask)
        # mask_t = (mask_t > brightness_difference).float()

        # mask_t, _ = torch.max(mask_t, dim=0, keepdim=True)
        binary_mask = (1 - mask_t)  # valid positions are 1; holes are 0
        binary_mask = binary_mask.expand(3, -1, -1)
        clean_img = self.transformer(clean_img)
        corrupted_img = clean_img * binary_mask
        return corrupted_img, binary_mask, clean_img 
Developer: yu45020, Project: Text_Segmentation_Image_Inpainting, Lines: 25, Source: Dataloader.py

Example 10: coherence_filter

# Required module: import cv2 [as alias]
# Or: from cv2 import dilate [as alias]
def coherence_filter(img, sigma = 11, str_sigma = 11, blend = 0.5, iter_n = 4):
    h, w = img.shape[:2]

    for i in xrange(iter_n):
        print(i)

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        eigen = cv2.cornerEigenValsAndVecs(gray, str_sigma, 3)
        eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
        x, y = eigen[:,:,1,0], eigen[:,:,1,1]

        gxx = cv2.Sobel(gray, cv2.CV_32F, 2, 0, ksize=sigma)
        gxy = cv2.Sobel(gray, cv2.CV_32F, 1, 1, ksize=sigma)
        gyy = cv2.Sobel(gray, cv2.CV_32F, 0, 2, ksize=sigma)
        gvv = x*x*gxx + 2*x*y*gxy + y*y*gyy
        m = gvv < 0

        ero = cv2.erode(img, None)
        dil = cv2.dilate(img, None)
        img1 = ero
        img1[m] = dil[m]
        img = np.uint8(img*(1.0 - blend) + img1*blend)
    print('done')
    return img 
Developer: makelove, Project: OpenCV-Python-Tutorial, Lines: 26, Source: coherence.py

Example 11: skeletonize

# Required module: import cv2 [as alias]
# Or: from cv2 import dilate [as alias]
def skeletonize(image_in):
    '''Inputs and grayscale image and outputs a binary skeleton image'''
    size = np.size(image_in)
    skel = np.zeros(image_in.shape, np.uint8)

    ret, image_edit = cv2.threshold(image_in, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3,3))
    done = False

    while not done:
        eroded = cv2.erode(image_edit, element)
        temp = cv2.dilate(eroded, element)
        temp = cv2.subtract(image_edit, temp)
        skel = cv2.bitwise_or(skel, temp)
        image_edit = eroded.copy()

        zeros = size - cv2.countNonZero(image_edit)
        if zeros == size:
            done = True

    return skel 
Developer: petern3, Project: crop_row_detection, Lines: 23, Source: line_detect_2.py

Example 12: pre_process_image

# Required module: import cv2 [as alias]
# Or: from cv2 import dilate [as alias]
def pre_process_image(img, skip_dilate=False):
	"""Uses a blurring function, adaptive thresholding and dilation to expose the main features of an image."""

	# Gaussian blur with a kernel size (height, width) of 9.
	# Note that kernel sizes must be positive and odd and the kernel must be square.
	proc = cv2.GaussianBlur(img.copy(), (9, 9), 0)

	# Adaptive threshold using 11 nearest neighbour pixels
	proc = cv2.adaptiveThreshold(proc, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)

	# Invert colours, so gridlines have non-zero pixel values.
	# Necessary to dilate the image, otherwise will look like erosion instead.
	proc = cv2.bitwise_not(proc, proc)

	if not skip_dilate:
		# Dilate the image to increase the size of the grid lines.
		kernel = np.array([[0., 1., 0.], [1., 1., 1.], [0., 1., 0.]],np.uint8)
		proc = cv2.dilate(proc, kernel)

	return proc 
Developer: aakashjhawar, Project: SolveSudoku, Lines: 22, Source: SudokuExtractor.py

Example 13: generate_edge

# Required module: import cv2 [as alias]
# Or: from cv2 import dilate [as alias]
def generate_edge(label, edge_width=10, area_thrs=200):
    label_list = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33]
    edge = np.zeros_like(label, dtype=np.uint8)
    for i in np.unique(label):
        # have no instance
        if i < 1000 or (i // 1000) not in label_list:
            continue
        
        # filter out small objects
        mask = (label == i).astype(np.uint8)
        if mask.sum() < area_thrs:
            continue
        
        rmin, rmax, cmin, cmax = _get_bbox(mask)
        mask_edge = _generate_edge(mask[rmin:rmax+1, cmin:cmax+1])
        edge[rmin:rmax+1, cmin:cmax+1][mask_edge > 0] = 255
    
    # dilation on edge
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (edge_width, edge_width))
    edge = cv2.dilate(edge, kernel)
    return edge 
Developer: openseg-group, Project: openseg.pytorch, Lines: 23, Source: instance_edge_generator.py

Example 14: find_components

# Required module: import cv2 [as alias]
# Or: from cv2 import dilate [as alias]
def find_components(edges, max_components=16):
    """Dilate the image until there are just a few connected components.

    Returns contours for these components."""
    # Perform increasingly aggressive dilation until there are just a few
    # connected components.
    count = 21
    dilation = 5
    n = 1
    while count > 16:
        n += 1
        dilated_image = dilate(edges, N=3, iterations=n)
        contours, hierarchy = cv2.findContours(dilated_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        count = len(contours)
    #print dilation
    #Image.fromarray(edges).show()
    #Image.fromarray(255 * dilated_image).show()
    return contours 
Developer: danvk, Project: oldnyc, Lines: 20, Source: crop_morphology.py

Example 15: findPiccircle

# Required module: import cv2 [as alias]
# Or: from cv2 import dilate [as alias]
def findPiccircle(frame, color):

	hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)	
	color_dict = color_list.getColorList()
	mask = cv2.inRange(hsv, color_dict[color][0], color_dict[color][1])
	dilated = cv2.dilate(mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)), iterations=2)
	## minRadius and maxRadius may need adjusting to constrain the size of detected circles and reject other interference
	circles = cv2.HoughCircles(dilated, cv2.HOUGH_GRADIENT, 1, 1000, param1=15, param2=10, minRadius=15, maxRadius=50)
	
	center = None
	if circles is not None:
		x, y, radius = circles[0][0]
		center = (x, y)
		cv2.circle(frame, center, radius, (0, 255, 0), 2)
		cv2.circle(frame, center, 2, (0,255,0), -1, 8, 0 );
		print('Circle center: {}, {}'.format(x, y))
		
	cv2.imshow('result', frame)	
	
	if center != None:
		return center 
Developer: yzy1996, Project: Python-Code, Lines: 23, Source: detect_picture_color_circle.py


Note: The cv2.dilate examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution and use should follow each project's license. Do not reproduce without permission.