

Python cv2.erode Method Code Examples

This article collects typical usage examples of the cv2.erode method in Python. If you are unsure exactly how to call cv2.erode, what its arguments do, or what real-world code using it looks like, the curated examples below should help. You can also explore further usage examples from the cv2 module.


The sections below present 15 code examples of cv2.erode, sorted by popularity by default.
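Before the examples, here is a minimal, self-contained sketch of a basic cv2.erode call. The synthetic test image, kernel size, and variable names below are illustrative assumptions for this sketch, not code taken from any of the projects listed further down.

# A tiny demo: erosion shrinks the white (foreground) region of a binary image.
import cv2
import numpy as np

# Synthetic input: a white 40x40 square on a black 100x100 background.
img = np.zeros((100, 100), dtype=np.uint8)
img[30:70, 30:70] = 255

# A 5x5 rectangular structuring element trims two pixels from every side.
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
eroded = cv2.erode(img, kernel, iterations=1)

print(cv2.countNonZero(img), cv2.countNonZero(eroded))  # 1600 -> 1296 (a 36x36 square remains)

Most of the examples below follow this same pattern, varying only how the kernel is built (np.ones, cv2.getStructuringElement, or None for the default 3x3 kernel) and the number of iterations.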

Example 1: movement

# Required module: import cv2 [as alias]
# Or: from cv2 import erode [as alias]
def movement(mat_1,mat_2):
    # blur1, blur2 and erodeval are tuning parameters defined elsewhere in picam.py
    mat_1_gray     = cv2.cvtColor(mat_1.copy(),cv2.COLOR_BGR2GRAY)
    mat_1_gray     = cv2.blur(mat_1_gray,(blur1,blur1))
    _,mat_1_gray   = cv2.threshold(mat_1_gray,100,255,0)
    mat_2_gray     = cv2.cvtColor(mat_2.copy(),cv2.COLOR_BGR2GRAY)
    mat_2_gray     = cv2.blur(mat_2_gray,(blur1,blur1))
    _,mat_2_gray   = cv2.threshold(mat_2_gray,100,255,0)
    mat_2_gray     = cv2.bitwise_xor(mat_1_gray,mat_2_gray)
    mat_2_gray     = cv2.blur(mat_2_gray,(blur2,blur2))
    _,mat_2_gray   = cv2.threshold(mat_2_gray,70,255,0)
    mat_2_gray     = cv2.erode(mat_2_gray,np.ones((erodeval,erodeval)))
    mat_2_gray     = cv2.dilate(mat_2_gray,np.ones((4,4)))
    _, contours,__ = cv2.findContours(mat_2_gray,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    return len(contours) > 0         # True if any movement was detected


#Pedestrian Recognition Thread 
Developer: PiSimo, Project: PiCamNN, Lines: 20, Source: picam.py

Example 2: _pre_process_input_minimal

# Required module: import cv2 [as alias]
# Or: from cv2 import erode [as alias]
def _pre_process_input_minimal(self, img, mask, t, darker_fg=True):
        if self._buff_grey is None:
            self._buff_grey = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
            if mask is None:
                mask = np.ones_like(self._buff_grey) * 255

        cv2.cvtColor(img,cv2.COLOR_BGR2GRAY, self._buff_grey)

        cv2.erode(self._buff_grey, self._erode_kern, dst=self._buff_grey)

        if darker_fg:
            cv2.subtract(255, self._buff_grey, self._buff_grey)


        if mask is not None:
            cv2.bitwise_and(self._buff_grey, mask, self._buff_grey)
            return self._buff_grey 
Developer: gilestrolab, Project: ethoscope, Lines: 19, Source: single_roi_tracker.py

Example 3: SeamlessClone_trimap

# Required module: import cv2 [as alias]
# Or: from cv2 import erode [as alias]
def SeamlessClone_trimap(srcIm,dstIm,imMask,offX,offY):
    dstIm=dstIm.copy()
    bimsk=imMask>0

    new_msk=np.zeros(dstIm.shape[:2],dtype='uint8')
    new_msk[offY:offY+imMask.shape[0],offX:offX+imMask.shape[1]]=imMask

    dstIm[new_msk>0]=srcIm[imMask>0]

    kernel=cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    bimsk=bimsk.astype('uint8')
    bdmsk=cv2.dilate(bimsk,kernel)-cv2.erode(bimsk,kernel)
    mask255=bdmsk>0
    mask255=(mask255*255).astype('uint8')

    offCenter=(int(offX+imMask.shape[1]/2),int(offY+imMask.shape[0]/2))

    if np.any(bdmsk>0):
        outputIm=cv2.seamlessClone(srcIm,dstIm,mask255,offCenter,cv2.MIXED_CLONE)
    else:
        # When an object has very few pixels, bdmsk can be entirely zero,
        # which would make seamlessClone segfault, so fall back to the plain paste.
        outputIm=dstIm

    return outputIm,new_msk 
Developer: yelantingfeng, Project: pyLucid, Lines: 26, Source: lucidDream.py

Example 4: __init__

# Required module: import cv2 [as alias]
# Or: from cv2 import erode [as alias]
def __init__(self, fin, scale=1.0, fmask=None):
            self.fin = fin
            # read in distort
            with open(fin, 'r') as f:
                header = f.readline().rstrip()
                chunks = re.sub(r'[^0-9,]', '', header).split(',')
                self.mapu = np.zeros((int(chunks[1]), int(chunks[0])),
                                     dtype=np.float32)
                self.mapv = np.zeros((int(chunks[1]), int(chunks[0])),
                                     dtype=np.float32)
                for line in f.readlines():
                    chunks = line.rstrip().split(' ')
                    self.mapu[int(chunks[0]), int(chunks[1])] = float(chunks[3])
                    self.mapv[int(chunks[0]), int(chunks[1])] = float(chunks[2])
            # generate a mask
            self.mask = np.ones(self.mapu.shape, dtype=np.uint8)
            self.mask = cv2.remap(self.mask, self.mapu, self.mapv, cv2.INTER_LINEAR)
            kernel = np.ones((30, 30), np.uint8)
            self.mask = cv2.erode(self.mask, kernel, iterations=1)
            # crop black regions out
            h, w = self.mask.shape
            self.x_lim = [f(np.where(self.mask[int(h/2), :])[0])
                          for f in [np.min, np.max]]
            self.y_lim = [f(np.where(self.mask[:, int(w/2)])[0])
                          for f in [np.min, np.max]] 
Developer: ethz-asl, Project: hierarchical_loc, Lines: 27, Source: nclt.py

Example 5: __getitem__

# Required module: import cv2 [as alias]
# Or: from cv2 import erode [as alias]
def __getitem__(self, idx):
        '''

        :param idx: Index of the image file
        :return: returns the image and corresponding label file.
        '''
        image_name = self.imList[idx]
        label_name = self.labelList[idx]
        image = cv2.imread(image_name)
        label = cv2.imread(label_name, 0)
        label_bool = 255 * ((label > 200).astype(np.uint8))

        if self.transform:
            [image, label] = self.transform(image, label_bool)
        if self.edge:
            np_label = 255 * label.data.numpy().astype(np.uint8)
            kernel = np.ones((self.kernel_size , self.kernel_size ), np.uint8)
            erosion = cv2.erode(np_label, kernel, iterations=1)
            dilation = cv2.dilate(np_label, kernel, iterations=1)
            boundary = dilation - erosion
            edgemap = 255 * torch.ones_like(label)
            edgemap[torch.from_numpy(boundary) > 0] = label[torch.from_numpy(boundary) > 0]
            return (image, label, edgemap)
        else:
            return (image, label) 
Developer: clovaai, Project: ext_portrait_segmentation, Lines: 27, Source: DataSet.py

Example 6: coherence_filter

# Required module: import cv2 [as alias]
# Or: from cv2 import erode [as alias]
def coherence_filter(img, sigma = 11, str_sigma = 11, blend = 0.5, iter_n = 4):
    h, w = img.shape[:2]

    for i in xrange(iter_n):  # xrange is Python 2; use range() under Python 3
        print(i)

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        eigen = cv2.cornerEigenValsAndVecs(gray, str_sigma, 3)
        eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
        x, y = eigen[:,:,1,0], eigen[:,:,1,1]

        gxx = cv2.Sobel(gray, cv2.CV_32F, 2, 0, ksize=sigma)
        gxy = cv2.Sobel(gray, cv2.CV_32F, 1, 1, ksize=sigma)
        gyy = cv2.Sobel(gray, cv2.CV_32F, 0, 2, ksize=sigma)
        gvv = x*x*gxx + 2*x*y*gxy + y*y*gyy
        m = gvv < 0

        ero = cv2.erode(img, None)
        dil = cv2.dilate(img, None)
        img1 = ero
        img1[m] = dil[m]
        img = np.uint8(img*(1.0 - blend) + img1*blend)
    print('done')
    return img 
Developer: makelove, Project: OpenCV-Python-Tutorial, Lines: 26, Source: coherence.py

Example 7: skeletonize

# Required module: import cv2 [as alias]
# Or: from cv2 import erode [as alias]
def skeletonize(image_in):
    '''Takes a grayscale image and returns a binary skeleton image.'''
    size = np.size(image_in)
    skel = np.zeros(image_in.shape, np.uint8)

    ret, image_edit = cv2.threshold(image_in, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3,3))
    done = False

    while not done:
        eroded = cv2.erode(image_edit, element)
        temp = cv2.dilate(eroded, element)
        temp = cv2.subtract(image_edit, temp)
        skel = cv2.bitwise_or(skel, temp)
        image_edit = eroded.copy()

        zeros = size - cv2.countNonZero(image_edit)
        if zeros == size:
            done = True

    return skel 
Developer: petern3, Project: crop_row_detection, Lines: 23, Source: line_detect_2.py

Example 8: blend_non_transparent

# Required module: import cv2 [as alias]
# Or: from cv2 import erode [as alias]
def blend_non_transparent(sprite, background_img):
    gray_overlay = cv2.cvtColor(background_img, cv2.COLOR_BGR2GRAY)
    overlay_mask = cv2.threshold(gray_overlay, 1, 255, cv2.THRESH_BINARY)[1]

    overlay_mask = cv2.erode(overlay_mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
    overlay_mask = cv2.blur(overlay_mask, (3, 3))

    background_mask = 255 - overlay_mask

    overlay_mask = cv2.cvtColor(overlay_mask, cv2.COLOR_GRAY2BGR)
    background_mask = cv2.cvtColor(background_mask, cv2.COLOR_GRAY2BGR)

    sprite_part = (sprite * (1 / 255.0)) * (background_mask * (1 / 255.0))
    overlay_part = (background_img * (1 / 255.0)) * (overlay_mask * (1 / 255.0))

    return np.uint8(cv2.addWeighted(sprite_part, 255.0, overlay_part, 255.0, 0.0)) 
Developer: guille0, Project: hazymaze, Lines: 18, Source: helpers.py

Example 9: __call__

# Required module: import cv2 [as alias]
# Or: from cv2 import erode [as alias]
def __call__(self, sample):
        alpha = sample['alpha']
        # Adobe 1K
        fg_width = np.random.randint(1, 30)
        bg_width = np.random.randint(1, 30)
        fg_mask = (alpha + 1e-5).astype(np.int).astype(np.uint8)
        bg_mask = (1 - alpha + 1e-5).astype(np.int).astype(np.uint8)
        fg_mask = cv2.erode(fg_mask, self.erosion_kernels[fg_width])
        bg_mask = cv2.erode(bg_mask, self.erosion_kernels[bg_width])

        trimap = np.ones_like(alpha) * 128
        trimap[fg_mask == 1] = 255
        trimap[bg_mask == 1] = 0

        sample['trimap'] = trimap
        return sample 
Developer: Yaoyi-Li, Project: GCA-Matting, Lines: 18, Source: data_generator.py

Example 10: get_max_area_contour

# Required module: import cv2 [as alias]
# Or: from cv2 import erode [as alias]
def get_max_area_contour(input_image):
        # Get the contours.
        expected_gray = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY)
        blur = cv2.GaussianBlur(expected_gray, (41, 41), 0)
        thresh = cv2.threshold(blur, 50, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.erode(thresh, None, iterations=2)
        thresh = cv2.dilate(thresh, None, iterations=2)
        _, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        # Find the biggest area
        try:
            if len(contours) > 0:
                max_area_contour = max(contours, key=cv2.contourArea)
                return max_area_contour
        except ValueError as error:
            print(error) 
Developer: GalBrandwine, Project: HalloPy, Lines: 18, Source: image_comp_tool.py

Example 11: blend_non_transparent

# Required module: import cv2 [as alias]
# Or: from cv2 import erode [as alias]
def blend_non_transparent(face_img, overlay_img):
    # Let's find a mask covering all the non-black (foreground) pixels
    # NB: We need to do this on grayscale version of the image
    gray_overlay = cv2.cvtColor(overlay_img, cv2.COLOR_BGR2GRAY)
    overlay_mask = cv2.threshold(gray_overlay, 1, 255, cv2.THRESH_BINARY)[1]

    # Let's shrink and blur it a little to make the transitions smoother...
    overlay_mask = cv2.erode(overlay_mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
    overlay_mask = cv2.blur(overlay_mask, (3, 3))

    # And the inverse mask, that covers all the black (background) pixels
    background_mask = 255 - overlay_mask

    # Turn the masks into three channel, so we can use them as weights
    overlay_mask = cv2.cvtColor(overlay_mask, cv2.COLOR_GRAY2BGR)
    background_mask = cv2.cvtColor(background_mask, cv2.COLOR_GRAY2BGR)

    # Create a masked out face image, and masked out overlay
    # We convert the images to floating point in range 0.0 - 1.0
    face_part = (face_img * (1 / 255.0)) * (background_mask * (1 / 255.0))
    overlay_part = (overlay_img * (1 / 255.0)) * (overlay_mask * (1 / 255.0))

    # And finally just add them together, and rescale it back to an 8bit integer image
    return np.uint8(cv2.addWeighted(face_part, 255.0, overlay_part, 255.0, 0.0)) 
Developer: guille0, Project: songoku, Lines: 26, Source: helpers.py

Example 12: check_if_top_is_unreliable

# Required module: import cv2 [as alias]
# Or: from cv2 import erode [as alias]
def check_if_top_is_unreliable(mean_pred, albu_pred):
    unreliable = np.zeros_like(albu_pred)
    rows, cols = unreliable.shape
    unreliable[(albu_pred > 30) & (albu_pred < 210)] = 255
    unreliable = cv2.erode(unreliable, (55, 55), iterations=10)
    unreliable = unreliable[0:rows // 2, ...]
    biggest = biggest_contour(unreliable)
    if biggest is None:
        return None
    if cv2.contourArea(biggest) > 40000:
        x, y, w, h = cv2.boundingRect(biggest)
        x, y, w, h = max(x - 50, 0), y - 50, w + 100, h + 100
        mask = (albu_pred > 55).astype(np.uint8) * 255
        c = biggest_contour(mask[y:y + h, x:x + w])
        c = cv2.convexHull(c)
        mask[y:y + h, x:x + w] = cv2.drawContours(mask[y:y + h, x:x + w], [c], -1, 255, -1)
        result = (mean_pred > 127).astype(np.uint8) * 255
        result[y:y + h, x:x + w] = mask[y:y + h, x:x + w]
        return result
    return None 
Developer: asanakoy, Project: kaggle_carvana_segmentation, Lines: 22, Source: generate_sub_final_ensemble.py

Example 13: detect_shirt

# Required module: import cv2 [as alias]
# Or: from cv2 import erode [as alias]
def detect_shirt(self):
        
        
        #self.dst=cv2.inRange(self.norm_rgb,np.array([self.lb,self.lg,self.lr],np.uint8),np.array([self.b,self.g,self.r],np.uint8))
        self.dst=cv2.inRange(self.norm_rgb,np.array([20,20,20],np.uint8),np.array([255,110,80],np.uint8))
        cv2.threshold(self.dst,0,255,cv2.THRESH_OTSU+cv2.THRESH_BINARY)
        fg=cv2.erode(self.dst,None,iterations=2)
        #cv2.imshow("fore",fg)  
        bg=cv2.dilate(self.dst,None,iterations=3)
        _,bg=cv2.threshold(bg, 1,128,1)
        #cv2.imshow("back",bg)
        
        mark=cv2.add(fg,bg)
        mark32=np.int32(mark)
        cv2.watershed(self.norm_rgb,mark32)
        self.m=cv2.convertScaleAbs(mark32)
        _,self.m=cv2.threshold(self.m,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
        #cv2.imshow("final_tshirt",self.m)
        
        # cv2.cv.* constants are OpenCV 2.4 API; newer versions use cv2.RETR_EXTERNAL and cv2.CHAIN_APPROX_SIMPLE
        cntr,h=cv2.findContours(self.m,cv2.cv.CV_RETR_EXTERNAL,cv2.cv.CV_CHAIN_APPROX_SIMPLE)
               
        return self.m,cntr 
Developer: akash0x53, Project: virtual-dressing-room, Lines: 24, Source: Tshirt.py

Example 14: process

# Required module: import cv2 [as alias]
# Or: from cv2 import erode [as alias]
def process(im_name, bg_name):
    im = cv.imread(fg_path + im_name)
    a = cv.imread(a_path + im_name, 0)
    h, w = im.shape[:2]
    bg = cv.imread(bg_path + bg_name)
    bh, bw = bg.shape[:2]
    wratio = w / bw
    hratio = h / bh
    ratio = wratio if wratio > hratio else hratio
    if ratio > 1:
        bg = cv.resize(src=bg, dsize=(math.ceil(bw * ratio), math.ceil(bh * ratio)), interpolation=cv.INTER_CUBIC)

    return composite4(im, bg, a, w, h)


# def gen_trimap(alpha):
#     fg = np.array(np.equal(alpha, 255).astype(np.float32))
#     # fg = cv.erode(fg, kernel, iterations=np.random.randint(1, 3))
#     unknown = np.array(np.not_equal(alpha, 0).astype(np.float32))
#     unknown = cv.dilate(unknown, kernel, iterations=np.random.randint(1, 20))
#     trimap = fg * 255 + (unknown - fg) * 128
#     trimap = np.clip(trimap, 0, 255.0)
#     return trimap.astype(np.uint8) 
Developer: foamliu, Project: Mobile-Image-Matting, Lines: 25, Source: data_gen.py

Example 15: Make_boundary

# Required module: import cv2 [as alias]
# Or: from cv2 import erode [as alias]
def Make_boundary(self, label, k_size):
        np_label = label.data.numpy().astype(np.uint8)
        target_label = 255 * np_label * (np_label == 1).astype(np.uint8)
        ignore_label = np_label * (np_label == self.ignore_idx).astype(np.uint8)


        kernel = np.ones((k_size, k_size), np.uint8)
        erosion = cv2.erode(target_label, kernel, iterations=1)
        dilation = cv2.dilate(target_label, kernel, iterations=1)
        boundary = dilation - erosion
        edgemap = 255 * torch.ones_like(label)
        edgemap[torch.from_numpy(boundary) > 0] = label[torch.from_numpy(boundary) > 0]
        edgemap[torch.from_numpy(ignore_label)>0] = self.ignore_idx

        return edgemap 
Developer: clovaai, Project: ext_portrait_segmentation, Lines: 17, Source: DataSet.py


Note: The cv2.erode examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code. Do not republish without permission.