

Python cv2.connectedComponents Method Code Examples

This article collects and summarizes typical usage examples of the Python method cv2.connectedComponents. If you are wondering how exactly cv2.connectedComponents is used in Python, or want to see it applied in real code, the selected examples below may help. You can also explore further usage examples of the cv2 module to which this method belongs.


The following shows 13 code examples of cv2.connectedComponents, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
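Before walking through the examples, here is a minimal sketch of the basic cv2.connectedComponents API on a small synthetic binary image. The image contents and variable names are illustrative only and do not come from the projects referenced below; they simply show the return values that the examples rely on.

```python
import cv2
import numpy as np

# Build a small binary image with two separate blobs (8-bit, single channel).
binary = np.zeros((100, 100), dtype=np.uint8)
binary[10:30, 10:30] = 255
binary[60:80, 60:80] = 255

# connectedComponents returns the number of labels (including background label 0)
# and a label map with the same shape as the input.
num_labels, labels = cv2.connectedComponents(binary, connectivity=8)
print(num_labels)          # 3: background + two blobs
print(np.unique(labels))   # [0 1 2]

# connectedComponentsWithStats additionally returns per-component bounding boxes,
# pixel counts, and centroids, which avoids recomputing them by hand.
n, labels, stats, centroids = cv2.connectedComponentsWithStats(binary, connectivity=8)
print(stats[:, cv2.CC_STAT_AREA])  # pixel count of each component, background included
```

Several of the examples below pass `connectivity=4` instead of the default 8-connectivity; which one is appropriate depends on whether diagonally touching pixels should be merged into one component.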

Example 1: acquire_weights

# Required module: import cv2 [as alias]
# Or: from cv2 import connectedComponents [as alias]
def acquire_weights(UV_weight_npy):
    if os.path.isfile(UV_weight_npy):
        return np.load(UV_weight_npy)
    else:
        mask_name = UV_weight_npy.replace('weights.npy', 'mask.png')
        print(mask_name)
        UV_mask = imread(mask_name)
        if UV_mask.ndim == 3:
            UV_mask = UV_mask[:,:,0]
        ret, labels = connectedComponents(UV_mask, connectivity=4)
        unique, counts = np.unique(labels, return_counts=True)
        print(unique, counts)
        
        UV_weights = np.zeros_like(UV_mask).astype(np.float32)
        for id, count in zip(unique, counts):
            if id == 0:
                continue
            indices = np.argwhere(labels == id)
            UV_weights[indices[:,0], indices[:,1]] = 1 / count
        
        UV_weights *= np.prod(UV_mask.shape)   # adjust loss to [0,10] level.
        np.save(UV_weight_npy, UV_weights)
        return UV_weights 
Developer ID: Lotayou, Project: densebody_pytorch, Lines of code: 25, Source file: networks.py

Example 2: local_max_roll

# Required module: import cv2 [as alias]
# Or: from cv2 import connectedComponents [as alias]
def local_max_roll(fm, k0, k1, diff):
    max_ls = []
    for ksize in range(k0, k1):
        selem = disk(ksize)
        fm_max = local_max(fm, selem, diff)
        max_ls.append(fm_max)
    lmax = (np.stack(max_ls, axis=0).sum(axis=0) > 0).astype(np.uint8)
    nlab, max_lab = cv2.connectedComponents(lmax)
    max_res = np.zeros_like(lmax)
    for lb in range(1, nlab):
        area = max_lab == lb
        if np.sum(area) > 1:
            crds = tuple(int(np.median(c)) for c in np.where(area))
            max_res[crds] = 1
        else:
            max_res[np.where(area)] = 1
    return max_res 
Developer ID: DeniseCaiLab, Project: minian, Lines of code: 19, Source file: initialization.py

Example 3: get_target_position_fast

# Required module: import cv2 [as alias]
# Or: from cv2 import connectedComponents [as alias]
def get_target_position_fast(self, state, player_pos):
        state_cut = state[:player_pos[0],:,:]
        m1 = (state_cut[:, :, 0] == 245)
        m2 = (state_cut[:, :, 1] == 245)
        m3 = (state_cut[:, :, 2] == 245)
        m = np.uint8(np.float32(m1 * m2 * m3) * 255)
        b1, b2 = cv2.connectedComponents(m)
        for i in range(1, np.max(b2) + 1):
            x, y = np.where(b2 == i)
            if len(x) > 280 and len(x) < 310:
                r_x, r_y = x, y
        h, w = int(r_x.mean()), int(r_y.mean())
        return np.array([h, w]) 
Developer ID: Prinsphield, Project: Wechat_AutoJump, Lines of code: 15, Source file: nn_play.py

Example 4: get_target_position_fast

# Required module: import cv2 [as alias]
# Or: from cv2 import connectedComponents [as alias]
def get_target_position_fast(self, state, player_pos):
        state_cut = state[:player_pos[0],:,:]
        m1 = (state_cut[:, :, 0] == 245)
        m2 = (state_cut[:, :, 1] == 245)
        m3 = (state_cut[:, :, 2] == 245)
        m = np.uint8(np.float32(m1 * m2 * m3) * 255)
        b1, b2 = cv2.connectedComponents(m)
        for i in range(1, np.max(b2) + 1):
            x, y = np.where(b2 == i)
            # print('fast', len(x))
            if len(x) > 280 and len(x) < 310:
                r_x, r_y = x, y
        h, w = int(r_x.mean()), int(r_y.mean())
        return np.array([h, w]) 
Developer ID: Prinsphield, Project: Wechat_AutoJump, Lines of code: 16, Source file: play.py

Example 5: opencv_segmentation

# Required module: import cv2 [as alias]
# Or: from cv2 import connectedComponents [as alias]
def opencv_segmentation(mask, kernel=k_3x3, k=3):
    # noise removal
    opening = cv.morphologyEx(mask, cv.MORPH_OPEN, kernel, iterations=k)

    # sure background area
    sure_bg = cv.dilate(opening, kernel, iterations=k)

    # Finding sure foreground area
    dist_transform = cv.distanceTransform(opening,cv.DIST_L2, 5)
    ret, sure_fg = cv.threshold(dist_transform, 0.7*dist_transform.max(), 255, 0)

    # Finding unknown region
    sure_fg = np.uint8(sure_fg)
    unknown = cv.subtract(sure_bg, sure_fg)

    # Marker labelling
    ret, markers = cv.connectedComponents(sure_fg)

    # Add one to all labels so that sure background is not 0, but 1
    markers = markers + 1

    # Now, mark the region of unknown with zero
    markers[unknown > 0] = 0

    labels_ws = cv.watershed(cv.cvtColor(mask, cv.COLOR_GRAY2RGB), markers)

    if labels_ws.max() - 1 < 2:
        return [mask], labels_ws

    res_masks = []
    for idx in range(2,  labels_ws.max() + 1):
        m = labels_ws == idx
        if m.sum() > 5:
            m = cv.dilate(m.astype(np.uint8), kernel, iterations=1)
            res_masks.append(m)
    return res_masks, labels_ws 
Developer ID: gangadhar-p, Project: NucleiDetectron, Lines of code: 38, Source file: mask_morphology.py

Example 6: get_df

# Required module: import cv2 [as alias]
# Or: from cv2 import connectedComponents [as alias]
def get_df():
    ###################################################################
    data_dir = '/home/work/dsb/ext/TNBC_NucleiSegmentation'
    #correct directory address accordingly
    ######################################################################3
    assert os.path.exists(data_dir)
    image_folders = glob.glob(os.path.join(data_dir, 'Slide*'))
    image_folders = sorted(image_folders)
    df = []
    for image_folder in image_folders:
        image_fls = os.listdir(image_folder)        
        image_fls = sorted(image_fls)
        for image_fl in image_fls:
            filepath = os.path.join(image_folder, image_fl)
            image = cv2.imread(filepath)
            mask_path = filepath.replace('Slide', 'GT')
            mask_unet = cv2.imread(mask_path, 0)
            assert len(np.unique(mask_unet))==2
            _, mask = cv2.connectedComponents(mask_unet, connectivity=4)
            df.append({'image':image, 'mask':mask,
                       'image_path':filepath, 'mask_path': mask_path,
                       'id':image_fl[:-4], 'nb_instance':mask.max(),
                       'shape':image.shape})

    df = pd.DataFrame(df)
    save_to_cache(df, 'TNBC') 
Developer ID: jacobkie, Project: 2018DSB, Lines of code: 28, Source file: TNBC.py

Example 7: boundary2mask

# Required module: import cv2 [as alias]
# Or: from cv2 import connectedComponents [as alias]
def boundary2mask(boundary):
    assert len(np.unique(boundary))==2
    boundary_inverse = boundary.max()-boundary
    _, mask = cv2.connectedComponents(boundary_inverse, connectivity=4)
    vals, counts = np.unique(np.hstack([mask[0], mask[-1], mask[:,0], mask[:,-1]]),
                         return_counts = True)
    bg_label = vals[np.argmax(counts)]
    mask[mask==bg_label]=0
    mask, _ = renumber_mask(mask)
    fill_boundary(mask, boundary>0)
    return mask 
Developer ID: jacobkie, Project: 2018DSB, Lines of code: 13, Source file: 2009isbi.py

Example 8: check_sanity

# Required module: import cv2 [as alias]
# Or: from cv2 import connectedComponents [as alias]
def check_sanity(mask, boundary, folder, fl):    
    _, rest = cv2.connectedComponents(((boundary>0)&(mask==0)).astype('uint8'))
    vals, counts = np.unique(rest[rest>0], return_counts=True)
    vals = vals[counts>4]
    counts = counts[counts>4]

    if len(vals):
        plt.imsave(os.path.join('/home/work/dsb/ext/2009isbi/unclosed_boundary',
                            '{}_{}'.format(folder, fl)), (rest>0).astype('uint8')*80)
    return mask 
Developer ID: jacobkie, Project: 2018DSB, Lines of code: 12, Source file: 2009isbi.py

Example 9: watershed

# Required module: import cv2 [as alias]
# Or: from cv2 import connectedComponents [as alias]
def watershed(rgb, idx, mask):
  '''
    Get watershed transform from image
  '''

  # kernel definition
  kernel = np.ones((3, 3), np.uint8)

  # sure background area
  sure_bg = cv2.dilate(mask, kernel)
  sure_bg = np.uint8(sure_bg)
  # util.im_gray_plt(sure_bg,"sure back")

  # Finding sure foreground area
  dist_transform = cv2.distanceTransform(np.uint8(mask), cv2.DIST_L2, 3)
  # util.im_gray_plt(dist_transform,"dist transform")
  ret, sure_fg = cv2.threshold(
      dist_transform, 0.5 * dist_transform.max(), 255, 0)

  # Finding unknown region
  sure_fg = np.uint8(sure_fg)
  # util.im_gray_plt(sure_fg,"sure fore")

  unknown = cv2.subtract(sure_bg, sure_fg)
  # util.im_gray_plt(unknown,"unknown")

  # marker labelling
  ret, markers = cv2.connectedComponents(sure_fg)

  # add one to all labels so that sure background is not 0, but 1
  markers = markers + 1

  # mark the region of unknown with zero
  markers[unknown == 255] = 0

  # util.im_gray_plt(np.uint8(markers),"markers")

  # apply watershed
  markers = cv2.watershed(rgb, markers)

  # create limit mask
  mask = np.zeros(mask.shape, np.uint8)
  mask[markers == -1] = 255

  return mask 
Developer ID: PRBonn, Project: bonnet, Lines of code: 47, Source file: plant_features.py

Example 10: panoptic_merge

# Required module: import cv2 [as alias]
# Or: from cv2 import connectedComponents [as alias]
def panoptic_merge(self, insts, segms, dets):
        '''
        insts : [N, 28, 28]
        segms : [H, W]
        dets : [N, 6]

        return : category_id, id
        Note : for stuff : category_id == id
               for thing : category_id == id // 1000
        '''
        panoptic = np.zeros(segms.shape + (3,), dtype=np.uint16)
        unique_cls = np.unique(segms)
        stuff = np.zeros_like(segms)
        for _cls in unique_cls:
            if _cls in self._stuff_inst_mapping:
                stuff[segms == _cls] = 255
            else:
                stuff[segms == _cls] = self._stuff_mapping[_cls]
        panoptic[:, :, 2] = stuff

        # Merge Thing
        for _cls in self._inst_mapping:
            sdet = dets[dets[:,-1] == _cls]
            sinst = insts[dets[:,-1] == _cls]
            inst_id = 0
            for i, inst in enumerate(sinst):
                score = sdet[i, -2]
                if score >= self._panoptic_score_thresh:
                    inst_map = panoptic[:, :, 1]
                    valid_area = (inst_map == 0) & (inst == 1)
                    if np.count_nonzero(valid_area) > self._min_thing_area:
                        thing_cls = self._inst_mapping[_cls]
                        panoptic[:, :, 1][valid_area] = thing_cls * 1000 + inst_id
                        panoptic[:, :, 2][valid_area] = thing_cls * 1000 + inst_id
                        inst_id += 1

        # Merge Stuff
        stuff_map = panoptic[:, :, 1] == 0
        stuff_cls = np.unique(panoptic[:, :, 2][stuff_map])
        for _cls in stuff_cls:
            if _cls >= 0:
                stuff_seg = (panoptic[:, :, 2] == _cls).astype(np.uint8)
                num, componets = cv2.connectedComponents(stuff_seg)
                for i in range(num):
                    if i > 0:
                        com_map = componets == i
                        if np.count_nonzero(com_map) <= self._min_stuff_area:
                            panoptic[:, :, 2][com_map] = 255

        # Convert 255 to Unlabeled
        panoptic[panoptic == 255] = 0
        return panoptic

    # pylint: disable=arguments-differ, unused-argument 
Developer ID: Angzz, Project: panoptic-fpn-gluon, Lines of code: 56, Source file: citys_panoptic.py

Example 11: pse

# Required module: import cv2 [as alias]
# Or: from cv2 import connectedComponents [as alias]
def pse(kernals, min_area):
    kernal_num = len(kernals)
    pred = np.zeros(kernals[0].shape, dtype='int32')
    
    label_num, label = cv2.connectedComponents(kernals[kernal_num - 1], connectivity=4)
    
    for label_idx in range(1, label_num):
        if np.sum(label == label_idx) < min_area:
            label[label == label_idx] = 0

    q1 = queue.Queue(maxsize = 0)
    next_q = queue.Queue(maxsize = 0)
    points = np.array(np.where(label > 0)).transpose((1, 0))
    
    for point_idx in range(points.shape[0]):
        x, y = points[point_idx, 0], points[point_idx, 1]
        l = label[x, y]
        q1.put((x, y, l))
        pred[x, y] = l

    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    for kernal_idx in range(kernal_num - 2, -1, -1):
        kernal = kernals[kernal_idx].copy()
        while not q1.empty():
            (x, y, l) = q1.get()

            is_edge = True
            for j in range(4):
                tmpx = x + dx[j]
                tmpy = y + dy[j]
                if tmpx < 0 or tmpx >= kernal.shape[0] or tmpy < 0 or tmpy >= kernal.shape[1]:
                    continue
                if kernal[tmpx, tmpy] == 0 or pred[tmpx, tmpy] > 0:
                    continue

                q1.put((tmpx, tmpy, l))
                pred[tmpx, tmpy] = l
                is_edge = False
            if is_edge:
                next_q.put((x, y, l))
        
        # kernal[pred > 0] = 0
        q1, next_q = next_q, q1
        
        # points = np.array(np.where(pred > 0)).transpose((1, 0))
        # for point_idx in range(points.shape[0]):
        #     x, y = points[point_idx, 0], points[point_idx, 1]
        #     l = pred[x, y]
        #     queue.put((x, y, l))

    return pred 
Developer ID: rahzaazhar, Project: PAN-PSEnet, Lines of code: 54, Source file: pypse.py

Example 12: watershed

# Required module: import cv2 [as alias]
# Or: from cv2 import connectedComponents [as alias]
def watershed(src):
    # Change color to gray scale
    gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)

    # Use the Otsu's binarization
    thresh,bin_img = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    # print(thresh)  # print threshold

    # Noise removal
    kernel = np.ones((3,3), np.uint8)
    opening = cv2.morphologyEx(bin_img,cv2.MORPH_OPEN,kernel,iterations = 2)

    # Sure background area
    sure_bg = cv2.dilate(opening,kernel,iterations=3)

    # Finding sure foreground area
    dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)
    ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)

    # Finding unknown region
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg,sure_fg)

    # Marker labelling
    ret, markers = cv2.connectedComponents(sure_fg)

    # Add one to all labels so that sure background is not 0, but 1
    markers = markers+1

    # Now, mark the region of unknown with zero
    markers[unknown==255] = 0

    # Apply watershed
    markers = cv2.watershed(src,markers)
    src[markers == -1] = [255,0,0]

    # Check marker (If check markers, please import matplotlib)
    # plt.imshow(markers)
    # plt.show()

    # Check markers data
    # print(np.unique(markers,return_counts=True))

    return markers, src 
Developer ID: karaage0703, Project: python-image-processing, Lines of code: 46, Source file: watershed.py

Example 13: watershed

# Required module: import cv2 [as alias]
# Or: from cv2 import connectedComponents [as alias]
def watershed(src):
    """
    Performs a marker-based image segmentation using the watershed algorithm.
    :param src: 8-bit 1-channel image.
    :return: 32-bit single-channel image (map) of markers.
    """
    # cv2.imwrite('{}.png'.format(np.random.randint(1000)), src)
    gray = src.copy()
    img = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
    # h, w = gray.shape[:2]
    # block_size = (min(h, w) // 4 + 1) * 2 + 1
    # thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, block_size, 0)
    _ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # noise removal
    kernel = np.ones((3, 3), np.uint8)
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)

    # sure background area
    sure_bg = cv2.dilate(opening, kernel, iterations=3)

    # Finding sure foreground area
    dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
    # dist_transform = opening & gray
    # cv2.imshow('dist_transform', dist_transform)
    # _ret, sure_bg = cv2.threshold(dist_transform, 0.2 * dist_transform.max(), 255, cv2.THRESH_BINARY_INV)
    _ret, sure_fg = cv2.threshold(dist_transform, 0.2 * dist_transform.max(), 255, cv2.THRESH_BINARY)

    # Finding unknown region
    # sure_bg = np.uint8(sure_bg)
    sure_fg = np.uint8(sure_fg)
    # cv2.imshow('sure_fg', sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)

    # Marker label
    lingret, marker_map = cv2.connectedComponents(sure_fg)
    # Add one to all labels so that sure background is not 0, but 1
    marker_map = marker_map + 1

    # Now, mark the region of unknown with zero
    marker_map[unknown == 255] = 0

    marker_map = cv2.watershed(img, marker_map)

    return marker_map 
Developer ID: RubanSeven, Project: CRAFT_keras, Lines of code: 47, Source file: fake_util.py


Note: The cv2.connectedComponents method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.