

Python cv2.connectedComponentsWithStats Method Code Examples

This article collects typical usage examples of the cv2.connectedComponentsWithStats method in Python. If you are wondering what exactly cv2.connectedComponentsWithStats does, how to call it, or what real-world code using it looks like, the curated examples below should help. You can also explore further usage examples of the cv2 module that this method belongs to.


Below are 15 code examples of the cv2.connectedComponentsWithStats method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
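
Before diving into the examples, here is a minimal usage sketch of the API itself (our own illustration, not taken from any of the projects below; the file name "blobs.png" is a placeholder). Given a binary 8-bit single-channel image, the call returns the number of labels (background included), a label map the same size as the input, a per-component statistics array whose columns are indexed by the cv2.CC_STAT_* constants, and the component centroids. Row 0 of the statistics and centroids corresponds to the background.

# Minimal sketch: assumes a binary image stored as "blobs.png" (placeholder path)
import cv2

img = cv2.imread("blobs.png", cv2.IMREAD_GRAYSCALE)
_, binary = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(
    binary, connectivity=8, ltype=cv2.CV_32S)

# skip label 0 (background) and report each component's bounding box, area and centroid
for i in range(1, num_labels):
    x, y = stats[i, cv2.CC_STAT_LEFT], stats[i, cv2.CC_STAT_TOP]
    w, h = stats[i, cv2.CC_STAT_WIDTH], stats[i, cv2.CC_STAT_HEIGHT]
    area = stats[i, cv2.CC_STAT_AREA]
    cx, cy = centroids[i]
    print(f"component {i}: bbox=({x}, {y}, {w}, {h}), area={area}, centroid=({cx:.1f}, {cy:.1f})")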

Example 1: remove_small_objects

# Required import: import cv2 [as alias]
# Or: from cv2 import connectedComponentsWithStats [as alias]
def remove_small_objects(img, min_size=150):
        # find all your connected components (white blobs in your image)
        nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(img, connectivity=8)
        # connectedComponentsWithStats yields every separated component with information on each of them, such as size
        # the following part just takes out the background, which is also considered a component but is usually not wanted
        sizes = stats[1:, -1]
        nb_components = nb_components - 1

        # your answer image (note: this assigns a reference, so the input img is modified in place)
        img2 = img
        # for every component in the image, you keep it only if it's above min_size
        for i in range(0, nb_components):
            if sizes[i] < min_size:
                img2[output == i + 1] = 0

        return img2 
Developer ID: YoongiKim, Project: Walk-Assistant, Lines of code: 18, Source file: filter.py

Example 2: refine_sil

# Required import: import cv2 [as alias]
# Or: from cv2 import connectedComponentsWithStats [as alias]
def refine_sil(sil, min_pixel):
    if len(sil.shape)==3:
        sil = sil[:,:,0]
        c3 = True
    else:
        c3 = False
    sil[sil>0] = 255
    
    nb_components, output, stats, centroids = \
            cv2.connectedComponentsWithStats(sil, connectivity = 8)
    
    sizes = stats[1:, -1]; nb_components = nb_components - 1

    refined_sil = np.zeros(output.shape)
    #for every component in the image, you keep it only if it's above min_size
    for i in range(0, nb_components):
        if sizes[i] >= min_pixel:
            refined_sil[output == i + 1] = 255
            
    if c3:
        refined_sil = np.stack((refined_sil,)*3, -1)
    return refined_sil
  
    

Developer ID: zhuhao-nju, Project: hmd, Lines of code: 28, Source file: utility.py

Example 3: _connect_components_analysis

# Required import: import cv2 [as alias]
# Or: from cv2 import connectedComponentsWithStats [as alias]
def _connect_components_analysis(image):
    """
    connected component analysis to remove the small components
    :param image:
    :return:
    """
    if len(image.shape) == 3:
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    else:
        gray_image = image

    return cv2.connectedComponentsWithStats(gray_image, connectivity=8, ltype=cv2.CV_32S) 
Developer ID: MaybeShewill-CV, Project: lanenet-lane-detection, Lines of code: 14, Source file: lanenet_postprocess.py

Example 4: removeSmallComponents

# Required import: import cv2 [as alias]
# Or: from cv2 import connectedComponentsWithStats [as alias]
def removeSmallComponents(image, threshold):
    #find all your connected components (white blobs in your image)
    nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(image, connectivity=8)
    sizes = stats[1:, -1]; nb_components = nb_components - 1

    img2 = np.zeros(output.shape, dtype=np.uint8)
    #for every component in the image, you keep it only if it's above threshold
    for i in range(0, nb_components):
        if sizes[i] >= threshold:
            img2[output == i + 1] = 255
    return img2 
Developer ID: hoanglehaithanh, Project: Traffic-Sign-Detection, Lines of code: 13, Source file: main.py

Example 5: find_connected

# Required import: import cv2 [as alias]
# Or: from cv2 import connectedComponentsWithStats [as alias]
def find_connected(score_map, threshold=0.7):
    binary_map = (score_map > threshold).astype(np.uint8)
    connectivity = 8
    output = cv2.connectedComponentsWithStats(binary_map, connectivity=connectivity, ltype=cv2.CV_32S)
    label_map = output[1]
    # show_image(np.asarray(label_map * 100.0, np.uint8))
    return np.max(label_map), label_map 
Developer ID: UpCoder, Project: ICPR_TextDection, Lines of code: 9, Source file: tools.py

Example 6: _connect_components_analysis

# Required import: import cv2 [as alias]
# Or: from cv2 import connectedComponentsWithStats [as alias]
def _connect_components_analysis(image):
        """

        :param image:
        :return:
        """
        if len(image.shape) == 3:
            gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        else:
            gray_image = image

        return cv2.connectedComponentsWithStats(gray_image, connectivity=8, ltype=cv2.CV_32S) 
Developer ID: stesha2016, Project: lanenet-enet-hnet, Lines of code: 14, Source file: lanenet_postprocess.py

Example 7: split_mask_erode_dilate

# Required import: import cv2 [as alias] (this example uses the alias cv)
# Or: from cv2 import connectedComponentsWithStats [as alias]
def split_mask_erode_dilate(mask, kernel=k_3x3, k=3):
    img_erosion = cv.erode(mask, kernel, iterations=k)
    output = cv.connectedComponentsWithStats(img_erosion, 4, cv.CV_32S)
    if output[0] < 2:
        return [mask], output[1]
    else:
        masks_res = []
        for idx in range(1, output[0]):
            res_m = (output[1] == idx).astype(np.uint8)
            res_m = cv.dilate(res_m, kernel, iterations=k)
            if res_m.sum() > 5:
                masks_res.append(res_m)
        return masks_res, output[1] 
Developer ID: gangadhar-p, Project: NucleiDetectron, Lines of code: 15, Source file: mask_morphology.py

Example 8: get_mean_cell_size

# Required import: import cv2 [as alias]
# Or: from cv2 import connectedComponentsWithStats [as alias]
def get_mean_cell_size(mask_contours):
    nuclei_sizes = []
    for mask_contour in mask_contours: 
        mask = mask_contour[:,:,0]
        contour = mask_contour[:,:,1]
        new_mask = (mask*255).astype(np.uint8)
        new_contour = (contour*255).astype(np.uint8)
        true_foreground = cv2.subtract(new_mask, new_contour)
        output = cv2.connectedComponentsWithStats(true_foreground)
        nuclei_sizes.append(np.mean(output[2][1:,cv2.CC_STAT_AREA]))
    return nuclei_sizes 
Developer ID: nicolefinnie, Project: kaggle-dsb2018, Lines of code: 13, Source file: image_resize.py

Example 9: obj_histogram

# Required import: import cv2 [as alias]
# Or: from cv2 import connectedComponentsWithStats [as alias]
def obj_histogram(self, mask, label):
    # holders for predicted and ground-truth object classes (makes it easy to compute histograms)
    predicted = []
    labeled = []

    # get connected components in label for each class
    for i in range(self.num_classes):
      # get binary image for this class
      bin_lbl = np.zeros(label.shape)
      bin_lbl[label == i] = 1
      bin_lbl[label != i] = 0

      # util.im_gray_plt(bin_lbl,'class '+str(i))
      connectivity = 4
      output = cv2.connectedComponentsWithStats(
          bin_lbl.astype(np.uint8), connectivity, cv2.CV_32S)
      num_components = output[0]
      components = output[1]
      stats = output[2]
      centroids = output[3]

      for j in range(1, num_components):  # 0 is background (useless)
        # only process components larger than 50 pixels
        if stats[j][cv2.CC_STAT_AREA] > 50:
          # for each component in each class, see the class with the highest percentage of pixels
          # make mask with just this component of this class
          comp_mask = np.zeros(label.shape)
          comp_mask[components == j] = 0
          comp_mask[components != j] = 1
          # mask the prediction
          masked_prediction = np.ma.masked_array(mask, mask=comp_mask)
          # get histogram and get the argmax that is not zero
          class_hist, _ = np.histogram(masked_prediction.compressed(),
                                       bins=self.num_classes, range=[0, self.num_classes])
          max_class = np.argmax(class_hist)
          # print("\nMax class: ",max_class,"  real: ",i)
          # util.im_gray_plt(comp_mask)
          # util.im_block()
          # sum an entry to the containers depending on right or wrong
          predicted.append(max_class)
          labeled.append(i)
    # for idx in range(len(predicted)):
    #   print(predicted[idx],labeled[idx])

    # histogram to count right and wrong objects
    histrange = np.array([[-0.5, self.num_classes - 0.5],
                          [-0.5, self.num_classes - 0.5]], dtype='float64')
    h_now, _, _ = np.histogram2d(np.array(predicted),
                                 np.array(labeled),
                                 bins=self.num_classes,
                                 range=histrange)

    return h_now 
Developer ID: PRBonn, Project: bonnet, Lines of code: 55, Source file: abstract_net.py

Example 10: train_gen

# Required import: import cv2 [as alias]
# Or: from cv2 import connectedComponentsWithStats [as alias]
def train_gen(self, number_of_objects, number_of_trees):
        """
        Generates cluster programs to be drawn in one image.
        :param number_of_objects: Total number of objects to draw in one image
        :param number_of_trees: total number of clusters to draw in one image
        :return:
        """
        num_objs = 0
        programs = []
        while num_objs < number_of_objects:
            index = np.random.choice(len(self.train_substrings))
            if num_objs + len(self.train_substrings[index].keys()) > number_of_objects:
                required_indices = sorted(self.train_substrings[index].keys())[0:number_of_objects - num_objs]
                cluster = {}
                for r in required_indices:
                    p = self.train_substrings[index][r]
                    image = image_from_expressions([p,], stack_size=9, canvas_shape=[64, 64])

                    # Make sure the created object doesn't have disjoint parts; if it does,
                    # skip this program, because disjoint parts make the analysis difficult.
                    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(
                        np.array(image[0], dtype=np.uint8))
                    if nlabels > 2:
                        continue
                    cluster[r] = self.train_substrings[index][r]
                if cluster:
                    programs.append(cluster)
                    num_objs += len(cluster.keys())
                num_objs += len(cluster.keys())
            else:
                cluster = {}
                for k, p in self.train_substrings[index].items():
                    image = image_from_expressions([p], stack_size=9, canvas_shape=[64, 64])
                    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(
                        np.array(image[0], dtype=np.uint8))
                    if nlabels > 2:
                        continue
                    cluster[k] = p
                if cluster:
                    programs.append(cluster)
                    num_objs += len(cluster.keys())
        return programs 
Developer ID: Hippogriff, Project: CSGNet, Lines of code: 44, Source file: Grouping.py

Example 11: thresh

# Required import: import cv2 [as alias]
# Or: from cv2 import connectedComponentsWithStats [as alias]
def thresh(img, conservative=0, min_blob_size=50):
  '''
    Get a threshold to build the mask (computed with cv2.THRESH_TRIANGLE) and apply a
    correction, passed in conservative (-100..100), as a percentage of the threshold.
  '''

  # blur and compute the threshold level (triangle method)
  blur = cv2.GaussianBlur(img, (13, 13), 0)
  level, _ = cv2.threshold(
      blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_TRIANGLE)

  # print("Otsus Level: ",level)

  # change with conservative
  level += conservative / 100.0 * level

  # check boundaries
  level = 255 if level > 255 else level
  level = 0 if level < 0 else level

  # mask image
  _, mask = cv2.threshold(blur, level, 255, cv2.THRESH_BINARY)

  # morph operators
  kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
  mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
  mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)

  # remove small connected blobs
  # find connected components
  n_components, output, stats, centroids = cv2.connectedComponentsWithStats(
      mask, connectivity=8)
  # remove background class
  sizes = stats[1:, -1]
  n_components = n_components - 1

  # remove blobs
  mask_clean = np.zeros(output.shape)
  # for every component in the image, keep it only if it's above min_blob_size
  for i in range(0, n_components):
    if sizes[i] >= min_blob_size:
      mask_clean[output == i + 1] = 255

  return mask_clean 
Developer ID: PRBonn, Project: bonnet, Lines of code: 46, Source file: plant_features.py

Example 12: getDetBoxes_core

# Required import: import cv2 [as alias]
# Or: from cv2 import connectedComponentsWithStats [as alias]
def getDetBoxes_core(textmap, linkmap, text_threshold, link_threshold, low_text):
    # prepare data
    linkmap = linkmap.copy()
    textmap = textmap.copy()
    img_h, img_w = textmap.shape

    """ labeling method """
    ret, text_score = cv2.threshold(textmap, low_text, 1, 0)
    ret, link_score = cv2.threshold(linkmap, link_threshold, 1, 0)

    text_score_comb = np.clip(text_score + link_score, 0, 1)
    nLabels, labels, stats, centroids = cv2.connectedComponentsWithStats(text_score_comb.astype(np.uint8), connectivity=4)

    det = []
    mapper = []
    for k in range(1,nLabels):
        # size filtering
        size = stats[k, cv2.CC_STAT_AREA]
        if size < 10: continue

        # thresholding
        if np.max(textmap[labels==k]) < text_threshold: continue

        # make segmentation map
        segmap = np.zeros(textmap.shape, dtype=np.uint8)
        segmap[labels==k] = 255
        segmap[np.logical_and(link_score==1, text_score==0)] = 0   # remove link area
        x, y = stats[k, cv2.CC_STAT_LEFT], stats[k, cv2.CC_STAT_TOP]
        w, h = stats[k, cv2.CC_STAT_WIDTH], stats[k, cv2.CC_STAT_HEIGHT]
        niter = int(math.sqrt(size * min(w, h) / (w * h)) * 2)
        sx, ex, sy, ey = x - niter, x + w + niter + 1, y - niter, y + h + niter + 1
        # boundary check
        if sx < 0 : sx = 0
        if sy < 0 : sy = 0
        if ex >= img_w: ex = img_w
        if ey >= img_h: ey = img_h
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(1 + niter, 1 + niter))
        segmap[sy:ey, sx:ex] = cv2.dilate(segmap[sy:ey, sx:ex], kernel)

        # make box
        np_contours = np.roll(np.array(np.where(segmap!=0)),1,axis=0).transpose().reshape(-1,2)
        rectangle = cv2.minAreaRect(np_contours)
        box = cv2.boxPoints(rectangle)

        # align diamond-shape
        w, h = np.linalg.norm(box[0] - box[1]), np.linalg.norm(box[1] - box[2])
        box_ratio = max(w, h) / (min(w, h) + 1e-5)
        if abs(1 - box_ratio) <= 0.1:
            l, r = min(np_contours[:,0]), max(np_contours[:,0])
            t, b = min(np_contours[:,1]), max(np_contours[:,1])
            box = np.array([[l, t], [r, t], [r, b], [l, b]], dtype=np.float32)

        # make clock-wise order
        startidx = box.sum(axis=1).argmin()
        box = np.roll(box, 4-startidx, 0)
        box = np.array(box)

        det.append(box)
        mapper.append(k)

    return det, labels, mapper 
Developer ID: clovaai, Project: CRAFT-pytorch, Lines of code: 63, Source file: craft_utils.py

Example 13: draw_binary_mask

# Required import: import cv2 [as alias]
# Or: from cv2 import connectedComponentsWithStats [as alias]
def draw_binary_mask(
        self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=0
    ):
        """
        Args:
            binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and
                W is the image width. Each value in the array is either a 0 or 1 value of uint8
                type.
            color: color of the mask. Refer to `matplotlib.colors` for a full list of
                formats that are accepted. If None, will pick a random color.
            edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
                full list of formats that are accepted.
            text (str): if not None, will be drawn at the object's center of mass.
            alpha (float): blending coefficient. Smaller values lead to more transparent masks.
            area_threshold (float): a connected component smaller than this will not be shown.

        Returns:
            output (VisImage): image object with mask drawn.
        """
        if color is None:
            color = random_color(rgb=True, maximum=1)
        color = mplc.to_rgb(color)

        has_valid_segment = False
        binary_mask = binary_mask.astype("uint8")  # opencv needs uint8
        mask = GenericMask(binary_mask, self.output.height, self.output.width)
        shape2d = (binary_mask.shape[0], binary_mask.shape[1])

        if not mask.has_holes:
            # draw polygons for regular masks
            for segment in mask.polygons:
                area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))
                if area < (area_threshold or 0):
                    continue
                has_valid_segment = True
                segment = segment.reshape(-1, 2)
                self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha)
        else:
            rgba = np.zeros(shape2d + (4,), dtype="float32")
            rgba[:, :, :3] = color
            rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha
            has_valid_segment = True
            self.output.ax.imshow(rgba)

        if text is not None and has_valid_segment:
            # TODO sometimes drawn on wrong objects. the heuristics here can improve.
            lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
            _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8)
            largest_component_id = np.argmax(stats[1:, -1]) + 1

            # draw text on the largest component, as well as other very large components.
            for cid in range(1, _num_cc):
                if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH:
                    # median is more stable than centroid
                    # center = centroids[largest_component_id]
                    center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1]
                    self.draw_text(text, center, color=lighter_color)
        return self.output 
Developer ID: facebookresearch, Project: detectron2, Lines of code: 60, Source file: visualizer.py

Example 14: draw_binary_mask

# Required import: import cv2 [as alias]
# Or: from cv2 import connectedComponentsWithStats [as alias]
def draw_binary_mask(
        self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=4096
    ):
        """
        Args:
            binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and
                W is the image width. Each value in the array is either a 0 or 1 value of uint8
                type.
            color: color of the mask. Refer to `matplotlib.colors` for a full list of
                formats that are accepted. If None, will pick a random color.
            edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
                full list of formats that are accepted.
            text (str): if not None, will be drawn at the object's center of mass.
            alpha (float): blending coefficient. Smaller values lead to more transparent masks.
            area_threshold (float): a connected component smaller than this will not be shown.

        Returns:
            output (VisImage): image object with mask drawn.
        """
        if color is None:
            color = random_color(rgb=True, maximum=1)
        if area_threshold is None:
            area_threshold = 4096

        has_valid_segment = False
        binary_mask = binary_mask.astype("uint8")  # opencv needs uint8
        mask = GenericMask(binary_mask, self.output.height, self.output.width)
        shape2d = (binary_mask.shape[0], binary_mask.shape[1])

        if not mask.has_holes:
            # draw polygons for regular masks
            for segment in mask.polygons:
                area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))
                if area < area_threshold:
                    continue
                has_valid_segment = True
                segment = segment.reshape(-1, 2)
                self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha)
        else:
            rgba = np.zeros(shape2d + (4,), dtype="float32")
            rgba[:, :, :3] = color
            rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha
            has_valid_segment = True
            self.output.ax.imshow(rgba)

        if text is not None and has_valid_segment:
            # TODO sometimes drawn on wrong objects. the heuristics here can improve.
            lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
            _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8)
            largest_component_id = np.argmax(stats[1:, -1]) + 1

            # draw text on the largest component, as well as other very large components.
            for cid in range(1, _num_cc):
                if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH:
                    # median is more stable than centroid
                    # center = centroids[largest_component_id]
                    center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1]
                    self.draw_text(text, center, color=lighter_color)
        return self.output 
Developer ID: conansherry, Project: detectron2, Lines of code: 61, Source file: visualizer.py

Example 15: post_process_image

# Required import: import cv2 [as alias]
# Or: from cv2 import connectedComponentsWithStats [as alias]
def post_process_image(image, mask, contour):
    """ Watershed on the markers generated on the sure foreground to find all disconnected objects
    The (mask - contour) is the true foreground. We set the contour to be unknown area. 
    Index of contour = -1
    Index of unknown area = 0
    Index of background = 1  -> set back to 0 after watershed
    Index of found objects > 1
    """
    
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
    
    new_contour = (contour*255).astype(np.uint8)
    new_mask = (mask*255).astype(np.uint8)
    new_mask = cv2.morphologyEx(new_mask, cv2.MORPH_OPEN, kernel, iterations=1)
  

    _, thresh_mask = cv2.threshold(new_mask,0,255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    _, thresh_contour = cv2.threshold(new_contour,0,255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    sure_background = cv2.dilate(thresh_mask,kernel,iterations=3)
    
    sure_foreground = cv2.subtract(thresh_mask, thresh_contour)
    mask_plus_contour = cv2.add(thresh_mask, thresh_contour)
    mask_plus_contour = cv2.cvtColor(mask_plus_contour, cv2.COLOR_GRAY2RGB)

    unknown = cv2.subtract(sure_background, sure_foreground)
    # Marker labelling
    output = cv2.connectedComponentsWithStats(sure_foreground)
    labels = output[1]
    stats = output[2]
    # Add one to all labels so that sure background is not 0, 0 is considered unknown by watershed
    # this way, watershed can distinguish unknown from the background
    labels = labels + 1
    labels[unknown==255] = 0

    try:
        # random walker on thresh_mask leads a lot higher mean IoU but lower LB
        #labels = random_walker(thresh_mask, labels)   
        # random walker on thresh_mask leads lower mean IoU but higher LB
        labels = random_walker(mask_plus_contour, labels, multichannel=True)   

    except:
        labels = cv2.watershed(mask_plus_contour, labels)

    labels[labels==-1] = 0
    labels[labels==1] = 0
    labels = labels -1
    labels[labels==-1] = 0
    # discard nuclei which are too big or too small
    mean = np.mean(stats[1:,cv2.CC_STAT_AREA])

    for i in range(1, labels.max()):
         if stats[i, cv2.CC_STAT_AREA] > mean*10 or stats[i, cv2.CC_STAT_AREA] < mean/10:
            labels[labels==i] = 0
            
    labels = renumber_labels(labels)
        
    return labels 
Developer ID: nicolefinnie, Project: kaggle-dsb2018, Lines of code: 59, Source file: image_processing.py


Note: The cv2.connectedComponentsWithStats method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.