

Python cv2.mean Method Code Examples

This article collects typical usage examples of the Python cv2.mean method. If you are wondering what exactly cv2.mean does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the cv2 module.


The following sections show 15 code examples of cv2.mean, sorted by popularity by default.
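Before diving into the examples, here is a minimal sketch of what cv2.mean itself returns, with and without a mask (the file name is a placeholder):

import cv2
import numpy as np

# cv2.mean returns a 4-element tuple (one value per channel, zero-padded),
# so a 3-channel BGR image yields (mean_B, mean_G, mean_R, 0).
img = cv2.imread("example.jpg")          # placeholder input image
print(cv2.mean(img))                     # mean over the whole image

# With a mask, only pixels where the mask is non-zero are averaged.
mask = np.zeros(img.shape[:2], dtype=np.uint8)
cv2.circle(mask, (50, 50), 20, 255, -1)  # filled circle as the ROI
print(cv2.mean(img, mask=mask))          # mean over the circular ROI only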

Example 1: __init__

# Required module: import cv2 [as alias]
# Or: from cv2 import mean [as alias]
def __init__(self, max_half_life=500. * 1000, min_half_life=5.* 1000, increment = 1.2):
        # the maximal half life of a pixel from background, in seconds
        self._max_half_life = float(max_half_life)
        # the minimal one
        self._min_half_life = float(min_half_life)

        # starts with the fastest learning rate
        self._current_half_life = self._min_half_life

        # fixme theoretically this should depend on time, not frame index
        self._increment = increment
        # the mean background
        self._bg_mean = None
        # self._bg_sd = None

        self._buff_alpha_matrix = None
        self._buff_invert_alpha_mat = None
        # the time stamp of the frame last used to update
        self.last_t = 0 
Developer: gilestrolab, Project: ethoscope, Lines: 21, Source file: adaptive_bg_tracker.py

Example 2: shade

# Required module: import cv2 [as alias]
# Or: from cv2 import mean [as alias]
def shade(self, polygons: np.ndarray, image: np.ndarray) -> np.ndarray:
        canvas_dimensions = self.get_output_dimensions(image)
        scale_factor = max(canvas_dimensions) / max(image.shape)
        scaled_polygons = polygons * scale_factor
        output_image = np.zeros(canvas_dimensions, dtype=np.uint8)
        for polygon, scaled_polygon in zip(polygons, scaled_polygons):
            polygon = self.strip_negative_points(polygon)
            scaled_polygon = self.strip_negative_points(scaled_polygon)
            if len(polygon) < 3:
                continue
            mask = np.zeros(image.shape[:2], dtype=np.uint8)
            cv2.fillConvexPoly(mask, polygon, (255,))
            color = self.get_dominant_color(image[mask > 0], 3, 3).tolist()
            # color = cv2.mean(image, mask)[:3]
            cv2.fillConvexPoly(output_image, scaled_polygon.astype(np.int32), color, lineType=cv2.LINE_AA)
        return output_image 
Developer: tasercake, Project: lowpolypy, Lines: 18, Source file: process.py

Example 3: retrieve_area_color

# Required module: import cv2 [as alias]
# Or: from cv2 import mean [as alias]
def retrieve_area_color(data, contour, labels):
    """Mask an image area and retrieve its dominant color starting from a label
    glossary, by determining its closest label (regarding euclidean distance).

    Largely inspired by: https://www.pyimagesearch.com/\
    2016/02/15/determining-object-color-with-opencv/

    Parameters
    ----------
    data : np.array
        3-channelled image
    contour : np.array
        List of points that delimits the area
    labels : list
        List of dictionaries, one per label (with "id" and "color" keys)
    """
    mask = np.zeros(data.shape[:2], dtype="uint8")
    cv2.drawContours(mask, [contour], -1, 255, -1)
    mean = cv2.mean(data, mask=mask)[:3]
    min_dist = (np.inf, None)
    for label in labels:
        d = np.linalg.norm(label["color"] - np.array(mean))
        if d < min_dist[0]:
            min_dist = (d, label["id"])
    return min_dist[1] 
Developer: Oslandia, Project: deeposlandia, Lines: 27, Source file: geometries.py
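A possible way to call retrieve_area_color; the image, contour, and label glossary below are made-up values for illustration, not data from the deeposlandia project:

import cv2
import numpy as np

# Synthetic 100x100 image whose upper-left quarter is blue (BGR order).
data = np.zeros((100, 100, 3), dtype=np.uint8)
data[:50, :50] = (255, 0, 0)

# Triangular contour inside the blue patch, in OpenCV's Nx1x2 contour format.
contour = np.array([[[5, 5]], [[45, 5]], [[5, 45]]], dtype=np.int32)

# Hypothetical label glossary; colors use the same channel order as the image.
labels = [{"id": "water", "color": [255, 0, 0]},
          {"id": "building", "color": [0, 0, 255]}]

print(retrieve_area_color(data, contour, labels))  # expected: "water"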

Example 4: anonymize_face_pixelate

# Required module: import cv2 [as alias]
# Or: from cv2 import mean [as alias]
def anonymize_face_pixelate(image, blocks=3):
    # divide the input image into NxN blocks
    (h, w) = image.shape[:2]
    xSteps = np.linspace(0, w, blocks + 1, dtype="int")
    ySteps = np.linspace(0, h, blocks + 1, dtype="int")
    # loop over the blocks in both the x and y direction
    for i in range(1, len(ySteps)):
        for j in range(1, len(xSteps)):
            # compute the starting and ending (x, y)-coordinates
            # for the current block
            startX = xSteps[j - 1]
            startY = ySteps[i - 1]
            endX = xSteps[j]
            endY = ySteps[i]
            # extract the ROI using NumPy array slicing, compute the
            # mean of the ROI, and then draw a rectangle with the
            # mean BGR values over the ROI in the original image
            roi = image[startY:endY, startX:endX]
            (B, G, R) = [int(x) for x in cv2.mean(roi)[:3]]
            cv2.rectangle(image, (startX, startY), (endX, endY), (B, G, R), -1)
    # return the pixelated blurred image
    return image


# Filters path 
Developer: charlielito, Project: snapchat-filters-opencv, Lines: 27, Source file: blur_face.py
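A sketch of how this helper might be applied to a detected face region (the file name and box coordinates are placeholders):

import cv2

image = cv2.imread("face.jpg")                     # placeholder input image
x, y, w, h = 120, 80, 160, 160                     # placeholder face bounding box
face = image[y:y + h, x:x + w]
image[y:y + h, x:x + w] = anonymize_face_pixelate(face, blocks=8)
cv2.imwrite("face_pixelated.jpg", image)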

Example 5: roiColor

# Required module: import cv2 [as alias]
# Or: from cv2 import mean [as alias]
def roiColor(self, image):
        """
        Finds the average color within the ROI of the square. The ROI is a circle of
        radius r centred on the square.
        """
        # Initialise mask
        maskImage = np.zeros((image.shape[0], image.shape[1]), np.uint8)
        # Draw the ROI circle on the mask
        cv2.circle(maskImage, self.roi, self.radius, (255, 255, 255), -1)
        # Find the average color
        average_raw = cv2.mean(image, mask=maskImage)[::-1]
        # Need int format so reassign variable
        average = (int(average_raw[1]), int(average_raw[2]), int(average_raw[3]))

        ## DEBUG
        # print(average)

        return average 
Developer: nebbles, Project: DE3-ROB1-CHESS, Lines: 20, Source file: squareClass.py

Example 6: add_median_colr

# Required module: import cv2 [as alias]
# Or: from cv2 import mean [as alias]
def add_median_colr(img_ori, img_mask, mask, min_bbox):
    img_crop = faceCrop(img_ori, min_bbox, scale_ratio=0.5)
    (B, G, R) = cv2.split(img_crop)
    B_median = np.median(B)
    G_median = np.median(G)
    R_median = np.median(R)
    # mean_pixel = cv2.mean(img[int(rect[1]):int(rect[3]), int(rect[0]):int(rect[2])])  # get img mean pixel
    rows, cols, _ = img_ori.shape
    for row in range(rows):
        for col in range(cols):
            if mask[row, col] < 1:
                img_mask[row, col][0] = B_median
                img_mask[row, col][1] = G_median
                img_mask[row, col][2] = R_median

    return img_mask 
Developer: bleakie, Project: MaskInsightface, Lines: 18, Source file: generate_mask.py

Example 7: detect_single_scale

# Required module: import cv2 [as alias]
# Or: from cv2 import mean [as alias]
def detect_single_scale(score_map, geo_map, score_map_thresh, nms_thres, box_thresh, timer):
    if len(score_map.shape) == 4:
        score_map = score_map[0, :, :, 0]
        geo_map = geo_map[0, :, :, ]
    # filter the score map
    xy_text = np.argwhere(score_map > score_map_thresh)
    # sort the text boxes via the y axis
    xy_text = xy_text[np.argsort(xy_text[:, 0])]
    # restore
    start = time.time()
    # xy_text[:, ::-1]*4: image coordinates of the pixels that pass the threshold
    # geo_map[xy_text[:, 0], xy_text[:, 1], :]: distances from each of those points to the bounding box edges
    text_box_restored = restore_rectangle(xy_text[:, ::-1], geo_map[xy_text[:, 0], xy_text[:, 1], :])  # N*4*2
    print('{} text boxes before nms'.format(text_box_restored.shape[0]))
    boxes = np.zeros((text_box_restored.shape[0], 9), dtype=np.float32)
    boxes[:, :8] = text_box_restored.reshape((-1, 8))
    boxes[:, 8] = score_map[xy_text[:, 0], xy_text[:, 1]]
    timer['restore'] = time.time() - start
    # nms part
    start = time.time()
    # boxes = nms_locality.nms_locality(boxes.astype(np.float64), nms_thres)
    boxes = lanms.merge_quadrangle_n9(boxes.astype('float32'), nms_thres)
    timer['nms'] = time.time() - start

    if boxes.shape[0] == 0:
        return None, timer

    # here we filter some low score boxes by the average score map, this is different from the original paper
    for i, box in enumerate(boxes):
        mask = np.zeros_like(score_map, dtype=np.uint8)
        cv2.fillPoly(mask, box[:8].reshape((-1, 4, 2)).astype(np.int32), 1)
        boxes[i, 8] = cv2.mean(score_map, mask)[0]
    boxes = boxes[boxes[:, 8] > box_thresh]
    return boxes 
Developer: UpCoder, Project: ICPR_TextDection, Lines: 36, Source file: test_multiscale.py
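The low-score filtering at the end of the function above relies on a masked mean over the score map. A stripped-down sketch of just that step, using a synthetic score map and a single hand-written quadrilateral:

import cv2
import numpy as np

score_map = np.random.rand(128, 128).astype(np.float32)             # synthetic score map
box = np.array([10, 10, 60, 10, 60, 40, 10, 40], dtype=np.float32)  # x1,y1,...,x4,y4

mask = np.zeros_like(score_map, dtype=np.uint8)
cv2.fillPoly(mask, box.reshape((-1, 4, 2)).astype(np.int32), 1)     # rasterise the quad
avg_score = cv2.mean(score_map, mask)[0]                            # mean score inside it
print(avg_score)                                                    # roughly 0.5 here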

Example 8: distance

# Required module: import cv2 [as alias]
# Or: from cv2 import mean [as alias]
def distance(self, features,time):
        if time - self._last_updated_time > self._max_unupdated_duration:
            logging.warning("FG model not updated for too long. Resetting.")
            self.__init__(self._history_length)
            return 0

        if not self._is_ready:
            last_row = self._ring_buff_idx + 1
        else:
            last_row = self._history_length

        means = np.mean(self._ring_buff[:last_row ], 0)

        np.subtract(self._ring_buff[:last_row], means, self._std_buff[:last_row])
        np.abs(self._std_buff[:last_row], self._std_buff[:last_row])

        stds = np.mean(self._std_buff[:last_row], 0)
        if (stds == 0).any():
            return 0

        a = 1 / (stds* self._sqrt_2_pi)

        b = np.exp(- (features - means) ** 2  / (2 * stds ** 2))

        likelihoods =  a * b

        if np.any(likelihoods==0):
            return 0
        #print features, means
        logls = np.sum(np.log10(likelihoods)) / len(likelihoods)
        return -1.0 * logls 
Developer: gilestrolab, Project: ethoscope, Lines: 33, Source file: adaptive_bg_tracker.py

Example 9: _pre_process_input_minimal

# Required module: import cv2 [as alias]
# Or: from cv2 import mean [as alias]
def _pre_process_input_minimal(self, img, mask, t, darker_fg=True):
        blur_rad = int(self._object_expected_size * np.max(img.shape) / 2.0)

        if blur_rad % 2 == 0:
            blur_rad += 1

        if self._buff_grey is None:
            self._buff_grey = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
            if mask is None:
                mask = np.ones_like(self._buff_grey) * 255

        cv2.cvtColor(img,cv2.COLOR_BGR2GRAY, self._buff_grey)
        # cv2.imshow("dbg",self._buff_grey)
        cv2.GaussianBlur(self._buff_grey,(blur_rad,blur_rad),1.2, self._buff_grey)
        if darker_fg:
            cv2.subtract(255, self._buff_grey, self._buff_grey)

        #
        mean = cv2.mean(self._buff_grey, mask)

        scale = 128. / mean[0]

        cv2.multiply(self._buff_grey, scale, dst = self._buff_grey)


        if mask is not None:
            cv2.bitwise_and(self._buff_grey, mask, self._buff_grey)
            return self._buff_grey 
Developer: gilestrolab, Project: ethoscope, Lines: 30, Source file: adaptive_bg_tracker.py
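The middle of the function above normalises brightness by scaling the grey frame so that its masked mean becomes 128. The same idea in isolation (placeholder file name; cv2.convertScaleAbs is used here for the saturating multiply):

import cv2
import numpy as np

grey = cv2.cvtColor(cv2.imread("frame.jpg"), cv2.COLOR_BGR2GRAY)  # placeholder frame
mask = np.full_like(grey, 255)                                     # trivial whole-image mask

mean = cv2.mean(grey, mask)[0]                 # current average intensity inside the mask
scale = 128.0 / mean                           # factor that maps that average to 128
grey = cv2.convertScaleAbs(grey, alpha=scale)  # scale with saturation back to uint8
print(cv2.mean(grey, mask)[0])                 # close to 128 (up to clipping)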

Example 10: _comput_blob_features

# Required module: import cv2 [as alias]
# Or: from cv2 import mean [as alias]
def _comput_blob_features(self, img, contour, lr = 1e-5):
        hull = contour

        x,y,w,h = cv2.boundingRect(contour)

        roi = img[y : y + h, x : x + w]
        mask = np.zeros_like(roi)
        cv2.drawContours(mask,[hull],-1, 1,-1,offset=(-x,-y))

        mean_col = cv2.mean(roi,mask)[0]


        if len(self.positions) > 2:

            last_two_pos = self._positions.tail(2)
            xm, xmm = last_two_pos.x
            ym, ymm = last_two_pos.y

            instantaneous_speed = abs((xm - xmm) + 1j * (ym - ymm))
        else:
            instantaneous_speed = 0
        if np.isnan(instantaneous_speed):
            instantaneous_speed = 0

        features = np.array([cv2.contourArea(hull) + 1.0,
                            cv2.arcLength(hull,True) + 1.0,
                            instantaneous_speed +1.0,
                            mean_col +1
                             ])

        return features 
Developer: gilestrolab, Project: ethoscope, Lines: 33, Source file: medianABS_racking.py

Example 11: detect

# Required module: import cv2 [as alias]
# Or: from cv2 import mean [as alias]
def detect(self, score_map, geo_map, score_map_thresh=0.8, box_thresh=0.1, nms_thres=0.2):
    '''
    restore text boxes from score map and geo map
    :param score_map:
    :param geo_map:
    :param score_map_thresh: threshold for score map
    :param box_thresh: threshold for boxes
    :param nms_thres: threshold for nms
    :return:
    '''
    if len(score_map.shape) == 4:
      score_map = score_map[0, :, :, 0]
      geo_map = geo_map[0, :, :, ]
    # filter the score map
    xy_text = np.argwhere(score_map > score_map_thresh)
    # sort the text boxes via the y axis
    xy_text = xy_text[np.argsort(xy_text[:, 0])]
    # restore

    text_box_restored = restore_rectangle(xy_text[:, ::-1]*4, geo_map[xy_text[:, 0], xy_text[:, 1], :]) # N*4*2
    print('{} text boxes before nms'.format(text_box_restored.shape[0]))
    boxes = np.zeros((text_box_restored.shape[0], 9), dtype=np.float32)
    boxes[:, :8] = text_box_restored.reshape((-1, 8))
    boxes[:, 8] = score_map[xy_text[:, 0], xy_text[:, 1]]
    # boxes = nms_locality.nms_locality(boxes.astype(np.float64), nms_thres)
    boxes = lanms.merge_quadrangle_n9(boxes.astype('float32'), nms_thres)

    if boxes.shape[0] == 0:
      return None

    # here we filter some low score boxes by the average score map, this is different from the original paper
    for i, box in enumerate(boxes):
      mask = np.zeros_like(score_map, dtype=np.uint8)
      cv2.fillPoly(mask, box[:8].reshape((-1, 4, 2)).astype(np.int32) // 4, 1)
      boxes[i, 8] = cv2.mean(score_map, mask)[0]
    boxes = boxes[boxes[:, 8] > box_thresh]

    return boxes 
Developer: ucloud, Project: uai-sdk, Lines: 40, Source file: east_inference.py

Example 12: detect

# Required module: import cv2 [as alias]
# Or: from cv2 import mean [as alias]
def detect(score_map, geo_map, score_map_thresh=0.8, box_thresh=0.1, nms_thres=0.2):
    '''
    restore text boxes from score map and geo map
    :param score_map:
    :param geo_map:
    :param score_map_thresh: threshold for score map
    :param box_thresh: threshold for boxes
    :param nms_thres: threshold for nms
    :return:
    '''

    if len(score_map.shape) == 4:
        score_map = score_map[0, :, :, 0]
        geo_map = geo_map[0, :, :, ]

    # filter the score map
    xy_text = np.argwhere(score_map > score_map_thresh)
    # sort the text boxes via the y axis
    xy_text = xy_text[np.argsort(xy_text[:, 0])]
    # restore

    text_box_restored = restore_rectangle(xy_text[:, ::-1]*4, geo_map[xy_text[:, 0], xy_text[:, 1], :]) # N*4*2
    print('{} text boxes before nms'.format(text_box_restored.shape[0]))
    boxes = np.zeros((text_box_restored.shape[0], 9), dtype=np.float32)
    boxes[:, :8] = text_box_restored.reshape((-1, 8))
    boxes[:, 8] = score_map[xy_text[:, 0], xy_text[:, 1]]
    # boxes = nms_locality.nms_locality(boxes.astype(np.float64), nms_thres)
    boxes = lanms.merge_quadrangle_n9(boxes.astype('float32'), nms_thres)

    if boxes.shape[0] == 0:
        return None

    # here we filter some low score boxes by the average score map, this is different from the original paper
    for i, box in enumerate(boxes):
        mask = np.zeros_like(score_map, dtype=np.uint8)
        cv2.fillPoly(mask, box[:8].reshape((-1, 4, 2)).astype(np.int32) // 4, 1)
        boxes[i, 8] = cv2.mean(score_map, mask)[0]
    boxes = boxes[boxes[:, 8] > box_thresh]

    return boxes 
Developer: ucloud, Project: uai-sdk, Lines: 42, Source file: east_multi_infer.py

Example 13: remove_points_with_big_reproj_err

# Required module: import cv2 [as alias]
# Or: from cv2 import mean [as alias]
def remove_points_with_big_reproj_err(self, points): 
        with self._lock:             
            with self.update_lock: 
                #print('map points: ', sorted([p.id for p in self.points]))
                #print('points: ', sorted([p.id for p in points]))           
                culled_pt_count = 0
                for p in points:
                    # compute reprojection error
                    chi2s = []
                    for f, idx in p.observations():
                        uv = f.kpsu[idx]
                        proj,_ = f.project_map_point(p)
                        invSigma2 = Frame.feature_manager.inv_level_sigmas2[f.octaves[idx]]
                        err = (proj-uv)
                        chi2s.append(np.inner(err,err)*invSigma2)
                    # cull
                    mean_chi2 = np.mean(chi2s)
                    if mean_chi2 > Parameters.kChi2Mono:  # chi-square 2 DOFs  (Hartley Zisserman pg 119)
                        culled_pt_count += 1
                        #print('removing point: ',p.id, 'from frames: ', [f.id for f in p.keyframes])
                        self.remove_point(p)
                Printer.blue("# culled map points: ", culled_pt_count)        


    # BA considering all keyframes: 
    # - local keyframes are adjusted, 
    # - other keyframes are fixed
    # - all points are adjusted 
Developer: luigifreda, Project: pyslam, Lines: 30, Source file: map.py

Example 14: detectRaidBossTimer

# Required module: import cv2 [as alias]
# Or: from cv2 import mean [as alias]
def detectRaidBossTimer(self, time_img, scale):
        text = ''
        if int(time_img.mean()) > 240:
            return text
        time_img = cv2.resize(time_img, None, fx=1.0/scale, fy=1.0/scale, interpolation=cv2.INTER_CUBIC)
        cv2.imwrite(self.timefile,time_img)
        text = pytesseract.image_to_string(Image.open(self.timefile),config='-c tessedit_char_whitelist=1234567890: -psm 7')
        return text 
Developer: mzsmakr, Project: PGSS, Lines: 10, Source file: raidnearby.py

Example 15: detectEgg

# Required module: import cv2 [as alias]
# Or: from cv2 import mean [as alias]
def detectEgg(self, time_img):
        img_gray = cv2.cvtColor(time_img, cv2.COLOR_BGR2GRAY)
        ret, thresh1 = cv2.threshold(img_gray, 220, 255, cv2.THRESH_BINARY_INV)
        kernel = np.ones((2, 2), np.uint8)
        thresh1 = cv2.erode(thresh1, kernel, iterations=1)
        time_mean = cv2.mean(time_img, thresh1)
        if time_mean[2] > (time_mean[0]+50): # Red is greater than Blue+50
            return False, thresh1
        else:
            return True, thresh1 
Developer: mzsmakr, Project: PGSS, Lines: 12, Source file: raidnearby.py


Note: The cv2.mean examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets come from open-source projects contributed by their respective authors; copyright remains with the original authors, and you should consult each project's license before redistributing or reusing the code. Do not reproduce this article without permission.