

Python imutils.grab_contours Method Code Examples

This article collects typical usage examples of the imutils.grab_contours method in Python. If you are wondering what imutils.grab_contours does, how to call it, or want to see it used in real code, the curated examples below should help. You can also explore further usage examples from the imutils package itself.


Five code examples of the imutils.grab_contours method are shown below, ordered by popularity.
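Before the individual examples, here is a minimal sketch of the pattern they all share. cv2.findContours returns a 2-tuple in OpenCV 2.4 and 4.x but a 3-tuple in OpenCV 3.x; imutils.grab_contours extracts the contour list from either form, so code written this way runs unchanged across those versions. The input file name below is a placeholder.

import cv2
import imutils

image = cv2.imread("shapes.png")  # placeholder path
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)[1]
# findContours returns a version-dependent tuple...
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# ...and grab_contours pulls the contour list out of it, whatever the version
cnts = imutils.grab_contours(cnts)
print("found %d contours" % len(cnts))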

Example 1: prediction

# Required module: import imutils [as alias]
# Or: from imutils import grab_contours [as alias]
def prediction(self, image):
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image = cv2.GaussianBlur(image, (21, 21), 0)
        if self.avg is None:
            self.avg = image.copy().astype(float)
        cv2.accumulateWeighted(image, self.avg, 0.5)
        frameDelta = cv2.absdiff(image, cv2.convertScaleAbs(self.avg))
        # DELTA_THRESH is a module-level threshold constant defined elsewhere
        # in motion.py
        thresh = cv2.threshold(
                frameDelta, DELTA_THRESH, 255,
                cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts = cv2.findContours(
                thresh.copy(), cv2.RETR_EXTERNAL,
                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        self.avg = image.copy().astype(float)  # reset the model to the current frame
        return cnts 
Author: cristianpb | Project: object-detection | Lines: 19 | Source file: motion.py
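A hedged usage sketch for Example 1: the MotionDetector wrapper class, the DELTA_THRESH value, and the webcam index are illustrative assumptions, not part of the original object-detection project.

import cv2
import imutils

DELTA_THRESH = 5  # assumed value; motion.py defines this constant elsewhere

class MotionDetector:
    def __init__(self):
        self.avg = None

MotionDetector.prediction = prediction  # attach the method shown above

cap = cv2.VideoCapture(0)  # default webcam; an assumption
detector = MotionDetector()
ok, frame = cap.read()
if ok:
    detector.prediction(frame)             # first call seeds the background model
    ok, frame = cap.read()
    if ok:
        cnts = detector.prediction(frame)  # later calls return motion contours
        print("%d motion contour(s)" % len(cnts))
cap.release()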

Example 2: compare_ssim_debug

# Required module: import imutils [as alias]
# Or: from imutils import grab_contours [as alias]
def compare_ssim_debug(image_a, image_b, color=(255, 0, 0)):
    """
    Args:
        image_a, image_b: opencv image or PIL.Image
        color: (r, g, b), e.g. (255, 0, 0) for red

    Refs:
        https://www.pyimagesearch.com/2017/06/19/image-difference-with-opencv-and-python/
    """
    ima, imb = conv2cv(image_a), conv2cv(image_b)  # conv2cv: PIL -> OpenCV helper defined in this module
    score, diff = compare_ssim(ima, imb, full=True)  # full=True also returns the similarity map
    diff = (diff * 255).astype('uint8')
    _, thresh = cv2.threshold(diff, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)

    cv2color = tuple(reversed(color))  # RGB -> BGR for OpenCV drawing
    im = ima.copy()
    for c in cnts:
        x, y, w, h = cv2.boundingRect(c)
        cv2.rectangle(im, (x, y), (x+w, y+h), cv2color, 2)
    # todo: show image
    cv2pil(im).show()
    return im 
Author: openatx | Project: uiautomator2 | Lines: 26 | Source file: image.py
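conv2cv and cv2pil are helper functions defined elsewhere in uiautomator2's image.py, and compare_ssim is scikit-image's structural-similarity function. A hedged sketch of the imports this snippet plausibly relies on (inferred from the names, not confirmed from the module), followed by a call with placeholder file names:

import cv2
import imutils
from PIL import Image

try:
    # scikit-image >= 0.16 moved and renamed compare_ssim
    from skimage.metrics import structural_similarity as compare_ssim
except ImportError:
    from skimage.measure import compare_ssim  # older scikit-image

before = Image.open("before.png")  # placeholder paths
after = Image.open("after.png")
annotated = compare_ssim_debug(before, after, color=(0, 255, 0))

Note that recent scikit-image releases require a channel_axis (formerly multichannel) argument for color inputs, so conv2cv presumably yields grayscale images or the module pins an older scikit-image.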

Example 3: invoke_tf_hub_model

# Required module: import imutils [as alias]
# Or: from imutils import grab_contours [as alias]
def invoke_tf_hub_model(image_path, repo, model_name):
        # numpy (np), cv2 and imutils are assumed to be imported at module level
        from collections import OrderedDict
        import json
        from PIL import Image
        from torchvision import transforms
        import torch
        gpu_id = 0
        device = torch.device("cuda:" + str(gpu_id) if torch.cuda.is_available() else "cpu")
        model = torch.hub.load(repo, model_name, pretrained=True)
        model.eval()
        input_image = Image.open(image_path)
        preprocess = transforms.Compose([
            transforms.Resize(480),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
        input_tensor = preprocess(input_image)
        input_batch = input_tensor.unsqueeze(0)  # create a mini-batch as expected by the model
        # move the input and the model to the GPU for speed, if available
        if torch.cuda.is_available():
            input_batch = input_batch.to(device)
            model.to(device)
        with torch.no_grad():
            output = model(input_batch)
        if isinstance(output, OrderedDict):
            output = output["out"][0]
            predictions_tensor = output.argmax(0)
            # move predictions to the cpu and convert into a numpy array
            predictions_arr: np.ndarray = predictions_tensor.byte().cpu().numpy()
            classes_ids = np.unique(predictions_arr).tolist()
            classes_idx = list(filter(lambda x: x != 0, classes_ids))  # ignore the 0 (background) value
            predictions_arr = Image.fromarray(predictions_arr).resize(input_image.size)
            predictions_arr = np.asarray(predictions_arr)
            predicted_mask = {c: [] for c in classes_idx}

            for idx in classes_idx:
                class_mask = np.zeros(predictions_arr.shape, dtype=np.uint8)
                class_mask[np.where(predictions_arr == idx)] = 255
                contour_list = cv2.findContours(class_mask.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
                contour_list = imutils.grab_contours(contour_list)
                for contour in contour_list:
                    points = np.vstack(contour).squeeze().tolist()
                    predicted_mask[idx].append(points)
            return "mask", predicted_mask
        else:
            with open("./data/imagenet_class_index.json") as f:
                class_map = json.load(f)
            _, argmax = output.data.squeeze().max(0)
            class_id = argmax.item()
            predicted_label = class_map[str(class_id)]
            return "label", predicted_label

        return None 
Author: haruiz | Project: CvStudio | Lines: 53 | Source file: image_viewer_widget.py
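The function dispatches on the output type: torchvision's hub segmentation models return an OrderedDict with an "out" entry, while classifiers return a plain tensor. A hedged usage sketch using torchvision's published hub names; the image path is a placeholder, and the classification branch additionally expects ./data/imagenet_class_index.json to exist.

kind, result = invoke_tf_hub_model(
    "samples/dog.jpg",         # placeholder path
    "pytorch/vision:v0.10.0",  # torchvision hub repo tag
    "deeplabv3_resnet50",      # segmentation model -> ("mask", {class_id: contours})
)
if kind == "mask":
    print("classes detected:", list(result.keys()))

kind, label = invoke_tf_hub_model(
    "samples/dog.jpg",
    "pytorch/vision:v0.10.0",
    "resnet18",                # classifier -> ("label", [wordnet_id, class_name])
)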

Example 4: watchDog

# Required module: import imutils [as alias]
# Or: from imutils import grab_contours [as alias]
# This snippet also relies on cv2, datetime, and the robot-specific led and
# switch modules from the Adeept_RaspTank project.
def watchDog(self, imgInput):
        timestamp = datetime.datetime.now()
        gray = cv2.cvtColor(imgInput, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        if self.avg is None:
            print("[INFO] starting background model...")
            self.avg = gray.copy().astype("float")
            return 'background model'

        cv2.accumulateWeighted(gray, self.avg, 0.5)
        self.frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(self.avg))

        # threshold the delta image, dilate the thresholded image to fill
        # in holes, then find contours on thresholded image
        self.thresh = cv2.threshold(self.frameDelta, 5, 255,
            cv2.THRESH_BINARY)[1]
        self.thresh = cv2.dilate(self.thresh, None, iterations=2)
        self.cnts = cv2.findContours(self.thresh.copy(), cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE)
        self.cnts = imutils.grab_contours(self.cnts)
        # loop over the contours
        for c in self.cnts:
            # if the contour is too small, ignore it
            if cv2.contourArea(c) < 5000:
                continue
     
            # compute the bounding box for the contour, draw it on the frame,
            # and update the text
            (self.mov_x, self.mov_y, self.mov_w, self.mov_h) = cv2.boundingRect(c)
            self.drawing = 1
            
            self.motionCounter += 1
            self.lastMovtionCaptured = timestamp
            led.setColor(255, 78, 0)  # orange while motion is detected
            # switch.switch(1,1)
            # switch.switch(2,1)
            # switch.switch(3,1)

        if (timestamp - self.lastMovtionCaptured).total_seconds() >= 0.5:
            led.setColor(0, 78, 255)  # back to blue once motion has stopped
            self.drawing = 0
            # switch.switch(1,0)
            # switch.switch(2,0)
            # switch.switch(3,0)
        self.pause() 
Author: adeept | Project: Adeept_RaspTank | Lines: 51 | Source file: camera_opencv.py
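One detail worth noting: the cooldown check above uses total_seconds() because a timedelta's seconds attribute is a truncated integer component, so a sub-second gap would compare as 0. A quick self-contained illustration:

import datetime

delta = datetime.timedelta(milliseconds=700)
print(delta.seconds)          # 0 -- only the whole-seconds component
print(delta.total_seconds())  # 0.7 -- the full duration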

Example 5: dewarp_book

# Required module: import imutils [as alias]
# Or: from imutils import grab_contours [as alias]
# Also used: cv2, numpy (np), and four_point_transform from imutils.perspective
def dewarp_book(image):
    """Fix an image warp (dewarp an image).

    Parameters
    ----------
    image : numpy ndarray
        The input image.

    Returns
    -------
    numpy ndarray
        The dewarped image.

    """
    # get the input image ratio to keep the best output resolution quality
    ratio = image.shape[0] / 500.0
    # copy source image for filter operations
    orig = image.copy()
    # resize the input image
    image = imutils.resize(image, height=500)

    # convert rgb input image to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)

    # sigma parameter is used for automatic Canny edge detection
    sigma = 0.33

    # compute the median of the single-channel (grayscale) pixel intensities
    v = np.median(gray)

    # apply automatic Canny edge detection using the computed median
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    edged = cv2.Canny(gray, lower, upper)

    # an optional dilate morphological filter could connect the image pixel
    # points before contour detection:
    # kernel = np.ones((5, 5), np.uint8)
    # edged = cv2.dilate(edged, kernel, iterations=1)

    # find contours
    cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]
    # loop over the contours, keeping the first 4-point approximation
    screenCnt = None
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)

        if len(approx) == 4:
            screenCnt = approx
            break
    if screenCnt is None:
        raise ValueError("could not find a 4-point book contour to dewarp")
    # apply the four point transform for book dewarping
    warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)
    return warped 
Author: ahmetozlu | Project: signature_extractor | Lines: 58 | Source file: dewapper.py
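A hedged usage sketch for Example 5; the file names are placeholders, and the module-level imports assumed by the function above include four_point_transform from imutils.perspective:

import cv2
import numpy as np
import imutils
from imutils.perspective import four_point_transform

image = cv2.imread("book_page.jpg")  # placeholder path
warped = dewarp_book(image)
cv2.imwrite("book_page_dewarped.jpg", warped)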


Note: the imutils.grab_contours examples in this article were compiled by 纯净天空 (vimsky) from open-source code hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by their developers; copyright remains with the original authors, and use or redistribution should follow each project's license. Please do not reproduce without permission.