

Python cv2.error Code Examples

This article collects typical usage examples of cv2.error in Python. Strictly speaking, cv2.error is not a method but the exception class that OpenCV's Python bindings raise when a native OpenCV call fails. If you are wondering what cv2.error is, how to catch it, or what real code that handles it looks like, the curated examples below may help. You can also explore further usage examples from the cv2 module.


The following shows 15 code examples of cv2.error, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
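All 15 examples below share the same pattern: cv2.error is raised when a native OpenCV call receives invalid input (for example, an image with the wrong number of channels), and it is handled with an ordinary try/except block. As a quick, minimal sketch of that pattern (illustrative only, not taken from the collected projects):

import cv2
import numpy as np

# A single-channel (already grayscale) dummy image.
already_gray = np.zeros((4, 4), dtype=np.uint8)

try:
    # COLOR_BGR2GRAY expects a 3-channel input, so this call fails
    # inside the native OpenCV layer and raises cv2.error.
    cv2.cvtColor(already_gray, cv2.COLOR_BGR2GRAY)
except cv2.error as e:
    print('caught cv2.error:', e)

Note that cv2.error is a subclass of the built-in Exception, so a bare except Exception would also catch it; catching cv2.error specifically, as the examples below do, keeps unrelated errors visible.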

Example 1: JpegString

# Required import: import cv2 [as alias]
# Or: from cv2 import error [as alias]
def JpegString(image, jpeg_quality=90):
  """Returns given PIL.Image instance as jpeg string.

  Args:
    image: A PIL image.
    jpeg_quality: The image quality, on a scale from 1 (worst) to 95 (best).

  Returns:
    a jpeg_string.
  """
  # This fix to PIL makes sure that we don't get an error when saving large
  # jpeg files. This is a workaround for a bug in PIL. The value should be
  # substantially larger than the size of the image being saved.
  ImageFile.MAXBLOCK = 640 * 512 * 64

  output_jpeg = StringIO()
  image.save(output_jpeg, 'jpeg', quality=jpeg_quality, optimize=True)
  return output_jpeg.getvalue() 
Author: rky0930, Project: yolo_v2, Lines: 20, Source: videos_to_tfrecords.py

Example 2: create

# Required import: import cv2 [as alias]
# Or: from cv2 import error [as alias]
def create(self, res_file, show=True):
        img = cv2.imread(self._img_path)

        for thug in self._thugs:
            if thug.eyes_available:
                try:
                    self._draw_glasses(img, thug)
                except ThugError as e:
                    logger.error(e)

                if thug.mouth_available:
                    try:
                        self._draw_cigar(img, thug)  # depends also on eyes
                    except ThugError as e:
                        logger.error(e)

        cv2.imwrite(res_file, img)
        self._img_path = res_file

        return super().create(res_file, show) 
Author: jerry-git, Project: thug-memes, Lines: 22, Source: thug.py

Example 3: _draw_on_top

# Required import: import cv2 [as alias]
# Or: from cv2 import error [as alias]
def _draw_on_top(self, img, x, y, sub_img, sub_name=''):
        h, w, _ = sub_img.shape
        mask = sub_img[:, :, 3]
        mask_inv = cv2.bitwise_not(mask)
        sub_img_ = sub_img[:, :, :3]

        background = img[y:y + h, x:x + w]
        try:
            background = cv2.bitwise_and(background, background, mask=mask_inv)
        except cv2.error as e:
            raise ThugError(
                'Can not draw {}, please try with smaller {}.'.format(
                    sub_name, sub_name))
        foreground = cv2.bitwise_and(sub_img_, sub_img_, mask=mask)
        sum_ = cv2.add(background, foreground)

        img[y:y + h, x:x + w] = sum_ 
Author: jerry-git, Project: thug-memes, Lines: 19, Source: thug.py

Example 4: __data_generation

# Required import: import cv2 [as alias]
# Or: from cv2 import error [as alias]
def __data_generation(self, item_ids_temp):
        'Generates data containing batch_size samples' 
        # Initialization
        X = np.empty((len(item_ids_temp), *self.dim, self.n_channels))

        # Generate data
        for i, item_id in enumerate(item_ids_temp):
            image_id = self.image_ids[item_id]
           
            fname = f'{self.dir}/{image_id}.jpg'
            if os.path.isfile(fname):
                img = cv2.imread(fname)
                try:
                    img = cv2.resize(img, self.dim, interpolation = cv2.INTER_LINEAR)
                except cv2.error as e:
                    img = np.zeros([*self.dim, self.n_channels])
            else:
                img = np.zeros([*self.dim, self.n_channels])

            X[i,] = img

        return X
Author: khuangaf, Project: Kaggle-Avito-NN, Lines: 26, Source: ImageDataGenerator.py

Example 5: does_page_have_valid_table

# Required import: import cv2 [as alias]
# Or: from cv2 import error [as alias]
def does_page_have_valid_table(self, min_fract_area=.2, min_cells=50):
        """
        Analyzes whether the image contains a table by evaluating the
        coarse table outline and its children
        """
        try: # Some CV2 operations may fail e.g. if no correct supernode has been recognized
            # Check fractional area of table compared to image
            img_area = self.imgshape[0] * self.imgshape[1]
            supernode_area = cv2.contourArea(self.supernode_bbox)
            if supernode_area < img_area * min_fract_area:
                return False
            # Check minimum number of cells (ncells = degree of coarse outline node)
            ncells = self.g.degree(self.supernode_idx)
            return ncells >= min_cells
        except cv2.error:
            return False 
Author: ulikoehler, Project: OTR, Lines: 18, Source: TableRecognition.py

Example 6: run_farneback

# Required import: import cv2 [as alias]
# Or: from cv2 import error [as alias]
def run_farneback(frames):
    try:
        return cv2.calcOpticalFlowFarneback(
            frames[0], frames[1],
            # options, defaults
            None,  # output
            0.5,  # pyr_scale, 0.5
            10,  # levels, 3
            min(frames[0].shape[:2]) // 5,  # winsize, 15
            10,  # iterations, 3
            7,  # poly_n, 5
            1.5,  # poly_sigma, 1.2
            cv2.OPTFLOW_FARNEBACK_GAUSSIAN,  # flags, 0
        )
    except cv2.error:
        return None 
Author: facebookresearch, Project: DetectAndTrack, Lines: 18, Source: tracking_engine.py

Example 7: load_grey_from_cv2_object

# Required import: import cv2 [as alias]
# Or: from cv2 import error [as alias]
def load_grey_from_cv2_object(pic_object: np.ndarray) -> np.ndarray:
    """ preparation for cv2 object (force turn it into gray) """
    pic_object = pic_object.astype(np.uint8)
    try:
        # try to turn it into grey
        grey_pic = cv2.cvtColor(pic_object, cv2.COLOR_BGR2GRAY)
    except cv2.error:
        # already grey
        return pic_object
    return grey_pic 
Author: williamfzc, Project: findit, Lines: 12, Source: toolbox.py

Example 8: turn_grey

# Required import: import cv2 [as alias]
# Or: from cv2 import error [as alias]
def turn_grey(old: np.ndarray) -> np.ndarray:
    try:
        return cv2.cvtColor(old, cv2.COLOR_RGB2GRAY)
    except cv2.error:
        return old 
Author: williamfzc, Project: findit, Lines: 7, Source: toolbox.py

Example 9: detect_face

# Required import: import cv2 [as alias]
# Or: from cv2 import error [as alias]
def detect_face(data):
    from retinaface.detector import detector
    from utils import align_face

    src_path = data['src_path']
    dst_path = data['dst_path']
    boxB = np.array(data['boxB'])

    img = cv.imread(src_path)
    if img is not None:
        img, ratio = resize(img)
        boxB = boxB * ratio

        try:
            bboxes, landmarks = detector.detect_faces(img)

            if len(bboxes) > 0:
                i = select_face(bboxes, boxB)
                bbox, landms = bboxes[i], landmarks[i]
                img = align_face(img, [landms])
                dirname = os.path.dirname(dst_path)
                os.makedirs(dirname, exist_ok=True)
                cv.imwrite(dst_path, img)
        except ValueError as err:
            print(err)
        except cv.error as err:
            print(err)

    return True 
Author: foamliu, Project: InsightFace-PyTorch, Lines: 31, Source: align_facescrub.py

Example 10: ParallelPreprocessing

# Required import: import cv2 [as alias]
# Or: from cv2 import error [as alias]
def ParallelPreprocessing(args):
  """Parallel preprocessing: rotation, resize and jpeg encoding to string."""
  (vid_path, timestep, num_timesteps, view) = args
  try:
    image = GetSpecificFrame(vid_path, timestep)

    # Resizing.
    resize_str = ''
    if FLAGS.resize_min_edge > 0:
      resize_str += ', resize ' + shapestring(image)
      image = cv2resizeminedge(image, FLAGS.resize_min_edge)
      resize_str += ' => ' + shapestring(image)

    # Rotating.
    rotate = None
    if FLAGS.rotate:
      rotate = FLAGS.rotate
      if FLAGS.rotate_if_matching is not None:
        rotate = None
        patt = re.compile(FLAGS.rotate_if_matching)
        if patt.match(vid_path) is not None:
          rotate = FLAGS.rotate
      if rotate is not None:
        image = cv2rotateimage(image, FLAGS.rotate)

    # Jpeg encoding.
    image = Image.fromarray(image)
    im_string = bytes_feature([JpegString(image)])

    if timestep % FLAGS.log_frequency == 0:
      tf.logging.info('Loaded frame %d / %d for %s (rotation %s%s) from %s' %
                      (timestep, num_timesteps, view, str(rotate), resize_str,
                       vid_path))
    return im_string
  except cv2.error as e:
    tf.logging.error('Error while loading frame %d of %s: %s' %
                     (timestep, vid_path, str(e)))
    return None 
Author: rky0930, Project: yolo_v2, Lines: 40, Source: videos_to_tfrecords.py

Example 11: test_other_dtypes

# Required import: import cv2 [as alias]
# Or: from cv2 import error [as alias]
def test_other_dtypes(self):
        aug = iaa.AllChannelsHistogramEqualization()

        # np.uint16: cv2.error: OpenCV(3.4.5) (...)/histogram.cpp:3345:
        #            error: (-215:Assertion failed)
        #            src.type() == CV_8UC1 in function 'equalizeHist'
        # np.uint32: TypeError: src data type = 6 is not supported
        # np.uint64: see np.uint16
        # np.int8: see np.uint16
        # np.int16: see np.uint16
        # np.int32: see np.uint16
        # np.int64: see np.uint16
        # np.float16: TypeError: src data type = 23 is not supported
        # np.float32: see np.uint16
        # np.float64: see np.uint16
        # np.float128: TypeError: src data type = 13 is not supported
        for dtype in [np.uint8]:
            with self.subTest(dtype=np.dtype(dtype).name):
                min_value, _center_value, max_value = \
                    iadt.get_value_range_of_dtype(dtype)
                dynamic_range = max_value + abs(min_value)
                if np.dtype(dtype).kind == "f":
                    img = np.zeros((16,), dtype=dtype)
                    for i in sm.xrange(16):
                        img[i] = min_value + i * (0.01 * dynamic_range)
                    img = img.reshape((4, 4))
                else:
                    img = np.arange(
                        min_value, min_value + 16, dtype=dtype).reshape((4, 4))
                img_aug = aug.augment_image(img)
                assert img_aug.dtype.name == np.dtype(dtype).name
                assert img_aug.shape == img.shape
                assert np.min(img_aug) < min_value + 0.1 * dynamic_range
                assert np.max(img_aug) > max_value - 0.1 * dynamic_range 
Author: aleju, Project: imgaug, Lines: 36, Source: test_contrast.py

Example 12: read

# Required import: import cv2 [as alias]
# Or: from cv2 import error [as alias]
def read(self):
        """ """
        try:
            ret, frame = self.cam.read()
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        except cv2.error:
            raise cv2.error("OpenCV can't find a camera!")
        if self.bw:
            return np.mean(rgb, 2).astype(rgb.dtype)
        else:
            return rgb 
Author: portugueslab, Project: stytra, Lines: 13, Source: opencv.py

Example 13: show_detection_result

# Required import: import cv2 [as alias]
# Or: from cv2 import error [as alias]
def show_detection_result(data, label, bboxes, cls_scores, class_name_list):
    data = data[0].as_in_context(mx.cpu(0))
    data[0] = data[0] * 0.229 + 0.485
    data[1] = data[1] * 0.224 + 0.456
    data[2] = data[2] * 0.225 + 0.406
    label = label[0].asnumpy()
    img = data.asnumpy()
    img = np.array(np.round(img * 255), dtype=np.uint8)
    img = np.transpose(img, (1, 2, 0))
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    bboxes = bboxes.asnumpy()
    cls_scores = cls_scores.asnumpy()

    # Show ground truth
    for item in label:
        cv2.rectangle(img, (int(item[0]), int(item[1])), (int(item[2]), int(item[3])), color=(255, 0, 0), thickness=2)
        cv2.putText(img, class_name_list[int(item[4])], (int(item[0]), int(item[3])),0, 0.5,(0, 255, 0))

    # NMS by class
    for cls_id in range(1, len(class_name_list)):
        cur_scores = cls_scores[:, cls_id]
        bboxes_pick = bboxes[:, cls_id * 4: (cls_id+1)*4]
        cur_scores, bboxes_pick = nms(cur_scores, bboxes_pick, cfg.rcnn_nms_thresh)
        for i in range(len(cur_scores)):
            if cur_scores[i] >= cfg.rcnn_score_thresh:
                bbox = bboxes_pick[i]
                cv2.rectangle(img, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color=(0, 0, 255), thickness=1)
                cv2.putText(img, "{}: {:.4}".format(class_name_list[cls_id], cur_scores[i]), (int(bbox[0]), int(bbox[3])),0, 0.5,(255, 255, 0))
    try:
        cv2.imshow("Img", img)
        cv2.waitKey(0)
    except cv2.error:
        cv2.imwrite("det_result.jpg", img)
        print("imshow() is not supported! Saved result to det_result.jpg.")
        input() 
Author: linmx0130, Project: ya_mxdet, Lines: 37, Source: vis_tool.py

Example 14: call_tracker_constructor

# Required import: import cv2 [as alias]
# Or: from cv2 import error [as alias]
def call_tracker_constructor(self, tracker_type):
        if tracker_type == 'DASIAMRPN':
            tracker = dasiamrpn()
        else:
            # -- TODO: remove this if I assume OpenCV version > 3.4.0
            if int(self.major_ver) == 3 and int(self.minor_ver) < 3:
                #tracker = cv2.Tracker_create(tracker_type)
                pass
            # --
            else:
                try:
                    tracker = cv2.TrackerKCF_create()
                except AttributeError as error:
                    print(error)
                    print('\nMake sure that OpenCV contribute is installed: opencv-contrib-python\n')
                if tracker_type == 'CSRT':
                    tracker = cv2.TrackerCSRT_create()
                elif tracker_type == 'KCF':
                    tracker = cv2.TrackerKCF_create()
                elif tracker_type == 'MOSSE':
                    tracker = cv2.TrackerMOSSE_create()
                elif tracker_type == 'MIL':
                    tracker = cv2.TrackerMIL_create()
                elif tracker_type == 'BOOSTING':
                    tracker = cv2.TrackerBoosting_create()
                elif tracker_type == 'MEDIANFLOW':
                    tracker = cv2.TrackerMedianFlow_create()
                elif tracker_type == 'TLD':
                    tracker = cv2.TrackerTLD_create()
                elif tracker_type == 'GOTURN':
                    tracker = cv2.TrackerGOTURN_create()
        return tracker 
Author: Cartucho, Project: OpenLabeling, Lines: 34, Source: main.py

Example 15: __region_mask__

# Required import: import cv2 [as alias]
# Or: from cv2 import error [as alias]
def __region_mask__(self,reference_image,horizontal_grid,vertical_grid):
        """
        use the first and last horizontal/vertical grid lines to make a mask around the desired region/table
        :return:
        """
        reference_shape = reference_image.shape
        # [:2] in case we read in the image in colour format - doesn't seem necessary to throw an error
        # the first mask will be an outline of the region, sort of like #. The second mask will fill in the
        # central interior box
        mask = np.zeros(reference_shape[:2],np.uint8)
        mask2 = np.zeros(mask.shape,np.uint8)
        # draw the first and last horizontal/vertical grid lines to create a box
        cv2.drawContours(mask,horizontal_grid,0,255,-1)
        cv2.drawContours(mask,horizontal_grid,len(horizontal_grid)-2,255,-1)
        cv2.drawContours(mask,vertical_grid,0,255,-1)
        cv2.drawContours(mask,vertical_grid,len(vertical_grid)-1,255,-1)

        # find the (hopefully) one interior contour - should be our mask
        _,contours, hier = cv2.findContours(mask.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

        assert len(contours) == 1
        for c,h in zip(contours,hier[0]):
            if h[-1] == -1:
                continue

            cv2.drawContours(mask2,[c],0,255,-1)

        return mask2 
Author: zooniverse, Project: aggregation, Lines: 30, Source: active_weather.py


Note: The cv2.error examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Please consult the corresponding project's License before redistributing or using the code; do not reproduce without permission.