

Python mtcnn.MTCNN Attribute Code Examples

This article collects typical usage examples of the mtcnn.mtcnn.MTCNN attribute in Python. If you are unsure how to use mtcnn.MTCNN or what it is used for, the selected code examples below may help. You can also explore further usage examples from the mtcnn.mtcnn module in which this attribute lives.


The following presents 4 code examples of the mtcnn.MTCNN attribute, sorted by popularity by default.

Example 1: __init__

# Required import: from mtcnn import mtcnn [as alias]
# Or alternatively: from mtcnn.mtcnn import MTCNN [as alias]
def __init__(self):
        self.detector = MTCNN() 
Developer: aangfanboy, Project: TripletLossFace, Lines of code: 4, Source file: main_data_creator.py
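
For context, here is a minimal usage sketch of a detector created this way. The image file name and the printed fields are illustrative assumptions, not part of the original project; detect_faces() is the standard API of the mtcnn package and returns a list of dicts with 'box', 'confidence' and 'keypoints'.

# Minimal usage sketch (hypothetical image file; not from TripletLossFace).
import cv2
from mtcnn.mtcnn import MTCNN

detector = MTCNN()

# MTCNN expects an RGB image; OpenCV loads BGR, so convert first.
img = cv2.cvtColor(cv2.imread('example.jpg'), cv2.COLOR_BGR2RGB)

# Each result describes one detected face.
for face in detector.detect_faces(img):
    x, y, w, h = face['box']
    print(face['confidence'], (x, y, w, h), sorted(face['keypoints']))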

Example 2: frame_set_select_extract

# Required import: from mtcnn import mtcnn [as alias]
# Or alternatively: from mtcnn.mtcnn import MTCNN [as alias]
def frame_set_select_extract(self, frame_set_id, opts):
        """
        This method extracts faces from each frame in a frame set.

        :param int frame_set_id: The frame set ID.
        :param dict opts: The options dict; if it contains the key 'debug',
            landmark markers are drawn on each extracted face.
        :rtype: int
        """

        detector = MTCNN()
        offset_percent = 0.2
        min_confidence = 0.9
        debug_ = True if 'debug' in opts else False

        frame_set_path = FrameSetSubDir.path(frame_set_id)

        transform_set_id = TransformSetModel().insert(self.name, frame_set_id)
        transform_set_path = TransformSetSubDir.path(transform_set_id)
        os.makedirs(transform_set_path)

        length = int(os.environ.get('MODEL_LIST_LENGTH', '100'))
        offset = 0

        while True:
            result = FrameModel().list(frame_set_id, length=length,
                                       offset=offset, rejected=False)

            if not result:
                break

            for frame_id, _, rejected in result:
                self._extract_faces(frame_set_path, frame_id,
                                    transform_set_path, transform_set_id,
                                    detector, offset_percent, min_confidence,
                                    debug_)

            offset += length

        return transform_set_id 
Developer: zerofox-oss, Project: deepstar, Lines of code: 41, Source file: mtcnn_frame_set_select_extract_plugin.py
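
The method above pages through the frame set in batches of MODEL_LIST_LENGTH instead of loading every frame at once. The same pagination pattern as a standalone sketch, with a hypothetical fetch_batch() standing in for FrameModel().list():

import os

def fetch_batch(offset, length):
    # Hypothetical stand-in for FrameModel().list(); returns [] when exhausted.
    data = list(range(250))  # pretend these are frame IDs
    return data[offset:offset + length]

length = int(os.environ.get('MODEL_LIST_LENGTH', '100'))
offset = 0

while True:
    batch = fetch_batch(offset, length)
    if not batch:
        break
    for frame_id in batch:
        pass  # process each frame here, e.g. extract its faces
    offset += length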

Example 3: __init__

# Required import: from mtcnn import mtcnn [as alias]
# Or alternatively: from mtcnn.mtcnn import MTCNN [as alias]
def __init__(self):
        self.face_detector = MTCNN()
        model_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            'trained_detectors',
            'mesonet.hdf5')
        self.mesonet = load_model(model_path) 
Developer: zerofox-oss, Project: deepstar, Lines of code: 9, Source file: mesonet_video_select_detect_plugin.py
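
A hedged sketch of how the two models initialized here might be combined at inference time: MTCNN locates a face, the crop is resized and passed to MesoNet (load_model is presumably Keras's). The 256x256 input size and the 0-1 scaling are assumptions about this particular mesonet.hdf5, not something stated in the snippet.

import cv2
import numpy as np

def score_frame(img_bgr, face_detector, mesonet):
    # Detect the first face in a BGR frame and return MesoNet's prediction.
    # Assumes the model expects a 256x256 RGB input scaled to [0, 1].
    img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
    results = face_detector.detect_faces(img_rgb)
    if not results:
        return None
    x, y, w, h = results[0]['box']
    crop = img_rgb[max(0, y):y + h, max(0, x):x + w]
    crop = cv2.resize(crop, (256, 256)).astype('float32') / 255.0
    return mesonet.predict(np.expand_dims(crop, axis=0))[0]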

Example 4: _extract_faces

# Required import: from mtcnn import mtcnn [as alias]
# Or alternatively: from mtcnn.mtcnn import MTCNN [as alias]
def _extract_faces(self, frame_set_path, frame_id, transform_set_path,
                       transform_set_id, detector, offset_percent,
                       min_confidence, debug_):
        """
        This method extracts faces from a frame.

        :param str frame_set_path: The frame set path.
        :param int frame_id: The frame ID.
        :param str transform_set_path: The transform set path.
        :param int transform_set_id: The transform set ID.
        :param MTCNN detector: The detector to use to detect faces.
        :param float offset_percent: The fraction of the box width/height by
            which to pad the detected face box (half on each side).
        :param float min_confidence: The minimum confidence required to accept
            a detected face.
        :param bool debug_: True to draw markers on the detected facial
            landmarks in the output crop, else False.
        :rtype: None
        """

        frame_path = FrameFile.path(frame_set_path, frame_id, 'jpg')
        img = cv2.imread(frame_path)
        img_height, img_width = img.shape[:2]

        results = detector.detect_faces(img)
        for r in results:
            if r['confidence'] < min_confidence:
                continue

            x, y, width, height = r['box']

            adjusted_x = int(max(0, x - (0.5 * width * offset_percent)))
            adjusted_y = int(max(0, y - (0.5 * height * offset_percent)))
            t = x + width + (0.5 * width * offset_percent)
            adjusted_right_x = int(min(img_width, t))
            t = y + height + (0.5 * height * offset_percent)
            adjusted_bottom_y = int(min(img_height, t))

            metadata = {'face': {k: [v[0] - adjusted_x, v[1] - adjusted_y]
                                 for k, v in r['keypoints'].items()}}

            transform_id = TransformModel().insert(transform_set_id, frame_id,
                                                   json.dumps(metadata), 0)

            face_crop = img[adjusted_y:adjusted_bottom_y,
                            adjusted_x:adjusted_right_x]
            output_path = TransformFile.path(transform_set_path, transform_id,
                                             'jpg')

            if debug_ is True:
                for _, v in metadata['face'].items():
                    cv2.drawMarker(face_crop, tuple(v), (0, 0, 255),
                                   markerType=cv2.MARKER_DIAMOND,
                                   markerSize=15, thickness=2)

            cv2.imwrite(output_path, face_crop,
                        [cv2.IMWRITE_JPEG_QUALITY, 100])

            debug(f'Transform with ID {transform_id:08d} at {output_path} '
                  f'extracted from frame with ID {frame_id:08d} at '
                  f'{frame_path}', 4) 
Developer: zerofox-oss, Project: deepstar, Lines of code: 62, Source file: mtcnn_frame_set_select_extract_plugin.py
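
The box arithmetic above pads each detected face by half of offset_percent on every side and clamps the result to the image bounds. The same logic as a standalone helper, extracted here purely for clarity (not part of the plugin):

def pad_and_clamp_box(x, y, width, height, img_width, img_height,
                      offset_percent=0.2):
    # Expand an (x, y, width, height) box by offset_percent, half per side,
    # and clamp it to the image, mirroring _extract_faces above.
    pad_x = 0.5 * width * offset_percent
    pad_y = 0.5 * height * offset_percent
    left = int(max(0, x - pad_x))
    top = int(max(0, y - pad_y))
    right = int(min(img_width, x + width + pad_x))
    bottom = int(min(img_height, y + height + pad_y))
    return left, top, right, bottom

# Example: a 100x100 face at (10, 20) in a 640x480 image.
print(pad_and_clamp_box(10, 20, 100, 100, 640, 480))  # -> (0, 10, 120, 130)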


Note: The mtcnn.mtcnn.MTCNN attribute examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution or use should follow each project's License. Do not reproduce this article without permission.