

Python face_recognition.face_locations Method Code Examples

This article collects typical usage examples of the face_recognition.face_locations method in Python. If you are unsure what face_recognition.face_locations does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the face_recognition package.


Fifteen code examples of face_recognition.face_locations are shown below, ordered by popularity by default.
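
Before working through the examples, here is a minimal sketch of the basic API: face_recognition.face_locations takes an RGB image as a numpy array and returns a list of (top, right, bottom, left) tuples, one per detected face. The image path below is a placeholder.

import face_recognition

# Load an image file into an RGB numpy array ("photo.jpg" is a placeholder path)
image = face_recognition.load_image_file("photo.jpg")

# Detect faces; model can be "hog" (fast, CPU-only) or "cnn" (more accurate, slower without GPU)
boxes = face_recognition.face_locations(image, number_of_times_to_upsample=1, model="hog")

for top, right, bottom, left in boxes:
    print("Face at top={}, right={}, bottom={}, left={}".format(top, right, bottom, left))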

Example 1: detect_biggest_face

# Required module: import face_recognition [as alias]
# Or: from face_recognition import face_locations [as alias]
def detect_biggest_face(img):
    '''
    Detect the biggest face in an image.
    :param img: cv::mat HxWx3 RGB
    :return: (x, y, w, h) of the biggest face, or None if no face is found
    '''
    # detect faces
    bbs = face_recognition.face_locations(img)

    max_area = float('-inf')
    max_area_i = 0
    for i, (y, right, bottom, x) in enumerate(bbs):
        area = (right - x) * (bottom - y)
        if max_area < area:
            max_area = area
            max_area_i = i

    if max_area != float('-inf'):
        y, right, bottom, x = bbs[max_area_i]
        return x, y, (right - x), (bottom - y)

    return None 
Author: albertpumarola, Project: GANimation, Lines: 24, Source: face_utils.py
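
A hypothetical caller for detect_biggest_face might look like the sketch below. Note that face_recognition expects RGB input, so a frame read with OpenCV must be converted first; the file name is a placeholder.

import cv2
import face_recognition

bgr = cv2.imread("portrait.jpg")            # placeholder path; OpenCV loads in BGR order
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)  # convert to RGB for face_recognition
bb = detect_biggest_face(rgb)
if bb is not None:
    x, y, w, h = bb
    biggest_face = rgb[y:y + h, x:x + w]    # crop the largest detected face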

Example 2: dlib_face_det

# Required module: import face_recognition [as alias]
# Or: from face_recognition import face_locations [as alias]
def dlib_face_det(image):
    # Detect and localize faces using dlib (via face_recognition).
    # Assumes only one face is in image passed.

    # Convert image from BGR (OpenCV ordering) to dlib ordering (RGB).
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Detect the (x, y)-coordinates of the bounding boxes
    # corresponding to each face in the input image.
    # NB: model='cnn' causes OOM.
    boxes = face_recognition.face_locations(rgb,
        number_of_times_to_upsample=2, model='hog')

    if len(boxes) == 0:
        print('*** no face found! ***')
        return None

    # Return bounding box coords in dlib format.
    return boxes 
Author: goruck, Project: edge-tpu-servers, Lines: 21, Source: encode_faces.py
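
Because the boxes returned above are already in dlib's (top, right, bottom, left) format, they can be passed straight to face_recognition.face_encodings, which is what an encoding script would typically do next. This is a sketch under that assumption; `image` stands for the BGR frame passed to dlib_face_det.

import face_recognition

boxes = dlib_face_det(image)
if boxes is not None:
    rgb = image[:, :, ::-1]  # BGR -> RGB, matching the conversion done inside dlib_face_det
    encodings = face_recognition.face_encodings(rgb, known_face_locations=boxes)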

Example 3: find_and_save_face

# Required module: import face_recognition [as alias]
# Or: from face_recognition import face_locations [as alias]
def find_and_save_face(web_file, face_file):
    # Load the jpg file into a numpy array
    image = face_recognition.load_image_file(web_file)
    print(image.dtype)
    # Find all the faces in the image
    face_locations = face_recognition.face_locations(image)

    print("I found {} face(s) in this photograph.".format(len(face_locations)))

    for face_location in face_locations:

        # Print the location of each face in this image
        top, right, bottom, left = face_location
        print("A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}".format(top, left, bottom, right))

        # You can access the actual face itself like this:
        face_image = image[top:bottom, left:right]
        pil_image = Image.fromarray(face_image)
        pil_image.save(face_file) 
Author: fendouai, Project: FaceRank, Lines: 21, Source: find_faces_in_picture.py

Example 4: upload

# Required module: import face_recognition [as alias]
# Or: from face_recognition import face_locations [as alias]
def upload():
    print('tracker start')
    image_path = request.args.get('path').split(os.sep)[1:]
    print(image_path)
    image_path = os.sep.join(image_path)
    image_dir = os.path.dirname(image_path)
    image_name = os.path.basename(image_path)
    print(image_path)
    image = cv2.imread(image_path)
    faces = fr.face_locations(image,
                              number_of_times_to_upsample=0, model="cnn")
    index = 0

    for (top, right, bottom, left) in faces:
        imgFace = image[top:bottom, left:right]
        img_output = cv2.resize(imgFace, (299, 299),
                                interpolation=cv2.INTER_AREA)
        face_path = os.path.join(image_dir, str(index) + image_name)
        index += 1
        cv2.imwrite(face_path, img_output)
    os.remove(image_path)
    print('tracker end')
    return 'true' 
Author: seongahjo, Project: Mosaicer, Lines: 25, Source: web.py

Example 5: face_process

# Required module: import face_recognition [as alias]
# Or: from face_recognition import face_locations [as alias]
def face_process():
    myprint("face process start",time.time())
    # Find all the faces and face encodings in the current frame of video
    # face_locations = face_recognition.face_locations(rgb_small_frame, model="cnn")
    myprint('face_locations start', time.time())
    face_locations = face_recognition.face_locations(rgb_small_frame, model="hog")
    myprint('face_locations end', time.time())
    myprint('face_encodings start', time.time())
    face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
    myprint('face_encodings end', time.time())
    face_names = []
    for face_encoding in face_encodings:
        # optimize start: KNN-style vote, accumulate rank * weight per class, then take the top-1
        name, dis = vote_class(face_encoding)
        # optimize end: rank * weight accumulated per class, then take the top-1
        face_names.append(name)  # record the recognized name for this face

    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        myprint('putText start', time.time())
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        myprint("putText end " + name, time.time())
        # say hello and save record to file
        myprint('process_face_records start', time.time())
        process_face_records(name)
        myprint('process_face_records end', time.time())

    # Display the resulting image
    cv2.imshow('Video', frame)
    myprint("face process end", time.time()) 
Author: matiji66, Project: face-attendance-machine, Lines: 42, Source: facerec_from_webcam_faster.py
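
face_process relies on globals (frame, rgb_small_frame) prepared elsewhere in the capture loop. A sketch of the assumed preprocessing: the frame is downscaled to 1/4 size for speed, which is why the detected coordinates are multiplied by 4 above, and converted from BGR to RGB.

import cv2

video_capture = cv2.VideoCapture(0)
ret, frame = video_capture.read()
# Downscale to 1/4 size for faster detection; face_process scales the boxes back up by 4
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
# Convert BGR (OpenCV) to RGB (face_recognition)
rgb_small_frame = small_frame[:, :, ::-1]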

Example 6: __init__

# Required module: import face_recognition [as alias]
# Or: from face_recognition import face_locations [as alias]
def __init__(self, path, load_first_face=True):
        super().__init__(path)
        self.faces = {}
        self.coordinates = {}  # stores the face (locations center, rotation, length)
        self.last_frame = self.get(0)
        self.frame_shape = self.last_frame.shape[:2]
        self.last_location = (0, 200, 200, 0)
        if (load_first_face):
            face_positions = face_recognition.face_locations(self.last_frame, number_of_times_to_upsample=2)
            if len(face_positions) > 0:
                self.last_location = face_positions[0] 
Author: DariusAf, Project: MesoNet, Lines: 13, Source: pipeline.py

Example 7: constructIndexes

# Required module: import face_recognition [as alias]
# Or: from face_recognition import face_locations [as alias]
def constructIndexes(self, label):
        valid_links = []
        console.section('Analyzing')
        file_name = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
        file_name += '.jpg'
        tmp_path = os.path.join(tempfile.gettempdir(), file_name)
        console.task("Storing Image in {0}".format(tmp_path))
        for num, i in enumerate(self.profile_img):
            console.task('Analyzing {0}...'.format(i.strip()[:90]))
            urlretrieve(i, tmp_path)
            frame = cv2.imread(tmp_path)
            big_frame = cv2.resize(frame, (0, 0), fx=2.0, fy=2.0)
            rgb_small_frame = big_frame[:, :, ::-1]
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations, num_jitters=self.num_jitters)
            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(self.known_face_encodings, face_encoding)
                name = "Unknown"
                # If a match was found in known_face_encodings, just use the first one.
                if True in matches:
                    first_match_index = matches.index(True)
                    name = self.known_face_names[first_match_index]
                face_names.append(name)

            for _, name in zip(face_locations, face_names):
                if name == label:
                    valid_links.append(num)
        if os.path.isfile(tmp_path):
            console.task("Removing {0}".format(tmp_path))
            os.remove(tmp_path)
        return valid_links 
Author: ThoughtfulDev, Project: EagleEye, Lines: 35, Source: face_recog.py

Example 8: detect_faces

# Required module: import face_recognition [as alias]
# Or: from face_recognition import face_locations [as alias]
def detect_faces(frame):
    face_locations = face_recognition.face_locations(frame)
    landmarks = _raw_face_landmarks(frame, face_locations)

    # Pair each bounding box (top, right, bottom, left) with that face's landmarks
    for (y, right, bottom, x), face_landmarks in zip(face_locations, landmarks):
        yield DetectedFace(frame[y: bottom, x: right], x, right - x, y, bottom - y, face_landmarks)

# extract all faces in image 
Author: DerWaldi, Project: youtube-video-face-swap, Lines: 10, Source: face_extractor.py

Example 9: _raw_face_landmarks

# Required module: import face_recognition [as alias]
# Or: from face_recognition import face_locations [as alias]
def _raw_face_landmarks(face_image, face_locations):
    face_locations = [_css_to_rect(face_location) for face_location in face_locations]
    return [pose_predictor(face_image, face_location) for face_location in face_locations] 
Author: DerWaldi, Project: youtube-video-face-swap, Lines: 5, Source: face_extractor.py

Example 10: reset

# Required module: import face_recognition [as alias]
# Or: from face_recognition import face_locations [as alias]
def reset(self):
        self.face_relative_locations = []
        self.face_locations = []
        self.faces = [] 
Author: AIInAi, Project: tf-insightface, Lines: 6, Source: face_track_server.py

Example 11: process

# Required module: import face_recognition [as alias]
# Or: from face_recognition import face_locations [as alias]
def process(self, frame):
        self.reset()
        self.cam_h, self.cam_w, _ = frame.shape
        # Downscale the frame by down_scale_factor for faster face detection
        small_frame = cv2.resize(frame, (0, 0), fx=self.down_scale_factor, fy=self.down_scale_factor)

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]
        self.face_locations = face_recognition.face_locations(rgb_small_frame)
        # Display the results
        for y1_sm, x2_sm, y2_sm, x1_sm in self.face_locations:
            # Scale face locations back up, since detection ran on the downscaled frame
            x1 = int(x1_sm / self.down_scale_factor)
            x2 = int(x2_sm / self.down_scale_factor)
            y1 = int(y1_sm / self.down_scale_factor)
            y2 = int(y2_sm / self.down_scale_factor)

            x1_rltv = x1 / self.cam_w
            x2_rltv = x2 / self.cam_w
            y1_rltv = y1 / self.cam_h
            y2_rltv = y2 / self.cam_h

            _face_area = frame[y1:y2, x1:x2, :]  # numpy images are indexed [row (y), col (x)]
            if _face_area.size == 0:
                continue
            self.faces.append(_face_area)
            self.face_relative_locations.append([x1_rltv, y1_rltv, x2_rltv, y2_rltv])
            # cv2.imshow('faces', frame[y1:y2, x1:x2, :])
            # cv2.waitKey(0)
        print('[FaceTracker Server] Found {} faces!'.format(len(self.faces)))
        return self.faces 
Author: AIInAi, Project: tf-insightface, Lines: 33, Source: face_track_server.py

Example 12: get_face_and_save

# Required module: import face_recognition [as alias]
# Or: from face_recognition import face_locations [as alias]
def get_face_and_save(path):
    image_path = f'{IMAGES_PATH}/{path}'
    image = face_recognition.load_image_file(image_path)
    locations = face_recognition.face_locations(image)
    if len(locations) == 1:  # exactly one face detected: crop and save it
        top, right, bottom, left = locations[0]
        face_image = image[top:bottom, left:right]
        pil_image = Image.fromarray(face_image)
        with open(f'{IMAGES_PATH}/faces/face-{path}', "wb") as f:
            pil_image.save(f)
    return len(locations) 
Author: nladuo, Project: MMFinder, Lines: 13, Source: filter_images.py
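
A hedged usage note: get_face_and_save assumes IMAGES_PATH points at an existing directory that contains a faces/ subdirectory for the cropped output. A minimal setup sketch, with placeholder values:

import os

IMAGES_PATH = "images"  # placeholder; the project defines its own value
os.makedirs(os.path.join(IMAGES_PATH, "faces"), exist_ok=True)
n_faces = get_face_and_save("girl_001.jpg")  # returns the number of faces found in the image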

Example 13: get_face_and_save

# Required module: import face_recognition [as alias]
# Or: from face_recognition import face_locations [as alias]
def get_face_and_save(filename):
    img_path = f"{UPLOAD_DIR}/{filename}"
    image = face_recognition.load_image_file(img_path)
    locations = face_recognition.face_locations(image)
    if len(locations) == 1:  # exactly one face detected: crop and save it
        top, right, bottom, left = locations[0]
        face_image = image[top:bottom, left:right]
        pil_image = Image.fromarray(face_image)
        with open(f"{UPLOAD_DIR}/face-{filename}", "wb") as f:
            pil_image.save(f)
    return len(locations) 
Author: nladuo, Project: MMFinder, Lines: 13, Source: web_service.py

Example 14: detect_faces

# Required module: import face_recognition [as alias]
# Or: from face_recognition import face_locations [as alias]
def detect_faces(img):
    '''
    Detect all faces in an image.
    :param img: cv::mat HxWx3 RGB
    :return: yields (x, y, w, h) for each detected face
    '''
    # detect faces
    bbs = face_recognition.face_locations(img)

    for y, right, bottom, x in bbs:
        # Convert (top, right, bottom, left) to (x, y, w, h)
        yield x, y, (right - x), (bottom - y) 
Author: albertpumarola, Project: GANimation, Lines: 14, Source: face_utils.py
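
Because the detect_faces generator above yields one (x, y, w, h) tuple per face, it can be consumed lazily. A hypothetical caller that crops every detected face from an RGB image (placeholder path) might look like this:

import face_recognition

img = face_recognition.load_image_file("group.jpg")  # placeholder path; loads an RGB array
for x, y, w, h in detect_faces(img):
    face_crop = img[y:y + h, x:x + w]  # each crop is one detected face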

Example 15: _img_morph

# Required module: import face_recognition [as alias]
# Or: from face_recognition import face_locations [as alias]
def _img_morph(self, img, expresion):
        bbs = face_recognition.face_locations(img)
        if len(bbs) > 0:
            y, right, bottom, x = bbs[0]
            bb = x, y, (right - x), (bottom - y)
            face = face_utils.crop_face_with_bb(img, bb)
            face = face_utils.resize_face(face)
        else:
            face = face_utils.resize_face(img)

        morphed_face = self._morph_face(face, expresion)

        return morphed_face 
Author: albertpumarola, Project: GANimation, Lines: 15, Source: test.py


Note: The face_recognition.face_locations examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors; copyright remains with the original authors, and distribution and use should follow each project's license. Do not reproduce this compilation without permission.