

Python face_recognition.face_encodings Method Code Examples

This article collects typical usage examples of the face_recognition.face_encodings method in Python. If you are unsure how to call face_recognition.face_encodings or what it is used for in practice, the curated examples below should help; you can also explore other usage examples from the face_recognition package.


Fifteen code examples of the face_recognition.face_encodings method are shown below, sorted by popularity by default.
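Before the individual examples, here is a minimal, self-contained sketch of the typical face_encodings workflow: load an image, compute the 128-dimensional encodings of the faces it contains, and compare them against a known encoding. The file names used here are placeholders, not part of any example below.

import face_recognition

# Load a reference image and compute the encoding of the first face found in it.
known_image = face_recognition.load_image_file("known_person.jpg")   # placeholder path
known_encodings = face_recognition.face_encodings(known_image)

# Load a query image and encode every face found in it.
query_image = face_recognition.load_image_file("query.jpg")          # placeholder path
query_encodings = face_recognition.face_encodings(query_image)

# Compare each face in the query image against the known encoding.
if known_encodings:
    for encoding in query_encodings:
        match = face_recognition.compare_faces([known_encodings[0]], encoding)[0]
        distance = face_recognition.face_distance([known_encodings[0]], encoding)[0]
        print("match:", match, "distance:", distance)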

Example 1: loadKnown

# Required module import: import face_recognition [as alias]
# Or: from face_recognition import face_encodings [as alias]
def loadKnown(self, label):    
        console.task('Loading known faces')
        pa_g = Path('./known')
        pathlist = []
        for ext in ['.jpg', '.JPG', '.png', '.PNG', '.jpeg', '.JPEG', '.bmp', '.BMP']:
            tmp_pl = pa_g.glob('**/*{}'.format(ext))
            for t in tmp_pl:
                pathlist.append(t)
        for path in pathlist:
            p_str = str(path)
            delim = '/'
            if platform == "win32":
                delim = '\\'
            console.subtask('Loading {0}'.format(p_str.split(delim)[1]))
            im = face_recognition.load_image_file(p_str)
            encoding = face_recognition.face_encodings(im, num_jitters=self.num_jitters)
            for e in encoding:
                self.known_face_encodings.append(e)
                self.known_face_names.append(label) 
Author: ThoughtfulDev, Project: EagleEye, Lines: 21, Source: face_recog.py

Example 2: encoding_images

# Required module import: import face_recognition [as alias]
# Or: from face_recognition import face_encodings [as alias]
def encoding_images(path):
    """
    Encode the images found in the subfolders of `path`.
    TODO:
        Compare each new face encoding against the face vectors already stored in the
        history database using Euclidean distance; when the distance falls below a
        threshold, warn: if the similar face belongs to the same person, skip the
        record and report that it already exists, otherwise warn that the faces are
        overly similar.
    :param path: root directory whose subdirectories each contain one person's images
    :return: None (encodings and names are saved with np.save)
    """
    with open(name_and_encoding, 'w') as f:
        subdirs = [os.path.join(path, x) for x in os.listdir(path) if os.path.isdir(os.path.join(path, x))]
        for subdir in subdirs:
            print('process image name :', subdir)
            person_image_encoding = []
            for y in os.listdir(subdir):
                print("image name is ", y)
                _image = face_recognition.load_image_file(os.path.join(subdir, y))
                face_encodings = face_recognition.face_encodings(_image)
                name = os.path.split(subdir)[-1]
                if face_encodings and len(face_encodings) == 1:
                    if len(person_image_encoding) == 0:
                        person_image_encoding.append(face_encodings[0])
                        known_face_names.append(name)
                        continue
                    for i in range(len(person_image_encoding)):
                        # compare_faces returns a list of booleans (True = within tolerance);
                        # a False entry means this encoding is a new, distinct feature for this person.
                        distances = face_recognition.compare_faces(person_image_encoding, face_encodings[0], tolerance=image_thread)
                        if False in distances:
                            person_image_encoding.append(face_encodings[0])
                            known_face_names.append(name)
                            print(name, " new feature")
                            f.write(name + ":" + str(face_encodings[0]) + "\n")
                            break
                    # face_encoding = face_recognition.face_encodings(_image)[0]
                    # face_recognition.compare_faces()
            known_face_encodings.extend(person_image_encoding)
            bb = np.array(known_face_encodings)
            print("--------")
    np.save(KNOWN_FACE_ENCODINGS, known_face_encodings)
    np.save(KNOWN_FACE_NANE, known_face_names) 
Author: matiji66, Project: face-attendance-machine, Lines: 41, Source: encoding_images.py
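The TODO in the docstring of encoding_images above describes comparing each new encoding against the face vectors already stored in the history database by Euclidean distance. A minimal sketch of such a check, assuming a list of previously stored 128-d encodings and an illustrative threshold value, could look like this (face_recognition.face_distance returns the Euclidean distance to each stored encoding):

import numpy as np
import face_recognition

def is_known_face(new_encoding, stored_encodings, threshold=0.4):
    # Return True if new_encoding is closer than `threshold` to any stored encoding.
    # threshold=0.4 is only an illustrative value and should be tuned for the data set.
    if len(stored_encodings) == 0:
        return False
    distances = face_recognition.face_distance(np.array(stored_encodings), new_encoding)
    return bool(np.min(distances) < threshold)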

Example 3: dlib_encoder

# Required module import: import face_recognition [as alias]
# Or: from face_recognition import face_encodings [as alias]
def dlib_encoder(image, boxes):
    # Encode face into a 128-D representation (embeddings) using dlib.

    # Convert image from BGR (OpenCV ordering) to dlib ordering (RGB).
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Generate encodings. Only one face is assumed so take the 1st element. 
    encoding = face_recognition.face_encodings(face_image=rgb,
        known_face_locations=boxes, num_jitters=10)[0]

    return encoding

# Loop over the image paths.
# NB: It's assumed that only one face is in each image.
Author: goruck, Project: edge-tpu-servers, Lines: 16, Source: encode_faces.py

Example 4: test_load

# Required module import: import face_recognition [as alias]
# Or: from face_recognition import face_encodings [as alias]
def test_load():
    face_encodings, face_names = load_encodings()
    print("===========face_encodings================")
    print(face_encodings)
    print("===========================")
    print(face_names)
    print("===========face_names================") 
Author: matiji66, Project: face-attendance-machine, Lines: 9, Source: encoding_images.py

Example 5: initialize_database

# Required module import: import face_recognition [as alias]
# Or: from face_recognition import face_encodings [as alias]
def initialize_database(self):
        """
        Reads the PNG images from the ./people folder and
        creates a list of people.

        The names of the image files are considered as their
        real names.

        For example;
        /people
          - mario.png
          - jennifer.png
          - melanie.png

        Returns:
        (tuple) (people_list, name_list) (features of people, names of people)

        """
        filenames = glob.glob(cd + '/people/*.png')

        people_list = []
        name_list = []

        for f in filenames:
            im = cv2.imread(f, 1)

            #cv2.imshow("Database Image", im)

            #cv2.waitKey(500)

            im = im.astype(np.uint8)

            people_list.append(fr.face_encodings(im)[0])

            name_list.append(f.split('/')[-1].split('.')[0])

            #cv2.destroyAllWindows()

        return (people_list, name_list) 
Author: cagbal, Project: ros_people_object_detection_tensorflow, Lines: 41, Source: face_recognizer.py

Example 6: constructIndexes

# Required module import: import face_recognition [as alias]
# Or: from face_recognition import face_encodings [as alias]
def constructIndexes(self, label):
        valid_links = []
        console.section('Analyzing')
        file_name = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
        file_name += '.jpg'
        tmp_path = os.path.join(tempfile.gettempdir(), file_name)
        console.task("Storing Image in {0}".format(tmp_path))
        for num, i in enumerate(self.profile_img):
            console.task('Analyzing {0}...'.format(i.strip()[:90]))
            urlretrieve(i, tmp_path)
            frame = cv2.imread(tmp_path)
            big_frame = cv2.resize(frame, (0, 0), fx=2.0, fy=2.0)
            # Reverse the channel order to convert from BGR (OpenCV) to RGB (face_recognition)
            rgb_small_frame = big_frame[:, :, ::-1]
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations, num_jitters=self.num_jitters)
            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(self.known_face_encodings, face_encoding)
                name = "Unknown"
                # If a match was found in known_face_encodings, just use the first one.
                if True in matches:
                    first_match_index = matches.index(True)
                    name = self.known_face_names[first_match_index]
                face_names.append(name)

            for _, name in zip(face_locations, face_names):
                if name == label:
                    valid_links.append(num)
        if os.path.isfile(tmp_path):
            console.task("Removing {0}".format(tmp_path))
            os.remove(tmp_path)
        return valid_links 
Author: ThoughtfulDev, Project: EagleEye, Lines: 35, Source: face_recog.py

Example 7: predict

# Required module import: import face_recognition [as alias]
# Or: from face_recognition import face_encodings [as alias]
def predict(X_frame, knn_clf=None, model_path=None, distance_threshold=0.5):
    """
    Recognizes faces in given image using a trained KNN classifier

    :param X_frame: frame to do the prediction on.
    :param knn_clf: (optional) a knn classifier object. if not specified, model_path must be specified.
    :param model_path: (optional) path to a pickled knn classifier. if not specified, knn_clf must be supplied.
    :param distance_threshold: (optional) distance threshold for face classification. the larger it is, the more chance
           of mis-classifying an unknown person as a known one.
    :return: a list of names and face locations for the recognized faces in the image: [(name, bounding box), ...].
        For faces of unrecognized persons, the name 'unknown' will be returned.
    """
    if knn_clf is None and model_path is None:
        raise Exception("Must supply knn classifier either thourgh knn_clf or model_path")

    # Load a trained KNN model (if one was passed in)
    if knn_clf is None:
        with open(model_path, 'rb') as f:
            knn_clf = pickle.load(f)

    X_face_locations = face_recognition.face_locations(X_frame)

    # If no faces are found in the image, return an empty result.
    if len(X_face_locations) == 0:
        return []

    # Find encodings for faces in the test image
    faces_encodings = face_recognition.face_encodings(X_frame, known_face_locations=X_face_locations)

    # Use the KNN model to find the best matches for the test face
    closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)
    are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))]

    # Predict classes and remove classifications that aren't within the threshold
    return [(pred, loc) if rec else ("unknown", loc) for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)] 
Author: ageitgey, Project: face_recognition, Lines: 37, Source: facerec_ipcamera_knn.py

Example 8: predict

# Required module import: import face_recognition [as alias]
# Or: from face_recognition import face_encodings [as alias]
def predict(X_img_path, knn_clf=None, model_path=None, distance_threshold=0.6):
    """
    Recognizes faces in given image using a trained KNN classifier

    :param X_img_path: path to image to be recognized
    :param knn_clf: (optional) a knn classifier object. if not specified, model_path must be specified.
    :param model_path: (optional) path to a pickled knn classifier. if not specified, knn_clf must be supplied.
    :param distance_threshold: (optional) distance threshold for face classification. the larger it is, the more chance
           of mis-classifying an unknown person as a known one.
    :return: a list of names and face locations for the recognized faces in the image: [(name, bounding box), ...].
        For faces of unrecognized persons, the name 'unknown' will be returned.
    """
    if not os.path.isfile(X_img_path) or os.path.splitext(X_img_path)[1][1:] not in ALLOWED_EXTENSIONS:
        raise Exception("Invalid image path: {}".format(X_img_path))

    if knn_clf is None and model_path is None:
        raise Exception("Must supply knn classifier either thourgh knn_clf or model_path")

    # Load a trained KNN model (if one was passed in)
    if knn_clf is None:
        with open(model_path, 'rb') as f:
            knn_clf = pickle.load(f)

    # Load image file and find face locations
    X_img = face_recognition.load_image_file(X_img_path)
    X_face_locations = face_recognition.face_locations(X_img)

    # If no faces are found in the image, return an empty result.
    if len(X_face_locations) == 0:
        return []

    # Find encodings for faces in the test image
    faces_encodings = face_recognition.face_encodings(X_img, known_face_locations=X_face_locations)

    # Use the KNN model to find the best matches for the test face
    closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)
    are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))]

    # Predict classes and remove classifications that aren't within the threshold
    return [(pred, loc) if rec else ("unknown", loc) for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)] 
Author: ageitgey, Project: face_recognition, Lines: 42, Source: face_recognition_knn.py

Example 9: recognize_face_from_image_file

# Required module import: import face_recognition [as alias]
# Or: from face_recognition import face_encodings [as alias]
def recognize_face_from_image_file(image_file: str) -> Optional[ndarray]:

    if PYROBOY_AVAILABLE:
        return FaceRec.get_biggest_face_encoding(image_file)
    else:
        logger.warning("Falling back to basic Face Recognition functions, since Pyroboy is unavailable!")
        image = fr.load_image_file(image_file)
        faces = fr.face_encodings(image)
        if faces:
            return faces[0]
        return None 
Author: Roboy, Project: ravestate, Lines: 13, Source: face_recognition.py

Example 10: get_processed_frame_object

# Required module import: import face_recognition [as alias]
# Or: from face_recognition import face_encodings [as alias]
def get_processed_frame_object(frame_obj, scale=1.0):
        """Processes value produced by producer, returns prediction with png image.

        :param frame_obj: frame dictionary with frame information and frame itself
        :param scale: (0, 1] scale image before face recognition, speeds up processing, decreases accuracy
        :return: A dict updated with faces found in that frame, i.e. their location and encoding.
        """

        frame = np_from_json(frame_obj, prefix_name=ORIGINAL_PREFIX)  # frame_obj = json
        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        frame = cv2.cvtColor(frame.astype(np.uint8), cv2.COLOR_BGR2RGB)

        if scale != 1:
            # Resize frame of video to scale size for faster face recognition processing
            rgb_small_frame = cv2.resize(frame, (0, 0), fx=scale, fy=scale)

        else:
            rgb_small_frame = frame

        with timer("PROCESS RAW FRAME {}".format(frame_obj["frame_num"])):
            # Find all the faces and face encodings in the current frame of video
            with timer("Locations in frame"):
                face_locations = np.array(face_recognition.face_locations(rgb_small_frame))
                face_locations_dict = np_to_json(face_locations, prefix_name="face_locations")

            with timer("Encodings in frame"):
                face_encodings = np.array(face_recognition.face_encodings(rgb_small_frame, face_locations))
                face_encodings_dict = np_to_json(face_encodings, prefix_name="face_encodings")

        frame_obj.update(face_locations_dict)
        frame_obj.update(face_encodings_dict)

        return frame_obj 
Author: rohit-mehra, Project: eye_of_sauron, Lines: 35, Source: prediction_producer.py

Example 11: calc_face_encoding

# Required module import: import face_recognition [as alias]
# Or: from face_recognition import face_encodings [as alias]
def calc_face_encoding(image):
    # Currently only use first face found on picture
    loaded_image = face_recognition.load_image_file(image)
    faces = face_recognition.face_encodings(loaded_image)

    # If more than one face on the given image was found -> error
    if len(faces) > 1:
        raise Exception(
            "Found more than one face in the given training image.")

    # If no face was found in the given image -> error
    if not faces:
        raise Exception("Could not find any face in the given training image.")

    return faces[0] 
Author: JanLoebel, Project: face_recognition, Lines: 17, Source: facerec_service.py

Example 12: detect_faces_in_image

# Required module import: import face_recognition [as alias]
# Or: from face_recognition import face_encodings [as alias]
def detect_faces_in_image(file_stream):
    # Load the uploaded image file
    img = face_recognition.load_image_file(file_stream)

    # Get face encodings for any faces in the uploaded image
    uploaded_faces = face_recognition.face_encodings(img)

    # Defaults for the result object
    faces_found = len(uploaded_faces)
    faces = []

    if faces_found:
        face_encodings = list(faces_dict.values())
        for uploaded_face in uploaded_faces:
            match_results = face_recognition.compare_faces(
                face_encodings, uploaded_face)
            for idx, match in enumerate(match_results):
                if match:
                    match = list(faces_dict.keys())[idx]
                    match_encoding = face_encodings[idx]
                    dist = face_recognition.face_distance([match_encoding],
                            uploaded_face)[0]
                    faces.append({
                        "id": match,
                        "dist": dist
                    })

    return {
        "count": faces_found,
        "faces": faces
    }

# <Picture functions> #

# <Controller> 
Author: JanLoebel, Project: face_recognition, Lines: 37, Source: facerec_service.py

Example 13: extract_face_from_image

# Required module import: import face_recognition [as alias]
# Or: from face_recognition import face_encodings [as alias]
def extract_face_from_image(X_img_path, detection_model, jitters, encoding_models):
        # Load image data in a numpy array
        try:
            log.debug("extract_face_from_image | Loading image {}".format(X_img_path))
            X_img, ratio = load_image_file(X_img_path)
        except OSError:
            log.error("extract_face_from_image | What have you uploaded ???")
            return -2, -2, -1
        log.debug("extract_face_from_image | Extracting faces locations ...")
        try:
            # TODO: Reduce size of the image at every iteration
            X_face_locations = face_recognition.face_locations(
                X_img, model=detection_model)  # model="cnn")
        except RuntimeError:
            log.error(
                "extract_face_from_image | GPU does not have enough memory: FIXME unload data and retry")
            return None, None, ratio

        log.debug("extract_face_from_image | Found {} face(s) for the given image".format(
            len(X_face_locations)))

        # If no faces are found in the image, return an empty result.
        if len(X_face_locations) == 0:
            log.warning("extract_face_from_image | Seems that no faces was found :( ")
            return -3, -3, ratio

        # Find encodings for faces in the test image
        log.debug("extract_face_from_image | Encoding faces using [{}] jitters ...".format(jitters))
        # num_jitters increase the distortion check
        faces_encodings = face_recognition.face_encodings(
            X_img, known_face_locations=X_face_locations, num_jitters=jitters, model=encoding_models)
        log.debug("extract_face_from_image | Face encoded! | Let's ask to the neural network ...")
        return faces_encodings, X_face_locations, ratio 
Author: alessiosavi, Project: PyRecognizer, Lines: 35, Source: Classifier.py

Example 14: init_dataset_core

# Required module import: import face_recognition [as alias]
# Or: from face_recognition import face_encodings [as alias]
def init_dataset_core(detection_model, jitters, encoding_models, img_path=None):
        """
        Delegated core method used to parallelize the dataset initialization work
        :param detection_model: face detection model to use ("hog" or "cnn")
        :param jitters: number of re-sampled distortions used when computing the encoding
        :param encoding_models: face encoding model to use ("small" or "large")
        :param img_path: path of the image to load and encode
        :return: the 128-d face encoding, or None if the image is unusable
        """
        try:
            image = load_image_file(img_path)
        except OSError:
            log.error(
                "init_dataset | === FATAL === | Image {} is corrupted!!".format(img_path))
            return None
        # log.debug("initDataset | Image loaded! | Searching for face ...")
        # Array of w,x,y,z coordinates
        # NOTE: batch_face_locations could be used here to parallelize the image initialization, but
        # this is unfortunately the only GPU that I have right now and, of course, I'd rather not burn it out
        face_bounding_boxes = face_locations(image, model=detection_model)
        face_data = None
        if len(face_bounding_boxes) == 1:
            log.info(
                "initDataset | Image {0} have only 1 face, loading for future training ...".format(img_path))
            # Loading the X [data] using 300 different distortions
            face_data = face_encodings(image, known_face_locations=face_bounding_boxes, num_jitters=jitters,
                                       model=encoding_models)[0]
        else:
            log.error(
                "initDataset | Image {0} not suitable for training!".format(img_path))
            if len(face_bounding_boxes) == 0:
                log.error("initDataset | I've not found any face :/ ")
            else:
                log.error(
                    "initDataset | Found more than one face, too much for me Sir :&")
        return face_data 
Author: alessiosavi, Project: PyRecognizer, Lines: 37, Source: Person.py
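The NOTE in init_dataset_core above mentions batch_face_locations, which runs the CNN face detector over a batch of images so a GPU can be used more efficiently (it requires dlib built with CUDA support). A hedged sketch of how it could be called, with an illustrative batch size:

import face_recognition

def locate_faces_in_batch(images, batch_size=32):
    # Detect faces in a list of images with the batched CNN detector.
    # Returns one list of (top, right, bottom, left) boxes per input image.
    # batch_size=32 is an illustrative value; tune it to the available GPU memory.
    return face_recognition.batch_face_locations(
        images, number_of_times_to_upsample=1, batch_size=batch_size)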

Example 15: preprocess

# Required module import: import face_recognition [as alias]
# Or: from face_recognition import face_encodings [as alias]
def preprocess(self, imgs):
        for img in imgs:
            if img is None:
                continue
            h, w, c = img.shape
            # img = cv2.resize(img, (64, 64))
            # Treat the whole image as a single face box in (top, right, bottom, left) order.
            code = face_recognition.face_encodings(img, [(0, w, h, 0)])[0]
            self.img_encode_code.append(code)
            # self.img_encode_code_array = np.array(self.img_encode_code) 
Author: MashiMaroLjc, Project: rabbitVE, Lines: 11, Source: dlib_compare.py


Note: The face_recognition.face_encodings examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please follow the corresponding project's license when distributing or using the code, and do not reproduce this article without permission.