This article collects typical usage examples of the Python method imutils.face_utils.shape_to_np. If you are wondering what face_utils.shape_to_np does, how to use it, or want concrete examples, the curated code samples below should help. You can also browse further usage examples from its parent module, imutils.face_utils.
Below are 10 code examples of face_utils.shape_to_np, sorted by popularity by default.
Example 1: orientation
# Required import: from imutils import face_utils [as alias]
# Or: from imutils.face_utils import shape_to_np [as alias]
def orientation(self):
    """
    Face localization.
    :return:
    """
    faces = []
    for rect in self.rects:
        face = {}
        face_shades_width = rect.right() - rect.left()
        predictor_shape = self.predictor(self.img_gray, rect)
        face_shape = face_utils.shape_to_np(predictor_shape)
        face['cigarette'] = self.get_cigarette_info(face_shape, face_shades_width)
        face['glasses'] = self.get_glasses_info(face_shape, face_shades_width)
        faces.append(face)
    return faces
Author: tomoncle | Project: face-detection-induction-course | Lines: 20 | Source: input_static_pic_to_gif2_for_class.py
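Example 1 assumes the instance already carries self.rects, self.img_gray, and self.predictor. A minimal setup sketch with dlib (the class name, image path, and model path are assumptions, not taken from the project):

import cv2
import dlib

class FaceDecorator:  # hypothetical class name
    def __init__(self, image_path, model_path="shape_predictor_68_face_landmarks.dat"):
        image = cv2.imread(image_path)
        # grayscale copy used by both the detector and the predictor
        self.img_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # dlib's HOG-based frontal face detector
        detector = dlib.get_frontal_face_detector()
        self.rects = detector(self.img_gray, 0)
        # 68-point facial landmark predictor
        self.predictor = dlib.shape_predictor(model_path)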
Example 2: orientation
# Required import: from imutils import face_utils [as alias]
# Or: from imutils.face_utils import shape_to_np [as alias]
def orientation(self, rects, img_gray):
    """
    Face localization.
    :return:
    """
    faces = []
    for rect in rects:
        face = {}
        face_shades_width = rect.right() - rect.left()
        predictor_shape = self.predictor(img_gray, rect)
        face_shape = face_utils.shape_to_np(predictor_shape)
        face['cigarette'] = self.get_cigarette_info(face_shape, face_shades_width)
        face['glasses'] = self.get_glasses_info(face_shape, face_shades_width)
        faces.append(face)
    return faces
Example 3: blur
# Required import: from imutils import face_utils [as alias]
# Or: from imutils.face_utils import shape_to_np [as alias]
def blur(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = detector(gray, 0)
    mask = np.zeros(image.shape[:2], np.uint8)
    blurred_image = image.copy()
    for face in faces:  # if there are faces
        (x, y, w, h) = (face.left(), face.top(), face.width(), face.height())
        blurred_image[y : y + h, x : x + w, :] = anonymize_face_pixelate(
            blurred_image[y : y + h, x : x + w, :], blocks=10
        )
        # *** Facial landmarks detection
        shape = predictor(gray, face)
        shape = face_utils.shape_to_np(shape)
        # Get a mask covering only the face shape
        shape = cv2.convexHull(shape)
        cv2.drawContours(mask, [shape], -1, 255, -1)
    # Replace pixels with the blurred image only inside the mask
    mask = mask / 255.0
    mask = np.expand_dims(mask, axis=-1)
    image = (1.0 - mask) * image + mask * blurred_image
    image = image.astype(np.uint8)
    return image
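The helper anonymize_face_pixelate is not shown in the source. A sketch of the common pixelation approach assumed here: split the face ROI into a blocks x blocks grid and fill each cell with its mean color.

import cv2
import numpy as np

def anonymize_face_pixelate(image, blocks=10):
    # divide the ROI into a grid and paint each cell with its mean color
    (h, w) = image.shape[:2]
    x_steps = np.linspace(0, w, blocks + 1, dtype="int")
    y_steps = np.linspace(0, h, blocks + 1, dtype="int")
    for i in range(1, len(y_steps)):
        for j in range(1, len(x_steps)):
            (start_x, start_y) = (x_steps[j - 1], y_steps[i - 1])
            (end_x, end_y) = (x_steps[j], y_steps[i])
            roi = image[start_y:end_y, start_x:end_x]
            (b, g, r) = [int(c) for c in cv2.mean(roi)[:3]]
            cv2.rectangle(image, (start_x, start_y), (end_x, end_y), (b, g, r), -1)
    return image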
Example 4: shape_to_np
# Required import: from imutils import face_utils [as alias]
# Or: from imutils.face_utils import shape_to_np [as alias]
def shape_to_np(shape, dtype="int"):
    # initialize the (x, y) coordinates to zero
    coords = np.zeros((shape.num_parts, 2), dtype=dtype)
    # loop over the facial landmarks and convert each
    # to a 2-tuple of (x, y) coordinates
    for i in range(0, shape.num_parts):
        coords[i] = (shape.part(i).x, shape.part(i).y)
    return coords

# construct the arguments
# if you want to pass arguments when running the code,
# follow the code and format below
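The comments above promise argument-parsing code that the source does not include. A minimal sketch of what such parsing typically looks like (the flag names are assumptions):

import argparse

# Hypothetical argument parsing; flag names are assumptions.
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape-predictor", required=True,
                help="path to the facial landmark predictor")
ap.add_argument("-i", "--image", required=True,
                help="path to the input image")
args = vars(ap.parse_args())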
Example 5: encode_face
# Required import: from imutils import face_utils [as alias]
# Or: from imutils.face_utils import shape_to_np [as alias]
def encode_face(image):
    face_locations = face_detector(image, 1)
    face_encodings_list = []
    landmarks_list = []
    for face_location in face_locations:
        # DETECT FACES
        shape = pose_predictor_68_point(image, face_location)
        face_encodings_list.append(np.array(face_encoder.compute_face_descriptor(image, shape, num_jitters=1)))
        # GET LANDMARKS
        shape = face_utils.shape_to_np(shape)
        landmarks_list.append(shape)
    face_locations = transform(image, face_locations)
    return face_encodings_list, face_locations, landmarks_list
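The globals face_detector, pose_predictor_68_point, face_encoder, and transform are defined elsewhere in the source project; transform presumably converts the dlib rectangles into plain coordinate tuples. A plausible initialization of the three dlib models, assuming dlib's published pretrained files:

import dlib

# Assumed initialization; the file names are dlib's standard pretrained models.
face_detector = dlib.get_frontal_face_detector()
pose_predictor_68_point = dlib.shape_predictor(
    "shape_predictor_68_face_landmarks.dat")
face_encoder = dlib.face_recognition_model_v1(
    "dlib_face_recognition_resnet_model_v1.dat")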
Example 6: recognize
# Required import: from imutils import face_utils [as alias]
# Or: from imutils.face_utils import shape_to_np [as alias]
def recognize():
    database = initialize()
    cap = cv2.VideoCapture(0)
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
    while True:
        ret, img = cap.read()
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        subjects = detector(gray, 0)
        for subject in subjects:
            shape = predictor(gray, subject)
            shape = face_utils.shape_to_np(shape)  # convert to a NumPy array
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)
            ear = (leftEAR + rightEAR) / 2.0
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(img, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(img, [rightEyeHull], -1, (0, 255, 0), 1)
            extract_face_info(img, img_rgb, database, ear)
        cv2.imshow('Recognizing faces', img)
        if cv2.waitKey(1) == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
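The helpers initialize, detector, predictor, and extract_face_info come from the surrounding module and are not shown. Nor is eye_aspect_ratio; a sketch of the standard formulation (Soukupová and Čech, 2016) that the eye slices above expect:

from scipy.spatial import distance as dist

def eye_aspect_ratio(eye):
    # ratio of the two vertical eye distances to the horizontal one;
    # eye is the 6-point landmark array sliced out above
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    C = dist.euclidean(eye[0], eye[3])
    return (A + B) / (2.0 * C)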
Example 7: build_mask_dataset
# Required import: from imutils import face_utils [as alias]
# Or: from imutils.face_utils import shape_to_np [as alias]
def build_mask_dataset(in_dir, out_dir, model_path):
    """Generate a dataset of segmentation masks from images.
    @param in_dir: string
                   input directory of images.
    @param out_dir: string
                    output directory of images.
    @param model_path: string
                       path to the dlib facial landmark predictor model.
    """
    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(model_path)
    image_paths = os.listdir(in_dir)
    n_images = len(image_paths)
    for i, image_path in enumerate(image_paths):
        print('Building face-mask dataset: [%d/%d] images.' % (i + 1, n_images))
        image_full_path = os.path.join(in_dir, image_path)
        image = cv2.imread(image_full_path)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # detect faces in the grayscale image
        rects = detector(gray, 1)
        try:
            rect = rects[0]  # we only use the first face found
            # determine the facial landmarks for the face region, then
            # convert the landmark (x, y)-coordinates to a NumPy array
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)
            output = visualize_facial_landmarks(image, shape)
            cv2.imwrite(os.path.join(out_dir, image_path), output)
        except IndexError:
            # if no bounding box is found, write a blank image
            output = np.ones_like(image) * 255
            cv2.imwrite(os.path.join(out_dir, image_path), output)
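Here visualize_facial_landmarks is presumably imutils.face_utils.visualize_facial_landmarks, which overlays colored landmark regions on the image. A hypothetical invocation, with placeholder paths:

# Hypothetical usage; directory and model paths are placeholders.
build_mask_dataset(
    in_dir="data/faces",
    out_dir="data/masks",
    model_path="shape_predictor_68_face_landmarks.dat",
)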
Example 8: get_marks
# Required import: from imutils import face_utils [as alias]
# Or: from imutils.face_utils import shape_to_np [as alias]
def get_marks(gray, face_coord):
    """
    Arguments:
        gray: grayscale image array
        face_coord: rectangle coordinates for one face
    Returns:
        array of facial landmark coordinates scaled to the range [-0.5, 0.5]
    """
    shape = face_marks(gray, face_coord)
    shape = face_utils.shape_to_np(shape).astype(float)
    # normalize x and y relative to the face bounding box, centered at 0
    shape[:, 0] = (shape[:, 0] - face_coord.left()) / face_coord.width() - 0.5
    shape[:, 1] = (shape[:, 1] - face_coord.top()) / face_coord.height() - 0.5
    return shape
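The callable face_marks is an externally defined dlib landmark predictor. A sketch of how it might be created and the function called (model path and image file name are assumptions):

import cv2
import dlib

# Assumed wiring for the face_marks callable used above.
face_marks = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
detector = dlib.get_frontal_face_detector()

gray = cv2.cvtColor(cv2.imread("face.jpg"), cv2.COLOR_BGR2GRAY)
rects = detector(gray, 0)
if len(rects) > 0:
    marks = get_marks(gray, rects[0])  # landmarks scaled to [-0.5, 0.5]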
Example 9: get_landmarks
# Required import: from imutils import face_utils [as alias]
# Or: from imutils.face_utils import shape_to_np [as alias]
def get_landmarks(self, frame, type):
    '''
    Get all facial landmarks in a face
    Args:
        frame (cv2 image): the original frame, in OpenCV's BGR format.
        type (str): "5" or "68", the number of facial landmarks
    Outputs:
        shape (array): facial landmark coordinates as (x, y) tuples
    '''
    if self.predictor is None:
        print("[INFO] loading " + type + "-point facial landmarks model ...")
        self.predictor = dlib.shape_predictor("../shape_predictor_" + type + "_face_landmarks.dat")
        print("[INFO] Load model - DONE!")
    if frame is None:
        return None, None
    # each face could be resized to a fixed size, e.g. width = 200
    # face = imutils.resize(face, width=200)
    # the predictor expects a grayscale image
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    rects = self.face_detection(frame)
    if len(rects) == 0:
        return None, None
    shape = self.predictor(gray, rects[0])
    shape = face_utils.shape_to_np(shape)
    # shape holds 68 (x, y) pairs; to draw the landmarks, use:
    # for (x, y) in shape: cv2.circle(image, (x, y), 1, (0, 0, 255), -1)
    return shape, rects
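For reference, the drawing loop mentioned in the final comment, written as a self-contained sketch (the image path is a placeholder):

import cv2
import dlib
from imutils import face_utils

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
image = cv2.imread("face.jpg")  # placeholder path
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
for rect in detector(gray, 0):
    shape = face_utils.shape_to_np(predictor(gray, rect))
    for (x, y) in shape:
        cv2.circle(image, (x, y), 1, (0, 0, 255), -1)
cv2.imwrite("face_landmarks.jpg", image)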
Example 10: extract_fl_features
# Required import: from imutils import face_utils [as alias]
# Or: from imutils.face_utils import shape_to_np [as alias]
def extract_fl_features(self, video_input, user_acts):
    """Extract facial-landmark features from a sequence of video frames.
    Returns:
        dict: the aggregated facial-landmark feature vector under 'fl_features'.
    """
    def _distance(a, b):
        return np.linalg.norm(a - b)

    print(f'VIDEO FEATURE ENTER, len(video_input): {len(video_input)}')
    features = []
    aggregated_feats = None
    for frame in video_input[::2]:  # process every second frame
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = self.CLAHE.apply(frame)
        faces = self.DETECTOR(frame, 1)
        if len(faces) > 0:  # at least one face detected
            landmarks = self.PREDICTOR(frame, faces[0])
            landmarks = face_utils.shape_to_np(landmarks)
            # normalization distances for the eyebrow and lip features
            norm_left_eye = _distance(landmarks[21], landmarks[39])
            norm_right_eye = _distance(landmarks[22], landmarks[42])
            norm_lips = _distance(landmarks[33], landmarks[52])
            eyebrow_left = sum(
                [(_distance(landmarks[39], landmarks[i]) / norm_left_eye)
                 for i in [18, 19, 20, 21]]
            )
            eyebrow_right = sum(
                [(_distance(landmarks[42], landmarks[i]) / norm_right_eye)
                 for i in [22, 23, 24, 25]]
            )
            lip_left = sum(
                [(_distance(landmarks[33], landmarks[i]) / norm_lips)
                 for i in [48, 49, 50]]
            )
            lip_right = sum(
                [(_distance(landmarks[33], landmarks[i]) / norm_lips)
                 for i in [52, 53, 54]]
            )
            mouth_width = _distance(landmarks[48], landmarks[54])
            mouth_height = _distance(landmarks[51], landmarks[57])
            features.append(np.array([
                eyebrow_left,
                eyebrow_right,
                lip_left,
                lip_right,
                mouth_width,
                mouth_height
            ]))
    # aggregate features across frames
    if len(features) > 0:
        mean = np.mean(features, axis=0)
        mini = np.amin(features, axis=0)
        maxi = np.amax(features, axis=0)
        std = np.std(features, axis=0)
        perc25 = np.percentile(features, q=25, axis=0)
        perc75 = np.percentile(features, q=75, axis=0)
        aggregated_feats = np.array([mean, mini, maxi, std, perc25, perc75]).reshape(1, 36)
    print("VIDEO FEAT PUB")
    return {'fl_features': aggregated_feats}
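Each sampled frame contributes six normalized distances (two eyebrow sums, two lip sums, mouth width, and mouth height); aggregating them with six statistics (mean, min, max, standard deviation, 25th and 75th percentiles) produces the 6 x 6 = 36-dimensional vector returned under 'fl_features'.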