This article collects typical usage examples of the facenet.prewhiten method in Python. If you are wondering what facenet.prewhiten does or how to call it, the curated code samples below may help. You can also explore usage examples of the other methods in the facenet module.
Eleven code examples of facenet.prewhiten are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps surface better Python code examples.
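For reference, in the davidsandberg/facenet project prewhiten normalizes a single image to zero mean and unit (adjusted) standard deviation before it is fed to the network. The sketch below reflects that typical implementation; treat it as an illustration of what the examples rely on, not necessarily the exact code of the facenet build you are using.

import numpy as np

def prewhiten(x):
    # Per-image normalization: zero mean, unit (adjusted) standard deviation.
    mean = np.mean(x)
    std = np.std(x)
    std_adj = np.maximum(std, 1.0 / np.sqrt(x.size))  # guards against near-constant images
    return np.multiply(np.subtract(x, mean), 1 / std_adj)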
Example 1: execute
# Required import: import facenet [as alias]
# Or: from facenet import prewhiten [as alias]
def execute(self, data, batch_size):
    image_list = []
    results = []
    # Resize each input image to 160x160 and prewhiten it.
    for i in range(batch_size):
        img = Image.open(data[i])
        img = misc.fromimage(img)
        img = misc.imresize(img, (160, 160), interp='bilinear')
        prewhitened = facenet.prewhiten(img)
        image_list.append(prewhitened)
    # Compute embeddings for the whole batch and serialize each one as JSON.
    rets = self.cal_embed(image_list)
    for i in range(batch_size):
        ret = rets[i].tolist()
        ret = json.dumps(ret)
        #ret = json.dumps([[ret.__dict__ for ob in lst] for lst in ret])
        results.append(ret)
    return results
Example 2: load_and_align_data
# Required import: import facenet [as alias]
# Or: from facenet import prewhiten [as alias]
def load_and_align_data(image_paths, image_size, margin, gpu_memory_fraction):
    minsize = 20  # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # thresholds for the three MTCNN stages
    factor = 0.709  # scale factor

    print('Creating networks and loading parameters')
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)

    tmp_image_paths = copy.copy(image_paths)
    img_list = []
    for image in tmp_image_paths:
        img = misc.imread(os.path.expanduser(image), mode='RGB')
        img_size = np.asarray(img.shape)[0:2]
        bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
        if len(bounding_boxes) < 1:
            image_paths.remove(image)
            print("can't detect face, remove ", image)
            continue
        det = np.squeeze(bounding_boxes[0, 0:4])
        bb = np.zeros(4, dtype=np.int32)
        bb[0] = np.maximum(det[0] - margin / 2, 0)
        bb[1] = np.maximum(det[1] - margin / 2, 0)
        bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
        bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
        cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
        aligned = misc.imresize(cropped, (image_size, image_size), interp='bilinear')
        prewhitened = facenet.prewhiten(aligned)
        img_list.append(prewhitened)
    images = np.stack(img_list)
    return images
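The aligned, prewhitened images returned above are typically fed to a loaded FaceNet graph to compute embeddings. The following is a hypothetical usage sketch: the image paths and the model file name are placeholders, and it assumes a frozen model whose graph exposes the input:0, phase_train:0 and embeddings:0 tensors (the same names used in Examples 4 and 10 below) and that facenet.load_model is available.

# Hypothetical usage: compute embeddings for the aligned images.
# Paths and model file are placeholders, not from the original article.
images = load_and_align_data(['person1.jpg', 'person2.jpg'], image_size=160, margin=44, gpu_memory_fraction=1.0)
with tf.Graph().as_default(), tf.Session() as sess:
    facenet.load_model('model.pb')  # placeholder path to a frozen FaceNet model
    images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
    embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
    phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
    feed_dict = {images_placeholder: images, phase_train_placeholder: False}
    emb = sess.run(embeddings, feed_dict=feed_dict)  # shape: (num_images, embedding_size)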
Example 3: load_and_align_data
# Required import: import facenet [as alias]
# Or: from facenet import prewhiten [as alias]
def load_and_align_data(image_paths,
                        image_size=160,
                        margin=44,
                        gpu_memory_fraction=1.0):
    minsize = 20
    threshold = [0.6, 0.7, 0.7]
    factor = 0.709

    print('Creating networks and loading parameters')
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(
            gpu_options=gpu_options, log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)

    nrof_samples = len(image_paths)
    img_list = [None] * nrof_samples
    for i in range(nrof_samples):
        img = misc.imread(os.path.expanduser(image_paths[i]), mode='RGB')
        img_size = np.asarray(img.shape)[0:2]
        bounding_boxes, _ = align.detect_face.detect_face(
            img, minsize, pnet, rnet, onet, threshold, factor)
        det = np.squeeze(bounding_boxes[0, 0:4])
        bb = np.zeros(4, dtype=np.int32)
        bb[0] = np.maximum(det[0] - margin / 2, 0)
        bb[1] = np.maximum(det[1] - margin / 2, 0)
        bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
        bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
        cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
        aligned = misc.imresize(
            cropped, (image_size, image_size), interp='bilinear')
        prewhitened = prewhiten(aligned)
        img_list[i] = prewhitened
    images = np.stack(img_list)
    return images
Example 4: generate_embedding
# Required import: import facenet [as alias]
# Or: from facenet import prewhiten [as alias]
def generate_embedding(self, face):
    # Get input and output tensors
    images_placeholder = self.sess.graph.get_tensor_by_name("batch_join:0")  # jjia changed 2018/01/21
    embeddings = self.sess.graph.get_tensor_by_name("embeddings:0")
    phase_train_placeholder = self.sess.graph.get_tensor_by_name("phase_train:0")

    prewhiten_face = facenet.prewhiten(face.image)

    # Run forward pass to calculate embeddings
    feed_dict = {images_placeholder: [prewhiten_face], phase_train_placeholder: False}
    # jjia
    # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    # sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    return self.sess.run(embeddings, feed_dict=feed_dict)[0]
Example 5: load_and_align_data
# Required import: import facenet [as alias]
# Or: from facenet import prewhiten [as alias]
def load_and_align_data(image_paths, image_size, margin, gpu_memory_fraction):
    minsize = 20  # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # thresholds for the three MTCNN stages
    factor = 0.709  # scale factor

    print('Creating networks and loading parameters')
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)

    nrof_samples = len(image_paths)
    img_list = []
    count_per_image = []
    for i in xrange(nrof_samples):  # xrange: this example targets Python 2
        img = misc.imread(os.path.expanduser(image_paths[i]))
        img_size = np.asarray(img.shape)[0:2]
        bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
        count_per_image.append(len(bounding_boxes))
        # Keep every detected face in the image, not just the first one.
        for j in range(len(bounding_boxes)):
            det = np.squeeze(bounding_boxes[j, 0:4])
            bb = np.zeros(4, dtype=np.int32)
            bb[0] = np.maximum(det[0] - margin / 2, 0)
            bb[1] = np.maximum(det[1] - margin / 2, 0)
            bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
            bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
            cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
            aligned = misc.imresize(cropped, (image_size, image_size), interp='bilinear')
            prewhitened = facenet.prewhiten(aligned)
            img_list.append(prewhitened)
    images = np.stack(img_list)
    return images, count_per_image, nrof_samples
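Because this variant keeps every detected face, embeddings computed from the returned stack need to be grouped back by source picture. A short assumed-usage sketch follows (emb is a hypothetical array of embeddings, one row per detected face, in the same order as img_list):

# Assumed usage: split per-face embeddings back into per-image groups.
offset = 0
for i in range(nrof_samples):
    faces_in_image = emb[offset:offset + count_per_image[i]]  # embeddings for image i
    offset += count_per_image[i]
    print('%s: %d face(s) detected' % (image_paths[i], count_per_image[i]))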
Example 6: preprocess
# Required import: import facenet [as alias]
# Or: from facenet import prewhiten [as alias]
def preprocess(self, data):
    json_data = json.load(data)
    cnt = json_data['cnt']
    raw_images = json_data['images']
    images = []
    for raw_image in raw_images:
        # Python 2 style base64 decoding of the image payload.
        img_data = raw_image.decode('base64')
        img = Image.open(StringIO.StringIO(img_data))
        img = misc.fromimage(img)
        img = misc.imresize(img, (160, 160), interp='bilinear')
        prewhitened = facenet.prewhiten(img)
        images.append(prewhitened)
    return images
Example 7: preprocess
# Required import: import facenet [as alias]
# Or: from facenet import prewhiten [as alias]
def preprocess(self, data):
    _, img_list = img_utils.decode_json_to_images(data)
    images = []
    for img in img_list:
        img = misc.fromimage(img)
        img = misc.imresize(img, (160, 160), interp='bilinear')
        prewhitened = facenet.prewhiten(img)
        images.append(prewhitened)
    return images
Example 8: load_and_align_data
# Required import: import facenet [as alias]
# Or: from facenet import prewhiten [as alias]
def load_and_align_data(image_paths, image_size, margin, gpu_memory_fraction):
    minsize = 20  # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # thresholds for the three MTCNN stages
    factor = 0.709  # scale factor

    print('Creating networks and loading parameters')
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)

    nrof_samples = len(image_paths)
    img_list = [None] * nrof_samples
    for i in xrange(nrof_samples):  # xrange: this example targets Python 2
        print(image_paths[i])
        img = misc.imread(os.path.expanduser(image_paths[i]))
        img_size = np.asarray(img.shape)[0:2]
        bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
        det = np.squeeze(bounding_boxes[0, 0:4])
        bb = np.zeros(4, dtype=np.int32)
        bb[0] = np.maximum(det[0] - margin / 2, 0)
        bb[1] = np.maximum(det[1] - margin / 2, 0)
        bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
        bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
        cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
        aligned = misc.imresize(cropped, (image_size, image_size), interp='bilinear')
        prewhitened = facenet.prewhiten(aligned)
        img_list[i] = prewhitened
    images = np.stack(img_list)
    return images
Example 9: align_data
# Required import: import facenet [as alias]
# Or: from facenet import prewhiten [as alias]
def align_data(image_list, image_size, margin, pnet, rnet, onet):
    minsize = 20  # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # thresholds for the three MTCNN stages
    factor = 0.709  # scale factor

    img_list = []
    for x in range(len(image_list)):
        img_size = np.asarray(image_list[x].shape)[0:2]
        bounding_boxes, _ = align.detect_face.detect_face(image_list[x], minsize, pnet, rnet, onet, threshold, factor)
        nrof_samples = len(bounding_boxes)
        if nrof_samples > 0:
            for i in range(nrof_samples):
                # Keep only detections with confidence above 0.95.
                if bounding_boxes[i][4] > 0.95:
                    det = np.squeeze(bounding_boxes[i, 0:4])
                    bb = np.zeros(4, dtype=np.int32)
                    bb[0] = np.maximum(det[0] - margin / 2, 0)
                    bb[1] = np.maximum(det[1] - margin / 2, 0)
                    bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
                    bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
                    cropped = image_list[x][bb[1]:bb[3], bb[0]:bb[2], :]
                    aligned = misc.imresize(cropped, (image_size, image_size), interp='bilinear')
                    prewhitened = facenet.prewhiten(aligned)
                    img_list.append(prewhitened)

    if len(img_list) > 0:
        images = np.stack(img_list)
        return images
    else:
        return None
Example 10: generate_embedding
# Required import: import facenet [as alias]
# Or: from facenet import prewhiten [as alias]
def generate_embedding(self, face):
    # Get input and output tensors
    images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
    embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
    phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

    prewhiten_face = facenet.prewhiten(face.image)

    # Run forward pass to calculate embeddings
    feed_dict = {images_placeholder: [prewhiten_face], phase_train_placeholder: False}
    return self.sess.run(embeddings, feed_dict=feed_dict)[0]
Example 11: align_data
# Required import: import facenet [as alias]
# Or: from facenet import prewhiten [as alias]
def align_data(image_list, image_size, margin, pnet, rnet, onet):
    minsize = 20  # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # thresholds for the three MTCNN stages
    factor = 0.709  # scale factor

    img_list = []
    for x in xrange(len(image_list)):  # xrange: this example targets Python 2
        img_size = np.asarray(image_list[x].shape)[0:2]
        bounding_boxes, _ = align.detect_face.detect_face(image_list[x], minsize, pnet, rnet, onet, threshold, factor)
        nrof_samples = len(bounding_boxes)
        if nrof_samples > 0:
            for i in xrange(nrof_samples):
                # Keep only detections with confidence above 0.95.
                if bounding_boxes[i][4] > 0.95:
                    det = np.squeeze(bounding_boxes[i, 0:4])
                    bb = np.zeros(4, dtype=np.int32)
                    bb[0] = np.maximum(det[0] - margin / 2, 0)
                    bb[1] = np.maximum(det[1] - margin / 2, 0)
                    bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
                    bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
                    cropped = image_list[x][bb[1]:bb[3], bb[0]:bb[2], :]
                    aligned = misc.imresize(cropped, (image_size, image_size), interp='bilinear')
                    prewhitened = facenet.prewhiten(aligned)
                    img_list.append(prewhitened)

    if len(img_list) > 0:
        images = np.stack(img_list)
        return images
    else:
        return None