This article collects typical usage examples of the Python facenet.load_model method. If you are wondering what facenet.load_model does, or how to use it, the curated code examples below may help. You can also explore further usage examples from the facenet module this method belongs to.
The following shows 12 code examples of the facenet.load_model method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
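Before the individual examples, here is a minimal, self-contained sketch of the pattern most of them share: load a model with facenet.load_model, look up the "input:0", "embeddings:0", and "phase_train:0" tensors by name, and run a forward pass to compute embeddings. The helper name embed_images and its arguments are hypothetical placeholders, not taken from any example below.

import tensorflow as tf
import facenet  # from the facenet project that provides load_model

def embed_images(model_path, images):
    # Hypothetical helper: model_path points at a facenet model directory or .pb
    # file, and images is a prewhitened float array of shape (batch, 160, 160, 3).
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model into the default graph
            facenet.load_model(model_path)
            # Look up the well-known input and output tensors by name
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            # Run a forward pass; phase_train is False at inference time
            feed_dict = {images_placeholder: images, phase_train_placeholder: False}
            return sess.run(embeddings, feed_dict=feed_dict)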
Example 1: __init__

# Required imports: import facenet [as alias]
# Or: from facenet import load_model [as alias]
def __init__(self):
    INT8ENABLE = False
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
    # allow_growth=True could be used instead for incremental GPU memory allocation
    self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
    with self.sess.as_default():
        graph_load = facenet.load_model(facenet_model_checkpoint)
    # Re-create the session so the (possibly TensorRT-optimized) graph can be re-imported cleanly
    self.sess.close()
    tf.reset_default_graph()
    self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))

    # For INT8 calibration
    if INT8ENABLE:
        print("TensorRT INT8 Enabled and Running INT8 Calib")
        input_map = np.random.random_sample((1, 160, 160, 3))
        inc = tf.constant(input_map, dtype=tf.float32)
        dataset = tf.data.Dataset.from_tensors(inc)
        dataset = dataset.repeat()
        iterator = dataset.make_one_shot_iterator()
        next_element = iterator.get_next()
        out = tf.import_graph_def(graph_load, input_map={"input": next_element, "phase_train": False},
                                  return_elements=["embeddings"])
        self.sess.run(out)
        graph_load = trt.calib_graph_to_infer_graph(graph_load)
        # for node in trt_int8_graph.node: print("[NODE] ", node.name, node.op)
        # for op in sess.graph.get_operations(): print("[OP] ", op.name)
    tf.import_graph_def(graph_load, input_map=None, name='')
Example 2: load_model

# Required imports: import facenet [as alias]
# Or: from facenet import load_model [as alias]
def load_model(self):
    sess = tf.Session()
    with sess.as_default():
        # Load the model
        facenet.load_model(self.model_dir)
        # Get input and output tensors
        images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
        embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
        phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
    self._images_placeholder = images_placeholder
    self._embeddings = embeddings
    self._phase_train_placeholder = phase_train_placeholder
    self._sess = sess
Example 3: __init__

# Required imports: import facenet [as alias]
# Or: from facenet import load_model [as alias]
def __init__(self):
    super(InceptionResnetV1Model, self).__init__('model', None, {})
    # Load Facenet CNN
    facenet.load_model(self.model_path)
    # Save references to the input and output tensors
    graph = tf.get_default_graph()
    self.face_input = graph.get_tensor_by_name("input:0")
    self.embedding_output = graph.get_tensor_by_name("embeddings:0")
Example 4: main

# Required imports: import facenet [as alias]
# Or: from facenet import load_model [as alias]
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
            # Get the paths for the corresponding images
            paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs)

            image_paths_placeholder = tf.placeholder(tf.string, shape=(None, 1), name='image_paths')
            labels_placeholder = tf.placeholder(tf.int32, shape=(None, 1), name='labels')
            batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
            control_placeholder = tf.placeholder(tf.int32, shape=(None, 1), name='control')
            phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

            nrof_preprocess_threads = 4
            image_size = (args.image_size, args.image_size)
            eval_input_queue = data_flow_ops.FIFOQueue(capacity=2000000,
                                                       dtypes=[tf.string, tf.int32, tf.int32],
                                                       shapes=[(1,), (1,), (1,)],
                                                       shared_name=None, name=None)
            eval_enqueue_op = eval_input_queue.enqueue_many([image_paths_placeholder, labels_placeholder, control_placeholder], name='eval_enqueue_op')
            image_batch, label_batch = facenet.create_input_pipeline(eval_input_queue, image_size, nrof_preprocess_threads, batch_size_placeholder)

            # Load the model
            input_map = {'image_batch': image_batch, 'label_batch': label_batch, 'phase_train': phase_train_placeholder}
            facenet.load_model(args.model, input_map=input_map)

            # Get output tensor
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")

            coord = tf.train.Coordinator()
            tf.train.start_queue_runners(coord=coord, sess=sess)

            evaluate(sess, eval_enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder,
                     batch_size_placeholder, control_placeholder, embeddings, label_batch, paths, actual_issame,
                     args.lfw_batch_size, args.lfw_nrof_folds, args.distance_metric, args.subtract_mean,
                     args.use_flipped_images, args.use_fixed_image_standardization)
Example 5: main

# Required imports: import facenet [as alias]
# Or: from facenet import load_model [as alias]
def main(args):
    images = load_and_align_data(args.image_files, args.image_size, args.margin, args.gpu_memory_fraction)
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model
            facenet.load_model(args.model)
            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            # Run forward pass to calculate embeddings
            feed_dict = {images_placeholder: images, phase_train_placeholder: False}
            emb = sess.run(embeddings, feed_dict=feed_dict)

            nrof_images = len(args.image_files)
            print('Images:')
            for i in range(nrof_images):
                print('%1d: %s' % (i, args.image_files[i]))
            print('')

            # Print distance matrix
            print('Distance matrix')
            print('    ', end='')
            for i in range(nrof_images):
                print('    %1d     ' % i, end='')
            print('')
            for i in range(nrof_images):
                print('%1d  ' % i, end='')
                for j in range(nrof_images):
                    # Euclidean distance between the two embeddings
                    dist = np.sqrt(np.sum(np.square(np.subtract(emb[i, :], emb[j, :]))))
                    print('  %1.4f  ' % dist, end='')
                print('')
Example 6: main

# Required imports: import facenet [as alias]
# Or: from facenet import load_model [as alias]
def main(args):
    #images, cout_per_image, nrof_samples = load_and_align_data(args.image_files, args.image_size, args.margin, args.gpu_memory_fraction)
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model
            facenet.load_model(args.model)
            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            # Run forward pass to calculate embeddings
            feed_dict = {images_placeholder: images, phase_train_placeholder: False}
            emb = sess.run(embeddings, feed_dict=feed_dict)

            classifier_filename_exp = os.path.expanduser(args.classifier_filename)
            with open(classifier_filename_exp, 'rb') as infile:
                (model, class_names) = pickle.load(infile)
            print('Loaded classifier model from file "%s"\n' % classifier_filename_exp)

            predictions = model.predict_proba(emb)
            best_class_indices = np.argmax(predictions, axis=1)
            best_class_probabilities = predictions[np.arange(len(best_class_indices)), best_class_indices]
            k = 0
            # print predictions
            for i in range(nrof_samples):
                print("\npeople in image %s :" % (args.image_files[i]))
                for j in range(cout_per_image[i]):
                    print('%s: %.3f' % (class_names[best_class_indices[k]], best_class_probabilities[k]))
                    k += 1
Example 7: main

# Required imports: import facenet [as alias]
# Or: from facenet import load_model [as alias]
def main(args):
    images, cout_per_image, nrof_samples = load_and_align_data(args.image_files, args.image_size, args.margin, args.gpu_memory_fraction)
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model
            facenet.load_model(args.model)
            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            # Run forward pass to calculate embeddings
            feed_dict = {images_placeholder: images, phase_train_placeholder: False}
            emb = sess.run(embeddings, feed_dict=feed_dict)

            classifier_filename_exp = os.path.expanduser(args.classifier_filename)
            with open(classifier_filename_exp, 'rb') as infile:
                (model, class_names) = pickle.load(infile)
            print('Loaded classifier model from file "%s"\n' % classifier_filename_exp)

            predictions = model.predict_proba(emb)
            best_class_indices = np.argmax(predictions, axis=1)
            best_class_probabilities = predictions[np.arange(len(best_class_indices)), best_class_indices]
            k = 0
            # print predictions
            for i in range(nrof_samples):
                print("\npeople in image %s :" % (args.image_files[i]))
                for j in range(cout_per_image[i]):
                    print('%s: %.3f' % (class_names[best_class_indices[k]], best_class_probabilities[k]))
                    k += 1
Example 8: __init__

# Required imports: import facenet [as alias]
# Or: from facenet import load_model [as alias]
def __init__(self):
    self.sess = tf.Session()
    with self.sess.as_default():
        facenet.load_model(facenet_model_checkpoint)
Example 9: __init__

# Required imports: import facenet [as alias]
# Or: from facenet import load_model [as alias]
def __init__(self):
    super(InceptionResnetV1Model, self).__init__(scope='model')
    # Load Facenet CNN
    facenet.load_model(self.model_path)
    # Save references to the input and output tensors
    graph = tf.get_default_graph()
    self.face_input = graph.get_tensor_by_name("input:0")
    self.embedding_output = graph.get_tensor_by_name("embeddings:0")
Example 10: init_triplet_model

# Required imports: import facenet [as alias]
# Or: from facenet import load_model [as alias]
def init_triplet_model():
    global track_struct
    global triplet_graph
    global triplet_sess
    global eval_enqueue_op
    global image_paths_placeholder
    global labels_placeholder
    global phase_train_placeholder
    global batch_size_placeholder
    global control_placeholder
    global embeddings
    global label_batch
    global distance_metric

    f_image_size = 160
    distance_metric = 0

    triplet_graph = tf.Graph()
    with triplet_graph.as_default():
        image_paths_placeholder = tf.placeholder(tf.string, shape=(None, 1), name='image_paths')
        labels_placeholder = tf.placeholder(tf.int32, shape=(None, 1), name='labels')
        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
        control_placeholder = tf.placeholder(tf.int32, shape=(None, 1), name='control')
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

        nrof_preprocess_threads = 4
        image_size = (f_image_size, f_image_size)
        eval_input_queue = data_flow_ops.FIFOQueue(capacity=2000000,
                                                   dtypes=[tf.string, tf.int32, tf.int32],
                                                   shapes=[(1,), (1,), (1,)],
                                                   shared_name=None, name=None)
        eval_enqueue_op = eval_input_queue.enqueue_many([image_paths_placeholder,
                                                         labels_placeholder, control_placeholder],
                                                        name='eval_enqueue_op')
        image_batch, label_batch = facenet.create_input_pipeline(eval_input_queue, image_size,
                                                                 nrof_preprocess_threads, batch_size_placeholder)

    triplet_sess = tf.Session(graph=triplet_graph)
    with triplet_sess.as_default():
        with triplet_graph.as_default():
            # Load the model
            input_map = {'image_batch': image_batch, 'label_batch': label_batch, 'phase_train': phase_train_placeholder}
            facenet.load_model(track_struct['file_path']['triplet_model'], input_map=input_map)

            # Get output tensor
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            coord = tf.train.Coordinator()
            tf.train.start_queue_runners(coord=coord, sess=triplet_sess)
    return
Example 11: feature_extract

# Required imports: import facenet [as alias]
# Or: from facenet import load_model [as alias]
def feature_extract(feature_size, num_patch, max_length, patch_folder, triplet_model):
    f_image_size = 160
    distance_metric = 0
    with tf.Graph().as_default():
        with tf.Session() as sess:
            image_paths_placeholder = tf.placeholder(tf.string, shape=(None, 1), name='image_paths')
            labels_placeholder = tf.placeholder(tf.int32, shape=(None, 1), name='labels')
            batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
            control_placeholder = tf.placeholder(tf.int32, shape=(None, 1), name='control')
            phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

            nrof_preprocess_threads = 4
            image_size = (f_image_size, f_image_size)
            eval_input_queue = data_flow_ops.FIFOQueue(capacity=2000000,
                                                       dtypes=[tf.string, tf.int32, tf.int32],
                                                       shapes=[(1,), (1,), (1,)],
                                                       shared_name=None, name=None)
            eval_enqueue_op = eval_input_queue.enqueue_many([image_paths_placeholder,
                                                             labels_placeholder, control_placeholder],
                                                            name='eval_enqueue_op')
            image_batch, label_batch = facenet.create_input_pipeline(eval_input_queue, image_size,
                                                                     nrof_preprocess_threads, batch_size_placeholder)

            # Load the model
            input_map = {'image_batch': image_batch, 'label_batch': label_batch, 'phase_train': phase_train_placeholder}
            facenet.load_model(triplet_model, input_map=input_map)

            # Get output tensor
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            coord = tf.train.Coordinator()
            tf.train.start_queue_runners(coord=coord, sess=sess)

            fea_mat = np.zeros((num_patch, feature_size - 4 + 2))
            tracklet_list = os.listdir(patch_folder)
            N_tracklet = len(tracklet_list)
            cnt = 0
            for n in range(N_tracklet):
                tracklet_folder = patch_folder + '/' + tracklet_list[n]
                patch_list = os.listdir(tracklet_folder)

                # get patch list, track_id and fr_id, starts from 1
                prev_cnt = cnt
                for m in range(len(patch_list)):
                    # track_id
                    fea_mat[cnt, 0] = n + 1
                    # fr_id
                    fea_mat[cnt, 1] = int(patch_list[m][-8:-4])
                    cnt = cnt + 1
                    patch_list[m] = tracklet_folder + '/' + patch_list[m]

                # print(n)
                lfw_batch_size = len(patch_list)
                emb_array = feature_encode(sess, eval_enqueue_op, image_paths_placeholder, labels_placeholder,
                                           phase_train_placeholder, batch_size_placeholder, control_placeholder,
                                           embeddings, label_batch, patch_list, lfw_batch_size, distance_metric)
                fea_mat[prev_cnt:prev_cnt + lfw_batch_size, 2:] = np.copy(emb_array)
    return fea_mat
Example 12: main

# Required imports: import facenet [as alias]
# Or: from facenet import load_model [as alias]
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # create output directory if it doesn't exist
            output_dir = os.path.expanduser(args.output_dir)
            if not os.path.isdir(output_dir):
                os.makedirs(output_dir)

            # load the model
            print("Loading trained model...\n")
            meta_file, ckpt_file = facenet.get_model_filenames(os.path.expanduser(args.trained_model_dir))
            facenet.load_model(args.trained_model_dir, meta_file, ckpt_file)

            # grab all image paths and labels
            print("Finding image paths and targets...\n")
            data = load_files(args.data_dir, load_content=False, shuffle=False)
            labels_array = data['target']
            paths = data['filenames']

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Generating embeddings from images...\n')
            start_time = time.time()
            batch_size = args.batch_size
            nrof_images = len(paths)
            nrof_batches = int(np.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in xrange(nrof_batches):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, do_random_crop=False, do_random_flip=False,
                                           image_size=image_size, do_prewhiten=True)
                feed_dict = {images_placeholder: images, phase_train_placeholder: False}
                emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)

            time_avg_forward_pass = (time.time() - start_time) / float(nrof_images)
            print("Forward pass took avg of %.3f[seconds/image] for %d images\n" % (time_avg_forward_pass, nrof_images))

            print("Finally saving embeddings and gallery to: %s" % (output_dir))
            # save the gallery and embeddings (signatures) as numpy arrays to disk
            np.save(os.path.join(output_dir, "gallery.npy"), labels_array)
            np.save(os.path.join(output_dir, "signatures.npy"), emb_array)