This article collects typical usage examples of the Python method lfw.get_paths. If you are unsure what lfw.get_paths does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also look further into the other functions of the lfw module.
The following presents 5 code examples of the lfw.get_paths method, ordered by popularity.
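Before the full examples, here is a minimal sketch of the call pattern they all share; the pairs-file name and the image directory below are illustrative placeholders, not values from any of the projects.

import lfw

# Parse the LFW pairs file and resolve each pair to two image paths.
pairs = lfw.read_pairs('pairs.txt')                               # placeholder file name
paths, actual_issame = lfw.get_paths('/data/lfw_aligned', pairs)  # placeholder directory
# `paths` lists the images in pair order (two entries per pair) and
# `actual_issame[i]` says whether the i-th pair shows the same person.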
Example 1: validate_on_lfw
# Required module: import lfw [as alias]
# Or: from lfw import get_paths [as alias]
import cv2
import numpy as np
import tqdm
from scipy import interpolate
from scipy.optimize import brentq
from sklearn import metrics

import lfw


def validate_on_lfw(model, lfw_160_path):
    # Read the file containing the pairs used for testing
    pairs = lfw.read_pairs('validation-LFW-pairs.txt')
    # Get the paths for the corresponding images
    paths, actual_issame = lfw.get_paths(lfw_160_path, pairs)
    num_pairs = len(actual_issame)
    all_embeddings = np.zeros((num_pairs * 2, 512), dtype='float32')
    for k in tqdm.trange(num_pairs):
        # Load both images of the pair and convert BGR -> RGB
        img1 = cv2.imread(paths[k * 2], cv2.IMREAD_COLOR)[:, :, ::-1]
        img2 = cv2.imread(paths[k * 2 + 1], cv2.IMREAD_COLOR)[:, :, ::-1]
        batch = np.stack([img1, img2], axis=0)
        embeddings = model.eval_embeddings(batch)
        all_embeddings[k * 2: k * 2 + 2, :] = embeddings

    tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(
        all_embeddings, actual_issame, distance_metric=1, subtract_mean=True)
    print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
    auc = metrics.auc(fpr, tpr)
    print('Area Under Curve (AUC): %1.3f' % auc)
    eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
    print('Equal Error Rate (EER): %1.3f' % eer)
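A hypothetical invocation of the function above; `my_model` and the directory path are placeholders, and the model object is only assumed to expose the eval_embeddings method used in the snippet.

validate_on_lfw(my_model, '/data/lfw_160')   # my_model.eval_embeddings(batch) must return a (2, 512) array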
Example 2: load_testset
# Required module: import lfw [as alias]
# Or: from lfw import get_paths [as alias]
import numpy as np

import facenet
import lfw

# pairs_path, testset_path, file_extension and image_size are module-level
# names defined elsewhere in the original project.


def load_testset(size):
    # Load image paths and pair labels
    pairs = lfw.read_pairs(pairs_path)
    paths, labels = lfw.get_paths(testset_path, pairs, file_extension)
    # Randomly pick `size` pairs without replacement
    permutation = np.random.choice(len(labels), size, replace=False)
    paths_batch_1 = []
    paths_batch_2 = []
    for index in permutation:
        paths_batch_1.append(paths[index * 2])
        paths_batch_2.append(paths[index * 2 + 1])
    labels = np.asarray(labels)[permutation]
    paths_batch_1 = np.asarray(paths_batch_1)
    paths_batch_2 = np.asarray(paths_batch_2)
    # Load the images
    faces1 = facenet.load_data(paths_batch_1, False, False, image_size)
    faces2 = facenet.load_data(paths_batch_2, False, False, image_size)
    # Rescale pixel values to the [0, 1] range
    min_pixel = min(np.min(faces1), np.min(faces2))
    max_pixel = max(np.max(faces1), np.max(faces2))
    faces1 = (faces1 - min_pixel) / (max_pixel - min_pixel)
    faces2 = (faces2 - min_pixel) / (max_pixel - min_pixel)
    # Convert labels to one-hot vectors
    onehot_labels = []
    for index in range(len(labels)):
        if labels[index]:
            onehot_labels.append([1, 0])
        else:
            onehot_labels.append([0, 1])
    return faces1, faces2, np.array(onehot_labels)
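The snippet above relies on several module-level names from its original project (pairs_path, testset_path, file_extension, image_size). A minimal sketch of how they might be defined, with purely illustrative values:

pairs_path = 'data/pairs.txt'        # LFW pairs file (placeholder path)
testset_path = '/data/lfw_aligned'   # directory of aligned face crops (placeholder path)
file_extension = 'png'               # extension passed through to lfw.get_paths
image_size = 160                     # crop size expected by facenet.load_data

faces1, faces2, onehot_labels = load_testset(size=100)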
Example 3: main
# Required module: import lfw [as alias]
# Or: from lfw import get_paths [as alias]
import os

import tensorflow as tf
from tensorflow.python.ops import data_flow_ops

import facenet
import lfw


def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
            # Get the paths for the corresponding images
            paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs)

            image_paths_placeholder = tf.placeholder(tf.string, shape=(None, 1), name='image_paths')
            labels_placeholder = tf.placeholder(tf.int32, shape=(None, 1), name='labels')
            batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
            control_placeholder = tf.placeholder(tf.int32, shape=(None, 1), name='control')
            phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

            nrof_preprocess_threads = 4
            image_size = (args.image_size, args.image_size)
            eval_input_queue = data_flow_ops.FIFOQueue(capacity=2000000,
                                                       dtypes=[tf.string, tf.int32, tf.int32],
                                                       shapes=[(1,), (1,), (1,)],
                                                       shared_name=None, name=None)
            eval_enqueue_op = eval_input_queue.enqueue_many(
                [image_paths_placeholder, labels_placeholder, control_placeholder],
                name='eval_enqueue_op')
            image_batch, label_batch = facenet.create_input_pipeline(
                eval_input_queue, image_size, nrof_preprocess_threads, batch_size_placeholder)

            # Load the model
            input_map = {'image_batch': image_batch, 'label_batch': label_batch,
                         'phase_train': phase_train_placeholder}
            facenet.load_model(args.model, input_map=input_map)

            # Get output tensor
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")

            coord = tf.train.Coordinator()
            tf.train.start_queue_runners(coord=coord, sess=sess)

            # evaluate() is a helper defined elsewhere in the same script
            evaluate(sess, eval_enqueue_op, image_paths_placeholder, labels_placeholder,
                     phase_train_placeholder, batch_size_placeholder, control_placeholder,
                     embeddings, label_batch, paths, actual_issame, args.lfw_batch_size,
                     args.lfw_nrof_folds, args.distance_metric, args.subtract_mean,
                     args.use_flipped_images, args.use_fixed_image_standardization)
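main expects an argparse-style namespace; the attribute names below are exactly the ones the snippet reads, while the values are illustrative placeholders.

from types import SimpleNamespace

args = SimpleNamespace(
    lfw_pairs='data/pairs.txt',        # pairs file (placeholder)
    lfw_dir='/data/lfw_160',           # aligned LFW images (placeholder)
    model='/path/to/facenet_model',    # frozen graph or checkpoint dir (placeholder)
    image_size=160, lfw_batch_size=100, lfw_nrof_folds=10,
    distance_metric=1, subtract_mean=True,
    use_flipped_images=False, use_fixed_image_standardization=True)
main(args)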
Example 4: load_testset (identical to Example 2 except that lfw.get_paths is called without the file-extension argument)
# Required module: import lfw [as alias]
# Or: from lfw import get_paths [as alias]
import numpy as np

import facenet
import lfw


def load_testset(size):
    # Load image paths and pair labels
    pairs = lfw.read_pairs(pairs_path)
    paths, labels = lfw.get_paths(testset_path, pairs)
    # Randomly pick `size` pairs without replacement
    permutation = np.random.choice(len(labels), size, replace=False)
    paths_batch_1 = []
    paths_batch_2 = []
    for index in permutation:
        paths_batch_1.append(paths[index * 2])
        paths_batch_2.append(paths[index * 2 + 1])
    labels = np.asarray(labels)[permutation]
    paths_batch_1 = np.asarray(paths_batch_1)
    paths_batch_2 = np.asarray(paths_batch_2)
    # Load the images
    faces1 = facenet.load_data(paths_batch_1, False, False, image_size)
    faces2 = facenet.load_data(paths_batch_2, False, False, image_size)
    # Rescale pixel values to the [0, 1] range
    min_pixel = min(np.min(faces1), np.min(faces2))
    max_pixel = max(np.max(faces1), np.max(faces2))
    faces1 = (faces1 - min_pixel) / (max_pixel - min_pixel)
    faces2 = (faces2 - min_pixel) / (max_pixel - min_pixel)
    # Convert labels to one-hot vectors
    onehot_labels = []
    for index in range(len(labels)):
        if labels[index]:
            onehot_labels.append([1, 0])
        else:
            onehot_labels.append([0, 1])
    return faces1, faces2, np.array(onehot_labels)
Example 5: test
# Required module: import lfw [as alias]
# Or: from lfw import get_paths [as alias]
import math
import os

import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from scipy import interpolate
from scipy.optimize import brentq
from sklearn import metrics

import lfw
import network   # project-local model definition
import utils     # project-local data-loading helpers


def test(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args.test_list_dir))
            # Get the paths for the corresponding images
            paths, actual_issame = lfw.get_paths(os.path.expanduser(args.test_data_dir), pairs, args.test_list_dir)

            image_size = args.image_size
            print('image size', image_size)
            # NHWC input: batch x height x width x 3 colour channels
            images_placeholder = tf.placeholder(
                tf.float32, shape=(None, args.image_height, args.image_width, 3), name='image')
            phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

            # Network definition
            prelogits1 = network.infer(images_placeholder, args.embedding_size)
            prelogits = prelogits1   # fall back to the raw prelogits when batch norm is disabled
            if args.fc_bn:
                print('do batch norm after network')
                prelogits = slim.batch_norm(prelogits1, is_training=phase_train_placeholder,
                                            epsilon=1e-5, scale=True, scope='softmax_bn')
            # embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')
            embeddings = tf.identity(prelogits)
            embedding_size = embeddings.get_shape()[1]

            # Restore the trained weights once the graph has been built
            # saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)
            saver = tf.train.Saver(tf.global_variables())
            saver.restore(sess, args.model)

            # Run forward pass to calculate embeddings
            print('Running forward pass on testing images')
            batch_size = args.test_batch_size
            nrof_images = len(paths)
            nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches):
                start_index = i * batch_size
                print('handling {}/{}'.format(start_index, nrof_images))
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = utils.load_data(paths_batch, False, False, args.image_height, args.image_width,
                                         False, (args.image_height, args.image_width))
                feed_dict = {images_placeholder: images, phase_train_placeholder: False}
                feats, _ = sess.run([embeddings, prelogits], feed_dict=feed_dict)
                # Whether to L2-normalize the features depends on the model; it is enabled here.
                feats = utils.l2_normalize(feats)
                emb_array[start_index:end_index, :] = feats

            # First evaluation pass (threshold argument 0.001)
            tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(
                emb_array, actual_issame, 0.001, nrof_folds=args.test_nrof_folds)
            print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
            print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
            auc = metrics.auc(fpr, tpr)
            print('Area Under Curve (AUC): %1.3f' % auc)
            eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
            print('Equal Error Rate (EER): %1.3f' % eer)

            # Second evaluation pass with the stricter 0.0001 threshold
            tpr1, fpr1, accuracy1, val1, val_std1, far1 = lfw.evaluate(
                emb_array, actual_issame, 0.0001, nrof_folds=args.test_nrof_folds)
            print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy1), np.std(accuracy1)))
            print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val1, val_std1, far1))
            auc = metrics.auc(fpr1, tpr1)
            print('Area Under Curve (AUC): %1.3f' % auc)
            eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr1, tpr1)(x), 0., 1.)
            print('Equal Error Rate (EER): %1.3f' % eer)
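Note that this example runs lfw.evaluate twice on the same embedding matrix, passing 0.001 and then 0.0001 as the third argument (which in this fork of lfw appears to be the threshold or target false-accept rate), so accuracy, validation rate, AUC and EER are reported at two operating points.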