This article collects typical usage examples of the Python method facenet.center_loss: what the method does, how it is called, and what working code that uses it looks like. You can also explore further usage examples from the facenet module that the method belongs to.
Two code examples of facenet.center_loss are shown below, sorted by popularity by default.
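For context, facenet.center_loss implements the center loss from Wen et al., "A Discriminative Feature Learning Approach for Deep Face Recognition" (ECCV 2016): each class keeps a running center, and the loss penalizes the squared distance between each feature and its class center. The sketch below reconstructs the idea with the same signature used in the examples; it is an illustration of the technique, not the repository's exact source.

def center_loss(features, label, alfa, nrof_classes):
    # One non-trainable center per class, updated as features stream through
    nrof_features = features.get_shape()[1]
    centers = tf.get_variable('centers', [nrof_classes, nrof_features], dtype=tf.float32,
                              initializer=tf.constant_initializer(0), trainable=False)
    label = tf.reshape(label, [-1])
    centers_batch = tf.gather(centers, label)
    # Move each referenced center a fraction (1 - alfa) of the way toward its feature
    diff = (1 - alfa) * (centers_batch - features)
    centers = tf.scatter_sub(centers, label, diff)
    with tf.control_dependencies([centers]):
        # Mean squared distance between features and their (pre-update) class centers
        loss = tf.reduce_mean(tf.square(features - centers_batch))
    return loss, centers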
Example 1: testCenterLoss
# Required import: import facenet [as alias]
# Or: from facenet import center_loss [as alias]
def testCenterLoss(self):
    batch_size = 16
    nrof_features = 2
    nrof_classes = 16
    alfa = 0.5

    with tf.Graph().as_default():
        features = tf.placeholder(tf.float32, shape=(batch_size, nrof_features), name='features')
        labels = tf.placeholder(tf.int32, shape=(batch_size,), name='labels')

        # Define center loss
        center_loss, centers = facenet.center_loss(features, labels, alfa, nrof_classes)

        label_to_center = np.array([
            [-3, -3], [-3, -1], [-3, 1], [-3, 3],
            [-1, -3], [-1, -1], [-1, 1], [-1, 3],
            [ 1, -3], [ 1, -1], [ 1, 1], [ 1, 3],
            [ 3, -3], [ 3, -1], [ 3, 1], [ 3, 3]])

        sess = tf.Session()
        with sess.as_default():
            sess.run(tf.global_variables_initializer())
            np.random.seed(seed=666)

            for _ in range(0, 100):
                # Create array of random labels
                lbls = np.random.randint(low=0, high=nrof_classes, size=(batch_size,))
                feats = create_features(label_to_center, batch_size, nrof_features, lbls)
                center_loss_, centers_ = sess.run([center_loss, centers], feed_dict={features: feats, labels: lbls})

            # After a large number of updates the estimated centers should be close to the true ones
            np.testing.assert_almost_equal(centers_, label_to_center, decimal=5, err_msg='Incorrect estimated centers')
            np.testing.assert_almost_equal(center_loss_, 0.0, decimal=5, err_msg='Incorrect center loss')
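The helper create_features is not shown on this page. For the assertions above to hold to five decimal places (loss → 0 and centers → label_to_center), it must place every feature exactly at its label's true center; a minimal stand-in consistent with the test is:

def create_features(label_to_center, batch_size, nrof_features, labels):
    # Each feature vector is exactly the true center of its label's class
    feats = np.zeros((batch_size, nrof_features), dtype=np.float32)
    for i in range(batch_size):
        feats[i, :] = label_to_center[labels[i]]
    return feats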
Example 2: train
# Required import: import facenet [as alias]
# Or: from facenet import center_loss [as alias]
def train(args, sess, epoch, image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder,
          learning_rate_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder, step,
          loss, train_op, summary_op, summary_writer, reg_losses, learning_rate_schedule_file,
          stat, cross_entropy_mean, accuracy,
          learning_rate, prelogits, prelogits_center_loss, random_rotate, random_crop, random_flip, prelogits_norm, prelogits_hist_max, use_fixed_image_standardization):
    batch_number = 0

    if args.learning_rate > 0.0:
        lr = args.learning_rate
    else:
        lr = facenet.get_learning_rate_from_file(learning_rate_schedule_file, epoch)

    if lr <= 0:
        return False

    index_epoch = sess.run(index_dequeue_op)
    label_epoch = np.array(label_list)[index_epoch]
    image_epoch = np.array(image_list)[index_epoch]

    # Enqueue one epoch of image paths and labels
    labels_array = np.expand_dims(np.array(label_epoch), 1)
    image_paths_array = np.expand_dims(np.array(image_epoch), 1)
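    # facenet encodes per-image preprocessing options as a bitmask: RANDOM_ROTATE,
    # RANDOM_CROP, RANDOM_FLIP and FIXED_STANDARDIZATION are distinct powers of two,
    # so a single integer per image tells the input pipeline which augmentations to apply.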
    control_value = facenet.RANDOM_ROTATE * random_rotate + facenet.RANDOM_CROP * random_crop + facenet.RANDOM_FLIP * random_flip + facenet.FIXED_STANDARDIZATION * use_fixed_image_standardization
    control_array = np.ones_like(labels_array) * control_value
    sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array, control_placeholder: control_array})
    # Training loop
    train_time = 0
    while batch_number < args.epoch_size:
        start_time = time.time()
        feed_dict = {learning_rate_placeholder: lr, phase_train_placeholder: True, batch_size_placeholder: args.batch_size}
        tensor_list = [loss, train_op, step, reg_losses, prelogits, cross_entropy_mean, learning_rate, prelogits_norm, accuracy, prelogits_center_loss]
        if batch_number % 100 == 0:
            # Every 100 batches, also evaluate the summary op and log to TensorBoard
            loss_, _, step_, reg_losses_, prelogits_, cross_entropy_mean_, lr_, prelogits_norm_, accuracy_, center_loss_, summary_str = sess.run(tensor_list + [summary_op], feed_dict=feed_dict)
            summary_writer.add_summary(summary_str, global_step=step_)
        else:
            loss_, _, step_, reg_losses_, prelogits_, cross_entropy_mean_, lr_, prelogits_norm_, accuracy_, center_loss_ = sess.run(tensor_list, feed_dict=feed_dict)

        duration = time.time() - start_time
        stat['loss'][step_-1] = loss_
        stat['center_loss'][step_-1] = center_loss_
        stat['reg_loss'][step_-1] = np.sum(reg_losses_)
        stat['xent_loss'][step_-1] = cross_entropy_mean_
        stat['prelogits_norm'][step_-1] = prelogits_norm_
        stat['learning_rate'][epoch-1] = lr_
        stat['accuracy'][step_-1] = accuracy_
        stat['prelogits_hist'][epoch-1,:] += np.histogram(np.minimum(np.abs(prelogits_), prelogits_hist_max), bins=1000, range=(0.0, prelogits_hist_max))[0]

        print('Epoch: [%d][%d/%d]\tTime %.3f\tLoss %2.3f\tXent %2.3f\tRegLoss %2.3f\tAccuracy %2.3f\tLr %2.5f\tCl %2.3f' %
              (epoch, batch_number+1, args.epoch_size, duration, loss_, cross_entropy_mean_, np.sum(reg_losses_), accuracy_, lr_, center_loss_))
        batch_number += 1
        train_time += duration

    # Add the total training time for the epoch to the summary
    summary = tf.Summary()
    #pylint: disable=maybe-no-member
    summary.value.add(tag='time/total', simple_value=train_time)
    summary_writer.add_summary(summary, global_step=step_)
    return True
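For reference, the prelogits_center_loss tensor passed into train() is typically built once at graph-construction time by weighting facenet.center_loss into the regularization losses, roughly as in facenet's train_softmax.py. The sketch below follows that pattern; flag names such as args.center_loss_alfa and args.center_loss_factor are taken from that script and may differ in other versions.

# Center loss on the pre-logits embedding, weighted into the regularization losses
prelogits_center_loss, _ = facenet.center_loss(prelogits, label_batch, args.center_loss_alfa, nrof_classes)
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_center_loss * args.center_loss_factor)
# The total loss minimized by train_op is cross-entropy plus all regularization terms
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
total_loss = tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')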