This page collects typical usage examples of the Python method nets.inception.inception_v4_arg_scope. If you have been wondering what inception.inception_v4_arg_scope does, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse further usage examples from its parent module, nets.inception.
Seven code examples of inception.inception_v4_arg_scope are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
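Before the examples, here is a minimal, self-contained sketch of the basic pattern (TF 1.x with TF-Slim; the placeholder shape and num_classes=1001 follow the Inception V4 defaults). inception_v4_arg_scope returns an arg scope that supplies the weight decay, initializers, and batch-norm settings to every layer built inside it:

import tensorflow as tf
from nets import inception

slim = tf.contrib.slim

# Any conv/batch-norm layer created inside this scope inherits the
# regularizer, initializer, and batch-norm defaults from the arg scope.
images = tf.placeholder(tf.float32, (None, 299, 299, 3))
with slim.arg_scope(inception.inception_v4_arg_scope()):
    logits, end_points = inception.inception_v4(
        images, num_classes=1001, is_training=False)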
Example 1: testNoBatchNormScaleByDefault
# Required import: from nets import inception [as alias]
# Or: from nets.inception import inception_v4_arg_scope [as alias]
def testNoBatchNormScaleByDefault(self):
    height, width = 299, 299
    num_classes = 1000
    inputs = tf.placeholder(tf.float32, (1, height, width, 3))
    with tf.contrib.slim.arg_scope(inception.inception_v4_arg_scope()):
        inception.inception_v4(inputs, num_classes, is_training=False)
    # The default arg scope leaves batch_norm_scale=False, so no
    # BatchNorm/gamma variables should have been created.
    self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), [])
Example 2: testBatchNormScale
# Required import: from nets import inception [as alias]
# Or: from nets.inception import inception_v4_arg_scope [as alias]
def testBatchNormScale(self):
    height, width = 299, 299
    num_classes = 1000
    inputs = tf.placeholder(tf.float32, (1, height, width, 3))
    with tf.contrib.slim.arg_scope(
            inception.inception_v4_arg_scope(batch_norm_scale=True)):
        inception.inception_v4(inputs, num_classes, is_training=False)
    # With batch_norm_scale=True, every batch-norm layer should own a
    # gamma variable next to its moving_mean.
    gamma_names = set(
        v.op.name for v in tf.global_variables('.*/BatchNorm/gamma:0$'))
    self.assertGreater(len(gamma_names), 0)
    for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'):
        self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)
Example 3: use_inceptionv4
# Required import: from nets import inception [as alias]
# Or: from nets.inception import inception_v4_arg_scope [as alias]
def use_inceptionv4(self):
    image_size = inception.inception_v4.default_image_size
    img_path = "../../data/misec_images/EnglishCockerSpaniel_simon.jpg"
    checkpoint_path = "../../data/trained_models/inception_v4/inception_v4.ckpt"
    with tf.Graph().as_default():
        # Decode the image and preprocess it to the network's input size.
        image_string = tf.read_file(img_path)
        image = tf.image.decode_jpeg(image_string, channels=3)
        processed_image = inception_preprocessing.preprocess_image(
            image, image_size, image_size, is_training=False)
        processed_images = tf.expand_dims(processed_image, 0)
        # Create the model, use the default arg scope to configure the batch norm parameters.
        with slim.arg_scope(inception.inception_v4_arg_scope()):
            logits, _ = inception.inception_v4(
                processed_images, num_classes=1001, is_training=False)
        probabilities = tf.nn.softmax(logits)
        init_fn = slim.assign_from_checkpoint_fn(
            checkpoint_path,
            slim.get_model_variables('InceptionV4'))
        with tf.Session() as sess:
            init_fn(sess)
            np_image, probabilities = sess.run([image, probabilities])
            probabilities = probabilities[0, 0:]
            # Class indices sorted by descending probability.
            sorted_inds = [i[0] for i in sorted(
                enumerate(-probabilities), key=lambda x: x[1])]
        self.disp_names(sorted_inds, probabilities)
        plt.figure()
        plt.imshow(np_image.astype(np.uint8))
        plt.axis('off')
        plt.title(img_path)
        plt.show()
    return
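The disp_names helper called above is defined elsewhere in the example's class and is not shown on this page. A minimal sketch of what it might look like, assuming slim's datasets.imagenet module for the human-readable label names, is:

# Hypothetical reconstruction of disp_names; the top_k parameter and the
# use of datasets.imagenet are assumptions, not part of the original code.
from datasets import imagenet

def disp_names(self, sorted_inds, probabilities, top_k=5):
    names = imagenet.create_readable_names_for_imagenet_labels()
    for i in range(top_k):
        index = sorted_inds[i]
        # Probability of class `index`, followed by its readable name.
        print('Probability %0.2f%% => [%s]' % (probabilities[index] * 100, names[index]))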
Example 4: fine_tune_inception
# Required import: from nets import inception [as alias]
# Or: from nets.inception import inception_v4_arg_scope [as alias]
def fine_tune_inception(self):
    train_dir = '/tmp/inception_finetuned/'
    image_size = inception.inception_v4.default_image_size
    checkpoint_path = "../../data/trained_models/inception_v4/inception_v4.ckpt"
    flowers_data_dir = "../../data/flower"
    with tf.Graph().as_default():
        tf.logging.set_verbosity(tf.logging.INFO)
        dataset = flowers.get_split('train', flowers_data_dir)
        images, _, labels = self.load_batch(dataset, height=image_size, width=image_size)
        # Create the model, use the default arg scope to configure the batch norm parameters.
        with slim.arg_scope(inception.inception_v4_arg_scope()):
            logits, _ = inception.inception_v4(
                images, num_classes=dataset.num_classes, is_training=True)
        # Specify the loss function:
        one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)
        total_loss = slim.losses.softmax_cross_entropy(logits, one_hot_labels)
        # total_loss = slim.losses.get_total_loss(add_regularization_losses=False)
        # total_loss = slim.losses.get_total_loss()
        # Create some summaries to visualize the training process:
        tf.summary.scalar('losses/Total_Loss', total_loss)
        # Specify the optimizer and create the train op:
        optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
        train_op = slim.learning.create_train_op(total_loss, optimizer)
        # Run the training: one pass over the dataset at batch size 32.
        number_of_steps = math.ceil(dataset.num_samples / 32) * 1
        final_loss = slim.learning.train(
            train_op,
            logdir=train_dir,
            init_fn=self.get_init_fn(checkpoint_path),
            number_of_steps=number_of_steps)
        print('Finished training. Last batch loss %f' % final_loss)
    return
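The get_init_fn helper above is also defined outside this snippet. A minimal sketch of the usual slim fine-tuning pattern, restoring every pretrained variable except the classification head whose shape no longer matches the flower classes, could look like this; the excluded scope names are an assumption based on the standard InceptionV4 variable layout:

def get_init_fn(self, checkpoint_path):
    # Skip the logits layers: their shapes were built for 1001 ImageNet
    # classes, not for dataset.num_classes.
    checkpoint_exclude_scopes = ['InceptionV4/Logits', 'InceptionV4/AuxLogits']
    variables_to_restore = slim.get_variables_to_restore(
        exclude=checkpoint_exclude_scopes)
    return slim.assign_from_checkpoint_fn(checkpoint_path, variables_to_restore)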
Example 5: testNoBatchNormScaleByDefault
# Required import: from nets import inception [as alias]
# Or: from nets.inception import inception_v4_arg_scope [as alias]
def testNoBatchNormScaleByDefault(self):
    height, width = 299, 299
    num_classes = 1000
    inputs = tf.placeholder(tf.float32, (1, height, width, 3))
    with slim.arg_scope(inception.inception_v4_arg_scope()):
        inception.inception_v4(inputs, num_classes, is_training=False)
    # No gamma variables are expected under the default arg scope.
    self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), [])
Example 6: testBatchNormScale
# Required import: from nets import inception [as alias]
# Or: from nets.inception import inception_v4_arg_scope [as alias]
def testBatchNormScale(self):
    height, width = 299, 299
    num_classes = 1000
    inputs = tf.placeholder(tf.float32, (1, height, width, 3))
    with slim.arg_scope(
            inception.inception_v4_arg_scope(batch_norm_scale=True)):
        inception.inception_v4(inputs, num_classes, is_training=False)
    # Every batch-norm layer should own a gamma variable next to its
    # moving_mean when scaling is enabled.
    gamma_names = set(
        v.op.name for v in tf.global_variables('.*/BatchNorm/gamma:0$'))
    self.assertGreater(len(gamma_names), 0)
    for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'):
        self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)
Example 7: use_fined_model
# Required import: from nets import inception [as alias]
# Or: from nets.inception import inception_v4_arg_scope [as alias]
def use_fined_model(self):
    image_size = inception.inception_v4.default_image_size
    batch_size = 3
    flowers_data_dir = "../../data/flower"
    train_dir = '/tmp/inception_finetuned/'
    with tf.Graph().as_default():
        tf.logging.set_verbosity(tf.logging.INFO)
        dataset = flowers.get_split('train', flowers_data_dir)
        images, images_raw, labels = self.load_batch(dataset, height=image_size, width=image_size)
        # Create the model, use the default arg scope to configure the batch norm parameters.
        with slim.arg_scope(inception.inception_v4_arg_scope()):
            logits, _ = inception.inception_v4(
                images, num_classes=dataset.num_classes, is_training=True)
        probabilities = tf.nn.softmax(logits)
        # Restore the fine-tuned weights from the latest checkpoint.
        checkpoint_path = tf.train.latest_checkpoint(train_dir)
        init_fn = slim.assign_from_checkpoint_fn(
            checkpoint_path,
            slim.get_variables_to_restore())
        with tf.Session() as sess:
            with slim.queues.QueueRunners(sess):
                sess.run(tf.initialize_local_variables())
                init_fn(sess)
                np_probabilities, np_images_raw, np_labels = sess.run(
                    [probabilities, images_raw, labels])
                for i in range(batch_size):
                    image = np_images_raw[i, :, :, :]
                    true_label = np_labels[i]
                    predicted_label = np.argmax(np_probabilities[i, :])
                    predicted_name = dataset.labels_to_names[predicted_label]
                    true_name = dataset.labels_to_names[true_label]
                    plt.figure()
                    plt.imshow(image.astype(np.uint8))
                    plt.title('Ground Truth: [%s], Prediction [%s]' % (true_name, predicted_name))
                    plt.axis('off')
                    plt.show()
    return
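Finally, Examples 4 and 7 both depend on a load_batch helper that this page does not show. A minimal sketch following the standard TF-Slim DatasetDataProvider pattern, returning preprocessed images, raw images for display, and labels, might be (the batch_size default and the resize step are assumptions):

def load_batch(self, dataset, batch_size=32, height=299, width=299, is_training=False):
    # Stream (image, label) pairs from the slim dataset.
    provider = slim.dataset_data_provider.DatasetDataProvider(dataset)
    image_raw, label = provider.get(['image', 'label'])
    # Preprocess for Inception; keep a resized raw copy for display.
    image = inception_preprocessing.preprocess_image(
        image_raw, height, width, is_training=is_training)
    image_raw = tf.image.resize_images(image_raw, [height, width])
    images, images_raw, labels = tf.train.batch(
        [image, image_raw, label],
        batch_size=batch_size,
        num_threads=1,
        capacity=2 * batch_size)
    return images, images_raw, labels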