This article collects typical usage examples of the Python method tensorflow.contrib.slim.python.slim.nets.inception_v3.inception_v3_base. If you have been wondering what inception_v3.inception_v3_base does, how to call it, or what it looks like in real code, the hand-picked examples below should help. You can also explore further usage examples of the module it belongs to, tensorflow.contrib.slim.python.slim.nets.inception_v3.

Six code examples of the inception_v3.inception_v3_base method are shown below, ordered by popularity by default.
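Before the collected examples, here is a minimal standalone sketch of calling inception_v3_base. It assumes a TensorFlow 1.x installation that still ships tf.contrib; the 299x299 input size and the 'Mixed_7c' endpoint name come from the examples below.

import tensorflow as tf
from tensorflow.contrib.slim.python.slim.nets import inception_v3

# Build the InceptionV3 base on a batch of 299x299 RGB images.
images = tf.placeholder(tf.float32, [None, 299, 299, 3])
net, end_points = inception_v3.inception_v3_base(images)

# `net` is the final 'Mixed_7c' feature map of shape (?, 8, 8, 2048);
# `end_points` maps every intermediate endpoint name to its tensor.
print(net)
print(sorted(end_points.keys()))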
Example 1: testBuildBaseNetwork

# Required import: from tensorflow.contrib.slim.python.slim.nets import inception_v3 [as alias]
# Or: from tensorflow.contrib.slim.python.slim.nets.inception_v3 import inception_v3_base [as alias]
def testBuildBaseNetwork(self):
    # Build the full InceptionV3 base (random_ops is imported elsewhere in the
    # test module, from tensorflow.python.ops) and check the final endpoint's
    # name, its shape, and the set of exposed endpoints.
    batch_size = 5
    height, width = 299, 299
    inputs = random_ops.random_uniform((batch_size, height, width, 3))
    final_endpoint, end_points = inception_v3.inception_v3_base(inputs)
    self.assertTrue(final_endpoint.op.name.startswith('InceptionV3/Mixed_7c'))
    self.assertListEqual(final_endpoint.get_shape().as_list(),
                         [batch_size, 8, 8, 2048])
    expected_endpoints = [
        'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'MaxPool_3a_3x3',
        'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3', 'Mixed_5b',
        'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
        'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'
    ]
    self.assertItemsEqual(end_points.keys(), expected_endpoints)
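A small non-test sketch of the same call, showing how the end_points dictionary checked in Example 1 is typically used to pull out intermediate feature maps. The endpoint names are the ones listed above; the placeholder input is illustrative.

import tensorflow as tf
from tensorflow.contrib.slim.python.slim.nets import inception_v3

images = tf.placeholder(tf.float32, [None, 299, 299, 3])
net, end_points = inception_v3.inception_v3_base(images)

# Intermediate activations are keyed by the endpoint names above.
mixed_6e = end_points['Mixed_6e']   # 17x17x768 feature map (see Example 3)
mixed_5d = end_points['Mixed_5d']   # 35x35x288 feature map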
Example 2: testBuildOnlyUptoFinalEndpoint

# Required import: from tensorflow.contrib.slim.python.slim.nets import inception_v3 [as alias]
# Or: from tensorflow.contrib.slim.python.slim.nets.inception_v3 import inception_v3_base [as alias]
def testBuildOnlyUptoFinalEndpoint(self):
    batch_size = 5
    height, width = 299, 299
    endpoints = [
        'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'MaxPool_3a_3x3',
        'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3', 'Mixed_5b',
        'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
        'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'
    ]
    # Rebuild the network in a fresh graph for every endpoint (ops and
    # random_ops are imported elsewhere in the test module) and verify that
    # construction stops at the requested final_endpoint.
    for index, endpoint in enumerate(endpoints):
        with ops.Graph().as_default():
            inputs = random_ops.random_uniform((batch_size, height, width, 3))
            out_tensor, end_points = inception_v3.inception_v3_base(
                inputs, final_endpoint=endpoint)
            self.assertTrue(
                out_tensor.op.name.startswith('InceptionV3/' + endpoint))
            self.assertItemsEqual(endpoints[:index + 1], end_points)
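The final_endpoint argument exercised in Example 2 is also how a truncated feature extractor is built in ordinary code. A sketch, with names and shapes taken from the examples in this article and everything else assumed:

import tensorflow as tf
from tensorflow.contrib.slim.python.slim.nets import inception_v3

images = tf.placeholder(tf.float32, [None, 299, 299, 3])

# Stop constructing the network at Mixed_6e; later layers are never created.
features, end_points = inception_v3.inception_v3_base(
    images, final_endpoint='Mixed_6e')

print(features)         # shape (?, 17, 17, 768), per Example 3's table
print(len(end_points))  # 15 endpoints, everything up to and including Mixed_6e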
Example 3: testBuildAndCheckAllEndPointsUptoMixed7c

# Required import: from tensorflow.contrib.slim.python.slim.nets import inception_v3 [as alias]
# Or: from tensorflow.contrib.slim.python.slim.nets.inception_v3 import inception_v3_base [as alias]
def testBuildAndCheckAllEndPointsUptoMixed7c(self):
    batch_size = 5
    height, width = 299, 299
    inputs = random_ops.random_uniform((batch_size, height, width, 3))
    _, end_points = inception_v3.inception_v3_base(
        inputs, final_endpoint='Mixed_7c')
    # Expected spatial resolution and channel depth of every endpoint up to
    # and including Mixed_7c.
    endpoints_shapes = {
        'Conv2d_1a_3x3': [batch_size, 149, 149, 32],
        'Conv2d_2a_3x3': [batch_size, 147, 147, 32],
        'Conv2d_2b_3x3': [batch_size, 147, 147, 64],
        'MaxPool_3a_3x3': [batch_size, 73, 73, 64],
        'Conv2d_3b_1x1': [batch_size, 73, 73, 80],
        'Conv2d_4a_3x3': [batch_size, 71, 71, 192],
        'MaxPool_5a_3x3': [batch_size, 35, 35, 192],
        'Mixed_5b': [batch_size, 35, 35, 256],
        'Mixed_5c': [batch_size, 35, 35, 288],
        'Mixed_5d': [batch_size, 35, 35, 288],
        'Mixed_6a': [batch_size, 17, 17, 768],
        'Mixed_6b': [batch_size, 17, 17, 768],
        'Mixed_6c': [batch_size, 17, 17, 768],
        'Mixed_6d': [batch_size, 17, 17, 768],
        'Mixed_6e': [batch_size, 17, 17, 768],
        'Mixed_7a': [batch_size, 8, 8, 1280],
        'Mixed_7b': [batch_size, 8, 8, 2048],
        'Mixed_7c': [batch_size, 8, 8, 2048]
    }
    self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
    for endpoint_name in endpoints_shapes:
        expected_shape = endpoints_shapes[endpoint_name]
        self.assertTrue(endpoint_name in end_points)
        self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
                             expected_shape)
Example 4: testModelHasExpectedNumberOfParameters

# Required import: from tensorflow.contrib.slim.python.slim.nets import inception_v3 [as alias]
# Or: from tensorflow.contrib.slim.python.slim.nets.inception_v3 import inception_v3_base [as alias]
def testModelHasExpectedNumberOfParameters(self):
    batch_size = 5
    height, width = 299, 299
    inputs = random_ops.random_uniform((batch_size, height, width, 3))
    # arg_scope, model_analyzer and variables_lib are imported elsewhere in
    # the test module (tensorflow.contrib.framework / tensorflow.contrib.slim).
    with arg_scope(inception_v3.inception_v3_arg_scope()):
        inception_v3.inception_v3_base(inputs)
    total_params, _ = model_analyzer.analyze_vars(
        variables_lib.get_model_variables())
    self.assertAlmostEqual(21802784, total_params)
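For a per-variable breakdown rather than just the total checked in Example 4, the same model_analyzer can print one. A sketch assuming the tf.contrib.slim tooling; the print_info flag is assumed from the slim API and is not used in the test itself.

import tensorflow as tf
from tensorflow.contrib.slim.python.slim import model_analyzer
from tensorflow.contrib.slim.python.slim.nets import inception_v3

slim = tf.contrib.slim

images = tf.placeholder(tf.float32, [None, 299, 299, 3])
with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
    inception_v3.inception_v3_base(images)

# analyze_vars returns (total parameter count, total bytes); print_info=True
# additionally prints one line per model variable (assumed flag).
total_params, _ = model_analyzer.analyze_vars(
    slim.get_model_variables(), print_info=True)
print(total_params)  # expected 21802784, matching Example 4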
Example 5: inception_v3

# Required import: from tensorflow.contrib.slim.python.slim.nets import inception_v3 [as alias]
# Or: from tensorflow.contrib.slim.python.slim.nets.inception_v3 import inception_v3_base [as alias]
def inception_v3(nlabels, images, pkeep, is_training):
    # Full classifier: the slim inception_v3_base trunk plus a custom
    # average-pool / dropout / fully connected output head. batch_norm,
    # avg_pool2d and flatten are assumed to be imported from
    # tensorflow.contrib.layers elsewhere in this module, and
    # _activation_summary is a module-level helper.
    batch_norm_params = {
        "is_training": is_training,
        "trainable": True,
        # Decay for the moving averages.
        "decay": 0.9997,
        # Epsilon to prevent 0s in variance.
        "epsilon": 0.001,
        # Collection containing the moving mean and moving variance.
        "variables_collections": {
            "beta": None,
            "gamma": None,
            "moving_mean": ["moving_vars"],
            "moving_variance": ["moving_vars"],
        }
    }
    weight_decay = 0.00004
    stddev = 0.1
    weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
    with tf.variable_scope("InceptionV3", "InceptionV3", [images]) as scope:
        with tf.contrib.slim.arg_scope(
                [tf.contrib.slim.conv2d, tf.contrib.slim.fully_connected],
                weights_regularizer=weights_regularizer,
                trainable=True):
            with tf.contrib.slim.arg_scope(
                    [tf.contrib.slim.conv2d],
                    weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
                    activation_fn=tf.nn.relu,
                    normalizer_fn=batch_norm,
                    normalizer_params=batch_norm_params):
                net, end_points = inception_v3_base(images, scope=scope)
                with tf.variable_scope("logits"):
                    shape = net.get_shape()
                    net = avg_pool2d(net, shape[1:3], padding="VALID", scope="pool")
                    net = tf.nn.dropout(net, pkeep, name='droplast')
                    net = flatten(net, scope="flatten")
    with tf.variable_scope('output') as scope:
        weights = tf.Variable(tf.truncated_normal([2048, nlabels], mean=0.0, stddev=0.01), name='weights')
        biases = tf.Variable(tf.constant(0.0, shape=[nlabels], dtype=tf.float32), name='biases')
        output = tf.add(tf.matmul(net, weights), biases, name=scope.name)
        _activation_summary(output)
    return output
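For context, a hedged sketch of wiring a function like Example 5's inception_v3 into a graph. The label count and dropout keep probability are illustrative placeholders, and the function (together with its module-level helpers such as _activation_summary) is assumed to be importable from the surrounding project.

import tensorflow as tf

nlabels = 8   # hypothetical number of classes
images = tf.placeholder(tf.float32, [None, 299, 299, 3])
pkeep = tf.placeholder_with_default(1.0, shape=())  # dropout keep probability

# inception_v3() is the function defined in Example 5.
logits = inception_v3(nlabels, images, pkeep, is_training=True)
probabilities = tf.nn.softmax(logits)  # shape (?, nlabels)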
Example 6: inception_v3_test

# Required import: from tensorflow.contrib.slim.python.slim.nets import inception_v3 [as alias]
# Or: from tensorflow.contrib.slim.python.slim.nets.inception_v3 import inception_v3_base [as alias]
def inception_v3_test(nlabels, images, pkeep, is_training):
    # Same architecture as Example 5, but also returns the flattened
    # pre-logits feature vector alongside the logits. The same assumptions
    # about batch_norm, avg_pool2d, flatten and _activation_summary apply.
    batch_norm_params = {
        "is_training": is_training,
        "trainable": True,
        # Decay for the moving averages.
        "decay": 0.9997,
        # Epsilon to prevent 0s in variance.
        "epsilon": 0.001,
        # Collection containing the moving mean and moving variance.
        "variables_collections": {
            "beta": None,
            "gamma": None,
            "moving_mean": ["moving_vars"],
            "moving_variance": ["moving_vars"],
        }
    }
    weight_decay = 0.00004
    stddev = 0.1
    weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
    with tf.variable_scope("InceptionV3", "InceptionV3", [images]) as scope:
        with tf.contrib.slim.arg_scope(
                [tf.contrib.slim.conv2d, tf.contrib.slim.fully_connected],
                weights_regularizer=weights_regularizer,
                trainable=True):
            with tf.contrib.slim.arg_scope(
                    [tf.contrib.slim.conv2d],
                    weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
                    activation_fn=tf.nn.relu,
                    normalizer_fn=batch_norm,
                    normalizer_params=batch_norm_params):
                net, end_points = inception_v3_base(images, scope=scope)
                with tf.variable_scope("logits"):
                    shape = net.get_shape()
                    net = avg_pool2d(net, shape[1:3], padding="VALID", scope="pool")
                    net = tf.nn.dropout(net, pkeep, name='droplast')
                    net = flatten(net, scope="flatten")
    with tf.variable_scope('output') as scope:
        weights = tf.Variable(tf.truncated_normal([2048, nlabels], mean=0.0, stddev=0.01), name='weights')
        biases = tf.Variable(tf.constant(0.0, shape=[nlabels], dtype=tf.float32), name='biases')
        output = tf.add(tf.matmul(net, weights), biases, name=scope.name)
        _activation_summary(output)
    return output, net