本文整理汇总了Python中nets.mobilenet.mobilenet.mobilenet方法的典型用法代码示例。如果您正苦于以下问题:Python mobilenet.mobilenet方法的具体用法?Python mobilenet.mobilenet怎么用?Python mobilenet.mobilenet使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类nets.mobilenet.mobilenet
的用法示例。
在下文中一共展示了mobilenet.mobilenet方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: training_scope
# 需要导入模块: from nets.mobilenet import mobilenet [as 别名]
# 或者: from nets.mobilenet.mobilenet import mobilenet [as 别名]
def training_scope(**kwargs):
    """Defines the MobilenetV2 training scope.

    Usage:
      with tf.contrib.slim.arg_scope(mobilenet_v2.training_scope()):
        logits, endpoints = mobilenet_v2.mobilenet(input_tensor)

    Args:
      **kwargs: Passed through to mobilenet.training_scope. Supported keys:
        weight_decay - weight decay used to regularize the model.
        stddev - standard deviation for initialization; a negative value
          selects xavier initialization.
        dropout_keep_prob - dropout keep probability.
        bn_decay - decay for the batch-norm moving averages.

    Returns:
      An `arg_scope` to use for the mobilenet v2 model.
    """
    return lib.training_scope(**kwargs)
示例2: training_scope
# 需要导入模块: from nets.mobilenet import mobilenet [as 别名]
# 或者: from nets.mobilenet.mobilenet import mobilenet [as 别名]
def training_scope(**kwargs):
    """Builds an arg_scope for training MobilenetV2.

    Usage:
      with tf.contrib.slim.arg_scope(mobilenet_v2.training_scope()):
        logits, endpoints = mobilenet_v2.mobilenet(input_tensor)

    Args:
      **kwargs: Forwarded to mobilenet.training_scope. The following
        parameters are supported:
        weight_decay - the weight decay for regularizing the model.
        stddev - stddev for weight initialization; negative means xavier.
        dropout_keep_prob - dropout keep probability.
        bn_decay - decay for the batch-norm moving averages.

    Returns:
      An `arg_scope` to use for the mobilenet v2 model.
    """
    return lib.training_scope(**kwargs)
示例3: wrapped_partial
# 需要导入模块: from nets.mobilenet import mobilenet [as 别名]
# 或者: from nets.mobilenet.mobilenet import mobilenet [as 别名]
def wrapped_partial(func, *args, **kwargs):
    """Returns functools.partial(func, ...) carrying func's metadata.

    Unlike a bare partial, the returned callable exposes the wrapped
    function's `__name__`, `__doc__`, etc. via functools.update_wrapper.
    """
    bound = functools.partial(func, *args, **kwargs)
    functools.update_wrapper(bound, func)
    return bound
# Wrappers for mobilenet v2 with depth-multipliers. Note that
# 'finegrain_classification_mode' is set to True, which means the embedding
# layer will not be shrunk when given a depth-multiplier < 1.0.
示例4: mobilenet_base
# 需要导入模块: from nets.mobilenet import mobilenet [as 别名]
# 或者: from nets.mobilenet.mobilenet import mobilenet [as 别名]
def mobilenet_base(input_tensor, depth_multiplier=1.0, **kwargs):
    """Creates base of the mobilenet (no pooling and no logits) ."""
    # Delegates to mobilenet() with base_only=True so only the
    # convolutional trunk is constructed.
    return mobilenet(
        input_tensor,
        depth_multiplier=depth_multiplier,
        base_only=True,
        **kwargs)
示例5: testCreation
# 需要导入模块: from nets.mobilenet import mobilenet [as 别名]
# 或者: from nets.mobilenet.mobilenet import mobilenet [as 别名]
def testCreation(self):
    """Sanity-checks conv op count and depthwise endpoints of a V2 net."""
    conv_defs = dict(mobilenet_v2.V2_DEF)
    _, endpoints = mobilenet.mobilenet(
        tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=conv_defs)
    conv_count = len(find_ops('Conv2D'))
    # Mostly a sanity test; there is no deep reason for these particular
    # constants. All but the first 2 and the last op contribute two
    # convolutions each, plus one extra conv (the logits) not in the spec.
    self.assertEqual(conv_count, len(conv_defs['spec']) * 2 - 2)
    # Check that the depthwise outputs are exposed as endpoints.
    for layer in range(2, 17):
        self.assertIn('layer_%d/depthwise_output' % layer, endpoints)
示例6: testCreationNoClasses
# 需要导入模块: from nets.mobilenet import mobilenet [as 别名]
# 或者: from nets.mobilenet.mobilenet import mobilenet [as 别名]
def testCreationNoClasses(self):
    """With num_classes=None the returned net is the global-pool endpoint."""
    conv_defs = copy.deepcopy(mobilenet_v2.V2_DEF)
    net, endpoints = mobilenet.mobilenet(
        tf.placeholder(tf.float32, (10, 224, 224, 16)),
        conv_defs=conv_defs,
        num_classes=None)
    # No logits head: the output tensor must be exactly the global pool.
    self.assertIs(net, endpoints['global_pool'])
示例7: testWithSplits
# 需要导入模块: from nets.mobilenet import mobilenet [as 别名]
# 或者: from nets.mobilenet.mobilenet import mobilenet [as 别名]
def testWithSplits(self):
    """Checks the Conv2D op count when expansions are split in two."""
    conv_defs = copy.deepcopy(mobilenet_v2.V2_DEF)
    conv_defs['overrides'] = {
        (ops.expanded_conv,): dict(split_expansion=2),
    }
    _, _ = mobilenet.mobilenet(
        tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=conv_defs)
    conv_count = len(find_ops('Conv2D'))
    # All but 3 ops have 3 conv operators each, the remaining 3 have one,
    # and there is one unaccounted conv.
    self.assertEqual(conv_count, len(conv_defs['spec']) * 3 - 5)
示例8: testWithOutputStride8
# 需要导入模块: from nets.mobilenet import mobilenet [as 别名]
# 或者: from nets.mobilenet.mobilenet import mobilenet [as 别名]
def testWithOutputStride8(self):
    """output_stride=8 on a 224x224 input must yield 28x28 feature maps."""
    net, _ = mobilenet.mobilenet_base(
        tf.placeholder(tf.float32, (10, 224, 224, 16)),
        conv_defs=mobilenet_v2.V2_DEF,
        output_stride=8,
        scope='MobilenetV2')
    # 224 / 8 == 28 in both spatial dimensions.
    self.assertEqual(net.get_shape().as_list()[1:3], [28, 28])
示例9: testDivisibleBy
# 需要导入模块: from nets.mobilenet import mobilenet [as 别名]
# 或者: from nets.mobilenet.mobilenet import mobilenet [as 别名]
def testDivisibleBy(self):
    """divisible_by/min_depth must round all channel counts as expected."""
    tf.reset_default_graph()
    mobilenet_v2.mobilenet(
        tf.placeholder(tf.float32, (10, 224, 224, 16)),
        conv_defs=mobilenet_v2.V2_DEF,
        divisible_by=16,
        min_depth=32)
    # Collect the distinct output depths of every Conv2D in the graph.
    depths = {op.outputs[0].get_shape().as_list()[-1]
              for op in find_ops('Conv2D')}
    self.assertSameElements([32, 64, 96, 160, 192, 320, 384, 576, 960, 1280,
                             1001], depths)
示例10: testDivisibleByWithArgScope
# 需要导入模块: from nets.mobilenet import mobilenet [as 别名]
# 或者: from nets.mobilenet.mobilenet import mobilenet [as 别名]
def testDivisibleByWithArgScope(self):
    """min_depth given via arg_scope must take effect without a default."""
    tf.reset_default_graph()
    # Verifies that the depth_multiplier arg scope actually works
    # if no default min_depth is provided.
    with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32):
        mobilenet_v2.mobilenet(
            tf.placeholder(tf.float32, (10, 224, 224, 2)),
            conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1)
    # Distinct Conv2D output depths observed in the graph.
    depths = {op.outputs[0].get_shape().as_list()[-1]
              for op in find_ops('Conv2D')}
    self.assertSameElements(depths, [32, 192, 128, 1001])
示例11: testFineGrained
# 需要导入模块: from nets.mobilenet import mobilenet [as 别名]
# 或者: from nets.mobilenet.mobilenet import mobilenet [as 别名]
def testFineGrained(self):
    """finegrain_classification_mode must keep the embedding layer wide."""
    tf.reset_default_graph()
    # Verifies that depth_multiplier arg scope actually works
    # if no default min_depth is provided.
    mobilenet_v2.mobilenet(
        tf.placeholder(tf.float32, (10, 224, 224, 2)),
        conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.01,
        finegrain_classification_mode=True)
    # Distinct Conv2D output depths observed in the graph.
    depths = {op.outputs[0].get_shape().as_list()[-1]
              for op in find_ops('Conv2D')}
    # All convolutions will be 8->48, except for the last one.
    self.assertSameElements(depths, [8, 48, 1001, 1280])
示例12: testWithOutputStride16
# 需要导入模块: from nets.mobilenet import mobilenet [as 别名]
# 或者: from nets.mobilenet.mobilenet import mobilenet [as 别名]
def testWithOutputStride16(self):
    """output_stride=16 on a 224x224 input must yield 14x14 feature maps."""
    tf.reset_default_graph()
    net, _ = mobilenet.mobilenet_base(
        tf.placeholder(tf.float32, (10, 224, 224, 16)),
        conv_defs=mobilenet_v2.V2_DEF,
        output_stride=16)
    # 224 / 16 == 14 in both spatial dimensions.
    self.assertEqual(net.get_shape().as_list()[1:3], [14, 14])
示例13: testWithOutputStride8AndExplicitPadding
# 需要导入模块: from nets.mobilenet import mobilenet [as 别名]
# 或者: from nets.mobilenet.mobilenet import mobilenet [as 别名]
def testWithOutputStride8AndExplicitPadding(self):
    """Explicit padding must not change the output_stride=8 spatial size."""
    tf.reset_default_graph()
    net, _ = mobilenet.mobilenet_base(
        tf.placeholder(tf.float32, (10, 224, 224, 16)),
        conv_defs=mobilenet_v2.V2_DEF,
        output_stride=8,
        use_explicit_padding=True,
        scope='MobilenetV2')
    # 224 / 8 == 28 in both spatial dimensions.
    self.assertEqual(net.get_shape().as_list()[1:3], [28, 28])
示例14: testWithOutputStride16AndExplicitPadding
# 需要导入模块: from nets.mobilenet import mobilenet [as 别名]
# 或者: from nets.mobilenet.mobilenet import mobilenet [as 别名]
def testWithOutputStride16AndExplicitPadding(self):
    """Explicit padding must not change the output_stride=16 spatial size."""
    tf.reset_default_graph()
    net, _ = mobilenet.mobilenet_base(
        tf.placeholder(tf.float32, (10, 224, 224, 16)),
        conv_defs=mobilenet_v2.V2_DEF,
        output_stride=16,
        use_explicit_padding=True)
    # 224 / 16 == 14 in both spatial dimensions.
    self.assertEqual(net.get_shape().as_list()[1:3], [14, 14])
示例15: testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone
# 需要导入模块: from nets.mobilenet import mobilenet [as 别名]
# 或者: from nets.mobilenet.mobilenet import mobilenet [as 别名]
def testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone(self):
    """is_training=None must leave no is_training key in the BN arg scope."""
    scope = mobilenet.training_scope(is_training=None)
    bn_kwargs = scope[slim.arg_scope_func_key(slim.batch_norm)]
    self.assertNotIn('is_training', bn_kwargs)