This page collects typical usage examples of the Python method nets.mobilenet.mobilenet.depth_multiplier. If you are unsure exactly how mobilenet.depth_multiplier is used, or are looking for working examples of it, the curated code snippets below should help. You can also explore the module nets.mobilenet.mobilenet in which the method is defined.
The following shows 8 code examples of the mobilenet.depth_multiplier method, sorted by popularity by default.
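Before the examples, here is a minimal, hedged sketch of how depth_multiplier is typically wired up. It assumes TF1-style graph mode and the tensorflow/models "nets" package on the Python path; the slim import shown is an assumption (tf.contrib.slim in TF1, or the standalone tf_slim package):

import tensorflow.compat.v1 as tf  # the examples on this page assume TF1-style graph mode
import tf_slim as slim             # in TF1 this was tf.contrib.slim

from nets.mobilenet import mobilenet
from nets.mobilenet import mobilenet_v2

images = tf.placeholder(tf.float32, (1, 224, 224, 3))

# depth_multiplier is an arg_scope-able function: the multiplier passed to
# mobilenet_v2.mobilenet is applied to every layer's channel count, with
# min_depth acting as a floor.
with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=16):
  logits, end_points = mobilenet_v2.mobilenet(images, depth_multiplier=0.5)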
Example 1: testMultiplier
# Required import: from nets.mobilenet import mobilenet [as alias]
# Or: from nets.mobilenet.mobilenet import depth_multiplier [as alias]
def testMultiplier(self):
  op = mobilenet.op
  new_def = copy.deepcopy(mobilenet_v2.V2_DEF)

  def inverse_multiplier(output_params, multiplier):
    output_params['num_outputs'] = int(
        output_params['num_outputs'] / multiplier)

  new_def['spec'][0] = op(
      slim.conv2d,
      kernel_size=(3, 3),
      multiplier_func=inverse_multiplier,
      num_outputs=16)
  _ = mobilenet_v2.mobilenet_base(
      tf.placeholder(tf.float32, (10, 224, 224, 16)),
      conv_defs=new_def,
      depth_multiplier=0.1)
  s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
  # Expect first layer to be 160 (16 / 0.1), and other layers
  # their max(original size * 0.1, 8)
  self.assertEqual([160, 8, 48, 8, 48], s[:5])
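The expected widths come from the default depth_multiplier behaviour: scale each layer's channel count, round to a multiple of divisible_by (8 by default), and never go below min_depth (also 8). A rough reimplementation of that rounding, for illustration only (the helper name and exact rounding rule are assumptions, not the library's code):

def rounded_depth(num_outputs, multiplier, divisible_by=8, min_depth=8):
  """Sketch of the usual scale-then-round-to-a-multiple behaviour."""
  scaled = num_outputs * multiplier
  rounded = max(min_depth,
                int(scaled + divisible_by / 2) // divisible_by * divisible_by)
  if rounded < 0.9 * scaled:  # avoid rounding down by more than ~10%
    rounded += divisible_by
  return rounded

print(rounded_depth(16, 0.1))  # 8  -> the "8" entries in the assertion above
print(rounded_depth(24, 0.1))  # 8
print(6 * 8)                   # 48 -> the expansion convs, 6x their (already scaled) input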
Example 2: mobilenet_base
# Required import: from nets.mobilenet import mobilenet [as alias]
# Or: from nets.mobilenet.mobilenet import depth_multiplier [as alias]
def mobilenet_base(input_tensor, depth_multiplier=1.0, **kwargs):
  """Creates the base of the mobilenet (no pooling and no logits)."""
  return mobilenet(input_tensor,
                   depth_multiplier=depth_multiplier,
                   base_only=True, **kwargs)
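A hedged usage sketch of this wrapper: with base_only=True it returns the pre-pooling feature map and the endpoint dict instead of logits.

images = tf.placeholder(tf.float32, (1, 224, 224, 3))
net, end_points = mobilenet_v2.mobilenet_base(images, depth_multiplier=1.0)
# For a 224x224 input at multiplier 1.0 the pre-pooling map is 7x7x1280;
# attach your own pooling / classification head on top of `net`.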
Example 3: testDivisibleByWithArgScope
# Required import: from nets.mobilenet import mobilenet [as alias]
# Or: from nets.mobilenet.mobilenet import depth_multiplier [as alias]
def testDivisibleByWithArgScope(self):
  tf.reset_default_graph()
  # Verifies that depth_multiplier arg scope actually works
  # if no default min_depth is provided.
  with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32):
    mobilenet_v2.mobilenet(
        tf.placeholder(tf.float32, (10, 224, 224, 2)),
        conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1)
    s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
    s = set(s)
    self.assertSameElements(s, [32, 192, 128, 1001])
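Rough arithmetic behind the expected set, assuming the standard V2_DEF layer widths (treat the numbers as illustrative, not a spec):

# With depth_multiplier=0.1 and min_depth=32:
#   bottleneck outputs (originally 16..320 channels) all scale below 32 -> clamped to 32
#   expansion convs become 6 * 32                                       -> 192
#   the final 1x1 conv scales 1280 * 0.1                                -> 128
#   the 1001-way classifier conv is never multiplied                    -> 1001
print(max(int(320 * 0.1), 32), 6 * 32, int(1280 * 0.1))  # 32 192 128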
Example 4: testFineGrained
# Required import: from nets.mobilenet import mobilenet [as alias]
# Or: from nets.mobilenet.mobilenet import depth_multiplier [as alias]
def testFineGrained(self):
  tf.reset_default_graph()
  # Verifies that finegrain_classification_mode keeps the last conv layer
  # at its original width even with a very small depth_multiplier.
  mobilenet_v2.mobilenet(
      tf.placeholder(tf.float32, (10, 224, 224, 2)),
      conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.01,
      finegrain_classification_mode=True)
  s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
  s = set(s)
  # All convolutions will be 8->48, except for the last one.
  self.assertSameElements(s, [8, 48, 1001, 1280])
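The reason 1280 survives here: finegrain_classification_mode keeps the last convolution at its original width even for tiny multipliers, which tends to preserve classification accuracy at very small model sizes. Rough, illustrative arithmetic for the other values:

# With depth_multiplier=0.01 and the default min_depth=8:
#   every bottleneck output scales below 8 -> clamped to 8
#   expansion convs become 6 * 8           -> 48
#   last conv kept at its original width   -> 1280 (finegrain_classification_mode)
#   classifier conv                        -> 1001 outputs
print(max(int(32 * 0.01), 8), 6 * 8)  # 8 48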
Example 5: testMobilenetBase
# Required import: from nets.mobilenet import mobilenet [as alias]
# Or: from nets.mobilenet.mobilenet import depth_multiplier [as alias]
def testMobilenetBase(self):
  tf.reset_default_graph()
  # Verifies that mobilenet_base returns pre-pooling layer.
  with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32):
    net, _ = mobilenet_v2.mobilenet_base(
        tf.placeholder(tf.float32, (10, 224, 224, 16)),
        conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1)
    self.assertEqual(net.get_shape().as_list(), [10, 7, 7, 128])
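Rough arithmetic for the asserted shape (hedged; based on the standard V2_DEF): the base network has a total stride of 32, so 224 / 32 = 7 spatially, and the final conv is 1280 channels, so 1280 × 0.1 = 128, already above the min_depth=32 floor.

print(224 // 32, int(1280 * 0.1))  # 7 128 -> matches [10, 7, 7, 128]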
Example 6: __init__
# Required import: from nets.mobilenet import mobilenet [as alias]
# Or: from nets.mobilenet.mobilenet import depth_multiplier [as alias]
def __init__(self,
             is_training,
             depth_multiplier,
             min_depth,
             pad_to_multiple,
             conv_hyperparams,
             batch_norm_trainable=True,
             reuse_weights=None,
             use_explicit_padding=False,
             use_depthwise=False):
  """MobileNetV2 Feature Extractor for SSD Models.

  Mobilenet v2 (experimental), designed by sandler@. More details can be found
  in //knowledge/cerebra/brain/compression/mobilenet/mobilenet_experimental.py

  Args:
    is_training: whether the network is in training mode.
    depth_multiplier: float depth multiplier for feature extractor.
    min_depth: minimum feature extractor depth.
    pad_to_multiple: the nearest multiple to zero pad the input height and
      width dimensions to.
    conv_hyperparams: tf slim arg_scope for conv2d and separable_conv2d ops.
    batch_norm_trainable: Whether to update batch norm parameters during
      training or not. When training with a small batch size (e.g. 1), it is
      desirable to disable batch norm update and use pretrained batch norm
      params.
    reuse_weights: Whether to reuse variables. Default is None.
    use_explicit_padding: Whether to use explicit padding when extracting
      features. Default is False.
    use_depthwise: Whether to use depthwise convolutions. Default is False.
  """
  super(SSDMobileNetV2FeatureExtractor, self).__init__(
      is_training, depth_multiplier, min_depth, pad_to_multiple,
      conv_hyperparams, batch_norm_trainable, reuse_weights,
      use_explicit_padding, use_depthwise)
Developer: cagbal, Project: ros_people_object_detection_tensorflow, Lines: 37, Source file: ssd_mobilenet_v2_feature_extractor.py
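A hypothetical instantiation sketch for this constructor (the Object Detection API normally builds the conv_hyperparams arg_scope from a pipeline config via its hyperparams builder; the hand-rolled scope below is illustrative only):

# Illustrative only: a minimal conv_hyperparams arg_scope captured by hand.
with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                    activation_fn=tf.nn.relu6,
                    normalizer_fn=slim.batch_norm) as conv_hyperparams:
  pass

feature_extractor = SSDMobileNetV2FeatureExtractor(
    is_training=False,
    depth_multiplier=1.0,
    min_depth=16,
    pad_to_multiple=1,
    conv_hyperparams=conv_hyperparams)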
Example 7: mobilenet_base
# Required import: from nets.mobilenet import mobilenet [as alias]
# Or: from nets.mobilenet.mobilenet import depth_multiplier [as alias]
def mobilenet_base(input_tensor, depth_multiplier=1.0, **kwargs):
  """Creates the base of the mobilenet (no pooling and no logits)."""
  return mobilenet(input_tensor,
                   depth_multiplier=depth_multiplier,
                   base_only=True, **kwargs)
Example 8: __init__
# Required import: from nets.mobilenet import mobilenet [as alias]
# Or: from nets.mobilenet.mobilenet import depth_multiplier [as alias]
def __init__(self,
             is_training,
             depth_multiplier,
             min_depth,
             pad_to_multiple,
             conv_hyperparams_fn,
             reuse_weights=None,
             use_explicit_padding=False,
             use_depthwise=False,
             override_base_feature_extractor_hyperparams=False):
  """MobileNetV2 Feature Extractor for SSD Models.

  Mobilenet v2 (experimental), designed by sandler@. More details can be found
  in //knowledge/cerebra/brain/compression/mobilenet/mobilenet_experimental.py

  Args:
    is_training: whether the network is in training mode.
    depth_multiplier: float depth multiplier for feature extractor.
    min_depth: minimum feature extractor depth.
    pad_to_multiple: the nearest multiple to zero pad the input height and
      width dimensions to.
    conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
      and separable_conv2d ops in the layers that are added on top of the
      base feature extractor.
    reuse_weights: Whether to reuse variables. Default is None.
    use_explicit_padding: Whether to use explicit padding when extracting
      features. Default is False.
    use_depthwise: Whether to use depthwise convolutions. Default is False.
    override_base_feature_extractor_hyperparams: Whether to override
      hyperparameters of the base feature extractor with the one from
      `conv_hyperparams_fn`.
  """
  super(SSDMobileNetV2FeatureExtractor, self).__init__(
      is_training, depth_multiplier, min_depth, pad_to_multiple,
      conv_hyperparams_fn, reuse_weights, use_explicit_padding, use_depthwise,
      override_base_feature_extractor_hyperparams)
Developer: ambakick, Project: Person-Detection-and-Tracking, Lines: 38, Source file: ssd_mobilenet_v2_feature_extractor.py
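And a similar hypothetical sketch for this newer signature, which takes a function returning the arg_scope rather than the scope itself (again, this is normally produced by the pipeline-config builders; the function below is an illustrative stand-in):

def conv_hyperparams_fn():
  # Illustrative stand-in for the arg_scope the hyperparams builder would return.
  return slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                        activation_fn=tf.nn.relu6,
                        normalizer_fn=slim.batch_norm)

feature_extractor = SSDMobileNetV2FeatureExtractor(
    is_training=False,
    depth_multiplier=0.5,
    min_depth=16,
    pad_to_multiple=1,
    conv_hyperparams_fn=conv_hyperparams_fn)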