This article collects typical usage examples of the Python function tensorflow.contrib.framework.python.ops.arg_scope. If you have been wondering what arg_scope does, how to use it, or where to find working examples, the hand-picked code below should help.
The following 15 code examples of arg_scope are shown, ordered by popularity.
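Before the examples, a minimal sketch of the core idea may help: arg_scope installs default keyword arguments for a set of decorated functions, and explicit call-site arguments still take precedence. The sketch below assumes TensorFlow 1.x with tf.contrib available; conv_relu is an illustrative helper, not part of the examples that follow.

import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib.framework import add_arg_scope, arg_scope

# Only functions decorated with @add_arg_scope can pick up defaults from an
# enclosing arg_scope (the tf.contrib.layers ops are already decorated).
@add_arg_scope
def conv_relu(inputs, num_outputs, kernel_size=3, padding='SAME'):
  return layers.conv2d(inputs, num_outputs, kernel_size,
                       padding=padding, activation_fn=tf.nn.relu)

images = tf.placeholder(tf.float32, [None, 32, 32, 3])
with arg_scope([conv_relu], padding='VALID'):
  net = conv_relu(images, 16)               # padding='VALID' comes from the scope
  net = conv_relu(net, 32, padding='SAME')  # an explicit argument overrides the scope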
Example 1: testVariableCollectionsWithArgScopeNested
def testVariableCollectionsWithArgScopeNested(self):
  with self.test_session():
    with arg_scope([variables_lib2.variable], collections='A'):
      a = variables_lib2.variable('a', [])
      with arg_scope([variables_lib2.variable], collections='B'):
        b = variables_lib2.variable('b', [])
    self.assertEquals(a, ops.get_collection('A')[0])
    self.assertEquals(b, ops.get_collection('B')[0])
Example 2: testVariableCollectionsWithArgScopeNonNested
def testVariableCollectionsWithArgScopeNonNested(self):
  with self.test_session():
    with arg_scope([variables_lib2.variable], collections='A'):
      a = variables_lib2.variable('a', [])
    with arg_scope([variables_lib2.variable], collections='B'):
      b = variables_lib2.variable('b', [])
    variables_lib2.variable('c', [])
    self.assertListEqual([a], ops.get_collection('A'))
    self.assertListEqual([b], ops.get_collection('B'))
Example 3: alexnet_v2_arg_scope
def alexnet_v2_arg_scope(weight_decay=0.0005):
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      activation_fn=nn_ops.relu,
      biases_initializer=init_ops.constant_initializer(0.1),
      weights_regularizer=regularizers.l2_regularizer(weight_decay)):
    with arg_scope([layers.conv2d], padding='SAME'):
      with arg_scope([layers_lib.max_pool2d], padding='VALID') as arg_sc:
        return arg_sc
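The helper above only builds and returns a scope object; nothing is applied until the caller re-enters it while constructing the graph. A minimal usage sketch (build_alexnet is a hypothetical model function; only the scope-handling pattern matters):

with arg_scope(alexnet_v2_arg_scope(weight_decay=0.0005)):
  logits = build_alexnet(images, num_classes=1000)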
Example 4: testReuseArgScope
def testReuseArgScope(self):
  func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
  key_op = _key_op(func1)
  current_scope = {key_op: func1_kwargs.copy()}
  with self.test_session():
    with arg_scope([func1], a=1, b=None, c=[1]) as scope1:
      pass
    with arg_scope(scope1) as scope:
      self.assertDictEqual(scope, current_scope)
Example 5: testArgScopeObjectCreatedWithinScopeInheritsArgScope
def testArgScopeObjectCreatedWithinScopeInheritsArgScope(self):
  def get_scope_object():
    with arg_scope([func1], a=1, b=None, c=[1]) as sc:
      return sc

  with arg_scope([func1], b=2, d=10):
    with arg_scope(get_scope_object()):
      args, kwargs = func1(0)
      self.assertTupleEqual(args, (0,))
      self.assertDictEqual(kwargs, {'a': 1, 'b': None, 'c': [1], 'd': 10})
Example 6: testClearArgScope
def testClearArgScope(self):
  func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
  key_op = _key_op(func1)
  func1_scope = {key_op: func1_kwargs.copy()}
  with self.test_session():
    with arg_scope([func1], a=1, b=None, c=[1]) as sc1:
      self.assertEqual(sc1, func1_scope)
      with arg_scope({}) as sc2:
        self.assertEqual(sc2, {})
      with arg_scope([]) as current_arg_scope:
        self.assertEqual(current_arg_scope, func1_scope)
Example 7: testNestedArgScope
def testNestedArgScope(self):
  func1_args = (0,)
  func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
  with arg_scope([func1], a=1, b=None, c=[1]):
    args, kwargs = func1(0)
    self.assertTupleEqual(args, func1_args)
    self.assertDictEqual(kwargs, func1_kwargs)
    func1_kwargs['b'] = 2
    with arg_scope([func1], b=2):
      args, kwargs = func1(0)
      self.assertTupleEqual(args, func1_args)
      self.assertDictEqual(kwargs, func1_kwargs)
Example 8: testCurrentArgScopeNested
def testCurrentArgScopeNested(self):
  func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
  func2_kwargs = {'b': 2, 'd': [2]}
  key = _key_op
  current_scope = {
      key(func1): func1_kwargs.copy(),
      key(func2): func2_kwargs.copy()
  }
  with self.test_session():
    with arg_scope([func1], a=1, b=None, c=[1]):
      with arg_scope([func2], b=2, d=[2]) as scope:
        self.assertDictEqual(scope, current_scope)
Example 9: testNestedArgScopeObjectCreatedOutsideScopeOverridesArgScope
def testNestedArgScopeObjectCreatedOutsideScopeOverridesArgScope(self):
  def get_scope_object():
    with arg_scope([func1], a=1, b=None, c=[1]) as sc:
      return sc

  scope_object = get_scope_object()
  with arg_scope([func1], b=2, d=10):
    with arg_scope(scope_object):
      args, kwargs = func1(0)
      self.assertTupleEqual(args, (0,))
      self.assertDictEqual(kwargs, {'a': 1, 'b': None, 'c': [1]})
Example 10: resnet_arg_scope
def resnet_arg_scope(is_training=True,
                     weight_decay=0.0001,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
  """Defines the default ResNet arg scope.

  TODO(gpapan): The batch-normalization related default values above are
    appropriate for use in conjunction with the reference ResNet models
    released at https://github.com/KaimingHe/deep-residual-networks. When
    training ResNets from scratch, they might need to be tuned.

  Args:
    is_training: Whether or not we are training the parameters in the batch
      normalization layers of the model.
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_decay: The moving average decay when estimating layer activation
      statistics in batch normalization.
    batch_norm_epsilon: Small constant to prevent division by zero when
      normalizing activations by their variance in batch normalization.
    batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
      activations in the batch normalization layer.

  Returns:
    An `arg_scope` to use for the resnet models.
  """
  batch_norm_params = {
      'is_training': is_training,
      'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon,
      'scale': batch_norm_scale,
      'updates_collections': ops.GraphKeys.UPDATE_OPS,
  }

  with arg_scope(
      [layers_lib.conv2d],
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      weights_initializer=initializers.variance_scaling_initializer(),
      activation_fn=nn_ops.relu,
      normalizer_fn=layers.batch_norm,
      normalizer_params=batch_norm_params):
    with arg_scope([layers.batch_norm], **batch_norm_params):
      # The following implies padding='SAME' for pool1, which makes feature
      # alignment easier for dense prediction tasks. This is also used in
      # https://github.com/facebook/fb.resnet.torch. However the accompanying
      # code of 'Deep Residual Learning for Image Recognition' uses
      # padding='VALID' for pool1. You can switch to that choice by setting
      # tf.contrib.framework.arg_scope([tf.contrib.layers.max_pool2d], padding='VALID').
      with arg_scope([layers.max_pool2d], padding='SAME') as arg_sc:
        return arg_sc
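As the comment above notes, pool1 can be switched back to padding='VALID' by nesting an override inside the returned scope; inner arg_scope values win over outer ones. A minimal sketch of that pattern (build_resnet is a hypothetical model function):

with arg_scope(resnet_arg_scope(is_training=False)):
  with arg_scope([layers.max_pool2d], padding='VALID'):
    net, end_points = build_resnet(images, num_classes=1000)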
Example 11: testPartiallySharedArgScope
def testPartiallySharedArgScope(self):
  func1_args = (0,)
  func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
  func2_args = (1,)
  func2_kwargs = {'a': 1, 'b': None, 'd': [2]}
  with arg_scope([func1, func2], a=1, b=None):
    with arg_scope([func1], c=[1]):
      with arg_scope([func2], d=[2]):
        args, kwargs = func1(0)
        self.assertTupleEqual(args, func1_args)
        self.assertDictEqual(kwargs, func1_kwargs)
        args, kwargs = func2(1)
        self.assertTupleEqual(args, func2_args)
        self.assertDictEqual(kwargs, func2_kwargs)
Example 12: inception_v3_arg_scope
def inception_v3_arg_scope(weight_decay=0.00004,
                           batch_norm_var_collection='moving_vars',
                           batch_norm_decay=0.9997,
                           batch_norm_epsilon=0.001,
                           updates_collections=ops.GraphKeys.UPDATE_OPS,
                           use_fused_batchnorm=True):
  """Defines the default InceptionV3 arg scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_var_collection: The name of the collection for the batch norm
      variables.
    batch_norm_decay: Decay for the batch norm moving average.
    batch_norm_epsilon: Small float added to variance to avoid division by zero.
    updates_collections: Collections for the update ops of the layer.
    use_fused_batchnorm: Enable fused batchnorm.

  Returns:
    An `arg_scope` to use for the inception v3 model.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': batch_norm_decay,
      # epsilon to prevent 0s in variance.
      'epsilon': batch_norm_epsilon,
      # collection containing update_ops.
      'updates_collections': updates_collections,
      # Use fused batch norm if possible.
      'fused': use_fused_batchnorm,
      # collection containing the moving mean and moving variance.
      'variables_collections': {
          'beta': None,
          'gamma': None,
          'moving_mean': [batch_norm_var_collection],
          'moving_variance': [batch_norm_var_collection],
      }
  }

  # Set weight_decay for weights in Conv and FC layers.
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      weights_regularizer=regularizers.l2_regularizer(weight_decay)):
    with arg_scope(
        [layers.conv2d],
        weights_initializer=initializers.variance_scaling_initializer(),
        activation_fn=nn_ops.relu,
        normalizer_fn=layers_lib.batch_norm,
        normalizer_params=batch_norm_params) as sc:
      return sc
Example 13: inception_v1_arg_scope
def inception_v1_arg_scope(weight_decay=0.00004,
                           use_batch_norm=True,
                           batch_norm_var_collection='moving_vars'):
  """Defines the default InceptionV1 arg scope.

  Note: Although the original paper didn't use batch_norm, we found it useful.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    use_batch_norm: If `True`, batch_norm is applied after each convolution.
    batch_norm_var_collection: The name of the collection for the batch norm
      variables.

  Returns:
    An `arg_scope` to use for the inception v1 model.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': 0.9997,
      # epsilon to prevent 0s in variance.
      'epsilon': 0.001,
      # collection containing update_ops.
      'updates_collections': ops.GraphKeys.UPDATE_OPS,
      # collection containing the moving mean and moving variance.
      'variables_collections': {
          'beta': None,
          'gamma': None,
          'moving_mean': [batch_norm_var_collection],
          'moving_variance': [batch_norm_var_collection],
      }
  }
  if use_batch_norm:
    normalizer_fn = layers_lib.batch_norm
    normalizer_params = batch_norm_params
  else:
    normalizer_fn = None
    normalizer_params = {}

  # Set weight_decay for weights in Conv and FC layers.
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      weights_regularizer=regularizers.l2_regularizer(weight_decay)):
    with arg_scope(
        [layers.conv2d],
        weights_initializer=initializers.variance_scaling_initializer(),
        activation_fn=nn_ops.relu,
        normalizer_fn=normalizer_fn,
        normalizer_params=normalizer_params) as sc:
      return sc
Example 14: testEndPointsV2
def testEndPointsV2(self):
  """Test the end points of a tiny v2 bottleneck network."""
  blocks = [
      resnet_v2.resnet_v2_block(
          'block1', base_depth=1, num_units=2, stride=2),
      resnet_v2.resnet_v2_block(
          'block2', base_depth=2, num_units=2, stride=1),
  ]
  inputs = create_test_input(2, 32, 16, 3)
  with arg_scope(resnet_utils.resnet_arg_scope()):
    _, end_points = self._resnet_plain(inputs, blocks, scope='tiny')
  expected = [
      'tiny/block1/unit_1/bottleneck_v2/shortcut',
      'tiny/block1/unit_1/bottleneck_v2/conv1',
      'tiny/block1/unit_1/bottleneck_v2/conv2',
      'tiny/block1/unit_1/bottleneck_v2/conv3',
      'tiny/block1/unit_2/bottleneck_v2/conv1',
      'tiny/block1/unit_2/bottleneck_v2/conv2',
      'tiny/block1/unit_2/bottleneck_v2/conv3',
      'tiny/block2/unit_1/bottleneck_v2/shortcut',
      'tiny/block2/unit_1/bottleneck_v2/conv1',
      'tiny/block2/unit_1/bottleneck_v2/conv2',
      'tiny/block2/unit_1/bottleneck_v2/conv3',
      'tiny/block2/unit_2/bottleneck_v2/conv1',
      'tiny/block2/unit_2/bottleneck_v2/conv2',
      'tiny/block2/unit_2/bottleneck_v2/conv3'
  ]
  self.assertItemsEqual(expected, end_points)
Example 15: grad_fn
def grad_fn(inputs, variables, outputs, output_grads):
  """Recompute outputs for gradient computation."""
  del outputs
  # Recompute outputs under the cached arg_scope and variable scope so the
  # rebuilt graph reuses the variables created in the forward pass.
  with framework_ops.control_dependencies(output_grads):
    if use_data_dep_:
      inputs = _force_data_dependency(output_grads, inputs)
    with contrib_framework_ops.arg_scope(cached_arg_scope[0]):
      with variable_scope.variable_scope(cached_vs[0], reuse=True):
        outputs = fn(*inputs)

  if not isinstance(outputs, (list, tuple)):
    outputs = [outputs]
  outputs = list(outputs)
  grads = gradients_impl.gradients(outputs, inputs + variables, output_grads)

  if tupleize_grads:
    if use_data_dep_:
      grads = _tuple_with_data_dep(grads)
    else:
      grads = control_flow_ops.tuple(grads)

  grad_inputs = grads[:len(inputs)]
  grad_vars = grads[len(inputs):]
  return grad_inputs, grad_vars