This article collects typical usage examples of the Python method tensorflow.contrib.layers.conv2d. If you are unsure what layers.conv2d does or how to use it, the curated code examples below may help. You can also explore the containing module, tensorflow.contrib.layers, for further context.
The sections below present 15 code examples of layers.conv2d, ordered by popularity by default.
Example 1: vgg_arg_scope
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import conv2d [as alias]
def vgg_arg_scope(weight_decay=0.0005):
"""Defines the VGG arg scope.
Args:
weight_decay: The l2 regularization coefficient.
Returns:
An arg_scope.
"""
with arg_scope(
[layers.conv2d, layers_lib.fully_connected],
activation_fn=nn_ops.relu,
weights_regularizer=regularizers.l2_regularizer(weight_decay),
biases_initializer=init_ops.zeros_initializer()):
with arg_scope([layers.conv2d], padding='SAME') as arg_sc:
return arg_sc
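As a usage sketch (not part of the original example; the input shape and layer parameters are illustrative), the returned scope is applied with arg_scope so that every conv2d call inside it picks up these defaults:

import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib.framework import arg_scope

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
with arg_scope(vgg_arg_scope(weight_decay=0.0005)):
    # Inherits ReLU activation, L2 weight decay, and 'SAME' padding.
    net = layers.conv2d(images, 64, [3, 3], scope='conv1_1')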
Example 2: darkconv
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import conv2d [as alias]
def darkconv(*args, **kwargs):
scope = kwargs.pop('scope', None)
onlyconv = kwargs.pop('onlyconv', False)
with tf.variable_scope(scope):
conv_kwargs = {
'padding': 'SAME',
'activation_fn': None,
'weights_initializer': variance_scaling_initializer(1.53846),
'weights_regularizer': l2(5e-4),
'biases_initializer': None,
'scope': 'conv'}
if onlyconv:
conv_kwargs.pop('biases_initializer')
with arg_scope([conv2d], **conv_kwargs):
x = conv2d(*args, **kwargs)
if onlyconv: return x
x = batch_norm(x, decay=0.99, center=False, scale=True,
epsilon=1e-5, scope='bn')
x = bias_add(x, scope='bias')
x = leaky_relu(x, alpha=0.1, name='lrelu')
return x
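An illustrative call sketch (hypothetical shapes; it assumes the names darkconv relies on, conv2d, batch_norm, and bias_add from tensorflow.contrib.layers, plus variance_scaling_initializer, an l2 regularizer alias, and a leaky_relu helper, are already in scope):

x = tf.placeholder(tf.float32, [None, 416, 416, 3])
y = darkconv(x, 32, 3, scope='conv1')  # conv -> batch norm -> bias -> leaky ReLU
head = darkconv(y, 255, 1, scope='head', onlyconv=True)  # plain convolution only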
Example 3: resBlock
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import conv2d [as alias]
def resBlock(x, num_outputs, kernel_size=4, stride=1, activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm,
scope=None):
    assert num_outputs % 2 == 0  # num_outputs must be divisible by the channel factor (2 here)
with tf.variable_scope(scope, 'resBlock'):
shortcut = x
if stride != 1 or x.get_shape()[3] != num_outputs:
shortcut = tcl.conv2d(shortcut, num_outputs, kernel_size=1, stride=stride,
activation_fn=None, normalizer_fn=None, scope='shortcut')
        # Floor division keeps the channel count an integer under Python 3.
        x = tcl.conv2d(x, num_outputs // 2, kernel_size=1, stride=1, padding='SAME')
        x = tcl.conv2d(x, num_outputs // 2, kernel_size=kernel_size, stride=stride, padding='SAME')
        x = tcl.conv2d(x, num_outputs, kernel_size=1, stride=1, activation_fn=None, padding='SAME', normalizer_fn=None)
x += shortcut
x = normalizer_fn(x)
x = activation_fn(x)
return x
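A minimal call sketch (shapes illustrative): the 1x1 'shortcut' projection is taken whenever the stride or the channel count changes:

import tensorflow as tf
import tensorflow.contrib.layers as tcl

feat = tf.placeholder(tf.float32, [None, 56, 56, 64])
# 64 -> 128 channels at stride 2, so the projection shortcut is applied.
out = resBlock(feat, num_outputs=128, kernel_size=3, stride=2, scope='res1')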
Example 4: conv2d
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import conv2d [as alias]
def conv2d(self, *args, **kwargs):
"""Masks num_outputs from the function pointed to by 'conv2d'.
The object's parameterization has precedence over the given NUM_OUTPUTS
argument. The resolution of the op names uses
tf.contrib.framework.get_name_scope() and kwargs['scope'].
Args:
*args: Arguments for the operation.
      **kwargs: Keyword arguments for the operation.
Returns:
      The result of applying function_dict['conv2d'] to the given 'inputs',
      '*args', and '**kwargs', while possibly overriding NUM_OUTPUTS according
      to the parameterization.
Raises:
ValueError: If kwargs does not contain a key named 'scope'.
"""
fn, suffix = self._get_function_and_suffix('conv2d')
return self._mask(fn, suffix, *args, **kwargs)
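Concretely (a sketch distilled from the tests below, with illustrative names): a parameterization keyed by the resolved op name wins over the num_outputs passed at the call site:

decorator = ops.ConfigurableOps(parameterization={'first/Conv2D': 3})
out = decorator.conv2d(inputs, num_outputs=7, kernel_size=3, scope='first')
# out now has 3 output channels; the parameterization overrides num_outputs=7.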
Example 5: testComplexNet
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import conv2d [as alias]
def testComplexNet(self):
parameterization = {'Branch0/Conv_1x1/Conv2D': 13, 'Conv3_1x1/Conv2D': 77}
decorator = ops.ConfigurableOps(parameterization=parameterization)
def conv2d(inputs, num_outputs, kernel_size, scope):
return decorator.conv2d(
inputs, num_outputs=num_outputs, kernel_size=kernel_size, scope=scope)
net = self.inputs
with tf.variable_scope('Branch0'):
branch_0 = conv2d(net, 1, 1, scope='Conv_1x1')
with tf.variable_scope('Branch1'):
branch_1 = conv2d(net, 2, 1, scope='Conv_1x1')
out_2 = conv2d(branch_1, 3, 3, scope='Conv_3x3')
net = conv2d(net, 1, 1, scope='Conv3_1x1')
output = tf.concat([net, branch_0, branch_1, out_2], -1)
expected_output_shape = self.inputs_shape
expected_output_shape[-1] = 95
self.assertEqual(expected_output_shape, output.shape.as_list())
self.assertEqual(2, decorator.constructed_ops['Branch1/Conv_1x1/Conv2D'])
self.assertEqual(13, decorator.constructed_ops['Branch0/Conv_1x1/Conv2D'])
self.assertEqual(77, decorator.constructed_ops['Conv3_1x1/Conv2D'])
self.assertEqual(3, decorator.constructed_ops['Branch1/Conv_3x3/Conv2D'])
Example 6: testDifferentParameterization
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import conv2d [as alias]
def testDifferentParameterization(self, parameterization,
expected_first_shape, expected_conv2_shape):
alternate_num_outputs = 7
decorator = ops.ConfigurableOps(parameterization=parameterization)
with arg_scope([layers.conv2d], padding='VALID'):
first_out = decorator.conv2d(
self.inputs,
num_outputs=alternate_num_outputs,
kernel_size=3,
scope='first')
conv2_out = decorator.conv2d(
self.inputs,
num_outputs=alternate_num_outputs,
kernel_size=1,
scope='second')
self.assertAllEqual(expected_first_shape, first_out.shape.as_list())
self.assertAllEqual(expected_conv2_shape, conv2_out.shape.as_list())
Example 7: testStrict_PartialParameterizationFails
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import conv2d [as alias]
def testStrict_PartialParameterizationFails(self):
partial_parameterization = {'first/Conv2D': 3}
default_num_outputs = 7
decorator = ops.ConfigurableOps(
parameterization=partial_parameterization, fallback_rule='strict')
decorator.conv2d(
self.inputs,
num_outputs=default_num_outputs,
kernel_size=3,
scope='first')
with self.assertRaisesRegexp(
KeyError, 'op_name \"second/Conv2D\" not found in parameterization'):
decorator.conv2d(
self.inputs,
num_outputs=default_num_outputs,
kernel_size=1,
scope='second')
Example 8: testStrict_FullParameterizationPasses
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import conv2d [as alias]
def testStrict_FullParameterizationPasses(self, fallback_rule):
full_parameterization = {'first/Conv2D': 3, 'second/Conv2D': 13}
default_num_outputs = 7
decorator = ops.ConfigurableOps(
parameterization=full_parameterization, fallback_rule=fallback_rule)
first = decorator.conv2d(
self.inputs,
num_outputs=default_num_outputs,
kernel_size=3,
scope='first')
second = decorator.conv2d(
self.inputs,
num_outputs=default_num_outputs,
kernel_size=1,
scope='second')
self.assertAllEqual(3, first.shape.as_list()[3])
self.assertAllEqual(13, second.shape.as_list()[3])
Example 9: testGetRegularizerForConcatWithNone
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import conv2d [as alias]
def testGetRegularizerForConcatWithNone(self, test_concat, depth):
image = tf.constant(0.0, shape=[1, 17, 19, 3])
conv2 = layers.conv2d(image, 5, [1, 1], padding='SAME', scope='conv2')
other_input = tf.add(
tf.identity(tf.constant(3.0, shape=[1, 17, 19, depth])), 3.0)
# other_input has None as regularizer.
concat = tf.concat([other_input, conv2], 3)
output = tf.add(concat, concat, name='output_out')
op = concat.op if test_concat else output.op
# Instantiate OpRegularizerManager.
op_handler_dict = self._default_op_handler_dict
op_handler_dict['Conv2D'] = StubConvSourceOpHandler(add_concat_model_stub)
op_reg_manager = orm.OpRegularizerManager([output.op], op_handler_dict)
expected_alive = add_concat_model_stub.expected_alive()
alive = op_reg_manager.get_regularizer(op).alive_vector
self.assertAllEqual([True] * depth, alive[:depth])
self.assertAllEqual(expected_alive['conv2'], alive[depth:])
Example 10: testGroupingOps
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import conv2d [as alias]
def testGroupingOps(self, tested_op):
th = 0.5
image = tf.constant(0.5, shape=[1, 17, 19, 3])
conv1 = layers.conv2d(image, 5, [1, 1], padding='SAME', scope='conv1')
conv2 = layers.conv2d(image, 5, [1, 1], padding='SAME', scope='conv2')
res = tested_op(conv1, conv2)
# Instantiate OpRegularizerManager.
op_handler_dict = self._default_op_handler_dict
op_handler_dict['Conv2D'] = RandomConvSourceOpHandler(th)
op_reg_manager = orm.OpRegularizerManager([res.op], op_handler_dict)
alive = op_reg_manager.get_regularizer(res.op).alive_vector
conv1_reg = op_reg_manager.get_regularizer(conv1.op).regularization_vector
conv2_reg = op_reg_manager.get_regularizer(conv2.op).regularization_vector
with self.session():
self.assertAllEqual(alive, np.logical_or(conv1_reg.eval() > th,
conv2_reg.eval() > th))
Example 11: testGather
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import conv2d [as alias]
def testGather(self):
gather_index = [5, 6, 7, 8, 9, 0, 1, 2, 3, 4]
with arg_scope(self._batch_norm_scope()):
inputs = tf.zeros([2, 4, 4, 3])
c1 = layers.conv2d(inputs, num_outputs=10, kernel_size=3, scope='conv1')
gather = tf.gather(c1, gather_index, axis=3)
manager = orm.OpRegularizerManager(
[gather.op], self._default_op_handler_dict)
c1_reg = manager.get_regularizer(_get_op('conv1/Conv2D'))
gather_reg = manager.get_regularizer(_get_op('GatherV2'))
# Check regularizer indices.
self.assertAllEqual(list(range(10)), c1_reg.regularization_vector)
# This fails due to gather not being supported. Once gather is supported,
# this test can be enabled to verify that the regularization vector is
# gathered in the same ordering as the tensor.
# self.assertAllEqual(
# gather_index, gather_reg.regularization_vector)
# This test shows that gather is not supported. The regularization vector
# has the same initial ordering after the gather op scrambled the
# channels. Remove this once gather is supported.
self.assertAllEqual(list(range(10)), gather_reg.regularization_vector)
Example 12: inception_v2_arg_scope
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import conv2d [as alias]
def inception_v2_arg_scope(weight_decay=0.00004,
batch_norm_var_collection='moving_vars'):
"""Defines the default InceptionV2 arg scope.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_var_collection: The name of the collection for the batch norm
variables.
Returns:
    An `arg_scope` to use for the inception v2 model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': 0.9997,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
# collection containing update_ops.
'updates_collections': ops.GraphKeys.UPDATE_OPS,
# collection containing the moving mean and moving variance.
'variables_collections': {
'beta': None,
'gamma': None,
'moving_mean': [batch_norm_var_collection],
'moving_variance': [batch_norm_var_collection],
}
}
# Set weight_decay for weights in Conv and FC layers.
with arg_scope(
[layers.conv2d, layers_lib.fully_connected],
weights_regularizer=regularizers.l2_regularizer(weight_decay)):
with arg_scope(
[layers.conv2d],
weights_initializer=initializers.variance_scaling_initializer(),
activation_fn=nn_ops.relu,
normalizer_fn=layers_lib.batch_norm,
normalizer_params=batch_norm_params) as sc:
return sc
Developer: MingtaoGuo, Project: Chinese-Character-and-Calligraphic-Image-Processing, Lines of code: 41, Source file: inception_v2.py
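Usage sketch (illustrative input): inside the returned scope, every conv2d is automatically followed by batch norm configured as above, with the moving-average updates collected under tf.GraphKeys.UPDATE_OPS:

with arg_scope(inception_v2_arg_scope(weight_decay=0.00004)):
    net = layers.conv2d(images, 64, [7, 7], stride=2, scope='Conv2d_1a_7x7')
# Run these together with the train op whenever batch norm is active.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)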
Example 13: alexnet_v2_arg_scope
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import conv2d [as alias]
def alexnet_v2_arg_scope(weight_decay=0.0005):
with arg_scope(
[layers.conv2d, layers_lib.fully_connected],
activation_fn=nn_ops.relu,
biases_initializer=init_ops.constant_initializer(0.1),
weights_regularizer=regularizers.l2_regularizer(weight_decay)):
with arg_scope([layers.conv2d], padding='SAME'):
with arg_scope([layers_lib.max_pool2d], padding='VALID') as arg_sc:
return arg_sc
Developer: MingtaoGuo, Project: Chinese-Character-and-Calligraphic-Image-Processing, Lines of code: 11, Source file: alexnet_v2.py
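Usage mirrors the VGG scope from Example 1; note that the nested scopes give conv2d 'SAME' padding while max_pool2d defaults to 'VALID' (sketch, sizes illustrative):

with arg_scope(alexnet_v2_arg_scope()):
    net = layers.conv2d(images, 64, [11, 11], stride=4, scope='conv1')  # 'SAME'
    net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope='pool1')   # 'VALID'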
Example 14: feature_extractor
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import conv2d [as alias]
def feature_extractor(net, output_dim, cfg):
net = net - 0.5
min_feature_map_size = 4
assert output_dim % (
min_feature_map_size**2) == 0, 'output dim=%d' % output_dim
size = int(net.get_shape()[2])
print('Agent CNN:')
channels = cfg.base_channels
print(' ', str(net.get_shape()))
  size //= 2  # floor division so sizes and channel counts stay integers in Python 3
net = ly.conv2d(
net, num_outputs=channels, kernel_size=4, stride=2, activation_fn=lrelu)
print(' ', str(net.get_shape()))
  while size > min_feature_map_size:
    if size == min_feature_map_size * 2:
      # Last downsampling step: match channels to the requested output_dim.
      channels = output_dim // (min_feature_map_size**2)
    else:
      channels *= 2
    assert size % 2 == 0
    size //= 2
net = ly.conv2d(
net, num_outputs=channels, kernel_size=4, stride=2, activation_fn=lrelu)
print(' ', str(net.get_shape()))
print('before fc: ', net.get_shape()[1])
net = tf.reshape(net, [-1, output_dim])
net = tf.nn.dropout(net, cfg.dropout_keep_prob)
return net
# Output: float \in [0, 1]
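An illustrative invocation (the Cfg class and lrelu helper below are stand-ins for whatever the original project actually defines):

import tensorflow as tf
import tensorflow.contrib.layers as ly

def lrelu(x):
    return tf.nn.leaky_relu(x, alpha=0.2)  # assumed activation helper

class Cfg(object):  # stand-in config object
    base_channels = 32
    dropout_keep_prob = 0.5

images = tf.placeholder(tf.float32, [None, 64, 64, 3])
# A 64x64 input halves down to 4x4, ending at 4*4*128 == 2048 == output_dim.
features = feature_extractor(images, output_dim=2048, cfg=Cfg())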
Example 15: cnn
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import conv2d [as alias]
def cnn(net, is_train, cfg):
net = net - 0.5
channels = cfg.base_channels
size = int(net.get_shape()[2])
print('Critic CNN:')
print(' ', str(net.get_shape()))
  size //= 2
net = ly.conv2d(
net,
num_outputs=channels,
kernel_size=4,
stride=2,
activation_fn=lrelu,
normalizer_fn=None)
print(' ', str(net.get_shape()))
  while size > 4:
    channels *= 2
    size //= 2
    # Note: normalizer_params below is ignored by tf.contrib.layers.conv2d
    # unless a normalizer_fn (e.g. ly.batch_norm) is supplied instead of None.
net = ly.conv2d(
net,
num_outputs=channels,
kernel_size=4,
stride=2,
activation_fn=lrelu,
normalizer_fn=None,
normalizer_params={
'is_training': is_train,
'decay': 0.9,
'updates_collections': None
})
print(' ', str(net.get_shape()))
net = tf.reshape(net, [-1, 4 * 4 * channels])
return net
# Input: float \in [0, 1]
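Illustrative call, reusing the stand-in Cfg from the previous sketch; with a 64x64 input and base_channels=32, the loop stops at a 4x4x256 feature map:

critic_in = tf.placeholder(tf.float32, [None, 64, 64, 3])
critic_features = cnn(critic_in, is_train=True, cfg=Cfg())  # shape [None, 4096]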