This page collects typical usage examples of the Python method tensorflow.contrib.layers.max_pool2d. If you have been wondering what layers.max_pool2d does, how to call it, or where to find working examples of it, the curated snippets below should help. You can also explore the other methods of its parent module, tensorflow.contrib.layers.
The 15 code examples of layers.max_pool2d below are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
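Before the examples, here is a minimal, self-contained sketch of the call itself (the input shape and scope name are illustrative assumptions, not taken from any example below). max_pool2d takes an NHWC tensor and a kernel size; stride defaults to 2 and padding to 'VALID':

import tensorflow as tf
from tensorflow.contrib import layers

# A dummy NHWC batch: 1 image, 8 x 8 spatial, 3 channels (illustrative values).
images = tf.zeros([1, 8, 8, 3])
# A 2x2 max pool with stride 2 halves each spatial dimension: 8 x 8 -> 4 x 4.
pooled = layers.max_pool2d(images, kernel_size=[2, 2], stride=2, scope='pool_demo')
print(pooled.get_shape())  # (1, 4, 4, 3)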
Example 1: _block_b_reduce
# Required module: from tensorflow.contrib import layers [as alias]
# Or alternatively: from tensorflow.contrib.layers import max_pool2d [as alias]
def _block_b_reduce(net, endpoints, scope='BlockReduceB'):
    # 17 x 17 -> 8 x 8 reduce
    with arg_scope([layers.conv2d, layers.max_pool2d, layers.avg_pool2d], padding='VALID'):
        with tf.variable_scope(scope):
            with tf.variable_scope('Br1_Pool'):
                br1 = layers.max_pool2d(net, [3, 3], stride=2, scope='Pool1_3x3/2')
            with tf.variable_scope('Br2_3x3'):
                br2 = layers.conv2d(net, 192, [1, 1], padding='SAME', scope='Conv1_1x1')
                br2 = layers.conv2d(br2, 192, [3, 3], stride=2, scope='Conv2_3x3/2')
            with tf.variable_scope('Br3_7x7x3'):
                br3 = layers.conv2d(net, 256, [1, 1], padding='SAME', scope='Conv1_1x1')
                br3 = layers.conv2d(br3, 256, [1, 7], padding='SAME', scope='Conv2_1x7')
                br3 = layers.conv2d(br3, 320, [7, 1], padding='SAME', scope='Conv3_7x1')
                br3 = layers.conv2d(br3, 320, [3, 3], stride=2, scope='Conv4_3x3/2')
            # tf.concat takes the tensor list first, then the axis, in TF >= 1.0.
            net = tf.concat([br1, br2, br3], 3, name='Concat1')
            endpoints[scope] = net
            print('%s output shape: %s' % (scope, net.get_shape()))
    return net
Example 2: _extract_features
# Required module: from tensorflow.contrib import layers [as alias]
# Or alternatively: from tensorflow.contrib.layers import max_pool2d [as alias]
def _extract_features(self, preprocessed_inputs):
    """Extracts features.
    Args:
        preprocessed_inputs: float32 tensor of shape [batch_size, image_height, image_width, 3]
    Returns:
        feature_maps_dict: a dict mapping layer names to extracted feature maps
    """
    with arg_scope([conv2d], kernel_size=3, activation_fn=tf.nn.relu), \
         arg_scope([max_pool2d], kernel_size=2, stride=2):
        conv1 = conv2d(preprocessed_inputs, 32, scope='conv1')  # 64
        pool1 = max_pool2d(conv1, scope='pool1')
        conv2 = conv2d(pool1, 64, scope='conv2')  # 32
        pool2 = max_pool2d(conv2, scope='pool2')
        conv3 = conv2d(pool2, 128, scope='conv3')  # 16
        pool3 = max_pool2d(conv3, scope='pool3')
        conv4 = conv2d(pool3, 256, scope='conv4')  # 8
        pool4 = max_pool2d(conv4, scope='pool4')
        conv5 = conv2d(pool4, 256, scope='conv5')  # 4
        pool5 = max_pool2d(conv5, scope='pool5')
        conv6 = conv2d(pool5, 256, scope='conv6')  # 2
    feature_maps_dict = {
        'conv1': conv1, 'conv2': conv2, 'conv3': conv3,
        'conv4': conv4, 'conv5': conv5, 'conv6': conv6}
    return feature_maps_dict
Example 3: _extract_features
# Required module: from tensorflow.contrib import layers [as alias]
# Or alternatively: from tensorflow.contrib.layers import max_pool2d [as alias]
def _extract_features(self, preprocessed_inputs):
    """Extracts features.
    Args:
        preprocessed_inputs: float32 tensor of shape [batch_size, image_height, image_width, 3]
    Returns:
        feature_maps_dict: a dict mapping layer names to extracted feature maps
    """
    with arg_scope([conv2d], kernel_size=3, padding='SAME', stride=1), \
         arg_scope([max_pool2d], stride=2):
        conv1 = conv2d(preprocessed_inputs, 64, scope='conv1')
        pool1 = max_pool2d(conv1, 2, scope='pool1')
        conv2 = conv2d(pool1, 128, scope='conv2')
        pool2 = max_pool2d(conv2, 2, scope='pool2')
        conv3 = conv2d(pool2, 256, scope='conv3')
        conv4 = conv2d(conv3, 256, scope='conv4')
        pool4 = max_pool2d(conv4, 2, stride=[2, 1], scope='pool4')
        conv5 = conv2d(pool4, 512, scope='conv5')
        conv6 = conv2d(conv5, 512, scope='conv6')
        pool6 = max_pool2d(conv6, 2, stride=[2, 1], scope='pool6')
        conv7 = conv2d(pool6, 512, kernel_size=[2, 1], padding='VALID', scope='conv7')
    feature_maps_dict = {
        'conv1': conv1, 'conv2': conv2, 'conv3': conv3, 'conv4': conv4,
        'conv5': conv5, 'conv6': conv6, 'conv7': conv7}
    return feature_maps_dict
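The asymmetric stride is the interesting detail here: pooling with stride=[2, 1] halves the feature-map height while keeping its width, preserving the horizontal resolution that text-line recognizers need. A minimal sketch of the effect (the shapes and the explicit padding='SAME' are assumptions for the demo, not taken from the snippet above):

feat = tf.zeros([1, 8, 16, 64])  # batch, height, width, channels (illustrative)
# Height is halved, width is untouched: 8 x 16 -> 4 x 16.
pooled = max_pool2d(feat, 2, stride=[2, 1], padding='SAME', scope='pool_demo')
print(pooled.get_shape())  # (1, 4, 16, 64)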
Example 4: _squeezenet
# Required module: from tensorflow.contrib import layers [as alias]
# Or alternatively: from tensorflow.contrib.layers import max_pool2d [as alias]
def _squeezenet(images, num_classes=1000, data_format='NCHW'):
    net = conv2d(images, 96, [2, 2], stride=2, scope='conv1')
    net = max_pool2d(net, [3, 3], stride=2, scope='maxpool1')
    net = fire_module(net, 16, 64, scope='fire2', data_format=data_format)
    net = fire_module(net, 16, 64, scope='fire3', data_format=data_format)
    net = fire_module(net, 32, 128, scope='fire4', data_format=data_format)
    net = max_pool2d(net, [3, 3], stride=2, scope='maxpool4')
    net = fire_module(net, 32, 128, scope='fire5', data_format=data_format)
    net = fire_module(net, 48, 192, scope='fire6', data_format=data_format)
    net = fire_module(net, 48, 192, scope='fire7', data_format=data_format)
    net = fire_module(net, 64, 256, scope='fire8', data_format=data_format)
    net = max_pool2d(net, [3, 3], stride=2, scope='maxpool8')
    net = fire_module(net, 64, 256, scope='fire9', data_format=data_format)
    net = conv2d(net, num_classes, [1, 1], stride=1, scope='conv10')
    net = avg_pool2d(net, [13, 13], stride=1, scope='avgpool10')
    # The spatial axes are (2, 3) for NCHW and (1, 2) for NHWC.
    squeeze_axes = [2, 3] if data_format == 'NCHW' else [1, 2]
    logits = tf.squeeze(net, squeeze_axes, name='logits')
    return logits
Example 5: _squeezenet
# Required module: from tensorflow.contrib import layers [as alias]
# Or alternatively: from tensorflow.contrib.layers import max_pool2d [as alias]
def _squeezenet(images, num_classes=1000):
    net = conv2d(images, 96, [7, 7], stride=2, scope='conv1')
    net = max_pool2d(net, [3, 3], stride=2, scope='maxpool1')
    net = fire_module(net, 16, 64, scope='fire2')
    net = fire_module(net, 16, 64, scope='fire3')
    net = fire_module(net, 32, 128, scope='fire4')
    net = max_pool2d(net, [3, 3], stride=2, scope='maxpool4')
    net = fire_module(net, 32, 128, scope='fire5')
    net = fire_module(net, 48, 192, scope='fire6')
    net = fire_module(net, 48, 192, scope='fire7')
    net = fire_module(net, 64, 256, scope='fire8')
    net = max_pool2d(net, [3, 3], stride=2, scope='maxpool8')
    net = fire_module(net, 64, 256, scope='fire9')
    net = conv2d(net, num_classes, [1, 1], stride=1, scope='conv10')
    net = avg_pool2d(net, [13, 13], stride=1, scope='avgpool10')
    # Squeeze both spatial axes (NHWC): [batch, 1, 1, classes] -> [batch, classes],
    # matching the NHWC branch of Example 4.
    logits = tf.squeeze(net, [1, 2], name='logits')
    return logits
Example 6: subsample
# Required module: from tensorflow.contrib import layers [as alias]
# Or alternatively: from tensorflow.contrib.layers import max_pool2d [as alias]
def subsample(inputs, factor, scope=None):
    """Subsamples the input along the spatial dimensions.
    Args:
        inputs: A `Tensor` of size [batch, height_in, width_in, channels].
        factor: The subsampling factor.
        scope: Optional variable_scope.
    Returns:
        output: A `Tensor` of size [batch, height_out, width_out, channels] with the
            input, either intact (if factor == 1) or subsampled (if factor > 1).
    """
    if factor == 1:
        return inputs
    else:
        return layers.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)
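A quick illustration of the trick (the input shape and scope name are assumptions for the demo, and the imports from the snippet's header are assumed): a 1 x 1 max pool is an identity over values, so striding it subsamples the grid without mixing neighboring activations.

x = tf.zeros([1, 8, 8, 4])  # dummy NHWC input (illustrative)
y = subsample(x, factor=2, scope='sub_demo')
print(y.get_shape())  # (1, 4, 4, 4) -- spatial dims halved, channels intact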
Example 7: max_pool2d
# Required module: from tensorflow.contrib import layers [as alias]
# Or alternatively: from tensorflow.contrib.layers import max_pool2d [as alias]
def max_pool2d(self, *args, **kwargs):
    return self._pass_through_mask(
        self._function_dict['max_pool2d'], *args, **kwargs)
Example 8: testCascadedGrouping
# Required module: from tensorflow.contrib import layers [as alias]
# Or alternatively: from tensorflow.contrib.layers import max_pool2d [as alias]
def testCascadedGrouping(self):
    inputs = tf.zeros([6, 8, 8, 10], name='prev')
    with arg_scope(
            [layers.conv2d, layers.max_pool2d],
            kernel_size=1,
            stride=1,
            padding='SAME'):
        net = layers.conv2d(inputs, 17, scope='conv/input')
        first = layers.conv2d(net, num_outputs=17, scope='conv/first')
        add_0 = tf.add(first, net, 'Add/first')  # So conv/first must be 17.
        second = layers.conv2d(add_0, num_outputs=17, scope='conv/second')
        out = tf.add(net, second, 'Add/second')  # So conv/second must be 17.
    # Instantiate OpRegularizerManager.
    op_handler_dict = self._default_op_handler_dict
    op_handler_dict['Conv2D'] = IndexConvSourceOpHandler()
    op_reg_manager = orm.OpRegularizerManager([out.op], op_handler_dict)
    grouped_names = [
        [op_slice.op.name for op_slice in group.op_slices]
        for group in op_reg_manager._op_group_dict.values()]
    expected = set([
        'conv/second/Conv2D', 'Add/second', 'conv/first/Conv2D',
        'conv/input/Conv2D', 'Add/first'
    ])
    groups = []
    for group in grouped_names:
        filtered = []
        for op_name in group:
            if '/Conv2D' in op_name or 'Add/' in op_name:
                filtered.append(op_name)
        if filtered:
            groups.append(set(filtered))
        if DEBUG_PRINTS:
            print('Group Found = ', filtered)
    self.assertIn(expected, groups)
Example 9: _block_a
# Required module: from tensorflow.contrib import layers [as alias]
# Or alternatively: from tensorflow.contrib.layers import max_pool2d [as alias]
def _block_a(net, endpoints, d=64, scope='BlockA'):
    with tf.variable_scope(scope):
        net = endpoints[scope + '/Conv1'] = layers.conv2d(net, d, [3, 3], scope='Conv1_3x3')
        net = endpoints[scope + '/Conv2'] = layers.conv2d(net, d, [3, 3], scope='Conv2_3x3')
        net = endpoints[scope + '/Pool1'] = layers.max_pool2d(net, [2, 2], stride=2, scope='Pool1_2x2/2')
    return net
Example 10: _block_b
# Required module: from tensorflow.contrib import layers [as alias]
# Or alternatively: from tensorflow.contrib.layers import max_pool2d [as alias]
def _block_b(net, endpoints, d=256, scope='BlockB'):
    with tf.variable_scope(scope):
        net = endpoints[scope + '/Conv1'] = layers.conv2d(net, d, [3, 3], scope='Conv1_3x3')
        net = endpoints[scope + '/Conv2'] = layers.conv2d(net, d, [3, 3], scope='Conv2_3x3')
        net = endpoints[scope + '/Conv3'] = layers.conv2d(net, d, [3, 3], scope='Conv3_3x3')
        net = endpoints[scope + '/Pool1'] = layers.max_pool2d(net, [2, 2], stride=2, scope='Pool1_2x2/2')
    return net
Example 11: _block_c
# Required module: from tensorflow.contrib import layers [as alias]
# Or alternatively: from tensorflow.contrib.layers import max_pool2d [as alias]
def _block_c(net, endpoints, d=256, scope='BlockC'):
    with tf.variable_scope(scope):
        net = endpoints[scope + '/Conv1'] = layers.conv2d(net, d, [3, 3], scope='Conv1_3x3')
        net = endpoints[scope + '/Conv2'] = layers.conv2d(net, d, [3, 3], scope='Conv2_3x3')
        net = endpoints[scope + '/Conv3'] = layers.conv2d(net, d, [3, 3], scope='Conv3_3x3')
        net = endpoints[scope + '/Conv4'] = layers.conv2d(net, d, [3, 3], scope='Conv4_3x3')
        net = endpoints[scope + '/Pool1'] = layers.max_pool2d(net, [2, 2], stride=2, scope='Pool1_2x2/2')
    return net
Example 12: _build_vgg16
# Required module: from tensorflow.contrib import layers [as alias]
# Or alternatively: from tensorflow.contrib.layers import max_pool2d [as alias]
def _build_vgg16(
        inputs,
        num_classes=1000,
        dropout_keep_prob=0.5,
        is_training=True,
        scope=''):
    """Builds a VGG-16 style network; returns logits and an endpoints dict."""
    endpoints = {}
    with tf.name_scope(scope, 'vgg16', [inputs]):
        with arg_scope(
                [layers.batch_norm, layers.dropout], is_training=is_training):
            with arg_scope(
                    [layers.conv2d, layers.max_pool2d],
                    stride=1,
                    padding='SAME'):
                net = _block_a(inputs, endpoints, d=64, scope='Scale1')
                net = _block_a(net, endpoints, d=128, scope='Scale2')
                net = _block_b(net, endpoints, d=256, scope='Scale3')
                net = _block_b(net, endpoints, d=512, scope='Scale4')
                net = _block_b(net, endpoints, d=512, scope='Scale5')
                logits = _block_output(net, endpoints, num_classes, dropout_keep_prob)
    endpoints['Predictions'] = tf.nn.softmax(logits, name='Predictions')
    return logits, endpoints
Example 13: _build_vgg19
# Required module: from tensorflow.contrib import layers [as alias]
# Or alternatively: from tensorflow.contrib.layers import max_pool2d [as alias]
def _build_vgg19(
        inputs,
        num_classes=1000,
        dropout_keep_prob=0.5,
        is_training=True,
        scope=''):
    """Builds a VGG-19 style network; returns logits and an endpoints dict."""
    endpoints = {}
    with tf.name_scope(scope, 'vgg19', [inputs]):
        with arg_scope(
                [layers.batch_norm, layers.dropout], is_training=is_training):
            with arg_scope(
                    [layers.conv2d, layers.max_pool2d],
                    stride=1,
                    padding='SAME'):
                net = _block_a(inputs, endpoints, d=64, scope='Scale1')
                net = _block_a(net, endpoints, d=128, scope='Scale2')
                net = _block_c(net, endpoints, d=256, scope='Scale3')
                net = _block_c(net, endpoints, d=512, scope='Scale4')
                net = _block_c(net, endpoints, d=512, scope='Scale5')
                logits = _block_output(net, endpoints, num_classes, dropout_keep_prob)
    endpoints['Predictions'] = tf.nn.softmax(logits, name='Predictions')
    return logits, endpoints
Example 14: _block_a_reduce
# Required module: from tensorflow.contrib import layers [as alias]
# Or alternatively: from tensorflow.contrib.layers import max_pool2d [as alias]
def _block_a_reduce(net, endpoints, k=192, l=224, m=256, n=384, scope='BlockReduceA'):
    # 35 x 35 -> 17 x 17 reduce
    # inception-v4: k=192, l=224, m=256, n=384
    # inception-resnet-v1: k=192, l=192, m=256, n=384
    # inception-resnet-v2: k=256, l=256, m=384, n=384
    # default padding = VALID
    # default stride = 1
    with arg_scope([layers.conv2d, layers.max_pool2d, layers.avg_pool2d], padding='VALID'):
        with tf.variable_scope(scope):
            with tf.variable_scope('Br1_Pool'):
                br1 = layers.max_pool2d(net, [3, 3], stride=2, scope='Pool1_3x3/2')
                # 17 x 17 x input
            with tf.variable_scope('Br2_3x3'):
                br2 = layers.conv2d(net, n, [3, 3], stride=2, scope='Conv1_3x3/2')
                # 17 x 17 x n
            with tf.variable_scope('Br3_3x3Dbl'):
                br3 = layers.conv2d(net, k, [1, 1], padding='SAME', scope='Conv1_1x1')
                br3 = layers.conv2d(br3, l, [3, 3], padding='SAME', scope='Conv2_3x3')
                br3 = layers.conv2d(br3, m, [3, 3], stride=2, scope='Conv3_3x3/2')
                # 17 x 17 x m
            # tf.concat takes the tensor list first, then the axis, in TF >= 1.0.
            net = tf.concat([br1, br2, br3], 3, name='Concat1')
            # 17 x 17 x (input + n + m)
            # 1024 for v4 (384 + 384 + 256)
            # 896 for res-v1 (256 + 384 + 256)
            # 1152 for res-v2 (384 + 384 + 384)
            endpoints[scope] = net
            print('%s output shape: %s' % (scope, net.get_shape()))
    return net
Example 15: _block_stem_res
# Required module: from tensorflow.contrib import layers [as alias]
# Or alternatively: from tensorflow.contrib.layers import max_pool2d [as alias]
def _block_stem_res(net, endpoints, scope='Stem'):
    # Simpler stem for the inception-resnet-v1 network
    # NOTE: observe endpoints of first 3 layers
    # default padding = VALID
    # default stride = 1
    with arg_scope([layers.conv2d, layers.max_pool2d, layers.avg_pool2d], padding='VALID'):
        with tf.variable_scope(scope):
            # 299 x 299 x 3
            net = layers.conv2d(net, 32, [3, 3], stride=2, scope='Conv1_3x3/2')
            endpoints[scope + '/Conv1'] = net
            # 149 x 149 x 32
            net = layers.conv2d(net, 32, [3, 3], scope='Conv2_3x3')
            endpoints[scope + '/Conv2'] = net
            # 147 x 147 x 32
            net = layers.conv2d(net, 64, [3, 3], padding='SAME', scope='Conv3_3x3')
            endpoints[scope + '/Conv3'] = net
            # 147 x 147 x 64
            net = layers.max_pool2d(net, [3, 3], stride=2, scope='Pool1_3x3/2')
            # 73 x 73 x 64
            net = layers.conv2d(net, 80, [1, 1], padding='SAME', scope='Conv4_1x1')
            # 73 x 73 x 80
            net = layers.conv2d(net, 192, [3, 3], scope='Conv5_3x3')
            # 71 x 71 x 192
            net = layers.conv2d(net, 256, [3, 3], stride=2, scope='Conv6_3x3/2')
            # 35 x 35 x 256
            endpoints[scope] = net
            print('%s output shape: %s' % (scope, net.get_shape()))
    return net