

Python layers.max_pool2d Method Code Examples

This article collects typical usage examples of the Python method tensorflow.contrib.layers.python.layers.layers.max_pool2d. If you are wondering how layers.max_pool2d is used in practice, or are looking for concrete examples of it, the curated code examples below should help. You can also explore further usage examples from its containing module, tensorflow.contrib.layers.python.layers.layers.


The following presents 14 code examples of the layers.max_pool2d method, sorted by popularity by default.
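
Before turning to the examples, here is a minimal standalone sketch of the method itself (assuming TensorFlow 1.x, where tf.contrib is still available; the input shape is made up for illustration):

import tensorflow as tf
from tensorflow.contrib.layers.python.layers import layers

# NHWC input: a batch of 8 RGB images of size 32x32.
inputs = tf.placeholder(tf.float32, [8, 32, 32, 3])

# A 2x2 max pool with stride 2 halves both spatial dimensions
# (the default padding is 'VALID').
net = layers.max_pool2d(inputs, [2, 2], stride=2, scope='pool_demo')
print(net.get_shape())  # (8, 16, 16, 3)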

Example 1: subsample

# Required module import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.layers import max_pool2d [as alias]
def subsample(inputs, factor, scope=None):
  """Subsamples the input along the spatial dimensions.

  Args:
    inputs: A `Tensor` of size [batch, height_in, width_in, channels].
    factor: The subsampling factor.
    scope: Optional variable_scope.

  Returns:
    output: A `Tensor` of size [batch, height_out, width_out, channels] with the
      input, either intact (if factor == 1) or subsampled (if factor > 1).
  """
  if factor == 1:
    return inputs
  else:
    return layers.max_pool2d(inputs, [1, 1], stride=factor, scope=scope) 
Developer: ryfeus, Project: lambda-packs, Lines: 18, Source: resnet_utils.py
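
A hypothetical call of the subsample function above (TensorFlow 1.x assumed): a 1x1 max pool with stride=factor keeps every factor-th activation, so the spatial size shrinks by the given factor without mixing neighboring values.

import tensorflow as tf

x = tf.placeholder(tf.float32, [1, 28, 28, 64])
y = subsample(x, factor=2, scope='sub')  # shape: (1, 14, 14, 64)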

Example 2: subsample

# Required module import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.layers import max_pool2d [as alias]
def subsample(inputs, factor, scope=None):
    """Subsamples the input along the spatial dimensions.

    Args:
      inputs: A `Tensor` of size [batch, height_in, width_in, channels].
      factor: The subsampling factor.
      scope: Optional variable_scope.

    Returns:
      output: A `Tensor` of size [batch, height_out, width_out, channels] with the
        input, either intact (if factor == 1) or subsampled (if factor > 1).
    """
    if factor == 1:
        return inputs
    else:
        return layers.max_pool2d(inputs, [1, 1], stride=factor, scope=scope) 
Developer: POSTECH-IMLAB, Project: LaneSegmentationNetwork, Lines: 18, Source: resnet_utils.py

Example 3: alexnet_v2_arg_scope

# Required module import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.layers import max_pool2d [as alias]
def alexnet_v2_arg_scope(weight_decay=0.0005):
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      activation_fn=nn_ops.relu,
      biases_initializer=init_ops.constant_initializer(0.1),
      weights_regularizer=regularizers.l2_regularizer(weight_decay)):
    with arg_scope([layers.conv2d], padding='SAME'):
      with arg_scope([layers_lib.max_pool2d], padding='VALID') as arg_sc:
        return arg_sc 
Developer: MingtaoGuo, Project: Chinese-Character-and-Calligraphic-Image-Processing, Lines: 11, Source: alexnet_v2.py

Example 4: subsample

# Required module import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.layers import max_pool2d [as alias]
def subsample(inputs, factor, scope=None):
  """Subsamples the input along the spatial dimensions.
  Args:
    inputs: A `Tensor` of size [batch, height_in, width_in, channels].
    factor: The subsampling factor.
    scope: Optional variable_scope.
  Returns:
    output: A `Tensor` of size [batch, height_out, width_out, channels] with the
      input, either intact (if factor == 1) or subsampled (if factor > 1).
  """
  if factor == 1:
    return inputs
  else:
    return layers.max_pool2d(inputs, [1, 1], stride=factor, scope=scope) 
Developer: vicwer, Project: sense_classification, Lines: 16, Source: resnet_utils.py

Example 5: dense_block

# Required module import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.layers import max_pool2d [as alias]
def dense_block(inputs, depth, depth_bottleneck, stride, name, rate=1):
    depth_in = inputs.get_shape()[3]
    if depth == depth_in:
        if stride == 1:
            shortcut = inputs
        else:
            # Note: the original used the undefined name `factor` here; the
            # function's stride parameter is what should be passed.
            shortcut = layers.max_pool2d(inputs, [1, 1], stride=stride, scope=name+'_shortcut')
    else:
        shortcut = layers.conv2d(
            inputs,
            depth, [1, 1],
            stride=stride,
            activation_fn=None,
            scope=name+'_shortcut')
    if PRINT_LAYER_LOG:
        print(name+'_shortcut', shortcut.get_shape())

    residual = layers.conv2d(
        inputs, depth_bottleneck, [1, 1], stride=1, scope=name+'_conv1')
    if PRINT_LAYER_LOG:
        print(name+'_conv1', residual.get_shape())
    residual = resnet_utils.conv2d_same(
        residual, depth_bottleneck, 3, stride, rate=rate, scope=name+'_conv2')
    if PRINT_LAYER_LOG:
        print(name+'_conv2', residual.get_shape())
    residual = layers.conv2d(
        residual, depth, [1, 1], stride=1, activation_fn=None, scope=name+'_conv3')
    if PRINT_LAYER_LOG:
        print(name+'_conv3', residual.get_shape())
    output = nn_ops.relu(shortcut + residual)
    return output 
Developer: vicwer, Project: sense_classification, Lines: 33, Source: network.py
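
A hypothetical invocation of the dense_block above; it assumes resnet_utils and the PRINT_LAYER_LOG flag from the same project (sense_classification) are in scope, and the input shape is made up:

import tensorflow as tf

x = tf.placeholder(tf.float32, [8, 56, 56, 64])
# depth (256) differs from depth_in (64), so the shortcut is a strided 1x1 conv;
# the output is halved spatially: (8, 28, 28, 256).
out = dense_block(x, depth=256, depth_bottleneck=64, stride=2, name='block1_0')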

Example 6: maxpool2x2

# Required module import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.layers import max_pool2d [as alias]
def maxpool2x2(input, name):
    output = slim.max_pool2d(input, kernel_size=[2, 2], stride=2, scope=name)
    if PRINT_LAYER_LOG:
        print(name, output.get_shape())
    return output 
Developer: vicwer, Project: sense_classification, Lines: 7, Source: network.py

Example 7: resnet_v1_backbone

# Required module import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.layers import max_pool2d [as alias]
def resnet_v1_backbone(inputs,
              blocks,
              is_training=True,
              output_stride=None,
              include_root_block=True,
              reuse=None,
              scope=None):
  with variable_scope.variable_scope(
      scope, 'resnet_v1', [inputs], reuse=reuse) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    with arg_scope(
        [layers.conv2d, bottleneck, resnet_utils.stack_blocks_dense],
        outputs_collections=end_points_collection):
      with arg_scope([layers.batch_norm], is_training=is_training):
        net = inputs
        if include_root_block:
          if output_stride is not None:
            if output_stride % 4 != 0:
              raise ValueError('The output_stride needs to be a multiple of 4.')
            output_stride /= 4
          net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
          net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope='pool1')
        net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
        # Convert end_points_collection into a dictionary of end_points.
        end_points = utils.convert_collection_to_dict(end_points_collection)

        return net, end_points 
Developer: HiKapok, Project: X-Detector, Lines: 29, Source: slim_resnet_utils.py

Example 8: vgg_a

# Required module import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.layers import max_pool2d [as alias]
def vgg_a(inputs,
          num_classes=1000,
          is_training=True,
          dropout_keep_prob=0.5,
          spatial_squeeze=True,
          scope='vgg_a'):
  """Oxford Net VGG 11-Layers version A Example.

  Note: All the fully_connected layers have been transformed to conv2d layers.
        To use in classification mode, resize input to 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the last op containing the log predictions and end_points dict.
  """
  with variable_scope.variable_scope(scope, 'vgg_a', [inputs]) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with arg_scope(
        [layers.conv2d, layers_lib.max_pool2d],
        outputs_collections=end_points_collection):
      net = layers_lib.repeat(
          inputs, 1, layers.conv2d, 64, [3, 3], scope='conv1')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
      net = layers_lib.repeat(net, 1, layers.conv2d, 128, [3, 3], scope='conv2')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
      net = layers_lib.repeat(net, 2, layers.conv2d, 256, [3, 3], scope='conv3')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')
      net = layers_lib.repeat(net, 2, layers.conv2d, 512, [3, 3], scope='conv4')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')
      net = layers_lib.repeat(net, 2, layers.conv2d, 512, [3, 3], scope='conv5')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
      # Use conv2d instead of fully_connected layers.
      net = layers.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')
      net = layers_lib.dropout(
          net, dropout_keep_prob, is_training=is_training, scope='dropout6')
      net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
      net = layers_lib.dropout(
          net, dropout_keep_prob, is_training=is_training, scope='dropout7')
      net = layers.conv2d(
          net,
          num_classes, [1, 1],
          activation_fn=None,
          normalizer_fn=None,
          scope='fc8')
      # Convert end_points_collection into an end_points dict.
      end_points = utils.convert_collection_to_dict(end_points_collection)
      if spatial_squeeze:
        net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points 
Developer: MingtaoGuo, Project: Chinese-Character-and-Calligraphic-Image-Processing, Lines: 62, Source: vgg16.py

Example 9: vgg_19

# Required module import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.layers import max_pool2d [as alias]
def vgg_19(inputs,
           num_classes=1000,
           is_training=True,
           dropout_keep_prob=0.5,
           spatial_squeeze=True,
           scope='vgg_19'):
  """Oxford Net VGG 19-Layers version E Example.

  Note: All the fully_connected layers have been transformed to conv2d layers.
        To use in classification mode, resize input to 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the last op containing the log predictions and end_points dict.
  """
  with variable_scope.variable_scope(scope, 'vgg_19', [inputs]) as sc:
    end_points_collection = sc.name + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
        outputs_collections=end_points_collection):
      net = layers_lib.repeat(
          inputs, 2, layers.conv2d, 64, [3, 3], scope='conv1')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
      net = layers_lib.repeat(net, 2, layers.conv2d, 128, [3, 3], scope='conv2')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
      net = layers_lib.repeat(net, 4, layers.conv2d, 256, [3, 3], scope='conv3')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')
      net = layers_lib.repeat(net, 4, layers.conv2d, 512, [3, 3], scope='conv4')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')
      net = layers_lib.repeat(net, 4, layers.conv2d, 512, [3, 3], scope='conv5')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
      # Use conv2d instead of fully_connected layers.
      net = layers.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')
      net = layers_lib.dropout(
          net, dropout_keep_prob, is_training=is_training, scope='dropout6')
      net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
      net = layers_lib.dropout(
          net, dropout_keep_prob, is_training=is_training, scope='dropout7')
      net = layers.conv2d(
          net,
          num_classes, [1, 1],
          activation_fn=None,
          normalizer_fn=None,
          scope='fc8')
      # Convert end_points_collection into an end_points dict.
      end_points = utils.convert_collection_to_dict(end_points_collection)
      if spatial_squeeze:
        net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points 
Developer: MingtaoGuo, Project: Chinese-Character-and-Calligraphic-Image-Processing, Lines: 62, Source: vgg16.py

Example 10: resnet_arg_scope

# Required module import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.layers import max_pool2d [as alias]
def resnet_arg_scope(is_training=True,
                     weight_decay=0.0001,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
  """Defines the default ResNet arg scope.

  TODO(gpapan): The batch-normalization related default values above are
    appropriate for use in conjunction with the reference ResNet models
    released at https://github.com/KaimingHe/deep-residual-networks. When
    training ResNets from scratch, they might need to be tuned.

  Args:
    is_training: Whether or not we are training the parameters in the batch
      normalization layers of the model. (deprecated)
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_decay: The moving average decay when estimating layer activation
      statistics in batch normalization.
    batch_norm_epsilon: Small constant to prevent division by zero when
      normalizing activations by their variance in batch normalization.
    batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
      activations in the batch normalization layer.

  Returns:
    An `arg_scope` to use for the resnet models.
  """
  batch_norm_params = {
      'is_training': is_training,
      'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon,
      'scale': batch_norm_scale,
      'updates_collections': ops.GraphKeys.UPDATE_OPS,
  }

  with arg_scope(
      [layers_lib.conv2d],
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      weights_initializer=initializers.variance_scaling_initializer(),
      activation_fn=nn_ops.relu,
      normalizer_fn=layers.batch_norm):
    with arg_scope([layers.batch_norm], **batch_norm_params):
      # The following implies padding='SAME' for pool1, which makes feature
      # alignment easier for dense prediction tasks. This is also used in
      # https://github.com/facebook/fb.resnet.torch. However the accompanying
      # code of 'Deep Residual Learning for Image Recognition' uses
      # padding='VALID' for pool1. You can switch to that choice by setting
      # tf.contrib.framework.arg_scope([tf.contrib.layers.max_pool2d], padding='VALID').
      with arg_scope([layers.max_pool2d], padding='SAME') as arg_sc:
        return arg_sc 
Developer: ryfeus, Project: lambda-packs, Lines: 51, Source: resnet_utils.py
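
A hypothetical usage sketch of the returned scope (the names layers_lib.conv2d and layers.max_pool2d follow the imports used throughout these examples; TensorFlow 1.x assumed):

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
with arg_scope(resnet_arg_scope(is_training=False)):
    # conv2d/batch_norm/max_pool2d created here pick up the scope defaults,
    # e.g. padding='SAME' for max_pool2d, as described in the comment above.
    net = layers_lib.conv2d(images, 64, [7, 7], stride=2, scope='conv1')
    net = layers.max_pool2d(net, [3, 3], stride=2, scope='pool1')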

Example 11: vgg_16

# Required module import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.layers import max_pool2d [as alias]
def vgg_16(inputs,
           num_classes=1000,
           is_training=True,
           dropout_keep_prob=0.5,
           spatial_squeeze=True,
           scope='vgg_16'):
  """Oxford Net VGG 16-Layers version D Example.

  Note: All the fully_connected layers have been transformed to conv2d layers.
        To use in classification mode, resize input to 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the last op containing the log predictions and end_points dict.
  """
  with variable_scope.variable_scope(scope, 'vgg_16', [inputs]) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
        outputs_collections=end_points_collection):
      net = layers_lib.repeat(
          inputs, 2, layers.conv2d, 64, [3, 3], scope='conv1')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
      net = layers_lib.repeat(net, 2, layers.conv2d, 128, [3, 3], scope='conv2')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
      net = layers_lib.repeat(net, 3, layers.conv2d, 256, [3, 3], scope='conv3')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')
      net = layers_lib.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv4')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')
      net = layers_lib.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv5')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
      # Use conv2d instead of fully_connected layers.
      net = layers.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')
      net = layers_lib.dropout(
          net, dropout_keep_prob, is_training=is_training, scope='dropout6')
      net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
      net = layers_lib.dropout(
          net, dropout_keep_prob, is_training=is_training, scope='dropout7')
      net = layers.conv2d(
          net,
          num_classes, [1, 1],
          activation_fn=None,
          normalizer_fn=None,
          scope='fc8')
      # Convert end_points_collection into an end_points dict.
      end_points = utils.convert_collection_to_dict(end_points_collection)
      if spatial_squeeze:
        net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points 
Developer: ryfeus, Project: lambda-packs, Lines: 62, Source: vgg.py
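
A hypothetical inference call for the vgg_16 above (input resized to 224x224 as the docstring requires; TensorFlow 1.x assumed):

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
logits, end_points = vgg_16(images, num_classes=1000, is_training=False)
# logits has shape [batch_size, 1000] after the spatial squeeze.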

Example 12: resnet_arg_scope

# Required module import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.layers import max_pool2d [as alias]
def resnet_arg_scope(is_training=True,
                     weight_decay=0.0001,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
  """Defines the default ResNet arg scope.

  TODO(gpapan): The batch-normalization related default values above are
    appropriate for use in conjunction with the reference ResNet models
    released at https://github.com/KaimingHe/deep-residual-networks. When
    training ResNets from scratch, they might need to be tuned.

  Args:
    is_training: Whether or not we are training the parameters in the batch
      normalization layers of the model.
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_decay: The moving average decay when estimating layer activation
      statistics in batch normalization.
    batch_norm_epsilon: Small constant to prevent division by zero when
      normalizing activations by their variance in batch normalization.
    batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
      activations in the batch normalization layer.

  Returns:
    An `arg_scope` to use for the resnet models.
  """
  batch_norm_params = {
      'is_training': is_training,
      'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon,
      'scale': batch_norm_scale,
      'updates_collections': ops.GraphKeys.UPDATE_OPS,
  }

  with arg_scope(
      [layers_lib.conv2d],
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      weights_initializer=initializers.variance_scaling_initializer(),
      activation_fn=nn_ops.relu,
      normalizer_fn=layers.batch_norm,
      normalizer_params=batch_norm_params):
    with arg_scope([layers.batch_norm], **batch_norm_params):
      # The following implies padding='SAME' for pool1, which makes feature
      # alignment easier for dense prediction tasks. This is also used in
      # https://github.com/facebook/fb.resnet.torch. However the accompanying
      # code of 'Deep Residual Learning for Image Recognition' uses
      # padding='VALID' for pool1. You can switch to that choice by setting
      # tf.contrib.framework.arg_scope([tf.contrib.layers.max_pool2d], padding='VALID').
      with arg_scope([layers.max_pool2d], padding='SAME') as arg_sc:
        return arg_sc 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 52, Source: resnet_utils.py

Example 13: truncated_vgg_16

# Required module import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.layers import max_pool2d [as alias]
def truncated_vgg_16(inputs, is_training=True, scope='vgg_16'):
    """Oxford Net VGG 16-Layers version D Example.

    For use in SSD object detection network, which has this particular
    truncated version of VGG16 detailed in its paper.

    Args:
      inputs: a tensor of size [batch_size, height, width, channels].
      scope: Optional scope for the variables.

    Returns:
      the last op containing the conv5 tensor and end_points dict.
    """
    with variable_scope.variable_scope(scope, 'vgg_16', [inputs]) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        # Collect outputs for conv2d, fully_connected and max_pool2d.
        with arg_scope(
            [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
            outputs_collections=end_points_collection
        ):
            net = layers_lib.repeat(
                inputs, 2, layers.conv2d, 64, [3, 3], scope='conv1')
            net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
            net = layers_lib.repeat(
                net, 2, layers.conv2d, 128, [3, 3], scope='conv2'
            )
            net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
            net = layers_lib.repeat(
                net, 3, layers.conv2d, 256, [3, 3], scope='conv3'
            )
            net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')
            net = layers_lib.repeat(
                net, 3, layers.conv2d, 512, [3, 3], scope='conv4'
            )
            net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')
            net = layers_lib.repeat(
                net, 3, layers.conv2d, 512, [3, 3], scope='conv5'
            )
            # Convert end_points_collection into an end_points dict.
            end_points = utils.convert_collection_to_dict(
                end_points_collection
            )
            return net, end_points 
Developer: Sargunan, Project: Table-Detection-using-Deep-learning, Lines: 45, Source: truncated_vgg.py

Example 14: inference

# Required module import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.layers import max_pool2d [as alias]
def inference(self, mode, inputs, scope='SenseCls'):
        is_training = mode
        with slim.arg_scope(network_arg_scope(is_training=is_training)):
            with tf.variable_scope(scope, reuse=False):
                conv0 = slim.conv2d(inputs,
                                    num_outputs=64,
                                    kernel_size=[7,7],
                                    stride=2,
                                    scope='conv0')
                if PRINT_LAYER_LOG:
                    print(conv0.name, conv0.get_shape())
                pool0 = slim.max_pool2d(conv0, kernel_size=[3, 3], stride=2, scope='pool0')
                if PRINT_LAYER_LOG:
                    print('pool0', pool0.get_shape())

                block0_0 = block(pool0, 64, 1, 'block0_0')
                block0_1 = block(block0_0, 64, 1, 'block0_1')
                block0_2 = block(block0_1, 64, 1, 'block0_2')

                block1_0 = block(block0_2, 128, 2, 'block1_0')
                block1_1 = block(block1_0, 128, 1, 'block1_1')
                block1_2 = block(block1_1, 128, 1, 'block1_2')
                block1_3 = block(block1_2, 128, 1, 'block1_3')

                block2_0 = block(block1_3, 256, 2, 'block2_0')
                block2_1 = block(block2_0, 256, 1, 'block2_1')
                block2_2 = block(block2_1, 256, 1, 'block2_2')
                block2_3 = block(block2_2, 256, 1, 'block2_3')
                block2_4 = block(block2_3, 256, 1, 'block2_4')
                block2_5 = block(block2_4, 256, 1, 'block2_5')

                block3_0 = block(block2_5, 512, 2, 'block3_0')
                block3_1 = block(block3_0, 512, 1, 'block3_1')
                block3_2 = block(block3_1, 512, 1, 'block3_2')

                net = tf.reduce_mean(block3_2, [1, 2], keepdims=True, name='global_pool_v4')
                if PRINT_LAYER_LOG:
                    print('avg_pool', net.get_shape())
                net = slim.flatten(net, scope='PreLogitsFlatten')
                net = slim.dropout(net, 0.8, is_training=is_training, scope='dropout')
                logits = fully_connected(net, cfg.classes, name='fc')
                if PRINT_LAYER_LOG:
                    print('logits', logits.get_shape())
                if is_training:
                    l2_loss = tf.add_n(tf.losses.get_regularization_losses())
                    return logits, l2_loss
                else:
                    return logits 
Developer: vicwer, Project: sense_classification, Lines: 50, Source: network.py


Note: The tensorflow.contrib.layers.python.layers.layers.max_pool2d method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and the source code copyright remains with those authors. Please consult the corresponding project's License before redistributing or using the code; do not reproduce without permission.