

Python ops.pad_to_multiple Method Code Examples

This article collects typical usage examples of the Python method object_detection.utils.ops.pad_to_multiple. If you are wondering how to call ops.pad_to_multiple, how it is used in practice, or what real-world examples look like, the curated code examples below may help. You can also explore further usage examples from the containing module, object_detection.utils.ops.


The sections below present 15 code examples of the ops.pad_to_multiple method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
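Before the collected examples, here is a minimal usage sketch (not taken from any of the projects below, and assuming a TF1-style session consistent with the examples) that illustrates what ops.pad_to_multiple does: it zero-pads the height and width of a [batch, height, width, channels] tensor up to the nearest multiple of the given value.

import tensorflow as tf
from object_detection.utils import ops

# Illustrative sketch only: a 1 x 30 x 30 x 3 image batch is zero-padded so that
# its height and width become the next multiple of 8 (i.e. 32 x 32).
image = tf.constant(1.0, shape=[1, 30, 30, 3])
padded = ops.pad_to_multiple(image, 8)
with tf.Session() as sess:
  print(sess.run(tf.shape(padded)))  # expected: [ 1 32 32  3]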

Example 1: _extract_features

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import pad_to_multiple [as alias]
def _extract_features(self, preprocessed_inputs):
    """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]
    """
    preprocessed_inputs = shape_utils.check_min_image_dim(
        33, preprocessed_inputs)

    image_features = self.mobilenet_v2(
        ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple))

    feature_maps = self.feature_map_generator({
        'layer_15/expansion_output': image_features[0],
        'layer_19': image_features[1]})

    return feature_maps.values() 
Developer: ahmetozlu, Project: vehicle_counting_tensorflow, Lines of code: 24, Source file: ssd_mobilenet_v2_keras_feature_extractor.py

Example 2: test_zero_padding

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import pad_to_multiple [as alias]
def test_zero_padding(self):
    tensor = tf.constant([[[[0.], [0.]], [[0.], [0.]]]])
    padded_tensor = ops.pad_to_multiple(tensor, 1)
    with self.test_session() as sess:
      padded_tensor_out = sess.run(padded_tensor)
    self.assertEqual((1, 2, 2, 1), padded_tensor_out.shape) 
Developer: ringringyi, Project: DOTA_models, Lines of code: 8, Source file: ops_test.py

Example 3: test_no_padding

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import pad_to_multiple [as alias]
def test_no_padding(self):
    tensor = tf.constant([[[[0.], [0.]], [[0.], [0.]]]])
    padded_tensor = ops.pad_to_multiple(tensor, 2)
    with self.test_session() as sess:
      padded_tensor_out = sess.run(padded_tensor)
    self.assertEqual((1, 2, 2, 1), padded_tensor_out.shape) 
Developer: ringringyi, Project: DOTA_models, Lines of code: 8, Source file: ops_test.py

Example 4: test_padding

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import pad_to_multiple [as alias]
def test_padding(self):
    tensor = tf.constant([[[[0.], [0.]], [[0.], [0.]]]])
    padded_tensor = ops.pad_to_multiple(tensor, 4)
    with self.test_session() as sess:
      padded_tensor_out = sess.run(padded_tensor)
    self.assertEqual((1, 4, 4, 1), padded_tensor_out.shape) 
Developer: ringringyi, Project: DOTA_models, Lines of code: 8, Source file: ops_test.py

Example 5: test_non_square_padding

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import pad_to_multiple [as alias]
def test_non_square_padding(self):
    tensor = tf.constant([[[[0.], [0.]]]])
    padded_tensor = ops.pad_to_multiple(tensor, 2)
    with self.test_session() as sess:
      padded_tensor_out = sess.run(padded_tensor)
    self.assertEqual((1, 2, 2, 1), padded_tensor_out.shape) 
Developer: ahmetozlu, Project: vehicle_counting_tensorflow, Lines of code: 8, Source file: ops_test.py

Example 6: extract_features

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import pad_to_multiple [as alias]
def extract_features(self, preprocessed_inputs):
    """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]
    """
    preprocessed_inputs = shape_utils.check_min_image_dim(
        33, preprocessed_inputs)

    with tf.variable_scope('MobilenetV1',
                           reuse=self._reuse_weights) as scope:
      with slim.arg_scope(
          mobilenet_v1.mobilenet_v1_arg_scope(
              is_training=None, regularize_depthwise=True)):
        with (slim.arg_scope(self._conv_hyperparams_fn())
              if self._override_base_feature_extractor_hyperparams
              else context_manager.IdentityContextManager()):
          _, image_features = mobilenet_v1.mobilenet_v1_base(
              ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
              final_endpoint='Conv2d_13_pointwise',
              min_depth=self._min_depth,
              depth_multiplier=self._depth_multiplier,
              use_explicit_padding=self._use_explicit_padding,
              scope=scope)
      with slim.arg_scope(self._conv_hyperparams_fn()):
        feature_maps = feature_map_generators.pooling_pyramid_feature_maps(
            base_feature_map_depth=0,
            num_layers=6,
            image_features={
                'image_features': image_features['Conv2d_11_pointwise']
            })
    return feature_maps.values() 
Developer: ahmetozlu, Project: vehicle_counting_tensorflow, Lines of code: 39, Source file: ssd_mobilenet_v1_ppn_feature_extractor.py

Example 7: __init__

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import pad_to_multiple [as alias]
def __init__(self,
               is_training,
               depth_multiplier,
               min_depth,
               pad_to_multiple,
               conv_hyperparams_fn,
               reuse_weights=None,
               use_explicit_padding=False,
               use_depthwise=False,
               override_base_feature_extractor_hyperparams=False):
    """Resnet50 v1 Feature Extractor for SSD Models.

    Args:
      is_training: whether the network is in training mode.
      depth_multiplier: float depth multiplier for feature extractor.
      min_depth: minimum feature extractor depth.
      pad_to_multiple: the nearest multiple to zero pad the input height and
        width dimensions to.
      conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
        and separable_conv2d ops in the layers that are added on top of the
        base feature extractor.
      reuse_weights: Whether to reuse variables. Default is None.
      use_explicit_padding: Whether to use explicit padding when extracting
        features. Default is False.
      use_depthwise: Whether to use depthwise convolutions. Default is False.
      override_base_feature_extractor_hyperparams: Whether to override
        hyperparameters of the base feature extractor with the one from
        `conv_hyperparams_fn`.
    """
    super(SSDResnet50V1PpnFeatureExtractor, self).__init__(
        is_training, depth_multiplier, min_depth, pad_to_multiple,
        conv_hyperparams_fn, resnet_v1.resnet_v1_50, 'resnet_v1_50',
        reuse_weights, use_explicit_padding, use_depthwise,
        override_base_feature_extractor_hyperparams=(
            override_base_feature_extractor_hyperparams)) 
Developer: ahmetozlu, Project: vehicle_counting_tensorflow, Lines of code: 37, Source file: ssd_resnet_v1_ppn_feature_extractor.py

Example 8: __init__

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import pad_to_multiple [as alias]
def __init__(self,
               is_training,
               depth_multiplier,
               min_depth,
               pad_to_multiple,
               conv_hyperparams_fn,
               reuse_weights=None,
               use_explicit_padding=False,
               use_depthwise=False,
               override_base_feature_extractor_hyperparams=False):
    """MobileNetV1 Feature Extractor for Embedded-friendly SSD Models.

    Args:
      is_training: whether the network is in training mode.
      depth_multiplier: float depth multiplier for feature extractor.
      min_depth: minimum feature extractor depth.
      pad_to_multiple: the nearest multiple to zero pad the input height and
        width dimensions to. For EmbeddedSSD it must be set to 1.
      conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
        and separable_conv2d ops in the layers that are added on top of the
        base feature extractor.
      reuse_weights: Whether to reuse variables. Default is None.
      use_explicit_padding: Whether to use explicit padding when extracting
        features. Default is False.
      use_depthwise: Whether to use depthwise convolutions. Default is False.
      override_base_feature_extractor_hyperparams: Whether to override
        hyperparameters of the base feature extractor with the one from
        `conv_hyperparams_fn`.

    Raises:
      ValueError: upon invalid `pad_to_multiple` values.
    """
    if pad_to_multiple != 1:
      raise ValueError('Embedded-specific SSD only supports `pad_to_multiple` '
                       'of 1.')

    super(EmbeddedSSDMobileNetV1FeatureExtractor, self).__init__(
        is_training, depth_multiplier, min_depth, pad_to_multiple,
        conv_hyperparams_fn, reuse_weights, use_explicit_padding, use_depthwise,
        override_base_feature_extractor_hyperparams) 
Developer: ahmetozlu, Project: vehicle_counting_tensorflow, Lines of code: 42, Source file: embedded_ssd_mobilenet_v1_feature_extractor.py

Example 9: __init__

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import pad_to_multiple [as alias]
def __init__(self,
               is_training,
               depth_multiplier,
               min_depth,
               pad_to_multiple,
               conv_hyperparams_fn,
               reuse_weights=None,
               use_explicit_padding=False,
               use_depthwise=False,
               override_base_feature_extractor_hyperparams=False):
    """MobileNetV1 Feature Extractor for SSD Models.

    Args:
      is_training: whether the network is in training mode.
      depth_multiplier: float depth multiplier for feature extractor.
      min_depth: minimum feature extractor depth.
      pad_to_multiple: the nearest multiple to zero pad the input height and
        width dimensions to.
      conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
        and separable_conv2d ops in the layers that are added on top of the
        base feature extractor.
      reuse_weights: Whether to reuse variables. Default is None.
      use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
        inputs so that the output dimensions are the same as if 'SAME' padding
        were used.
      use_depthwise: Whether to use depthwise convolutions. Default is False.
      override_base_feature_extractor_hyperparams: Whether to override
        hyperparameters of the base feature extractor with the one from
        `conv_hyperparams_fn`.
    """
    super(SSDMobileNetV1FeatureExtractor, self).__init__(
        is_training=is_training,
        depth_multiplier=depth_multiplier,
        min_depth=min_depth,
        pad_to_multiple=pad_to_multiple,
        conv_hyperparams_fn=conv_hyperparams_fn,
        reuse_weights=reuse_weights,
        use_explicit_padding=use_explicit_padding,
        use_depthwise=use_depthwise,
        override_base_feature_extractor_hyperparams=
        override_base_feature_extractor_hyperparams) 
Developer: ahmetozlu, Project: vehicle_counting_tensorflow, Lines of code: 43, Source file: ssd_mobilenet_v1_feature_extractor.py

Example 10: extract_features

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import pad_to_multiple [as alias]
def extract_features(self, preprocessed_inputs):
    """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]
    """
    preprocessed_inputs = shape_utils.check_min_image_dim(
        33, preprocessed_inputs)

    feature_map_layout = {
        'from_layer': ['Mixed_4c', 'Mixed_5c', '', '', '', ''],
        'layer_depth': [-1, -1, 512, 256, 256, 128],
        'use_explicit_padding': self._use_explicit_padding,
        'use_depthwise': self._use_depthwise,
    }

    with slim.arg_scope(self._conv_hyperparams_fn()):
      with tf.variable_scope('InceptionV2',
                             reuse=self._reuse_weights) as scope:
        _, image_features = inception_v2.inception_v2_base(
            ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
            final_endpoint='Mixed_5c',
            min_depth=self._min_depth,
            depth_multiplier=self._depth_multiplier,
            scope=scope)
        feature_maps = feature_map_generators.multi_resolution_feature_maps(
            feature_map_layout=feature_map_layout,
            depth_multiplier=self._depth_multiplier,
            min_depth=self._min_depth,
            insert_1x1_conv=True,
            image_features=image_features)

    return feature_maps.values() 
Developer: ahmetozlu, Project: vehicle_counting_tensorflow, Lines of code: 40, Source file: ssd_inception_v2_feature_extractor.py

Example 11: __init__

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import pad_to_multiple [as alias]
def __init__(self,
               is_training,
               depth_multiplier,
               min_depth,
               pad_to_multiple,
               conv_hyperparams,
               batch_norm_trainable=True,
               reuse_weights=None,
               use_explicit_padding=False,
               use_depthwise=False):
    """MobileNetV1 Feature Extractor for Embedded-friendly SSD Models.

    Args:
      is_training: whether the network is in training mode.
      depth_multiplier: float depth multiplier for feature extractor.
      min_depth: minimum feature extractor depth.
      pad_to_multiple: the nearest multiple to zero pad the input height and
        width dimensions to. For EmbeddedSSD it must be set to 1.
      conv_hyperparams: tf slim arg_scope for conv2d and separable_conv2d ops.
      batch_norm_trainable:  Whether to update batch norm parameters during
        training or not. When training with a small batch size
        (e.g. 1), it is desirable to disable batch norm update and use
        pretrained batch norm params.
      reuse_weights: Whether to reuse variables. Default is None.
      use_explicit_padding: Whether to use explicit padding when extracting
        features. Default is False.
      use_depthwise: Whether to use depthwise convolutions. Default is False.

    Raises:
      ValueError: upon invalid `pad_to_multiple` values.
    """
    if pad_to_multiple != 1:
      raise ValueError('Embedded-specific SSD only supports `pad_to_multiple` '
                       'of 1.')

    super(EmbeddedSSDMobileNetV1FeatureExtractor, self).__init__(
        is_training, depth_multiplier, min_depth, pad_to_multiple,
        conv_hyperparams, batch_norm_trainable, reuse_weights,
        use_explicit_padding, use_depthwise) 
Developer: cagbal, Project: ros_people_object_detection_tensorflow, Lines of code: 41, Source file: embedded_ssd_mobilenet_v1_feature_extractor.py

Example 12: __init__

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import pad_to_multiple [as alias]
def __init__(self,
               is_training,
               depth_multiplier,
               min_depth,
               pad_to_multiple,
               conv_hyperparams,
               batch_norm_trainable=True,
               reuse_weights=None,
               use_explicit_padding=False,
               use_depthwise=False):
    """MobileNetV1 Feature Extractor for SSD Models.

    Args:
      is_training: whether the network is in training mode.
      depth_multiplier: float depth multiplier for feature extractor.
      min_depth: minimum feature extractor depth.
      pad_to_multiple: the nearest multiple to zero pad the input height and
        width dimensions to.
      conv_hyperparams: tf slim arg_scope for conv2d and separable_conv2d ops.
      batch_norm_trainable: Whether to update batch norm parameters during
        training or not. When training with a small batch size
        (e.g. 1), it is desirable to disable batch norm update and use
        pretrained batch norm params.
      reuse_weights: Whether to reuse variables. Default is None.
      use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
        inputs so that the output dimensions are the same as if 'SAME' padding
        were used.
      use_depthwise: Whether to use depthwise convolutions. Default is False.
    """
    super(SSDMobileNetV1FeatureExtractor, self).__init__(
        is_training, depth_multiplier, min_depth, pad_to_multiple,
        conv_hyperparams, batch_norm_trainable, reuse_weights,
        use_explicit_padding, use_depthwise) 
Developer: cagbal, Project: ros_people_object_detection_tensorflow, Lines of code: 35, Source file: ssd_mobilenet_v1_feature_extractor.py

Example 13: __init__

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import pad_to_multiple [as alias]
def __init__(self,
               is_training,
               depth_multiplier,
               min_depth,
               pad_to_multiple,
               conv_hyperparams,
               batch_norm_trainable=True,
               reuse_weights=None,
               use_explicit_padding=False,
               use_depthwise=False):
    """Resnet101 v1 FPN Feature Extractor for SSD Models.

    Args:
      is_training: whether the network is in training mode.
      depth_multiplier: float depth multiplier for feature extractor.
      min_depth: minimum feature extractor depth.
      pad_to_multiple: the nearest multiple to zero pad the input height and
        width dimensions to.
      conv_hyperparams: tf slim arg_scope for conv2d and separable_conv2d ops.
      batch_norm_trainable: Whether to update batch norm parameters during
        training or not. When training with a small batch size
        (e.g. 1), it is desirable to disable batch norm update and use
        pretrained batch norm params.
      reuse_weights: Whether to reuse variables. Default is None.
      use_explicit_padding: Whether to use explicit padding when extracting
        features. Default is False. UNUSED currently.
      use_depthwise: Whether to use depthwise convolutions. UNUSED currently.
    """
    super(SSDResnet101V1FpnFeatureExtractor, self).__init__(
        is_training, depth_multiplier, min_depth, pad_to_multiple,
        conv_hyperparams, resnet_v1.resnet_v1_101, 'resnet_v1_101', 'fpn',
        batch_norm_trainable, reuse_weights, use_explicit_padding) 
Developer: cagbal, Project: ros_people_object_detection_tensorflow, Lines of code: 34, Source file: ssd_resnet_v1_fpn_feature_extractor.py

Example 14: __init__

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import pad_to_multiple [as alias]
def __init__(self,
               is_training,
               depth_multiplier,
               min_depth,
               pad_to_multiple,
               conv_hyperparams,
               batch_norm_trainable=True,
               reuse_weights=None,
               use_explicit_padding=False,
               use_depthwise=False):
    """InceptionV2 Feature Extractor for SSD Models.

    Args:
      is_training: whether the network is in training mode.
      depth_multiplier: float depth multiplier for feature extractor.
      min_depth: minimum feature extractor depth.
      pad_to_multiple: the nearest multiple to zero pad the input height and
        width dimensions to.
      conv_hyperparams: tf slim arg_scope for conv2d and separable_conv2d ops.
      batch_norm_trainable: Whether to update batch norm parameters during
        training or not. When training with a small batch size
        (e.g. 1), it is desirable to disable batch norm update and use
        pretrained batch norm params.
      reuse_weights: Whether to reuse variables. Default is None.
      use_explicit_padding: Whether to use explicit padding when extracting
        features. Default is False.
      use_depthwise: Whether to use depthwise convolutions. Default is False.
    """
    super(SSDInceptionV2FeatureExtractor, self).__init__(
        is_training, depth_multiplier, min_depth, pad_to_multiple,
        conv_hyperparams, batch_norm_trainable, reuse_weights,
        use_explicit_padding, use_depthwise) 
Developer: cagbal, Project: ros_people_object_detection_tensorflow, Lines of code: 34, Source file: ssd_inception_v2_feature_extractor.py

Example 15: extract_features

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import pad_to_multiple [as alias]
def extract_features(self, preprocessed_inputs):
    """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]
    """
    preprocessed_inputs = shape_utils.check_min_image_dim(
        33, preprocessed_inputs)

    feature_map_layout = {
        'from_layer': ['Mixed_4c', 'Mixed_5c', '', '', '', ''],
        'layer_depth': [-1, -1, 512, 256, 256, 128],
        'use_explicit_padding': self._use_explicit_padding,
        'use_depthwise': self._use_depthwise,
    }

    with slim.arg_scope(self._conv_hyperparams):
      with tf.variable_scope('InceptionV2',
                             reuse=self._reuse_weights) as scope:
        _, image_features = inception_v2.inception_v2_base(
            ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
            final_endpoint='Mixed_5c',
            min_depth=self._min_depth,
            depth_multiplier=self._depth_multiplier,
            scope=scope)
        feature_maps = feature_map_generators.multi_resolution_feature_maps(
            feature_map_layout=feature_map_layout,
            depth_multiplier=self._depth_multiplier,
            min_depth=self._min_depth,
            insert_1x1_conv=True,
            image_features=image_features)

    return feature_maps.values() 
Developer: cagbal, Project: ros_people_object_detection_tensorflow, Lines of code: 40, Source file: ssd_inception_v2_feature_extractor.py


Note: The object_detection.utils.ops.pad_to_multiple examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.