

Python feature_map_generators.fpn_top_down_feature_maps Method Code Examples

This article collects and summarizes typical usage examples of the Python method object_detection.models.feature_map_generators.fpn_top_down_feature_maps. If you are wondering exactly how to call feature_map_generators.fpn_top_down_feature_maps, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the module it belongs to, object_detection.models.feature_map_generators.


The following presents 11 code examples of the feature_map_generators.fpn_top_down_feature_maps method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
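Before the individual examples, here is a condensed, self-contained usage sketch assembled from the examples on this page. The keyword arguments shown are the ones exercised across Examples 1-11; the exact keyword set and defaults may vary between object_detection versions, so treat this as an illustration rather than the definitive signature.

import tensorflow as tf
from object_detection.models import feature_map_generators

# Backbone feature maps, ordered from highest to lowest spatial resolution.
image_features = [
    ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
    ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
    ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
]
feature_maps = feature_map_generators.fpn_top_down_feature_maps(
    image_features=image_features,
    depth=128,                     # channel depth of every output map
    use_depthwise=False,           # depthwise-separable convs (see Example 2)
    use_explicit_padding=False,    # pad manually and use VALID convolutions
    use_bounded_activations=False,
    use_native_resize_op=False,    # nearest-neighbor resize op for upsampling
    scope=None)                    # optional variable scope (see Examples 6, 8)
# Returns an ordered dict of top-down maps, one per input, e.g.
# {'top_down_block2': <4x8x8x128>, 'top_down_block3': <4x4x4x128>, ...}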

Example 1: test_get_expected_feature_map_shapes

# Required import: from object_detection.models import feature_map_generators [as alias]
# Or: from object_detection.models.feature_map_generators import fpn_top_down_feature_maps [as alias]
def test_get_expected_feature_map_shapes(self):
    image_features = [
        ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
        ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
        ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
        ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
    ]
    feature_maps = feature_map_generators.fpn_top_down_feature_maps(
        image_features=image_features, depth=128)

    expected_feature_map_shapes = {
        'top_down_block2': (4, 8, 8, 128),
        'top_down_block3': (4, 4, 4, 128),
        'top_down_block4': (4, 2, 2, 128),
        'top_down_block5': (4, 1, 1, 128)
    }

    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      out_feature_maps = sess.run(feature_maps)
      out_feature_map_shapes = {key: value.shape
                                for key, value in out_feature_maps.items()}
      self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes) 
Developer: ahmetozlu, Project: vehicle_counting_tensorflow, Lines: 26, Source: feature_map_generators_test.py

Example 2: test_get_expected_feature_map_shapes_with_depthwise

# Required import: from object_detection.models import feature_map_generators [as alias]
# Or: from object_detection.models.feature_map_generators import fpn_top_down_feature_maps [as alias]
def test_get_expected_feature_map_shapes_with_depthwise(self):
    image_features = [
        ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
        ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
        ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
        ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
    ]
    feature_maps = feature_map_generators.fpn_top_down_feature_maps(
        image_features=image_features, depth=128, use_depthwise=True)

    expected_feature_map_shapes = {
        'top_down_block2': (4, 8, 8, 128),
        'top_down_block3': (4, 4, 4, 128),
        'top_down_block4': (4, 2, 2, 128),
        'top_down_block5': (4, 1, 1, 128)
    }

    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      out_feature_maps = sess.run(feature_maps)
      out_feature_map_shapes = {key: value.shape
                                for key, value in out_feature_maps.items()}
      self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes) 
Developer: ahmetozlu, Project: vehicle_counting_tensorflow, Lines: 26, Source: feature_map_generators_test.py

Example 3: test_get_expected_feature_map_shapes

# Required import: from object_detection.models import feature_map_generators [as alias]
# Or: from object_detection.models.feature_map_generators import fpn_top_down_feature_maps [as alias]
def test_get_expected_feature_map_shapes(self):
    image_features = [
        tf.random_uniform([4, 8, 8, 256], dtype=tf.float32),
        tf.random_uniform([4, 4, 4, 256], dtype=tf.float32),
        tf.random_uniform([4, 2, 2, 256], dtype=tf.float32),
        tf.random_uniform([4, 1, 1, 256], dtype=tf.float32),
    ]
    feature_maps = feature_map_generators.fpn_top_down_feature_maps(
        image_features=image_features, depth=128)

    expected_feature_map_shapes = {
        'top_down_feature_map_0': (4, 8, 8, 128),
        'top_down_feature_map_1': (4, 4, 4, 128),
        'top_down_feature_map_2': (4, 2, 2, 128),
        'top_down_feature_map_3': (4, 1, 1, 128)
    }

    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      out_feature_maps = sess.run(feature_maps)
      out_feature_map_shapes = {key: value.shape
                                for key, value in out_feature_maps.items()}
      self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes) 
Developer: cagbal, Project: ros_people_object_detection_tensorflow, Lines: 26, Source: feature_map_generators_test.py
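Note: this example appears to target an older snapshot of the object_detection API in which fpn_top_down_feature_maps accepted a plain list of tensors and emitted enumerated output keys ('top_down_feature_map_0', ...); newer versions expect (name, tensor) tuples and key the outputs as 'top_down_<name>', as in Examples 1 and 2.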

Example 4: _build_feature_map_generator

# Required import: from object_detection.models import feature_map_generators [as alias]
# Or: from object_detection.models.feature_map_generators import fpn_top_down_feature_maps [as alias]
def _build_feature_map_generator(
      self, image_features, depth, use_keras, use_bounded_activations=False,
      use_native_resize_op=False, use_explicit_padding=False,
      use_depthwise=False):
    if use_keras:
      return feature_map_generators.KerasFpnTopDownFeatureMaps(
          num_levels=len(image_features),
          depth=depth,
          is_training=True,
          conv_hyperparams=self._build_conv_hyperparams(),
          freeze_batchnorm=False,
          use_depthwise=use_depthwise,
          use_explicit_padding=use_explicit_padding,
          use_bounded_activations=use_bounded_activations,
          use_native_resize_op=use_native_resize_op,
          scope=None,
          name='FeatureMaps',
      )
    else:
      def feature_map_generator(image_features):
        return feature_map_generators.fpn_top_down_feature_maps(
            image_features=image_features,
            depth=depth,
            use_depthwise=use_depthwise,
            use_explicit_padding=use_explicit_padding,
            use_bounded_activations=use_bounded_activations,
            use_native_resize_op=use_native_resize_op)
      return feature_map_generator 
Developer: ShivangShekhar, Project: Live-feed-object-device-identification-using-Tensorflow-and-OpenCV, Lines: 30, Source: feature_map_generators_test.py

Example 5: _build_feature_map_generator

# Required import: from object_detection.models import feature_map_generators [as alias]
# Or: from object_detection.models.feature_map_generators import fpn_top_down_feature_maps [as alias]
def _build_feature_map_generator(
      self, image_features, depth, use_bounded_activations=False,
      use_native_resize_op=False, use_explicit_padding=False,
      use_depthwise=False):
    if tf_version.is_tf2():
      return feature_map_generators.KerasFpnTopDownFeatureMaps(
          num_levels=len(image_features),
          depth=depth,
          is_training=True,
          conv_hyperparams=self._build_conv_hyperparams(),
          freeze_batchnorm=False,
          use_depthwise=use_depthwise,
          use_explicit_padding=use_explicit_padding,
          use_bounded_activations=use_bounded_activations,
          use_native_resize_op=use_native_resize_op,
          scope=None,
          name='FeatureMaps',
      )
    else:
      def feature_map_generator(image_features):
        return feature_map_generators.fpn_top_down_feature_maps(
            image_features=image_features,
            depth=depth,
            use_depthwise=use_depthwise,
            use_explicit_padding=use_explicit_padding,
            use_bounded_activations=use_bounded_activations,
            use_native_resize_op=use_native_resize_op)
      return feature_map_generator 
Developer: tensorflow, Project: models, Lines: 30, Source: feature_map_generators_test.py

Example 6: extract_features

# Required import: from object_detection.models import feature_map_generators [as alias]
# Or: from object_detection.models.feature_map_generators import fpn_top_down_feature_maps [as alias]
def extract_features(self, preprocessed_inputs):
    """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]

    Raises:
      ValueError: depth multiplier is not supported.
    """
    if self._depth_multiplier != 1.0:
      raise ValueError('Depth multiplier not supported.')

    preprocessed_inputs = shape_utils.check_min_image_dim(
        129, preprocessed_inputs)

    with tf.variable_scope(
        self._resnet_scope_name, reuse=self._reuse_weights) as scope:
      with slim.arg_scope(resnet_v1.resnet_arg_scope()):
        _, image_features = self._resnet_base_fn(
            inputs=ops.pad_to_multiple(preprocessed_inputs,
                                       self._pad_to_multiple),
            num_classes=None,
            is_training=self._is_training and self._batch_norm_trainable,
            global_pool=False,
            output_stride=None,
            store_non_strided_activations=True,
            scope=scope)
      image_features = self._filter_features(image_features)
      last_feature_map = image_features['block4']
    with tf.variable_scope(self._fpn_scope_name, reuse=self._reuse_weights):
      with slim.arg_scope(self._conv_hyperparams):
        for i in range(5, 7):
          last_feature_map = slim.conv2d(
              last_feature_map,
              num_outputs=256,
              kernel_size=[3, 3],
              stride=2,
              padding='SAME',
              scope='block{}'.format(i))
          image_features['bottomup_{}'.format(i)] = last_feature_map
        feature_maps = feature_map_generators.fpn_top_down_feature_maps(
            [
                image_features[key] for key in
                ['block2', 'block3', 'block4', 'bottomup_5', 'bottomup_6']
            ],
            depth=256,
            scope='top_down_features')
    return feature_maps.values() 
Developer: cagbal, Project: ros_people_object_detection_tensorflow, Lines: 55, Source: ssd_resnet_v1_fpn_feature_extractor.py

Example 7: extract_features

# Required import: from object_detection.models import feature_map_generators [as alias]
# Or: from object_detection.models.feature_map_generators import fpn_top_down_feature_maps [as alias]
def extract_features(self, preprocessed_inputs):
    """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]

    Raises:
      ValueError: depth multiplier is not supported.
    """
    if self._depth_multiplier != 1.0:
      raise ValueError('Depth multiplier not supported.')

    preprocessed_inputs = shape_utils.check_min_image_dim(
        129, preprocessed_inputs)

    with tf.variable_scope(
        self._resnet_scope_name, reuse=self._reuse_weights) as scope:
      with slim.arg_scope(resnet_v1.resnet_arg_scope()):
        with (slim.arg_scope(self._conv_hyperparams_fn())
              if self._override_base_feature_extractor_hyperparams else
              context_manager.IdentityContextManager()):
          _, image_features = self._resnet_base_fn(
              inputs=ops.pad_to_multiple(preprocessed_inputs,
                                         self._pad_to_multiple),
              num_classes=None,
              is_training=None,
              global_pool=False,
              output_stride=None,
              store_non_strided_activations=True,
              scope=scope)
          image_features = self._filter_features(image_features)
      with slim.arg_scope(self._conv_hyperparams_fn()):
        with tf.variable_scope(self._fpn_scope_name,
                               reuse=self._reuse_weights):
          fpn_features = feature_map_generators.fpn_top_down_feature_maps(
              [(key, image_features[key])
               for key in ['block2', 'block3', 'block4']],
              depth=256)
          last_feature_map = fpn_features['top_down_block4']
          coarse_features = {}
          for i in range(5, 7):
            last_feature_map = slim.conv2d(
                last_feature_map,
                num_outputs=256,
                kernel_size=[3, 3],
                stride=2,
                padding='SAME',
                scope='bottom_up_block{}'.format(i))
            coarse_features['bottom_up_block{}'.format(i)] = last_feature_map
    return [fpn_features['top_down_block2'],
            fpn_features['top_down_block3'],
            fpn_features['top_down_block4'],
            coarse_features['bottom_up_block5'],
            coarse_features['bottom_up_block6']] 
Developer: ambakick, Project: Person-Detection-and-Tracking, Lines: 61, Source: ssd_resnet_v1_fpn_feature_extractor.py
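Note the contrast with Examples 6 and 8: there, the extra stride-2 'bottomup' blocks are appended to image_features before the FPN call, so they pass through the top-down pathway; here, the FPN is built over block2-block4 only, and the coarse bottom_up_block5/6 maps are generated afterwards, outside the top-down path, then concatenated into the returned list.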

Example 8: extract_features

# Required import: from object_detection.models import feature_map_generators [as alias]
# Or: from object_detection.models.feature_map_generators import fpn_top_down_feature_maps [as alias]
def extract_features(self, preprocessed_inputs):
    """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]

    Raises:
      ValueError: depth multiplier is not supported.
    """
    if self._depth_multiplier != 1.0:
      raise ValueError('Depth multiplier not supported.')

    preprocessed_inputs = shape_utils.check_min_image_dim(
        129, preprocessed_inputs)

    with tf.variable_scope(
        self._resnet_scope_name, reuse=self._reuse_weights) as scope:
      with slim.arg_scope(resnet_v1.resnet_arg_scope()):
        with (slim.arg_scope(self._conv_hyperparams_fn())
              if self._override_base_feature_extractor_hyperparams else
              context_manager.IdentityContextManager()):
          _, image_features = self._resnet_base_fn(
              inputs=ops.pad_to_multiple(preprocessed_inputs,
                                         self._pad_to_multiple),
              num_classes=None,
              is_training=None,
              global_pool=False,
              output_stride=None,
              store_non_strided_activations=True,
              scope=scope)
      image_features = self._filter_features(image_features)
      last_feature_map = image_features['block4']
    with tf.variable_scope(self._fpn_scope_name, reuse=self._reuse_weights):
      with slim.arg_scope(self._conv_hyperparams_fn()):
        for i in range(5, 7):
          last_feature_map = slim.conv2d(
              last_feature_map,
              num_outputs=256,
              kernel_size=[3, 3],
              stride=2,
              padding='SAME',
              scope='block{}'.format(i))
          image_features['bottomup_{}'.format(i)] = last_feature_map
        feature_maps = feature_map_generators.fpn_top_down_feature_maps(
            [
                image_features[key] for key in
                ['block2', 'block3', 'block4', 'bottomup_5', 'bottomup_6']
            ],
            depth=256,
            scope='top_down_features')
    return feature_maps.values() 
Developer: itsamitgoel, Project: Gun-Detector, Lines: 58, Source: ssd_resnet_v1_fpn_feature_extractor.py

Example 9: extract_features

# Required import: from object_detection.models import feature_map_generators [as alias]
# Or: from object_detection.models.feature_map_generators import fpn_top_down_feature_maps [as alias]
def extract_features(self, preprocessed_inputs):
    """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]
    """
    preprocessed_inputs = shape_utils.check_min_image_dim(
        33, preprocessed_inputs)

    with tf.variable_scope('MobilenetV1',
                           reuse=self._reuse_weights) as scope:
      with slim.arg_scope(
          mobilenet_v1.mobilenet_v1_arg_scope(
              is_training=None, regularize_depthwise=True)):
        with (slim.arg_scope(self._conv_hyperparams_fn())
              if self._override_base_feature_extractor_hyperparams
              else context_manager.IdentityContextManager()):
          _, image_features = mobilenet_v1.mobilenet_v1_base(
              ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
              final_endpoint='Conv2d_13_pointwise',
              min_depth=self._min_depth,
              depth_multiplier=self._depth_multiplier,
              use_explicit_padding=self._use_explicit_padding,
              scope=scope)

      depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth)
      with slim.arg_scope(self._conv_hyperparams_fn()):
        with tf.variable_scope('fpn', reuse=self._reuse_weights):
          feature_blocks = [
              'Conv2d_3_pointwise', 'Conv2d_5_pointwise', 'Conv2d_11_pointwise',
              'Conv2d_13_pointwise'
          ]
          base_fpn_max_level = min(self._fpn_max_level, 5)
          feature_block_list = []
          for level in range(self._fpn_min_level, base_fpn_max_level + 1):
            feature_block_list.append(feature_blocks[level - 2])
          fpn_features = feature_map_generators.fpn_top_down_feature_maps(
              [(key, image_features[key]) for key in feature_block_list],
              depth=depth_fn(256))
          feature_maps = []
          for level in range(self._fpn_min_level, base_fpn_max_level + 1):
            feature_maps.append(fpn_features['top_down_{}'.format(
                feature_blocks[level - 2])])
          last_feature_map = fpn_features['top_down_{}'.format(
              feature_blocks[base_fpn_max_level - 2])]
          # Construct coarse features
          for i in range(base_fpn_max_level + 1, self._fpn_max_level + 1):
            last_feature_map = slim.conv2d(
                last_feature_map,
                num_outputs=depth_fn(256),
                kernel_size=[3, 3],
                stride=2,
                padding='SAME',
                scope='bottom_up_Conv2d_{}'.format(i - base_fpn_max_level + 13))
            feature_maps.append(last_feature_map)
    return feature_maps 
Developer: BMW-InnovationLab, Project: BMW-TensorFlow-Training-GUI, Lines: 63, Source: ssd_mobilenet_v1_fpn_feature_extractor.py

Example 10: extract_features

# Required import: from object_detection.models import feature_map_generators [as alias]
# Or: from object_detection.models.feature_map_generators import fpn_top_down_feature_maps [as alias]
def extract_features(self, preprocessed_inputs):
    """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]
    """
    preprocessed_inputs = shape_utils.check_min_image_dim(
        129, preprocessed_inputs)

    with tf.variable_scope(
        self._resnet_scope_name, reuse=self._reuse_weights) as scope:
      with slim.arg_scope(resnet_v1.resnet_arg_scope()):
        with (slim.arg_scope(self._conv_hyperparams_fn())
              if self._override_base_feature_extractor_hyperparams else
              context_manager.IdentityContextManager()):
          _, image_features = self._resnet_base_fn(
              inputs=ops.pad_to_multiple(preprocessed_inputs,
                                         self._pad_to_multiple),
              num_classes=None,
              is_training=None,
              global_pool=False,
              output_stride=None,
              store_non_strided_activations=True,
              min_base_depth=self._min_depth,
              depth_multiplier=self._depth_multiplier,
              scope=scope)
          image_features = self._filter_features(image_features)
      depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth)
      with slim.arg_scope(self._conv_hyperparams_fn()):
        with tf.variable_scope(self._fpn_scope_name,
                               reuse=self._reuse_weights):
          base_fpn_max_level = min(self._fpn_max_level, 5)
          feature_block_list = []
          for level in range(self._fpn_min_level, base_fpn_max_level + 1):
            feature_block_list.append('block{}'.format(level - 1))
          fpn_features = feature_map_generators.fpn_top_down_feature_maps(
              [(key, image_features[key]) for key in feature_block_list],
              depth=depth_fn(self._additional_layer_depth))
          feature_maps = []
          for level in range(self._fpn_min_level, base_fpn_max_level + 1):
            feature_maps.append(
                fpn_features['top_down_block{}'.format(level - 1)])
          last_feature_map = fpn_features['top_down_block{}'.format(
              base_fpn_max_level - 1)]
          # Construct coarse features
          for i in range(base_fpn_max_level, self._fpn_max_level):
            last_feature_map = slim.conv2d(
                last_feature_map,
                num_outputs=depth_fn(self._additional_layer_depth),
                kernel_size=[3, 3],
                stride=2,
                padding='SAME',
                scope='bottom_up_block{}'.format(i))
            feature_maps.append(last_feature_map)
    return feature_maps 
Developer: ShivangShekhar, Project: Live-feed-object-device-identification-using-Tensorflow-and-OpenCV, Lines: 62, Source: ssd_resnet_v1_fpn_feature_extractor.py

Example 11: extract_features

# Required import: from object_detection.models import feature_map_generators [as alias]
# Or: from object_detection.models.feature_map_generators import fpn_top_down_feature_maps [as alias]
def extract_features(self, preprocessed_inputs):
    """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]
    """
    preprocessed_inputs = shape_utils.check_min_image_dim(
        129, preprocessed_inputs)

    with tf.variable_scope(
        self._resnet_scope_name, reuse=self._reuse_weights) as scope:
      with slim.arg_scope(resnet_v1.resnet_arg_scope()):
        with (slim.arg_scope(self._conv_hyperparams_fn())
              if self._override_base_feature_extractor_hyperparams else
              context_manager.IdentityContextManager()):
          _, image_features = self._resnet_base_fn(
              inputs=ops.pad_to_multiple(preprocessed_inputs,
                                         self._pad_to_multiple),
              num_classes=None,
              is_training=None,
              global_pool=False,
              output_stride=None,
              store_non_strided_activations=True,
              min_base_depth=self._min_depth,
              depth_multiplier=self._depth_multiplier,
              scope=scope)
          image_features = self._filter_features(image_features)
      depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth)
      with slim.arg_scope(self._conv_hyperparams_fn()):
        with tf.variable_scope(self._fpn_scope_name,
                               reuse=self._reuse_weights):
          base_fpn_max_level = min(self._fpn_max_level, 5)
          feature_block_list = []
          for level in range(self._fpn_min_level, base_fpn_max_level + 1):
            feature_block_list.append('block{}'.format(level - 1))
          fpn_features = feature_map_generators.fpn_top_down_feature_maps(
              [(key, image_features[key]) for key in feature_block_list],
              depth=depth_fn(self._additional_layer_depth),
              use_native_resize_op=self._use_native_resize_op)
          feature_maps = []
          for level in range(self._fpn_min_level, base_fpn_max_level + 1):
            feature_maps.append(
                fpn_features['top_down_block{}'.format(level - 1)])
          last_feature_map = fpn_features['top_down_block{}'.format(
              base_fpn_max_level - 1)]
          # Construct coarse features
          for i in range(base_fpn_max_level, self._fpn_max_level):
            last_feature_map = slim.conv2d(
                last_feature_map,
                num_outputs=depth_fn(self._additional_layer_depth),
                kernel_size=[3, 3],
                stride=2,
                padding='SAME',
                scope='bottom_up_block{}'.format(i))
            feature_maps.append(last_feature_map)
    return feature_maps 
Developer: tensorflow, Project: models, Lines: 63, Source: ssd_resnet_v1_fpn_feature_extractor.py


Note: The object_detection.models.feature_map_generators.fpn_top_down_feature_maps method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. Please refer to each project's license before distributing or using the code, and do not reproduce this article without permission.