

Python box_predictor.BOX_ENCODINGS Attribute Code Examples

This article collects typical usage examples of the Python attribute object_detection.core.box_predictor.BOX_ENCODINGS. If you are wondering what box_predictor.BOX_ENCODINGS does, how to use it, or what working code looks like, the curated examples below may help. You can also explore further usage examples from the object_detection.core.box_predictor module.
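For context, BOX_ENCODINGS (together with CLASS_PREDICTIONS_WITH_BACKGROUND) is a module-level string constant that BoxPredictor implementations use as a dictionary key for the box-regression output of predict(). A minimal sketch follows, assuming the TF1-era TensorFlow Object Detection API is importable; predictor construction is elided here because the full tests below show it:

# Minimal sketch: the module-level constants are just dictionary keys
# used to index the dict returned by BoxPredictor.predict().
# Assumes the object_detection package (TF1 branch) is on the Python path.
from object_detection.core import box_predictor

print(box_predictor.BOX_ENCODINGS)
print(box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND)

# Every example below follows the same pattern (predictor construction elided):
#   predictions = some_box_predictor.predict(...)
#   boxes = predictions[box_predictor.BOX_ENCODINGS]
#   class_logits = predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]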


The following 15 code examples of the box_predictor.BOX_ENCODINGS attribute are shown below, sorted by popularity by default.

Example 1: test_get_boxes_with_five_classes

# Required import: from object_detection.core import box_predictor [as alias]
# Or: from object_detection.core.box_predictor import BOX_ENCODINGS [as alias]
def test_get_boxes_with_five_classes(self):
    image_features = tf.random_uniform([2, 7, 7, 3], dtype=tf.float32)
    mask_box_predictor = box_predictor.MaskRCNNBoxPredictor(
        is_training=False,
        num_classes=5,
        fc_hyperparams=self._build_arg_scope_with_hyperparams(),
        use_dropout=False,
        dropout_keep_prob=0.5,
        box_code_size=4,
    )
    box_predictions = mask_box_predictor.predict(
        image_features, num_predictions_per_location=1, scope='BoxPredictor')
    box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
    class_predictions_with_background = box_predictions[
        box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      (box_encodings_shape,
       class_predictions_with_background_shape) = sess.run(
           [tf.shape(box_encodings),
            tf.shape(class_predictions_with_background)])
      self.assertAllEqual(box_encodings_shape, [2, 1, 5, 4])
      self.assertAllEqual(class_predictions_with_background_shape, [2, 1, 6]) 
Author: ringringyi, Project: DOTA_models, Lines: 26, Source: box_predictor_test.py

Example 2: test_get_boxes_with_five_classes

# Required import: from object_detection.core import box_predictor [as alias]
# Or: from object_detection.core.box_predictor import BOX_ENCODINGS [as alias]
def test_get_boxes_with_five_classes(self):
    image_features = tf.random_uniform([2, 7, 7, 3], dtype=tf.float32)
    mask_box_predictor = box_predictor.MaskRCNNBoxPredictor(
        is_training=False,
        num_classes=5,
        fc_hyperparams=self._build_arg_scope_with_hyperparams(),
        use_dropout=False,
        dropout_keep_prob=0.5,
        box_code_size=4,
    )
    box_predictions = mask_box_predictor.predict(
        [image_features], num_predictions_per_location=[1],
        scope='BoxPredictor')
    box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
    class_predictions_with_background = box_predictions[
        box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      (box_encodings_shape,
       class_predictions_with_background_shape) = sess.run(
           [tf.shape(box_encodings),
            tf.shape(class_predictions_with_background)])
      self.assertAllEqual(box_encodings_shape, [2, 1, 5, 4])
      self.assertAllEqual(class_predictions_with_background_shape, [2, 1, 6]) 
Author: cagbal, Project: ros_people_object_detection_tensorflow, Lines: 27, Source: box_predictor_test.py

Example 3: test_do_not_return_instance_masks_without_request

# Required import: from object_detection.core import box_predictor [as alias]
# Or: from object_detection.core.box_predictor import BOX_ENCODINGS [as alias]
def test_do_not_return_instance_masks_without_request(self):
    image_features = tf.random_uniform([2, 7, 7, 3], dtype=tf.float32)
    mask_box_predictor = box_predictor.MaskRCNNBoxPredictor(
        is_training=False,
        num_classes=5,
        fc_hyperparams=self._build_arg_scope_with_hyperparams(),
        use_dropout=False,
        dropout_keep_prob=0.5,
        box_code_size=4)
    box_predictions = mask_box_predictor.predict(
        [image_features], num_predictions_per_location=[1],
        scope='BoxPredictor')
    self.assertEqual(len(box_predictions), 2)
    self.assertTrue(box_predictor.BOX_ENCODINGS in box_predictions)
    self.assertTrue(box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND
                    in box_predictions) 
Author: cagbal, Project: ros_people_object_detection_tensorflow, Lines: 18, Source: box_predictor_test.py

Example 4: test_get_boxes_for_five_aspect_ratios_per_location

# Required import: from object_detection.core import box_predictor [as alias]
# Or: from object_detection.core.box_predictor import BOX_ENCODINGS [as alias]
def test_get_boxes_for_five_aspect_ratios_per_location(self):

    def graph_fn(image_features):
      conv_box_predictor = box_predictor.WeightSharedConvolutionalBoxPredictor(
          is_training=False,
          num_classes=0,
          conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
          depth=32,
          num_layers_before_predictor=1,
          box_code_size=4)
      box_predictions = conv_box_predictor.predict(
          [image_features], num_predictions_per_location=[5],
          scope='BoxPredictor')
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      objectness_predictions = tf.concat(box_predictions[
          box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1)
      return (box_encodings, objectness_predictions)
    image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
    (box_encodings, objectness_predictions) = self.execute(
        graph_fn, [image_features])
    self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4])
    self.assertAllEqual(objectness_predictions.shape, [4, 320, 1]) 
Author: cagbal, Project: ros_people_object_detection_tensorflow, Lines: 25, Source: box_predictor_test.py

Example 5: test_get_boxes_with_five_classes

# Required import: from object_detection.core import box_predictor [as alias]
# Or: from object_detection.core.box_predictor import BOX_ENCODINGS [as alias]
def test_get_boxes_with_five_classes(self):
    image_features = tf.random_uniform([2, 7, 7, 3], dtype=tf.float32)
    mask_box_predictor = box_predictor.MaskRCNNBoxPredictor(
        is_training=False,
        num_classes=5,
        fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(),
        use_dropout=False,
        dropout_keep_prob=0.5,
        box_code_size=4,
    )
    box_predictions = mask_box_predictor.predict(
        [image_features], num_predictions_per_location=[1],
        scope='BoxPredictor')
    box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
    class_predictions_with_background = box_predictions[
        box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      (box_encodings_shape,
       class_predictions_with_background_shape) = sess.run(
           [tf.shape(box_encodings),
            tf.shape(class_predictions_with_background)])
      self.assertAllEqual(box_encodings_shape, [2, 1, 5, 4])
      self.assertAllEqual(class_predictions_with_background_shape, [2, 1, 6]) 
Author: ambakick, Project: Person-Detection-and-Tracking, Lines: 27, Source: box_predictor_test.py

Example 6: test_do_not_return_instance_masks_without_request

# Required import: from object_detection.core import box_predictor [as alias]
# Or: from object_detection.core.box_predictor import BOX_ENCODINGS [as alias]
def test_do_not_return_instance_masks_without_request(self):
    image_features = tf.random_uniform([2, 7, 7, 3], dtype=tf.float32)
    mask_box_predictor = box_predictor.MaskRCNNBoxPredictor(
        is_training=False,
        num_classes=5,
        fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(),
        use_dropout=False,
        dropout_keep_prob=0.5,
        box_code_size=4)
    box_predictions = mask_box_predictor.predict(
        [image_features], num_predictions_per_location=[1],
        scope='BoxPredictor')
    self.assertEqual(len(box_predictions), 2)
    self.assertTrue(box_predictor.BOX_ENCODINGS in box_predictions)
    self.assertTrue(box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND
                    in box_predictions) 
Author: ambakick, Project: Person-Detection-and-Tracking, Lines: 18, Source: box_predictor_test.py

Example 7: _predict

# Required import: from object_detection.core import box_predictor [as alias]
# Or: from object_detection.core.box_predictor import BOX_ENCODINGS [as alias]
def _predict(self, image_features, num_predictions_per_location):
    batch_size = image_features.get_shape().as_list()[0]
    num_anchors = (image_features.get_shape().as_list()[1]
                   * image_features.get_shape().as_list()[2])
    code_size = 4
    zero = tf.reduce_sum(0 * image_features)
    box_encodings = zero + tf.zeros(
        (batch_size, num_anchors, 1, code_size), dtype=tf.float32)
    class_predictions_with_background = zero + tf.zeros(
        (batch_size, num_anchors, self.num_classes + 1), dtype=tf.float32)
    return {box_predictor.BOX_ENCODINGS: box_encodings,
            box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND:
            class_predictions_with_background} 
Author: ringringyi, Project: DOTA_models, Lines: 15, Source: test_utils.py

Example 8: _predict_rpn_proposals

# Required import: from object_detection.core import box_predictor [as alias]
# Or: from object_detection.core.box_predictor import BOX_ENCODINGS [as alias]
def _predict_rpn_proposals(self, rpn_box_predictor_features):
    """Adds box predictors to RPN feature map to predict proposals.

    Note resulting tensors will not have been postprocessed.

    Args:
      rpn_box_predictor_features: A 4-D float32 tensor with shape
        [batch, height, width, depth] to be used for predicting proposal boxes
        and corresponding objectness scores.

    Returns:
      box_encodings: 3-D float tensor of shape
        [batch_size, num_anchors, self._box_coder.code_size] containing
        predicted boxes.
      objectness_predictions_with_background: 3-D float tensor of shape
        [batch_size, num_anchors, 2] containing class
        predictions (logits) for each of the anchors.  Note that this
        tensor *includes* background class predictions (at class index 0).

    Raises:
      RuntimeError: if the anchor generator generates anchors corresponding to
        multiple feature maps.  We currently assume that a single feature map
        is generated for the RPN.
    """
    num_anchors_per_location = (
        self._first_stage_anchor_generator.num_anchors_per_location())
    if len(num_anchors_per_location) != 1:
      raise RuntimeError('anchor_generator is expected to generate anchors '
                         'corresponding to a single feature map.')
    box_predictions = self._first_stage_box_predictor.predict(
        rpn_box_predictor_features,
        num_anchors_per_location[0],
        scope=self.first_stage_box_predictor_scope)

    box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
    objectness_predictions_with_background = box_predictions[
        box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
    return (tf.squeeze(box_encodings, axis=2),
            objectness_predictions_with_background) 
Author: ringringyi, Project: DOTA_models, Lines: 41, Source: faster_rcnn_meta_arch.py

示例9: test_do_not_return_instance_masks_and_keypoints_without_request

# Required import: from object_detection.core import box_predictor [as alias]
# Or: from object_detection.core.box_predictor import BOX_ENCODINGS [as alias]
def test_do_not_return_instance_masks_and_keypoints_without_request(self):
    image_features = tf.random_uniform([2, 7, 7, 3], dtype=tf.float32)
    mask_box_predictor = box_predictor.MaskRCNNBoxPredictor(
        is_training=False,
        num_classes=5,
        fc_hyperparams=self._build_arg_scope_with_hyperparams(),
        use_dropout=False,
        dropout_keep_prob=0.5,
        box_code_size=4)
    box_predictions = mask_box_predictor.predict(
        image_features, num_predictions_per_location=1, scope='BoxPredictor')
    self.assertEqual(len(box_predictions), 2)
    self.assertTrue(box_predictor.BOX_ENCODINGS in box_predictions)
    self.assertTrue(box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND
                    in box_predictions) 
Author: ringringyi, Project: DOTA_models, Lines: 17, Source: box_predictor_test.py

Example 10: test_get_boxes_for_five_aspect_ratios_per_location

# Required import: from object_detection.core import box_predictor [as alias]
# Or: from object_detection.core.box_predictor import BOX_ENCODINGS [as alias]
def test_get_boxes_for_five_aspect_ratios_per_location(self):
    image_features = tf.random_uniform([4, 8, 8, 64], dtype=tf.float32)
    conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
        is_training=False,
        num_classes=0,
        conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
        min_depth=0,
        max_depth=32,
        num_layers_before_predictor=1,
        use_dropout=True,
        dropout_keep_prob=0.8,
        kernel_size=1,
        box_code_size=4
    )
    box_predictions = conv_box_predictor.predict(
        image_features, num_predictions_per_location=5, scope='BoxPredictor')
    box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
    objectness_predictions = box_predictions[
        box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]

    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      (box_encodings_shape,
       objectness_predictions_shape) = sess.run(
           [tf.shape(box_encodings), tf.shape(objectness_predictions)])
      self.assertAllEqual(box_encodings_shape, [4, 320, 1, 4])
      self.assertAllEqual(objectness_predictions_shape, [4, 320, 1]) 
Author: ringringyi, Project: DOTA_models, Lines: 30, Source: box_predictor_test.py

Example 11: test_get_boxes_for_one_aspect_ratio_per_location

# Required import: from object_detection.core import box_predictor [as alias]
# Or: from object_detection.core.box_predictor import BOX_ENCODINGS [as alias]
def test_get_boxes_for_one_aspect_ratio_per_location(self):
    image_features = tf.random_uniform([4, 8, 8, 64], dtype=tf.float32)
    conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
        is_training=False,
        num_classes=0,
        conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
        min_depth=0,
        max_depth=32,
        num_layers_before_predictor=1,
        use_dropout=True,
        dropout_keep_prob=0.8,
        kernel_size=1,
        box_code_size=4
    )
    box_predictions = conv_box_predictor.predict(
        image_features, num_predictions_per_location=1, scope='BoxPredictor')
    box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
    objectness_predictions = box_predictions[
        box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]

    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      (box_encodings_shape,
       objectness_predictions_shape) = sess.run(
           [tf.shape(box_encodings), tf.shape(objectness_predictions)])
      self.assertAllEqual(box_encodings_shape, [4, 64, 1, 4])
      self.assertAllEqual(objectness_predictions_shape, [4, 64, 1]) 
Author: ringringyi, Project: DOTA_models, Lines: 30, Source: box_predictor_test.py

Example 12: test_get_multi_class_predictions_for_five_aspect_ratios_per_location

# Required import: from object_detection.core import box_predictor [as alias]
# Or: from object_detection.core.box_predictor import BOX_ENCODINGS [as alias]
def test_get_multi_class_predictions_for_five_aspect_ratios_per_location(
      self):
    num_classes_without_background = 6
    image_features = tf.random_uniform([4, 8, 8, 64], dtype=tf.float32)
    conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
        is_training=False,
        num_classes=num_classes_without_background,
        conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
        min_depth=0,
        max_depth=32,
        num_layers_before_predictor=1,
        use_dropout=True,
        dropout_keep_prob=0.8,
        kernel_size=1,
        box_code_size=4
    )
    box_predictions = conv_box_predictor.predict(
        image_features,
        num_predictions_per_location=5,
        scope='BoxPredictor')
    box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
    class_predictions_with_background = box_predictions[
        box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]

    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      (box_encodings_shape, class_predictions_with_background_shape
      ) = sess.run([
          tf.shape(box_encodings), tf.shape(class_predictions_with_background)])
      self.assertAllEqual(box_encodings_shape, [4, 320, 1, 4])
      self.assertAllEqual(class_predictions_with_background_shape,
                          [4, 320, num_classes_without_background+1]) 
Author: ringringyi, Project: DOTA_models, Lines: 35, Source: box_predictor_test.py

Example 13: test_get_boxes_for_five_aspect_ratios_per_location_fully_convolutional

# Required import: from object_detection.core import box_predictor [as alias]
# Or: from object_detection.core.box_predictor import BOX_ENCODINGS [as alias]
def test_get_boxes_for_five_aspect_ratios_per_location_fully_convolutional(
      self):
    image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64])
    conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
        is_training=False,
        num_classes=0,
        conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
        min_depth=0,
        max_depth=32,
        num_layers_before_predictor=1,
        use_dropout=True,
        dropout_keep_prob=0.8,
        kernel_size=1,
        box_code_size=4
    )
    box_predictions = conv_box_predictor.predict(
        image_features, num_predictions_per_location=5, scope='BoxPredictor')
    box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
    objectness_predictions = box_predictions[
        box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
    init_op = tf.global_variables_initializer()

    resolution = 32
    expected_num_anchors = resolution*resolution*5
    with self.test_session() as sess:
      sess.run(init_op)
      (box_encodings_shape,
       objectness_predictions_shape) = sess.run(
           [tf.shape(box_encodings), tf.shape(objectness_predictions)],
           feed_dict={image_features:
                      np.random.rand(4, resolution, resolution, 64)})
      self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 1, 4])
      self.assertAllEqual(objectness_predictions_shape,
                          [4, expected_num_anchors, 1]) 
Author: ringringyi, Project: DOTA_models, Lines: 36, Source: box_predictor_test.py

Example 14: _predict

# Required import: from object_detection.core import box_predictor [as alias]
# Or: from object_detection.core.box_predictor import BOX_ENCODINGS [as alias]
def _predict(self, image_features, num_predictions_per_location):
    image_feature = image_features[0]
    combined_feature_shape = shape_utils.combined_static_and_dynamic_shape(
        image_feature)
    batch_size = combined_feature_shape[0]
    num_anchors = (combined_feature_shape[1] * combined_feature_shape[2])
    code_size = 4
    zero = tf.reduce_sum(0 * image_feature)
    num_class_slots = self.num_classes
    if self._add_background_class:
      num_class_slots = num_class_slots + 1
    box_encodings = zero + tf.zeros(
        (batch_size, num_anchors, 1, code_size), dtype=tf.float32)
    class_predictions_with_background = zero + tf.zeros(
        (batch_size, num_anchors, num_class_slots), dtype=tf.float32)
    masks = zero + tf.zeros(
        (batch_size, num_anchors, self.num_classes, DEFAULT_MASK_SIZE,
         DEFAULT_MASK_SIZE),
        dtype=tf.float32)
    predictions_dict = {
        box_predictor.BOX_ENCODINGS:
            box_encodings,
        box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND:
            class_predictions_with_background
    }
    if self._predict_mask:
      predictions_dict[box_predictor.MASK_PREDICTIONS] = masks

    return predictions_dict 
Author: ahmetozlu, Project: vehicle_counting_tensorflow, Lines: 31, Source: test_utils.py

Example 15: build

# Required import: from object_detection.core import box_predictor [as alias]
# Or: from object_detection.core.box_predictor import BOX_ENCODINGS [as alias]
def build(self, input_shapes):
    """Creates the variables of the layer."""
    if len(input_shapes) != len(self._prediction_heads[BOX_ENCODINGS]):
      raise ValueError('This box predictor was constructed with %d heads,'
                       'but there are %d inputs.' %
                       (len(self._prediction_heads[BOX_ENCODINGS]),
                        len(input_shapes)))
    for stack_index, input_shape in enumerate(input_shapes):
      net = []

      # Add additional conv layers before the class predictor.
      features_depth = static_shape.get_depth(input_shape)
      depth = max(min(features_depth, self._max_depth), self._min_depth)
      tf.logging.info(
          'depth of additional conv before box predictor: {}'.format(depth))

      if depth > 0 and self._num_layers_before_predictor > 0:
        for i in range(self._num_layers_before_predictor):
          net.append(keras.Conv2D(depth, [1, 1],
                                  name='SharedConvolutions_%d/Conv2d_%d_1x1_%d'
                                  % (stack_index, i, depth),
                                  padding='SAME',
                                  **self._conv_hyperparams.params()))
          net.append(self._conv_hyperparams.build_batch_norm(
              training=(self._is_training and not self._freeze_batchnorm),
              name='SharedConvolutions_%d/Conv2d_%d_1x1_%d_norm'
              % (stack_index, i, depth)))
          net.append(self._conv_hyperparams.build_activation_layer(
              name='SharedConvolutions_%d/Conv2d_%d_1x1_%d_activation'
              % (stack_index, i, depth),
          ))
      # Until certain bugs are fixed in checkpointable lists,
      # this net must be appended only once it's been filled with layers
      self._shared_nets.append(net)
    self.built = True 
Author: ahmetozlu, Project: vehicle_counting_tensorflow, Lines: 37, Source: convolutional_keras_box_predictor.py


Note: The object_detection.core.box_predictor.BOX_ENCODINGS examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult each project's license before using or redistributing the code; do not republish without permission.