

Python box_predictor.ConvolutionalBoxPredictor Method Code Examples

This article collects typical usage examples of the Python method object_detection.core.box_predictor.ConvolutionalBoxPredictor. If you are unsure how box_predictor.ConvolutionalBoxPredictor is used in practice, the curated examples below may help. You can also explore the other usage examples for the containing module, object_detection.core.box_predictor.


The following presents 13 code examples of the box_predictor.ConvolutionalBoxPredictor method, sorted by popularity by default.

Example 1: test_get_boxes_for_five_aspect_ratios_per_location

# Required import: from object_detection.core import box_predictor [as alias]
# Or: from object_detection.core.box_predictor import ConvolutionalBoxPredictor [as alias]
# These snippets also assume: import tensorflow as tf and import numpy as np
def test_get_boxes_for_five_aspect_ratios_per_location(self):
    image_features = tf.random_uniform([4, 8, 8, 64], dtype=tf.float32)
    conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
        is_training=False,
        num_classes=0,
        conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
        min_depth=0,
        max_depth=32,
        num_layers_before_predictor=1,
        use_dropout=True,
        dropout_keep_prob=0.8,
        kernel_size=1,
        box_code_size=4
    )
    box_predictions = conv_box_predictor.predict(
        image_features, num_predictions_per_location=5, scope='BoxPredictor')
    box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
    objectness_predictions = box_predictions[
        box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]

    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      (box_encodings_shape,
       objectness_predictions_shape) = sess.run(
           [tf.shape(box_encodings), tf.shape(objectness_predictions)])
      self.assertAllEqual(box_encodings_shape, [4, 320, 1, 4])
      self.assertAllEqual(objectness_predictions_shape, [4, 320, 1]) 
Developer: ringringyi, Project: DOTA_models, Lines of code: 30, Source: box_predictor_test.py
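
The asserted shapes follow from the anchor-count arithmetic: an 8x8 feature map with 5 predictions per location yields 8*8*5 = 320 anchors, each with a 4-dimensional box encoding and, since num_classes=0, a single objectness score. The helper below is a hypothetical plain-Python sketch of that arithmetic (expected_prediction_shapes is not part of the object_detection API); it reproduces the shapes asserted in these tests:

def expected_prediction_shapes(batch, height, width, predictions_per_location,
                               box_code_size, num_classes):
  # Each spatial location of the feature map emits this many anchors.
  num_anchors = height * width * predictions_per_location
  # Box encodings come out as [batch, num_anchors, 1, box_code_size].
  box_encodings_shape = (batch, num_anchors, 1, box_code_size)
  # Class predictions carry an extra background channel (num_classes + 1);
  # with num_classes=0 that single channel is the objectness score.
  class_predictions_shape = (batch, num_anchors, num_classes + 1)
  return box_encodings_shape, class_predictions_shape

print(expected_prediction_shapes(4, 8, 8, 5, 4, 0))  # ((4, 320, 1, 4), (4, 320, 1))
print(expected_prediction_shapes(4, 8, 8, 1, 4, 0))  # ((4, 64, 1, 4), (4, 64, 1))
print(expected_prediction_shapes(4, 8, 8, 5, 4, 6))  # ((4, 320, 1, 4), (4, 320, 7))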

Example 2: test_get_boxes_for_one_aspect_ratio_per_location

# Required import: from object_detection.core import box_predictor [as alias]
# Or: from object_detection.core.box_predictor import ConvolutionalBoxPredictor [as alias]
def test_get_boxes_for_one_aspect_ratio_per_location(self):
    image_features = tf.random_uniform([4, 8, 8, 64], dtype=tf.float32)
    conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
        is_training=False,
        num_classes=0,
        conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
        min_depth=0,
        max_depth=32,
        num_layers_before_predictor=1,
        use_dropout=True,
        dropout_keep_prob=0.8,
        kernel_size=1,
        box_code_size=4
    )
    box_predictions = conv_box_predictor.predict(
        image_features, num_predictions_per_location=1, scope='BoxPredictor')
    box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
    objectness_predictions = box_predictions[
        box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]

    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      (box_encodings_shape,
       objectness_predictions_shape) = sess.run(
           [tf.shape(box_encodings), tf.shape(objectness_predictions)])
      self.assertAllEqual(box_encodings_shape, [4, 64, 1, 4])
      self.assertAllEqual(objectness_predictions_shape, [4, 64, 1]) 
Developer: ringringyi, Project: DOTA_models, Lines of code: 30, Source: box_predictor_test.py

Example 3: test_get_multi_class_predictions_for_five_aspect_ratios_per_location

# Required import: from object_detection.core import box_predictor [as alias]
# Or: from object_detection.core.box_predictor import ConvolutionalBoxPredictor [as alias]
def test_get_multi_class_predictions_for_five_aspect_ratios_per_location(
      self):
    num_classes_without_background = 6
    image_features = tf.random_uniform([4, 8, 8, 64], dtype=tf.float32)
    conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
        is_training=False,
        num_classes=num_classes_without_background,
        conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
        min_depth=0,
        max_depth=32,
        num_layers_before_predictor=1,
        use_dropout=True,
        dropout_keep_prob=0.8,
        kernel_size=1,
        box_code_size=4
    )
    box_predictions = conv_box_predictor.predict(
        image_features,
        num_predictions_per_location=5,
        scope='BoxPredictor')
    box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
    class_predictions_with_background = box_predictions[
        box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]

    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      (box_encodings_shape, class_predictions_with_background_shape
      ) = sess.run([
          tf.shape(box_encodings), tf.shape(class_predictions_with_background)])
      self.assertAllEqual(box_encodings_shape, [4, 320, 1, 4])
      self.assertAllEqual(class_predictions_with_background_shape,
                          [4, 320, num_classes_without_background+1]) 
Developer: ringringyi, Project: DOTA_models, Lines of code: 35, Source: box_predictor_test.py
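
With num_classes_without_background = 6, the predictor emits class scores plus one background channel, so the last dimension of class_predictions_with_background is 6 + 1 = 7, which is exactly what the final assertion checks.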

Example 4: test_get_boxes_for_five_aspect_ratios_per_location_fully_convolutional

# Required import: from object_detection.core import box_predictor [as alias]
# Or: from object_detection.core.box_predictor import ConvolutionalBoxPredictor [as alias]
def test_get_boxes_for_five_aspect_ratios_per_location_fully_convolutional(
      self):
    image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64])
    conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
        is_training=False,
        num_classes=0,
        conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
        min_depth=0,
        max_depth=32,
        num_layers_before_predictor=1,
        use_dropout=True,
        dropout_keep_prob=0.8,
        kernel_size=1,
        box_code_size=4
    )
    box_predictions = conv_box_predictor.predict(
        image_features, num_predictions_per_location=5, scope='BoxPredictor')
    box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
    objectness_predictions = box_predictions[
        box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
    init_op = tf.global_variables_initializer()

    resolution = 32
    expected_num_anchors = resolution*resolution*5
    with self.test_session() as sess:
      sess.run(init_op)
      (box_encodings_shape,
       objectness_predictions_shape) = sess.run(
           [tf.shape(box_encodings), tf.shape(objectness_predictions)],
           feed_dict={image_features:
                      np.random.rand(4, resolution, resolution, 64)})
      self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 1, 4])
      self.assertAllEqual(objectness_predictions_shape,
                          [4, expected_num_anchors, 1]) 
Developer: ringringyi, Project: DOTA_models, Lines of code: 36, Source: box_predictor_test.py
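
This variant builds the graph on a placeholder whose spatial dimensions are None, demonstrating that the predictor is fully convolutional: feeding a 32x32 feature map at run time produces resolution*resolution*5 = 32*32*5 = 5120 anchors, and the asserted shapes scale with whatever resolution is supplied through the feed_dict.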

Example 5: test_get_boxes_for_five_aspect_ratios_per_location

# Required import: from object_detection.core import box_predictor [as alias]
# Or: from object_detection.core.box_predictor import ConvolutionalBoxPredictor [as alias]
def test_get_boxes_for_five_aspect_ratios_per_location(self):
    def graph_fn(image_features):
      conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
          is_training=False,
          num_classes=0,
          conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
          min_depth=0,
          max_depth=32,
          num_layers_before_predictor=1,
          use_dropout=True,
          dropout_keep_prob=0.8,
          kernel_size=1,
          box_code_size=4
      )
      box_predictions = conv_box_predictor.predict(
          [image_features], num_predictions_per_location=[5],
          scope='BoxPredictor')
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      objectness_predictions = tf.concat(
          box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
          axis=1)
      return (box_encodings, objectness_predictions)
    image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
    (box_encodings, objectness_predictions) = self.execute(graph_fn,
                                                           [image_features])
    self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4])
    self.assertAllEqual(objectness_predictions.shape, [4, 320, 1]) 
Developer: cagbal, Project: ros_people_object_detection_tensorflow, Lines of code: 30, Source: box_predictor_test.py
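
From this example onward the snippets appear to target a later revision of the TF Object Detection API: predict receives a list of feature maps together with a matching list of num_predictions_per_location values and returns lists of tensors per feature map, which the tests then merge with tf.concat along the anchor dimension. The tests also run the graph through self.execute instead of an explicit session.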

Example 6: test_get_boxes_for_one_aspect_ratio_per_location

# Required import: from object_detection.core import box_predictor [as alias]
# Or: from object_detection.core.box_predictor import ConvolutionalBoxPredictor [as alias]
def test_get_boxes_for_one_aspect_ratio_per_location(self):
    def graph_fn(image_features):
      conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
          is_training=False,
          num_classes=0,
          conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
          min_depth=0,
          max_depth=32,
          num_layers_before_predictor=1,
          use_dropout=True,
          dropout_keep_prob=0.8,
          kernel_size=1,
          box_code_size=4
      )
      box_predictions = conv_box_predictor.predict(
          [image_features], num_predictions_per_location=[1],
          scope='BoxPredictor')
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      objectness_predictions = tf.concat(box_predictions[
          box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1)
      return (box_encodings, objectness_predictions)
    image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
    (box_encodings, objectness_predictions) = self.execute(graph_fn,
                                                           [image_features])
    self.assertAllEqual(box_encodings.shape, [4, 64, 1, 4])
    self.assertAllEqual(objectness_predictions.shape, [4, 64, 1]) 
Developer: cagbal, Project: ros_people_object_detection_tensorflow, Lines of code: 29, Source: box_predictor_test.py

Example 7: test_get_multi_class_predictions_for_five_aspect_ratios_per_location

# Required import: from object_detection.core import box_predictor [as alias]
# Or: from object_detection.core.box_predictor import ConvolutionalBoxPredictor [as alias]
def test_get_multi_class_predictions_for_five_aspect_ratios_per_location(
      self):
    num_classes_without_background = 6
    image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
    def graph_fn(image_features):
      conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
          is_training=False,
          num_classes=num_classes_without_background,
          conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
          min_depth=0,
          max_depth=32,
          num_layers_before_predictor=1,
          use_dropout=True,
          dropout_keep_prob=0.8,
          kernel_size=1,
          box_code_size=4
      )
      box_predictions = conv_box_predictor.predict(
          [image_features],
          num_predictions_per_location=[5],
          scope='BoxPredictor')
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      class_predictions_with_background = tf.concat(
          box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
          axis=1)
      return (box_encodings, class_predictions_with_background)
    (box_encodings,
     class_predictions_with_background) = self.execute(graph_fn,
                                                       [image_features])
    self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4])
    self.assertAllEqual(class_predictions_with_background.shape,
                        [4, 320, num_classes_without_background+1]) 
Developer: cagbal, Project: ros_people_object_detection_tensorflow, Lines of code: 35, Source: box_predictor_test.py

Example 8: test_get_boxes_for_five_aspect_ratios_per_location

# Required import: from object_detection.core import box_predictor [as alias]
# Or: from object_detection.core.box_predictor import ConvolutionalBoxPredictor [as alias]
def test_get_boxes_for_five_aspect_ratios_per_location(self):
    def graph_fn(image_features):
      conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
          is_training=False,
          num_classes=0,
          conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(),
          min_depth=0,
          max_depth=32,
          num_layers_before_predictor=1,
          use_dropout=True,
          dropout_keep_prob=0.8,
          kernel_size=1,
          box_code_size=4
      )
      box_predictions = conv_box_predictor.predict(
          [image_features], num_predictions_per_location=[5],
          scope='BoxPredictor')
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      objectness_predictions = tf.concat(
          box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
          axis=1)
      return (box_encodings, objectness_predictions)
    image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
    (box_encodings, objectness_predictions) = self.execute(graph_fn,
                                                           [image_features])
    self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4])
    self.assertAllEqual(objectness_predictions.shape, [4, 320, 1]) 
Developer: ambakick, Project: Person-Detection-and-Tracking, Lines of code: 30, Source: box_predictor_test.py
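
Examples 8 through 10 differ only in passing the hyperparameters through the conv_hyperparams_fn keyword instead of conv_hyperparams, which appears to reflect a rename of that constructor argument between releases of the API; the rest of the construction and the asserted shapes are unchanged.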

Example 9: test_get_boxes_for_one_aspect_ratio_per_location

# Required import: from object_detection.core import box_predictor [as alias]
# Or: from object_detection.core.box_predictor import ConvolutionalBoxPredictor [as alias]
def test_get_boxes_for_one_aspect_ratio_per_location(self):
    def graph_fn(image_features):
      conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
          is_training=False,
          num_classes=0,
          conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(),
          min_depth=0,
          max_depth=32,
          num_layers_before_predictor=1,
          use_dropout=True,
          dropout_keep_prob=0.8,
          kernel_size=1,
          box_code_size=4
      )
      box_predictions = conv_box_predictor.predict(
          [image_features], num_predictions_per_location=[1],
          scope='BoxPredictor')
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      objectness_predictions = tf.concat(box_predictions[
          box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1)
      return (box_encodings, objectness_predictions)
    image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
    (box_encodings, objectness_predictions) = self.execute(graph_fn,
                                                           [image_features])
    self.assertAllEqual(box_encodings.shape, [4, 64, 1, 4])
    self.assertAllEqual(objectness_predictions.shape, [4, 64, 1]) 
Developer: ambakick, Project: Person-Detection-and-Tracking, Lines of code: 29, Source: box_predictor_test.py

Example 10: test_get_multi_class_predictions_for_five_aspect_ratios_per_location

# Required import: from object_detection.core import box_predictor [as alias]
# Or: from object_detection.core.box_predictor import ConvolutionalBoxPredictor [as alias]
def test_get_multi_class_predictions_for_five_aspect_ratios_per_location(
      self):
    num_classes_without_background = 6
    image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
    def graph_fn(image_features):
      conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
          is_training=False,
          num_classes=num_classes_without_background,
          conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(),
          min_depth=0,
          max_depth=32,
          num_layers_before_predictor=1,
          use_dropout=True,
          dropout_keep_prob=0.8,
          kernel_size=1,
          box_code_size=4
      )
      box_predictions = conv_box_predictor.predict(
          [image_features],
          num_predictions_per_location=[5],
          scope='BoxPredictor')
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      class_predictions_with_background = tf.concat(
          box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
          axis=1)
      return (box_encodings, class_predictions_with_background)
    (box_encodings,
     class_predictions_with_background) = self.execute(graph_fn,
                                                       [image_features])
    self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4])
    self.assertAllEqual(class_predictions_with_background.shape,
                        [4, 320, num_classes_without_background+1]) 
Developer: ambakick, Project: Person-Detection-and-Tracking, Lines of code: 35, Source: box_predictor_test.py

Example 11: test_get_boxes_for_five_aspect_ratios_per_location

# Required import: from object_detection.core import box_predictor [as alias]
# Or: from object_detection.core.box_predictor import ConvolutionalBoxPredictor [as alias]
def test_get_boxes_for_five_aspect_ratios_per_location(self):
    def graph_fn(image_features):
      conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
          is_training=False,
          num_classes=0,
          conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
          min_depth=0,
          max_depth=32,
          num_layers_before_predictor=1,
          use_dropout=True,
          dropout_keep_prob=0.8,
          kernel_size=1,
          box_code_size=4
      )
      box_predictions = conv_box_predictor.predict(
          [image_features], num_predictions_per_location=[5],
          scope='BoxPredictor')
      box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
      objectness_predictions = box_predictions[
          box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
      return (box_encodings, objectness_predictions)
    image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
    (box_encodings, objectness_predictions) = self.execute(graph_fn,
                                                           [image_features])
    self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4])
    self.assertAllEqual(objectness_predictions.shape, [4, 320, 1]) 
Developer: ShreyAmbesh, Project: Traffic-Rule-Violation-Detection-System, Lines of code: 28, Source: box_predictor_test.py

Example 12: test_get_boxes_for_one_aspect_ratio_per_location

# Required import: from object_detection.core import box_predictor [as alias]
# Or: from object_detection.core.box_predictor import ConvolutionalBoxPredictor [as alias]
def test_get_boxes_for_one_aspect_ratio_per_location(self):
    def graph_fn(image_features):
      conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
          is_training=False,
          num_classes=0,
          conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
          min_depth=0,
          max_depth=32,
          num_layers_before_predictor=1,
          use_dropout=True,
          dropout_keep_prob=0.8,
          kernel_size=1,
          box_code_size=4
      )
      box_predictions = conv_box_predictor.predict(
          [image_features], num_predictions_per_location=[1],
          scope='BoxPredictor')
      box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
      objectness_predictions = box_predictions[
          box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
      return (box_encodings, objectness_predictions)
    image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
    (box_encodings, objectness_predictions) = self.execute(graph_fn,
                                                           [image_features])
    self.assertAllEqual(box_encodings.shape, [4, 64, 1, 4])
    self.assertAllEqual(objectness_predictions.shape, [4, 64, 1]) 
Developer: ShreyAmbesh, Project: Traffic-Rule-Violation-Detection-System, Lines of code: 28, Source: box_predictor_test.py

Example 13: test_get_multi_class_predictions_for_five_aspect_ratios_per_location

# Required import: from object_detection.core import box_predictor [as alias]
# Or: from object_detection.core.box_predictor import ConvolutionalBoxPredictor [as alias]
def test_get_multi_class_predictions_for_five_aspect_ratios_per_location(
      self):
    num_classes_without_background = 6
    image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
    def graph_fn(image_features):
      conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
          is_training=False,
          num_classes=num_classes_without_background,
          conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
          min_depth=0,
          max_depth=32,
          num_layers_before_predictor=1,
          use_dropout=True,
          dropout_keep_prob=0.8,
          kernel_size=1,
          box_code_size=4
      )
      box_predictions = conv_box_predictor.predict(
          [image_features],
          num_predictions_per_location=[5],
          scope='BoxPredictor')
      box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
      class_predictions_with_background = box_predictions[
          box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
      return (box_encodings, class_predictions_with_background)
    (box_encodings,
     class_predictions_with_background) = self.execute(graph_fn,
                                                       [image_features])
    self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4])
    self.assertAllEqual(class_predictions_with_background.shape,
                        [4, 320, num_classes_without_background+1]) 
Developer: ShreyAmbesh, Project: Traffic-Rule-Violation-Detection-System, Lines of code: 33, Source: box_predictor_test.py


Note: The object_detection.core.box_predictor.ConvolutionalBoxPredictor examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors, and copyright remains with the original authors; please follow the corresponding project's license when distributing or using the code. Do not reproduce without permission.