

Python eval_pb2.EvalConfig Code Examples

This article collects typical usage examples of object_detection.protos.eval_pb2.EvalConfig in Python. If you are wondering what eval_pb2.EvalConfig does, or how it is used in practice, the curated code examples below should help. You can also explore further usage examples from its containing module, object_detection.protos.eval_pb2.


The 12 code examples of eval_pb2.EvalConfig below are ordered by popularity. Each one is taken from an open-source project, with the developer, project, and source file noted after the snippet.
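
Before the examples, here is a minimal sketch (not taken from any of the projects below) of the pattern they all share: eval_pb2.EvalConfig is a protobuf message class, so you instantiate it and set its fields directly. The fields used here (metrics_set, include_metrics_per_category) also appear in the examples that follow.

from object_detection.protos import eval_pb2

eval_config = eval_pb2.EvalConfig()
eval_config.metrics_set.extend(['coco_detection_metrics'])  # repeated string field
eval_config.include_metrics_per_category = True             # boolean field
print(eval_config)  # protobuf messages print in text format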

Example 1: get_configs_from_pipeline_file

# Required import: from object_detection.protos import eval_pb2 [as alias]
# Alternatively: from object_detection.protos.eval_pb2 import EvalConfig [as alias]
def get_configs_from_pipeline_file():
  """Reads evaluation configuration from a pipeline_pb2.TrainEvalPipelineConfig.

  Reads evaluation config from file specified by pipeline_config_path flag.

  Returns:
    model_config: a model_pb2.DetectionModel
    eval_config: a eval_pb2.EvalConfig
    input_config: a input_reader_pb2.InputReader
  """
  pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
  with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f:
    text_format.Merge(f.read(), pipeline_config)

  model_config = pipeline_config.model
  if FLAGS.eval_training_data:
    eval_config = pipeline_config.train_config
  else:
    eval_config = pipeline_config.eval_config
  input_config = pipeline_config.eval_input_reader

  return model_config, eval_config, input_config 
Developer: ringringyi, Project: DOTA_models, Lines: 24, Source file: eval.py
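
For reference, a hedged sketch of the kind of text-format pipeline config the function above parses. The snippet is hypothetical and heavily trimmed (a real config also populates model, train_config, and the input readers), and the num_examples field is assumed to exist in this version of eval.proto.

from google.protobuf import text_format
from object_detection.protos import pipeline_pb2

# Hypothetical, heavily trimmed pipeline config text.
pipeline_text = """
eval_config {
  num_examples: 100
  metrics_set: "coco_detection_metrics"
}
"""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
text_format.Merge(pipeline_text, pipeline_config)
print(pipeline_config.eval_config.metrics_set)  # ['coco_detection_metrics']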

Example 2: test_get_eval_metric_ops_for_coco_detections

# Required import: from object_detection.protos import eval_pb2 [as alias]
# Alternatively: from object_detection.protos.eval_pb2 import EvalConfig [as alias]
def test_get_eval_metric_ops_for_coco_detections(self, batch_size=1,
                                                   max_gt_boxes=None,
                                                   scale_to_absolute=False):
    eval_config = eval_pb2.EvalConfig()
    eval_config.metrics_set.extend(['coco_detection_metrics'])
    categories = self._get_categories_list()
    eval_dict = self._make_evaluation_dict(batch_size=batch_size,
                                           max_gt_boxes=max_gt_boxes,
                                           scale_to_absolute=scale_to_absolute)
    metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
        eval_config, categories, eval_dict)
    _, update_op = metric_ops['DetectionBoxes_Precision/mAP']

    with self.test_session() as sess:
      metrics = {}
      for key, (value_op, _) in metric_ops.iteritems():
        metrics[key] = value_op
      sess.run(update_op)
      metrics = sess.run(metrics)
      self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
      self.assertNotIn('DetectionMasks_Precision/mAP', metrics) 
Developer: ahmetozlu, Project: vehicle_counting_tensorflow, Lines: 23, Source file: eval_util_test.py

Example 3: test_get_eval_metric_ops_for_coco_detections_and_masks

# Required import: from object_detection.protos import eval_pb2 [as alias]
# Alternatively: from object_detection.protos.eval_pb2 import EvalConfig [as alias]
def test_get_eval_metric_ops_for_coco_detections_and_masks(
      self, batch_size=1, max_gt_boxes=None, scale_to_absolute=False):
    eval_config = eval_pb2.EvalConfig()
    eval_config.metrics_set.extend(
        ['coco_detection_metrics', 'coco_mask_metrics'])
    categories = self._get_categories_list()
    eval_dict = self._make_evaluation_dict(batch_size=batch_size,
                                           max_gt_boxes=max_gt_boxes,
                                           scale_to_absolute=scale_to_absolute)
    metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
        eval_config, categories, eval_dict)
    _, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP']
    _, update_op_masks = metric_ops['DetectionMasks_Precision/mAP']

    with self.test_session() as sess:
      metrics = {}
      for key, (value_op, _) in metric_ops.iteritems():
        metrics[key] = value_op
      sess.run(update_op_boxes)
      sess.run(update_op_masks)
      metrics = sess.run(metrics)
      self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
      self.assertAlmostEqual(1.0, metrics['DetectionMasks_Precision/mAP']) 
Developer: ahmetozlu, Project: vehicle_counting_tensorflow, Lines: 25, Source file: eval_util_test.py

Example 4: test_get_eval_metric_ops_for_coco_detections_and_resized_masks

# Required import: from object_detection.protos import eval_pb2 [as alias]
# Alternatively: from object_detection.protos.eval_pb2 import EvalConfig [as alias]
def test_get_eval_metric_ops_for_coco_detections_and_resized_masks(
      self, batch_size=1, max_gt_boxes=None, scale_to_absolute=False):
    eval_config = eval_pb2.EvalConfig()
    eval_config.metrics_set.extend(
        ['coco_detection_metrics', 'coco_mask_metrics'])
    categories = self._get_categories_list()
    eval_dict = self._make_evaluation_dict(batch_size=batch_size,
                                           max_gt_boxes=max_gt_boxes,
                                           scale_to_absolute=scale_to_absolute,
                                           resized_groundtruth_masks=True)
    metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
        eval_config, categories, eval_dict)
    _, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP']
    _, update_op_masks = metric_ops['DetectionMasks_Precision/mAP']

    with self.test_session() as sess:
      metrics = {}
      for key, (value_op, _) in metric_ops.iteritems():
        metrics[key] = value_op
      sess.run(update_op_boxes)
      sess.run(update_op_masks)
      metrics = sess.run(metrics)
      self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
      self.assertAlmostEqual(1.0, metrics['DetectionMasks_Precision/mAP']) 
Developer: ahmetozlu, Project: vehicle_counting_tensorflow, Lines: 26, Source file: eval_util_test.py

Example 5: test_get_eval_metric_ops_for_coco_detections

# Required import: from object_detection.protos import eval_pb2 [as alias]
# Alternatively: from object_detection.protos.eval_pb2 import EvalConfig [as alias]
def test_get_eval_metric_ops_for_coco_detections(self):
    eval_config = eval_pb2.EvalConfig()
    eval_config.metrics_set.extend(['coco_detection_metrics'])
    categories = self._get_categories_list()
    eval_dict = self._make_evaluation_dict()
    metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
        eval_config, categories, eval_dict)
    _, update_op = metric_ops['DetectionBoxes_Precision/mAP']

    with self.test_session() as sess:
      metrics = {}
      for key, (value_op, _) in metric_ops.iteritems():
        metrics[key] = value_op
      sess.run(update_op)
      metrics = sess.run(metrics)
      print(metrics)
      self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
      self.assertNotIn('DetectionMasks_Precision/mAP', metrics) 
Developer: BMW-InnovationLab, Project: BMW-TensorFlow-Training-GUI, Lines: 20, Source file: eval_util_test.py

Example 6: test_get_eval_metric_ops_for_coco_detections_and_masks

# Required import: from object_detection.protos import eval_pb2 [as alias]
# Alternatively: from object_detection.protos.eval_pb2 import EvalConfig [as alias]
def test_get_eval_metric_ops_for_coco_detections_and_masks(self):
    eval_config = eval_pb2.EvalConfig()
    eval_config.metrics_set.extend(
        ['coco_detection_metrics', 'coco_mask_metrics'])
    categories = self._get_categories_list()
    eval_dict = self._make_evaluation_dict()
    metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
        eval_config, categories, eval_dict)
    _, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP']
    _, update_op_masks = metric_ops['DetectionMasks_Precision/mAP']

    with self.test_session() as sess:
      metrics = {}
      for key, (value_op, _) in metric_ops.iteritems():
        metrics[key] = value_op
      sess.run(update_op_boxes)
      sess.run(update_op_masks)
      metrics = sess.run(metrics)
      self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
      self.assertAlmostEqual(1.0, metrics['DetectionMasks_Precision/mAP']) 
Developer: BMW-InnovationLab, Project: BMW-TensorFlow-Training-GUI, Lines: 22, Source file: eval_util_test.py

Example 7: test_get_eval_metric_ops_for_coco_detections_and_resized_masks

# Required import: from object_detection.protos import eval_pb2 [as alias]
# Alternatively: from object_detection.protos.eval_pb2 import EvalConfig [as alias]
def test_get_eval_metric_ops_for_coco_detections_and_resized_masks(self):
    eval_config = eval_pb2.EvalConfig()
    eval_config.metrics_set.extend(
        ['coco_detection_metrics', 'coco_mask_metrics'])
    categories = self._get_categories_list()
    eval_dict = self._make_evaluation_dict(resized_groundtruth_masks=True)
    metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
        eval_config, categories, eval_dict)
    _, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP']
    _, update_op_masks = metric_ops['DetectionMasks_Precision/mAP']

    with self.test_session() as sess:
      metrics = {}
      for key, (value_op, _) in metric_ops.iteritems():
        metrics[key] = value_op
      sess.run(update_op_boxes)
      sess.run(update_op_masks)
      metrics = sess.run(metrics)
      self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
      self.assertAlmostEqual(1.0, metrics['DetectionMasks_Precision/mAP']) 
Developer: BMW-InnovationLab, Project: BMW-TensorFlow-Training-GUI, Lines: 22, Source file: eval_util_test.py

Example 8: get_configs_from_multiple_files

# Required import: from object_detection.protos import eval_pb2 [as alias]
# Alternatively: from object_detection.protos.eval_pb2 import EvalConfig [as alias]
def get_configs_from_multiple_files():
  """Reads evaluation configuration from multiple config files.

  Reads the evaluation config from the following files:
    model_config: Read from --model_config_path
    eval_config: Read from --eval_config_path
    input_config: Read from --input_config_path

  Returns:
    model_config: a model_pb2.DetectionModel
    eval_config: a eval_pb2.EvalConfig
    input_config: a input_reader_pb2.InputReader
  """
  eval_config = eval_pb2.EvalConfig()
  with tf.gfile.GFile(FLAGS.eval_config_path, 'r') as f:
    text_format.Merge(f.read(), eval_config)

  model_config = model_pb2.DetectionModel()
  with tf.gfile.GFile(FLAGS.model_config_path, 'r') as f:
    text_format.Merge(f.read(), model_config)

  input_config = input_reader_pb2.InputReader()
  with tf.gfile.GFile(FLAGS.input_config_path, 'r') as f:
    text_format.Merge(f.read(), input_config)

  return model_config, eval_config, input_config 
Developer: ringringyi, Project: DOTA_models, Lines: 28, Source file: eval.py
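
The file passed via --eval_config_path is simply an EvalConfig message in protobuf text format. Below is a hedged sketch of parsing such a snippet in memory; the field values are illustrative only, and num_visualizations is assumed to be present in this version of eval.proto.

from google.protobuf import text_format
from object_detection.protos import eval_pb2

# Hypothetical contents of an eval config text file.
eval_config_text = """
metrics_set: "coco_detection_metrics"
num_visualizations: 10
"""
eval_config = eval_pb2.EvalConfig()
text_format.Merge(eval_config_text, eval_config)
print(eval_config.num_visualizations)  # -> 10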

Example 9: test_get_eval_metric_ops_raises_error_with_unsupported_metric

# Required import: from object_detection.protos import eval_pb2 [as alias]
# Alternatively: from object_detection.protos.eval_pb2 import EvalConfig [as alias]
def test_get_eval_metric_ops_raises_error_with_unsupported_metric(self):
    eval_config = eval_pb2.EvalConfig()
    eval_config.metrics_set.extend(['unsupported_metric'])
    categories = self._get_categories_list()
    eval_dict = self._make_evaluation_dict()
    with self.assertRaises(ValueError):
      eval_util.get_eval_metric_ops_for_evaluators(
          eval_config, categories, eval_dict) 
Developer: ahmetozlu, Project: vehicle_counting_tensorflow, Lines: 10, Source file: eval_util_test.py

Example 10: test_get_eval_metric_ops_for_evaluators

# Required import: from object_detection.protos import eval_pb2 [as alias]
# Alternatively: from object_detection.protos.eval_pb2 import EvalConfig [as alias]
def test_get_eval_metric_ops_for_evaluators(self):
    eval_config = eval_pb2.EvalConfig()
    eval_config.metrics_set.extend(
        ['coco_detection_metrics', 'coco_mask_metrics'])
    eval_config.include_metrics_per_category = True

    evaluator_options = eval_util.evaluator_options_from_eval_config(
        eval_config)
    self.assertTrue(evaluator_options['coco_detection_metrics'][
        'include_metrics_per_category'])
    self.assertTrue(evaluator_options['coco_mask_metrics'][
        'include_metrics_per_category']) 
Developer: ahmetozlu, Project: vehicle_counting_tensorflow, Lines: 14, Source file: eval_util_test.py

Example 11: test_get_evaluator_with_no_evaluator_options

# Required import: from object_detection.protos import eval_pb2 [as alias]
# Alternatively: from object_detection.protos.eval_pb2 import EvalConfig [as alias]
def test_get_evaluator_with_no_evaluator_options(self):
    eval_config = eval_pb2.EvalConfig()
    eval_config.metrics_set.extend(['coco_detection_metrics'])
    eval_config.include_metrics_per_category = True
    categories = self._get_categories_list()

    evaluator = eval_util.get_evaluators(
        eval_config, categories, evaluator_options=None)

    # Even though we are setting eval_config.include_metrics_per_category = True
    # this option is never passed into the DetectionEvaluator constructor (via
    # `evaluator_options`).
    self.assertFalse(evaluator[0]._include_metrics_per_category) 
Developer: ahmetozlu, Project: vehicle_counting_tensorflow, Lines: 15, Source file: eval_util_test.py

Example 12: _update_retain_original_images

# Required import: from object_detection.protos import eval_pb2 [as alias]
# Alternatively: from object_detection.protos.eval_pb2 import EvalConfig [as alias]
def _update_retain_original_images(eval_config, retain_original_images):
  """Updates eval config with option to retain original images.

  The eval_config object is updated in place, and hence not returned.

  Args:
    eval_config: A eval_pb2.EvalConfig.
    retain_original_images: Boolean indicating whether to retain original images
      in eval mode.
  """
  eval_config.retain_original_images = retain_original_images 
Developer: ahmetozlu, Project: vehicle_counting_tensorflow, Lines: 13, Source file: config_util.py
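
A minimal usage sketch for the helper above, assuming it is in scope (in the project it is a module-private function in config_util.py). Note that it mutates the config in place rather than returning a new one.

from object_detection.protos import eval_pb2

eval_config = eval_pb2.EvalConfig()
_update_retain_original_images(eval_config, True)  # helper defined above
assert eval_config.retain_original_images          # field was set in place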


Note: The object_detection.protos.eval_pb2.EvalConfig examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their original authors, and the source code remains under their copyright; consult each project's license before redistributing or reusing the code. Do not reproduce this article without permission.