

Python label_map_util.create_categories_from_labelmap Method Code Examples

This article collects typical usage examples of the Python method object_detection.utils.label_map_util.create_categories_from_labelmap. If you are unsure what label_map_util.create_categories_from_labelmap does, or how to call it in Python, the curated code examples below may help. You can also explore further usage examples from the containing module, object_detection.utils.label_map_util.


Below are 5 code examples of label_map_util.create_categories_from_labelmap, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
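
Before the project examples, here is a minimal standalone sketch of how the method is typically used. It is not taken from any of the projects below; it assumes the TensorFlow 1.x Object Detection API is installed and that 'label_map.pbtxt' is a placeholder path to your own label map file.

# Minimal usage sketch (assumptions: TF Object Detection API is installed,
# 'label_map.pbtxt' is a placeholder path to your own label map file).
from object_detection.utils import label_map_util

# Parse the label map into a list of category dicts, e.g.
# [{'id': 1, 'name': 'dog'}, {'id': 2, 'name': 'cat'}].
categories = label_map_util.create_categories_from_labelmap('label_map.pbtxt')

# The categories list is typically passed to evaluators, or converted into
# an index keyed by category id for visualization utilities.
category_index = label_map_util.create_category_index(categories)
print(category_index)  # e.g. {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}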

Example 1: test_create_categories_from_labelmap

# Required module import: from object_detection.utils import label_map_util [as alias]
# Or: from object_detection.utils.label_map_util import create_categories_from_labelmap [as alias]
def test_create_categories_from_labelmap(self):
    label_map_string = """
      item {
        id:1
        name:'dog'
      }
      item {
        id:2
        name:'cat'
      }
    """
    label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
    with tf.gfile.Open(label_map_path, 'wb') as f:
      f.write(label_map_string)

    categories = label_map_util.create_categories_from_labelmap(label_map_path)
    self.assertListEqual([{
        'name': u'dog',
        'id': 1
    }, {
        'name': u'cat',
        'id': 2
    }], categories) 
Developer ID: ahmetozlu, Project: vehicle_counting_tensorflow, Lines of code: 25, Source file: label_map_util_test.py

Example 2: read_data_and_evaluate

# Required module import: from object_detection.utils import label_map_util [as alias]
# Or: from object_detection.utils.label_map_util import create_categories_from_labelmap [as alias]
def read_data_and_evaluate(input_config, eval_config):
  """Reads pre-computed object detections and groundtruth from tf_record.

  Args:
    input_config: input config proto of type
      object_detection.protos.InputReader.
    eval_config: evaluation config proto of type
      object_detection.protos.EvalConfig.

  Returns:
    Evaluated detections metrics.

  Raises:
    ValueError: if input_reader type is not supported or metric type is unknown.
  """
  if input_config.WhichOneof('input_reader') == 'tf_record_input_reader':
    input_paths = input_config.tf_record_input_reader.input_path

    categories = label_map_util.create_categories_from_labelmap(
        input_config.label_map_path)

    object_detection_evaluators = evaluator.get_evaluators(
        eval_config, categories)
    # Support a single evaluator
    object_detection_evaluator = object_detection_evaluators[0]

    skipped_images = 0
    processed_images = 0
    for input_path in _generate_filenames(input_paths):
      tf.logging.info('Processing file: {0}'.format(input_path))

      record_iterator = tf.python_io.tf_record_iterator(path=input_path)
      data_parser = tf_example_parser.TfExampleDetectionAndGTParser()

      for string_record in record_iterator:
        tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 1000,
                               processed_images)
        processed_images += 1

        example = tf.train.Example()
        example.ParseFromString(string_record)
        decoded_dict = data_parser.parse(example)

        if decoded_dict:
          object_detection_evaluator.add_single_ground_truth_image_info(
              decoded_dict[standard_fields.DetectionResultFields.key],
              decoded_dict)
          object_detection_evaluator.add_single_detected_image_info(
              decoded_dict[standard_fields.DetectionResultFields.key],
              decoded_dict)
        else:
          skipped_images += 1
          tf.logging.info('Skipped images: {0}'.format(skipped_images))

    return object_detection_evaluator.evaluate()

  raise ValueError('Unsupported input_reader_config.') 
Developer ID: ahmetozlu, Project: vehicle_counting_tensorflow, Lines of code: 59, Source file: offline_eval_map_corloc.py

Example 3: main

# Required module import: from object_detection.utils import label_map_util [as alias]
# Or: from object_detection.utils.label_map_util import create_categories_from_labelmap [as alias]
def main(unused_argv):
  assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
  assert FLAGS.eval_dir, '`eval_dir` is missing.'
  tf.gfile.MakeDirs(FLAGS.eval_dir)
  if FLAGS.pipeline_config_path:
    configs = config_util.get_configs_from_pipeline_file(
        FLAGS.pipeline_config_path)
    tf.gfile.Copy(
        FLAGS.pipeline_config_path,
        os.path.join(FLAGS.eval_dir, 'pipeline.config'),
        overwrite=True)
  else:
    configs = config_util.get_configs_from_multiple_files(
        model_config_path=FLAGS.model_config_path,
        eval_config_path=FLAGS.eval_config_path,
        eval_input_config_path=FLAGS.input_config_path)
    for name, config in [('model.config', FLAGS.model_config_path),
                         ('eval.config', FLAGS.eval_config_path),
                         ('input.config', FLAGS.input_config_path)]:
      tf.gfile.Copy(config, os.path.join(FLAGS.eval_dir, name), overwrite=True)

  model_config = configs['model']
  eval_config = configs['eval_config']
  input_config = configs['eval_input_config']
  if FLAGS.eval_training_data:
    input_config = configs['train_input_config']

  model_fn = functools.partial(
      model_builder.build, model_config=model_config, is_training=False)

  def get_next(config):
    return dataset_builder.make_initializable_iterator(
        dataset_builder.build(config)).get_next()

  create_input_dict_fn = functools.partial(get_next, input_config)

  categories = label_map_util.create_categories_from_labelmap(
      input_config.label_map_path)

  if FLAGS.run_once:
    eval_config.max_evals = 1

  graph_rewriter_fn = None
  if 'graph_rewriter_config' in configs:
    graph_rewriter_fn = graph_rewriter_builder.build(
        configs['graph_rewriter_config'], is_training=False)

  evaluator.evaluate(
      create_input_dict_fn,
      model_fn,
      eval_config,
      categories,
      FLAGS.checkpoint_dir,
      FLAGS.eval_dir,
      graph_hook_fn=graph_rewriter_fn) 
Developer ID: ahmetozlu, Project: vehicle_counting_tensorflow, Lines of code: 57, Source file: eval.py

Example 4: evaluate

# Required module import: from object_detection.utils import label_map_util [as alias]
# Or: from object_detection.utils.label_map_util import create_categories_from_labelmap [as alias]
def evaluate(res_dir, annotations, label_map_path, full_report):
  '''
  Calculate OID metrics via evaluator class included in TF models repository
  https://github.com/tensorflow/models/tree/master/research/object_detection/metrics

  Reads pre-computed object detections and groundtruth.

  Args:
    res_dir: pre-computed object detections directory
    annotations: groundtruth (file with annotations)
    label_map_path: labelmap file
    full_report: if True, print progress for every processed detection file

  Returns:
    Evaluated detections metrics.
  '''
  class EvaluatorConfig:
    metrics_set = ['open_images_V2_detection_metrics']

  eval_config = EvaluatorConfig()

  categories = label_map_util.create_categories_from_labelmap(label_map_path)
  class_map = label_map_util.get_label_map_dict(label_map_path, False, False)

  object_detection_evaluators = evaluator.get_evaluators(
      eval_config, categories)
  # Support a single evaluator
  object_detection_evaluator = object_detection_evaluators[0]

  print('Loading annotations...')
  ann = get_annotations(annotations, class_map)

  files = ck_utils.get_files(res_dir)
  for file_index, file_name in enumerate(files):
    if full_report:
      print('Loading detections and annotations for {} ({} of {}) ...'.format(file_name, file_index+1, len(files)))
    elif (file_index+1) % 100 == 0:
      print('Loading detections and annotations: {} of {} ...'.format(file_index+1, len(files)))
    det_file = os.path.join(res_dir, file_name)
    key = os.path.splitext(file_name)[0]
    detection = new_detection(key)
    fill_annotations(detection, ann[key])
    fill_detection(detection, det_file)

    object_detection_evaluator.add_single_ground_truth_image_info(
        detection[standard_fields.DetectionResultFields.key],
        detection)
    object_detection_evaluator.add_single_detected_image_info(
        detection[standard_fields.DetectionResultFields.key],
        detection)

  all_metrics = object_detection_evaluator.evaluate()
  mAP = all_metrics['OpenImagesV2_Precision/mAP@0.5IOU']

  return mAP, 0, all_metrics 
Developer ID: ctuning, Project: ck-mlperf, Lines of code: 56, Source file: calc_metrics_oid.py

Example 5: read_data_and_evaluate

# Required module import: from object_detection.utils import label_map_util [as alias]
# Or: from object_detection.utils.label_map_util import create_categories_from_labelmap [as alias]
def read_data_and_evaluate(input_config, eval_config):
  """Reads pre-computed object detections and groundtruth from tf_record.

  Args:
    input_config: input config proto of type
      object_detection.protos.InputReader.
    eval_config: evaluation config proto of type
      object_detection.protos.EvalConfig.

  Returns:
    Evaluated detections metrics.

  Raises:
    ValueError: if input_reader type is not supported or metric type is unknown.
  """
  if input_config.WhichOneof('input_reader') == 'tf_record_input_reader':
    input_paths = input_config.tf_record_input_reader.input_path

    categories = label_map_util.create_categories_from_labelmap(
        input_config.label_map_path)

    object_detection_evaluators = eval_util.get_evaluators(
        eval_config, categories)
    # Support a single evaluator
    object_detection_evaluator = object_detection_evaluators[0]

    skipped_images = 0
    processed_images = 0
    for input_path in _generate_filenames(input_paths):
      tf.logging.info('Processing file: {0}'.format(input_path))

      record_iterator = tf.python_io.tf_record_iterator(path=input_path)
      data_parser = tf_example_parser.TfExampleDetectionAndGTParser()

      for string_record in record_iterator:
        tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 1000,
                               processed_images)
        processed_images += 1

        example = tf.train.Example()
        example.ParseFromString(string_record)
        decoded_dict = data_parser.parse(example)

        if decoded_dict:
          object_detection_evaluator.add_single_ground_truth_image_info(
              decoded_dict[standard_fields.DetectionResultFields.key],
              decoded_dict)
          object_detection_evaluator.add_single_detected_image_info(
              decoded_dict[standard_fields.DetectionResultFields.key],
              decoded_dict)
        else:
          skipped_images += 1
          tf.logging.info('Skipped images: {0}'.format(skipped_images))

    return object_detection_evaluator.evaluate()

  raise ValueError('Unsupported input_reader_config.') 
Developer ID: tensorflow, Project: models, Lines of code: 59, Source file: offline_eval_map_corloc.py


Note: the object_detection.utils.label_map_util.create_categories_from_labelmap examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code; do not republish without permission.