This page collects typical usage examples of the Python method object_detection.utils.dataset_util.read_examples_list. If you have been wondering what dataset_util.read_examples_list does, how to call it, or where it is used in practice, the curated code examples below may help. You can also read further about its parent module, object_detection.utils.dataset_util.
Below are 6 code examples of dataset_util.read_examples_list.
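For context: read_examples_list reads a text file in which each line begins with an example identifier, optionally followed by extra tokens (the format of PASCAL VOC ImageSets files, e.g. "example1 1"), and returns the list of identifiers. A minimal sketch of that behavior (paraphrased for illustration, not the library source verbatim):

import tensorflow as tf

def read_examples_list(path):
    # Return the first whitespace-separated token of every line,
    # e.g. the line 'example1 1' yields 'example1'.
    with tf.gfile.GFile(path) as fid:
        lines = fid.readlines()
    return [line.strip().split(' ')[0] for line in lines]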
Example 1: create_records
# Required import: from object_detection.utils import dataset_util [as alias]
# Or: from object_detection.utils.dataset_util import read_examples_list [as alias]
def create_records(data_dir, to_path='data/train.tfrecord'):
    # get_fun_paths, xml_to_dict, dict_to_tf_example and label_map_dict are
    # defined elsewhere in the source module (see the sketch after this example).
    annotations_dir, examples_path = get_fun_paths(data_dir)
    writer = tf.python_io.TFRecordWriter(to_path)
    labels = {}
    examples_list = dataset_util.read_examples_list(examples_path)
    assert len(examples_list) > 0, examples_path
    for i, example in enumerate(examples_list):
        path = os.path.join(annotations_dir, example + '.xml')
        data = xml_to_dict(path)
        assert 'object' in data, data['filename']
        labels[i] = [k['name'] for k in data['object']]
        try:
            tf_example = dict_to_tf_example(data, data_dir, label_map_dict)
        except Exception as e:  # TODO(SS): remove me
            print(e)
            import pdb; pdb.set_trace()
        writer.write(tf_example.SerializeToString())
    writer.close()
    return labels  # to inspect a bit
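Example 1 calls two helpers, get_fun_paths and xml_to_dict, that are not part of the excerpt (label_map_dict is likewise a module-level name there). A plausible minimal sketch, assuming a VOC-style directory layout; the paths and behavior here are assumptions for illustration, not the original project's code:

import os
import tensorflow as tf
from lxml import etree
from object_detection.utils import dataset_util

def get_fun_paths(data_dir):
    # Hypothetical layout: XML annotations under 'annotations/',
    # example names listed one per line in 'trainval.txt'.
    annotations_dir = os.path.join(data_dir, 'annotations')
    examples_path = os.path.join(data_dir, 'trainval.txt')
    return annotations_dir, examples_path

def xml_to_dict(path):
    # Parse a PASCAL VOC XML annotation into a plain dict, mirroring the
    # pattern used in Example 3 below.
    with tf.gfile.GFile(path, 'r') as fid:
        xml = etree.fromstring(fid.read())
    return dataset_util.recursive_parse_xml_to_dict(xml)['annotation']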
Example 2: test_read_examples_list
# Required import: from object_detection.utils import dataset_util [as alias]
# Or: from object_detection.utils.dataset_util import read_examples_list [as alias]
def test_read_examples_list(self):
    example_list_data = """example1 1\nexample2 2"""
    example_list_path = os.path.join(self.get_temp_dir(), 'examples.txt')
    with tf.gfile.Open(example_list_path, 'wb') as f:
        f.write(example_list_data)

    examples = dataset_util.read_examples_list(example_list_path)
    self.assertListEqual(['example1', 'example2'], examples)
Example 3: main
# Required import: from object_detection.utils import dataset_util [as alias]
# Or: from object_detection.utils.dataset_util import read_examples_list [as alias]
def main(_):
    if FLAGS.set not in SETS:
        raise ValueError('set must be in : {}'.format(SETS))
    if FLAGS.year not in YEARS:
        raise ValueError('year must be in : {}'.format(YEARS))

    data_dir = FLAGS.data_dir
    years = ['VOC2007', 'VOC2012']
    if FLAGS.year != 'merged':
        years = [FLAGS.year]

    writer = tf.python_io.TFRecordWriter(FLAGS.output_path)
    label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path)

    for year in years:
        logging.info('Reading from PASCAL %s dataset.', year)
        examples_path = os.path.join(data_dir, year, 'ImageSets', 'Main',
                                     'aeroplane_' + FLAGS.set + '.txt')
        annotations_dir = os.path.join(data_dir, year, FLAGS.annotations_dir)
        examples_list = dataset_util.read_examples_list(examples_path)
        for idx, example in enumerate(examples_list):
            if idx % 100 == 0:
                logging.info('On image %d of %d', idx, len(examples_list))
            path = os.path.join(annotations_dir, example + '.xml')
            with tf.gfile.GFile(path, 'r') as fid:
                xml_str = fid.read()
            xml = etree.fromstring(xml_str)
            data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']
            tf_example = dict_to_tf_example(data, FLAGS.data_dir, label_map_dict,
                                            FLAGS.ignore_difficult_instances)
            writer.write(tf_example.SerializeToString())

    writer.close()
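Example 3 depends on module-level flags and the SETS/YEARS constants, which the excerpt omits. In the create_pascal_tf_record.py script this example is taken from, they look roughly like this (a sketch; verify against the version of the script you are using):

flags = tf.app.flags
flags.DEFINE_string('data_dir', '', 'Root directory to raw PASCAL VOC dataset.')
flags.DEFINE_string('set', 'train', 'Convert training set, validation set or merged set.')
flags.DEFINE_string('annotations_dir', 'Annotations', '(Relative) path to annotations directory.')
flags.DEFINE_string('year', 'VOC2007', 'Desired challenge year.')
flags.DEFINE_string('output_path', '', 'Path to output TFRecord.')
flags.DEFINE_boolean('ignore_difficult_instances', False, 'Whether to ignore difficult instances.')
flags.DEFINE_string('label_map_path', 'data/pascal_label_map.pbtxt', 'Path to label map proto.')
FLAGS = flags.FLAGS

SETS = ['train', 'val', 'trainval', 'test']
YEARS = ['VOC2007', 'VOC2012', 'merged']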
Example 4: main
# Required import: from object_detection.utils import dataset_util [as alias]
# Or: from object_detection.utils.dataset_util import read_examples_list [as alias]
def main(_):
    data_dir = FLAGS.data_dir
    label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path)

    logging.info('Reading from Pet dataset.')
    image_dir = os.path.join(data_dir, 'images')
    annotations_dir = os.path.join(data_dir, 'annotations')
    examples_path = os.path.join(annotations_dir, 'trainval.txt')
    examples_list = dataset_util.read_examples_list(examples_path)

    # Test images are not included in the downloaded data set, so we shall
    # perform our own split.
    random.seed(42)
    random.shuffle(examples_list)
    num_examples = len(examples_list)
    num_train = int(0.7 * num_examples)
    train_examples = examples_list[:num_train]
    val_examples = examples_list[num_train:]
    logging.info('%d training and %d validation examples.',
                 len(train_examples), len(val_examples))

    train_output_path = os.path.join(FLAGS.output_dir, 'pet_train.record')
    val_output_path = os.path.join(FLAGS.output_dir, 'pet_val.record')
    create_tf_record(train_output_path, label_map_dict, annotations_dir,
                     image_dir, train_examples)
    create_tf_record(val_output_path, label_map_dict, annotations_dir,
                     image_dir, val_examples)
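Examples 4 and 5 call create_tf_record, which is defined elsewhere in create_pet_tf_record.py (Example 6's variant additionally takes a faces_only argument). A sketch of what it does there, based on the upstream script (simplified; the exact dict_to_tf_example signature varies between versions):

def create_tf_record(output_filename, label_map_dict, annotations_dir,
                     image_dir, examples):
    writer = tf.python_io.TFRecordWriter(output_filename)
    for idx, example in enumerate(examples):
        if idx % 100 == 0:
            logging.info('On image %d of %d', idx, len(examples))
        path = os.path.join(annotations_dir, 'xmls', example + '.xml')
        if not os.path.exists(path):
            logging.warning('Could not find %s, ignoring example.', path)
            continue
        with tf.gfile.GFile(path, 'r') as fid:
            xml_str = fid.read()
        xml = etree.fromstring(xml_str)
        data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']
        tf_example = dict_to_tf_example(data, label_map_dict, image_dir)
        writer.write(tf_example.SerializeToString())
    writer.close()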
Example 5: main
# Required import: from object_detection.utils import dataset_util [as alias]
# Or: from object_detection.utils.dataset_util import read_examples_list [as alias]
def main(_):
    label_map_dict = label_map_util.get_label_map_dict('annotations/label_map.pbtxt')

    logging.info('Reading from Pet dataset.')
    image_dir = 'images'
    annotations_dir = 'annotations'
    examples_path = os.path.join(annotations_dir, 'trainval.txt')
    examples_list = dataset_util.read_examples_list(examples_path)

    # Test images are not included in the downloaded data set, so we shall
    # perform our own split.
    random.seed(42)
    random.shuffle(examples_list)
    num_examples = len(examples_list)
    num_train = int(0.7 * num_examples)
    train_examples = examples_list[:num_train]
    val_examples = examples_list[num_train:]
    logging.info('%d training and %d validation examples.',
                 len(train_examples), len(val_examples))

    train_output_path = 'train.record'
    val_output_path = 'val.record'
    create_tf_record(train_output_path, label_map_dict, annotations_dir,
                     image_dir, train_examples)
    create_tf_record(val_output_path, label_map_dict, annotations_dir,
                     image_dir, val_examples)
Example 6: main
# Required import: from object_detection.utils import dataset_util [as alias]
# Or: from object_detection.utils.dataset_util import read_examples_list [as alias]
def main(_):
    data_dir = FLAGS.data_dir
    label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path)

    logging.info('Reading from Pet dataset.')
    image_dir = os.path.join(data_dir, 'images')
    annotations_dir = os.path.join(data_dir, 'annotations')
    examples_path = os.path.join(annotations_dir, 'trainval.txt')
    examples_list = dataset_util.read_examples_list(examples_path)

    # Test images are not included in the downloaded data set, so we shall
    # perform our own split.
    random.seed(42)
    random.shuffle(examples_list)
    num_examples = len(examples_list)
    num_train = int(0.7 * num_examples)
    train_examples = examples_list[:num_train]
    val_examples = examples_list[num_train:]
    logging.info('%d training and %d validation examples.',
                 len(train_examples), len(val_examples))

    train_output_path = os.path.join(FLAGS.output_dir, 'pet_train.record')
    val_output_path = os.path.join(FLAGS.output_dir, 'pet_val.record')
    # NB: with this check, the *_with_masks record names are used when
    # faces_only is True; upstream create_pet_tf_record.py uses
    # 'if not FLAGS.faces_only' here instead.
    if FLAGS.faces_only:
        train_output_path = os.path.join(FLAGS.output_dir,
                                         'pet_train_with_masks.record')
        val_output_path = os.path.join(FLAGS.output_dir,
                                       'pet_val_with_masks.record')
    create_tf_record(train_output_path, label_map_dict, annotations_dir,
                     image_dir, train_examples, faces_only=FLAGS.faces_only)
    create_tf_record(val_output_path, label_map_dict, annotations_dir,
                     image_dir, val_examples, faces_only=FLAGS.faces_only)
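For reference, the pet-dataset conversion scripts in Examples 4 and 6 are typically invoked from the command line along these lines (the paths are placeholders to adapt to your setup):

python create_pet_tf_record.py \
    --label_map_path=data/pet_label_map.pbtxt \
    --data_dir=/path/to/pet_dataset \
    --output_dir=/path/to/output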