This page collects typical usage examples of the Python method object_detection.utils.dataset_util.make_initializable_iterator. If you are wondering what dataset_util.make_initializable_iterator does or how to use it, the curated code examples below may help. You can also explore the other members of the object_detection.utils.dataset_util module.
The following presents 11 code examples of dataset_util.make_initializable_iterator, sorted by popularity by default.
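Before diving into the examples, here is a minimal sketch of the core pattern they all share (assuming TensorFlow 1.x graph mode; the toy dataset and values are illustrative only). In the Object Detection API, make_initializable_iterator creates the dataset's initializable iterator and registers its initializer in the tf.GraphKeys.TABLE_INITIALIZERS collection, so a single tf.tables_initializer() run initializes lookup tables and the iterator together:

import tensorflow as tf
from object_detection.utils import dataset_util

# A toy in-memory dataset, purely for illustration.
dataset = tf.data.Dataset.from_tensor_slices([10, 20, 30])
data = dataset_util.make_initializable_iterator(dataset).get_next()

with tf.Session() as sess:
  # Runs all registered table initializers; because the iterator's
  # initializer was added to the same collection, this also readies
  # the iterator for get_next() calls.
  sess.run(tf.tables_initializer())
  print(sess.run(data))  # -> 10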
Example 1: test_build_tf_record_input_reader_and_load_instance_masks
# Required import: from object_detection.utils import dataset_util [as alias]
# Or: from object_detection.utils.dataset_util import make_initializable_iterator [as alias]
def test_build_tf_record_input_reader_and_load_instance_masks(self):
  tf_record_path = self.create_tf_record()
  input_reader_text_proto = """
    shuffle: false
    num_readers: 1
    load_instance_masks: true
    tf_record_input_reader {{
      input_path: '{0}'
    }}
  """.format(tf_record_path)
  input_reader_proto = input_reader_pb2.InputReader()
  text_format.Merge(input_reader_text_proto, input_reader_proto)
  tensor_dict = dataset_util.make_initializable_iterator(
      dataset_builder.build(input_reader_proto, batch_size=1)).get_next()
  sv = tf.train.Supervisor(logdir=self.get_temp_dir())
  with sv.prepare_or_wait_for_session() as sess:
    sv.start_queue_runners(sess)
    output_dict = sess.run(tensor_dict)
  self.assertAllEqual(
      (1, 1, 4, 5),
      output_dict[fields.InputDataFields.groundtruth_instance_masks].shape)
Example 2: test_build_tf_record_input_reader_with_additional_channels
# Required import: from object_detection.utils import dataset_util [as alias]
# Or: from object_detection.utils.dataset_util import make_initializable_iterator [as alias]
def test_build_tf_record_input_reader_with_additional_channels(self):
  tf_record_path = self.create_tf_record(has_additional_channels=True)
  input_reader_text_proto = """
    shuffle: false
    num_readers: 1
    tf_record_input_reader {{
      input_path: '{0}'
    }}
  """.format(tf_record_path)
  input_reader_proto = input_reader_pb2.InputReader()
  text_format.Merge(input_reader_text_proto, input_reader_proto)
  tensor_dict = dataset_util.make_initializable_iterator(
      dataset_builder.build(
          input_reader_proto, batch_size=2,
          num_additional_channels=2)).get_next()
  sv = tf.train.Supervisor(logdir=self.get_temp_dir())
  with sv.prepare_or_wait_for_session() as sess:
    sv.start_queue_runners(sess)
    output_dict = sess.run(tensor_dict)
  self.assertEquals((2, 4, 5, 5),
                    output_dict[fields.InputDataFields.image].shape)
Example 3: test_build_tf_record_input_reader_and_load_instance_masks
# Required import: from object_detection.utils import dataset_util [as alias]
# Or: from object_detection.utils.dataset_util import make_initializable_iterator [as alias]
def test_build_tf_record_input_reader_and_load_instance_masks(self):
  tf_record_path = self.create_tf_record()
  input_reader_text_proto = """
    shuffle: false
    num_readers: 1
    load_instance_masks: true
    tf_record_input_reader {{
      input_path: '{0}'
    }}
  """.format(tf_record_path)
  input_reader_proto = input_reader_pb2.InputReader()
  text_format.Merge(input_reader_text_proto, input_reader_proto)
  tensor_dict = dataset_util.make_initializable_iterator(
      dataset_builder.build(input_reader_proto)).get_next()
  sv = tf.train.Supervisor(logdir=self.get_temp_dir())
  with sv.prepare_or_wait_for_session() as sess:
    sv.start_queue_runners(sess)
    output_dict = sess.run(tensor_dict)
  self.assertAllEqual(
      (1, 4, 5),
      output_dict[fields.InputDataFields.groundtruth_instance_masks].shape)
Example 4: test_make_initializable_iterator_with_hashTable
# Required import: from object_detection.utils import dataset_util [as alias]
# Or: from object_detection.utils.dataset_util import make_initializable_iterator [as alias]
def test_make_initializable_iterator_with_hashTable(self):
  keys = [1, 0, -1]
  dataset = tf.data.Dataset.from_tensor_slices([[1, 2, -1, 5]])
  table = tf.contrib.lookup.HashTable(
      initializer=tf.contrib.lookup.KeyValueTensorInitializer(
          keys=keys,
          values=list(reversed(keys))),
      default_value=100)
  dataset = dataset.map(table.lookup)
  data = dataset_util.make_initializable_iterator(dataset).get_next()
  init = tf.tables_initializer()
  with self.test_session() as sess:
    sess.run(init)
    self.assertAllEqual(sess.run(data), [-1, 100, 1, 100])
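Note that the single tf.tables_initializer() call above is enough to initialize both the HashTable and the dataset iterator: as sketched in the introduction, make_initializable_iterator registers the iterator's initializer in the same TABLE_INITIALIZERS collection that the table initializer op collects.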
Example 5: test_build_tf_record_input_reader
# Required import: from object_detection.utils import dataset_util [as alias]
# Or: from object_detection.utils.dataset_util import make_initializable_iterator [as alias]
def test_build_tf_record_input_reader(self):
  tf_record_path = self.create_tf_record()
  input_reader_text_proto = """
    shuffle: false
    num_readers: 1
    tf_record_input_reader {{
      input_path: '{0}'
    }}
  """.format(tf_record_path)
  input_reader_proto = input_reader_pb2.InputReader()
  text_format.Merge(input_reader_text_proto, input_reader_proto)
  tensor_dict = dataset_util.make_initializable_iterator(
      dataset_builder.build(input_reader_proto, batch_size=1)).get_next()
  sv = tf.train.Supervisor(logdir=self.get_temp_dir())
  with sv.prepare_or_wait_for_session() as sess:
    sv.start_queue_runners(sess)
    output_dict = sess.run(tensor_dict)
  self.assertTrue(
      fields.InputDataFields.groundtruth_instance_masks not in output_dict)
  self.assertEquals((1, 4, 5, 3),
                    output_dict[fields.InputDataFields.image].shape)
  self.assertAllEqual([[2]],
                      output_dict[fields.InputDataFields.groundtruth_classes])
  self.assertEquals(
      (1, 1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape)
  self.assertAllEqual(
      [0.0, 0.0, 1.0, 1.0],
      output_dict[fields.InputDataFields.groundtruth_boxes][0][0])
Example 6: get_next
# Required import: from object_detection.utils import dataset_util [as alias]
# Or: from object_detection.utils.dataset_util import make_initializable_iterator [as alias]
def get_next(self, config):
  return dataset_util.make_initializable_iterator(
      dataset_builder.build(config)).get_next()
Example 7: test_build_tf_record_input_reader
# Required import: from object_detection.utils import dataset_util [as alias]
# Or: from object_detection.utils.dataset_util import make_initializable_iterator [as alias]
def test_build_tf_record_input_reader(self):
  tf_record_path = self.create_tf_record()
  input_reader_text_proto = """
    shuffle: false
    num_readers: 1
    tf_record_input_reader {{
      input_path: '{0}'
    }}
  """.format(tf_record_path)
  input_reader_proto = input_reader_pb2.InputReader()
  text_format.Merge(input_reader_text_proto, input_reader_proto)
  tensor_dict = dataset_util.make_initializable_iterator(
      dataset_builder.build(input_reader_proto)).get_next()
  sv = tf.train.Supervisor(logdir=self.get_temp_dir())
  with sv.prepare_or_wait_for_session() as sess:
    sv.start_queue_runners(sess)
    output_dict = sess.run(tensor_dict)
  self.assertTrue(
      fields.InputDataFields.groundtruth_instance_masks not in output_dict)
  self.assertEquals((4, 5, 3),
                    output_dict[fields.InputDataFields.image].shape)
  self.assertEquals([2],
                    output_dict[fields.InputDataFields.groundtruth_classes])
  self.assertEquals(
      (1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape)
  self.assertAllEqual(
      [0.0, 0.0, 1.0, 1.0],
      output_dict[fields.InputDataFields.groundtruth_boxes][0])
Example 8: test_build_tf_record_input_reader_with_batch_size_two_and_masks
# Required import: from object_detection.utils import dataset_util [as alias]
# Or: from object_detection.utils.dataset_util import make_initializable_iterator [as alias]
def test_build_tf_record_input_reader_with_batch_size_two_and_masks(self):
  tf_record_path = self.create_tf_record()
  input_reader_text_proto = """
    shuffle: false
    num_readers: 1
    load_instance_masks: true
    tf_record_input_reader {{
      input_path: '{0}'
    }}
  """.format(tf_record_path)
  input_reader_proto = input_reader_pb2.InputReader()
  text_format.Merge(input_reader_text_proto, input_reader_proto)

  def one_hot_class_encoding_fn(tensor_dict):
    tensor_dict[fields.InputDataFields.groundtruth_classes] = tf.one_hot(
        tensor_dict[fields.InputDataFields.groundtruth_classes] - 1, depth=3)
    return tensor_dict

  tensor_dict = dataset_util.make_initializable_iterator(
      dataset_builder.build(
          input_reader_proto,
          transform_input_data_fn=one_hot_class_encoding_fn,
          batch_size=2,
          max_num_boxes=2,
          num_classes=3,
          spatial_image_shape=[4, 5])).get_next()
  sv = tf.train.Supervisor(logdir=self.get_temp_dir())
  with sv.prepare_or_wait_for_session() as sess:
    sv.start_queue_runners(sess)
    output_dict = sess.run(tensor_dict)
  self.assertAllEqual(
      [2, 2, 4, 5],
      output_dict[fields.InputDataFields.groundtruth_instance_masks].shape)
Example 9: main
# Required import: from object_detection.utils import dataset_util [as alias]
# Or: from object_detection.utils.dataset_util import make_initializable_iterator [as alias]
def main(unused_argv):
  assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
  assert FLAGS.eval_dir, '`eval_dir` is missing.'
  tf.gfile.MakeDirs(FLAGS.eval_dir)
  if FLAGS.pipeline_config_path:
    configs = config_util.get_configs_from_pipeline_file(
        FLAGS.pipeline_config_path)
    tf.gfile.Copy(FLAGS.pipeline_config_path,
                  os.path.join(FLAGS.eval_dir, 'pipeline.config'),
                  overwrite=True)
  else:
    configs = config_util.get_configs_from_multiple_files(
        model_config_path=FLAGS.model_config_path,
        eval_config_path=FLAGS.eval_config_path,
        eval_input_config_path=FLAGS.input_config_path)
    for name, config in [('model.config', FLAGS.model_config_path),
                         ('eval.config', FLAGS.eval_config_path),
                         ('input.config', FLAGS.input_config_path)]:
      tf.gfile.Copy(config,
                    os.path.join(FLAGS.eval_dir, name),
                    overwrite=True)

  model_config = configs['model']
  eval_config = configs['eval_config']
  input_config = configs['eval_input_config']
  if FLAGS.eval_training_data:
    input_config = configs['train_input_config']

  model_fn = functools.partial(
      model_builder.build,
      model_config=model_config,
      is_training=False)

  def get_next(config):
    return dataset_util.make_initializable_iterator(
        dataset_builder.build(config)).get_next()

  create_input_dict_fn = functools.partial(get_next, input_config)

  label_map = label_map_util.load_labelmap(input_config.label_map_path)
  max_num_classes = max([item.id for item in label_map.item])
  categories = label_map_util.convert_label_map_to_categories(
      label_map, max_num_classes)

  if FLAGS.run_once:
    eval_config.max_evals = 1

  evaluator.evaluate(create_input_dict_fn, model_fn, eval_config, categories,
                     FLAGS.checkpoint_dir, FLAGS.eval_dir)
Example 10: test_build_tf_record_input_reader_with_batch_size_two
# Required import: from object_detection.utils import dataset_util [as alias]
# Or: from object_detection.utils.dataset_util import make_initializable_iterator [as alias]
def test_build_tf_record_input_reader_with_batch_size_two(self):
  tf_record_path = self.create_tf_record()
  input_reader_text_proto = """
    shuffle: false
    num_readers: 1
    tf_record_input_reader {{
      input_path: '{0}'
    }}
  """.format(tf_record_path)
  input_reader_proto = input_reader_pb2.InputReader()
  text_format.Merge(input_reader_text_proto, input_reader_proto)

  def one_hot_class_encoding_fn(tensor_dict):
    tensor_dict[fields.InputDataFields.groundtruth_classes] = tf.one_hot(
        tensor_dict[fields.InputDataFields.groundtruth_classes] - 1, depth=3)
    return tensor_dict

  tensor_dict = dataset_util.make_initializable_iterator(
      dataset_builder.build(
          input_reader_proto,
          transform_input_data_fn=one_hot_class_encoding_fn,
          batch_size=2,
          max_num_boxes=2,
          num_classes=3,
          spatial_image_shape=[4, 5])).get_next()
  sv = tf.train.Supervisor(logdir=self.get_temp_dir())
  with sv.prepare_or_wait_for_session() as sess:
    sv.start_queue_runners(sess)
    output_dict = sess.run(tensor_dict)
  self.assertAllEqual([2, 4, 5, 3],
                      output_dict[fields.InputDataFields.image].shape)
  self.assertAllEqual(
      [2, 2, 3],
      output_dict[fields.InputDataFields.groundtruth_classes].shape)
  self.assertAllEqual(
      [2, 2, 4],
      output_dict[fields.InputDataFields.groundtruth_boxes].shape)
  self.assertAllEqual(
      [[[0.0, 0.0, 1.0, 1.0],
        [0.0, 0.0, 0.0, 0.0]],
       [[0.0, 0.0, 1.0, 1.0],
        [0.0, 0.0, 0.0, 0.0]]],
      output_dict[fields.InputDataFields.groundtruth_boxes])
Example 11: main
# Required import: from object_detection.utils import dataset_util [as alias]
# Or: from object_detection.utils.dataset_util import make_initializable_iterator [as alias]
def main(unused_argv):
  assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
  assert FLAGS.eval_dir, '`eval_dir` is missing.'
  tf.gfile.MakeDirs(FLAGS.eval_dir)
  if FLAGS.pipeline_config_path:
    configs = config_util.get_configs_from_pipeline_file(
        FLAGS.pipeline_config_path)
    tf.gfile.Copy(FLAGS.pipeline_config_path,
                  os.path.join(FLAGS.eval_dir, 'pipeline.config'),
                  overwrite=True)
  else:
    configs = config_util.get_configs_from_multiple_files(
        model_config_path=FLAGS.model_config_path,
        eval_config_path=FLAGS.eval_config_path,
        eval_input_config_path=FLAGS.input_config_path)
    for name, config in [('model.config', FLAGS.model_config_path),
                         ('eval.config', FLAGS.eval_config_path),
                         ('input.config', FLAGS.input_config_path)]:
      tf.gfile.Copy(config,
                    os.path.join(FLAGS.eval_dir, name),
                    overwrite=True)

  model_config = configs['model']
  eval_config = configs['eval_config']
  input_config = configs['eval_input_config']
  if FLAGS.eval_training_data:
    input_config = configs['train_input_config']

  model_fn = functools.partial(
      model_builder.build,
      model_config=model_config,
      is_training=False)

  def get_next(config):
    return dataset_util.make_initializable_iterator(
        dataset_builder.build(config)).get_next()

  create_input_dict_fn = functools.partial(get_next, input_config)

  label_map = label_map_util.load_labelmap(input_config.label_map_path)
  max_num_classes = max([item.id for item in label_map.item])
  categories = label_map_util.convert_label_map_to_categories(
      label_map, max_num_classes)

  if FLAGS.run_once:
    eval_config.max_evals = 1

  graph_rewriter_fn = None
  if 'graph_rewriter_config' in configs:
    graph_rewriter_fn = graph_rewriter_builder.build(
        configs['graph_rewriter_config'], is_training=False)

  evaluator.evaluate(
      create_input_dict_fn,
      model_fn,
      eval_config,
      categories,
      FLAGS.checkpoint_dir,
      FLAGS.eval_dir,
      graph_hook_fn=graph_rewriter_fn)