This article collects typical usage examples of the Python method tensorflow.contrib.slim.python.slim.data.parallel_reader.get_data_files. If you are wondering what parallel_reader.get_data_files does, how to call it, or are looking for concrete usage examples, the curated code samples below may help. You can also read further about the module tensorflow.contrib.slim.python.slim.data.parallel_reader in which this method is defined.
Four code examples of parallel_reader.get_data_files are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
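Before the examples, here is a minimal sketch of how get_data_files itself behaves, assuming a TF 1.x environment where tf.contrib is available; the glob pattern below is a hypothetical placeholder. The function accepts a glob pattern, a comma-separated string, or a list of patterns and returns a flat list of matching file paths, raising ValueError when nothing matches.

from tensorflow.contrib.slim.python.slim.data import parallel_reader

# Hypothetical pattern; replace with the real path to your TFRecord shards.
# get_data_files expands the pattern(s) into a list of file paths and
# raises ValueError if no file matches.
data_files = parallel_reader.get_data_files('/tmp/tfrecords/train-*.tfrecord')
print(sorted(data_files))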
Example 1: print_config
# Required import: from tensorflow.contrib.slim.python.slim.data import parallel_reader [as alias]
# Or: from tensorflow.contrib.slim.python.slim.data.parallel_reader import get_data_files [as alias]
def print_config(flags, dataset, save_dir = None, print_to_file = True):
    def do_print(stream=None):
        print('\n# =========================================================================== #', file=stream)
        print('# Training flags:', file=stream)
        print('# =========================================================================== #', file=stream)
        pprint(flags.__flags, stream=stream)

        print('\n# =========================================================================== #', file=stream)
        print('# seglink net parameters:', file=stream)
        print('# =========================================================================== #', file=stream)
        vars = globals()
        for key in vars:
            var = vars[key]
            if util.dtype.is_number(var) or util.dtype.is_str(var) or util.dtype.is_list(var) or util.dtype.is_tuple(var):
                pprint('%s=%s'%(key, str(var)), stream = stream)

        print('\n# =========================================================================== #', file=stream)
        print('# Training | Evaluation dataset files:', file=stream)
        print('# =========================================================================== #', file=stream)
        data_files = parallel_reader.get_data_files(dataset.data_sources)
        pprint(sorted(data_files), stream=stream)
        print('', file=stream)

    do_print(None)

    if print_to_file:
        # Save to a text file as well.
        if save_dir is None:
            save_dir = flags.train_dir
        util.io.mkdir(save_dir)
        path = util.io.join_path(save_dir, 'training_config.txt')
        with open(path, "a") as out:
            do_print(out)
Example 2: _verify_all_data_sources_read
# Required import: from tensorflow.contrib.slim.python.slim.data import parallel_reader [as alias]
# Or: from tensorflow.contrib.slim.python.slim.data.parallel_reader import get_data_files [as alias]
def _verify_all_data_sources_read(self, shared_queue):
    with self.test_session():
        tfrecord_paths = test_utils.create_tfrecord_files(
            self.get_temp_dir(), num_files=3)

    num_readers = len(tfrecord_paths)
    p_reader = parallel_reader.ParallelReader(
        io_ops.TFRecordReader, shared_queue, num_readers=num_readers)

    data_files = parallel_reader.get_data_files(tfrecord_paths)
    filename_queue = input_lib.string_input_producer(data_files)
    key, value = p_reader.read(filename_queue)

    count0 = 0
    count1 = 0
    count2 = 0
    num_reads = 50

    sv = supervisor.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
        sv.start_queue_runners(sess)

        for _ in range(num_reads):
            current_key, _ = sess.run([key, value])
            if '0-of-3' in str(current_key):
                count0 += 1
            if '1-of-3' in str(current_key):
                count1 += 1
            if '2-of-3' in str(current_key):
                count2 += 1

    self.assertGreater(count0, 0)
    self.assertGreater(count1, 0)
    self.assertGreater(count2, 0)
    self.assertEquals(count0 + count1 + count2, num_reads)
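As a side note, the same module also exposes a higher-level parallel_read helper that bundles get_data_files, the filename queue, and ParallelReader into a single call. The snippet below is only a rough sketch under the TF 1.x contrib API, using a hypothetical file pattern.

from tensorflow.contrib.slim.python.slim.data import parallel_reader
from tensorflow.python.ops import io_ops

# Hypothetical pattern; parallel_read resolves it via get_data_files internally
# and reads records with several TFRecordReader instances sharing one queue.
key, value = parallel_reader.parallel_read(
    '/tmp/tfrecords/train-*.tfrecord',
    reader_class=io_ops.TFRecordReader,
    num_readers=4,
    shuffle=True)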
Example 3: print_configuration
# Required import: from tensorflow.contrib.slim.python.slim.data import parallel_reader [as alias]
# Or: from tensorflow.contrib.slim.python.slim.data.parallel_reader import get_data_files [as alias]
def print_configuration(flags, ssd_params, data_sources, save_dir=None):
    """Print the training configuration.
    """
    def print_config(stream=None):
        print('\n# =========================================================================== #', file=stream)
        print('# Training | Evaluation flags:', file=stream)
        print('# =========================================================================== #', file=stream)
        pprint(flags, stream=stream)

        print('\n# =========================================================================== #', file=stream)
        print('# SSD net parameters:', file=stream)
        print('# =========================================================================== #', file=stream)
        pprint(dict(ssd_params._asdict()), stream=stream)

        print('\n# =========================================================================== #', file=stream)
        print('# Training | Evaluation dataset files:', file=stream)
        print('# =========================================================================== #', file=stream)
        data_files = parallel_reader.get_data_files(data_sources)
        pprint(data_files, stream=stream)
        print('', file=stream)

    print_config(None)

    # Save to a text file as well.
    if save_dir is not None:
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        path = os.path.join(save_dir, 'training_config.txt')
        with open(path, "w") as out:
            print_config(out)
Example 4: print_config
# Required import: from tensorflow.contrib.slim.python.slim.data import parallel_reader [as alias]
# Or: from tensorflow.contrib.slim.python.slim.data.parallel_reader import get_data_files [as alias]
def print_config(flags, dataset, save_dir = None, print_to_file = True):
    def do_print(stream=None):
        print(util.log.get_date_str(), file = stream)
        print('\n# =========================================================================== #', file=stream)
        print('# Training flags:', file=stream)
        print('# =========================================================================== #', file=stream)

        def print_ckpt(path):
            ckpt = util.tf.get_latest_ckpt(path)
            if ckpt is not None:
                print('Resume Training from : %s'%(ckpt), file = stream)
                return True
            return False

        if not print_ckpt(flags.train_dir):
            print_ckpt(flags.checkpoint_path)

        pprint(flags.__flags, stream=stream)

        print('\n# =========================================================================== #', file=stream)
        print('# pixel_link net parameters:', file=stream)
        print('# =========================================================================== #', file=stream)
        vars = globals()
        for key in vars:
            var = vars[key]
            if util.dtype.is_number(var) or util.dtype.is_str(var) or util.dtype.is_list(var) or util.dtype.is_tuple(var):
                pprint('%s=%s'%(key, str(var)), stream = stream)

        print('\n# =========================================================================== #', file=stream)
        print('# Training | Evaluation dataset files:', file=stream)
        print('# =========================================================================== #', file=stream)
        data_files = parallel_reader.get_data_files(dataset.data_sources)
        pprint(sorted(data_files), stream=stream)
        print('', file=stream)

    do_print(None)

    if print_to_file:
        # Save to a text file as well.
        if save_dir is None:
            save_dir = flags.train_dir
        util.io.mkdir(save_dir)
        path = util.io.join_path(save_dir, 'training_config.txt')
        with open(path, "a") as out:
            do_print(out)