This page collects typical usage examples of the Python method tensorflow.contrib.slim.python.slim.data.parallel_reader.parallel_read. If you are unsure what parallel_reader.parallel_read does or how to use it, the curated code examples below may help. You can also explore other usages of its containing module, tensorflow.contrib.slim.python.slim.data.parallel_reader.
Four code examples of parallel_reader.parallel_read are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
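Before the examples, a minimal sketch of calling parallel_read directly may help orient you. This is an illustrative sketch for the TF 1.x contrib API, not code from any of the examples below: the file pattern data/train-*.tfrecord is a hypothetical placeholder, and the session boilerplate is the standard TF 1.x queue-runner pattern.

import tensorflow as tf
from tensorflow.contrib.slim.python.slim.data import parallel_reader

# Hypothetical file pattern; parallel_read also accepts a list of paths.
key, value = parallel_reader.parallel_read(
    'data/train-*.tfrecord',
    reader_class=tf.TFRecordReader,  # one instance per parallel reader
    num_readers=4,                   # readers feed a shared common queue
    shuffle=True)                    # dequeue in shuffled order

with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())  # needed if num_epochs is set
  coord = tf.train.Coordinator()
  threads = tf.train.start_queue_runners(sess=sess, coord=coord)
  serialized = sess.run(value)  # one serialized record per run() call
  coord.request_stop()
  coord.join(threads)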
Example 1: testTFRecordReader
# Required import: from tensorflow.contrib.slim.python.slim.data import parallel_reader [as alias]
# Or: from tensorflow.contrib.slim.python.slim.data.parallel_reader import parallel_read [as alias]
def testTFRecordReader(self):
  with self.test_session():
    self._tfrecord_paths = test_utils.create_tfrecord_files(
        self.get_temp_dir(), num_files=3)

  key, value = parallel_reader.parallel_read(
      self._tfrecord_paths, reader_class=io_ops.TFRecordReader, num_readers=3)

  sv = supervisor.Supervisor(logdir=self.get_temp_dir())
  with sv.prepare_or_wait_for_session() as sess:
    sv.start_queue_runners(sess)

    # Every record comes from one of the generated 'flowers' TFRecord files,
    # so each of the 100 reads should yield a key containing 'flowers'.
    flowers = 0
    num_reads = 100
    for _ in range(num_reads):
      current_key, _ = sess.run([key, value])
      if 'flowers' in str(current_key):
        flowers += 1
    self.assertGreater(flowers, 0)
    self.assertEqual(flowers, num_reads)
Example 2: __init__
# Required import: from tensorflow.contrib.slim.python.slim.data import parallel_reader [as alias]
# Or: from tensorflow.contrib.slim.python.slim.data.parallel_reader import parallel_read [as alias]
def __init__(self,
             dataset,
             num_readers=1,
             shuffle=True,
             num_epochs=None,
             common_queue_capacity=256,
             common_queue_min=128,
             seed=None):
  """Creates a DatasetDataProvider.

  Args:
    dataset: An instance of the Dataset class.
    num_readers: The number of parallel readers to use.
    shuffle: Whether to shuffle the data sources and common queue when
      reading.
    num_epochs: The number of times each data source is read. If left as None,
      the data will be cycled through indefinitely.
    common_queue_capacity: The capacity of the common queue.
    common_queue_min: The minimum number of elements in the common queue after
      a dequeue.
    seed: The seed to use if shuffling.
  """
  # Read serialized records in parallel; the record key is not needed here.
  _, data = parallel_reader.parallel_read(
      dataset.data_sources,
      reader_class=dataset.reader,
      num_epochs=num_epochs,
      num_readers=num_readers,
      shuffle=shuffle,
      capacity=common_queue_capacity,
      min_after_dequeue=common_queue_min,
      seed=seed)

  # Decode each record into the tensors the dataset's decoder exposes.
  items = dataset.decoder.list_items()
  tensors = dataset.decoder.decode(data, items)

  super(DatasetDataProvider, self).__init__(
      items_to_tensors=dict(zip(items, tensors)),
      num_samples=dataset.num_samples)
Example 3: __init__
# Required import: from tensorflow.contrib.slim.python.slim.data import parallel_reader [as alias]
# Or: from tensorflow.contrib.slim.python.slim.data.parallel_reader import parallel_read [as alias]
def __init__(self, dataset, num_readers=1, shuffle=True, num_epochs=None,
             common_queue_capacity=256, common_queue_min=128, seed=None):
  """Creates a DatasetDataProvider.

  Args:
    dataset: An instance of the Dataset class.
    num_readers: The number of parallel readers to use.
    shuffle: Whether to shuffle the data sources and common queue when
      reading.
    num_epochs: The number of times each data source is read. If left as None,
      the data will be cycled through indefinitely.
    common_queue_capacity: The capacity of the common queue.
    common_queue_min: The minimum number of elements in the common queue after
      a dequeue.
    seed: The seed to use if shuffling.
  """
  # Read serialized records in parallel; the record key is not needed here.
  _, data = parallel_reader.parallel_read(
      dataset.data_sources,
      reader_class=dataset.reader,
      num_epochs=num_epochs,
      num_readers=num_readers,
      shuffle=shuffle,
      capacity=common_queue_capacity,
      min_after_dequeue=common_queue_min,
      seed=seed)

  items = dataset.decoder.list_items()
  tensors = dataset.decoder.decode(data, items)

  super(DatasetDataProvider, self).__init__(
      items_to_tensors=dict(zip(items, tensors)),
      num_samples=dataset.num_samples)
Example 4: __init__
# Required import: from tensorflow.contrib.slim.python.slim.data import parallel_reader [as alias]
# Or: from tensorflow.contrib.slim.python.slim.data.parallel_reader import parallel_read [as alias]
def __init__(self,
             dataset,
             num_readers=1,
             reader_kwargs=None,
             shuffle=True,
             num_epochs=None,
             common_queue_capacity=256,
             common_queue_min=128,
             record_key='record_key',
             seed=None,
             scope=None):
  """Creates a DatasetDataProvider.

  Args:
    dataset: An instance of the Dataset class.
    num_readers: The number of parallel readers to use.
    reader_kwargs: An optional dict of kwargs for the reader.
    shuffle: Whether to shuffle the data sources and common queue when
      reading.
    num_epochs: The number of times each data source is read. If left as None,
      the data will be cycled through indefinitely.
    common_queue_capacity: The capacity of the common queue.
    common_queue_min: The minimum number of elements in the common queue after
      a dequeue.
    record_key: The item name to use for the dataset record keys in the
      provided tensors.
    seed: The seed to use if shuffling.
    scope: Optional name scope for the ops.

  Raises:
    ValueError: If `record_key` matches one of the items in the dataset.
  """
  key, data = parallel_reader.parallel_read(
      dataset.data_sources,
      reader_class=dataset.reader,
      num_epochs=num_epochs,
      num_readers=num_readers,
      reader_kwargs=reader_kwargs,
      shuffle=shuffle,
      capacity=common_queue_capacity,
      min_after_dequeue=common_queue_min,
      seed=seed,
      scope=scope)

  items = dataset.decoder.list_items()
  tensors = dataset.decoder.decode(data, items)

  if record_key in items:
    raise ValueError('The item name used for `record_key` cannot also be '
                     'used for a dataset item: %s' % record_key)

  # Expose the raw record key alongside the decoded items.
  items.append(record_key)
  tensors.append(key)

  super(DatasetDataProvider, self).__init__(
      items_to_tensors=dict(zip(items, tensors)),
      num_samples=dataset.num_samples)
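The constructors above are usually not invoked with hand-built arguments: callers pass DatasetDataProvider a slim Dataset and then pull decoded tensors out by item name. A minimal hedged sketch of that consumption pattern, assuming a hypothetical make_flowers_dataset() helper that returns a slim Dataset exposing 'image' and 'label' items:

from tensorflow.contrib.slim.python.slim.data import dataset_data_provider

dataset = make_flowers_dataset()  # hypothetical helper returning a slim Dataset
provider = dataset_data_provider.DatasetDataProvider(
    dataset, num_readers=4, shuffle=True)
# get() looks up decoded tensors by the item names the dataset's decoder exposes.
image, label = provider.get(['image', 'label'])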