This article collects typical usage examples of the Python method tensorflow.python.ops.data_flow_ops.RandomShuffleQueue. If you are unsure what data_flow_ops.RandomShuffleQueue does or how to call it, the curated code examples below may help. You can also explore the enclosing module, tensorflow.python.ops.data_flow_ops, for related usage.
Shown below are 11 code examples of data_flow_ops.RandomShuffleQueue, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
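Before the project examples, here is a minimal, self-contained sketch of the API itself (the values are illustrative and not taken from any project below). A RandomShuffleQueue hands back elements in random order, and a dequeue blocks whenever it would leave fewer than min_after_dequeue elements buffered:

import tensorflow.compat.v1 as tf
from tensorflow.python.ops import data_flow_ops

tf.disable_eager_execution()  # the queue API is TF1 graph-mode only

queue = data_flow_ops.RandomShuffleQueue(
    capacity=10, min_after_dequeue=2, dtypes=[tf.int32], shapes=[[]])
enqueue = queue.enqueue_many([[1, 2, 3, 4, 5]])
dequeue = queue.dequeue()

with tf.Session() as sess:
    sess.run(enqueue)
    # With 5 elements buffered and min_after_dequeue=2, exactly 3 dequeues
    # succeed without blocking; the elements come out in random order.
    print([sess.run(dequeue) for _ in range(3)])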
Example 1: shuffle_join
# Required import: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import RandomShuffleQueue [as alias]
import tensorflow as tf
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops

# _dtypes and _enqueue_join are module-local helpers in the original project,
# analogous to those in tensorflow.python.training.input.
def shuffle_join(tensor_list_list, capacity,
                 min_ad, phase):
  name = 'shuffle_input'
  types = _dtypes(tensor_list_list)
  queue = data_flow_ops.RandomShuffleQueue(
      capacity=capacity, min_after_dequeue=min_ad,
      dtypes=types)
  # Build enqueue operations.
  _enqueue_join(queue, tensor_list_list)
  full = (math_ops.cast(math_ops.maximum(0, queue.size() - min_ad),
                        dtypes.float32) * (1. / (capacity - min_ad)))
  summary_name = (
      "queue/%s/fraction_over_%d_of_%d_full" %
      (name + '_' + phase, min_ad, capacity - min_ad))
  tf.summary.scalar(summary_name, full)
  dequeued = queue.dequeue(name='shuffle_dequeue')
  # dequeued = _deserialize_sparse_tensors(dequeued, sparse_info)
  return dequeued
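The full expression reports how far the queue sits above its shuffling floor: with capacity=256 and min_ad=128, for example, a queue size of 192 yields (192 - 128) / (256 - 128) = 0.5, so the summary reads 50% full. Note that shuffle_join only builds the graph; the enqueue ops registered by _enqueue_join still have to be driven at session time, typically via tf.train.start_queue_runners.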
Example 2: _shuffle_batch
# Required import: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import RandomShuffleQueue [as alias]
def _shuffle_batch(tensors, batch_size, capacity, min_after_dequeue,
                   keep_input, num_threads=1, seed=None, enqueue_many=False,
                   shapes=None, allow_smaller_final_batch=False,
                   shared_name=None, name=None):
  """Helper function for `shuffle_batch` and `maybe_shuffle_batch`."""
  tensor_list = _as_tensor_list(tensors)
  with ops.name_scope(name, "shuffle_batch",
                      list(tensor_list) + [keep_input]) as name:
    tensor_list = _validate(tensor_list)
    keep_input = _validate_keep_input(keep_input, enqueue_many)
    tensor_list, sparse_info = _store_sparse_tensors(
        tensor_list, enqueue_many, keep_input)
    types = _dtypes([tensor_list])
    shapes = _shapes([tensor_list], shapes, enqueue_many)
    queue = data_flow_ops.RandomShuffleQueue(
        capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed,
        dtypes=types, shapes=shapes, shared_name=shared_name)
    _enqueue(queue, tensor_list, num_threads, enqueue_many, keep_input)
    full = (math_ops.cast(math_ops.maximum(0, queue.size() - min_after_dequeue),
                          dtypes.float32) *
            (1. / (capacity - min_after_dequeue)))
    # Note that name contains a '/' at the end so we intentionally do not place
    # a '/' after %s below.
    summary_name = (
        "fraction_over_%d_of_%d_full" %
        (min_after_dequeue, capacity - min_after_dequeue))
    summary.scalar(summary_name, full)
    if allow_smaller_final_batch:
      dequeued = queue.dequeue_up_to(batch_size, name=name)
    else:
      dequeued = queue.dequeue_many(batch_size, name=name)
    dequeued = _restore_sparse_tensors(dequeued, sparse_info)
    return _as_original_type(tensors, dequeued)
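The keep_input argument is what distinguishes the maybe_* variants: it is a boolean tensor (scalar, or a vector when enqueue_many=True) that gates which examples are enqueued. Below is a hedged sketch of the public TF1 wrapper tf.train.maybe_shuffle_batch, which delegates to a helper like the one above; the tensors and the nonzero-label filter are invented for illustration:

import tensorflow.compat.v1 as tf

image = tf.random.uniform([28, 28])                       # stand-in input
label = tf.random.uniform([], maxval=10, dtype=tf.int32)  # stand-in label
# Only examples with a nonzero label are enqueued into the shuffle queue.
images, labels = tf.train.maybe_shuffle_batch(
    [image, label], batch_size=16, capacity=256, min_after_dequeue=64,
    keep_input=tf.not_equal(label, 0))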
Example 3: _shuffle_batch_join
# Required import: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import RandomShuffleQueue [as alias]
def _shuffle_batch_join(tensors_list, batch_size, capacity,
                        min_after_dequeue, keep_input, seed=None,
                        enqueue_many=False, shapes=None,
                        allow_smaller_final_batch=False, shared_name=None,
                        name=None):
  """Helper function for `shuffle_batch_join` and `maybe_shuffle_batch_join`."""
  tensor_list_list = _as_tensor_list_list(tensors_list)
  with ops.name_scope(name, "shuffle_batch_join",
                      _flatten(tensor_list_list) + [keep_input]) as name:
    tensor_list_list = _validate_join(tensor_list_list)
    keep_input = _validate_keep_input(keep_input, enqueue_many)
    tensor_list_list, sparse_info = _store_sparse_tensors_join(
        tensor_list_list, enqueue_many, keep_input)
    types = _dtypes(tensor_list_list)
    shapes = _shapes(tensor_list_list, shapes, enqueue_many)
    queue = data_flow_ops.RandomShuffleQueue(
        capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed,
        dtypes=types, shapes=shapes, shared_name=shared_name)
    _enqueue_join(queue, tensor_list_list, enqueue_many, keep_input)
    full = (math_ops.cast(math_ops.maximum(0, queue.size() - min_after_dequeue),
                          dtypes.float32) *
            (1. / (capacity - min_after_dequeue)))
    # Note that name contains a '/' at the end so we intentionally do not place
    # a '/' after %s below.
    summary_name = (
        "fraction_over_%d_of_%d_full" %
        (min_after_dequeue, capacity - min_after_dequeue))
    summary.scalar(summary_name, full)
    if allow_smaller_final_batch:
      dequeued = queue.dequeue_up_to(batch_size, name=name)
    else:
      dequeued = queue.dequeue_many(batch_size, name=name)
    dequeued = _restore_sparse_tensors(dequeued, sparse_info)
    # tensors_list was validated to not be empty.
    return _as_original_type(tensors_list[0], dequeued)
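The _join variant fans several independent preprocessing pipelines into a single shuffle queue, so a batch can mix examples from all of them. A hedged sketch of the public TF1 wrapper tf.train.shuffle_batch_join; the three (image, label) pipelines are invented for illustration:

import tensorflow.compat.v1 as tf

# One (image, label) pair per reader pipeline; three pipelines in total.
tensors_list = [
    [tf.random.uniform([28, 28]), tf.constant(i)] for i in range(3)
]
images, labels = tf.train.shuffle_batch_join(
    tensors_list, batch_size=16, capacity=256, min_after_dequeue=64)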
Example 4: _shuffle_batch
# Required import: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import RandomShuffleQueue [as alias]
def _shuffle_batch(tensors, batch_size, capacity, min_after_dequeue,
                   keep_input, num_threads=1, seed=None, enqueue_many=False,
                   shapes=None, allow_smaller_final_batch=False,
                   shared_name=None, name=None):
  """Helper function for `shuffle_batch` and `maybe_shuffle_batch`."""
  tensor_list = _as_tensor_list(tensors)
  with ops.name_scope(name, "shuffle_batch",
                      list(tensor_list) + [keep_input]) as name:
    tensor_list = _validate(tensor_list)
    keep_input = _validate_tensor_or_none(keep_input)
    tensor_list, sparse_info = _store_sparse_tensors(
        tensor_list, enqueue_many, keep_input)
    types = _dtypes([tensor_list])
    shapes = _shapes([tensor_list], shapes, enqueue_many)
    queue = data_flow_ops.RandomShuffleQueue(
        capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed,
        dtypes=types, shapes=shapes, shared_name=shared_name)
    _enqueue(queue, tensor_list, num_threads, enqueue_many, keep_input)
    full = (math_ops.cast(math_ops.maximum(0, queue.size() - min_after_dequeue),
                          dtypes.float32) *
            (1. / (capacity - min_after_dequeue)))
    # Note that name contains a '/' at the end so we intentionally do not place
    # a '/' after %s below.
    summary_name = (
        "fraction_over_%d_of_%d_full" %
        (min_after_dequeue, capacity - min_after_dequeue))
    summary.scalar(summary_name, full)
    if allow_smaller_final_batch:
      dequeued = queue.dequeue_up_to(batch_size, name=name)
    else:
      dequeued = queue.dequeue_many(batch_size, name=name)
    dequeued = _restore_sparse_tensors(dequeued, sparse_info)
    return _as_original_type(tensors, dequeued)
Example 5: _shuffle_batch_join
# Required import: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import RandomShuffleQueue [as alias]
def _shuffle_batch_join(tensors_list, batch_size, capacity,
                        min_after_dequeue, keep_input, seed=None,
                        enqueue_many=False, shapes=None,
                        allow_smaller_final_batch=False, shared_name=None,
                        name=None):
  """Helper function for `shuffle_batch_join` and `maybe_shuffle_batch_join`."""
  tensor_list_list = _as_tensor_list_list(tensors_list)
  with ops.name_scope(name, "shuffle_batch_join",
                      _flatten(tensor_list_list) + [keep_input]) as name:
    tensor_list_list = _validate_join(tensor_list_list)
    keep_input = _validate_tensor_or_none(keep_input)
    tensor_list_list, sparse_info = _store_sparse_tensors_join(
        tensor_list_list, enqueue_many, keep_input)
    types = _dtypes(tensor_list_list)
    shapes = _shapes(tensor_list_list, shapes, enqueue_many)
    queue = data_flow_ops.RandomShuffleQueue(
        capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed,
        dtypes=types, shapes=shapes, shared_name=shared_name)
    _enqueue_join(queue, tensor_list_list, enqueue_many, keep_input)
    full = (math_ops.cast(math_ops.maximum(0, queue.size() - min_after_dequeue),
                          dtypes.float32) *
            (1. / (capacity - min_after_dequeue)))
    # Note that name contains a '/' at the end so we intentionally do not place
    # a '/' after %s below.
    summary_name = (
        "fraction_over_%d_of_%d_full" %
        (min_after_dequeue, capacity - min_after_dequeue))
    summary.scalar(summary_name, full)
    if allow_smaller_final_batch:
      dequeued = queue.dequeue_up_to(batch_size, name=name)
    else:
      dequeued = queue.dequeue_many(batch_size, name=name)
    dequeued = _restore_sparse_tensors(dequeued, sparse_info)
    # tensors_list was validated to not be empty.
    return _as_original_type(tensors_list[0], dequeued)
Example 6: testRandomShuffleQueue
# Required import: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import RandomShuffleQueue [as alias]
def testRandomShuffleQueue(self):
  shared_queue = data_flow_ops.RandomShuffleQueue(
      capacity=256,
      min_after_dequeue=128,
      dtypes=[dtypes_lib.string, dtypes_lib.string])
  self._verify_all_data_sources_read(shared_queue)
Example 7: testReadUpToFromRandomShuffleQueue
# Required import: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import RandomShuffleQueue [as alias]
def testReadUpToFromRandomShuffleQueue(self):
  shared_queue = data_flow_ops.RandomShuffleQueue(
      capacity=55,
      min_after_dequeue=28,
      dtypes=[dtypes_lib.string, dtypes_lib.string],
      shapes=[[], []])
  self._verify_read_up_to_out(shared_queue)
Example 8: _shuffle_batch
# Required import: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import RandomShuffleQueue [as alias]
def _shuffle_batch(tensors, batch_size, capacity, min_after_dequeue,
                   keep_input, num_threads=1, seed=None, enqueue_many=False,
                   shapes=None, allow_smaller_final_batch=False,
                   shared_name=None, name=None):
  """Helper function for `shuffle_batch` and `maybe_shuffle_batch`."""
  tensor_list = _as_tensor_list(tensors)
  with ops.name_scope(name, "shuffle_batch",
                      list(tensor_list) + [keep_input]) as name:
    if capacity <= min_after_dequeue:
      raise ValueError("capacity %d must be bigger than min_after_dequeue %d."
                       % (capacity, min_after_dequeue))
    tensor_list = _validate(tensor_list)
    keep_input = _validate_keep_input(keep_input, enqueue_many)
    tensor_list, sparse_info = _store_sparse_tensors(
        tensor_list, enqueue_many, keep_input)
    types = _dtypes([tensor_list])
    shapes = _shapes([tensor_list], shapes, enqueue_many)
    queue = data_flow_ops.RandomShuffleQueue(
        capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed,
        dtypes=types, shapes=shapes, shared_name=shared_name)
    _enqueue(queue, tensor_list, num_threads, enqueue_many, keep_input)
    full = (math_ops.to_float(
        math_ops.maximum(0, queue.size() - min_after_dequeue)) *
            (1. / (capacity - min_after_dequeue)))
    # Note that name contains a '/' at the end so we intentionally do not place
    # a '/' after %s below.
    summary_name = (
        "fraction_over_%d_of_%d_full" %
        (min_after_dequeue, capacity - min_after_dequeue))
    summary.scalar(summary_name, full)
    if allow_smaller_final_batch:
      dequeued = queue.dequeue_up_to(batch_size, name=name)
    else:
      dequeued = queue.dequeue_many(batch_size, name=name)
    dequeued = _restore_sparse_tensors(dequeued, sparse_info)
    return _as_original_type(tensors, dequeued)
Author: PacktPublishing | Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda | Lines: 39 | Source: input.py
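For context, here is a hedged sketch of how the public TF1 wrapper tf.train.shuffle_batch, which delegates to a helper like the one above, is typically called; the input tensor stands in for a parsed record:

import tensorflow.compat.v1 as tf

example = tf.random.uniform([4])  # stand-in for a parsed record
batch = tf.train.shuffle_batch(
    [example], batch_size=32, capacity=256, min_after_dequeue=128,
    num_threads=4)
# At run time the enqueue threads must be started, e.g.:
#   coord = tf.train.Coordinator()
#   threads = tf.train.start_queue_runners(sess, coord)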
Example 9: _shuffle_batch_join
# Required import: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import RandomShuffleQueue [as alias]
def _shuffle_batch_join(tensors_list, batch_size, capacity,
                        min_after_dequeue, keep_input, seed=None,
                        enqueue_many=False, shapes=None,
                        allow_smaller_final_batch=False, shared_name=None,
                        name=None):
  """Helper function for `shuffle_batch_join` and `maybe_shuffle_batch_join`."""
  tensor_list_list = _as_tensor_list_list(tensors_list)
  with ops.name_scope(name, "shuffle_batch_join",
                      _flatten(tensor_list_list) + [keep_input]) as name:
    tensor_list_list = _validate_join(tensor_list_list)
    keep_input = _validate_keep_input(keep_input, enqueue_many)
    tensor_list_list, sparse_info = _store_sparse_tensors_join(
        tensor_list_list, enqueue_many, keep_input)
    types = _dtypes(tensor_list_list)
    shapes = _shapes(tensor_list_list, shapes, enqueue_many)
    queue = data_flow_ops.RandomShuffleQueue(
        capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed,
        dtypes=types, shapes=shapes, shared_name=shared_name)
    _enqueue_join(queue, tensor_list_list, enqueue_many, keep_input)
    full = (math_ops.to_float(
        math_ops.maximum(0, queue.size() - min_after_dequeue)) *
            (1. / (capacity - min_after_dequeue)))
    # Note that name contains a '/' at the end so we intentionally do not place
    # a '/' after %s below.
    summary_name = (
        "fraction_over_%d_of_%d_full" %
        (min_after_dequeue, capacity - min_after_dequeue))
    summary.scalar(summary_name, full)
    if allow_smaller_final_batch:
      dequeued = queue.dequeue_up_to(batch_size, name=name)
    else:
      dequeued = queue.dequeue_many(batch_size, name=name)
    dequeued = _restore_sparse_tensors(dequeued, sparse_info)
    # tensors_list was validated to not be empty.
    return _as_original_type(tensors_list[0], dequeued)
Author: PacktPublishing | Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda | Lines: 40 | Source: input.py
Example 10: __init__
# Required import: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import RandomShuffleQueue [as alias]
def __init__(self,
             reader_class,
             common_queue,
             num_readers=4,
             reader_kwargs=None):
  """ParallelReader creates num_readers instances of the reader_class.

  Each instance is created by calling the `reader_class` function passing
  the arguments specified in `reader_kwargs` as in:
    reader_class(**reader_kwargs)

  When you read from a ParallelReader, with its `read()` method,
  you just dequeue examples from the `common_queue`.

  The readers will read different files in parallel, asynchronously enqueueing
  their output into `common_queue`. The `common_queue.dtypes` must be
  [tf.string, tf.string].

  Because each reader can read from a different file, the examples in the
  `common_queue` could be from different files. Due to the asynchronous
  reading there is no guarantee that all the readers will read the same
  number of examples.

  If the `common_queue` is a shuffling queue, then the examples are shuffled.

  Usage:
    common_queue = tf.RandomShuffleQueue(
        capacity=256,
        min_after_dequeue=128,
        dtypes=[tf.string, tf.string])
    p_reader = ParallelReader(tf.TFRecordReader, common_queue)

    common_queue = tf.FIFOQueue(
        capacity=256,
        dtypes=[tf.string, tf.string])
    p_reader = ParallelReader(readers, common_queue, num_readers=2)

  Args:
    reader_class: one of the io_ops.ReaderBase subclasses, e.g. TFRecordReader.
    common_queue: a Queue to hold (key, value) pairs with `dtypes` equal to
      [tf.string, tf.string]. Must be one of the data_flow_ops.Queues
      instances, e.g. `tf.FIFOQueue()`, `tf.RandomShuffleQueue()`, ...
    num_readers: an integer, the number of instances of reader_class to create.
    reader_kwargs: an optional dict of kwargs used to create the readers.

  Raises:
    TypeError: if `common_queue.dtypes` is not [tf.string, tf.string].
  """
  if len(common_queue.dtypes) != 2:
    raise TypeError('common_queue.dtypes must be [tf.string, tf.string]')
  for dtype in common_queue.dtypes:
    if not dtype.is_compatible_with(tf_dtypes.string):
      raise TypeError('common_queue.dtypes must be [tf.string, tf.string]')

  reader_kwargs = reader_kwargs or {}
  self._readers = [reader_class(**reader_kwargs) for _ in range(num_readers)]
  self._common_queue = common_queue
Example 11: __init__
# Required import: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import RandomShuffleQueue [as alias]
def __init__(self,
             reader_class,
             common_queue,
             num_readers=4,
             reader_kwargs=None):
  """ParallelReader creates num_readers instances of the reader_class.

  Each instance is created by calling the `reader_class` function passing
  the arguments specified in `reader_kwargs` as in:
    reader_class(**reader_kwargs)

  When you read from a ParallelReader, with its `read()` method,
  you just dequeue examples from the `common_queue`.

  The readers will read different files in parallel, asynchronously enqueueing
  their output into `common_queue`. The `common_queue.dtypes` must be
  [tf.string, tf.string].

  Because each reader can read from a different file, the examples in the
  `common_queue` could be from different files. Due to the asynchronous
  reading there is no guarantee that all the readers will read the same
  number of examples.

  If the `common_queue` is a shuffling queue, then the examples are shuffled.

  Usage:
    common_queue = tf.queue.RandomShuffleQueue(
        capacity=256,
        min_after_dequeue=128,
        dtypes=[tf.string, tf.string])
    p_reader = ParallelReader(tf.compat.v1.TFRecordReader, common_queue)

    common_queue = tf.queue.FIFOQueue(
        capacity=256,
        dtypes=[tf.string, tf.string])
    p_reader = ParallelReader(readers, common_queue, num_readers=2)

  Args:
    reader_class: one of the io_ops.ReaderBase subclasses, e.g. TFRecordReader.
    common_queue: a Queue to hold (key, value) pairs with `dtypes` equal to
      [tf.string, tf.string]. Must be one of the data_flow_ops.Queues
      instances, e.g. `tf.queue.FIFOQueue()`, `tf.queue.RandomShuffleQueue()`,
      ...
    num_readers: an integer, the number of instances of reader_class to create.
    reader_kwargs: an optional dict of kwargs used to create the readers.

  Raises:
    TypeError: if `common_queue.dtypes` is not [tf.string, tf.string].
  """
  if len(common_queue.dtypes) != 2:
    raise TypeError('common_queue.dtypes must be [tf.string, tf.string]')
  for dtype in common_queue.dtypes:
    if not dtype.is_compatible_with(tf_dtypes.string):
      raise TypeError('common_queue.dtypes must be [tf.string, tf.string]')

  reader_kwargs = reader_kwargs or {}
  self._readers = [reader_class(**reader_kwargs) for _ in range(num_readers)]
  self._common_queue = common_queue
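To round out the docstring's usage snippet, a hypothetical end-to-end read (the file path is illustrative, and queue runners must be started as usual in TF1):

import tensorflow.compat.v1 as tf

common_queue = tf.queue.RandomShuffleQueue(
    capacity=256, min_after_dequeue=128,
    dtypes=[tf.string, tf.string])
p_reader = ParallelReader(tf.TFRecordReader, common_queue, num_readers=4)

filename_queue = tf.train.string_input_producer(['/tmp/train.tfrecord'])
key, value = p_reader.read(filename_queue)  # dequeues from common_queue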