This article collects typical usage examples of the Python function tensorflow.python.training.queue_runner.add_queue_runner. If you have been wondering what add_queue_runner does, how to call it, or what real uses of it look like, the hand-picked code samples below should help.
The 15 code examples of add_queue_runner shown below are sorted roughly by popularity.
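Before the examples, here is a minimal, self-contained sketch (not taken from any snippet below) of the pattern they all share: build a queue, register a QueueRunner for its enqueue op via add_queue_runner (exposed publicly as tf.train.add_queue_runner in TF 1.x), and let tf.train.start_queue_runners drive it from background threads.

import tensorflow as tf

# Toy pipeline: two runner threads keep `queue` topped up with random scalars.
queue = tf.FIFOQueue(capacity=32, dtypes=[tf.float32], shapes=[[]])
enqueue_op = queue.enqueue(tf.random_uniform([]))
tf.train.add_queue_runner(tf.train.QueueRunner(queue, [enqueue_op] * 2))
batch = queue.dequeue_many(4)

with tf.Session() as sess:
  coord = tf.train.Coordinator()
  threads = tf.train.start_queue_runners(sess=sess, coord=coord)
  print(sess.run(batch))  # four scalars produced by the runner threads
  coord.request_stop()
  coord.join(threads)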
Example 1: _configure_readers_by

def _configure_readers_by(self, queue):
  enqueue_ops = []
  for reader in self._readers:
    enqueue_ops.append(self._common_queue.enqueue(reader.read(queue)))

  queue_runner.add_queue_runner(
      queue_runner.QueueRunner(self._common_queue, enqueue_ops))
Example 2: _get_stratified_batch_from_tensors

def _get_stratified_batch_from_tensors(val, label, reject_probs, batch_size,
                                       queue_threads=3):
  """Reject examples one-at-a-time based on class."""
  # Make rejection probabilities into a tensor so they can be dynamically
  # accessed by tensors.
  reject_probs = constant_op.constant(
      reject_probs, dtype=dtypes.float32, name='rejection_probabilities')

  # Make queue that will have proper class proportions. Contains exactly one
  # batch at a time.
  val_shape = val.get_shape()
  label_shape = label.get_shape()
  final_q = data_flow_ops.FIFOQueue(capacity=batch_size,
                                    shapes=[val_shape, label_shape],
                                    dtypes=[val.dtype, label.dtype],
                                    name='batched_queue')

  # Conditionally enqueue.
  eq_tf = array_ops.reshape(
      math_ops.greater(random_ops.random_uniform([1]),
                       array_ops.slice(reject_probs, [label], [1])),
      [])
  conditional_enqueue = control_flow_ops.cond(
      eq_tf,
      lambda: final_q.enqueue([val, label]),
      control_flow_ops.no_op)
  queue_runner.add_queue_runner(queue_runner.QueueRunner(
      final_q, [conditional_enqueue] * queue_threads))

  return final_q.dequeue_many(batch_size)
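The key trick above is the conditional enqueue: control_flow_ops.cond selects between a real enqueue and a no_op, so each runner thread effectively performs rejection sampling. A stripped-down, self-contained version of that trick, using the public tf.* aliases and a fixed stand-in rejection probability of 0.5, might look like this:

import tensorflow as tf

q = tf.FIFOQueue(capacity=8, dtypes=[tf.float32], shapes=[[]])
val = tf.random_uniform([])
# Keep the example only when a fresh uniform draw exceeds the reject prob.
keep = tf.greater(tf.random_uniform([]), 0.5)
maybe_enqueue = tf.cond(keep, lambda: q.enqueue([val]), tf.no_op)
tf.train.add_queue_runner(tf.train.QueueRunner(q, [maybe_enqueue] * 3))
sample = q.dequeue()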
Example 3: _apply_transform

def _apply_transform(self, transform_input):
  filename_queue = input_ops.string_input_producer(self._work_units,
                                                   shuffle=self.shuffle,
                                                   seed=self._seed)

  if self.shuffle:
    queue = data_flow_ops.RandomShuffleQueue(
        capacity=self.queue_capacity,
        min_after_dequeue=self.min_after_dequeue,
        dtypes=[dtypes.string, dtypes.string],
        shapes=[[], []],
        seed=self.seed)
  else:
    queue = data_flow_ops.FIFOQueue(capacity=self.queue_capacity,
                                    dtypes=[dtypes.string, dtypes.string],
                                    shapes=[[], []])

  enqueue_ops = []
  for _ in range(self.num_threads):
    reader = self._reader_cls(**self._reader_kwargs)
    enqueue_ops.append(queue.enqueue(reader.read(filename_queue)))

  runner = queue_runner.QueueRunner(queue, enqueue_ops)
  queue_runner.add_queue_runner(runner)

  dequeued = queue.dequeue_many(self.batch_size)

  # pylint: disable=not-callable
  return self.return_type(*dequeued)
Example 4: _get_stratified_batch_from_tensors

def _get_stratified_batch_from_tensors(val_list, label, accept_probs,
                                       batch_size, queue_threads=3):
  """Accepts examples one-at-a-time based on class."""
  # Make queue that will have proper class proportions. Contains exactly one
  # batch at a time.
  vals_shapes = [val.get_shape() for val in val_list]
  vals_dtypes = [val.dtype for val in val_list]
  label_shape = label.get_shape()
  final_q = data_flow_ops.FIFOQueue(capacity=batch_size,
                                    shapes=vals_shapes + [label_shape],
                                    dtypes=vals_dtypes + [label.dtype],
                                    name='batched_queue')

  # Conditionally enqueue.
  tensors_to_enqueue = val_list + [label]
  eq_tf = array_ops.reshape(
      math_ops.less(random_ops.random_uniform([1]),
                    array_ops.slice(accept_probs, [label], [1])),
      [])
  conditional_enqueue = control_flow_ops.cond(
      eq_tf,
      lambda: final_q.enqueue(tensors_to_enqueue),
      control_flow_ops.no_op)
  queue_runner.add_queue_runner(queue_runner.QueueRunner(
      final_q, [conditional_enqueue] * queue_threads))

  return final_q.dequeue_many(batch_size)
Example 5: _make_per_class_queues

def _make_per_class_queues(data, labels, num_classes, queue_capacity,
                           threads_per_queue):
  """Creates per-class-queues based on data and labels."""
  # Create one queue per class.
  queues = []
  per_data_shape = data.get_shape().with_rank_at_least(1)[1:]
  per_data_shape.assert_is_fully_defined()
  for i in range(num_classes):
    q = data_flow_ops.FIFOQueue(capacity=queue_capacity,
                                shapes=per_data_shape, dtypes=[data.dtype],
                                name='stratified_sample_class%d_queue' % i)
    logging_ops.scalar_summary('queue/stratified_sample_class%d' % i, q.size())
    queues.append(q)

  # Partition tensors according to labels.
  partitions = data_flow_ops.dynamic_partition(data, labels, num_classes)

  # Enqueue each tensor on the per-class-queue.
  for i in range(num_classes):
    enqueue_op = queues[i].enqueue_many(partitions[i])
    queue_runner.add_queue_runner(queue_runner.QueueRunner(
        queues[i], [enqueue_op] * threads_per_queue))

  return queues
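The routing step relies on dynamic_partition, which splits a tensor into per-class pieces keyed by label. A tiny standalone illustration of that op:

import tensorflow as tf

data = tf.constant([10., 20., 30., 40.])
labels = tf.constant([0, 1, 0, 1])
# parts[0] gathers the rows with label 0, parts[1] the rows with label 1.
parts = tf.dynamic_partition(data, labels, num_partitions=2)

with tf.Session() as sess:
  print(sess.run(parts))  # [array([10., 30.], ...), array([20., 40.], ...)]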
Example 6: prefetch_queue

def prefetch_queue(tensors,
                   capacity=8,
                   num_threads=1,
                   dynamic_pad=False,
                   shared_name=None,
                   name=None):
  """Creates a queue to prefetch tensors from `tensors`.

  A queue runner for enqueuing tensors into the prefetch_queue is
  automatically added to the TF QueueRunners collection.

  Example:
  This is for example useful to pre-assemble input batches read with
  `tf.train.batch()` and enqueue the pre-assembled batches. Ops that dequeue
  from the pre-assembled queue will not pay the cost of assembling the batch.

  images, labels = tf.train.batch([image, label], batch_size=32, num_threads=4)
  batch_queue = prefetch_queue([images, labels])
  images, labels = batch_queue.dequeue()
  logits = Net(images)
  loss = Loss(logits, labels)

  Args:
    tensors: A list or dictionary of `Tensors` to enqueue in the buffer.
    capacity: An integer. The maximum number of elements in the queue.
    num_threads: An integer. Number of threads running the enqueue op.
    dynamic_pad: Boolean. Whether to allow variable dimensions in input shapes.
    shared_name: (optional). If set, this queue will be shared under the given
      name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A queue from which you can dequeue tensors with the same type and shape
    as `tensors`.
  """
  if isinstance(tensors, dict):
    # Need to wrap the keys and values in list() since Python3 returns views.
    # We sort the keys so the order is consistent across runs.
    names = list(sorted(tensors.keys()))
    tensor_list = list([tensors[n] for n in names])
  else:
    names = None
    tensor_list = tensors

  with ops.name_scope(name, "prefetch_queue", tensor_list) as name:
    dtypes = [t.dtype for t in tensor_list]
    shapes = [t.get_shape() for t in tensor_list]
    queue = _which_queue(dynamic_pad)(
        capacity=capacity,
        dtypes=dtypes,
        shapes=shapes,
        names=names,
        shared_name=shared_name)
    enqueue_op = queue.enqueue(tensors)
    queue_runner.add_queue_runner(
        queue_runner.QueueRunner(queue, [enqueue_op] * num_threads))
    summary.scalar(
        "fraction_of_%d_full" % capacity,
        math_ops.cast(queue.size(), _dtypes.float32) * (1. / capacity))
    return queue
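An end-to-end driver for this helper might look like the sketch below. It assumes prefetch_queue as defined above is in scope, and substitutes constant tensors for the output of a real tf.train.batch call.

import tensorflow as tf

images = tf.zeros([32, 4])               # stand-in for a pre-assembled batch
labels = tf.zeros([32], dtype=tf.int32)
batch_queue = prefetch_queue([images, labels], capacity=4)
images_out, labels_out = batch_queue.dequeue()

with tf.Session() as sess:
  coord = tf.train.Coordinator()
  threads = tf.train.start_queue_runners(sess=sess, coord=coord)
  print(sess.run(images_out).shape)      # (32, 4)
  coord.request_stop()
  coord.join(threads)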
Example 7: _make_per_class_queues

def _make_per_class_queues(tensor_list, labels, num_classes, queue_capacity,
                           threads_per_queue):
  """Creates per-class-queues based on data and labels."""
  # Create one queue per class.
  queues = []
  data_shapes = []
  data_dtypes = []
  for data_tensor in tensor_list:
    per_data_shape = data_tensor.get_shape().with_rank_at_least(1)[1:]
    per_data_shape.assert_is_fully_defined()
    data_shapes.append(per_data_shape)
    data_dtypes.append(data_tensor.dtype)
  for i in range(num_classes):
    q = data_flow_ops.FIFOQueue(
        capacity=queue_capacity, shapes=data_shapes, dtypes=data_dtypes,
        name="stratified_sample_class%d_queue" % i)
    logging_ops.scalar_summary(
        "queue/%s/stratified_sample_class%d" % (q.name, i), q.size())
    queues.append(q)

  # Partition tensors according to labels. `partitions` is a list of lists, of
  # size num_classes X len(tensor_list). The number of tensors in partition `i`
  # should be the same for all tensors.
  all_partitions = [data_flow_ops.dynamic_partition(data, labels, num_classes)
                    for data in tensor_list]
  partitions = [[cur_partition[i] for cur_partition in all_partitions]
                for i in range(num_classes)]

  # Enqueue each tensor on the per-class-queue.
  for i in range(num_classes):
    enqueue_op = queues[i].enqueue_many(partitions[i])
    queue_runner.add_queue_runner(
        queue_runner.QueueRunner(queues[i], [enqueue_op] * threads_per_queue))

  return queues
Example 8: create_input_queues

def create_input_queues(image, label, capacity=100):
  """Creates FIFO queues out of the input tensor objects.

  This function is no longer used in the input pipeline. However, it took me
  a while to understand queuing and it might be useful for someone at some
  point.

  Args:
    image: an image tensor object, generated by queues.
    label: a label tensor object, generated by queues.

  Returns:
    Two FIFO queues.
  """
  # Create input queues.
  im_queue = tf.FIFOQueue(capacity, dtypes.uint8)
  enqueue_op = im_queue.enqueue(image)
  queue_runner.add_queue_runner(queue_runner.QueueRunner(im_queue,
                                                         [enqueue_op]))

  label_queue = tf.FIFOQueue(capacity, dtypes.uint8)
  enqueue_op = label_queue.enqueue(label)
  queue_runner.add_queue_runner(queue_runner.QueueRunner(label_queue,
                                                         [enqueue_op]))

  return im_queue, label_queue
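For completeness, here is a hedged sketch of how the two returned queues could be consumed, with dummy uint8 tensors standing in for real queue-generated inputs:

import tensorflow as tf

image = tf.zeros([28, 28], dtype=tf.uint8)   # placeholder image tensor
label = tf.zeros([], dtype=tf.uint8)         # placeholder label tensor
im_queue, label_queue = create_input_queues(image, label)
im, lbl = im_queue.dequeue(), label_queue.dequeue()

with tf.Session() as sess:
  coord = tf.train.Coordinator()
  threads = tf.train.start_queue_runners(sess=sess, coord=coord)
  print(sess.run([im, lbl]))
  coord.request_stop()
  coord.join(threads)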
Example 9: _fn

def _fn():
  queue = data_flow_ops.FIFOQueue(
      capacity=10, dtypes=dtypes.float32, shapes=[10, 3])
  enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
  queue_runner.add_queue_runner(
      queue_runner.QueueRunner(queue, [enqueue_op]))
  return queue.dequeue(), None
Example 10: read

def read(self, queue, name=None):
  """Returns the next record (key, value pair) produced by the reader.

  The multiple reader instances are all configured to `read()` from the
  filenames listed in `queue` and enqueue their output into the
  `common_queue` passed to the constructor, and this method returns the
  next record dequeued from that `common_queue`.

  Readers dequeue a work unit from `queue` if necessary (e.g. when a
  reader needs to start reading from a new file since it has finished with
  the previous file).

  A queue runner for enqueuing in the `common_queue` is automatically added
  to the TF QueueRunners collection.

  Args:
    queue: A Queue or a mutable string Tensor representing a handle
      to a Queue, with string work items.
    name: A name for the operation (optional).

  Returns:
    The next record (i.e. (key, value pair)) from the common_queue.
  """
  enqueue_ops = []
  for reader in self._readers:
    enqueue_ops.append(self._common_queue.enqueue(reader.read(queue)))

  queue_runner.add_queue_runner(
      queue_runner.QueueRunner(self._common_queue, enqueue_ops))

  return self._common_queue.dequeue(name=name)
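The parallel-reader pattern this method implements can be reproduced standalone: several readers share one filename queue and funnel their (key, value) records into a single common queue. A minimal sketch with public 1.x ops (the file names are placeholders):

import tensorflow as tf

filename_queue = tf.train.string_input_producer(['a.csv', 'b.csv'])
common_queue = tf.FIFOQueue(capacity=64, dtypes=[tf.string, tf.string])
# Two readers, each feeding the shared queue from its own runner thread.
enqueue_ops = [
    common_queue.enqueue(tf.TextLineReader().read(filename_queue))
    for _ in range(2)
]
tf.train.add_queue_runner(
    tf.train.QueueRunner(common_queue, enqueue_ops))
key, value = common_queue.dequeue()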
Example 11: input_producer

def input_producer(input_tensor, element_shape=None, num_epochs=None,
                   shuffle=True, seed=None, capacity=32, shared_name=None,
                   summary_name=None, name=None):
  """Output the rows of `input_tensor` to a queue for an input pipeline.

  Args:
    input_tensor: A tensor with the rows to produce. Must be at least
      one-dimensional. Must either have a fully-defined shape, or
      `element_shape` must be defined.
    element_shape: (Optional.) A `TensorShape` representing the shape of a
      row of `input_tensor`, if it cannot be inferred.
    num_epochs: (Optional.) An integer. If specified `input_producer` produces
      each row of `input_tensor` `num_epochs` times before generating an
      `OutOfRange` error. If not specified, `input_producer` can cycle through
      the rows of `input_tensor` an unlimited number of times.
    shuffle: (Optional.) A boolean. If true, the rows are randomly shuffled
      within each epoch.
    seed: (Optional.) An integer. The seed to use if `shuffle` is true.
    capacity: (Optional.) The capacity of the queue to be used for buffering
      the input.
    shared_name: (Optional.) If set, this queue will be shared under the given
      name across multiple sessions.
    summary_name: (Optional.) If set, a scalar summary for the current queue
      size will be generated, using this name as part of the tag.
    name: (Optional.) A name for the queue.

  Returns:
    A queue with the output rows. A `QueueRunner` for the queue is
    added to the current `QUEUE_RUNNER` collection of the current
    graph.

  Raises:
    ValueError: If the shape of the input cannot be inferred from the
      arguments.
  """
  with ops.name_scope(name, "input_producer", [input_tensor]):
    input_tensor = ops.convert_to_tensor(input_tensor, name="input_tensor")
    element_shape = input_tensor.get_shape()[1:].merge_with(element_shape)
    if not element_shape.is_fully_defined():
      raise ValueError("Either `input_tensor` must have a fully defined shape "
                       "or `element_shape` must be specified")

    if shuffle:
      input_tensor = random_ops.random_shuffle(input_tensor, seed=seed)

    input_tensor = limit_epochs(input_tensor, num_epochs)

    q = data_flow_ops.FIFOQueue(capacity=capacity,
                                dtypes=[input_tensor.dtype.base_dtype],
                                shapes=[element_shape],
                                shared_name=shared_name, name=name)
    enq = q.enqueue_many([input_tensor])
    queue_runner.add_queue_runner(queue_runner.QueueRunner(q, [enq]))
    if summary_name is not None:
      logging_ops.scalar_summary("queue/%s/%s" % (q.name, summary_name),
                                 math_ops.cast(q.size(), dtypes.float32) *
                                 (1. / capacity))
    return q
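A small worked example of the function above (also exposed publicly as tf.train.input_producer in 1.x): feed the rows of a constant matrix into the queue and read them back one at a time.

import tensorflow as tf

rows = tf.constant([[1., 2.], [3., 4.], [5., 6.]])
q = input_producer(rows, shuffle=False)
row = q.dequeue()

with tf.Session() as sess:
  coord = tf.train.Coordinator()
  threads = tf.train.start_queue_runners(sess=sess, coord=coord)
  print(sess.run(row))  # [1. 2.] -- rows come back in order when shuffle=False
  coord.request_stop()
  coord.join(threads)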
Example 12: _input_producer

def _input_producer(input_tensor, dtype, num_epochs, shuffle, seed, capacity,
                    name, summary_name):
  if shuffle:
    input_tensor = random_ops.random_shuffle(input_tensor, seed=seed)
  input_tensor = limit_epochs(input_tensor, num_epochs)

  q = data_flow_ops.FIFOQueue(capacity=capacity, dtypes=[dtype], shapes=[[]],
                              name=name)
  enq = q.enqueue_many([input_tensor])
  queue_runner.add_queue_runner(queue_runner.QueueRunner(q, [enq]))
  summary_ops.scalar_summary(
      "queue/%s/%s" % (q.name, summary_name),
      math_ops.cast(q.size(), dtypes.float32) * (1.0 / capacity))
  return q
Example 13: prefetch_queue

def prefetch_queue(tensors,
                   capacity=8,
                   shared_name=None,
                   name=None):
  """Creates a queue to prefetch tensors from `tensors`.

  A queue runner for enqueuing tensors into the prefetch_queue is
  automatically added to the TF QueueRunners collection.

  Example:
  This is for example useful to pre-assemble input batches read with
  `tf.train.batch()` and enqueue the pre-assembled batches. Ops that dequeue
  from the pre-assembled queue will not pay the cost of assembling the batch.

  images, labels = tf.train.batch([image, label], batch_size=32, num_threads=4)
  batch_queue = prefetch_queue([images, labels])
  images, labels = batch_queue.dequeue()
  logits = Net(images)
  loss = Loss(logits, labels)

  Args:
    tensors: A list or dictionary of `Tensors` to enqueue in the buffer.
    capacity: An integer. The maximum number of elements in the queue.
    shared_name: (optional). If set, this queue will be shared under the given
      name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A queue from which you can dequeue tensors with the same type and shape
    as `tensors`.
  """
  if isinstance(tensors, dict):
    # Need to wrap the keys and values in list() since Python3 returns views.
    names = list(tensors.keys())
    tensor_list = list(tensors.values())
  else:
    names = None
    tensor_list = tensors

  with ops.name_scope(name, "prefetch_queue", tensor_list) as name:
    dtypes = [t.dtype for t in tensor_list]
    shapes = [t.get_shape() for t in tensor_list]
    queue = data_flow_ops.FIFOQueue(capacity=capacity,
                                    dtypes=dtypes,
                                    shapes=shapes,
                                    names=names,
                                    shared_name=shared_name)
    enqueue_op = queue.enqueue(tensors, name=name)
    queue_runner.add_queue_runner(queue_runner.QueueRunner(queue,
                                                           [enqueue_op]))
    logging_ops.scalar_summary(
        "queue/%s/fraction_of_%d_full" % (queue.name, capacity),
        math_ops.to_float(queue.size()) * (1. / capacity))
    return queue
Example 14: _queue_parsed_features

def _queue_parsed_features(feature_map):
  tensors_to_enqueue = []
  keys = []
  for key, tensor in six.iteritems(feature_map):
    keys.append(key)
    tensors_to_enqueue.append(tensor)
  queue_dtypes = [x.dtype for x in tensors_to_enqueue]
  input_queue = data_flow_ops.FIFOQueue(capacity=100, dtypes=queue_dtypes)
  queue_runner.add_queue_runner(
      queue_runner.QueueRunner(input_queue,
                               [input_queue.enqueue(tensors_to_enqueue)]))
  dequeued_tensors = input_queue.dequeue()
  return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
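A toy invocation of this helper, assuming it and its internal-module imports (six, data_flow_ops, queue_runner) are in scope; the feature names and values are invented for illustration:

import tensorflow as tf

feature_map = {
    'age': tf.constant([30.0]),
    'height': tf.constant([1.8]),
}
batched = _queue_parsed_features(feature_map)

with tf.Session() as sess:
  coord = tf.train.Coordinator()
  threads = tf.train.start_queue_runners(sess=sess, coord=coord)
  print(sess.run(batched))  # {'age': array([30.], ...), 'height': ...}
  coord.request_stop()
  coord.join(threads)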
Example 15: _conditional_batch

def _conditional_batch(tensors, accept_prob, batch_size, queue_threads=10):
  """Conditionally enqueue tensors based on accept_prob.

  Specifically, enqueue the element if accept_prob > rand_unif([0, 1]).

  Args:
    tensors: List of tensors to enqueue.
    accept_prob: Acceptance probability per example.
    batch_size: Size of batch.
    queue_threads: Number of threads enqueuing in the final queue.

  Returns:
    List of batched tensors.

  Raises:
    ValueError: `accept_prob` isn't 0D.
  """
  accept_prob.get_shape().assert_has_rank(0)
  # Determine shapes and types of to-be-enqueued-tensors.
  shapes_list = []
  dtypes_list = []
  for tensor in tensors:
    cur_shape = tensor.get_shape()
    cur_shape.assert_is_fully_defined()
    shapes_list.append(cur_shape)
    dtypes_list.append(tensor.dtype)

  final_q = data_flow_ops.FIFOQueue(capacity=batch_size,
                                    shapes=shapes_list,
                                    dtypes=dtypes_list,
                                    name='batched_queue')
  logging_ops.scalar_summary('queue/%s/size' % final_q.name, final_q.size())

  # Conditionally enqueue.
  # Reshape enqueue op to match no_op's shape.
  eq_tf = math_ops.less(random_ops.random_uniform([]), accept_prob)
  conditional_enqueue = control_flow_ops.cond(
      eq_tf,
      lambda: final_q.enqueue(tensors),
      control_flow_ops.no_op)
  queue_runner.add_queue_runner(queue_runner.QueueRunner(
      final_q, [conditional_enqueue] * queue_threads))

  out_tensor = final_q.dequeue_many(batch_size)
  # Queues return a single tensor if the list of enqueued tensors is one.
  # Since we want the type to be the same in all cases, always return a list.
  if isinstance(out_tensor, ops.Tensor):
    out_tensor = [out_tensor]

  return out_tensor
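Hedged usage of _conditional_batch, assuming the function and its internal-module imports are in scope; the acceptance probability and example values are illustrative only:

import tensorflow as tf

# Subsample a stream of constant examples at an acceptance probability of 0.3.
example = tf.constant([1.0, 2.0])
accept_prob = tf.constant(0.3)
batch = _conditional_batch([example], accept_prob, batch_size=4)
# `batch` is a list containing one [4, 2] tensor of accepted examples.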