This article collects typical usage examples of queue_runner.QueueRunner from the Python package tensorflow.python.training. If you are wondering what queue_runner.QueueRunner does, how to use it, or what it looks like in practice, the curated code examples below should help. You can also explore further usage examples from the containing module, tensorflow.python.training.queue_runner.
The following presents 15 code examples of queue_runner.QueueRunner, ordered by popularity by default.
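Before the examples, here is a minimal, self-contained sketch of the typical QueueRunner life cycle. The queue, the enqueue op, and the session loop below are illustrative assumptions, not taken from any example on this page:

import tensorflow as tf
from tensorflow.python.training import queue_runner

# A small FIFO queue fed by a trivial enqueue op.
queue = tf.FIFOQueue(capacity=32, dtypes=[tf.float32])
enqueue_op = queue.enqueue([tf.random_normal([])])

# One QueueRunner drives several copies of the enqueue op, each on its own thread.
runner = queue_runner.QueueRunner(queue, [enqueue_op] * 4)
queue_runner.add_queue_runner(runner)  # registers it in GraphKeys.QUEUE_RUNNERS

with tf.Session() as sess:
  coord = tf.train.Coordinator()
  threads = tf.train.start_queue_runners(sess=sess, coord=coord)
  print(sess.run(queue.dequeue()))
  coord.request_stop()
  coord.join(threads)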
Example 1: prefetch_op
# Required module: from tensorflow.python.training import queue_runner [as alias]
# Or: from tensorflow.python.training.queue_runner import QueueRunner [as alias]
def prefetch_op(self):
  """The op used to prefetch new data into the state saver.

  Running it once enqueues one new input example into the state saver.
  The first time this gets called, it additionally creates the prefetch_op.
  Subsequent calls simply return the previously created `prefetch_op`.

  It should be run in a separate thread via e.g. a `QueueRunner`.

  Returns:
    An `Operation` that performs prefetching.
  """
  if not self._prefetch_op:
    with ops.name_scope(None), ops.name_scope(
        self._scope, values=[self._barrier.barrier_ref]):
      self._create_prefetch_op()
  return self._prefetch_op
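The docstring says the op should be driven from a separate thread. Below is a minimal sketch of doing that by hand; it assumes `state_saver` is an already constructed `SequenceQueueingStateSaver` and `sess` is an open session (in practice the higher-level `batch_sequences_with_states` wires this up with a `QueueRunner` for you):

import threading
import tensorflow as tf

def prefetch_loop(sess, coord, state_saver):
  # Keep feeding the state saver until the coordinator asks us to stop or
  # the underlying input is exhausted.
  while not coord.should_stop():
    try:
      sess.run(state_saver.prefetch_op)
    except tf.errors.OutOfRangeError:
      coord.request_stop()

coord = tf.train.Coordinator()
thread = threading.Thread(target=prefetch_loop, args=(sess, coord, state_saver))
thread.daemon = True
thread.start()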
Example 2: _add_remote_queue_runner
# Required module: from tensorflow.python.training import queue_runner [as alias]
# Or: from tensorflow.python.training.queue_runner import QueueRunner [as alias]
def _add_remote_queue_runner(self, queue, enq_ops):
  """Adds a remote queue runner to the graph.

  These queue runners differ from the standard in two ways: First,
  they never close their queue. Second, they are added to the
  `Feeder.REMOTE_QUEUE_RUNNERS` collection, rather than
  `ops.GraphKeys.QUEUE_RUNNERS`, so they can be started/stopped
  separately.

  Args:
    queue: The queue.
    enq_ops: A list of ops which perform enqueues (each on its own thread).
  """
  runner = queue_runner.QueueRunner(
      queue,
      enq_ops,
      cancel_op=self._fake_op,
      close_op=self._fake_op)
  queue_runner.add_queue_runner(
      runner, collection=Feeder.REMOTE_QUEUE_RUNNERS)
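A hedged sketch of what starting these runners separately could look like. `Feeder` and its `REMOTE_QUEUE_RUNNERS` collection key come from the same contrib feeder module as the method above; `sess` is assumed to be an open session:

import tensorflow as tf

coord = tf.train.Coordinator()
threads = []
# Runners registered under Feeder.REMOTE_QUEUE_RUNNERS are not touched by
# tf.train.start_queue_runners(), so they can be launched on their own.
for qr in tf.get_collection(Feeder.REMOTE_QUEUE_RUNNERS):
  threads.extend(qr.create_threads(sess, coord=coord, daemon=True, start=True))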
Example 3: get_chief_queue_runner
# Required module: from tensorflow.python.training import queue_runner [as alias]
# Or: from tensorflow.python.training.queue_runner import QueueRunner [as alias]
def get_chief_queue_runner(self):
  """Returns the QueueRunner for the chief to execute.

  This includes the operations to synchronize replicas: aggregate gradients,
  apply to variables, increment global step, insert tokens to token queue.

  Note that this can only be called after calling apply_gradients() which
  actually generates this queuerunner.

  Returns:
    A `QueueRunner` for chief to execute.

  Raises:
    ValueError: If this is called before apply_gradients().
  """
  if self._gradients_applied is False:
    raise ValueError("Should be called after apply_gradients().")
  return self._chief_queue_runner
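For context, a hedged sketch of the SyncReplicasOptimizer workflow this method belongs to. The loss, global step, and is_chief flag are assumptions supplied by the surrounding training program:

import tensorflow as tf

opt = tf.train.SyncReplicasOptimizer(
    tf.train.GradientDescentOptimizer(0.1),
    replicas_to_aggregate=4, total_num_replicas=4)
train_op = opt.minimize(loss, global_step=global_step)  # calls apply_gradients()

if is_chief:
  # Only valid after apply_gradients(); the chief runs this queue runner in
  # addition to the ordinary ones.
  chief_queue_runner = opt.get_chief_queue_runner()
  init_tokens_op = opt.get_init_tokens_op()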
Example 4: range_input_producer
# Required module: from tensorflow.python.training import queue_runner [as alias]
# Or: from tensorflow.python.training.queue_runner import QueueRunner [as alias]
def range_input_producer(limit, num_epochs=None, shuffle=True, seed=None,
                         capacity=32, shared_name=None, name=None):
  """Produces the integers from 0 to limit-1 in a queue.

  Note: if `num_epochs` is not `None`, this function creates local counter
  `epochs`. Use `local_variables_initializer()` to initialize local variables.

  Args:
    limit: An int32 scalar tensor.
    num_epochs: An integer (optional). If specified, `range_input_producer`
      produces each integer `num_epochs` times before generating an
      OutOfRange error. If not specified, `range_input_producer` can cycle
      through the integers an unlimited number of times.
    shuffle: Boolean. If true, the integers are randomly shuffled within each
      epoch.
    seed: An integer (optional). Seed used if shuffle == True.
    capacity: An integer. Sets the queue capacity.
    shared_name: (optional). If set, this queue will be shared under the given
      name across multiple sessions.
    name: A name for the operations (optional).

  Returns:
    A Queue with the output integers. A `QueueRunner` for the Queue
    is added to the current `Graph`'s `QUEUE_RUNNER` collection.
  """
  with ops.name_scope(name, "input_producer", [limit]) as name:
    range_tensor = math_ops.range(limit)
    return input_producer(
        range_tensor, [], num_epochs, shuffle, seed, capacity,
        shared_name, "fraction_of_%d_full" % capacity, name)
Example 5: _enqueue_join
# Required module: from tensorflow.python.training import queue_runner [as alias]
# Or: from tensorflow.python.training.queue_runner import QueueRunner [as alias]
def _enqueue_join(queue, tensor_list_list, enqueue_many, keep_input):
  """Enqueue `tensor_list_list` in `queue`."""
  if enqueue_many:
    enqueue_fn = queue.enqueue_many
  else:
    enqueue_fn = queue.enqueue
  if keep_input.get_shape().ndims == 1:
    enqueue_ops = [enqueue_fn(_select_which_to_enqueue(x, keep_input))
                   for x in tensor_list_list]
  else:
    enqueue_ops = [_smart_cond(
        keep_input,
        lambda: enqueue_fn(tl),  # pylint:disable=cell-var-from-loop
        control_flow_ops.no_op) for tl in tensor_list_list]
  queue_runner.add_queue_runner(queue_runner.QueueRunner(queue, enqueue_ops))
Example 6: _enqueue
# Required module: from tensorflow.python.training import queue_runner [as alias]
# Or: from tensorflow.python.training.queue_runner import QueueRunner [as alias]
def _enqueue(queue, tensor_list, threads, enqueue_many, keep_input):
  """Enqueue `tensor_list` in `queue`."""
  if enqueue_many:
    enqueue_fn = queue.enqueue_many
  else:
    enqueue_fn = queue.enqueue
  if keep_input.get_shape().ndims == 1:
    enqueue_ops = [
        enqueue_fn(_select_which_to_enqueue(tensor_list, keep_input))] * threads
  else:
    enqueue_ops = [_smart_cond(
        keep_input,
        lambda: enqueue_fn(tensor_list),
        control_flow_ops.no_op)] * threads
  queue_runner.add_queue_runner(queue_runner.QueueRunner(queue, enqueue_ops))
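A toy illustration of the `keep_input` pattern used in the two helpers above. This is not the library code; `_smart_cond` behaves like `tf.cond` when `keep_input` is not a Python constant, so plain `tf.cond` is used here:

import tensorflow as tf

queue = tf.FIFOQueue(capacity=8, dtypes=[tf.float32])
example = tf.random_normal([])
keep_input = example > 0.0  # scalar bool: keep only positive samples

# When keep_input evaluates to False, the branch is a no-op and nothing is enqueued.
enqueue_op = tf.cond(keep_input,
                     lambda: queue.enqueue([example]),
                     tf.no_op)
tf.train.add_queue_runner(tf.train.QueueRunner(queue, [enqueue_op] * 2))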
Example 7: read
# Required module: from tensorflow.python.training import queue_runner [as alias]
# Or: from tensorflow.python.training.queue_runner import QueueRunner [as alias]
def read(self, queue, name=None):
  """Returns the next record (key, value pair) produced by the reader.

  The multiple reader instances are all configured to `read()` from the
  filenames listed in `queue` and enqueue their output into the
  `common_queue` passed to the constructor, and this method returns the
  next record dequeued from that `common_queue`.

  Readers dequeue a work unit from `queue` if necessary (e.g. when a
  reader needs to start reading from a new file since it has finished with
  the previous file).

  A queue runner for enqueuing in the `common_queue` is automatically added
  to the TF QueueRunners collection.

  Args:
    queue: A Queue or a mutable string Tensor representing a handle
      to a Queue, with string work items.
    name: A name for the operation (optional).

  Returns:
    The next record (i.e. (key, value pair)) from the common_queue.
  """
  enqueue_ops = []
  for reader in self._readers:
    enqueue_ops.append(self._common_queue.enqueue(reader.read(queue)))
  queue_runner.add_queue_runner(
      queue_runner.QueueRunner(self._common_queue, enqueue_ops))
  return self._common_queue.dequeue(name=name)
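A hedged usage sketch of the surrounding class, the `ParallelReader` from tf.contrib.slim's parallel_reader module. The import path is from the TF 1.x contrib tree and may differ between versions, and the file name is purely hypothetical:

import tensorflow as tf
from tensorflow.contrib.slim.python.slim.data import parallel_reader

filename_queue = tf.train.string_input_producer(["train-00000.tfrecord"])  # hypothetical file
common_queue = tf.FIFOQueue(capacity=64, dtypes=[tf.string, tf.string])
reader = parallel_reader.ParallelReader(
    tf.TFRecordReader, common_queue, num_readers=4)
key, value = reader.read(filename_queue)  # registers the QueueRunner shown above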
Example 8: set_fed_tensors
# Required module: from tensorflow.python.training import queue_runner [as alias]
# Or: from tensorflow.python.training.queue_runner import QueueRunner [as alias]
def set_fed_tensors(self, tensors):
  """Sets fed tensors."""
  enq_op = self._local_q.enqueue(tensors)
  queue_runner.add_queue_runner(queue_runner.QueueRunner(
      self._local_q, [enq_op]))
Example 9: set_many_fed_tensors
# Required module: from tensorflow.python.training import queue_runner [as alias]
# Or: from tensorflow.python.training.queue_runner import QueueRunner [as alias]
def set_many_fed_tensors(self, tensors):
  """Sets batches fed tensors."""
  enq_op = self._local_q.enqueue_many(tensors)
  queue_runner.add_queue_runner(queue_runner.QueueRunner(
      self._local_q, [enq_op]))
Example 10: next_batch
# Required module: from tensorflow.python.training import queue_runner [as alias]
# Or: from tensorflow.python.training.queue_runner import QueueRunner [as alias]
def next_batch(self):
  """The `NextQueuedSequenceBatch` providing access to batched output data.

  Also provides access to the `state` and `save_state` methods.
  The first time this gets called, it additionally prepares barrier reads
  and creates `NextQueuedSequenceBatch` / next_batch objects. Subsequent
  calls simply return the previously created `next_batch`.

  In order to access data in `next_batch` without blocking, the `prefetch_op`
  must have been run at least `batch_size` times (ideally in a separate
  thread, or launched via a `QueueRunner`). After processing a segment in
  `next_batch()`, `batch.save_state()` must be called which is done by the
  state_saving_rnn. Without this call, the dequeue op associated with the
  SQSS will not run.

  Returns:
    A cached `NextQueuedSequenceBatch` instance.
  """
  # This is needed to prevent errors if next_batch is called before
  # prefetch_op is created.
  if not self._prefetch_op:
    with ops.name_scope(None), ops.name_scope(
        self._scope, values=[self._barrier.barrier_ref]):
      self._create_prefetch_op()
  if not self._next_batch:
    with ops.name_scope(None), ops.name_scope(
        self._scope, values=[self._barrier.barrier_ref]):
      self._prepare_barrier_reads()
  return self._next_batch
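A hedged sketch of the higher-level entry point that wires `prefetch_op` and `next_batch` together; the input tensors (`key`, `tokens`, `length`, `initial_state`, `new_state`) are placeholders for whatever the surrounding model provides:

from tensorflow.contrib.training import batch_sequences_with_states

batch = batch_sequences_with_states(
    input_key=key,                        # scalar string identifying the sequence
    input_sequences={"tokens": tokens},   # [sequence_length, ...] tensors
    input_context={},
    input_length=length,
    initial_states={"lstm_state": initial_state},
    num_unroll=10,
    batch_size=32)

tokens_segment = batch.sequences["tokens"]            # [batch_size, num_unroll, ...]
state = batch.state("lstm_state")                     # state saved from the previous segment
save_op = batch.save_state("lstm_state", new_state)   # must run once per processed segment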
Example 11: range_input_producer
# Required module: from tensorflow.python.training import queue_runner [as alias]
# Or: from tensorflow.python.training.queue_runner import QueueRunner [as alias]
def range_input_producer(limit, num_epochs=None, shuffle=True, seed=None,
                         capacity=32, shared_name=None, name=None):
  """Produces the integers from 0 to limit-1 in a queue.

  Note: if `num_epochs` is not `None`, this function creates local counter
  `epochs`. Use `local_variables_initializer()` to initialize local variables.

  Args:
    limit: An int32 scalar tensor.
    num_epochs: An integer (optional). If specified, `range_input_producer`
      produces each integer `num_epochs` times before generating an
      OutOfRange error. If not specified, `range_input_producer` can cycle
      through the integers an unlimited number of times.
    shuffle: Boolean. If true, the integers are randomly shuffled within each
      epoch.
    seed: An integer (optional). Seed used if shuffle == True.
    capacity: An integer. Sets the queue capacity.
    shared_name: (optional). If set, this queue will be shared under the given
      name across multiple sessions.
    name: A name for the operations (optional).

  Returns:
    A Queue with the output integers. A `QueueRunner` for the Queue
    is added to the current `Graph`'s `QUEUE_RUNNER` collection.
  """
  with ops.name_scope(name, "input_producer", [limit]) as name:
    range_tensor = math_ops.range(limit)
    return input_producer(
        range_tensor, [], num_epochs, shuffle, seed, capacity,
        shared_name, name, "fraction_of_%d_full" % capacity)
Example 12: _enqueue_join
# Required module: from tensorflow.python.training import queue_runner [as alias]
# Or: from tensorflow.python.training.queue_runner import QueueRunner [as alias]
def _enqueue_join(queue, tensor_list_list, enqueue_many, keep_input):
  """Enqueue `tensor_list_list` in `queue`."""
  if enqueue_many:
    enqueue_fn = queue.enqueue_many
  else:
    enqueue_fn = queue.enqueue
  if keep_input is None:
    enqueue_ops = [enqueue_fn(tl) for tl in tensor_list_list]
  else:
    enqueue_ops = [control_flow_ops.cond(
        keep_input,
        lambda: enqueue_fn(tl),
        control_flow_ops.no_op) for tl in tensor_list_list]
  queue_runner.add_queue_runner(queue_runner.QueueRunner(queue, enqueue_ops))
Example 13: _enqueue
# Required module: from tensorflow.python.training import queue_runner [as alias]
# Or: from tensorflow.python.training.queue_runner import QueueRunner [as alias]
def _enqueue(queue, tensor_list, threads, enqueue_many, keep_input):
  """Enqueue `tensor_list` in `queue`."""
  if enqueue_many:
    enqueue_fn = queue.enqueue_many
  else:
    enqueue_fn = queue.enqueue
  if keep_input is None:
    enqueue_ops = [enqueue_fn(tensor_list)] * threads
  else:
    enqueue_ops = [control_flow_ops.cond(
        keep_input,
        lambda: enqueue_fn(tensor_list),
        control_flow_ops.no_op)] * threads
  queue_runner.add_queue_runner(queue_runner.QueueRunner(queue, enqueue_ops))
Example 14: _configure_readers_by
# Required module: from tensorflow.python.training import queue_runner [as alias]
# Or: from tensorflow.python.training.queue_runner import QueueRunner [as alias]
def _configure_readers_by(self, queue):
  enqueue_ops = []
  for reader in self._readers:
    enqueue_ops.append(self._common_queue.enqueue(reader.read(queue)))
  queue_runner.add_queue_runner(
      queue_runner.QueueRunner(self._common_queue, enqueue_ops))
Example 15: _enqueue_join
# Required module: from tensorflow.python.training import queue_runner [as alias]
# Or: from tensorflow.python.training.queue_runner import QueueRunner [as alias]
def _enqueue_join(queue, tensor_list_list, enqueue_many, keep_input):
  """Enqueue `tensor_list_list` in `queue`."""
  if enqueue_many:
    enqueue_fn = queue.enqueue_many
  else:
    enqueue_fn = queue.enqueue
  if keep_input.shape.ndims == 1:
    enqueue_ops = [enqueue_fn(_select_which_to_enqueue(x, keep_input))
                   for x in tensor_list_list]
  else:
    enqueue_ops = [_smart_cond(
        keep_input,
        lambda: enqueue_fn(tl),  # pylint:disable=cell-var-from-loop
        control_flow_ops.no_op) for tl in tensor_list_list]
  queue_runner.add_queue_runner(queue_runner.QueueRunner(queue, enqueue_ops))
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 17, Source file: input.py