This article collects typical usage examples of the Python attribute tensorflow.python.framework.dtypes.resource. If you are wondering what dtypes.resource is for, how it is used, or what real examples look like, the curated code samples below may help. You can also explore further examples from its containing module, tensorflow.python.framework.dtypes.
The 15 code examples of dtypes.resource shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
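Before the examples, a bit of context: dtypes.resource is the DType of handle tensors that point at stateful objects such as resource variables, queues, readers, and dataset iterators. A minimal sketch, assuming TF 1.x graph mode, of the kind of dtype check every example below performs:

# Minimal sketch (TF 1.x graph mode assumed): a resource-backed variable exposes a
# handle tensor whose dtype is `resource`, which the examples below test against.
import tensorflow as tf
from tensorflow.python.framework import dtypes

v = tf.Variable(1.0, use_resource=True)     # resource-backed variable
print(v.handle.dtype == dtypes.resource)    # True
print(dtypes.resource)                      # <dtype: 'resource'>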
Example 1: num_records_produced
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import resource [as alias]
def num_records_produced(self, name=None):
    """Returns the number of records this reader has produced.

    This is the same as the number of Read executions that have succeeded.

    Args:
      name: A name for the operation (optional).

    Returns:
      An int64 Tensor.
    """
    if self._reader_ref.dtype == dtypes.resource:
        return gen_io_ops._reader_num_records_produced_v2(self._reader_ref,
                                                          name=name)
    else:
        return gen_io_ops._reader_num_records_produced(self._reader_ref,
                                                       name=name)
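A short usage sketch for the method above, assuming the TF 1.x reader/queue input pipeline; the filename is a placeholder, not part of the original example:

# Usage sketch (TF 1.x graph mode assumed); "data.tfrecord" is a placeholder path.
import tensorflow as tf

reader = tf.TFRecordReader()
filename_queue = tf.train.string_input_producer(["data.tfrecord"])
key, value = reader.read(filename_queue)
produced = reader.num_records_produced()   # int64 Tensor; dispatches to the v2 op on resource readers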
Example 2: serialize_state
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import resource [as alias]
def serialize_state(self, name=None):
    """Produce a string tensor that encodes the state of a reader.

    Not all Readers support being serialized, so this can produce an
    Unimplemented error.

    Args:
      name: A name for the operation (optional).

    Returns:
      A string Tensor.
    """
    if self._reader_ref.dtype == dtypes.resource:
        return gen_io_ops._reader_serialize_state_v2(self._reader_ref, name=name)
    else:
        return gen_io_ops._reader_serialize_state(self._reader_ref, name=name)
Example 3: restore_state
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import resource [as alias]
def restore_state(self, state, name=None):
    """Restore a reader to a previously saved state.

    Not all Readers support being restored, so this can produce an
    Unimplemented error.

    Args:
      state: A string Tensor.
        Result of a SerializeState of a Reader with matching type.
      name: A name for the operation (optional).

    Returns:
      The created Operation.
    """
    if self._reader_ref.dtype == dtypes.resource:
        return gen_io_ops._reader_restore_state_v2(
            self._reader_ref, state, name=name)
    else:
        return gen_io_ops._reader_restore_state(
            self._reader_ref, state, name=name)
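Examples 2 and 3 pair naturally: serialize a reader's state, then restore it later. A hedged round-trip sketch, again assuming a TF 1.x reader (paths are illustrative, and not every reader type supports these ops):

# Round-trip sketch (TF 1.x graph mode assumed).
import tensorflow as tf

reader = tf.TFRecordReader()
filename_queue = tf.train.string_input_producer(["data.tfrecord"])  # placeholder file
key, value = reader.read(filename_queue)

state = reader.serialize_state()          # string Tensor encoding the reader state
restore_op = reader.restore_state(state)  # Operation that re-applies that state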
Example 4: _resource_apply_sparse_duplicate_indices
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import resource [as alias]
def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):
    """Add ops to apply sparse gradients to `handle`, with repeated indices.

    Optimizers which override this method must deal with repeated indices. See
    the docstring of `_apply_sparse_duplicate_indices` for details. By default
    the correct behavior, to sum non-unique indices and their associated
    gradients, is enforced by first pre-processing `grad` and `indices` and
    passing them on to `_resource_apply_sparse`. Optimizers which deal correctly
    with duplicate indices may instead override this method to avoid the
    overhead of summing.

    Args:
      grad: a `Tensor` representing the gradient for the affected indices.
      handle: a `Tensor` of dtype `resource` which points to the variable
        to be updated.
      indices: a `Tensor` of integral type representing the indices for
        which the gradient is nonzero. Indices may be repeated.

    Returns:
      An `Operation` which updates the value of the variable.
    """
    summed_grad, unique_indices = _deduplicate_indexed_slices(
        values=grad, indices=indices)
    return self._resource_apply_sparse(summed_grad, handle, unique_indices)
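The helper _deduplicate_indexed_slices is not shown in this example. A minimal sketch of the behavior the docstring describes, merging repeated indices and summing their gradient rows, assuming a reasonably recent TF with tf.math available:

# Sketch only: merges duplicate indices and sums the corresponding gradient rows,
# mirroring the pre-processing step described above.
import tensorflow as tf

def deduplicate_indexed_slices_sketch(values, indices):
    unique_indices, positions = tf.unique(indices)
    summed_values = tf.math.unsorted_segment_sum(
        values, positions, tf.shape(unique_indices)[0])
    return summed_values, unique_indices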
Example 5: _resource_apply_sparse
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import resource [as alias]
def _resource_apply_sparse(self, grad, handle, indices):
    """Add ops to apply sparse gradients to the variable `handle`.

    Similar to `_apply_sparse`, the `indices` argument to this method has been
    de-duplicated. Optimizers which deal correctly with non-unique indices may
    instead override `_resource_apply_sparse_duplicate_indices` to avoid this
    overhead.

    Args:
      grad: a `Tensor` representing the gradient for the affected indices.
      handle: a `Tensor` of dtype `resource` which points to the variable
        to be updated.
      indices: a `Tensor` of integral type representing the indices for
        which the gradient is nonzero. Indices are unique.

    Returns:
      An `Operation` which updates the value of the variable.
    """
    raise NotImplementedError()
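Because the base implementation only raises NotImplementedError, a concrete optimizer has to override it. A hypothetical, heavily simplified subclass sketch; the class name, learning-rate handling, and the scatter-add update are illustrative assumptions, not TensorFlow's built-in SGD implementation:

# Hypothetical sketch of overriding _resource_apply_sparse (TF 1.x assumed).
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.training import optimizer

class ToySparseSGD(optimizer.Optimizer):
    def __init__(self, learning_rate, name="ToySparseSGD"):
        super(ToySparseSGD, self).__init__(use_locking=False, name=name)
        self._lr = learning_rate

    def _resource_apply_sparse(self, grad, handle, indices):
        # `handle` is a resource tensor; scatter-add the scaled gradient slices.
        return resource_variable_ops.resource_scatter_add(
            handle, indices, -self._lr * grad)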
Example 6: __init__
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import resource [as alias]
def __init__(self, iterator_resource, initializer, output_types,
             output_shapes):
    """Creates a new iterator from the given iterator resource.

    NOTE(mrry): Most users will not call this initializer directly, and will
    instead use `Iterator.from_dataset()` or `Dataset.make_one_shot_iterator()`.

    Args:
      iterator_resource: A `tf.resource` scalar `tf.Tensor` representing the
        iterator.
      initializer: A `tf.Operation` that should be run to initialize this
        iterator.
      output_types: A nested structure of `tf.DType` objects corresponding to
        each component of an element of this iterator.
      output_shapes: A nested structure of `tf.TensorShape` objects
        corresponding to each component of an element of this dataset.
    """
    self._iterator_resource = iterator_resource
    self._initializer = initializer
    self._output_types = output_types
    self._output_shapes = output_shapes
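As the docstring notes, this constructor is rarely called directly. A typical TF 1.x Dataset usage sketch that ends up wrapping a resource-typed iterator handle under the hood:

# Usage sketch (TF 1.x Dataset API assumed).
import tensorflow as tf

dataset = tf.data.Dataset.range(5)
iterator = dataset.make_one_shot_iterator()   # wraps an iterator resource handle
next_element = iterator.get_next()

with tf.Session() as sess:
    print(sess.run(next_element))  # 0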
Example 7: _graph_def_from_concrete_fn
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import resource [as alias]
def _graph_def_from_concrete_fn(cfs):
    if len(cfs) != 1:
        raise NotImplementedError("Only a single concrete function is supported.")

    frozen_fn = _convert_variables_to_constants_v2(cfs[0], lower_control_flow=False)
    graph_def = frozen_fn.graph.as_graph_def(add_shapes=True)

    # Run Grappler's constant-folding pass, skipping the captured resource inputs.
    fn_inputs = [t for t in frozen_fn.inputs if t.dtype != _dtypes.resource]
    graph_def = _run_graph_optimizations(
        graph_def,
        fn_inputs,
        frozen_fn.outputs,
        config=_get_grappler_config(["constfold", "dependency"]),
        graph=frozen_fn.graph,
    )
    return graph_def
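A short sketch of why the filter above is needed: a concrete function that captures a variable carries an extra `resource` input, which must be dropped before handing the graph to Grappler. This assumes TF 2.x and the public tf.function API:

# Sketch (TF 2.x assumed): a captured variable shows up as an extra `resource` input.
import tensorflow as tf
from tensorflow.python.framework import dtypes

w = tf.Variable(2.0)

@tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
def scale(x):
    return w * x

cf = scale.get_concrete_function()
non_resource_inputs = [t for t in cf.inputs if t.dtype != dtypes.resource]
print(len(cf.inputs), len(non_resource_inputs))  # typically 2 vs. 1 here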
Example 8: is_closed
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import resource [as alias]
def is_closed(self, name=None):
    """Returns true if queue is closed.

    This operation returns true if the queue is closed and false if the queue
    is open.

    Args:
      name: A name for the operation (optional).

    Returns:
      True if the queue is closed and false if the queue is open.
    """
    if name is None:
        name = "%s_Is_Closed" % self._name
    if self._queue_ref.dtype == _dtypes.resource:
        return gen_data_flow_ops.queue_is_closed_v2(self._queue_ref, name=name)
    else:
        return gen_data_flow_ops.queue_is_closed_(self._queue_ref, name=name)
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines of code: 20, Source file: data_flow_ops.py
Example 9: restore_iterator
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import resource [as alias]
def restore_iterator(iterator, path, name=None):
    r"""Restores the state of the `iterator` from the checkpoint saved at `path` using "SaveIterator".

    Args:
      iterator: A `Tensor` of type `resource`.
      path: A `Tensor` of type `string`.
      name: A name for the operation (optional).

    Returns:
      The created Operation.
    """
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        _, _, _op = _op_def_lib._apply_op_helper(
            "RestoreIterator", iterator=iterator, path=path, name=name)
        return _op
    else:
        iterator = _ops.convert_to_tensor(iterator, _dtypes.resource)
        path = _ops.convert_to_tensor(path, _dtypes.string)
        _inputs_flat = [iterator, path]
        _attrs = None
        _result = _execute.execute(b"RestoreIterator", 0, inputs=_inputs_flat,
                                   attrs=_attrs, ctx=_ctx, name=name)
        return _result
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines of code: 26, Source file: gen_dataset_ops.py
Example 10: _reader_reset_v2
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import resource [as alias]
def _reader_reset_v2(reader_handle, name=None):
    r"""Restore a Reader to its initial clean state.

    Args:
      reader_handle: A `Tensor` of type `resource`. Handle to a Reader.
      name: A name for the operation (optional).

    Returns:
      The created Operation.
    """
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        _, _, _op = _op_def_lib._apply_op_helper(
            "ReaderResetV2", reader_handle=reader_handle, name=name)
        return _op
    else:
        reader_handle = _ops.convert_to_tensor(reader_handle, _dtypes.resource)
        _inputs_flat = [reader_handle]
        _attrs = None
        _result = _execute.execute(b"ReaderResetV2", 0, inputs=_inputs_flat,
                                   attrs=_attrs, ctx=_ctx, name=name)
        return _result
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines of code: 24, Source file: gen_io_ops.py
Example 11: enqueue
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import resource [as alias]
def enqueue(self, vals, name=None):
    """Enqueues one element to this queue.

    If the queue is full when this operation executes, it will block
    until the element has been enqueued.

    At runtime, this operation may raise an error if the queue is closed
    (`tf.QueueBase.close`) before or during its execution. If the queue is
    closed before this operation runs, `tf.errors.CancelledError` will be
    raised. If this operation is blocked, and either (i) the queue is closed
    by a close operation with `cancel_pending_enqueues=True`, or (ii) the
    session is closed (`tf.Session.close`), `tf.errors.CancelledError` will
    be raised.

    Args:
      vals: A tensor, a list or tuple of tensors, or a dictionary containing
        the values to enqueue.
      name: A name for the operation (optional).

    Returns:
      The operation that enqueues a new tuple of tensors to the queue.
    """
    with ops.name_scope(name, "%s_enqueue" % self._name,
                        self._scope_vals(vals)) as scope:
        vals = self._check_enqueue_dtypes(vals)

        # NOTE(mrry): Not using a shape function because we need access to
        # the `QueueBase` object.
        for val, shape in zip(vals, self._shapes):
            val.get_shape().assert_is_compatible_with(shape)

        if self._queue_ref.dtype == _dtypes.resource:
            return gen_data_flow_ops._queue_enqueue_v2(
                self._queue_ref, vals, name=scope)
        else:
            return gen_data_flow_ops._queue_enqueue(
                self._queue_ref, vals, name=scope)
Example 12: dequeue
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import resource [as alias]
def dequeue(self, name=None):
    """Dequeues one element from this queue.

    If the queue is empty when this operation executes, it will block
    until there is an element to dequeue.

    At runtime, this operation may raise an error if the queue is closed
    (`tf.QueueBase.close`) before or during its execution. If the queue is
    closed, the queue is empty, and there are no pending enqueue operations
    that can fulfill this request, `tf.errors.OutOfRangeError` will be
    raised. If the session is closed (`tf.Session.close`),
    `tf.errors.CancelledError` will be raised.

    Args:
      name: A name for the operation (optional).

    Returns:
      The tuple of tensors that was dequeued.
    """
    if name is None:
        name = "%s_Dequeue" % self._name

    if self._queue_ref.dtype == _dtypes.resource:
        ret = gen_data_flow_ops._queue_dequeue_v2(
            self._queue_ref, self._dtypes, name=name)
    else:
        ret = gen_data_flow_ops._queue_dequeue(
            self._queue_ref, self._dtypes, name=name)

    # NOTE(mrry): Not using a shape function because we need access to
    # the `QueueBase` object.
    op = ret[0].op
    for output, shape in zip(op.values(), self._shapes):
        output.set_shape(shape)

    return self._dequeue_return_value(ret)
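The enqueue and dequeue methods above are normally reached through the public queue classes. A minimal usage sketch, assuming TF 1.x graph mode and a FIFOQueue (whose handle is resource-typed in recent 1.x releases, so the v2 kernels above are used):

# Usage sketch (TF 1.x graph mode assumed): enqueue then dequeue on a FIFOQueue.
import tensorflow as tf

queue = tf.FIFOQueue(capacity=10, dtypes=[tf.float32], shapes=[[]])
enqueue_op = queue.enqueue(3.0)
dequeue_op = queue.dequeue()

with tf.Session() as sess:
    sess.run(enqueue_op)
    print(sess.run(dequeue_op))  # 3.0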
Example 13: close
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import resource [as alias]
def close(self, cancel_pending_enqueues=False, name=None):
    """Closes this queue.

    This operation signals that no more elements will be enqueued in
    the given queue. Subsequent `enqueue` and `enqueue_many`
    operations will fail. Subsequent `dequeue` and `dequeue_many`
    operations will continue to succeed if sufficient elements remain
    in the queue. Subsequent `dequeue` and `dequeue_many` operations
    that would block will fail immediately.

    If `cancel_pending_enqueues` is `True`, all pending requests will also
    be cancelled.

    Args:
      cancel_pending_enqueues: (Optional.) A boolean, defaulting to
        `False` (described above).
      name: A name for the operation (optional).

    Returns:
      The operation that closes the queue.
    """
    if name is None:
        name = "%s_Close" % self._name
    if self._queue_ref.dtype == _dtypes.resource:
        return gen_data_flow_ops._queue_close_v2(
            self._queue_ref, cancel_pending_enqueues=cancel_pending_enqueues,
            name=name)
    else:
        return gen_data_flow_ops._queue_close(
            self._queue_ref, cancel_pending_enqueues=cancel_pending_enqueues,
            name=name)
Example 14: size
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import resource [as alias]
def size(self, name=None):
    """Compute the number of elements in this queue.

    Args:
      name: A name for the operation (optional).

    Returns:
      A scalar tensor containing the number of elements in this queue.
    """
    if name is None:
        name = "%s_Size" % self._name
    if self._queue_ref.dtype == _dtypes.resource:
        return gen_data_flow_ops._queue_size_v2(self._queue_ref, name=name)
    else:
        return gen_data_flow_ops._queue_size(self._queue_ref, name=name)
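Examples 8, 13, and 14 are all thin wrappers over the same queue handle, so a small sketch can exercise them together; as before this assumes TF 1.x graph mode and a FIFOQueue:

# Sketch (TF 1.x graph mode assumed): size, close, and is_closed on one queue.
import tensorflow as tf

queue = tf.FIFOQueue(capacity=2, dtypes=[tf.int32], shapes=[[]])

with tf.Session() as sess:
    sess.run(queue.enqueue(1))
    print(sess.run(queue.size()))       # 1
    sess.run(queue.close())
    print(sess.run(queue.is_closed()))  # True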
Example 15: read
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import resource [as alias]
def read(self, queue, name=None):
    """Returns the next record (key, value pair) produced by a reader.

    Will dequeue a work unit from queue if necessary (e.g. when the
    Reader needs to start reading from a new file since it has
    finished with the previous file).

    Args:
      queue: A Queue or a mutable string Tensor representing a handle
        to a Queue, with string work items.
      name: A name for the operation (optional).

    Returns:
      A tuple of Tensors (key, value).
        key: A string scalar Tensor.
        value: A string scalar Tensor.
    """
    if isinstance(queue, ops.Tensor):
        queue_ref = queue
    else:
        queue_ref = queue.queue_ref

    if self._reader_ref.dtype == dtypes.resource:
        return gen_io_ops._reader_read_v2(self._reader_ref, queue_ref, name=name)
    else:
        # For compatibility with pre-resource queues, create a ref(string) tensor
        # which can be looked up as the same queue by a resource manager.
        old_queue_op = gen_data_flow_ops._fake_queue(queue_ref)
        return gen_io_ops._reader_read(self._reader_ref, old_queue_op, name=name)
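A closing usage sketch for read, assuming the TF 1.x reader/queue-runner input pipeline; the filename is a placeholder:

# Usage sketch (TF 1.x graph mode assumed): read (key, value) records from files.
import tensorflow as tf

filename_queue = tf.train.string_input_producer(["data.tfrecord"])  # placeholder path
reader = tf.TFRecordReader()
key, value = reader.read(filename_queue)   # dispatches to _reader_read_v2 on resource readers

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    print(sess.run(key))
    coord.request_stop()
    coord.join(threads)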