This article collects typical usage examples of the Python method tensorflow.python.framework.ops.convert_to_tensor_or_indexed_slices. If you are wondering what ops.convert_to_tensor_or_indexed_slices does and how to use it, the curated code examples below should help. You can also explore the module that defines the method, tensorflow.python.framework.ops, for more details.
The following 14 code examples of ops.convert_to_tensor_or_indexed_slices are listed, sorted by popularity by default.
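Before the examples, here is a minimal illustrative sketch of what the method itself does (assuming TensorFlow 1.x graph mode; the variable names are hypothetical, not taken from the examples below): Tensor, IndexedSlices, and SparseTensor inputs are accepted as-is, while plain Python values are converted to a dense Tensor.

# Minimal sketch, TensorFlow 1.x graph mode assumed.
import tensorflow as tf
from tensorflow.python.framework import ops

graph = tf.Graph()
with graph.as_default():
  # A plain Python value is converted to a dense Tensor.
  dense = ops.convert_to_tensor_or_indexed_slices([1.0, 2.0])
  # A SparseTensor (likewise an IndexedSlices) is accepted and kept sparse.
  sparse = tf.SparseTensor(indices=[[0, 0]], values=[1.0], dense_shape=[2, 2])
  kept = ops.convert_to_tensor_or_indexed_slices(sparse)

print(type(dense).__name__)  # Tensor
print(type(kept).__name__)   # SparseTensor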
Example 1: _NextIteration
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import convert_to_tensor_or_indexed_slices [as alias]
def _NextIteration(data, name=None):
  """Makes `data` available to the next iteration of the enclosing loop frame."""
  data = ops.convert_to_tensor_or_indexed_slices(data, as_ref=True)
  if isinstance(data, ops.Tensor):
    if data.dtype.is_ref_dtype:
      return ref_next_iteration(data, name=name)
    else:
      return next_iteration(data, name=name)
  else:
    if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
      raise TypeError("Type %s not supported" % type(data))
    values = _NextIteration(data.values, name=name)
    indices = next_iteration(data.indices, name="indices")
    if isinstance(data, ops.IndexedSlices):
      dense_shape = data.dense_shape
      if dense_shape is not None:
        dense_shape = next_iteration(dense_shape, name="dense_shape")
      return ops.IndexedSlices(values, indices, dense_shape)
    else:
      dense_shape = next_iteration(data.shape, name="dense_shape")
      return sparse_tensor.SparseTensor(indices, values, dense_shape)
Example 2: _AsTensorList
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import convert_to_tensor_or_indexed_slices [as alias]
def _AsTensorList(x, p):
  """Return x as a list of Tensors or IndexedSlices.

  For entries of `x` that are Operations, this returns an Identity of `p`
  with a dependency on the operation.

  Args:
    x: A Tensor/IndexedSlices/Operation or a list or tuple of them.
    p: A Tensor to return for entries in `x` that are Operations.

  Returns:
    A list of Tensors or IndexedSlices.
  """
  if not isinstance(x, (list, _basetuple)):
    x = [x]

  l = []
  for v in x:
    if isinstance(v, ops.Operation):
      v = with_dependencies([v], p)
    v = ops.convert_to_tensor_or_indexed_slices(v)
    if isinstance(v, ops.Tensor):
      l.append(array_ops.identity(v))
    else:
      l.append(ops.IndexedSlices(array_ops.identity(v.values),
                                 array_ops.identity(v.indices)))
  return l
Example 3: with_dependencies
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import convert_to_tensor_or_indexed_slices [as alias]
def with_dependencies(dependencies, output_tensor, name=None):
  """Produces the content of `output_tensor` only after `dependencies`.

  In some cases, a user may want the output of an operation to be
  consumed externally only after some other dependencies have run
  first. This function returns `output_tensor`, but only after all
  operations in `dependencies` have run. Note that this means that there is
  no guarantee that `output_tensor` will be evaluated after any `dependencies`
  have run.

  See also `tf.tuple` and `tf.group`.

  Args:
    dependencies: Iterable of operations to run before this op finishes.
    output_tensor: A `Tensor` or `IndexedSlices` that will be returned.
    name: (Optional) A name for this operation.

  Returns:
    Same as `output_tensor`.

  Raises:
    TypeError: if `output_tensor` is not a `Tensor` or `IndexedSlices`.
  """
  with ops.name_scope(name, "control_dependency",
                      list(dependencies) + [output_tensor]) as name:
    with ops.colocate_with(output_tensor):
      with ops.control_dependencies(dependencies):
        output_tensor = ops.convert_to_tensor_or_indexed_slices(output_tensor)
        if isinstance(output_tensor, ops.Tensor):
          return _Identity(output_tensor, name=name)
        else:
          return ops.IndexedSlices(_Identity(output_tensor.values, name=name),
                                   output_tensor.indices,
                                   output_tensor.dense_shape)
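A short, hypothetical usage sketch of the pattern above (TensorFlow 1.x graph mode assumed; the counter variable and the payload value are illustrative): fetching the returned tensor forces the dependency to run first, which is how the returned Identity carries the side effect.

import tensorflow as tf
from tensorflow.python.ops import control_flow_ops

graph = tf.Graph()
with graph.as_default():
  counter = tf.Variable(0, name="counter")
  increment = tf.assign_add(counter, 1)
  payload = tf.constant(7.0, name="payload")
  # Fetching `result` yields the payload, but only after `increment` has run.
  result = control_flow_ops.with_dependencies([increment], payload)
  init = tf.global_variables_initializer()

with tf.Session(graph=graph) as sess:
  sess.run(init)
  print(sess.run(result))   # 7.0
  print(sess.run(counter))  # 1 -- the side effect ran before `result` was produced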
Example 4: with_dependencies
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import convert_to_tensor_or_indexed_slices [as alias]
def with_dependencies(dependencies, output_tensor, name=None):
  """Produces the content of `output_tensor` only after `dependencies`.

  In some cases, a user may want the output of an operation to be
  consumed externally only after some other dependencies have run
  first. This function returns `output_tensor`, but only after all
  operations in `dependencies` have run. Note that this means that there is
  no guarantee that `output_tensor` will be evaluated after any `dependencies`
  have run.

  See also `tuple` and `group`.

  Args:
    dependencies: Iterable of operations to run before this op finishes.
    output_tensor: A `Tensor` or `IndexedSlices` that will be returned.
    name: (Optional) A name for this operation.

  Returns:
    Same as `output_tensor`.

  Raises:
    TypeError: if `output_tensor` is not a `Tensor` or `IndexedSlices`.
  """
  with ops.name_scope(name, "control_dependency",
                      list(dependencies) + [output_tensor]) as name:
    with ops.colocate_with(output_tensor):
      with ops.control_dependencies(dependencies):
        output_tensor = ops.convert_to_tensor_or_indexed_slices(output_tensor)
        if isinstance(output_tensor, ops.Tensor):
          return _Identity(output_tensor, name=name)
        else:
          return ops.IndexedSlices(_Identity(output_tensor.values, name=name),
                                   output_tensor.indices,
                                   output_tensor.dense_shape)
Example 5: _Identity
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import convert_to_tensor_or_indexed_slices [as alias]
def _Identity(data, name=None):
  """Return a tensor with the same shape and contents as the input tensor.

  Args:
    data: A Tensor.
    name: A name for this operation (optional).

  Returns:
    A Tensor with the same type and value as the input Tensor.
  """
  data = ops.convert_to_tensor_or_indexed_slices(data, as_ref=True)
  if isinstance(data, ops.Tensor):
    if data.dtype.is_ref_dtype:
      return gen_array_ops._ref_identity(data, name=name)
    else:
      return array_ops.identity(data, name=name)
  else:
    if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
      raise TypeError("Type %s not supported" % type(data))
    values = _Identity(data.values, name=name)
    indices = array_ops.identity(data.indices, name="indices")
    if isinstance(data, ops.IndexedSlices):
      dense_shape = data.dense_shape
      if dense_shape is not None:
        dense_shape = array_ops.identity(dense_shape, name="dense_shape")
      return ops.IndexedSlices(values, indices, dense_shape)
    else:
      dense_shape = array_ops.identity(data.shape, name="dense_shape")
      return sparse_tensor.SparseTensor(indices, values, dense_shape)
Example 6: exit
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import convert_to_tensor_or_indexed_slices [as alias]
def exit(data, name=None):
  """Exits the current frame to its parent frame.

  Exit makes its input `data` available to the parent frame.

  Args:
    data: The tensor to be made available to the parent frame.
    name: A name for this operation (optional).

  Returns:
    The same tensor as `data`.
  """
  data = ops.convert_to_tensor_or_indexed_slices(data, as_ref=True)
  if isinstance(data, ops.Tensor):
    if data.dtype.is_ref_dtype:
      return gen_control_flow_ops._ref_exit(data, name)
    else:
      return gen_control_flow_ops._exit(data, name)
  else:
    if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
      raise TypeError("Type %s not supported" % type(data))
    values = exit(data.values, name=name)
    indices = gen_control_flow_ops._exit(data.indices, name="indices")
    if isinstance(data, ops.IndexedSlices):
      dense_shape = data.dense_shape
      if dense_shape is not None:
        dense_shape = gen_control_flow_ops._exit(dense_shape, name)
      return ops.IndexedSlices(values, indices, dense_shape)
    else:
      dense_shape = gen_control_flow_ops._exit(data.shape, name)
      return sparse_tensor.SparseTensor(indices, values, dense_shape)
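For context, a hypothetical sketch of the enter/exit pairing that this example is part of. It mirrors the pattern used in TensorFlow's own control-flow tests and assumes a late TensorFlow 1.x release (such as 1.15) where the generated ops are exposed as gen_control_flow_ops.enter rather than _enter; the frame name "demo_frame" is made up. Both operands are entered into the same child frame, and exit hands the result back to the parent frame.

import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_control_flow_ops

graph = tf.Graph()
with graph.as_default():
  data = tf.constant([1, 2, 3], name="data")
  five = tf.constant(5, name="five")
  # Both inputs must live in the same child frame before they can be combined.
  enter_data = gen_control_flow_ops.enter(data, "demo_frame", False)
  enter_five = gen_control_flow_ops.enter(five, "demo_frame", False)
  product = enter_data * enter_five
  # exit() makes the child-frame result visible to the parent frame again.
  result = control_flow_ops.exit(product)

with tf.Session(graph=graph) as sess:
  print(sess.run(result))  # [ 5 10 15]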
Example 7: with_dependencies
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import convert_to_tensor_or_indexed_slices [as alias]
def with_dependencies(dependencies, output_tensor, name=None):
  """Produces the content of `output_tensor` only after `dependencies`.

  In some cases, a user may want the output of an operation to be
  consumed externally only after some other dependencies have run
  first. This function returns `output_tensor`, but only after all
  operations in `dependencies` have run. Note that this means that there is
  no guarantee that `output_tensor` will be evaluated after any `dependencies`
  have run.

  See also `tuple` and `group`.

  Args:
    dependencies: A list of operations to run before this op finishes.
    output_tensor: A `Tensor` or `IndexedSlices` that will be returned.
    name: (Optional) A name for this operation.

  Returns:
    Same as `output_tensor`.

  Raises:
    TypeError: if `output_tensor` is not a `Tensor` or `IndexedSlices`.
  """
  with ops.name_scope(name, "control_dependency",
                      dependencies + [output_tensor]) as name:
    with ops.colocate_with(output_tensor):
      with ops.control_dependencies(dependencies):
        output_tensor = ops.convert_to_tensor_or_indexed_slices(output_tensor)
        if isinstance(output_tensor, ops.Tensor):
          return _Identity(output_tensor, name=name)
        else:
          return ops.IndexedSlices(_Identity(output_tensor.values, name=name),
                                   output_tensor.indices,
                                   output_tensor.dense_shape)
Example 8: with_dependencies
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import convert_to_tensor_or_indexed_slices [as alias]
def with_dependencies(dependencies, output_tensor, name=None):
  """Produces the content of `output_tensor` only after `dependencies`.

  In some cases, a user may want the output of an operation to be
  consumed externally only after some other dependencies have run
  first. This function returns `output_tensor`, but only after all
  operations in `dependencies` have run. Note that this means that there is
  no guarantee that `output_tensor` will be evaluated after any `dependencies`
  have run.

  See also `tf.tuple` and `tf.group`.

  Args:
    dependencies: Iterable of operations to run before this op finishes.
    output_tensor: A `Tensor` or `IndexedSlices` that will be returned.
    name: (Optional) A name for this operation.

  Returns:
    Same as `output_tensor`.

  Raises:
    TypeError: if `output_tensor` is not a `Tensor` or `IndexedSlices`.
  """
  if context.in_eager_mode():
    return output_tensor
  with ops.name_scope(name, "control_dependency",
                      list(dependencies) + [output_tensor]) as name:
    with ops.colocate_with(output_tensor):
      with ops.control_dependencies(dependencies):
        output_tensor = ops.convert_to_tensor_or_indexed_slices(output_tensor)
        if isinstance(output_tensor, ops.Tensor):
          return _Identity(output_tensor, name=name)
        else:
          return ops.IndexedSlices(_Identity(output_tensor.values, name=name),
                                   output_tensor.indices,
                                   output_tensor.dense_shape)
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines of code: 38, Source file: control_flow_ops.py
Example 9: _SwitchRefOrTensor
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import convert_to_tensor_or_indexed_slices [as alias]
def _SwitchRefOrTensor(data, pred, name="Switch"):
  """Forwards `data` to an output determined by `pred`.

  If `pred` is false, the `data` input is forwarded to the first output.
  Otherwise, the data goes to the second output.

  This op handles `Tensor`s and `IndexedSlices`.

  Args:
    data: The tensor to be forwarded to the appropriate output.
    pred: A scalar that specifies which output port will receive data.
    name: A name for this operation (optional).

  Returns:
    `(output_false, output_true)`: If `pred` is true, data will be forwarded to
    `output_true`, otherwise it goes to `output_false`.

  Raises:
    TypeError: if data is not a Tensor or IndexedSlices
  """
  data = ops.convert_to_tensor_or_indexed_slices(data, name="data")
  # NOTE(vrv): ops.colocate_with(data, ignore_existing=True) below
  # addresses the following scenario.
  #
  # Assume you execute Optimizer.apply_gradients() in a branch of a cond().
  #
  # 1. The update op is created inside a `with ops.colocate(var):` block
  #
  # 2. Some tensor `data` is captured and a switch is created in a
  #    `with ops.colocate_with(data):` block.
  #
  # with ops.colocate_with(var):
  #   with ops.colocate_with(data):
  #     op = ...
  #
  # var and data may be pinned to different devices, so we want ops
  # created within ops.colocate_with(data) to ignore the existing stack.
  with ops.colocate_with(data, ignore_existing=True):
    if isinstance(data, ops.Tensor):
      if data.dtype._is_ref_dtype:  # pylint: disable=protected-access
        return ref_switch(data, pred, name=name)
    return switch(data, pred, name=name)
Example 10: _SwitchRefOrTensor
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import convert_to_tensor_or_indexed_slices [as alias]
def _SwitchRefOrTensor(data, pred, name="Switch"):
  """Forwards `data` to an output determined by `pred`.

  If `pred` is false, the `data` input is forwarded to the first output.
  Otherwise, the data goes to the second output.

  This op handles `Tensor`s and `IndexedSlices`.

  Args:
    data: The tensor to be forwarded to the appropriate output.
    pred: A scalar that specifies which output port will receive data.
    name: A name for this operation (optional).

  Returns:
    `(output_false, output_true)`: If `pred` is true, data will be forwarded to
    `output_true`, otherwise it goes to `output_false`.

  Raises:
    TypeError: if data is not a Tensor or IndexedSlices
  """
  data = ops.convert_to_tensor_or_indexed_slices(data, name="data")
  # NOTE(vrv): ops.colocate_with(data, ignore_existing=True) below
  # addresses the following scenario.
  #
  # Assume you execute Optimizer.apply_gradients() in a branch of a cond().
  #
  # 1. The update op is created inside a `with ops.colocate(var):` block
  #
  # 2. Some tensor `data` is captured and a switch is created in a
  #    `with ops.colocate_with(data):` block.
  #
  # with ops.colocate_with(var):
  #   with ops.colocate_with(data):
  #     op = ...
  #
  # var and data may be pinned to different devices, so we want ops
  # created within ops.colocate_with(data) to ignore the existing stack.
  with ops.colocate_with(data, ignore_existing=True):
    if isinstance(data, ops.Tensor):
      if data.dtype._is_ref_dtype:  # pylint: disable=protected-access
        return ref_switch(data, pred, name=name)
    return switch(data, pred, name=name)
Example 11: _Enter
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import convert_to_tensor_or_indexed_slices [as alias]
def _Enter(data, frame_name, is_constant=False, parallel_iterations=10,
           use_ref=True, use_input_shape=True, name=None):
  """Creates or finds a child frame, and makes `data` available to it.

  The unique `frame_name` is used by the `Executor` to identify frames. If
  `is_constant` is true, `data` is a constant in the child frame; otherwise
  it may be changed in the child frame. At most `parallel_iterations`
  iterations are run in parallel in the child frame.

  Args:
    data: The tensor to be made available to the child frame.
    frame_name: The name of the child frame.
    is_constant: If true, the output is constant within the child frame.
    parallel_iterations: The number of iterations allowed to run in parallel.
    use_ref: If true, use ref_enter if data is of ref type.
    use_input_shape: If true, keep the static shape of `data` on the output.
    name: A name for this operation (optional).

  Returns:
    The same tensor as `data`.
  """
  data = ops.convert_to_tensor_or_indexed_slices(data, as_ref=True)
  if isinstance(data, ops.Tensor):
    if data.dtype.is_ref_dtype and use_ref:
      result = ref_enter(data, frame_name, is_constant, parallel_iterations,
                         name=name)
    else:
      result = enter(data, frame_name, is_constant, parallel_iterations,
                     name=name)
    if use_input_shape:
      result.set_shape(data.get_shape())
    return result
  else:
    if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
      raise TypeError("Type %s not supported" % type(data))
    values = _Enter(data.values, frame_name, is_constant,
                    parallel_iterations=parallel_iterations,
                    use_input_shape=use_input_shape, name=name)
    indices = enter(data.indices, frame_name, is_constant,
                    parallel_iterations, name="indices")
    if use_input_shape:
      indices.set_shape(data.indices.get_shape())
    if isinstance(data, ops.IndexedSlices):
      dense_shape = data.dense_shape
      if dense_shape is not None:
        dense_shape = enter(dense_shape, frame_name, is_constant,
                            parallel_iterations, name="dense_shape")
        if use_input_shape:
          dense_shape.set_shape(data.dense_shape.get_shape())
      return ops.IndexedSlices(values, indices, dense_shape)
    else:
      dense_shape = enter(data.shape, frame_name, is_constant,
                          parallel_iterations, name="dense_shape")
      if use_input_shape:
        dense_shape.set_shape(data.shape.get_shape())
      return sparse_tensor.SparseTensor(indices, values, dense_shape)
Example 12: switch
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import convert_to_tensor_or_indexed_slices [as alias]
def switch(data, pred, dtype=None, name=None):
  """Forwards `data` to an output determined by `pred`.

  If `pred` is false, the `data` input is forwarded to the first output.
  Otherwise, the data goes to the second output.

  This op handles `Tensor`s and `IndexedSlices`.

  Args:
    data: The tensor to be forwarded to the appropriate output.
    pred: A scalar that specifies which output port will receive data.
    dtype: Optional element type for the returned tensor. If missing,
      the type is inferred from the type of `data`.
    name: A name for this operation (optional).

  Returns:
    `(output_false, output_true)`: If `pred` is true, data will be forwarded
    to `output_true`, otherwise it goes to `output_false`.
  """
  with ops.name_scope(name, "Switch", [data, pred]) as name:
    data = ops.convert_to_tensor_or_indexed_slices(data, dtype=dtype,
                                                   name="data", as_ref=True)
    pred = ops.convert_to_tensor(pred, name="pred")
    if isinstance(data, ops.Tensor):
      return gen_control_flow_ops._switch(data, pred, name=name)
    else:
      if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
        raise TypeError("Type %s not supported" % type(data))
      val, ind = data.values, data.indices
      val_f, val_t = gen_control_flow_ops._switch(val, pred, name=name)
      ind_f, ind_t = gen_control_flow_ops._switch(ind, pred, name="indices")
      if isinstance(data, ops.IndexedSlices):
        dense_shape = data.dense_shape
        if dense_shape is not None:
          dense_shape_f, dense_shape_t = gen_control_flow_ops._switch(
              dense_shape, pred, name="dense_shape")
        else:
          dense_shape_f, dense_shape_t = None, None
        return (ops.IndexedSlices(val_f, ind_f, dense_shape_f),
                ops.IndexedSlices(val_t, ind_t, dense_shape_t))
      else:
        dense_shape = data.shape
        dense_shape_f, dense_shape_t = gen_control_flow_ops._switch(
            data.shape, pred, name="dense_shape")
        return (sparse_tensor.SparseTensor(ind_f, val_f, dense_shape_f),
                sparse_tensor.SparseTensor(ind_t, val_t, dense_shape_t))
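A small, hypothetical sketch of how the two switch outputs behave (TensorFlow 1.x graph mode assumed; the placeholder and constant are illustrative): only the output selected by `pred` carries a value, the other one is dead and cannot be fetched.

import tensorflow as tf
from tensorflow.python.ops import control_flow_ops

graph = tf.Graph()
with graph.as_default():
  pred = tf.placeholder(tf.bool, shape=[], name="pred")
  data = tf.constant([1.0, 2.0], name="data")
  output_false, output_true = control_flow_ops.switch(data, pred)

with tf.Session(graph=graph) as sess:
  # Fetch the output that matches the fed predicate; the other output is dead.
  print(sess.run(output_true, feed_dict={pred: True}))    # [1. 2.]
  print(sess.run(output_false, feed_dict={pred: False}))  # [1. 2.]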
Example 13: merge
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import convert_to_tensor_or_indexed_slices [as alias]
def merge(inputs, name=None):
  """Returns the value of an available element of `inputs`.

  This op tests each of the tensors in `inputs` in turn to determine if any of
  them is available. If it finds an available tensor, it returns it and its
  index in `inputs`.

  It is an error if more than one tensor in `inputs` is available. If no tensor
  in `inputs` is available, the returned tensor and index are not set.

  This op handles both `Tensor`s and `IndexedSlices`. If `inputs` has a mix of
  `Tensor`s and `IndexedSlices`, all inputs are converted to IndexedSlices
  before merging.

  Args:
    inputs: The input tensors, at most one of which is available.
    name: A name for this operation (optional).

  Returns:
    A tuple containing the chosen input tensor and its index in `inputs`.

  Raises:
    ValueError: If any of the inputs is None, or inputs are IndexedSlices and
      some but not all have a dense_shape property.
  """
  if any([inp is None for inp in inputs]):
    raise ValueError("At least one of the merge inputs is None: %s" % inputs)
  with ops.name_scope(name, "Merge", inputs) as name:
    inputs = [ops.convert_to_tensor_or_indexed_slices(inp, as_ref=True)
              for inp in inputs]
    if all([isinstance(v, ops.Tensor) for v in inputs]):
      if all([v.dtype.is_ref_dtype for v in inputs]):
        return gen_control_flow_ops._ref_merge(inputs, name)
      else:
        return gen_control_flow_ops._merge(inputs, name)
    elif all([isinstance(v, sparse_tensor.SparseTensor) for v in inputs]):
      # Only handle the case when all inputs are SparseTensor.
      values, _ = merge([inp.values for inp in inputs], name=name)
      indices, chosen_index = gen_control_flow_ops._merge(
          [inp.indices for inp in inputs], name="indices")
      dense_shape, _ = gen_control_flow_ops._merge(
          [inp.shape for inp in inputs], name="dense_shape")
      return (sparse_tensor.SparseTensor(indices, values, dense_shape),
              chosen_index)
    else:
      # For now convert all the inputs as IndexedSlices.
      inputs = math_ops._as_indexed_slices_list(inputs, optimize=False)
      values, _ = merge([inp.values for inp in inputs], name=name)
      indices, chosen_index = gen_control_flow_ops._merge(
          [inp.indices for inp in inputs], name="indices")
      if any(inp.dense_shape is not None for inp in inputs):
        if any(inp.dense_shape is None for inp in inputs):
          raise ValueError("Either all merged IndexedSlices must have a "
                           "dense_shape, or none must have a dense_shape.")
        dense_shape, _ = gen_control_flow_ops._merge(
            [inp.dense_shape for inp in inputs], name="dense_shape")
      else:
        dense_shape = None
      return ops.IndexedSlices(values, indices, dense_shape), chosen_index
# pylint: enable=protected-access
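To see switch and merge working together the way cond() wires them up internally, here is a hypothetical sketch (TensorFlow 1.x graph mode assumed; the tensors and predicate values are illustrative): each branch operates on one switch output, and merge forwards whichever branch actually produced a value together with its index.

import tensorflow as tf
from tensorflow.python.ops import control_flow_ops

graph = tf.Graph()
with graph.as_default():
  pred = tf.placeholder(tf.bool, shape=[], name="pred")
  x = tf.constant(10.0, name="x")
  x_false, x_true = control_flow_ops.switch(x, pred)
  # Only the live branch runs; merge picks its result and reports its index.
  merged, branch_index = control_flow_ops.merge([x_false * 2.0, x_true + 1.0])

with tf.Session(graph=graph) as sess:
  print(sess.run([merged, branch_index], feed_dict={pred: True}))   # [11.0, 1]
  print(sess.run([merged, branch_index], feed_dict={pred: False}))  # [20.0, 0]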
Example 14: _SwitchRefOrTensor
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import convert_to_tensor_or_indexed_slices [as alias]
def _SwitchRefOrTensor(data, pred, name="Switch"):
  """Forwards `data` to an output determined by `pred`.

  If `pred` is false, the `data` input is forwarded to the first output.
  Otherwise, the data goes to the second output.

  This op handles `Tensor`s and `IndexedSlices`.

  Args:
    data: The tensor to be forwarded to the appropriate output.
    pred: A scalar that specifies which output port will receive data.
    name: A name for this operation (optional).

  Returns:
    `(output_false, output_true)`: If `pred` is true, data will be forwarded to
    `output_true`, otherwise it goes to `output_false`.

  Raises:
    TypeError: if data is not a Tensor or IndexedSlices
  """
  data = ops.convert_to_tensor_or_indexed_slices(data, name="data")
  # NOTE(vrv): ops.colocate_with(data, ignore_existing=True) below
  # addresses the following scenario.
  #
  # Assume you execute Optimizer.apply_gradients() in a branch of a cond().
  #
  # 1. The update op is created inside a `with ops.colocate(var):` block
  #
  # 2. Some tensor `data` is captured and a switch is created in a
  #    `with ops.colocate_with(data):` block.
  #
  # with ops.colocate_with(var):
  #   with ops.colocate_with(data):
  #     op = ...
  #
  # var and data may be pinned to different devices, so we want ops
  # created within ops.colocate_with(data) to ignore the existing stack.
  with ops.colocate_with(data, ignore_existing=True):
    if isinstance(data, ops.Tensor):
      if data.dtype._is_ref_dtype:  # pylint: disable=protected-access
        return ref_switch(data, pred, name=name)
    return switch(data, pred, name=name)
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines of code: 44, Source file: control_flow_ops.py