本文整理汇总了Python中tensorflow.python.framework.ops.convert_n_to_tensor方法的典型用法代码示例。如果您正苦于以下问题:Python ops.convert_n_to_tensor方法的具体用法?Python ops.convert_n_to_tensor怎么用?Python ops.convert_n_to_tensor使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类tensorflow.python.framework.ops
的用法示例。
在下文中一共展示了ops.convert_n_to_tensor方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: Assert
# 需要导入模块: from tensorflow.python.framework import ops [as 别名]
# 或者: from tensorflow.python.framework.ops import convert_n_to_tensor [as 别名]
def Assert(condition, data, summarize=None, name=None):
    """Asserts that the given condition is true.

    If `condition` evaluates to false, print the list of tensors in `data`.
    `summarize` determines how many entries of the tensors to print.

    NOTE: To ensure that Assert executes, one usually attaches a dependency:

    ```python
    # Ensure maximum element of x is smaller or equal to 1
    assert_op = tf.Assert(tf.less_equal(tf.reduce_max(x), 1.), [x])
    with tf.control_dependencies([assert_op]):
        ... code using x ...
    ```

    Args:
      condition: The condition to evaluate.
      data: The tensors to print out when condition is false.
      summarize: Print this many entries of each tensor.
      name: A name for this operation (optional).

    Returns:
      assert_op: An `Operation` that, when executed, raises a
        `tf.errors.InvalidArgumentError` if `condition` is not true.
    """
    with ops.name_scope(name, "Assert", [condition, data]) as name:
        xs = ops.convert_n_to_tensor(data)
        # Generator expression (not a materialized list) is enough for all().
        if all(x.dtype in {dtypes.string, dtypes.int32} for x in xs):
            # As a simple heuristic, we assume that string and int32 tensors
            # are on the host, which avoids the need to use cond. If that is
            # not the case, we pay the price of copying the tensor to host
            # memory.
            return gen_logging_ops._assert(
                condition, data, summarize, name="Assert")
        else:
            condition = ops.convert_to_tensor(condition, name="Condition")

            def true_assert():
                # Fires only when the guarding cond observes a false condition.
                return gen_logging_ops._assert(
                    condition, data, summarize, name="Assert")

            guarded_assert = cond(
                condition, no_op, true_assert, name="AssertGuard")
            return guarded_assert.op
示例2: testFloat
# 需要导入模块: from tensorflow.python.framework import ops [as 别名]
# 或者: from tensorflow.python.framework.ops import convert_n_to_tensor [as 别名]
def testFloat(self):
    """accumulate_n on float tensors matches a plain NumPy sum."""
    np.random.seed(12345)
    values = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(5)]
    tensors = ops.convert_n_to_tensor(values)
    with self.test_session(use_gpu=True):
        self.assertAllClose(sum(values),
                            math_ops.accumulate_n(tensors).eval())
        self.assertAllClose(values[0] * 5,
                            math_ops.accumulate_n([tensors[0]] * 5).eval())
示例3: testInt
# 需要导入模块: from tensorflow.python.framework import ops [as 别名]
# 或者: from tensorflow.python.framework.ops import convert_n_to_tensor [as 别名]
def testInt(self):
    """accumulate_n on integer tensors matches a plain NumPy sum."""
    np.random.seed(54321)
    values = [np.random.randint(-128, 128, (5, 4, 3, 2, 1)) for _ in range(6)]
    tensors = ops.convert_n_to_tensor(values)
    with self.test_session(use_gpu=True):
        self.assertAllEqual(sum(values),
                            math_ops.accumulate_n(tensors).eval())
        self.assertAllEqual(values[0] * 6,
                            math_ops.accumulate_n([tensors[0]] * 6).eval())
示例4: Assert
# 需要导入模块: from tensorflow.python.framework import ops [as 别名]
# 或者: from tensorflow.python.framework.ops import convert_n_to_tensor [as 别名]
def Assert(condition, data, summarize=None, name=None):
    """Asserts that the given condition is true.

    If `condition` evaluates to false, print the list of tensors in `data`.
    `summarize` determines how many entries of the tensors to print.

    NOTE: To ensure that Assert executes, one usually attaches a dependency:

    ```python
    # Ensure maximum element of x is smaller or equal to 1
    assert_op = tf.Assert(tf.less_equal(tf.reduce_max(x), 1.), [x])
    x = tf.with_dependencies([assert_op], x)
    ```

    Args:
      condition: The condition to evaluate.
      data: The tensors to print out when condition is false.
      summarize: Print this many entries of each tensor.
      name: A name for this operation (optional).

    Returns:
      assert_op: An `Operation` that, when executed, raises a
        `tf.errors.InvalidArgumentError` if `condition` is not true.
    """
    with ops.name_scope(name, "Assert", [condition, data]) as name:
        tensors = ops.convert_n_to_tensor(data)
        host_dtypes = {dtypes.string, dtypes.int32}
        if all([t.dtype in host_dtypes for t in tensors]):
            # Heuristic: string and int32 tensors are assumed to live on the
            # host, so the assert op can consume them without a cond guard.
            # If that assumption fails we pay a copy to host memory.
            return gen_logging_ops._assert(
                condition, data, summarize, name="Assert")
        condition = ops.convert_to_tensor(condition, name="Condition")

        def true_assert():
            return gen_logging_ops._assert(
                condition, data, summarize, name="Assert")

        guarded = cond(condition, no_op, true_assert, name="AssertGuard")
        return guarded.op
示例5: _merge_summary
# 需要导入模块: from tensorflow.python.framework import ops [as 别名]
# 或者: from tensorflow.python.framework.ops import convert_n_to_tensor [as 别名]
def _merge_summary(inputs, name=None):
    r"""Merges summaries.

    This op creates a
    [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
    protocol buffer that contains the union of all the values in the input
    summaries.

    When the Op is run, it reports an `InvalidArgument` error if multiple values
    in the summaries to merge use the same tag.

    Args:
      inputs: A list of at least 1 `Tensor` objects with type `string`.
        Can be of any shape. Each must contain serialized `Summary` protocol
        buffers.
      name: A name for the operation (optional).

    Returns:
      A `Tensor` of type `string`. Scalar. Serialized `Summary` protocol buffer.
    """
    # The op's 'N' attr counts the summaries, so a real sequence is required.
    if not isinstance(inputs, (list, tuple)):
        raise TypeError(
            "Expected list for 'inputs' argument to "
            "'merge_summary' Op, not %r." % inputs)
    _attr_N = len(inputs)
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        # Graph mode: register a MergeSummary node; attrs are read back from
        # the created op rather than computed locally.
        _, _, _op = _op_def_lib._apply_op_helper(
            "MergeSummary", inputs=inputs, name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("N", _op.get_attr("N"))
    else:
        # Eager mode: coerce every input to a string tensor and run the
        # kernel immediately.
        inputs = _ops.convert_n_to_tensor(inputs, _dtypes.string)
        _inputs_flat = list(inputs)
        _attrs = ("N", _attr_N)
        _result = _execute.execute(b"MergeSummary", 1, inputs=_inputs_flat,
                                   attrs=_attrs, ctx=_ctx, name=name)
    _execute.record_gradient(
        "MergeSummary", _inputs_flat, _attrs, _result, name)
    # The op has exactly one output; unwrap it from the result list.
    _result, = _result
    return _result
开发者ID:PacktPublishing,项目名称:Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda,代码行数:44,代码来源:gen_logging_ops.py
示例6: padded_batch_dataset
# 需要导入模块: from tensorflow.python.framework import ops [as 别名]
# 或者: from tensorflow.python.framework.ops import convert_n_to_tensor [as 别名]
def padded_batch_dataset(input_dataset, batch_size, padded_shapes, padding_values, output_shapes, name=None):
    r"""Creates a dataset that batches and pads `batch_size` elements from the input.

    Args:
      input_dataset: A `Tensor` of type `variant`.
      batch_size: A `Tensor` of type `int64`.
        A scalar representing the number of elements to accumulate in a
        batch.
      padded_shapes: A list of at least 1 `Tensor` objects with type `int64`.
        A list of int64 tensors representing the desired padded shapes
        of the corresponding output components. These shapes may be partially
        specified, using `-1` to indicate that a particular dimension should be
        padded to the maximum size of all batch elements.
      padding_values: A list of `Tensor` objects.
        A list of scalars containing the padding value to use for
        each of the outputs.
      output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`.
      name: A name for the operation (optional).

    Returns:
      A `Tensor` of type `variant`.
    """
    # The op's 'N' attr is derived from len(padded_shapes), so the argument
    # must be a real sequence, not a lone tensor.
    if not isinstance(padded_shapes, (list, tuple)):
        raise TypeError(
            "Expected list for 'padded_shapes' argument to "
            "'padded_batch_dataset' Op, not %r." % padded_shapes)
    _attr_N = len(padded_shapes)
    if not isinstance(output_shapes, (list, tuple)):
        raise TypeError(
            "Expected list for 'output_shapes' argument to "
            "'padded_batch_dataset' Op, not %r." % output_shapes)
    # Normalize each entry into the attr representation the runtime expects.
    output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        # Graph mode: add a PaddedBatchDataset node; attrs are inferred by the
        # op-def library and read back from the created op.
        _, _, _op = _op_def_lib._apply_op_helper(
            "PaddedBatchDataset", input_dataset=input_dataset,
            batch_size=batch_size, padded_shapes=padded_shapes,
            padding_values=padding_values, output_shapes=output_shapes, name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("Toutput_types", _op.get_attr("Toutput_types"), "output_shapes",
                  _op.get_attr("output_shapes"), "N", _op.get_attr("N"))
    else:
        # Eager mode: derive Toutput_types from the (possibly mixed-dtype)
        # padding values, convert the remaining inputs, and execute directly.
        _attr_Toutput_types, padding_values = _execute.convert_to_mixed_eager_tensors(padding_values, _ctx)
        _attr_Toutput_types = [_t.as_datatype_enum for _t in _attr_Toutput_types]
        input_dataset = _ops.convert_to_tensor(input_dataset, _dtypes.variant)
        batch_size = _ops.convert_to_tensor(batch_size, _dtypes.int64)
        padded_shapes = _ops.convert_n_to_tensor(padded_shapes, _dtypes.int64)
        # Flat input order must match the op definition:
        # dataset, batch size, all padded shapes, then all padding values.
        _inputs_flat = [input_dataset, batch_size] + list(padded_shapes) + list(padding_values)
        _attrs = ("Toutput_types", _attr_Toutput_types, "output_shapes",
                  output_shapes, "N", _attr_N)
        _result = _execute.execute(b"PaddedBatchDataset", 1, inputs=_inputs_flat,
                                   attrs=_attrs, ctx=_ctx, name=name)
    _execute.record_gradient(
        "PaddedBatchDataset", _inputs_flat, _attrs, _result, name)
    # Single variant output; unwrap it from the result list.
    _result, = _result
    return _result
开发者ID:PacktPublishing,项目名称:Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda,代码行数:59,代码来源:gen_dataset_ops.py
示例7: zip_dataset
# 需要导入模块: from tensorflow.python.framework import ops [as 别名]
# 或者: from tensorflow.python.framework.ops import convert_n_to_tensor [as 别名]
def zip_dataset(input_datasets, output_types, output_shapes, name=None):
    r"""Creates a dataset that zips together `input_datasets`.

    Args:
      input_datasets: A list of at least 1 `Tensor` objects with type `variant`.
      output_types: A list of `tf.DTypes` that has length `>= 1`.
      output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`.
      name: A name for the operation (optional).

    Returns:
      A `Tensor` of type `variant`.
    """
    if not isinstance(input_datasets, (list, tuple)):
        raise TypeError(
            "Expected list for 'input_datasets' argument to "
            "'zip_dataset' Op, not %r." % input_datasets)
    num_datasets = len(input_datasets)
    if not isinstance(output_types, (list, tuple)):
        raise TypeError(
            "Expected list for 'output_types' argument to "
            "'zip_dataset' Op, not %r." % output_types)
    output_types = [_execute.make_type(_t, "output_types")
                    for _t in output_types]
    if not isinstance(output_shapes, (list, tuple)):
        raise TypeError(
            "Expected list for 'output_shapes' argument to "
            "'zip_dataset' Op, not %r." % output_shapes)
    output_shapes = [_execute.make_shape(_s, "output_shapes")
                     for _s in output_shapes]
    ctx = _context.context()
    if ctx.in_graph_mode():
        # Graph mode: register a ZipDataset node and read attrs back from it.
        _, _, op = _op_def_lib._apply_op_helper(
            "ZipDataset", input_datasets=input_datasets,
            output_types=output_types, output_shapes=output_shapes, name=name)
        outputs = op.outputs[:]
        flat_inputs = op.inputs
        attrs = ("output_types", op.get_attr("output_types"), "output_shapes",
                 op.get_attr("output_shapes"), "N", op.get_attr("N"))
    else:
        # Eager mode: convert every dataset handle and execute immediately.
        input_datasets = _ops.convert_n_to_tensor(input_datasets,
                                                  _dtypes.variant)
        flat_inputs = list(input_datasets)
        attrs = ("output_types", output_types, "output_shapes", output_shapes,
                 "N", num_datasets)
        outputs = _execute.execute(b"ZipDataset", 1, inputs=flat_inputs,
                                   attrs=attrs, ctx=ctx, name=name)
    _execute.record_gradient(
        "ZipDataset", flat_inputs, attrs, outputs, name)
    result, = outputs
    return result
开发者ID:PacktPublishing,项目名称:Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda,代码行数:49,代码来源:gen_dataset_ops.py
示例8: string_join
# 需要导入模块: from tensorflow.python.framework import ops [as 别名]
# 或者: from tensorflow.python.framework.ops import convert_n_to_tensor [as 别名]
def string_join(inputs, separator="", name=None):
    r"""Joins the strings in the given list of string tensors into one tensor;
    with the given separator (default is an empty separator).

    Args:
      inputs: A list of at least 1 `Tensor` objects with type `string`.
        A list of string tensors. The tensors must all have the same shape,
        or be scalars. Scalars may be mixed in; these will be broadcast to the
        shape of non-scalar inputs.
      separator: An optional `string`. Defaults to `""`.
        string, an optional join separator.
      name: A name for the operation (optional).

    Returns:
      A `Tensor` of type `string`.
    """
    if not isinstance(inputs, (list, tuple)):
        raise TypeError(
            "Expected list for 'inputs' argument to "
            "'string_join' Op, not %r." % inputs)
    n_inputs = len(inputs)
    if separator is None:
        separator = ""
    separator = _execute.make_str(separator, "separator")
    ctx = _context.context()
    if ctx.in_graph_mode():
        # Graph mode: build a StringJoin node; attrs come from the created op.
        _, _, op = _op_def_lib._apply_op_helper(
            "StringJoin", inputs=inputs, separator=separator, name=name)
        outputs = op.outputs[:]
        flat_inputs = op.inputs
        attrs = ("N", op.get_attr("N"),
                 "separator", op.get_attr("separator"))
    else:
        # Eager mode: coerce inputs to string tensors and run the kernel.
        inputs = _ops.convert_n_to_tensor(inputs, _dtypes.string)
        flat_inputs = list(inputs)
        attrs = ("N", n_inputs, "separator", separator)
        outputs = _execute.execute(b"StringJoin", 1, inputs=flat_inputs,
                                   attrs=attrs, ctx=ctx, name=name)
    _execute.record_gradient(
        "StringJoin", flat_inputs, attrs, outputs, name)
    joined, = outputs
    return joined
开发者ID:PacktPublishing,项目名称:Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda,代码行数:44,代码来源:gen_string_ops.py