This article collects typical usage examples of tensorflow.python.framework.dtypes.bool in Python. If you are wondering how dtypes.bool is used in practice, or looking for concrete code that relies on it, the curated examples here may help. You can also explore further usage examples from the containing module, tensorflow.python.framework.dtypes.
The following shows 15 code examples of dtypes.bool, ordered by popularity by default.
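For orientation, here is a minimal sketch (not one of the collected examples) of what dtypes.bool is: the boolean DType constant defined in tensorflow.python.framework.dtypes, which the public API exposes as tf.bool.

import tensorflow as tf
from tensorflow.python.framework import dtypes

print(dtypes.bool)             # <dtype: 'bool'>
print(dtypes.bool == tf.bool)  # True: tf.bool is the public alias of this constant
print(dtypes.bool.name)        # 'bool'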
Example 1: _parse_kwargs_as_attrs
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import bool [as alias]
def _parse_kwargs_as_attrs(func_name, **kwargs):
  """Parses **kwargs into a node's attributes."""
  attrs = {}

  noinline = kwargs.pop("noinline", None)
  if noinline is not None:
    attrs["_noinline"] = attr_value_pb2.AttrValue(b=bool(noinline))

  compiled = kwargs.pop("compiled", None)
  separate_compiled_gradients = kwargs.pop("separate_compiled_gradients", None)
  if compiled is not None:
    attrs["_XlaCompile"] = attr_value_pb2.AttrValue(b=bool(compiled))
    attrs["_XlaSeparateCompiledGradients"] = attr_value_pb2.AttrValue(
        b=bool(separate_compiled_gradients))
    attrs["_XlaScope"] = attr_value_pb2.AttrValue(
        s=("function_%s" % func_name).encode())

  if kwargs:
    raise ValueError("Unknown keyword arguments: %s" % kwargs.keys())
  return attrs
Example 2: var
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import bool [as alias]
def var(x, axis=None, keepdims=False):
  """Variance of a tensor, alongside the specified axis.

  Arguments:
      x: A tensor or variable.
      axis: An integer, the axis to compute the variance.
      keepdims: A boolean, whether to keep the dimensions or not.
          If `keepdims` is `False`, the rank of the tensor is reduced
          by 1. If `keepdims` is `True`,
          the reduced dimension is retained with length 1.

  Returns:
      A tensor with the variance of elements of `x`.
  """
  axis = _normalize_axis(axis, ndim(x))
  if x.dtype.base_dtype == dtypes_module.bool:
    x = math_ops.cast(x, floatx())
  m = math_ops.reduce_mean(x, reduction_indices=axis, keep_dims=True)
  devs_squared = math_ops.square(x - m)
  return math_ops.reduce_mean(
      devs_squared, reduction_indices=axis, keep_dims=keepdims)
Example 3: mean
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import bool [as alias]
def mean(x, axis=None, keepdims=False):
  """Mean of a tensor, alongside the specified axis.

  Arguments:
      x: A tensor or variable.
      axis: A list of integers. Axes to compute the mean over.
      keepdims: A boolean, whether to keep the dimensions or not.
          If `keepdims` is `False`, the rank of the tensor is reduced
          by 1 for each entry in `axis`. If `keepdims` is `True`,
          the reduced dimensions are retained with length 1.

  Returns:
      A tensor with the mean of elements of `x`.
  """
  axis = _normalize_axis(axis, ndim(x))
  if x.dtype.base_dtype == dtypes_module.bool:
    x = math_ops.cast(x, floatx())
  return math_ops.reduce_mean(x, reduction_indices=axis, keep_dims=keepdims)
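As a hedged usage illustration (not part of the original snippets; written against the public tf.* API with hypothetical values, and assuming a TF version where reduce_mean accepts keepdims), this is the same cast-booleans-then-reduce pattern that var() and mean() apply:

import tensorflow as tf

mask = tf.constant([[True, False], [True, True]])
# Boolean tensors cannot be averaged directly, so cast them to float first,
# mirroring the dtypes.bool check in var() and mean() above.
as_float = tf.cast(mask, tf.float32)
row_mean = tf.reduce_mean(as_float, axis=1)      # -> [0.5, 1.0]
row_var = tf.reduce_mean(
    tf.square(as_float - tf.reduce_mean(as_float, axis=1, keepdims=True)),
    axis=1)                                      # -> [0.25, 0.0]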
Example 4: _streaming_auc
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import bool [as alias]
def _streaming_auc(predictions, labels, weights=None, class_id=None,
                   curve="ROC"):
  # pylint: disable=missing-docstring
  predictions = math_ops.to_float(predictions)
  if labels.dtype.base_dtype != dtypes.bool:
    logging.warning("Casting %s labels to bool.", labels.dtype)
    labels = math_ops.cast(labels, dtypes.bool)
  weights = _float_weights_or_none(weights)
  if weights is not None:
    weights = weights_broadcast_ops.broadcast_weights(weights, predictions)
  if class_id is not None:
    if weights is not None:
      weights = weights[:, class_id]
    predictions = predictions[:, class_id]
    labels = labels[:, class_id]
  return metrics_lib.streaming_auc(
      predictions, labels, weights=weights, curve=curve)
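A hedged usage sketch of the same idea with the public TF 1.x metrics API (hypothetical tensors; tf.metrics.auc is the public counterpart of the internal streaming AUC metric used above):

import tensorflow as tf

labels = tf.constant([1, 0, 1, 1])                  # integer labels
predictions = tf.constant([0.9, 0.2, 0.8, 0.4])     # scores in [0, 1]
# Like _streaming_auc() above, cast non-bool labels to bool before the metric.
bool_labels = tf.cast(labels, tf.bool)
auc_value, auc_update_op = tf.metrics.auc(bool_labels, predictions, curve='ROC')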
Example 5: __init__
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import bool [as alias]
def __init__(self, inputs, sequence_length, time_major=False, name=None):
  """Initializer.

  Args:
    inputs: A (structure of) input tensors.
    sequence_length: An int32 vector tensor.
    time_major: Python bool. Whether the tensors in `inputs` are time major.
      If `False` (default), they are assumed to be batch major.
    name: Name scope for any created operations.

  Raises:
    ValueError: if `sequence_length` is not a 1D tensor.
  """
  with ops.name_scope(name, "TrainingHelper", [inputs, sequence_length]):
    inputs = ops.convert_to_tensor(inputs, name="inputs")
    self._inputs = inputs
    if not time_major:
      inputs = nest.map_structure(_transpose_batch_time, inputs)

    self._input_tas = nest.map_structure(_unstack_ta, inputs)
    self._sequence_length = ops.convert_to_tensor(
        sequence_length, name="sequence_length")
    if self._sequence_length.get_shape().ndims != 1:
      raise ValueError(
          "Expected sequence_length to be a vector, but received shape: %s" %
          self._sequence_length.get_shape())

    self._zero_inputs = nest.map_structure(
        lambda inp: array_ops.zeros_like(inp[0, :]), inputs)

    self._batch_size = shape_list(sequence_length)[0]
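A hedged construction sketch (not from the original code; it uses tf.contrib.seq2seq.TrainingHelper as available in TF 1.x, with hypothetical shapes):

import tensorflow as tf

# Hypothetical decoder inputs: batch of 32 sequences, 10 steps, 64-dim embeddings.
decoder_inputs = tf.zeros([32, 10, 64], dtype=tf.float32)
sequence_lengths = tf.fill([32], 10)
helper = tf.contrib.seq2seq.TrainingHelper(
    inputs=decoder_inputs,
    sequence_length=sequence_lengths,
    time_major=False)   # inputs are batch-major, matching the default above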
Example 6: sample
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import bool [as alias]
def sample(self, time, outputs, state, name=None):
  """Gets a sample for one step."""
  with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
                      [time, outputs, state]):
    # Return -1s where we did not sample, and sample_ids elsewhere.
    select_sampler = bernoulli.Bernoulli(
        probs=self._sampling_probability, dtype=dtypes.bool)
    select_sample = select_sampler.sample(
        sample_shape=self.batch_size, seed=self._scheduling_seed)
    sample_id_sampler = categorical.Categorical(logits=outputs)
    return array_ops.where(
        select_sample,
        sample_id_sampler.sample(seed=self._seed),
        gen_array_ops.fill([self.batch_size], -1))
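A hedged standalone sketch of the same sampling pattern with public TF 1.x APIs (tf.distributions.Bernoulli; all values hypothetical):

import tensorflow as tf

batch_size = 4
sampling_probability = 0.25
# Boolean coin flips, one per batch element, like select_sampler above.
select = tf.distributions.Bernoulli(
    probs=sampling_probability, dtype=tf.bool).sample(sample_shape=batch_size)
sampled_ids = tf.constant([7, 3, 9, 1])
# Keep the sampled id where the coin came up True, otherwise emit -1.
mixed_ids = tf.where(select, sampled_ids, tf.fill([batch_size], -1))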
Example 7: summarize_tensor
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import bool [as alias]
def summarize_tensor(tensor, tag=None):
  """Summarize a tensor using a suitable summary type.

  This function adds a summary op for `tensor`. The type of summary depends on
  the shape of `tensor`. For scalars, a `scalar_summary` is created, for all
  other tensors, `histogram_summary` is used.

  Args:
    tensor: The tensor to summarize.
    tag: The tag to use, if None then use tensor's op's name.

  Returns:
    The summary op created or None for string tensors.
  """
  # Skips string tensors and boolean tensors (not handled by the summaries).
  if (tensor.dtype.is_compatible_with(dtypes.string) or
      tensor.dtype.base_dtype == dtypes.bool):
    return None

  if tensor.get_shape().ndims == 0:
    # For scalars, use a scalar summary.
    return _add_scalar_summary(tensor, tag)
  else:
    # We may land in here if the rank is still unknown. The histogram won't
    # hurt if this ends up being a scalar.
    return _add_histogram_summary(tensor, tag)
Example 8: false_negatives
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import bool [as alias]
def false_negatives(labels, predictions, weights=None,
                    metrics_collections=None,
                    updates_collections=None,
                    name=None):
  """Computes the total number of false negatives.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
      be cast to `bool`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric update
      ops should be added to.
    name: An optional variable_scope name.

  Returns:
    value_tensor: A `Tensor` representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match `values`,
      or if either `metrics_collections` or `updates_collections` are not a list
      or tuple.
  """
  with variable_scope.variable_scope(
      name, 'false_negatives', (predictions, labels, weights)):

    labels = math_ops.cast(labels, dtype=dtypes.bool)
    predictions = math_ops.cast(predictions, dtype=dtypes.bool)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())

    is_false_negative = math_ops.logical_and(math_ops.equal(labels, True),
                                             math_ops.equal(predictions, False))
    return _count_condition(is_false_negative, weights, metrics_collections,
                            updates_collections)
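A hedged standalone sketch of the core condition with public ops (hypothetical tensors): a false negative is a position where the label is True but the prediction is False.

import tensorflow as tf

labels = tf.constant([True, True, False, True])
predictions = tf.constant([True, False, False, False])
# Same condition as is_false_negative above.
is_fn = tf.logical_and(tf.equal(labels, True), tf.equal(predictions, False))
fn_count = tf.reduce_sum(tf.cast(is_fn, tf.float32))   # -> 2.0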
Example 9: zeros
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import bool [as alias]
def zeros(shape, dtype=dtypes.float32, name=None):
  """Creates a tensor with all elements set to zero.

  This operation returns a tensor of type `dtype` with shape `shape` and
  all elements set to zero.

  For example:

  ```python
  tf.zeros([3, 4], tf.int32) ==> [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
  ```

  Args:
    shape: Either a list of integers, or a 1-D `Tensor` of type `int32`.
    dtype: The type of an element in the resulting `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with all elements set to zero.
  """
  dtype = dtypes.as_dtype(dtype).base_dtype
  with ops.name_scope(name, "zeros", [shape]) as name:
    if dtype == dtypes.bool:
      zero = False
    elif dtype == dtypes.string:
      zero = ""
    else:
      zero = 0
    try:
      shape = tensor_shape.as_shape(shape)
      output = constant(zero, shape=shape, dtype=dtype, name=name)
    except (TypeError, ValueError):
      shape = ops.convert_to_tensor(shape, dtype=dtypes.int32, name="shape")
      output = fill(shape, constant(zero, dtype=dtype), name=name)
  assert output.dtype.base_dtype == dtype
  return output
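A quick hedged usage note (public API, hypothetical shapes): with dtype=tf.bool, the zero element is False, and for ones() in Example 10 below the one element is True.

import tensorflow as tf

all_false = tf.zeros([2, 3], dtype=tf.bool)   # every element is False
all_true = tf.ones([2, 3], dtype=tf.bool)     # every element is True (see Example 10)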
Example 10: ones
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import bool [as alias]
def ones(shape, dtype=dtypes.float32, name=None):
  """Creates a tensor with all elements set to 1.

  This operation returns a tensor of type `dtype` with shape `shape` and all
  elements set to 1.

  For example:

  ```python
  tf.ones([2, 3], tf.int32) ==> [[1, 1, 1], [1, 1, 1]]
  ```

  Args:
    shape: Either a list of integers, or a 1-D `Tensor` of type `int32`.
    dtype: The type of an element in the resulting `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with all elements set to 1.
  """
  dtype = dtypes.as_dtype(dtype).base_dtype
  with ops.name_scope(name, "ones", [shape]) as name:
    one = True if dtype == dtypes.bool else 1
    try:
      shape = tensor_shape.as_shape(shape)
      output = constant(one, shape=shape, dtype=dtype, name=name)
    except (TypeError, ValueError):
      shape = ops.convert_to_tensor(shape, dtype=dtypes.int32, name="shape")
      output = fill(shape, constant(one, dtype=dtype), name=name)
  assert output.dtype.base_dtype == dtype
  return output
Example 11: assert_type
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import bool [as alias]
def assert_type(tensor, tf_type, message=None, name=None):
  """Statically asserts that the given `Tensor` is of the specified type.

  Args:
    tensor: A tensorflow `Tensor`.
    tf_type: A tensorflow type (`dtypes.float32`, `tf.int64`, `dtypes.bool`,
      etc).
    message: A string to prefix to the default message.
    name: A name to give this `Op`. Defaults to "assert_type".

  Raises:
    TypeError: If the tensor's data type doesn't match `tf_type`.

  Returns:
    A `no_op` that does nothing. Type can be determined statically.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_type', [tensor]):
    tensor = ops.convert_to_tensor(tensor, name='tensor')
    if tensor.dtype != tf_type:
      raise TypeError(
          '%s %s must be of type %s' % (message, tensor.op.name, tf_type))
    return control_flow_ops.no_op('statically_determined_correct_type')

# pylint: disable=line-too-long
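A hedged usage sketch with the public TF 1.x alias tf.assert_type (hypothetical tensor):

import tensorflow as tf

mask = tf.constant([True, False, True])
# Raises TypeError at graph-construction time if mask is not of dtype bool.
assert_op = tf.assert_type(mask, tf.bool)
with tf.control_dependencies([assert_op]):
  flipped = tf.logical_not(mask)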
Example 12: same_dynamic_shape
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import bool [as alias]
def same_dynamic_shape(a, b):
  """Returns whether a and b have the same dynamic shape.

  Args:
    a: `Tensor`
    b: `Tensor`

  Returns:
    `bool` `Tensor` representing if both tensors have the same shape.
  """
  a = ops.convert_to_tensor(a, name="a")
  b = ops.convert_to_tensor(b, name="b")

  # Here we can't just do math_ops.equal(a.shape, b.shape), since
  # static shape inference may break the equality comparison between
  # shape(a) and shape(b) in math_ops.equal.
  def all_shapes_equal():
    return math_ops.reduce_all(math_ops.equal(
        array_ops.concat([array_ops.shape(a), array_ops.shape(b)], 0),
        array_ops.concat([array_ops.shape(b), array_ops.shape(a)], 0)))

  # One of the shapes isn't fully defined, so we need to use the dynamic
  # shape.
  return control_flow_ops.cond(
      math_ops.equal(array_ops.rank(a), array_ops.rank(b)),
      all_shapes_equal,
      lambda: constant_op.constant(False))
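A hedged, simplified sketch of the same cond-based comparison using public ops (it assumes both tensors already have equal static rank, which is what lets it skip the concat trick used above; placeholders and shapes are hypothetical):

import tensorflow as tf

a = tf.placeholder(tf.float32, shape=[None, 3])
b = tf.placeholder(tf.float32, shape=[2, 3])
shapes_match = tf.cond(
    tf.equal(tf.rank(a), tf.rank(b)),
    lambda: tf.reduce_all(tf.equal(tf.shape(a), tf.shape(b))),
    lambda: tf.constant(False))   # a scalar bool Tensor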
Example 13: _FilterBool
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import bool [as alias]
def _FilterBool(v):
  # Type-filter helper: returns None when `v` (or every element of a
  # list/tuple `v`) is a Python bool, otherwise surfaces the first
  # offending value so the caller can report a type mismatch.
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterBool(x) for x in v])
  return None if isinstance(v, bool) else _NotNone(v)
Example 14: clear_session
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import bool [as alias]
def clear_session():
  """Destroys the current TF graph and creates a new one.

  Useful to avoid clutter from old models / layers.
  """
  global _SESSION
  global _GRAPH_LEARNING_PHASES  # pylint: disable=global-variable-not-assigned
  ops.reset_default_graph()
  reset_uids()
  _SESSION = None
  phase = array_ops.placeholder(dtype='bool', name='keras_learning_phase')
  _GRAPH_LEARNING_PHASES = {}
  _GRAPH_LEARNING_PHASES[ops.get_default_graph()] = phase
Example 15: any
# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import bool [as alias]
def any(x, axis=None, keepdims=False):
  """Bitwise reduction (logical OR).

  Arguments:
      x: Tensor or variable.
      axis: axis along which to perform the reduction.
      keepdims: whether to drop or broadcast the reduction axes.

  Returns:
      A uint8 tensor (0s and 1s).
  """
  axis = _normalize_axis(axis, ndim(x))
  x = math_ops.cast(x, dtypes_module.bool)
  return math_ops.reduce_any(x, reduction_indices=axis, keep_dims=keepdims)
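A hedged usage sketch with public ops (hypothetical values), showing the same cast-to-bool-then-reduce-with-OR pattern:

import tensorflow as tf

x = tf.constant([[0, 1, 0], [0, 0, 0]])
as_bool = tf.cast(x, tf.bool)               # nonzero -> True
row_any = tf.reduce_any(as_bool, axis=1)    # -> [True, False]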