This article collects typical usage examples of the Python function tensorflow.python.ops.check_ops.assert_greater_equal. If you have been wondering what exactly assert_greater_equal does and how to use it, the curated code samples below should help.
The 15 code examples of assert_greater_equal shown below are sorted by popularity by default.
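Before the examples, here is a minimal sketch of the basic pattern they all share, including the internal-module imports the snippets assume (these module paths match the TF 1.x sources the examples come from; the same op is exposed publicly as tf.debugging.assert_greater_equal):

```python
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops

x = constant_op.constant([2, 3], name="x")
y = constant_op.constant([1, 3], name="y")

# assert_greater_equal returns an assertion Op.  Attaching it as a control
# dependency forces the check to run before `out` is computed; it raises
# InvalidArgumentError if any element of x is < the matching element of y
# (the shapes must be broadcast-compatible).
with ops.control_dependencies(
    [check_ops.assert_greater_equal(x, y, message="x must be >= y")]):
  out = array_ops.identity(x)
```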
Example 1: test_doesnt_raise_when_both_empty
def test_doesnt_raise_when_both_empty(self):
  larry = constant_op.constant([])
  curly = constant_op.constant([])
  with ops.control_dependencies(
      [check_ops.assert_greater_equal(larry, curly)]):
    out = array_ops.identity(larry)
  self.evaluate(out)
Example 2: test_doesnt_raise_when_equal
def test_doesnt_raise_when_equal(self):
  with self.test_session():
    small = constant_op.constant([1, 2], name="small")
    with ops.control_dependencies(
        [check_ops.assert_greater_equal(small, small)]):
      out = array_ops.identity(small)
    out.eval()
Example 3: _check_valid_event_ndims
def _check_valid_event_ndims(self, min_event_ndims, event_ndims):
  """Check whether event_ndims is at least min_event_ndims."""
  event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
  event_ndims_ = tensor_util.constant_value(event_ndims)
  assertions = []
  if not event_ndims.dtype.is_integer:
    raise ValueError("Expected integer dtype, got dtype {}".format(
        event_ndims.dtype))
  if event_ndims_ is not None:
    if event_ndims.shape.ndims != 0:
      raise ValueError("Expected scalar event_ndims, got shape {}".format(
          event_ndims.shape))
    if min_event_ndims > event_ndims_:
      raise ValueError("event_ndims ({}) must be at least "
                       "min_event_ndims ({})".format(
                           event_ndims_, min_event_ndims))
  elif self.validate_args:
    assertions += [
        check_ops.assert_greater_equal(event_ndims, min_event_ndims)]
  if event_ndims.shape.is_fully_defined():
    if event_ndims.shape.ndims != 0:
      raise ValueError("Expected scalar shape, got ndims {}".format(
          event_ndims.shape.ndims))
  elif self.validate_args:
    assertions += [
        check_ops.assert_rank(event_ndims, 0, message="Expected scalar.")]
  return assertions
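The method above follows a validation pattern that recurs throughout these examples: when the value is statically known, raise immediately at graph-construction time; otherwise, when `validate_args` is set, return runtime assertion ops for the caller to wire in as control dependencies. A stripped-down sketch of that pattern (the helper name `assert_at_least` is ours, for illustration only; it is not part of TensorFlow):

```python
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import check_ops

def assert_at_least(value, minimum):
  """Hypothetical helper: check statically when possible, dynamically otherwise."""
  value = ops.convert_to_tensor(value, name="value")
  value_ = tensor_util.constant_value(value)
  if value_ is not None:
    # Statically known: fail fast, no runtime op needed.
    if value_ < minimum:
      raise ValueError(
          "value ({}) must be at least {}".format(value_, minimum))
    return []
  # Known only at runtime: hand back an assertion op for the caller.
  return [check_ops.assert_greater_equal(value, minimum)]
```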
Example 4: test_doesnt_raise_when_greater_equal_and_broadcastable_shapes
def test_doesnt_raise_when_greater_equal_and_broadcastable_shapes(self):
  small = constant_op.constant([1], name="small")
  big = constant_op.constant([3, 1], name="big")
  with ops.control_dependencies(
      [check_ops.assert_greater_equal(big, small)]):
    out = array_ops.identity(small)
  self.evaluate(out)
Example 5: _single_batch_sampler
def _single_batch_sampler(self, sampler):
  # Enforce that there are at least as many data points as centers
  # remaining. This gives the provided sampler the chance to select all
  # remaining centers from a single batch.
  with ops.control_dependencies(
      [check_ops.assert_greater_equal(self._num_data, self._num_remaining)]):
    return sampler()
Example 6: test_raises_when_less_equal_but_non_broadcastable_shapes
def test_raises_when_less_equal_but_non_broadcastable_shapes(self):
  with self.test_session():
    small = constant_op.constant([1, 1, 1], name="big")
    big = constant_op.constant([3, 1], name="small")
    with self.assertRaisesRegexp(ValueError, "Dimensions must be equal"):
      with ops.control_dependencies(
          [check_ops.assert_greater_equal(big, small)]):
        out = array_ops.identity(small)
      out.eval()
Example 7: test_raises_when_less
def test_raises_when_less(self):
  small = constant_op.constant([1, 2], name="small")
  big = constant_op.constant([3, 4], name="big")
  with self.assertRaisesOpError("fail"):
    with ops.control_dependencies(
        [check_ops.assert_greater_equal(
            small, big, message="fail")]):
      out = array_ops.identity(small)
    self.evaluate(out)
Example 8: _validate_aux_loss_weight
def _validate_aux_loss_weight(aux_loss_weight, name='aux_loss_weight'):
  if isinstance(aux_loss_weight, ops.Tensor):
    aux_loss_weight.shape.assert_is_compatible_with([])
    with ops.control_dependencies(
        [check_ops.assert_greater_equal(aux_loss_weight, 0.0)]):
      aux_loss_weight = array_ops.identity(aux_loss_weight)
  elif aux_loss_weight is not None and aux_loss_weight < 0:
    raise ValueError('`%s` must be non-negative. Instead, was %s' %
                     (name, aux_loss_weight))
  return aux_loss_weight
Example 9: check
def check(t):
  samples_batch_shape = array_ops.shape(samples)[1:]
  broadcasted_batch_shape = array_ops.broadcast_dynamic_shape(
      samples_batch_shape, array_ops.shape(t))
  # This rank check ensures that I don't get a wrong answer from the
  # _shapes_ broadcasting against each other.
  samples_batch_ndims = array_ops.size(samples_batch_shape)
  ge = check_ops.assert_greater_equal(
      samples_batch_ndims, array_ops.rank(t))
  eq = check_ops.assert_equal(samples_batch_shape, broadcasted_batch_shape)
  return ge, eq
Example 10: test_raises_when_less_equal_but_non_broadcastable_shapes
def test_raises_when_less_equal_but_non_broadcastable_shapes(self):
  small = constant_op.constant([1, 1, 1], name="big")
  big = constant_op.constant([3, 1], name="small")
  # The exception in eager and non-eager mode is different because
  # eager mode relies on shape check done as part of the C++ op, while
  # graph mode does shape checks when creating the `Operation` instance.
  with self.assertRaisesRegexp(
      (errors.InvalidArgumentError, ValueError),
      (r"Incompatible shapes: \[2\] vs. \[3\]|"
       r"Dimensions must be equal, but are 2 and 3")):
    with ops.control_dependencies(
        [check_ops.assert_greater_equal(big, small)]):
      out = array_ops.identity(small)
    self.evaluate(out)
Example 11: _check_valid_event_ndims
def _check_valid_event_ndims(self, min_event_ndims, event_ndims):
  """Check whether event_ndims is at least min_event_ndims."""
  assert_static(min_event_ndims)
  event_ndims_ = get_static_value(event_ndims, np.int32)
  assertions = []
  if event_ndims_ is not None:
    if min_event_ndims > event_ndims_:
      raise ValueError("event_ndims ({}) must be at least "
                       "min_event_ndims ({})".format(
                           event_ndims_, min_event_ndims))
  elif self.validate_args:
    assertions += [
        check_ops.assert_greater_equal(event_ndims, min_event_ndims)]
  return assertions
Example 12: _check_valid_event_ndims
def _check_valid_event_ndims(self, min_event_ndims, event_ndims):
  """Check whether event_ndims is at least min_event_ndims."""
  min_event_ndims_ = (min_event_ndims if isinstance(min_event_ndims, int)
                      else tensor_util.constant_value(min_event_ndims))
  event_ndims_ = (event_ndims if isinstance(event_ndims, int)
                  else tensor_util.constant_value(event_ndims))
  if min_event_ndims_ is not None and event_ndims_ is not None:
    if min_event_ndims_ > event_ndims_:
      raise ValueError("event_ndims ({}) must be at least "
                       "min_event_ndims ({})".format(
                           event_ndims_, min_event_ndims_))
    return []
  if self.validate_args:
    return [check_ops.assert_greater_equal(event_ndims, min_event_ndims)]
  return []
Example 13: _minimum_mean
def _minimum_mean(samples, envelope, low, name=None):
  """Returns a stochastic lower bound on the mean of a scalar distribution.

  The idea is that if the true CDF is within an `eps`-envelope of the
  empirical CDF of the samples, and the support is bounded below, then
  the mean is bounded below as well.  In symbols,

  ```none
  sup_x(|F_n(x) - F(x)|) < eps
  ```

  The 0th dimension of `samples` is interpreted as independent and
  identically distributed samples.  The remaining dimensions are
  broadcast together with `envelope` and `low`, and operated on
  separately.

  Args:
    samples: Floating-point tensor of samples from the distribution(s)
      of interest.  Entries are assumed IID across the 0th dimension.
      The other dimensions must broadcast with `envelope` and `low`.
    envelope: Floating-point tensor of sizes of admissible CDF
      envelopes (i.e., the `eps` above).
    low: Floating-point tensor of lower bounds on the distributions'
      supports.
    name: A name for this operation (optional).

  Returns:
    bound: Floating-point tensor of lower bounds on the true means.

  Raises:
    InvalidArgumentError: If some `sample` is found to be smaller than
      the corresponding `low`.
  """
  with ops.name_scope(name, "minimum_mean", [samples, envelope, low]):
    samples = ops.convert_to_tensor(samples, name="samples")
    envelope = ops.convert_to_tensor(envelope, name="envelope")
    low = ops.convert_to_tensor(low, name="low")

    xmin = math_ops.reduce_min(samples, axis=[-1])
    msg = "Given sample minimum value falls below expectations"
    check_op = check_ops.assert_greater_equal(xmin, low, message=msg)
    with ops.control_dependencies([check_op]):
      return - _do_maximum_mean(-samples, envelope, -low)
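To make the docstring's envelope argument concrete: within the envelope, the worst case for the mean slides an `eps` fraction of probability mass from the largest samples down to the support bound `low`. A hypothetical NumPy sketch of that idea (ours, not the TF implementation, which operates on batched tensors):

```python
import numpy as np

def minimum_mean_sketch(samples, eps, low):
  # Worst-case CDF inside the envelope is min(F_n(x) + eps, 1): an eps
  # fraction of total mass moves from the top of the sample to `low`.
  s = np.sort(np.asarray(samples, dtype=float))
  n = s.size
  mass = eps * n        # mass to move, in units of whole samples
  k = int(mass)         # whole samples clamped down to `low`
  frac = mass - k       # fractional remainder of one more sample
  worst = s.copy()
  if k:
    worst[n - k:] = low
  if k < n:
    worst[n - k - 1] = frac * low + (1.0 - frac) * worst[n - k - 1]
  return worst.mean()   # stochastic lower bound on the true mean

# For example, minimum_mean_sketch(np.random.uniform(size=10000), 0.02, 0.0)
# comes out slightly below 0.5 and tightens as eps shrinks.
```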
Example 14: _maybe_check_valid_shape
def _maybe_check_valid_shape(self, shape, validate_args):
  """Check that a shape Tensor is int-type and otherwise sane."""
  if not shape.dtype.is_integer:
    raise TypeError("{} dtype ({}) should be `int`-like.".format(
        shape, shape.dtype.name))

  assertions = []

  ndims = array_ops.rank(shape)
  ndims_ = tensor_util.constant_value(ndims)
  if ndims_ is not None and ndims_ > 1:
    raise ValueError("`{}` rank ({}) should be <= 1.".format(
        shape, ndims_))
  elif validate_args:
    assertions.append(check_ops.assert_less_equal(
        ndims, 1, message="`{}` rank should be <= 1.".format(shape)))

  shape_ = tensor_util.constant_value_as_shape(shape)
  if shape_.is_fully_defined():
    es = np.int32(shape_.as_list())
    if sum(es == -1) > 1:
      raise ValueError(
          "`{}` must have at most one `-1` (given {})"
          .format(shape, es))
    if np.any(es < -1):
      raise ValueError(
          "`{}` elements must be either positive integers or `-1` "
          "(given {}).".format(shape, es))
  elif validate_args:
    assertions.extend([
        check_ops.assert_less_equal(
            math_ops.reduce_sum(
                math_ops.cast(math_ops.equal(shape, -1), dtypes.int32)),
            1,
            message="`{}` elements must have at most one `-1`."
            .format(shape)),
        check_ops.assert_greater_equal(
            shape, -1,
            message="`{}` elements must be either positive integers or `-1`."
            .format(shape)),
    ])
  return assertions
Example 15: batch_gather_with_default
def batch_gather_with_default(params,
                              indices,
                              default_value='',
                              name=None):
  """Same as `batch_gather` but inserts `default_value` for invalid indices.

  This operation is similar to `batch_gather` except that it will substitute
  the value for invalid indices with `default_value` as the contents.
  See `batch_gather` for more details.

  Args:
    params: A potentially ragged tensor with shape `[B1...BN, P1...PM]`
      (`N>=0`, `M>0`).
    indices: A potentially ragged tensor with shape `[B1...BN, I]` (`N>=0`).
    default_value: A value to be inserted in places where `indices` are out
      of bounds.  Must be the same dtype as `params` and either a scalar or
      rank 1.
    name: A name for the operation (optional).

  Returns:
    A potentially ragged tensor with shape `[B1...BN, I, P2...PM]`.
    `result.ragged_rank = max(indices.ragged_rank, params.ragged_rank)`.

  #### Example:
    ```python
    >>> params = tf.ragged.constant([
          ['a', 'b', 'c'],
          ['d'],
          [],
          ['e']])
    >>> indices = tf.ragged.constant([[1, 2, -1], [], [], [0, 10]])
    >>> batch_gather_with_default(params, indices, 'FOO')
    [['b', 'c', 'FOO'], [], [], ['e', 'FOO']]
    ```
  """
  with ops.name_scope(name, 'RaggedBatchGatherWithDefault'):
    params = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        params, name='params',
    )
    indices = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        indices, name='indices',
    )
    default_value = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        default_value, name='default_value',
    )
    # TODO(hterry): lift this restriction and support default_values
    # of rank > 1
    if (default_value.shape.ndims != 0
        and default_value.shape.ndims != 1):
      raise ValueError('"default_value" must be a scalar or vector')
    upper_bounds = None
    if indices.shape.ndims is None:
      raise ValueError('Indices must have a known rank.')
    if params.shape.ndims is None:
      raise ValueError('Params must have a known rank.')

    num_batch_dimensions = indices.shape.ndims - 1
    pad = None
    # The logic for this works as follows:
    # - create a padded params, where:
    #     padded_params[b1...bn, 0] = default_value
    #     padded_params[b1...bn, i] = params[b1...bn, i-1] (i>0)
    # - create an `upper_bounds` Tensor that contains the number of elements
    #   in each innermost rank. Broadcast `upper_bounds` to be the same shape
    #   as `indices`.
    # - check to see which index in `indices` are out of bounds and substitute
    #   it with the index containing `default_value` (the first).
    # - call batch_gather with the indices adjusted.
    with ops.control_dependencies([
        check_ops.assert_greater_equal(array_ops.rank(params),
                                       array_ops.rank(indices))]):
      if ragged_tensor.is_ragged(params):
        row_lengths = ragged_array_ops.expand_dims(
            params.row_lengths(axis=num_batch_dimensions),
            axis=-1)
        upper_bounds = math_ops.cast(row_lengths, indices.dtype)

        pad_shape = _get_pad_shape(params, indices)

        pad = ragged_tensor_shape.broadcast_to(
            default_value, pad_shape)
      else:
        params_shape = array_ops.shape(params)
        pad_shape = array_ops.concat([
            params_shape[:num_batch_dimensions],
            [1],
            params_shape[num_batch_dimensions + 1:params.shape.ndims]
        ], 0)
        upper_bounds = params_shape[num_batch_dimensions]
        pad = array_ops.broadcast_to(default_value, pad_shape)

      # Add `default_value` as the first value in the innermost (ragged) rank.
      pad = math_ops.cast(pad, params.dtype)
      padded_params = array_ops.concat(
          [pad, params], axis=num_batch_dimensions)

      # Adjust the indices by substituting out-of-bound indices to the
      # default-value index (which is the first element)
      shifted_indices = indices + 1
      is_out_of_bounds = (indices < 0) | (indices > upper_bounds)
      # ... (the rest of this example is omitted) ...
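The pad-and-shift trick described in the comments above is easiest to see in the dense, single-batch-dimension case. A hypothetical NumPy sketch of the same idea (ours, not the ragged implementation):

```python
import numpy as np

def gather_with_default_sketch(params, indices, default):
  # Put `default` at slot 0 of a padded copy, shift real indices up by
  # one, and redirect any out-of-bounds index to slot 0.
  params = np.asarray(params)
  padded = np.concatenate([[default], params])
  shifted = np.asarray(indices) + 1
  bad = (shifted < 1) | (shifted > params.shape[0])
  return padded[np.where(bad, 0, shifted)]

# gather_with_default_sketch(['a', 'b', 'c'], [1, 5, -2], 'FOO')
# -> array(['b', 'FOO', 'FOO'], dtype='<U3')
```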