本文整理汇总了Python中tensorflow.python.ops.check_ops.assert_rank_at_least函数的典型用法代码示例。如果您正苦于以下问题:Python assert_rank_at_least函数的具体用法?Python assert_rank_at_least怎么用?Python assert_rank_at_least使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了assert_rank_at_least函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_rank_one_tensor_doesnt_raise_if_rank_just_right_static_rank
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
  """A statically rank-1 tensor passes an `assert_rank_at_least(..., 1)` check."""
  with self.test_session():
    my_tensor = constant_op.constant([1, 2], name="my_tensor")
    assertion = check_ops.assert_rank_at_least(my_tensor, 1)
    with ops.control_dependencies([assertion]):
      array_ops.identity(my_tensor).eval()
示例2: _assert_valid_alpha
def _assert_valid_alpha(self, alpha, validate_args):
  """Converts `alpha` to a tensor, optionally attaching validity assertions.

  When `validate_args` is `True`, the returned tensor carries runtime checks
  that `alpha` has rank >= 1 and is strictly positive.
  """
  alpha = ops.convert_to_tensor(alpha, name="alpha")
  if not validate_args:
    return alpha
  assertions = [
      check_ops.assert_rank_at_least(alpha, 1),
      check_ops.assert_positive(alpha),
  ]
  return control_flow_ops.with_dependencies(assertions, alpha)
示例3: test_rank_one_tensor_doesnt_raise_if_rank_just_right_dynamic_rank
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
  """A dynamically rank-1 tensor passes an `assert_rank_at_least(..., 1)` check."""
  with self.test_session():
    placeholder = array_ops.placeholder(dtypes.float32, name="my_tensor")
    assertion = check_ops.assert_rank_at_least(placeholder, 1)
    with ops.control_dependencies([assertion]):
      array_ops.identity(placeholder).eval(feed_dict={placeholder: [1, 2]})
示例4: test_rank_one_tensor_raises_if_rank_too_small_static_rank
def test_rank_one_tensor_raises_if_rank_too_small_static_rank(self):
  """A statically rank-1 tensor fails `assert_rank_at_least(..., 2)` at graph build."""
  my_tensor = constant_op.constant([1, 2], name="my_tensor")
  with self.assertRaisesRegexp(ValueError, "rank at least 2"):
    assertion = check_ops.assert_rank_at_least(my_tensor, 2)
    with ops.control_dependencies([assertion]):
      self.evaluate(array_ops.identity(my_tensor))
示例5: maybe_check_quadrature_param
def maybe_check_quadrature_param(param, name, validate_args):
  """Helper which checks validity of `loc` and `scale` init args.

  Args:
    param: `Tensor` mixing parameter. Must be a (batch of) vector, i.e. have
      rank >= 1; currently its final dimension must equal 1 (bimixtures only).
    name: Python `str` used both for the name scope and in error messages.
    validate_args: Python `bool`. When `True`, attach runtime assertions for
      any property that could not be verified statically.

  Returns:
    `param`, possibly with validation assertions attached as control
    dependencies.

  Raises:
    ValueError: if `param` is statically known to be scalar.
    NotImplementedError: if `param.shape[-1]` is statically known and not 1.
  """
  with ops.name_scope(name="check_" + name, values=[param]):
    assertions = []
    # Rank check: prefer the static shape when available; only fall back to a
    # runtime assertion when the rank is unknown and validation was requested.
    if param.shape.ndims is not None:
      if param.shape.ndims == 0:
        raise ValueError("Mixing params must be a (batch of) vector; "
                         "{}.rank={} is not at least one.".format(
                             name, param.shape.ndims))
    elif validate_args:
      assertions.append(check_ops.assert_rank_at_least(
          param, 1,
          message=("Mixing params must be a (batch of) vector; "
                   "{}.rank is not at least one.".format(
                       name))))
    # TODO(jvdillon): Remove once we support k-mixtures.
    # Event-size check, same static-first / dynamic-fallback pattern.
    # NOTE(review): `with_rank_at_least(1)[-1]` yields a Dimension object; it
    # may never literally be None — confirm against the TF version in use.
    if param.shape.with_rank_at_least(1)[-1] is not None:
      if param.shape[-1].value != 1:
        raise NotImplementedError("Currently only bimixtures are supported; "
                                  "{}.shape[-1]={} is not 1.".format(
                                      name, param.shape[-1].value))
    elif validate_args:
      assertions.append(check_ops.assert_equal(
          array_ops.shape(param)[-1], 1,
          message=("Currently only bimixtures are supported; "
                   "{}.shape[-1] is not 1.".format(name))))
    if assertions:
      return control_flow_ops.with_dependencies(assertions, param)
    return param
示例6: _check_alpha
def _check_alpha(self, alpha):
  """Converts `alpha` to a tensor; in strict mode, adds rank/positivity checks."""
  alpha = ops.convert_to_tensor(alpha, name='alpha')
  if not self.strict:
    return alpha
  checks = [
      check_ops.assert_rank_at_least(alpha, 1),
      check_ops.assert_positive(alpha),
  ]
  return control_flow_ops.with_dependencies(checks, alpha)
示例7: test_rank_zero_tensor_raises_if_rank_too_small_static_rank
def test_rank_zero_tensor_raises_if_rank_too_small_static_rank(self):
  """A scalar tensor fails `assert_rank_at_least(..., 1)` at graph-build time."""
  with self.test_session():
    scalar = constant_op.constant(1, name="my_tensor")
    with self.assertRaisesRegexp(ValueError, "my_tensor.*rank at least 1"):
      assertion = check_ops.assert_rank_at_least(scalar, 1)
      with ops.control_dependencies([assertion]):
        array_ops.identity(scalar).eval()
示例8: test_rank_one_tensor_raises_if_rank_too_small_dynamic_rank
def test_rank_one_tensor_raises_if_rank_too_small_dynamic_rank(self):
  """A dynamically rank-1 tensor fails `assert_rank_at_least(..., 2)` at run time."""
  with self.test_session():
    placeholder = array_ops.placeholder(dtypes.float32, name="my_tensor")
    assertion = check_ops.assert_rank_at_least(placeholder, 2)
    with ops.control_dependencies([assertion]):
      with self.assertRaisesOpError("my_tensor.*rank"):
        array_ops.identity(placeholder).eval(feed_dict={placeholder: [1, 2]})
示例9: lbeta
def lbeta(x, name='lbeta'):
  r"""Computes `ln(|Beta(x)|)`, reducing along the last dimension.

  Given one-dimensional `z = [z_0,...,z_{K-1}]`, we define

  ```Beta(z) = \prod_j Gamma(z_j) / Gamma(\sum_j z_j)```

  And for `n + 1` dimensional `x` with shape `[N1, ..., Nn, K]`, we define
  `lbeta(x)[i1, ..., in] = Log(|Beta(x[i1, ..., in, :])|)`. In other words,
  the last dimension is treated as the `z` vector.

  Note that if `z = [u, v]`, then
  `Beta(z) = int_0^1 t^{u-1} (1 - t)^{v-1} dt`, which defines the traditional
  bivariate beta function.

  Args:
    x: A rank `n + 1` `Tensor` with type `float`, or `double`.
    name: A name for the operation (optional).

  Returns:
    The logarithm of `|Beta(x)|` reducing along the last dimension.

  Raises:
    ValueError: If `x` is empty with rank one or less.
  """
  # NOTE(review): `ops.op_scope` is the pre-`name_scope` API — this code
  # targets an old TensorFlow release.
  with ops.op_scope([x], name):
    x = ops.convert_to_tensor(x, name='x')
    # Gate all downstream use of `x` on the rank >= 1 assertion.
    x = control_flow_ops.with_dependencies(
        [check_ops.assert_rank_at_least(x, 1)], x)
    is_empty = math_ops.equal(0, array_ops.size(x))

    def nonempty_lbeta():
      # log|Beta(x)| = sum_j lgamma(x_j) - lgamma(sum_j x_j), reduced over the
      # last dimension.
      last_index = array_ops.size(array_ops.shape(x)) - 1
      log_prod_gamma_x = math_ops.reduce_sum(
          math_ops.lgamma(x),
          reduction_indices=last_index)
      sum_x = math_ops.reduce_sum(x, reduction_indices=last_index)
      log_gamma_sum_x = math_ops.lgamma(sum_x)
      result = log_prod_gamma_x - log_gamma_sum_x
      # Reduction drops the last dim; record that statically.
      result.set_shape(x.get_shape()[:-1])
      return result

    def empty_lbeta():
      # If x is empty, return version with one less dimension.
      # Can only do this if rank >= 2.
      assertion = check_ops.assert_rank_at_least(x, 2)
      with ops.control_dependencies([assertion]):
        return array_ops.squeeze(x, squeeze_dims=[0])

    # Choose the branch statically when the element count is known at graph
    # construction; otherwise defer to a dynamic cond on emptiness.
    static_size = x.get_shape().num_elements()
    if static_size is not None:
      if static_size > 0:
        return nonempty_lbeta()
      else:
        return empty_lbeta()
    else:
      return control_flow_ops.cond(is_empty, empty_lbeta, nonempty_lbeta)
示例10: _forward
def _forward(self, x):
  """Returns `x @ x^T` of the lower-triangular part of `x`.

  When `validate_args` is set, first asserts that `x` has rank >= 2 and that
  its trailing two dimensions are equal (i.e. `x` is a batch of square
  matrices).
  """
  if self.validate_args:
    dims = array_ops.shape(x)
    checks = [
        check_ops.assert_rank_at_least(x, 2),
        check_ops.assert_equal(dims[-2], dims[-1]),
    ]
    x = control_flow_ops.with_dependencies(checks, x)
  # For safety, explicitly zero-out the upper triangular part.
  x = array_ops.matrix_band_part(x, -1, 0)
  return math_ops.matmul(x, x, adjoint_b=True)
示例11: __init__
def __init__(self,
             alpha,
             validate_args=False,
             allow_nan_stats=True,
             name="Dirichlet"):
  """Initialize a batch of Dirichlet distributions.

  Args:
    alpha: Positive floating point tensor with shape broadcastable to
      `[N1,..., Nm, k]` `m >= 0`. Defines this as a batch of `N1 x ... x Nm`
      different `k` class Dirichlet distributions.
    validate_args: `Boolean`, default `False`. Whether to assert valid values
      for parameters `alpha` and `x` in `prob` and `log_prob`. If `False`,
      correct behavior is not guaranteed.
    allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
      exception if a statistic (e.g. mean/mode/etc...) is undefined for any
      batch member. If `True`, batch members with valid parameters leading to
      undefined statistics will return NaN for this statistic.
    name: The name to prefix Ops created by this distribution class.

  Examples:

  ```python
  # Define 1-batch of 2-class Dirichlet distributions,
  # also known as a Beta distribution.
  dist = Dirichlet([1.1, 2.0])

  # Define a 2-batch of 3-class distributions.
  dist = Dirichlet([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
  ```
  """
  # Snapshot the constructor arguments for the base class *before* any local
  # is rebound below.
  parameters = locals()
  parameters.pop("self")
  with ops.name_scope(name, values=[alpha]) as ns:
    alpha = ops.convert_to_tensor(alpha, name="alpha")
    # Validity assertions (positivity, rank >= 1) are attached only when
    # `validate_args` is set; otherwise the dependency list is empty.
    with ops.control_dependencies([
        check_ops.assert_positive(alpha),
        check_ops.assert_rank_at_least(alpha, 1)
    ] if validate_args else []):
      self._alpha = array_ops.identity(alpha, name="alpha")
      # Sum over the event (class) dimension; reused by density/statistics.
      self._alpha_sum = math_ops.reduce_sum(alpha,
                                            reduction_indices=[-1],
                                            keep_dims=False)
    super(Dirichlet, self).__init__(
        dtype=self._alpha.dtype,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        is_continuous=True,
        is_reparameterized=False,
        parameters=parameters,
        graph_parents=[self._alpha, self._alpha_sum],
        name=ns)
示例12: __init__
def __init__(self,
             alpha,
             validate_args=True,
             allow_nan_stats=False,
             name="Dirichlet"):
  """Initialize a batch of Dirichlet distributions.

  Args:
    alpha: Positive floating point tensor with shape broadcastable to
      `[N1,..., Nm, k]` `m >= 0`. Defines this as a batch of `N1 x ... x Nm`
      different `k` class Dirichlet distributions.
    validate_args: Whether to assert valid values for parameters `alpha` and
      `x` in `prob` and `log_prob`. If `False`, correct behavior is not
      guaranteed.
    allow_nan_stats: Boolean, default `False`. If `False`, raise an
      exception if a statistic (e.g. mean/mode/etc...) is undefined for any
      batch member. If `True`, batch members with valid parameters leading to
      undefined statistics will return NaN for this statistic.
    name: The name to prefix Ops created by this distribution class.

  Examples:

  ```python
  # Define 1-batch of 2-class Dirichlet distributions,
  # also known as a Beta distribution.
  dist = Dirichlet([1.1, 2.0])

  # Define a 2-batch of 3-class distributions.
  dist = Dirichlet([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
  ```
  """
  # NOTE(review): `ops.op_scope` marks this as an older TF API vintage than
  # the `name_scope`-based constructor elsewhere in this file.
  with ops.op_scope([alpha], name):
    alpha = ops.convert_to_tensor(alpha, name="alpha_before_deps")
    # Positivity and rank assertions are attached only when `validate_args`.
    with ops.control_dependencies([
        check_ops.assert_positive(alpha), check_ops.assert_rank_at_least(
            alpha, 1)
    ] if validate_args else []):
      alpha = array_ops.identity(alpha, name="alpha")

    self._alpha = alpha
    self._name = name

    # Used for mean/mode/variance/entropy computations
    self._alpha_0 = math_ops.reduce_sum(alpha,
                                        reduction_indices=[-1],
                                        keep_dims=False)

    # Batch shape is alpha's shape minus the event dim; event shape is the
    # trailing (class-count) dimension.
    self._get_batch_shape = self._alpha_0.get_shape()
    self._get_event_shape = self._alpha.get_shape().with_rank_at_least(1)[-1:]
    self._validate_args = validate_args
    self._allow_nan_stats = allow_nan_stats
示例13: _prob
def _prob(self, x):
  """Returns 1 where every component of `x` is within `slack` of `loc`, else 0."""
  if self.validate_args:
    rank_check = check_ops.assert_rank_at_least(x, 1)
    last_dim = array_ops.gather(array_ops.shape(x), array_ops.rank(x) - 1)
    space_check = check_ops.assert_equal(
        self.event_shape_tensor(), last_dim,
        message=
        "Argument 'x' not defined in the same space R^k as this distribution")
    # Rank must be checked before the event-dimension comparison is valid.
    with ops.control_dependencies([rank_check]):
      with ops.control_dependencies([space_check]):
        x = array_ops.identity(x)
  within_slack = math_ops.reduce_all(
      math_ops.abs(x - self.loc) <= self._slack, axis=-1)
  return math_ops.cast(within_slack, dtype=self.dtype)
示例14: _maybe_assert_valid_concentration
def _maybe_assert_valid_concentration(self, concentration, validate_args):
  """Checks the validity of the concentration parameter.

  When `validate_args` is set, attaches runtime assertions that
  `concentration` is positive, has rank >= 1, and has an event size of at
  least 2; otherwise returns it unchanged.
  """
  if not validate_args:
    return concentration
  checks = [
      check_ops.assert_positive(
          concentration,
          message="Concentration parameter must be positive."),
      check_ops.assert_rank_at_least(
          concentration, 1,
          message="Concentration parameter must have >=1 dimensions."),
      check_ops.assert_less(
          1, array_ops.shape(concentration)[-1],
          message="Concentration parameter must have event_size >= 2."),
  ]
  return control_flow_ops.with_dependencies(checks, concentration)
示例15: _check_chol
def _check_chol(self, chol):
  """Verify that `chol` is proper."""
  chol = ops.convert_to_tensor(chol, name='chol')
  if not self.verify_pd:
    return chol
  # Assert: chol is a (batch of) square matrix and the stored diagonal is
  # strictly positive.
  dims = array_ops.shape(chol)
  ndims = array_ops.rank(chol)
  checks = [
      check_ops.assert_rank_at_least(chol, 2),
      check_ops.assert_equal(
          array_ops.gather(dims, ndims - 2), array_ops.gather(dims, ndims - 1)),
      check_ops.assert_positive(self._diag),
  ]
  return control_flow_ops.with_dependencies(checks, chol)