This article collects typical usage examples of the reduce_all function from the Python module tensorflow.python.ops.math_ops. If you have been wondering what exactly reduce_all does, how to use it, or want to see it in context, the curated examples below should help.
Fifteen code examples of the reduce_all function are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
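Before turning to the collected examples, here is a minimal sketch of the op itself. math_ops.reduce_all is the internal module behind the public tf.reduce_all alias: it computes the logical AND of a boolean tensor's elements, optionally along given axes.

import tensorflow as tf  # tf.reduce_all is the public alias of math_ops.reduce_all

x = tf.constant([[True, True],
                 [False, True]])
print(tf.reduce_all(x))          # False: AND over all elements
print(tf.reduce_all(x, axis=0))  # [False  True]: AND down each column
print(tf.reduce_all(x, axis=1))  # [ True False]: AND across each row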
Example 1: _loss_fn
def _loss_fn(labels, logits):
  # labels_input, logits_input and loss are closed over from the enclosing test.
  check_labels = control_flow_ops.Assert(
      math_ops.reduce_all(math_ops.equal(labels, labels_input)),
      data=[labels])
  check_logits = control_flow_ops.Assert(
      math_ops.reduce_all(math_ops.equal(logits, logits_input)),
      data=[logits])
  with ops.control_dependencies([check_labels, check_logits]):
    return constant_op.constant(loss)
Example 2: _compute_energy_change
def _compute_energy_change(current_target_log_prob,
                           current_momentums,
                           proposed_target_log_prob,
                           proposed_momentums,
                           independent_chain_ndims,
                           name=None):
  """Helper to `kernel` which computes the energy change."""
  with ops.name_scope(
      name, "compute_energy_change",
      ([current_target_log_prob, proposed_target_log_prob,
        independent_chain_ndims] +
       current_momentums + proposed_momentums)):
    # Abbreviate lk0=log_kinetic_energy and lk1=proposed_log_kinetic_energy
    # since they're a mouthful and this lets us inline more.
    lk0, lk1 = [], []
    for current_momentum, proposed_momentum in zip(current_momentums,
                                                   proposed_momentums):
      axis = math_ops.range(independent_chain_ndims,
                            array_ops.rank(current_momentum))
      lk0.append(_log_sum_sq(current_momentum, axis))
      lk1.append(_log_sum_sq(proposed_momentum, axis))

    lk0 = -np.log(2.) + math_ops.reduce_logsumexp(
        array_ops.stack(lk0, axis=-1), axis=-1)
    lk1 = -np.log(2.) + math_ops.reduce_logsumexp(
        array_ops.stack(lk1, axis=-1), axis=-1)
    lp0 = -current_target_log_prob   # log_potential
    lp1 = -proposed_target_log_prob  # proposed_log_potential
    x = array_ops.stack([lp1, math_ops.exp(lk1), -lp0, -math_ops.exp(lk0)],
                        axis=-1)

    # The sum is NaN if any element is NaN or we see both +Inf and -Inf.
    # Thus we will replace such rows with infinite energy change which implies
    # rejection. Recall that float-comparisons with NaN are always False.
    is_sum_determinate = (
        math_ops.reduce_all(math_ops.is_finite(x) | (x >= 0.), axis=-1) &
        math_ops.reduce_all(math_ops.is_finite(x) | (x <= 0.), axis=-1))
    is_sum_determinate = array_ops.tile(
        is_sum_determinate[..., array_ops.newaxis],
        multiples=array_ops.concat([
            array_ops.ones(array_ops.rank(is_sum_determinate),
                           dtype=dtypes.int32),
            [4],
        ], axis=0))
    x = array_ops.where(is_sum_determinate,
                        x,
                        array_ops.fill(array_ops.shape(x),
                                       value=x.dtype.as_numpy_dtype(np.inf)))
    return math_ops.reduce_sum(x, axis=-1)
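The pair of reduce_all calls is the interesting part: per the source comment, a row whose sum would be NaN (any NaN entry, or +Inf and -Inf together) fails the test, and such rows are overwritten with +Inf so that reduce_sum yields an infinite energy change and the proposal is rejected. A standalone sketch of just that filtering step with toy values, using the public API:

import numpy as np
import tensorflow as tf

x = tf.constant([[1., 2., 3., 4.],           # all finite: sum well defined
                 [1., np.inf, -np.inf, 2.],  # +Inf and -Inf: sum is NaN
                 [np.nan, 1., 2., 3.]])      # NaN poisons the sum
# NaN compares False against everything, so a NaN entry fails both tests.
is_sum_determinate = (
    tf.reduce_all(tf.math.is_finite(x) | (x >= 0.), axis=-1) &
    tf.reduce_all(tf.math.is_finite(x) | (x <= 0.), axis=-1))
print(is_sum_determinate.numpy())            # [ True False False]
x = tf.where(is_sum_determinate[..., tf.newaxis], x,
             tf.fill(tf.shape(x), np.inf))   # broadcast row mask over columns
print(tf.reduce_sum(x, axis=-1).numpy())     # [10. inf inf]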
Example 3: testUniformSamplePdf
def testUniformSamplePdf(self):
  a = 10.0
  b = [11.0, 100.0]
  uniform = uniform_lib.Uniform(a, b)
  self.assertTrue(
      self.evaluate(
          math_ops.reduce_all(uniform.prob(uniform.sample(10)) > 0)))
Example 4: next_inputs
def next_inputs(self, time, outputs, state, sample_ids, name=None):
  with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
                      [time, outputs, state, sample_ids]):
    (finished, base_next_inputs, state) = (
        super(ScheduledEmbeddingTrainingHelper, self).next_inputs(
            time=time,
            outputs=outputs,
            state=state,
            sample_ids=sample_ids,
            name=name))

    def maybe_sample():
      """Perform scheduled sampling."""
      where_sampling = math_ops.cast(
          array_ops.where(sample_ids > -1), dtypes.int32)
      where_not_sampling = math_ops.cast(
          array_ops.where(sample_ids <= -1), dtypes.int32)
      where_sampling_flat = array_ops.reshape(where_sampling, [-1])
      where_not_sampling_flat = array_ops.reshape(where_not_sampling, [-1])
      sample_ids_sampling = array_ops.gather(sample_ids, where_sampling_flat)
      inputs_not_sampling = array_ops.gather(
          base_next_inputs, where_not_sampling_flat)
      sampled_next_inputs = self._embedding_fn(sample_ids_sampling)
      base_shape = array_ops.shape(base_next_inputs)
      return (array_ops.scatter_nd(indices=where_sampling,
                                   updates=sampled_next_inputs,
                                   shape=base_shape)
              + array_ops.scatter_nd(indices=where_not_sampling,
                                     updates=inputs_not_sampling,
                                     shape=base_shape))

    all_finished = math_ops.reduce_all(finished)
    next_inputs = control_flow_ops.cond(
        all_finished, lambda: base_next_inputs, maybe_sample)
    return (finished, next_inputs, state)
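The heart of maybe_sample is the pair of scatter_nd calls: positions where sample_ids > -1 receive freshly embedded samples, the remaining positions keep the base inputs, and adding the two scatters reassembles the full batch. A toy sketch with made-up values (the embedding vectors here are invented stand-ins for self._embedding_fn):

import tensorflow as tf

sample_ids = tf.constant([5, -1, 7, -1])          # -1 marks "keep base input"
base_next_inputs = tf.fill([4, 3], 9.0)           # stand-in for the base inputs
embedded = tf.constant([[0.5, 0.5, 0.5],          # invented embedding of id 5
                        [0.7, 0.7, 0.7]])         # invented embedding of id 7

where_sampling = tf.cast(tf.where(sample_ids > -1), tf.int32)       # [[0], [2]]
where_not_sampling = tf.cast(tf.where(sample_ids <= -1), tf.int32)  # [[1], [3]]
inputs_not_sampling = tf.gather(base_next_inputs,
                                tf.reshape(where_not_sampling, [-1]))
base_shape = tf.shape(base_next_inputs)
merged = (tf.scatter_nd(where_sampling, embedded, base_shape) +
          tf.scatter_nd(where_not_sampling, inputs_not_sampling, base_shape))
# Rows 0 and 2 now hold the embeddings; rows 1 and 3 hold the 9.0 base rows.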
Example 5: same_dynamic_shape
def same_dynamic_shape(a, b):
  """Returns whether a and b have the same dynamic shape.

  Args:
    a: `Tensor`
    b: `Tensor`

  Returns:
    `Boolean` `Tensor` representing if both tensors have the same shape.
  """
  a = ops.convert_to_tensor(a, name="a")
  b = ops.convert_to_tensor(b, name="b")

  # One of the shapes isn't fully defined, so we need to use the dynamic
  # shape.
  return control_flow_ops.cond(
      math_ops.equal(array_ops.rank(a), array_ops.rank(b)),
      # Here we can't just do math_ops.equal(a.shape, b.shape), since
      # static shape inference may break the equality comparison between
      # shape(a) and shape(b) in math_ops.equal.
      lambda: math_ops.reduce_all(math_ops.equal(
          array_ops.concat_v2((
              array_ops.shape(a),
              array_ops.shape(b)), 0),
          array_ops.concat_v2((
              array_ops.shape(b),
              array_ops.shape(a)), 0))),
      lambda: constant_op.constant(False))
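Here concat_v2 is the pre-1.0 name of today's tf.concat, and the trick is that concat(shape(a), shape(b)) equals concat(shape(b), shape(a)) element-wise exactly when the two shape vectors are equal. A rough re-implementation against the current public API:

import tensorflow as tf

def same_dynamic_shape(a, b):
  # Sketch of the helper above, rewritten with public TF 2.x ops.
  a = tf.convert_to_tensor(a)
  b = tf.convert_to_tensor(b)
  return tf.cond(
      tf.equal(tf.rank(a), tf.rank(b)),
      lambda: tf.reduce_all(tf.equal(
          tf.concat([tf.shape(a), tf.shape(b)], 0),
          tf.concat([tf.shape(b), tf.shape(a)], 0))),
      lambda: tf.constant(False))

print(same_dynamic_shape(tf.ones([2, 3]), tf.zeros([2, 3])))    # True
print(same_dynamic_shape(tf.ones([2, 3]), tf.ones([2, 3, 1])))  # False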
Example 6: _call_loss_fn
def _call_loss_fn(loss_fn, labels, logits, features):
  """Calls loss_fn and checks the returned shape.

  Args:
    loss_fn: The loss function.
    labels: Processed labels Tensor.
    logits: Logits Tensor of shape [batch_size, logits_dimension].
    features: Features dict.

  Returns:
    Loss Tensor with shape [batch_size, 1].
  """
  loss_fn_args = util.fn_args(loss_fn)
  kwargs = {}
  if 'features' in loss_fn_args:
    kwargs['features'] = features
  unweighted_loss = loss_fn(labels=labels, logits=logits, **kwargs)
  batch_size = array_ops.shape(logits)[0]
  loss_shape = array_ops.shape(unweighted_loss)
  check_shape_op = control_flow_ops.Assert(
      math_ops.reduce_all(math_ops.equal(loss_shape, [batch_size, 1])),
      data=[
          'loss_fn must return Tensor of shape [batch_size, 1]. Given: ',
          loss_shape])
  with ops.control_dependencies([check_shape_op]):
    return array_ops.identity(unweighted_loss)
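The same shape-guard pattern in isolation: assert that a tensor has shape [batch_size, 1] before using it. A minimal sketch with the public API; the tensors are invented placeholders for the real labels and loss:

import tensorflow as tf

logits = tf.ones([8, 2])
unweighted_loss = tf.ones([8, 1])  # pretend output of a user-supplied loss_fn

batch_size = tf.shape(logits)[0]
loss_shape = tf.shape(unweighted_loss)
check_shape_op = tf.debugging.Assert(
    tf.reduce_all(tf.equal(loss_shape, [batch_size, 1])),
    data=['loss_fn must return Tensor of shape [batch_size, 1]. Given: ',
          loss_shape])
with tf.control_dependencies([check_shape_op]):
  loss = tf.identity(unweighted_loss)  # raises InvalidArgumentError on mismatch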
Example 7: _assert_has_shape
def _assert_has_shape(x, shape):
  x_shape = array_ops.shape(x)
  packed_shape = array_ops.pack(shape)
  return logging_ops.Assert(
      math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)),
      ["Expected shape for Tensor %s is " % x.name,
       packed_shape, " but saw shape: ", x_shape])
Example 8: assert_close
def assert_close(
    x, y, data=None, summarize=None, message=None, name="assert_close"):
  """Assert that x and y are within machine epsilon of each other.

  Args:
    x: Numeric `Tensor`
    y: Numeric `Tensor`
    data: The tensors to print out if the condition is `False`. Defaults to
      error message and first few entries of `x` and `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).

  Returns:
    Op raising `InvalidArgumentError` if |x - y| > machine epsilon.
  """
  message = message or ""
  x = ops.convert_to_tensor(x, name="x")
  y = ops.convert_to_tensor(y, name="y")

  if x.dtype.is_integer:
    return check_ops.assert_equal(
        x, y, data=data, summarize=summarize, message=message, name=name)

  with ops.name_scope(name, "assert_close", [x, y, data]):
    tol = np.finfo(x.dtype.as_numpy_dtype).resolution
    if data is None:
      data = [
          message,
          "Condition x ~= y did not hold element-wise: x = ", x.name, x,
          "y = ", y.name, y
      ]
    condition = math_ops.reduce_all(
        math_ops.less_equal(math_ops.abs(x - y), tol))
    return control_flow_ops.Assert(
        condition, data, summarize=summarize)
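The core of the check, stripped to a few eager lines with the public API (float32 assumed, for which np.finfo reports a resolution of 1e-06):

import numpy as np
import tensorflow as tf

x = tf.constant([1.0, 2.0])
y = x + 1e-9                               # well inside float32 resolution
tol = np.finfo(np.float32).resolution      # 1e-06
print(tf.reduce_all(tf.abs(x - y) <= tol))  # True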
Example 9: is_strictly_increasing
def is_strictly_increasing(x, name=None):
  """Returns `True` if `x` is strictly increasing.

  Elements of `x` are compared in row-major order. The tensor `[x[0],...]`
  is strictly increasing if for every adjacent pair we have `x[i] < x[i+1]`.
  If `x` has fewer than two elements, it is trivially strictly increasing.

  See also: `is_non_decreasing`

  Args:
    x: Numeric `Tensor`.
    name: A name for this operation (optional).
      Defaults to "is_strictly_increasing"

  Returns:
    Boolean `Tensor`, equal to `True` iff `x` is strictly increasing.

  Raises:
    TypeError: if `x` is not a numeric tensor.
  """
  with ops.op_scope([x], name, "is_strictly_increasing"):
    diff = _get_diff_for_monotonic_comparison(x)
    # When len(x) = 1, diff = [], less = [], and reduce_all([]) = True.
    zero = ops.convert_to_tensor(0, dtype=diff.dtype)
    return math_ops.reduce_all(math_ops.less(zero, diff))
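This helper is exposed in the modern public API as tf.math.is_strictly_increasing; a quick eager check:

import tensorflow as tf

print(tf.math.is_strictly_increasing(tf.constant([1, 2, 3])))  # True
print(tf.math.is_strictly_increasing(tf.constant([1, 2, 2])))  # False
print(tf.math.is_strictly_increasing(tf.constant([7])))        # True (trivially)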
Example 10: testUniformSamplePdf
def testUniformSamplePdf(self):
  with self.test_session():
    a = 10.0
    b = [11.0, 100.0]
    uniform = uniform_lib.Uniform(a, b)
    self.assertTrue(
        math_ops.reduce_all(uniform.pdf(uniform.sample(10)) > 0).eval())
Example 11: testNonSequenceNestedStructure
def testNonSequenceNestedStructure(self):
  components = np.array([1, 2, 3], dtype=np.int64)

  dataset = dataset_ops.Dataset.from_tensors(components)
  self.assertEquals(dtypes.int64, dataset.output_types)
  self.assertEquals([3], dataset.output_shapes)

  dataset = dataset.filter(
      lambda x: math_ops.reduce_all(math_ops.equal(x, components)))
  self.assertEquals(dtypes.int64, dataset.output_types)
  self.assertEquals([3], dataset.output_shapes)

  dataset = dataset.map(lambda x: array_ops.stack([x, x]))
  self.assertEquals(dtypes.int64, dataset.output_types)
  self.assertEquals([2, 3], dataset.output_shapes)

  dataset = dataset.flat_map(
      lambda x: dataset_ops.Dataset.from_tensor_slices(x))
  self.assertEquals(dtypes.int64, dataset.output_types)
  self.assertEquals([3], dataset.output_shapes)

  iterator = dataset.make_one_shot_iterator()
  get_next = iterator.get_next()
  self.assertEquals(dtypes.int64, get_next.dtype)
  self.assertEquals([3], get_next.shape)
Example 12: testNonSequenceNestedStructure
def testNonSequenceNestedStructure(self):
  components = np.array([1, 2, 3], dtype=np.int64)

  dataset = dataset_ops.Dataset.from_tensors(components)
  self.assertEqual(dtypes.int64,
                   dataset_ops.get_legacy_output_types(dataset))
  self.assertEqual([3], dataset_ops.get_legacy_output_shapes(dataset))

  dataset = dataset.filter(
      lambda x: math_ops.reduce_all(math_ops.equal(x, components)))
  self.assertEqual(dtypes.int64,
                   dataset_ops.get_legacy_output_types(dataset))
  self.assertEqual([3], dataset_ops.get_legacy_output_shapes(dataset))

  dataset = dataset.map(lambda x: array_ops.stack([x, x]))
  self.assertEqual(dtypes.int64,
                   dataset_ops.get_legacy_output_types(dataset))
  self.assertEqual([2, 3], dataset_ops.get_legacy_output_shapes(dataset))

  dataset = dataset.flat_map(
      lambda x: dataset_ops.Dataset.from_tensor_slices(x))
  self.assertEqual(dtypes.int64,
                   dataset_ops.get_legacy_output_types(dataset))
  self.assertEqual([3], dataset_ops.get_legacy_output_shapes(dataset))

  get_next = self.getNext(dataset)
  self.assertEqual(dtypes.int64, get_next().dtype)
  self.assertEqual([3], get_next().shape)
Example 13: random_crop
def random_crop(value, size, seed=None, name=None):
  """Randomly crops a tensor to a given size.

  Slices a shape `size` portion out of `value` at a uniformly chosen offset.
  Requires `value.shape >= size`.

  If a dimension should not be cropped, pass the full size of that dimension.
  For example, RGB images can be cropped with
  `size = [crop_height, crop_width, 3]`.

  Args:
    value: Input tensor to crop.
    size: 1-D tensor with size the rank of `value`.
    seed: Python integer. Used to create a random seed. See
      [`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
      for behavior.
    name: A name for this operation (optional).

  Returns:
    A cropped tensor of the same rank as `value` and shape `size`.
  """
  # TODO(shlens): Implement edge case to guarantee output size dimensions.
  # If size > value.shape, zero pad the result so that it always has shape
  # exactly size.
  with ops.op_scope([value, size], name, "random_crop") as name:
    value = ops.convert_to_tensor(value, name="value")
    size = ops.convert_to_tensor(size, dtype=dtypes.int32, name="size")
    shape = array_ops.shape(value)
    check = logging_ops.Assert(math_ops.reduce_all(shape >= size),
                               ["Need value.shape >= size, got ", shape, size])
    shape = control_flow_ops.with_dependencies([check], shape)
    limit = shape - size + 1
    offset = random_uniform(array_ops.shape(shape), dtype=size.dtype,
                            maxval=size.dtype.max, seed=seed) % limit
    return array_ops.slice(value, offset, size, name=name)
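This helper survives today as the public tf.image.random_crop. A quick sketch with toy sizes, passing the full channel dimension so only height and width are cropped:

import tensorflow as tf

image = tf.random.uniform([32, 32, 3])  # toy RGB "image"
crop = tf.image.random_crop(image, size=[16, 16, 3], seed=42)
print(crop.shape)                       # (16, 16, 3)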
Example 14: _assert_batch_positive_definite
def _assert_batch_positive_definite(sigma_chol):
  """Add assertions checking that the sigmas are all Positive Definite.

  Given `sigma_chol == cholesky(sigma)`, it is sufficient to check that
  `all(diag(sigma_chol) > 0)`. This is because to check that a matrix is PD,
  it is sufficient that its cholesky factorization is PD, and to check that a
  triangular matrix is PD, it is sufficient to check that its diagonal
  entries are positive.

  Args:
    sigma_chol: N-D. The lower triangular cholesky decomposition of `sigma`.

  Returns:
    An assertion op to use with `control_dependencies`, verifying that
    `sigma_chol` is positive definite.
  """
  sigma_batch_diag = array_ops.batch_matrix_diag_part(sigma_chol)
  return logging_ops.Assert(
      math_ops.reduce_all(sigma_batch_diag > 0),
      [
          "sigma_chol is not positive definite. batched diagonals: ",
          sigma_batch_diag,
          " shaped: ",
          array_ops.shape(sigma_batch_diag),
      ],
  )
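The same diagonal test with today's names (batch_matrix_diag_part was later renamed; tf.linalg.diag_part is the current public equivalent), worked on a small matrix that is positive definite:

import tensorflow as tf

sigma = tf.constant([[4.0, 2.0],
                     [2.0, 3.0]])       # symmetric, positive definite
sigma_chol = tf.linalg.cholesky(sigma)  # lower-triangular factor
diag = tf.linalg.diag_part(sigma_chol)  # [2.0, ~1.414]
print(tf.reduce_all(diag > 0))          # True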
Example 15: assert_less_equal
def assert_less_equal(x, y, data=None, summarize=None, name=None):
  """Assert the condition `x <= y` holds element-wise.

  This condition holds if for every pair of (possibly broadcast) elements
  `x[i]`, `y[i]`, we have `x[i] <= y[i]`.
  If both `x` and `y` are empty, this is trivially satisfied.

  Args:
    x: Numeric `Tensor`.
    y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`, `y`.
    summarize: Print this many entries of each tensor.
    name: A name for this operation (optional). Defaults to "assert_less_equal"

  Returns:
    Op that raises `InvalidArgumentError` if `x <= y` is False.
  """
  with ops.op_scope([x, y, data], name, 'assert_less_equal'):
    x = ops.convert_to_tensor(x, name='x')
    y = ops.convert_to_tensor(y, name='y')
    if data is None:
      data = [
          'Condition x <= y did not hold element-wise: x = ', x.name, x,
          'y = ', y.name, y
      ]
    condition = math_ops.reduce_all(math_ops.less_equal(x, y))
    return logging_ops.Assert(condition, data, summarize=summarize)
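Today this helper lives at tf.debugging.assert_less_equal; a minimal eager demonstration:

import tensorflow as tf

x = tf.constant([1, 2, 3])
y = tf.constant([1, 2, 4])
tf.debugging.assert_less_equal(x, y)    # passes: x <= y holds element-wise
# tf.debugging.assert_less_equal(y, x)  # would raise InvalidArgumentError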