This article collects typical usage examples of the Python function tensorflow.python.ops.array_ops.broadcast_static_shape. If you are unsure what broadcast_static_shape does, how to call it, or what it looks like in real code, the curated examples below should help.
The following 15 code examples of broadcast_static_shape are shown, sorted by popularity by default.
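Before diving into the examples, here is a minimal sketch of what the function computes, using the public alias tf.broadcast_static_shape (the exported name for the internal array_ops symbol). It applies NumPy-style broadcasting rules to two TensorShape objects at graph-construction time, with no runtime op involved:

import tensorflow as tf

# Static broadcasting of two TensorShapes, following NumPy broadcasting rules.
print(tf.broadcast_static_shape(tf.TensorShape([5, 1, 3]),
                                tf.TensorShape([2, 3])))     # (5, 2, 3)

# Unknown dimensions are carried through where broadcasting cannot resolve them.
print(tf.broadcast_static_shape(tf.TensorShape([5, 1, 3]),
                                tf.TensorShape([None, 3])))  # (5, None, 3)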
Example 1: _reduce_jacobian_det_over_event
def _reduce_jacobian_det_over_event(
    self, y, ildj, min_event_ndims, event_ndims):
  """Reduce jacobian over event_ndims - min_event_ndims."""
  if not self.is_constant_jacobian:
    return math_ops.reduce_sum(
        ildj,
        self._get_event_reduce_dims(min_event_ndims, event_ndims))

  # In this case, we need to tile the jacobian over the event and reduce.
  y_rank = array_ops.rank(y)
  y_shape = array_ops.shape(y)[
      y_rank - event_ndims : y_rank - min_event_ndims]

  ones = array_ops.ones(y_shape, ildj.dtype)
  reduced_ildj = math_ops.reduce_sum(
      ones * ildj,
      axis=self._get_event_reduce_dims(min_event_ndims, event_ndims))
  # The multiplication by ones can change the inferred static shape so we try
  # to recover as much as possible.
  if (isinstance(event_ndims, int) and
      y.get_shape().ndims and ildj.get_shape().ndims):
    y_shape = y.get_shape()
    y_shape = y_shape[y_shape.ndims - event_ndims :
                      y_shape.ndims - min_event_ndims]
    ildj_shape = ildj.get_shape()
    broadcast_shape = array_ops.broadcast_static_shape(
        ildj_shape, y_shape)
    reduced_ildj.set_shape(
        broadcast_shape[: broadcast_shape.ndims - (
            event_ndims - min_event_ndims)])

  return reduced_ildj
Example 2: _possibly_broadcast_batch_shape
def _possibly_broadcast_batch_shape(self, x):
  """Return 'x', possibly after broadcasting the leading dimensions."""
  # If we have no batch shape, our batch shape broadcasts with everything!
  if self._batch_shape_arg is None:
    return x

  # Static attempt:
  #   If we determine that no broadcast is necessary, pass x through
  #   If we need a broadcast, add to an array of zeros.
  #
  # special_shape is the shape that, when broadcast with x's shape, will give
  # the correct broadcast_shape.  Note that
  #   We have already verified the second to last dimension of self.shape
  #   matches x's shape in assert_compatible_matrix_dimensions.
  #   Also, the final dimension of 'x' can have any shape.
  # Therefore, the final two dimensions of special_shape are 1's.
  special_shape = self.batch_shape.concatenate([1, 1])
  bshape = array_ops.broadcast_static_shape(x.get_shape(), special_shape)
  if special_shape.is_fully_defined():
    # bshape.is_fully_defined iff special_shape.is_fully_defined.
    if bshape == x.get_shape():
      return x
    # Use the built in broadcasting of addition.
    zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype)
    return x + zeros

  # Dynamic broadcast:
  #   Always add to an array of zeros, rather than using a "cond", since a
  #   cond would require copying data from GPU --> CPU.
  special_shape = array_ops.concat((self.batch_shape_dynamic(), [1, 1]), 0)
  zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype)
  return x + zeros
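The x + zeros trick in this example is a general way to materialize a broadcast without a cond. A minimal standalone sketch of the same idea, with illustrative shapes not taken from the class above:

import tensorflow as tf

x = tf.ones([4, 2, 3])                        # batch shape [4], matrix [2, 3]
special_shape = tf.TensorShape([6, 4, 1, 1])  # target batch [6, 4]; 1's for the matrix dims

# The static broadcast tells us the result shape up front.
print(tf.broadcast_static_shape(x.shape, special_shape))  # (6, 4, 2, 3)

# Adding zeros of special_shape broadcasts x up to that shape.
zeros = tf.zeros(special_shape, dtype=x.dtype)
print((x + zeros).shape)  # (6, 4, 2, 3)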
Example 3: benchmarkBatchMatMulBroadcast
def benchmarkBatchMatMulBroadcast(self):
  for (a_shape, b_shape) in self.shape_pairs:
    with compat.forward_compatibility_horizon(2019, 4, 26):
      with ops.Graph().as_default(), \
          session.Session(config=benchmark.benchmark_config()) as sess, \
          ops.device("/cpu:0"):
        matrix_a = variables.Variable(
            GetRandomNormalInput(a_shape, np.float32))
        matrix_b = variables.Variable(
            GetRandomNormalInput(b_shape, np.float32))
        variables.global_variables_initializer().run()

        # Use batch matmul op's internal broadcasting.
        self.run_op_benchmark(
            sess,
            math_ops.matmul(matrix_a, matrix_b),
            min_iters=50,
            name="batch_matmul_cpu_{}_{}".format(a_shape, b_shape))

        # Manually broadcast the input matrices using the broadcast_to op.
        broadcasted_batch_shape = array_ops.broadcast_static_shape(
            matrix_a.shape[:-2], matrix_b.shape[:-2])
        broadcasted_a_shape = broadcasted_batch_shape.concatenate(
            matrix_a.shape[-2:])
        broadcasted_b_shape = broadcasted_batch_shape.concatenate(
            matrix_b.shape[-2:])
        self.run_op_benchmark(
            sess,
            math_ops.matmul(
                array_ops.broadcast_to(matrix_a, broadcasted_a_shape),
                array_ops.broadcast_to(matrix_b, broadcasted_b_shape)),
            min_iters=50,
            name="batch_matmul_manual_broadcast_cpu_{}_{}".format(
                a_shape, b_shape))
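The manual-broadcast path in this benchmark generalizes to any batched matmul: compute the common batch shape with broadcast_static_shape, then expand both operands with tf.broadcast_to. A short sketch with assumed concrete shapes:

import tensorflow as tf

a = tf.ones([8, 1, 2, 3])  # batch shape [8, 1]
b = tf.ones([5, 3, 4])     # batch shape [5]

batch = tf.broadcast_static_shape(a.shape[:-2], b.shape[:-2])  # (8, 5)
a_full = tf.broadcast_to(a, batch.concatenate(a.shape[-2:]))   # shape [8, 5, 2, 3]
b_full = tf.broadcast_to(b, batch.concatenate(b.shape[-2:]))   # shape [8, 5, 3, 4]
print(tf.matmul(a_full, b_full).shape)                         # (8, 5, 2, 4)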
Example 4: _check_shapes
def _check_shapes(self):
  """Static check that shapes are compatible."""
  # Broadcast shape also checks that u and v are compatible.
  uv_shape = array_ops.broadcast_static_shape(
      self.u.get_shape(), self.v.get_shape())

  batch_shape = array_ops.broadcast_static_shape(
      self.base_operator.batch_shape, uv_shape[:-2])

  self.base_operator.domain_dimension.assert_is_compatible_with(
      uv_shape[-2])

  if self._diag_update is not None:
    uv_shape[-1].assert_is_compatible_with(self._diag_update.get_shape()[-1])
    array_ops.broadcast_static_shape(
        batch_shape, self._diag_update.get_shape()[:-1])
Example 5: _reduce_jacobian_det_over_event
def _reduce_jacobian_det_over_event(
    self, y, ildj, min_event_ndims, event_ndims):
  """Reduce jacobian over event_ndims - min_event_ndims."""
  # In this case, we need to tile the Jacobian over the event and reduce.
  y_rank = array_ops.rank(y)
  y_shape = array_ops.shape(y)[
      y_rank - event_ndims : y_rank - min_event_ndims]

  ones = array_ops.ones(y_shape, ildj.dtype)
  reduced_ildj = math_ops.reduce_sum(
      ones * ildj,
      axis=self._get_event_reduce_dims(min_event_ndims, event_ndims))
  # The multiplication by ones can change the inferred static shape so we try
  # to recover as much as possible.
  event_ndims_ = self._maybe_get_static_event_ndims(event_ndims)
  if (event_ndims_ is not None and
      y.shape.ndims is not None and
      ildj.shape.ndims is not None):
    y_shape = y.shape[y.shape.ndims - event_ndims_ :
                      y.shape.ndims - min_event_ndims]
    broadcast_shape = array_ops.broadcast_static_shape(ildj.shape, y_shape)
    reduced_ildj.set_shape(
        broadcast_shape[: broadcast_shape.ndims - (
            event_ndims_ - min_event_ndims)])

  return reduced_ildj
Example 6: _broadcast_shape
def _broadcast_shape(shape1, shape2):
  """Convenience function which statically broadcasts shape when possible."""
  if (tensor_util.constant_value(shape1) is not None and
      tensor_util.constant_value(shape2) is not None):
    return array_ops.broadcast_static_shape(
        tensor_shape.TensorShape(tensor_util.constant_value(shape1)),
        tensor_shape.TensorShape(tensor_util.constant_value(shape2)))
  return array_ops.broadcast_dynamic_shape(shape1, shape2)
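The helper's static-first, dynamic-fallback split matters because the two underlying functions return different things: broadcast_static_shape returns a TensorShape with no runtime cost, while broadcast_dynamic_shape adds an op that produces an int32 Tensor. A quick sketch of both paths via the public aliases:

import tensorflow as tf

# Static path: TensorShapes in, TensorShape out, resolved at graph construction.
print(tf.broadcast_static_shape(tf.TensorShape([2, 1]),
                                tf.TensorShape([3])))   # (2, 3)

# Dynamic path: 1-D int32 shape tensors in, an int32 Tensor out at runtime.
print(tf.broadcast_dynamic_shape(tf.constant([2, 1]),
                                 tf.constant([3])))     # [2 3]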
Example 7: _static_check_for_broadcastable_batch_shape
def _static_check_for_broadcastable_batch_shape(operators):
  """ValueError if operators determined to have non-broadcastable shapes."""
  if len(operators) < 2:
    return

  # This will fail if they cannot be broadcast together.
  batch_shape = operators[0].batch_shape
  for op in operators[1:]:
    batch_shape = array_ops.broadcast_static_shape(batch_shape, op.batch_shape)
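This check works because broadcast_static_shape raises ValueError on incompatible shapes, so folding every operator's batch_shape into one running broadcast doubles as validation. A small sketch of the failure mode, with assumed shapes:

import tensorflow as tf

batch_shapes = [tf.TensorShape([2, 1]),
                tf.TensorShape([2, 3]),
                tf.TensorShape([4, 3])]
try:
  result = batch_shapes[0]
  for s in batch_shapes[1:]:
    result = tf.broadcast_static_shape(result, s)  # (2, 3), then fails on (4, 3)
except ValueError:
  print("non-broadcastable batch shapes")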
Example 8: _finish_prob_for_one_fiber
def _finish_prob_for_one_fiber(self, y, x, ildj, event_ndims):
  """Finish computation of prob on one element of the inverse image."""
  x = self._maybe_rotate_dims(x, rotate_right=True)
  prob = self.distribution.prob(x)
  if self._is_maybe_event_override:
    prob = math_ops.reduce_prod(prob, self._reduce_event_indices)
  prob *= math_ops.exp(math_ops.cast(ildj, prob.dtype))
  if self._is_maybe_event_override and isinstance(event_ndims, int):
    prob.set_shape(array_ops.broadcast_static_shape(
        y.get_shape().with_rank_at_least(1)[:-event_ndims], self.batch_shape))
  return prob
Example 9: _finish_log_prob_for_one_fiber
def _finish_log_prob_for_one_fiber(self, y, x, ildj):
  """Finish computation of log_prob on one element of the inverse image."""
  x = self._maybe_rotate_dims(x, rotate_right=True)
  log_prob = self.distribution.log_prob(x)
  if self._is_maybe_event_override:
    log_prob = math_ops.reduce_sum(log_prob, self._reduce_event_indices)
  log_prob += math_ops.cast(ildj, log_prob.dtype)
  if self._is_maybe_event_override:
    log_prob.set_shape(array_ops.broadcast_static_shape(
        y.get_shape().with_rank_at_least(1)[:-1], self.batch_shape))
  return log_prob
Example 10: _prob
def _prob(self, y):
  x, ildj = self.bijector.inverse_and_inverse_log_det_jacobian(y)
  x = self._maybe_rotate_dims(x, rotate_right=True)
  prob = self.distribution.prob(x)
  if self._is_maybe_event_override:
    prob = math_ops.reduce_prod(prob, self._reduce_event_indices)
  prob *= math_ops.exp(ildj)
  if self._is_maybe_event_override:
    prob.set_shape(array_ops.broadcast_static_shape(
        y.get_shape().with_rank_at_least(1)[:-1], self.batch_shape))
  return prob
Example 11: _log_prob
def _log_prob(self, y):
  x = self.bijector.inverse(y)
  ildj = self.bijector.inverse_log_det_jacobian(y)
  x = self._maybe_rotate_dims(x, rotate_right=True)
  log_prob = self.distribution.log_prob(x)
  if self._is_maybe_event_override:
    log_prob = math_ops.reduce_sum(log_prob, self._reduce_event_indices)
  log_prob = ildj + log_prob
  if self._is_maybe_event_override:
    log_prob.set_shape(array_ops.broadcast_static_shape(
        y.get_shape().with_rank_at_least(1)[:-1], self.batch_shape))
  return log_prob
Example 12: determine_batch_event_shapes
def determine_batch_event_shapes(grid, endpoint_affine):
  """Helper to infer batch_shape and event_shape."""
  with ops.name_scope(name="determine_batch_event_shapes"):
    # grid  # shape: [B, k, q]
    # endpoint_affine  # len=k, shape: [B, d, d]
    batch_shape = grid.shape[:-2]
    batch_shape_tensor = array_ops.shape(grid)[:-2]
    event_shape = None
    event_shape_tensor = None

    def _set_event_shape(shape, shape_tensor):
      if event_shape is None:
        return shape, shape_tensor
      return (array_ops.broadcast_static_shape(event_shape, shape),
              array_ops.broadcast_dynamic_shape(
                  event_shape_tensor, shape_tensor))

    for aff in endpoint_affine:
      if aff.shift is not None:
        batch_shape = array_ops.broadcast_static_shape(
            batch_shape, aff.shift.shape[:-1])
        batch_shape_tensor = array_ops.broadcast_dynamic_shape(
            batch_shape_tensor, array_ops.shape(aff.shift)[:-1])
        event_shape, event_shape_tensor = _set_event_shape(
            aff.shift.shape[-1:], array_ops.shape(aff.shift)[-1:])
      if aff.scale is not None:
        batch_shape = array_ops.broadcast_static_shape(
            batch_shape, aff.scale.batch_shape)
        batch_shape_tensor = array_ops.broadcast_dynamic_shape(
            batch_shape_tensor, aff.scale.batch_shape_tensor())
        event_shape, event_shape_tensor = _set_event_shape(
            tensor_shape.TensorShape([aff.scale.range_dimension]),
            aff.scale.range_dimension_tensor()[array_ops.newaxis])

    return batch_shape, batch_shape_tensor, event_shape, event_shape_tensor
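The pattern worth noting here is keeping a static shape and a dynamic shape tensor in lockstep: every update broadcasts both, so downstream code gets the best available static information plus a tensor that is always correct at runtime. A minimal sketch of that pairing (the helper name is illustrative):

import tensorflow as tf

def fold_in_batch_shape(static_shape, dynamic_shape, t):
  """Fold tensor t's leading (batch) dims into both running shapes."""
  static_shape = tf.broadcast_static_shape(static_shape, t.shape[:-1])
  dynamic_shape = tf.broadcast_dynamic_shape(dynamic_shape, tf.shape(t)[:-1])
  return static_shape, dynamic_shape

t = tf.zeros([3, 5, 2])
static, dynamic = fold_in_batch_shape(
    tf.TensorShape([3, 1]), tf.constant([3, 1]), t)
print(static)   # (3, 5)
print(dynamic)  # [3 5]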
Example 13: prefer_static_broadcast_shape
def prefer_static_broadcast_shape(
    shape1, shape2, name="prefer_static_broadcast_shape"):
  """Convenience function which statically broadcasts shape when possible.

  Args:
    shape1: `1-D` integer `Tensor`. Already converted to tensor!
    shape2: `1-D` integer `Tensor`. Already converted to tensor!
    name: A string name to prepend to created ops.

  Returns:
    The broadcast shape, either as `TensorShape` (if broadcast can be done
    statically), or as a `Tensor`.
  """
  with ops.name_scope(name, values=[shape1, shape2]):
    if (tensor_util.constant_value(shape1) is not None and
        tensor_util.constant_value(shape2) is not None):
      return array_ops.broadcast_static_shape(
          tensor_shape.TensorShape(tensor_util.constant_value(shape1)),
          tensor_shape.TensorShape(tensor_util.constant_value(shape2)))
    return array_ops.broadcast_dynamic_shape(shape1, shape2)
Example 14: prefer_static_broadcast_shape
def prefer_static_broadcast_shape(
    shape1, shape2, name="prefer_static_broadcast_shape"):
  """Convenience function which statically broadcasts shape when possible.

  Args:
    shape1: `1-D` integer `Tensor`. Already converted to tensor!
    shape2: `1-D` integer `Tensor`. Already converted to tensor!
    name: A string name to prepend to created ops.

  Returns:
    The broadcast shape, either as `TensorShape` (if broadcast can be done
    statically), or as a `Tensor`.
  """
  with ops.name_scope(name, values=[shape1, shape2]):
    def make_shape_tensor(x):
      return ops.convert_to_tensor(x, name="shape", dtype=dtypes.int32)

    def get_tensor_shape(s):
      if isinstance(s, tensor_shape.TensorShape):
        return s
      s_ = tensor_util.constant_value(make_shape_tensor(s))
      if s_ is not None:
        return tensor_shape.TensorShape(s_)
      return None

    def get_shape_tensor(s):
      if not isinstance(s, tensor_shape.TensorShape):
        return make_shape_tensor(s)
      if s.is_fully_defined():
        return make_shape_tensor(s.as_list())
      raise ValueError("Cannot broadcast from partially "
                       "defined `TensorShape`.")

    shape1_ = get_tensor_shape(shape1)
    shape2_ = get_tensor_shape(shape2)
    if shape1_ is not None and shape2_ is not None:
      return array_ops.broadcast_static_shape(shape1_, shape2_)

    shape1_ = get_shape_tensor(shape1)
    shape2_ = get_shape_tensor(shape2)
    return array_ops.broadcast_dynamic_shape(shape1_, shape2_)
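A hedged usage note for this variant, assuming the function and its internal imports (ops, dtypes, tensor_shape, tensor_util) are in scope as defined above: it accepts Python lists, constant shape Tensors, and TensorShapes interchangeably, and resolves statically whenever both inputs allow.

import tensorflow as tf

# Each of these resolves on the static path and returns TensorShape (2, 3):
print(prefer_static_broadcast_shape([2, 1], [2, 3]))
print(prefer_static_broadcast_shape(tf.constant([2, 1]), tf.TensorShape([2, 3])))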
Example 15: get_broadcast_shape
def get_broadcast_shape(*tensors):
  """Get broadcast shape as a Python list of integers (preferred) or `Tensor`.

  Args:
    *tensors: One or more `Tensor` objects (already converted!).

  Returns:
    broadcast shape: Python list (if shapes determined statically), otherwise
      an `int32` `Tensor`.
  """
  # Try static.
  s_shape = tensors[0].shape
  for t in tensors[1:]:
    s_shape = array_ops.broadcast_static_shape(s_shape, t.shape)
  if s_shape.is_fully_defined():
    return s_shape.as_list()

  # Fallback on dynamic.
  d_shape = array_ops.shape(tensors[0])
  for t in tensors[1:]:
    d_shape = array_ops.broadcast_dynamic_shape(d_shape, array_ops.shape(t))
  return d_shape
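A short usage sketch of the same static-first idea, assuming get_broadcast_shape is in scope as defined above:

import tensorflow as tf

a = tf.zeros([2, 1, 3])
b = tf.zeros([5, 3])

# All shapes fully defined, so a plain Python list comes back.
print(get_broadcast_shape(a, b))  # [2, 5, 3]

# If any dimension were statically unknown, the dynamic branch would instead
# return an int32 Tensor computed at runtime.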