This article collects typical usage examples of the broadcast_to method from the Python module tensorflow.compat.v2. If you have been wondering what v2.broadcast_to does, how to call it, and where it is useful, the curated code examples below should help. They can also serve as a starting point for exploring the rest of the tensorflow.compat.v2 module.

The 15 code examples of v2.broadcast_to shown below are ordered by popularity.
Example 1: full_like

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import broadcast_to [as alias]
def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None):  # pylint: disable=missing-docstring,redefined-outer-name
  """order, subok and shape arguments mustn't be changed."""
  if order != 'K':
    raise ValueError('Non-standard orders are not supported.')
  if not subok:
    raise ValueError('subok being False is not supported.')
  if shape:
    raise ValueError('Overriding the shape is not supported.')
  a = asarray(a).data
  dtype = dtype or utils.result_type(a)
  fill_value = asarray(fill_value, dtype=dtype)
  return arrays_lib.tensor_to_ndarray(
      tf.broadcast_to(fill_value.data, tf.shape(a)))
# TODO(wangpeng): investigate whether we can make `copy` default to False.
# TODO(wangpeng): utils.np_doc can't handle np.array because np.array is a
# builtin function. Make utils.np_doc support builtin functions.
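A minimal sketch of the broadcast at the core of full_like, assuming only a TF 2.x runtime (the asarray, utils and arrays_lib helpers belong to the surrounding numpy-on-TensorFlow module and are not shown here):

import tensorflow.compat.v2 as tf

a = tf.zeros([2, 3])
# full_like ultimately delegates to this broadcast: a scalar fill value
# stretched to the shape of `a`.
filled = tf.broadcast_to(tf.constant(7.0), tf.shape(a))
print(filled.numpy())  # [[7. 7. 7.] [7. 7. 7.]]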
Example 2: tril

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import broadcast_to [as alias]
def tril(m, k=0):  # pylint: disable=missing-docstring
  m = asarray(m).data
  m_shape = m.shape.as_list()
  if len(m_shape) < 2:
    raise ValueError('Argument to tril must have rank at least 2')
  if m_shape[-1] is None or m_shape[-2] is None:
    raise ValueError('Currently, the last two dimensions of the input array '
                     'need to be known.')
  z = tf.constant(0, m.dtype)
  mask = tri(*m_shape[-2:], k=k, dtype=bool)
  return utils.tensor_to_ndarray(
      tf.where(tf.broadcast_to(mask, tf.shape(m)), m, z))
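The masking pattern is easy to reproduce with plain TF ops; here np.tri stands in for the module's own tri helper (a sketch, assuming TF 2.x and NumPy):

import numpy as np
import tensorflow.compat.v2 as tf

m = tf.reshape(tf.range(1, 13, dtype=tf.float32), [3, 4])
mask = np.tri(3, 4, k=0, dtype=bool)  # True on and below the main diagonal
lower = tf.where(tf.broadcast_to(mask, tf.shape(m)), m, tf.constant(0.0))
# Entries above the diagonal are zeroed, matching np.tril(m.numpy()).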
Example 3: triu

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import broadcast_to [as alias]
def triu(m, k=0):  # pylint: disable=missing-docstring
  m = asarray(m).data
  m_shape = m.shape.as_list()
  if len(m_shape) < 2:
    raise ValueError('Argument to triu must have rank at least 2')
  if m_shape[-1] is None or m_shape[-2] is None:
    raise ValueError('Currently, the last two dimensions of the input array '
                     'need to be known.')
  z = tf.constant(0, m.dtype)
  mask = tri(*m_shape[-2:], k=k - 1, dtype=bool)
  return utils.tensor_to_ndarray(
      tf.where(tf.broadcast_to(mask, tf.shape(m)), z, m))
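triu is the mirror image of tril: the mask is built with k - 1 and the tf.where branches are swapped, so entries strictly below the k-th diagonal are zeroed. Continuing the sketch from Example 2 (same m and imports):

upper = tf.where(tf.broadcast_to(np.tri(3, 4, k=-1, dtype=bool), tf.shape(m)),
                 tf.constant(0.0), m)
# Keeps the diagonal and everything above it, matching np.triu(m.numpy()).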
Example 4: _tf_gcd

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import broadcast_to [as alias]
def _tf_gcd(x1, x2):
  def _gcd_cond_fn(x1, x2):
    return tf.reduce_any(x2 != 0)

  def _gcd_body_fn(x1, x2):
    # tf.math.mod will raise an error when any element of x2 is 0. To avoid
    # that, we change those zeros to ones. Their values don't matter because
    # they won't be used.
    x2_safe = tf.where(x2 != 0, x2, tf.constant(1, x2.dtype))
    x1, x2 = (tf.where(x2 != 0, x2, x1),
              tf.where(x2 != 0, tf.math.mod(x1, x2_safe),
                       tf.constant(0, x2.dtype)))
    return (tf.where(x1 < x2, x2, x1), tf.where(x1 < x2, x1, x2))

  if (not np.issubdtype(x1.dtype.as_numpy_dtype, np.integer) or
      not np.issubdtype(x2.dtype.as_numpy_dtype, np.integer)):
    raise ValueError("Arguments to gcd must be integers.")
  shape = tf.broadcast_static_shape(x1.shape, x2.shape)
  x1 = tf.broadcast_to(x1, shape)
  x2 = tf.broadcast_to(x2, shape)
  gcd, _ = tf.while_loop(_gcd_cond_fn, _gcd_body_fn,
                         (tf.math.abs(x1), tf.math.abs(x2)))
  return gcd
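Both inputs are broadcast to a common shape before the elementwise Euclidean loop runs, so differently shaped integer tensors work too. A quick call, assuming _tf_gcd above is in scope:

x1 = tf.constant([12, 20, 7], dtype=tf.int32)
x2 = tf.constant([18, 8, 0], dtype=tf.int32)
print(_tf_gcd(x1, x2).numpy())  # [6 4 7]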
Example 5: _batch_jacobian

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import broadcast_to [as alias]
def _batch_jacobian(y, x, tape):
  """Computes a Jacobian w.r.t. last dimensions of y and x."""
  # y and x must have the same batch dimensions.
  # For input shapes (b, dy), (b, dx) yields shape (b, dy, dx).
  d = y.shape.as_list()[-1]
  if d is None:
    raise ValueError("Last dimension of state Tensors must be known.")
  grads = []
  for i in range(d):
    w = tf.broadcast_to(tf.one_hot(i, d, dtype=y.dtype), y.shape)
    # We must use tf.UnconnectedGradients.ZERO here and below, because some
    # state components may legitimately not depend on each other or some of
    # the params.
    grad = tape.gradient(y, x, output_gradients=w,
                         unconnected_gradients=tf.UnconnectedGradients.ZERO)
    grads.append(grad)
  return tf.stack(grads, axis=-2)
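Because tape.gradient is called once per output component, the tape has to be created with persistent=True. A small sketch, assuming the helper above is in scope:

x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])  # shape (b, dx) = (2, 3)
with tf.GradientTape(persistent=True) as tape:
  tape.watch(x)
  y = x ** 2  # shape (b, dy) = (2, 3)
jac = _batch_jacobian(y, x, tape)  # shape (2, 3, 3)
# For an elementwise square the Jacobian is diagonal with entries 2 * x.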
Example 6: _conditional_variance_x

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import broadcast_to [as alias]
def _conditional_variance_x(self, t, mr_t, sigma_t):
  """Computes the variance of x(t), see [1], Eq. 10.41."""
  t = tf.repeat(tf.expand_dims(t, axis=0), self._dim, axis=0)
  var_x_between_vol_knots = self._variance_int(self._padded_knots,
                                               self._jump_locations,
                                               self._jump_values_vol,
                                               self._jump_values_mr)
  varx_at_vol_knots = tf.concat(
      [self._zero_padding,
       _cumsum_using_matvec(var_x_between_vol_knots)],
      axis=1)
  time_index = tf.searchsorted(self._jump_locations, t)
  vn = tf.concat(
      [self._zero_padding,
       self._jump_locations], axis=1)
  var_x_t = self._variance_int(
      tf.gather(vn, time_index, batch_dims=1), t, sigma_t, mr_t)
  var_x_t = var_x_t + tf.gather(varx_at_vol_knots, time_index, batch_dims=1)
  var_x_t = (var_x_t[:, 1:] - var_x_t[:, :-1]) * tf.math.exp(
      -2 * tf.broadcast_to(mr_t, t.shape)[:, 1:] * t[:, 1:])
  return var_x_t
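Here broadcast_to only aligns shapes: per-dimension mean-reversion values are stretched across the (dim, num_times) time grid built by tf.repeat at the top. A shape-only sketch with hypothetical values:

dim, num_times = 2, 5
t = tf.repeat(tf.expand_dims(tf.linspace(0.0, 1.0, num_times), axis=0),
              dim, axis=0)                   # shape (2, 5)
mr_t = tf.constant([[0.03], [0.05]])         # one mean-reversion value per dim
print(tf.broadcast_to(mr_t, t.shape).shape)  # (2, 5)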
Example 7: _prob

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import broadcast_to [as alias]
def _prob(self, y):
  """Called by the base class to compute likelihoods."""
  # Convert to (channels, 1, batch) format by collapsing dimensions and then
  # commuting channels to front.
  y = tf.broadcast_to(
      y, tf.broadcast_dynamic_shape(tf.shape(y), self.batch_shape_tensor()))
  shape = tf.shape(y)
  y = tf.reshape(y, (-1, 1, self.batch_shape.num_elements()))
  y = tf.transpose(y, (2, 1, 0))
  # Evaluate densities.
  # We can use the special rule below to only compute differences in the left
  # tail of the sigmoid. This increases numerical stability: sigmoid(x) is 1
  # for large x, 0 for small x. Subtracting two numbers close to 0 can be done
  # with much higher precision than subtracting two numbers close to 1.
  lower = self._logits_cumulative(y - .5)
  upper = self._logits_cumulative(y + .5)
  # Flip signs if we can move more towards the left tail of the sigmoid.
  sign = tf.stop_gradient(-tf.math.sign(lower + upper))
  p = abs(tf.sigmoid(sign * upper) - tf.sigmoid(sign * lower))
  # Convert back to (broadcasted) input tensor shape.
  p = tf.transpose(p, (2, 1, 0))
  p = tf.reshape(p, shape)
  return p
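The sign flip is worth isolating: since sigmoid(x) = 1 - sigmoid(-x), a difference of two sigmoids can always be evaluated in the left tail, where both values are near 0 and the subtraction loses far less precision. A standalone sketch of the trick:

def stable_sigmoid_diff(lower, upper):
  # Mirror both logits into the left tail of the sigmoid before subtracting.
  sign = tf.stop_gradient(-tf.math.sign(lower + upper))
  return tf.abs(tf.sigmoid(sign * upper) - tf.sigmoid(sign * lower))

# Naive sigmoid(21) - sigmoid(20) underflows to 0 in float32; the flipped
# form computes sigmoid(-20) - sigmoid(-21) instead.
print(stable_sigmoid_diff(tf.constant([20.0]), tf.constant([21.0])).numpy())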
Example 8: full

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import broadcast_to [as alias]
def full(shape, fill_value, dtype=None):  # pylint: disable=redefined-outer-name
  """Returns an array with given shape and dtype filled with `fill_value`.

  Args:
    shape: A valid shape object. Could be a native python object or an object
      of type ndarray, numpy.ndarray or tf.TensorShape.
    fill_value: array_like. Could be an ndarray, a Tensor or any object that
      can be converted to a Tensor using `tf.convert_to_tensor`.
    dtype: Optional, defaults to dtype of the `fill_value`. The type of the
      resulting ndarray. Could be a python type, a NumPy type or a TensorFlow
      `DType`.

  Returns:
    An ndarray.

  Raises:
    ValueError: if `fill_value` can not be broadcast to shape `shape`.
  """
  fill_value = asarray(fill_value, dtype=dtype)
  if utils.isscalar(shape):
    shape = tf.reshape(shape, [1])
  return arrays_lib.tensor_to_ndarray(tf.broadcast_to(fill_value.data, shape))
# Using doc only here since np full_like signature doesn't seem to have the
# shape argument (even though it exists in the documentation online).
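Note the scalar-shape normalization: a scalar shape is reshaped into a rank-1 tensor before the broadcast. The underlying TF behavior, sketched without the module's ndarray wrappers:

shape = tf.reshape(tf.constant(3), [1])  # what full() does for a scalar shape
print(tf.broadcast_to(tf.constant(1.5), shape).numpy())  # [1.5 1.5 1.5]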
Example 9: broadcast_to

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import broadcast_to [as alias]
def broadcast_to(array, shape):  # pylint: disable=redefined-outer-name
  return full(shape, array)
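Since full (Example 8) already broadcasts its fill value to the requested shape, the numpy-style broadcast_to falls out as a one-liner with the array itself as the fill value.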
Example 10: polyval

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import broadcast_to [as alias]
def polyval(p, x):
  def f(p, x):
    if p.shape.rank == 0:
      p = tf.reshape(p, [1])
    p = tf.unstack(p)
    # TODO(wangpeng): Make tf version take a tensor for p instead of a list.
    y = tf.math.polyval(p, x)
    # If the polynomial is 0-order, numpy requires the result to be broadcast
    # to `x`'s shape.
    if len(p) == 1:
      y = tf.broadcast_to(y, x.shape)
    return y
  return _bin_op(f, p, x)
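The broadcast covers the degenerate constant-polynomial case, where tf.math.polyval simply returns the lone coefficient and drops x's shape. A direct sketch:

coeffs = [tf.constant(3.0)]      # 0-order polynomial p(x) = 3
x = tf.ones([4])
y = tf.math.polyval(coeffs, x)   # scalar 3.0, shape ()
y = tf.broadcast_to(y, x.shape)  # numpy semantics: shape (4,)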
Example 11: _prepare_pde_coeff

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import broadcast_to [as alias]
def _prepare_pde_coeff(raw_coeff, value_grid):
  # Converts values received from second_order_coeff_fn and similar Callables
  # into a format usable further down in the pipeline.
  if raw_coeff is None:
    return None
  dtype = value_grid.dtype
  coeff = tf.convert_to_tensor(raw_coeff, dtype=dtype)
  coeff = tf.broadcast_to(coeff, tf.shape(value_grid))
  return coeff
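This lets PDE coefficient callables return a scalar, a per-point tensor, or anything broadcastable in between. A quick call, assuming the helper above is in scope and a hypothetical grid shape:

value_grid = tf.zeros([10, 50])              # e.g. a batch of 10 grids of 50 points
coeff = _prepare_pde_coeff(0.5, value_grid)  # constant coefficient
print(coeff.shape)                           # (10, 50)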
Example 12: _prepare_boundary_conditions

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import broadcast_to [as alias]
def _prepare_boundary_conditions(boundary_tensor, value_grid, batch_rank, dim):
  """Prepares values received from boundary_condition callables."""
  if boundary_tensor is None:
    return None
  boundary_tensor = tf.convert_to_tensor(boundary_tensor, value_grid.dtype)
  # Broadcast to the shape of the boundary: it is the shape of the value grid
  # with one dimension removed.
  dim_to_remove = batch_rank + dim
  broadcast_shape = []
  # Shape slicing+concatenation seems error-prone, so let's do it simply.
  for i, size in enumerate(value_grid.shape):
    if i != dim_to_remove:
      broadcast_shape.append(size)
  return tf.broadcast_to(boundary_tensor, broadcast_shape)
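The boundary of a grid has the grid's shape minus the spatial dimension the boundary cuts across. With hypothetical shapes, and assuming the helper above is in scope:

value_grid = tf.zeros([16, 20, 30])  # batch of 16, two spatial dims
b = _prepare_boundary_conditions(1.0, value_grid, batch_rank=1, dim=0)
print(b.shape)  # (16, 30): spatial dimension 0 is removed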
Example 13: _prepare_pde_coeffs

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import broadcast_to [as alias]
def _prepare_pde_coeffs(raw_coeffs, value_grid):
  """Prepares values received from second_order_coeff_fn and similar."""
  if raw_coeffs is None:
    return None
  dtype = value_grid.dtype
  coeffs = tf.convert_to_tensor(raw_coeffs, dtype=dtype)
  broadcast_shape = tf.shape(value_grid)
  coeffs = tf.broadcast_to(coeffs, broadcast_shape)
  return coeffs
Example 14: _prepare_boundary_conditions

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import broadcast_to [as alias]
def _prepare_boundary_conditions(boundary_tensor, value_grid):
  """Prepares values received from boundary_condition callables."""
  if boundary_tensor is None:
    return None
  boundary_tensor = tf.convert_to_tensor(boundary_tensor, value_grid.dtype)
  # Broadcast to batch dimensions.
  broadcast_shape = tf.shape(value_grid)[:-1]
  return tf.broadcast_to(boundary_tensor, broadcast_shape)
Example 15: _try_broadcast_to

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import broadcast_to [as alias]
def _try_broadcast_to(x, batch_shape, name):
  """Broadcasts batch shape of `x` to a `batch_shape` if possible."""
  batch_shape_x = x.shape.as_list()[:-1]
  if batch_shape_x != batch_shape:
    try:
      np.broadcast_to(np.zeros(batch_shape_x), batch_shape)
    except ValueError:
      raise ValueError('Batch shape of `{2}` should be broadcastable with {0} '
                       'but is {1} instead'.format(
                           batch_shape, batch_shape_x, name))
    return tf.broadcast_to(x, batch_shape + x.shape[-1:])
  return x
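Only the batch (leading) dimensions are broadcast; the trailing event dimension of x is kept as-is, and np.broadcast_to on a dummy zeros array serves as the compatibility check. A quick call, assuming the helper above is in scope:

x = tf.zeros([1, 5])  # batch shape [1], event size 5
y = _try_broadcast_to(x, [3], name='x')
print(y.shape)        # (3, 5)
# _try_broadcast_to(tf.zeros([2, 5]), [3], name='x') would raise ValueError.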