This article collects typical usage examples of the Python method torch.broadcast_tensors. If you are wondering what exactly torch.broadcast_tensors does and how to use it, the curated code samples below may help. You can also explore further usage examples from the torch module it belongs to.
The following presents 15 code examples of torch.broadcast_tensors, sorted by popularity by default.
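Before diving into the examples, a minimal refresher on what the method itself does: torch.broadcast_tensors expands all of its arguments to a single common shape following PyTorch's broadcasting semantics, returning expanded views where possible.

import torch

x = torch.arange(3).view(1, 3)    # shape (1, 3)
y = torch.arange(2).view(2, 1)    # shape (2, 1)
a, b = torch.broadcast_tensors(x, y)
print(a.shape, b.shape)           # torch.Size([2, 3]) torch.Size([2, 3])
print(a + b)                      # identical to x + y, which broadcasts implicitly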
Example 1: smooth_l1_loss
# Required module: import torch [as alias]
# Or: from torch import broadcast_tensors [as alias]
def smooth_l1_loss(input, target, size_average=None, reduce=None, reduction='mean'):
    # type: (Tensor, Tensor, Optional[bool], Optional[bool], str) -> Tensor
    r"""Function that uses a squared term if the absolute
    element-wise error falls below 1 and an L1 term otherwise.

    See :class:`~torch.nn.SmoothL1Loss` for details.
    """
    if size_average is not None or reduce is not None:
        reduction = _Reduction.legacy_get_string(size_average, reduce)
    if target.requires_grad:
        # _smooth_l1_loss is a Python-level helper defined alongside this function.
        ret = _smooth_l1_loss(input, target)
        if reduction != 'none':
            ret = torch.mean(ret) if reduction == 'mean' else torch.sum(ret)
    else:
        # Broadcast both operands so the fused C++ kernel sees matching shapes.
        expanded_input, expanded_target = torch.broadcast_tensors(input, target)
        ret = torch._C._nn.smooth_l1_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction))
    return ret
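To see the two regimes of the loss in action, here is a quick check through the public wrapper torch.nn.functional.smooth_l1_loss (a minimal sketch; the helpers above are private to torch.nn.functional):

import torch
import torch.nn.functional as F

input = torch.tensor([0.5, 2.0])
target = torch.zeros(2)
# |error| < 1 uses the squared term, |error| >= 1 the L1 term:
print(F.smooth_l1_loss(input, target, reduction='none'))  # tensor([0.1250, 1.5000])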
Example 2: l1_loss
# Required module: import torch [as alias]
# Or: from torch import broadcast_tensors [as alias]
def l1_loss(input, target, size_average=None, reduce=None, reduction='mean'):
    # type: (Tensor, Tensor, Optional[bool], Optional[bool], str) -> Tensor
    r"""l1_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor

    Function that takes the mean element-wise absolute value difference.

    See :class:`~torch.nn.L1Loss` for details.
    """
    if size_average is not None or reduce is not None:
        reduction = _Reduction.legacy_get_string(size_average, reduce)
    if target.requires_grad:
        ret = torch.abs(input - target)
        if reduction != 'none':
            ret = torch.mean(ret) if reduction == 'mean' else torch.sum(ret)
    else:
        # Broadcast both operands so the fused C++ kernel sees matching shapes.
        expanded_input, expanded_target = torch.broadcast_tensors(input, target)
        ret = torch._C._nn.l1_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction))
    return ret
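A quick usage check through the public torch.nn.functional.l1_loss wrapper:

import torch
import torch.nn.functional as F

input = torch.tensor([1.0, -2.0, 3.0])
target = torch.zeros(3)
print(F.l1_loss(input, target))                    # tensor(2.) -- mean of |1|, |2|, |3|
print(F.l1_loss(input, target, reduction='none'))  # tensor([1., 2., 3.])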
Example 3: mse_loss
# Required module: import torch [as alias]
# Or: from torch import broadcast_tensors [as alias]
def mse_loss(input, target, size_average=None, reduce=None, reduction='mean'):
    # type: (Tensor, Tensor, Optional[bool], Optional[bool], str) -> Tensor
    r"""mse_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor

    Measures the element-wise mean squared error.

    See :class:`~torch.nn.MSELoss` for details.
    """
    if size_average is not None or reduce is not None:
        reduction = _Reduction.legacy_get_string(size_average, reduce)
    if target.requires_grad:
        ret = (input - target) ** 2
        if reduction != 'none':
            ret = torch.mean(ret) if reduction == 'mean' else torch.sum(ret)
    else:
        # Broadcast both operands so the fused C++ kernel sees matching shapes.
        expanded_input, expanded_target = torch.broadcast_tensors(input, target)
        ret = torch._C._nn.mse_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction))
    return ret
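And the corresponding check for torch.nn.functional.mse_loss:

import torch
import torch.nn.functional as F

input = torch.tensor([1.0, 2.0])
target = torch.zeros(2)
print(F.mse_loss(input, target))                   # tensor(2.5000) -- (1 + 4) / 2
print(F.mse_loss(input, target, reduction='sum'))  # tensor(5.)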
Example 4: div
# Required module: import torch [as alias]
# Or: from torch import broadcast_tensors [as alias]
def div(self, y):
    r"""Divides each element of :attr:`self` with the scalar :attr:`y` or
    each element of the tensor :attr:`y` and returns a new resulting tensor.

    For `y` a scalar:

    .. math::
        \text{out}_i = \frac{\text{self}_i}{\text{y}}

    For `y` a tensor:

    .. math::
        \text{out}_i = \frac{\text{self}_i}{\text{y}_i}

    Note for :attr:`y` a tensor, the shapes of :attr:`self` and :attr:`y` must be
    `broadcastable`_.

    .. _broadcastable:
        https://pytorch.org/docs/stable/notes/broadcasting.html#broadcasting-semantics"""  # noqa: B950
    result = self.clone()
    if isinstance(y, CrypTensor):
        # Pre-broadcast the share so the in-place div_ sees matching shapes.
        result.share = torch.broadcast_tensors(result.share, y.share)[0].clone()
    elif is_tensor(y):
        result.share = torch.broadcast_tensors(result.share, y)[0].clone()
    return result.div_(y)
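The explicit broadcast-then-clone exists because the final in-place div_ cannot grow the result tensor. A plain-PyTorch analogue of the same pattern (no CrypTen required):

import torch

x = torch.ones(1, 3)
y = torch.full((4, 1), 2.0)
# x.div_(y) would raise: an in-place op cannot expand x from (1, 3) to (4, 3).
x = torch.broadcast_tensors(x, y)[0].clone()  # materialize x at the final shape
x.div_(y)                                     # now legal; x is (4, 3) of 0.5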
Example 5: test_torch_broadcast_tensor
# Required module: import torch [as alias]
# Or: from torch import broadcast_tensors [as alias]
def test_torch_broadcast_tensor(self):
    """Test torch.broadcast_tensors on CUDALongTensor"""
    x = get_random_test_tensor(size=(1, 5), is_float=False)
    y = get_random_test_tensor(size=(5, 1), is_float=False)

    x_cuda = CUDALongTensor(x)
    y_cuda = CUDALongTensor(y)

    a, b = torch.broadcast_tensors(x, y)
    a_cuda, b_cuda = torch.broadcast_tensors(x_cuda, y_cuda)

    self.assertTrue(
        type(a_cuda) == CUDALongTensor, "result should be a CUDALongTensor"
    )
    self.assertTrue(
        type(b_cuda) == CUDALongTensor, "result should be a CUDALongTensor"
    )
    self._check_int(
        a, a_cuda.cpu(), "torch.broadcast_tensors failed for CUDALongTensor"
    )
    self._check_int(
        b, b_cuda.cpu(), "torch.broadcast_tensors failed for CUDALongTensor"
    )
Example 6: _define_transdist
# Required module: import torch [as alias]
# Or: from torch import broadcast_tensors [as alias]
def _define_transdist(loc: torch.Tensor, scale: torch.Tensor, inc_dist: Distribution, ndim: int):
    loc, scale = torch.broadcast_tensors(loc, scale)
    # Everything except the trailing `ndim` event dimensions is the batch shape.
    shape = loc.shape[:-ndim] if ndim > 0 else loc.shape

    return TransformedDistribution(
        inc_dist.expand(shape), AffineTransform(loc, scale, event_dim=ndim)
    )
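A usage sketch with a standard normal increment distribution and mixed scalar/vector parameters (ndim=0, i.e. a univariate process; Normal and the function above are the only requirements):

import torch
from torch.distributions import Normal

loc = torch.zeros(3)         # vector of locations
scale = torch.tensor(2.0)    # scalar scale, broadcast against loc
dist = _define_transdist(loc, scale, Normal(0.0, 1.0), ndim=0)
print(dist.sample().shape)   # torch.Size([3])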
Example 7: proj_tangent
# Required module: import torch [as alias]
# Or: from torch import broadcast_tensors [as alias]
def proj_tangent(x, u):
    assert x.shape[-2:] == u.shape[-2:], "Wrong shapes"
    x, u = torch.broadcast_tensors(x, u)
    x_shape = x.shape
    x = x.reshape(-1, x_shape[-2], x_shape[-1])
    u = u.reshape(-1, x_shape[-2], x_shape[-1])
    xt = x.transpose(-1, -2)
    batch_size, n = x.shape[0:2]
    I = torch.eye(n, dtype=x.dtype, device=x.device)
    I = I.expand_as(x)

    mu = x * u
    A = linalg.block_matrix([[I, x], [xt, I]])
    B = A[:, :, 1:]
    z1 = mu.sum(dim=-1).unsqueeze(-1)
    zt1 = mu.sum(dim=-2).unsqueeze(-1)
    b = torch.cat([z1, zt1], dim=1)
    rhs = B.transpose(1, 2) @ (b - A[:, :, 0:1])
    lhs = B.transpose(1, 2) @ B
    # torch.solve(rhs, lhs) solves lhs @ zeta = rhs; it is deprecated in newer
    # PyTorch in favor of torch.linalg.solve(lhs, rhs).
    zeta, _ = torch.solve(rhs, lhs)
    alpha = torch.cat(
        [torch.ones(batch_size, 1, 1, dtype=x.dtype, device=x.device), zeta[:, 0 : n - 1]],
        dim=1,
    )
    beta = zeta[:, n - 1 : 2 * n - 1]
    rgrad = mu - (alpha + beta.transpose(-1, -2)) * x

    rgrad = rgrad.reshape(x_shape)
    return rgrad
Example 8: _pointwise_loss
# Required module: import torch [as alias]
# Or: from torch import broadcast_tensors [as alias]
def _pointwise_loss(lambd, lambd_optimized, input, target, reduction='mean'):
    if target.requires_grad:
        d = lambd(input, target)
        if reduction == 'none':
            return d
        return torch.mean(d) if reduction == 'mean' else torch.sum(d)
    else:
        # Broadcast once up front so the optimized kernel sees matching shapes.
        expanded_input, expanded_target = torch.broadcast_tensors(input, target)
        return lambd_optimized(expanded_input, expanded_target, _Reduction.get_enum(reduction))
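Given the dispatcher above, l1_loss can be expressed in a single call. A sketch under the assumption that torch.nn's private _reduction module is importable (torch._C._nn.l1_loss is a private kernel, so treat this as illustrative only):

import torch
from torch.nn import _reduction as _Reduction  # used inside _pointwise_loss

input = torch.randn(2, 3)
target = torch.randn(2, 3)
loss = _pointwise_loss(
    lambda a, b: torch.abs(a - b),  # Python path, taken when target requires grad
    torch._C._nn.l1_loss,           # fused kernel path, taken otherwise
    input,
    target,
)
print(loss)  # scalar mean absolute error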
Example 9: _feature_dropout
# Required module: import torch [as alias]
# Or: from torch import broadcast_tensors [as alias]
def _feature_dropout(self, p=0.5, training=True, inplace=False):
    r"""Randomly zeros out entire channels in the input tensor with probability
    :attr:`p`. (a channel is a nD feature map, e.g., the :math:`j`-th channel
    of the :math:`i`-th sample in the batched input is a nD tensor
    :math:`\text{input}[i, j]`)."""
    assert self.dim() >= 2, "feature dropout requires dimension to be at least 2"
    assert p >= 0.0 and p <= 1.0, "dropout probability has to be between 0 and 1"
    if not training:
        if inplace:
            return self
        else:
            return self.clone()

    # take first 2 dimensions
    feature_dropout_size = self.size()[0:2]
    # create dropout tensor over the first two dimensions
    rand_tensor = MPCTensor.rand(feature_dropout_size, device=self.device)
    feature_dropout_tensor = rand_tensor > p
    # Broadcast to remaining dimensions
    for i in range(2, self.dim()):
        feature_dropout_tensor = feature_dropout_tensor.unsqueeze(i)
    feature_dropout_tensor.share, self.share = torch.broadcast_tensors(
        feature_dropout_tensor.share, self.share
    )
    if inplace:
        result_tensor = self.mul_(feature_dropout_tensor).div_(1 - p)
    else:
        result_tensor = self.mul(feature_dropout_tensor).div_(1 - p)
    return result_tensor
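A plain-PyTorch sketch of the same channel-mask idea, without any CrypTen machinery:

import torch

x = torch.randn(8, 4, 16, 16)               # (batch, channels, H, W)
p = 0.5
mask = (torch.rand(8, 4) > p).float()       # one keep/drop decision per channel
mask = mask.unsqueeze(-1).unsqueeze(-1)     # shape (8, 4, 1, 1)
mask, _ = torch.broadcast_tensors(mask, x)  # expand the mask to (8, 4, 16, 16)
y = x * mask / (1 - p)                      # inverted-dropout rescaling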
Example 10: div
# Required module: import torch [as alias]
# Or: from torch import broadcast_tensors [as alias]
def div(self, y):
    """Divide by a given tensor"""
    result = self.clone()
    if isinstance(y, CrypTensor):
        # Same pre-broadcast pattern as Example 4: expand the share first,
        # since the in-place div_ cannot grow the tensor.
        result.share = torch.broadcast_tensors(result.share, y.share)[0].clone()
    elif is_tensor(y):
        result.share = torch.broadcast_tensors(result.share, y)[0].clone()
    return result.div_(y)
Example 11: __xor__
# Required module: import torch [as alias]
# Or: from torch import broadcast_tensors [as alias]
def __xor__(self, y):
    """Bitwise XOR operator (element-wise)"""
    result = self.clone()
    if isinstance(y, BinarySharedTensor):
        broadcast_tensors = torch.broadcast_tensors(result.share, y.share)
        result.share = broadcast_tensors[0].clone()
    elif is_tensor(y):
        broadcast_tensors = torch.broadcast_tensors(result.share, y)
        result.share = broadcast_tensors[0].clone()
    return result.__ixor__(y)
Example 12: __and__
# Required module: import torch [as alias]
# Or: from torch import broadcast_tensors [as alias]
def __and__(self, y):
    """Bitwise AND operator (element-wise)"""
    result = self.clone()
    # TODO: Remove explicit broadcasts to allow smaller beaver triples
    if isinstance(y, BinarySharedTensor):
        broadcast_tensors = torch.broadcast_tensors(result.share, y.share)
        result.share = broadcast_tensors[0].clone()
    elif is_tensor(y):
        broadcast_tensors = torch.broadcast_tensors(result.share, y)
        result.share = broadcast_tensors[0].clone()
    return result.__iand__(y)
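Both bitwise operators rely on the same pre-broadcast-then-in-place pattern as div above. A minimal integer-tensor sketch of why the explicit broadcast is needed before the in-place call:

import torch

a = torch.tensor([[1], [2]])                  # shape (2, 1)
b = torch.tensor([1, 3])                      # shape (2,)
a = torch.broadcast_tensors(a, b)[0].clone()  # (2, 2); in-place __iand__ cannot grow a
a &= b
print(a)                                      # tensor([[1, 1], [0, 2]])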
Example 13: broadcast_tensors
# Required module: import torch [as alias]
# Or: from torch import broadcast_tensors [as alias]
def broadcast_tensors(*tensors):
    # Unwrap to the underlying torch tensors, broadcast, then re-wrap.
    tensor_list = [t.data for t in tensors]
    results = torch.broadcast_tensors(*tensor_list)
    results = [CUDALongTensor(t) for t in results]
    return results
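The unwrap-call-rewrap pattern generalizes to any tensor wrapper class. A self-contained toy version (MyWrapper is hypothetical, standing in for CUDALongTensor):

import torch

class MyWrapper:
    # Hypothetical wrapper storing a tensor in .data, mimicking CUDALongTensor.
    def __init__(self, data):
        self.data = data

def broadcast_wrapped(*tensors):
    raw = [t.data for t in tensors]  # unwrap to plain tensors
    return [MyWrapper(t) for t in torch.broadcast_tensors(*raw)]  # broadcast, rewrap

a, b = broadcast_wrapped(MyWrapper(torch.zeros(1, 3)), MyWrapper(torch.ones(4, 1)))
print(a.data.shape, b.data.shape)  # torch.Size([4, 3]) torch.Size([4, 3])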
Example 14: forward
# Required module: import torch [as alias]
# Or: from torch import broadcast_tensors [as alias]
def forward(ctx, input, p=0.5, training=True, inplace=False):
    # inference mode:
    if not training:
        if inplace:
            return input
        else:
            return input.clone()

    # training mode:
    feature_dropout_size = input.size()[0:2]
    cryptensor_type = crypten.get_cryptensor_type(input)
    rand_tensor = crypten.rand(
        feature_dropout_size, cryptensor_type=cryptensor_type
    )
    boolean_mask = rand_tensor > p
    for i in range(2, input.dim()):
        boolean_mask = boolean_mask.unsqueeze(i)
    # Expand the mask's share to the input's full shape before multiplying.
    boolean_mask.share, tensor = torch.broadcast_tensors(
        boolean_mask.share, input.share
    )
    if inplace:
        result = input.mul_(boolean_mask).div_(1 - p)
    else:
        result = input.mul(boolean_mask).div_(1 - p)
    ctx.save_multiple_for_backward([boolean_mask, p])
    return result
Example 15: broadcast_and_squeeze
# Required module: import torch [as alias]
# Or: from torch import broadcast_tensors [as alias]
def broadcast_and_squeeze(*args):
    assert all([is_tensor(ar) for ar in args]), 'at least 1 object is not torch tensor'
    # If all trailing (spatial) dims are singletons, collapse them to 1x1 first.
    if all([np.prod(val.shape[2:]) == 1 for val in args]):
        args = [val.contiguous().view(size=val.shape[:2] + tuple([1, 1])) for val in args]
    uniformed_values = uniform_shapes(*args)  # helper defined elsewhere in the source module
    broadcasted_values = torch.broadcast_tensors(*uniformed_values)
    return broadcasted_values