This article collects typical usage examples of the torch._C attribute in Python (torch._C is PyTorch's compiled C++ extension module, which backs much of torch.nn.functional). If you are wondering what torch._C is for or how to use it, the curated examples below may help; they also show how the attribute fits into the torch package it belongs to.
The following 15 code examples of the torch._C attribute are presented, ordered by popularity by default.
Example 1: max_unpool1d
# Required module: import torch [as alias]
# Or: from torch import _C [as alias]
def max_unpool1d(input, indices, kernel_size, stride=None, padding=0,
                 output_size=None):
    # type: (Tensor, Tensor, BroadcastingList1[int], Optional[BroadcastingList1[int]], BroadcastingList1[int], Optional[BroadcastingList1[int]]) -> Tensor  # noqa
    r"""Computes a partial inverse of :class:`MaxPool1d`.

    See :class:`~torch.nn.MaxUnpool1d` for details.
    """
    kernel_size = _single(kernel_size)
    if stride is not None:
        _stride = _single(torch.jit._unwrap_optional(stride))
    else:
        _stride = kernel_size
    padding = _single(padding)
    output_size = _unpool_output_size(input, kernel_size, _stride, padding,
                                      output_size)
    # 1D unpooling delegates to the 2D kernel: append a unit dimension,
    # unpool in 2D, then squeeze the unit dimension away.
    return torch._C._nn.max_unpool2d(input.unsqueeze(3),
                                     indices.unsqueeze(3),
                                     output_size + [1]).squeeze(3)
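A minimal usage sketch through the public wrapper (torch.nn.functional.max_unpool1d), pairing the unpool with the pool call that produced the indices:

import torch
import torch.nn.functional as F

x = torch.arange(8, dtype=torch.float32).reshape(1, 1, 8)  # (N, C, L)
pooled, indices = F.max_pool1d(x, kernel_size=2, return_indices=True)
restored = F.max_unpool1d(pooled, indices, kernel_size=2)  # zeros at non-max positions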
Example 2: max_unpool2d
# Required module: import torch [as alias]
# Or: from torch import _C [as alias]
def max_unpool2d(input, indices, kernel_size, stride=None, padding=0,
                 output_size=None):
    # type: (Tensor, Tensor, BroadcastingList2[int], Optional[BroadcastingList2[int]], BroadcastingList2[int], Optional[BroadcastingList2[int]]) -> Tensor  # noqa
    r"""Computes a partial inverse of :class:`MaxPool2d`.

    See :class:`~torch.nn.MaxUnpool2d` for details.
    """
    kernel_size = _pair(kernel_size)
    if stride is not None:
        _stride = _pair(torch.jit._unwrap_optional(stride))
    else:
        _stride = kernel_size
    padding = _pair(padding)
    output_size = _unpool_output_size(input, kernel_size, _stride, padding,
                                      output_size)
    return torch._C._nn.max_unpool2d(input, indices, output_size)
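The same round trip in 2D, via the public API:

import torch
import torch.nn.functional as F

x = torch.randn(1, 1, 4, 4)
pooled, indices = F.max_pool2d(x, kernel_size=2, return_indices=True)
restored = F.max_unpool2d(pooled, indices, kernel_size=2)  # back to (1, 1, 4, 4)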
Example 3: adaptive_avg_pool3d
# Required module: import torch [as alias]
# Or: from torch import _C [as alias]
def adaptive_avg_pool3d(input, output_size):
    # type: (Tensor, BroadcastingList3[int]) -> Tensor
    r"""
    Applies a 3D adaptive average pooling over an input signal composed of
    several input planes.

    See :class:`~torch.nn.AdaptiveAvgPool3d` for details and output shape.

    Args:
        output_size: the target output size (single integer or
            triple-integer tuple)
    """
    _output_size = _list_with_default(output_size, input.size())
    return torch._C._nn.adaptive_avg_pool3d(input, _output_size)
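Adaptive pooling fixes the output size rather than the kernel size, so the same call works for any input resolution. A usage sketch:

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 8, 10, 12)
out = F.adaptive_avg_pool3d(x, (2, 2, 2))  # (1, 3, 2, 2, 2) for any input D/H/W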
Example 4: glu
# Required module: import torch [as alias]
# Or: from torch import _C [as alias]
def glu(input, dim=-1):
    # type: (Tensor, int) -> Tensor
    r"""
    glu(input, dim=-1) -> Tensor

    The gated linear unit. Computes:

    .. math::
        H = A \times \sigma(B)

    where `input` is split in half along `dim` to form `A` and `B`.

    See `Language Modeling with Gated Convolutional Networks <https://arxiv.org/abs/1612.08083>`_.

    Args:
        input (Tensor): input tensor
        dim (int): dimension on which to split the input
    """
    if input.dim() == 0:
        raise RuntimeError("glu does not support scalars because halving size must be even")
    return torch._C._nn.glu(input, dim)
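Usage sketch; the split dimension must have even size:

import torch
import torch.nn.functional as F

x = torch.randn(4, 6)
out = F.glu(x, dim=-1)  # A, B = x.chunk(2, dim=-1); out = A * torch.sigmoid(B); shape (4, 3)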
Example 5: smooth_l1_loss
# Required module: import torch [as alias]
# Or: from torch import _C [as alias]
def smooth_l1_loss(input, target, size_average=None, reduce=None, reduction='mean'):
    # type: (Tensor, Tensor, Optional[bool], Optional[bool], str) -> Tensor
    r"""Function that uses a squared term if the absolute
    element-wise error falls below 1 and an L1 term otherwise.

    See :class:`~torch.nn.SmoothL1Loss` for details.
    """
    if size_average is not None or reduce is not None:
        reduction = _Reduction.legacy_get_string(size_average, reduce)
    if target.requires_grad:
        # Compute in Python when the target needs gradients; the C++ kernel
        # below does not differentiate through `target`.
        ret = _smooth_l1_loss(input, target)
        if reduction != 'none':
            ret = torch.mean(ret) if reduction == 'mean' else torch.sum(ret)
    else:
        expanded_input, expanded_target = torch.broadcast_tensors(input, target)
        ret = torch._C._nn.smooth_l1_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction))
    return ret
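Usage sketch:

import torch
import torch.nn.functional as F

pred = torch.randn(8, requires_grad=True)
target = torch.randn(8)
loss = F.smooth_l1_loss(pred, target, reduction='mean')  # quadratic for |error| < 1, linear beyond
loss.backward()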
Example 6: l1_loss
# Required module: import torch [as alias]
# Or: from torch import _C [as alias]
def l1_loss(input, target, size_average=None, reduce=None, reduction='mean'):
    # type: (Tensor, Tensor, Optional[bool], Optional[bool], str) -> Tensor
    r"""l1_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor

    Function that takes the mean element-wise absolute value difference.

    See :class:`~torch.nn.L1Loss` for details.
    """
    if size_average is not None or reduce is not None:
        reduction = _Reduction.legacy_get_string(size_average, reduce)
    if target.requires_grad:
        ret = torch.abs(input - target)
        if reduction != 'none':
            ret = torch.mean(ret) if reduction == 'mean' else torch.sum(ret)
    else:
        expanded_input, expanded_target = torch.broadcast_tensors(input, target)
        ret = torch._C._nn.l1_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction))
    return ret
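Usage sketch with a hand-checkable result:

import torch
import torch.nn.functional as F

loss = F.l1_loss(torch.tensor([1.0, 2.0]), torch.tensor([1.5, 1.0]))
# mean(|1.0 - 1.5|, |2.0 - 1.0|) = mean(0.5, 1.0) = 0.75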
Example 7: multi_margin_loss
# Required module: import torch [as alias]
# Or: from torch import _C [as alias]
def multi_margin_loss(input, target, p=1, margin=1., weight=None, size_average=None,
                      reduce=None, reduction='mean'):
    # type: (Tensor, Tensor, int, float, Optional[Tensor], Optional[bool], Optional[bool], str) -> Tensor
    r"""multi_margin_loss(input, target, p=1, margin=1, weight=None, size_average=None,
                          reduce=None, reduction='mean') -> Tensor

    See :class:`~torch.nn.MultiMarginLoss` for details.
    """
    if size_average is not None or reduce is not None:
        reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
    else:
        reduction_enum = _Reduction.get_enum(reduction)
    if p != 1 and p != 2:
        raise ValueError('only p == 1 and p == 2 supported')
    if weight is not None:
        weight = torch.jit._unwrap_optional(weight)
        if weight.dim() != 1:
            raise ValueError('weight must be one-dimensional')
    return torch._C._nn.multi_margin_loss(input, target, p, margin, weight, reduction_enum)
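Usage sketch: input holds per-class scores, target holds one class index per sample:

import torch
import torch.nn.functional as F

scores = torch.randn(3, 5)        # 3 samples, 5 classes
labels = torch.tensor([1, 0, 4])  # target class per sample
loss = F.multi_margin_loss(scores, labels, p=1, margin=1.0)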
Example 8: max_pool2d_with_indices
# Required module: import torch [as alias]
# Or: from torch import _C [as alias]
def max_pool2d_with_indices(input, kernel_size, stride=None, padding=0, dilation=1,
                            ceil_mode=False, return_indices=False):
    # type: (Tensor, BroadcastingList2[int], Optional[BroadcastingList2[int]], BroadcastingList2[int], BroadcastingList2[int], bool, bool) -> Tuple[Tensor, Tensor]  # noqa
    r"""Applies a 2D max pooling over an input signal composed of several input
    planes.

    See :class:`~torch.nn.MaxPool2d` for details.
    """
    if stride is None:
        # An empty stride list tells the backend to default the stride
        # to kernel_size.
        _stride = torch.jit.annotate(List[int], [])
    else:
        _stride = torch.jit._unwrap_optional(stride)
    return torch._C._nn.max_pool2d_with_indices(input, kernel_size, _stride, padding, dilation, ceil_mode)
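This `_with_indices` variant always returns an (output, indices) pair; through the public API it is reached by asking F.max_pool2d for indices:

import torch
import torch.nn.functional as F

x = torch.randn(1, 1, 4, 4)
out, idx = F.max_pool2d(x, kernel_size=2, return_indices=True)  # idx feeds max_unpool2d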
Example 9: max_pool3d_with_indices
# Required module: import torch [as alias]
# Or: from torch import _C [as alias]
def max_pool3d_with_indices(input, kernel_size, stride=None, padding=0,
                            dilation=1, ceil_mode=False, return_indices=False):
    # type: (Tensor, BroadcastingList3[int], Optional[BroadcastingList3[int]], BroadcastingList3[int], BroadcastingList3[int], bool, bool) -> Tuple[Tensor, Tensor]  # noqa
    r"""Applies a 3D max pooling over an input signal composed of several input
    planes.

    See :class:`~torch.nn.MaxPool3d` for details.
    """
    if stride is None:
        _stride = torch.jit.annotate(List[int], [])
    else:
        _stride = torch.jit._unwrap_optional(stride)
    return torch._C._nn.max_pool3d_with_indices(
        input, kernel_size, _stride, padding, dilation, ceil_mode)
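The 3D counterpart, through the public API:

import torch
import torch.nn.functional as F

x = torch.randn(1, 1, 4, 4, 4)
out, idx = F.max_pool3d(x, kernel_size=2, return_indices=True)  # out: (1, 1, 2, 2, 2)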
Example 10: adaptive_max_pool2d_with_indices
# Required module: import torch [as alias]
# Or: from torch import _C [as alias]
def adaptive_max_pool2d_with_indices(input, output_size, return_indices=False):
    # type: (Tensor, BroadcastingList2[int], bool) -> Tuple[Tensor, Tensor]
    r"""Applies a 2D adaptive max pooling over an input signal composed of
    several input planes.

    See :class:`~torch.nn.AdaptiveMaxPool2d` for details and output shape.

    Args:
        output_size: the target output size (single integer or
            double-integer tuple)
        return_indices: whether to return pooling indices. Default: ``False``
    """
    output_size = _list_with_default(output_size, input.size())
    return torch._C._nn.adaptive_max_pool2d(input, output_size)
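Usage sketch via the public API:

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 9, 11)
out, idx = F.adaptive_max_pool2d(x, (2, 2), return_indices=True)  # out: (1, 3, 2, 2)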
Example 11: adaptive_max_pool3d_with_indices
# Required module: import torch [as alias]
# Or: from torch import _C [as alias]
def adaptive_max_pool3d_with_indices(input, output_size, return_indices=False):
    # type: (Tensor, BroadcastingList3[int], bool) -> Tuple[Tensor, Tensor]
    r"""Applies a 3D adaptive max pooling over an input signal composed of
    several input planes.

    See :class:`~torch.nn.AdaptiveMaxPool3d` for details and output shape.

    Args:
        output_size: the target output size (single integer or
            triple-integer tuple)
        return_indices: whether to return pooling indices. Default: ``False``
    """
    output_size = _list_with_default(output_size, input.size())
    return torch._C._nn.adaptive_max_pool3d(input, output_size)
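And in 3D:

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 5, 6, 7)
out, idx = F.adaptive_max_pool3d(x, (2, 2, 2), return_indices=True)  # out: (1, 3, 2, 2, 2)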
Example 12: adaptive_avg_pool2d
# Required module: import torch [as alias]
# Or: from torch import _C [as alias]
def adaptive_avg_pool2d(input, output_size):
    # type: (Tensor, BroadcastingList2[int]) -> Tensor
    r"""
    Applies a 2D adaptive average pooling over an input signal composed of
    several input planes.

    See :class:`~torch.nn.AdaptiveAvgPool2d` for details and output shape.

    Args:
        output_size: the target output size (single integer or
            double-integer tuple)
    """
    _output_size = _list_with_default(output_size, input.size())
    return torch._C._nn.adaptive_avg_pool2d(input, _output_size)
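With output size (1, 1) this is global average pooling, a common classifier-head pattern:

import torch
import torch.nn.functional as F

feat = torch.randn(1, 512, 7, 7)
pooled = F.adaptive_avg_pool2d(feat, (1, 1)).flatten(1)  # (1, 512)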
Example 13: elu
# Required module: import torch [as alias]
# Or: from torch import _C [as alias]
def elu(input, alpha=1., inplace=False):
    # type: (Tensor, float, bool) -> Tensor
    r"""Applies element-wise,
    :math:`\text{ELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x) - 1))`.

    See :class:`~torch.nn.ELU` for more details.
    """
    if inplace:
        result = torch._C._nn.elu_(input, alpha)
    else:
        result = torch._C._nn.elu(input, alpha)
    return result
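Usage sketch; note the trailing underscore on elu_ marks the in-place variant:

import torch
import torch.nn.functional as F

x = torch.tensor([-1.0, 0.0, 1.0])
F.elu(x)  # tensor([-0.6321, 0.0000, 1.0000]); -0.6321 ≈ exp(-1) - 1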
Example 14: leaky_relu
# Required module: import torch [as alias]
# Or: from torch import _C [as alias]
def leaky_relu(input, negative_slope=0.01, inplace=False):
    # type: (Tensor, float, bool) -> Tensor
    r"""
    leaky_relu(input, negative_slope=0.01, inplace=False) -> Tensor

    Applies element-wise,
    :math:`\text{LeakyReLU}(x) = \max(0, x) + \text{negative\_slope} * \min(0, x)`

    See :class:`~torch.nn.LeakyReLU` for more details.
    """
    if inplace:
        result = torch._C._nn.leaky_relu_(input, negative_slope)
    else:
        result = torch._C._nn.leaky_relu(input, negative_slope)
    return result
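Usage sketch:

import torch
import torch.nn.functional as F

x = torch.tensor([-2.0, 3.0])
F.leaky_relu(x, negative_slope=0.1)  # tensor([-0.2000, 3.0000])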
Example 15: multilabel_margin_loss
# Required module: import torch [as alias]
# Or: from torch import _C [as alias]
def multilabel_margin_loss(input, target, size_average=None, reduce=None, reduction='mean'):
    # type: (Tensor, Tensor, Optional[bool], Optional[bool], str) -> Tensor
    r"""multilabel_margin_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor

    See :class:`~torch.nn.MultiLabelMarginLoss` for details.
    """
    if size_average is not None or reduce is not None:
        reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
    else:
        reduction_enum = _Reduction.get_enum(reduction)
    return torch._C._nn.multilabel_margin_loss(input, target, reduction_enum)
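Usage sketch; each target row lists that sample's label indices, padded with -1:

import torch
import torch.nn.functional as F

scores = torch.randn(2, 4)
labels = torch.tensor([[3, 0, -1, -1], [1, 2, 3, -1]])  # -1 terminates each label list
loss = F.multilabel_margin_loss(scores, labels)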