本文整理汇总了Python中torch.nn.modules.utils._pair方法的典型用法代码示例。如果您正苦于以下问题:Python utils._pair方法的具体用法?Python utils._pair怎么用?Python utils._pair使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类torch.nn.modules.utils
的用法示例。
在下文中一共展示了utils._pair方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
# 需要导入模块: from torch.nn.modules import utils [as 别名]
# 或者: from torch.nn.modules.utils import _pair [as 别名]
def __init__(self,
             spatial_scale,
             out_size,
             out_channels,
             no_trans,
             group_size=1,
             part_size=None,
             sample_per_part=4,
             trans_std=.0):
    """Base deformable RoI pooling layer: stores pooling hyper-parameters.

    Args:
        spatial_scale: scale factor mapping input boxes to feature-map coords.
        out_size: pooled output size; normalized to an (h, w) pair.
        out_channels: number of output channels.
        no_trans: if True, no offset transformation branch is used.
        group_size: number of position-sensitive groups.
        part_size: part size for the offset branch; defaults to ``out_size``.
        sample_per_part: sampling points per bin.
        trans_std: std multiplier applied to the predicted offsets.
    """
    super(DeformRoIPooling, self).__init__()
    self.spatial_scale = spatial_scale
    # Accept an int or an (h, w) tuple for the pooled size.
    self.out_size = _pair(out_size)
    self.out_channels = out_channels
    self.no_trans = no_trans
    self.group_size = group_size
    # Fall back to the raw (un-paired) out_size when no part size is given.
    self.part_size = part_size if part_size is not None else out_size
    self.sample_per_part = sample_per_part
    self.trans_std = trans_std
示例2: forward
# 需要导入模块: from torch.nn.modules import utils [as 别名]
# 或者: from torch.nn.modules.utils import _pair [as 别名]
def forward(ctx, features, rois, out_size, spatial_scale):
    """RoI max pooling forward via the compiled CUDA extension.

    Saves ``rois`` plus bookkeeping on ``ctx`` for the backward pass and
    returns the pooled feature tensor of shape (num_rois, C, out_h, out_w).
    """
    assert features.is_cuda  # the extension only implements the CUDA path
    pooled_h, pooled_w = _pair(out_size)
    assert isinstance(pooled_h, int) and isinstance(pooled_w, int)
    ctx.save_for_backward(rois)
    out_shape = (rois.size(0), features.size(1), pooled_h, pooled_w)
    output = features.new_zeros(out_shape)
    # argmax records, per output cell, which input location won the max —
    # needed to route gradients in backward.
    argmax = features.new_zeros(out_shape, dtype=torch.int)
    roi_pool_ext.forward(features, rois, pooled_h, pooled_w, spatial_scale,
                         output, argmax)
    ctx.spatial_scale = spatial_scale
    ctx.feature_size = features.size()
    ctx.argmax = argmax
    return output
示例3: __init__
# 需要导入模块: from torch.nn.modules import utils [as 别名]
# 或者: from torch.nn.modules.utils import _pair [as 别名]
def __init__(self, out_size, spatial_scale, aligned=True):
    """Simple RoI align used in PointRend; faster than standard RoIAlign.

    Args:
        out_size (tuple[int]): output (h, w).
        spatial_scale (float): scale applied to the input boxes.
        aligned (bool): if False, fall back to the legacy MMDetection
            implementation (align_corners=True in F.grid_sample); if True,
            produce better-aligned results.
    """
    super(SimpleRoIAlign, self).__init__()
    self.spatial_scale = float(spatial_scale)
    self.out_size = _pair(out_size)
    self.aligned = aligned
    # to be consistent with other RoI ops
    self.use_torchvision = False
示例4: _check_roi_extractor
# 需要导入模块: from torch.nn.modules import utils [as 别名]
# 或者: from torch.nn.modules.utils import _pair [as 别名]
def _check_roi_extractor(config, roi_extractor, prev_roi_extractor=None):
import torch.nn as nn
if isinstance(roi_extractor, nn.ModuleList):
if prev_roi_extractor:
prev_roi_extractor = prev_roi_extractor[0]
roi_extractor = roi_extractor[0]
assert (len(config.featmap_strides) == len(roi_extractor.roi_layers))
assert (config.out_channels == roi_extractor.out_channels)
from torch.nn.modules.utils import _pair
assert (_pair(
config.roi_layer.out_size) == roi_extractor.roi_layers[0].out_size)
if 'use_torchvision' in config.roi_layer:
assert (config.roi_layer.use_torchvision ==
roi_extractor.roi_layers[0].use_torchvision)
elif 'aligned' in config.roi_layer:
assert (
config.roi_layer.aligned == roi_extractor.roi_layers[0].aligned)
if prev_roi_extractor:
assert (roi_extractor.roi_layers[0].aligned ==
prev_roi_extractor.roi_layers[0].aligned)
assert (roi_extractor.roi_layers[0].use_torchvision ==
prev_roi_extractor.roi_layers[0].use_torchvision)
示例5: __init__
# 需要导入模块: from torch.nn.modules import utils [as 别名]
# 或者: from torch.nn.modules.utils import _pair [as 别名]
def __init__(self, c_in, c_out, k_size, stride=1, pad=0, bias=True):
    """Conv layer with equalized learning rate (He-style runtime scaling).

    Args:
        c_in: input channels.
        c_out: output channels.
        k_size: kernel size (int or (h, w) pair).
        stride: conv stride.
        pad: conv padding.
        bias: whether to register a (zero-initialized) bias parameter.
    """
    from numpy import prod, sqrt
    from torch.nn.modules.utils import _pair
    super().__init__()
    kernel_hw = _pair(k_size)
    # Weights are drawn from a standard normal; the per-layer scale below
    # applies He initialization at runtime instead of at init time.
    self.weight = th.nn.Parameter(th.nn.init.normal_(
        th.empty(c_out, c_in, *kernel_hw)))
    self.use_bias = bias
    self.stride = stride
    self.pad = pad
    if self.use_bias:
        self.bias = th.nn.Parameter(th.FloatTensor(c_out).fill_(0))
    fan_in = c_in * prod(kernel_hw)  # value of fan_in
    self.scale = sqrt(2) / sqrt(fan_in)
示例6: __init__
# 需要导入模块: from torch.nn.modules import utils [as 别名]
# 或者: from torch.nn.modules.utils import _pair [as 别名]
def __init__(
    self,
    in_channels,
    out_channels,
    kernel_size,
    stride=1,
    padding=0,
    dilation=1,
    groups=1,
    deformable_groups=1,
    bias=False
):
    """Deformable conv that predicts its own sampling offsets.

    The parent class builds the deformable conv itself; this adds a regular
    conv branch producing the offset field consumed by it.
    """
    super(DeformConvPack, self).__init__(
        in_channels, out_channels, kernel_size, stride, padding, dilation,
        groups, deformable_groups, bias)
    # Two offset channels (dy, dx) per kernel location per deformable group.
    offset_channels = (self.deformable_groups * 2 *
                       self.kernel_size[0] * self.kernel_size[1])
    self.conv_offset = nn.Conv2d(
        self.in_channels,
        offset_channels,
        kernel_size=self.kernel_size,
        stride=_pair(self.stride),
        padding=_pair(self.padding),
        bias=True)
    self.init_offset()
示例7: __init__
# 需要导入模块: from torch.nn.modules import utils [as 别名]
# 或者: from torch.nn.modules.utils import _pair [as 别名]
def __init__(self, in_channels, out_channels,
             kernel_size, stride, padding, dilation=1, groups=1, deformable_groups=1, bias=True):
    """Pure-Python reference deformable conv; stores conv hyper-parameters."""
    super(deform_conv2d_naive, self).__init__()
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.groups = groups
    self.deformable_groups = deformable_groups
    self.use_bias = bias
    # Normalize every spatial hyper-parameter to an (h, w) pair.
    self.kernel_size = _pair(kernel_size)
    self.stride = _pair(stride)
    self.padding = _pair(padding)
    self.dilation = _pair(dilation)
    self.weight = nn.Parameter(torch.Tensor(
        out_channels, in_channels // groups, *self.kernel_size))
    # A bias parameter is always registered; when unused it is frozen at zero
    # (after reset_parameters, so the reset cannot re-randomize it).
    self.bias = nn.Parameter(torch.Tensor(out_channels))
    self.reset_parameters()
    if not self.use_bias:
        self.bias.requires_grad = False
        self.bias.data.zero_()
示例8: forward
# 需要导入模块: from torch.nn.modules import utils [as 别名]
# 或者: from torch.nn.modules.utils import _pair [as 别名]
def forward(ctx, input, offset, weight, bias,
            stride, padding, dilation, group, deformable_groups, im2col_step):
    """Deformable conv v1 forward via the DCN extension.

    Hyper-parameters are normalized to pairs and stashed on ``ctx`` so the
    backward pass can replay the same configuration.
    """
    ctx.kernel_size = _pair(weight.shape[2:4])
    ctx.stride = _pair(stride)
    ctx.padding = _pair(padding)
    ctx.dilation = _pair(dilation)
    ctx.group = group
    ctx.deformable_groups = deformable_groups
    ctx.im2col_step = im2col_step
    kh, kw = ctx.kernel_size
    output = DCN.deform_conv2d_forward(
        input, weight, bias, offset,
        kh, kw,
        ctx.stride[0], ctx.stride[1],
        ctx.padding[0], ctx.padding[1],
        ctx.dilation[0], ctx.dilation[1],
        ctx.group, ctx.deformable_groups, ctx.im2col_step)
    ctx.save_for_backward(input, offset, weight, bias)
    return output
示例9: forward
# 需要导入模块: from torch.nn.modules import utils [as 别名]
# 或者: from torch.nn.modules.utils import _pair [as 别名]
def forward(ctx, input, offset, mask, weight, bias,
            stride, padding, dilation, groups, deformable_groups, im2col_step):
    """Modulated deformable conv (DCNv2) forward via the DCN extension.

    Hyper-parameters are normalized to pairs and stashed on ``ctx`` for the
    backward pass; the modulation ``mask`` is saved alongside the offsets.
    """
    ctx.kernel_size = _pair(weight.shape[2:4])
    ctx.stride = _pair(stride)
    ctx.padding = _pair(padding)
    ctx.dilation = _pair(dilation)
    ctx.groups = groups
    ctx.deformable_groups = deformable_groups
    ctx.im2col_step = im2col_step
    kh, kw = ctx.kernel_size
    output = DCN.modulated_deform_conv2d_forward(
        input, weight, bias, offset, mask,
        kh, kw,
        ctx.stride[0], ctx.stride[1],
        ctx.padding[0], ctx.padding[1],
        ctx.dilation[0], ctx.dilation[1],
        ctx.groups, ctx.deformable_groups, ctx.im2col_step)
    ctx.save_for_backward(input, offset, mask, weight, bias)
    return output
示例10: forward
# 需要导入模块: from torch.nn.modules import utils [as 别名]
# 或者: from torch.nn.modules.utils import _pair [as 别名]
def forward(self, x):
    # Multi-branch convolution: for each scale s the kernel's spatial extent
    # is shrunk by s on every side and applied to a channel slice of x; the
    # partial outputs are concatenated along the channel dimension.
    x_list = []
    s_num = self.s_num
    ch_ratio = (1+self.delta/self.g)
    ch_len = self.in_channels - self.delta
    for s in range(s_num):
        for start in range(0, self.delta+1, self.g):
            # NOTE(review): kernel_size[0] and x.size(2) are used for both
            # spatial dims — this assumes square kernels and square inputs;
            # confirm with the callers.
            weight1 = self.weight[:, :ch_len, s:self.kernel_size[0]-s, s:self.kernel_size[0]-s]
            if self.padding[0]-s < 0:
                # Effective padding would be negative: crop the input instead
                # and convolve with zero padding.
                h = x.size(2)
                x1 = x[:,start:start+ch_len,s:h-s,s:h-s]
                padding1 = _pair(0)
            else:
                x1 = x[:,start:start+ch_len,:,:]
                padding1 = _pair(self.padding[0]-s)
            # The bias slice selects the output-channel range owned by this
            # (s, start) branch.
            x_list.append(F.conv2d(x1, weight1, self.bias[int(self.out_channels*(s*ch_ratio+start)/s_num/ch_ratio):int(self.out_channels*(s*ch_ratio+start+1)/s_num/ch_ratio)], self.stride,
                          padding1, self.dilation, self.groups))
    x = torch.cat(x_list, 1)
    return x
示例11: _conv_forward
# 需要导入模块: from torch.nn.modules import utils [as 别名]
# 或者: from torch.nn.modules.utils import _pair [as 别名]
def _conv_forward(self, input, weight):
if self.padding_mode != "zeros":
return F.conv2d(
F.pad(input, self._padding_repeated_twice, mode=self.padding_mode),
weight,
self.bias,
self.stride,
_pair(0),
self.dilation,
self.groups,
)
return F.conv2d(
input,
weight,
self.bias,
self.stride,
self.padding,
self.dilation,
self.groups,
)
示例12: forward
# 需要导入模块: from torch.nn.modules import utils [as 别名]
# 或者: from torch.nn.modules.utils import _pair [as 别名]
def forward(ctx, input, offset, mask, weight, bias,
            stride, padding, dilation, deformable_groups):
    """DCNv2 forward via the compiled backend.

    Hyper-parameters are normalized to pairs and stashed on ``ctx`` so the
    backward pass can reuse them.
    """
    ctx.kernel_size = _pair(weight.shape[2:4])
    ctx.stride = _pair(stride)
    ctx.padding = _pair(padding)
    ctx.dilation = _pair(dilation)
    ctx.deformable_groups = deformable_groups
    kh, kw = ctx.kernel_size
    output = _backend.dcn_v2_forward(
        input, weight, bias, offset, mask,
        kh, kw,
        ctx.stride[0], ctx.stride[1],
        ctx.padding[0], ctx.padding[1],
        ctx.dilation[0], ctx.dilation[1],
        ctx.deformable_groups)
    ctx.save_for_backward(input, offset, mask, weight, bias)
    return output
示例13: __init__
# 需要导入模块: from torch.nn.modules import utils [as 别名]
# 或者: from torch.nn.modules.utils import _pair [as 别名]
def __init__(self,
             in_channels,
             out_channels,
             kernel_size,
             stride=1,
             padding=0,
             dilation=1,
             groups=1,
             deformable_groups=1,
             bias=True):
    """Modulated deformable conv that predicts its own offsets and mask.

    The parent class builds the deformable conv itself; this adds a regular
    conv branch producing offsets plus the modulation mask.
    """
    super(ModulatedDeformConvPack, self).__init__(
        in_channels, out_channels, kernel_size, stride, padding, dilation,
        groups, deformable_groups, bias)
    # Three channels per sampling point: (dy, dx) offsets plus a mask value.
    branch_channels = (self.deformable_groups * 3 *
                       self.kernel_size[0] * self.kernel_size[1])
    self.conv_offset_mask = nn.Conv2d(
        self.in_channels // self.groups,
        branch_channels,
        kernel_size=self.kernel_size,
        stride=_pair(self.stride),
        padding=_pair(self.padding),
        bias=True)
    self.init_offset()
示例14: forward
# 需要导入模块: from torch.nn.modules import utils [as 别名]
# 或者: from torch.nn.modules.utils import _pair [as 别名]
def forward(ctx, input, kernel_size, stride, padding, dilation, channel_wise):
    # Gaussian (RBF) affinity between each unfolded patch and its center
    # pixel: output = exp(-0.5 * ||patch - center||^2), computed per sliding
    # window position.
    ctx.kernel_size = _pair(kernel_size)
    ctx.dilation = _pair(dilation)
    ctx.padding = _pair(padding)
    ctx.stride = _pair(stride)
    bs, ch, in_h, in_w = input.shape
    # Standard convolution output-size arithmetic.
    out_h = (in_h + 2 * ctx.padding[0] - ctx.dilation[0] * (ctx.kernel_size[0] - 1) - 1) // ctx.stride[0] + 1
    out_w = (in_w + 2 * ctx.padding[1] - ctx.dilation[1] * (ctx.kernel_size[1] - 1) - 1) // ctx.stride[1] + 1
    cols = F.unfold(input, ctx.kernel_size, ctx.dilation, ctx.padding, ctx.stride)
    cols = cols.view(bs, ch, ctx.kernel_size[0], ctx.kernel_size[1], out_h, out_w)
    # Center tap of each patch, kept with singleton kernel dims so the
    # subtraction broadcasts over the whole patch.
    center_y, center_x = ctx.kernel_size[0] // 2, ctx.kernel_size[1] // 2
    feat_0 = cols.contiguous()[:, :, center_y:center_y + 1, center_x:center_x + 1, :, :]
    diff_sq = (cols - feat_0).pow(2)
    if not channel_wise:
        # Collapse channels so one kernel value is shared across all channels.
        diff_sq = diff_sq.sum(dim=1, keepdim=True)
    output = torch.exp(-0.5 * diff_sq)
    # NOTE(review): type2backend comes from the legacy torch._thnn dispatch
    # removed in modern PyTorch — presumably consumed by backward; verify.
    ctx._backend = type2backend[input.type()]
    ctx.save_for_backward(input, output)
    return output
示例15: pacconv2d
# 需要导入模块: from torch.nn.modules import utils [as 别名]
# 或者: from torch.nn.modules.utils import _pair [as 别名]
def pacconv2d(input, kernel, weight, bias=None, stride=1, padding=0, dilation=1, shared_filters=False,
              native_impl=False):
    """Pixel-adaptive convolution: a conv whose taps are re-weighted per
    output location by ``kernel`` before being contracted with ``weight``.

    With ``native_impl`` the computation is done in pure PyTorch (im2col +
    einsum); otherwise it is delegated to the custom autograd function.
    """
    kernel_size = tuple(weight.shape[-2:])
    stride, padding, dilation = _pair(stride), _pair(padding), _pair(dilation)

    if not native_impl:
        return PacConv2dFn.apply(input, kernel, weight, bias, stride, padding,
                                 dilation, shared_filters)

    # im2col on input
    im_cols = nd2col(input, kernel_size, stride=stride, padding=padding,
                     dilation=dilation)
    # main computation: shared_filters uses one filter bank for all outputs.
    eqn = 'ijklmn,zykl->ijmn' if shared_filters else 'ijklmn,ojkl->iomn'
    output = torch.einsum(eqn, (im_cols * kernel, weight))
    if bias is not None:
        output = output + bias.view(1, -1, 1, 1)
    return output