This article collects typical usage examples of the torch.nn.functional.grid_sample method in Python. If you have been wondering how exactly functional.grid_sample is used, how to call it, or what it looks like in real code, the hand-picked examples below may help. You can also browse further usage examples for the torch.nn.functional module that this method belongs to.
A total of 15 code examples of functional.grid_sample are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
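Before diving into the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what F.grid_sample does: the grid holds normalized (x, y) sampling coordinates in [-1, 1], and an identity grid produced by F.affine_grid simply reproduces the input.
import torch
import torch.nn.functional as F

x = torch.arange(16, dtype=torch.float32).view(1, 1, 4, 4)
# identity 2x3 affine matrix -> identity sampling grid of shape (N, H, W, 2)
theta = torch.tensor([[[1., 0., 0.], [0., 1., 0.]]])
grid = F.affine_grid(theta, x.size(), align_corners=True)
y = F.grid_sample(x, grid, mode='bilinear', align_corners=True)
assert torch.allclose(x, y)  # sampling on the identity grid returns the input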
Example 1: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import grid_sample [as alias]
def forward(self, x, offset_map):
n, c, h, w = x.size()
if self.coord_map is None or self.coord_map[0].size() != offset_map.size()[2:]:
self.coord_map = self._gen_coord_map(h, w)
self.norm_factor = torch.cuda.FloatTensor([(w-1) / 2, (h-1) / 2])
# offset to absolute coordinate
grid_h = offset_map[:, 0] + self.coord_map[0] # (N, H, W)
grid_w = offset_map[:, 1] + self.coord_map[1] # (N, H, W)
# scale to [-1, 1], order of grid: [x, y] (i.e., [w, h])
grid = torch.stack([grid_w, grid_h], dim=-1) / self.norm_factor - 1. # (N, H, W, 2)
# use grid to obtain output feature
feats = F.grid_sample(x, grid, padding_mode='border') # (N, C, H, W)
return feats
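The _gen_coord_map helper is not included in the excerpt above. A plausible reconstruction (an assumption, not the author's code) that matches the shapes and the (w-1)/2, (h-1)/2 normalization used in forward:
import torch

def _gen_coord_map(h, w):
    # per-pixel absolute coordinates, each of shape (H, W):
    # coord_map[0] holds row (y) indices, coord_map[1] holds column (x) indices
    coord_h, coord_w = torch.meshgrid(torch.arange(h).float(), torch.arange(w).float())
    # kept on the GPU to match the torch.cuda.FloatTensor norm_factor above
    return coord_h.cuda(), coord_w.cuda()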
Example 2: point_sample
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import grid_sample [as alias]
def point_sample(input, points, align_corners=False, **kwargs):
"""A wrapper around :func:`grid_sample` to support 3D point_coords tensors
Unlike :func:`torch.nn.functional.grid_sample` it assumes point_coords to
lie inside ``[0, 1] x [0, 1]`` square.
Args:
input (Tensor): Feature map, shape (N, C, H, W).
points (Tensor): Image based absolute point coordinates (normalized),
range [0, 1] x [0, 1], shape (N, P, 2) or (N, Hgrid, Wgrid, 2).
align_corners (bool): Whether align_corners. Default: False
Returns:
Tensor: Features of `point` on `input`, shape (N, C, P) or
(N, C, Hgrid, Wgrid).
"""
add_dim = False
if points.dim() == 3:
add_dim = True
points = points.unsqueeze(2)
output = F.grid_sample(
input, denormalize(points), align_corners=align_corners, **kwargs)
if add_dim:
output = output.squeeze(3)
return output
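The denormalize helper is defined alongside point_sample in the original source and is not shown here; below is a sketch consistent with the docstring's [0, 1] to [-1, 1] mapping, followed by a hypothetical call.
def denormalize(grid):
    # map [0, 1] point coordinates to the [-1, 1] range that grid_sample expects
    return grid * 2.0 - 1.0

feats = torch.randn(2, 256, 32, 32)
points = torch.rand(2, 100, 2)          # 100 points per image, in [0, 1] x [0, 1]
sampled = point_sample(feats, points)   # -> (2, 256, 100)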
Example 3: point_sample
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import grid_sample [as alias]
def point_sample(input, points, align_corners=False, **kwargs):
"""A wrapper around :function:`grid_sample` to support 3D point_coords
tensors Unlike :function:`torch.nn.functional.grid_sample` it assumes
point_coords to lie inside [0, 1] x [0, 1] square.
Args:
input (Tensor): Feature map, shape (N, C, H, W).
points (Tensor): Image based absolute point coordinates (normalized),
range [0, 1] x [0, 1], shape (N, P, 2) or (N, Hgrid, Wgrid, 2).
align_corners (bool): Whether align_corners. Default: False
Returns:
Tensor: Features of `point` on `input`, shape (N, C, P) or
(N, C, Hgrid, Wgrid).
"""
add_dim = False
if points.dim() == 3:
add_dim = True
points = points.unsqueeze(2)
output = F.grid_sample(
input, denormalize(points), align_corners=align_corners, **kwargs)
if add_dim:
output = output.squeeze(3)
return output
Example 4: interp
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import grid_sample [as alias]
def interp(input, output_size, mode='bilinear'):
n, c, ih, iw = input.shape
oh, ow = output_size
# normalize to [-1, 1]
h = torch.arange(0, oh) / (oh-1) * 2 - 1
w = torch.arange(0, ow) / (ow-1) * 2 - 1
grid = torch.zeros(oh, ow, 2)
grid[:, :, 0] = w.unsqueeze(0).repeat(oh, 1)
grid[:, :, 1] = h.unsqueeze(0).repeat(ow, 1).transpose(0, 1)
grid = grid.unsqueeze(0).repeat(n, 1, 1, 1) # grid.shape: [n, oh, ow, 2]
grid = Variable(grid)
if input.is_cuda:
grid = grid.cuda()
return F.grid_sample(input, grid, mode=mode)
Example 5: augmentAffine
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import grid_sample [as alias]
def augmentAffine(img_in, seg_in, strength=0.05):
"""
3D affine augmentation on image and segmentation mini-batch on GPU.
(affine transf. is centered: trilinear interpolation and zero-padding used for sampling)
:input: img_in batch (torch.cuda.FloatTensor), seg_in batch (torch.cuda.LongTensor)
:return: augmented BxCxTxHxW image batch (torch.cuda.FloatTensor), augmented BxTxHxW seg batch (torch.cuda.LongTensor)
"""
B,C,D,H,W = img_in.size()
affine_matrix = (torch.eye(3,4).unsqueeze(0) + torch.randn(B, 3, 4) * strength).to(img_in.device)
meshgrid = F.affine_grid(affine_matrix,torch.Size((B,1,D,H,W)))
img_out = F.grid_sample(img_in, meshgrid,padding_mode='border')
seg_out = F.grid_sample(seg_in.float().unsqueeze(1), meshgrid, mode='nearest').long().squeeze(1)
return img_out, seg_out
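A hypothetical call on a random mini-batch (the tensors can also live on the CPU, since the affine matrix is created on img_in.device):
img = torch.randn(2, 1, 16, 32, 32)           # B x C x D x H x W image batch
seg = torch.randint(0, 4, (2, 16, 32, 32))    # B x D x H x W label volume
img_aug, seg_aug = augmentAffine(img, seg, strength=0.05)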
Example 6: image_to_object
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import grid_sample [as alias]
def image_to_object(images, pose, object_size):
    '''
    Invert the pose, then crop and transform the image patches.
    param images: (... x C x H x W) tensor
    param pose: (N x 3) or (N x 6) tensor
    '''
N, pose_size = pose.size()
n_channels, H, W = images.size()[-3:]
images = images.view(N, n_channels, H, W)
if pose_size == 3:
transformer_inv = expand_pose(pose_inv(pose))
elif pose_size == 6:
transformer_inv = pose_inv_full(pose)
grid = F.affine_grid(transformer_inv,
torch.Size((N, n_channels, object_size, object_size)))
obj = F.grid_sample(images, grid)
return obj
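The expand_pose, pose_inv and pose_inv_full helpers are external to this excerpt. Below is a hypothetical sketch of the first two, assuming a 3-parameter pose (s, tx, ty) that is expanded into a 2x3 affine matrix; the layout is an assumption, not the original code.
def expand_pose(pose):
    # (N, 3) pose (s, tx, ty) -> (N, 2, 3) affine matrix [[s, 0, tx], [0, s, ty]]
    s, tx, ty = pose[:, 0], pose[:, 1], pose[:, 2]
    zeros = torch.zeros_like(s)
    return torch.stack([s, zeros, tx, zeros, s, ty], dim=1).view(-1, 2, 3)

def pose_inv(pose):
    # invert the similarity transform: x' = s * x + t  <=>  x = x' / s - t / s
    s, tx, ty = pose[:, 0], pose[:, 1], pose[:, 2]
    return torch.stack([1.0 / s, -tx / s, -ty / s], dim=1)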
Example 7: object_to_image
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import grid_sample [as alias]
def object_to_image(objects, pose, image_size):
    '''
    param objects: (N x C x H x W) tensor
    param pose: (N x 3) or (N x 6) tensor
    '''
N, pose_size = pose.size()
_, n_channels, _, _ = objects.size()
if pose_size == 3:
transformer = expand_pose(pose)
elif pose_size == 6:
transformer = pose.view(N, 2, 3)
grid = F.affine_grid(transformer,
torch.Size((N, n_channels, image_size, image_size)))
components = F.grid_sample(objects, grid)
return components
Example 8: __init__
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import grid_sample [as alias]
def __init__(self, out_size, spatial_scale, aligned=True):
"""Simple RoI align in PointRend, faster than standard RoIAlign.
Args:
out_size (tuple[int]): h, w
spatial_scale (float): scale the input boxes by this number
aligned (bool): if False, use the legacy implementation in
MMDetection, align_corners=True will be used in F.grid_sample.
If True, align the results more perfectly.
"""
super(SimpleRoIAlign, self).__init__()
self.out_size = _pair(out_size)
self.spatial_scale = float(spatial_scale)
# to be consistent with other RoI ops
self.use_torchvision = False
self.aligned = aligned
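The forward pass of SimpleRoIAlign is not part of this excerpt. For orientation, the aligned flag essentially switches between the two coordinate-normalization conventions of F.grid_sample; the sketch below illustrates those conventions only and is not the class's actual code.
def normalize_coords(x, size, aligned=True):
    if not aligned:
        # legacy convention, paired with align_corners=True:
        # -1 and 1 refer to the centers of the corner pixels
        return x / (size - 1) * 2.0 - 1.0
    # aligned convention, paired with align_corners=False:
    # -1 and 1 refer to the outer edges of the image
    return (x + 0.5) / size * 2.0 - 1.0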
Example 9: imwrap_BCHW0
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import grid_sample [as alias]
def imwrap_BCHW0(im_src, disp):
    # warp the source image horizontally according to the disparity map
    bn, c, h, w = im_src.shape
    row = torch.linspace(-1, 1, w)
    col = torch.linspace(-1, 1, h)
    grid = torch.zeros(bn, h, w, 2)
    for n in range(bn):
        for i in range(h):
            grid[n, i, :, 0] = row
        for i in range(w):
            grid[n, :, i, 1] = col
    grid = grid.type_as(im_src)  # gradients still reach disp through the subtraction below
    # shift the x coordinates by the disparity, rescaled to the [-1, 1] grid range
    grid[:, :, :, 0] = grid[:, :, :, 0] - disp.squeeze(1) * 2 / w
    im_src = im_src.clamp(min=1e-6)  # clamp is not in-place; keep the result
    im_wrap = F.grid_sample(im_src, grid)
    return im_wrap
Example 10: point_sample
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import grid_sample [as alias]
def point_sample(input, point_coords, **kwargs):
"""
From Detectron2, point_features.py#19
    A wrapper around :func:`torch.nn.functional.grid_sample` to support 3D point_coords tensors.
    Unlike :func:`torch.nn.functional.grid_sample` it assumes `point_coords` to lie inside the
[0, 1] x [0, 1] square.
Args:
input (Tensor): A tensor of shape (N, C, H, W) that contains features map on a H x W grid.
point_coords (Tensor): A tensor of shape (N, P, 2) or (N, Hgrid, Wgrid, 2) that contains
[0, 1] x [0, 1] normalized point coordinates.
Returns:
output (Tensor): A tensor of shape (N, C, P) or (N, C, Hgrid, Wgrid) that contains
features for points in `point_coords`. The features are obtained via bilinear
            interpolation from `input`, in the same way as :func:`torch.nn.functional.grid_sample`.
"""
add_dim = False
if point_coords.dim() == 3:
add_dim = True
point_coords = point_coords.unsqueeze(2)
output = F.grid_sample(input, 2.0 * point_coords - 1.0, **kwargs)
if add_dim:
output = output.squeeze(3)
return output
Example 11: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import grid_sample [as alias]
def forward(self, x):
x_shape = x.size() # (b, c, h, w)
offset = self.offset_filter(x) # (b, 2*c, h, w)
offset_w, offset_h = torch.split(offset, self.regular_filter.in_channels, 1) # (b, c, h, w)
offset_w = offset_w.contiguous().view(-1, int(x_shape[2]), int(x_shape[3])) # (b*c, h, w)
offset_h = offset_h.contiguous().view(-1, int(x_shape[2]), int(x_shape[3])) # (b*c, h, w)
if not self.input_shape or self.input_shape != x_shape:
self.input_shape = x_shape
grid_w, grid_h = np.meshgrid(np.linspace(-1, 1, x_shape[3]), np.linspace(-1, 1, x_shape[2])) # (h, w)
grid_w = torch.Tensor(grid_w)
grid_h = torch.Tensor(grid_h)
if self.cuda:
grid_w = grid_w.cuda()
grid_h = grid_h.cuda()
self.grid_w = nn.Parameter(grid_w)
self.grid_h = nn.Parameter(grid_h)
offset_w = offset_w + self.grid_w # (b*c, h, w)
offset_h = offset_h + self.grid_h # (b*c, h, w)
x = x.contiguous().view(-1, int(x_shape[2]), int(x_shape[3])).unsqueeze(1) # (b*c, 1, h, w)
    x = F.grid_sample(x, torch.stack((offset_h, offset_w), 3))  # (b*c, 1, h, w)
x = x.contiguous().view(-1, int(x_shape[1]), int(x_shape[2]), int(x_shape[3])) # (b, c, h, w)
x = self.regular_filter(x)
return x
Example 12: shift
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import grid_sample [as alias]
def shift(x, offset):
"""
x: h x w
offset: 2 x h x w
"""
h, w = x.shape
x = torch.from_numpy(x).unsqueeze(0)
offset = torch.from_numpy(offset).unsqueeze(0)
coord_map = gen_coord_map(h, w)
norm_factor = torch.FloatTensor([(w-1)/2, (h-1)/2])
grid_h = offset[:, 0]+coord_map[0]
grid_w = offset[:, 1]+coord_map[1]
grid = torch.stack([grid_w, grid_h], dim=-1) / norm_factor - 1
x = F.grid_sample(x.unsqueeze(1).float(), grid, padding_mode='border', mode='bilinear').squeeze().numpy()
x = np.round(x)
return x.astype(np.uint8)
Example 13: flow_warp
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import grid_sample [as alias]
def flow_warp(x, flow, interp_mode='bilinear', padding_mode='zeros'):
"""Warp an image or feature map with optical flow
Args:
x (Tensor): size (N, C, H, W)
flow (Tensor): size (N, H, W, 2), normal value
interp_mode (str): 'nearest' or 'bilinear'
padding_mode (str): 'zeros' or 'border' or 'reflection'
Returns:
Tensor: warped image or feature map
"""
assert x.size()[-2:] == flow.size()[1:3]
B, C, H, W = x.size()
# mesh grid
grid_y, grid_x = torch.meshgrid(torch.arange(0, H), torch.arange(0, W))
grid = torch.stack((grid_x, grid_y), 2).float() # W(x), H(y), 2
grid.requires_grad = False
grid = grid.type_as(x)
vgrid = grid + flow
# scale grid to [-1,1]
vgrid_x = 2.0 * vgrid[:, :, :, 0] / max(W - 1, 1) - 1.0
vgrid_y = 2.0 * vgrid[:, :, :, 1] / max(H - 1, 1) - 1.0
vgrid_scaled = torch.stack((vgrid_x, vgrid_y), dim=3)
output = F.grid_sample(x, vgrid_scaled, mode=interp_mode, padding_mode=padding_mode)
return output
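A minimal usage sketch: with zero flow the warp is (almost) the identity. Note that the 2*v/(W-1)-1 normalization used above matches align_corners=True, while recent PyTorch versions default grid_sample to align_corners=False, so the reconstruction is only approximate unless grid_sample is called with align_corners=True.
x = torch.randn(1, 3, 32, 32)
flow = torch.zeros(1, 32, 32, 2)   # zero optical flow
out = flow_warp(x, flow)           # out is (approximately) equal to x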
Example 14: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import grid_sample [as alias]
def forward(self, x, pts_list):
x_height, x_width = x.size()[-2:]
coarse_desc_map = self.head(x)
coarse_desc_map = F.normalize(coarse_desc_map)
descriptors_list = []
for i, pts in enumerate(pts_list):
pts = pts.float()
pts[:, 0] = pts[:, 0] / (0.5 * x_height * self.reduction) - 1.0
pts[:, 1] = pts[:, 1] / (0.5 * x_width * self.reduction) - 1.0
if self.transpose_descriptors:
pts = torch.index_select(pts, dim=1, index=torch.tensor([1, 0], device=pts.device))
pts = pts.unsqueeze(0).unsqueeze(0)
descriptors = F.grid_sample(coarse_desc_map[i:(i + 1)], pts)
descriptors = descriptors.squeeze(0).squeeze(1)
descriptors = descriptors.transpose(0, 1)
descriptors = F.normalize(descriptors)
descriptors_list.append(descriptors)
return descriptors_list
Example 15: offset_flow
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import grid_sample [as alias]
def offset_flow(img, flow):
'''
:param img: torch.FloatTensor of shape NxCxHxW
:param flow: torch.FloatTensor of shape NxHxWx2
:return: torch.FloatTensor of shape NxCxHxW
'''
    N, C, H, W = img.shape
    # generate the identity sampling grid in [-1, 1]; gx indexes rows (y), gy indexes columns (x)
    gx, gy = torch.meshgrid(torch.arange(H), torch.arange(W))
    gx = gx.float().div(H - 1).view(1, H, W, 1)
    gy = gy.float().div(W - 1).view(1, H, W, 1)
    # grid_sample expects (x, y) order in the last dimension
    grid = torch.cat([gy, gx], dim=-1).mul(2.).sub(1).to(img.device)
    # rescale the pixel-space flow to the normalized [-1, 1] coordinates
    flown = flow.clone()
    flown[..., 0] /= (W - 1) / 2
    flown[..., 1] /= (H - 1) / 2
    # offset the identity grid by the normalized flow field
    grid = grid + flown
    return F.grid_sample(img, grid), grid