

Python functional.affine_grid Method Code Examples

This article collects typical usage examples of the Python method torch.nn.functional.affine_grid. If you are wondering what functional.affine_grid does, how to call it, or simply want to see it used in real code, the curated examples below may help. You can also browse further usage examples for the containing module, torch.nn.functional.


The following shows 12 code examples of the functional.affine_grid method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
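
Before looking at the examples, here is a minimal, self-contained sketch of the API that all of the snippets below build on: F.affine_grid turns a batch of 2x3 affine matrices into a sampling grid, and F.grid_sample uses that grid to resample the input. The tensor shapes are purely illustrative.

import torch
import torch.nn.functional as F

# A batch with a single identity 2x3 affine matrix (no rotation, scale or shift).
theta = torch.tensor([[[1., 0., 0.],
                       [0., 1., 0.]]])          # shape (N, 2, 3), here N = 1

images = torch.randn(1, 3, 32, 32)              # (N, C, H, W) input batch

# Build a sampling grid for the desired output size, then resample the input.
grid = F.affine_grid(theta, size=(1, 3, 32, 32), align_corners=False)
warped = F.grid_sample(images, grid, align_corners=False)

print(warped.shape)                             # torch.Size([1, 3, 32, 32])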

Example 1: image_to_object

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import affine_grid [as alias]
def image_to_object(images, pose, object_size):
  '''
  Inverse pose, crop and transform image patches.
  param images: (... x C x H x W) tensor
  param pose: (N x 3) or (N x 6) tensor
  '''
  N, pose_size = pose.size()
  n_channels, H, W = images.size()[-3:]
  images = images.view(N, n_channels, H, W)
  if pose_size == 3:
    transformer_inv = expand_pose(pose_inv(pose))
  elif pose_size == 6:
    transformer_inv = pose_inv_full(pose)

  grid = F.affine_grid(transformer_inv,
                       torch.Size((N, n_channels, object_size, object_size)))
  obj = F.grid_sample(images, grid)
  return obj 
Author: jthsieh, Project: DDPAE-video-prediction, Lines: 20, Source: DDPAE_utils.py

Example 2: object_to_image

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import affine_grid [as alias]
def object_to_image(objects, pose, image_size):
  '''
  param objects: (N x C x H x W) tensor
  param pose: (N x 3) or (N x 6) tensor
  '''
  N, pose_size = pose.size()
  _, n_channels, _, _ = objects.size()
  if pose_size == 3:
    transformer = expand_pose(pose)
  elif pose_size == 6:
    transformer = pose.view(N, 2, 3)

  grid = F.affine_grid(transformer,
                       torch.Size((N, n_channels, image_size, image_size)))
  components = F.grid_sample(objects, grid)
  return components 
Author: jthsieh, Project: DDPAE-video-prediction, Lines: 18, Source: DDPAE_utils.py

Example 3: generate_grid

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import affine_grid [as alias]
def generate_grid(num_grid, size, device):
    """Generate regular square grid of points in [0, 1] x [0, 1] coordinate
    space.

    Args:
        num_grid (int): The number of grids to sample, one for each region.
        size (tuple(int, int)): The side size of the regular grid.
        device (torch.device): Desired device of returned tensor.

    Returns:
        (torch.Tensor): A tensor of shape (num_grid, size[0]*size[1], 2) that
            contains coordinates for the regular grids.
    """

    affine_trans = torch.tensor([[[1., 0., 0.], [0., 1., 0.]]], device=device)
    grid = F.affine_grid(
        affine_trans, torch.Size((1, 1, *size)), align_corners=False)
    grid = normalize(grid)
    return grid.view(1, -1, 2).expand(num_grid, -1, -1) 
Author: open-mmlab, Project: mmdetection, Lines: 21, Source: point_sample.py
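
The snippet above calls a normalize helper defined elsewhere in mmdetection's point_sample.py; judging from the docstring, it maps the [-1, 1] coordinates returned by affine_grid into [0, 1]. Below is a hedged usage sketch with a stand-in normalize that encodes exactly that assumption and reuses generate_grid as defined above.

import torch
import torch.nn.functional as F

def normalize(grid):
    # Assumed behaviour of the missing helper: map affine_grid output from
    # [-1, 1] into the [0, 1] coordinate space described in the docstring.
    return grid * 0.5 + 0.5

# Four regions, a regular 7x7 grid of points for each.
points = generate_grid(num_grid=4, size=(7, 7), device=torch.device('cpu'))
print(points.shape)        # torch.Size([4, 49, 2]), coordinates in [0, 1]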

Example 4: _affine_grid_gen

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import affine_grid [as alias]
def _affine_grid_gen(rois, input_size, grid_size):

    rois = rois.detach()
    x1 = rois[:, 1::4] / 16.0
    y1 = rois[:, 2::4] / 16.0
    x2 = rois[:, 3::4] / 16.0
    y2 = rois[:, 4::4] / 16.0

    height = input_size[0]
    width = input_size[1]

    zero = Variable(rois.data.new(rois.size(0), 1).zero_())
    theta = torch.cat([\
      (x2 - x1) / (width - 1),
      zero,
      (x1 + x2 - width + 1) / (width - 1),
      zero,
      (y2 - y1) / (height - 1),
      (y1 + y2 - height + 1) / (height - 1)], 1).view(-1, 2, 3)

    grid = F.affine_grid(theta, torch.Size((rois.size(0), 1, grid_size, grid_size)))

    return grid 
Author: guoruoqian, Project: cascade-rcnn_Pytorch, Lines: 25, Source: net_utils.py
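
A usage sketch for the function above, assuming that rois rows are laid out as (batch_idx, x1, y1, x2, y2) in image coordinates, that the hard-coded 16.0 is the backbone stride, that input_size is the feature map's (height, width), and that Variable is imported from torch.autograd as in the original repository.

import torch
import torch.nn.functional as F

# Two ROIs on image 0; coordinates are in the original image space.
rois = torch.tensor([[0.,  32.,  48., 160., 176.],
                     [0.,  64.,  64., 192., 224.]])
feat = torch.randn(1, 256, 50, 50)          # (N, C, H/16, W/16) feature map

grid = _affine_grid_gen(rois, input_size=(50, 50), grid_size=7)
crops = F.grid_sample(feat[rois[:, 0].long()], grid)   # (2, 256, 7, 7)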

Example 5: _affine_grid_gen

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import affine_grid [as alias]
def _affine_grid_gen(rois, input_size, grid_size):
    rois = rois.detach()
    x1 = rois[:, 1::4] / 16.0
    y1 = rois[:, 2::4] / 16.0
    x2 = rois[:, 3::4] / 16.0
    y2 = rois[:, 4::4] / 16.0

    height = input_size[0]
    width = input_size[1]

    zero = Variable(rois.data.new(rois.size(0), 1).zero_())
    theta = torch.cat([ \
        (x2 - x1) / (width - 1),
        zero,
        (x1 + x2 - width + 1) / (width - 1),
        zero,
        (y2 - y1) / (height - 1),
        (y1 + y2 - height + 1) / (height - 1)], 1).view(-1, 2, 3)

    grid = F.affine_grid(theta,
                         torch.Size((rois.size(0), 1, grid_size, grid_size)))

    return grid 
Author: ucbdrive, Project: 3d-vehicle-tracking, Lines: 25, Source: net_utils.py

Example 6: affine_grid_gen

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import affine_grid [as alias]
def affine_grid_gen(rois, input_size, grid_size):

    rois = rois.detach()
    x1 = rois[:, 1::4] / 16.0
    y1 = rois[:, 2::4] / 16.0
    x2 = rois[:, 3::4] / 16.0
    y2 = rois[:, 4::4] / 16.0

    height = input_size[0]
    width = input_size[1]

    zero = Variable(rois.data.new(rois.size(0), 1).zero_())
    theta = torch.cat([\
      (x2 - x1) / (width - 1),
      zero,
      (x1 + x2 - width + 1) / (width - 1),
      zero,
      (y2 - y1) / (height - 1),
      (y1 + y2 - height + 1) / (height - 1)], 1).view(-1, 2, 3)

    grid = F.affine_grid(theta, torch.Size((rois.size(0), 1, grid_size, grid_size)))

    return grid 
Author: roytseng-tw, Project: Detectron.pytorch, Lines: 25, Source: net.py

Example 7: augmentAffine

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import affine_grid [as alias]
def augmentAffine(img_in, seg_in, strength=0.05):
    """
    3D affine augmentation on image and segmentation mini-batch on GPU.
    (affine transf. is centered: trilinear interpolation and zero-padding used for sampling)
    :input: img_in batch (torch.cuda.FloatTensor), seg_in batch (torch.cuda.LongTensor)
    :return: augmented BxCxTxHxW image batch (torch.cuda.FloatTensor), augmented BxTxHxW seg batch (torch.cuda.LongTensor)
    """
    B,C,D,H,W = img_in.size()
    affine_matrix = (torch.eye(3,4).unsqueeze(0) + torch.randn(B, 3, 4) * strength).to(img_in.device)

    meshgrid = F.affine_grid(affine_matrix,torch.Size((B,1,D,H,W)))

    img_out = F.grid_sample(img_in, meshgrid,padding_mode='border')
    seg_out = F.grid_sample(seg_in.float().unsqueeze(1), meshgrid, mode='nearest').long().squeeze(1)

    return img_out, seg_out 
Author: multimodallearning, Project: pdd_net, Lines: 18, Source: dense_pdd_net_v01.py
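
A quick way to exercise augmentAffine: the docstring assumes CUDA tensors, but the function only relies on img_in.device, so small CPU tensors with illustrative shapes work just as well.

import torch

img = torch.randn(2, 1, 16, 32, 32)            # B x C x D x H x W image batch
seg = torch.randint(0, 4, (2, 16, 32, 32))     # B x D x H x W label batch

img_aug, seg_aug = augmentAffine(img, seg, strength=0.05)
print(img_aug.shape, seg_aug.shape)            # shapes match the inputs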

Example 8: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import affine_grid [as alias]
def forward(self, image: Tensor, pose: Tensor):
        n = image.size(0)
        c = image.size(1)
        h = image.size(2)
        w = image.size(3)

        pose = pose.unsqueeze(2).unsqueeze(3)
        pose = pose.expand(pose.size(0), pose.size(1), image.size(2), image.size(3))
        x = torch.cat([image, pose], dim=1)
        y = self.main_body(x)

        color_change = self.pumarola_color_change(y)
        alpha_mask = self.pumarola_alpha_mask(y)
        color_changed = alpha_mask * image + (1 - alpha_mask) * color_change

        grid_change = torch.transpose(self.zhou_grid_change(y).view(n, 2, h * w), 1, 2).view(n, h, w, 2)
        device = self.zhou_grid_change.weight.device
        identity = torch.Tensor([[1, 0, 0], [0, 1, 0]]).to(device).unsqueeze(0).repeat(n, 1, 1)
        base_grid = affine_grid(identity, [n, c, h, w], align_corners=self.align_corners)
        grid = base_grid + grid_change
        resampled = grid_sample(image, grid, mode='bilinear', padding_mode='border', align_corners=self.align_corners)

        return [color_changed, resampled, color_change, alpha_mask, grid_change, grid] 
Author: pkhungurn, Project: talking-head-anime-demo, Lines: 25, Source: two_algo_face_rotator.py

Example 9: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import affine_grid [as alias]
def forward(self, x):
        #
        # Calculate the transform
        #
        xs = self.localization(x)
        xs = xs.view(-1, 32*7*7)

        theta = self.fc_loc(xs)
        theta = theta.view(-1, 2, 3)

        grid = F.affine_grid(theta, x.size())

        #
        # transform the input
        #
        x = F.grid_sample(x, grid)

        return x 
Author: leokarlin, Project: LaSO, Lines: 20, Source: stn.py
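
The forward pass above expects the module to define self.localization (a small conv net whose output flattens to 32*7*7 features) and self.fc_loc (a regressor for the six affine parameters). The layer sizes in the sketch below are assumptions chosen only so that a 1x28x28 input flattens to 32*7*7; the original LaSO module may differ.

import torch
import torch.nn as nn
import torch.nn.functional as F

class STN(nn.Module):
    """Minimal spatial transformer; layer sizes are illustrative assumptions."""

    def __init__(self):
        super().__init__()
        # Localization network: assumed to reduce a 1x28x28 input to 32x7x7.
        self.localization = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=5, padding=2), nn.MaxPool2d(2), nn.ReLU(True),
            nn.Conv2d(16, 32, kernel_size=5, padding=2), nn.MaxPool2d(2), nn.ReLU(True),
        )
        # Regressor for the 2x3 affine matrix, initialized to the identity transform.
        self.fc_loc = nn.Sequential(
            nn.Linear(32 * 7 * 7, 64), nn.ReLU(True), nn.Linear(64, 6),
        )
        self.fc_loc[-1].weight.data.zero_()
        self.fc_loc[-1].bias.data.copy_(torch.tensor([1., 0., 0., 0., 1., 0.]))

    def forward(self, x):
        xs = self.localization(x).view(-1, 32 * 7 * 7)
        theta = self.fc_loc(xs).view(-1, 2, 3)
        grid = F.affine_grid(theta, x.size())
        return F.grid_sample(x, grid)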

Example 10: roi_pooling

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import affine_grid [as alias]
def roi_pooling(input, rois, size=(7,7)):
  assert rois.dim() == 2 and rois.size(1) == 5, 'rois shape is wrong : {}'.format(rois.size())
  output = []
  num_rois = rois.size(0)
  size = np.array(size)
  spatial_size = np.array([input.size(3), input.size(2)])
  for i in range(num_rois):
    roi = variable2np(rois[i])
    im_idx = int(roi[0])
    theta = utils.crop2affine(spatial_size, roi[1:])
    theta = np2variable(theta, input.is_cuda).unsqueeze(0)
    grid_size = torch.Size([1, 3, int(size[1]), int(size[0])])
    grid = F.affine_grid(theta, grid_size)
    roi_feature = F.grid_sample(input.narrow(0, im_idx, 1), grid)
    output.append( roi_feature )
  return torch.cat(output, 0) 
Author: D-X-Y, Project: landmark-detection, Lines: 18, Source: model_utils.py

Example 11: face_align

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import affine_grid [as alias]
def face_align(face, point, target):
  spatial_size = np.array(face.size)
  point, target = point.copy(), target.copy()
  point[:,0] = normalize(spatial_size[0], point[:,0])
  point[:,1] = normalize(spatial_size[1], point[:,1])
  target[:,0] = normalize(spatial_size[0], target[:,0])
  target[:,1] = normalize(spatial_size[1], target[:,1])
  x, residual, rank, s = np.linalg.lstsq(target, point)
  theta = x.T[:2,:]
  theta = np2variable(theta).unsqueeze(0)
  image = np.array(face.copy()).transpose(2, 0, 1)
  image_var = np2variable(image, False).unsqueeze(0)
  grid_size = torch.Size([1, 3, int(spatial_size[1]), int(spatial_size[0])])
  grid = F.affine_grid(theta, grid_size)
  aligned_image = F.grid_sample(image_var, grid)
  aligned_image = aligned_image.data.numpy().squeeze()
  aligned_image = aligned_image.transpose(1, 2, 0)
  aligned_image = Image.fromarray(np.uint8(aligned_image))
  return aligned_image 
Author: D-X-Y, Project: landmark-detection, Lines: 21, Source: gen_mean_face.py

Example 12: warp_feature_batch

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import affine_grid [as alias]
def warp_feature_batch(feature, pts_location, patch_size):
  # feature must be [1,C,H,W] and pts_location must be [Num-Pts, (x,y)]
  _, C, H, W = list(feature.size())
  num_pts = pts_location.size(0)
  assert isinstance(patch_size, int) and feature.size(0) == 1 and pts_location.size(1) == 2, 'The shapes of feature or points are not right : {} vs {}'.format(feature.size(), pts_location.size())
  assert W > 1 and H > 1, 'To guarantee normalization {}, {}'.format(W, H)

  def normalize(x, L):
    return -1. + 2. * x / (L-1)

  crop_box = torch.cat([pts_location-patch_size, pts_location+patch_size], 1)
  crop_box[:, [0,2]] = normalize(crop_box[:, [0,2]], W)
  crop_box[:, [1,3]] = normalize(crop_box[:, [1,3]], H)
 
  affine_parameter = [(crop_box[:,2]-crop_box[:,0])/2, crop_box[:,0]*0, (crop_box[:,2]+crop_box[:,0])/2,
                      crop_box[:,0]*0, (crop_box[:,3]-crop_box[:,1])/2, (crop_box[:,3]+crop_box[:,1])/2]
  #affine_parameter = [(crop_box[:,2]-crop_box[:,0])/2, MU.np2variable(torch.zeros(num_pts),feature.is_cuda,False), (crop_box[:,2]+crop_box[:,0])/2,
  #                    MU.np2variable(torch.zeros(num_pts),feature.is_cuda,False), (crop_box[:,3]-crop_box[:,1])/2, (crop_box[:,3]+crop_box[:,1])/2]
  theta = torch.stack(affine_parameter, 1).view(num_pts, 2, 3)
  feature = feature.expand(num_pts,C, H, W)
  grid_size = torch.Size([num_pts, 1, 2*patch_size+1, 2*patch_size+1])
  grid = F.affine_grid(theta, grid_size)
  sub_feature = F.grid_sample(feature, grid)
  return sub_feature 
Author: D-X-Y, Project: landmark-detection, Lines: 26, Source: basic_utils_batch.py
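
A usage sketch for warp_feature_batch, assuming the function and its imports above are available; the feature map and landmark coordinates are illustrative.

import torch

feature = torch.randn(1, 64, 32, 32)                    # 1 x C x H x W
pts = torch.tensor([[10., 12.], [20., 8.], [5., 25.]])  # Num-Pts x (x, y)

patches = warp_feature_batch(feature, pts, patch_size=4)
print(patches.shape)        # torch.Size([3, 64, 9, 9]); each patch is 2*4+1 wide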


Note: The torch.nn.functional.affine_grid examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by many developers, and the copyright of the source code belongs to the original authors. Please consult each project's License before redistributing or using the code; do not reproduce without permission.