

Python image.affine_transform method code examples

This article collects typical usage examples of the Python method utils.image.affine_transform. If you are wondering what image.affine_transform does, how to call it, or what it looks like in real code, the curated examples here may help. You can also explore further usage examples from the utils.image module that the method belongs to.


The sections below show 2 code examples of image.affine_transform, sorted by popularity by default.
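Before the two examples, it helps to know what affine_transform is expected to do. In both listings it is paired with get_affine_transform, whose 2x3 matrix is also fed to cv2.warpAffine, so the point-warping step most likely looks like the minimal sketch below. This sketch is an assumption inferred from that usage, not the repository's verbatim source:

import numpy as np

def affine_transform(pt, t):
    # Apply a 2x3 affine matrix `t` (the same kind cv2.warpAffine consumes)
    # to one 2D point by lifting it to homogeneous coordinates [x, y, 1].
    new_pt = np.array([pt[0], pt[1], 1.0], dtype=np.float32)
    return np.dot(t, new_pt)[:2]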

Example 1: __getitem__

# Required import: from utils import image [as alias]
# Or: from utils.image import affine_transform [as alias]
def __getitem__(self, index):
    if index < 10 and self.split == 'train':
      self.idxs = np.random.choice(
        self.num_samples, self.num_samples, replace=False)
    img = self._load_image(index)
    gt_3d, pts, c, s = self._get_part_info(index)
    
    r = 0
    s = np.array([s, s])
    s = adjust_aspect_ratio(s, self.aspect_ratio, self.opt.fit_short_side)
    
    trans_input = get_affine_transform(
      c, s, r, [self.opt.input_h, self.opt.input_w])
    inp = cv2.warpAffine(img, trans_input, (self.opt.input_h, self.opt.input_w),
                         flags=cv2.INTER_LINEAR)
    inp = (inp.astype(np.float32) / 256. - self.mean) / self.std
    inp = inp.transpose(2, 0, 1)

    trans_output = get_affine_transform(
      c, s, r, [self.opt.output_h, self.opt.output_w])
    out = np.zeros((self.num_joints, self.opt.output_h, self.opt.output_w), 
                    dtype=np.float32)
    reg_target = np.zeros((self.num_joints, 1), dtype=np.float32)
    reg_ind = np.zeros((self.num_joints), dtype=np.int64)
    reg_mask = np.zeros((self.num_joints), dtype=np.uint8)
    pts_crop = np.zeros((self.num_joints, 2), dtype=np.int32)
    for i in range(self.num_joints):
      pt = affine_transform(pts[i, :2], trans_output).astype(np.int32)
      if pt[0] >= 0 and pt[1] >= 0 and pt[0] < self.opt.output_w \
        and pt[1] < self.opt.output_h:
        pts_crop[i] = pt
        out[i] = draw_gaussian(out[i], pt, self.opt.hm_gauss)
        reg_target[i] = pts[i, 2] / s[0] # assert not fit_short
        reg_ind[i] = pt[1] * self.opt.output_w * self.num_joints + \
                     pt[0] * self.num_joints + i # note transposed
        reg_mask[i] = 1
    
    meta = {'index' : self.idxs[index], 'center' : c, 'scale' : s,
            'gt_3d': gt_3d, 'pts_crop': pts_crop}

    ret = {'input': inp, 'target': out, 'meta': meta, 
           'reg_target': reg_target, 'reg_ind': reg_ind, 'reg_mask': reg_mask}
    
    return ret 
Author: xingyizhou | Project: pytorch-pose-hg-3d | Lines: 46 | Source: h36m_iccv.py
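The "# note transposed" comment above refers to the memory layout addressed by reg_ind: it indexes a depth/regression map stored channel-last, i.e. with shape (output_h, output_w, num_joints) before flattening, rather than the (num_joints, output_h, output_w) layout of the heatmap tensor. A small, self-contained check of that index formula (the sizes below are placeholders, not values taken from opt):

import numpy as np

output_h, output_w, num_joints = 64, 64, 16           # placeholder sizes
depth = np.random.randn(output_h, output_w, num_joints).astype(np.float32)
flat = depth.reshape(-1)                              # flattened channel-last map

x, y, j = 10, 20, 5                                   # output-space point and joint id
ind = y * output_w * num_joints + x * num_joints + j  # same formula as reg_ind above
assert np.isclose(flat[ind], depth[y, x, j])          # picks the intended element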

Example 2: __getitem__

# Required import: from utils import image [as alias]
# Or: from utils.image import affine_transform [as alias]
def __getitem__(self, index):
    img = self._load_image(index)
    _, pts, c, s = self._get_part_info(index)
    r = 0
    
    if self.split == 'train':
      sf = self.opt.scale
      rf = self.opt.rotate
      s = s * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf)
      r = np.clip(np.random.randn()*rf, -rf*2, rf*2) \
          if np.random.random() <= 0.6 else 0
    s = min(s, max(img.shape[0], img.shape[1])) * 1.0
    s = np.array([s, s])
    s = adjust_aspect_ratio(s, self.aspect_ratio, self.opt.fit_short_side)

    flipped = (self.split == 'train' and np.random.random() < self.opt.flip)
    if flipped:
      img = img[:, ::-1, :]
      c[0] = img.shape[1] - 1 - c[0]
      pts[:, 0] = img.shape[1] - 1 - pts[:, 0]
      for e in self.shuffle_ref:
        pts[e[0]], pts[e[1]] = pts[e[1]].copy(), pts[e[0]].copy()

    trans_input = get_affine_transform(
      c, s, r, [self.opt.input_h, self.opt.input_w])
    inp = cv2.warpAffine(img, trans_input, (self.opt.input_h, self.opt.input_w),
                         flags=cv2.INTER_LINEAR)
    inp = (inp.astype(np.float32) / 256. - self.mean) / self.std
    inp = inp.transpose(2, 0, 1)

    trans_output = get_affine_transform(
      c, s, r, [self.opt.output_h, self.opt.output_w])
    out = np.zeros((self.num_joints, self.opt.output_h, self.opt.output_w), 
                    dtype=np.float32)
    pts_crop = np.zeros((self.num_joints, 2), dtype=np.int32)
    for i in range(self.num_joints):
      if pts[i, 0] > 0 or pts[i, 1] > 0:
        pts_crop[i] = affine_transform(pts[i], trans_output)
        out[i] = draw_gaussian(out[i], pts_crop[i], self.opt.hm_gauss) 
    
    meta = {'index' : index, 'center' : c, 'scale' : s, \
            'pts_crop': pts_crop}
    return {'input': inp, 'target': out, 'meta': meta} 
Author: xingyizhou | Project: pytorch-pose-hg-3d | Lines: 45 | Source: mpii.py
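Both __getitem__ implementations return plain numpy arrays keyed by name, so either dataset can be consumed by a standard PyTorch DataLoader. The sketch below is hypothetical usage: the class name MPII and the opt object are assumptions inferred from the source file mpii.py, not names confirmed by this page:

import torch
from torch.utils.data import DataLoader

dataset = MPII(opt, split='train')   # hypothetical constructor; opt holds input/output sizes etc.
loader = DataLoader(dataset, batch_size=32, shuffle=True, num_workers=4)

for batch in loader:
    inp = batch['input']             # (B, 3, input_h, input_w), normalized image
    target = batch['target']         # (B, num_joints, output_h, output_w) heatmaps
    break                            # one batch is enough for this sketch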


Note: The utils.image.affine_transform examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Please consult each project's license before distributing or using the code, and do not reproduce this article without permission.