

Python image.get_affine_transform Method Code Examples

This article collects typical usage examples of the Python method utils.image.get_affine_transform, gathered from open-source projects. If you are unsure what image.get_affine_transform does or how to call it, the curated examples below should help. You can also explore other usage examples from the utils.image module.


The following presents 10 code examples of image.get_affine_transform, sorted by popularity by default.
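
Before the examples, here is a minimal sketch of what get_affine_transform computes in these projects. It follows the CenterNet-family utils/image.py, and the exact code varies slightly between repositories: three point correspondences are built from the crop center, a rotated "direction" point, and a perpendicular third point, then cv2.getAffineTransform solves for the 2x3 matrix. The underscore-prefixed helpers are local to this sketch.

import cv2
import numpy as np

def _get_dir(src_point, rot_rad):
    # Rotate a 2-D point around the origin by rot_rad radians.
    sn, cs = np.sin(rot_rad), np.cos(rot_rad)
    return [src_point[0] * cs - src_point[1] * sn,
            src_point[0] * sn + src_point[1] * cs]

def _get_3rd_point(a, b):
    # A third point completing a right angle with the segment a -> b.
    d = a - b
    return b + np.array([-d[1], d[0]], dtype=np.float32)

def get_affine_transform(center, scale, rot, output_size,
                         shift=np.array([0, 0], dtype=np.float32), inv=0):
    # center: (x, y) of the crop in the source image
    # scale:  side length of the (square) crop, scalar or [w, h]
    # rot:    rotation in degrees; output_size: [dst_w, dst_h]
    # inv=1 returns the inverse mapping (output space -> image space)
    if not isinstance(scale, (np.ndarray, list)):
        scale = np.array([scale, scale], dtype=np.float32)
    src_w = scale[0]
    dst_w, dst_h = output_size[0], output_size[1]

    rot_rad = np.pi * rot / 180
    src_dir = _get_dir([0, src_w * -0.5], rot_rad)
    dst_dir = np.array([0, dst_w * -0.5], dtype=np.float32)

    src = np.zeros((3, 2), dtype=np.float32)
    dst = np.zeros((3, 2), dtype=np.float32)
    src[0, :] = center + scale * shift
    src[1, :] = center + src_dir + scale * shift
    dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
    dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], dtype=np.float32) + dst_dir
    src[2:, :] = _get_3rd_point(src[0, :], src[1, :])
    dst[2:, :] = _get_3rd_point(dst[0, :], dst[1, :])

    # Solve the 2x3 affine matrix from the three point pairs.
    if inv:
        return cv2.getAffineTransform(np.float32(dst), np.float32(src))
    return cv2.getAffineTransform(np.float32(src), np.float32(dst))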

Example 1: pre_process

# Required import: from utils import image  [as alias]
# or: from utils.image import get_affine_transform  [as alias]
# (this snippet also assumes: import cv2, import numpy as np, import torch)
def pre_process(image, cfg=None, scale=1, meta=None):
    height, width = image.shape[0:2]
    new_height = int(height * scale)
    new_width  = int(width * scale)
    mean = np.array(cfg.DATASET.MEAN, dtype=np.float32).reshape(1, 1, 3)
    std = np.array(cfg.DATASET.STD, dtype=np.float32).reshape(1, 1, 3)

    inp_height, inp_width = cfg.MODEL.INPUT_H, cfg.MODEL.INPUT_W
    c = np.array([new_width / 2., new_height / 2.], dtype=np.float32)
    s = max(height, width) * 1.0


    trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height])
    resized_image = cv2.resize(image, (new_width, new_height))
    inp_image = cv2.warpAffine(
      resized_image, trans_input, (inp_width, inp_height),
      flags=cv2.INTER_LINEAR)
    inp_image = ((inp_image / 255. - mean) / std).astype(np.float32)

    images = inp_image.transpose(2, 0, 1).reshape(1, 3, inp_height, inp_width)
    images = torch.from_numpy(images)
    meta = {'c': c, 's': s, 
            'out_height': inp_height // cfg.MODEL.DOWN_RATIO, 
            'out_width': inp_width // cfg.MODEL.DOWN_RATIO}
    return images, meta 
Developer: tensorboy, Project: centerpose, Lines: 27, Source: convert2onnx.py
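
A hypothetical call for reference, assuming a yacs-style cfg carrying the DATASET.MEAN/STD and MODEL.INPUT_H/INPUT_W/DOWN_RATIO fields read above ('example.jpg' is a placeholder):

import cv2

img = cv2.imread('example.jpg')                  # HWC, BGR, uint8
images, meta = pre_process(img, cfg=cfg, scale=1)
# images: torch.FloatTensor of shape (1, 3, INPUT_H, INPUT_W)
# meta:   crop center 'c', scale 's', and the down-sampled output resolution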

Example 2: preprocess

# Required import: from utils import image  [as alias]
# or: from utils.image import get_affine_transform  [as alias]
# (this snippet also assumes: import cv2, import numpy as np)
def preprocess(self, image, scale=1, meta=None):
        height, width = image.shape[0:2]
        new_height = int(height * scale)
        new_width = int(width * scale)
        mean = np.array(self.cfg.DATASET.MEAN, dtype=np.float32).reshape(1, 1, 3)
        std = np.array(self.cfg.DATASET.STD, dtype=np.float32).reshape(1, 1, 3)

        inp_height, inp_width = self.cfg.MODEL.INPUT_H, self.cfg.MODEL.INPUT_W
        c = np.array([new_width / 2., new_height / 2.], dtype=np.float32)
        s = max(height, width) * 1.0

        trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height])
        resized_image = cv2.resize(image, (new_width, new_height))
        inp_image = cv2.warpAffine(
            resized_image, trans_input, (inp_width, inp_height),
            flags=cv2.INTER_LINEAR)
        inp_image = ((inp_image / 255. - mean) / std).astype(np.float32)

        images = inp_image.transpose(2, 0, 1).reshape(1, 3, inp_height, inp_width)

        meta = {'c': c, 's': s,
                'out_height': inp_height // self.cfg.MODEL.DOWN_RATIO,
                'out_width': inp_width // self.cfg.MODEL.DOWN_RATIO}

        return np.ascontiguousarray(images), meta 
Developer: tensorboy, Project: centerpose, Lines: 27, Source: centernet_tensorrt_engine.py

Example 3: pre_process

# Required import: from utils import image  [as alias]
# or: from utils.image import get_affine_transform  [as alias]
# (this snippet also assumes: import cv2, import numpy as np,
#  and from mxnet import nd)
def pre_process(self, image, scale, calib=None):
        height, width = image.shape[0:2]

        inp_height, inp_width = self.opt.input_h, self.opt.input_w
        c = np.array([width / 2, height / 2], dtype=np.float32)
        if self.opt.keep_res:
            s = np.array([inp_width, inp_height], dtype=np.int32)
        else:
            s = np.array([width, height], dtype=np.int32)

        trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height])
        resized_image = image  # cv2.resize(image, (width, height)) skipped: warpAffine rescales
        inp_image = cv2.warpAffine(resized_image, trans_input, (inp_width, inp_height), flags=cv2.INTER_LINEAR)
        inp_image = (inp_image.astype(np.float32) / 255.)
        inp_image = (inp_image - self.mean) / self.std
        images = inp_image.transpose(2, 0, 1)[np.newaxis, ...]
        calib = np.array(calib, dtype=np.float32) if calib is not None else self.calib
        images = nd.array(images)
        meta = {'c': c, 's': s,
                'out_height': inp_height // self.opt.down_ratio,
                'out_width': inp_width // self.opt.down_ratio,
                'calib': calib}
        return images, meta 
Developer: Guanghan, Project: mxnet-centernet, Lines: 25, Source: ddd_detector.py

Example 4: demo_image

# Required import: from utils import image  [as alias]
# or: from utils.image import get_affine_transform  [as alias]
# (this snippet also assumes: import cv2, import numpy as np, import torch,
#  module-level mean/std arrays, and the repo helpers get_preds,
#  get_preds_3d, transform_preds and Debugger)
def demo_image(image, model, opt):
  s = max(image.shape[0], image.shape[1]) * 1.0
  c = np.array([image.shape[1] / 2., image.shape[0] / 2.], dtype=np.float32)
  trans_input = get_affine_transform(
      c, s, 0, [opt.input_w, opt.input_h])
  inp = cv2.warpAffine(image, trans_input, (opt.input_w, opt.input_h),
                         flags=cv2.INTER_LINEAR)
  inp = (inp / 255. - mean) / std
  inp = inp.transpose(2, 0, 1)[np.newaxis, ...].astype(np.float32)
  inp = torch.from_numpy(inp).to(opt.device)
  out = model(inp)[-1]
  pred = get_preds(out['hm'].detach().cpu().numpy())[0]
  pred = transform_preds(pred, c, s, (opt.output_w, opt.output_h))
  pred_3d = get_preds_3d(out['hm'].detach().cpu().numpy(), 
                         out['depth'].detach().cpu().numpy())[0]
  
  debugger = Debugger()
  debugger.add_img(image)
  debugger.add_point_2d(pred, (255, 0, 0))
  debugger.add_point_3d(pred_3d, 'b')
  debugger.show_all_imgs(pause=False)
  debugger.show_3d() 
Developer: xingyizhou, Project: pytorch-pose-hg-3d, Lines: 24, Source: demo.py
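
For context, transform_preds in this repo maps peak coordinates from the heatmap back to the original image via the inverse crop transform. A CenterNet-style sketch (the exact code may differ per project; it relies on the inv=1 mode shown in the get_affine_transform sketch above):

def affine_transform(pt, t):
    # Apply a 2x3 affine matrix t to a single (x, y) point.
    new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32)
    return np.dot(t, new_pt)[:2]

def transform_preds(coords, center, scale, output_size):
    # Map (x, y) predictions from output space back to image space.
    trans = get_affine_transform(center, scale, 0, output_size, inv=1)
    target_coords = np.zeros(coords.shape)
    for p in range(coords.shape[0]):
        target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans)
    return target_coords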

Example 5: pre_process

# Required import: from utils import image  [as alias]
# or: from utils.image import get_affine_transform  [as alias]
# (this snippet also assumes: import cv2, import numpy as np, import torch)
def pre_process(self, image, scale, meta=None):
        height, width = image.shape[0:2]

        new_height = int(height * scale)
        new_width  = int(width * scale)
        if self.cfg.TEST.FIX_RES:
            inp_height, inp_width = self.cfg.MODEL.INPUT_H, self.cfg.MODEL.INPUT_W
            c = np.array([new_width / 2., new_height / 2.], dtype=np.float32)
            s = max(height, width) * 1.0
        else:
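            # (x | PAD) + 1 rounds x up to the next multiple of PAD + 1
            # when PAD is 2**k - 1, e.g. PAD = 31: (500 | 31) + 1 = 512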
            inp_height = (new_height | self.cfg.MODEL.PAD) + 1
            inp_width = (new_width | self.cfg.MODEL.PAD) + 1
            c = np.array([new_width // 2, new_height // 2], dtype=np.float32)
            s = np.array([inp_width, inp_height], dtype=np.float32)

        trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height])
        resized_image = cv2.resize(image, (new_width, new_height))
        inp_image = cv2.warpAffine(
            resized_image, trans_input, (inp_width, inp_height),
            flags=cv2.INTER_LINEAR)

        inp_image = ((inp_image / 255. - self.mean) / self.std).astype(np.float32)

        images = inp_image.transpose(2, 0, 1).reshape(1, 3, inp_height, inp_width)
        if self.cfg.TEST.FLIP_TEST:
            images = np.concatenate((images, images[:, :, :, ::-1]), axis=0)
        images = torch.from_numpy(images)
        meta = {'c': c, 's': s, 
                'out_height': inp_height // self.cfg.MODEL.DOWN_RATIO, 
                'out_width': inp_width // self.cfg.MODEL.DOWN_RATIO}
        return images, meta 
Developer: tensorboy, Project: centerpose, Lines: 33, Source: base_detector.py

Example 6: pre_process

# Required import: from utils import image  [as alias]
# or: from utils.image import get_affine_transform  [as alias]
# (this snippet also assumes: import cv2, import numpy as np,
#  and from mxnet import nd)
def pre_process(self, image, scale, meta=None):
        height, width = image.shape[0:2]
        new_height = int(height * scale)
        new_width  = int(width * scale)
        if self.opt.fix_res:
            inp_height, inp_width = self.opt.input_h, self.opt.input_w
            c = np.array([new_width / 2., new_height / 2.], dtype=np.float32)
            s = max(height, width) * 1.0
        else:
            inp_height = (new_height | self.opt.pad) + 1
            inp_width = (new_width | self.opt.pad) + 1
            c = np.array([new_width // 2, new_height // 2], dtype=np.float32)
            s = np.array([inp_width, inp_height], dtype=np.float32)

        trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height])
        resized_image = cv2.resize(image, (new_width, new_height))
        inp_image = cv2.warpAffine(
            resized_image, trans_input, (inp_width, inp_height),
            flags=cv2.INTER_LINEAR)
        inp_image = ((inp_image / 255. - self.mean) / self.std).astype(np.float32)

        images = inp_image.transpose(2, 0, 1).reshape(1, 3, inp_height, inp_width)
        if self.opt.flip_test:
            images = np.concatenate((images, images[:, :, :, ::-1]), axis=0)
        images = nd.array(images)
        meta = {'c': c, 's': s,
                'out_height': inp_height // self.opt.down_ratio,
                'out_width': inp_width // self.opt.down_ratio}
        return images, meta 
Developer: Guanghan, Project: mxnet-centernet, Lines: 31, Source: base_detector.py

Example 7: pre_process

# Required import: from utils import image  [as alias]
# or: from utils.image import get_affine_transform  [as alias]
# (this snippet also assumes: import cv2, import numpy as np, import torch)
def pre_process(self, image, scale, calib=None):
    height, width = image.shape[0:2]
    
    inp_height, inp_width = self.opt.input_h, self.opt.input_w
    c = np.array([width / 2, height / 2], dtype=np.float32)
    if self.opt.keep_res:
      s = np.array([inp_width, inp_height], dtype=np.int32)
    else:
      s = np.array([width, height], dtype=np.int32)

    trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height])
    resized_image = image  # cv2.resize(image, (width, height)) skipped: warpAffine rescales
    inp_image = cv2.warpAffine(
      resized_image, trans_input, (inp_width, inp_height),
      flags=cv2.INTER_LINEAR)
    inp_image = (inp_image.astype(np.float32) / 255.)
    inp_image = (inp_image - self.mean) / self.std
    images = inp_image.transpose(2, 0, 1)[np.newaxis, ...]
    calib = np.array(calib, dtype=np.float32) if calib is not None \
            else self.calib
    images = torch.from_numpy(images)
    meta = {'c': c, 's': s, 
            'out_height': inp_height // self.opt.down_ratio, 
            'out_width': inp_width // self.opt.down_ratio,
            'calib': calib}
    return images, meta 
Developer: CaoWGG, Project: CenterNet-CondInst, Lines: 28, Source: ddd.py

Example 8: pre_process

# Required import: from utils import image  [as alias]
# or: from utils.image import get_affine_transform  [as alias]
# (this snippet also assumes: import cv2, import numpy as np, import torch)
def pre_process(self, image, scale, meta=None):
    height, width = image.shape[0:2]
    new_height = int(height * scale)
    new_width  = int(width * scale)
    if self.opt.fix_res:
      inp_height, inp_width = self.opt.input_h, self.opt.input_w
      c = np.array([new_width / 2., new_height / 2.], dtype=np.float32)
      s = max(height, width) * 1.0
    else:
      inp_height = (new_height | self.opt.pad) + 1
      inp_width = (new_width | self.opt.pad) + 1
      c = np.array([new_width // 2, new_height // 2], dtype=np.float32)
      s = np.array([inp_width, inp_height], dtype=np.float32)

    trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height])
    resized_image = cv2.resize(image, (new_width, new_height))
    inp_image = cv2.warpAffine(
      resized_image, trans_input, (inp_width, inp_height),
      flags=cv2.INTER_LINEAR)
    inp_image = ((inp_image / 255. - self.mean) / self.std).astype(np.float32)

    images = inp_image.transpose(2, 0, 1).reshape(1, 3, inp_height, inp_width)
    if self.opt.flip_test:
      images = np.concatenate((images, images[:, :, :, ::-1]), axis=0)
    images = torch.from_numpy(images)
    meta = {'c': c, 's': s, 
            'out_height': inp_height // self.opt.down_ratio, 
            'out_width': inp_width // self.opt.down_ratio}
    return images, meta 
Developer: CaoWGG, Project: CenterNet-CondInst, Lines: 31, Source: base_detector.py

Example 9: __getitem__

# Required import: from utils import image  [as alias]
# or: from utils.image import get_affine_transform  [as alias]
# (this snippet also assumes: import cv2, import numpy as np, and the repo
#  helpers affine_transform, adjust_aspect_ratio and draw_gaussian)
def __getitem__(self, index):
    if index < 10 and self.split == 'train':
      self.idxs = np.random.choice(
        self.num_samples, self.num_samples, replace=False)
    img = self._load_image(index)
    gt_3d, pts, c, s = self._get_part_info(index)
    
    r = 0
    s = np.array([s, s])
    s = adjust_aspect_ratio(s, self.aspect_ratio, self.opt.fit_short_side)
    
    trans_input = get_affine_transform(
      c, s, r, [self.opt.input_h, self.opt.input_w])
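    # Note: this repo passes [input_h, input_w] (height first) and trains at
    # a square resolution, so the (w, h) vs (h, w) ordering is harmless here.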
    inp = cv2.warpAffine(img, trans_input, (self.opt.input_h, self.opt.input_w),
                         flags=cv2.INTER_LINEAR)
    inp = (inp.astype(np.float32) / 256. - self.mean) / self.std
    inp = inp.transpose(2, 0, 1)

    trans_output = get_affine_transform(
      c, s, r, [self.opt.output_h, self.opt.output_w])
    out = np.zeros((self.num_joints, self.opt.output_h, self.opt.output_w), 
                    dtype=np.float32)
    reg_target = np.zeros((self.num_joints, 1), dtype=np.float32)
    reg_ind = np.zeros((self.num_joints), dtype=np.int64)
    reg_mask = np.zeros((self.num_joints), dtype=np.uint8)
    pts_crop = np.zeros((self.num_joints, 2), dtype=np.int32)
    for i in range(self.num_joints):
      pt = affine_transform(pts[i, :2], trans_output).astype(np.int32)
      if pt[0] >= 0 and pt[1] >= 0 and pt[0] < self.opt.output_w \
        and pt[1] < self.opt.output_h:
        pts_crop[i] = pt
        out[i] = draw_gaussian(out[i], pt, self.opt.hm_gauss)
        reg_target[i] = pts[i, 2] / s[0] # assert not fit_short
        reg_ind[i] = pt[1] * self.opt.output_w * self.num_joints + \
                     pt[0] * self.num_joints + i # note transposed
        reg_mask[i] = 1
    
    meta = {'index' : self.idxs[index], 'center' : c, 'scale' : s,
            'gt_3d': gt_3d, 'pts_crop': pts_crop}

    ret = {'input': inp, 'target': out, 'meta': meta, 
           'reg_target': reg_target, 'reg_ind': reg_ind, 'reg_mask': reg_mask}
    
    return ret 
Developer: xingyizhou, Project: pytorch-pose-hg-3d, Lines: 46, Source: h36m_iccv.py
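
A quick check of the flattened regression index built above (the target layout is joint-last, shape (output_h, output_w, num_joints), hence the "note transposed" comment):

# Illustrative values only:
output_w, num_joints = 64, 16
x, y, j = 10, 5, 3
ind = y * output_w * num_joints + x * num_joints + j
assert ind == 5283    # 5 * 1024 + 10 * 16 + 3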

Example 10: __getitem__

# Required import: from utils import image  [as alias]
# or: from utils.image import get_affine_transform  [as alias]
# (this snippet also assumes: import cv2, import numpy as np, and the repo
#  helpers affine_transform, adjust_aspect_ratio and draw_gaussian)
def __getitem__(self, index):
    img = self._load_image(index)
    _, pts, c, s = self._get_part_info(index)
    r = 0
    
    if self.split == 'train':
      sf = self.opt.scale
      rf = self.opt.rotate
      s = s * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf)
      r = np.clip(np.random.randn()*rf, -rf*2, rf*2) \
          if np.random.random() <= 0.6 else 0
    s = min(s, max(img.shape[0], img.shape[1])) * 1.0
    s = np.array([s, s])
    s = adjust_aspect_ratio(s, self.aspect_ratio, self.opt.fit_short_side)

    flipped = (self.split == 'train' and np.random.random() < self.opt.flip)
    if flipped:
      img = img[:, ::-1, :]
      c[0] = img.shape[1] - 1 - c[0]
      pts[:, 0] = img.shape[1] - 1 - pts[:, 0]
      for e in self.shuffle_ref:
        pts[e[0]], pts[e[1]] = pts[e[1]].copy(), pts[e[0]].copy()

    trans_input = get_affine_transform(
      c, s, r, [self.opt.input_h, self.opt.input_w])
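    # As in Example 9, [input_h, input_w] is safe here because the input
    # resolution is square.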
    inp = cv2.warpAffine(img, trans_input, (self.opt.input_h, self.opt.input_w),
                         flags=cv2.INTER_LINEAR)
    inp = (inp.astype(np.float32) / 256. - self.mean) / self.std
    inp = inp.transpose(2, 0, 1)

    trans_output = get_affine_transform(
      c, s, r, [self.opt.output_h, self.opt.output_w])
    out = np.zeros((self.num_joints, self.opt.output_h, self.opt.output_w), 
                    dtype=np.float32)
    pts_crop = np.zeros((self.num_joints, 2), dtype=np.int32)
    for i in range(self.num_joints):
      if pts[i, 0] > 0 or pts[i, 1] > 0:
        pts_crop[i] = affine_transform(pts[i], trans_output)
        out[i] = draw_gaussian(out[i], pts_crop[i], self.opt.hm_gauss) 
    
    meta = {'index' : index, 'center' : c, 'scale' : s, \
            'pts_crop': pts_crop}
    return {'input': inp, 'target': out, 'meta': meta} 
Developer: xingyizhou, Project: pytorch-pose-hg-3d, Lines: 45, Source: mpii.py
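
A hypothetical way to consume this dataset, assuming the class is named MPII as in the source repo and opt carries the fields used above:

import torch.utils.data as data

loader = data.DataLoader(MPII(opt, split='train'),
                         batch_size=32, shuffle=True, num_workers=4)
batch = next(iter(loader))
inp, target = batch['input'], batch['target']
# inp:    (32, 3, input_h, input_w) normalized images
# target: (32, num_joints, output_h, output_w) gaussian heatmaps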


Note: The utils.image.get_affine_transform examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors, who retain copyright over the source code; consult each project's License before distributing or reusing it. Do not reproduce this article without permission.