

Python transforms.get_affine_transform Method Code Examples

This article collects typical usage examples of the Python method utils.transforms.get_affine_transform, drawn from open-source projects. If you are wondering what transforms.get_affine_transform does or how to call it, the curated examples below should help. You can also explore other usage examples from the utils.transforms module.


Three code examples of transforms.get_affine_transform are shown below, ordered by popularity.
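
All three examples follow the same pattern: build a 2x3 affine matrix from a person's center, scale, and rotation, then apply that one matrix to both the image pixels (via cv2.warpAffine) and the 2D keypoints (via affine_transform), so the two stay aligned. Below is a minimal sketch of that shared call pattern; the image path and center/scale values are made up for illustration, and the utils.transforms implementation itself is not shown on this page.

import cv2
import numpy as np
from utils.transforms import get_affine_transform, affine_transform

image = cv2.imread('person.jpg')           # hypothetical input image
center = np.array([320.0, 240.0])          # crop center (x, y), made up
scale = np.array([1.0, 1.0])               # crop scale, made up
trans = get_affine_transform(center, scale, 0, (256, 256))

# Warp the pixels and any 2D keypoint with the same matrix.
crop = cv2.warpAffine(image, trans, (256, 256), flags=cv2.INTER_LINEAR)
keypoint = affine_transform(np.array([300.0, 250.0]), trans)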

Example 1: get_image_info

# Required import: from utils import transforms [as alias]
# Or: from utils.transforms import get_affine_transform [as alias]
# (This excerpt also assumes: import os, random, cv2, numpy as np,
#  and affine_transform from utils.transforms.)
def get_image_info(self, index):
    info = self.gt_db[index]
    imgpath = info['image']
    image = cv2.imread(imgpath)[:, :, ::-1]  # BGR -> RGB
    joints = info['joints_3d']
    joints_vis = info['joints_3d_vis'][:, 0]

    c = info['center']
    s = info['scale']
    r = 0
    if self.train_flag:
        # Random scale/rotation augmentation; rotation is applied
        # with probability 0.6.
        sf = self.scale_factor
        rf = self.rotation_factor
        s = s * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf)
        r = np.clip(np.random.randn() * rf, -rf * 2, rf * 2) \
            if random.random() <= 0.6 else 0

    trans = get_affine_transform(c, s, r, (self.crop_size, self.crop_size))
    dst_image = cv2.warpAffine(image, trans,
                               (self.crop_size, self.crop_size),
                               flags=cv2.INTER_LINEAR)

    # Apply the same transform to the visible joints, then reorder
    # the MPII joints into the 14-joint LSP convention.
    for i in range(self.num_joints):
        if joints_vis[i] > 0.0:
            joints[i, 0:2] = affine_transform(joints[i, 0:2], trans)
    kp2d = np.concatenate([joints[:, 0:2], joints_vis[:, None]], 1)[self.mpii_2_lsp14]

    result_dir = '{}/{}'.format(self.save_dir, os.path.basename(imgpath))
    metas = ('mpii', imgpath, result_dir, self.empty_kp3d, self.empty_kp3d,
             self.empty_param, self.empty_gr)

    return dst_image, kp2d, self.const_box, metas
Developer: JDAI-CV | Project: DSD-SATN | Lines: 33 | Source file: mpii.py
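
The affine_transform helper used on the joints above is imported from utils.transforms but not shown on this page. In most pose-estimation codebases it simply applies the 2x3 matrix to the point in homogeneous coordinates; here is a minimal sketch under that assumption, not necessarily the exact utils.transforms code:

import numpy as np

def affine_transform(pt, t):
    # Apply a 2x3 affine matrix t to a 2D point pt: [x', y'] = t @ [x, y, 1].
    new_pt = np.array([pt[0], pt[1], 1.0])
    return (t @ new_pt)[:2]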

Example 2: __getitem__

# Required import: from utils import transforms [as alias]
# Or: from utils.transforms import get_affine_transform [as alias]
# (This excerpt also assumes: import copy, random, cv2, torch,
#  numpy as np, os.path as osp, and affine_transform from utils.transforms.)
def __getitem__(self, idx):
    db_rec = copy.deepcopy(self.db[idx])

    image_dir = 'images.zip@' if self.data_format == 'zip' else ''
    image_file = osp.join(self.root, db_rec['source'], image_dir, 'images',
                          db_rec['image'])
    if self.data_format == 'zip':
        from utils import zipreader
        data_numpy = zipreader.imread(
            image_file, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
    else:
        data_numpy = cv2.imread(
            image_file, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)

    joints = db_rec['joints_2d'].copy()
    joints_vis = db_rec['joints_vis'].copy()

    center = np.array(db_rec['center']).copy()
    scale = np.array(db_rec['scale']).copy()
    rotation = 0

    if self.is_train:
        # Same scale/rotation jitter as in Example 1.
        sf = self.scale_factor
        rf = self.rotation_factor
        scale = scale * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf)
        rotation = np.clip(np.random.randn() * rf, -rf * 2, rf * 2) \
            if random.random() <= 0.6 else 0

    trans = get_affine_transform(center, scale, rotation, self.image_size)
    input = cv2.warpAffine(
        data_numpy,
        trans, (int(self.image_size[0]), int(self.image_size[1])),
        flags=cv2.INTER_LINEAR)

    if self.transform:
        input = self.transform(input)

    # Transform the joints with the same matrix and mark any joint
    # that falls outside the crop as invisible.
    for i in range(self.num_joints):
        if joints_vis[i, 0] > 0.0:
            joints[i, 0:2] = affine_transform(joints[i, 0:2], trans)
            if (np.min(joints[i, :2]) < 0 or
                    joints[i, 0] >= self.image_size[0] or
                    joints[i, 1] >= self.image_size[1]):
                joints_vis[i, :] = 0

    target, target_weight = self.generate_target(joints, joints_vis)

    target = torch.from_numpy(target)
    target_weight = torch.from_numpy(target_weight)

    meta = {
        'scale': scale,
        'center': center,
        'rotation': rotation,
        'joints_2d': db_rec['joints_2d'],
        'joints_2d_transformed': joints,
        'joints_vis': joints_vis,
        'source': db_rec['source']
    }
    return input, target, target_weight, meta
Developer: microsoft | Project: multiview-human-pose-estimation-pytorch | Lines: 62 | Source file: joints_dataset.py
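
The heatmap targets come from self.generate_target, which is not shown here. A common choice in this family of codebases is an unnormalized 2D Gaussian per joint on a downscaled grid; the sketch below (generate_gaussian_target, with made-up heatmap_size/image_size/sigma defaults) is one plausible stand-in, not the project's actual code:

import numpy as np

def generate_gaussian_target(joints, joints_vis, heatmap_size=(64, 64),
                             image_size=(256, 256), sigma=2):
    # One (H, W) heatmap per joint; weight 0 silences invisible joints.
    num_joints = joints.shape[0]
    stride = image_size[0] / heatmap_size[0]  # assumes equal x/y stride
    target = np.zeros((num_joints, heatmap_size[1], heatmap_size[0]),
                      dtype=np.float32)
    target_weight = joints_vis[:, 0:1].astype(np.float32)

    for i in range(num_joints):
        if target_weight[i, 0] == 0:
            continue
        mu_x, mu_y = joints[i, 0] / stride, joints[i, 1] / stride
        xs = np.arange(heatmap_size[0])
        ys = np.arange(heatmap_size[1])[:, None]
        # Unnormalized 2D Gaussian centered on the downscaled joint.
        target[i] = np.exp(-((xs - mu_x) ** 2 + (ys - mu_y) ** 2)
                           / (2 * sigma ** 2))
    return target, target_weight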

Example 3: compute_unary_term

# Required import: from utils import transforms [as alias]
# Or: from utils.transforms import get_affine_transform [as alias]
# (This excerpt also assumes: numpy as np,
#  scipy.interpolate.RegularGridInterpolator, a cameras module, and an
#  affine_transform_pts helper that transforms a batch of points.)
def compute_unary_term(heatmap, grid, bbox2D, cam, imgSize):
    """
    Args:
        heatmap: array of size (n, k, h, w)
                 -n: number of views,  -k: number of joints
                 -h: heatmap height,   -w: heatmap width
        grid: list of k ndarrays of size (nbins, 3)
                 -k: number of joints; 1 when the grid is shared in PSM
                 -nbins: number of bins in the grid
        bbox2D: per-view bounding boxes (center/scale) on which the
                heatmaps were computed
        cam: per-view camera parameters used to project the 3D grid
        imgSize: (width, height) of the cropped input image
    Returns:
        unary_of_all_joints: a list of k ndarrays of size nbins
    """

    n, k = heatmap.shape[0], heatmap.shape[1]
    h, w = heatmap.shape[2], heatmap.shape[3]
    nbins = grid[0].shape[0]

    unary_of_all_joints = []
    for j in range(k):
        unary = np.zeros(nbins)
        for c in range(n):

            # Project the 3D grid into view c, then map the projected
            # points into heatmap coordinates via the crop transform.
            grid_id = 0 if len(grid) == 1 else j
            xy = cameras.project_pose(grid[grid_id], cam[c])
            trans = get_affine_transform(bbox2D[c]['center'],
                                         bbox2D[c]['scale'], 0, imgSize)

            xy = affine_transform_pts(xy, trans) * np.array([w, h]) / imgSize
            # for i in range(nbins):
            #     xy[i] = affine_transform(xy[i], trans) * np.array([w, h]) / imgSize

            hmap = heatmap[c, j, :, :]
            # Grid axes must match the transposed (w, h) heatmap:
            # x (length w) first, then y (length h).
            point_x, point_y = np.arange(w), np.arange(h)
            rgi = RegularGridInterpolator(
                points=[point_x, point_y],
                values=hmap.transpose(),
                bounds_error=False,
                fill_value=0)
            score = rgi(xy)
            unary = unary + np.reshape(score, newshape=unary.shape)
        unary_of_all_joints.append(unary)

    return unary_of_all_joints
Developer: microsoft | Project: multiview-human-pose-estimation-pytorch | Lines: 47 | Source file: pictorial.py
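
The per-view inner loop above boils down to bilinear sampling of a heatmap at fractional (x, y) positions, with out-of-bounds projections scoring zero. Here is a tiny self-contained demonstration of that sampling step; the heatmap values are made up for illustration:

import numpy as np
from scipy.interpolate import RegularGridInterpolator

h, w = 4, 6
hmap = np.arange(h * w, dtype=np.float64).reshape(h, w)  # toy heatmap

# After transposing the (h, w) heatmap to (w, h), the first grid axis
# is x and the second is y, matching (x, y) query points.
rgi = RegularGridInterpolator(points=[np.arange(w), np.arange(h)],
                              values=hmap.T,
                              bounds_error=False, fill_value=0)

xy = np.array([[2.5, 1.5],    # between columns 2-3, rows 1-2
               [10.0, 0.0]])  # outside the map -> fill_value 0
print(rgi(xy))                # [11.5  0. ]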


Note: The utils.transforms.get_affine_transform examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors; consult each project's license before distributing or using the code. Do not reproduce this article without permission.