

Python transforms.get_affine_transform Method Code Examples

This article collects typical usage examples of the Python method utils.transforms.get_affine_transform, gathered from open-source projects. If you are wondering what transforms.get_affine_transform does or how to call it, the curated examples below should help. You can also explore further usage examples from the utils.transforms module.


The sections below present 3 code examples of the transforms.get_affine_transform method, sorted by popularity.
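Before the examples, it helps to know what get_affine_transform typically does: given a crop center, a scale, a rotation, and an output size, it returns the 2x3 matrix that cv2.warpAffine and the affine_transform helper consume. The source of utils.transforms is not reproduced on this page, so the following is only a minimal sketch of the implementation common to pose-estimation repos; the 200-px scale convention and the inv flag are assumptions, not confirmed by the source.

import numpy as np
import cv2


def _rotate_point(pt, angle_rad):
    # rotate a 2D point around the origin
    sn, cs = np.sin(angle_rad), np.cos(angle_rad)
    return np.array([pt[0] * cs - pt[1] * sn, pt[0] * sn + pt[1] * cs])


def _get_3rd_point(a, b):
    # third vertex completing a right triangle with a and b
    direct = a - b
    return b + np.array([-direct[1], direct[0]], dtype=np.float32)


def get_affine_transform(center, scale, rot, output_size, inv=False):
    # MPII-style annotations commonly store scale in units of 200 px (assumption)
    scale = np.array([scale, scale]) if np.isscalar(scale) else np.array(scale)
    src_w = scale[0] * 200.0
    dst_w, dst_h = output_size[0], output_size[1]

    rot_rad = np.pi * rot / 180.0
    src_dir = _rotate_point(np.array([0.0, -src_w * 0.5]), rot_rad)
    dst_dir = np.array([0.0, -dst_w * 0.5])

    # three point correspondences (center, a rotated offset, and a third
    # point completing the triangle) fully determine the affine map
    src = np.zeros((3, 2), dtype=np.float32)
    dst = np.zeros((3, 2), dtype=np.float32)
    src[0, :] = center
    src[1, :] = np.array(center) + src_dir
    dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
    dst[1, :] = dst[0, :] + dst_dir
    src[2, :] = _get_3rd_point(src[0, :], src[1, :])
    dst[2, :] = _get_3rd_point(dst[0, :], dst[1, :])

    if inv:
        return cv2.getAffineTransform(np.float32(dst), np.float32(src))
    return cv2.getAffineTransform(np.float32(src), np.float32(dst))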

Example 1: get_image_info

# Required import: from utils import transforms [as alias]
# Or alternatively: from utils.transforms import get_affine_transform [as alias]
def get_image_info(self, index):

        info = self.gt_db[index]
        imgpath = info['image']
        image = cv2.imread(imgpath)[:, :, ::-1]  # cv2 loads BGR; flip to RGB
        joints = info['joints_3d']
        joints_vis = info['joints_3d_vis'][:, 0]

        c = info['center']
        s = info['scale']
        r = 0
        if self.train_flag:
            # random scale and rotation augmentation;
            # rotation is applied only 60% of the time
            sf = self.scale_factor
            rf = self.rotation_factor
            s = s * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf)
            r = np.clip(np.random.randn() * rf, -rf * 2, rf * 2) \
                if random.random() <= 0.6 else 0

        trans = get_affine_transform(c, s, r, (self.crop_size, self.crop_size))
        dst_image = cv2.warpAffine(image, trans,
            (self.crop_size, self.crop_size), flags=cv2.INTER_LINEAR)

        # map visible joints through the same crop transform
        for i in range(self.num_joints):
            if joints_vis[i] > 0.0:
                joints[i, 0:2] = affine_transform(joints[i, 0:2], trans)
        # append visibility and reorder joints from MPII to the 14-joint LSP layout
        kp2d = np.concatenate([joints[:, 0:2], joints_vis[:, None]], 1)[self.mpii_2_lsp14]

        result_dir = '{}/{}'.format(self.save_dir, os.path.basename(imgpath))
        metas = ('mpii', imgpath, result_dir, self.empty_kp3d, self.empty_kp3d, self.empty_param, self.empty_gr)

        return dst_image, kp2d, self.const_box, metas 
Developer: JDAI-CV, Project: DSD-SATN, Lines of code: 33, Source: mpii.py
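Both this example and the next also rely on an affine_transform helper that maps a single 2D joint through the matrix returned by get_affine_transform; it is imported but not reproduced on this page. A minimal sketch of the usual implementation in this family of repos, offered as an assumption:

import numpy as np


def affine_transform(pt, t):
    # apply a 2x3 affine matrix t to one 2D point pt
    # by appending a homogeneous coordinate of 1
    new_pt = np.array([pt[0], pt[1], 1.0])
    return np.dot(t, new_pt)[:2]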

Example 2: __getitem__

# Required import: from utils import transforms [as alias]
# Or alternatively: from utils.transforms import get_affine_transform [as alias]
def __getitem__(self, idx):
        db_rec = copy.deepcopy(self.db[idx])

        image_dir = 'images.zip@' if self.data_format == 'zip' else ''
        image_file = osp.join(self.root, db_rec['source'], image_dir, 'images',
                              db_rec['image'])
        if self.data_format == 'zip':
            from utils import zipreader
            data_numpy = zipreader.imread(
                image_file, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
        else:
            data_numpy = cv2.imread(
                image_file, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)

        joints = db_rec['joints_2d'].copy()
        joints_vis = db_rec['joints_vis'].copy()

        center = np.array(db_rec['center']).copy()
        scale = np.array(db_rec['scale']).copy()
        rotation = 0

        if self.is_train:
            # same scale/rotation augmentation as in Example 1;
            # rotation is applied only 60% of the time
            sf = self.scale_factor
            rf = self.rotation_factor
            scale = scale * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf)
            rotation = np.clip(np.random.randn() * rf, -rf * 2, rf * 2) \
                if random.random() <= 0.6 else 0

        trans = get_affine_transform(center, scale, rotation, self.image_size)
        input = cv2.warpAffine(
            data_numpy,
            trans, (int(self.image_size[0]), int(self.image_size[1])),
            flags=cv2.INTER_LINEAR)

        if self.transform:
            input = self.transform(input)

        # map visible joints through the crop transform; joints that land
        # outside the crop are marked invisible
        for i in range(self.num_joints):
            if joints_vis[i, 0] > 0.0:
                joints[i, 0:2] = affine_transform(joints[i, 0:2], trans)
                if (np.min(joints[i, :2]) < 0 or
                        joints[i, 0] >= self.image_size[0] or
                        joints[i, 1] >= self.image_size[1]):
                    joints_vis[i, :] = 0

        target, target_weight = self.generate_target(joints, joints_vis)

        target = torch.from_numpy(target)
        target_weight = torch.from_numpy(target_weight)

        meta = {
            'scale': scale,
            'center': center,
            'rotation': rotation,
            'joints_2d': db_rec['joints_2d'],
            'joints_2d_transformed': joints,
            'joints_vis': joints_vis,
            'source': db_rec['source']
        }
        return input, target, target_weight, meta 
Developer: microsoft, Project: multiview-human-pose-estimation-pytorch, Lines of code: 62, Source: joints_dataset.py
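The generate_target call is likewise not shown in this excerpt; in datasets of this kind it typically renders one Gaussian heatmap per visible joint plus a per-joint weight vector. A hedged sketch of that pattern follows; the heatmap_size, image_size, and sigma defaults are illustrative assumptions, not values from the source.

import numpy as np


def generate_target(joints, joints_vis, heatmap_size=(64, 64),
                    image_size=(256, 256), sigma=2):
    # render one Gaussian per visible joint; weight invisible joints to 0
    num_joints = joints.shape[0]
    target = np.zeros((num_joints, heatmap_size[1], heatmap_size[0]),
                      dtype=np.float32)
    target_weight = joints_vis[:, 0:1].astype(np.float32)
    stride = (image_size[0] / heatmap_size[0], image_size[1] / heatmap_size[1])

    ys, xs = np.mgrid[0:heatmap_size[1], 0:heatmap_size[0]]
    for i in range(num_joints):
        if target_weight[i] == 0:
            continue
        # joint position in heatmap coordinates
        mu_x = joints[i, 0] / stride[0]
        mu_y = joints[i, 1] / stride[1]
        if not (0 <= mu_x < heatmap_size[0] and 0 <= mu_y < heatmap_size[1]):
            target_weight[i] = 0
            continue
        target[i] = np.exp(-((xs - mu_x) ** 2 + (ys - mu_y) ** 2)
                           / (2 * sigma ** 2))
    return target, target_weight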

Example 3: compute_unary_term

# Required import: from utils import transforms [as alias]
# Or alternatively: from utils.transforms import get_affine_transform [as alias]
def compute_unary_term(heatmap, grid, bbox2D, cam, imgSize):
    """
    Args:
        heatmap: array of size (n * k * h * w)
                -n: number of views,  -k: number of joints
                -h: heatmap height,   -w: heatmap width
        grid: list of k ndarrays of size (nbins * 3)
                    -k: number of joints; 1 when the grid is shared in PSM
                    -nbins: number of bins in the grid
        bbox2D: bounding box on which heatmap is computed
    Returns:
        unary_of_all_joints: a list of ndarray of size nbins
    """

    n, k = heatmap.shape[0], heatmap.shape[1]
    h, w = heatmap.shape[2], heatmap.shape[3]
    nbins = grid[0].shape[0]

    unary_of_all_joints = []
    for j in range(k):
        unary = np.zeros(nbins)
        for c in range(n):
            # project the 3D grid bins into view c, then map the projected
            # points into heatmap coordinates via the crop transform
            grid_id = 0 if len(grid) == 1 else j
            xy = cameras.project_pose(grid[grid_id], cam[c])
            trans = get_affine_transform(bbox2D[c]['center'],
                                         bbox2D[c]['scale'], 0, imgSize)

            xy = affine_transform_pts(xy, trans) * np.array([w, h]) / imgSize
            # for i in range(nbins):
            #     xy[i] = affine_transform(xy[i], trans) * np.array([w, h]) / imgSize

            hmap = heatmap[c, j, :, :]
            # query points are (x, y): x indexes width (shape[1]), y indexes
            # height (shape[0]); hmap is transposed so its axis order matches
            # (the original arange calls had the two shapes swapped, which
            # only works for square heatmaps)
            point_x, point_y = np.arange(hmap.shape[1]), np.arange(
                hmap.shape[0])
            rgi = RegularGridInterpolator(
                points=[point_x, point_y],
                values=hmap.transpose(),
                bounds_error=False,
                fill_value=0)
            score = rgi(xy)
            unary = unary + np.reshape(score, newshape=unary.shape)
        unary_of_all_joints.append(unary)

    return unary_of_all_joints 
Developer: microsoft, Project: multiview-human-pose-estimation-pytorch, Lines of code: 47, Source: pictorial.py
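The per-view scoring above hinges on scipy's RegularGridInterpolator, which bilinearly samples the heatmap at real-valued (x, y) locations and returns fill_value outside the grid. A small self-contained example of the same access pattern, with toy values not taken from the source:

import numpy as np
from scipy.interpolate import RegularGridInterpolator

h, w = 4, 6
hmap = np.arange(h * w, dtype=np.float64).reshape(h, w)

# transpose so axis 0 of `values` is x (width) and axis 1 is y (height),
# matching query points given as (x, y) pairs
rgi = RegularGridInterpolator(points=[np.arange(w), np.arange(h)],
                              values=hmap.T,
                              bounds_error=False, fill_value=0)

queries = np.array([[0.0, 0.0],    # top-left corner -> hmap[0, 0]
                    [2.5, 1.5],    # between pixels -> bilinear blend
                    [10.0, 0.0]])  # outside the map -> fill_value 0
print(rgi(queries))  # [ 0.   11.5  0. ]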


Note: The utils.transforms.get_affine_transform method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, who retain copyright over the source code; consult each project's License before distributing or using it. Do not reproduce without permission.