

Python preprocess.get_transform Method Code Examples

This article collects typical usage examples of the preprocess.get_transform method in Python. If you are wondering how preprocess.get_transform is used in practice, the curated code examples below may help. You can also explore further usage examples from the preprocess module these snippets come from.


Five code examples of the preprocess.get_transform method are shown below, ordered by popularity by default.
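
Before the examples, here is a minimal usage sketch. Based on the calls in the examples below, it assumes that preprocess.get_transform(augment=False) returns a torchvision-style callable that converts a PIL image (or an H x W x C numpy array) into a normalized tensor; the input file name is a placeholder.

from PIL import Image
import preprocess

transform = preprocess.get_transform(augment=False)   # keyword used throughout the examples below
img = Image.open('example.png').convert('RGB')        # placeholder input image
tensor = transform(img)                               # e.g. a C x H x W float tensor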

Example 1: get_loader

# Required import: import preprocess [as alias]
# Or: from preprocess import get_transform [as alias]
def get_loader(self, force_update=False, override_settings=None, subset_indices=None):
    # Rebuild the transform, dataset and loader only when forced or when the
    # training regime reports an updated setting for the current epoch/step.
    if force_update or self.regime.update(self.epoch, self.steps):
        setting = self.get_setting()
        if override_settings is not None:
            setting.update(override_settings)
        self._transform = get_transform(**setting['transform'])
        setting['data'].setdefault('transform', self._transform)
        self._data = get_dataset(**setting['data'])
        if subset_indices is not None:
            self._data = Subset(self._data, subset_indices)
        if setting['other'].get('distributed', False):
            setting['loader']['sampler'] = DistributedSampler(self._data)
            setting['loader']['shuffle'] = None  # shuffling is delegated to the sampler
            # pin-memory currently broken for distributed
            setting['loader']['pin_memory'] = False
        self._sampler = setting['loader'].get('sampler', None)
        self._loader = torch.utils.data.DataLoader(
            self._data, **setting['loader'])
    return self._loader
Author: eladhoffer, Project: convNet.pytorch, Lines: 21, Source: data.py
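
A hedged usage sketch for the pattern in Example 1 (the object name data, num_epochs and the surrounding loop are illustrative assumptions, not part of the original repository snippet): when a DistributedSampler is installed, its epoch should be set each epoch so shuffling differs across epochs.

for epoch in range(num_epochs):                     # num_epochs is a placeholder
    loader = data.get_loader()                      # `data` assumed to expose get_loader() as above
    if getattr(data, '_sampler', None) is not None and hasattr(data._sampler, 'set_epoch'):
        data._sampler.set_epoch(epoch)              # needed for per-epoch shuffling with DistributedSampler
    for batch in loader:
        ...                                         # training step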

Example 2: __getitem__

# Required import: import preprocess [as alias]
# Or: from preprocess import get_transform [as alias]
def __getitem__(self, index):
    left = self.left[index]
    normal = self.normal[index]
    gt = self.gts[index]
    left_img = self.loader(left)
    w, h = left_img.size
    input1, mask1 = self.inloader(gt)
    sparse, mask = self.sloader(normal)

    # Draw one random offset so the image and all aligned per-pixel maps
    # share the same 256x512 crop window.
    th, tw = 256, 512
    x1 = random.randint(0, w - tw)
    y1 = random.randint(0, h - th)

    left_img = left_img.crop((x1, y1, x1 + tw, y1 + th))
    data_in1 = input1[y1:y1 + th, x1:x1 + tw, :]
    sparse_n = sparse[y1:y1 + th, x1:x1 + tw, :]
    mask = mask[y1:y1 + th, x1:x1 + tw, :]
    mask1 = mask1[y1:y1 + th, x1:x1 + tw, :]

    # Only the image and the cropped sparse map are converted by the transform;
    # the masks and data_in1 are returned without this conversion.
    processed = preprocess.get_transform(augment=False)
    left_img = processed(left_img)
    sparse_n = processed(sparse_n)
    return left_img, sparse_n, mask, mask1, data_in1
Author: JiaxiongQ, Project: DeepLiDAR, Lines: 26, Source: trainLoaderN.py
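
The crop logic in Example 2 can be factored into a small helper. This is an illustrative sketch (the function name and argument layout are not from the repository): a single random offset is drawn once so the PIL image and every aligned per-pixel map get the same 256x512 window.

import random

def paired_random_crop(img, maps, th=256, tw=512):
    # img is a PIL image; maps is a list of H x W x C numpy arrays aligned with it
    w, h = img.size
    x1 = random.randint(0, w - tw)
    y1 = random.randint(0, h - th)
    img = img.crop((x1, y1, x1 + tw, y1 + th))
    maps = [m[y1:y1 + th, x1:x1 + tw, ...] for m in maps]
    return img, maps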

Example 3: __getitem__

# Required import: import preprocess [as alias]
# Or: from preprocess import get_transform [as alias]
def __getitem__(self, index):
    up = self.up[index]
    down = self.down[index]
    disp_name = self.disp_name[index]
    equi_info = self.equi_infos

    up_img = self.loader(up)
    down_img = self.loader(down)
    disp = self.dploader(disp_name)
    # Append the precomputed equi_info map as extra channels of both views.
    up_img = np.concatenate([np.array(up_img), equi_info], 2)
    down_img = np.concatenate([np.array(down_img), equi_info], 2)

    if self.training:
        h, w = up_img.shape[0], up_img.shape[1]
        th, tw = 512, 256

        # vertical remaining cropping
        x1 = random.randint(0, w - tw)
        y1 = random.randint(0, h - th)
        up_img = up_img[y1:y1 + th, x1:x1 + tw, :]
        down_img = down_img[y1:y1 + th, x1:x1 + tw, :]
        disp = np.ascontiguousarray(disp, dtype=np.float32)
        disp = disp[y1:y1 + th, x1:x1 + tw]

        # preprocessing
        processed = preprocess.get_transform(augment=False)
        up_img = processed(up_img)
        down_img = processed(down_img)

        return up_img, down_img, disp
    else:
        disp = np.ascontiguousarray(disp, dtype=np.float32)

        processed = preprocess.get_transform(augment=False)
        up_img = processed(up_img)
        down_img = processed(down_img)

        return up_img, down_img, disp
Author: albert100121, Project: 360SD-Net, Lines: 40, Source: RGB_Loader.py
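
In Example 3 the precomputed equi_infos array is concatenated to each RGB view along the channel axis before cropping. As an assumption-labeled illustration (not the repository's actual construction), a per-row coordinate map of shape H x W x 1 could be built and attached like this:

import numpy as np

def append_row_coordinate(img_array, h, w):
    # img_array: H x W x 3 RGB array; the extra channel encodes each row's vertical position
    rows = np.linspace(-0.5, 0.5, h, dtype=np.float32)
    coord = np.tile(rows[:, None, None], (1, w, 1))   # shape (h, w, 1)
    return np.concatenate([img_array, coord], 2)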

Example 4: __getitem__

# Required import: import preprocess [as alias]
# Or: from preprocess import get_transform [as alias]
def __getitem__(self, index):
    left = self.left[index]
    input = self.input[index]
    sparse = self.sparse[index]
    left_img = self.loader(left)

    # Look up the per-sequence parameters (INSTICS) from the path's date string
    # and broadcast the three values into constant 256x512 channels.
    index_str = self.left[index].split('/')[-4][0:10]
    params_t = INSTICS[index_str]
    params = np.ones((256, 512, 3), dtype=np.float32)
    params[:, :, 0] = params[:, :, 0] * params_t[0]
    params[:, :, 1] = params[:, :, 1] * params_t[1]
    params[:, :, 2] = params[:, :, 2] * params_t[2]

    h, w, c = left_img.shape
    input1 = self.inloader(input)
    sparse, mask = self.sloader(sparse)

    # Random 256x512 crop shared by the image and all aligned maps.
    th, tw = 256, 512
    x1 = random.randint(0, w - tw)
    y1 = random.randint(0, h - th)
    mask = np.reshape(mask, [sparse.shape[0], sparse.shape[1], 1]).astype(np.float32)
    params = np.reshape(params, [256, 512, 3]).astype(np.float32)

    left_img = left_img[y1:y1 + th, x1:x1 + tw, :]
    data_in1 = input1[y1:y1 + th, x1:x1 + tw, :]
    sparse = sparse[y1:y1 + th, x1:x1 + tw, :]
    mask = mask[y1:y1 + th, x1:x1 + tw, :]
    processed = preprocess.get_transform(augment=False)

    left_img = processed(left_img)
    sparse = processed(sparse)
    mask = processed(mask)

    return left_img, data_in1, sparse, mask, params
Author: JiaxiongQ, Project: DeepLiDAR, Lines: 36, Source: trainLoader.py
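
The constant-map construction in Example 4 can be written more compactly with broadcasting; the helper name below is illustrative, while the 256x512 shape and the three-value params_t come from the example itself.

import numpy as np

def make_params_map(params_t, h=256, w=512):
    # Broadcast the three per-sequence values into constant H x W channels.
    params = np.ones((h, w, 3), dtype=np.float32)
    params *= np.asarray(params_t[:3], dtype=np.float32)
    return params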

Example 5: __getitem__

# Required import: import preprocess [as alias]
# Or: from preprocess import get_transform [as alias]
def __getitem__(self, index):
    left = self.left[index]
    right = self.right[index]
    disp_L = self.disp_L[index]

    left_img = self.loader(left)
    right_img = self.loader(right)
    dataL = self.dploader(disp_L)

    if self.training:
        w, h = left_img.size
        th, tw = 256, 512

        # Random 256x512 crop applied identically to both views and the
        # ground-truth disparity.
        x1 = random.randint(0, w - tw)
        y1 = random.randint(0, h - th)

        left_img = left_img.crop((x1, y1, x1 + tw, y1 + th))
        right_img = right_img.crop((x1, y1, x1 + tw, y1 + th))

        # KITTI disparity PNGs are uint16 scaled by 256.
        dataL = np.ascontiguousarray(dataL, dtype=np.float32) / 256
        dataL = dataL[y1:y1 + th, x1:x1 + tw]

        processed = preprocess.get_transform(augment=False)
        left_img = processed(left_img)
        right_img = processed(right_img)

        return left_img, right_img, dataL
    else:
        # At test time, crop the bottom-right 1232x368 region of the full frame.
        w, h = left_img.size

        left_img = left_img.crop((w - 1232, h - 368, w, h))
        right_img = right_img.crop((w - 1232, h - 368, w, h))
        w1, h1 = left_img.size

        dataL = dataL.crop((w - 1232, h - 368, w, h))
        dataL = np.ascontiguousarray(dataL, dtype=np.float32) / 256

        processed = preprocess.get_transform(augment=False)
        left_img = processed(left_img)
        right_img = processed(right_img)

        return left_img, right_img, dataL
Author: JiaRenChang, Project: PSMNet, Lines: 45, Source: KITTILoader.py
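
A brief note on the division by 256 in Example 5: KITTI ground-truth disparity maps are stored as uint16 PNGs scaled by 256, so the loaded image is converted to float and divided by 256 to recover real-valued disparities (pixels with value 0 are invalid). In code, with disp_image standing in for whatever dploader returns:

import numpy as np

disparity = np.ascontiguousarray(disp_image, dtype=np.float32) / 256   # disparity in pixels; 0 marks invalid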


Note: The preprocess.get_transform examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not republish without permission.