

Python cv2.IMREAD_ANYDEPTH attribute code examples

This article collects typical usage examples of the Python attribute cv2.IMREAD_ANYDEPTH. If you are wondering what cv2.IMREAD_ANYDEPTH is for, how to use it, or what it looks like in real code, the curated examples below should help; you can also explore further usage examples from the cv2 module.


The following presents 15 code examples of the cv2.IMREAD_ANYDEPTH attribute, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
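Before the project examples, here is a minimal standalone sketch (not taken from any of the projects below) of what the flag changes. By default, cv2.imread converts everything to 8-bit BGR; combining cv2.IMREAD_ANYDEPTH with cv2.IMREAD_ANYCOLOR keeps the file's native bit depth (for example uint16 depth maps), which is exactly what most of the examples below rely on. The file name depth_16bit.png is a placeholder.

import cv2
import numpy as np

path = 'depth_16bit.png'  # placeholder path to a 16-bit PNG

# IMREAD_ANYDEPTH keeps the original bit depth (e.g. uint16),
# IMREAD_ANYCOLOR keeps the original channel layout.
img_raw = cv2.imread(path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
if img_raw is None:
    raise IOError('Could not read {}'.format(path))

# The default flag (IMREAD_COLOR) converts to 8-bit, 3-channel BGR instead.
img_8bit = cv2.imread(path)

print(img_raw.dtype, img_8bit.dtype)  # e.g. uint16 uint8
print(int(np.max(img_raw)))           # values above 255 are preserved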

Example 1: read_gated_image

# Required module: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYDEPTH [as alias]
def read_gated_image(base_dir, gta_pass, img_id, data_type, num_bits=10, scale_images=False,
                     scaled_img_width=None, scaled_img_height=None,
                     normalize_images=False):
    gated_imgs = []
    normalizer = 2 ** num_bits - 1.

    for gate_id in range(3):
        gate_dir = os.path.join(base_dir, gta_pass, 'gated%d_10bit' % gate_id)
        img = cv2.imread(os.path.join(gate_dir, img_id + '.png'), cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
        if data_type == 'real':
            img = img[crop_size:(img.shape[0] - crop_size), crop_size:(img.shape[1] - crop_size)]
            img = img.copy()
            img[img > 2 ** 10 - 1] = normalizer
        img = np.float32(img / normalizer)
        gated_imgs.append(np.expand_dims(img, axis=2))

    img = np.concatenate(gated_imgs, axis=2)
    if normalize_images:
        mean = np.mean(img, axis=2, keepdims=True)
        std = np.std(img, axis=2, keepdims=True)
        img = (img - mean) / (std + np.finfo(float).eps)
    if scale_images:
        img = cv2.resize(img, dsize=(scaled_img_width, scaled_img_height), interpolation=cv2.INTER_AREA)
    return np.expand_dims(img, axis=0) 
Developer: gruberto, Project: Gated2Depth, Lines of code: 26, Source file: dataset_util.py

Example 2: imread

# Required module: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYDEPTH [as alias]
def imread(filename):
    """Reads an image file from disk into a Numpy Array (OpenCV view).

    Args:
        filename (str): Name of the image file (.pfm, .dng, or any format readable by OpenCV).
    """
    filename = process(filename)
    ext = os.path.splitext(filename)[1]
    if ext.lower() == '.pfm':
        return load_pfm(filename)
    elif ext.lower() == '.dng':
        return load_dng(filename)
    else:
        loaded = cv2.imread(filename, flags=cv2.IMREAD_ANYDEPTH + cv2.IMREAD_COLOR)
        if loaded is None:
            raise IOError('Could not read {0}'.format(filename))
        else:
            return loaded 
Developer: dmarnerides, Project: pydlt, Lines of code: 20, Source file: io.py

Example 3: __getitem__

# Required module: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYDEPTH [as alias]
def __getitem__(self, index):
        im_name = self.files[self.split][index]                # 1/824_8-cp_Page_0503-7Nw0001
        im_path = pjoin(self.root, 'img',  im_name + '.png')  
        lbl_path=pjoin(self.root, 'wc', im_name + '.exr')
        im = m.imread(im_path,mode='RGB')
        im = np.array(im, dtype=np.uint8)
        lbl = cv2.imread(lbl_path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
        lbl = np.array(lbl, dtype=float)
        if 'val' in self.split:
            im, lbl = tight_crop(im / 255.0, lbl)
        if self.augmentations:          # augmentation is only used for training; defaults to False for validation
            tex_id=random.randint(0,len(self.txpths)-1)
            txpth=self.txpths[tex_id] 
            tex=cv2.imread(os.path.join(self.root[:-7],txpth)).astype(np.uint8)
            bg=cv2.resize(tex,self.img_size,interpolation=cv2.INTER_NEAREST)
            im,lbl=data_aug(im,lbl,bg)
        if self.is_transform:
            im, lbl = self.transform(im, lbl)
        return im, lbl 
Developer: cvlab-stonybrook, Project: DewarpNet, Lines of code: 21, Source file: doc3dwc_loader.py

Example 4: run_tiff

# Required module: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYDEPTH [as alias]
def run_tiff(file_path, progress_percent):
    angle = 0
    # cv2.imreadmulti reads every page of a multi-page TIFF at its native bit depth
    ret, video = cv2.imreadmulti(file_path, flags=cv2.IMREAD_ANYDEPTH)
    video_labeled, table = [], []
    idx = 1
    for frame in video:
        img_label,angle_new=process(frame)
        angle_new = float('{0:.2f}'.format(angle_new))
        rotation=cal_rotation(angle,angle_new)
        rotation = float('{0:.2f}'.format(rotation))
        table.append([angle,rotation,angle_new])
        video_labeled.append(img_label)
        angle=angle_new
        idx+=1
        progress_percent['value']=idx/len(video)*100
        # print(table[-1])
        # cv2.imshow('img',cv2.resize(img_label,(512,512)))
        # if cv2.waitKey(0) & 0xFF == ord('q'):
        #     break
    return video_labeled,table 
Developer: deepdiy, Project: deepdiy, Lines of code: 23, Source file: process.py

Example 5: read_tiff

# Required module: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYDEPTH [as alias]
def read_tiff(self,*args):
		ret,self.stack = cv2.imreadmulti(self.path,flags=cv2.IMREAD_ANYDEPTH)
		self.frame_count = len(self.stack) 
Developer: deepdiy, Project: deepdiy, Lines of code: 5, Source file: image_stack_capture.py

Example 6: imread_color

# Required module: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYDEPTH [as alias]
def imread_color(self, path):
        img = cv.imread(path, cv.IMREAD_COLOR | cv.IMREAD_ANYDEPTH)/255.
        b, g, r = cv.split(img)
        img_rgb = cv.merge([r, g, b])
        return img_rgb 
Developer: Lvfeifan, Project: MBLLEN, Lines of code: 7, Source file: data_load.py

Example 7: imread_color

# Required module: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYDEPTH [as alias]
def imread_color(path):
    img = cv.imread(path, cv.IMREAD_COLOR | cv.IMREAD_ANYDEPTH) / 255.
    b, g, r = cv.split(img)
    img_rgb = cv.merge([r, g, b])
    return img_rgb
    # return scipy.misc.imread(path, mode='RGB').astype(np.float) / 255. 
Developer: Lvfeifan, Project: MBLLEN, Lines of code: 8, Source file: utls.py

Example 8: main

# Required module: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYDEPTH [as alias]
def main():

    input_depth_dir = os.path.expanduser(
        '~/Kitti/depth/val_selection_cropped/velodyne_raw')

    images_to_use = sorted(glob.glob(input_depth_dir + '/*'))

    # Process depth images
    num_images = len(images_to_use)
    all_sparsities = np.zeros(num_images)

    for i in range(num_images):

        # Print progress
        sys.stdout.write('\rProcessing index {} / {}'.format(i, num_images - 1))
        sys.stdout.flush()

        depth_image_path = images_to_use[i]

        # Load depth from image
        depth_image = cv2.imread(depth_image_path, cv2.IMREAD_ANYDEPTH)

        # Divide by 256
        depth_map = depth_image / 256.0

        num_valid_pixels = len(np.where(depth_map > 0.0)[0])
        num_pixels = depth_image.shape[0] * depth_image.shape[1]

        sparsity = num_valid_pixels / (num_pixels * 2/3)
        all_sparsities[i] = sparsity

    print('')
    print('Sparsity')
    print('Min:   ', np.amin(all_sparsities))
    print('Max:   ', np.amax(all_sparsities))
    print('Mean:  ', np.mean(all_sparsities))
    print('Median:  ', np.median(all_sparsities))

    plt.hist(all_sparsities, bins=20)
    plt.show() 
Developer: kujason, Project: ip_basic, Lines of code: 42, Source file: dataset_sparsity.py

Example 9: read_image_scale

# Required module: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYDEPTH [as alias]
def read_image_scale(image_path, scale):
    img = cv2.imread(image_path, cv2.IMREAD_ANYDEPTH)
    if img is None:
        raise IOError("could not find {}".format(image_path))
    img = img.astype(np.float32) / scale
    return img 
Developer: xy-guo, Project: Learning-Monocular-Depth-by-Stereo, Lines of code: 8, Source file: distill_dataset.py

Example 10: process_frame

# Required module: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYDEPTH [as alias]
def process_frame(image_path: str) -> Tuple[np.ndarray, np.ndarray, str, str]:
    """
    fix given frame
    :param image_path: path to frame which should be fixed
    :return: fixed frame
    """
    seq_no = image_path.split('/')[-3]
    img_no = image_path.split('/')[-1].split('.')[0]

    depth_path = f"{depth_root}/{seq_no}/clone/{img_no}.png"
    semantic_path = f"{labels_root}/{seq_no}/clone/{img_no}.png"

    # BGR -> RGB
    rgb_map = cv2.imread(image_path)[:, :, (2, 1, 0)]

    # convert centimeters to meters
    depth_map = cv2.imread(depth_path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH) / 100.

    # semantic image
    semantic_map = cv2.imread(semantic_path)[:, :, (2, 1, 0)]
    label_map = np.apply_along_axis(lambda r: rgb2label[tuple(r)], 2, semantic_map)

    # backprojection to camera space
    x3 = (xv - center_x) / focal_x * depth_map
    y3 = (yv - center_y) / focal_y * depth_map

    erg = np.stack((depth_map, -x3, -y3), axis=-1).reshape((-1, 3))
    erg = np.hstack((erg, rgb_map.reshape(-1, 3), label_map.reshape(-1, 1)))

    # delete sky points
    erg = distance_cutoff(erg, g_cutoff)

    if g_is_v1:
        return None, erg, seq_no, img_no
    else:
        erg = remove_car_shadows(erg, img_no, g_bb_eps)
        worldspace = transform2worldspace(erg, img_no)
        return worldspace, erg, seq_no, img_no 
Developer: VisualComputingInstitute, Project: vkitti3D-dataset, Lines of code: 40, Source file: create_npy.py

Example 11: read_disparity

# Required module: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYDEPTH [as alias]
def read_disparity(disp_dir, img_idx):
    """Reads in Disparity file from Kitti Dataset.

        Keyword Arguments:
        ------------------
        disp_dir : Str
                    Directory of the disparity files.

        img_idx : Int
                  Index of the image.

        Returns:
        --------
        disp_img : Numpy Array
                   Contains the disparity image.

        [] : if file is not found

        """
    disp_path = disp_dir + "/%06d_left_disparity.png" % img_idx

    if os.path.exists(disp_path):
        disp_img = cv2.imread(disp_path, cv2.IMREAD_ANYDEPTH)
        return disp_img
    else:
        return [] 
Developer: Zengyi-Qin, Project: TLNet, Lines of code: 28, Source file: calib_utils.py

Example 12: _read_prediction_py

# Required module: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYDEPTH [as alias]
def _read_prediction_py(id, filled):
    depth_path = os.path.join(cfg.TMP_DIR, "%d.png"%id)
    if not os.path.isfile(depth_path):
        return filled

    depth = cv2.imread(depth_path, cv2.IMREAD_ANYDEPTH)
    return (depth/5000.0).astype(np.float32) 
Developer: princeton-vl, Project: DeepV2D, Lines of code: 9, Source file: data_layer.py

Example 13: __getitem__

# Required module: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYDEPTH [as alias]
def __getitem__(self, index):
        data_blob = self.dataset_index[index]
        num_frames = data_blob['n_frames']
        num_samples = self.n_frames

        inds = np.random.choice(num_frames, num_samples, replace=False)
        keyframe_index = inds[0]

        images = []
        for i in inds:
            image_file = data_blob['images'][i]
            images.append(cv2.imread(image_file))

        depth_file = data_blob['depths'][keyframe_index]
        depth = cv2.imread(depth_file, cv2.IMREAD_ANYDEPTH)
        depth = (depth.astype(np.float32)) / 5000.0
        filled = fill_depth(depth)
        
        frameid = data_blob['ids'][keyframe_index]
        frameid = np.int32(frameid)
    
        poses = []
        for i in inds:
            pose_vec = data_blob['poses'][i]
            pose_mat = pose_vec2mat(pose_vec)
            poses.append(np.linalg.inv(pose_mat))

        images = np.stack(images, axis=0).astype(np.uint8)
        poses = np.stack(poses, axis=0).astype(np.float32)

        kvec = intrinsics.copy()
        return images, poses, depth, filled, filled, kvec, frameid 
Developer: princeton-vl, Project: DeepV2D, Lines of code: 34, Source file: nyuv2.py

Example 14: __getitem__

# Required module: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYDEPTH [as alias]
def __getitem__(self, index):
        data_blob = self.dataset_index[index]
        num_frames = data_blob['n_frames']
        num_samples = self.n_frames - 1

        frameid = data_blob['id']
        keyframe_index = num_frames // 2

        inds = np.arange(num_frames)
        inds = inds[~np.equal(inds, keyframe_index)]
        
        inds = np.random.choice(inds, num_samples, replace=False)
        inds = [keyframe_index] + inds.tolist()

        images = []
        for i in inds:
            image = cv2.imread(data_blob['images'][i])
            image = cv2.resize(image, (640, 480))
            images.append(image)

        poses = []
        for i in inds:
            poses.append(data_blob['poses'][i])

        images = np.stack(images, axis=0).astype(np.uint8)
        poses = np.stack(poses, axis=0).astype(np.float32)

        depth_file = data_blob['depth']
        depth = cv2.imread(depth_file, cv2.IMREAD_ANYDEPTH)
        
        depth = (depth.astype(np.float32)) / 1000.0
        filled = fill_depth(depth)
        
        K = data_blob['intrinsics']
        kvec = np.stack([K[0,0], K[1,1], K[0,2], K[1,2]], axis=0)

        return images, poses, depth, filled, filled, kvec, frameid 
Developer: princeton-vl, Project: DeepV2D, Lines of code: 39, Source file: scannet.py

Example 15: decode_loaded

# Required module: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYDEPTH [as alias]
def decode_loaded(x):
    """Decodes an image stored in a Numpy Byte (uint8) Array using OpenCV.

    Args:
        x: The Numpy Byte (uint8) Array.
    """
    return cv2.imdecode(x, flags=cv2.IMREAD_ANYDEPTH + cv2.IMREAD_COLOR) 
Developer: dmarnerides, Project: pydlt, Lines of code: 9, Source file: io.py


Note: The cv2.IMREAD_ANYDEPTH attribute examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by individual developers; copyright of the source code remains with the original authors. Please refer to each project's license before distributing or using the code, and do not reproduce this article without permission.