

Python cv2.IMREAD_ANYDEPTH attribute code examples

This article collects typical usage examples of the cv2.IMREAD_ANYDEPTH attribute in Python. If you are wondering what exactly the cv2.IMREAD_ANYDEPTH attribute does, how to use it, or what real-world code that uses it looks like, the selected code examples below may help. You can also explore other usage examples from the cv2 module that this attribute belongs to.


The following presents 15 code examples of the cv2.IMREAD_ANYDEPTH attribute, sorted by popularity by default.
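Before the individual examples, a minimal sketch of what the flag does may be helpful (the file name 'depth_16bit.png' below is a hypothetical placeholder, not taken from any of the projects): cv2.IMREAD_ANYDEPTH tells cv2.imread to keep the source bit depth (for example a 16-bit PNG or a float EXR) instead of converting the result to 8-bit, and it is typically combined with cv2.IMREAD_ANYCOLOR or cv2.IMREAD_COLOR to control how channels are handled.

import cv2
import numpy as np

# Read a 16-bit depth map without down-converting it to 8-bit.
# 'depth_16bit.png' is a hypothetical file used only for illustration.
depth = cv2.imread('depth_16bit.png', cv2.IMREAD_ANYDEPTH)
if depth is None:
    raise IOError('Could not read depth_16bit.png')
print(depth.dtype, depth.shape)  # typically uint16, (H, W)

# Many of the examples below then rescale the raw integer values,
# e.g. the KITTI convention of depth in meters = value / 256.0 (see Example 8).
depth_m = depth.astype(np.float32) / 256.0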

Example 1: read_gated_image

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYDEPTH [as alias]
def read_gated_image(base_dir, gta_pass, img_id, data_type, num_bits=10, scale_images=False,
                     scaled_img_width=None, scaled_img_height=None,
                     normalize_images=False):
    gated_imgs = []
    normalizer = 2 ** num_bits - 1.

    for gate_id in range(3):
        gate_dir = os.path.join(base_dir, gta_pass, 'gated%d_10bit' % gate_id)
        img = cv2.imread(os.path.join(gate_dir, img_id + '.png'), cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
        if data_type == 'real':
            img = img[crop_size:(img.shape[0] - crop_size), crop_size:(img.shape[1] - crop_size)]
            img = img.copy()
            img[img > 2 ** 10 - 1] = normalizer
        img = np.float32(img / normalizer)
        gated_imgs.append(np.expand_dims(img, axis=2))

    img = np.concatenate(gated_imgs, axis=2)
    if normalize_images:
        mean = np.mean(img, axis=2, keepdims=True)
        std = np.std(img, axis=2, keepdims=True)
        img = (img - mean) / (std + np.finfo(float).eps)
    if scale_images:
        img = cv2.resize(img, dsize=(scaled_img_width, scaled_img_height), interpolation=cv2.INTER_AREA)
    return np.expand_dims(img, axis=0) 
Developer: gruberto, Project: Gated2Depth, Lines of code: 26, Source file: dataset_util.py

Example 2: imread

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYDEPTH [as alias]
def imread(filename):
    """Reads an image file from disk into a Numpy Array (OpenCV view).

    Args:
        filename (str): Name of pfm image file.
    """
    filename = process(filename)
    ext = os.path.splitext(filename)[1]
    if ext.lower() == '.pfm':
        return load_pfm(filename)
    elif ext.lower() == '.dng':
        return load_dng(filename)
    else:
        loaded = cv2.imread(filename, flags=cv2.IMREAD_ANYDEPTH + cv2.IMREAD_COLOR)
        if loaded is None:
            raise IOError('Could not read {0}'.format(filename))
        else:
            return loaded 
Developer: dmarnerides, Project: pydlt, Lines of code: 20, Source file: io.py

Example 3: __getitem__

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYDEPTH [as alias]
def __getitem__(self, index):
        im_name = self.files[self.split][index]                # 1/824_8-cp_Page_0503-7Nw0001
        im_path = pjoin(self.root, 'img',  im_name + '.png')  
        lbl_path=pjoin(self.root, 'wc', im_name + '.exr')
        im = m.imread(im_path,mode='RGB')
        im = np.array(im, dtype=np.uint8)
        lbl = cv2.imread(lbl_path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
        lbl = np.array(lbl, dtype=float)
        if 'val' in self.split:
            im, lbl=tight_crop(im/255.0,lbl)
        if self.augmentations:          # this is for training; default False for validation
            tex_id=random.randint(0,len(self.txpths)-1)
            txpth=self.txpths[tex_id] 
            tex=cv2.imread(os.path.join(self.root[:-7],txpth)).astype(np.uint8)
            bg=cv2.resize(tex,self.img_size,interpolation=cv2.INTER_NEAREST)
            im,lbl=data_aug(im,lbl,bg)
        if self.is_transform:
            im, lbl = self.transform(im, lbl)
        return im, lbl 
Developer: cvlab-stonybrook, Project: DewarpNet, Lines of code: 21, Source file: doc3dwc_loader.py

Example 4: run_tiff

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYDEPTH [as alias]
def run_tiff(file_path,progress_percent):
    progress_percent
    angle = 0
    ret,video=cv2.imreadmulti(file_path,flags=cv2.IMREAD_ANYDEPTH)
    video_labeled,table=[],[]
    idx=1
    for frame in video[:]:
        img_label,angle_new=process(frame)
        angle_new = float('{0:.2f}'.format(angle_new))
        rotation=cal_rotation(angle,angle_new)
        rotation = float('{0:.2f}'.format(rotation))
        table.append([angle,rotation,angle_new])
        video_labeled.append(img_label)
        angle=angle_new
        idx+=1
        progress_percent['value']=idx/len(video)*100
        # print(table[-1])
        # cv2.imshow('img',cv2.resize(img_label,(512,512)))
        # if cv2.waitKey(0) & 0xFF == ord('q'):
        #     break
    return video_labeled,table 
Developer: deepdiy, Project: deepdiy, Lines of code: 23, Source file: process.py

Example 5: read_tiff

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYDEPTH [as alias]
def read_tiff(self,*args):
		ret,self.stack = cv2.imreadmulti(self.path,flags=cv2.IMREAD_ANYDEPTH)
		self.frame_count = len(self.stack) 
Developer: deepdiy, Project: deepdiy, Lines of code: 5, Source file: image_stack_capture.py

Example 6: imread_color

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYDEPTH [as alias]
def imread_color(self, path):
        img = cv.imread(path, cv.IMREAD_COLOR | cv.IMREAD_ANYDEPTH)/255.
        b, g, r = cv.split(img)
        img_rgb = cv.merge([r, g, b])
        return img_rgb 
Developer: Lvfeifan, Project: MBLLEN, Lines of code: 7, Source file: data_load.py

Example 7: imread_color

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYDEPTH [as alias]
def imread_color(path):
    img = cv.imread(path, cv.IMREAD_COLOR | cv.IMREAD_ANYDEPTH) / 255.
    b, g, r = cv.split(img)
    img_rgb = cv.merge([r, g, b])
    return img_rgb
    # return scipy.misc.imread(path, mode='RGB').astype(np.float) / 255. 
Developer: Lvfeifan, Project: MBLLEN, Lines of code: 8, Source file: utls.py

Example 8: main

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYDEPTH [as alias]
def main():

    input_depth_dir = os.path.expanduser(
        '~/Kitti/depth/val_selection_cropped/velodyne_raw')

    images_to_use = sorted(glob.glob(input_depth_dir + '/*'))

    # Process depth images
    num_images = len(images_to_use)
    all_sparsities = np.zeros(num_images)

    for i in range(num_images):

        # Print progress
        sys.stdout.write('\rProcessing index {} / {}'.format(i, num_images - 1))
        sys.stdout.flush()

        depth_image_path = images_to_use[i]

        # Load depth from image
        depth_image = cv2.imread(depth_image_path, cv2.IMREAD_ANYDEPTH)

        # Divide by 256
        depth_map = depth_image / 256.0

        num_valid_pixels = len(np.where(depth_map > 0.0)[0])
        num_pixels = depth_image.shape[0] * depth_image.shape[1]

        sparsity = num_valid_pixels / (num_pixels * 2/3)
        all_sparsities[i] = sparsity

    print('')
    print('Sparsity')
    print('Min:   ', np.amin(all_sparsities))
    print('Max:   ', np.amax(all_sparsities))
    print('Mean:  ', np.mean(all_sparsities))
    print('Median:  ', np.median(all_sparsities))

    plt.hist(all_sparsities, bins=20)
    plt.show() 
Developer: kujason, Project: ip_basic, Lines of code: 42, Source file: dataset_sparsity.py

Example 9: read_image_scale

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYDEPTH [as alias]
def read_image_scale(image_path, scale):
    img = cv2.imread(image_path, cv2.IMREAD_ANYDEPTH)
    if img is None:
        print("not finding {}".format(image_path))
    img = img.astype(np.float32) / scale
    return img 
Developer: xy-guo, Project: Learning-Monocular-Depth-by-Stereo, Lines of code: 8, Source file: distill_dataset.py

Example 10: process_frame

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYDEPTH [as alias]
def process_frame(image_path: str) -> Tuple[np.ndarray, np.ndarray, str, str]:
    """
    fix given frame
    :param image_path: path to frame which should be fixed
    :return: fixed frame
    """
    seq_no = image_path.split('/')[-3]
    img_no = image_path.split('/')[-1].split('.')[0]

    depth_path = f"{depth_root}/{seq_no}/clone/{img_no}.png"
    semantic_path = f"{labels_root}/{seq_no}/clone/{img_no}.png"

    # BGR -> RGB
    rgb_map = cv2.imread(image_path)[:, :, (2, 1, 0)]

    # convert centimeters to meters
    depth_map = cv2.imread(depth_path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH) / 100.

    # semantic image
    semantic_map = cv2.imread(semantic_path)[:, :, (2, 1, 0)]
    label_map = np.apply_along_axis(lambda r: rgb2label[tuple(r)], 2, semantic_map)

    # backprojection to camera space
    x3 = (xv - center_x) / focal_x * depth_map
    y3 = (yv - center_y) / focal_y * depth_map

    erg = np.stack((depth_map, -x3, -y3), axis=-1).reshape((-1, 3))
    erg = np.hstack((erg, rgb_map.reshape(-1, 3), label_map.reshape(-1, 1)))

    # delete sky points
    erg = distance_cutoff(erg, g_cutoff)

    if g_is_v1:
        return None, erg, seq_no, img_no
    else:
        erg = remove_car_shadows(erg, img_no, g_bb_eps)
        worldspace = transform2worldspace(erg, img_no)
        return worldspace, erg, seq_no, img_no 
Developer: VisualComputingInstitute, Project: vkitti3D-dataset, Lines of code: 40, Source file: create_npy.py

Example 11: read_disparity

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYDEPTH [as alias]
def read_disparity(disp_dir, img_idx):
    """Reads in Disparity file from Kitti Dataset.

        Keyword Arguments:
        ------------------
        calib_dir : Str
                    Directory of the disparity files.

        img_idx : Int
                  Index of the image.

        Returns:
        --------
        disp_img : Numpy Array
                   Contains the disparity image.

        [] : if file is not found

        """
    disp_path = disp_dir + "/%06d_left_disparity.png" % img_idx

    if os.path.exists(disp_path):
        disp_img = cv2.imread(disp_path, cv2.IMREAD_ANYDEPTH)
        return disp_img
    else:
        return [] 
Developer: Zengyi-Qin, Project: TLNet, Lines of code: 28, Source file: calib_utils.py

Example 12: _read_prediction_py

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYDEPTH [as alias]
def _read_prediction_py(id, filled):
    depth_path = os.path.join(cfg.TMP_DIR, "%d.png"%id)
    if not os.path.isfile(depth_path):
        return filled

    depth = cv2.imread(depth_path, cv2.IMREAD_ANYDEPTH)
    return (depth/5000.0).astype(np.float32) 
Developer: princeton-vl, Project: DeepV2D, Lines of code: 9, Source file: data_layer.py

Example 13: __getitem__

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYDEPTH [as alias]
def __getitem__(self, index):
        data_blob = self.dataset_index[index]
        num_frames = data_blob['n_frames']
        num_samples = self.n_frames

        inds = np.random.choice(num_frames, num_samples, replace=False)
        keyframe_index = inds[0]

        images = []
        for i in inds:
            image_file = data_blob['images'][i]
            images.append(cv2.imread(image_file))

        depth_file = data_blob['depths'][keyframe_index]
        depth = cv2.imread(depth_file, cv2.IMREAD_ANYDEPTH)
        depth = (depth.astype(np.float32)) / 5000.0
        filled = fill_depth(depth)
        
        frameid = data_blob['ids'][keyframe_index]
        frameid = np.int32(frameid)
    
        poses = []
        for i in inds:
            pose_vec = data_blob['poses'][i]
            pose_mat = pose_vec2mat(pose_vec)
            poses.append(np.linalg.inv(pose_mat))

        images = np.stack(images, axis=0).astype(np.uint8)
        poses = np.stack(poses, axis=0).astype(np.float32)

        kvec = intrinsics.copy()
        return images, poses, depth, filled, filled, kvec, frameid 
Developer: princeton-vl, Project: DeepV2D, Lines of code: 34, Source file: nyuv2.py

Example 14: __getitem__

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYDEPTH [as alias]
def __getitem__(self, index):
        data_blob = self.dataset_index[index]
        num_frames = data_blob['n_frames']
        num_samples = self.n_frames - 1

        frameid = data_blob['id']
        keyframe_index = num_frames // 2

        inds = np.arange(num_frames)
        inds = inds[~np.equal(inds, keyframe_index)]
        
        inds = np.random.choice(inds, num_samples, replace=False)
        inds = [keyframe_index] + inds.tolist()

        images = []
        for i in inds:
            image = cv2.imread(data_blob['images'][i])
            image = cv2.resize(image, (640, 480))
            images.append(image)

        poses = []
        for i in inds:
            poses.append(data_blob['poses'][i])

        images = np.stack(images, axis=0).astype(np.uint8)
        poses = np.stack(poses, axis=0).astype(np.float32)

        depth_file = data_blob['depth']
        depth = cv2.imread(depth_file, cv2.IMREAD_ANYDEPTH)
        
        depth = (depth.astype(np.float32)) / 1000.0
        filled = fill_depth(depth)
        
        K = data_blob['intrinsics']
        kvec = np.stack([K[0,0], K[1,1], K[0,2], K[1,2]], axis=0)

        return images, poses, depth, filled, filled, kvec, frameid 
Developer: princeton-vl, Project: DeepV2D, Lines of code: 39, Source file: scannet.py

Example 15: decode_loaded

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYDEPTH [as alias]
def decode_loaded(x):
    """Decodes an image stored in a Numpy Byte (uint8) Array using OpenCV.

    Args:
        x: The Numpy Byte (uint8) Array.
    """
    return cv2.imdecode(x, flags=cv2.IMREAD_ANYDEPTH + cv2.IMREAD_COLOR) 
Developer: dmarnerides, Project: pydlt, Lines of code: 9, Source file: io.py


Note: The cv2.IMREAD_ANYDEPTH attribute examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors; please follow the corresponding project's license when distributing or using the code. Do not reproduce this article without permission.