

Python cv2.IMREAD_ANYCOLOR Attribute Code Examples

This article collects typical usage examples of the cv2.IMREAD_ANYCOLOR attribute in Python. If you are wondering what cv2.IMREAD_ANYCOLOR does, how to use it, or what real-world usage looks like, the curated examples below should help. You can also explore other usage examples from the cv2 module.


Fifteen code examples of the cv2.IMREAD_ANYCOLOR attribute are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
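
Before the numbered examples, here is a minimal, self-contained sketch of what the flag does (the file name sample.png is a placeholder, not taken from any of the projects below). cv2.IMREAD_ANYCOLOR tells cv2.imread to return the image in whatever channel layout the file actually has (single-channel grayscale or 3-channel BGR) instead of forcing a conversion; several examples below additionally combine it with cv2.IMREAD_ANYDEPTH to preserve the native bit depth (e.g. 16-bit PNG).

import cv2

# Placeholder path, used purely for illustration.
path = 'sample.png'

# Read the image in its native channel layout; IMREAD_ANYDEPTH additionally
# keeps the native bit depth instead of converting everything to 8-bit.
img = cv2.imread(path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)

if img is None:
    raise OSError('File not recognized by OpenCV: {}'.format(path))

# Shape and dtype depend on the source file: (h, w) for grayscale,
# (h, w, 3) for color; dtype may be uint8, uint16, or float32.
print(img.shape, img.dtype)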

Example 1: write_image

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYCOLOR [as alias]
def write_image(bridge, outdir, msg, fmt='png'):
    results = {}
    image_filename = os.path.join(outdir, str(msg.header.stamp.to_nsec()) + '.' + fmt)
    try:
        if hasattr(msg, 'format') and 'compressed' in msg.format:
            buf = np.ndarray(shape=(1, len(msg.data)), dtype=np.uint8, buffer=msg.data)
            cv_image = cv2.imdecode(buf, cv2.IMREAD_ANYCOLOR)
            if cv_image.shape[2] != 3:
                print("Invalid image %s" % image_filename)
                return results
            results['height'] = cv_image.shape[0]
            results['width'] = cv_image.shape[1]
            # Avoid re-encoding if we don't have to
            if check_format(msg.data) == fmt:
                buf.tofile(image_filename)
            else:
                cv2.imwrite(image_filename, cv_image)
        else:
            cv_image = bridge.imgmsg_to_cv2(msg, "bgr8")
            cv2.imwrite(image_filename, cv_image)
    except CvBridgeError as e:
        print(e)
    results['filename'] = image_filename
    return results 
Developer: rwightman, Project: udacity-driving-reader, Lines of code: 26, Source file: bagdump.py

Example 2: open_image

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYCOLOR [as alias]
def open_image(path):
	""" Opens an image using OpenCV given the file path.
	:param path: the file path of the image
	:return: the image in RGB format as a numpy array of floats normalized to the range 0.0 - 1.0
	"""
	flags = cv2.IMREAD_UNCHANGED+cv2.IMREAD_ANYDEPTH+cv2.IMREAD_ANYCOLOR
	path = str(path)
	if not os.path.exists(path):
		raise OSError(f'No such file or directory: {path}')
	elif os.path.isdir(path):
		raise OSError(f'Is a directory: {path}')
	else:
		try:
			im = cv2.imread(str(path), flags)
			if im is None: raise OSError(f'File not recognized by opencv: {path}')
			im = im.astype(np.float32)/255
			return cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
		except Exception as e:
			raise OSError(f'Error handling image at: {path}') from e 
Developer: alecrubin, Project: pytorch-serverless, Lines of code: 20, Source file: utils.py

Example 3: main

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYCOLOR [as alias]
def main():
    lg = ListGenerator()
    files_to_check = lg.generate_list(args.dir, ['jpg'])
    print("Total files: {}".format(len(files_to_check)))

    gray_img_list = []
    num_checked = 0
    for each_file in tqdm(files_to_check[54145:54146]):
        img = cv2.imread(each_file, cv2.IMREAD_ANYCOLOR)
        print(each_file)

        # Preview gray images.
        if len(img.shape) != 3:
            gray_img_list.append(each_file)
            cv2.imshow("gray", img)
            if cv2.waitKey(100) == 27:
                break

    print("Total gray images: {}".format(len(gray_img_list))) 
Developer: yinguobing, Project: image_utility, Lines of code: 21, Source file: gray_image_detector.py

Example 4: read_gated_image

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYCOLOR [as alias]
def read_gated_image(base_dir, gta_pass, img_id, data_type, num_bits=10, scale_images=False,
                     scaled_img_width=None, scaled_img_height=None,
                     normalize_images=False):
    gated_imgs = []
    normalizer = 2 ** num_bits - 1.

    for gate_id in range(3):
        gate_dir = os.path.join(base_dir, gta_pass, 'gated%d_10bit' % gate_id)
        img = cv2.imread(os.path.join(gate_dir, img_id + '.png'), cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
        if data_type == 'real':
            img = img[crop_size:(img.shape[0] - crop_size), crop_size:(img.shape[1] - crop_size)]
            img = img.copy()
            img[img > 2 ** 10 - 1] = normalizer
        img = np.float32(img / normalizer)
        gated_imgs.append(np.expand_dims(img, axis=2))

    img = np.concatenate(gated_imgs, axis=2)
    if normalize_images:
        mean = np.mean(img, axis=2, keepdims=True)
        std = np.std(img, axis=2, keepdims=True)
        img = (img - mean) / (std + np.finfo(float).eps)
    if scale_images:
        img = cv2.resize(img, dsize=(scaled_img_width, scaled_img_height), interpolation=cv2.INTER_AREA)
    return np.expand_dims(img, axis=0) 
Developer: gruberto, Project: Gated2Depth, Lines of code: 26, Source file: dataset_util.py

Example 5: __getitem__

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYCOLOR [as alias]
def __getitem__(self, index):
        im_name = self.files[self.split][index]                # 1/824_8-cp_Page_0503-7Nw0001
        im_path = pjoin(self.root, 'img',  im_name + '.png')  
        lbl_path=pjoin(self.root, 'wc', im_name + '.exr')
        im = m.imread(im_path,mode='RGB')
        im = np.array(im, dtype=np.uint8)
        lbl = cv2.imread(lbl_path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
        lbl = np.array(lbl, dtype=float)
        if 'val' in self.split:
            im, lbl=tight_crop(im/255.0,lbl)
        if self.augmentations:          # this is for training; default False for validation
            tex_id=random.randint(0,len(self.txpths)-1)
            txpth=self.txpths[tex_id] 
            tex=cv2.imread(os.path.join(self.root[:-7],txpth)).astype(np.uint8)
            bg=cv2.resize(tex,self.img_size,interpolation=cv2.INTER_NEAREST)
            im,lbl=data_aug(im,lbl,bg)
        if self.is_transform:
            im, lbl = self.transform(im, lbl)
        return im, lbl 
Developer: cvlab-stonybrook, Project: DewarpNet, Lines of code: 21, Source file: doc3dwc_loader.py

Example 6: load_rgb

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYCOLOR [as alias]
def load_rgb(self, equalize=False):
        # print("Loading:", self.image_file)
        try:
            img_rgb = cv2.imread(self.image_file, flags=cv2.IMREAD_ANYCOLOR|cv2.IMREAD_ANYDEPTH|cv2.IMREAD_IGNORE_ORIENTATION)
            if equalize:
                # equalize val (essentially gray scale level)
                clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
                hsv = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2HSV)
                hue, sat, val = cv2.split(hsv)
                aeq = clahe.apply(val)
                # recombine
                hsv = cv2.merge((hue,sat,aeq))
                # convert back to BGR
                img_rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
            h, w = img_rgb.shape[:2]
            self.node.setInt('height', h)
            self.node.setInt('width', w)
            return img_rgb

        except:
            print(self.image_file + ":\n" + "  rgb load error: " \
                + str(sys.exc_info()[1]))
            return None 
Developer: UASLab, Project: ImageAnalysis, Lines of code: 25, Source file: image.py

Example 7: open_image_url

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYCOLOR [as alias]
def open_image_url(url):
	"""  Opens an image using OpenCV from a URL.
	:param url: url path of the image
	:return: the image as a numpy array of floats normalized to the range 0.0 - 1.0 (channel order as decoded by OpenCV, i.e. BGR for color images)
	"""
	flags = cv2.IMREAD_UNCHANGED+cv2.IMREAD_ANYDEPTH+cv2.IMREAD_ANYCOLOR
	url = str(url)
	resp = urllib.request.urlopen(url)
	try:
		im = np.asarray(bytearray(resp.read()))
		im = cv2.imdecode(im, flags)
		if im is None: raise OSError(f'File from url not recognized by opencv: {url}')
		im = im.astype(np.float32)/255
		return im
	except Exception as e:
		raise OSError(f'Error handling image from url at: {url}') from e 
Developer: alecrubin, Project: pytorch-serverless, Lines of code: 17, Source file: utils.py

Example 8: main

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYCOLOR [as alias]
def main():
    # Read in image list to be converted.
    with open('gray.json', 'r') as fp:
        img_list = json.load(fp)
    logging.debug("Total files to be converted: {}".format(len(img_list)))

    # Convert them into 3 channel images.
    for each_file in tqdm(img_list):
        img = cv2.imread(each_file, cv2.IMREAD_ANYCOLOR)
        if len(img.shape) == 3:
            print("Not a gray image: {}".format(each_file))
            continue

        cv2.imshow('preview', img)
        if cv2.waitKey(30) == 27:
            break

        # Do conversion
        img_converted = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

        # Write to file.
        cv2.imwrite(each_file, img_converted)

        # Check if conversion failed.
        img = cv2.imread(each_file, cv2.IMREAD_ANYCOLOR)
        assert len(img.shape) == 3, "Conversion failed: {}".format(each_file)
Developer: yinguobing, Project: image_utility, Lines of code: 28, Source file: convert_to_3_channel.py

Example 9: process_frame

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYCOLOR [as alias]
def process_frame(image_path: str) -> Tuple[np.ndarray, np.ndarray, str, str]:
    """
    fix given frame
    :param image_path: path to frame which should be fixed
    :return: fixed frame
    """
    seq_no = image_path.split('/')[-3]
    img_no = image_path.split('/')[-1].split('.')[0]

    depth_path = f"{depth_root}/{seq_no}/clone/{img_no}.png"
    semantic_path = f"{labels_root}/{seq_no}/clone/{img_no}.png"

    # BGR -> RGB
    rgb_map = cv2.imread(image_path)[:, :, (2, 1, 0)]

    # convert centimeters to meters
    depth_map = cv2.imread(depth_path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH) / 100.

    # semantic image
    semantic_map = cv2.imread(semantic_path)[:, :, (2, 1, 0)]
    label_map = np.apply_along_axis(lambda r: rgb2label[tuple(r)], 2, semantic_map)

    # backprojection to camera space
    x3 = (xv - center_x) / focal_x * depth_map
    y3 = (yv - center_y) / focal_y * depth_map

    erg = np.stack((depth_map, -x3, -y3), axis=-1).reshape((-1, 3))
    erg = np.hstack((erg, rgb_map.reshape(-1, 3), label_map.reshape(-1, 1)))

    # delete sky points
    erg = distance_cutoff(erg, g_cutoff)

    if g_is_v1:
        return None, erg, seq_no, img_no
    else:
        erg = remove_car_shadows(erg, img_no, g_bb_eps)
        worldspace = transform2worldspace(erg, img_no)
        return worldspace, erg, seq_no, img_no 
Developer: VisualComputingInstitute, Project: vkitti3D-dataset, Lines of code: 40, Source file: create_npy.py

Example 10: __getitem__

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYCOLOR [as alias]
def __getitem__(self, index):
        im_name = self.files[self.split][index]                 #1/2Xec_Page_453X56X0001.png
        im_path = pjoin(self.altroot, 'img',  im_name + '.png')  
        img_foldr,fname=im_name.split('/')
        recon_foldr='chess48'
        wc_path = pjoin(self.altroot, 'wc' , im_name + '.exr')
        bm_path = pjoin(self.altroot, 'bm' , im_name + '.mat')
        alb_path = pjoin(self.root,'recon',img_foldr,recon_foldr, fname[:-4]+recon_foldr+'0001.png')

        wc = cv2.imread(wc_path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
        bm = h5.loadmat(bm_path)['bm']
        alb = m.imread(alb_path,mode='RGB')
        if self.is_transform:
            im, lbl = self.transform(wc,bm,alb)
        return im, lbl 
Developer: cvlab-stonybrook, Project: DewarpNet, Lines of code: 17, Source file: doc3dbmnoimgc_loader.py

Example 11: load_image

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYCOLOR [as alias]
def load_image(filename, data_type=torch.float32):
    color_img = numpy.array(cv2.imread(filename, cv2.IMREAD_ANYCOLOR))
    h, w, c = color_img.shape
    color_data = color_img.astype(numpy.float32).transpose(2, 0, 1)
    return torch.from_numpy(
        color_data.reshape(1, c, h, w)        
    ).type(data_type) / 255.0 
Developer: VCL3D, Project: DeepDepthDenoising, Lines of code: 9, Source file: image.py

Example 12: get_flow

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYCOLOR [as alias]
def get_flow(path):
    bgr = cv2.imread(path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
    invalid = bgr[:, :, 0] == 0
    out_flow = (bgr[:, :, 2:0:-1].astype('f4') - 2**15) / 64.
    out_flow[invalid] = 0
    return out_flow, bgr[:, :, 0] 
Developer: vt-vl-lab, Project: DF-Net, Lines of code: 8, Source file: test_flownet_2012.py

Example 13: convert

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYCOLOR [as alias]
def convert(f, record_name, mean_flag):
    count = 0.0
    writer = tf.python_io.TFRecordWriter(record_name)

    if mean_flag:
        mean = np.zeros(cv2.imread(f[0][0]).shape, np.float32)

    for name in f:
        modality1 = cv2.imread(name[0])
        if mean_flag:
            mean += modality1
        
        label = cv2.imread(name[1], cv2.IMREAD_ANYCOLOR)
        try:
            assert len(label.shape)==2
        except AssertionError:
            raise AssertionError("Label should be one channel!")
            
        height = modality1.shape[0]
        width = modality1.shape[1]
        modality1 = modality1.tostring()
        label = label.tostring()
        features = {'height':_int64_feature(height),
                    'width':_int64_feature(width),
                    'modality1':_bytes_feature(modality1),
                    'label':_bytes_feature(label),
                   }
        example = tf.train.Example(features=tf.train.Features(feature=features))
        writer.write(example.SerializeToString())

        if (count+1)%1 == 0:
            print('Processed data: {}'.format(count))

        count = count+1 
Developer: DeepSceneSeg, Project: AdapNet-pp, Lines of code: 36, Source file: convert_to_tfrecords.py

Example 14: read_img

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYCOLOR [as alias]
def read_img(self, img_key):
        img_str = np.fromstring(self.read_by_key(img_key), dtype=np.uint8)
        img = np.asarray(cv2.imdecode(img_str, cv2.IMREAD_ANYCOLOR)).reshape((240, 320))
        return img 
Developer: sfu-gruvi-3dv, Project: sanet_relocal_demo, Lines of code: 6, Source file: gen_lmdb_cache.py

Example 15: convert

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_ANYCOLOR [as alias]
def convert(f, record_name):
    count = 0.0
    writer = tf.python_io.TFRecordWriter(record_name)

    for name in f:
        modality1 = cv2.imread(name[0])
        modality2 = cv2.imread(name[1])
        label = cv2.imread(name[2], cv2.IMREAD_ANYCOLOR)
        try:
            assert len(label.shape)==2
        except AssertionError:
            raise AssertionError("Label should be one channel!")
        
        height = modality1.shape[0]
        width = modality1.shape[1]
        modality1 = modality1.tostring()
        modality2 = modality2.tostring()
        label = label.tostring()
        features = {'height':_int64_feature(height),
                    'width':_int64_feature(width),
                    'modality1':_bytes_feature(modality1),
                    'label':_bytes_feature(label),
                    'modality2':_bytes_feature(modality2)
                   }
        example = tf.train.Example(features=tf.train.Features(feature=features))
        writer.write(example.SerializeToString())

        if (count+1)%1 == 0:
            print('Processed data: {}'.format(count))

        count = count+1 
Developer: DeepSceneSeg, Project: SSMA, Lines of code: 33, Source file: convert_to_tfrecords.py


Note: The cv2.IMREAD_ANYCOLOR attribute examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. For distribution and use, please refer to each project's license. Do not reproduce without permission.