

Python cv2.IMREAD_COLOR Code Examples

This article collects typical usage examples of the cv2.IMREAD_COLOR flag (an attribute of the cv2 module) in Python. If you are wondering what cv2.IMREAD_COLOR does, how to use it, or what real-world code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the cv2 module.


The following shows 15 code examples of the cv2.IMREAD_COLOR flag, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
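Before the examples, here is a minimal sketch of the flag's basic behaviour (the file name sample.jpg is only a placeholder): passing cv2.IMREAD_COLOR to cv2.imread decodes the file as a 3-channel BGR image, converting grayscale sources and dropping any alpha channel.

import cv2

# Decode as a 3-channel BGR array; grayscale inputs are expanded and any
# alpha channel is dropped. cv2.imread returns None if the file cannot be read.
img = cv2.imread('sample.jpg', cv2.IMREAD_COLOR)  # 'sample.jpg' is a placeholder path
if img is None:
    raise FileNotFoundError('could not read sample.jpg')
print(img.shape)        # (height, width, 3)
rgb = img[:, :, ::-1]   # reverse the last axis to convert BGR to RGB, as several examples below do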

Example 1: validate_on_lfw

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_COLOR [as alias]
def validate_on_lfw(model, lfw_160_path):
    # Read the file containing the pairs used for testing
    pairs = lfw.read_pairs('validation-LFW-pairs.txt')
    # Get the paths for the corresponding images
    paths, actual_issame = lfw.get_paths(lfw_160_path, pairs)
    num_pairs = len(actual_issame)

    all_embeddings = np.zeros((num_pairs * 2, 512), dtype='float32')
    for k in tqdm.trange(num_pairs):
        img1 = cv2.imread(paths[k * 2], cv2.IMREAD_COLOR)[:, :, ::-1]
        img2 = cv2.imread(paths[k * 2 + 1], cv2.IMREAD_COLOR)[:, :, ::-1]
        batch = np.stack([img1, img2], axis=0)
        embeddings = model.eval_embeddings(batch)
        all_embeddings[k * 2: k * 2 + 2, :] = embeddings

    tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(
        all_embeddings, actual_issame, distance_metric=1, subtract_mean=True)

    print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))

    auc = metrics.auc(fpr, tpr)
    print('Area Under Curve (AUC): %1.3f' % auc)
    eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
    print('Equal Error Rate (EER): %1.3f' % eer) 
Author: ppwwyyxx | Project: Adversarial-Face-Attack | Lines: 27 | Source: face_attack.py

Example 2: __getitem__

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_COLOR [as alias]
def __getitem__(self, index):
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        size = image.shape
        name = osp.splitext(osp.basename(datafiles["img"]))[0]
        image = np.asarray(image, np.float32)
        image -= self.mean
        
        img_h, img_w, _ = image.shape
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            image = cv2.copyMakeBorder(image, 0, pad_h, 0, 
                pad_w, cv2.BORDER_CONSTANT, 
                value=(0.0, 0.0, 0.0))
        image = image.transpose((2, 0, 1))
        return image, name, size 
Author: speedinghzl | Project: pytorch-segmentation-toolbox | Lines: 19 | Source: datasets.py

Example 3: __init__

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_COLOR [as alias]
def __init__(self, files, channel=3, resize=None, shuffle=False):
        """
        Args:
            files (list): list of file paths.
            channel (int): 1 or 3. Will convert grayscale to RGB images if channel==3.
                Will produce (h, w, 1) array if channel==1.
            resize (tuple): int or (h, w) tuple. If given, resize the image.
        """
        assert len(files), "No image files given to ImageFromFile!"
        self.files = files
        self.channel = int(channel)
        assert self.channel in [1, 3], self.channel
        self.imread_mode = cv2.IMREAD_GRAYSCALE if self.channel == 1 else cv2.IMREAD_COLOR
        if resize is not None:
            resize = shape2d(resize)
        self.resize = resize
        self.shuffle = shuffle 
Author: tensorpack | Project: dataflow | Lines: 19 | Source: image.py
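A hypothetical usage sketch of the ImageFromFile dataflow above (the glob pattern and resize shape are placeholder values, and the import path is assumed to be tensorpack.dataflow.ImageFromFile):

from glob import glob
from tensorpack.dataflow import ImageFromFile  # assumed import path for the class above

# channel=3 forces 3-channel BGR output; resize=(224, 224) rescales every image
df = ImageFromFile(glob('train/*.jpg'), channel=3, resize=(224, 224), shuffle=True)
df.reset_state()            # dataflows must be reset before iteration
for (img,) in df:           # each datapoint is a list containing one HWC uint8 array
    print(img.shape)
    break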

Example 4: __getitem__

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_COLOR [as alias]
def __getitem__(self, index):
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
        size = image.shape
        name = datafiles["name"]
        if self.f_scale != 1:
            image = cv2.resize(image, None, fx=self.f_scale, fy=self.f_scale, interpolation=cv2.INTER_LINEAR)
            label = cv2.resize(label, None, fx=self.f_scale, fy=self.f_scale, interpolation = cv2.INTER_NEAREST)

        label[label == 11] = self.ignore_label

        image = np.asarray(image, np.float32)

        if self.rgb:
            image = image[:, :, ::-1]  ## BGR -> RGB
            image /= 255  ## using pytorch pretrained models

        image -= self.mean
        image /= self.vars

        image = image.transpose((2, 0, 1))  # HWC -> CHW

        # print('image.shape:',image.shape)
        return image.copy(), label.copy(), np.array(size), name 
Author: lxtGH | Project: Fast_Seg | Lines: 27 | Source: camvid.py

Example 5: imread

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_COLOR [as alias]
def imread(filename, flags=cv2.IMREAD_COLOR):
    global _im_zfile
    path = filename
    pos_at = path.find('@')  # find() returns -1 when '@' is missing; index() would raise instead
    if pos_at == -1:
        print("character '@' is not found from the given path '%s'"%(path))
        assert 0
    path_zip = path[0: pos_at]
    path_img = path[pos_at + 2:]
    if not os.path.isfile(path_zip):
        print("zip file '%s' is not found"%(path_zip))
        assert 0
    for i in range(len(_im_zfile)):
        if _im_zfile[i]['path'] == path_zip:
            data = _im_zfile[i]['zipfile'].read(path_img)
            return cv2.imdecode(np.frombuffer(data, np.uint8), flags)

    _im_zfile.append({
        'path': path_zip,
        'zipfile': zipfile.ZipFile(path_zip, 'r')
    })
    data = _im_zfile[-1]['zipfile'].read(path_img)

    return cv2.imdecode(np.frombuffer(data, np.uint8), flags) 
Author: facebookresearch | Project: PoseWarper | Lines: 26 | Source: zipreader.py
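Judging from the parsing above, this helper expects paths of the form <zip file>@/<member path>, where the character immediately after '@' is skipped. A hypothetical call (file names are placeholders) would look like:

# 'images.zip@/train/000001.jpg' -> archive 'images.zip', member 'train/000001.jpg'
img = imread('images.zip@/train/000001.jpg', cv2.IMREAD_COLOR)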

Example 6: __getitem__

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_COLOR [as alias]
def __getitem__(self, index):
        img_id = self.ids[index]
        target = ET.parse(self._annopath % img_id).getroot()
        img = cv2.imread(self._imgpath % img_id, cv2.IMREAD_COLOR)
        height, width, _ = img.shape

        if self.target_transform is not None:
            target = self.target_transform(target)


        if self.preproc is not None:
            img, target = self.preproc(img, target)
            #print(img.size())

            # target = self.target_transform(target, width, height)
        #print(target.shape)

        return img, target 
Author: ShuangXieIrene | Project: ssds.pytorch | Lines: 20 | Source: voc.py

Example 7: pull_img_anno

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_COLOR [as alias]
def pull_img_anno(self, index):
        '''Returns the original annotation of image at index

        Note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.

        Argument:
            index (int): index of img to get annotation of
        Return:
            list:  [img_id, [(label, bbox coords),...]]
                eg: ('001718', [('dog', (96, 13, 438, 332))])
        '''
        img_id = self.ids[index]
        img = cv2.imread(self._imgpath % img_id, cv2.IMREAD_COLOR)
        anno = ET.parse(self._annopath % img_id).getroot()
        gt = self.target_transform(anno)
        height, width, _ = img.shape
        boxes = gt[:,:-1]
        labels = gt[:,-1]
        boxes[:, 0::2] /= width
        boxes[:, 1::2] /= height
        labels = np.expand_dims(labels,1)
        targets = np.hstack((boxes,labels))
        
        return img, targets 
Author: ShuangXieIrene | Project: ssds.pytorch | Lines: 27 | Source: voc.py

Example 8: pull_image

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_COLOR [as alias]
def pull_image(self, index):
        """Returns the original image object at index in PIL form

        Note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.

        Argument:
            index (int): index of img to show
        Return:
            img
        """
        img_id = self.id_to_img_map[index]

        path = self.coco.loadImgs(img_id)[0]['file_name']

        return cv2.imread(os.path.join(self.root, path), cv2.IMREAD_COLOR) 
Author: soeaver | Project: Parsing-R-CNN | Lines: 18 | Source: coco.py

Example 9: read_image_pair

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_COLOR [as alias]
def read_image_pair(pair_path, resize_or_crop=None, image_size=(256,256)):
    image_blur = cv2.imread(pair_path[0], cv2.IMREAD_COLOR)
    image_blur = image_blur / 255.0 * 2.0 - 1.0
    image_real = cv2.imread(pair_path[1], cv2.IMREAD_COLOR)
    image_real = image_real / 255.0 * 2.0 - 1.0

    if resize_or_crop is not None:
        assert image_size is not None

    if resize_or_crop == 'resize':
        image_blur = cv2.resize(image_blur, image_size, interpolation=cv2.INTER_AREA)
        image_real = cv2.resize(image_real, image_size, interpolation=cv2.INTER_AREA)
    elif resize_or_crop == 'crop':
        # OpenCV has no cv2.crop(); take the top-left image_size patch via array slicing
        image_blur = image_blur[:image_size[0], :image_size[1]]
        image_real = image_real[:image_size[0], :image_size[1]]
    elif resize_or_crop is not None:
        raise ValueError("resize_or_crop must be 'resize' or 'crop'")

    if np.size(np.shape(image_blur)) == 3:
        image_blur = np.expand_dims(image_blur, axis=0)
    if np.size(np.shape(image_real)) == 3:
        image_real = np.expand_dims(image_real, axis=0)
    image_blur = np.array(image_blur, dtype=np.float32)
    image_real = np.array(image_real, dtype=np.float32)
    return image_blur, image_real 
Author: LeeDoYup | Project: DeblurGAN-tf | Lines: 27 | Source: data_loader.py

Example 10: read_image

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_COLOR [as alias]
def read_image(path, resize_or_crop=None, image_size=(256,256)):
    image = cv2.imread(path, cv2.IMREAD_COLOR)
    image = image/255.0 * 2.0 - 1.0

    assert resize_or_crop != None
    assert image_size != None

    if resize_or_crop == 'resize':
        image = cv2.resize(image, image_size, interpolation=cv2.INTER_AREA)
    elif resize_or_crop == 'crop':
        # OpenCV has no cv2.crop(); take the top-left image_size patch via array slicing
        image = image[:image_size[0], :image_size[1]]

    if np.size(np.shape(image)) == 3: 
        image = np.expand_dims(image, axis=0)

    image = np.array(image, dtype=np.float32)
    return image 
Author: LeeDoYup | Project: DeblurGAN-tf | Lines: 19 | Source: data_loader.py

Example 11: test_lmdb_train

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_COLOR [as alias]
def test_lmdb_train(db, augs, batch):
    ds = LMDBData(db, shuffle=False)
    ds = LocallyShuffleData(ds, 50000)
    ds = MultiProcessRunner(ds, 5000, 1)
    # return ds  # returning here would skip the cv2.imdecode/augmentation pipeline below

    ds = LMDBDataPoint(ds)

    def f(x):
        return cv2.imdecode(x, cv2.IMREAD_COLOR)
    ds = MapDataComponent(ds, f, 0)
    ds = AugmentImageComponent(ds, augs)

    ds = BatchData(ds, batch, use_list=True)
    # ds = PlasmaPutData(ds)
    ds = MultiProcessRunnerZMQ(ds, 40, hwm=80)
    # ds = PlasmaGetData(ds)
    return ds 
Author: tensorpack | Project: benchmarks | Lines: 20 | Source: benchmark-dataflow.py

Example 12: test_lmdb_inference

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_COLOR [as alias]
def test_lmdb_inference(db, augs, batch):
    ds = LMDBData(db, shuffle=False)
    # ds = LocallyShuffleData(ds, 50000)

    augs = AugmentorList(augs)

    def mapper(data):
        im, label = loads(data[1])
        im = cv2.imdecode(im, cv2.IMREAD_COLOR)
        im = augs.augment(im)
        return im, label

    ds = MultiProcessMapData(ds, 40, mapper,
                             buffer_size=200)
    # ds = MultiThreadMapData(ds, 40, mapper, buffer_size=2000)

    ds = BatchData(ds, batch)
    ds = MultiProcessRunnerZMQ(ds, 1)
    return ds 
Author: tensorpack | Project: benchmarks | Lines: 21 | Source: benchmark-dataflow.py

Example 13: get_data

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_COLOR [as alias]
def get_data(self):
        idxs = np.arange(len(self.train_list))
        if self.shuffle:
            self.rng.shuffle(idxs)

        caches = {}
        for i, k in enumerate(idxs):
            path = self.train_list[k]
            label = self.lb_list[k]

            if i % self.preload == 0:
                try:
                    caches = ILSVRCTenth._read_tenth_batch(self.train_list[idxs[i:i+self.preload]])
                except Exception as e:
                    logging.warning('tenth local cache failed, err=%s' % str(e))

            content = caches.get(path, '')
            if not content:
                content = ILSVRCTenth._read_tenth(path)

            img = cv2.imdecode(np.frombuffer(content, dtype=np.uint8), cv2.IMREAD_COLOR)  # np.fromstring is deprecated; use frombuffer
            yield [img, label] 
Author: ildoonet | Project: tf-lcnn | Lines: 24 | Source: data_feeder.py

Example 14: test_solution_close_to_original_implementation

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_COLOR [as alias]
def test_solution_close_to_original_implementation(self):
        image = cv2.imread('testdata/source.png', cv2.IMREAD_COLOR) / 255.0
        scribles = cv2.imread('testdata/scribbles.png', cv2.IMREAD_COLOR) / 255.0

        alpha = closed_form_matting.closed_form_matting_with_scribbles(image, scribles)
        foreground, background = solve_foreground_background(image, alpha)

        matlab_alpha = cv2.imread('testdata/matlab_alpha.png', cv2.IMREAD_GRAYSCALE) / 255.0
        matlab_foreground = cv2.imread('testdata/matlab_foreground.png', cv2.IMREAD_COLOR) / 255.0
        matlab_background = cv2.imread('testdata/matlab_background.png', cv2.IMREAD_COLOR) / 255.0

        sad_alpha = np.mean(np.abs(alpha - matlab_alpha))
        sad_foreground = np.mean(np.abs(foreground - matlab_foreground))
        sad_background = np.mean(np.abs(background - matlab_background))

        self.assertLess(sad_alpha, 1e-2)
        self.assertLess(sad_foreground, 1e-2)
        self.assertLess(sad_background, 1e-2) 
Author: MarcoForte | Project: closed-form-matting | Lines: 20 | Source: test_matting.py

Example 15: __getitem__

# Required import: import cv2 [as alias]
# Or: from cv2 import IMREAD_COLOR [as alias]
def __getitem__(self, index):
        img_id = self.ids[index]
        target = ET.parse(self._annopath % img_id).getroot()
        img = cv2.imread(self._imgpath % img_id, cv2.IMREAD_COLOR)
        #img = Image.open(self._imgpath % img_id).convert('RGB')

        height, width, _ = img.shape

        if self.target_transform is not None:
            target = self.target_transform(target)


        if self.preproc is not None:
            img, target = self.preproc(img, target, self.input_dim)
            #print(img.size())

        img_info = (width, height)

        return img, target, img_info, img_id 
Author: ruinmessi | Project: ASFF | Lines: 21 | Source: vocdataset.py


Note: the cv2.IMREAD_COLOR examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult each project's license before redistributing or reusing the code. Do not reproduce this article without permission.