

Python cv2.imread Method Code Examples

This article collects typical usage examples of the Python cv2.imread method. If you are wondering what cv2.imread does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the cv2 module.


The following presents 15 code examples of the cv2.imread method, sorted by popularity by default.
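Before the collected examples, a minimal sketch of the method itself may be useful (the file name img.jpg below is only a placeholder): cv2.imread(path, flags) decodes an image from disk into a NumPy array in BGR channel order, and returns None, without raising, when the file is missing or cannot be decoded.

import cv2

# The decoding mode is selected by the second argument.
img = cv2.imread("img.jpg", cv2.IMREAD_COLOR)        # 3-channel BGR (default)
gray = cv2.imread("img.jpg", cv2.IMREAD_GRAYSCALE)   # single-channel grayscale
raw = cv2.imread("img.jpg", cv2.IMREAD_UNCHANGED)    # keep alpha channel / original bit depth

# imread returns None on failure instead of raising, so check before use.
if img is None:
    raise FileNotFoundError("could not read img.jpg")

# OpenCV stores channels as BGR; convert when passing to libraries that expect RGB.
rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

Several of the examples below rely on exactly these two behaviors: they compare the return value against None and reverse the channel axis (img[:, :, ::-1]) to obtain RGB.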

Example 1: main

# Required import: import cv2 [as alias]
# Or: from cv2 import imread [as alias]
def main():
	imagePath = "img.jpg"
	
	img = cv2.imread(imagePath)
	gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
	
	generate_histogram(gray)
	
	cv2.imwrite("before.jpg", gray)

	gray = cv2.equalizeHist(gray)
	
	generate_histogram(gray)
	
	cv2.imwrite("after.jpg",gray)
	
	return 0 
Developer: felipecorrea, Project: pedestrian-haar-based-detector, Lines: 19, Source: histcomparison.py

Example 2: _get_image_blob

# Required import: import cv2 [as alias]
# Or: from cv2 import imread [as alias]
def _get_image_blob(roidb, scale_inds):
  """Builds an input blob from the images in the roidb at the specified
  scales.
  """
  num_images = len(roidb)
  processed_ims = []
  im_scales = []
  for i in range(num_images):
    im = cv2.imread(roidb[i]['image'])
    if roidb[i]['flipped']:
      im = im[:, ::-1, :]  # horizontal flip
    target_size = cfg.TRAIN.SCALES[scale_inds[i]]
    im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
                    cfg.TRAIN.MAX_SIZE)
    im_scales.append(im_scale)
    processed_ims.append(im)

  # Create a blob to hold the input images
  blob = im_list_to_blob(processed_ims)

  return blob, im_scales 
Developer: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, Lines: 23, Source: minibatch.py

Example 3: processFrames

# Required import: import cv2 [as alias]
# Or: from cv2 import imread [as alias]
def processFrames(self):
        try:
            for img in self.anotations_list:
                img = img.split(';')
                # print(img)
                # ret,imgcv = cap.read()
                if self.video:
                    ret,imgcv = self.cap.read()
                else:
                    imgcv = cv2.imread(os.path.join('../',self.config["dataset"],img[0]))
                result = self.tfnet.return_predict(imgcv)
                print(result)
                imgcv = self.drawBoundingBox(imgcv,result)        
                cv2.imshow('detected objects',imgcv)
                if cv2.waitKey(10) == ord('q'):
                    print('exiting loop')
                    break
        except KeyboardInterrupt:
            cv2.destroyAllWindows()
            print('exiting program') 
Developer: AmeyaWagh, Project: Traffic_sign_detection_YOLO, Lines: 22, Source: objectDetectorYOLO.py

Example 4: validate_on_lfw

# Required import: import cv2 [as alias]
# Or: from cv2 import imread [as alias]
def validate_on_lfw(model, lfw_160_path):
    # Read the file containing the pairs used for testing
    pairs = lfw.read_pairs('validation-LFW-pairs.txt')
    # Get the paths for the corresponding images
    paths, actual_issame = lfw.get_paths(lfw_160_path, pairs)
    num_pairs = len(actual_issame)

    all_embeddings = np.zeros((num_pairs * 2, 512), dtype='float32')
    for k in tqdm.trange(num_pairs):
        img1 = cv2.imread(paths[k * 2], cv2.IMREAD_COLOR)[:, :, ::-1]      # BGR -> RGB
        img2 = cv2.imread(paths[k * 2 + 1], cv2.IMREAD_COLOR)[:, :, ::-1]  # BGR -> RGB
        batch = np.stack([img1, img2], axis=0)
        embeddings = model.eval_embeddings(batch)
        all_embeddings[k * 2: k * 2 + 2, :] = embeddings

    tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(
        all_embeddings, actual_issame, distance_metric=1, subtract_mean=True)

    print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))

    auc = metrics.auc(fpr, tpr)
    print('Area Under Curve (AUC): %1.3f' % auc)
    eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
    print('Equal Error Rate (EER): %1.3f' % eer) 
Developer: ppwwyyxx, Project: Adversarial-Face-Attack, Lines: 27, Source: face_attack.py

Example 5: get_data

# Required import: import cv2 [as alias]
# Or: from cv2 import imread [as alias]
def get_data(path, activation):
    '''Get the dataset
    '''
    data = []
    image_names = []
    for filename in os.listdir(path):
        img = cv2.imread(os.path.join(path,filename), cv2.IMREAD_GRAYSCALE)
        image_names.append(filename)
        if img is not None:
            data.append(img)

    data = np.asarray(data)

    if activation == 'sigmoid':
        data = data.astype(np.float32)/(255.0)
    elif activation == 'tanh':
        data = data.astype(np.float32)/(255.0/2) - 1.0

    data = data.reshape((data.shape[0], 1, data.shape[1], data.shape[2]))

    np.random.seed(1234)
    p = np.random.permutation(data.shape[0])
    X = data[p]

    return X, image_names 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 27, Source: vaegan_mxnet.py

Example 6: test_resize_short

# Required import: import cv2 [as alias]
# Or: from cv2 import imread [as alias]
def test_resize_short(self):
        try:
            import cv2
        except ImportError:
            return
        for img in TestImage.IMAGES:
            cv_img = cv2.imread(img)
            mx_img = mx.nd.array(cv_img[:, :, (2, 1, 0)])
            h, w, _ = cv_img.shape
            for _ in range(3):
                new_size = np.random.randint(1, 1000)
                if h > w:
                    new_h, new_w = new_size * h // w, new_size
                else:
                    new_h, new_w = new_size, new_size * w // h
                for interp in range(0, 2):
                    # area-based/lanczos don't match with cv2?
                    cv_resized = cv2.resize(cv_img, (new_w, new_h), interpolation=interp)
                    mx_resized = mx.image.resize_short(mx_img, new_size, interp)
                    assert_almost_equal(mx_resized.asnumpy()[:, :, (2, 1, 0)], cv_resized, atol=3) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 22, Source: test_image.py

Example 7: reWriteImgWithMask

# Required import: import cv2 [as alias]
# Or: from cv2 import imread [as alias]
def reWriteImgWithMask(srcpath, dstpath, gtpath, srcform, dstform):
    namelist = GetFileFromThisRootDir(gtpath)
    for fullname in namelist:
        objects = parse_bod_poly(fullname)
        mask_polys = []
        for obj in objects:
            clsname = obj['name']
            matches = re.findall('area|mask', clsname)
            if 'mask' in matches:
                #print('mask:')
                mask_polys.append(shgeo.Polygon(obj['poly']))
            elif 'area' in matches:
                #print('area:')
                mask_polys.append(shgeo.Polygon(obj['poly']))
        basename = mybasename(fullname)
        imgname = os.path.join(srcpath, basename + srcform)
        img = cv2.imread(imgname)
        dstname = os.path.join(dstpath, basename + dstform)
        if len(mask_polys) > 0:
            saveimageWithMask(img, dstname, mask_polys) 
Developer: ringringyi, Project: DOTA_models, Lines: 22, Source: utils.py

Example 8: load_image

# Required import: import cv2 [as alias]
# Or: from cv2 import imread [as alias]
def load_image(self, index):
    # loads 1 image from dataset
    img = self.imgs[index]
    if img is None:
        img_path = self.img_files[index]
        img = cv2.imread(img_path)  # BGR
        assert img is not None, 'Image Not Found ' + img_path
        r = self.img_size / max(img.shape)  # size ratio
        if self.augment and r < 1:  # if training (NOT testing), downsize to inference shape
            h, w, _ = img.shape
            img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_LINEAR)  # _LINEAR fastest

    # Augment colorspace
    if self.augment:
        augment_hsv(img, hgain=self.hyp['hsv_h'], sgain=self.hyp['hsv_s'], vgain=self.hyp['hsv_v'])

    return img 
Developer: zbyuan, Project: pruning_yolov3, Lines: 19, Source: datasets.py

Example 9: convert_images2bmp

# Required import: import cv2 [as alias]
# Or: from cv2 import imread [as alias]
def convert_images2bmp():
    # cv2.imread() jpg at 230 img/s, *.bmp at 400 img/s
    for path in ['../coco/images/val2014/', '../coco/images/train2014/']:
        folder = os.sep + Path(path).name
        output = path.replace(folder, folder + 'bmp')
        if os.path.exists(output):
            shutil.rmtree(output)  # delete output folder
        os.makedirs(output)  # make new output folder

        for f in tqdm(glob.glob('%s*.jpg' % path)):
            save_name = f.replace('.jpg', '.bmp').replace(folder, folder + 'bmp')
            cv2.imwrite(save_name, cv2.imread(f))

    for label_path in ['../coco/trainvalno5k.txt', '../coco/5k.txt']:
        with open(label_path, 'r') as file:
            lines = file.read()
        lines = lines.replace('2014/', '2014bmp/').replace('.jpg', '.bmp').replace(
            '/Users/glennjocher/PycharmProjects/', '../')
        with open(label_path.replace('5k', '5k_bmp'), 'w') as file:
            file.write(lines) 
Developer: zbyuan, Project: pruning_yolov3, Lines: 22, Source: datasets.py

Example 10: crop_images_random

# Required import: import cv2 [as alias]
# Or: from cv2 import imread [as alias]
def crop_images_random(path='../images/', scale=0.50):  # from utils.utils import *; crop_images_random()
    # crops images into random squares up to scale fraction
    # WARNING: overwrites images!
    for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
        img = cv2.imread(file)  # BGR
        if img is not None:
            h, w = img.shape[:2]

            # create random mask
            a = 30  # minimum size (pixels)
            mask_h = random.randint(a, int(max(a, h * scale)))  # mask height
            mask_w = mask_h  # mask width

            # box
            xmin = max(0, random.randint(0, w) - mask_w // 2)
            ymin = max(0, random.randint(0, h) - mask_h // 2)
            xmax = min(w, xmin + mask_w)
            ymax = min(h, ymin + mask_h)

            # crop to the random box and overwrite the original image
            cv2.imwrite(file, img[ymin:ymax, xmin:xmax]) 
Developer: zbyuan, Project: pruning_yolov3, Lines: 23, Source: utils.py

Example 11: __getitem__

# Required import: import cv2 [as alias]
# Or: from cv2 import imread [as alias]
def __getitem__(self, idx):
        images, masks = [], []

        for (image_path, mask_path) in zip(self.image_path_list[idx * self.batch_size: (idx + 1) * self.batch_size],
                                           self.mask_path_list[idx * self.batch_size: (idx + 1) * self.batch_size]):
            image = cv2.imread(image_path, 1)  # 1 = cv2.IMREAD_COLOR
            mask = cv2.imread(mask_path, 0)    # 0 = cv2.IMREAD_GRAYSCALE

            image = self._padding(image)
            mask = self._padding(mask)

            # augmentation
            augmentation = self.transformer(image=image, mask=mask)
            image = augmentation['image']
            mask = self._get_result_map(augmentation['mask'])

            images.append(image)
            masks.append(mask)

        images = np.array(images)
        masks = np.array(masks)
        images = pinput(images)

        return images, masks 
Developer: JACKYLUO1991, Project: Face-skin-hair-segmentaiton-and-skin-color-evaluation, Lines: 26, Source: data_loader.py

Example 12: image_channel_means

# Required import: import cv2 [as alias]
# Or: from cv2 import imread [as alias]
def image_channel_means(image_filenames):
    '''
    Calculate the means of RGB channels in image dataset.
    Support extremely large images of different sizes and arbitrarily large number of images.
    image_filenames: list of image filenames
    '''

    num_pixels = 0
    channel_sums = np.zeros(3, dtype=object)

    for image_filename in tqdm(image_filenames):
        image = cv2.imread(image_filename)
        channel_sums += np.sum(image, axis=(0, 1))
        num_pixels += np.prod(image.shape[:2])

    channel_means = (channel_sums / num_pixels).astype(float)

    return channel_means 
Developer: leimao, Project: DeepLab_v3, Lines: 20, Source: utils.py

Example 13: __getitem__

# Required import: import cv2 [as alias]
# Or: from cv2 import imread [as alias]
def __getitem__(self, index):
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        size = image.shape
        name = osp.splitext(osp.basename(datafiles["img"]))[0]
        image = np.asarray(image, np.float32)
        image -= self.mean
        
        img_h, img_w, _ = image.shape
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            image = cv2.copyMakeBorder(image, 0, pad_h, 0, 
                pad_w, cv2.BORDER_CONSTANT, 
                value=(0.0, 0.0, 0.0))
        image = image.transpose((2, 0, 1))
        return image, name, size 
Developer: speedinghzl, Project: pytorch-segmentation-toolbox, Lines: 19, Source: datasets.py

Example 14: pull_item

# Required import: import cv2 [as alias]
# Or: from cv2 import imread [as alias]
def pull_item(self, index):
        img_id = self.ids[index]

        target = ET.parse(self._annopath % img_id).getroot()
        img = cv2.imread(self._imgpath % img_id)
        height, width, channels = img.shape

        if self.target_transform is not None:
            target = self.target_transform(target, width, height)

        if self.transform is not None:
            target = np.array(target)
            img, boxes, labels = self.transform(img, target[:, :4], target[:, 4])
            # to rgb
            img = img[:, :, (2, 1, 0)]
            # img = img.transpose(2, 0, 1)
            target = np.hstack((boxes, np.expand_dims(labels, axis=1)))
        return torch.from_numpy(img).permute(2, 0, 1), target, height, width
        # return torch.from_numpy(img), target, height, width 
Developer: soo89, Project: CSD-SSD, Lines: 21, Source: voc0712.py

Example 15: pull_item

# Required import: import cv2 [as alias]
# Or: from cv2 import imread [as alias]
def pull_item(self, index):
        img_id = self.ids[index]

        target = ET.parse(self._annopath % img_id).getroot()
        img = cv2.imread(self._imgpath % img_id)
        height, width, channels = img.shape

        if self.target_transform is not None:
            target = self.target_transform(target, width, height)

        if self.transform is not None:
            target = np.array(target)
            img, boxes, labels = self.transform(img, target[:, :4], target[:, 4])
            # to rgb
            img = img[:, :, (2, 1, 0)]
            # img = img.transpose(2, 0, 1)
            target = np.hstack((boxes, np.expand_dims(labels, axis=1)))

        if img_id[0][(len(img_id[0]) - 7):] == 'VOC2007':
            semi = np.array([1])
        else:
            semi = np.array([0])
            target = np.zeros([1, 5])
        return torch.from_numpy(img).permute(2, 0, 1), target, height, width, semi
        # return torch.from_numpy(img), target, height, width 
Developer: soo89, Project: CSD-SSD, Lines: 27, Source: voc07_consistency_init.py


Note: The cv2.imread examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and the copyright of the source code belongs to the original authors. Refer to each project's license before distributing or using the code; do not reproduce this article without permission.