This article collects typical usage examples of the cv2.imread method in Python. If you are unsure what cv2.imread does or how to use it, the curated code examples below may help. You can also explore further usage examples of cv2, the module this method belongs to.
The following presents 15 code examples of cv2.imread, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
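Before the examples, here is a minimal orientation sketch (not taken from any of the projects below; "sample.jpg" is a hypothetical path) showing the core behavior of cv2.imread: it takes a file path and an optional flag, returns a NumPy array in BGR channel order, and returns None rather than raising when the file cannot be read.
import cv2

img = cv2.imread("sample.jpg", cv2.IMREAD_COLOR)        # BGR NumPy array of shape (H, W, 3)
if img is None:                                          # imread does not raise on failure
    raise FileNotFoundError("sample.jpg could not be read")
gray = cv2.imread("sample.jpg", cv2.IMREAD_GRAYSCALE)    # single-channel (H, W) array
rgb = img[:, :, ::-1]                                    # reorder BGR to RGB when a library expects RGB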
Example 1: main
# Required import: import cv2 [as alias]
# Or: from cv2 import imread [as alias]
def main():
    imagePath = "img.jpg"
    img = cv2.imread(imagePath)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    generate_histogram(gray)
    cv2.imwrite("before.jpg", gray)

    gray = cv2.equalizeHist(gray)
    generate_histogram(gray)
    cv2.imwrite("after.jpg", gray)
    return 0
Example 2: _get_image_blob
# Required import: import cv2 [as alias]
# Or: from cv2 import imread [as alias]
def _get_image_blob(roidb, scale_inds):
    """Builds an input blob from the images in the roidb at the specified
    scales.
    """
    num_images = len(roidb)
    processed_ims = []
    im_scales = []
    for i in range(num_images):
        im = cv2.imread(roidb[i]['image'])
        if roidb[i]['flipped']:
            im = im[:, ::-1, :]
        target_size = cfg.TRAIN.SCALES[scale_inds[i]]
        im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
                                        cfg.TRAIN.MAX_SIZE)
        im_scales.append(im_scale)
        processed_ims.append(im)

    # Create a blob to hold the input images
    blob = im_list_to_blob(processed_ims)

    return blob, im_scales
Developer: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, Lines of code: 23, Source: minibatch.py
Example 3: processFrames
# Required import: import cv2 [as alias]
# Or: from cv2 import imread [as alias]
def processFrames(self):
    try:
        for img in self.anotations_list:
            img = img.split(';')
            # print(img)
            # ret, imgcv = cap.read()
            if self.video:
                ret, imgcv = self.cap.read()
            else:
                imgcv = cv2.imread(os.path.join('../', self.config["dataset"], img[0]))
            result = self.tfnet.return_predict(imgcv)
            print(result)
            imgcv = self.drawBoundingBox(imgcv, result)
            cv2.imshow('detected objects', imgcv)
            if cv2.waitKey(10) == ord('q'):
                print('exiting loop')
                break
    except KeyboardInterrupt:
        cv2.destroyAllWindows()
        print('exiting program')
Example 4: validate_on_lfw
# Required import: import cv2 [as alias]
# Or: from cv2 import imread [as alias]
def validate_on_lfw(model, lfw_160_path):
    # Read the file containing the pairs used for testing
    pairs = lfw.read_pairs('validation-LFW-pairs.txt')
    # Get the paths for the corresponding images
    paths, actual_issame = lfw.get_paths(lfw_160_path, pairs)

    num_pairs = len(actual_issame)

    all_embeddings = np.zeros((num_pairs * 2, 512), dtype='float32')
    for k in tqdm.trange(num_pairs):
        img1 = cv2.imread(paths[k * 2], cv2.IMREAD_COLOR)[:, :, ::-1]
        img2 = cv2.imread(paths[k * 2 + 1], cv2.IMREAD_COLOR)[:, :, ::-1]
        batch = np.stack([img1, img2], axis=0)
        embeddings = model.eval_embeddings(batch)
        all_embeddings[k * 2: k * 2 + 2, :] = embeddings

    tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(
        all_embeddings, actual_issame, distance_metric=1, subtract_mean=True)

    print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))

    auc = metrics.auc(fpr, tpr)
    print('Area Under Curve (AUC): %1.3f' % auc)
    eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
    print('Equal Error Rate (EER): %1.3f' % eer)
Example 5: get_data
# Required import: import cv2 [as alias]
# Or: from cv2 import imread [as alias]
def get_data(path, activation):
    '''Get the dataset
    '''
    data = []
    image_names = []
    for filename in os.listdir(path):
        img = cv2.imread(os.path.join(path, filename), cv2.IMREAD_GRAYSCALE)
        image_names.append(filename)
        if img is not None:
            data.append(img)

    data = np.asarray(data)

    if activation == 'sigmoid':
        data = data.astype(np.float32) / 255.0
    elif activation == 'tanh':
        data = data.astype(np.float32) / (255.0 / 2) - 1.0

    data = data.reshape((data.shape[0], 1, data.shape[1], data.shape[2]))

    np.random.seed(1234)
    p = np.random.permutation(data.shape[0])
    X = data[p]

    return X, image_names
Example 6: test_resize_short
# Required import: import cv2 [as alias]
# Or: from cv2 import imread [as alias]
def test_resize_short(self):
    try:
        import cv2
    except ImportError:
        return
    for img in TestImage.IMAGES:
        cv_img = cv2.imread(img)
        mx_img = mx.nd.array(cv_img[:, :, (2, 1, 0)])
        h, w, _ = cv_img.shape
        for _ in range(3):
            new_size = np.random.randint(1, 1000)
            if h > w:
                new_h, new_w = new_size * h // w, new_size
            else:
                new_h, new_w = new_size, new_size * w // h
            for interp in range(0, 2):
                # area-based/lanczos don't match with cv2?
                cv_resized = cv2.resize(cv_img, (new_w, new_h), interpolation=interp)
                mx_resized = mx.image.resize_short(mx_img, new_size, interp)
                assert_almost_equal(mx_resized.asnumpy()[:, :, (2, 1, 0)], cv_resized, atol=3)
Example 7: reWriteImgWithMask
# Required import: import cv2 [as alias]
# Or: from cv2 import imread [as alias]
def reWriteImgWithMask(srcpath, dstpath, gtpath, srcform, dstform):
    namelist = GetFileFromThisRootDir(gtpath)
    for fullname in namelist:
        objects = parse_bod_poly(fullname)
        mask_polys = []
        for obj in objects:
            clsname = obj['name']
            matches = re.findall('area|mask', clsname)
            if 'mask' in matches:
                # print('mask:')
                mask_polys.append(shgeo.Polygon(obj['poly']))
            elif 'area' in matches:
                # print('area:')
                mask_polys.append(shgeo.Polygon(obj['poly']))
        basename = mybasename(fullname)
        imgname = os.path.join(srcpath, basename + srcform)
        img = cv2.imread(imgname)
        dstname = os.path.join(dstpath, basename + dstform)
        if len(mask_polys) > 0:
            saveimageWithMask(img, dstname, mask_polys)
Example 8: load_image
# Required import: import cv2 [as alias]
# Or: from cv2 import imread [as alias]
def load_image(self, index):
    # loads 1 image from dataset
    img = self.imgs[index]
    if img is None:
        img_path = self.img_files[index]
        img = cv2.imread(img_path)  # BGR
        assert img is not None, 'Image Not Found ' + img_path
        r = self.img_size / max(img.shape)  # size ratio
        if self.augment and r < 1:  # if training (NOT testing), downsize to inference shape
            h, w, _ = img.shape
            img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_LINEAR)  # _LINEAR fastest

    # Augment colorspace
    if self.augment:
        augment_hsv(img, hgain=self.hyp['hsv_h'], sgain=self.hyp['hsv_s'], vgain=self.hyp['hsv_v'])

    return img
Example 9: convert_images2bmp
# Required import: import cv2 [as alias]
# Or: from cv2 import imread [as alias]
def convert_images2bmp():
    # cv2.imread() jpg at 230 img/s, *.bmp at 400 img/s
    for path in ['../coco/images/val2014/', '../coco/images/train2014/']:
        folder = os.sep + Path(path).name
        output = path.replace(folder, folder + 'bmp')
        if os.path.exists(output):
            shutil.rmtree(output)  # delete output folder
        os.makedirs(output)  # make new output folder

        for f in tqdm(glob.glob('%s*.jpg' % path)):
            save_name = f.replace('.jpg', '.bmp').replace(folder, folder + 'bmp')
            cv2.imwrite(save_name, cv2.imread(f))

    for label_path in ['../coco/trainvalno5k.txt', '../coco/5k.txt']:
        with open(label_path, 'r') as file:
            lines = file.read()
        lines = lines.replace('2014/', '2014bmp/').replace('.jpg', '.bmp').replace(
            '/Users/glennjocher/PycharmProjects/', '../')
        with open(label_path.replace('5k', '5k_bmp'), 'w') as file:
            file.write(lines)
Example 10: crop_images_random
# Required import: import cv2 [as alias]
# Or: from cv2 import imread [as alias]
def crop_images_random(path='../images/', scale=0.50):  # from utils.utils import *; crop_images_random()
    # crops images into random squares up to scale fraction
    # WARNING: overwrites images!
    for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
        img = cv2.imread(file)  # BGR
        if img is not None:
            h, w = img.shape[:2]

            # create random mask
            a = 30  # minimum size (pixels)
            mask_h = random.randint(a, int(max(a, h * scale)))  # mask height
            mask_w = mask_h  # mask width

            # box
            xmin = max(0, random.randint(0, w) - mask_w // 2)
            ymin = max(0, random.randint(0, h) - mask_h // 2)
            xmax = min(w, xmin + mask_w)
            ymax = min(h, ymin + mask_h)

            # apply random color mask
            cv2.imwrite(file, img[ymin:ymax, xmin:xmax])
Example 11: __getitem__
# Required import: import cv2 [as alias]
# Or: from cv2 import imread [as alias]
def __getitem__(self, idx):
    images, masks = [], []
    for (image_path, mask_path) in zip(self.image_path_list[idx * self.batch_size: (idx + 1) * self.batch_size],
                                       self.mask_path_list[idx * self.batch_size: (idx + 1) * self.batch_size]):
        image = cv2.imread(image_path, 1)
        mask = cv2.imread(mask_path, 0)
        image = self._padding(image)
        mask = self._padding(mask)
        # augmentation
        augmentation = self.transformer(image=image, mask=mask)
        image = augmentation['image']
        mask = self._get_result_map(augmentation['mask'])
        images.append(image)
        masks.append(mask)

    images = np.array(images)
    masks = np.array(masks)
    images = pinput(images)

    return images, masks
Developer: JACKYLUO1991, Project: Face-skin-hair-segmentaiton-and-skin-color-evaluation, Lines of code: 26, Source: data_loader.py
Example 12: image_channel_means
# Required import: import cv2 [as alias]
# Or: from cv2 import imread [as alias]
def image_channel_means(image_filenames):
    '''
    Calculate the means of RGB channels in image dataset.
    Support extremely large images of different sizes and arbitrarily large number of images.
    image_filenames: list of image filenames
    '''
    num_pixels = 0
    channel_sums = np.zeros(3, dtype=object)

    for image_filename in tqdm(image_filenames):
        image = cv2.imread(image_filename)
        channel_sums += np.sum(image, axis=(0, 1))
        num_pixels += np.prod(image.shape[:2])

    channel_means = (channel_sums / num_pixels).astype(float)

    return channel_means
Example 13: __getitem__
# Required import: import cv2 [as alias]
# Or: from cv2 import imread [as alias]
def __getitem__(self, index):
    datafiles = self.files[index]
    image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
    size = image.shape
    name = osp.splitext(osp.basename(datafiles["img"]))[0]
    image = np.asarray(image, np.float32)
    image -= self.mean

    img_h, img_w, _ = image.shape
    pad_h = max(self.crop_h - img_h, 0)
    pad_w = max(self.crop_w - img_w, 0)
    if pad_h > 0 or pad_w > 0:
        image = cv2.copyMakeBorder(image, 0, pad_h, 0,
                                   pad_w, cv2.BORDER_CONSTANT,
                                   value=(0.0, 0.0, 0.0))
    image = image.transpose((2, 0, 1))

    return image, name, size
Example 14: pull_item
# Required import: import cv2 [as alias]
# Or: from cv2 import imread [as alias]
def pull_item(self, index):
    img_id = self.ids[index]

    target = ET.parse(self._annopath % img_id).getroot()
    img = cv2.imread(self._imgpath % img_id)
    height, width, channels = img.shape

    if self.target_transform is not None:
        target = self.target_transform(target, width, height)

    if self.transform is not None:
        target = np.array(target)
        img, boxes, labels = self.transform(img, target[:, :4], target[:, 4])
        # to rgb
        img = img[:, :, (2, 1, 0)]
        # img = img.transpose(2, 0, 1)
        target = np.hstack((boxes, np.expand_dims(labels, axis=1)))
    return torch.from_numpy(img).permute(2, 0, 1), target, height, width
    # return torch.from_numpy(img), target, height, width
Example 15: pull_item
# Required import: import cv2 [as alias]
# Or: from cv2 import imread [as alias]
def pull_item(self, index):
    img_id = self.ids[index]

    target = ET.parse(self._annopath % img_id).getroot()
    img = cv2.imread(self._imgpath % img_id)
    height, width, channels = img.shape

    if self.target_transform is not None:
        target = self.target_transform(target, width, height)

    if self.transform is not None:
        target = np.array(target)
        img, boxes, labels = self.transform(img, target[:, :4], target[:, 4])
        # to rgb
        img = img[:, :, (2, 1, 0)]
        # img = img.transpose(2, 0, 1)
        target = np.hstack((boxes, np.expand_dims(labels, axis=1)))

    if img_id[0][(len(img_id[0]) - 7):] == 'VOC2007':
        semi = np.array([1])
    else:
        semi = np.array([0])
        target = np.zeros([1, 5])
    return torch.from_numpy(img).permute(2, 0, 1), target, height, width, semi
    # return torch.from_numpy(img), target, height, width