This article collects typical usage examples of the skimage.io.imread function in Python. If you have been wondering what exactly the imread function does, how to call it, or what real-world imread code looks like, the curated examples here may help.
Fifteen code examples of the imread function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
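Before the examples, here is a minimal sketch of the basic call pattern; the file name example.png is a placeholder rather than a file taken from any of the examples below:

import skimage.io as skio

# imread returns a NumPy ndarray: (H, W) for grayscale images,
# (H, W, 3) or (H, W, 4) for color images.
img = skio.imread('example.png')          # hypothetical file name
print(img.shape, img.dtype)

# Load directly as a grayscale float image (the keyword is as_gray in recent
# scikit-image releases; several examples below use the older as_grey).
gray = skio.imread('example.png', as_gray=True)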
Example 1: compute_mean_image
def compute_mean_image(training_data_path, testing_data_path, save_flag=True, save_file=''):
    print('computing mean images')
    folder = os.listdir(training_data_path)
    trainNum = len(folder)
    init_flag = True
    for f in folder:
        img = skimage.img_as_float(skio.imread(training_data_path + f))
        if init_flag:
            mean_image = img
            init_flag = False
        else:
            mean_image += img
    folder = os.listdir(testing_data_path)
    testNum = len(folder)
    for f in folder:
        img = skimage.img_as_float(skio.imread(testing_data_path + f))
        mean_image += img
    mean_image /= (trainNum + testNum)
    if len(mean_image.shape) == 2:
        # if gray, (h, w) to (1, h, w)
        tmp = np.zeros((1, mean_image.shape[0], mean_image.shape[1]))
        tmp[0, ...] = mean_image
        mean_image = tmp
    else:
        # if color, swap (h, w, ch) to (ch, h, w)
        mean_image = mean_image.swapaxes(1, 2)
        mean_image = mean_image.swapaxes(0, 1)
    if save_flag:
        with open(save_file, 'wb') as f:
            np.save(f, mean_image)
    return mean_image
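A side note on the color branch above: the two swapaxes calls convert an (h, w, ch) array to (ch, h, w). An equivalent, arguably clearer sketch using np.transpose (the array here is just a placeholder):

import numpy as np

hwc_image = np.zeros((224, 224, 3))             # placeholder (h, w, ch) image
chw_image = np.transpose(hwc_image, (2, 0, 1))  # same result as the two swapaxes calls
assert chw_image.shape == (3, 224, 224)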
Example 2: extract_features
def extract_features():
    des_type = 'HOG'
    # If feature directories don't exist, create them
    if not os.path.isdir(pos_feat_ph):
        os.makedirs(pos_feat_ph)
    if not os.path.isdir(neg_feat_ph):
        os.makedirs(neg_feat_ph)
    print "Calculating the descriptors for the positive samples and saving them"
    for im_path in glob.glob(os.path.join(pos_im_path, "*")):
        #print im_path
        im = imread(im_path, as_grey=True)
        if des_type == "HOG":
            fd = hog(im, orientations, pixels_per_cell, cells_per_block, visualize, normalize)
            fd_name = os.path.split(im_path)[1].split(".")[0] + ".feat"
            fd_path = os.path.join(pos_feat_ph, fd_name)
            joblib.dump(fd, fd_path)
    print "Positive features saved in {}".format(pos_feat_ph)
    print "Calculating the descriptors for the negative samples and saving them"
    for im_path in glob.glob(os.path.join(neg_im_path, "*")):
        im = imread(im_path, as_grey=True)
        if des_type == "HOG":
            fd = hog(im, orientations, pixels_per_cell, cells_per_block, visualize, normalize)
            fd_name = os.path.split(im_path)[1].split(".")[0] + ".feat"
            fd_path = os.path.join(neg_feat_ph, fd_name)
            joblib.dump(fd, fd_path)
    print "Negative features saved in {}".format(neg_feat_ph)
    print "Completed calculating features from training images"
Example 3: run_simple
def run_simple(self):
    u'''Simple prediction'''
    if len(self.datapath) >= 2:
        # Use only two previous images
        af_img = io.imread(self.datapath[0])
        bf_img = io.imread(self.datapath[1])
        #af_img = io.imread(r'./viptrafficof_02.png')
        #bf_img = io.imread(r'./viptrafficof_03.png')
        # Convert to gray image
        af_gray = color.rgb2gray(af_img)
        bf_gray = color.rgb2gray(bf_img)
        # Calculate density flow
        # Small -> WHY?
        flow = cv2.calcOpticalFlowFarneback(bf_gray, af_gray,
                                            0.5, 6, 20, 10, 5, 1.2, 0)
        print flow.shape, flow[:, :, 0].min(), flow[:, :, 1].max()
        self.before = bf_gray
        self.after = af_gray
        #self.result = self.current
        self.result = transform(af_img, flow)
        # Color code the result for better visualization of optical flow.
        # Direction corresponds to Hue value of the image.
        # Magnitude corresponds to Value plane
        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        hsv = np.zeros_like(af_img)
        hsv[..., 1] = 255
        hsv[..., 0] = ang * 180 / np.pi / 2
        hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
        self.optical = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
Example 4: load_images
def load_images(random_state=1234):
    train_df = pd.read_csv("data/train.csv", index_col="id", usecols=[0])
    depths_df = pd.read_csv("data/depths.csv", index_col="id")
    train_df = train_df.join(depths_df)
    test_df = depths_df[~depths_df.index.isin(train_df.index)]
    print(">>> train_df:", train_df.shape)
    print(train_df.head())
    print(">>> test_df:", test_df.shape)
    print(test_df.head())
    train_df["images"] = [gradmag(np.array(imread(path_train_images + "{}.png".format(idx)))) for idx in tqdm(train_df.index)]
    train_df["masks"] = [np.array(load_img(path_train_masks + "{}.png".format(idx), grayscale=True)) / 255 for idx in tqdm(train_df.index)]
    train_df["coverage"] = train_df.masks.map(np.sum) / pow(img_size_ori, 2)
    train_df["coverage_class"] = train_df.coverage.map(cov_to_class)
    print("*** TRAIN ***")
    print(train_df.head())
    print("*** TEST ***")
    print(test_df.head())
    ids_train, ids_valid, x_train, x_valid, y_train, y_valid, cov_train, cov_test, depth_train, depth_test = train_test_split(
        train_df.index.values,
        np.array(train_df.images.tolist()).reshape(-1, img_size_target, img_size_target, 1),
        np.array(train_df.masks.tolist()).reshape(-1, img_size_target, img_size_target, 1),
        train_df.coverage.values,
        train_df.z.values,
        test_size=0.2,
        stratify=train_df.coverage_class,
        random_state=random_state)
    # Data augmentation
    x_train2 = np.append(x_train, [np.fliplr(x) for x in x_train], axis=0)
    y_train2 = np.append(y_train, [np.fliplr(x) for x in y_train], axis=0)
    print(x_train2.shape)
    print(y_valid.shape)
    x_test = np.array([gradmag(np.array(imread(path_test_images + "{}.png".format(idx)))) for idx in tqdm(test_df.index)]).reshape(-1, img_size_target, img_size_target, 1)
    return x_train2, x_valid, y_train2, y_valid, x_test, test_df.index.values
Example 5: count_bubble
def count_bubble(image_filename, ref_filename, plot_show=0):
    image = io.imread(gv.__DIR__ + gv.__TrainImageDir__ + image_filename)
    ref_image = io.imread(gv.__DIR__ + gv.__TrainImageDir__ + ref_filename)
    image_gray = rgb2gray(image)
    ref_gray = rgb2gray(ref_image)
    # Constants
    Window_Size = 5
    pre_image = pre.noise_reduction(image_gray,
                                    ref_gray,
                                    Window_Size,
                                    mode=0)
    seg_image = segmentation(pre_image, 'self_design')
    perimeters = perimeter_exaction(seg_image, image, image_filename)
    if plot_show == 1:
        fig, ax = plt.subplots(1, 3)
        ax[0].imshow(image)
        ax[0].set_title('Original')
        ax[1].imshow(seg_image, cmap=plt.cm.gray)
        ax[1].set_title('Segmentation')
        result = io.imread(gv.__DIR__ + gv.cu__image_dir + image_filename)
        ax[2].imshow(result)
        ax[2].set_title('Result')
        plt.show()
    return perimeters
Example 6: test
def test(classifier, pca):
    building = io.imread("http://www.nps.gov/tps/images/briefs/14-commercial-building.jpg")
    building = transform.resize(building, (200, 200, 3))
    building = color.rgb2gray(building)
    building = building.reshape(1, -1)
    # building = pca.transform(building)
    print building
    print classifier.predict(building)[0]
    print to_cat[str(classifier.predict(building)[0])] + " (expect building)"
    # print classifier.predict_proba(building)
    snow = io.imread("http://farm4.static.flickr.com/3405/3332148397_92d89db2ab.jpg")
    snow = transform.resize(snow, (200, 200, 3))
    snow = color.rgb2gray(snow)
    snow = snow.reshape(1, -1)
    # snow = pca.transform(snow)
    print snow
    print to_cat[str(classifier.predict(snow)[0])] + " (expect snow)"
    # print classifier.predict_proba(snow)
    flower = io.imread("https://upload.wikimedia.org/wikipedia/commons/f/fd/Daisy_flower_green_background.jpg")
    flower = transform.resize(flower, (200, 200, 3))
    flower = color.rgb2gray(flower)
    flower = flower.reshape(1, -1)
    # flower = pca.transform(flower)
    print to_cat[str(classifier.predict(flower)[0])] + " (expect plant)"
Example 7: test_multi_page_gif
def test_multi_page_gif():
    img = imread(os.path.join(data_dir, 'no_time_for_that.gif'))
    assert img.shape == (24, 280, 500, 3), img.shape
    img2 = imread(os.path.join(data_dir, 'no_time_for_that.gif'),
                  img_num=5)
    assert img2.shape == (280, 500, 3)
    assert_allclose(img[5], img2)
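As the assertions show, reading a multi-frame GIF without img_num returns the whole frame stack with a leading frame axis, while passing img_num selects a single frame. A minimal sketch under that assumption (animation.gif is a hypothetical file):

from skimage.io import imread

stack = imread('animation.gif')               # hypothetical file -> (num_frames, H, W, 3)
frame5 = imread('animation.gif', img_num=5)   # single frame -> (H, W, 3)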
Example 8: put_image
def put_image(self, image_path):
    # print "Loading the image"
    self.image = io.imread(image_path, as_grey=True)
    self.image = transform.resize(self.image, (50, 50))
    self.image_scaled = io.imread(image_path, as_grey=True)
    self.image_scaled = transform.resize(self.image_scaled, (50, 50))
    self.image_scaled *= (1 / self.image_scaled.max())
Example 9: iris_scan_orb
def iris_scan_orb(request):
    from skimage import io
    from skimage.feature import (match_descriptors, ORB)
    from skimage.color import rgb2gray
    from .settings import MEDIA_ROOT
    img1 = rgb2gray(io.imread(MEDIA_ROOT + '/IRIS3.jpg'))  # Query
    img2 = rgb2gray(io.imread(MEDIA_ROOT + '/IRIS6.jpg'))  # Comparing to
    descriptor_extractor = ORB(n_keypoints=200)
    descriptor_extractor.detect_and_extract(img1)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors  # Query Descriptor
    descriptor_extractor.detect_and_extract(img2)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors  # Comparing To Descriptors
    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
    # print("Matched: ", len(matches12), " of ", len(descriptors1))
    percent = len(matches12) / len(descriptors1) * 100
    # print("Percent Match - ", percent, "%")
    """if percent > 80:
        print("Matched!")
    else:
        print("Not Matched!")"""
    return render(request, 'scan.html', {'percent': percent})
Example 10: readTestingFragment
def readTestingFragment(datapath, fragList, imgSize=(1, 224, 224), meanImage=[]):
    ch, ih, iw = imgSize
    fragLen = len(fragList)
    if ch == 1:
        X = np.zeros((fragLen, 1, ih, iw))
        idx = -1
        print('reading data')
        for f in fragList:
            idx += 1
            # print(f)
            img = skimage.img_as_float(skio.imread(datapath + f))
            # img -= meanImage
            X[idx, 0, ...] = img
    elif ch == 3:
        X = np.zeros((fragLen, 3, ih, iw))
        idx = -1
        print('reading data')
        for f in fragList:
            idx += 1
            img = skimage.img_as_float(skio.imread(datapath + f))
            img = img.swapaxes(1, 2)
            img = img.swapaxes(0, 1)
            # img -= meanImage
            X[idx, ...] = img
    X -= np.tile(meanImage, [fragLen, 1, 1, 1])
    return X
Example 11: read_input_OLE
def read_input_OLE():
    alphabet = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m",
                "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
    #alphabet=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,17,19,20,21,2]
    n = 70
    train_data = []
    train_solutions = []
    for i in range(n):
        for letter in alphabet:
            train_data.append(np.asarray(flatten_image(imread("chars74k-lite/chars74k-lite/" + letter + "/" + letter + "_" + str(i) + ".jpg"))))
            train_data[-1] = np.divide(train_data[-1], 255)
            train_solutions.append(ord(letter))
    #
    m = 88
    test_data = []
    test_solutions = []
    for j in range(n, m):
        for letter in alphabet:
            test_data.append(np.asarray(flatten_image(imread("chars74k-lite/chars74k-lite/" + letter + "/" + letter + "_" + str(j) + ".jpg"))))
            test_data[-1] = np.divide(test_data[-1], 255)
            test_solutions.append(ord(letter))
    #
    print(len(train_data))
    print(len(train_solutions))
    print(len(test_data))
    print(test_solutions)
    return train_data, train_solutions, test_data, test_solutions
Example 12: readTrainingFragment
def readTrainingFragment(datapath, fragList, imgSize=(1, 224, 224), meanImage=[], classNum=10):
    ch, ih, iw = imgSize
    fragLen = len(fragList)
    if ch == 1:
        X = np.zeros((fragLen, 1, ih, iw))
        Y = np.zeros((fragLen), dtype=int)
        idx = -1
        print('reading data')
        for f in fragList:
            idx += 1
            # print(f)
            label = np.int(f[0])
            img = skimage.img_as_float(skio.imread(datapath + f))
            # img -= meanImage
            X[idx, 0, ...] = img
            Y[idx] = label
    elif ch == 3:
        X = np.zeros((fragLen, 3, ih, iw))
        Y = np.zeros((fragLen), dtype=int)
        idx = -1
        print('reading data')
        for f in fragList:
            idx += 1
            label = np.int(f[0])
            img = skimage.img_as_float(skio.imread(datapath + f))
            img = img.swapaxes(1, 2)
            img = img.swapaxes(0, 1)
            # img -= meanImage
            X[idx, ...] = img
            Y[idx] = label
    X -= np.tile(meanImage, [fragLen, 1, 1, 1])
    Y = np_utils.to_categorical(Y, classNum)
    return X, Y
Example 13: computeMeanImage
def computeMeanImage(trainingPath, testingPath, savePath, imgSize):
    ch, ih, iw = imgSize
    meanImage = np.zeros((ch, ih, iw))
    print('computing mean image')
    folder = os.listdir(trainingPath)
    trainNum = 0
    for f in folder:
        if not f[-4:] == '.jpg':
            continue
        img = skimage.img_as_float(skio.imread(trainingPath + f))
        trainNum += 1
        if ch == 3:
            img = img.swapaxes(1, 2)
            img = img.swapaxes(0, 1)
        meanImage += img
    folder = os.listdir(testingPath)
    testNum = 0
    for f in folder:
        if not f[-4:] == '.jpg':
            continue
        img = skimage.img_as_float(skio.imread(testingPath + f))
        testNum += 1
        if ch == 3:
            img = img.swapaxes(1, 2)
            img = img.swapaxes(0, 1)
        meanImage += img
    meanImage /= (trainNum + testNum)
    with open(savePath, 'wb') as f:
        np.save(f, meanImage)
Example 14: load_sequence
def load_sequence(self, sequence):
    """Load a sequence of images/frames

    Auxiliary function that loads a sequence of frames with
    the corresponding ground truth and their filenames.
    Returns a dict with the images in [0, 1], their corresponding
    labels, their subset (i.e. category, clip, prefix) and their
    filenames.
    """
    from skimage import io
    X = []
    Y = []
    F = []
    for prefix, frame in sequence:
        img = io.imread(os.path.join(self.image_path, frame))
        mask = io.imread(os.path.join(self.mask_path, frame))
        img = img.astype(floatX) / 255.
        mask = mask.astype('int32')
        X.append(img)
        Y.append(mask)
        F.append(frame)
    ret = {}
    ret['data'] = np.array(X)
    ret['labels'] = np.array(Y)
    ret['subset'] = prefix
    ret['filenames'] = np.array(F)
    return ret
Example 15: load_sequence
def load_sequence(self, sequence):
    """Load a sequence of images/frames

    Auxiliary function that loads a sequence of frames with
    the corresponding ground truth and their filenames.
    Returns a dict with the images in [0, 1], their corresponding
    labels, their subset (i.e. category, clip, prefix) and their
    filenames.
    """
    from skimage import io
    image_batch, mask_batch, filename_batch = [], [], []
    for prefix, img_name in sequence:
        img = io.imread(os.path.join(self.image_path, img_name + ".bmp"))
        img = img.astype(floatX) / 255.
        mask = np.array(io.imread(os.path.join(self.mask_path,
                                               img_name + ".tif")))
        mask = mask.astype('int32')
        # Add to minibatch
        image_batch.append(img)
        mask_batch.append(mask)
        filename_batch.append(img_name)
    ret = {}
    ret['data'] = np.array(image_batch)
    ret['labels'] = np.array(mask_batch)
    ret['subset'] = prefix
    ret['filenames'] = np.array(filename_batch)
    return ret