This article collects typical usage examples of the Python method keras.preprocessing.image.load_img. If you have been wondering what exactly image.load_img does, how to call it, or what working code looks like, the curated examples below should help. You can also explore further usage examples of the module it belongs to, keras.preprocessing.image.
The following shows 15 code examples of the image.load_img method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
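Before the examples, here is a minimal sketch of the workflow most of these snippets share: load an image resized to a fixed target size, convert it to a NumPy array, and add a batch axis before passing it to a model. The file path and the 224x224 target size below are placeholders, not taken from any of the examples.

import numpy as np
from keras.preprocessing.image import load_img, img_to_array

# Load the image as a PIL image resized to 224x224 (the path is a placeholder).
img = load_img('example.jpg', target_size=(224, 224))
# Convert to a float32 array of shape (224, 224, 3).
x = img_to_array(img)
# Add a leading batch axis so the tensor has shape (1, 224, 224, 3).
x = np.expand_dims(x, axis=0)
print(x.shape)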
Example 1: predict
# Required module import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import load_img [as alias]
def predict(self, f, k=5, resize_mode='fill'):
    from keras.preprocessing import image
    from vergeml.img import resize_image
    filename = os.path.basename(f)
    if not os.path.exists(f):
        return dict(filename=filename, prediction=[])
    img = image.load_img(f)
    img = resize_image(img, self.image_size, self.image_size, 'antialias', resize_mode)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = self.preprocess_input(x)
    preds = self.model.predict(x)
    pred = self._decode(preds, top=k)[0]
    prediction = [dict(probability=np.asscalar(perc), label=klass) for _, klass, perc in pred]
    return dict(filename=filename, prediction=prediction)
Example 2: extract_features
# Required module import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import load_img [as alias]
def extract_features(filename, model, model_type):
    if model_type == 'inceptionv3':
        from keras.applications.inception_v3 import preprocess_input
        target_size = (299, 299)
    elif model_type == 'vgg16':
        from keras.applications.vgg16 import preprocess_input
        target_size = (224, 224)
    # Load and resize the image
    image = load_img(filename, target_size=target_size)
    # Convert the image pixels to a numpy array
    image = img_to_array(image)
    # Reshape data for the model
    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
    # Prepare the image for the CNN model
    image = preprocess_input(image)
    # Pass the image through the model to get encoded features
    features = model.predict(image, verbose=0)
    return features

# Load the tokenizer
Example 3: extract_features
# Required module import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import load_img [as alias]
def extract_features(path, model_type):
    if model_type == 'inceptionv3':
        from keras.applications.inception_v3 import preprocess_input
        target_size = (299, 299)
    elif model_type == 'vgg16':
        from keras.applications.vgg16 import preprocess_input
        target_size = (224, 224)
    # Get the CNN model from model.py
    model = CNNModel(model_type)
    features = dict()
    # Extract features from each photo
    for name in tqdm(os.listdir(path)):
        # Load and resize the image
        filename = path + name
        image = load_img(filename, target_size=target_size)
        # Convert the image pixels to a numpy array
        image = img_to_array(image)
        # Reshape data for the model
        image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
        # Prepare the image for the CNN model
        image = preprocess_input(image)
        # Pass the image through the model to get encoded features
        feature = model.predict(image, verbose=0)
        # Store encoded features for the image
        image_id = name.split('.')[0]
        features[image_id] = feature
    return features
Example 4: data_loader
# Required module import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import load_img [as alias]
def data_loader(q, ):
    for bi in batch_indices:
        start, end = bi
        x_batch = []
        filenames_batch = filenames[start:end]
        for filename in filenames_batch:
            imgs = []
            for d in dirs:
                img = img_to_array(load_img(os.path.join(d, filename), grayscale=True))
                imgs.append(np.squeeze(img))
            x_batch.append(np.array(imgs).transpose((1, 2, 0)))
        q.put((filenames_batch, np.array(x_batch)))
    for gpu in gpus:
        q.put((None, None))
Example 5: load_image_pixels
# Required module import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import load_img [as alias]
def load_image_pixels(filename, shape):
    # load the image to get its original size
    image = load_img(filename)
    width, height = image.size
    # load the image again with the required size
    image = load_img(filename, target_size=shape)
    # convert to a numpy array
    image = img_to_array(image)
    # scale pixel values to [0, 1]
    image = image.astype('float32')
    image /= 255.0
    # add a dimension so that we have one sample
    image = expand_dims(image, 0)
    return image, width, height

# get all of the results above a threshold
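Example 5 returns the original width and height alongside the network-ready tensor. A plausible use, sketched below purely as an illustration, is mapping boxes predicted on the resized model input back to the original image; the helper name and the (xmin, ymin, xmax, ymax) box format in model-input pixels are assumptions, not taken from the source.

def scale_boxes_to_original(boxes, input_shape, width, height):
    # Hypothetical helper: rescale boxes predicted on the resized input
    # (input_shape = (net_h, net_w)) back to the original image size.
    net_h, net_w = input_shape
    scaled = []
    for xmin, ymin, xmax, ymax in boxes:
        scaled.append((xmin * width / net_w, ymin * height / net_h,
                       xmax * width / net_w, ymax * height / net_h))
    return scaled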
Example 6: predict
# Required module import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import load_img [as alias]
def predict(img_dir, model):
    img_files = []
    for root, dirs, files in os.walk(img_dir, topdown=False):
        for name in files:
            img_files.append(os.path.join(root, name))
    img_files = sorted(img_files)
    y_pred = []
    y_test = []
    for img_path in tqdm(img_files):
        # print(img_path)
        img = image.load_img(img_path, target_size=(224, 224))
        x = image.img_to_array(img)
        preds = model.predict(x[None, :, :, :])
        decoded = decode_predictions(preds, top=1)
        pred_label = decoded[0][0][0]
        # print(pred_label)
        y_pred.append(pred_label)
        # The parent directory name is the ground-truth class id.
        tokens = img_path.split(os.path.sep)
        class_id = int(tokens[-2])
        # print(str(class_id))
        y_test.append(class_id)
    return y_pred, y_test
Example 7: create_test_data
# Required module import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import load_img [as alias]
def create_test_data(self):
    # Generate an .npy file for the test set.
    i = 0
    print('-' * 30)
    print('Creating test images...')
    print('-' * 30)
    imgs = glob.glob(self.test_path + "/*." + self.img_type)  # e.g. ../data_set/train
    print(len(imgs))
    imgdatas = np.ndarray((len(imgs), self.out_rows, self.out_cols, 1), dtype=np.uint8)
    for imgname in imgs:
        midname = imgname[imgname.rindex("/") + 1:]  # the image file name
        img = load_img(self.test_path + "/" + midname, grayscale=True)  # load as grayscale
        img = img_to_array(img)
        imgdatas[i] = img
        if i % 100 == 0:
            print('Done: {0}/{1} images'.format(i, len(imgs)))
        i += 1
    print('loading done', imgdatas.shape)
    np.save(self.npy_path + '/imgs_test.npy', imgdatas)  # save the test images as .npy data
    # np.save(self.npy_path + '/imgs_mask_train.npy', imglabels)
    print('Saving to .npy files done.')
Example 8: main
# Required module import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import load_img [as alias]
def main(self):
    self.logger.info('Will load keras model')
    model = ResNet50(weights='imagenet')
    self.logger.info('Keras model loaded')
    feature_list = []
    img_path_list = []
    for raw_file in self.inp.raw_files:
        media_path = raw_file.path
        file_list = os.listdir(media_path)
        total = float(len(file_list))
        for index, img_file in enumerate(file_list):
            img_path = os.path.join(media_path, img_file)
            img_path_list.append(img_path)
            img = image.load_img(img_path, target_size=(224, 224))
            x = keras_image.img_to_array(img)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
            # run the model and pick the most likely class
            scores = model.predict(x)
            sim_class = np.argmax(scores)
            print('Scores {}\nSimClass: {}'.format(scores, sim_class))
            self.outp.request_annos(img_path, img_sim_class=sim_class)
            self.logger.info('Requested annotation for: {} (cluster: {})'.format(img_path, sim_class))
            self.update_progress(index * 100 / total)
Example 9: preprocess_image_crop
# Required module import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import load_img [as alias]
def preprocess_image_crop(image_path, img_size):
    '''
    Preprocess the image by scaling it so that its smaller side equals img_size.
    The larger side is then cropped in order to produce a square image.
    '''
    img = load_img(image_path)
    scale = float(img_size) / min(img.size)
    new_size = (int(np.ceil(scale * img.size[0])), int(np.ceil(scale * img.size[1])))
    # print('old size: %s, new size: %s' % (str(img.size), str(new_size)))
    img = img.resize(new_size, resample=Image.BILINEAR)
    img = img_to_array(img)
    crop_h = img.shape[0] - img_size
    crop_v = img.shape[1] - img_size
    img = img[crop_h:img_size + crop_h, crop_v:img_size + crop_v, :]
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img

# util function to open, resize and format pictures into appropriate tensors
Example 10: preprocess_image_scale
# Required module import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import load_img [as alias]
def preprocess_image_scale(image_path, img_size=None):
    '''
    Preprocess the image by scaling it so that its larger side equals img_size.
    This function preserves the aspect ratio.
    '''
    img = load_img(image_path)
    if img_size:
        scale = float(img_size) / max(img.size)
        new_size = (int(np.ceil(scale * img.size[0])), int(np.ceil(scale * img.size[1])))
        img = img.resize(new_size, resample=Image.BILINEAR)
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img

# util function to convert a tensor into a valid image
Example 11: display_heatmap
# Required module import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import load_img [as alias]
def display_heatmap(new_model, img_path, ids, preprocessing=None):
    # The quality is reduced.
    # If you have more than 8GB of RAM, you can try to increase it.
    img = image.load_img(img_path, target_size=(800, 1280))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    if preprocessing is not None:
        # Apply the user-supplied preprocessing function.
        x = preprocessing(x)
    out = new_model.predict(x)
    heatmap = out[0]  # Remove the batch axis.
    if K.image_data_format() == 'channels_first':
        heatmap = heatmap[ids]
        if heatmap.ndim == 3:
            heatmap = np.sum(heatmap, axis=0)
    else:
        heatmap = heatmap[:, :, ids]
        if heatmap.ndim == 3:
            heatmap = np.sum(heatmap, axis=2)
    plt.imshow(heatmap, interpolation="none")
    plt.show()
Example 12: helper_test
# Required module import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import load_img [as alias]
def helper_test(model):
    img_path = "../examples/dog.jpg"
    new_model = to_heatmap(model)
    # Loading the image
    img = image.load_img(img_path, target_size=(800, 800))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    out = new_model.predict(x)
    s = "n02084071"  # Imagenet code for "dog"
    ids = synset_to_dfs_ids(s)
    heatmap = out[0]
    if K.image_data_format() == 'channels_first':
        heatmap = heatmap[ids]
        heatmap = np.sum(heatmap, axis=0)
    else:
        heatmap = heatmap[:, :, ids]
        heatmap = np.sum(heatmap, axis=2)
    print(heatmap.shape)
    assert heatmap.shape[0] == heatmap.shape[1]
    K.clear_session()
Example 13: predict
# Required module import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import load_img [as alias]
def predict(imagePath):
    img = load_img(imagePath)
    img = img_to_array(img)
    output = img.copy()
    # make prediction
    results = rcnn.detect([img], verbose=0)
    r = results[0]
    for (box, score) in zip(r['rois'], r['scores']):
        # filter out weak detections
        if score < 0.5:
            continue
        label = "{}: {:.2f}".format('table', score)
        cv2.rectangle(output, (box[1], box[0]), (box[3], box[2]), (0, 255, 0), 2)
        cv2.putText(output, label, (box[1], box[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
    cv2.imwrite("prediction.jpg", output)
    return r['rois']
Example 14: test_01_image_classifier_with_image_as_input
# Required module import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import load_img [as alias]
def test_01_image_classifier_with_image_as_input(self):
    cnn_pmml = KerasToPmml(self.model_final, model_name="MobileNetImage", description="Demo",
                           copyright="Internal User", dataSet='image', predictedClasses=['dogs', 'cats'])
    cnn_pmml.export(open('2classMBNet.pmml', "w"), 0)
    img = image.load_img('nyoka/tests/resizedCat.png')
    img = img_to_array(img)
    img = preprocess_input(img)
    imgtf = np.expand_dims(img, axis=0)
    model_pred = self.model_final.predict(imgtf)
    model_preds = {'dogs': model_pred[0][0], 'cats': model_pred[0][1]}
    model_name = self.adapa_utility.upload_to_zserver('2classMBNet.pmml')
    predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, 'nyoka/tests/resizedCat.png', 'DN')
    self.assertEqual(abs(probabilities['cats'] - model_preds['cats']) < 0.00001, True)
    self.assertEqual(abs(probabilities['dogs'] - model_preds['dogs']) < 0.00001, True)
Example 15: _compute_stats
# Required module import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import load_img [as alias]
def _compute_stats(self, mean=None, std=None):
    """ Computes channel-wise mean and standard deviation of all images in the dataset.

    If `mean` and `std` arguments are given, they will just be stored instead of being re-computed.
    The channel order of both is always "RGB", independent of `color_mode`.
    """
    if mean is None:
        mean = 0
        for fn in tqdm(self.train_img_files, desc='Computing channel mean'):
            mean += np.mean(np.asarray(load_img(fn), dtype=np.float64), axis=(0, 1))
        mean /= len(self.train_img_files)
        print('Channel-wise mean: {}'.format(mean))
    self.mean = np.asarray(mean, dtype=np.float32)

    if (mean is None) or (std is None):
        std = 0
        for fn in tqdm(self.train_img_files, desc='Computing channel variance'):
            std += np.mean((np.asarray(load_img(fn), dtype=np.float64) - self.mean) ** 2, axis=(0, 1))
        std = np.sqrt(std / (len(self.train_img_files) - 1))
        print('Channel-wise standard deviation: {}'.format(std))
    self.std = np.asarray(std, dtype=np.float32)
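As a follow-up to Example 15, here is a hedged sketch of how such channel-wise statistics are commonly applied: subtract the mean and divide by the standard deviation to standardize an image before feeding it to a network. The `mean` and `std` arrays are assumed to be the float32 values computed above (RGB order), and the helper name `standardize_image` is illustrative, not part of the original class.

import numpy as np
from keras.preprocessing.image import load_img

def standardize_image(fn, mean, std):
    # Hypothetical helper: load an image and apply channel-wise standardization
    # using statistics computed as in Example 15.
    x = np.asarray(load_img(fn), dtype=np.float32)
    return (x - mean) / std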