This page collects typical usage examples of the Python method keras.applications.vgg16.preprocess_input. If you have been wondering what vgg16.preprocess_input does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples from its containing module, keras.applications.vgg16.
The 15 code examples of vgg16.preprocess_input shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
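Before diving into the examples, here is a minimal, self-contained sketch of the typical call pattern (the image path 'elephant.jpg' is a placeholder and the classification step at the end is optional); the examples below embed these same steps in larger pipelines:

import numpy as np
from keras.applications import vgg16
from keras.applications.vgg16 import VGG16
from keras.preprocessing.image import load_img, img_to_array

# Load an image at the 224x224 input size expected by VGG16
img = load_img('elephant.jpg', target_size=(224, 224))  # placeholder path
x = img_to_array(img)                  # (224, 224, 3) float array
x = np.expand_dims(x, axis=0)          # add the batch dimension -> (1, 224, 224, 3)
x = vgg16.preprocess_input(x)          # RGB -> BGR and ImageNet mean subtraction

model = VGG16(weights='imagenet', include_top=True)
preds = model.predict(x)               # (1, 1000) class scores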
Example 1: extract_features
# Required module import: from keras.applications import vgg16 [as alias]
# Or: from keras.applications.vgg16 import preprocess_input [as alias]
def extract_features(path, model_type):
    if model_type == 'inceptionv3':
        from keras.applications.inception_v3 import preprocess_input
        target_size = (299, 299)
    elif model_type == 'vgg16':
        from keras.applications.vgg16 import preprocess_input
        target_size = (224, 224)
    # Get the CNN model from model.py
    model = CNNModel(model_type)
    features = dict()
    # Extract features from each photo
    for name in tqdm(os.listdir(path)):
        # Load and resize the image
        filename = path + name
        image = load_img(filename, target_size=target_size)
        # Convert the image pixels to a numpy array
        image = img_to_array(image)
        # Reshape data for the model
        image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
        # Prepare the image for the CNN model
        image = preprocess_input(image)
        # Pass the image through the model to get the encoded features
        feature = model.predict(image, verbose=0)
        # Store the encoded features for the image
        image_id = name.split('.')[0]
        features[image_id] = feature
    return features
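In this example, CNNModel comes from the project's own model.py, which is not reproduced on this page. Purely for illustration, a plausible sketch of such a helper (an assumption, not the repository's actual code) might look like this:

from keras.applications.vgg16 import VGG16
from keras.applications.inception_v3 import InceptionV3
from keras.models import Model

def CNNModel(model_type):
    # Hypothetical helper: build an ImageNet-pretrained encoder and
    # return it with the final classification layer removed.
    if model_type == 'inceptionv3':
        base = InceptionV3(weights='imagenet')
    elif model_type == 'vgg16':
        base = VGG16(weights='imagenet')
    else:
        raise ValueError('Unknown model_type: %s' % model_type)
    # Use the output of the second-to-last layer as the image encoding
    return Model(inputs=base.input, outputs=base.layers[-2].output)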
Example 2: extract_features
# Required module import: from keras.applications import vgg16 [as alias]
# Or: from keras.applications.vgg16 import preprocess_input [as alias]
def extract_features(filename, model, model_type):
    if model_type == 'inceptionv3':
        from keras.applications.inception_v3 import preprocess_input
        target_size = (299, 299)
    elif model_type == 'vgg16':
        from keras.applications.vgg16 import preprocess_input
        target_size = (224, 224)
    # Load and resize the image
    image = load_img(filename, target_size=target_size)
    # Convert the image pixels to a numpy array
    image = img_to_array(image)
    # Reshape data for the model
    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
    # Prepare the image for the CNN model
    image = preprocess_input(image)
    # Pass the image through the model to get the encoded features
    features = model.predict(image, verbose=0)
    return features
Example 3: test_ShapGradientExplainer
# Required module import: from keras.applications import vgg16 [as alias]
# Or: from keras.applications.vgg16 import preprocess_input [as alias]
def test_ShapGradientExplainer(self):
    # model = VGG16(weights='imagenet', include_top=True)
    # X, y = shap.datasets.imagenet50()
    # to_explain = X[[39, 41]]
    #
    # url = "https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json"
    # fname = shap.datasets.cache(url)
    # with open(fname) as f:
    #     class_names = json.load(f)
    #
    # def map2layer(x, layer):
    #     feed_dict = dict(zip([model.layers[0].input], [preprocess_input(x.copy())]))
    #     return K.get_session().run(model.layers[layer].input, feed_dict)
    #
    # e = GradientExplainer((model.layers[7].input, model.layers[-1].output),
    #                       map2layer(preprocess_input(X.copy()), 7))
    # shap_values, indexes = e.explain_instance(map2layer(to_explain, 7), ranked_outputs=2)
    #
    print("Skipped Shap GradientExplainer")
Example 4: preprocess_image_crop
# Required module import: from keras.applications import vgg16 [as alias]
# Or: from keras.applications.vgg16 import preprocess_input [as alias]
def preprocess_image_crop(image_path, img_size):
    '''
    Preprocess the image, scaling it so that its smaller side equals img_size.
    The larger side is then cropped in order to produce a square image.
    '''
    img = load_img(image_path)
    scale = float(img_size) / min(img.size)
    new_size = (int(np.ceil(scale * img.size[0])), int(np.ceil(scale * img.size[1])))
    # print('old size: %s, new size: %s' % (str(img.size), str(new_size)))
    img = img.resize(new_size, resample=Image.BILINEAR)
    img = img_to_array(img)
    crop_h = img.shape[0] - img_size
    crop_v = img.shape[1] - img_size
    img = img[crop_h:img_size + crop_h, crop_v:img_size + crop_v, :]
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img
Example 5: preprocess_image_scale
# Required module import: from keras.applications import vgg16 [as alias]
# Or: from keras.applications.vgg16 import preprocess_input [as alias]
def preprocess_image_scale(image_path, img_size=None):
    '''
    Preprocess the image, scaling it so that its larger side equals img_size.
    This function preserves the aspect ratio.
    '''
    img = load_img(image_path)
    if img_size:
        scale = float(img_size) / max(img.size)
        new_size = (int(np.ceil(scale * img.size[0])), int(np.ceil(scale * img.size[1])))
        img = img.resize(new_size, resample=Image.BILINEAR)
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img
Example 6: extract_vgg16_features_live
# Required module import: from keras.applications import vgg16 [as alias]
# Or: from keras.applications.vgg16 import preprocess_input [as alias]
def extract_vgg16_features_live(model, video_input_file_path):
    print('Extracting frames from video: ', video_input_file_path)
    vidcap = cv2.VideoCapture(video_input_file_path)
    success, image = vidcap.read()
    features = []
    success = True
    count = 0
    while success:
        # Jump to the next whole second before grabbing a frame
        vidcap.set(cv2.CAP_PROP_POS_MSEC, (count * 1000))
        success, image = vidcap.read()
        # print('Read a new frame: ', success)
        if success:
            img = cv2.resize(image, (224, 224), interpolation=cv2.INTER_AREA)
            input = img_to_array(img)
            input = np.expand_dims(input, axis=0)
            input = preprocess_input(input)
            feature = model.predict(input).ravel()
            features.append(feature)
            count = count + 1
    unscaled_features = np.array(features)
    return unscaled_features
Example 7: extract_vgg16_features
# Required module import: from keras.applications import vgg16 [as alias]
# Or: from keras.applications.vgg16 import preprocess_input [as alias]
def extract_vgg16_features(model, video_input_file_path, feature_output_file_path):
    if os.path.exists(feature_output_file_path):
        return np.load(feature_output_file_path)
    count = 0
    print('Extracting frames from video: ', video_input_file_path)
    vidcap = cv2.VideoCapture(video_input_file_path)
    success, image = vidcap.read()
    features = []
    success = True
    while success:
        # Jump to the next whole second before grabbing a frame
        vidcap.set(cv2.CAP_PROP_POS_MSEC, (count * 1000))
        success, image = vidcap.read()
        # print('Read a new frame: ', success)
        if success:
            img = cv2.resize(image, (224, 224), interpolation=cv2.INTER_AREA)
            input = img_to_array(img)
            input = np.expand_dims(input, axis=0)
            input = preprocess_input(input)
            feature = model.predict(input).ravel()
            features.append(feature)
            count = count + 1
    unscaled_features = np.array(features)
    np.save(feature_output_file_path, unscaled_features)
    return unscaled_features
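Examples 6 and 7 expect `model` to be a VGG16-based feature extractor created elsewhere in the project. One plausible way to build it, shown only as an assumption so the snippets can be tried in isolation, is:

from keras.applications.vgg16 import VGG16
from keras.models import Model

# Hypothetical construction of the feature extractor passed to
# extract_vgg16_features_live / extract_vgg16_features:
base = VGG16(include_top=True, weights='imagenet')
# Use the 4096-d fc2 activations as the per-frame feature vector
model = Model(inputs=base.input, outputs=base.get_layer('fc2').output)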
Example 8: extract_vgg16_features
# Required module import: from keras.applications import vgg16 [as alias]
# Or: from keras.applications.vgg16 import preprocess_input [as alias]
def extract_vgg16_features(x):
    from keras.preprocessing.image import img_to_array, array_to_img
    from keras.applications.vgg16 import preprocess_input, VGG16
    from keras.models import Model
    # im_h = x.shape[1]
    im_h = 224
    model = VGG16(include_top=True, weights='imagenet', input_shape=(im_h, im_h, 3))
    # if flatten:
    #     add_layer = Flatten()
    # else:
    #     add_layer = GlobalMaxPool2D()
    # feature_model = Model(model.input, add_layer(model.output))
    feature_model = Model(model.input, model.get_layer('fc1').output)
    print('extracting features...')
    x = np.asarray([img_to_array(array_to_img(im, scale=False).resize((im_h, im_h))) for im in x])
    x = preprocess_input(x)  # data - 127. # data/255.
    features = feature_model.predict(x)
    print('Features shape = ', features.shape)
    return features
Example 9: vgg16_fe
# Required module import: from keras.applications import vgg16 [as alias]
# Or: from keras.applications.vgg16 import preprocess_input [as alias]
def vgg16_fe(img_input):
    # net = preprocess_input(img_input)
    vgg_model = VGG16(weights='imagenet', include_top=True, input_tensor=img_input)
    # Drop the final classification layer and return the output of the layer before it
    vgg_model.layers.pop()
    return vgg_model.layers[-1].output
    # return model.layers[-1].output
Example 10: vgg16F_fe
# Required module import: from keras.applications import vgg16 [as alias]
# Or: from keras.applications.vgg16 import preprocess_input [as alias]
def vgg16F_fe(img_input):
    # net = preprocess_input(img_input)
    from keras_vggface.vggface import VGGFace
    vgg_model = VGGFace(include_top=False, input_tensor=img_input, pooling='avg')
    # vgg_model.layers.pop()
    last_layer = vgg_model.get_layer('pool5').output
    x = Flatten(name='flatten')(last_layer)
    x = Dense(1024, activation='relu', trainable=True)(x)
    x = Dense(512, activation='relu', trainable=True)(x)
    model = dnn.Model(input=vgg_model.input, output=x)
    return model.layers[-1].output
Example 11: computeFeatures
# Required module import: from keras.applications import vgg16 [as alias]
# Or: from keras.applications.vgg16 import preprocess_input [as alias]
def computeFeatures(self, video):
    x = vgg16.preprocess_input(video)
    features = self.model.predict(x)
    return features
Example 12: vgg_preprocess_input
# Required module import: from keras.applications import vgg16 [as alias]
# Or: from keras.applications.vgg16 import preprocess_input [as alias]
def vgg_preprocess_input(x):
    return vgg16.preprocess_input(x)
Example 13: preprocess_input
# Required module import: from keras.applications import vgg16 [as alias]
# Or: from keras.applications.vgg16 import preprocess_input [as alias]
def preprocess_input(x):
    return vgg16.preprocess_input(x.astype('float32'))
Example 14: load_image
# Required module import: from keras.applications import vgg16 [as alias]
# Or: from keras.applications.vgg16 import preprocess_input [as alias]
def load_image(path):
    img_path = path
    img = load_img(img_path, target_size=(299, 299))
    x = img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return x
Example 15: preprocess_image
# Required module import: from keras.applications import vgg16 [as alias]
# Or: from keras.applications.vgg16 import preprocess_input [as alias]
def preprocess_image(image_path, desired_dims):
    img = load_img(image_path, target_size=desired_dims)
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img
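As a closing note, vgg16.preprocess_input converts images from RGB to BGR and subtracts the ImageNet channel means, so tensors prepared this way (as in Examples 4, 5 and 15) must be "deprocessed" before being displayed or saved as images. The helper name deprocess_image below is illustrative rather than taken from these examples; the sketch assumes the standard 'caffe'-style preprocessing used by VGG16 in Keras:

import numpy as np

def deprocess_image(x, img_h, img_w):
    # Undo keras.applications.vgg16.preprocess_input:
    # add back the ImageNet channel means and convert BGR -> RGB.
    x = x.reshape((img_h, img_w, 3)).astype('float64')
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    x = x[:, :, ::-1]  # BGR -> RGB
    return np.clip(x, 0, 255).astype('uint8')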