當前位置: 首頁>>代碼示例>>Python>>正文


Python vgg16.preprocess_input方法代碼示例

本文整理匯總了Python中keras.applications.vgg16.preprocess_input方法的典型用法代碼示例。如果您正苦於以下問題:Python vgg16.preprocess_input方法的具體用法?Python vgg16.preprocess_input怎麽用?Python vgg16.preprocess_input使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在keras.applications.vgg16的用法示例。


在下文中一共展示了vgg16.preprocess_input方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: extract_features

# 需要導入模塊: from keras.applications import vgg16 [as 別名]
# 或者: from keras.applications.vgg16 import preprocess_input [as 別名]
def extract_features(path, model_type):
	"""Encode every image in a directory with a CNN and return {image_id: feature}.

	Args:
		path: Directory containing the images to encode.
		model_type: 'inceptionv3' or 'vgg16'; selects preprocessing and input size.

	Returns:
		dict mapping the filename stem (text before the first '.') to the
		model's encoded feature array for that image.

	Raises:
		ValueError: If model_type is not one of the supported models.
	"""
	if model_type == 'inceptionv3':
		from keras.applications.inception_v3 import preprocess_input
		target_size = (299, 299)
	elif model_type == 'vgg16':
		from keras.applications.vgg16 import preprocess_input
		target_size = (224, 224)
	else:
		# Fix: an unrecognized model_type previously fell through and crashed
		# later with a NameError on preprocess_input; fail fast instead.
		raise ValueError('Unsupported model_type: %s' % model_type)
	# Get CNN Model from model.py
	model = CNNModel(model_type)
	features = dict()
	# Extract features from each photo in the directory
	for name in tqdm(os.listdir(path)):
		# Fix: os.path.join works whether or not `path` ends with a separator,
		# unlike the original raw string concatenation.
		filename = os.path.join(path, name)
		image = load_img(filename, target_size=target_size)
		# Convert the image pixels to a numpy array
		image = img_to_array(image)
		# Add the leading batch dimension the model expects
		image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
		# Apply the model-specific pixel preprocessing
		image = preprocess_input(image)
		# Pass image into model to get encoded features
		feature = model.predict(image, verbose=0)
		# Key the feature by the filename without its extension
		image_id = name.split('.')[0]
		features[image_id] = feature
	return features
開發者ID:dabasajay,項目名稱:Image-Caption-Generator,代碼行數:29,代碼來源:preprocessing.py

示例2: extract_features

# 需要導入模塊: from keras.applications import vgg16 [as 別名]
# 或者: from keras.applications.vgg16 import preprocess_input [as 別名]
def extract_features(filename, model, model_type):
	"""Encode a single image file with an already-constructed CNN model.

	Args:
		filename: Path to the image file.
		model: A Keras model whose predict() produces the encoded features.
		model_type: 'inceptionv3' or 'vgg16'; selects preprocessing and input size.

	Returns:
		The model's encoded feature array for the image.

	Raises:
		ValueError: If model_type is not one of the supported models.
	"""
	if model_type == 'inceptionv3':
		from keras.applications.inception_v3 import preprocess_input
		target_size = (299, 299)
	elif model_type == 'vgg16':
		from keras.applications.vgg16 import preprocess_input
		target_size = (224, 224)
	else:
		# Fix: an unrecognized model_type previously fell through and crashed
		# later with a NameError on preprocess_input; fail fast instead.
		raise ValueError('Unsupported model_type: %s' % model_type)
	# Loading and resizing image
	image = load_img(filename, target_size=target_size)
	# Convert the image pixels to a numpy array
	image = img_to_array(image)
	# Add the leading batch dimension the model expects
	image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
	# Apply the model-specific pixel preprocessing
	image = preprocess_input(image)
	# Pass image into model to get encoded features
	features = model.predict(image, verbose=0)
	return features

# Load the tokenizer 
開發者ID:dabasajay,項目名稱:Image-Caption-Generator,代碼行數:22,代碼來源:test.py

示例3: test_ShapGradientExplainer

# 需要導入模塊: from keras.applications import vgg16 [as 別名]
# 或者: from keras.applications.vgg16 import preprocess_input [as 別名]
def test_ShapGradientExplainer(self):

    #     model = VGG16(weights='imagenet', include_top=True)
    #     X, y = shap.datasets.imagenet50()
    #     to_explain = X[[39, 41]]
    #
    #     url = "https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json"
    #     fname = shap.datasets.cache(url)
    #     with open(fname) as f:
    #         class_names = json.load(f)
    #
    #     def map2layer(x, layer):
    #         feed_dict = dict(zip([model.layers[0].input], [preprocess_input(x.copy())]))
    #         return K.get_session().run(model.layers[layer].input, feed_dict)
    #
    #     e = GradientExplainer((model.layers[7].input, model.layers[-1].output),
    #                           map2layer(preprocess_input(X.copy()), 7))
    #     shap_values, indexes = e.explain_instance(map2layer(to_explain, 7), ranked_outputs=2)
    #
          print("Skipped Shap GradientExplainer") 
開發者ID:IBM,項目名稱:AIX360,代碼行數:22,代碼來源:test_shap.py

示例4: preprocess_image_crop

# 需要導入模塊: from keras.applications import vgg16 [as 別名]
# 或者: from keras.applications.vgg16 import preprocess_input [as 別名]
def preprocess_image_crop(image_path, img_size):
    '''
    Load an image, scale it so its shorter side equals img_size, crop the
    longer side (keeping the bottom-right-aligned window) to produce a square,
    and apply VGG16 preprocessing. Returns a (1, img_size, img_size, 3) batch.
    '''
    picture = load_img(image_path)
    ratio = float(img_size) / min(picture.size)
    scaled_dims = (int(np.ceil(ratio * picture.size[0])),
                   int(np.ceil(ratio * picture.size[1])))
    picture = picture.resize(scaled_dims, resample=Image.BILINEAR)
    arr = img_to_array(picture)
    # Offsets select the trailing img_size window along each axis.
    off_h = arr.shape[0] - img_size
    off_v = arr.shape[1] - img_size
    arr = arr[off_h:off_h + img_size, off_v:off_v + img_size, :]
    arr = np.expand_dims(arr, axis=0)
    return vgg16.preprocess_input(arr)

# util function to open, resize and format pictures into appropriate tensors 
開發者ID:robertomest,項目名稱:neural-style-keras,代碼行數:21,代碼來源:utils.py

示例5: preprocess_image_scale

# 需要導入模塊: from keras.applications import vgg16 [as 別名]
# 或者: from keras.applications.vgg16 import preprocess_input [as 別名]
def preprocess_image_scale(image_path, img_size=None):
    '''
    Load an image and, when img_size is given, scale it so its larger side
    equals img_size while preserving aspect ratio; then apply VGG16
    preprocessing. Returns a 4-D batch of one image.
    '''
    picture = load_img(image_path)
    if img_size:
        ratio = float(img_size) / max(picture.size)
        scaled_dims = (int(np.ceil(ratio * picture.size[0])),
                       int(np.ceil(ratio * picture.size[1])))
        picture = picture.resize(scaled_dims, resample=Image.BILINEAR)
    batch = np.expand_dims(img_to_array(picture), axis=0)
    return vgg16.preprocess_input(batch)


# util function to convert a tensor into a valid image 
開發者ID:robertomest,項目名稱:neural-style-keras,代碼行數:19,代碼來源:utils.py

示例6: extract_vgg16_features_live

# 需要導入模塊: from keras.applications import vgg16 [as 別名]
# 或者: from keras.applications.vgg16 import preprocess_input [as 別名]
def extract_vgg16_features_live(model, video_input_file_path):
    """Sample one frame per second from a video and encode each frame with VGG16.

    Args:
        model: A Keras model whose predict() encodes a (1, 224, 224, 3) batch.
        video_input_file_path: Path to the video file to sample.

    Returns:
        numpy array of flattened per-frame feature vectors (unscaled).
    """
    print('Extracting frames from video: ', video_input_file_path)
    vidcap = cv2.VideoCapture(video_input_file_path)
    features = []
    count = 0
    success = True
    try:
        while success:
            # Seek to the next whole second before reading.
            vidcap.set(cv2.CAP_PROP_POS_MSEC, count * 1000)
            success, image = vidcap.read()
            if success:
                # Fix: renamed `input` (shadowed the builtin) to `batch`.
                frame = cv2.resize(image, (224, 224), interpolation=cv2.INTER_AREA)
                batch = np.expand_dims(img_to_array(frame), axis=0)
                batch = preprocess_input(batch)
                features.append(model.predict(batch).ravel())
                count = count + 1
    finally:
        # Fix: release the capture handle (the original leaked it).
        vidcap.release()
    unscaled_features = np.array(features)
    return unscaled_features
開發者ID:chen0040,項目名稱:keras-video-classifier,代碼行數:23,代碼來源:vgg16_feature_extractor.py

示例7: extract_vgg16_features

# 需要導入模塊: from keras.applications import vgg16 [as 別名]
# 或者: from keras.applications.vgg16 import preprocess_input [as 別名]
def extract_vgg16_features(model, video_input_file_path, feature_output_file_path):
    """Sample one frame per second from a video, encode with VGG16, and cache to disk.

    Args:
        model: A Keras model whose predict() encodes a (1, 224, 224, 3) batch.
        video_input_file_path: Path to the video file to sample.
        feature_output_file_path: .npy cache path; if it exists it is returned directly.

    Returns:
        numpy array of flattened per-frame feature vectors (unscaled).
    """
    if os.path.exists(feature_output_file_path):
        # Cached run: skip the expensive extraction entirely.
        return np.load(feature_output_file_path)
    count = 0
    print('Extracting frames from video: ', video_input_file_path)
    vidcap = cv2.VideoCapture(video_input_file_path)
    features = []
    success = True
    try:
        while success:
            # Seek to the next whole second before reading.
            vidcap.set(cv2.CAP_PROP_POS_MSEC, count * 1000)
            success, image = vidcap.read()
            if success:
                # Fix: renamed `input` (shadowed the builtin) to `batch`.
                frame = cv2.resize(image, (224, 224), interpolation=cv2.INTER_AREA)
                batch = np.expand_dims(img_to_array(frame), axis=0)
                batch = preprocess_input(batch)
                features.append(model.predict(batch).ravel())
                count = count + 1
    finally:
        # Fix: release the capture handle (the original leaked it).
        vidcap.release()
    unscaled_features = np.array(features)
    np.save(feature_output_file_path, unscaled_features)
    return unscaled_features
開發者ID:chen0040,項目名稱:keras-video-classifier,代碼行數:26,代碼來源:vgg16_feature_extractor.py

示例8: extract_vgg16_features

# 需要導入模塊: from keras.applications import vgg16 [as 別名]
# 或者: from keras.applications.vgg16 import preprocess_input [as 別名]
def extract_vgg16_features(x):
    """Encode a batch of images with ImageNet VGG16's 'fc1' layer.

    Each image in x is resized to 224x224 before preprocessing; returns the
    fc1 activations for the whole batch.
    """
    from keras.preprocessing.image import img_to_array, array_to_img
    from keras.applications.vgg16 import preprocess_input, VGG16
    from keras.models import Model

    side = 224  # VGG16's native input resolution
    backbone = VGG16(include_top=True, weights='imagenet', input_shape=(side, side, 3))
    # Truncate at fc1 to obtain a 4096-d descriptor per image.
    encoder = Model(backbone.input, backbone.get_layer('fc1').output)
    print('extracting features...')
    resized = np.asarray([img_to_array(array_to_img(im, scale=False).resize((side, side)))
                          for im in x])
    resized = preprocess_input(resized)  # data - 127. #data/255.#
    features = encoder.predict(resized)
    print('Features shape = ', features.shape)

    return features
開發者ID:XifengGuo,項目名稱:DEC-keras,代碼行數:23,代碼來源:datasets.py

示例9: vgg16_fe

# 需要導入模塊: from keras.applications import vgg16 [as 別名]
# 或者: from keras.applications.vgg16 import preprocess_input [as 別名]
def vgg16_fe(img_input):
    """Build an ImageNet VGG16 on img_input and return the output tensor of the
    layer just before the final (popped) classification layer."""
    backbone = VGG16(weights='imagenet', include_top=True, input_tensor=img_input)
    backbone.layers.pop()  # drop the last layer; its predecessor becomes layers[-1]
    return backbone.layers[-1].output
開發者ID:bbdamodaran,項目名稱:deepJDOT,代碼行數:8,代碼來源:architectures.py

示例10: vgg16F_fe

# 需要導入模塊: from keras.applications import vgg16 [as 別名]
# 或者: from keras.applications.vgg16 import preprocess_input [as 別名]
def vgg16F_fe(img_input):
    """VGGFace backbone (avg-pooled, no top) with two freshly added trainable
    Dense/ReLU heads (1024 then 512); returns the 512-d output tensor."""
    from keras_vggface.vggface import VGGFace
    backbone = VGGFace(include_top=False, input_tensor=img_input, pooling='avg')
    pooled = backbone.get_layer('pool5').output
    net = Flatten(name='flatten')(pooled)
    net = Dense(1024, activation='relu', trainable=True)(net)
    net = Dense(512, activation='relu', trainable=True)(net)
    # NOTE(review): legacy `input=`/`output=` kwargs kept for the old Keras
    # version this project targets.
    wrapper = dnn.Model(input=backbone.input, output=net)
    return wrapper.layers[-1].output
開發者ID:bbdamodaran,項目名稱:deepJDOT,代碼行數:13,代碼來源:architectures.py

示例11: computeFeatures

# 需要導入模塊: from keras.applications import vgg16 [as 別名]
# 或者: from keras.applications.vgg16 import preprocess_input [as 別名]
def computeFeatures(self, video):
    """Apply VGG16 input preprocessing to `video` and return self.model's
    predictions for the preprocessed batch."""
    preprocessed = vgg16.preprocess_input(video)
    return self.model.predict(preprocessed)
開發者ID:jonasrothfuss,項目名稱:videofeatures,代碼行數:6,代碼來源:CNNFeatures.py

示例12: vgg_preprocess_input

# 需要導入模塊: from keras.applications import vgg16 [as 別名]
# 或者: from keras.applications.vgg16 import preprocess_input [as 別名]
def vgg_preprocess_input(x):
    """Delegate to keras' canonical VGG16 input preprocessing."""
    return vgg16.preprocess_input(x)
開發者ID:dolaameng,項目名稱:udacity-SDC-baseline,代碼行數:4,代碼來源:data.py

示例13: preprocess_input

# 需要導入模塊: from keras.applications import vgg16 [as 別名]
# 或者: from keras.applications.vgg16 import preprocess_input [as 別名]
def preprocess_input(x):
    """Cast x to float32, then apply VGG16's standard input preprocessing."""
    as_float = x.astype('float32')
    return vgg16.preprocess_input(as_float)
開發者ID:robertomest,項目名稱:neural-style-keras,代碼行數:4,代碼來源:utils.py

示例14: load_image

# 需要導入模塊: from keras.applications import vgg16 [as 別名]
# 或者: from keras.applications.vgg16 import preprocess_input [as 別名]
def load_image(path):
    """Load the image at `path` resized to 299x299, preprocess it, and return
    it as a single-image batch array."""
    picture = load_img(path, target_size=(299, 299))
    batch = np.expand_dims(img_to_array(picture), axis=0)
    return preprocess_input(batch)
開發者ID:gautamMalu,項目名稱:Aesthetic_attributes_maps,代碼行數:9,代碼來源:visualization.py

示例15: preprocess_image

# 需要導入模塊: from keras.applications import vgg16 [as 別名]
# 或者: from keras.applications.vgg16 import preprocess_input [as 別名]
def preprocess_image(image_path, desired_dims):
	"""Load the image at image_path resized to desired_dims, add a batch
	dimension, and apply VGG16 preprocessing."""
	batch = img_to_array(load_img(image_path, target_size=desired_dims))
	batch = np.expand_dims(batch, axis=0)
	return vgg16.preprocess_input(batch)

# util function to convert a tensor into a valid image 
開發者ID:kevinzakka,項目名稱:style-transfer,代碼行數:10,代碼來源:utils.py


注:本文中的keras.applications.vgg16.preprocess_input方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。