This article collects typical usage examples of the Python attribute keras.applications.VGG16. If you are wondering what applications.VGG16 does, how to use it, or what real-world usage looks like, the curated attribute code examples below may help. You can also explore further usage examples from the module that contains this attribute, keras.applications.
The following lists 15 code examples of applications.VGG16, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
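Before the examples, here is a minimal orientation sketch of how keras.applications.VGG16 is typically loaded as a feature extractor; the weight choice, input shape and missing classifier head are illustrative assumptions rather than part of any example below.

from keras.applications import VGG16

# Minimal sketch (assumed settings): ImageNet weights, no classifier head,
# fixed 224x224 RGB input. With include_top=True the full 1000-class
# ImageNet classifier would be returned instead.
base = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
base.summary()  # prints the convolutional backbone ending in block5_pool

Most of the examples that follow build on a call of this shape, differing mainly in which layers stay trainable and what head is attached on top.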
Example 1: vgg16
# Required import: from keras import applications [as alias]
# Or: from keras.applications import VGG16 [as alias]
def vgg16(self):
    """Build the structure of a convolutional neural network, from the input
    image data up to the last hidden layer, in a manner similar to VGG-net.

    See: Simonyan & Zisserman, Very Deep Convolutional Networks for
    Large-Scale Image Recognition, arXiv technical report, 2014.

    Returns
    -------
    tensor
        (batch_size, nb_labels)-shaped output predictions, to be compared
        with ground-truth values
    """
    vgg16_model = VGG16(input_tensor=self.X, include_top=False)
    y = self.flatten(vgg16_model.output, block_name="flatten")
    y = self.dense(y, 1024, block_name="fc1")
    y = self.dense(y, 1024, block_name="fc2")
    return self.output_layer(y, depth=self.nb_labels)
Example 2: build_vgg_original_shape
# Required import: from keras import applications [as alias]
# Or: from keras.applications import VGG16 [as alias]
def build_vgg_original_shape(y_pred, vgg_layers):
    input_shape = y_pred.shape.as_list()[1:4]
    img = Input(shape=input_shape)
    img_reshaped = Lambda(
        lambda x: tf.image.resize_nearest_neighbor(x, size=ORIGINAL_VGG_16_SHAPE))(img)
    img_norm = _norm_inputs(img_reshaped)
    vgg = VGG16(weights="imagenet", include_top=False)
    # Output the first three pooling layers
    vgg.outputs = [vgg.layers[i].output for i in vgg_layers]
    # Create model and compile
    model = Model(inputs=img, outputs=vgg(img_norm))
    model.trainable = False
    model.compile(loss='mse', optimizer='adam')
    return model
Example 3: build_vgg_img_shape
# Required import: from keras import applications [as alias]
# Or: from keras.applications import VGG16 [as alias]
def build_vgg_img_shape(y_pred, vgg_layers):
    input_shape = y_pred.shape.as_list()[1:4]
    img = Input(shape=input_shape)
    img_norm = _norm_inputs(img)
    vgg = VGG16(weights="imagenet", include_top=False)
    # Output the first three pooling layers
    vgg.outputs = [vgg.layers[i].output for i in vgg_layers]
    # Create model and compile
    model = Model(inputs=img, outputs=vgg(img_norm))
    model.trainable = False
    model.compile(loss='mse', optimizer='adam')
    return model
Example 4: model
# Required import: from keras import applications [as alias]
# Or: from keras.applications import VGG16 [as alias]
def model():
    model = VGG16(include_top=False, input_shape=(128, 128, 3))
    x = model.output
    y = x
    x = Flatten()(x)
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.5)(x)
    probability = Dense(5, activation='sigmoid', name='probabilistic_output')(x)
    y = UpSampling2D((3, 3))(y)
    y = Activation('relu')(y)
    y = Conv2D(1, (3, 3), activation='linear')(y)
    position = Reshape(target_shape=(10, 10), name='positional_output')(y)
    model = Model(inputs=model.input, outputs=[probability, position])
    return model
Example 5: learn
# Required import: from keras import applications [as alias]
# Or: from keras.applications import VGG16 [as alias]
def learn():
    (train_x, train_y, sample_weight), (test_x, test_y) = load_data()
    datagen = ImageDataGenerator(horizontal_flip=True,
                                 vertical_flip=True)
    train_generator = datagen.flow(train_x, train_y, sample_weight=sample_weight)
    base = VGG16(weights='imagenet', include_top=False, input_shape=(None, None, 3))
    for layer in base.layers[:-4]:
        layer.trainable = False
    model = models.Sequential([
        base,
        layers.BatchNormalization(),
        layers.Conv2D(64, (3, 3), activation='relu', padding='same'),
        layers.GlobalAveragePooling2D(),
        layers.BatchNormalization(),
        layers.Dense(64, activation='relu'),
        layers.BatchNormalization(),
        layers.Dropout(0.20),
        layers.Dense(80, activation='softmax')
    ])
    model.compile(optimizer=optimizers.RMSprop(lr=1e-5),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()
    reduce_lr = ReduceLROnPlateau(verbose=1)
    model.fit_generator(train_generator, epochs=400,
                        steps_per_epoch=100,
                        validation_data=(test_x[:800], test_y[:800]),
                        callbacks=[reduce_lr])
    result = model.evaluate(test_x, test_y)
    print(result)
    model.save('12306.image.model.h5', include_optimizer=False)
Example 6: save_bottlebeck_features_btl
# Required import: from keras import applications [as alias]
# Or: from keras.applications import VGG16 [as alias]
def save_bottlebeck_features_btl():
    dataset_btl_path = 'dataset_btl/train'
    batch_size = 1
    datagen = ImageDataGenerator(rescale=1. / 255)

    # build the VGG16 network, excluding the 3 fully-connected layers on top
    model = applications.VGG16(include_top=False, weights='imagenet')

    score_iou_btl_g, nb_btl_samples = get_images_count_recursive(dataset_btl_path)
    logging.debug('score_iou_btl_g {}'.format(score_iou_btl_g))
    logging.debug('nb_btl_samples {}'.format(nb_btl_samples))

    ## Train
    generator = datagen.flow_from_directory(
        dataset_btl_path,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        classes=None,     # class order (mapped to label indices) will be alphanumeric
        class_mode=None,  # "categorical": 2D one-hot labels; None: yield batches without labels; "sparse": 1D integer labels
        save_to_dir='temp',
        shuffle=False)    # don't shuffle, otherwise the [class index = alphabetical folder order] logic used below breaks
    logging.info('generator.class_indices {}'.format(generator.class_indices))

    bottleneck_features_btl = model.predict_generator(
        generator, nb_btl_samples // batch_size)
    # e.g. (10534, 4, 4, 512) for the Blazer+Jeans training set (5408 + 5126 images)
    logging.debug('bottleneck_features_btl {}'.format(bottleneck_features_btl.shape))

    # save the output as a Numpy array (np.save needs a binary-mode file)
    logging.debug('Saving bottleneck_features_btl...')
    np.save(open('output/bottleneck_features_btl.npy', 'wb'),
            bottleneck_features_btl)
Example 7: test_validate_keras_vgg
# Required import: from keras import applications [as alias]
# Or: from keras.applications import VGG16 [as alias]
def test_validate_keras_vgg(self):
    input_tensor = Input(shape=(224, 224, 3))
    model = VGG16(weights="imagenet", input_tensor=input_tensor)
    file_name = "keras" + model.name + ".pmml"
    pmml_obj = KerasToPmml(model, dataSet="image", predictedClasses=[str(i) for i in range(1000)])
    pmml_obj.export(open(file_name, 'w'), 0)
    self.assertEqual(self.schema.is_valid(file_name), True)
Example 8: setup_model
# Required import: from keras import applications [as alias]
# Or: from keras.applications import VGG16 [as alias]
def setup_model(encoder, layer_name):
    image_input = Input(shape=(224, 224, 3))
    base_model = None
    if encoder == 'vgg16':
        base_model = VGG16(include_top=False, weights='imagenet',
                           input_tensor=image_input, input_shape=(224, 224, 3))
    elif encoder == 'vgg19':
        base_model = VGG19(include_top=False, weights='imagenet',
                           input_tensor=image_input, input_shape=(224, 224, 3))
    else:
        raise ValueError("not implemented encoder type")
    model = Model(inputs=base_model.input, outputs=base_model.get_layer(layer_name).output)
    return model
Example 9: test_vgg
# Required import: from keras import applications [as alias]
# Or: from keras.applications import VGG16 [as alias]
def test_vgg():
    app = random.choice([applications.VGG16, applications.VGG19])
    last_dim = 512
    _test_application_basic(app)
    _test_application_notop(app, last_dim)
    _test_application_variable_input_channels(app, last_dim)
    _test_app_pooling(app, last_dim)
Example 10: build_model
# Required import: from keras import applications [as alias]
# Or: from keras.applications import VGG16 [as alias]
def build_model():
    import keras.applications as kapp
    from keras.layers import Input
    from keras.backend import floatx
    inputLayer = Input(shape=(224, 224, 3), dtype=floatx())
    return kapp.VGG16(input_tensor=inputLayer)
Example 11: __init__
# Required import: from keras import applications [as alias]
# Or: from keras.applications import VGG16 [as alias]
def __init__(self):
    self.checkpoint = pickle.load(open(CHECKPOINT_PATH, 'rb'), encoding='latin1')
    self.checkpoint_params = self.checkpoint['params']
    self.language_model = self.checkpoint['model']
    self.ixtoword = self.checkpoint['ixtoword']
    model = VGG16(weights="imagenet")
    self.visual_model = Model(inputs=model.input, outputs=model.layers[21].output)
    self.visual_model._make_predict_function()
    self.graph = tf.get_default_graph()
    self.BEAM_SIZE = 2
Example 12: build_vgg16
# Required import: from keras import applications [as alias]
# Or: from keras.applications import VGG16 [as alias]
def build_vgg16(y_pred, use_original_vgg_shape, vgg_layers):
    """
    Load pre-trained VGG16 from keras applications
    """
    if use_original_vgg_shape:
        return build_vgg_original_shape(y_pred, vgg_layers)
    else:
        return build_vgg_img_shape(y_pred, vgg_layers)
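As a point of reference, a hypothetical call to the dispatcher above might look like the following; the tensor name y_pred and the layer indices are assumptions, chosen so that the selected layers are the first three pooling layers of the stock Keras VGG16 (block1_pool, block2_pool, block3_pool), matching the comment in Examples 2 and 3.

# Assumed usage sketch: y_pred is a 4-D, image-shaped tensor produced elsewhere.
vgg_layers = [3, 6, 10]  # indices of block1_pool, block2_pool, block3_pool in keras VGG16
perceptual_vgg = build_vgg16(y_pred, use_original_vgg_shape=False, vgg_layers=vgg_layers)
vgg_features = perceptual_vgg(y_pred)  # feature maps usable e.g. for a perceptual loss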
Example 13: __init__
# Required import: from keras import applications [as alias]
# Or: from keras.applications import VGG16 [as alias]
def __init__(self):
    self.matrix_res = None
    self.similarity_deep = None
    self.model = VGG16(include_top=False, weights='imagenet')
    self.matrix_idx_to_item_id = None
    self.item_id_to_matrix_idx = None
Example 14: model
# Required import: from keras import applications [as alias]
# Or: from keras.applications import VGG16 [as alias]
def model():
    model = VGG16(include_top=False, input_shape=(416, 416, 3))
    x = model.output
    x = Conv2D(1, (1, 1), activation='sigmoid')(x)
    output = Reshape((13, 13), name='output')(x)
    model = Model(model.input, output)
    return model
Example 15: make_model
# Required import: from keras import applications [as alias]
# Or: from keras.applications import VGG16 [as alias]
def make_model(model, image_size):
    if model == "inceptionv3":
        base_model = InceptionV3(include_top=False, input_shape=image_size + (3,))
    elif model == "vgg16" or model is None:
        base_model = VGG16(include_top=False, input_shape=image_size + (3,))
    elif model == "mobilenet":
        base_model = MobileNet(include_top=False, input_shape=image_size + (3,))
    return base_model
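To illustrate, a hypothetical call for the snippet above might look like the following; the image size is an assumption, not taken from the original project.

# Assumed usage: build a headless VGG16 backbone for 224x224 RGB inputs.
backbone = make_model("vgg16", (224, 224))
print(backbone.output_shape)  # (None, 7, 7, 512) for 224x224 inputs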