This article collects typical usage examples of the Python attribute keras.applications.VGG19. If you have been wondering what exactly applications.VGG19 does, how to use it, or want to see it in context, the curated attribute examples below may help. You can also explore further usage of the module it belongs to, keras.applications.
The following presents 9 code examples of the applications.VGG19 attribute, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
Example 1: build_vgg
# Required import: from keras import applications [as alias]
# Or: from keras.applications import VGG19 [as alias]
def build_vgg(self):
    """
    Builds a pre-trained VGG19 model that outputs the image features extracted
    at the third block of the model.
    """
    # include_top=False lets the ImageNet-weighted convolutional base accept
    # the high-resolution input shape instead of the 224x224 classifier default.
    vgg = VGG19(weights="imagenet", include_top=False, input_shape=self.hr_shape)
    # Truncate at block3_conv3 (vgg.layers[9]); mutating vgg.outputs, as older
    # snippets did, is silently ignored by recent Keras versions.
    # See architecture at: https://github.com/keras-team/keras/blob/master/keras/applications/vgg19.py
    vgg = Model(vgg.input, vgg.get_layer("block3_conv3").output)
    img = Input(shape=self.hr_shape)
    # Extract image features
    img_features = vgg(img)
    return Model(img, img_features)
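As a quick sanity check, the extractor above can be exercised on random data. The snippet below is a minimal sketch, not part of the original example: it rebuilds the same truncated network without the surrounding class and assumes the SRGAN high-resolution shape of (256, 256, 3); real inputs should additionally go through keras.applications.vgg19.preprocess_input.
import numpy as np
from keras.applications import VGG19
from keras.layers import Input
from keras.models import Model

hr_shape = (256, 256, 3)
vgg = VGG19(weights="imagenet", include_top=False, input_shape=hr_shape)
extractor = Model(vgg.input, vgg.get_layer("block3_conv3").output)
batch = np.random.rand(2, 256, 256, 3)   # two fake HR images in [0, 1]
print(extractor.predict(batch).shape)    # (2, 64, 64, 256)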
Example 2: build_vgg
# Required import: from keras import applications [as alias]
# Or: from keras.applications import VGG19 [as alias]
def build_vgg():
    """
    Build a VGG network to extract image features.
    """
    input_shape = (256, 256, 3)
    # Load a pre-trained VGG19 model trained on the ImageNet dataset; the
    # headless convolutional base accepts the 256x256 input shape.
    vgg = VGG19(weights="imagenet", include_top=False, input_shape=input_shape)
    # Truncate at block3_conv3 (vgg.layers[9]) instead of mutating vgg.outputs
    vgg = Model(vgg.input, vgg.get_layer("block3_conv3").output)
    input_layer = Input(shape=input_shape)
    # Extract features
    features = vgg(input_layer)
    # Create a Keras model
    model = Model(inputs=[input_layer], outputs=[features])
    return model
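For meaningful features, VGG19's ImageNet weights expect channel-wise mean-subtracted BGR inputs. A hedged usage sketch, assuming build_vgg from Example 2 is in scope:
import numpy as np
from keras.applications.vgg19 import preprocess_input

model = build_vgg()
imgs = np.random.randint(0, 255, size=(4, 256, 256, 3)).astype("float32")
features = model.predict(preprocess_input(imgs))   # preprocess: RGB->BGR, mean subtraction
print(features.shape)                              # (4, 64, 64, 256)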
Example 3: model_vgg_create
# Required import: from keras import applications [as alias]
# Or: from keras.applications import VGG19 [as alias]
def model_vgg_create(input_shape, num_classes):
    logging.debug('input_shape {}'.format(input_shape))
    # The ImageNet weights require a 3-channel input, e.g. (128, 128, 3);
    # a single-channel shape such as (128, 128, 1) will not load them.
    model = applications.VGG19(weights="imagenet", include_top=False, input_shape=input_shape)
    # Freeze the layers which you don't want to train; here, the first 5 layers.
    for layer in model.layers[:5]:
        layer.trainable = False
    # Add custom layers on top of the convolutional base
    x = model.output
    x = Flatten()(x)
    x = Dense(1024, activation="relu")(x)
    x = Dropout(0.5)(x)
    x = Dense(1024, activation="relu")(x)
    predictions = Dense(num_classes, activation="softmax")(x)
    # Create the final model
    model_final = Model(inputs=model.input, outputs=predictions)
    # Compile the model
    # opt = RMSprop(lr=0.0001, decay=1e-6)
    opt = SGD(lr=0.0001, momentum=0.9)
    model_final.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
    return model_final
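The helper can be smoke-tested end to end on random stand-in data. A minimal sketch, assuming the imports the example relies on (logging, Flatten, Dense, Dropout, SGD, Model) are present:
import numpy as np
from keras.utils import to_categorical

model = model_vgg_create(input_shape=(128, 128, 3), num_classes=10)
x = np.random.rand(8, 128, 128, 3).astype("float32")
y = to_categorical(np.random.randint(0, 10, size=8), num_classes=10)
model.fit(x, y, batch_size=4, epochs=1)   # one throwaway epoch on fake data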
Example 4: setup_model
# Required import: from keras import applications [as alias]
# Or: from keras.applications import VGG19 [as alias]
def setup_model(encoder, layer_name):
    image_input = Input(shape=(224, 224, 3))
    if encoder == 'vgg16':
        base_model = VGG16(include_top=False, weights='imagenet', input_tensor=image_input, input_shape=(224, 224, 3))
    elif encoder == 'vgg19':
        base_model = VGG19(include_top=False, weights='imagenet', input_tensor=image_input, input_shape=(224, 224, 3))
    else:
        raise ValueError("unsupported encoder type: {}".format(encoder))
    # Truncate the network at the requested layer and expose its output
    model = Model(inputs=base_model.input, outputs=base_model.get_layer(layer_name).output)
    return model
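Valid values for layer_name come from the chosen base network, e.g. 'block5_conv4' for VGG19 or 'block5_conv3' for VGG16. A hedged usage sketch:
feat_model = setup_model('vgg19', 'block5_conv4')
for layer in feat_model.layers[-3:]:
    print(layer.name, layer.output_shape)   # ends at block5_conv4, (None, 14, 14, 512)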
Example 5: test_vgg
# Required import: from keras import applications [as alias]
# Or: from keras.applications import VGG19 [as alias]
def test_vgg():
    # Randomly exercise one of the two VGG variants; the _test_* helpers are
    # defined elsewhere in the Keras applications test suite.
    app = random.choice([applications.VGG16, applications.VGG19])
    last_dim = 512
    _test_application_basic(app)
    _test_application_notop(app, last_dim)
    _test_application_variable_input_channels(app, last_dim)
    _test_app_pooling(app, last_dim)
Example 6: build_model
# Required import: from keras import applications [as alias]
# Or: from keras.applications import VGG19 [as alias]
def build_model():
    import keras.applications as kapp
    from keras.layers import Input
    from keras.backend import floatx
    inputLayer = Input(shape=(224, 224, 3), dtype=floatx())
    return kapp.VGG19(input_tensor=inputLayer)
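Because include_top defaults to True and weights to 'imagenet', the returned model is the full VGG19 classifier ending in a 1000-way softmax. A short check, as a sketch:
model = build_model()
print(model.output_shape)    # (None, 1000)
print(model.count_params())  # ~143.7 million parameters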
Example 7: get_imagenet_architecture
# Required import: from keras import applications [as alias]
# Or: from keras.applications import VGG19 [as alias]
def get_imagenet_architecture(architecture, variant, size, alpha, output_layer, include_top=False, weights='imagenet'):
    from keras import applications, Model
    if include_top:
        assert output_layer == 'last'
    if size == 'auto':
        size = get_image_size(architecture, variant, size)
    shape = (size, size, 3)
    if architecture == 'densenet':
        if variant == 'auto':
            variant = 'densenet-121'
        if variant == 'densenet-121':
            model = applications.DenseNet121(weights=weights, include_top=include_top, input_shape=shape)
        elif variant == 'densenet-169':
            model = applications.DenseNet169(weights=weights, include_top=include_top, input_shape=shape)
        elif variant == 'densenet-201':
            model = applications.DenseNet201(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'inception-resnet-v2':
        model = applications.InceptionResNetV2(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'mobilenet':
        model = applications.MobileNet(weights=weights, include_top=include_top, input_shape=shape, alpha=alpha)
    elif architecture == 'mobilenet-v2':
        model = applications.MobileNetV2(weights=weights, include_top=include_top, input_shape=shape, alpha=alpha)
    elif architecture == 'nasnet':
        if variant == 'auto':
            variant = 'large'
        if variant == 'large':
            model = applications.NASNetLarge(weights=weights, include_top=include_top, input_shape=shape)
        else:
            model = applications.NASNetMobile(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'resnet-50':
        model = applications.ResNet50(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'vgg-16':
        model = applications.VGG16(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'vgg-19':
        model = applications.VGG19(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'xception':
        model = applications.Xception(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'inception-v3':
        model = applications.InceptionV3(weights=weights, include_top=include_top, input_shape=shape)
    if output_layer != 'last':
        try:
            if isinstance(output_layer, int):
                layer = model.layers[output_layer]
            else:
                layer = model.get_layer(output_layer)
        except Exception:
            raise VergeMLError('layer not found: {}'.format(output_layer))
        model = Model(inputs=model.input, outputs=layer.output)
    return model
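This helper comes from VergeML, which supplies get_image_size and VergeMLError. A hedged usage sketch that sidesteps the 'auto' size lookup by passing an explicit size (alpha is only consumed by the MobileNet branches):
model = get_imagenet_architecture('vgg-19', 'auto', 224, None, 'block4_pool')
print(model.output_shape)   # (None, 14, 14, 512) for a headless VGG19 cut at block4_pool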
Example 8: train
# Required import: from keras import applications [as alias]
# Or: from keras.applications import VGG19 [as alias]
def train(self, epochs, batch_size=1, sample_interval=50):
    start_time = datetime.datetime.now()
    for epoch in range(epochs):
        # ----------------------
        #  Train Discriminator
        # ----------------------
        # Sample images and their conditioning counterparts
        imgs_hr, imgs_lr = self.data_loader.load_data(batch_size)
        # From the low res. images, generate high res. versions
        fake_hr = self.generator.predict(imgs_lr)
        valid = np.ones((batch_size,) + self.disc_patch)
        fake = np.zeros((batch_size,) + self.disc_patch)
        # Train the discriminator (original images = real / generated = fake)
        d_loss_real = self.discriminator.train_on_batch(imgs_hr, valid)
        d_loss_fake = self.discriminator.train_on_batch(fake_hr, fake)
        d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
        # ------------------
        #  Train Generator
        # ------------------
        # Sample images and their conditioning counterparts
        imgs_hr, imgs_lr = self.data_loader.load_data(batch_size)
        # The generator wants the discriminator to label the generated images as real
        valid = np.ones((batch_size,) + self.disc_patch)
        # Extract ground-truth image features using the pre-trained VGG19 model
        image_features = self.vgg.predict(imgs_hr)
        # Train the generator
        g_loss = self.combined.train_on_batch([imgs_lr, imgs_hr], [valid, image_features])
        elapsed_time = datetime.datetime.now() - start_time
        # Print the progress
        print("%d time: %s" % (epoch, elapsed_time))
        # If at save interval => save generated image samples
        if epoch % sample_interval == 0:
            self.sample_images(epoch)
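For context, the Keras-GAN repository drives this loop from a small __main__ block; the class name and the exact hyperparameters below are assumptions based on that project:
gan = SRGAN()   # hypothetical: the class these methods belong to in Keras-GAN
gan.train(epochs=30000, batch_size=1, sample_interval=50)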
Example 9: __init__
# Required import: from keras import applications [as alias]
# Or: from keras.applications import VGG19 [as alias]
def __init__(self):
    # Input shape
    self.channels = 3
    self.lr_height = 64                 # Low resolution height
    self.lr_width = 64                  # Low resolution width
    self.lr_shape = (self.lr_height, self.lr_width, self.channels)
    self.hr_height = self.lr_height*4   # High resolution height
    self.hr_width = self.lr_width*4     # High resolution width
    self.hr_shape = (self.hr_height, self.hr_width, self.channels)
    # Number of residual blocks in the generator
    self.n_residual_blocks = 16
    # A pre-trained VGG19 model extracts image features from the real and the
    # generated high-resolution images; the MSE between them is minimized.
    self.vgg = self.build_vgg()
    self.vgg.trainable = False
    # Calculate output shape of D (PatchGAN)
    patch = int(self.hr_height / 2**4)
    self.disc_patch = (patch, patch, 1)
    # Number of filters in the first layer of G and D
    self.gf = 64
    self.df = 64
    # Build and compile the discriminator
    self.discriminator = self.build_discriminator()
    # Build the generator
    self.generator = self.build_generator()
    # High res. and low res. images
    img_hr = Input(shape=self.hr_shape)
    img_lr = Input(shape=self.lr_shape)
    # Generate a high res. version from the low res. input
    fake_hr = self.generator(img_lr)
    # Extract image features of the generated img
    fake_features = self.vgg(fake_hr)
    # For the combined model we will only train the generator
    self.discriminator.trainable = False
    # The discriminator determines the validity of generated high res. images
    validity = self.discriminator(fake_hr)
    self.combined = Model([img_lr, img_hr], [validity, fake_features])
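The excerpt stops at the combined model's construction. In the full Keras-GAN implementation the method goes on to compile it against both the adversarial and the perceptual objective; the optimizer and loss weights below are an assumption based on that repository, not part of this excerpt:
    # Sketch of the elided final step (assumed values)
    optimizer = Adam(0.0002, 0.5)
    self.combined.compile(loss=['binary_crossentropy', 'mse'],
                          loss_weights=[1e-3, 1],
                          optimizer=optimizer)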