This article collects typical usage examples of the Python method keras.applications.inception_resnet_v2.InceptionResNetV2. If you are wondering what inception_resnet_v2.InceptionResNetV2 does or how to use it in practice, the curated examples below may help. You can also explore the rest of the module keras.applications.inception_resnet_v2 for related usage.
Six code examples of inception_resnet_v2.InceptionResNetV2 are shown below, ordered roughly by popularity.
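Before the examples, here is a minimal sketch (not taken from any of the snippets below) of how InceptionResNetV2 is typically instantiated and used for ImageNet classification; the image path is a placeholder:

from keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input, decode_predictions
from keras.preprocessing import image
import numpy as np

# Full classifier: 299x299 RGB input, 1000-way ImageNet softmax
model = InceptionResNetV2(weights='imagenet', include_top=True)

img = image.load_img('example.jpg', target_size=(299, 299))  # placeholder path
x = np.expand_dims(image.img_to_array(img), axis=0)
preds = model.predict(preprocess_input(x))
print(decode_predictions(preds, top=3)[0])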
Example 1: build_model
# Required import: from keras.applications import inception_resnet_v2 [as alias]
# Or: from keras.applications.inception_resnet_v2 import InceptionResNetV2 [as alias]
# Also assumes: from keras.layers import Input, Dense, Lambda, concatenate; from keras.models import Model;
# from keras import backend as K; and globals img_size, channel, embedding_size defined elsewhere
def build_model():
    base_model = InceptionResNetV2(include_top=False, weights='imagenet',
                                   input_shape=(img_size, img_size, channel),
                                   pooling='avg')
    image_input = base_model.input
    x = base_model.layers[-1].output
    out = Dense(embedding_size)(x)
    image_embedder = Model(image_input, out)

    input_a = Input((img_size, img_size, channel), name='anchor')
    input_p = Input((img_size, img_size, channel), name='positive')
    input_n = Input((img_size, img_size, channel), name='negative')

    normalize = Lambda(lambda x: K.l2_normalize(x, axis=-1), name='normalize')

    x = image_embedder(input_a)
    output_a = normalize(x)
    x = image_embedder(input_p)
    output_p = normalize(x)
    x = image_embedder(input_n)
    output_n = normalize(x)

    merged_vector = concatenate([output_a, output_p, output_n], axis=-1)

    model = Model(inputs=[input_a, input_p, input_n],
                  outputs=merged_vector)
    return model
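The concatenated output above is only useful together with a triplet loss. A minimal sketch of such a loss, not part of the original snippet (the margin value and embedding_size=128 are assumptions that must match the Dense projection above), could look like this:

import keras.backend as K

def triplet_loss(y_true, y_pred, margin=0.5, embedding_size=128):
    # y_pred is the concatenation [anchor | positive | negative] from build_model
    anchor = y_pred[:, :embedding_size]
    positive = y_pred[:, embedding_size:2 * embedding_size]
    negative = y_pred[:, 2 * embedding_size:]
    pos_dist = K.sum(K.square(anchor - positive), axis=-1)
    neg_dist = K.sum(K.square(anchor - negative), axis=-1)
    return K.mean(K.maximum(pos_dist - neg_dist + margin, 0.0))

model = build_model()
model.compile(optimizer='adam', loss=triplet_loss)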
Example 2: create_model
# Required import: from keras.applications import inception_resnet_v2 [as alias]
# Or: from keras.applications.inception_resnet_v2 import InceptionResNetV2 [as alias]
# Note: this is a hyperas template; the {{...}} expressions are search-space placeholders filled in at optimization time.
# Also assumes: import os, keras; from keras import regularizers; from keras.layers import Dense, Dropout, GlobalAveragePooling2D;
# from keras.models import Model; from hyperopt import STATUS_OK; and globals num_classes, best_model,
# num_train_samples, num_valid_samples, batch_size defined elsewhere
def create_model(train_generator, validation_generator):
    l2_reg = regularizers.l2({{loguniform(log(1e-6), log(1e-2))}})

    base_model = InceptionResNetV2(weights='imagenet', include_top=False)
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dropout({{uniform(0, 1)}})(x)
    x = Dense(1024, activation='relu', kernel_regularizer=l2_reg, activity_regularizer=l2_reg)(x)
    x = Dropout({{uniform(0, 1)}})(x)
    predictions = Dense(num_classes, activation='softmax', kernel_regularizer=l2_reg, activity_regularizer=l2_reg)(x)
    model = Model(inputs=base_model.input, outputs=predictions)

    model_weights_path = os.path.join('models', best_model)
    model.load_weights(model_weights_path)

    for i in range(int(len(base_model.layers) * {{uniform(0, 1)}})):
        layer = base_model.layers[i]
        layer.trainable = False

    adam = keras.optimizers.Adam(lr={{loguniform(log(1e-6), log(1e-3))}})
    model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer=adam)
    # print(model.summary())

    model.fit_generator(
        train_generator,
        steps_per_epoch=num_train_samples // batch_size,
        validation_data=validation_generator,
        validation_steps=num_valid_samples // batch_size)

    score, acc = model.evaluate_generator(validation_generator)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
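Because create_model returns a dict with 'loss', 'status', and 'model', it follows the hyperas convention and would typically be driven by hyperas' optim.minimize. A hedged sketch (the data() helper, its make_generators() call, and max_evals are assumptions):

from hyperas import optim
from hyperopt import Trials, tpe

def data():
    # Hypothetical helper: build and return whatever create_model expects as arguments
    train_generator, validation_generator = make_generators()  # placeholder
    return train_generator, validation_generator

best_run, best_model = optim.minimize(model=create_model,
                                      data=data,
                                      algo=tpe.suggest,
                                      max_evals=10,
                                      trials=Trials())
print('Best hyperparameters:', best_run)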
Example 3: build_model
# Required import: from keras.applications import inception_resnet_v2 [as alias]
# Or: from keras.applications.inception_resnet_v2 import InceptionResNetV2 [as alias]
# Also assumes: from keras.layers import Dense, GlobalAveragePooling2D; from keras.models import Model; global num_classes
def build_model():
    base_model = InceptionResNetV2(weights='imagenet', include_top=False)
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation='relu')(x)
    predictions = Dense(num_classes, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)
    return model
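A typical transfer-learning follow-up, not part of the original snippet, would freeze the pretrained backbone and train only the new head; the optimizer choice and the assumption that the last three layers are the new head are illustrative:

model = build_model()

# Freeze everything except the freshly added pooling and Dense layers
for layer in model.layers[:-3]:
    layer.trainable = False

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# model.fit_generator(train_generator, epochs=5, validation_data=validation_generator)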
Example 4: build_inception_resnet_V2
# Required import: from keras.applications import inception_resnet_v2 [as alias]
# Or: from keras.applications.inception_resnet_v2 import InceptionResNetV2 [as alias]
# Also assumes: from keras.layers import AveragePooling2D, Flatten, Dense, Dropout; from keras.models import Model
def build_inception_resnet_V2(
    img_shape=(416, 416, 3),
    n_classes=16,
    l2_reg=0.0,
    load_pretrained=True,
    freeze_layers_from="base_model",
):
    # Decide whether to load pretrained weights from ImageNet
    if load_pretrained:
        weights = "imagenet"
    else:
        weights = None

    # Get base model
    base_model = InceptionResNetV2(
        include_top=False, weights=weights, input_tensor=None, input_shape=img_shape
    )

    # Add final layers
    x = base_model.output
    x = AveragePooling2D((8, 8), strides=(8, 8), name="avg_pool")(x)
    x = Flatten(name="flatten")(x)
    x = Dense(512, activation="swish", name="dense_1",
              kernel_initializer="he_uniform")(x)
    x = Dropout(0.25)(x)
    predictions = Dense(
        n_classes,
        activation="softmax",
        name="predictions",
        kernel_initializer="he_uniform",
    )(x)

    # This is the model we will train
    model = Model(inputs=base_model.input, outputs=predictions)

    # Freeze some layers: either the whole base model, or everything up to a given layer index
    if freeze_layers_from is not None:
        if freeze_layers_from == "base_model":
            print(" Freezing base model layers")
            for layer in base_model.layers:
                layer.trainable = False
        else:
            for i, layer in enumerate(model.layers):
                print(i, layer.name)
            print(" Freezing from layer 0 to " + str(freeze_layers_from))
            for layer in model.layers[:freeze_layers_from]:
                layer.trainable = False
            for layer in model.layers[freeze_layers_from:]:
                layer.trainable = True

    return model
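A possible call, with placeholder arguments (note that the "swish" activation string requires a Keras/TensorFlow version that actually registers it):

model = build_inception_resnet_V2(img_shape=(416, 416, 3), n_classes=16,
                                  freeze_layers_from="base_model")
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
model.summary()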
Example 5: calculate_inception_score
# Required import: from keras.applications import inception_resnet_v2 [as alias]
# Or: from keras.applications.inception_resnet_v2 import InceptionResNetV2 [as alias]
# Also assumes: import glob; import numpy as np; from keras.preprocessing import image;
# from keras.applications.inception_resnet_v2 import preprocess_input
def calculate_inception_score(images_path, batch_size=1, splits=10):
    # Create an instance of InceptionResNetV2 (full ImageNet classifier)
    model = InceptionResNetV2()

    images = None
    for image_ in glob.glob(images_path):
        # Load image
        loaded_image = image.load_img(image_, target_size=(299, 299))
        # Convert PIL image to numpy ndarray
        loaded_image = image.img_to_array(loaded_image)
        # Add a batch dimension
        loaded_image = np.expand_dims(loaded_image, axis=0)
        # Concatenate all images into one tensor
        if images is None:
            images = loaded_image
        else:
            images = np.concatenate([images, loaded_image], axis=0)

    # Calculate number of batches
    num_batches = (images.shape[0] + batch_size - 1) // batch_size

    probs = None
    # Use the network to calculate class probabilities
    for i in range(num_batches):
        image_batch = images[i * batch_size:(i + 1) * batch_size, :, :, :]
        prob = model.predict(preprocess_input(image_batch))
        if probs is None:
            probs = prob
        else:
            probs = np.concatenate([prob, probs], axis=0)

    # Calculate Inception scores: exp of the mean KL divergence between p(y|x) and p(y), per split
    divs = []
    split_size = probs.shape[0] // splits
    for i in range(splits):
        prob_batch = probs[(i * split_size):((i + 1) * split_size), :]
        p_y = np.expand_dims(np.mean(prob_batch, 0), 0)
        div = prob_batch * (np.log(prob_batch / p_y))
        div = np.mean(np.sum(div, 1))
        divs.append(np.exp(div))

    return np.mean(divs), np.std(divs)
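A hedged usage example (the glob pattern is a placeholder; note that the canonical Inception Score uses InceptionV3, so scores from this InceptionResNetV2 variant are not directly comparable to published numbers):

mean_is, std_is = calculate_inception_score('generated_images/*.png', batch_size=32, splits=10)
print('Inception Score: %.3f +/- %.3f' % (mean_is, std_is))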
Example 6: get_tst_neural_net
# Required import: from keras.applications import inception_resnet_v2 [as alias]
# Or: from keras.applications.inception_resnet_v2 import InceptionResNetV2 [as alias]
# Also assumes the helper constructors referenced in the final branches (get_custom_multi_io_model,
# get_RetinaNet_model, etc.) are defined elsewhere in the same project
def get_tst_neural_net(type):
    # Return a (model, custom_objects) pair for the requested architecture
    model = None
    custom_objects = dict()
    if type == 'mobilenet_small':
        from keras.applications.mobilenet import MobileNet
        model = MobileNet((128, 128, 3), depth_multiplier=1, alpha=0.25, include_top=True, weights='imagenet')
    elif type == 'mobilenet':
        from keras.applications.mobilenet import MobileNet
        model = MobileNet((224, 224, 3), depth_multiplier=1, alpha=1.0, include_top=True, weights='imagenet')
    elif type == 'mobilenet_v2':
        from keras.applications.mobilenetv2 import MobileNetV2
        model = MobileNetV2((224, 224, 3), depth_multiplier=1, alpha=1.4, include_top=True, weights='imagenet')
    elif type == 'resnet50':
        from keras.applications.resnet50 import ResNet50
        model = ResNet50(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'inception_v3':
        from keras.applications.inception_v3 import InceptionV3
        model = InceptionV3(input_shape=(299, 299, 3), include_top=True, weights='imagenet')
    elif type == 'inception_resnet_v2':
        from keras.applications.inception_resnet_v2 import InceptionResNetV2
        model = InceptionResNetV2(input_shape=(299, 299, 3), include_top=True, weights='imagenet')
    elif type == 'xception':
        from keras.applications.xception import Xception
        model = Xception(input_shape=(299, 299, 3), include_top=True, weights='imagenet')
    elif type == 'densenet121':
        from keras.applications.densenet import DenseNet121
        model = DenseNet121(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'densenet169':
        from keras.applications.densenet import DenseNet169
        model = DenseNet169(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'densenet201':
        from keras.applications.densenet import DenseNet201
        model = DenseNet201(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'nasnetmobile':
        from keras.applications.nasnet import NASNetMobile
        model = NASNetMobile(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'nasnetlarge':
        from keras.applications.nasnet import NASNetLarge
        model = NASNetLarge(input_shape=(331, 331, 3), include_top=True, weights='imagenet')
    elif type == 'vgg16':
        from keras.applications.vgg16 import VGG16
        model = VGG16(input_shape=(224, 224, 3), include_top=False, pooling='avg', weights='imagenet')
    elif type == 'vgg19':
        from keras.applications.vgg19 import VGG19
        model = VGG19(input_shape=(224, 224, 3), include_top=False, pooling='avg', weights='imagenet')
    elif type == 'multi_io':
        model = get_custom_multi_io_model()
    elif type == 'multi_model_layer_1':
        model = get_custom_model_with_other_model_as_layer()
    elif type == 'multi_model_layer_2':
        model = get_small_model_with_other_model_as_layer()
    elif type == 'Conv2DTranspose':
        model = get_Conv2DTranspose_model()
    elif type == 'RetinaNet':
        model, custom_objects = get_RetinaNet_model()
    elif type == 'conv3d_model':
        model = get_simple_3d_model()
    return model, custom_objects
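For instance, fetching the InceptionResNetV2 test model would look like this (a minimal sketch, assuming the ImageNet weights can be downloaded):

model, custom_objects = get_tst_neural_net('inception_resnet_v2')
print(model.name, 'has', len(model.layers), 'layers')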