This page collects typical usage examples of Python's keras.applications.densenet.DenseNet121. If you are unsure what densenet.DenseNet121 does, how to call it, or where it is useful, the curated code examples below may help. You can also read further about the module it belongs to, keras.applications.densenet.
The following shows 4 code examples of densenet.DenseNet121, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
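Before diving into the examples, here is a minimal sketch of the most common pattern: instantiating DenseNet121 with ImageNet weights as a feature extractor. The input shape, pooling choice and dummy batch are illustrative assumptions, not taken from the examples below.

import numpy as np
from keras.applications.densenet import DenseNet121, preprocess_input

# Pretrained DenseNet121 without its classification head, pooled to a feature vector.
model = DenseNet121(weights='imagenet', include_top=False, pooling='avg',
                    input_shape=(224, 224, 3))

# Dummy batch standing in for real images (values in [0, 255] before preprocessing).
batch = preprocess_input(np.random.rand(2, 224, 224, 3) * 255.0)
features = model.predict(batch)
print(features.shape)  # (2, 1024) -- DenseNet121's final feature width is 1024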
Example 1: get_model_pretrain
# Required module import: from keras.applications import densenet [as alias]
# Or: from keras.applications.densenet import DenseNet121 [as alias]
from keras.applications import vgg16, vgg19, resnet50, densenet, mobilenet

def get_model_pretrain(arch):
    # Map the architecture name to a Keras Applications constructor and a
    # learning-rate multiplier (MobileNet gets a 10x multiplier).
    modlrate = 1
    if "VGG16" in arch:
        base_model = vgg16.VGG16
    elif "VGG19" in arch:
        base_model = vgg19.VGG19
    elif "RESNET50" in arch:
        base_model = resnet50.ResNet50
    elif "DENSENET121" in arch:
        base_model = densenet.DenseNet121
    elif "MOBILENET" in arch:
        base_model = mobilenet.MobileNet
        modlrate = 10
    else:
        print("model not available")
        exit()
    # Instantiate the chosen backbone with ImageNet weights and no classification head.
    base_model = base_model(weights='imagenet', include_top=False)
    return base_model, modlrate
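A hypothetical way to use the returned backbone and learning-rate multiplier (the 10-class head, Adam optimizer and 1e-4 base learning rate are assumptions for illustration, not part of the example above):

from keras.layers import GlobalAveragePooling2D, Dense
from keras.models import Model
from keras.optimizers import Adam

base_model, modlrate = get_model_pretrain("DENSENET121")

# Attach a small classification head on top of the pretrained backbone.
x = GlobalAveragePooling2D()(base_model.output)
out = Dense(10, activation='softmax')(x)  # 10 classes, purely illustrative
model = Model(base_model.input, out)

# modlrate scales the base learning rate (MobileNet trains with a 10x higher rate).
model.compile(optimizer=Adam(lr=1e-4 * modlrate),
              loss='categorical_crossentropy', metrics=['accuracy'])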
Example 2: test_DenseNet121
# Required module import: from keras.applications import densenet [as alias]
# Or: from keras.applications.densenet import DenseNet121 [as alias]
def test_DenseNet121(self):
    from keras.applications.densenet import DenseNet121
    model = DenseNet121(include_top=True, weights='imagenet')
    res = run_image(model, self.model_files, img_path)
    self.assertTrue(*res)
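Here run_image, self.model_files and img_path are helpers defined elsewhere in the surrounding test suite. A self-contained sanity check along the same lines, using an assumed local image path, might look like this:

import numpy as np
from keras.preprocessing import image
from keras.applications.densenet import DenseNet121, preprocess_input, decode_predictions

model = DenseNet121(include_top=True, weights='imagenet')

# 'elephant.jpg' is a placeholder path; substitute any local image file.
img = image.load_img('elephant.jpg', target_size=(224, 224))
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
print(decode_predictions(model.predict(x), top=3)[0])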
Example 3: get_densenet121_unet_softmax
# Required module import: from keras.applications import densenet [as alias]
# Or: from keras.applications.densenet import DenseNet121 [as alias]
def get_densenet121_unet_softmax(input_shape, weights='imagenet'):
    # NOTE: bn_axis, dense_block, transition_block and conv_block, as well as the
    # Keras layer imports, are defined elsewhere in the original source file.
    blocks = [6, 12, 24, 16]

    # DenseNet-121 encoder operating on a 4-channel input.
    img_input = Input(input_shape + (4,))
    x = ZeroPadding2D(padding=((3, 3), (3, 3)))(img_input)
    x = Conv2D(64, 7, strides=2, use_bias=False, name='conv1/conv')(x)
    x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name='conv1/bn')(x)
    x = Activation('relu', name='conv1/relu')(x)
    conv1 = x
    x = ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
    x = MaxPooling2D(3, strides=2, name='pool1')(x)

    x = dense_block(x, blocks[0], name='conv2')
    conv2 = x
    x = transition_block(x, 0.5, name='pool2')
    x = dense_block(x, blocks[1], name='conv3')
    conv3 = x
    x = transition_block(x, 0.5, name='pool3')
    x = dense_block(x, blocks[2], name='conv4')
    conv4 = x
    x = transition_block(x, 0.5, name='pool4')
    x = dense_block(x, blocks[3], name='conv5')
    x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name='bn')(x)
    conv5 = x

    # U-Net decoder with skip connections to the encoder feature maps.
    conv6 = conv_block(UpSampling2D()(conv5), 320)
    conv6 = concatenate([conv6, conv4], axis=-1)
    conv6 = conv_block(conv6, 320)

    conv7 = conv_block(UpSampling2D()(conv6), 256)
    conv7 = concatenate([conv7, conv3], axis=-1)
    conv7 = conv_block(conv7, 256)

    conv8 = conv_block(UpSampling2D()(conv7), 128)
    conv8 = concatenate([conv8, conv2], axis=-1)
    conv8 = conv_block(conv8, 128)

    conv9 = conv_block(UpSampling2D()(conv8), 96)
    conv9 = concatenate([conv9, conv1], axis=-1)
    conv9 = conv_block(conv9, 96)

    conv10 = conv_block(UpSampling2D()(conv9), 64)
    conv10 = conv_block(conv10, 64)
    res = Conv2D(3, (1, 1), activation='softmax')(conv10)

    model = Model(img_input, res)

    if weights == 'imagenet':
        # Copy ImageNet weights from a stock 3-channel DenseNet121 into the 4-channel model.
        densenet = DenseNet121(input_shape=input_shape + (3,), weights=weights, include_top=False)
        # First conv layer: scale the RGB filters and initialise the extra
        # fourth input channel from the green-channel filters.
        w0 = densenet.layers[2].get_weights()
        w = model.layers[2].get_weights()
        w[0][:, :, [0, 1, 2], :] = 0.9 * w0[0][:, :, :3, :]
        w[0][:, :, 3, :] = 0.1 * w0[0][:, :, 1, :]
        model.layers[2].set_weights(w)
        # Remaining encoder layers: copy weights and freeze.
        for i in range(3, len(densenet.layers)):
            model.layers[i].set_weights(densenet.layers[i].get_weights())
            model.layers[i].trainable = False
    return model
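The weight-transfer block above scales the pretrained RGB filters of the first convolution by 0.9 and seeds the extra fourth input channel with 0.1 times the green-channel filters, then copies and freezes every remaining encoder layer so that only the decoder trains at first. A hypothetical call, assuming 256x256 four-channel tiles and a categorical loss (both assumptions, not taken from the example):

model = get_densenet121_unet_softmax((256, 256), weights='imagenet')
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.summary()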
Example 4: get_tst_neural_net
# Required module import: from keras.applications import densenet [as alias]
# Or: from keras.applications.densenet import DenseNet121 [as alias]
def get_tst_neural_net(type):
    model = None
    custom_objects = dict()
    if type == 'mobilenet_small':
        from keras.applications.mobilenet import MobileNet
        model = MobileNet((128, 128, 3), depth_multiplier=1, alpha=0.25, include_top=True, weights='imagenet')
    elif type == 'mobilenet':
        from keras.applications.mobilenet import MobileNet
        model = MobileNet((224, 224, 3), depth_multiplier=1, alpha=1.0, include_top=True, weights='imagenet')
    elif type == 'mobilenet_v2':
        from keras.applications.mobilenetv2 import MobileNetV2
        model = MobileNetV2((224, 224, 3), depth_multiplier=1, alpha=1.4, include_top=True, weights='imagenet')
    elif type == 'resnet50':
        from keras.applications.resnet50 import ResNet50
        model = ResNet50(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'inception_v3':
        from keras.applications.inception_v3 import InceptionV3
        model = InceptionV3(input_shape=(299, 299, 3), include_top=True, weights='imagenet')
    elif type == 'inception_resnet_v2':
        from keras.applications.inception_resnet_v2 import InceptionResNetV2
        model = InceptionResNetV2(input_shape=(299, 299, 3), include_top=True, weights='imagenet')
    elif type == 'xception':
        from keras.applications.xception import Xception
        model = Xception(input_shape=(299, 299, 3), include_top=True, weights='imagenet')
    elif type == 'densenet121':
        from keras.applications.densenet import DenseNet121
        model = DenseNet121(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'densenet169':
        from keras.applications.densenet import DenseNet169
        model = DenseNet169(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'densenet201':
        from keras.applications.densenet import DenseNet201
        model = DenseNet201(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'nasnetmobile':
        from keras.applications.nasnet import NASNetMobile
        model = NASNetMobile(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'nasnetlarge':
        from keras.applications.nasnet import NASNetLarge
        model = NASNetLarge(input_shape=(331, 331, 3), include_top=True, weights='imagenet')
    elif type == 'vgg16':
        from keras.applications.vgg16 import VGG16
        model = VGG16(input_shape=(224, 224, 3), include_top=False, pooling='avg', weights='imagenet')
    elif type == 'vgg19':
        from keras.applications.vgg19 import VGG19
        model = VGG19(input_shape=(224, 224, 3), include_top=False, pooling='avg', weights='imagenet')
    elif type == 'multi_io':
        model = get_custom_multi_io_model()
    elif type == 'multi_model_layer_1':
        model = get_custom_model_with_other_model_as_layer()
    elif type == 'multi_model_layer_2':
        model = get_small_model_with_other_model_as_layer()
    elif type == 'Conv2DTranspose':
        model = get_Conv2DTranspose_model()
    elif type == 'RetinaNet':
        model, custom_objects = get_RetinaNet_model()
    elif type == 'conv3d_model':
        model = get_simple_3d_model()
    return model, custom_objects
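A hypothetical caller (the 'densenet121' key comes from the branches above; the save path is an assumption for illustration):

model, custom_objects = get_tst_neural_net('densenet121')
print(model.name, len(model.layers))

# custom_objects is only populated for the RetinaNet branch; it would be passed
# to keras.models.load_model when reloading a saved model that uses custom layers.
model.save('densenet121_test.h5')  # illustrative output path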