This article collects typical usage examples of the Python keras.layers.Dense method. If you have been wondering exactly what layers.Dense does, how to use it, or what real layers.Dense code looks like, the curated examples below may help. You can also explore further usage examples from keras.layers, the module in which Dense is defined.
The following shows 15 code examples of layers.Dense, sorted by popularity by default.
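Before the examples, here is a minimal, self-contained sketch of Dense in both the Sequential and functional APIs; the layer sizes and input shape are arbitrary illustrative values, not taken from any of the examples below.
from keras.models import Sequential, Model
from keras.layers import Dense, Input

# Sequential API: a tiny classifier head (all sizes are illustrative)
seq_model = Sequential()
seq_model.add(Dense(64, activation='relu', input_shape=(20,)))
seq_model.add(Dense(3, activation='softmax'))
seq_model.compile(optimizer='adam', loss='categorical_crossentropy')

# Functional API: the same stack written with explicit tensors
inputs = Input(shape=(20,))
x = Dense(64, activation='relu')(inputs)
outputs = Dense(3, activation='softmax')(x)
func_model = Model(inputs=inputs, outputs=outputs)
func_model.compile(optimizer='adam', loss='categorical_crossentropy')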
Example 1: create_model
# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Dense [as alias]
def create_model(time_window_size, metric):
    model = Sequential()
    model.add(Conv1D(filters=256, kernel_size=5, padding='same', activation='relu',
                     input_shape=(time_window_size, 1)))
    model.add(MaxPooling1D(pool_size=4))
    model.add(LSTM(64))
    model.add(Dense(units=time_window_size, activation='linear'))
    model.compile(optimizer='adam', loss='mean_squared_error', metrics=[metric])
    # model.compile(optimizer="sgd", loss="mse", metrics=[metric])
    print(model.summary())
    return model
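A brief usage sketch for create_model; the window size, metric choice, and random training data below are illustrative assumptions (the function and its Keras imports are assumed to be available as above).
import numpy as np

# Illustrative call: window size, metric and dummy data are assumptions, not from the source
window_size = 32
model = create_model(time_window_size=window_size, metric='mae')
X = np.random.rand(100, window_size, 1)   # 100 dummy windows, one feature per timestep
Y = X.reshape(100, window_size)           # the model reconstructs each window
model.fit(X, Y, epochs=2, batch_size=16)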
Example 2: _makenet
# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Dense [as alias]
def _makenet(x, num_layers, dropout, random_seed):
    import random
    from keras.layers import Dense, Dropout
    dropout_seeder = random.Random(random_seed)
    for i in range(num_layers - 1):
        # add intermediate layers
        if dropout:
            x = Dropout(dropout, seed=dropout_seeder.randint(0, 10000))(x)
        x = Dense(1024, activation="relu", name='dense_layer_{}'.format(i))(x)
    if dropout:
        # add the final dropout layer
        x = Dropout(dropout, seed=dropout_seeder.randint(0, 10000))(x)
    return x
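A usage sketch showing how _makenet might be attached on top of a convolutional base; the base network, class count, and hyperparameters below are illustrative assumptions (weights=None is used only to avoid downloading pretrained weights).
from keras.applications import VGG16
from keras.layers import Flatten, Dense
from keras import Model

# Hypothetical transfer-learning head: VGG16 features -> dense stack -> softmax
base_model = VGG16(weights=None, include_top=False, input_shape=(224, 224, 3))
x = Flatten()(base_model.output)
x = _makenet(x, num_layers=3, dropout=0.5, random_seed=42)   # num_layers - 1 = 2 dense layers
predictions = Dense(10, activation="softmax", name="predictions")(x)
head_model = Model(inputs=base_model.input, outputs=predictions)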
Example 3: _save
# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Dense [as alias]
def _save(model, base_model, layers, labels, random_seed, checkpoints_dir):
    import os
    from keras.layers import Flatten, Dense
    from keras import Model
    nclasses = len(labels)
    x = Flatten()(base_model.output)
    x = _makenet(x, layers, dropout=None, random_seed=random_seed)
    predictions = Dense(nclasses, activation="softmax", name="predictions")(x)
    model_final = Model(inputs=base_model.input, outputs=predictions)
    # copy the trained weights from the source model into the rebuilt graph
    for i in range(layers - 1):
        weights = model.get_layer(name='dense_layer_{}'.format(i)).get_weights()
        model_final.get_layer(name='dense_layer_{}'.format(i)).set_weights(weights)
    weights = model.get_layer(name='predictions').get_weights()
    model_final.get_layer(name='predictions').set_weights(weights)
    model_final.save(os.path.join(checkpoints_dir, "model.h5"))
    with open(os.path.join(checkpoints_dir, "labels.txt"), "w") as f:
        f.write("\n".join(labels))
    return model_final
Example 4: RNNModel
# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Dense [as alias]
def RNNModel(vocab_size, max_len, rnnConfig, model_type):
    embedding_size = rnnConfig['embedding_size']
    if model_type == 'inceptionv3':
        # InceptionV3 outputs a 2048-dimensional vector for each image, which we feed to the RNN model
        image_input = Input(shape=(2048,))
    elif model_type == 'vgg16':
        # VGG16 outputs a 4096-dimensional vector for each image, which we feed to the RNN model
        image_input = Input(shape=(4096,))
    image_model_1 = Dropout(rnnConfig['dropout'])(image_input)
    image_model = Dense(embedding_size, activation='relu')(image_model_1)

    caption_input = Input(shape=(max_len,))
    # mask_zero=True: captions are zero-padded to the same length; the mask tells downstream
    # layers to ignore the padded timesteps, which is mainly an efficiency gain.
    caption_model_1 = Embedding(vocab_size, embedding_size, mask_zero=True)(caption_input)
    caption_model_2 = Dropout(rnnConfig['dropout'])(caption_model_1)
    caption_model = LSTM(rnnConfig['LSTM_units'])(caption_model_2)

    # Merge the two branches and add a softmax classifier over the vocabulary
    final_model_1 = concatenate([image_model, caption_model])
    final_model_2 = Dense(rnnConfig['dense_units'], activation='relu')(final_model_1)
    final_model = Dense(vocab_size, activation='softmax')(final_model_2)

    model = Model(inputs=[image_input, caption_input], outputs=final_model)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model
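A usage sketch for RNNModel; the configuration values, vocabulary size, and caption length below are illustrative assumptions (only the dictionary keys come from the function body above).
# Hypothetical configuration; only the keys are dictated by RNNModel
rnnConfig = {
    'embedding_size': 256,
    'dropout': 0.5,
    'LSTM_units': 256,
    'dense_units': 256,
}
model = RNNModel(vocab_size=5000, max_len=34, rnnConfig=rnnConfig, model_type='inceptionv3')
model.summary()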
Example 5: create_model
# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Dense [as alias]
def create_model(self, input_dim):
    encoding_dim = 14
    input_layer = Input(shape=(input_dim,))
    encoder = Dense(encoding_dim, activation="tanh",
                    activity_regularizer=regularizers.l1(10e-5))(input_layer)
    encoder = Dense(encoding_dim // 2, activation="relu")(encoder)
    decoder = Dense(encoding_dim // 2, activation='tanh')(encoder)
    decoder = Dense(input_dim, activation='relu')(decoder)
    model = Model(inputs=input_layer, outputs=decoder)
    model.compile(optimizer='adam',
                  loss='mean_squared_error',
                  metrics=['accuracy'])
    return model
Example 6: weather_l2
# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Dense [as alias]
def weather_l2(hidden_nums=100, l2=0.01):
    input_img = Input(shape=(37,))
    hn = Dense(hidden_nums, activation='relu')(input_img)
    hn = Dense(hidden_nums, activation='relu',
               kernel_regularizer=regularizers.l2(l2))(hn)
    out_u = Dense(37, activation='sigmoid',
                  name='ae_part')(hn)
    out_sig = Dense(37, activation='linear',
                    name='pred_part')(hn)
    out_both = concatenate([out_u, out_sig], axis=1, name='concatenate')
    # weather_model = Model(input_img, outputs=[out_ae, out_pred])
    # mve_loss is a custom loss defined elsewhere in the source project
    mve_model = Model(input_img, outputs=[out_both])
    mve_model.compile(optimizer='adam', loss=mve_loss, loss_weights=[1.])
    return mve_model
Example 7: CausalCNN
# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Dense [as alias]
def CausalCNN(n_filters, lr, decay, loss,
              seq_len, input_features,
              strides_len, kernel_size,
              dilation_rates):
    inputs = Input(shape=(seq_len, input_features), name='input_layer')
    x = inputs
    # one dilated causal convolution block per dilation rate
    for dilation_rate in dilation_rates:
        x = Conv1D(filters=n_filters,
                   kernel_size=kernel_size,
                   padding='causal',
                   dilation_rate=dilation_rate,
                   activation='linear')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
    # x = Dense(7, activation='relu', name='dense_layer')(x)
    outputs = Dense(3, activation='sigmoid', name='output_layer')(x)
    causalcnn = Model(inputs, outputs=[outputs])
    return causalcnn
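A usage sketch for CausalCNN; all argument values are illustrative assumptions. Each entry in dilation_rates adds one dilated causal convolution block, so [1, 2, 4, 8] stacks four blocks with an exponentially growing receptive field.
# Illustrative values; note that lr, decay, loss and strides_len are accepted
# by CausalCNN but not used inside the function body above
causalcnn = CausalCNN(n_filters=32, lr=1e-3, decay=0.0, loss='mse',
                      seq_len=24, input_features=10,
                      strides_len=1, kernel_size=2,
                      dilation_rates=[1, 2, 4, 8])
causalcnn.compile(optimizer='adam', loss='mse')
causalcnn.summary()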
Example 8: weather_ae
# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Dense [as alias]
def weather_ae(layers, lr, decay, loss,
               input_len, input_features):
    inputs = Input(shape=(input_len, input_features), name='input_layer')
    for i, hidden_nums in enumerate(layers):
        if i == 0:
            hn = Dense(hidden_nums, activation='relu')(inputs)
        else:
            hn = Dense(hidden_nums, activation='relu')(hn)
    outputs = Dense(3, activation='sigmoid', name='output_layer')(hn)
    weather_model = Model(inputs, outputs=[outputs])
    return weather_model
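A usage sketch for weather_ae; the hidden-layer sizes and input shape are illustrative assumptions (as with CausalCNN, lr, decay and loss are accepted but unused in the body).
# Illustrative values; 'layers' is the list of hidden-layer sizes consumed in the loop above
weather_model = weather_ae(layers=[64, 32], lr=1e-3, decay=0.0, loss='mse',
                           input_len=37, input_features=1)
weather_model.compile(optimizer='adam', loss='mse')
weather_model.summary()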
Example 9: __init__
# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Dense [as alias]
def __init__(self, model_path=None):
    if model_path is not None:
        self.model = self.load_model(model_path)
    else:
        # VGG16 last conv features as input
        inputs = Input(shape=(7, 7, 512))
        x = Convolution2D(128, (1, 1))(inputs)
        x = Flatten()(x)
        # Classification head
        h_cls = Dense(256, activation='relu', kernel_regularizer=l2(0.01))(x)
        h_cls = Dropout(0.5)(h_cls)
        cls_head = Dense(20, activation='softmax', name='cls')(h_cls)
        # Regression head
        h_reg = Dense(256, activation='relu', kernel_regularizer=l2(0.01))(x)
        h_reg = Dropout(0.5)(h_reg)
        reg_head = Dense(4, activation='linear', name='reg')(h_reg)
        # Joint model with classification and regression outputs
        self.model = Model(inputs=inputs, outputs=[cls_head, reg_head])
Example 10: build_generator
# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Dense [as alias]
def build_generator(self):
    model = Sequential()
    model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
    model.add(Reshape((7, 7, 128)))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(1, kernel_size=3, padding="same"))
    model.add(Activation("tanh"))
    model.summary()

    noise = Input(shape=(self.latent_dim,))
    img = model(noise)
    return Model(noise, img)
Example 11: build_discriminator
# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Dense [as alias]
def build_discriminator(self):
    model = Sequential()
    model.add(Conv2D(64, kernel_size=3, strides=2, input_shape=self.missing_shape, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(256, kernel_size=3, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    model.summary()

    img = Input(shape=self.missing_shape)
    validity = model(img)
    return Model(img, validity)
Example 12: build_discriminator
# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Dense [as alias]
def build_discriminator(self):
    model = Sequential()
    model.add(Conv2D(64, kernel_size=4, strides=2, padding='same', input_shape=self.img_shape))
    model.add(LeakyReLU(alpha=0.8))
    model.add(Conv2D(128, kernel_size=4, strides=2, padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    model.add(InstanceNormalization())
    model.add(Conv2D(256, kernel_size=4, strides=2, padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    model.add(InstanceNormalization())
    model.summary()

    img = Input(shape=self.img_shape)
    features = model(img)

    validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(features)

    label = Flatten()(features)
    label = Dense(self.num_classes + 1, activation="softmax")(label)

    return Model(img, [validity, label])
Example 13: build_encoder
# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Dense [as alias]
def build_encoder(self):
    model = Sequential()
    model.add(Flatten(input_shape=self.img_shape))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(self.latent_dim))
    model.summary()

    img = Input(shape=self.img_shape)
    z = model(img)
    return Model(img, z)
Example 14: build_discriminator
# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Dense [as alias]
def build_discriminator(self):
    z = Input(shape=(self.latent_dim,))
    img = Input(shape=self.img_shape)
    d_in = concatenate([z, Flatten()(img)])

    model = Dense(1024)(d_in)
    model = LeakyReLU(alpha=0.2)(model)
    model = Dropout(0.5)(model)
    model = Dense(1024)(model)
    model = LeakyReLU(alpha=0.2)(model)
    model = Dropout(0.5)(model)
    model = Dense(1024)(model)
    model = LeakyReLU(alpha=0.2)(model)
    model = Dropout(0.5)(model)
    validity = Dense(1, activation="sigmoid")(model)

    return Model([z, img], validity)
Example 15: build_classifier
# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Dense [as alias]
def build_classifier(self):

    def clf_layer(layer_input, filters, f_size=4, normalization=True):
        """Classifier layer"""
        d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
        d = LeakyReLU(alpha=0.2)(d)
        if normalization:
            d = InstanceNormalization()(d)
        return d

    img = Input(shape=self.img_shape)

    c1 = clf_layer(img, self.cf, normalization=False)
    c2 = clf_layer(c1, self.cf * 2)
    c3 = clf_layer(c2, self.cf * 4)
    c4 = clf_layer(c3, self.cf * 8)
    c5 = clf_layer(c4, self.cf * 8)

    class_pred = Dense(self.num_classes, activation='softmax')(Flatten()(c5))

    return Model(img, class_pred)