本文整理汇总了Python中keras.models.Sequential.summary方法的典型用法代码示例。如果您正苦于以下问题:Python Sequential.summary方法的具体用法?Python Sequential.summary怎么用?Python Sequential.summary使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类keras.models.Sequential
的用法示例。
在下文中一共展示了Sequential.summary方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: mnist_cnn_model
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import summary [as 别名]
def mnist_cnn_model(inputShape, nb_classes):
    """Build a small CNN for MNIST-style images (Keras 1 API).

    Args:
        inputShape: 3-dim input shape, e.g. (1, 28, 28).
        nb_classes: number of output classes.

    Returns:
        An uncompiled ``Sequential`` model; its summary is printed.
    """
    stack = [
        # 3x3 conv, stride 1: each 1*28*28 input becomes 32*26*26
        Convolution2D(32, 3, 3, border_mode='valid', input_shape=inputShape),
        Activation('relu'),
        # second 3x3 conv: 32*26*26 -> 32*24*24
        Convolution2D(32, 3, 3),
        Activation('relu'),
        # 2x2 pooling halves the spatial dims: -> 32*12*12
        MaxPooling2D(pool_size=(2, 2)),
        # dropout does not affect the output shape
        Dropout(0.25),
        # flatten 32*12*12 into a 4608-dim vector
        Flatten(),
        Dense(128),
        Activation('relu'),
        Dropout(0.5),
        Dense(nb_classes),
        Activation('softmax'),
    ]
    model = Sequential()
    for layer in stack:
        model.add(layer)
    model.summary()
    return model
示例2: build_model
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import summary [as 别名]
def build_model(self):
    """Return a small fully-connected network for this agent.

    Input width comes from ``self.state_size``; the softmax output has
    ``self.action_size`` units. The summary is printed before returning.
    """
    net = Sequential()
    for layer in (Dense(24, input_dim=self.state_size, activation='relu'),
                  Dense(24, activation='relu'),
                  Dense(self.action_size, activation='softmax')):
        net.add(layer)
    net.summary()
    return net
示例3: test_nested_sequential
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import summary [as 别名]
def test_nested_sequential(in_tmpdir):
    """Exercise a Sequential nested two levels deep: fit/evaluate/predict,
    weight save/load round-trip, and config/JSON/YAML serialization."""

    def make_model():
        # inner dense stack, wrapped in a middle Sequential, wrapped in the
        # outer model that adds the softmax — built identically both times
        # so saved weights can be loaded into a fresh clone
        inner = Sequential()
        inner.add(Dense(num_hidden, input_shape=(input_dim,)))
        inner.add(Activation('relu'))
        inner.add(Dense(num_class))
        middle = Sequential()
        middle.add(inner)
        outer = Sequential()
        outer.add(middle)
        outer.add(Activation('softmax'))
        outer.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        return outer

    (x_train, y_train), (x_test, y_test) = _get_test_data()
    model = make_model()

    # fit under several verbosity / validation configurations
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
              verbose=1, validation_data=(x_test, y_test))
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
              verbose=2, validation_split=0.1)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
              verbose=0)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
              verbose=1, shuffle=False)
    model.train_on_batch(x_train[:32], y_train[:32])

    loss = model.evaluate(x_test, y_test, verbose=0)
    model.predict(x_test, verbose=0)
    model.predict_classes(x_test, verbose=0)
    model.predict_proba(x_test, verbose=0)

    # weights must round-trip through HDF5 into a freshly built clone
    fname = 'test_nested_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)
    model = make_model()
    model.load_weights(fname)
    os.remove(fname)
    nloss = model.evaluate(x_test, y_test, verbose=0)
    assert(loss == nloss)

    # test serialization: config, summary, JSON and YAML round-trips
    config = model.get_config()
    Sequential.from_config(config)
    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)
    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
示例4: build_generator
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import summary [as 别名]
def build_generator(self):
    """Build a conditional convolutional generator.

    A 100-dim noise vector is gated (elementwise multiply) by a label
    embedding, then upsampled from 7x7 to an image with ``self.channels``
    channels in [-1, 1] via a final tanh.

    Returns:
        Model mapping ``[noise, label]`` to a generated image.
    """
    net = Sequential()
    for layer in (Dense(128 * 7 * 7, activation="relu", input_dim=100),
                  Reshape((7, 7, 128)),
                  BatchNormalization(momentum=0.8),
                  UpSampling2D(),
                  Conv2D(128, kernel_size=3, padding="same"),
                  Activation("relu"),
                  BatchNormalization(momentum=0.8),
                  UpSampling2D(),
                  Conv2D(64, kernel_size=3, padding="same"),
                  Activation("relu"),
                  BatchNormalization(momentum=0.8),
                  Conv2D(self.channels, kernel_size=3, padding='same'),
                  Activation("tanh")):
        net.add(layer)
    net.summary()

    noise = Input(shape=(100,))
    label = Input(shape=(1,), dtype='int32')
    # embed the class label into the 100-dim noise space, then gate the noise
    label_embedding = Flatten()(Embedding(self.num_classes, 100)(label))
    joined = multiply([noise, label_embedding])
    img = net(joined)
    return Model([noise, label], img)
示例5: build_critic
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import summary [as 别名]
def build_critic(self):
    """Build the critic: four conv blocks ending in a single linear unit.

    The final Dense(1) has no activation — an unbounded score, as used by
    WGAN-style critics (presumed from the name; confirm against the caller).

    Returns:
        Model mapping an image of shape ``self.img_shape`` to one scalar.
    """
    net = Sequential()
    for layer in (Conv2D(16, kernel_size=3, strides=2,
                         input_shape=self.img_shape, padding="same"),
                  LeakyReLU(alpha=0.2),
                  Dropout(0.25),
                  Conv2D(32, kernel_size=3, strides=2, padding="same"),
                  # asymmetric zero-pad on the bottom/right edges
                  ZeroPadding2D(padding=((0, 1), (0, 1))),
                  BatchNormalization(momentum=0.8),
                  LeakyReLU(alpha=0.2),
                  Dropout(0.25),
                  Conv2D(64, kernel_size=3, strides=2, padding="same"),
                  BatchNormalization(momentum=0.8),
                  LeakyReLU(alpha=0.2),
                  Dropout(0.25),
                  Conv2D(128, kernel_size=3, strides=1, padding="same"),
                  BatchNormalization(momentum=0.8),
                  LeakyReLU(alpha=0.2),
                  Dropout(0.25),
                  Flatten(),
                  Dense(1)):
        net.add(layer)
    net.summary()

    img = Input(shape=self.img_shape)
    validity = net(img)
    return Model(img, validity)
示例6: get_convBNeluMPdrop
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import summary [as 别名]
def get_convBNeluMPdrop(num_conv_layers, nums_feat_maps, feat_scale_factor, conv_sizes, pool_sizes, dropout_conv, input_shape):
    """Build a stack of Conv -> BN -> ELU -> MaxPool (-> Dropout) blocks.

    Args:
        num_conv_layers: how many conv blocks to stack.
        nums_feat_maps: per-layer base feature-map counts.
        feat_scale_factor: multiplier applied to each base count.
        conv_sizes: per-layer (rows, cols) kernel sizes.
        pool_sizes: per-layer max-pooling sizes.
        dropout_conv: dropout rate after each block; 0.0 disables dropout.
        input_shape: input shape for the first conv layer only.

    Returns:
        An uncompiled ``Sequential`` model; its summary is printed.
    """
    # [Convolutional Layers]
    model = Sequential()
    input_shape_specified = False
    # range/print() are valid on both Python 2 and 3; the original used
    # xrange and print statements, which fail under Python 3 (the rest of
    # this file already uses print() calls).
    for conv_idx in range(num_conv_layers):
        # scale the configured feature-map count for this layer
        n_feat_here = int(nums_feat_maps[conv_idx] * feat_scale_factor)
        if not input_shape_specified:
            # only the first conv layer receives input_shape
            print(' ---->>First conv layer is being added! with %d' % n_feat_here)
            model.add(Convolution2D(n_feat_here, conv_sizes[conv_idx][0], conv_sizes[conv_idx][1],
                                    input_shape=input_shape,
                                    border_mode='same',
                                    init='he_normal'))
            input_shape_specified = True
        else:
            print(' ---->>%d-th conv layer is being added with %d units' % (conv_idx, n_feat_here))
            model.add(Convolution2D(n_feat_here, conv_sizes[conv_idx][0], conv_sizes[conv_idx][1],
                                    border_mode='same',
                                    init='he_normal'))
        # add BN, Activation, pooling, and dropout
        model.add(BatchNormalization(axis=1, mode=2))
        model.add(keras.layers.advanced_activations.ELU(alpha=1.0))  # TODO: select activation
        model.add(MaxPooling2D(pool_size=pool_sizes[conv_idx]))
        if dropout_conv != 0.0:
            model.add(Dropout(dropout_conv))
            print(' ---->>Add dropout of %f for %d-th conv layer' % (dropout_conv, conv_idx))
    model.summary()
    return model
示例7: run
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import summary [as 别名]
def run(dataset):
    """Train a two-hidden-layer MLP and print its test score/accuracy.

    Args:
        dataset: dict with 'train', 'dev' and 'test' keys, each holding a
            (features, labels) pair; labels are two-class one-hot vectors.
    """
    batch_size = 16
    nb_epoch = 20
    train_X, train_y = dataset['train']
    dev_X, dev_y = dataset['dev']
    test_X, test_y = dataset['test']
    print('train_X shape:', train_X.shape)

    print('Building model...')
    model = Sequential()
    for layer in (Dense(1024, input_dim=train_X.shape[1], activation='sigmoid'),
                  Dropout(0.2),
                  Dense(1024, activation='sigmoid'),
                  Dropout(0.2),
                  Dense(2, activation='softmax')):
        model.add(layer)
    model.summary()
    model.compile(loss='binary_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])

    # train with the dev split as validation, then score on the test split
    model.fit(train_X, train_y,
              batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=1, validation_data=(dev_X, dev_y))
    score = model.evaluate(test_X, test_y, verbose=0)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])
示例8: test_image_classification
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import summary [as 别名]
def test_image_classification():
    """A tiny CNN should exceed 75% validation accuracy on synthetic images,
    and its config should round-trip through ``Sequential.from_config``."""
    np.random.seed(1337)  # deterministic data and weight init
    input_shape = (16, 16, 3)
    (x_train, y_train), (x_test, y_test) = get_test_data(
        num_train=500,
        num_test=200,
        input_shape=input_shape,
        classification=True,
        num_classes=4)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    model = Sequential([
        layers.Conv2D(filters=8, kernel_size=3,
                      activation='relu',
                      input_shape=input_shape),
        layers.MaxPooling2D(pool_size=2),
        layers.Conv2D(filters=4, kernel_size=(3, 3),
                      activation='relu', padding='same'),
        layers.GlobalAveragePooling2D(),
        layers.Dense(y_test.shape[-1], activation='softmax'),
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    model.summary()

    history = model.fit(x_train, y_train, epochs=10, batch_size=16,
                        validation_data=(x_test, y_test),
                        verbose=0)
    assert history.history['val_acc'][-1] > 0.75

    # config serialization round-trip
    config = model.get_config()
    model = Sequential.from_config(config)
示例9: build_generator
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import summary [as 别名]
def build_generator(self):
    """Build a conditional MLP generator.

    Latent noise is gated (elementwise multiply) by an embedding of the
    class label, then mapped through widening dense blocks to an image of
    shape ``self.img_shape`` with a tanh output.

    Returns:
        Model mapping ``[noise, label]`` to a generated image.
    """
    net = Sequential()
    for layer in (Dense(256, input_dim=self.latent_dim),
                  LeakyReLU(alpha=0.2),
                  BatchNormalization(momentum=0.8),
                  Dense(512),
                  LeakyReLU(alpha=0.2),
                  BatchNormalization(momentum=0.8),
                  Dense(1024),
                  LeakyReLU(alpha=0.2),
                  BatchNormalization(momentum=0.8),
                  Dense(np.prod(self.img_shape), activation='tanh'),
                  Reshape(self.img_shape)):
        net.add(layer)
    net.summary()

    noise = Input(shape=(self.latent_dim,))
    label = Input(shape=(1,), dtype='int32')
    # embed the label into the latent space and gate the noise with it
    label_embedding = Flatten()(Embedding(self.num_classes, self.latent_dim)(label))
    model_input = multiply([noise, label_embedding])
    img = net(model_input)
    return Model([noise, label], img)
示例10: test_vector_classification
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import summary [as 别名]
def test_vector_classification():
    '''
    Classify random float vectors into 2 classes with logistic regression
    using 2 layer neural network with ReLU hidden units.
    '''
    (x_train, y_train), (x_test, y_test) = get_test_data(
        num_train=500,
        num_test=200,
        input_shape=(20,),
        classification=True,
        num_classes=2)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    # Test with Sequential API
    model = Sequential([
        layers.Dense(16, input_shape=(x_train.shape[-1],), activation='relu'),
        layers.Dense(8),
        layers.Activation('relu'),
        layers.Dense(y_train.shape[-1], activation='softmax'),
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    model.summary()

    history = model.fit(x_train, y_train, epochs=15, batch_size=16,
                        validation_data=(x_test, y_test),
                        verbose=0)
    # expect better-than-chance learning on the synthetic data
    assert(history.history['val_acc'][-1] > 0.8)

    # config serialization round-trip
    config = model.get_config()
    model = Sequential.from_config(config)
示例11: build_generator
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import summary [as 别名]
def build_generator(self):
    """Build an MLP generator whose widths scale with ``self.noise_dims``.

    Returns:
        Model mapping a ``(noise_dims,)`` noise vector to an image of
        shape ``self.img_shape`` (tanh output in [-1, 1]).
    """
    noise_shape = (self.noise_dims,)
    net = Sequential()
    # dense widths grow as 1x, 1.5x, 2x of noise_dims, then narrow to 150
    # before projecting to the flattened image size
    for layer in (Dense(self.noise_dims, input_shape=noise_shape),
                  LeakyReLU(alpha=0.2),
                  BatchNormalization(momentum=0.8),
                  Dense(int(self.noise_dims * 1.5)),
                  LeakyReLU(alpha=0.2),
                  BatchNormalization(momentum=0.8),
                  Dense(int(self.noise_dims * 2)),
                  LeakyReLU(alpha=0.2),
                  BatchNormalization(momentum=0.8),
                  Dense(150),
                  LeakyReLU(alpha=0.2),
                  BatchNormalization(momentum=0.8),
                  Dense(np.prod(self.img_shape), activation='tanh'),
                  Reshape(self.img_shape)):
        net.add(layer)
    net.summary()

    noise = Input(shape=noise_shape)
    img = net(noise)
    return Model(noise, img)
示例12: build_generators
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import summary [as 别名]
def build_generators(self):
    """Build two generators that share their first two dense blocks.

    The shared ``Sequential`` trunk means the early weights are identical
    objects in both returned models; each generator then gets its own
    independently-weighted head.

    Returns:
        (Model, Model): two models mapping the same 100-dim noise input
        to img1 and img2 respectively.
    """
    noise_shape = (100,)
    noise = Input(shape=noise_shape)

    # Shared weights between generators
    shared = Sequential()
    for layer in (Dense(256, input_shape=noise_shape),
                  LeakyReLU(alpha=0.2),
                  BatchNormalization(momentum=0.8),
                  Dense(512),
                  LeakyReLU(alpha=0.2),
                  BatchNormalization(momentum=0.8)):
        shared.add(layer)
    latent = shared(noise)

    def make_head(features):
        # per-generator head: fresh (unshared) layers each call
        h = Dense(1024)(features)
        h = LeakyReLU(alpha=0.2)(h)
        h = BatchNormalization(momentum=0.8)(h)
        h = Dense(np.prod(self.img_shape), activation='tanh')(h)
        return Reshape(self.img_shape)(h)

    img1 = make_head(latent)  # Generator 1
    img2 = make_head(latent)  # Generator 2
    shared.summary()
    return Model(noise, img1), Model(noise, img2)
示例13: moustafa_model1
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import summary [as 别名]
def moustafa_model1(inputShape, nb_classes):
    """Build a VGG-style CNN: three double-conv/pool stages plus a dense head.

    Args:
        inputShape: 3-dim input image shape.
        nb_classes: number of softmax output classes.

    Returns:
        An uncompiled ``Sequential`` model; its summary is printed.
    """
    stack = [
        # stage 1: two 62-filter 3x3 convs, then 2x2 pooling
        Convolution2D(62, 3, 3, border_mode='same', input_shape=inputShape),
        Activation('relu'),
        Convolution2D(62, 3, 3, border_mode='same'),
        Activation('relu'),
        MaxPooling2D(pool_size=(2, 2)),
        # stage 2: two 128-filter convs, then pooling
        Convolution2D(128, 3, 3, border_mode='same'),
        Activation('relu'),
        Convolution2D(128, 3, 3, border_mode='same'),
        Activation('relu'),
        MaxPooling2D(pool_size=(2, 2)),
        # stage 3: two 64-filter convs, then pooling
        Convolution2D(64, 3, 3, border_mode='same'),
        Activation('relu'),
        Convolution2D(64, 3, 3, border_mode='same'),
        Activation('relu'),
        MaxPooling2D(pool_size=(2, 2)),
        # dense head with dropout, ending in a softmax over nb_classes
        Flatten(),
        Dense(128),
        Activation('relu'),
        Dropout(0.5),
        Dense(512),
        Activation('relu'),
        Dropout(0.5),
        Dense(nb_classes),
        Activation('softmax'),
    ]
    model = Sequential()
    for layer in stack:
        model.add(layer)
    model.summary()
    return model
示例14: mnist_transferCNN_model
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import summary [as 别名]
def mnist_transferCNN_model(inputShape, nb_classes):
    """Build a CNN split into feature and classification layer groups.

    The two groups are kept as separate lists so callers can later freeze
    the convolutional feature layers and retrain only the dense head
    (the usual transfer-learning setup this example illustrates).

    Args:
        inputShape: 3-dim input image shape.
        nb_classes: number of softmax output classes.

    Returns:
        An uncompiled ``Sequential`` model; its summary is printed.
    """
    # group 1: convolutional feature extractor
    feature_layers = [
        Convolution2D(32, 3, 3,
                      border_mode='valid',
                      input_shape=inputShape),
        Activation('relu'),
        Convolution2D(32, 3, 3),
        Activation('relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        Flatten(),
    ]
    # group 2: dense classifier head
    classification_layers = [
        Dense(128),
        Activation('relu'),
        Dropout(0.5),
        Dense(nb_classes),
        Activation('softmax'),
    ]
    # assemble the complete model from both groups in order
    model = Sequential()
    for layer in feature_layers + classification_layers:
        model.add(layer)
    model.summary()
    return model
示例15: create_model
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import summary [as 别名]
def create_model(train_X, test_X, train_y, test_y):
    """Hyperas search-space template: a 3-hidden-layer MLP on 238-dim input.

    NOTE: the ``{{choice(...)}}`` / ``{{uniform(...)}}`` markers are hyperas
    template expressions substituted textually before execution — they must
    be kept exactly as written.

    Returns:
        dict with 'loss' (negated test accuracy), 'status' and 'model',
        the shape hyperopt's fmin expects.
    """
    model = Sequential()
    model.add(Dense(500, input_shape=(238,), kernel_initializer={{choice(['glorot_uniform','random_uniform'])}}))
    model.add(BatchNormalization(epsilon=1e-06, mode=0, momentum=0.9, weights=None))
    model.add(Activation({{choice(['relu','sigmoid','tanh'])}}))
    model.add(Dropout({{uniform(0, 0.3)}}))
    model.add(Dense({{choice([128,256])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 0.4)}}))
    model.add(Dense({{choice([128,256])}}))
    model.add(Activation({{choice(['relu','tanh'])}}))
    model.add(Dropout(0.3))
    model.add(Dense(41))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  metrics=['accuracy'],
                  optimizer={{choice(['rmsprop', 'adam'])}})
    model.summary()

    # stop early when val accuracy stalls; checkpoint the best val-loss weights
    early_stops = EarlyStopping(patience=3, monitor='val_acc')
    ckpt_callback = ModelCheckpoint('keras_model',
                                    monitor='val_loss',
                                    verbose=1,
                                    save_best_only=True,
                                    mode='auto')
    model.fit(train_X, train_y,
              batch_size={{choice([128,264])}},
              nb_epoch={{choice([10,20])}},
              validation_data=(test_X, test_y),
              callbacks=[early_stops, ckpt_callback])
    score, acc = model.evaluate(test_X, test_y, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}