本文整理汇总了Python中keras.models.Sequential.get_config方法的典型用法代码示例。如果您正苦于以下问题:Python Sequential.get_config方法的具体用法?Python Sequential.get_config怎么用?Python Sequential.get_config使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类keras.models.Sequential
的用法示例。
在下文中一共展示了Sequential.get_config方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_merge_overlap
# Required import: from keras.models import Sequential
# Alternatively: from keras.models.Sequential import get_config
def test_merge_overlap():
    """Exercise a Merge layer whose two inputs are the *same* Sequential
    branch (overlapping/shared weights), then smoke-test training,
    prediction, get_config and weight persistence.

    NOTE(review): relies on module-level fixtures (X_train, y_train, X_test,
    y_test, nb_hidden, input_dim, nb_class, batch_size, nb_epoch) and the
    legacy Keras 0.x API (nb_epoch/show_accuracy) -- confirm against the
    enclosing test module.
    """
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))
    # The same branch object is passed twice, so both merge inputs share weights.
    model = Sequential()
    model.add(Merge([left, left], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    # Cover the fit() flag matrix: accuracy on/off, verbosity levels,
    # validation_data vs validation_split, and shuffle disabled.
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)
    model.train_on_batch(X_train[:32], y_train[:32])
    loss = model.evaluate(X_train, y_train, verbose=0)
    assert(loss < 0.7)
    # Inference entry points.
    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)
    model.get_config(verbose=0)
    # Round-trip the weights through HDF5; the loss must be unchanged.
    fname = 'test_merge_overlap_temp.h5'
    model.save_weights(fname, overwrite=True)
    model.load_weights(fname)
    os.remove(fname)
    nloss = model.evaluate(X_train, y_train, verbose=0)
    assert(loss == nloss)
示例2: test_recursive
# Required import: from keras.models import Sequential
# Alternatively: from keras.models.Sequential import get_config
def test_recursive(self):
    """Nest a Graph container inside a Sequential model (layer-like API)."""
    print('test layer-like API')
    # Diamond-shaped graph: input1 -> dense1 -> dense3 and
    # input1 -> dense2, with dense2 + dense3 summed into output1.
    inner = containers.Graph()
    inner.add_input(name='input1', ndim=2)
    inner.add_node(Dense(32, 16), name='dense1', input='input1')
    inner.add_node(Dense(32, 4), name='dense2', input='input1')
    inner.add_node(Dense(16, 4), name='dense3', input='dense1')
    inner.add_output(name='output1', inputs=['dense2', 'dense3'],
                     merge_mode='sum')
    # Sandwich the graph between two Dense layers of a Sequential model.
    outer = Sequential()
    outer.add(Dense(32, 32, name='first_seq_dense'))
    outer.add(inner)
    outer.add(Dense(4, 4, name='last_seq_dense'))
    outer.compile('rmsprop', 'mse')
    history = outer.fit(X_train, y_train, batch_size=10, nb_epoch=10)
    score = outer.evaluate(X_test, y_test)
    print(score)
    assert score < 1.4
    score = outer.evaluate(X_test, y_test, show_accuracy=True)
    pred = outer.predict(X_test)
    outer.get_config(verbose=1)
示例3: test_recursive
# Required import: from keras.models import Sequential
# Alternatively: from keras.models.Sequential import get_config
def test_recursive(self):
    """Same layer-like API exercise as above, with a looser loss bound."""
    print("test layer-like API")
    g = containers.Graph()
    g.add_input(name="input1", ndim=2)
    # Declare the three Dense nodes data-driven; order matches the
    # original hand-written add_node calls exactly.
    node_specs = (
        (32, 16, "dense1", "input1"),
        (32, 4, "dense2", "input1"),
        (16, 4, "dense3", "dense1"),
    )
    for in_dim, out_dim, node_name, source in node_specs:
        g.add_node(Dense(in_dim, out_dim), name=node_name, input=source)
    g.add_output(name="output1", inputs=["dense2", "dense3"], merge_mode="sum")
    stack = Sequential()
    stack.add(Dense(32, 32, name="first_seq_dense"))
    stack.add(g)
    stack.add(Dense(4, 4, name="last_seq_dense"))
    stack.compile("rmsprop", "mse")
    history = stack.fit(X_train, y_train, batch_size=10, nb_epoch=10)
    loss = stack.evaluate(X_test, y_test)
    print(loss)
    assert loss < 2.5
    loss = stack.evaluate(X_test, y_test, show_accuracy=True)
    pred = stack.predict(X_test)
    stack.get_config(verbose=1)
示例4: test_recursive
# Required import: from keras.models import Sequential
# Alternatively: from keras.models.Sequential import get_config
def test_recursive():
    # Layer-like API: a Graph container used as one layer of a Sequential.
    diamond = containers.Graph()
    diamond.add_input(name='input1', input_shape=(32,))
    diamond.add_node(Dense(16), name='dense1', input='input1')
    diamond.add_node(Dense(4), name='dense2', input='input1')
    diamond.add_node(Dense(4), name='dense3', input='dense1')
    diamond.add_output(name='output1',
                       inputs=['dense2', 'dense3'],
                       merge_mode='sum')
    pipeline = Sequential()
    pipeline.add(Dense(32, input_shape=(32,)))
    pipeline.add(diamond)
    pipeline.add(Dense(4))
    pipeline.compile('rmsprop', 'mse')
    pipeline.fit(X_train_graph, y_train_graph, batch_size=10, nb_epoch=10)
    mse = pipeline.evaluate(X_test_graph, y_test_graph)
    assert mse < 2.5
    mse = pipeline.evaluate(X_test_graph, y_test_graph, show_accuracy=True)
    pipeline.predict(X_test_graph)
    pipeline.get_config(verbose=1)
示例5: test_merge_concat
# Required import: from keras.models import Sequential
# Alternatively: from keras.models.Sequential import get_config
def test_merge_concat(self):
    """Exercise a Merge layer in 'concat' mode over two independent
    branches, then verify weight save/load reproduces the same loss.

    NOTE(review): relies on module-level fixtures (X_train, y_train, X_test,
    y_test, nb_hidden, input_dim, nb_class, batch_size, nb_epoch) and the
    legacy Keras 0.x API -- confirm against the enclosing test module.
    """
    print('Test merge: concat')
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))
    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))
    model = Sequential()
    model.add(Merge([left, right], mode='concat'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    # Two-branch merge: every fit/predict call feeds the same X twice,
    # once per branch. Cover the fit() flag matrix.
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)
    loss = model.evaluate([X_train, X_train], y_train, verbose=0)
    print('loss:', loss)
    if loss > 0.7:
        raise Exception('Score too low, learning issue.')
    model.predict([X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test], verbose=0)
    model.get_config(verbose=0)
    print('test weight saving')
    fname = 'test_merge_concat_temp.h5'
    model.save_weights(fname, overwrite=True)
    # Rebuild an identical architecture from scratch and load the saved
    # weights into it; evaluation must match the pre-save loss exactly.
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))
    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))
    model = Sequential()
    model.add(Merge([left, right], mode='concat'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)
    nloss = model.evaluate([X_train, X_train], y_train, verbose=0)
    assert(loss == nloss)
示例6: test_Bidirectional
# Required import: from keras.models import Sequential
# Alternatively: from keras.models.Sequential import get_config
def test_Bidirectional():
    """Smoke-test the Bidirectional wrapper around SimpleRNN for both
    'sum' and 'concat' merge modes: Sequential API, config/JSON
    round-trip, stacked wrappers, functional API, and stateful RNNs."""
    rnn = layers.SimpleRNN
    samples = 2
    dim = 2
    timesteps = 2
    output_dim = 2
    dropout_rate = 0.2
    for mode in ['sum', 'concat']:
        x = np.random.random((samples, timesteps, dim))
        # 'concat' doubles the feature dimension of the merged output.
        target_dim = 2 * output_dim if mode == 'concat' else output_dim
        y = np.random.random((samples, target_dim))
        # test with Sequential model
        model = Sequential()
        model.add(wrappers.Bidirectional(rnn(output_dim, dropout=dropout_rate,
                                             recurrent_dropout=dropout_rate),
                                         merge_mode=mode,
                                         input_shape=(timesteps, dim)))
        model.compile(loss='mse', optimizer='sgd')
        model.fit(x, y, epochs=1, batch_size=1)
        # test config (get_config must succeed, and the JSON round-trip
        # must yield a summarizable model)
        model.get_config()
        model = model_from_json(model.to_json())
        model.summary()
        # test stacked bidirectional layers
        model = Sequential()
        model.add(wrappers.Bidirectional(rnn(output_dim,
                                             return_sequences=True),
                                         merge_mode=mode,
                                         input_shape=(timesteps, dim)))
        model.add(wrappers.Bidirectional(rnn(output_dim), merge_mode=mode))
        model.compile(loss='mse', optimizer='sgd')
        model.fit(x, y, epochs=1, batch_size=1)
        # test with functional API
        inputs = Input((timesteps, dim))
        outputs = wrappers.Bidirectional(rnn(output_dim, dropout=dropout_rate,
                                             recurrent_dropout=dropout_rate),
                                         merge_mode=mode)(inputs)
        if dropout_rate and K.backend() != 'cntk':
            # Dropout is disabled with CNTK for now.
            assert outputs._uses_learning_phase
        model = Model(inputs, outputs)
        model.compile(loss='mse', optimizer='sgd')
        model.fit(x, y, epochs=1, batch_size=1)
        # Bidirectional and stateful (requires a fixed batch_shape)
        inputs = Input(batch_shape=(1, timesteps, dim))
        outputs = wrappers.Bidirectional(rnn(output_dim, stateful=True),
                                         merge_mode=mode)(inputs)
        model = Model(inputs, outputs)
        model.compile(loss='mse', optimizer='sgd')
        model.fit(x, y, epochs=1, batch_size=1)
示例7: test_siamese_1
# Required import: from keras.models import Sequential
# Alternatively: from keras.models.Sequential import get_config
def test_siamese_1():
    """Exercise a Siamese layer (shared Dense applied to two branches,
    merged by 'sum'), then verify weight save/load reproduces the loss.

    NOTE(review): relies on module-level fixtures (_get_test_data,
    nb_hidden, input_dim, nb_class, batch_size, nb_epoch) and the legacy
    Keras 0.x API -- confirm against the enclosing test module.
    """
    (X_train, y_train), (X_test, y_test) = _get_test_data()
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))
    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))
    model = Sequential()
    model.add(Siamese(Dense(nb_hidden), [left, right], merge_mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    # Two-input model: feed the same X to both branches. Cover the fit()
    # flag matrix (accuracy, validation_data vs split, shuffle off).
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)
    loss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss < 0.8)
    model.predict([X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test], verbose=0)
    model.get_config(verbose=0)
    # test weight saving
    fname = 'test_siamese_1.h5'
    model.save_weights(fname, overwrite=True)
    # Rebuild the same architecture, load the saved weights, and check
    # that the evaluation result is reproduced exactly.
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))
    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))
    model = Sequential()
    model.add(Siamese(Dense(nb_hidden), [left, right], merge_mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.load_weights(fname)
    os.remove(fname)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    nloss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss == nloss)
示例8: test_TimeDistributed
# Required import: from keras.models import Sequential
# Alternatively: from keras.models.Sequential import get_config
def test_TimeDistributed():
    """Check the TimeDistributed wrapper: numerical equivalence with the
    legacy TimeDistributedDense layer (same weights, same outputs), use
    with Convolution2D, JSON round-trip, and stacked wrappers."""
    # first, test with Dense layer
    model = Sequential()
    model.add(wrappers.TimeDistributed(core.Dense(2), input_shape=(3, 4)))
    model.add(core.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')
    model.fit(np.random.random((10, 3, 4)), np.random.random((10, 3, 2)), nb_epoch=1, batch_size=10)
    # test config
    model.get_config()
    # compare to TimeDistributedDense: with identical weights the two
    # implementations must produce (nearly) identical predictions
    test_input = np.random.random((1, 3, 4))
    test_output = model.predict(test_input)
    weights = model.layers[0].get_weights()
    reference = Sequential()
    reference.add(core.TimeDistributedDense(2, input_shape=(3, 4), weights=weights))
    reference.add(core.Activation('relu'))
    reference.compile(optimizer='rmsprop', loss='mse')
    reference_output = reference.predict(test_input)
    assert_allclose(test_output, reference_output, atol=1e-05)
    # test when specifying a batch_input_shape
    reference = Sequential()
    reference.add(core.TimeDistributedDense(2, batch_input_shape=(1, 3, 4), weights=weights))
    reference.add(core.Activation('relu'))
    reference.compile(optimizer='rmsprop', loss='mse')
    reference_output = reference.predict(test_input)
    assert_allclose(test_output, reference_output, atol=1e-05)
    # test with Convolution2D (5D input: batch, time, channels, rows, cols)
    model = Sequential()
    model.add(wrappers.TimeDistributed(convolutional.Convolution2D(5, 2, 2, border_mode='same'), input_shape=(2, 3, 4, 4)))
    model.add(core.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')
    model.train_on_batch(np.random.random((1, 2, 3, 4, 4)), np.random.random((1, 2, 5, 4, 4)))
    # JSON round-trip and summary should work with the wrapper
    model = model_from_json(model.to_json())
    model.summary()
    # test stacked layers
    model = Sequential()
    model.add(wrappers.TimeDistributed(core.Dense(2), input_shape=(3, 4)))
    model.add(wrappers.TimeDistributed(core.Dense(3)))
    model.add(core.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')
    model.fit(np.random.random((10, 3, 4)), np.random.random((10, 3, 3)), nb_epoch=1, batch_size=10)
示例9: test_sequential
# Required import: from keras.models import Sequential
# Alternatively: from keras.models.Sequential import get_config
def test_sequential(self):
    """End-to-end smoke test of a plain MLP Sequential model: training
    flag matrix, inference, get_config, weight persistence, and
    JSON/YAML serialization.

    NOTE(review): relies on module-level fixtures (X_train, y_train, X_test,
    y_test, nb_hidden, input_dim, nb_class, batch_size, nb_epoch) and the
    legacy Keras 0.x API -- confirm against the enclosing test module.
    """
    print('Test sequential')
    model = Sequential()
    model.add(Dense(nb_hidden, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    # Cover the fit() flag matrix: accuracy on/off, verbosity levels,
    # validation_data vs validation_split, shuffle disabled.
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)
    model.train_on_batch(X_train[:32], y_train[:32])
    loss = model.evaluate(X_train, y_train, verbose=0)
    print('loss:', loss)
    if loss > 0.7:
        raise Exception('Score too low, learning issue.')
    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)
    model.get_config(verbose=0)
    print('test weight saving')
    fname = 'test_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)
    # Rebuild the same architecture, load the saved weights, and check
    # that the evaluation result is reproduced exactly.
    model = Sequential()
    model.add(Dense(nb_hidden, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)
    nloss = model.evaluate(X_train, y_train, verbose=0)
    assert(loss == nloss)
    # test json serialization
    json_data = model.to_json()
    model = model_from_json(json_data)
    # test yaml serialization
    yaml_data = model.to_yaml()
    model = model_from_yaml(yaml_data)
示例10: test_nested_sequential
# Required import: from keras.models import Sequential
# Alternatively: from keras.models.Sequential import get_config
def test_nested_sequential():
    """Smoke-test a Sequential nested two levels deep (inner -> middle ->
    model): training, inference, weight persistence, and serialization
    via get_config/from_config, JSON and YAML.

    NOTE(review): relies on module-level fixtures (_get_test_data,
    nb_hidden, input_dim, nb_class, batch_size, nb_epoch) -- confirm
    against the enclosing test module.
    """
    (X_train, y_train), (X_test, y_test) = _get_test_data()
    inner = Sequential()
    inner.add(Dense(nb_hidden, input_shape=(input_dim,)))
    inner.add(Activation("relu"))
    inner.add(Dense(nb_class))
    # 'middle' wraps 'inner'; 'model' wraps 'middle' -- two nesting levels.
    middle = Sequential()
    middle.add(inner)
    model = Sequential()
    model.add(middle)
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)
    model.train_on_batch(X_train[:32], y_train[:32])
    loss = model.evaluate(X_test, y_test, verbose=0)
    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)
    fname = "test_nested_sequential_temp.h5"
    model.save_weights(fname, overwrite=True)
    # Rebuild the identical nested architecture, load the saved weights,
    # and check the evaluation result is reproduced exactly.
    inner = Sequential()
    inner.add(Dense(nb_hidden, input_shape=(input_dim,)))
    inner.add(Activation("relu"))
    inner.add(Dense(nb_class))
    middle = Sequential()
    middle.add(inner)
    model = Sequential()
    model.add(middle)
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")
    model.load_weights(fname)
    os.remove(fname)
    nloss = model.evaluate(X_test, y_test, verbose=0)
    assert loss == nloss
    # test serialization
    config = model.get_config()
    new_model = Sequential.from_config(config)
    model.summary()
    json_str = model.to_json()
    new_model = model_from_json(json_str)
    yaml_str = model.to_yaml()
    new_model = model_from_yaml(yaml_str)
示例11: test_recursive
# Required import: from keras.models import Sequential
# Alternatively: from keras.models.Sequential import get_config
def test_recursive():
    # Layer-like API: embed a Graph model as a layer of a Sequential,
    # then round-trip the combined model through every serializer.
    net = Graph()
    net.add_input(name='input1', input_shape=(32,))
    net.add_node(Dense(16), name='dense1', input='input1')
    net.add_node(Dense(4), name='dense2', input='input1')
    net.add_node(Dense(4), name='dense3', input='dense1')
    net.add_output(name='output1', inputs=['dense2', 'dense3'],
                   merge_mode='sum')
    wrapper = Sequential()
    wrapper.add(Dense(32, input_shape=(32,)))
    wrapper.add(net)
    wrapper.add(Dense(4))
    wrapper.compile('rmsprop', 'mse')
    wrapper.fit(X_train_graph, y_train_graph, batch_size=10, nb_epoch=10)
    loss = wrapper.evaluate(X_test_graph, y_test_graph)
    # Serialization round-trips: config dict, JSON, YAML.
    new_graph = Sequential.from_config(wrapper.get_config())
    wrapper.summary()
    new_graph = model_from_json(wrapper.to_json())
    new_graph = model_from_yaml(wrapper.to_yaml())
示例12: test_temporal_classification
# Required import: from keras.models import Sequential
# Alternatively: from keras.models.Sequential import get_config
def test_temporal_classification():
    """Classify temporal sequences of floats (length 3) into 2 classes
    with a single GRU layer and a softmax over its final activations,
    then rebuild the model from its serialized config."""
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=200,
                                                         num_test=20,
                                                         input_shape=(3, 4),
                                                         classification=True,
                                                         num_classes=2)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)
    net = Sequential()
    net.add(layers.GRU(8, input_shape=(x_train.shape[1], x_train.shape[2])))
    net.add(layers.Dense(y_train.shape[-1], activation='softmax'))
    net.compile(loss='categorical_crossentropy',
                optimizer='rmsprop',
                metrics=['accuracy'])
    net.summary()
    history = net.fit(x_train, y_train, epochs=4, batch_size=10,
                      validation_data=(x_test, y_test),
                      verbose=0)
    assert history.history['acc'][-1] >= 0.8
    # Round-trip through get_config / from_config.
    net = Sequential.from_config(net.get_config())
示例13: gen_model
# Required import: from keras.models import Sequential
# Alternatively: from keras.models.Sequential import get_config
def gen_model(vocab_size=100, embedding_size=128, maxlen=100, output_size=6, hidden_layer_size=100, num_hidden_layers=1, RNN_LAYER_TYPE="LSTM"):
    """Build and compile a stacked recurrent sequence-labelling model.

    Architecture: Embedding -> Dropout -> `num_hidden_layers` recurrent
    layers (each followed by Dropout) -> one recurrent layer projecting
    to `output_size` -> Dropout -> per-timestep TimeDistributedDense
    softmax.

    `RNN_LAYER_TYPE` selects "LSTM" (default) or "GRU". Returns the
    compiled Sequential model.

    NOTE(review): compiles against the module-level global `optimizer`
    (it is not a parameter) -- confirm the enclosing module defines it.
    """
    RNN_CLASS = LSTM
    if RNN_LAYER_TYPE == "GRU":
        RNN_CLASS = GRU
    # Fix: num_hidden_layers was accepted but missing from the parameter log.
    logger.info("Parameters: vocab_size = %s, embedding_size = %s, maxlen = %s, output_size = %s, hidden_layer_size = %s, num_hidden_layers = %s, " %
                (vocab_size, embedding_size, maxlen, output_size, hidden_layer_size, num_hidden_layers))
    logger.info("Building Model")
    model = Sequential()
    logger.info("Init Model with vocab_size = %s, embedding_size = %s, maxlen = %s" % (vocab_size, embedding_size, maxlen))
    model.add(Embedding(vocab_size, embedding_size, input_length=maxlen))
    logger.info("Added Embedding Layer")
    model.add(Dropout(0.5))
    logger.info("Added Dropout Layer")
    # Fix: range() instead of Python-2-only xrange() (identical iteration).
    for i in range(num_hidden_layers):
        model.add(RNN_CLASS(output_dim=hidden_layer_size, activation='sigmoid', inner_activation='hard_sigmoid', return_sequences=True))
        logger.info("Added %s Layer" % RNN_LAYER_TYPE)
        model.add(Dropout(0.5))
        logger.info("Added Dropout Layer")
    model.add(RNN_CLASS(output_dim=output_size, activation='sigmoid', inner_activation='hard_sigmoid', return_sequences=True))
    logger.info("Added %s Layer" % RNN_LAYER_TYPE)
    model.add(Dropout(0.5))
    logger.info("Added Dropout Layer")
    model.add(TimeDistributedDense(output_size, activation="softmax"))
    # Fix: this log line previously (and wrongly) said "Added Dropout Layer".
    logger.info("Added TimeDistributedDense Layer")
    logger.info("Created model with following config:\n%s" % json.dumps(model.get_config(), indent=4))
    logger.info("Compiling model with optimizer %s" % optimizer)
    start_time = time.time()
    model.compile(loss='categorical_crossentropy', optimizer=optimizer)
    total_time = time.time() - start_time
    logger.info("Model compiled in %.4f seconds." % total_time)
    return model
示例14: test_image_classification
# Required import: from keras.models import Sequential
# Alternatively: from keras.models.Sequential import get_config
def test_image_classification():
    """Train a tiny convnet on random 16x16 RGB data, check validation
    accuracy, and rebuild the model from its serialized config."""
    np.random.seed(1337)
    input_shape = (16, 16, 3)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=500,
                                                         num_test=200,
                                                         input_shape=input_shape,
                                                         classification=True,
                                                         num_classes=4)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)
    cnn = Sequential([
        layers.Conv2D(filters=8, kernel_size=3, activation='relu',
                      input_shape=input_shape),
        layers.MaxPooling2D(pool_size=2),
        layers.Conv2D(filters=4, kernel_size=(3, 3), activation='relu',
                      padding='same'),
        layers.GlobalAveragePooling2D(),
        layers.Dense(y_test.shape[-1], activation='softmax'),
    ])
    cnn.compile(loss='categorical_crossentropy',
                optimizer='rmsprop',
                metrics=['accuracy'])
    cnn.summary()
    history = cnn.fit(x_train, y_train, epochs=10, batch_size=16,
                      validation_data=(x_test, y_test),
                      verbose=0)
    assert history.history['val_acc'][-1] > 0.75
    # Round-trip through get_config / from_config.
    cnn = Sequential.from_config(cnn.get_config())
示例15: test_vector_classification
# Required import: from keras.models import Sequential
# Alternatively: from keras.models.Sequential import get_config
def test_vector_classification():
    """Classify random float vectors into 2 classes with a small two-
    hidden-layer ReLU network, then rebuild it from its config."""
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=500,
                                                         num_test=200,
                                                         input_shape=(20,),
                                                         classification=True,
                                                         num_classes=2)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)
    # Test with Sequential API
    mlp = Sequential([
        layers.Dense(16, input_shape=(x_train.shape[-1],), activation='relu'),
        layers.Dense(8),
        layers.Activation('relu'),
        layers.Dense(y_train.shape[-1], activation='softmax'),
    ])
    mlp.compile(loss='categorical_crossentropy',
                optimizer='rmsprop',
                metrics=['accuracy'])
    mlp.summary()
    history = mlp.fit(x_train, y_train, epochs=15, batch_size=16,
                      validation_data=(x_test, y_test),
                      verbose=0)
    assert history.history['val_acc'][-1] > 0.8
    # Round-trip through get_config / from_config.
    mlp = Sequential.from_config(mlp.get_config())