本文整理汇总了Python中keras.models.Sequential.add方法的典型用法代码示例。如果您正苦于以下问题:Python Sequential.add方法的具体用法?Python Sequential.add怎么用?Python Sequential.add使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类keras.models.Sequential
的用法示例。
在下文中一共展示了Sequential.add方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: build_partial_cnn1
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import add [as 别名]
def build_partial_cnn1(img_rows, img_cols):
    """Build the first (partial) stage of a CNN: one small conv layer + ReLU.

    Args:
        img_rows: input image height in pixels.
        img_cols: input image width in pixels.

    Returns:
        An uncompiled ``Sequential`` model ending in a ReLU activation.
    """
    model = Sequential()
    # Single 2x2 convolution with 10 filters. Activation is 'linear' here
    # because the ReLU is added explicitly as a separate layer below.
    model.add(Convolution2D(nb_filter=10, nb_row=2, nb_col=2,
                            init='glorot_uniform', activation='linear',
                            border_mode='valid',
                            input_shape=(1, img_rows, img_cols)))
    model.add(Activation('relu'))
    # NOTE(review): the original carried the rest of the network (pooling,
    # a second conv, and a dense head) inside a triple-quoted string — a
    # no-op expression, i.e. dead code — which has been removed here.
    return model
示例2: create
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import add [as 别名]
def create(self):
    """Assemble the language branch and the visual branch, merge them,
    and finish with a softmax classification head on this model."""
    # --- textual branch ---
    lang_net = Sequential()
    self.textual_embedding(lang_net, mask_zero=True)
    self.temporal_pooling(lang_net)
    lang_net.add(DropMask())
    #lang_net.add(BatchNormalization(mode=1))
    self.language_model = lang_net

    # --- visual branch (perception chosen by config) ---
    factory = select_sequential_visual_model[
        self._config.trainable_perception_name](self._config.visual_dim)
    vis_net = factory.create()
    self.visual_embedding(vis_net, factory.get_dimensionality())
    #vis_net.add(BatchNormalization(mode=1))
    self.visual_model = vis_net

    # --- fuse the two branches ---
    merge_mode = self._config.multimodal_merge_mode
    if merge_mode == 'dot':
        self.add(Merge([lang_net, vis_net], mode='dot', dot_axes=[(1,), (1,)]))
    else:
        self.add(Merge([lang_net, vis_net], mode=merge_mode))

    # --- classifier head ---
    self.deep_mlp()
    self.add(Dense(self._config.output_dim))
    self.add(Activation('softmax'))
示例3: define_model
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import add [as 别名]
def define_model(lr, momentum):
    """Build and compile the convnet classifier.

    Args:
        lr: SGD learning rate.
        momentum: SGD momentum.

    Returns:
        A compiled ``Sequential`` model (categorical cross-entropy loss).
    """
    net = Sequential()
    # Convolutional feature extractor: shrinking kernels with 2x2 pooling.
    net.add(Convolution2D(8, 9, 9, activation="relu", input_shape=(1, 100, 100)))
    net.add(MaxPooling2D(pool_size=(2, 2)))
    net.add(Convolution2D(16, 7, 7, activation="relu"))
    net.add(MaxPooling2D(pool_size=(2, 2)))
    net.add(Convolution2D(32, 5, 5, activation="relu"))
    net.add(MaxPooling2D(pool_size=(2, 2)))
    net.add(Convolution2D(64, 3, 3, activation="relu"))
    net.add(MaxPooling2D(pool_size=(2, 2)))
    net.add(Convolution2D(250, 3, 3, activation="relu"))
    # Fully-connected classification head (2 classes).
    net.add(Flatten())
    net.add(Dense(125, activation="relu"))
    net.add(Dense(2, activation="softmax"))
    # Compile with plain SGD (learning rate, momentum from the caller).
    net.compile(loss="categorical_crossentropy",
                optimizer=SGD(lr=lr, momentum=momentum))
    return net
示例4: test_LambdaCallback
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import add [as 别名]
def test_LambdaCallback():
    """LambdaCallback's on_train_end hook must fire: it terminates a
    busy-looping helper process once training completes."""
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(
        num_train=train_samples,
        num_test=test_samples,
        input_shape=(input_dim,),
        classification=True,
        num_classes=num_class)
    y_train = np_utils.to_categorical(y_train)
    y_test = np_utils.to_categorical(y_test)

    net = Sequential()
    net.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    net.add(Dense(num_class, activation='softmax'))
    net.compile(loss='categorical_crossentropy', optimizer='sgd',
                metrics=['accuracy'])

    # Worker that never exits on its own; only the callback can stop it.
    def spin():
        while True:
            pass

    worker = multiprocessing.Process(target=spin)
    worker.start()
    stop_worker = callbacks.LambdaCallback(
        on_train_end=lambda logs: worker.terminate())
    net.fit(X_train, y_train, batch_size=batch_size,
            validation_data=(X_test, y_test), callbacks=[stop_worker],
            epochs=5)
    worker.join()
    assert not worker.is_alive()
示例5: get_ts_model
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import add [as 别名]
def get_ts_model(trainX, trainY, look_back=1, nb_epochs=100):
    """Build, compile, and fit a small LSTM regressor for a time series.

    Input rows have shape (*, look_back); the model outputs shape (*, 1).
    Returns the trained model.
    """
    model = Sequential()
    model.add(LSTM(20, input_shape=(None, look_back)))
    # A stack of six single-unit Dense layers, as in the original design.
    for _ in range(6):
        model.add(Dense(1))
    # MSE: values closer to zero are better. It weighs large errors more
    # heavily than small ones; use MAE where that is undesired.
    # Available loss functions: https://keras.io/objectives
    model.compile(loss='mean_squared_error', optimizer='adam')
    print('Start : Training model')
    # Default training configuration: online (batch_size=1) updates.
    model.fit(trainX, trainY, nb_epoch=nb_epochs, batch_size=1, verbose=2)
    print('Ends : Training Model')
    return model
示例6: train_rnn
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import add [as 别名]
def train_rnn(character_corpus, seq_len, train_test_split_ratio):
model = Sequential()
model.add(Embedding(character_corpus.char_num(), 256))
model.add(LSTM(256, 5120, activation='sigmoid', inner_activation='hard_sigmoid', return_sequences=True))
model.add(Dropout(0.5))
model.add(TimeDistributedDense(5120, character_corpus.char_num()))
model.add(Activation('time_distributed_softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
seq_X, seq_Y = character_corpus.make_sequences(seq_len)
print "Sequences are made"
train_seq_num = train_test_split_ratio*seq_X.shape[0]
X_train = seq_X[:train_seq_num]
Y_train = to_time_distributed_categorical(seq_Y[:train_seq_num], character_corpus.char_num())
X_test = seq_X[train_seq_num:]
Y_test = to_time_distributed_categorical(seq_Y[train_seq_num:], character_corpus.char_num())
print "Begin train model"
checkpointer = ModelCheckpoint(filepath="model.step", verbose=1, save_best_only=True)
model.fit(X_train, Y_train, batch_size=256, nb_epoch=100, verbose=2, validation_data=(X_test, Y_test), callbacks=[checkpointer])
print "Model is trained"
score = model.evaluate(X_test, Y_test, batch_size=512)
print "valid score = ", score
return model
示例7: test_img_clf
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import add [as 别名]
def test_img_clf(self):
    """A tiny convnet should exceed 0.9 validation accuracy on the toy
    image-classification dataset."""
    print('image classification data:')
    (X_train, y_train), (X_test, y_test) = get_test_data(
        nb_train=1000,
        nb_test=200,
        input_shape=(3, 8, 8),
        classification=True,
        nb_class=2)
    for label, arr in (('X_train:', X_train), ('X_test:', X_test),
                       ('y_train:', y_train), ('y_test:', y_test)):
        print(label, arr.shape)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    clf = Sequential()
    clf.add(Convolution2D(8, 8, 8, input_shape=(3, 8, 8)))
    clf.add(Activation('sigmoid'))
    clf.add(Flatten())
    clf.add(Dense(y_test.shape[-1]))
    clf.add(Activation('softmax'))
    clf.compile(loss='categorical_crossentropy', optimizer='sgd')

    history = clf.fit(X_train, y_train, nb_epoch=12, batch_size=16,
                      validation_data=(X_test, y_test),
                      show_accuracy=True, verbose=0)
    val_acc = history.history['val_acc'][-1]
    print(val_acc)
    self.assertTrue(val_acc > 0.9)
示例8: __init__
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import add [as 别名]
def __init__(self):
model = Sequential()
model.add(Embedding(115227, 50, input_length=75, weights=pre_weights))
model.compile(loss=MCE, optimizer="adadelta")
print "Build Network Completed..."
self.model = model
self.vocab = {"get_index":{}, "get_word":[]}
示例9: test_multiprocessing_predict_error
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import add [as 别名]
def test_multiprocessing_predict_error():
    """predict_generator must propagate an exception raised inside the
    generator, both with and without pickle_safe worker processes."""
    batch_size = 32
    good_batches = 5

    def failing_generator():
        """Raises an exception after a few good batches"""
        for _ in range(good_batches):
            yield (np.random.randint(batch_size, 256, (500, 2)),
                   np.random.randint(batch_size, 2, 500))
        raise RuntimeError

    net = Sequential()
    net.add(Dense(1, input_shape=(2,)))
    net.compile(loss='mse', optimizer='adadelta')

    # Ask for one more batch than the generator can deliver.
    n_samples = batch_size * (good_batches + 1)
    with pytest.raises(Exception):
        net.predict_generator(failing_generator(), n_samples, 1,
                              nb_worker=4, pickle_safe=True)
    with pytest.raises(Exception):
        net.predict_generator(failing_generator(), n_samples, 1,
                              pickle_safe=False)
示例10: test_simple_keras_udf
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import add [as 别名]
def test_simple_keras_udf(self):
    """Register a simple Keras sequential model as an image UDF."""
    # The input layer of an image UDF model must be shaped
    # (width, height, numChannels); Keras handles the leading batch dim.
    with IsolatedSession(using_keras=True) as issn:
        net = Sequential()
        net.add(Flatten(input_shape=(640, 480, 3)))
        net.add(Dense(units=64))
        net.add(Activation('relu'))
        net.add(Dense(units=10))
        net.add(Activation('softmax'))
        # Initialize the TF variables backing the Keras layers.
        issn.run(tf.global_variables_initializer())
        makeGraphUDF(issn.graph,
                     'my_keras_model_udf',
                     net.outputs,
                     {tfx.op_name(issn.graph, net.inputs[0]): 'image_col'})
        # Export the graph in this IsolatedSession as a GraphFunction
        # gfn = issn.asGraphFunction(net.inputs, net.outputs)
        fh_name = "test_keras_simple_sequential_model"
        registerKerasImageUDF(fh_name, net)
        self._assert_function_exists(fh_name)
示例11: test_multiprocessing_predicting
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import add [as 别名]
def test_multiprocessing_predicting():
    """predict_generator should run to completion both with and without
    multiprocessing workers."""
    reached_end = False
    arr_data = np.random.randint(0, 256, (500, 2))

    def window_generator():
        """Endlessly yield random 32-row windows of arr_data."""
        batch_size = 32
        n_samples = 500
        while True:
            start = np.random.randint(0, n_samples - batch_size)
            yield arr_data[start:start + batch_size]

    # Build a minimal NN.
    net = Sequential()
    net.add(Dense(1, input_shape=(2,)))
    net.compile(loss='mse', optimizer='adadelta')

    # Multi-process and single-process prediction paths.
    net.predict_generator(window_generator(), val_samples=320,
                          max_q_size=10, nb_worker=2, pickle_safe=True)
    net.predict_generator(window_generator(), val_samples=320,
                          max_q_size=10, pickle_safe=False)
    reached_end = True
    assert reached_end
示例12: ae
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import add [as 别名]
def ae(data, feature_dim, train, test, learning_rate, lr_decay, reg_fn, l, momentum, evaluation):
    """Fit a single-layer autoencoder.

    If ``evaluation`` is truthy, return the MSE of reconstructing the
    test split; otherwise return (encoded output, name, reconstructor).
    """
    batch_size = len(train)
    data_dim = data.shape[1]

    model = single_layer_autoencoder(data_dim, feature_dim, reg_fn(l),
                                     learning_rate, lr_decay, momentum)
    model.fit(data[train], data[train], batch_size=batch_size,
              nb_epoch=nb_epoch, verbose=verbose)
    output = model.predict(data)

    # Reconstruction network: reuses the trained decoder weights.
    model_rec = Sequential()
    model_rec.add(Dense(data_dim, input_dim=feature_dim,
                        activation=activation,
                        weights=model.layers[0].decoder.get_weights()[0:2]))
    model_rec.layers[0].get_input(False)  # Get input from testing data
    model_rec.compile(loss='mse', optimizer='sgd')

    if evaluation:
        data_rec = model_rec.predict(output[test])
        return mean_squared_error(data[test], data_rec)
    return output, 'Autoencoder', model_rec.predict
示例13: MLP
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import add [as 别名]
class MLP(BaseEstimator):
    """Two-layer perceptron wrapped as a scikit-learn style estimator."""

    def __init__(self, verbose=0, model=None, final_activation='sigmoid'):
        self.verbose = verbose
        self.model = model
        self.final_activation = final_activation

    def fit(self, X, y):
        """Lazily build the network, then train via a batch generator."""
        if not self.model:
            net = Sequential()
            net.add(Dense(1000, input_dim=X.shape[1]))
            net.add(Activation('relu'))
            net.add(Dropout(0.5))
            net.add(Dense(y.shape[1]))
            net.add(Activation(self.final_activation))
            net.compile(loss='categorical_crossentropy',
                        optimizer=Adam(lr=0.01))
            self.model = net
        self.model.fit_generator(generator=_batch_generator(X, y, 256, True),
                                 samples_per_epoch=X.shape[0], nb_epoch=20,
                                 verbose=self.verbose)

    def predict(self, X):
        """Binarize probability predictions at the 0.2 threshold."""
        return sparse.csr_matrix(self.predict_proba(X) > 0.2)

    def predict_proba(self, X):
        """Predict class probabilities batch-wise via a generator."""
        return self.model.predict_generator(
            generator=_batch_generatorp(X, 512), val_samples=X.shape[0])
示例14: getVggModel
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import add [as 别名]
def getVggModel():
    """Pretrained VGG16 with a fine-tunable top.

    All but the last three layers of the base network are frozen; a
    regularized ELU MLP head regresses a single linear output from
    160x320x3 input.

    Returns:
        An uncompiled functional ``Model``.
    """
    input_image = Input(shape=(160, 320, 3))
    # NOTE(review): the original also built a Sequential with a
    # normalization Lambda and a Cropping2D layer but never connected it
    # to the graph, and assigned an unused `W_regularizer` local. Both
    # were dead code and have been removed; the returned model still
    # expects raw, uncropped 160x320x3 input, exactly as before.
    base_model = VGG16(input_tensor=input_image, include_top=False)
    # Freeze everything except the last three layers of the base network.
    for layer in base_model.layers[:-3]:
        layer.trainable = False
    x = base_model.get_layer("block5_conv3").output
    x = AveragePooling2D((2, 2))(x)
    x = Dropout(0.5)(x)
    x = BatchNormalization()(x)
    x = Dropout(0.5)(x)
    x = Flatten()(x)
    x = Dense(4096, activation="elu", W_regularizer=l2(0.01))(x)
    x = Dropout(0.5)(x)
    x = Dense(2048, activation="elu", W_regularizer=l2(0.01))(x)
    x = Dense(2048, activation="elu", W_regularizer=l2(0.01))(x)
    x = Dense(1, activation="linear")(x)
    return Model(input=input_image, output=x)
示例15: model
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import add [as 别名]
def model(X_train, X_test, y_train, y_test, max_features, maxlen):
    """Train an LSTM binary classifier; hyperas substitutes concrete
    values for the {{...}} hyperparameter templates before execution.

    Returns a hyperopt result dict (negated accuracy as the loss).
    """
    net = Sequential()
    net.add(Embedding(max_features, 128, input_length=maxlen))
    net.add(LSTM(128))
    net.add(Dropout({{uniform(0, 1)}}))
    net.add(Dense(1))
    net.add(Activation('sigmoid'))
    net.compile(loss='binary_crossentropy',
                optimizer='adam',
                metrics=['accuracy'])

    stopper = EarlyStopping(monitor='val_loss', patience=4)
    saver = ModelCheckpoint(filepath='keras_weights.hdf5',
                            verbose=1,
                            save_best_only=True)
    net.fit(X_train, y_train,
            batch_size={{choice([32, 64, 128])}},
            nb_epoch=1,
            validation_split=0.08,
            callbacks=[stopper, saver])

    score, acc = net.evaluate(X_test, y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': net}