本文整理汇总了Python中keras.models.Sequential.compile方法的典型用法代码示例。如果您正苦于以下问题:Python Sequential.compile方法的具体用法?Python Sequential.compile怎么用?Python Sequential.compile使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 keras.models.Sequential 的用法示例。
在下文中一共展示了Sequential.compile方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: train
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import compile [as 别名]
def train():
    """Build and train a binary-sentiment LSTM, then pickle its weights.

    Relies on module-level globals: ``max_features``, ``maxlen``,
    ``batch_size``, ``X_train``/``y_train`` and ``X_test``/``y_test``.
    Saves the trained weights to ``save_weight_lstm.pickle``.
    """
    print('Build model...')
    model = Sequential()
    # Word embeddings -> single LSTM -> sigmoid score in [0, 1].
    for layer in (
        Embedding(max_features, 128, input_length=maxlen, dropout=0.2),
        LSTM(128, dropout_W=0.2, dropout_U=0.2),  # a GRU would also work here
        Dense(1),
        Activation('sigmoid'),
    ):
        model.add(layer)
    # Other optimizers / optimizer configs are worth experimenting with.
    model.compile(loss='binary_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    print('Train...')
    print(X_train.shape)
    print(y_train.shape)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=15,
              validation_data=(X_test, y_test))
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)
    print('Test score:', score)
    print('Test accuracy:', acc)
    # Persist the learned weights so the model can be restored later.
    with open("save_weight_lstm.pickle", mode="wb") as f:
        pickle.dump(model.get_weights(), f)
示例2: model
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import compile [as 别名]
def model(X_train, X_test, y_train, y_test, maxlen, max_features):
    """Hyperas search target: CNN + LSTM binary classifier.

    The ``{{...}}`` expressions are hyperas template placeholders, not plain
    Python -- hyperas substitutes sampled hyperparameter values into the
    function source before it is compiled and executed.
    Returns the dict format expected by hyperopt (negated accuracy as loss).
    """
    embedding_size = 300
    pool_length = 4
    lstm_output_size = 100
    batch_size = 200
    nb_epoch = 1
    model = Sequential()
    model.add(Embedding(max_features, embedding_size, input_length=maxlen))
    # Dropout rate sampled uniformly from [0, 1] by hyperas.
    model.add(Dropout({{uniform(0, 1)}}))
    # Note that we use unnamed parameters here, which is bad style, but is used here
    # to demonstrate that it works. Always prefer named parameters.
    model.add(Convolution1D({{choice([64, 128])}},
                            {{choice([6, 8])}},
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=pool_length))
    model.add(LSTM(lstm_output_size))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    print('Train...')
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              validation_data=(X_test, y_test))
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)
    print('Test score:', score)
    print('Test accuracy:', acc)
    # hyperopt minimises 'loss', hence the negated accuracy.
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
示例3: parallel_CNN
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import compile [as 别名]
def parallel_CNN():
    """Build four parallel Conv2D + max-pool branches merged by concatenation.

    Relies on module-level globals ``input_shape`` and ``label_num``.
    Returns the compiled multi-label model (one sigmoid output per label).
    """
    filter_shapes = [[2, 300], [3, 300], [4, 300], [5, 300]]
    pool_shapes = [[25, 1], [24, 1], [23, 1], [22, 1]]
    # One convolutional branch per (filter, pool) shape pair.
    sub_models = []
    for filter_shape, pool_shape in zip(filter_shapes, pool_shapes):
        branch = Sequential()
        branch.add(Convolution2D(nb_filter=512,
                                 nb_row=filter_shape[0],
                                 nb_col=filter_shape[1],
                                 border_mode='valid',
                                 activation='relu',
                                 input_shape=(input_shape[0], input_shape[1], input_shape[2])))
        # NOTE(review): pool_shape[1] is used for both pooling dimensions, so
        # the first element of each pool shape is never read -- confirm this
        # (1, 1) pooling is intended rather than (pool_shape[0], pool_shape[1]).
        branch.add(MaxPooling2D(pool_size=(pool_shape[1], pool_shape[1])))
        sub_models.append(branch)
    model = Sequential()
    model.add(Merge(sub_models, mode='concat'))
    # Fully connected layer with dropout.
    model.add(Flatten())
    model.add(Dense(output_dim=256, activation='relu', input_dim=2048))
    model.add(Dropout(0.5))
    # Output layer: independent sigmoid probability per label.
    model.add(Dense(output_dim=label_num, activation='sigmoid', input_dim=256))
    adadelta = Adadelta(lr=1.0, rho=0.95, epsilon=1e-6)
    model.compile(loss='binary_crossentropy', class_mode='multi_label',
                  optimizer=adadelta)
    return model
示例4: model_1
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import compile [as 别名]
def model_1(lr=.001, rho=.9, epsilon=1.0e-6):
    """Small convolutional classifier for (3, 101, 101) inputs.

    NOTE(review): ``rho`` and ``epsilon`` are accepted but never used --
    only ``lr`` is forwarded to the Adamax optimizer; confirm whether they
    were meant for a different optimizer (e.g. RMSprop/Adadelta).
    """
    dnn = Sequential()
    dnn.add(BatchNormalization(input_shape=(3, 101, 101)))
    # Five conv -> max-pool -> LeakyReLU stages; kernel sizes vary per stage.
    for kernel in (2, 3, 3, 2, 2):
        dnn.add(Convolution2D(16, kernel, kernel, init='he_normal'))
        dnn.add(MaxPooling2D())
        dnn.add(LeakyReLU(alpha=.01))
    dnn.add(Flatten())
    dnn.add(Dense(100))
    dnn.add(Dense(2))
    dnn.add(Activation('softmax'))
    dnn.compile(loss='binary_crossentropy', optimizer=Adamax(lr=lr))
    return dnn
示例5: cnn
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import compile [as 别名]
def cnn():
    """Two-conv network over (1, nsipm, nsipm) images with a sigmoid head.

    Relies on module-level globals ``nsipm`` and ``N_ELpts``.
    Returns ``(model, N_layers)`` where ``N_layers`` is the tag ``'cnn'``.
    """
    # Input images are tiny (e.g. 8x8), so two 3x3 same-padded convolutions
    # suffice; 'same' padding helps retain information around the edges.
    stack = [
        Convolution2D(32, 3, 3, border_mode='same', input_shape=(1, nsipm, nsipm)),
        Activation('relu'),
        Convolution2D(32, 3, 3, border_mode='same', input_shape=(32, nsipm, nsipm)),
        Activation('relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Flatten(),
        # Dense head shrinking down to 2 values per EL point.
        Dense(output_dim=128),
        Activation('relu'),
        Dense(output_dim=64),
        Activation('relu'),
        Dense(output_dim=2 * N_ELpts),
        Activation('sigmoid'),
    ]
    model = Sequential()
    for layer in stack:
        model.add(layer)
    # Nadam (Adam with Nesterov momentum) is a safe choice for deep networks.
    model.compile(loss='mse',
                  optimizer=Nadam(lr=0.002, beta_1=0.9, beta_2=0.999,
                                  epsilon=1e-08, schedule_decay=0.004))
    N_layers = 'cnn'
    return model, N_layers
示例6: define_model
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import compile [as 别名]
def define_model(lr, momentum):
    """Build a convolutional 2-way classifier for (1, 100, 100) inputs.

    ``lr`` and ``momentum`` are forwarded to the SGD optimizer.
    Returns the compiled model.
    """
    model = Sequential()
    # CONVNET: shrinking kernels with a growing number of filters.
    model.add(Convolution2D(8, 9, 9, activation="relu", input_shape=(1, 100, 100)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(16, 7, 7, activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(32, 5, 5, activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(64, 3, 3, activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(250, 3, 3, activation="relu"))
    # MLP classifier head with a softmax over 2 classes.
    model.add(Flatten())
    model.add(Dense(125, activation="relu"))
    model.add(Dense(2, activation="softmax"))
    # COMPILE (learning rate, momentum, objective...)
    model.compile(loss="categorical_crossentropy",
                  optimizer=SGD(lr=lr, momentum=momentum))
    return model
示例7: test_LambdaCallback
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import compile [as 别名]
def test_LambdaCallback():
    """Verify LambdaCallback's on_train_end hook fires after fit().

    Spawns a busy child process before training and registers a callback
    that terminates it when training ends; the process must be dead after.
    Relies on module-level test globals (train_samples, input_dim, ...).
    """
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(
        num_train=train_samples,
        num_test=test_samples,
        input_shape=(input_dim,),
        classification=True,
        num_classes=num_class)
    y_train = np_utils.to_categorical(y_train)
    y_test = np_utils.to_categorical(y_test)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_class, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    def spin():
        # Busy-wait forever; only p.terminate() can stop this process.
        while True:
            pass

    p = multiprocessing.Process(target=spin)
    p.start()
    cleanup_callback = callbacks.LambdaCallback(on_train_end=lambda logs: p.terminate())
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=[cleanup_callback],
              epochs=5)
    p.join()
    assert not p.is_alive()
示例8: __init__
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import compile [as 别名]
def __init__(self, nb_filters=32, nb_conv=3, nb_pool=2):
    """Build a two-stage convnet over (1, 5, 30) inputs with a 10-way softmax.

    Parameters
    ----------
    nb_filters : int
        Filters in the first conv layer; the second layer uses half as many.
    nb_conv : int
        Square convolution kernel size.
    nb_pool : int
        Square max-pooling window size.

    The compiled model is stored on ``self.model``.
    """
    model = Sequential()
    model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
                            border_mode='valid',
                            input_shape=(1, 5, 30)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    # BUG FIX: '/' yields a float under Python 3 (16.0 for the default 32),
    # but the filter count must be an int -- use floor division.
    model.add(Convolution2D(nb_filters // 2, nb_conv, nb_conv))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(500))
    model.add(Dropout(0.2))
    model.add(Activation('relu'))
    model.add(Dense(500))
    model.add(Dropout(0.2))
    model.add(Activation('relu'))
    model.add(Dense(10))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])
    self.model = model
示例9: create_model
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import compile [as 别名]
def create_model(x_train, y_train, x_test, y_test):
    """
    Create your model...
    """
    # hyperas template placeholders ({{...}}): quantised-uniform layer size
    # and uniform dropout rate, substituted into the source before execution.
    layer_1_size = {{quniform(12, 256, 4)}}
    l1_dropout = {{uniform(0.001, 0.7)}}
    # Record the sampled hyperparameters so they can be returned to hyperopt.
    params = {
        'l1_size': layer_1_size,
        'l1_dropout': l1_dropout
    }
    num_classes = 10
    model = Sequential()
    model.add(Dense(int(layer_1_size), activation='relu'))
    model.add(Dropout(l1_dropout))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSprop(),
                  metrics=['accuracy'])
    model.fit(x_train, y_train, batch_size=128, epochs=10, validation_data=(x_test, y_test))
    score, acc = model.evaluate(x_test, y_test, verbose=0)
    # hyperopt minimises 'loss', hence the negated accuracy.
    out = {
        'loss': -acc,
        'score': score,
        'status': STATUS_OK,
        'model_params': params,
    }
    # optionally store a dump of your model here so you can get it from the database later
    # NOTE(review): tempfile._get_candidate_names is a private CPython API and
    # may change without notice; tempfile.mkstemp would be the supported route.
    temp_name = tempfile.gettempdir()+'/'+next(tempfile._get_candidate_names()) + '.h5'
    model.save(temp_name)
    with open(temp_name, 'rb') as infile:
        model_bytes = infile.read()
    out['model_serial'] = model_bytes
    return out
示例10: simple_cnn_vgg_like
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import compile [as 别名]
def simple_cnn_vgg_like(lr=1e-3, weights_path=None):
    """Return a small VGG-flavoured CNN for (1, 210, 70) inputs.

    When both the cached JSON structure and ``weights_path`` exist, the
    model is restored from them; otherwise it is built from scratch.
    Either way the model is compiled with SGD before being returned.
    """
    img_rows, img_cols = 210, 70
    structure_path = "%s/cache/simple_cnn_vgg_like.json" % config.project.project_path
    have_cache = (weights_path is not None and os.path.exists(weights_path)
                  and os.path.exists(structure_path))
    if have_cache:
        logger.debug("load weigth from fine-tuning weight %s" % weights_path)
        model = model_from_json(open(structure_path).read())
        model.load_weights(weights_path)
    else:
        model = Sequential()
        model.add(ZeroPadding2D((1, 1), input_shape=(1, img_rows, img_cols)))
        model.add(Convolution2D(64, 7, 7, activation='relu'))
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(64, 7, 7, activation='relu'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))
        model.add(Flatten())
        model.add(Dense(4096, activation='relu'))
        model.add(Dropout(0.5))
        # Replacement fully-connected output layer (124 classes).
        model.add(Dense(124, activation='softmax'))
    logger.debug('Model loaded.')
    model.compile(optimizer=SGD(lr=lr, decay=1e-6, momentum=0.9, nesterov=True),
                  loss='categorical_crossentropy', metrics=['accuracy'])
    return model
示例11: simple_cnn_for_test
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import compile [as 别名]
def simple_cnn_for_test(lr=1e-3, weights_path=None):
    """Return a tiny CNN (for testing) over (1, 210, 70) inputs.

    Loads the whole model from ``weights_path`` when that file exists,
    otherwise builds a fresh network; always recompiles with SGD.
    """
    img_rows, img_cols = 210, 70
    if weights_path is not None and os.path.exists(weights_path):
        logging.debug("load weigth from fine-tuning weight %s" % weights_path)
        model = load_model(weights_path)
    else:
        model = Sequential()
        # Four padded 3x3 convolutions with one max-pool in the middle,
        # then a 124-way softmax output layer.
        net = [
            ZeroPadding2D((1, 1), input_shape=(1, img_rows, img_cols)),
            Convolution2D(6, 3, 3, activation='relu'),
            ZeroPadding2D((1, 1)),
            Convolution2D(6, 3, 3, activation='relu'),
            MaxPooling2D((2, 2), strides=(2, 2)),
            ZeroPadding2D((1, 1)),
            Convolution2D(6, 3, 3, activation='relu'),
            ZeroPadding2D((1, 1)),
            Convolution2D(6, 3, 3, activation='relu'),
            Flatten(),
            Dense(124, activation='softmax'),
        ]
        for layer in net:
            model.add(layer)
    logging.debug('Model loaded.')
    optimizer = SGD(lr=lr, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=optimizer, loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
示例12: get_ts_model
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import compile [as 别名]
def get_ts_model(trainX, trainY, look_back=1, nb_epochs=100):
    """Fit and return a small LSTM regressor for a univariate time series.

    trainX/trainY: training arrays; look_back: number of lag features per
    timestep; nb_epochs: training epochs.

    MSE is used for comparison purposes: values closer to zero are better,
    and it weighs large errors more heavily than small ones (use mean
    absolute error when that is undesired). Available losses:
    https://keras.io/objectives.
    """
    model = Sequential()
    # Input is shaped (*, look_back); e.g. a (2, 1) slice looks like
    # [[23], [43]].
    model.add(LSTM(20, input_shape=(None, look_back)))
    # Six stacked single-unit Dense layers end the network.
    # NOTE(review): chained Dense(1) layers with no activation collapse to a
    # single linear map -- presumably experimental; confirm before pruning.
    for _ in range(6):
        model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    print('Start : Training model')
    # Default configuration: online updates (batch_size=1).
    model.fit(trainX, trainY, nb_epoch=nb_epochs, batch_size=1, verbose=2)
    print('Ends : Training Model')
    return model
示例13: baseline_model
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import compile [as 别名]
def baseline_model():
    """Return a compiled 4-input / 3-class MLP baseline.

    NOTE(review): the 3-way output uses 'sigmoid' together with
    categorical_crossentropy; 'softmax' is the conventional pairing --
    confirm the sigmoid choice is intentional before changing it.
    """
    network = Sequential()
    network.add(Dense(4, input_dim=4, init='normal', activation='relu'))
    network.add(Dense(3, init='normal', activation='sigmoid'))
    network.compile(loss='categorical_crossentropy', optimizer='adam',
                    metrics=['accuracy'])
    return network
示例14: _small_model
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import compile [as 别名]
def _small_model(self):
    '''
    Alternative model architecture with fewer layers for computationally
    expensive training datasets.

    Reads self.input_shape, self.kernel_size, self.nb_classes and self.lr;
    returns the compiled Keras model.
    '''
    # print() call form works on both Python 2 (single arg) and Python 3,
    # unlike the former Python-2-only print statement.
    print('Compiling Small Net...')
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=self.input_shape))
    model.add(Convolution2D(64, self.kernel_size, self.kernel_size, activation='relu',
                            input_shape=self.input_shape))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, self.kernel_size, self.kernel_size,
                            activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, self.kernel_size, self.kernel_size,
                            activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, self.kernel_size, self.kernel_size,
                            activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(Flatten())
    model.add(Dense(2048, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2048, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(self.nb_classes, activation='softmax'))
    sgd = SGD(lr=self.lr, decay=0.01, momentum=0.9, nesterov=True)
    # BUG FIX: the configured SGD instance was previously discarded because
    # compile() was passed the string 'sgd' (Keras defaults: no decay, no
    # momentum, default lr). Pass the object so self.lr actually takes effect.
    model.compile(optimizer=sgd, loss='categorical_crossentropy')
    return model
示例15: __init__
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import compile [as 别名]
class QLearn:
def __init__(self, actions, epsilon, alpha, gamma):
    """Q-learning agent whose Q-table is replaced by a small neural network.

    The network maps a 4-dimensional state to 2 action values and is
    trained with MSE loss under RMSprop.
    """
    self.epsilon = epsilon  # exploration constant
    self.alpha = alpha      # discount constant
    self.gamma = gamma      # discount factor
    self.actions = actions
    # Build the value network: 4 inputs -> 50 -> 20 -> 2 outputs.
    # No activations are added, so the outputs are linear (unbounded)
    # action-value estimates.
    self.network = Sequential()
    self.network.add(Dense(50, init='lecun_uniform', input_shape=(4,)))
    self.network.add(Dense(20, init='lecun_uniform'))
    self.network.add(Dense(2, init='lecun_uniform'))
    optimizer = RMSprop()
    self.network.compile(loss='mse', optimizer=optimizer)
    # Get a summary of the network
    self.network.summary()