本文整理汇总了Python中keras.models.Sequential.fit方法的典型用法代码示例。如果您正苦于以下问题:Python Sequential.fit方法的具体用法?Python Sequential.fit怎么用?Python Sequential.fit使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类keras.models.Sequential
的用法示例。
在下文中一共展示了Sequential.fit方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main_separatemodels
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import fit [as 别名]
def main_separatemodels():
    """Train two separate embedding+LSTM encoders (old Keras 0.x API), merge
    their outputs by concatenation into a single sigmoid classifier, then
    train and evaluate on synthetic paired-sequence data.

    Relies on module-level constants (TRAINING_SIZE, TEST_SIZE,
    VOCABULARY_SIZE, EMBEDDING_DIMENSION, HIDDEN_DIMENSION, BATCH_SIZE,
    EPOCHS) and the generate_data2() helper defined elsewhere in this file.
    """
    train_x1, train_x2, train_y = generate_data2(TRAINING_SIZE)
    test_x1, test_x2, test_y = generate_data2(TEST_SIZE)

    print('Defining network...', file=sys.stderr)
    # One embedding -> LSTM branch per input sequence.
    left_branch = Sequential()
    left_branch.add(Embedding(VOCABULARY_SIZE, EMBEDDING_DIMENSION))
    left_branch.add(LSTM(EMBEDDING_DIMENSION, HIDDEN_DIMENSION, return_sequences=False))

    right_branch = Sequential()
    right_branch.add(Embedding(VOCABULARY_SIZE, EMBEDDING_DIMENSION))
    right_branch.add(LSTM(EMBEDDING_DIMENSION, HIDDEN_DIMENSION, return_sequences=False))

    # Concatenate both branch outputs and classify with one sigmoid unit.
    model = Sequential()
    model.add(Merge([left_branch, right_branch], mode='concat'))
    model.add(Dense(HIDDEN_DIMENSION + HIDDEN_DIMENSION, 1, activation='sigmoid'))

    print('Compiling...', file=sys.stderr)
    model.compile(loss='binary_crossentropy', optimizer='adam', class_mode="binary")

    print('Training...', file=sys.stderr)
    model.fit([train_x1, train_x2], train_y, batch_size=BATCH_SIZE, nb_epoch=EPOCHS,
              validation_split=0.05, show_accuracy=True)

    print("Testing...", file=sys.stderr)
    score, acc = model.evaluate([test_x1, test_x2], test_y, batch_size=BATCH_SIZE,
                                show_accuracy=True)
    print("Testing performance = " + str(score) + ", acc = " + str(acc))
示例2: test_TensorBoard_with_ReduceLROnPlateau
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import fit [as 别名]
def test_TensorBoard_with_ReduceLROnPlateau(tmpdir):
    """Check that the TensorBoard callback works alongside ReduceLROnPlateau.

    Trains a tiny two-layer dense classifier for two epochs with both
    callbacks active, asserts the TensorBoard log directory was created,
    then removes it and asserts tmpdir is empty again.
    """
    import shutil
    np.random.seed(np.random.randint(1, 1e7))
    log_dir = str(tmpdir / 'logs')

    (x_tr, y_tr), (x_te, y_te) = get_test_data(num_train=train_samples,
                                               num_test=test_samples,
                                               input_shape=(input_dim,),
                                               classification=True,
                                               num_classes=num_class)
    y_te = np_utils.to_categorical(y_te)
    y_tr = np_utils.to_categorical(y_tr)

    net = Sequential()
    net.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    net.add(Dense(num_class, activation='softmax'))
    net.compile(loss='binary_crossentropy',
                optimizer='sgd',
                metrics=['accuracy'])

    # LR schedule reacts to val_loss while TensorBoard writes event files.
    callback_list = [
        callbacks.ReduceLROnPlateau(monitor='val_loss',
                                    factor=0.5,
                                    patience=4,
                                    verbose=1),
        callbacks.TensorBoard(log_dir=log_dir),
    ]
    net.fit(x_tr, y_tr, batch_size=batch_size,
            validation_data=(x_te, y_te), callbacks=callback_list, epochs=2)

    assert os.path.isdir(log_dir)
    shutil.rmtree(log_dir)
    assert not tmpdir.listdir()
示例3: model
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import fit [as 别名]
def model(X_train, X_test, y_train, y_test, maxlen, max_features):
    """Hyperas search template: a Conv1D + LSTM binary sequence classifier.

    The {{...}} expressions are hyperas placeholders expanded per trial;
    this function is not valid plain Python until hyperas rewrites it.
    Trains for one epoch and returns the dict hyperopt expects (negative
    test accuracy as the minimized loss, plus the trained model).
    """
    embedding_size = 300
    pool_length = 4
    lstm_output_size = 100
    batch_size = 200
    nb_epoch = 1
    model = Sequential()
    model.add(Embedding(max_features, embedding_size, input_length=maxlen))
    # Dropout rate sampled uniformly from [0, 1) by hyperas each trial.
    model.add(Dropout({{uniform(0, 1)}}))
    # Note that we use unnamed parameters here, which is bad style, but is used here
    # to demonstrate that it works. Always prefer named parameters.
    model.add(Convolution1D({{choice([64, 128])}},
                            {{choice([6, 8])}},
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=pool_length))
    model.add(LSTM(lstm_output_size))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    print('Train...')
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              validation_data=(X_test, y_test))
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)
    print('Test score:', score)
    print('Test accuracy:', acc)
    # hyperopt minimizes 'loss', so report negated accuracy.
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
示例4: train_rnn
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import fit [as 别名]
def train_rnn(character_corpus, seq_len, train_test_split_ratio):
model = Sequential()
model.add(Embedding(character_corpus.char_num(), 256))
model.add(LSTM(256, 5120, activation='sigmoid', inner_activation='hard_sigmoid', return_sequences=True))
model.add(Dropout(0.5))
model.add(TimeDistributedDense(5120, character_corpus.char_num()))
model.add(Activation('time_distributed_softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
seq_X, seq_Y = character_corpus.make_sequences(seq_len)
print "Sequences are made"
train_seq_num = train_test_split_ratio*seq_X.shape[0]
X_train = seq_X[:train_seq_num]
Y_train = to_time_distributed_categorical(seq_Y[:train_seq_num], character_corpus.char_num())
X_test = seq_X[train_seq_num:]
Y_test = to_time_distributed_categorical(seq_Y[train_seq_num:], character_corpus.char_num())
print "Begin train model"
checkpointer = ModelCheckpoint(filepath="model.step", verbose=1, save_best_only=True)
model.fit(X_train, Y_train, batch_size=256, nb_epoch=100, verbose=2, validation_data=(X_test, Y_test), callbacks=[checkpointer])
print "Model is trained"
score = model.evaluate(X_test, Y_test, batch_size=512)
print "valid score = ", score
return model
示例5: test_EarlyStopping
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import fit [as 别名]
def test_EarlyStopping():
    """Exercise the EarlyStopping callback on val_acc in two configurations:
    explicit 'max' mode with zero patience, then 'auto' mode with patience 2.
    """
    np.random.seed(1337)
    (x_tr, y_tr), (x_te, y_te) = get_test_data(num_train=train_samples,
                                               num_test=test_samples,
                                               input_shape=(input_dim,),
                                               classification=True,
                                               num_classes=num_class)
    y_te = np_utils.to_categorical(y_te)
    y_tr = np_utils.to_categorical(y_tr)

    net = Sequential()
    net.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    net.add(Dense(num_class, activation='softmax'))
    net.compile(loss='categorical_crossentropy',
                optimizer='rmsprop',
                metrics=['accuracy'])

    # Run 1: stop as soon as val_acc fails to improve ('max', patience=0).
    stoppers = [callbacks.EarlyStopping(patience=0, monitor='val_acc', mode='max')]
    history = net.fit(x_tr, y_tr, batch_size=batch_size,
                      validation_data=(x_te, y_te), callbacks=stoppers, epochs=20)

    # Run 2: let EarlyStopping infer the direction ('auto'), patience=2.
    stoppers = [callbacks.EarlyStopping(patience=2, monitor='val_acc', mode='auto')]
    history = net.fit(x_tr, y_tr, batch_size=batch_size,
                      validation_data=(x_te, y_te), callbacks=stoppers, epochs=20)
示例6: train
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import fit [as 别名]
def train():
    """Train an LSTM sentiment classifier on the module-level arrays
    (X_train/y_train/X_test/y_test), report test score/accuracy, and
    pickle the learned weights to save_weight_lstm.pickle."""
    print('Build model...')
    net = Sequential()
    net.add(Embedding(max_features, 128, input_length=maxlen, dropout=0.2))
    net.add(LSTM(128, dropout_W=0.2, dropout_U=0.2))  # try using a GRU instead, for fun
    net.add(Dense(1))
    net.add(Activation('sigmoid'))

    # try using different optimizers and different optimizer configs
    net.compile(loss='binary_crossentropy',
                optimizer='adam',
                metrics=['accuracy'])

    print('Train...')
    print(X_train.shape)
    print(y_train.shape)
    net.fit(X_train, y_train, batch_size=batch_size, nb_epoch=15,
            validation_data=(X_test, y_test))

    score, acc = net.evaluate(X_test, y_test,
                              batch_size=batch_size)
    print('Test score:', score)
    print('Test accuracy:', acc)

    # Persist the trained weights for later reuse.
    with open("save_weight_lstm.pickle", mode="wb") as f:
        pickle.dump(net.get_weights(), f)
示例7: create_model
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import fit [as 别名]
def create_model(x_train, y_train, x_test, y_test):
    """
    Create your model...

    Hyperas search template: a one-hidden-layer softmax MLP whose hidden
    size and dropout rate are sampled per trial via the {{...}} placeholders
    (not valid plain Python until hyperas expands them). Returns the
    hyperopt result dict, including an HDF5-serialized copy of the trained
    model so it can be recovered from the trials database later.
    """
    layer_1_size = {{quniform(12, 256, 4)}}
    l1_dropout = {{uniform(0.001, 0.7)}}
    # Record the sampled hyperparameters alongside the result.
    params = {
        'l1_size': layer_1_size,
        'l1_dropout': l1_dropout
    }
    num_classes = 10
    model = Sequential()
    model.add(Dense(int(layer_1_size), activation='relu'))
    model.add(Dropout(l1_dropout))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSprop(),
                  metrics=['accuracy'])
    model.fit(x_train, y_train, batch_size=128, epochs=10, validation_data=(x_test, y_test))
    score, acc = model.evaluate(x_test, y_test, verbose=0)
    # hyperopt minimizes 'loss', so report negated accuracy.
    out = {
        'loss': -acc,
        'score': score,
        'status': STATUS_OK,
        'model_params': params,
    }
    # optionally store a dump of your model here so you can get it from the database later
    temp_name = tempfile.gettempdir()+'/'+next(tempfile._get_candidate_names()) + '.h5'
    model.save(temp_name)
    with open(temp_name, 'rb') as infile:
        model_bytes = infile.read()
    out['model_serial'] = model_bytes
    return out
示例8: imdb_lstm
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import fit [as 别名]
def imdb_lstm():
max_features = 20000
maxlen = 80 # cut texts after this number of words (among top max_features most common words)
batch_size = 32
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features)
print type(X_train)
exit(0)
print len(X_train), 'train sequences'
print len(X_test), 'test sequences'
print('Pad sequences (samples x time)')
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print('Build model...')
model = Sequential()
model.add(Embedding(max_features, 128, dropout=0.2))
model.add(LSTM(128, dropout_W=0.2, dropout_U=0.2)) # try using a GRU instead, for fun
model.add(Dense(1))
model.add(Activation('sigmoid'))
# try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy', optimizer='adam',metrics=['accuracy'])
print('Train...')
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=15,
validation_data=(X_test, y_test))
score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
示例9: CNN_3_layer
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import fit [as 别名]
def CNN_3_layer(activation):
Xtrain, ytrain, XCV, yCV, Xtest, ytest = load_data("mnist.pkl.gz")
Xtrain = Xtrain.reshape(Xtrain.shape[0], 1, 28, 28)
Xtest = Xtest.reshape(Xtest.shape[0], 1, 28, 28)
XCV = Xtest.reshape(XCV.shape[0], 1, 28, 28)
# 0~9 ten classes
ytrain = np_utils.to_categorical(ytrain, 10)
ytest = np_utils.to_categorical(ytest, 10)
yCV = np_utils.to_categorical(yCV, 10)
# Build the model
model = Sequential()
model.add(Convolution2D(32,3,3,border_mode='valid',input_shape=(1,28,28)))
model.add(Activation(activation))
model.add(Convolution2D(32,3,3))
model.add(Activation(activation))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(16,3,3))
model.add(Activation(activation))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation(activation))
model.add(Dropout(0.5))
model.add(Dense(10))
model.add(Activation('softmax'))
# fit module
print "fit module"
model.compile(loss='categorical_crossentropy',optimizer='adadelta',metrics=['accuracy'])
model.fit(Xtrain,ytrain,batch_size=100,nb_epoch=20,verbose=1,validation_data=(XCV,yCV))
score = model.evaluate(Xtest,ytest, verbose=0)
print score[0]
print score[1]
示例10: __init__
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import fit [as 别名]
class MLP:
    '''
    Thin wrapper around keras.models.Sequential for a dense MLP.

    structure: iterable of layer specs with attributes
        (output_dim, input_dim, init, activation, dropout),
    where dropout is a rate or None to skip dropout after that layer.
    sgd_params_init: SGD hyperparameters (lr, decay, momentum, nesterov).
    loss_name: Keras loss identifier used at compile time.
    '''
    def __init__(self,
                 structure,
                 sgd_params_init=sgd_params(0.1, 1e-6, 0.9, True),
                 loss_name='mean_squared_error'):
        self.model = Sequential()
        for layers in structure:
            self.model.add(Dense(output_dim=layers.output_dim,
                                 input_dim=layers.input_dim,
                                 init=layers.init,
                                 activation=layers.activation))
            # Only insert a Dropout layer when a rate was given.
            if layers.dropout is not None:
                self.model.add(Dropout(layers.dropout))
        sgd = SGD(lr=sgd_params_init.lr,
                  decay=sgd_params_init.decay,
                  momentum=sgd_params_init.momentum,
                  nesterov=sgd_params_init.nesterov)
        self.model.compile(loss=loss_name, optimizer=sgd)

    def train(self, X_train, y_train, nb_epoch=20, batch_size=16):
        """Fit the wrapped model on (X_train, y_train).

        BUG FIX: the original called
            self.model.fit(X_train, y_train, nb.epoch, batch_size)
        which raises NameError (`nb` is undefined) and, even corrected to
        `nb_epoch`, would have passed the epoch count positionally into
        fit()'s batch_size slot. Pass both as keywords instead.
        """
        self.model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch)

    def test(self, X_test, y_test, batch_size=16):
        """Return the model's evaluation score on the test data."""
        return self.model.evaluate(X_test, y_test, batch_size)
示例11: train_the_nn
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import fit [as 别名]
def train_the_nn(features, label, look_back = 1):
    """Fit a tiny 8-unit ReLU MLP regressor mapping `look_back` lagged
    inputs to a single output, training for 200 epochs with batch size 2,
    and return the fitted model."""
    net = Sequential()
    net.add(Dense(8, input_dim=look_back, activation='relu'))
    net.add(Dense(1))
    net.compile(loss='mean_squared_error', optimizer='adam')
    net.fit(features, label, nb_epoch=200, batch_size=2, verbose=2)
    return net
开发者ID:sversage,项目名称:Predicting-Airline-Passengers-Time-Series-NN,代码行数:9,代码来源:loading_exploring_data.py
示例12: trainNN
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import fit [as 别名]
def trainNN():
    """Assemble positive/negative windowed min-max training examples, train
    a four-layer sigmoid MLP to classify 30-sample windows, save it to
    model.h5 and return the trained model."""
    # Positive examples: windows around the P and S picks.
    pos_p_x, pos_s_x = getAllWindowedMinMaxPositiveTrainingData('./sample/example30', preSize=10, postSize=20)
    pos_p_y = np.array([[1]] * len(pos_p_x))
    pos_s_y = np.array([[1]] * len(pos_s_x))

    # Negative examples: 200 random windows of length 30.
    neg_x = getSomeWindowedMinMaxNegativeTrainingData('./sample/example30/', size=30, num=200)
    neg_y = np.array([[0]] * 200)

    # Stack everything into one training set.
    features = np.concatenate([pos_p_x, pos_s_x, neg_x])
    labels = np.concatenate([pos_p_y, pos_s_y, neg_y])

    # Fully-connected sigmoid network: 30 inputs -> 50 -> 50 -> 10 -> 1.
    net = Sequential()
    net.add(Dense(50, input_dim=30, activation='sigmoid'))
    net.add(Dense(50, activation='sigmoid'))
    net.add(Dense(10, activation='sigmoid'))
    net.add(Dense(1, activation='sigmoid'))
    net.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

    net.fit(features, labels, epochs=200, batch_size=32)
    net.save('model.h5')
    return net
示例13: generateModel
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import fit [as 别名]
def generateModel(self,docSeries):
topics = docSeries.topicSeries.keys()
seriesLength = 50
sequenceTuples = []
for j in range(len(topics)):
topic = topics[j]
topicLength = len(docSeries.topicSeries[topic])
for i in range(0,topicLength):
if i+seriesLength < topicLength:
sequenceTuples.append((docSeries.topicSeries[topic][i:i+seriesLength],j))
random.shuffle(sequenceTuples)
X = []
y = []
for s,l in sequenceTuples:
X.append(s)
y.append(l)
X = np.array(X).astype(np.uint8)
y = np_utils.to_categorical(np.array(y)).astype(np.bool)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
print len(X_train),len(y_train)
print X.shape,y.shape
model = Sequential()
model.add(Embedding(50, 64, input_length = seriesLength, mask_zero = True))
model.add(LSTM(64,init='glorot_uniform',inner_init='orthogonal',activation='tanh',inner_activation='hard_sigmoid',return_sequences=False))
model.add(Dropout(0.5))
model.add(Dense(len(topics)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', class_mode='categorical')
early_stopping = EarlyStopping(patience=5, verbose=1)
model.fit(X_train, y_train,nb_epoch=20,show_accuracy=True,verbose=1,shuffle=True)
preds = model.predict_classes(X_test, batch_size=64, verbose=0)
print '\n'
print(classification_report(np.argmax(y_test, axis=1), preds, target_names=topics))
示例14: trainModel
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import fit [as 别名]
def trainModel():
    """Train a small tanh MLP on a 50/50 split of getNNData()'s samples and
    write the architecture (JSON) and weights (HDF5) to disk."""
    inputs, correctOutputs = getNNData()
    print("Collected data")

    # First half trains, second half tests.
    split_in = len(inputs) // 2
    split_out = len(correctOutputs) // 2
    train_in, test_in = inputs[:split_in], inputs[split_in:]
    train_out, test_out = correctOutputs[:split_out], correctOutputs[split_out:]

    net = Sequential()
    net.add(Dense(24, input_shape=(24, )))
    net.add(Activation('tanh'))
    net.add(Dense(24))
    net.add(Activation('tanh'))
    net.add(Dense(5))
    net.add(Activation('softmax'))
    net.summary()
    net.compile(loss='mean_squared_error',
                optimizer=SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True))

    net.fit(train_in, train_out, validation_data=(test_in, test_out))
    print(net.evaluate(test_in, test_out, verbose=0))

    # Persist architecture and weights separately.
    with open('my_model_architecture.json', 'w') as arch_file:
        arch_file.write(net.to_json())
    net.save_weights('my_model_weights.h5', overwrite=True)
示例15: main
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import fit [as 别名]
def main():
train_X = np.load('train_X.npy')
train_y = np.load('train_y.npy')
test_X = np.load('test_X.npy')
test_y = np.load('test_y.npy')
model = Sequential()
model.add(Flatten(input_shape=(15,60,2)))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dense(900))
model.add(Activation('sigmoid'))
print model.summary()
adam = Adam(0.001)
#adagrad = Adagrad(lr=0.01)
model.compile(loss='mse', optimizer=adam)
model.fit(train_X, train_y, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1, validation_data=(test_X, test_y))
model.save_weights('model.h5', overwrite=True)