本文整理汇总了Python中keras.models.Sequential.save_weights方法的典型用法代码示例。如果您正苦于以下问题:Python Sequential.save_weights方法的具体用法?Python Sequential.save_weights怎么用?Python Sequential.save_weights使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类keras.models.Sequential
的用法示例。
在下文中一共展示了Sequential.save_weights方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: train_mlp
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import save_weights [as 别名]
def train_mlp():
    """Train an MLP whose hidden layers are initialized from pre-trained
    weight pickles, then save the fitted weights to HDF5."""
    # Load the three pre-trained weight matrices (params_0..params_2).
    pretrained = []
    for layer_idx in range(3):
        with open('../data/params_%d.pkl' % layer_idx, 'rb') as f:
            layer_weights, _, _ = cPickle.load(f)
        pretrained.append(layer_weights)
    w_0, w_1, w_2 = pretrained

    train_x, train_y = SupervisedLoader.load('../data')

    # 33 -> 64 -> 128 -> 128 -> 1, sigmoid hidden units, relu output.
    model = Sequential()
    model.add(Dense(33, 64, weights=[w_0]))
    model.add(Activation('sigmoid'))
    model.add(Dense(64, 128, weights=[w_1]))
    model.add(Activation('sigmoid'))
    model.add(Dense(128, 128, weights=[w_2]))
    model.add(Dense(128, 1, init='glorot_uniform'))
    model.add(Activation('relu'))

    model.compile(loss='mean_squared_error', optimizer='adagrad')
    model.fit(train_x, train_y, nb_epoch=500, batch_size=128, validation_split=0.2)
    model.save_weights('../data/mlp_params.hdf5')
示例2: train_top_model
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import save_weights [as 别名]
def train_top_model():
    """Train a dense classifier head on pre-computed VGG bottleneck features
    with a linearly decaying learning rate.

    Side effects: writes 'model_top_vgg.h5' (weights) plus the train/val
    loss histories as CSV files.
    """
    start = 0.03      # initial learning rate
    stop = 0.001      # final learning rate
    nb_epoch = 300

    # np.load accepts a filename directly. The original passed text-mode
    # file objects (np.load(open(...))), which leaks the handles and fails
    # under Python 3, where .npy files must be read in binary mode.
    train_data = np.load('bottleneck_features_train.npy')
    train_labels = np.load('label_train.npy')
    validation_data = np.load('bottleneck_features_validation.npy')
    validation_labels = np.load('label_validation.npy')

    model = Sequential()
    model.add(Flatten(input_shape=train_data.shape[1:]))
    model.add(Dense(1000, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='relu'))
    model.add(Dense(30))

    sgd = SGD(lr=start, momentum=0.9, nesterov=True)
    model.compile(loss='mean_squared_error', optimizer=sgd)

    # Linearly anneal the learning rate from `start` to `stop` over training.
    learning_rates = np.linspace(start, stop, nb_epoch)
    change_lr = LearningRateScheduler(lambda epoch: float(learning_rates[epoch]))

    hist = model.fit(train_data, train_labels,
                     nb_epoch=nb_epoch,
                     validation_data=(validation_data, validation_labels),
                     callbacks=[change_lr])

    model.save_weights('model_top_vgg.h5')
    np.savetxt('model_top_vgg_flip_loss.csv', hist.history['loss'])
    np.savetxt('model_top_vgg_flip_val_loss.csv', hist.history['val_loss'])
示例3: train
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import save_weights [as 别名]
def train():
    """Fit a binary classifier on the 8-feature dataset, then persist the
    architecture as JSON and the weights as HDF5."""
    features, labels = load_data()

    # 256 -> 16 -> 1 fully connected net; sigmoid output for binary labels.
    net = Sequential()
    net.add(Dense(256, input_dim=8, init='uniform', activation='relu'))
    net.add(Dense(16, init='uniform', activation='relu'))
    net.add(Dense(1, init='uniform', activation='sigmoid'))

    net.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    net.fit(features, labels, nb_epoch=1000, batch_size=32, shuffle=True)

    # Architecture and weights are saved separately.
    with open("model.json", "w") as json_file:
        json_file.write(net.to_json())
    net.save_weights("model.h5")
    print("Saved model to disk")
示例4: test_nested_sequential
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import save_weights [as 别名]
def test_nested_sequential(in_tmpdir):
    """End-to-end test of a Sequential nested two levels deep (inner inside
    middle inside model): fit, evaluate, predict, weight save/load
    round-trip, and config/JSON/YAML serialization."""
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    # Inner MLP wrapped in `middle` to create two levels of nesting.
    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))
    middle = Sequential()
    middle.add(inner)
    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # Fit under several verbosity / validation configurations.
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=2, validation_split=0.1)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=0)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, shuffle=False)
    model.train_on_batch(x_train[:32], y_train[:32])

    loss = model.evaluate(x_test, y_test, verbose=0)
    model.predict(x_test, verbose=0)
    model.predict_classes(x_test, verbose=0)
    model.predict_proba(x_test, verbose=0)

    # Save weights, rebuild an identical architecture, reload, and check the
    # evaluation loss is exactly reproduced.
    fname = 'test_nested_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)
    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))
    middle = Sequential()
    middle.add(inner)
    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)
    nloss = model.evaluate(x_test, y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)
    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)
    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
示例5: train_model
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import save_weights [as 别名]
def train_model(self):
    """Train an Embedding -> LSTM -> Dense(sigmoid) binary classifier on
    the instance's text data and return the fitted model.

    Side effects: writes 'sa_model_architecture.json' (architecture) and
    'sa_model_weights.h5' (weights); mutates self.x / self.y into arrays.
    (Python 2 code: uses print statements.)
    """
    print '=======begin to prepare data at ' + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + '========='
    # Vocabulary produced by word2vec(); its length (+1, presumably for the
    # padding index — TODO confirm) sizes the embedding input.
    list_sorted = self.word2vec()
    self.y = np.array(list(self.y))
    # Pad/truncate every sequence to max_len so batches are rectangular.
    self.x = list(sequence.pad_sequences(list(self.x), maxlen=max_len))
    self.x = np.array(list(self.x))
    print '=======end to prepare data at ' + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + '========='
    print '=======begin to train model at ' + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + '========='
    model = Sequential()
    model.add(Embedding(input_dim=len(list_sorted) + 1, output_dim=256, input_length=max_len))
    model.add(LSTM(128))
    model.add(Dropout(0.5))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam')
    model.fit(self.x, self.y, batch_size=16, nb_epoch=10)
    # Persist architecture (JSON) and weights (HDF5) separately.
    json_string = model.to_json()
    open('sa_model_architecture.json', 'w').write(json_string)
    model.save_weights('sa_model_weights.h5', overwrite=True)
    print '=======end to train model at ' + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + '========='
    return model
示例6: run_mlp
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import save_weights [as 别名]
def run_mlp(**args):
    """Train a single-hidden-layer MLP regressor and dump rounded predictions.

    Expected keys in **args:
        training_data, training_label: arrays used for fitting.
        test_data: array to predict on.

    Side effects: writes 'mlp_model_architecture.json',
    'mlp_model_weights.h5' and 'mlp_output.p'.

    Returns:
        list[int]: predictions rounded to the nearest integer.
    """
    print("building mlp model:")
    print(args["training_data"].shape[0])
    print(args["training_data"].shape[1])

    model = Sequential()
    model.add(Dense(output_dim=512, input_dim=args["training_data"].shape[1], activation="relu"))
    model.add(Dense(1))
    model.add(Activation("linear"))
    model.compile(loss="mse", optimizer="rmsprop")
    model.fit(args["training_data"], args["training_label"], nb_epoch=50, batch_size=512)

    # Use context managers so file handles are closed deterministically —
    # the original open(...).write(...) / pickle.dump(..., open(...)) calls
    # leaked the handles.
    json_string = model.to_json()
    with open("mlp_model_architecture.json", "w") as arch_file:
        arch_file.write(json_string)
    model.save_weights("mlp_model_weights.h5", overwrite=True)

    output = model.predict(args["test_data"], verbose=1, batch_size=512)
    output_int = list(map(lambda e: int(e), np.round(output)))
    with open("mlp_output.p", "wb") as out_file:
        pickle.dump(output_int, out_file, protocol=4)
    return output_int
示例7: create_model
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import save_weights [as 别名]
def create_model(X_train, Y_train):
    """Create, train, evaluate and persist a small regression neural net.

    Args:
        X_train: Training inputs, shape (n_samples, n_features).
        Y_train: Training targets.

    Side effects:
        Writes 'pickles/my_model_architecture.json' (architecture) and
        'pickles/my_model_weights.h5' (weights).
    """
    xin = X_train.shape[1]

    # 4 -> 4 -> 1 net with tanh hidden activations and a linear head.
    model = Sequential()
    model.add(Dense(units=4, input_shape=(xin, )))
    model.add(Activation('tanh'))
    model.add(Dense(4))
    model.add(Activation('linear'))
    model.add(Dense(1))

    rms = kop.RMSprop()
    print('compiling now..')
    model.compile(loss='mse', optimizer=rms)
    model.fit(X_train, Y_train, epochs=1000, batch_size=1, verbose=2)

    score = model.evaluate(X_train, Y_train, batch_size=1)
    print("Evaluation results:", score)

    # Close the architecture file deterministically — the original
    # open(...).write(...) leaked the handle.
    with open('pickles/my_model_architecture.json', 'w') as arch_file:
        arch_file.write(model.to_json())
    print("Saving weights in: ./pickles/my_model_weights.h5")
    model.save_weights('pickles/my_model_weights.h5')
示例8: pre_train
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import save_weights [as 别名]
def pre_train(self, X=None, monitor='acc', patience=3):
    """Pre-train the Conv1D + LSTM network and persist its weights.

    The saved weights ('model_weights.h5') are later used to initialize the
    real classification network.

    Args:
        X: Series of text data used for pre-training.
        monitor: metric name watched by early stopping.
        patience: epochs without improvement before training stops.
    """
    # Transform the text into word-embedding matrices (persisted for reuse).
    X = self.data_transform.df_towem(X=X, persist=True, direction='data/pre-train-deep.pickle')

    modelo = Sequential()
    modelo.add(Convolution1D(nb_filter=128, filter_length=4, border_mode='same', init='uniform', bias=True,
                             input_shape=(X.shape[1], None), name='conv_layer'))
    modelo.add(Activation('relu'))
    modelo.add(Dropout(0.2))
    modelo.add(MaxPooling1D(pool_length=4, stride=2))
    # Bug fix: `name` is a keyword of the layer, not of Sequential.add() —
    # the original `modelo.add(LSTM(200), name='lstm_layer')` raised a
    # TypeError because add() takes no `name` argument.
    modelo.add(LSTM(200, name='lstm_layer'))
    modelo.add(Activation('tanh'))
    modelo.add(Dropout(0.1))
    modelo.add(Dense(2, activation='softmax'))
    modelo.compile(loss="categorical_crossentropy", optimizer='adamax', metrics=['accuracy'])

    cbks = [callbacks.EarlyStopping(monitor=monitor, patience=patience)]
    # NOTE(review): the net is fit with X as both input and target
    # (autoencoder-style) even though the head is a 2-way softmax — confirm
    # this is the intended pre-training scheme.
    modelo.fit(X, X, callbacks=cbks, validation_split=0.25, shuffle=True,
               nb_epoch=self.epoch_number, batch_size=self.batch_size, verbose=1)
    modelo.save_weights('model_weights.h5')
示例9: test_merge_overlap
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import save_weights [as 别名]
def test_merge_overlap():
    """Regression test: merging the *same* Sequential branch with itself
    (mode='sum') must still train, predict, serialize, and round-trip its
    weights through save/load with an identical evaluation loss."""
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    # `left` appears twice in the merge on purpose — the branches overlap.
    model = Sequential()
    model.add(Merge([left, left], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # Fit under several verbosity / validation / accuracy-display configs.
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)
    model.train_on_batch(X_train[:32], y_train[:32])

    # Training loss must reach a sane level on this toy problem.
    loss = model.evaluate(X_train, y_train, verbose=0)
    assert(loss < 0.7)
    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)
    model.get_config(verbose=0)

    # Weight save/load round trip must reproduce the evaluation loss exactly.
    fname = 'test_merge_overlap_temp.h5'
    model.save_weights(fname, overwrite=True)
    model.load_weights(fname)
    os.remove(fname)
    nloss = model.evaluate(X_train, y_train, verbose=0)
    assert(loss == nloss)
示例10: run_mlp
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import save_weights [as 别名]
def run_mlp(**args):
    """Train a 512-unit MLP regressor, save its weights, and dump predictions.

    Expected keys in **args:
        training_data, training_label: arrays used for fitting.
        test_data: array to predict on.
        output_weight_filename: HDF5 path for the trained weights.
        output_feat_filename: pickle path for the predictions.
        output_type: 'int' to round predictions to ints, anything else raw.

    Returns:
        list[int] when output_type == 'int', otherwise the raw prediction array.
    """
    print("building mlp model:")
    print(args['training_data'].shape[0])
    print(args['training_data'].shape[1])

    model = Sequential()
    model.add(Dense(output_dim=512, input_dim=args['training_data'].shape[1], activation='relu'))
    model.add(Dense(1))
    model.add(Activation('linear'))
    model.compile(loss='mse', optimizer='rmsprop')
    model.fit(args['training_data'], args['training_label'], nb_epoch=20, batch_size=512)

    # Context managers close the handles deterministically — the original
    # open(...).write(...) / pickle.dump(..., open(...)) calls leaked them.
    json_string = model.to_json()
    with open('mlp_model_architecture.json', 'w') as arch_file:
        arch_file.write(json_string)
    model.save_weights(args['output_weight_filename'], overwrite=True)

    output = model.predict(args['test_data'], verbose=1, batch_size=512)
    if (args['output_type']=='int'):
        output_int = list(map(lambda e:int(e), np.round(output)))
        with open(args['output_feat_filename'], 'wb') as feat_file:
            pickle.dump(output_int, feat_file, protocol=4)
        return output_int
    else:
        with open(args['output_feat_filename'], 'wb') as feat_file:
            pickle.dump(output, feat_file, protocol=4)
        return output
示例11: train_lstm
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import save_weights [as 别名]
def train_lstm(n_symbols,embedding_weights,x_train,y_train,x_test,y_test):
    """Train a binary-sentiment LSTM on top of pre-trained word embeddings.

    Args:
        n_symbols: vocabulary size (embedding input dimension).
        embedding_weights: pre-trained embedding matrix.
        x_train, y_train: training sequences and binary labels.
        x_test, y_test: held-out sequences and labels.

    Side effects: writes 'lstm_data/lstm.yml' (architecture) and
    'lstm_data/lstm.h5' (weights). (Python 2 code: uses print statements.)
    """
    print 'Defining a Simple Keras Model...'
    model = Sequential() # or Graph or whatever
    # mask_zero=True treats index 0 as padding so the LSTM skips padded steps.
    model.add(Embedding(output_dim=vocab_dim,
                        input_dim=n_symbols,
                        mask_zero=True,
                        weights=[embedding_weights],
                        input_length=input_length)) # Adding Input Length
    model.add(LSTM(output_dim=50, activation='sigmoid', inner_activation='hard_sigmoid'))
    model.add(Dropout(0.5))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    print 'Compiling the Model...'
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',metrics=['accuracy'])
    print "Train..."
    model.fit(x_train, y_train, batch_size=batch_size, nb_epoch=n_epoch,verbose=1, validation_data=(x_test, y_test),show_accuracy=True)
    print "Evaluate..."
    score = model.evaluate(x_test, y_test,
                           batch_size=batch_size)
    yaml_string = model.to_yaml()
    with open('lstm_data/lstm.yml', 'w') as outfile:
        # NOTE(review): model.to_yaml() already returns a YAML document;
        # yaml.dump()-ing that string wraps it in a second YAML layer, so a
        # reader must yaml.load() the file before model_from_yaml(). Confirm
        # downstream loaders expect this double encoding.
        outfile.write( yaml.dump(yaml_string, default_flow_style=True) )
    model.save_weights('lstm_data/lstm.h5')
    print 'Test score:', score
示例12: get_nn_model
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import save_weights [as 别名]
def get_nn_model(token_dict_size):
    """Build and compile the seq2seq model, restoring saved weights if any.

    Args:
        token_dict_size: output dimension (size of the token dictionary).

    Returns:
        The compiled Sequential model, with weights loaded from
        NN_MODEL_PATH when that file exists.
    """
    _logger.info('Initializing NN model with the following params:')
    _logger.info('Input dimension: %s (token vector size)' % TOKEN_REPRESENTATION_SIZE)
    _logger.info('Hidden dimension: %s' % HIDDEN_LAYER_DIMENSION)
    _logger.info('Output dimension: %s (token dict size)' % token_dict_size)
    _logger.info('Input seq length: %s ' % INPUT_SEQUENCE_LENGTH)
    _logger.info('Output seq length: %s ' % ANSWER_MAX_TOKEN_LENGTH)
    _logger.info('Batch size: %s' % SAMPLES_BATCH_SIZE)

    model = Sequential()
    seq2seq = SimpleSeq2seq(
        input_dim=TOKEN_REPRESENTATION_SIZE,
        input_length=INPUT_SEQUENCE_LENGTH,
        hidden_dim=HIDDEN_LAYER_DIMENSION,
        output_dim=token_dict_size,
        output_length=ANSWER_MAX_TOKEN_LENGTH,
        depth=1
    )
    model.add(seq2seq)
    model.compile(loss='mse', optimizer='rmsprop')

    # Use previously saved weights if they exist; otherwise persist the
    # freshly initialized ones. Bug fix: the original called save_weights()
    # unconditionally *before* the isfile() check, so the check always
    # succeeded and previously trained weights were replaced by the
    # untrained weights just written.
    _logger.info('Looking for a model %s' % NN_MODEL_PATH)
    if os.path.isfile(NN_MODEL_PATH):
        _logger.info('Loading previously calculated weights...')
        model.load_weights(NN_MODEL_PATH)
    else:
        model.save_weights(NN_MODEL_PATH)

    _logger.info('Model is built')
    return model
示例13: trainModel
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import save_weights [as 别名]
def trainModel():
    """Train a 24-24-5 softmax classifier on the NN dataset and persist it.

    The first half of the collected samples is used for training and the
    second half for testing. Writes 'my_model_architecture.json' and
    'my_model_weights.h5'.
    """
    inputs, correctOutputs = getNNData()
    print("Collected data")

    # 50/50 split: first half trains, second half tests.
    trainingInputs = inputs[:len(inputs)//2]
    trainingOutputs = correctOutputs[:len(correctOutputs)//2]
    testInputs = inputs[len(inputs)//2:]
    testOutputs = correctOutputs[len(correctOutputs)//2:]

    model = Sequential()
    model.add(Dense(24, input_shape=(24, )))
    model.add(Activation('tanh'))
    model.add(Dense(24))
    model.add(Activation('tanh'))
    model.add(Dense(5))
    model.add(Activation('softmax'))
    model.summary()
    model.compile(loss='mean_squared_error', optimizer=SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True))
    model.fit(trainingInputs, trainingOutputs, validation_data=(testInputs, testOutputs))

    score = model.evaluate(testInputs, testOutputs, verbose=0)
    print(score)

    # Close the architecture file deterministically — the original
    # open(...).write(...) leaked the handle.
    json_string = model.to_json()
    with open('my_model_architecture.json', 'w') as arch_file:
        arch_file.write(json_string)
    model.save_weights('my_model_weights.h5', overwrite=True)
示例14: train_48calibration_net
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import save_weights [as 别名]
def train_48calibration_net(X_train, y_train):
    """Train the 48x48 calibration CNN and persist architecture + weights.

    Args:
        X_train: images, shape (n, 3, img_rows, img_cols); scaled to [0, 1].
        y_train: integer class labels, one-hot encoded to nb_classes.

    Side effects: writes '../model/48calibration_architecture.json' and
    '../model/48calibration_weights.h5'.
    """
    print (X_train.shape,y_train.shape)
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    X_train = X_train.astype('float32')
    X_train /= 255  # scale pixel values into [0, 1]

    # Two conv layers, pooling after the first, then a 256-unit dense head.
    model = Sequential()
    model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
                            border_mode='valid',
                            input_shape=(3, img_rows, img_cols)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool),strides=(2,2)))
    #model.add(BatchNormalization(mode=2))
    model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
    #model.add(BatchNormalization(mode=2))
    model.add(Flatten())
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])
    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=1, validation_split=0.2)

    # Close the architecture file deterministically — the original
    # open(...).write(...) leaked the handle.
    json_string = model.to_json()
    with open('../model/48calibration_architecture.json', 'w') as arch_file:
        arch_file.write(json_string)
    model.save_weights('../model/48calibration_weights.h5')
示例15: main
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import save_weights [as 别名]
def main():
    """Train a dense network mapping (15, 60, 2) inputs to 900 sigmoid
    outputs and save the weights to 'model.h5'.
    (Python 2 code: uses a print statement.)
    """
    # Pre-serialized train/test splits.
    train_X = np.load('train_X.npy')
    train_y = np.load('train_y.npy')
    test_X = np.load('test_X.npy')
    test_y = np.load('test_y.npy')

    # Flatten the (15, 60, 2) input, three 128-unit relu layers, then a
    # 900-way sigmoid output.
    model = Sequential()
    model.add(Flatten(input_shape=(15,60,2)))
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dense(900))
    model.add(Activation('sigmoid'))
    print model.summary()

    adam = Adam(0.001)
    #adagrad = Adagrad(lr=0.01)
    model.compile(loss='mse', optimizer=adam)
    # batch_size / nb_epoch are assumed to be module-level constants —
    # TODO confirm; they are not defined in this function.
    model.fit(train_X, train_y, batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=1, validation_data=(test_X, test_y))
    model.save_weights('model.h5', overwrite=True)