This article collects typical usage examples of the keras.callbacks.ModelCheckpoint method in Python. If you have been wondering what callbacks.ModelCheckpoint does, how to use it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the containing module, keras.callbacks.
Below are 15 code examples of callbacks.ModelCheckpoint, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
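Before the collected examples, the basic pattern is simply to construct a ModelCheckpoint with a target file path and pass it to the callbacks argument of fit. A minimal self-contained sketch (the toy model, data, and file name best.h5 are illustrative, not taken from the examples below):

from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import ModelCheckpoint
import numpy as np

# Toy data and model, just to show where the callback plugs in
x = np.random.rand(100, 8)
y = np.random.randint(0, 2, size=(100, 1))
model = Sequential([Dense(16, activation='relu', input_shape=(8,)),
                    Dense(1, activation='sigmoid')])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Save the weights with the lowest validation loss seen so far to best.h5
checkpoint = ModelCheckpoint('best.h5', monitor='val_loss',
                             save_best_only=True, save_weights_only=True, verbose=1)
model.fit(x, y, validation_split=0.2, epochs=10, callbacks=[checkpoint])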
Example 1: threadsafe_generator
# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import ModelCheckpoint [as alias]
def threadsafe_generator(f):
    """A decorator that takes a generator function and makes it thread-safe.
    """
    def g(*a, **kw):
        return ThreadsafeIterator(f(*a, **kw))
    return g
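The ThreadsafeIterator class wrapped by this decorator is not included in the excerpt. A minimal sketch of such a wrapper, assuming it only needs to serialize calls to next() with a lock, could look like this:

import threading

class ThreadsafeIterator:
    """Wraps an iterator so that next() can safely be called from several threads."""
    def __init__(self, it):
        self.it = it
        self.lock = threading.Lock()

    def __iter__(self):
        return self

    def __next__(self):
        # Only one thread at a time may advance the underlying iterator
        with self.lock:
            return next(self.it)

    next = __next__  # Python 2 compatibility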
###############################################################
## MODEL CHECKPOINT FOR MULTI GPU
## When using multiple GPUs, we need to save the base model,
## not the one defined by multi_gpu_model
## see example: https://keras.io/utils/#multi_gpu_model
## Therefore, to save the model after each epoch by leveraging
## ModelCheckpoint callback, we need to adapt it to save the
## base model. To do so, we pass the base model to the callback.
## Inspired by:
## https://github.com/keras-team/keras/issues/8463#issuecomment-345914612
###############################################################
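The checkpoint class described by this comment block is not shown in the excerpt. A minimal sketch following the approach from the linked issue (the class name MultiGPUModelCheckpoint is illustrative) passes the base model in and swaps it into place before delegating to the parent callback:

from keras.callbacks import ModelCheckpoint

class MultiGPUModelCheckpoint(ModelCheckpoint):
    """ModelCheckpoint that saves the single-GPU base model instead of the
    wrapper returned by multi_gpu_model."""
    def __init__(self, filepath, base_model, **kwargs):
        super(MultiGPUModelCheckpoint, self).__init__(filepath, **kwargs)
        self.base_model = base_model

    def on_epoch_end(self, epoch, logs=None):
        # Temporarily point self.model at the base model so the parent class
        # saves it, then restore the multi-GPU wrapper.
        wrapper = self.model
        self.model = self.base_model
        super(MultiGPUModelCheckpoint, self).on_epoch_end(epoch, logs)
        self.model = wrapper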
Example 2: get_call_back
# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import ModelCheckpoint [as alias]
def get_call_back():
    """
    Define the training callbacks.
    :return:
    """
    checkpoint = ModelCheckpoint(filepath='/tmp/ctpn.{epoch:03d}.h5',
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=False,
                                 save_weights_only=True,
                                 period=5)
    # Reduce the learning rate when the loss stops improving
    lr_reducer = ReduceLROnPlateau(monitor='loss',
                                   factor=0.1,
                                   cooldown=0,
                                   patience=10,
                                   min_lr=1e-4)
    log = TensorBoard(log_dir='log')
    return [lr_reducer, checkpoint, log]
Example 3: train_model
# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import ModelCheckpoint [as alias]
def train_model(self):
    checkpoint = ModelCheckpoint(self.PATH, monitor='val_loss', verbose=1, save_best_only=True, mode='auto')

    if self.modality == "audio":
        model = self.get_audio_model()
        model.compile(optimizer='adadelta', loss='categorical_crossentropy', sample_weight_mode='temporal')
    elif self.modality == "text":
        model = self.get_text_model()
        model.compile(optimizer='adadelta', loss='categorical_crossentropy', sample_weight_mode='temporal')
    elif self.modality == "bimodal":
        model = self.get_bimodal_model()
        model.compile(optimizer='adam', loss='categorical_crossentropy', sample_weight_mode='temporal')

    early_stopping = EarlyStopping(monitor='val_loss', patience=10)
    model.fit(self.train_x, self.train_y,
              epochs=self.epochs,
              batch_size=self.batch_size,
              sample_weight=self.train_mask,
              shuffle=True,
              callbacks=[early_stopping, checkpoint],
              validation_data=(self.val_x, self.val_y, self.val_mask))

    self.test_model()
Example 4: train_model
# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import ModelCheckpoint [as alias]
def train_model(weight=None, batch_size=32, epochs=10):
    cg = caption_generator.CaptionGenerator()
    model = cg.create_model()
    if weight is not None:
        model.load_weights(weight)

    counter = 0
    file_name = 'weights-improvement-{epoch:02d}.hdf5'
    checkpoint = ModelCheckpoint(file_name, monitor='loss', verbose=1, save_best_only=True, mode='min')
    callbacks_list = [checkpoint]
    model.fit_generator(cg.data_generator(batch_size=batch_size), steps_per_epoch=cg.total_samples // batch_size, epochs=epochs, verbose=2, callbacks=callbacks_list)

    try:
        model.save('Models/WholeModel.h5', overwrite=True)
        model.save_weights('Models/Weights.h5', overwrite=True)
    except Exception:
        print("Error in saving model.")
    print("Training complete...\n")
Example 5: train
# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import ModelCheckpoint [as alias]
def train(model, image_data, y_true, log_dir='logs/'):
    '''retrain/fine-tune the model'''
    model.compile(optimizer='adam', loss={
        # use custom yolo_loss Lambda layer.
        'yolo_loss': lambda y_true, y_pred: y_pred})

    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(log_dir + "ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5",
                                 monitor='val_loss', save_weights_only=True, save_best_only=True)
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=1, mode='auto')

    model.fit([image_data, *y_true],
              np.zeros(len(image_data)),
              validation_split=.1,
              batch_size=32,
              epochs=30,
              callbacks=[logging, checkpoint, early_stopping])
    model.save_weights(log_dir + 'trained_weights.h5')
    # Further training.
Example 6: train_simple_inference_net
# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import ModelCheckpoint [as alias]
def train_simple_inference_net(n_epochs=30):
    inf_net = SimpleInferenceNet()
    tr_ids, val_ids, te_ids = train_document_ids(), validation_document_ids(), test_document_ids()
    tr_ids = list(train_document_ids())
    train_Xy, inference_vectorizer = get_train_Xy(tr_ids, sections_of_interest=None, vocabulary_file=None, include_sentence_span_splits=False, include_raw_texts=True)

    X_k, y_k = make_Xy_inference(train_Xy, inf_net.bc)
    print("train data for inference task loaded!")

    val_Xy = get_Xy(val_ids, inference_vectorizer, include_raw_texts=True)
    X_kv, y_kv = make_Xy_inference(val_Xy, inf_net.bc)
    print("val data loaded!")

    filepath = "inference.weights.best.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
    callbacks_list = [checkpoint]

    with open("inference_model.json", "w") as outf:
        outf.write(inf_net.model.to_json())

    print("fitting inference model!")
    inf_net.model.fit(X_k, y_k, validation_data=(X_kv, y_kv), callbacks=callbacks_list, epochs=n_epochs)
Example 7: train_model
# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import ModelCheckpoint [as alias]
def train_model(self, model, X_train, X_test, y_train, y_test):
    input_y_train = self.include_start_token(y_train)
    print(input_y_train.shape)
    input_y_test = self.include_start_token(y_test)
    print(input_y_test.shape)

    early = EarlyStopping(monitor='val_loss', patience=10, mode='auto')
    checkpoint = ModelCheckpoint(self.outpath + 's2s_model_' + str(self.version) + '_.h5', monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
    lr_reduce = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=2, verbose=0, mode='auto')

    model.fit([X_train, input_y_train], y_train,
              epochs=self.epochs,
              batch_size=self.batch_size,
              validation_data=[[X_test, input_y_test], y_test],
              callbacks=[early, checkpoint, lr_reduce],
              shuffle=True)
    return model
Example 8: train_model
# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import ModelCheckpoint [as alias]
def train_model(model, X, X_test, Y, Y_test):
    checkpoints = []
    if not os.path.exists('Data/Checkpoints/'):
        os.makedirs('Data/Checkpoints/')
    checkpoints.append(ModelCheckpoint('Data/Checkpoints/best_weights.h5', monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=True, mode='auto', period=1))
    checkpoints.append(TensorBoard(log_dir='Data/Checkpoints/./logs', histogram_freq=0, write_graph=True, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None))

    # Data augmentation: gives better results, but training takes longer.
    # To train without it, use this instead:
    # model.fit(X, Y, batch_size=10, epochs=25, validation_data=(X_test, Y_test), shuffle=True, callbacks=checkpoints)
    from keras.preprocessing.image import ImageDataGenerator
    generated_data = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=0, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True, vertical_flip=False)
    generated_data.fit(X)

    import numpy
    model.fit_generator(generated_data.flow(X, Y, batch_size=8), steps_per_epoch=X.shape[0] // 8, epochs=25, validation_data=(X_test, Y_test), callbacks=checkpoints)

    return model
Example 9: get_callbacks
# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import ModelCheckpoint [as alias]
def get_callbacks(self, log, model_prefix='Model'):
    """
    Creates a list of callbacks that can be used during training to create a
    snapshot ensemble of the model.
    Args:
        model_prefix: prefix for the filename of the weights.
    Returns: list of callbacks [ModelCheckpoint, LearningRateScheduler,
        SnapshotModelCheckpoint] plus the supplied log callback, which can be
        provided to the 'fit' function
    """
    if not os.path.exists(self.save_dir + '/weights/'):
        os.makedirs(self.save_dir + '/weights/')

    callback_list = [callbacks.ModelCheckpoint(self.save_dir + "/weights/weights_{epoch:002d}.h5", monitor="val_capsnet_acc",
                                               save_best_only=True, save_weights_only=False),
                     callbacks.LearningRateScheduler(schedule=self._cosine_anneal_schedule),
                     SnapshotModelCheckpoint(self.T, self.M, fn_prefix=self.save_dir + '/weights/%s' % model_prefix), log]
    return callback_list
Example 10: train
# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import ModelCheckpoint [as alias]
def train():
    model = create_model()
    model.compile(optimizer='adam',
                  loss=losses.sparse_categorical_crossentropy,
                  metrics=['accuracy'])
    checkpointer = callbacks.ModelCheckpoint(filepath="../Output/checkpoint.hdf5", verbose=1, save_best_only=True)

    x_train, x_test, y_train, y_test = load_audio_data()
    model.fit(x_train,
              y_train,
              epochs=1000,
              batch_size=1000,
              validation_split=0.2,
              callbacks=[checkpointer])

    results = model.evaluate(x_test, y_test)
    print('test_results: ', results)
    model.save(MODEL_FILE_PATH)
Example 11: NerCallbacks
# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import ModelCheckpoint [as alias]
def NerCallbacks(id_to_tag, best_fit_params=None, mask_tag=None, log_path=None):
    """Callbacks used during model training.
    """
    callbacks = [Accuracy(id_to_tag, mask_tag, log_path)]
    if best_fit_params is not None:
        early_stopping = EarlyStopping(
            monitor="val_crf_accuracy",
            patience=best_fit_params.get("early_stop_patience"))
        reduce_lr_on_plateau = ReduceLROnPlateau(
            monitor="val_crf_accuracy", verbose=1, mode="max",
            factor=best_fit_params.get("reduce_lr_factor"),
            patience=best_fit_params.get("reduce_lr_patience"))
        model_check_point = ModelCheckpoint(
            best_fit_params.get("save_path"),
            monitor="val_crf_accuracy", verbose=2, mode="max", save_best_only=True)
        callbacks.extend([early_stopping, reduce_lr_on_plateau, model_check_point])
    return callbacks
Example 12: train
# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import ModelCheckpoint [as alias]
def train():
    # load data
    train_dataset = Dataset(training=True)
    dev_dataset = Dataset(training=False)

    # model
    MODEL = name_model[model_name]
    model = MODEL(train_dataset.vocab_size, conf.n_classes, train_dataset.emb_mat)

    # callback
    my_callback = MyCallback()
    f1 = F1(dev_dataset.gen_batch_data(), dev_dataset.steps_per_epoch)
    checkpointer = ModelCheckpoint('data/{}.hdf5'.format(model_name), save_best_only=True)
    early_stop = EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='auto')

    # train
    model.compile(optimizer=keras.optimizers.Adam(),
                  loss=keras.losses.categorical_crossentropy, metrics=['acc'])
    model.fit_generator(train_dataset.gen_batch_data(),
                        steps_per_epoch=train_dataset.steps_per_epoch,
                        verbose=0,
                        epochs=conf.epochs, callbacks=[my_callback, checkpointer, early_stop, f1])
    keras.models.save_model(model, conf.model_path.format(model_name))
Example 13: cnn_model
# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import ModelCheckpoint [as alias]
def cnn_model():
    num_of_classes = get_num_of_classes()
    model = Sequential()
    model.add(Conv2D(16, (2, 2), input_shape=(image_x, image_y, 1), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
    model.add(Conv2D(32, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(3, 3), padding='same'))
    model.add(Conv2D(64, (5, 5), activation='relu'))
    model.add(MaxPooling2D(pool_size=(5, 5), strides=(5, 5), padding='same'))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(num_of_classes, activation='softmax'))

    sgd = optimizers.SGD(lr=1e-2)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

    filepath = "cnn_model_keras2.h5"
    checkpoint1 = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
    callbacks_list = [checkpoint1]

    #from keras.utils import plot_model
    #plot_model(model, to_file='model.png', show_shapes=True)
    return model, callbacks_list
Example 14: train
# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import ModelCheckpoint [as alias]
def train(model, max_len=200000, batch_size=64, verbose=True, epochs=100, save_path='../saved/', save_best=True):
    # callbacks
    ear = EarlyStopping(monitor='val_acc', patience=5)
    mcp = ModelCheckpoint(join(save_path, 'malconv.h5'),
                          monitor="val_acc",
                          save_best_only=save_best,
                          save_weights_only=False)

    history = model.fit_generator(
        utils.data_generator(x_train, y_train, max_len, batch_size, shuffle=True),
        steps_per_epoch=len(x_train) // batch_size + 1,
        epochs=epochs,
        verbose=verbose,
        callbacks=[ear, mcp],
        validation_data=utils.data_generator(x_test, y_test, max_len, batch_size),
        validation_steps=len(x_test) // batch_size + 1)

    return history
Example 15: finetuning_callbacks
# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import ModelCheckpoint [as alias]
def finetuning_callbacks(checkpoint_path, patience, verbose):
    """ Callbacks for model training.

    # Arguments:
        checkpoint_path: Where weight checkpoints should be saved.
        patience: Number of epochs with no improvement after which
            training will be stopped.

    # Returns:
        Array with training callbacks that can be passed straight into
        model.fit() or similar.
    """
    cb_verbose = (verbose >= 2)
    checkpointer = ModelCheckpoint(monitor='val_loss', filepath=checkpoint_path,
                                   save_best_only=True, verbose=cb_verbose)
    earlystop = EarlyStopping(monitor='val_loss', patience=patience,
                              verbose=cb_verbose)
    return [checkpointer, earlystop]