This article collects typical usage examples of the Python method tensorflow.keras.callbacks.ReduceLROnPlateau. If you are wondering what callbacks.ReduceLROnPlateau does, how to call it, or want to see it used in context, the curated code examples below may help. You can also explore the other members of the tensorflow.keras.callbacks module.
Below are 9 code examples of callbacks.ReduceLROnPlateau, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
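Before the examples, here is a minimal, self-contained sketch of the pattern they all share: compile a model, create a ReduceLROnPlateau callback that watches a validation metric, and pass it to fit. The toy model and random data are illustrative assumptions, not taken from any example below.

import numpy as np
from tensorflow.keras import layers, models
from tensorflow.keras.callbacks import ReduceLROnPlateau

model = models.Sequential([layers.Dense(16, activation="relu", input_shape=(8,)),
                           layers.Dense(1)])
model.compile(optimizer="adam", loss="mse")

# Halve the learning rate whenever val_loss has not improved for 3 epochs,
# but never reduce it below 1e-6.
reduce_lr = ReduceLROnPlateau(monitor="val_loss", factor=0.5, patience=3,
                              min_lr=1e-6, verbose=1)

x, y = np.random.rand(256, 8), np.random.rand(256, 1)
model.fit(x, y, validation_split=0.2, epochs=20, callbacks=[reduce_lr])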
Example 1: main
# Required import: from tensorflow.keras import callbacks [as alias]
# Or: from tensorflow.keras.callbacks import ReduceLROnPlateau [as alias]
def main():
    model = create_model(trainable=TRAINABLE)
    model.summary()

    if TRAINABLE:
        model.load_weights(WEIGHTS)

    train_datagen = DataGenerator(TRAIN_CSV)
    validation_datagen = Validation(generator=DataGenerator(VALIDATION_CSV))

    optimizer = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    model.compile(loss=loss, optimizer=optimizer, metrics=[])

    checkpoint = ModelCheckpoint("model-{val_dice:.2f}.h5", monitor="val_dice", verbose=1,
                                 save_best_only=True, save_weights_only=True, mode="max")
    stop = EarlyStopping(monitor="val_dice", patience=PATIENCE, mode="max")
    reduce_lr = ReduceLROnPlateau(monitor="val_dice", factor=0.2, patience=5, min_lr=1e-6,
                                  verbose=1, mode="max")

    model.fit_generator(generator=train_datagen,
                        epochs=EPOCHS,
                        callbacks=[validation_datagen, checkpoint, reduce_lr, stop],
                        workers=THREADS,
                        use_multiprocessing=MULTI_PROCESSING,
                        shuffle=True,
                        verbose=1)
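A note that applies to several examples on this page: Model.fit_generator has been deprecated since TensorFlow 2.1, where Model.fit accepts generators and keras.utils.Sequence objects directly (the workers/use_multiprocessing arguments were later removed in Keras 3). Using the same names as Example 1, the equivalent modern call would be:

model.fit(train_datagen,
          epochs=EPOCHS,
          callbacks=[validation_datagen, checkpoint, reduce_lr, stop],
          workers=THREADS,
          use_multiprocessing=MULTI_PROCESSING,
          shuffle=True,
          verbose=1)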
Example 2: main
# Required import: from tensorflow.keras import callbacks [as alias]
# Or: from tensorflow.keras.callbacks import ReduceLROnPlateau [as alias]
def main():
    model = create_model()

    train_datagen = DataGenerator(TRAIN_CSV)
    validation_datagen = Validation(generator=DataGenerator(VALIDATION_CSV))

    optimizer = Adam(lr=1e-3, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    model.compile(loss={"coords": log_mse, "classes": focal_loss()},
                  loss_weights={"coords": 1, "classes": 1},
                  optimizer=optimizer, metrics=[])

    checkpoint = ModelCheckpoint("model-{val_iou:.2f}.h5", monitor="val_iou", verbose=1,
                                 save_best_only=True, save_weights_only=True, mode="max")
    stop = EarlyStopping(monitor="val_iou", patience=PATIENCE, mode="max")
    reduce_lr = ReduceLROnPlateau(monitor="val_iou", factor=0.2, patience=10, min_lr=1e-7,
                                  verbose=1, mode="max")

    model.summary()
    model.fit_generator(generator=train_datagen,
                        epochs=EPOCHS,
                        callbacks=[validation_datagen, checkpoint, reduce_lr, stop],
                        workers=THREADS,
                        use_multiprocessing=MULTI_PROCESSING,
                        shuffle=True,
                        verbose=1)
Example 3: main
# Required import: from tensorflow.keras import callbacks [as alias]
# Or: from tensorflow.keras.callbacks import ReduceLROnPlateau [as alias]
def main():
    model = create_model()
    model.summary()

    train_datagen = DataGenerator(TRAIN_CSV)
    validation_datagen = Validation(generator=DataGenerator(VALIDATION_CSV))

    model.compile(loss="mean_squared_error", optimizer="adam", metrics=[])

    checkpoint = ModelCheckpoint("model-{val_iou:.2f}.h5", monitor="val_iou", verbose=1,
                                 save_best_only=True, save_weights_only=True, mode="max")
    stop = EarlyStopping(monitor="val_iou", patience=PATIENCE, mode="max")
    reduce_lr = ReduceLROnPlateau(monitor="val_iou", factor=0.2, patience=10, min_lr=1e-7,
                                  verbose=1, mode="max")

    model.fit_generator(generator=train_datagen,
                        epochs=EPOCHS,
                        callbacks=[validation_datagen, checkpoint, reduce_lr, stop],
                        workers=THREADS,
                        use_multiprocessing=MULTI_PROCESSING,
                        shuffle=True,
                        verbose=1)
Example 4: _callbacks
# Required import: from tensorflow.keras import callbacks [as alias]
# Or: from tensorflow.keras.callbacks import ReduceLROnPlateau [as alias]
def _callbacks(
    self,
    *,
    # Mutable dict defaults are shared across calls; harmless here because
    # they are only unpacked into the callbacks, never mutated.
    es_params={
        'patience': 20,
        'monitor': 'val_loss'
    },
    lr_params={
        'monitor': 'val_loss',
        'patience': 4,
        'factor': 0.2
    }
):
    early_stopping = EarlyStopping(**es_params)
    learning_rate_reduction = ReduceLROnPlateau(**lr_params)

    return {
        'forecaster': [],
        'embedder': [],
        'combined': [
            early_stopping, learning_rate_reduction
        ]
    }
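A hypothetical usage sketch of the method above: the returned dict maps sub-model names to callback lists, so each sub-model's fit call can pick out its own callbacks. The trainer object, model, and data names here are assumptions, not part of the original example.

# Override only the LR-reduction parameters; early stopping keeps its defaults.
callbacks = trainer._callbacks(lr_params={'monitor': 'val_loss',
                                          'patience': 2,
                                          'factor': 0.5})
# Only the combined model gets early stopping + LR reduction.
combined_model.fit(x_train, y_train,
                   validation_split=0.1,
                   epochs=100,
                   callbacks=callbacks['combined'])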
Example 5: main
# Required import: from tensorflow.keras import callbacks [as alias]
# Or: from tensorflow.keras.callbacks import ReduceLROnPlateau [as alias]
def main():
    model = create_model(trainable=TRAINABLE)
    model.summary()

    if TRAINABLE:
        model.load_weights(WEIGHTS)

    train_datagen = DataGenerator(TRAIN_CSV)
    val_generator = DataGenerator(VALIDATION_CSV, rnd_rescale=False, rnd_multiply=False,
                                  rnd_crop=False, rnd_flip=False, debug=False)
    validation_datagen = Validation(generator=val_generator)

    learning_rate = LEARNING_RATE
    if TRAINABLE:
        learning_rate /= 10

    optimizer = SGD(lr=learning_rate, decay=LR_DECAY, momentum=0.9, nesterov=False)
    model.compile(loss=detection_loss(), optimizer=optimizer, metrics=[])

    checkpoint = ModelCheckpoint("model-{val_iou:.2f}.h5", monitor="val_iou", verbose=1,
                                 save_best_only=True, save_weights_only=True, mode="max")
    stop = EarlyStopping(monitor="val_iou", patience=PATIENCE, mode="max")
    reduce_lr = ReduceLROnPlateau(monitor="val_iou", factor=0.6, patience=5, min_lr=1e-6,
                                  verbose=1, mode="max")

    model.fit_generator(generator=train_datagen,
                        epochs=EPOCHS,
                        callbacks=[validation_datagen, checkpoint, reduce_lr, stop],
                        workers=THREADS,
                        use_multiprocessing=MULTITHREADING,
                        shuffle=True,
                        verbose=1)
Example 6: fit_model_softmax
# Required import: from tensorflow.keras import callbacks [as alias]
# Or: from tensorflow.keras.callbacks import ReduceLROnPlateau [as alias]
def fit_model_softmax(dsm: DeepSpeakerModel, kx_train, ky_train, kx_test, ky_test,
                      batch_size=BATCH_SIZE, max_epochs=1000, initial_epoch=0):
    checkpoint_name = dsm.m.name + '_checkpoint'
    checkpoint_filename = os.path.join(CHECKPOINTS_SOFTMAX_DIR, checkpoint_name + '_{epoch}.h5')
    checkpoint = ModelCheckpoint(monitor='val_accuracy', filepath=checkpoint_filename, save_best_only=True)

    # If the accuracy does not increase by 0.1% over 20 epochs, stop the training.
    early_stopping = EarlyStopping(monitor='val_accuracy', min_delta=0.001, patience=20, verbose=1, mode='max')

    # If the accuracy does not increase over 10 epochs, halve the learning rate.
    reduce_lr = ReduceLROnPlateau(monitor='val_accuracy', factor=0.5, patience=10, min_lr=0.0001, verbose=1)

    # Truncate both sets so their lengths are exact multiples of batch_size.
    max_len_train = len(kx_train) - len(kx_train) % batch_size
    kx_train = kx_train[0:max_len_train]
    ky_train = ky_train[0:max_len_train]
    max_len_test = len(kx_test) - len(kx_test) % batch_size
    kx_test = kx_test[0:max_len_test]
    ky_test = ky_test[0:max_len_test]

    dsm.m.fit(x=kx_train,
              y=ky_train,
              batch_size=batch_size,
              epochs=initial_epoch + max_epochs,
              initial_epoch=initial_epoch,
              verbose=1,
              shuffle=True,
              validation_data=(kx_test, ky_test),
              callbacks=[early_stopping, reduce_lr, checkpoint])
Example 7: train
# Required import: from tensorflow.keras import callbacks [as alias]
# Or: from tensorflow.keras.callbacks import ReduceLROnPlateau [as alias]
def train(self, training_data, cfg, **kwargs):
    classifier_model = eval("clf." + self.classifier_model)

    epochs = self.component_config.get('epochs')
    batch_size = self.component_config.get('batch_size')
    validation_split = self.component_config.get('validation_split')
    patience = self.component_config.get('patience')
    factor = self.component_config.get('factor')
    verbose = self.component_config.get('verbose')

    X, Y = [], []
    for msg in training_data.intent_examples:
        X.append(self.tokenizer.tokenize(msg.text))
        Y.append(msg.get('intent'))

    train_x, validate_x, train_y, validate_y = train_test_split(X, Y, test_size=validation_split,
                                                                random_state=100)

    self.bert_embedding.processor.add_bos_eos = False
    self.model = classifier_model(self.bert_embedding)

    checkpoint = ModelCheckpoint(
        'intent_weights.h5',
        monitor='val_loss',
        save_best_only=True,
        save_weights_only=False,
        verbose=verbose)
    early_stopping = EarlyStopping(
        monitor='val_loss',
        patience=patience)
    reduce_lr = ReduceLROnPlateau(
        monitor='val_loss',
        factor=factor,
        patience=patience,
        verbose=verbose)

    self.model.fit(
        train_x,
        train_y,
        validate_x,
        validate_y,
        epochs=epochs,
        batch_size=batch_size,
        callbacks=[checkpoint, early_stopping, reduce_lr]
    )
Example 8: train
# Required import: from tensorflow.keras import callbacks [as alias]
# Or: from tensorflow.keras.callbacks import ReduceLROnPlateau [as alias]
def train(self, training_data, cfg, **kwargs):
    labeling_model = eval("labeling." + self.labeling_model)

    epochs = self.component_config.get('epochs')
    batch_size = self.component_config.get('batch_size')
    validation_split = self.component_config.get('validation_split')
    patience = self.component_config.get('patience')
    factor = self.component_config.get('factor')
    verbose = self.component_config.get('verbose')

    filtered_entity_examples = self.filter_trainable_entities(training_data.training_examples)
    X, Y = self._create_dataset(filtered_entity_examples)
    train_x, validate_x, train_y, validate_y = train_test_split(X, Y, test_size=validation_split,
                                                                random_state=100)

    self.model = labeling_model(self.bert_embedding)

    checkpoint = ModelCheckpoint(
        'entity_weights.h5',
        monitor='val_loss',
        save_best_only=True,
        save_weights_only=False,
        verbose=verbose)
    early_stopping = EarlyStopping(
        monitor='val_loss',
        patience=patience)
    reduce_lr = ReduceLROnPlateau(
        monitor='val_loss',
        factor=factor,
        patience=patience,
        verbose=verbose)

    self.model.fit(
        train_x,
        train_y,
        validate_x,
        validate_y,
        epochs=epochs,
        batch_size=batch_size,
        callbacks=[checkpoint, early_stopping, reduce_lr]
    )
Example 9: main
# Required import: from tensorflow.keras import callbacks [as alias]
# Or: from tensorflow.keras.callbacks import ReduceLROnPlateau [as alias]
def main(batch_size: int = 24,
         epochs: int = 384,
         train_path: str = 'train',
         val_path: str = 'val',
         weights=None,
         workers: int = 8):

    # We use an extra input during training to discount bounding box loss
    # when a class is not present in an image.
    discount_input = Input(shape=(7, 7), name='discount')

    keras_model = MobileDetectNetModel.complete_model(extra_inputs=[discount_input])
    keras_model.summary()

    if weights is not None:
        keras_model.load_weights(weights, by_name=True)

    train_seq = MobileDetectNetSequence(train_path, stage="train", batch_size=batch_size)
    val_seq = MobileDetectNetSequence(val_path, stage="val", batch_size=batch_size)

    callbacks = []

    def region_loss(classes):
        def loss_fn(y_true, y_pred):
            # Don't penalize bounding box errors when there is no object present
            return 10 * (classes * K.abs(y_pred[:, :, :, 0] - y_true[:, :, :, 0]) +
                         classes * K.abs(y_pred[:, :, :, 1] - y_true[:, :, :, 1]) +
                         classes * K.abs(y_pred[:, :, :, 2] - y_true[:, :, :, 2]) +
                         classes * K.abs(y_pred[:, :, :, 3] - y_true[:, :, :, 3]))
        return loss_fn

    keras_model.compile(optimizer=Nadam(lr=0.001), loss=['mean_absolute_error',
                                                         region_loss(discount_input),
                                                         'binary_crossentropy'])

    filepath = "weights-{epoch:02d}-{val_loss:.4f}-multi-gpu.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
    callbacks.append(checkpoint)

    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, min_lr=0.00001, verbose=1)
    callbacks.append(reduce_lr)

    try:
        os.mkdir('logs')
    except FileExistsError:
        pass
    tensorboard = TensorBoard(log_dir='logs/%s' % time.strftime("%Y-%m-%d_%H-%M-%S"))
    callbacks.append(tensorboard)

    keras_model.fit_generator(train_seq,
                              validation_data=val_seq,
                              epochs=epochs,
                              steps_per_epoch=np.ceil(len(train_seq) / batch_size),
                              validation_steps=np.ceil(len(val_seq) / batch_size),
                              callbacks=callbacks,
                              use_multiprocessing=True,
                              workers=workers,
                              shuffle=True)