This page collects typical usage examples of the Python class tensorflow.keras.callbacks.Callback. If you have been wondering what callbacks.Callback is for, how to use it, or what real-world callbacks.Callback code looks like, the curated examples below may help. You can also explore the enclosing module, tensorflow.keras.callbacks, for related functionality.
The following shows 8 code examples of callbacks.Callback, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
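As a primer before the examples, here is a minimal sketch of the Callback interface itself: subclass it and override any of the on_* hooks, which Keras invokes during fit(). The hook names are part of the official Keras API; the callback body is illustrative.

from tensorflow.keras.callbacks import Callback

class LossLoggerCallback(Callback):
    """Minimal custom callback: report the training loss after each epoch."""

    def on_epoch_end(self, epoch, logs=None):
        # `logs` holds the metrics computed for this epoch, e.g. 'loss', 'val_loss'.
        loss = (logs or {}).get('loss')
        if loss is not None:
            print(f'epoch {epoch}: loss={loss:.4f}')

An instance is passed to training via model.fit(..., callbacks=[LossLoggerCallback()]).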
Example 1: _create_train_callbacks
# Required import: from tensorflow.keras import callbacks [as alias]
# Or alternatively: from tensorflow.keras.callbacks import Callback [as alias]
def _create_train_callbacks(self) -> List[Callback]:
    # TODO: do we want to use early stopping? If so, use the right checkpoint manager and set the correct
    #       `monitor` quantity (example: monitor='val_acc', mode='max').
    keras_callbacks = [
        ModelTrainingStatusTrackerCallback(self.training_status),
        ModelTrainingProgressLoggerCallback(self.config, self.training_status),
    ]
    if self.config.is_saving:
        keras_callbacks.append(ModelCheckpointSaverCallback(
            self, self.config.SAVE_EVERY_EPOCHS, self.logger))
    if self.config.is_testing:
        keras_callbacks.append(ModelEvaluationCallback(self))
    if self.config.USE_TENSORBOARD:
        log_dir = "logs/scalars/train_" + common.now_str()
        tensorboard_callback = keras.callbacks.TensorBoard(
            log_dir=log_dir,
            update_freq=self.config.NUM_BATCHES_TO_LOG_PROGRESS * self.config.TRAIN_BATCH_SIZE)
        keras_callbacks.append(tensorboard_callback)
    return keras_callbacks
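The TODO above asks about early stopping; a minimal sketch of how it could be wired in, using the built-in tensorflow.keras.callbacks.EarlyStopping. The monitored metric and patience value are illustrative assumptions, not part of the original code.

from tensorflow.keras.callbacks import EarlyStopping

# Hypothetical addition to the list built above: stop training once the
# monitored validation metric stops improving for `patience` epochs.
keras_callbacks.append(EarlyStopping(
    monitor='val_acc',           # assumed metric name, echoing the TODO comment
    mode='max',                  # higher accuracy is better
    patience=5,                  # illustrative value
    restore_best_weights=True))  # roll back to the best-performing weights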
Example 2: train
# Required import: from tensorflow.keras import callbacks [as alias]
# Or alternatively: from tensorflow.keras.callbacks import Callback [as alias]
def train(weights_path, epochs, batch_size, initial_epoch,
          kl_start_epoch, kl_alpha_increase_per_epoch):
    """Trains a model."""
    print('loading data...')
    # Loads or creates training data.
    input_shape, train, valid, train_targets, valid_targets = get_train_data()

    print('getting model...')
    # Loads or creates model.
    model, checkpoint_path, kl_alpha = get_model(input_shape,
                                                 scale_factor=len(train) / batch_size,
                                                 weights_path=weights_path)

    # Sets callbacks.
    checkpointer = ModelCheckpoint(checkpoint_path, verbose=1,
                                   save_weights_only=True, save_best_only=True)
    scheduler = LearningRateScheduler(schedule)
    annealer = (Callback() if kl_alpha is None
                else AnnealingCallback(kl_alpha, kl_start_epoch, kl_alpha_increase_per_epoch))

    print('fitting model...')
    # Trains model.
    model.fit(train, train_targets, batch_size, epochs,
              initial_epoch=initial_epoch,
              callbacks=[checkpointer, scheduler, annealer],
              validation_data=(valid, valid_targets))
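AnnealingCallback is project-specific and not shown on this page. A plausible minimal sketch, assuming kl_alpha is a Keras backend variable whose value is ramped up once kl_start_epoch is reached; the constructor arguments mirror the call above, but the internals are guesswork.

import tensorflow.keras.backend as K
from tensorflow.keras.callbacks import Callback

class AnnealingCallback(Callback):
    """Sketch: linearly increase a KL weight from a given epoch onwards."""

    def __init__(self, kl_alpha, kl_start_epoch, kl_alpha_increase_per_epoch):
        super().__init__()
        self.kl_alpha = kl_alpha
        self.kl_start_epoch = kl_start_epoch
        self.kl_alpha_increase_per_epoch = kl_alpha_increase_per_epoch

    def on_epoch_end(self, epoch, logs=None):
        if epoch >= self.kl_start_epoch - 1:
            # Cap the weight at 1.0 once annealing has finished.
            new_value = min(1.0, K.get_value(self.kl_alpha) + self.kl_alpha_increase_per_epoch)
            K.set_value(self.kl_alpha, new_value)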
Example 3: __init__
# Required import: from tensorflow.keras import callbacks [as alias]
# Or alternatively: from tensorflow.keras.callbacks import Callback [as alias]
def __init__(self, schedule: Callable, batch_size: int, steps_per_epoch: int):
    """
    Callback to update the learning rate on every batch instead of every epoch.

    Parameters
    ----------
    schedule
        Function taking the epoch and batch index as input and returning the new
        learning rate as output.
    batch_size
        Batch size.
    steps_per_epoch
        Number of batches or steps per epoch.
    """
    super(LearningRateBatchScheduler, self).__init__()
    self.schedule = schedule
    self.steps_per_epoch = steps_per_epoch
    self.batch_size = batch_size
    self.epochs = -1
    self.prev_lr = -1
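Only __init__ appears in this listing; the actual per-batch update would live in the on_epoch_begin and on_batch_begin hooks. A sketch under that assumption (the original implementation may differ):

import tensorflow.keras.backend as K
from tensorflow.keras.callbacks import Callback

class LearningRateBatchScheduler(Callback):
    # __init__ as shown in Example 3 above.

    def on_epoch_begin(self, epoch, logs=None):
        # Remember the current epoch so on_batch_begin can feed it to `schedule`.
        self.epochs = epoch

    def on_batch_begin(self, batch, logs=None):
        lr = self.schedule(self.epochs, batch)
        # Only touch the optimizer when the scheduled rate actually changes.
        if lr != self.prev_lr:
            K.set_value(self.model.optimizer.lr, lr)
            self.prev_lr = lr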
Example 4: log
# Required import: from tensorflow.keras import callbacks [as alias]
# Or alternatively: from tensorflow.keras.callbacks import Callback [as alias]
def log(self):
    self.logger("PrintLayerWeights Callback")
    self.logger("Layer: ", self.layer)
    self.logger("Every: ", self.every)
    self.logger("First: ", self.first)
    self.logger("Per epoch: ", self.per_epoch)
Example 5: pretrain
# Required import: from tensorflow.keras import callbacks [as alias]
# Or alternatively: from tensorflow.keras.callbacks import Callback [as alias]
def pretrain(self, x, y=None, optimizer='adam', epochs=200, batch_size=256,
             save_dir='results/temp', verbose=1, aug_pretrain=False):
    print('Begin pretraining: ', '-' * 60)
    self.autoencoder.compile(optimizer=optimizer, loss='mse')
    csv_logger = callbacks.CSVLogger(save_dir + '/pretrain_log.csv')
    cb = [csv_logger]
    if y is not None and verbose > 0:
        class PrintACC(callbacks.Callback):
            def __init__(self, x, y):
                self.x = x
                self.y = y
                super(PrintACC, self).__init__()

            def on_epoch_end(self, epoch, logs=None):
                if int(epochs / 10) != 0 and epoch % int(epochs / 10) != 0:
                    return
                feature_model = Model(self.model.input,
                                      self.model.get_layer(index=int(len(self.model.layers) / 2)).output)
                features = feature_model.predict(self.x)
                km = KMeans(n_clusters=len(np.unique(self.y)), n_init=20, n_jobs=4)
                y_pred = km.fit_predict(features)
                print(' ' * 8 + '|==> acc: %.4f, nmi: %.4f <==|'
                      % (metrics.acc(self.y, y_pred), metrics.nmi(self.y, y_pred)))

        cb.append(PrintACC(x, y))

    # begin pretraining
    t0 = time()
    if not aug_pretrain:
        self.autoencoder.fit(x, x, batch_size=batch_size, epochs=epochs, callbacks=cb, verbose=verbose)
    else:
        print('-=*' * 20)
        print('Using augmentation for ae')
        print('-=*' * 20)

        def gen(x, batch_size):
            if len(x.shape) > 2:  # image
                gen0 = self.datagen.flow(x, shuffle=True, batch_size=batch_size)
                while True:
                    batch_x = gen0.next()
                    yield [batch_x, batch_x]
            else:
                width = int(np.sqrt(x.shape[-1]))
                if width * width == x.shape[-1]:  # gray
                    im_shape = [-1, width, width, 1]
                else:  # RGB
                    width = int(np.sqrt(x.shape[-1] / 3.0))
                    im_shape = [-1, width, width, 3]
                gen0 = self.datagen.flow(np.reshape(x, im_shape), shuffle=True, batch_size=batch_size)
                while True:
                    batch_x = gen0.next()
                    batch_x = np.reshape(batch_x, [batch_x.shape[0], x.shape[-1]])
                    yield [batch_x, batch_x]

        self.autoencoder.fit_generator(gen(x, batch_size), steps_per_epoch=int(x.shape[0] / batch_size),
                                       epochs=epochs, callbacks=cb, verbose=verbose,
                                       workers=8,
                                       use_multiprocessing=(platform.system() != "Windows"))

    print('Pretraining time: ', time() - t0)
    self.autoencoder.save_weights(save_dir + '/ae_weights.h5')
    print('Pretrained weights are saved to %s/ae_weights.h5' % save_dir)
    self.pretrained = True
    print('End pretraining: ', '-' * 60)
Example 6: main
# Required import: from tensorflow.keras import callbacks [as alias]
# Or alternatively: from tensorflow.keras.callbacks import Callback [as alias]
def main():
    numpy.random.seed(7)

    # data: definition of the problem.
    seq_length = 20
    x_train, y_train = task_add_two_numbers_after_delimiter(20_000, seq_length)
    x_val, y_val = task_add_two_numbers_after_delimiter(4_000, seq_length)

    # just arbitrary values, for visual purposes: easier to see than random values.
    test_index_1 = 4
    test_index_2 = 9
    x_test, _ = task_add_two_numbers_after_delimiter(10, seq_length, 0, test_index_1, test_index_2)
    # x_test_mask is just a mask that, if applied to x_test, would still contain the information to solve the problem.
    # we expect the attention map to look like this mask.
    x_test_mask = np.zeros_like(x_test[..., 0])
    x_test_mask[:, test_index_1:test_index_1 + 1] = 1
    x_test_mask[:, test_index_2:test_index_2 + 1] = 1

    # model
    i = Input(shape=(seq_length, 1))
    x = LSTM(100, return_sequences=True)(i)
    x = attention_3d_block(x)
    x = Dropout(0.2)(x)
    x = Dense(1, activation='linear')(x)
    model = Model(inputs=[i], outputs=[x])
    model.compile(loss='mse', optimizer='adam')
    print(model.summary())

    output_dir = 'task_add_two_numbers'
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    max_epoch = int(sys.argv[1]) if len(sys.argv) > 1 else 200

    class VisualiseAttentionMap(Callback):
        def on_epoch_end(self, epoch, logs=None):
            attention_map = get_activations(model, x_test, layer_name='attention_weight')['attention_weight']
            # top is the attention map; bottom is the ground truth.
            plt.imshow(np.concatenate([attention_map, x_test_mask]), cmap='hot')
            iteration_no = str(epoch).zfill(3)
            plt.axis('off')
            plt.title(f'Iteration {iteration_no} / {max_epoch}')
            plt.savefig(f'{output_dir}/epoch_{iteration_no}.png')
            plt.close()
            plt.clf()

    model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=max_epoch,
              batch_size=64, callbacks=[VisualiseAttentionMap()])
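Note that get_activations comes from an external helper (the keract package exposes a function of that name). If that dependency is unavailable, the same lookup can be done with a functional sub-model; a minimal sketch, assuming the attention layer is named 'attention_weight':

from tensorflow.keras.models import Model

def get_attention_map(model, inputs, layer_name='attention_weight'):
    """Sketch: run inputs through the model and return one intermediate layer's output."""
    sub_model = Model(inputs=model.input, outputs=model.get_layer(layer_name).output)
    return sub_model.predict(inputs)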
Example 7: train_and_evaluate_model_on_imdb
# Required import: from tensorflow.keras import callbacks [as alias]
# Or alternatively: from tensorflow.keras.callbacks import Callback [as alias]
def train_and_evaluate_model_on_imdb(add_attention=True):
    numpy.random.seed(7)
    # load the dataset but only keep the top n words, zero the rest
    top_words = 5000
    (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)
    # truncate and pad input sequences
    max_review_length = 500
    X_train = sequence.pad_sequences(X_train, maxlen=max_review_length)
    X_test = sequence.pad_sequences(X_test, maxlen=max_review_length)

    # create the model
    embedding_vector_length = 32
    i = Input(shape=(max_review_length,))
    x = Embedding(top_words, embedding_vector_length, input_length=max_review_length)(i)
    x = Dropout(0.5)(x)
    if add_attention:
        x = LSTM(100, return_sequences=True)(x)
        x = attention_3d_block(x)
    else:
        x = LSTM(100, return_sequences=False)(x)
        x = Dense(350, activation='relu')(x)  # same number of parameters, so a fair comparison.
    x = Dropout(0.5)(x)
    x = Dense(1, activation='sigmoid')(x)
    model = Model(inputs=[i], outputs=[x])
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    print(model.summary())

    class RecordBestTestAccuracy(Callback):
        def __init__(self):
            super().__init__()
            self.val_accuracies = []
            self.val_losses = []

        def on_epoch_end(self, epoch, logs=None):
            self.val_accuracies.append(logs['val_accuracy'])
            self.val_losses.append(logs['val_loss'])

    rbta = RecordBestTestAccuracy()
    model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=64, callbacks=[rbta])

    print(f"Max Test Accuracy: {100 * np.max(rbta.val_accuracies):.2f} %")
    print(f"Mean Test Accuracy: {100 * np.mean(rbta.val_accuracies):.2f} %")
Example 8: train
# Required import: from tensorflow.keras import callbacks [as alias]
# Or alternatively: from tensorflow.keras.callbacks import Callback [as alias]
def train(self,
          train_structures: List[Structure],
          train_targets: List[float],
          validation_structures: List[Structure] = None,
          validation_targets: List[float] = None,
          epochs: int = 1000,
          batch_size: int = 128,
          verbose: int = 1,
          callbacks: List[Callback] = None,
          scrub_failed_structures: bool = False,
          prev_model: str = None,
          save_checkpoint: bool = True,
          automatic_correction: bool = True,
          lr_scaling_factor: float = 0.5,
          patience: int = 500,
          **kwargs) -> "GraphModel":
    """
    Args:
        train_structures: (list) list of pymatgen structures
        train_targets: (list) list of target values
        validation_structures: (list) list of pymatgen structures used for validation
        validation_targets: (list) list of validation targets
        epochs: (int) number of epochs
        batch_size: (int) training batch size
        verbose: (int) keras fit verbosity; 0 = silent, 1 = progress bar, 2 = one line per epoch
        callbacks: (list) megnet or keras callback functions for training
        scrub_failed_structures: (bool) whether to scrub structures with failed graph computation
        prev_model: (str) file name of a previously saved model
        save_checkpoint: (bool) whether to save checkpoints
        automatic_correction: (bool) correct NaN errors
        lr_scaling_factor: (float, less than 1) scale the learning rate down when NaN loss is encountered
        patience: (int) patience for early stopping
        **kwargs: additional keyword arguments passed through to train_from_graphs
    """
    train_graphs, train_targets = self.get_all_graphs_targets(train_structures, train_targets,
                                                              scrub_failed_structures=scrub_failed_structures)
    if validation_structures is not None:
        val_graphs, validation_targets = self.get_all_graphs_targets(
            validation_structures, validation_targets, scrub_failed_structures=scrub_failed_structures)
    else:
        val_graphs = None

    self.train_from_graphs(train_graphs,
                           train_targets,
                           validation_graphs=val_graphs,
                           validation_targets=validation_targets,
                           epochs=epochs,
                           batch_size=batch_size,
                           verbose=verbose,
                           callbacks=callbacks,
                           prev_model=prev_model,
                           lr_scaling_factor=lr_scaling_factor,
                           patience=patience,
                           save_checkpoint=save_checkpoint,
                           automatic_correction=automatic_correction,
                           **kwargs)
    return self
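The callbacks argument accepts any list of tensorflow.keras.callbacks.Callback instances. A hypothetical usage sketch; model, structures, and targets are placeholders, not from the original source:

from tensorflow.keras.callbacks import ReduceLROnPlateau

# `model` is assumed to be a GraphModel instance; `structures` are pymatgen
# structures and `targets` the corresponding property values, per the docstring.
model.train(
    train_structures=structures,
    train_targets=targets,
    epochs=100,
    callbacks=[ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10)],
)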