This article collects typical usage examples of the keras.callbacks.TensorBoard method in Python. If you have been wondering what callbacks.TensorBoard does or how to use it, the curated code examples below may help. You can also explore the containing module, keras.callbacks, for further reference.
The following presents 15 code examples of callbacks.TensorBoard, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python examples.
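Before diving into the examples, the basic pattern is worth showing in isolation: construct the callback with a log directory and pass it to fit. The sketch below is a minimal, self-contained illustration; the toy model and random data are placeholders rather than code from any of the examples on this page.

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import TensorBoard

# Toy data and model, used only to show where the callback plugs in.
x = np.random.rand(128, 10)
y = np.random.randint(0, 2, size=(128, 1))

model = Sequential([Dense(16, activation='relu', input_shape=(10,)),
                    Dense(1, activation='sigmoid')])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Event files are written under ./logs; view them with: tensorboard --logdir logs
tensorboard = TensorBoard(log_dir='logs', histogram_freq=0, write_graph=True)
model.fit(x, y, epochs=2, batch_size=32, validation_split=0.2,
          callbacks=[tensorboard])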
Example 1: get_call_back
# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import TensorBoard [as alias]
def get_call_back():
    """
    Define the training callbacks.
    :return: list of callbacks
    """
    checkpoint = ModelCheckpoint(filepath='/tmp/ctpn.{epoch:03d}.h5',
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=False,
                                 save_weights_only=True,
                                 period=5)
    # Reduce the learning rate when the monitored loss stops improving
    lr_reducer = ReduceLROnPlateau(monitor='loss',
                                   factor=0.1,
                                   cooldown=0,
                                   patience=10,
                                   min_lr=1e-4)
    log = TensorBoard(log_dir='log')
    return [lr_reducer, checkpoint, log]
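A hypothetical way to consume the list returned by get_call_back; the model and generator names below are placeholders, not part of the original example.

# Usage sketch (assumes `model` and `train_gen` are defined elsewhere in the project).
callbacks = get_call_back()
model.fit_generator(train_gen,
                    steps_per_epoch=1000,
                    epochs=50,
                    callbacks=callbacks)  # TensorBoard events are written to ./log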
Example 2: train
# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import TensorBoard [as alias]
def train(model, image_data, y_true, log_dir='logs/'):
    '''Retrain/fine-tune the model.'''
    # The custom yolo_loss Lambda layer already outputs the loss value,
    # so the compiled loss simply passes y_pred through.
    model.compile(optimizer='adam', loss={
        'yolo_loss': lambda y_true, y_pred: y_pred})
    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(log_dir + "ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5",
                                 monitor='val_loss', save_weights_only=True, save_best_only=True)
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=1, mode='auto')
    # The targets are dummy zeros because the real loss is computed inside the model.
    model.fit([image_data, *y_true],
              np.zeros(len(image_data)),
              validation_split=.1,
              batch_size=32,
              epochs=30,
              callbacks=[logging, checkpoint, early_stopping])
    model.save_weights(log_dir + 'trained_weights.h5')
    # Further training.
Example 3: _create_callbacks
# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import TensorBoard [as alias]
def _create_callbacks(self, saved_weights_name, model_to_save):
    checkpoint = CustomModelCheckpoint(
        model_to_save=model_to_save,
        filepath=saved_weights_name + 'ex-{epoch:03d}--loss-{loss:08.3f}.h5',
        monitor='loss',
        verbose=0,
        save_best_only=True,
        mode='min',
        period=1
    )
    reduce_on_plateau = ReduceLROnPlateau(
        monitor='loss',
        factor=0.1,
        patience=2,
        verbose=0,
        mode='min',
        epsilon=0.01,
        cooldown=0,
        min_lr=0
    )
    tensor_board = TensorBoard(
        log_dir=self.__logs_directory
    )
    return [checkpoint, reduce_on_plateau, tensor_board]
Example 4: train_model
# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import TensorBoard [as alias]
def train_model(model, X, X_test, Y, Y_test):
    checkpoints = []
    if not os.path.exists('Data/Checkpoints/'):
        os.makedirs('Data/Checkpoints/')
    checkpoints.append(ModelCheckpoint('Data/Checkpoints/best_weights.h5', monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=True, mode='auto', period=1))
    checkpoints.append(TensorBoard(log_dir='Data/Checkpoints/./logs', histogram_freq=0, write_graph=True, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None))
    # Data augmentation: improves the results at the cost of longer training.
    # To train on the raw data instead, use:
    # model.fit(X, Y, batch_size=10, epochs=25, validation_data=(X_test, Y_test), shuffle=True, callbacks=checkpoints)
    from keras.preprocessing.image import ImageDataGenerator
    generated_data = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=0, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True, vertical_flip=False)
    generated_data.fit(X)
    model.fit_generator(generated_data.flow(X, Y, batch_size=8), steps_per_epoch=X.shape[0]//8, epochs=25, validation_data=(X_test, Y_test), callbacks=checkpoints)
    return model
Example 5: init_logging_callbacks
# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import TensorBoard [as alias]
def init_logging_callbacks(self, log_dir=LOG_DIR_ROOT):
    self.checkpoint = ModelCheckpoint(filepath="%s/weights-improvement-{epoch:02d}-{loss:.4f}.hdf5" % (log_dir),
                                      monitor='loss',
                                      verbose=1,
                                      save_best_only=True,
                                      mode='min')
    self.early_stopping = EarlyStopping(monitor='loss',
                                        min_delta=0,
                                        patience=PATIENCE,
                                        verbose=0,
                                        mode='auto')
    now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
    log_dir = "{}/run/{}".format(LOG_DIR_ROOT, now)
    self.tensorboard = TensorBoard(log_dir=log_dir,
                                   write_graph=True,
                                   write_images=True)
    self.callbacks = [self.early_stopping,
                      self.tensorboard,
                      self.checkpoint]
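The method above relies on module-level constants such as LOG_DIR_ROOT and PATIENCE that are not shown in the snippet. A plausible setup and usage might look like the following; the constant values and the trainer/model names are illustrative assumptions, not taken from the source project.

# Assumed module-level configuration (illustrative values).
LOG_DIR_ROOT = './logs'
PATIENCE = 10

# Hypothetical usage: `trainer` is an instance of the class that defines
# init_logging_callbacks, and `model`, `x`, `y` are defined elsewhere.
trainer.init_logging_callbacks()
model.fit(x, y, epochs=100, callbacks=trainer.callbacks)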
Example 6: get_callbacks
# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import TensorBoard [as alias]
def get_callbacks(config_data, appendix=''):
    ret_callbacks = []
    model_stored = False
    callbacks = config_data['callbacks']
    if K._BACKEND == 'tensorflow':
        tensor_board = TensorBoard(log_dir=os.path.join('logging', config_data['tb_log_dir']), histogram_freq=10)
        ret_callbacks.append(tensor_board)
    for callback in callbacks:
        if callback['name'] == 'early_stopping':
            ret_callbacks.append(EarlyStopping(monitor=callback['monitor'], patience=callback['patience'], verbose=callback['verbose'], mode=callback['mode']))
        elif callback['name'] == 'model_checkpoit':
            model_stored = True
            path = config_data['output_path']
            basename = config_data['output_basename']
            base_path = os.path.join(path, basename)
            opath = os.path.join(base_path, 'best_model{}.h5'.format(appendix))
            save_best = bool(callback['save_best_only'])
            ret_callbacks.append(ModelCheckpoint(filepath=opath, verbose=callback['verbose'], save_best_only=save_best, monitor=callback['monitor'], mode=callback['mode']))
    return ret_callbacks, model_stored
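The shape of config_data can be inferred from the keys that get_callbacks reads. The dictionary below is a reconstruction for illustration; the real configuration files in the source project may contain more fields and different values.

# Illustrative config_data, reconstructed from the keys read above.
config_data = {
    'tb_log_dir': 'my_experiment',
    'output_path': 'output',
    'output_basename': 'run_01',
    'callbacks': [
        {'name': 'early_stopping', 'monitor': 'val_loss',
         'patience': 5, 'verbose': 1, 'mode': 'min'},
        {'name': 'model_checkpoit',  # spelled this way in the code above
         'monitor': 'val_loss', 'verbose': 1, 'mode': 'min',
         'save_best_only': True},
    ],
}
callbacks_list, model_stored = get_callbacks(config_data, appendix='_fold0')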
Example 7: lengthy_test
# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import TensorBoard [as alias]
def lengthy_test(model, testrange=[5, 10, 20, 40, 80], epochs=100, verboose=True):
    ts = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    log_path = LOG_PATH_BASE + ts + "_-_" + model.name
    tensorboard = TensorBoard(log_dir=log_path,
                              write_graph=False,  # This eats a lot of space. Enable with caution!
                              # histogram_freq=1,
                              write_images=True,
                              batch_size=model.batch_size,
                              write_grads=True)
    model_saver = ModelCheckpoint(log_path + "/model.ckpt.{epoch:04d}.hdf5", monitor='loss', period=1)
    callbacks = [tensorboard, TerminateOnNaN(), model_saver]
    for i in testrange:
        acc = test_model(model, sequence_length=i, verboose=verboose)
        print("the accuracy for length {0} was: {1}%".format(i, acc))
    train_model(model, epochs=epochs, callbacks=callbacks, verboose=verboose)
    for i in testrange:
        acc = test_model(model, sequence_length=i, verboose=verboose)
        print("the accuracy for length {0} was: {1}%".format(i, acc))
    return
Example 8: main
# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import TensorBoard [as alias]
def main():
    house_df = pd.read_csv('./data/housing.csv', sep='\s+', header=None)
    hose_set = house_df.values
    # print(hose_set)
    x = hose_set[:, 0:13]
    y = hose_set[:, 13]
    # print(y)
    # tbcallback = callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=True)
    estimators = []
    estimators.append(('mlp', KerasRegressor(build_fn=build_model, epochs=512, batch_size=32, verbose=1)))
    pipeline = Pipeline(estimators)
    kfold = KFold(n_splits=10, random_state=seed)
    # results = cross_val_score(estimator, x, y, cv=kfold)
    scores = cross_val_score(pipeline, x, y, cv=kfold)
    print('\n')
    print("Results: %.2f (%.2f) MSE" % (scores.mean(), scores.std()))
Example 9: train_seg_model
# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import TensorBoard [as alias]
def train_seg_model(model, splitted_npy_dataset_path, test_path, epochs):
    test_XY = np.load(test_path + '/test.npy')
    X_test, Y_test = test_XY[0], test_XY[1]
    batch_dirs = listdir(splitted_npy_dataset_path)
    len_batch_dirs = len(batch_dirs)
    if not os.path.exists('Data/Checkpoints/'):
        os.makedirs('Data/Checkpoints/')
    checkpoints = []
    checkpoints.append(ModelCheckpoint('Data/Checkpoints/best_weights.h5', monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=True, mode='auto', period=1))
    checkpoints.append(TensorBoard(log_dir='Data/Checkpoints/./logs', histogram_freq=0, write_graph=True, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None))
    for epoch in range(epochs):
        print('Epoch: {0}/{1}'.format(epoch+1, epochs))
        model.fit_generator(data_gen(splitted_npy_dataset_path), steps_per_epoch=batch_size, epochs=int(len_batch_dirs/batch_size), callbacks=checkpoints)
        scores = model.evaluate(X_test, Y_test)
        dice_score = dice_coefficient(model.predict(X_test), Y_test)
        print('Test loss:', scores[0], '\nTest accuracy:', scores[1], '\nDice Coefficient Accuracy:', dice_score)
    return model

# Training GAN:
Example 10: build_tensorboard
# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import TensorBoard [as alias]
def build_tensorboard(tmp_generator, tb_folder):
    # Clear out old event files so the new run starts from a clean folder.
    for a_file in os.listdir(tb_folder):
        file_path = join(tb_folder, a_file)
        try:
            if os.path.isfile(file_path):
                os.unlink(file_path)
        except Exception as e:
            print(e, file=sys.stderr)
    tb = TensorBoard(tb_folder, write_graph=False, histogram_freq=1, write_grads=True, write_images=False)
    # histogram_freq=1 needs validation data, so pull one batch from the
    # generator and attach it to the callback manually.
    x, y = next(tmp_generator)
    tb.validation_data = x
    tb.validation_data[1] = np.expand_dims(tb.validation_data[1], axis=-1)
    if isinstance(y, list):
        num_targets = len(y)
        tb.validation_data += [y[0]] + y[1:]
    else:
        tb.validation_data += [y]
        num_targets = 1
    # Append sample weights (all ones) and the learning-phase flag expected by Keras.
    tb.validation_data += [np.ones(x[0].shape[0])] * num_targets + [0.0]
    return tb
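A hedged sketch of how the returned callback might be wired into training; the generator and model names are placeholders assumed to exist elsewhere.

# Hypothetical usage: `train_generator` yields (x, y) batches and `model` is a compiled Keras model.
tb = build_tensorboard(train_generator, 'tb_logs')
model.fit_generator(train_generator,
                    steps_per_epoch=100,
                    epochs=10,
                    callbacks=[tb])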
Example 11: create_initial_model
# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import TensorBoard [as alias]
def create_initial_model(name):
    full_filename = os.path.join(conf['MODEL_DIR'], name) + ".h5"
    if os.path.isfile(full_filename):
        model = load_model(full_filename, custom_objects={'loss': loss})
        return model
    model = build_model(name)
    # Save the graph in TensorBoard. This graph has the name scopes, making it
    # look good in TensorBoard; the loaded models will not have the scopes.
    tf_callback = TensorBoard(log_dir=os.path.join(conf['LOG_DIR'], name),
                              histogram_freq=0, batch_size=1, write_graph=True, write_grads=False)
    tf_callback.set_model(model)
    tf_callback.on_epoch_end(0)
    tf_callback.on_train_end(0)
    from self_play import self_play
    self_play(model, n_games=conf['N_GAMES'], mcts_simulations=conf['MCTS_SIMULATIONS'])
    model.save(full_filename)
    best_filename = os.path.join(conf['MODEL_DIR'], 'best_model.h5')
    model.save(best_filename)
    return model
Example 12: init_callbacks
# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import TensorBoard [as alias]
def init_callbacks(self):
    # self.callbacks.append(
    #     ModelCheckpoint(
    #         filepath=os.path.join(self.config.checkpoint_dir, '%s-{epoch:02d}-{val_loss:.2f}.hdf5' % self.config.exp_name),
    #         monitor=self.config.checkpoint_monitor,
    #         mode=self.config.checkpoint_mode,
    #         save_best_only=self.config.checkpoint_save_best_only,
    #         save_weights_only=self.config.checkpoint_save_weights_only,
    #         verbose=self.config.checkpoint_verbose,
    #     )
    # )
    # self.callbacks.append(
    #     TensorBoard(
    #         log_dir=self.config.tensorboard_log_dir,
    #         write_graph=self.config.tensorboard_write_graph,
    #     )
    # )
    # Learning-rate decay
    reduce_lr = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=1/math.e,
                                            verbose=1, patience=self.patience, min_lr=self.min_lr)
    self.callbacks.append(reduce_lr)
    # if hasattr(self.config, "comet_api_key"):
    #     from comet_ml import Experiment
    #     experiment = Experiment(api_key=self.config.comet_api_key, project_name=self.config.exp_name)
    #     experiment.disable_mp()
    #     experiment.log_multiple_params(self.config)
    #     self.callbacks.append(experiment.get_keras_callback())
Example 13: trainModel
# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import TensorBoard [as alias]
def trainModel(self, baseCoder, name):
    x_train, x_test = AutoEncoder.getData(self)
    if id(baseCoder) == id(ConvCoder):
        x_train = np.reshape(x_train, (len(x_train), 28, 28, 1))
        x_test = np.reshape(x_test, (len(x_test), 28, 28, 1))
    autoencoder = AutoEncoder.getAutoEncoder(self, baseCoder)
    autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
    autoencoder.fit(x_train, x_train, epochs=10, batch_size=64, shuffle=True,
                    validation_data=(x_test, x_test),
                    callbacks=[TensorBoard(log_dir=name)], verbose=1)
    AutoEncoder.printSummary(self, autoencoder)
    AutoEncoder.saveModel(self, autoencoder, name + '.h5')
Example 14: callbacks
# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import TensorBoard [as alias]
def callbacks(logdir):
    model_checkpoint = ModelCheckpoint("weights_train/weights.{epoch:02d}-{loss:.2f}.h5", monitor='loss', verbose=1, period=10)
    tensorboard_callback = TensorBoard(log_dir=logdir, write_graph=True, write_images=True, histogram_freq=1)
    plateau_callback = ReduceLROnPlateau(monitor='loss', factor=0.99, verbose=1, patience=0, min_lr=0.00001)
    # return [CheckPoints(), tensorboard_callback, LrReducer()]
    return [model_checkpoint, tensorboard_callback, plateau_callback, LrReducer()]
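A hedged usage sketch for the callback list above; the model, the data, and the custom LrReducer class are assumed to be defined elsewhere. Because TensorBoard is created with histogram_freq=1, older Keras versions expect validation data to be available during fit.

# Hypothetical wiring of the callback list returned by callbacks('logs/run1').
model.fit(x_train, y_train,
          epochs=100, batch_size=16,
          validation_data=(x_val, y_val),   # needed when histogram_freq > 0
          callbacks=callbacks('logs/run1'))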
Example 15: train
# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import TensorBoard [as alias]
def train(model, annotation_path, input_shape, anchors, num_classes, log_dir='logs/'):
    # The custom yolo_loss Lambda layer already outputs the loss value.
    model.compile(optimizer='adam', loss={
        'yolo_loss': lambda y_true, y_pred: y_pred})
    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(log_dir + "ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5",
                                 monitor='val_loss', save_weights_only=True, save_best_only=True, period=1)
    batch_size = 15
    val_split = 0.1
    with open(annotation_path, encoding='UTF-8') as f:
        lines = f.readlines()
    np.random.shuffle(lines)
    num_val = int(len(lines) * val_split)
    num_train = len(lines) - num_val
    print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
    try:
        ######### 2. epochs changed to 30 ###########
        model.fit_generator(data_generator_wrap(lines[:num_train], batch_size, input_shape, anchors, num_classes),
                            steps_per_epoch=max(1, num_train // batch_size),
                            validation_data=data_generator_wrap(lines[num_train:], batch_size, input_shape, anchors, num_classes),
                            validation_steps=max(1, num_val // batch_size),
                            epochs=30, initial_epoch=0,
                            callbacks=[logging, checkpoint])  # pass the TensorBoard and checkpoint callbacks
    except Exception:
        print("error")
    finally:
        model.save_weights(log_dir + 'trained_weights_except.h5')
    model.save_weights(log_dir + 'trained_weights.h5')
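Whichever example is used, the resulting event files are inspected the same way. The usual route is the tensorboard --logdir logs/ command line, but the server can also be started from Python; the snippet below assumes the standalone tensorboard package that ships with TensorFlow is installed.

# Sketch: launch TensorBoard programmatically to browse the logs written above.
from tensorboard import program

tb = program.TensorBoard()
tb.configure(argv=[None, '--logdir', 'logs/'])
url = tb.launch()  # starts a background server
print('TensorBoard is listening on', url)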