本文整理匯總了Python中keras.callbacks.CSVLogger方法的典型用法代碼示例。如果您正苦於以下問題:Python callbacks.CSVLogger方法的具體用法?Python callbacks.CSVLogger怎麽用?Python callbacks.CSVLogger使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類keras.callbacks
的用法示例。
在下文中一共展示了callbacks.CSVLogger方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: resume_train
# 需要導入模塊: from keras import callbacks [as 別名]
# 或者: from keras.callbacks import CSVLogger [as 別名]
def resume_train(self, category, pretrainModel, modelName, initEpoch, batchSize=8, epochs=20):
    """Resume training a previously saved model starting at a given epoch.

    :param category: dataset category used to build the data generator
    :param pretrainModel: path of the saved model file to resume from
    :param modelName: name used to tag the CSV training log
    :param initEpoch: epoch index to resume from
    :param batchSize: mini-batch size (default 8)
    :param epochs: final epoch number to train up to (default 20)
    """
    self.modelName = modelName
    self.load_model(pretrainModel)
    refineNetflag = True
    self.nStackNum = 2

    # Keep resume bookkeeping next to the pretrained model we loaded.
    resumeDir = os.path.dirname(pretrainModel)

    dataSource = DataGenerator(
        category, os.path.join("../../data/train/Annotations", "train_split.csv"))
    batchGenerator = dataSource.generator_with_mask_ohem(
        graph=tf.get_default_graph(),
        kerasModel=self.model,
        batchSize=batchSize,
        inputSize=(self.inputHeight, self.inputWidth),
        nStackNum=self.nStackNum,
        flipFlag=False,
        cropFlag=False)

    errorCallback = NormalizedErrorCallBack(
        "../../trained_models/", category, refineNetflag, resumeFolder=resumeDir)

    # Timestamped CSV log written into the callback's output folder.
    timeTag = str(datetime.datetime.now().strftime('%H:%M'))
    logPath = os.path.join(errorCallback.get_folder_path(),
                           "csv_train_" + self.modelName + "_" + timeTag + ".csv")
    trainingLogger = CSVLogger(logPath)

    self.model.fit_generator(
        initial_epoch=initEpoch,
        generator=batchGenerator,
        steps_per_epoch=dataSource.get_dataset_size() // batchSize,
        epochs=epochs,
        callbacks=[errorCallback, trainingLogger])
示例2: train
# 需要導入模塊: from keras import callbacks [as 別名]
# 或者: from keras.callbacks import CSVLogger [as 別名]
def train():
    """Train the denoising model on the prepared training set.

    Loads the training images, normalizes them to [0, 1], builds (or resumes)
    the network, and fits it with checkpointing, CSV history logging and a
    step-decay learning-rate schedule.

    :return: the trained Keras model
    """
    # Load images and reshape to NHWC with a single grayscale channel.
    images = load_train_data()
    images = images.reshape((images.shape[0], images.shape[1], images.shape[2], 1))
    images = images.astype('float32') / 255.0

    # Model selection: resume from a pretrained snapshot, or build fresh.
    if args.pretrain:
        model = load_model(args.pretrain, compile=False)
    elif args.model == 'DnCNN':
        model = models.DnCNN()

    model.compile(optimizer=Adam(), loss=['mse'])

    # Callbacks: periodic checkpoints, append-mode CSV log, LR step decay.
    ckpt = ModelCheckpoint(save_dir + '/model_{epoch:02d}.h5',
                           monitor='val_loss', verbose=0, period=args.save_every)
    csv_logger = CSVLogger(save_dir + '/log.csv', append=True, separator=',')
    lr = LearningRateScheduler(step_decay)

    history = model.fit_generator(
        train_datagen(images, batch_size=args.batch_size),
        steps_per_epoch=len(images) // args.batch_size,
        epochs=args.epoch,
        verbose=1,
        callbacks=[ckpt, csv_logger, lr])
    return model
示例3: get_callbacks
# 需要導入模塊: from keras import callbacks [as 別名]
# 或者: from keras.callbacks import CSVLogger [as 別名]
def get_callbacks(model_file, initial_learning_rate=0.0001, learning_rate_drop=0.5, learning_rate_epochs=None,
                  learning_rate_patience=50, logging_file="training.log", verbosity=1,
                  early_stopping_patience=None):
    """Assemble the Keras callback list for training.

    Always includes best-weights checkpointing (monitoring val_acc) and an
    append-mode CSV logger. The LR policy is either a fixed step decay (when
    ``learning_rate_epochs`` is set) or reduce-on-plateau; early stopping is
    added only when ``early_stopping_patience`` is given.

    :return: list of Keras callbacks
    """
    cbs = [
        ModelCheckpoint(model_file, monitor='val_acc', save_best_only=True,
                        verbose=verbosity, save_weights_only=True),
        CSVLogger(logging_file, append=True),
    ]
    if learning_rate_epochs:
        # Deterministic step decay with the schedule parameters bound in.
        schedule = partial(step_decay, initial_lrate=initial_learning_rate,
                           drop=learning_rate_drop, epochs_drop=learning_rate_epochs)
        cbs.append(LearningRateScheduler(schedule))
    else:
        cbs.append(ReduceLROnPlateau(factor=learning_rate_drop,
                                     patience=learning_rate_patience,
                                     verbose=verbosity))
    if early_stopping_patience:
        cbs.append(EarlyStopping(verbose=verbosity, patience=early_stopping_patience))
    return cbs
示例4: train
# 需要導入模塊: from keras import callbacks [as 別名]
# 或者: from keras.callbacks import CSVLogger [as 別名]
def train(self, category, batchSize=8, epochs=20, lrschedule=False):
    """Train the model from scratch on one dataset category.

    :param category: dataset category to train on
    :param batchSize: mini-batch size (default 8)
    :param epochs: number of epochs to train (default 20)
    :param lrschedule: accepted for API compatibility; not used in this body
    """
    dataSource = DataGenerator(
        category, os.path.join("../../data/train/Annotations", "train_split.csv"))
    batchGenerator = dataSource.generator_with_mask_ohem(
        graph=tf.get_default_graph(),
        kerasModel=self.model,
        batchSize=batchSize,
        inputSize=(self.inputHeight, self.inputWidth),
        nStackNum=self.nStackNum,
        flipFlag=False,
        cropFlag=False)

    errorCallback = NormalizedErrorCallBack("../../trained_models/", category, True)
    # Timestamped CSV log inside the callback's output folder.
    logName = "csv_train_" + self.modelName + "_" + str(datetime.datetime.now().strftime('%H:%M')) + ".csv"
    trainingLogger = CSVLogger(os.path.join(errorCallback.get_folder_path(), logName))

    self.model.fit_generator(
        generator=batchGenerator,
        steps_per_epoch=dataSource.get_dataset_size() // batchSize,
        epochs=epochs,
        callbacks=[errorCallback, trainingLogger])
示例5: main
# 需要導入模塊: from keras import callbacks [as 別名]
# 或者: from keras.callbacks import CSVLogger [as 別名]
def main(rootdir, case, results):
    """Run one train/evaluate cycle for a single dataset case.

    :param rootdir: directory where weights, logs and predictions are written
    :param case: case identifier used to name the output files
    :param results: list the rounded test score is appended to (mutated)
    """
    train_x, train_y, valid_x, valid_y, test_x, test_y = get_data(args.dataset, case)
    input_shape = (train_x.shape[1], train_x.shape[2])
    num_class = train_y.shape[1]

    if not os.path.exists(rootdir):
        os.makedirs(rootdir)

    # Per-case output files.
    filepath = os.path.join(rootdir, str(case) + '.hdf5')
    saveto = os.path.join(rootdir, str(case) + '.csv')
    pred_dir = os.path.join(rootdir, str(case) + '_pred.txt')
    optimizer = Adam(lr=args.lr, clipnorm=args.clip)

    if args.train:
        model = creat_model(input_shape, num_class)
        # All training callbacks monitor validation accuracy.
        early_stop = EarlyStopping(monitor='val_acc', patience=15, mode='auto')
        reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.1, patience=5,
                                      mode='auto', cooldown=3., verbose=1)
        checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1,
                                     save_best_only=True, mode='auto')
        csv_logger = CSVLogger(saveto)
        # NTU/PKU runs get early stopping and LR reduction; others only log + checkpoint.
        if args.dataset == 'NTU' or args.dataset == 'PKU':
            callbacks_list = [csv_logger, checkpoint, early_stop, reduce_lr]
        else:
            callbacks_list = [csv_logger, checkpoint]
        model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
        model.fit(train_x, train_y, validation_data=[valid_x, valid_y], epochs=args.epochs,
                  batch_size=args.batch_size, callbacks=callbacks_list, verbose=2)

    # Evaluate the best checkpoint on the held-out test split.
    model = creat_model(input_shape, num_class)
    model.load_weights(filepath)
    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    scores = get_activation(model, test_x, test_y, pred_dir, VA=10, par=9)
    results.append(round(scores, 2))
開發者ID:microsoft,項目名稱:View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition,代碼行數:37,代碼來源:va-rnn.py
示例6: train
# 需要導入模塊: from keras import callbacks [as 別名]
# 或者: from keras.callbacks import CSVLogger [as 別名]
def train(model, data, args):
    """
    Training a CapsuleNet
    :param model: the CapsuleNet model
    :param data: a tuple containing training and testing data, like `((x_train, y_train), (x_test, y_test))`
    :param args: arguments
    :return: The trained model
    """
    (x_train, y_train), (x_test, y_test) = data

    log = callbacks.CSVLogger(args.save_dir + '/log.csv')
    # NOTE(review): `checkpoint` and `lr_decay` are constructed but never
    # passed to fit_generator below — the callback list comes from
    # snapshot.get_callbacks. Kept here to preserve the original behavior.
    checkpoint = callbacks.ModelCheckpoint(args.save_dir + '/weights-{epoch:02d}.h5',
                                           monitor='val_capsnet_acc',
                                           save_best_only=False, save_weights_only=True,
                                           verbose=1)
    lr_decay = callbacks.LearningRateScheduler(schedule=lambda epoch: args.lr * (args.lr_decay ** epoch))

    # Two-headed loss: margin loss for the capsule output, MSE for the
    # reconstruction, weighted by args.lam_recon.
    model.compile(optimizer=optimizers.Adam(lr=args.lr),
                  loss=[margin_loss, 'mse'],
                  loss_weights=[1., args.lam_recon],
                  metrics={'capsnet': 'accuracy'})

    def train_generator(x, y, batch_size, shift_fraction=0.):
        # Shift augmentation; yields ((image, label), (label, image)) pairs
        # feeding both the capsule head and the reconstruction head.
        datagen = ImageDataGenerator(width_shift_range=shift_fraction,
                                     height_shift_range=shift_fraction)
        flow = datagen.flow(x, y, batch_size=batch_size)
        while 1:
            x_batch, y_batch = flow.next()
            yield ([x_batch, y_batch], [y_batch, x_batch])

    model.fit_generator(generator=train_generator(x_train, y_train, args.batch_size, args.shift_fraction),
                        steps_per_epoch=int(y_train.shape[0] / args.batch_size),
                        epochs=args.epochs,
                        shuffle=True,
                        validation_data=[[x_test, y_test], [y_test, x_test]],
                        callbacks=snapshot.get_callbacks(log, model_prefix=model_prefix))

    model.save_weights(args.save_dir + '/trained_model.h5')
    print('Trained model saved to \'%s/trained_model.h5\'' % args.save_dir)
    return model
示例7: train
# 需要導入模塊: from keras import callbacks [as 別名]
# 或者: from keras.callbacks import CSVLogger [as 別名]
def train(train_set, val_set, cfg, config_name, resume, model_path):
    """Train a model, optionally resuming from a saved checkpoint.

    :param train_set: training data generator/sequence
    :param val_set: validation data generator/sequence
    :param cfg: configuration dict (model spec, epochs, worker settings)
    :param config_name: tag used in checkpoint and log filenames
    :param resume: when True, reload a compiled model and append to the log
    :param model_path: optional path of a saved model to load
    """
    if model_path is not None:
        if resume:
            print("Loading compiled model: " + model_path)
            model = keras.models.load_model(model_path, compile=True)
        else:
            print("Loading uncompiled model: " + model_path)
            model = keras.models.load_model(model_path, compile=False)
            model = compile_model(model, cfg["model"])
    else:
        print("Loading the network..")
        model = load_model(cfg["model"])

    # Training callbacks: CSV log (appended when resuming), best-val-loss
    # checkpoints, early stopping, and plateau-based LR reduction.
    csv_logger = CSVLogger('checkpoint/' + config_name +
                           '-training.log', append=resume)
    save_ckpt = ModelCheckpoint("checkpoint/weights.{epoch:02d}-{val_loss:.2f}" + config_name + ".hdf5",
                                monitor='val_loss', verbose=1,
                                save_best_only=True, period=1)
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0,
                                   patience=5, verbose=0, mode='auto')
    lr_schedule = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3,
                                    verbose=1, mode='auto', min_lr=10e-7)
    callback_list = [save_ckpt, early_stopping, lr_schedule, csv_logger]

    print("Start the training..")
    model.fit_generator(train_set,
                        epochs=cfg["nb_epoch"],
                        callbacks=callback_list,
                        validation_data=val_set,
                        workers=cfg["workers"],
                        use_multiprocessing=cfg["use_multiprocessing"],
                        shuffle=True)
示例8: train_gan
# 需要導入模塊: from keras import callbacks [as 別名]
# 或者: from keras.callbacks import CSVLogger [as 別名]
def train_gan(dataf):
    """Train the GAN on face data stored in an HDF5 file.

    :param dataf: path to an HDF5 file containing a 'faces' dataset
    """
    gen, disc, gan = build_networks()

    # Uncomment these, if you want to continue training from some snapshot.
    # (or load pretrained generator weights)
    #load_weights(gen, Args.genw)
    #load_weights(disc, Args.discw)

    # CSVLogger is driven manually here (outside model.fit) so the custom
    # batch loop can record losses into loss.csv.
    logger = CSVLogger('loss.csv')
    logger.on_train_begin()  # initialize csv file
    with h5py.File(dataf, 'r') as f:
        faces = f.get('faces')
        run_batches(gen, disc, gan, faces, logger, range(5000))
    logger.on_train_end()
示例9: train_model
# 需要導入模塊: from keras import callbacks [as 別名]
# 或者: from keras.callbacks import CSVLogger [as 別名]
def train_model(self, train_generator, steps_per_epoch=None, epochs=1, validation_generator=None,
                validation_steps=None, workers=1, use_multiprocessing=False, shuffle=True, initial_epoch=0,
                save_history=False, save_model_per_epoch=False):
    """Fit the model on a generator, optionally logging history to CSV and
    checkpointing every epoch under the package's ``saved_items`` directory.

    :return: tuple of (Keras History object, trained model)
    """
    # All artifacts go to <package parent>/saved_items; create it on demand.
    saved_items_dir = os.path.join(os.path.dirname(__file__), os.pardir, 'saved_items')
    if not os.path.exists(saved_items_dir):
        os.makedirs(saved_items_dir)

    cb_list = []
    if save_history:
        cb_list.append(CSVLogger(os.path.join(saved_items_dir, 'history'), append=True))
    if save_model_per_epoch:
        cb_list.append(ModelCheckpoint(
            filepath=os.path.join(saved_items_dir, 'bidaf_{epoch:02d}.h5'), verbose=1))

    history = self.model.fit_generator(train_generator, steps_per_epoch=steps_per_epoch, epochs=epochs,
                                       callbacks=cb_list, validation_data=validation_generator,
                                       validation_steps=validation_steps, workers=workers,
                                       use_multiprocessing=use_multiprocessing, shuffle=shuffle,
                                       initial_epoch=initial_epoch)

    # Without per-epoch checkpoints, save the final model once at the end.
    if not save_model_per_epoch:
        self.model.save(os.path.join(saved_items_dir, 'bidaf.h5'))
    return history, self.model
示例10: pretrain
# 需要導入模塊: from keras import callbacks [as 別名]
# 或者: from keras.callbacks import CSVLogger [as 別名]
def pretrain(self, x, y=None, optimizer='adam', epochs=200, batch_size=256, save_dir='results/temp'):
    """Pretrain the autoencoder on x with MSE reconstruction loss.

    Training progress is written to ``<save_dir>/pretrain_log.csv`` and the
    final weights to ``<save_dir>/ae_weights.h5``. When labels ``y`` are
    supplied, clustering quality (acc/nmi) of the encoder features is printed
    roughly every epochs/10 epochs.

    :param x: training data fed as both input and reconstruction target
    :param y: optional ground-truth labels used only for the acc/nmi report
    :param optimizer: optimizer passed to ``autoencoder.compile``
    :param epochs: number of pretraining epochs
    :param batch_size: mini-batch size
    :param save_dir: directory for the CSV log and saved weights
        (NOTE(review): assumed to already exist — nothing here creates it)
    """
    print('...Pretraining...')
    self.autoencoder.compile(optimizer=optimizer, loss='mse')
    csv_logger = callbacks.CSVLogger(save_dir + '/pretrain_log.csv')
    cb = [csv_logger]
    if y is not None:
        class PrintACC(callbacks.Callback):
            # Periodically clusters the encoder's features with k-means and
            # prints accuracy/NMI against the provided labels.
            def __init__(self, x, y):
                self.x = x
                self.y = y
                super(PrintACC, self).__init__()

            def on_epoch_end(self, epoch, logs=None):
                # Only report every epochs/10 epochs (and always when
                # epochs < 10, since int(epochs/10) is then 0).
                if int(epochs/10) != 0 and epoch % int(epochs/10) != 0:
                    return
                # self.model is the Keras model attached to this callback;
                # take the output of the middle 'encoder_*' layer as features.
                feature_model = Model(self.model.input,
                                      self.model.get_layer(
                                          'encoder_%d' % (int(len(self.model.layers) / 2) - 1)).output)
                features = feature_model.predict(self.x)
                km = KMeans(n_clusters=len(np.unique(self.y)), n_init=20, n_jobs=4)
                y_pred = km.fit_predict(features)
                # print()
                print(' '*8 + '|==> acc: %.4f, nmi: %.4f <==|'
                      % (metrics.acc(self.y, y_pred), metrics.nmi(self.y, y_pred)))

        cb.append(PrintACC(x, y))

    # begin pretraining
    t0 = time()
    self.autoencoder.fit(x, x, batch_size=batch_size, epochs=epochs, callbacks=cb)
    print('Pretraining time: %ds' % round(time() - t0))
    self.autoencoder.save_weights(save_dir + '/ae_weights.h5')
    print('Pretrained weights are saved to %s/ae_weights.h5' % save_dir)
    self.pretrained = True
示例11: get_callbacks
# 需要導入模塊: from keras import callbacks [as 別名]
# 或者: from keras.callbacks import CSVLogger [as 別名]
def get_callbacks(model_file, initial_learning_rate=0.0001, learning_rate_drop=0.5, learning_rate_epochs=None,
                  learning_rate_patience=50, logging_file="training.log", verbosity=1,
                  early_stopping_patience=None):
    """Build the training callback list: best-model checkpointing, CSV
    logging, a learning-rate policy, and optional early stopping.

    The LR policy is a fixed step decay when ``learning_rate_epochs`` is set,
    otherwise reduce-on-plateau with the given patience.

    :return: list of Keras callbacks
    """
    result = [
        ModelCheckpoint(model_file, save_best_only=True),
        CSVLogger(logging_file, append=True),
    ]
    if learning_rate_epochs:
        decay_fn = partial(step_decay, initial_lrate=initial_learning_rate,
                           drop=learning_rate_drop, epochs_drop=learning_rate_epochs)
        result.append(LearningRateScheduler(decay_fn))
    else:
        result.append(ReduceLROnPlateau(factor=learning_rate_drop,
                                        patience=learning_rate_patience,
                                        verbose=verbosity))
    if early_stopping_patience:
        result.append(EarlyStopping(verbose=verbosity, patience=early_stopping_patience))
    return result
示例12: train
# 需要導入模塊: from keras import callbacks [as 別名]
# 或者: from keras.callbacks import CSVLogger [as 別名]
def train(model, epochs, patience, output_path, nproc, train_obj, val_obj):
    """
    :param model: model to train (must be compiled)
    :type model: Model
    :param epochs: max number of epochs to train.
    :type epochs: int
    :param patience: Stop after these many layers if val. loss doesn't decrease
    :type patience: int
    :param output_path: paths to save weights and logs
    :type output_path: str
    :param nproc: number of processors for training
    :type nproc: int
    :param train_obj: DataGenerator training object for training
    :type train_obj: DataGenerator
    :param val_obj: DataGenerator training object for validation
    :type val_obj: DataGenerator
    :return: model, history object
    """
    # Multiprocessing only pays off with more than one worker.
    use_multiprocessing = nproc != 1

    # Callbacks for training and validation
    stopper = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=patience,
                            verbose=1, mode='min', restore_best_weights=True)
    checkpointer = ModelCheckpoint(output_path + 'weights.h5', monitor='val_loss',
                                   verbose=1, save_best_only=True,
                                   save_weights_only=False, mode='min')
    csv_name = output_path + 'training_log.csv'
    logger = CSVLogger(csv_name, append=False)

    train_history = model.fit_generator(generator=train_obj,
                                        validation_data=val_obj,
                                        epochs=epochs,
                                        use_multiprocessing=use_multiprocessing,
                                        max_queue_size=10,
                                        workers=nproc,
                                        shuffle=True,
                                        callbacks=[stopper, checkpointer, logger],
                                        verbose=1)
    return model, train_history
示例13: initialize_parameters
# 需要導入模塊: from keras import callbacks [as 別名]
# 或者: from keras.callbacks import CSVLogger [as 別名]
def initialize_parameters():
    """Build and finalize the CANDLE parameters for the MNIST MLP benchmark.

    :return: the finalized parameter collection from candle
    """
    mnist_common = mnist.MNIST(mnist.file_path,
                               'mnist_params.txt',
                               'keras',
                               prog='mnist_mlp',
                               desc='MNIST example')
    # Initialize parameters
    gParameters = candle.finalize_parameters(mnist_common)
    # Fix: removed a dead `CSVLogger('{}/params.log'.format(gParameters))` —
    # the logger was never used or returned, and formatting the whole
    # parameter collection into a path produced a nonsensical file name.
    return gParameters
示例14: initialize_parameters
# 需要導入模塊: from keras import callbacks [as 別名]
# 或者: from keras.callbacks import CSVLogger [as 別名]
def initialize_parameters():
    """Build and finalize the CANDLE parameters for the MNIST CNN benchmark.

    :return: the finalized parameter collection from candle
    """
    mnist_common = mnist.MNIST(mnist.file_path,
                               'mnist_params.txt',
                               'keras',
                               prog='mnist_cnn',
                               desc='MNIST CNN example')
    # Initialize parameters
    gParameters = candle.finalize_parameters(mnist_common)
    # Fix: removed a dead `CSVLogger('{}/params.log'.format(gParameters))` —
    # the logger was never used or returned, and formatting the whole
    # parameter collection into a path produced a nonsensical file name.
    return gParameters
示例15: get_callbacks
# 需要導入模塊: from keras import callbacks [as 別名]
# 或者: from keras.callbacks import CSVLogger [as 別名]
def get_callbacks(arguments):
    """Create the callback list for segmentation training.

    Networks whose name contains 'caps' are monitored on the segmentation
    head's dice metric; all others on the plain dice metric. All monitors run
    in 'max' mode since higher dice is better.

    :param arguments: parsed CLI arguments with net/log/checkpoint settings
    :return: list of Keras callbacks
    """
    monitor_name = 'val_out_seg_dice_hard' if 'caps' in arguments.net else 'val_dice_hard'

    csv_logger = CSVLogger(join(arguments.log_dir, arguments.output_name + '_log_' + arguments.time + '.csv'),
                           separator=',')
    tb = TensorBoard(arguments.tf_log_dir, batch_size=arguments.batch_size, histogram_freq=0)
    model_checkpoint = ModelCheckpoint(join(arguments.check_dir, arguments.output_name + '_model_' + arguments.time + '.hdf5'),
                                       monitor=monitor_name, save_best_only=True,
                                       save_weights_only=True, verbose=1, mode='max')
    lr_reducer = ReduceLROnPlateau(monitor=monitor_name, factor=0.05, cooldown=0,
                                   patience=5, verbose=1, mode='max')
    early_stopper = EarlyStopping(monitor=monitor_name, min_delta=0, patience=25,
                                  verbose=0, mode='max')

    return [model_checkpoint, csv_logger, lr_reducer, early_stopper, tb]