本文整理汇总了Python中tensorpack.logger.set_logger_dir方法的典型用法代码示例。如果您正苦于以下问题:Python logger.set_logger_dir方法的具体用法?Python logger.set_logger_dir怎么用?Python logger.set_logger_dir使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类tensorpack.logger的用法示例。
在下文中一共展示了logger.set_logger_dir方法的1个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: run_once
# Required import: from tensorpack import logger
# Alternatively: from tensorpack.logger import set_logger_dir
def run_once(self, opt, sess_init=None, save_dir=None):
    """Run a single training session configured by ``opt``.

    Args:
        opt: dict of run options; keys used here include
            'train_batch_size', 'infer_batch_size', 'model_flags',
            'nr_epochs', and 'manual_parameters'.
        sess_init: optional tensorpack session initializer (e.g. restored
            weights) attached to the TrainConfig before launching.
        save_dir: optional log/checkpoint directory; falls back to
            ``self.save_dir`` when not given.

    NOTE(review): ``nr_gpus`` is not defined in this method — presumably a
    module-level global set elsewhere in the file; confirm before reuse.
    """
    # Dataflows for training and validation.
    train_datagen = self.get_datagen(opt['train_batch_size'], mode='train')
    valid_datagen = self.get_datagen(opt['infer_batch_size'], mode='valid')

    # The logger directory must be set before ModelSaver is constructed,
    # since ModelSaver writes checkpoints relative to it.
    logger.set_logger_dir(self.save_dir if save_dir is None else save_dir)

    # Instantiate the model class with its configured flags.
    model = self.get_model()(**opt['model_flags'])

    # Keep one checkpoint per epoch.
    callbacks = [ModelSaver(max_to_keep=opt['nr_epochs'])]

    # Register manually-scheduled hyperparameters: param_info[0] is the
    # initial value, param_info[1] the epoch schedule.
    for param_name, param_info in opt['manual_parameters'].items():
        model.add_manual_variable(param_name, param_info[0])
        callbacks.append(ScheduledHyperParamSetter(param_name, param_info[1]))

    # Multi-GPU validation inference (with mandatory queue prefetch),
    # then track the best 'valid_dice' seen so far.
    callbacks.append(DataParallelInferenceRunner(
        valid_datagen, [StatCollector()], list(range(nr_gpus))))
    callbacks.append(MaxSaver('valid_dice'))

    config = TrainConfig(
        model=model,
        callbacks=callbacks,
        dataflow=train_datagen,
        steps_per_epoch=train_datagen.size() // nr_gpus,
        max_epoch=opt['nr_epochs'],
    )
    config.session_init = sess_init
    launch_train_with_config(config, SyncMultiGPUTrainerParameterServer(nr_gpus))
    tf.reset_default_graph()  # remove the entire graph in case of multiple runs
    return