本文整理汇总了Python中utils.setup_logger方法的典型用法代码示例。如果您正苦于以下问题:Python utils.setup_logger方法的具体用法?Python utils.setup_logger怎么用?Python utils.setup_logger使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块utils的用法示例。
在下文中一共展示了utils.setup_logger方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: train
# 需要导入模块: import utils [as 别名]
# 或者: from utils import setup_logger [as 别名]
def train(args):
    """Build the re-id training stack from the global ``cfg`` and run training.

    Args:
        args: parsed CLI namespace with ``config_file`` (path to a YAML
            config, or "" for defaults) and ``opts`` (flat key/value list
            of config overrides).
    """
    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Only archive the config file when one was actually supplied:
    # shutil.copy("") would raise FileNotFoundError.
    if args.config_file != "":
        shutil.copy(args.config_file, output_dir)

    num_gpus = torch.cuda.device_count()
    logger = setup_logger('reid_baseline', output_dir, 0)
    logger.info('Using {} GPUS'.format(num_gpus))
    logger.info(args)
    logger.info('Running with config:\n{}'.format(cfg))

    train_dl, val_dl, num_query, num_classes = make_dataloader(cfg, num_gpus)
    model = build_model(cfg, num_classes)
    loss_func = make_loss(cfg, num_classes)
    trainer = BaseTrainer(cfg, model, train_dl, val_dl,
                          loss_func, num_query, num_gpus)
    # The trainer exposes per-batch and per-epoch hooks; drive them manually.
    for epoch in range(trainer.epochs):
        for batch in trainer.train_dl:
            trainer.step(batch)
            trainer.handle_new_batch()
        trainer.handle_new_epoch()
示例2: __init__
# 需要导入模块: import utils [as 别名]
# 或者: from utils import setup_logger [as 别名]
def __init__(self, config, model, criterion, ctx, sample_input):
    """Prepare output dirs, logging, tensorboard, optimizer and optional resume.

    Args:
        config: nested dict of trainer/dataset settings.
        model: gluon model to train; must expose ``model_name`` and
            ``collect_params()``.
        criterion: loss function used during training.
        ctx: mxnet device context(s) to train on.
        sample_input: a batch-shaped input used only to trace the model so
            its graph can be written to tensorboard.
    """
    # Resolve the output dir relative to this file's parent directory.
    config['trainer']['output_dir'] = os.path.join(str(pathlib.Path(os.path.abspath(__name__)).parent),
                                                   config['trainer']['output_dir'])
    config['name'] = config['name'] + '_' + model.model_name
    self.save_dir = os.path.join(config['trainer']['output_dir'], config['name'])
    self.checkpoint_dir = os.path.join(self.save_dir, 'checkpoint')
    self.alphabet = config['dataset']['alphabet']
    # Fresh run (neither resuming nor finetuning): wipe any previous output.
    if config['trainer']['resume_checkpoint'] == '' and config['trainer']['finetune_checkpoint'] == '':
        shutil.rmtree(self.save_dir, ignore_errors=True)
    # exist_ok avoids the check-then-create race of the original
    # `if not os.path.exists(...)` guard.
    os.makedirs(self.checkpoint_dir, exist_ok=True)
    # Save this experiment's alphabet next to the model checkpoints.
    save(list(self.alphabet), os.path.join(self.save_dir, 'dict.txt'))
    self.global_step = 0
    self.start_epoch = 0
    self.config = config
    self.model = model
    self.criterion = criterion
    # logger and tensorboard
    self.tensorboard_enable = self.config['trainer']['tensorboard']
    self.epochs = self.config['trainer']['epochs']
    self.display_interval = self.config['trainer']['display_interval']
    if self.tensorboard_enable:
        from mxboard import SummaryWriter
        self.writer = SummaryWriter(self.save_dir, verbose=False)
    self.logger = setup_logger(os.path.join(self.save_dir, 'train.log'))
    self.logger.info(pformat(self.config))
    self.logger.info(self.model)
    # device set
    self.ctx = ctx
    mx.random.seed(2)  # fix the random seed for reproducibility
    self.logger.info('train with mxnet: {} and device: {}'.format(mx.__version__, self.ctx))
    self.metrics = {'val_acc': 0, 'train_loss': float('inf'), 'best_model': ''}
    schedule = self._initialize('lr_scheduler', mx.lr_scheduler)
    optimizer = self._initialize('optimizer', mx.optimizer, lr_scheduler=schedule)
    self.trainer = gluon.Trainer(self.model.collect_params(), optimizer=optimizer)
    # NOTE(review): `_laod_checkpoint` is misspelled upstream; the name is
    # kept because the method is defined elsewhere in this class.
    if self.config['trainer']['resume_checkpoint'] != '':
        self._laod_checkpoint(self.config['trainer']['resume_checkpoint'], resume=True)
    elif self.config['trainer']['finetune_checkpoint'] != '':
        self._laod_checkpoint(self.config['trainer']['finetune_checkpoint'], resume=False)
    if self.tensorboard_enable:
        try:
            # Trace the model once so its graph can be added to tensorboard.
            self.model(sample_input)
            self.writer.add_graph(model)
        except Exception:
            # Best effort: a failed graph export must not abort training,
            # but a bare `except:` would also swallow KeyboardInterrupt.
            self.logger.error(traceback.format_exc())
            self.logger.warning('add graph to tensorboard failed')