本文整理匯總了Python中utils.setup_logger方法的典型用法代碼示例。如果您正苦於以下問題:Python utils.setup_logger方法的具體用法?Python utils.setup_logger怎麽用?Python utils.setup_logger使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類utils
的用法示例。
在下文中一共展示了utils.setup_logger方法的2個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: train
# 需要導入模塊: import utils [as 別名]
# 或者: from utils import setup_logger [as 別名]
def train(args):
    """Train the re-id baseline model according to the global config.

    Args:
        args: parsed CLI arguments. ``args.config_file`` optionally points to
            a YAML config merged into ``cfg``; ``args.opts`` is a flat list of
            key/value overrides applied on top of it.
    """
    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()  # config is immutable from here on

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # BUGFIX: only archive the config file when one was actually supplied;
    # the original called shutil.copy unconditionally and crashed on "".
    if args.config_file != "":
        shutil.copy(args.config_file, cfg.OUTPUT_DIR)

    num_gpus = torch.cuda.device_count()
    logger = setup_logger('reid_baseline', output_dir, 0)
    logger.info('Using {} GPUS'.format(num_gpus))
    logger.info(args)
    logger.info('Running with config:\n{}'.format(cfg))

    train_dl, val_dl, num_query, num_classes = make_dataloader(cfg, num_gpus)
    model = build_model(cfg, num_classes)
    loss_func = make_loss(cfg, num_classes)
    trainer = BaseTrainer(cfg, model, train_dl, val_dl,
                          loss_func, num_query, num_gpus)
    # Trainer owns the epoch count and the per-batch / per-epoch hooks.
    for epoch in range(trainer.epochs):
        for batch in trainer.train_dl:
            trainer.step(batch)
            trainer.handle_new_batch()
        trainer.handle_new_epoch()
示例2: __init__
# 需要導入模塊: import utils [as 別名]
# 或者: from utils import setup_logger [as 別名]
def __init__(self, config, model, criterion, ctx, sample_input):
    """Set up output dirs, logging, tensorboard, optimizer and checkpoints.

    Args:
        config: experiment configuration dict (reads ``trainer``, ``dataset``
            and ``name`` keys).
        model: gluon model to train; ``model.model_name`` tags the run dir.
        criterion: loss function used by the training loop.
        ctx: mxnet device context(s) to train on.
        sample_input: dummy batch, used only to trace the network graph for
            tensorboard visualisation.
    """
    # Resolve the output dir relative to this module's parent directory.
    config['trainer']['output_dir'] = os.path.join(str(pathlib.Path(os.path.abspath(__name__)).parent),
                                                   config['trainer']['output_dir'])
    config['name'] = config['name'] + '_' + model.model_name
    self.save_dir = os.path.join(config['trainer']['output_dir'], config['name'])
    self.checkpoint_dir = os.path.join(self.save_dir, 'checkpoint')
    self.alphabet = config['dataset']['alphabet']
    # Fresh run (neither resume nor finetune): wipe any stale output dir.
    if config['trainer']['resume_checkpoint'] == '' and config['trainer']['finetune_checkpoint'] == '':
        shutil.rmtree(self.save_dir, ignore_errors=True)
    if not os.path.exists(self.checkpoint_dir):
        os.makedirs(self.checkpoint_dir)
    # Save this experiment's alphabet next to the saved models.
    save(list(self.alphabet), os.path.join(self.save_dir, 'dict.txt'))
    self.global_step = 0
    self.start_epoch = 0
    self.config = config
    self.model = model
    self.criterion = criterion
    # logger and tensorboard
    self.tensorboard_enable = self.config['trainer']['tensorboard']
    self.epochs = self.config['trainer']['epochs']
    self.display_interval = self.config['trainer']['display_interval']
    if self.tensorboard_enable:
        from mxboard import SummaryWriter
        self.writer = SummaryWriter(self.save_dir, verbose=False)
    self.logger = setup_logger(os.path.join(self.save_dir, 'train.log'))
    self.logger.info(pformat(self.config))
    self.logger.info(self.model)
    # device set
    self.ctx = ctx
    mx.random.seed(2)  # fix the random seed for reproducibility
    self.logger.info('train with mxnet: {} and device: {}'.format(mx.__version__, self.ctx))
    self.metrics = {'val_acc': 0, 'train_loss': float('inf'), 'best_model': ''}
    schedule = self._initialize('lr_scheduler', mx.lr_scheduler)
    optimizer = self._initialize('optimizer', mx.optimizer, lr_scheduler=schedule)
    self.trainer = gluon.Trainer(self.model.collect_params(), optimizer=optimizer)
    # NOTE: '_laod_checkpoint' spelling kept — that is the method name this
    # class defines elsewhere; renaming here would break the call.
    if self.config['trainer']['resume_checkpoint'] != '':
        self._laod_checkpoint(self.config['trainer']['resume_checkpoint'], resume=True)
    elif self.config['trainer']['finetune_checkpoint'] != '':
        self._laod_checkpoint(self.config['trainer']['finetune_checkpoint'], resume=False)
    if self.tensorboard_enable:
        try:
            # add graph: run a dummy forward pass so mxboard can trace it.
            self.model(sample_input)
            self.writer.add_graph(model)
        except Exception:
            # Best-effort only: a failed graph trace must not abort training.
            # (Was a bare 'except:', which also swallowed KeyboardInterrupt.)
            self.logger.error(traceback.format_exc())
            self.logger.warning('add graph to tensorboard failed')