本文整理汇总了Python中utils.utils.create_logger方法的典型用法代码示例。如果您正苦于以下问题:Python utils.create_logger方法的具体用法?Python utils.create_logger怎么用?Python utils.create_logger使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块 utils.utils 的用法示例。
在下文中一共展示了utils.create_logger方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main
# 需要导入模块: from utils import utils [as 别名]
# 或者: from utils.utils import create_logger [as 别名]
def main():
    """Evaluate a trained GAN generator checkpoint.

    Loads generator weights from ``args.load_path`` (preferring the EMA
    weights when present), then computes and logs the Inception score and
    FID on the configured dataset.
    """
    args = cfg.parse_args()
    torch.cuda.manual_seed(args.random_seed)

    # Validate the CLI arguments before any heavy setup.
    assert args.exp_name
    assert args.load_path.endswith('.pth')
    assert os.path.exists(args.load_path)
    args.path_helper = set_log_dir('logs_eval', args.exp_name)
    logger = create_logger(args.path_helper['log_path'], phase='test')

    # set tf env: build the TensorFlow Inception graph used for IS/FID.
    _init_inception()
    inception_path = check_or_download_inception(None)
    create_inception_graph(inception_path)

    # import network
    # NOTE(review): eval() on a config-derived string — safe only while
    # gen_model comes from a trusted config; getattr(models, args.gen_model)
    # would avoid arbitrary-code execution.
    gen_net = eval('models.'+args.gen_model+'.Generator')(args=args).cuda()

    # fid stat: pick the pre-computed FID statistics matching the dataset.
    if args.dataset.lower() == 'cifar10':
        fid_stat = 'fid_stat/fid_stats_cifar10_train.npz'
    elif args.dataset.lower() == 'stl10':
        fid_stat = 'fid_stat/stl10_train_unlabeled_fid_stats_48.npz'
    else:
        raise NotImplementedError(f'no fid stat for {args.dataset.lower()}')
    assert os.path.exists(fid_stat)

    # initial: fixed latent batch (25 samples) for deterministic evaluation.
    fixed_z = torch.cuda.FloatTensor(np.random.normal(0, 1, (25, args.latent_dim)))

    # set writer
    logger.info(f'=> resuming from {args.load_path}')
    checkpoint_file = args.load_path
    assert os.path.exists(checkpoint_file)
    checkpoint = torch.load(checkpoint_file)
    # Prefer the exponential-moving-average generator weights when available.
    if 'avg_gen_state_dict' in checkpoint:
        gen_net.load_state_dict(checkpoint['avg_gen_state_dict'])
        epoch = checkpoint['epoch']
        logger.info(f'=> loaded checkpoint {checkpoint_file} (epoch {epoch})')
    else:
        gen_net.load_state_dict(checkpoint)
        logger.info(f'=> loaded checkpoint {checkpoint_file}')
    logger.info(args)

    writer_dict = {
        'writer': SummaryWriter(args.path_helper['log_path']),
        'valid_global_steps': 0,
    }
    inception_score, fid_score = validate(args, fixed_z, fid_stat, gen_net, writer_dict, clean_dir=False)
    logger.info(f'Inception score: {inception_score}, FID score: {fid_score}.')
示例2: main
# 需要导入模块: from utils import utils [as 别名]
# 或者: from utils.utils import create_logger [as 别名]
def main():
    """Entry point for (optionally distributed) training.

    Parses CLI args, merges them into the global config, creates the run
    logger, then either spawns one worker process per GPU (when
    MULTIPROCESSING_DISTRIBUTED is set) or runs a single worker in-process.
    """
    args = parse_args()
    update_config(cfg, args)

    # RANK is set after parsing, so the frozen config must be thawed briefly.
    cfg.defrost()
    cfg.RANK = args.rank
    cfg.freeze()

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'train'
    )

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    # With dist_url == "env://", the launcher provides WORLD_SIZE via env.
    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])

    args.distributed = args.world_size > 1 or cfg.MULTIPROCESSING_DISTRIBUTED

    ngpus_per_node = torch.cuda.device_count()
    if cfg.MULTIPROCESSING_DISTRIBUTED:
        # Since we have ngpus_per_node processes per node, the total world_size
        # needs to be adjusted accordingly
        args.world_size = ngpus_per_node * args.world_size
        # Use torch.multiprocessing.spawn to launch distributed processes: the
        # main_worker process function
        mp.spawn(
            main_worker,
            nprocs=ngpus_per_node,
            args=(ngpus_per_node, args, final_output_dir, tb_log_dir)
        )
    else:
        # Simply call main_worker function. The first argument takes the place
        # of the spawned process index: a comma-separated list of GPU ids.
        main_worker(
            ','.join([str(i) for i in cfg.GPUS]),
            ngpus_per_node,
            args,
            final_output_dir,
            tb_log_dir
        )
示例3: main
# 需要导入模块: from utils import utils [as 别名]
# 或者: from utils.utils import create_logger [as 别名]
def main():
    """Validate an image-classification model on the configured test set.

    Builds the model named in the config, loads weights (explicit file or
    the run's final_state checkpoint), and runs ``validate`` over an
    ImageNet-style evaluation pipeline.
    """
    args = parse_args()

    logger, final_output_dir, tb_log_dir = create_logger(
        config, args.cfg, 'valid')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    # cudnn related setting
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED

    # NOTE(review): eval() on a config string — trusted config assumed;
    # getattr(models, config.MODEL.NAME).get_cls_net would be safer.
    model = eval('models.'+config.MODEL.NAME+'.get_cls_net')(
        config)

    # Log a parameter/FLOPs summary using a dummy input of the configured size
    # (IMAGE_SIZE is stored as [width, height], hence the index swap).
    dump_input = torch.rand(
        (1, 3, config.MODEL.IMAGE_SIZE[1], config.MODEL.IMAGE_SIZE[0])
    )
    logger.info(get_model_summary(model, dump_input))

    # Load weights either from an explicit file or from the run's output dir.
    if config.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(config.TEST.MODEL_FILE))
        model.load_state_dict(torch.load(config.TEST.MODEL_FILE))
    else:
        model_state_file = os.path.join(final_output_dir,
                                        'final_state.pth.tar')
        logger.info('=> loading model from {}'.format(model_state_file))
        model.load_state_dict(torch.load(model_state_file))

    gpus = list(config.GPUS)
    model = torch.nn.DataParallel(model, device_ids=gpus).cuda()

    # define loss function (criterion) and optimizer
    criterion = torch.nn.CrossEntropyLoss().cuda()

    # Data loading code: standard ImageNet eval transform — resize to
    # crop_size / 0.875, then center-crop to the model's input size.
    valdir = os.path.join(config.DATASET.ROOT,
                          config.DATASET.TEST_SET)
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    valid_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Resize(int(config.MODEL.IMAGE_SIZE[0] / 0.875)),
            transforms.CenterCrop(config.MODEL.IMAGE_SIZE[0]),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=config.TEST.BATCH_SIZE_PER_GPU*len(gpus),
        shuffle=False,
        num_workers=config.WORKERS,
        pin_memory=True
    )

    # evaluate on validation set
    validate(config, valid_loader, model, criterion, final_output_dir,
             tb_log_dir, None)
示例4: main
# 需要导入模块: from utils import utils [as 别名]
# 或者: from utils.utils import create_logger [as 别名]
def main():
    """Validate a multiview pose-estimation model.

    Builds the backbone + multiview fusion model named in the config, loads
    either an explicit checkpoint or the run's best/final state, and runs
    ``validate`` on the configured multiview test dataset.
    """
    args = parse_args()
    reset_config(config, args)

    logger, final_output_dir, tb_log_dir = create_logger(
        config, args.cfg, 'valid')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    # cudnn related setting
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED

    # NOTE(review): eval() on config strings — trusted config assumed;
    # getattr-based lookup would avoid arbitrary-code execution.
    backbone_model = eval('models.' + config.BACKBONE_MODEL + '.get_pose_net')(
        config, is_train=False)
    model = eval('models.' + config.MODEL + '.get_multiview_pose_net')(
        backbone_model, config)

    # Load weights: explicit file, else best/final checkpoint per TEST.STATE.
    if config.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(config.TEST.MODEL_FILE))
        model.load_state_dict(torch.load(config.TEST.MODEL_FILE))
    else:
        model_path = 'model_best.pth.tar' if config.TEST.STATE == 'best' else 'final_state.pth.tar'
        model_state_file = os.path.join(final_output_dir, model_path)
        logger.info('=> loading model from {}'.format(model_state_file))
        model.load_state_dict(torch.load(model_state_file))

    gpus = [int(i) for i in config.GPUS.split(',')]
    model = torch.nn.DataParallel(model, device_ids=gpus).cuda()

    # define loss function (criterion) and optimizer
    criterion = JointsMSELoss(
        use_target_weight=config.LOSS.USE_TARGET_WEIGHT).cuda()

    # Data loading code
    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    valid_dataset = eval('dataset.' + config.DATASET.TEST_DATASET)(
        config, config.DATASET.TEST_SUBSET, False,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))

    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=config.TEST.BATCH_SIZE * len(gpus),
        shuffle=False,
        num_workers=config.WORKERS,
        pin_memory=True)

    # evaluate on validation set
    validate(config, valid_loader, valid_dataset, model, criterion,
             final_output_dir, tb_log_dir)
示例5: main
# 需要导入模块: from utils import utils [as 别名]
# 或者: from utils.utils import create_logger [as 别名]
def main():
    """Run 3-D pose recovery (RPSM) over pre-computed 2-D heatmaps.

    For every grouping of camera views in the test dataset, reconstructs a
    3-D pose with the recursive pictorial-structure model and accumulates
    the MPJPE against the ground truth; prints the mean MPJPE at the end.
    """
    args = parse_args()
    logger, final_output_dir, tb_log_dir = create_logger(
        config, args.cfg, 'test3d')
    prediction_path = os.path.join(final_output_dir,
                                   config.TEST.HEATMAP_LOCATION_FILE)
    test_dataset = eval('dataset.' + config.DATASET.TEST_DATASET)(
        config, config.DATASET.TEST_SUBSET, False)

    # Pairwise limb-location constraints for the pictorial-structure model.
    # NOTE(review): pickle.load on a config-specified file — trusted input assumed.
    pairwise_file = config.PICT_STRUCT.PAIRWISE_FILE
    with open(pairwise_file, 'rb') as f:
        pairwise = pickle.load(f)['pairwise_constrain']

    # FIX: open the HDF5 file read-only via a context manager; the original
    # called h5py.File(prediction_path) with no mode and never closed it,
    # leaking the handle. The dataset is read lazily, so the whole loop
    # stays inside the `with` block.
    with h5py.File(prediction_path, 'r') as heatmap_file:
        all_heatmaps = heatmap_file['heatmaps']

        cnt = 0  # running index into the flat heatmap dataset
        grouping = test_dataset.grouping
        mpjpes = []
        for items in grouping:
            heatmaps = []
            boxes = []
            poses = []
            cameras = []
            for idx in items:
                datum = test_dataset.db[idx]
                camera = datum['camera']
                cameras.append(camera)
                # Ground-truth joints, converted from camera to world frame.
                poses.append(
                    camera_to_world_frame(datum['joints_3d_camera'], camera['R'],
                                          camera['T']))
                box = {}
                box['scale'] = np.array(datum['scale'])
                box['center'] = np.array(datum['center'])
                boxes.append(box)
                heatmaps.append(all_heatmaps[cnt])
                cnt += 1
            heatmaps = np.array(heatmaps)

            # This demo uses GT root locations and limb length; but can be replaced by statistics
            grid_center = poses[0][0]
            body = HumanBody()
            limb_length = compute_limb_length(body, poses[0])
            prediction = rpsm(cameras, heatmaps, boxes, grid_center, limb_length,
                              pairwise, config)
            # Mean per-joint position error for this grouping.
            mpjpe = np.mean(np.sqrt(np.sum((prediction - poses[0])**2, axis=1)))
            mpjpes.append(mpjpe)

    print(np.mean(mpjpes))
示例6: main
# 需要导入模块: from utils import utils [as 别名]
# 或者: from utils.utils import create_logger [as 别名]
def main():
    """Validate a single-view pose-estimation model on the test set.

    Builds the model named in the config, loads weights (explicit file or
    the run's final_state checkpoint), and runs ``validate`` with the
    JointsMSELoss criterion.
    """
    args = parse_args()
    reset_config(config, args)

    logger, final_output_dir, tb_log_dir = create_logger(
        config, args.cfg, 'valid')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    # cudnn related setting
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED

    # NOTE(review): eval() on a config string — trusted config assumed;
    # getattr(models, config.MODEL.NAME).get_pose_net would be safer.
    model = eval('models.'+config.MODEL.NAME+'.get_pose_net')(
        config, is_train=False
    )

    # Load weights either from an explicit file or from the run's output dir.
    if config.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(config.TEST.MODEL_FILE))
        model.load_state_dict(torch.load(config.TEST.MODEL_FILE))
    else:
        model_state_file = os.path.join(final_output_dir,
                                        'final_state.pth.tar')
        logger.info('=> loading model from {}'.format(model_state_file))
        model.load_state_dict(torch.load(model_state_file))

    gpus = [int(i) for i in config.GPUS.split(',')]
    model = torch.nn.DataParallel(model, device_ids=gpus).cuda()

    # define loss function (criterion) and optimizer
    criterion = JointsMSELoss(
        use_target_weight=config.LOSS.USE_TARGET_WEIGHT
    ).cuda()

    # Data loading code
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    valid_dataset = eval('dataset.'+config.DATASET.DATASET)(
        config,
        config.DATASET.ROOT,
        config.DATASET.TEST_SET,
        False,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])
    )

    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=config.TEST.BATCH_SIZE*len(gpus),
        shuffle=False,
        num_workers=config.WORKERS,
        pin_memory=True
    )

    # evaluate on validation set
    validate(config, valid_loader, valid_dataset, model, criterion,
             final_output_dir, tb_log_dir)