本文整理匯總了Python中config.cfg.set_args方法的典型用法代碼示例。如果您正苦於以下問題:Python cfg.set_args方法的具體用法?Python cfg.set_args怎麽用?Python cfg.set_args使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類config.cfg
的用法示例。
在下文中一共展示了cfg.set_args方法的5個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: main
# 需要導入模塊: from config import cfg [as 別名]
# 或者: from config.cfg import set_args [as 別名]
def main():
    """Run inference for a test epoch and evaluate the predictions.

    Parses CLI args, configures the GPUs via ``cfg.set_args``, builds the
    tester (data loader + model), collects per-batch coordinate outputs,
    and hands the concatenated predictions to the dataset evaluator.
    """
    args = parse_args()
    cfg.set_args(args.gpu_ids)
    # cudnn autotuning: pick the fastest conv algorithms for fixed input sizes.
    cudnn.fastest = True
    cudnn.benchmark = True

    tester = Tester(args.test_epoch)
    tester._make_batch_generator()
    tester._make_model()

    preds = []
    with torch.no_grad():  # inference only — no autograd bookkeeping
        for itr, (input_img, cam_param) in enumerate(tqdm(tester.batch_generator)):
            coord_out = tester.model(input_img, cam_param)
            preds.append(coord_out.cpu().numpy())

    # Stack per-batch arrays into one (N, ...) array and evaluate.
    preds = np.concatenate(preds, axis=0)
    tester._evaluate(preds, cfg.result_dir)
示例2: test
# 需要導入模塊: from config import cfg [as 別名]
# 或者: from config.cfg import set_args [as 別名]
def test(test_model):
    """Evaluate ``test_model`` on the test set, splitting work across GPUs.

    Loads annotations and input poses, partitions the input-pose list into
    per-GPU ranges aligned on image boundaries (all entries of one image go
    to the same GPU), runs ``test_net`` on each GPU in parallel, and feeds
    the merged results to the dataset's evaluation routine.

    NOTE(review): relies on a module-level ``args`` (gpu_ids) — confirm it
    is set by the surrounding script before this is called.
    """
    # annotation load
    d = Dataset()
    annot = d.load_annot(cfg.testset)
    # input pose load
    input_pose = d.input_pose_load(annot, cfg.testset)

    # job assign (multi-gpu)
    from tfflat.mp_utils import MultiProc
    img_start = 0
    ranges = [0]
    img_num = len(np.unique([i['image_id'] for i in input_pose]))
    images_per_gpu = int(img_num / len(args.gpu_ids.split(','))) + 1
    for run_img in range(img_num):
        # Advance img_end past every pose entry belonging to the current image,
        # so a split never lands in the middle of one image's detections.
        img_end = img_start + 1
        while img_end < len(input_pose) and input_pose[img_end]['image_id'] == input_pose[img_start]['image_id']:
            img_end += 1
        if (run_img + 1) % images_per_gpu == 0 or (run_img + 1) == img_num:
            ranges.append(img_end)
        img_start = img_end

    def func(gpu_id):
        # Worker executed once per GPU by MultiProc.
        cfg.set_args(args.gpu_ids.split(',')[gpu_id])
        tester = Tester(Model(), cfg)
        tester.load_weights(test_model)
        # Renamed from `range` to avoid shadowing the builtin used above.
        img_range = [ranges[gpu_id], ranges[gpu_id + 1]]
        return test_net(tester, input_pose, img_range, gpu_id)

    MultiGPUFunc = MultiProc(len(args.gpu_ids.split(',')), func)
    result = MultiGPUFunc.work()

    # evaluation
    d.evaluation(result, annot, cfg.result_dir, cfg.testset)
示例3: main
# 需要導入模塊: from config import cfg [as 別名]
# 或者: from config.cfg import set_args [as 別名]
def main():
    """Run 2D/3D pose inference with optional horizontal-flip averaging.

    Parses CLI args, configures GPUs, builds the tester, and for each batch
    optionally averages the model output with the output on a horizontally
    flipped input (mirroring x-coordinates and swapping left/right joint
    pairs). Concatenated predictions are passed to the evaluator.
    """
    args = parse_args()
    cfg.set_args(args.gpu_ids)
    cudnn.fastest = True
    cudnn.benchmark = True
    cudnn.deterministic = False
    cudnn.enabled = True

    tester = Tester(args.test_epoch)
    tester._make_batch_generator()
    tester._make_model()

    preds = []
    with torch.no_grad():
        for itr, input_img in enumerate(tqdm(tester.batch_generator)):
            # forward
            coord_out = tester.model(input_img)

            if cfg.flip_test:
                # Run the mirrored image, then un-mirror the result:
                # reflect x and swap symmetric joint pairs (e.g. L/R wrist).
                flipped_input_img = flip(input_img, dims=3)
                flipped_coord_out = tester.model(flipped_input_img)
                flipped_coord_out[:, :, 0] = cfg.output_shape[1] - flipped_coord_out[:, :, 0] - 1
                for pair in tester.flip_pairs:
                    flipped_coord_out[:, pair[0], :], flipped_coord_out[:, pair[1], :] = flipped_coord_out[:, pair[1], :].clone(), flipped_coord_out[:, pair[0], :].clone()
                coord_out = (coord_out + flipped_coord_out) / 2.

            vis = False  # flip to True to dump keypoint overlays for debugging
            if vis:
                filename = str(itr)
                tmpimg = input_img[0].cpu().numpy()
                # Undo normalization, back to uint8 BGR HWC for OpenCV.
                tmpimg = tmpimg * np.array(cfg.pixel_std).reshape(3, 1, 1) + np.array(cfg.pixel_mean).reshape(3, 1, 1)
                tmpimg = tmpimg.astype(np.uint8)
                tmpimg = tmpimg[::-1, :, :]
                tmpimg = np.transpose(tmpimg, (1, 2, 0)).copy()
                tmpkps = np.zeros((3, tester.joint_num))
                # Rescale heatmap-space coords to input-image space.
                tmpkps[:2, :] = coord_out[0, :, :2].cpu().numpy().transpose(1, 0) / cfg.output_shape[0] * cfg.input_shape[0]
                tmpkps[2, :] = 1
                tmpimg = vis_keypoints(tmpimg, tmpkps, tester.skeleton)
                cv2.imwrite(filename + '_output.jpg', tmpimg)

            preds.append(coord_out.cpu().numpy())

    # evaluate
    preds = np.concatenate(preds, axis=0)
    tester._evaluate(preds, cfg.result_dir)
示例4: main
# 需要導入模塊: from config import cfg [as 別名]
# 或者: from config.cfg import set_args [as 別名]
def main():
    """Train the coordinate-regression model, checkpointing every epoch.

    Parses CLI args (including ``continue_train`` to resume), configures
    GPUs, builds the trainer, then runs the standard loop: forward, mean
    loss, backward, optimizer step, per-iteration logging, and a per-epoch
    ``save_model`` with network + optimizer state.
    """
    # argument parse and create log
    args = parse_args()
    cfg.set_args(args.gpu_ids, args.continue_train)
    cudnn.fastest = True
    cudnn.benchmark = True

    trainer = Trainer()
    trainer._make_batch_generator()
    trainer._make_model()

    # train
    for epoch in range(trainer.start_epoch, cfg.end_epoch):
        trainer.set_lr(epoch)
        trainer.tot_timer.tic()
        trainer.read_timer.tic()
        for itr, (input_img, joint_img, joint_vis, joints_have_depth) in enumerate(trainer.batch_generator):
            trainer.read_timer.toc()
            trainer.gpu_timer.tic()

            # forward
            trainer.optimizer.zero_grad()
            target = {'coord': joint_img, 'vis': joint_vis, 'have_depth': joints_have_depth}
            loss_coord = trainer.model(input_img, target)
            # Mean across devices/batch: model may return per-sample losses.
            loss_coord = loss_coord.mean()

            # backward
            loss = loss_coord
            loss.backward()
            trainer.optimizer.step()
            trainer.gpu_timer.toc()

            screen = [
                'Epoch %d/%d itr %d/%d:' % (epoch, cfg.end_epoch, itr, trainer.itr_per_epoch),
                'lr: %g' % (trainer.get_lr()),
                'speed: %.2f(%.2fs r%.2f)s/itr' % (
                    trainer.tot_timer.average_time, trainer.gpu_timer.average_time, trainer.read_timer.average_time),
                '%.2fh/epoch' % (trainer.tot_timer.average_time / 3600. * trainer.itr_per_epoch),
                '%s: %.4f' % ('loss_coord', loss_coord.detach()),
            ]
            trainer.logger.info(' '.join(screen))

            trainer.tot_timer.toc()
            trainer.tot_timer.tic()
            trainer.read_timer.tic()

        # Checkpoint after every epoch so training can resume with --continue.
        trainer.save_model({
            'epoch': epoch,
            'network': trainer.model.state_dict(),
            'optimizer': trainer.optimizer.state_dict(),
        }, epoch)
示例5: main
# 需要導入模塊: from config import cfg [as 別名]
# 或者: from config.cfg import set_args [as 別名]
def main():
    """Train the root-depth model (image + camera k-value), saving per epoch.

    Same loop shape as the joint-coordinate trainer, but each batch also
    carries ``k_value`` (camera-derived scalar) which is fed to the model
    alongside the image; the target is the root joint coordinate/visibility.
    """
    # argument parse and create log
    args = parse_args()
    cfg.set_args(args.gpu_ids, args.continue_train)
    cudnn.fastest = True
    cudnn.benchmark = True

    trainer = Trainer()
    trainer._make_batch_generator()
    trainer._make_model()

    # train
    for epoch in range(trainer.start_epoch, cfg.end_epoch):
        trainer.set_lr(epoch)
        trainer.tot_timer.tic()
        trainer.read_timer.tic()
        for itr, (input_img, k_value, root_img, root_vis, joints_have_depth) in enumerate(trainer.batch_generator):
            trainer.read_timer.toc()
            trainer.gpu_timer.tic()

            # forward
            trainer.optimizer.zero_grad()
            target = {'coord': root_img, 'vis': root_vis, 'have_depth': joints_have_depth}
            loss_coord = trainer.model(input_img, k_value, target)
            # Mean across devices/batch (stray semicolon removed).
            loss_coord = loss_coord.mean()

            # backward
            loss = loss_coord
            loss.backward()
            trainer.optimizer.step()
            trainer.gpu_timer.toc()

            screen = [
                'Epoch %d/%d itr %d/%d:' % (epoch, cfg.end_epoch, itr, trainer.itr_per_epoch),
                'lr: %g' % (trainer.get_lr()),
                'speed: %.2f(%.2fs r%.2f)s/itr' % (
                    trainer.tot_timer.average_time, trainer.gpu_timer.average_time, trainer.read_timer.average_time),
                '%.2fh/epoch' % (trainer.tot_timer.average_time / 3600. * trainer.itr_per_epoch),
                '%s: %.4f' % ('loss_coord', loss_coord.detach()),
            ]
            trainer.logger.info(' '.join(screen))

            trainer.tot_timer.toc()
            trainer.tot_timer.tic()
            trainer.read_timer.tic()

        # Checkpoint after every epoch so training can resume with --continue.
        trainer.save_model({
            'epoch': epoch,
            'network': trainer.model.state_dict(),
            'optimizer': trainer.optimizer.state_dict(),
        }, epoch)