本文整理汇总了Python中reid.utils.logging.Logger方法的典型用法代码示例。如果您正苦于以下问题:Python logging.Logger方法的具体用法?Python logging.Logger怎么用?Python logging.Logger使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类reid.utils.logging
的用法示例。
在下文中一共展示了logging.Logger方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main
# 需要导入模块: from reid.utils import logging [as 别名]
# 或者: from reid.utils.logging import Logger [as 别名]
def main(args):
    """Run bottom-up clustering training on an unlabeled re-id dataset.

    Iteratively trains a model on the current pseudo-labeled data,
    evaluates it, then merges clusters by ``args.merge_percent`` per step
    until the merge budget is exhausted.

    Args:
        args: parsed CLI namespace (dataset/arch/batch_size/logs_dir/...).
    """
    cudnn.benchmark = True
    cudnn.enabled = True
    # Tee stdout into a timestamped log file under the logs directory.
    # (The original also bound an unused local `save_path`; removed.)
    sys.stdout = Logger(osp.join(args.logs_dir, 'log' + str(args.merge_percent) + time.strftime(".%m_%d_%H:%M:%S") + '.txt'))

    # Get all unlabeled data for training; every sample starts in its own cluster.
    dataset_all = datasets.create(args.dataset, osp.join(args.data_dir, args.dataset))
    new_train_data, cluster_id_labels = change_to_unlabel(dataset_all)

    num_train_ids = len(np.unique(np.array(cluster_id_labels)))
    # Number of cluster merges performed at each step.
    nums_to_merge = int(num_train_ids * args.merge_percent)

    BuMain = Bottom_up(model_name=args.arch, batch_size=args.batch_size,
                       num_classes=num_train_ids,
                       dataset=dataset_all,
                       u_data=new_train_data, save_path=args.logs_dir,
                       max_frames=args.max_frames,
                       embeding_fea_size=args.fea)

    for step in range(int(1 / args.merge_percent) - 1):
        print('step: ', step)
        BuMain.train(new_train_data, step, loss=args.loss)
        BuMain.evaluate(dataset_all.query, dataset_all.gallery)

        # Get new train data (merged clusters) for the next iteration.
        print('----------------------------------------bottom-up clustering------------------------------------------------')
        cluster_id_labels, new_train_data = BuMain.get_new_train_data_v2(
            cluster_id_labels, nums_to_merge, step, penalty=args.size_penalty)
        print('\n\n')
示例2: main
# 需要导入模块: from reid.utils import logging [as 别名]
# 或者: from reid.utils.logging import Logger [as 别名]
def _run_test(model, query_loader, gallery_loader, output_feature):
    # Evaluate `model` on the query/gallery split and print the results.
    evaluator = Evaluator(model)
    print("Test:")
    evaluator.evaluate(query_loader, gallery_loader,
                       query_loader.dataset.ret, gallery_loader.dataset.ret,
                       output_feature)


def main(args):
    """Train and/or evaluate a DenseNet-based re-id model.

    When ``args.evaluate`` is set, only runs the test pass and returns;
    otherwise trains, saves a checkpoint, then tests.

    Args:
        args: parsed CLI namespace (paths, model hyper-parameters, ...).
    """
    cudnn.benchmark = True

    # Redirect print to both console and log file (training runs only).
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    train_loader = get_loader(args.train_path, args.height, args.width, relabel=True,
                              batch_size=args.batch_size, mode='train',
                              num_workers=args.workers, name_pattern=args.name_pattern)
    gallery_loader = get_loader(args.gallery_path, args.height, args.width, relabel=False,
                                batch_size=args.batch_size, mode='test',
                                num_workers=args.workers, name_pattern=args.name_pattern)
    query_loader = get_loader(args.query_path, args.height, args.width, relabel=False,
                              batch_size=args.batch_size, mode='test',
                              num_workers=args.workers, name_pattern=args.name_pattern)

    # Create model
    model = DenseNet(num_feature=args.num_feature, num_classes=args.true_class,
                     num_iteration=args.num_iteration)

    # Load from checkpoint (weights only; start_epoch still comes from args).
    start_epoch = args.start_epoch
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
    model = nn.DataParallel(model).cuda()

    # Evaluation-only mode: test and exit.
    if args.evaluate:
        _run_test(model, query_loader, gallery_loader, args.output_feature)
        return

    # Start training, then checkpoint and run the final test.
    model = train(args, model, train_loader, start_epoch)
    save_checkpoint({'state_dict': model.module.state_dict()},
                    fpath=osp.join(args.logs_dir, 'model.pth.tar'))
    _run_test(model, query_loader, gallery_loader, args.output_feature)
示例3: main
# 需要导入模块: from reid.utils import logging [as 别名]
# 或者: from reid.utils.logging import Logger [as 别名]
def main(args):
    """Evaluate a PCB re-id model on a target dataset (cross-domain test).

    Seeds all RNGs, builds test loaders, runs evaluation, prints CMC/mAP,
    and optionally re-ranks the retrieved features.

    Args:
        args: parsed CLI namespace (seed, names, dirs, evaluate/rerank flags).
    """
    # Seed everything for a reproducible evaluation run.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.benchmark = True

    sys.stdout = Logger(osp.join(args.log_dir, 'log_test.txt'))
    args.num_classes = 1

    # Create data loaders
    dataset = {}
    dataset['dataset'] = datasets.create(args.name, args.data_dir)
    dataset['train_loader'], dataset['query_loader'], dataset['gallery_loader'] \
        = create_test_data_loader(args, args.name, dataset['dataset'])

    if args.evaluate:
        cls_params = None
        trainer = PCBTrainer(args, cls_params=cls_params)
        evaluator = trainer.test()

        scores = {}
        # q_f / g_f are the extracted query / gallery features.
        scores['cmc_scores'], scores['mAP'], q_f, g_f, _ = \
            evaluator.evaluate(args.name, dataset['query_loader'], dataset['gallery_loader'],
                               dataset['dataset'].query, dataset['dataset'].gallery,
                               isevaluate=True)

        # Fixed typo in the original banner ("Ddomain" -> "Domain").
        print('Cross Domain CMC Scores')
        print('Source\t Target\t Top1\t Top5\t Top10\t MAP')
        print('{}->{}: {:6.2%} {:6.2%} {:6.2%} ({:.2%})'.format(args.s_name, args.name,
                                                                scores['cmc_scores'][0],
                                                                scores['cmc_scores'][1],
                                                                scores['cmc_scores'][2],
                                                                scores['mAP']))

        ################## whether rerank test ############
        # NOTE(review): kept inside the evaluate branch — q_f/g_f only exist here.
        if args.rerank:
            rerankor = Rerankor()
            # Unified on osp.join (the rest of this function uses osp).
            rerankor.rerank(q_f, g_f,
                            savepath=osp.join(args.save_dir, 'rerank'),
                            save=False, isevaluate=True,
                            dataset=dataset['dataset'])
示例4: main
# 需要导入模块: from reid.utils import logging [as 别名]
# 或者: from reid.utils.logging import Logger [as 别名]
def main(args):
    """Run bottom-up clustering training (v1 merge rule) on unlabeled re-id data.

    Same loop as the v2 variant, but merges clusters via
    ``get_new_train_data`` with a size-penalty argument.

    Args:
        args: parsed CLI namespace (dataset/arch/batch_size/logs_dir/...).
    """
    cudnn.benchmark = True
    cudnn.enabled = True
    # Tee stdout into a timestamped log file under the logs directory.
    # (The original also bound an unused local `save_path`; removed.)
    sys.stdout = Logger(osp.join(args.logs_dir, 'log' + str(args.merge_percent) + time.strftime(".%m_%d_%H:%M:%S") + '.txt'))

    # Get all unlabeled data for training; every sample starts in its own cluster.
    dataset_all = datasets.create(args.dataset, osp.join(args.data_dir, args.dataset))
    new_train_data, cluster_id_labels = change_to_unlabel(dataset_all)

    num_train_ids = len(np.unique(np.array(cluster_id_labels)))
    # Number of cluster merges performed at each step.
    nums_to_merge = int(num_train_ids * args.merge_percent)

    BuMain = Bottom_up(model_name=args.arch, batch_size=args.batch_size,
                       num_classes=num_train_ids,
                       dataset=dataset_all,
                       u_data=new_train_data, save_path=args.logs_dir,
                       max_frames=args.max_frames,
                       embeding_fea_size=args.fea)

    for step in range(int(1 / args.merge_percent) - 1):
        print('step: ', step)
        BuMain.train(new_train_data, step, loss=args.loss)
        BuMain.evaluate(dataset_all.query, dataset_all.gallery)

        # Get new train data (merged clusters) for the next iteration.
        print('----------------------------------------bottom-up clustering------------------------------------------------')
        cluster_id_labels, new_train_data = BuMain.get_new_train_data(
            cluster_id_labels, nums_to_merge, size_penalty=args.size_penalty)
        print('\n\n')
示例5: main
# 需要导入模块: from reid.utils import logging [as 别名]
# 或者: from reid.utils.logging import Logger [as 别名]
def main(args):
    """Run the EUG (one-shot video re-id) progressive-labeling loop.

    Starting from one labeled tracklet per identity, each step trains the
    model, pseudo-labels the unlabeled pool, selects the top-confidence
    ``EF`` percent more samples, and evaluates.

    Args:
        args: parsed CLI namespace (EF, mode, arch, resume, logs_dir, ...).
    """
    cudnn.benchmark = True
    cudnn.enabled = True
    save_path = args.logs_dir
    # Enough steps to cover 100% of the unlabeled data at EF% per step.
    total_step = 100 // args.EF + 1
    sys.stdout = Logger(osp.join(args.logs_dir, 'log' + str(args.EF) + time.strftime(".%m_%d_%H:%M:%S") + '.txt'))

    # Get all the labeled and unlabeled data for training.
    # (The original also bound an unused local `num_all_examples`; removed.)
    dataset_all = datasets.create(args.dataset, osp.join(args.data_dir, args.dataset))
    l_data, u_data = get_one_shot_in_cam1(
        dataset_all,
        load_path="./examples/oneshot_{}_used_in_paper.pickle".format(dataset_all.name))

    resume_step, ckpt_file = -1, ''
    if args.resume:
        resume_step, ckpt_file = resume(args)

    # Initialize the EUG algorithm.
    eug = EUG(model_name=args.arch, batch_size=args.batch_size, mode=args.mode,
              num_classes=dataset_all.num_train_ids,
              data_dir=dataset_all.images_dir, l_data=l_data, u_data=u_data,
              save_path=args.logs_dir, max_frames=args.max_frames)

    new_train_data = l_data
    for step in range(total_step):
        # Skip steps already completed when resuming from a checkpoint.
        if step < resume_step:
            continue

        nums_to_select = min(int(len(u_data) * (step + 1) * args.EF / 100), len(u_data))
        print("This is running {} with EF={}%, step {}:\t Nums_to_be_select {}, \t Logs-dir {}".format(
            args.mode, args.EF, step, nums_to_select, save_path))

        # Train this step, or restore from the checkpoint at exactly
        # `resume_step`. (The original abused a conditional expression for
        # its side effects; an explicit if/else is the idiomatic form.)
        if step != resume_step:
            eug.train(new_train_data, step, epochs=70, step_size=55, init_lr=0.1)
        else:
            eug.resume(ckpt_file, step)

        # Pseudo-label the unlabeled pool and score confidence.
        pred_y, pred_score = eug.estimate_label()

        # Select the top-confidence samples.
        selected_idx = eug.select_top_data(pred_score, nums_to_select)

        # Add the newly selected samples to the training set.
        new_train_data = eug.generate_new_train_data(selected_idx, pred_y)

        # Evaluate on the query/gallery split.
        eug.evaluate(dataset_all.query, dataset_all.gallery)