This page collects typical usage examples of the mmcv.runner module in Python. If you are wondering what mmcv.runner is for or how to use it, the hand-picked code examples below may help; you can also read further into the parent module mmcv.
The following shows 10 code examples involving mmcv.runner, ordered by popularity.
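The examples below are methods taken from Hook subclasses (evaluation and logger hooks) that plug into an mmcv Runner's training loop via callbacks such as after_train_epoch, before_run, and log. As a quick reminder of how such hooks are wired up, here is a minimal sketch of a custom hook; the class name SimpleEpochHook and the commented-out registration call are illustrative, and the sketch assumes the classic mmcv.runner Hook / register_hook API.

from mmcv.runner import Hook


class SimpleEpochHook(Hook):
    """Toy hook that reports the end of every n-th training epoch (illustrative only)."""

    def __init__(self, interval=1):
        self.interval = interval

    def after_train_epoch(self, runner):
        # every_n_epochs is a helper provided by the Hook base class
        if not self.every_n_epochs(runner, self.interval):
            return
        runner.logger.info('epoch {} finished'.format(runner.epoch + 1))


# On an already-constructed runner (construction omitted here):
# runner.register_hook(SimpleEpochHook(interval=1))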
Example 1: after_train_epoch
# Required import: import mmcv [as alias]
# Or: from mmcv import runner [as alias]
def after_train_epoch(self, runner):
    if not self.every_n_epochs(runner, self.interval):
        return
    runner.model.eval()
    results = [None for _ in range(len(self.dataset))]
    if runner.rank == 0:
        prog_bar = mmcv.ProgressBar(len(self.dataset))
    for idx in range(runner.rank, len(self.dataset), runner.world_size):
        data = self.dataset[idx]
        data_gpu = scatter(
            collate([data], samples_per_gpu=1),
            [torch.cuda.current_device()])[0]

        # compute output
        with torch.no_grad():
            result = runner.model(
                return_loss=False, rescale=True, **data_gpu)
        results[idx] = result

        batch_size = runner.world_size
        if runner.rank == 0:
            for _ in range(batch_size):
                prog_bar.update()

    if runner.rank == 0:
        print('\n')
        dist.barrier()
        for i in range(1, runner.world_size):
            tmp_file = osp.join(runner.work_dir, 'temp_{}.pkl'.format(i))
            tmp_results = mmcv.load(tmp_file)
            for idx in range(i, len(results), runner.world_size):
                results[idx] = tmp_results[idx]
            os.remove(tmp_file)
        self.evaluate(runner, results)
    else:
        tmp_file = osp.join(runner.work_dir,
                            'temp_{}.pkl'.format(runner.rank))
        mmcv.dump(results, tmp_file)
        dist.barrier()
    dist.barrier()
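Example 1 distributes evaluation by having each process stride over dataset indices with its rank and then merging the per-rank partial results on rank 0 (via the temp_*.pkl files). The sketch below isolates just that index-partitioning pattern in plain Python, simulating the ranks in a single process with made-up result strings; no torch.distributed call is involved.

def shard_indices(num_samples, rank, world_size):
    # each rank handles indices rank, rank + world_size, rank + 2 * world_size, ...
    return list(range(rank, num_samples, world_size))


world_size, num_samples = 4, 10
results = [None] * num_samples
for rank in range(world_size):
    # simulate each rank filling only its own slots
    for idx in shard_indices(num_samples, rank, world_size):
        results[idx] = 'result_from_rank_{}'.format(rank)

assert None not in results  # every sample is covered exactly once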
Example 2: evaluate
# Required import: import mmcv [as alias]
# Or: from mmcv import runner [as alias]
def evaluate(self, runner, results):
    gt_bboxes = []
    gt_labels = []
    gt_ignore = [] if self.dataset.with_crowd else None
    for i in range(len(self.dataset)):
        ann = self.dataset.get_ann_info(i)
        bboxes = ann['bboxes']
        labels = ann['labels']
        if gt_ignore is not None:
            # np.bool was removed in recent NumPy releases; plain bool is equivalent here
            ignore = np.concatenate([
                np.zeros(bboxes.shape[0], dtype=bool),
                np.ones(ann['bboxes_ignore'].shape[0], dtype=bool)
            ])
            gt_ignore.append(ignore)
            bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
            labels = np.concatenate([labels, ann['labels_ignore']])
        gt_bboxes.append(bboxes)
        gt_labels.append(labels)
    # If the dataset is VOC2007, then use the 11-point mAP evaluation.
    if hasattr(self.dataset, 'year') and self.dataset.year == 2007:
        ds_name = 'voc07'
    else:
        ds_name = self.dataset.CLASSES
    mean_ap, eval_results = eval_map(
        results,
        gt_bboxes,
        gt_labels,
        gt_ignore=gt_ignore,
        scale_ranges=None,
        iou_thr=0.5,
        dataset=ds_name,
        print_summary=True)
    runner.log_buffer.output['mAP'] = mean_ap
    runner.log_buffer.ready = True
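For each image, Example 2 pairs a stacked bbox array (annotated boxes followed by ignored/crowd boxes) with the corresponding label array and a boolean ignore mask. A toy construction of one such entry, using made-up boxes in (x1, y1, x2, y2) format, looks like this:

import numpy as np

bboxes = np.array([[10, 10, 50, 50], [30, 40, 80, 90]], dtype=np.float32)  # annotated boxes
labels = np.array([1, 2])
bboxes_ignore = np.array([[0, 0, 20, 20]], dtype=np.float32)  # crowd / ignored box
labels_ignore = np.array([1])

ignore = np.concatenate([
    np.zeros(bboxes.shape[0], dtype=bool),       # keep the annotated boxes
    np.ones(bboxes_ignore.shape[0], dtype=bool)  # flag the crowd boxes as ignored
])
gt_bboxes = np.vstack([bboxes, bboxes_ignore])       # shape (3, 4)
gt_labels = np.concatenate([labels, labels_ignore])  # shape (3,)
print(gt_bboxes.shape, gt_labels.shape, int(ignore.sum()))  # (3, 4) (3,) 1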
Example 3: evaluate
# Required import: import mmcv [as alias]
# Or: from mmcv import runner [as alias]
def evaluate(self, runner, results):
    error_log_buffer = LogBuffer()
    for result in results:
        error_log_buffer.update(result['Error'])
    error_log_buffer.average()

    # push the averaged metrics into the runner's log buffer (e.g. for the TensorBoard logger hook)
    for key in error_log_buffer.output.keys():
        runner.log_buffer.output[key] = error_log_buffer.output[key]

    # for better visualization, format the metrics into pandas objects
    format_output_dict = flow_output_evaluation_in_pandas(error_log_buffer.output)

    runner.logger.info("Epoch [{}] Evaluation Result: \t".format(runner.epoch + 1))
    log_items = []
    for key, val in format_output_dict.items():
        if isinstance(val, pd.DataFrame):
            log_items.append("\n{}:\n{} \n".format(key, val))
        elif isinstance(val, float):
            val = "{:.4f}".format(val)
            log_items.append("{}: {}".format(key, val))
        else:
            log_items.append("{}: {}".format(key, val))
    log_str = ", ".join(log_items)
    runner.logger.info(log_str)

    runner.log_buffer.ready = True
    error_log_buffer.clear()
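Examples 3 and 4 format the averaged metrics into pandas objects before logging, because a DataFrame prints as an aligned table in the text log. The snippet below is not the project's flow_output_evaluation_in_pandas helper, only a small illustration of that idea with made-up metric names and values:

import pandas as pd

metrics = {'EPE': 1.2345, 'bad_1': 0.112, 'bad_3': 0.047}  # fabricated values
df = pd.DataFrame([metrics], index=['val'])
print(df)  # prints an aligned one-row table, easier to read than a raw dict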
Example 4: evaluate
# Required import: import mmcv [as alias]
# Or: from mmcv import runner [as alias]
def evaluate(self, runner, results):
    self.eval_conf(runner, results, bins_number=100)

    error_log_buffer = LogBuffer()
    for result in results:
        error_log_buffer.update(result['Error'])
    error_log_buffer.average()

    # push the averaged metrics into the runner's log buffer (e.g. for the TensorBoard logger hook)
    for key in error_log_buffer.output.keys():
        runner.log_buffer.output[key] = error_log_buffer.output[key]

    # for better visualization, format the metrics into pandas objects
    format_output_dict = disp_output_evaluation_in_pandas(error_log_buffer.output)

    runner.logger.info("Epoch [{}] Evaluation Result: \t".format(runner.epoch + 1))
    log_items = []
    for key, val in format_output_dict.items():
        if isinstance(val, pd.DataFrame):
            log_items.append("\n{}:\n{} \n".format(key, val))
        elif isinstance(val, float):
            val = "{:.4f}".format(val)
            log_items.append("{}: {}".format(key, val))
        else:
            log_items.append("{}: {}".format(key, val))
    log_str = ", ".join(log_items)
    runner.logger.info(log_str)

    runner.log_buffer.ready = True
    error_log_buffer.clear()
# confidence distribution statistics
Example 5: eval_conf
# Required import: import mmcv [as alias]
# Or: from mmcv import runner [as alias]
def eval_conf(self, runner, results, bins_number=100):
    # results is a list with one dict per test sample;
    # skip if the first sample does not contain the keyword 'Confidence'
    if 'Confidence' not in results[0]:
        return

    # each sample has several confidence maps, i.e. bin_edges is a list
    # whose length equals the number of confidence maps
    conf_number = len(results[0]['Confidence']['bin_edges'])

    # for each confidence map, accumulate its confidence distribution over all samples
    total_counts = np.zeros((conf_number, bins_number))
    total_bin_edges = np.zeros((conf_number, bins_number + 1))
    for result in results:
        # enumerate every confidence map of each sample; i is the confidence map index
        for i, conf in enumerate(result['Confidence']['bin_edges']):
            counts, bin_edges = result['Confidence']['counts'][i], result['Confidence']['bin_edges'][i]
            # accumulate each confidence map's counts over all samples
            total_counts[i] = total_counts[i] + counts
            # each confidence map's bin_edges are the same across samples
            total_bin_edges[i] = bin_edges

    for i in range(conf_number):
        total_counts[i] = total_counts[i] / sum(total_counts[i])
        name = "figure/confidence_histogram/{}".format(i)
        conf_hist = self.conf_tool.hist2vis(total_counts[i], total_bin_edges[i])
        runner.log_buffer.output[name] = conf_hist

    runner.logger.info("Epoch [{}] Confidence evaluation done!".format(runner.epoch + 1))
    runner.log_buffer.ready = True
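The accumulation in Example 5 is simply an element-wise sum of per-sample histogram counts followed by normalization into a probability distribution. A standalone NumPy sketch of the same idea with fabricated confidence values:

import numpy as np

bins_number = 10
samples = [np.random.rand(1000) for _ in range(5)]  # fake per-sample confidence values

total_counts = np.zeros(bins_number)
total_bin_edges = None
for conf in samples:
    counts, bin_edges = np.histogram(conf, bins=bins_number, range=(0.0, 1.0))
    total_counts += counts           # accumulate counts over samples
    total_bin_edges = bin_edges      # identical for every sample (same range and bins)

total_counts /= total_counts.sum()   # normalize to a distribution
print(total_counts.sum())            # 1.0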
Example 6: before_run
# Required import: import mmcv [as alias]
# Or: from mmcv import runner [as alias]
def before_run(self, runner):
    super(TextLoggerHook, self).before_run(runner)
    self.start_iter = runner.iter
    self.json_log_path = osp.join(runner.work_dir,
                                  '{}.log.json'.format(runner.timestamp))
Example 7: _get_max_memory
# Required import: import mmcv [as alias]
# Or: from mmcv import runner [as alias]
def _get_max_memory(self, runner):
    mem = torch.cuda.max_memory_allocated()
    mem_mb = torch.tensor([mem / (1024 * 1024)],
                          dtype=torch.int,
                          device=torch.device('cuda'))
    if runner.world_size > 1:
        dist.reduce(mem_mb, 0, op=dist.ReduceOp.MAX)
    return mem_mb.item()
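Example 7 converts the peak allocated GPU memory from bytes to whole megabytes and, in the distributed case, keeps the maximum across processes via dist.reduce with ReduceOp.MAX. A single-process sketch of just the conversion, guarded so it also runs on a machine without a GPU:

import torch

mem_bytes = torch.cuda.max_memory_allocated() if torch.cuda.is_available() else 0
mem_mb = int(mem_bytes / (1024 * 1024))
print('peak GPU memory: {} MB'.format(mem_mb))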
Example 8: _dump_log
# Required import: import mmcv [as alias]
# Or: from mmcv import runner [as alias]
def _dump_log(self, log_dict, runner):
    # dump the log in json format
    json_log = OrderedDict()
    for k, v in log_dict.items():
        json_log[k] = self._round_float(v)
    # only the master process appends one json record per line
    if runner.rank == 0:
        with open(self.json_log_path, 'a+') as f:
            mmcv.dump(json_log, f, file_format='json')
            f.write('\n')
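Example 8 appends one JSON object per line to the {timestamp}.log.json file created in Example 6, which keeps the log easy to parse incrementally (read it line by line and json.loads each line). The same append-a-record pattern with only the standard library; the file name and field values here are illustrative:

import json
from collections import OrderedDict

log_dict = OrderedDict(mode='train', epoch=1, iter=50, lr=0.02, loss=0.731)
with open('example.log.json', 'a+') as f:
    f.write(json.dumps(log_dict) + '\n')  # one JSON record per line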
Example 9: log
# Required import: import mmcv [as alias]
# Or: from mmcv import runner [as alias]
def log(self, runner):
    log_dict = OrderedDict()
    # training mode if the output contains the key "time"
    mode = 'train' if 'time' in runner.log_buffer.output else 'val'
    log_dict['mode'] = mode
    log_dict['epoch'] = runner.epoch + 1
    log_dict['iter'] = runner.inner_iter + 1
    # only record the lr of the first param group
    log_dict['lr'] = runner.current_lr()[0]
    if mode == 'train':
        log_dict['time'] = runner.log_buffer.output['time']
        log_dict['data_time'] = runner.log_buffer.output['data_time']
        # record peak GPU memory usage
        if torch.cuda.is_available():
            log_dict['memory'] = self._get_max_memory(runner)
    for name, val in runner.log_buffer.output.items():
        if name in ['time', 'data_time']:
            continue
        if not self._checkout(val):
            continue
        log_dict[name] = val
    self._log_info(log_dict, runner)
    self._dump_log(log_dict, runner)
Example 10: visualize
# Required import: import mmcv [as alias]
# Or: from mmcv import runner [as alias]
def visualize(self, runner, results):
    raise NotImplementedError