本文整理汇总了Python中core.config.cfg.EXPECTED_RESULTS_RTOL属性的典型用法代码示例。如果您正苦于以下问题:Python cfg.EXPECTED_RESULTS_RTOL属性的具体用法?Python cfg.EXPECTED_RESULTS_RTOL怎么用?Python cfg.EXPECTED_RESULTS_RTOL使用的例子?那么恭喜您, 这里精选的属性代码示例或许可以为您提供帮助。您也可以进一步了解该属性所在类core.config.cfg
的用法示例。
在下文中一共展示了cfg.EXPECTED_RESULTS_RTOL属性的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main
# 需要导入模块: from core.config import cfg [as 别名]
# 或者: from core.config.cfg import EXPECTED_RESULTS_RTOL [as 别名]
def main(ind_range=None, multi_gpu_testing=False):
    """Run inference and, in the parent process, validate and log results.

    Args:
        ind_range: optional index range; when set, this process acts as a
            subprocess evaluating only a slice of the dataset, so the
            expected-results check and JSON dump are skipped.
        multi_gpu_testing: whether run_inference should fan out across GPUs.
    """
    # NOTE(review): original indentation was lost in this source; the
    # check/dump/log steps are assumed to run only in the parent process
    # (ind_range falsy), matching the upstream Detectron test engine.
    output_dir = get_output_dir(training=False)
    all_results = run_inference(
        output_dir, ind_range=ind_range, multi_gpu_testing=multi_gpu_testing
    )
    if not ind_range:
        task_evaluation.check_expected_results(
            all_results,
            atol=cfg.EXPECTED_RESULTS_ATOL,
            rtol=cfg.EXPECTED_RESULTS_RTOL
        )
        import json
        # Use a context manager so the file handle is closed deterministically;
        # the original json.dump(all_results, open(...)) leaked the handle.
        with open(os.path.join(output_dir, 'bbox_results_all.json'), 'w') as f:
            json.dump(all_results, f)
        task_evaluation.log_copy_paste_friendly_results(all_results)
示例2: run_inference
# 需要导入模块: from core.config import cfg [as 别名]
# 或者: from core.config.cfg import EXPECTED_RESULTS_RTOL [as 别名]
def run_inference(
        args, ind_range=None,
        multi_gpu_testing=False, gpu_id=0,
        check_expected_results=False):
    """Run inference over the configured test datasets and return the results.

    When ``ind_range`` is None this process is the parent: it evaluates every
    dataset in cfg.TEST.DATASETS (optionally launching per-GPU subprocesses)
    and merges the per-dataset results. Otherwise it is a child subprocess
    evaluating a single index range of one dataset.

    Args:
        args: parsed command-line arguments; ``args.output_dir`` is read here.
        ind_range: optional (start, end) dataset index range for the child case.
        multi_gpu_testing: fan the parent's work out across multiple GPUs.
        gpu_id: GPU used by the child subprocess.
        check_expected_results: in the parent, compare merged results against
            the tolerances configured in cfg.

    Returns:
        dict mapping result keys to evaluation results.
    """
    parent_func, child_func = get_eval_functions()
    is_parent = ind_range is None

    def _compute_results():
        if not is_parent:
            # Child subprocess: test_net was invoked via subprocess.Popen to
            # evaluate one range of inputs on a single dataset.
            dataset_name, proposal_file = get_inference_dataset(0, is_parent=False)
            return child_func(
                args,
                dataset_name,
                proposal_file,
                args.output_dir,
                ind_range=ind_range,
                gpu_id=gpu_id
            )
        # Parent: either run inference on each whole dataset in-process or,
        # when multi_gpu_testing is set, launch subprocesses that each cover
        # a range of the dataset; merge everything into one dict.
        merged = {}
        for dataset_idx in range(len(cfg.TEST.DATASETS)):
            dataset_name, proposal_file = get_inference_dataset(dataset_idx)
            merged.update(parent_func(
                args,
                dataset_name,
                proposal_file,
                args.output_dir,
                multi_gpu=multi_gpu_testing
            ))
        return merged

    all_results = _compute_results()
    # NOTE(review): indentation was lost in this source; the log call is
    # assumed to sit inside this guard, matching the upstream engine.
    if check_expected_results and is_parent:
        task_evaluation.check_expected_results(
            all_results,
            atol=cfg.EXPECTED_RESULTS_ATOL,
            rtol=cfg.EXPECTED_RESULTS_RTOL
        )
        task_evaluation.log_copy_paste_friendly_results(all_results)
    return all_results
示例3: run_inference
# 需要导入模块: from core.config import cfg [as 别名]
# 或者: from core.config.cfg import EXPECTED_RESULTS_RTOL [as 别名]
def run_inference(
        args, ind_range=None,
        multi_gpu_testing=False, gpu_id=0,
        check_expected_results=False):
    """Run inference over cfg.TEST.DATASETS and return the merged results.

    Args:
        args: parsed command-line arguments; ``args.output_dir`` is read here.
        ind_range: optional dataset index range; forwarded to ``parent_func``
            (see NOTE below about the hard-coded branch).
        multi_gpu_testing: launch per-GPU subprocesses from the parent.
        gpu_id: GPU for the child branch (currently unreachable, see NOTE).
        check_expected_results: compare results against cfg tolerances.

    Returns:
        dict of merged evaluation results.
    """
    parent_func, child_func = get_eval_functions()
    is_parent = ind_range is None
    def result_getter():
        # NOTE(review): the condition was hard-coded to True (the original
        # `is_parent` test survives only in the trailing comment), so the
        # child branch below is dead code and parent_func always runs — now
        # receiving ind_range directly. This looks like a deliberate change
        # to handle ranged evaluation in-process, but should be confirmed.
        if True: #is_parent:
            # Parent case:
            # In this case we're either running inference on the entire dataset in a
            # single process or (if multi_gpu_testing is True) using this process to
            # launch subprocesses that each run inference on a range of the dataset
            all_results = {}
            for i in range(len(cfg.TEST.DATASETS)):
                dataset_name, proposal_file = get_inference_dataset(i)
                output_dir = args.output_dir
                results = parent_func(
                    args,
                    dataset_name,
                    proposal_file,
                    output_dir,
                    ind_range=ind_range,
                    multi_gpu=multi_gpu_testing
                )
                all_results.update(results)
            return all_results
        else:
            # Subprocess child case (unreachable while the branch above is
            # hard-coded to True — see NOTE):
            # In this case test_net was called via subprocess.Popen to execute on a
            # range of inputs on a single dataset
            dataset_name, proposal_file = get_inference_dataset(0, is_parent=False)
            output_dir = args.output_dir
            return child_func(
                args,
                dataset_name,
                proposal_file,
                output_dir,
                ind_range=ind_range,
                gpu_id=gpu_id
            )
    all_results = result_getter()
    if check_expected_results and is_parent:
        task_evaluation.check_expected_results(
            all_results,
            atol=cfg.EXPECTED_RESULTS_ATOL,
            rtol=cfg.EXPECTED_RESULTS_RTOL
        )
        task_evaluation.log_copy_paste_friendly_results(all_results)
    return all_results
示例4: run_inference
# 需要导入模块: from core.config import cfg [as 别名]
# 或者: from core.config.cfg import EXPECTED_RESULTS_RTOL [as 别名]
def run_inference(
        weights_file, ind_range=None,
        multi_gpu_testing=False, gpu_id=0,
        check_expected_results=False,
    ):
    """Evaluate the model in ``weights_file`` on the configured test datasets.

    In the parent case (``ind_range is None``) every dataset listed in
    cfg.TEST.DATASETS is evaluated — possibly via per-GPU subprocesses — and
    the per-dataset results are merged. In the child case this process
    evaluates only the given index range of a single dataset.

    Args:
        weights_file: path to the model weights to evaluate.
        ind_range: optional (start, end) index range for the child case.
        multi_gpu_testing: fan the parent's work out across multiple GPUs.
        gpu_id: GPU used by the child subprocess.
        check_expected_results: in the parent, compare results against the
            tolerances configured in cfg.

    Returns:
        dict mapping result keys to evaluation results.
    """
    parent_func, child_func = get_eval_functions()
    is_parent = ind_range is None

    def _gather():
        if not is_parent:
            # Child subprocess: test_net was launched via subprocess.Popen to
            # evaluate one index range of a single dataset.
            dataset_name, proposal_file = get_inference_dataset(0, is_parent=False)
            return child_func(
                weights_file,
                dataset_name,
                proposal_file,
                get_output_dir(dataset_name, training=False),
                ind_range=ind_range,
                gpu_id=gpu_id
            )
        # Parent: evaluate every configured dataset, optionally launching
        # subprocesses that each cover a range, and merge all results.
        combined = {}
        for ds_idx in range(len(cfg.TEST.DATASETS)):
            dataset_name, proposal_file = get_inference_dataset(ds_idx)
            per_dataset = parent_func(
                weights_file,
                dataset_name,
                proposal_file,
                get_output_dir(dataset_name, training=False),
                multi_gpu=multi_gpu_testing
            )
            combined.update(per_dataset)
        return combined

    all_results = _gather()
    # NOTE(review): indentation was lost in this source; the log call is
    # assumed to sit inside this guard, matching the upstream engine.
    if check_expected_results and is_parent:
        task_evaluation.check_expected_results(
            all_results,
            atol=cfg.EXPECTED_RESULTS_ATOL,
            rtol=cfg.EXPECTED_RESULTS_RTOL
        )
        task_evaluation.log_copy_paste_friendly_results(all_results)
    return all_results