This article collects typical usage examples of the Python attribute core.config.cfg.EXPECTED_RESULTS_ATOL. If you have been wondering what cfg.EXPECTED_RESULTS_ATOL is for and how to use it, the curated code samples below may help. You can also explore further usage examples from core.config.cfg, the module in which this attribute is defined.
The following shows 4 code examples of cfg.EXPECTED_RESULTS_ATOL, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
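Before the examples, it is worth seeing what the attribute controls. In all of the snippets below, EXPECTED_RESULTS_ATOL and EXPECTED_RESULTS_RTOL are passed to task_evaluation.check_expected_results as the absolute and relative tolerances used when comparing actual evaluation metrics against expected values stored in the config. The helper below is a minimal, hypothetical sketch of such a tolerance check, not the library implementation; the [dataset, task, metric, expected_value] entry layout is an assumption borrowed from Detectron-style configs.

# Hypothetical, simplified sketch of the tolerance check that the examples
# delegate to task_evaluation.check_expected_results. Entry layout
# [dataset, task, metric, expected_value] is assumed, not guaranteed.
def check_expected_results_sketch(all_results, expected_results, atol, rtol):
    for dataset, task, metric, expected_val in expected_results:
        actual_val = all_results[dataset][task][metric]
        # numpy.isclose-style test: |actual - expected| <= atol + rtol * |expected|
        if abs(actual_val - expected_val) > atol + rtol * abs(expected_val):
            raise AssertionError(
                '{} {} {}: got {}, expected {} (atol={}, rtol={})'.format(
                    dataset, task, metric, actual_val, expected_val, atol, rtol
                )
            )

# Typical call, with the tolerances read from the global config:
# check_expected_results_sketch(
#     all_results, cfg.EXPECTED_RESULTS,
#     atol=cfg.EXPECTED_RESULTS_ATOL, rtol=cfg.EXPECTED_RESULTS_RTOL)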
Example 1: main

# Required import: from core.config import cfg [as alias]
# Or alternatively: from core.config.cfg import EXPECTED_RESULTS_ATOL [as alias]
def main(ind_range=None, multi_gpu_testing=False):
    output_dir = get_output_dir(training=False)
    all_results = run_inference(
        output_dir, ind_range=ind_range, multi_gpu_testing=multi_gpu_testing
    )
    if not ind_range:
        task_evaluation.check_expected_results(
            all_results,
            atol=cfg.EXPECTED_RESULTS_ATOL,
            rtol=cfg.EXPECTED_RESULTS_RTOL
        )
        import json
        json.dump(all_results, open(os.path.join(output_dir, 'bbox_results_all.json'), 'w'))
        task_evaluation.log_copy_paste_friendly_results(all_results)
Example 2: run_inference

# Required import: from core.config import cfg [as alias]
# Or alternatively: from core.config.cfg import EXPECTED_RESULTS_ATOL [as alias]
def run_inference(
        args, ind_range=None,
        multi_gpu_testing=False, gpu_id=0,
        check_expected_results=False):
    parent_func, child_func = get_eval_functions()
    is_parent = ind_range is None

    def result_getter():
        if is_parent:
            # Parent case:
            # In this case we're either running inference on the entire dataset in a
            # single process or (if multi_gpu_testing is True) using this process to
            # launch subprocesses that each run inference on a range of the dataset
            all_results = {}
            for i in range(len(cfg.TEST.DATASETS)):
                dataset_name, proposal_file = get_inference_dataset(i)
                output_dir = args.output_dir
                results = parent_func(
                    args,
                    dataset_name,
                    proposal_file,
                    output_dir,
                    multi_gpu=multi_gpu_testing
                )
                all_results.update(results)
            return all_results
        else:
            # Subprocess child case:
            # In this case test_net was called via subprocess.Popen to execute on a
            # range of inputs on a single dataset
            dataset_name, proposal_file = get_inference_dataset(0, is_parent=False)
            output_dir = args.output_dir
            return child_func(
                args,
                dataset_name,
                proposal_file,
                output_dir,
                ind_range=ind_range,
                gpu_id=gpu_id
            )

    all_results = result_getter()
    if check_expected_results and is_parent:
        task_evaluation.check_expected_results(
            all_results,
            atol=cfg.EXPECTED_RESULTS_ATOL,
            rtol=cfg.EXPECTED_RESULTS_RTOL
        )
        task_evaluation.log_copy_paste_friendly_results(all_results)

    return all_results
Example 3: run_inference

# Required import: from core.config import cfg [as alias]
# Or alternatively: from core.config.cfg import EXPECTED_RESULTS_ATOL [as alias]
def run_inference(
        args, ind_range=None,
        multi_gpu_testing=False, gpu_id=0,
        check_expected_results=False):
    parent_func, child_func = get_eval_functions()
    is_parent = ind_range is None

    def result_getter():
        if True:  # is_parent:
            # Parent case:
            # In this case we're either running inference on the entire dataset in a
            # single process or (if multi_gpu_testing is True) using this process to
            # launch subprocesses that each run inference on a range of the dataset
            all_results = {}
            for i in range(len(cfg.TEST.DATASETS)):
                dataset_name, proposal_file = get_inference_dataset(i)
                output_dir = args.output_dir
                results = parent_func(
                    args,
                    dataset_name,
                    proposal_file,
                    output_dir,
                    ind_range=ind_range,
                    multi_gpu=multi_gpu_testing
                )
                all_results.update(results)
            return all_results
        else:
            # Subprocess child case:
            # In this case test_net was called via subprocess.Popen to execute on a
            # range of inputs on a single dataset
            dataset_name, proposal_file = get_inference_dataset(0, is_parent=False)
            output_dir = args.output_dir
            return child_func(
                args,
                dataset_name,
                proposal_file,
                output_dir,
                ind_range=ind_range,
                gpu_id=gpu_id
            )

    all_results = result_getter()
    if check_expected_results and is_parent:
        task_evaluation.check_expected_results(
            all_results,
            atol=cfg.EXPECTED_RESULTS_ATOL,
            rtol=cfg.EXPECTED_RESULTS_RTOL
        )
        task_evaluation.log_copy_paste_friendly_results(all_results)

    return all_results
Example 4: run_inference

# Required import: from core.config import cfg [as alias]
# Or alternatively: from core.config.cfg import EXPECTED_RESULTS_ATOL [as alias]
def run_inference(
        weights_file, ind_range=None,
        multi_gpu_testing=False, gpu_id=0,
        check_expected_results=False,
):
    parent_func, child_func = get_eval_functions()
    is_parent = ind_range is None

    def result_getter():
        if is_parent:
            # Parent case:
            # In this case we're either running inference on the entire dataset in a
            # single process or (if multi_gpu_testing is True) using this process to
            # launch subprocesses that each run inference on a range of the dataset
            all_results = {}
            for i in range(len(cfg.TEST.DATASETS)):
                dataset_name, proposal_file = get_inference_dataset(i)
                output_dir = get_output_dir(dataset_name, training=False)
                results = parent_func(
                    weights_file,
                    dataset_name,
                    proposal_file,
                    output_dir,
                    multi_gpu=multi_gpu_testing
                )
                all_results.update(results)
            return all_results
        else:
            # Subprocess child case:
            # In this case test_net was called via subprocess.Popen to execute on a
            # range of inputs on a single dataset
            dataset_name, proposal_file = get_inference_dataset(0, is_parent=False)
            output_dir = get_output_dir(dataset_name, training=False)
            return child_func(
                weights_file,
                dataset_name,
                proposal_file,
                output_dir,
                ind_range=ind_range,
                gpu_id=gpu_id
            )

    all_results = result_getter()
    if check_expected_results and is_parent:
        task_evaluation.check_expected_results(
            all_results,
            atol=cfg.EXPECTED_RESULTS_ATOL,
            rtol=cfg.EXPECTED_RESULTS_RTOL
        )
        task_evaluation.log_copy_paste_friendly_results(all_results)

    return all_results
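All four examples read the tolerances from the shared global config rather than taking them as arguments. The snippet below is a hedged illustration of how those fields might be populated in a driver script before calling one of the run_inference variants above; the attribute names follow the Detectron naming convention, the entry layout is assumed, and the concrete values are placeholders rather than recommendations.

from core.config import cfg

# Expected metrics for regression testing, one entry per
# [dataset, task, metric, expected_value] (layout assumed from Detectron-style configs).
cfg.EXPECTED_RESULTS = [['coco_2014_minival', 'box', 'AP', 0.373]]
# Absolute and relative tolerances used by task_evaluation.check_expected_results().
cfg.EXPECTED_RESULTS_ATOL = 0.005
cfg.EXPECTED_RESULTS_RTOL = 0.1

# With the config populated, results can be verified at the end of inference, e.g.:
# all_results = run_inference(args, check_expected_results=True)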