This page collects typical usage examples of the Python method utils.subprocess.process_in_parallel. If you have been wondering what subprocess.process_in_parallel does, how to call it, or what real uses of it look like, the curated code examples below may help. You can also explore further usage examples of utils.subprocess, the module in which this method is defined.
The 14 code examples of subprocess.process_in_parallel shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
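All 14 examples share the same pattern: resolve a test binary, launch it once per GPU through process_in_parallel, then collate the per-subprocess pickled outputs. For orientation, here is a minimal sketch of what a helper along these lines might look like; the command-line flags, the range-splitting scheme, and the per-range output file naming are illustrative assumptions, not the actual utils.subprocess implementation.
import os
import pickle
import subprocess

def process_in_parallel_sketch(tag, total_range_size, binary, output_dir, num_gpus=8):
    """Sketch only: split [0, total_range_size) into one contiguous range per
    GPU, run `binary` on each range in a subprocess, and load each range's
    pickled output. The real utils.subprocess.process_in_parallel may differ.
    """
    stride = (total_range_size + num_gpus - 1) // num_gpus
    procs, ranges = [], []
    for gpu_id in range(num_gpus):
        start = gpu_id * stride
        end = min(start + stride, total_range_size)
        ranges.append((start, end))
        # Hypothetical flags; the real binary's interface may differ.
        cmd = ['python', binary, '--range', str(start), str(end),
               '--output_dir', output_dir]
        # Pin each subprocess to its own GPU.
        env = dict(os.environ, CUDA_VISIBLE_DEVICES=str(gpu_id))
        procs.append(subprocess.Popen(cmd, env=env))
    for p in procs:
        assert p.wait() == 0, 'Subprocess failed'
    outputs = []
    for start, end in ranges:
        # Each subprocess is assumed to save its results under this name.
        range_file = os.path.join(
            output_dir, '{}_range_{}_{}.pkl'.format(tag, start, end))
        with open(range_file, 'rb') as f:
            outputs.append(pickle.load(f))
    return outputs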
Example 1: multi_gpu_test_retinanet_on_dataset
# Required import: from utils import subprocess [as alias]
# Or: from utils.subprocess import process_in_parallel [as alias]
def multi_gpu_test_retinanet_on_dataset(num_images, output_dir, dataset):
"""
If doing multi-gpu testing, we need to divide the data on various gpus and
make the subprocess call for each child process that'll run test_retinanet()
on its subset data. After all the subprocesses finish, we combine the results
and return
"""
# Retrieve the test_net binary path
binary_dir = envu.get_runtime_dir()
binary_ext = envu.get_py_bin_ext()
binary = os.path.join(binary_dir, 'test_net' + binary_ext)
assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)
# Run inference in parallel in subprocesses
outputs = subprocess_utils.process_in_parallel(
'retinanet_detections', num_images, binary, output_dir)
    # Combine the results from each subprocess
boxes, scores, classes, image_ids = [], [], [], []
for det_data in outputs:
boxes.extend(det_data['boxes'])
scores.extend(det_data['scores'])
classes.extend(det_data['classes'])
image_ids.extend(det_data['ids'])
    return boxes, scores, classes, image_ids
Example 2: multi_gpu_test_net_on_dataset
# Required import: from utils import subprocess [as alias]
# Or: from utils.subprocess import process_in_parallel [as alias]
def multi_gpu_test_net_on_dataset(args, num_images):
"""Multi-gpu inference on a dataset."""
binary_dir = os.getcwd()
binary = os.path.join(binary_dir, args.test_net_file + '.py')
assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)
# Run inference in parallel in subprocesses
# Outputs will be a list of outputs from each subprocess, where the output
# of each subprocess is the dictionary saved by test_net().
    outputs = subprocess_utils.process_in_parallel(
        'detection', num_images, binary, cfg, cfg.CKPT
    )
# Collate the results from each subprocess
all_boxes = []
all_segms = []
all_keyps = []
all_parss = []
all_pscores = []
all_uvs = []
for ins_data in outputs:
all_boxes += ins_data['all_boxes']
all_segms += ins_data['all_segms']
all_keyps += ins_data['all_keyps']
all_parss += ins_data['all_parss']
all_pscores += ins_data['all_pscores']
all_uvs += ins_data['all_uvs']
det_file = os.path.join(cfg.CKPT, 'test', 'detections.pkl')
save_object(
dict(
all_boxes=all_boxes,
all_segms=all_segms,
all_keyps=all_keyps,
all_parss=all_parss,
all_pscores=all_pscores,
all_uvs=all_uvs
), det_file
)
logging_rank('Wrote detections to: {}'.format(os.path.abspath(det_file)), local_rank=0)
return all_boxes, all_segms, all_keyps, all_parss, all_pscores, all_uvs
Example 3: multi_gpu_test_net_on_dataset
# Required import: from utils import subprocess [as alias]
# Or: from utils.subprocess import process_in_parallel [as alias]
def multi_gpu_test_net_on_dataset(
args, dataset_name, proposal_file, num_images, output_dir):
"""Multi-gpu inference on a dataset."""
binary_dir = envu.get_runtime_dir()
binary_ext = envu.get_py_bin_ext()
binary = os.path.join(binary_dir, args.test_net_file + binary_ext)
assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)
# Pass the target dataset and proposal file (if any) via the command line
opts = ['TEST.DATASETS', '("{}",)'.format(dataset_name)]
if proposal_file:
opts += ['TEST.PROPOSAL_FILES', '("{}",)'.format(proposal_file)]
# Run inference in parallel in subprocesses
# Outputs will be a list of outputs from each subprocess, where the output
# of each subprocess is the dictionary saved by test_net().
tag = 'discovery' if 'train' in dataset_name else 'detection'
outputs = subprocess_utils.process_in_parallel(
tag, num_images, binary, output_dir,
args.load_ckpt, args.load_detectron, opts
)
# Collate the results from each subprocess
all_boxes = {}
for det_data in outputs:
all_boxes_batch = det_data['all_boxes']
all_boxes.update(all_boxes_batch)
if 'train' in dataset_name:
det_file = os.path.join(output_dir, 'discovery.pkl')
else:
det_file = os.path.join(output_dir, 'detections.pkl')
cfg_yaml = yaml.dump(cfg)
save_object(
dict(
all_boxes=all_boxes,
cfg=cfg_yaml
), det_file
)
logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
return all_boxes
Example 4: multi_gpu_generate_rpn_on_dataset
# Required import: from utils import subprocess [as alias]
# Or: from utils.subprocess import process_in_parallel [as alias]
def multi_gpu_generate_rpn_on_dataset(
args, dataset_name, _proposal_file_ignored, num_images, output_dir
):
"""Multi-gpu inference on a dataset."""
# Retrieve the test_net binary path
binary_dir = envu.get_runtime_dir()
binary_ext = envu.get_py_bin_ext()
binary = os.path.join(binary_dir, args.test_net_file + binary_ext)
assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)
# Pass the target dataset via the command line
opts = ['TEST.DATASETS', '("{}",)'.format(dataset_name)]
# Run inference in parallel in subprocesses
outputs = subprocess_utils.process_in_parallel(
'rpn_proposals', num_images, binary, output_dir,
args.load_ckpt, args.load_detectron, opts
)
# Collate the results from each subprocess
boxes, scores, ids = [], [], []
for rpn_data in outputs:
boxes += rpn_data['boxes']
scores += rpn_data['scores']
ids += rpn_data['ids']
rpn_file = os.path.join(output_dir, 'rpn_proposals.pkl')
cfg_yaml = yaml.dump(cfg)
save_object(
dict(boxes=boxes, scores=scores, ids=ids, cfg=cfg_yaml), rpn_file
)
logger.info('Wrote RPN proposals to {}'.format(os.path.abspath(rpn_file)))
return boxes, scores, ids, rpn_file
Example 5: multi_gpu_generate_rpn_on_dataset
# Required import: from utils import subprocess [as alias]
# Or: from utils.subprocess import process_in_parallel [as alias]
def multi_gpu_generate_rpn_on_dataset(
weights_file, dataset_name, _proposal_file_ignored, num_images, output_dir
):
"""Multi-gpu inference on a dataset."""
# Retrieve the test_net binary path
binary_dir = envu.get_runtime_dir()
binary_ext = envu.get_py_bin_ext()
binary = os.path.join(binary_dir, 'test_net' + binary_ext)
assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)
# Pass the target dataset via the command line
opts = ['TEST.DATASETS', '("{}",)'.format(dataset_name)]
opts += ['TEST.WEIGHTS', weights_file]
# Run inference in parallel in subprocesses
outputs = subprocess_utils.process_in_parallel(
'rpn_proposals', num_images, binary, output_dir, opts
)
# Collate the results from each subprocess
boxes, scores, ids = [], [], []
for rpn_data in outputs:
boxes += rpn_data['boxes']
scores += rpn_data['scores']
ids += rpn_data['ids']
rpn_file = os.path.join(output_dir, 'rpn_proposals.pkl')
cfg_yaml = yaml.dump(cfg)
save_object(
dict(boxes=boxes, scores=scores, ids=ids, cfg=cfg_yaml), rpn_file
)
logger.info('Wrote RPN proposals to {}'.format(os.path.abspath(rpn_file)))
return boxes, scores, ids, rpn_file
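Examples 4 and 5 are close variants: example 4 hands the checkpoint to each subprocess through args.load_ckpt and args.load_detectron, while example 5 instead passes a weights file on the command line via the TEST.WEIGHTS config option.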
Example 6: multi_gpu_test_net_on_dataset
# Required import: from utils import subprocess [as alias]
# Or: from utils.subprocess import process_in_parallel [as alias]
def multi_gpu_test_net_on_dataset(num_images, output_dir):
"""Multi-gpu inference on a dataset."""
binary_dir = envu.get_runtime_dir()
binary_ext = envu.get_py_bin_ext()
binary = os.path.join(binary_dir, 'test_net' + binary_ext)
assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)
# Run inference in parallel in subprocesses
# Outputs will be a list of outputs from each subprocess, where the output
# of each subprocess is the dictionary saved by test_net().
outputs = subprocess_utils.process_in_parallel(
'detection', num_images, binary, output_dir
)
# Collate the results from each subprocess
all_boxes = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
all_segms = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
all_keyps = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
for det_data in outputs:
all_boxes_batch = det_data['all_boxes']
all_segms_batch = det_data['all_segms']
all_keyps_batch = det_data['all_keyps']
for cls_idx in range(1, cfg.MODEL.NUM_CLASSES):
all_boxes[cls_idx] += all_boxes_batch[cls_idx]
all_segms[cls_idx] += all_segms_batch[cls_idx]
all_keyps[cls_idx] += all_keyps_batch[cls_idx]
det_file = os.path.join(output_dir, 'detections.pkl')
cfg_yaml = yaml.dump(cfg)
save_object(
dict(
all_boxes=all_boxes,
all_segms=all_segms,
all_keyps=all_keyps,
cfg=cfg_yaml
), det_file
)
logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
return all_boxes, all_segms, all_keyps
Example 7: multi_gpu_generate_rpn_on_dataset
# Required import: from utils import subprocess [as alias]
# Or: from utils.subprocess import process_in_parallel [as alias]
def multi_gpu_generate_rpn_on_dataset(num_images, output_dir):
"""Multi-gpu inference on a dataset."""
# Retrieve the test_net binary path
binary_dir = envu.get_runtime_dir()
binary_ext = envu.get_py_bin_ext()
binary = os.path.join(binary_dir, 'test_net' + binary_ext)
assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)
# Run inference in parallel in subprocesses
outputs = subprocess_utils.process_in_parallel(
'rpn_proposals', num_images, binary, output_dir
)
# Collate the results from each subprocess
boxes, scores, ids = [], [], []
for rpn_data in outputs:
boxes += rpn_data['boxes']
scores += rpn_data['scores']
ids += rpn_data['ids']
rpn_file = os.path.join(output_dir, 'rpn_proposals.pkl')
cfg_yaml = yaml.dump(cfg)
save_object(
dict(boxes=boxes, scores=scores, ids=ids, cfg=cfg_yaml), rpn_file
)
logger.info('Wrote RPN proposals to {}'.format(os.path.abspath(rpn_file)))
return boxes, scores, ids, rpn_file
Example 8: multi_gpu_test_net_on_dataset
# Required import: from utils import subprocess [as alias]
# Or: from utils.subprocess import process_in_parallel [as alias]
def multi_gpu_test_net_on_dataset(num_images, output_dir):
    """Multi-gpu inference on a dataset."""
    binary = os.path.join('tools', 'test_net.py')
assert os.path.exists(binary), 'Binary {} not found'.format(binary)
# Run inference in parallel in subprocesses
outputs = subprocess_utils.process_in_parallel(
'detection', num_images, binary, output_dir)
# Collate the results from each subprocess
all_boxes = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
all_segms = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
all_keyps = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
for det_data in outputs:
all_boxes_batch = det_data['all_boxes']
all_segms_batch = det_data['all_segms']
all_keyps_batch = det_data['all_keyps']
for j in range(1, cfg.MODEL.NUM_CLASSES):
all_boxes[j] += all_boxes_batch[j]
all_segms[j] += all_segms_batch[j]
all_keyps[j] += all_keyps_batch[j]
det_file = os.path.join(output_dir, 'detections.pkl')
cfg_yaml = yaml.dump(cfg)
robust_pickle_dump(
dict(all_boxes=all_boxes,
all_segms=all_segms,
all_keyps=all_keyps,
cfg=cfg_yaml),
det_file)
logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
return all_boxes, all_segms, all_keyps
Example 9: multi_gpu_generate_rpn_on_dataset
# Required import: from utils import subprocess [as alias]
# Or: from utils.subprocess import process_in_parallel [as alias]
def multi_gpu_generate_rpn_on_dataset(num_images, output_dir):
# TODO(rbg): Need to have non-FB specific code path for OSS
if cfg.CLUSTER.ON_CLUSTER:
binary_dir = os.path.abspath(os.getcwd())
binary = os.path.join(binary_dir, 'test_net.xar')
else:
assert parutil.is_lpar(), 'Binary must be inplace package style'
binary_dir = os.path.dirname(parutil.get_runtime_path())
binary = os.path.join(binary_dir, 'test_net.par')
assert os.path.exists(binary), 'Binary {} not found'.format(binary)
# Run inference in parallel in subprocesses
outputs = subprocess_utils.process_in_parallel(
'rpn_proposals', num_images, binary, output_dir)
# Collate the results from each subprocess
boxes, scores, ids = [], [], []
for rpn_data in outputs:
boxes += rpn_data['boxes']
scores += rpn_data['scores']
ids += rpn_data['ids']
rpn_file = os.path.join(output_dir, 'rpn_proposals.pkl')
cfg_yaml = yaml.dump(cfg)
robust_pickle_dump(
dict(boxes=boxes, scores=scores, ids=ids, cfg=cfg_yaml), rpn_file)
logger.info('Wrote RPN proposals to {}'.format(os.path.abspath(rpn_file)))
return boxes, scores, ids, rpn_file
Example 10: multi_gpu_test_net_on_dataset
# Required import: from utils import subprocess [as alias]
# Or: from utils.subprocess import process_in_parallel [as alias]
def multi_gpu_test_net_on_dataset(
args, dataset_name, proposal_file, num_images, output_dir):
"""Multi-gpu inference on a dataset."""
binary_dir = envu.get_runtime_dir()
binary_ext = envu.get_py_bin_ext()
binary = os.path.join(binary_dir, args.test_net_file + binary_ext)
assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)
# Pass the target dataset and proposal file (if any) via the command line
opts = ['TEST.DATASETS', '("{}",)'.format(dataset_name)]
if proposal_file:
opts += ['TEST.PROPOSAL_FILES', '("{}",)'.format(proposal_file)]
# Run inference in parallel in subprocesses
# Outputs will be a list of outputs from each subprocess, where the output
# of each subprocess is the dictionary saved by test_net().
outputs = subprocess_utils.process_in_parallel(
'detection', num_images, binary, output_dir,
args.load_ckpt, args.load_detectron, opts
)
# Collate the results from each subprocess
all_boxes = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
all_segms = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
all_keyps = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
for det_data in outputs:
all_boxes_batch = det_data['all_boxes']
all_segms_batch = det_data['all_segms']
all_keyps_batch = det_data['all_keyps']
for cls_idx in range(1, cfg.MODEL.NUM_CLASSES):
all_boxes[cls_idx] += all_boxes_batch[cls_idx]
all_segms[cls_idx] += all_segms_batch[cls_idx]
all_keyps[cls_idx] += all_keyps_batch[cls_idx]
det_file = os.path.join(output_dir, 'detections.pkl')
cfg_yaml = yaml.dump(cfg)
save_object(
dict(
all_boxes=all_boxes,
all_segms=all_segms,
all_keyps=all_keyps,
cfg=cfg_yaml
), det_file
)
logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
return all_boxes, all_segms, all_keyps
Example 11: multi_gpu_test_net_on_dataset
# Required import: from utils import subprocess [as alias]
# Or: from utils.subprocess import process_in_parallel [as alias]
def multi_gpu_test_net_on_dataset(
weights_file, dataset_name, proposal_file, num_images, output_dir
):
"""Multi-gpu inference on a dataset."""
binary_dir = envu.get_runtime_dir()
binary_ext = envu.get_py_bin_ext()
binary = os.path.join(binary_dir, 'test_net' + binary_ext)
assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)
# Pass the target dataset and proposal file (if any) via the command line
opts = ['TEST.DATASETS', '("{}",)'.format(dataset_name)]
opts += ['TEST.WEIGHTS', weights_file]
if proposal_file:
opts += ['TEST.PROPOSAL_FILES', '("{}",)'.format(proposal_file)]
# Run inference in parallel in subprocesses
# Outputs will be a list of outputs from each subprocess, where the output
# of each subprocess is the dictionary saved by test_net().
outputs = subprocess_utils.process_in_parallel(
'detection', num_images, binary, output_dir, opts
)
# Collate the results from each subprocess
all_boxes = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
all_segms = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
all_keyps = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
for det_data in outputs:
all_boxes_batch = det_data['all_boxes']
all_segms_batch = det_data['all_segms']
all_keyps_batch = det_data['all_keyps']
for cls_idx in range(1, cfg.MODEL.NUM_CLASSES):
all_boxes[cls_idx] += all_boxes_batch[cls_idx]
all_segms[cls_idx] += all_segms_batch[cls_idx]
all_keyps[cls_idx] += all_keyps_batch[cls_idx]
det_file = os.path.join(output_dir, 'detections.pkl')
cfg_yaml = yaml.dump(cfg)
save_object(
dict(
all_boxes=all_boxes,
all_segms=all_segms,
all_keyps=all_keyps,
cfg=cfg_yaml
), det_file
)
logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
return all_boxes, all_segms, all_keyps
Example 12: multi_gpu_test_net_on_dataset
# Required import: from utils import subprocess [as alias]
# Or: from utils.subprocess import process_in_parallel [as alias]
def multi_gpu_test_net_on_dataset(
args, dataset_name, proposal_file, num_images, output_dir):
"""Multi-gpu inference on a dataset."""
binary_dir = envu.get_runtime_dir()
binary_ext = envu.get_py_bin_ext()
binary = os.path.join(binary_dir, args.test_net_file + binary_ext)
assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)
# Pass the target dataset and proposal file (if any) via the command line
opts = ['TEST.DATASETS', '("{}",)'.format(dataset_name)]
if proposal_file:
opts += ['TEST.PROPOSAL_FILES', '("{}",)'.format(proposal_file)]
# Run inference in parallel in subprocesses
# Outputs will be a list of outputs from each subprocess, where the output
# of each subprocess is the dictionary saved by test_net().
outputs = subprocess_utils.process_in_parallel(
'detection', num_images, binary, output_dir,
args.load_ckpt, args.load_detectron, args.net_name, args.mlp_head_dim,
args.heatmap_kernel_size, args.part_crop_size, args.use_kps17,
opts)
# Collate the results from each subprocess
all_boxes = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
all_segms = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
all_keyps = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
all_hois = {}
all_losses = defaultdict(list)
all_keyps_vcoco = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
for det_data in outputs:
all_boxes_batch = det_data['all_boxes']
all_segms_batch = det_data['all_segms']
all_keyps_batch = det_data['all_keyps']
all_hois = {**all_hois, **det_data['all_hois']}
for k, v in det_data['all_losses'].items():
all_losses[k].extend(v)
all_keyps_vcoco_batch = det_data['all_keyps_vcoco']
for cls_idx in range(1, cfg.MODEL.NUM_CLASSES):
all_boxes[cls_idx] += all_boxes_batch[cls_idx]
all_segms[cls_idx] += all_segms_batch[cls_idx]
all_keyps[cls_idx] += all_keyps_batch[cls_idx]
all_keyps_vcoco[cls_idx] += all_keyps_vcoco_batch[cls_idx]
det_file = os.path.join(output_dir, 'detections.pkl')
cfg_yaml = yaml.dump(cfg)
save_object(
dict(
all_boxes=all_boxes,
all_segms=all_segms,
all_keyps=all_keyps,
all_hois=all_hois,
all_keyps_vcoco=all_keyps_vcoco,
all_losses=all_losses,
cfg=cfg_yaml
), det_file
)
logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
return all_boxes, all_segms, all_keyps, all_hois, all_keyps_vcoco, all_losses
Example 13: multi_gpu_test_net_on_dataset
# Required import: from utils import subprocess [as alias]
# Or: from utils.subprocess import process_in_parallel [as alias]
def multi_gpu_test_net_on_dataset(
args, dataset_name, proposal_file, num_images, output_dir):
"""Multi-gpu inference on a dataset."""
binary_dir = envu.get_runtime_dir()
binary_ext = envu.get_py_bin_ext()
binary = os.path.join(binary_dir, args.test_net_file + binary_ext)
assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)
# Pass the target dataset and proposal file (if any) via the command line
opts = ['TEST.DATASETS', '("{}",)'.format(dataset_name)]
if proposal_file:
opts += ['TEST.PROPOSAL_FILES', '("{}",)'.format(proposal_file)]
if args.do_val:
opts += ['--do_val']
if args.use_gt_boxes:
opts += ['--use_gt_boxes']
if args.use_gt_labels:
opts += ['--use_gt_labels']
# Run inference in parallel in subprocesses
# Outputs will be a list of outputs from each subprocess, where the output
# of each subprocess is the dictionary saved by test_net().
outputs = subprocess_utils.process_in_parallel(
'rel_detection', num_images, binary, output_dir,
args.load_ckpt, args.load_detectron, opts
)
# Collate the results from each subprocess
all_results = []
for det_data in outputs:
all_results += det_data
if args.use_gt_boxes:
if args.use_gt_labels:
det_file = os.path.join(args.output_dir, 'rel_detections_gt_boxes_prdcls.pkl')
else:
det_file = os.path.join(args.output_dir, 'rel_detections_gt_boxes_sgcls.pkl')
else:
det_file = os.path.join(args.output_dir, 'rel_detections.pkl')
save_object(all_results, det_file)
logger.info('Wrote rel_detections to: {}'.format(os.path.abspath(det_file)))
return all_results
Example 14: multi_gpu_test_net_on_dataset
# Required import: from utils import subprocess [as alias]
# Or: from utils.subprocess import process_in_parallel [as alias]
def multi_gpu_test_net_on_dataset(num_images, output_dir):
"""Multi-gpu inference on a dataset."""
binary_dir = envu.get_runtime_dir()
binary_ext = envu.get_py_bin_ext()
binary = os.path.join(binary_dir, 'test_net' + binary_ext)
assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)
# Run inference in parallel in subprocesses
# Outputs will be a list of outputs from each subprocess, where the output
# of each subprocess is the dictionary saved by test_net().
outputs = subprocess_utils.process_in_parallel(
'detection', num_images, binary, output_dir
)
# Collate the results from each subprocess
all_boxes = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
all_segms = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
all_keyps = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
all_rois = []
for det_data in outputs:
all_rois.extend(det_data['roidb'])
all_boxes_batch = det_data['all_boxes']
all_segms_batch = det_data['all_segms']
all_keyps_batch = det_data['all_keyps']
for cls_idx in range(1, cfg.MODEL.NUM_CLASSES):
all_boxes[cls_idx] += all_boxes_batch[cls_idx]
all_segms[cls_idx] += all_segms_batch[cls_idx]
all_keyps[cls_idx] += all_keyps_batch[cls_idx]
det_file = os.path.join(output_dir, 'detections.pkl')
cfg_yaml = yaml.dump(cfg)
save_object(
dict(
all_boxes=all_boxes,
all_segms=all_segms,
all_keyps=all_keyps,
cfg=cfg_yaml,
all_rois=all_rois
), det_file
)
logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
return all_boxes, all_segms, all_keyps
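After any of the test functions above has run, the saved detections.pkl can be re-loaded for offline evaluation. A minimal sketch, assuming save_object writes a standard pickle (the usual Detectron-style convention); the helper name is hypothetical.
import os
import pickle

def load_detections(output_dir):
    """Re-load the collated detections written by the functions above."""
    det_file = os.path.join(output_dir, 'detections.pkl')
    with open(det_file, 'rb') as f:
        dets = pickle.load(f)
    # Per-class, per-image results, as collated before saving.
    return dets['all_boxes'], dets['all_segms'], dets['all_keyps']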