This article collects typical usage examples of the Python method utils.vis.vis_one_image. If you are unsure what vis.vis_one_image does or how to call it, the curated code samples below should help; you can also browse the enclosing module, utils.vis, for related utilities.
Seven code examples of vis.vis_one_image are shown below, sorted by popularity by default.
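Before the examples, here is a minimal, hedged sketch of a typical call. It is not taken from any snippet below: the image path, output directory, and the fake detection array are placeholders, and the keyword arguments simply mirror the ones that appear in the examples that follow.

import cv2
import numpy as np
from utils import vis as vis_utils  # alias used throughout the examples

im = cv2.imread('input.jpg')  # placeholder path; any image readable by OpenCV works
# Fake detector output: a background class plus one object class, each an
# (N, 5) array of [x1, y1, x2, y2, score] rows, mimicking what im_detect_all
# returns in the examples below.
cls_boxes = [np.zeros((0, 5)), np.array([[10., 10., 120., 160., 0.95]])]

vis_utils.vis_one_image(
    im[:, :, ::-1],   # the examples pass RGB, so flip OpenCV's BGR channels
    'input',          # base name for the rendered output file
    'vis_output',     # directory the figure is written to
    cls_boxes,
    segms=None,       # segmentation masks are optional
    keypoints=None,   # keypoints are optional
    thresh=0.7,       # only draw detections scoring above this threshold
    box_alpha=0.8,
    show_class=True,
    ext='pdf'         # output format used by the examples below
)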
Example 1: visualize_im_masks
# Required import: from utils import vis as vis_utils
# Or: from utils.vis import vis_one_image
def visualize_im_masks(entry, boxes, segms, output_dir, show_class=True, ap=None):
    im_name = os.path.splitext(os.path.basename(entry['image']))[0]
    im = cv2.imread(entry['image'])
    if ap:
        # Prefix the file name with the AP score so outputs sort by quality.
        im_name = "{:.5f}_{}".format(ap, im_name)
    classes = [1] * len(boxes)
    vis_utils.vis_one_image(
        im[:, :, ::-1],  # BGR -> RGB for visualization
        im_name,
        os.path.join(output_dir, 'vis_sorted'),
        boxes=boxes,
        segms=segms,
        keypoints=None,
        thresh=0.0,
        box_alpha=0.8,
        dataset=entry['dataset'],
        show_class=show_class,
        ext='pdf',
        classes=classes
    )
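For orientation only, a hypothetical invocation of the helper above might look like the following; the roidb-style entry layout, the dummy box array, and the output directory are assumptions inferred from the fields the function accesses, not part of the original snippet.

import numpy as np

entry = {'image': '/path/to/image.jpg', 'dataset': None}  # 'dataset' would normally be a JsonDataset-like object
boxes = np.array([[20., 30., 200., 240., 0.9]])  # one fake [x1, y1, x2, y2, score] detection
segms = None  # no masks in this sketch

visualize_im_masks(entry, boxes, segms, output_dir='vis_out', ap=0.5)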
Example 2: main
# Required import: from utils import vis as vis_utils
# Or: from utils.vis import vis_one_image
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    dummy_coco_dataset = (
        dummy_datasets.get_vg3k_dataset()
        if args.use_vg3k else dummy_datasets.get_coco_dataset())
    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]
    for i, im_name in enumerate(im_list):
        out_name = os.path.join(
            args.output_dir, '{}'.format(os.path.basename(im_name) + '.pdf')
        )
        logger.info('Processing {} -> {}'.format(im_name, out_name))
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers
            )
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)'
            )
        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            args.output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=args.thresh,
            kp_thresh=2
        )
Example 3: main
# Required import: from utils import vis as vis_utils
# Or: from utils.vis import vis_one_image
def main(args):
    logger = logging.getLogger(__name__)
    dummy_coco_dataset = (
        dummy_datasets.get_vg3k_dataset()
        if args.use_vg3k else dummy_datasets.get_coco_dataset())
    cfg_orig = yaml.load(yaml.dump(cfg))
    im = cv2.imread(args.im_file)
    if args.rpn_pkl is not None:
        proposal_boxes, _proposal_scores = get_rpn_box_proposals(im, args)
        workspace.ResetWorkspace()
    else:
        proposal_boxes = None
    cls_boxes, cls_segms, cls_keyps = None, None, None
    for i in range(0, len(args.models_to_run), 2):
        pkl = args.models_to_run[i]
        yml = args.models_to_run[i + 1]
        cfg.immutable(False)
        merge_cfg_from_cfg(cfg_orig)
        merge_cfg_from_file(yml)
        if len(pkl) > 0:
            weights_file = pkl
        else:
            weights_file = cfg.TEST.WEIGHTS
        cfg.NUM_GPUS = 1
        assert_and_infer_cfg(cache_urls=False)
        model = model_engine.initialize_model_from_cfg(weights_file)
        with c2_utils.NamedCudaScope(0):
            cls_boxes_, cls_segms_, cls_keyps_ = \
                model_engine.im_detect_all(model, im, proposal_boxes)
        # Keep the most recent non-empty outputs from the chained models.
        cls_boxes = cls_boxes_ if cls_boxes_ is not None else cls_boxes
        cls_segms = cls_segms_ if cls_segms_ is not None else cls_segms
        cls_keyps = cls_keyps_ if cls_keyps_ is not None else cls_keyps
        workspace.ResetWorkspace()
    out_name = os.path.join(
        args.output_dir, '{}'.format(os.path.basename(args.im_file) + '.pdf')
    )
    logger.info('Processing {} -> {}'.format(args.im_file, out_name))
    vis_utils.vis_one_image(
        im[:, :, ::-1],  # BGR -> RGB for visualization
        args.im_file,
        args.output_dir,
        cls_boxes,
        cls_segms,
        cls_keyps,
        dataset=dummy_coco_dataset,
        box_alpha=0.3,
        show_class=True,
        thresh=args.thresh,
        kp_thresh=2
    )
Example 4: vis
# Required import: from utils import vis as vis_utils
# Or: from utils.vis import vis_one_image
def vis(dataset, detections_pkl, thresh, output_dir, limit=0):
    ds = JsonDataset(dataset)
    roidb = ds.get_roidb()
    with open(detections_pkl, 'r') as f:
        dets = pickle.load(f)
    assert all(k in dets for k in ['all_boxes', 'all_segms', 'all_keyps']), \
        'Expected detections pkl file in the format used by test_engine.py'
    all_boxes = dets['all_boxes']
    all_segms = dets['all_segms']
    all_keyps = dets['all_keyps']

    def id_or_index(ix, val):
        if len(val) == 0:
            return val
        else:
            return val[ix]

    for ix, entry in enumerate(roidb):
        if limit > 0 and ix >= limit:
            break
        if ix % 10 == 0:
            print('{:d}/{:d}'.format(ix + 1, len(roidb)))
        im = cv2.imread(entry['image'])
        im_name = os.path.splitext(os.path.basename(entry['image']))[0]
        cls_boxes_i = [
            id_or_index(ix, cls_k_boxes) for cls_k_boxes in all_boxes
        ]
        cls_segms_i = [
            id_or_index(ix, cls_k_segms) for cls_k_segms in all_segms
        ]
        cls_keyps_i = [
            id_or_index(ix, cls_k_keyps) for cls_k_keyps in all_keyps
        ]
        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            '{:d}_{:s}'.format(ix, im_name),
            os.path.join(output_dir, 'vis'),
            cls_boxes_i,
            segms=cls_segms_i,
            keypoints=cls_keyps_i,
            thresh=thresh,
            box_alpha=0.8,
            dataset=ds,
            show_class=True
        )
Example 5: main
# Required import: from utils import vis as vis_utils
# Or: from utils.vis import vis_one_image
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.TEST.WEIGHTS = args.weights
    cfg.NUM_GPUS = 1
    assert_and_infer_cfg()
    model = infer_engine.initialize_model_from_cfg()
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()
    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]
    for i, im_name in enumerate(im_list):
        out_name = os.path.join(
            args.output_dir, '{}'.format(os.path.basename(im_name) + '.pdf')
        )
        logger.info('Processing {} -> {}'.format(im_name, out_name))
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers
            )
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)'
            )
        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            args.output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2
        )
Example 6: main
# Required import: from utils import vis as vis_utils
# Or: from utils.vis import vis_one_image
def main(args):
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()
    cfg_orig = yaml.load(yaml.dump(cfg))
    im = cv2.imread(args.im_file)
    if args.rpn_pkl is not None:
        proposal_boxes, _proposal_scores = get_rpn_box_proposals(im, args)
        workspace.ResetWorkspace()
    else:
        proposal_boxes = None
    cls_boxes, cls_segms, cls_keyps = None, None, None
    for i in range(0, len(args.models_to_run), 2):
        pkl = args.models_to_run[i]
        yml = args.models_to_run[i + 1]
        merge_cfg_from_cfg(cfg_orig)
        merge_cfg_from_file(yml)
        if len(pkl) > 0:
            cfg.TEST.WEIGHTS = pkl
        cfg.NUM_GPUS = 1
        assert_and_infer_cfg()
        model = model_engine.initialize_model_from_cfg()
        with c2_utils.NamedCudaScope(0):
            cls_boxes_, cls_segms_, cls_keyps_ = \
                model_engine.im_detect_all(model, im, proposal_boxes)
        # Keep the most recent non-empty outputs from the chained models.
        cls_boxes = cls_boxes_ if cls_boxes_ is not None else cls_boxes
        cls_segms = cls_segms_ if cls_segms_ is not None else cls_segms
        cls_keyps = cls_keyps_ if cls_keyps_ is not None else cls_keyps
        workspace.ResetWorkspace()
    vis_utils.vis_one_image(
        im[:, :, ::-1],  # BGR -> RGB for visualization
        args.im_file,
        args.output_dir,
        cls_boxes,
        cls_segms,
        cls_keyps,
        dataset=dummy_coco_dataset,
        box_alpha=0.3,
        show_class=True,
        thresh=0.7,
        kp_thresh=2
    )
Example 7: vis
# Required import: from utils import vis as vis_utils
# Or: from utils.vis import vis_one_image
def vis(dataset, detections_pkl, thresh, output_dir, limit=0):
    ds = JsonDataset(dataset)
    roidb = ds.get_roidb()
    with open(detections_pkl, 'r') as f:
        dets = pickle.load(f)
    all_boxes = dets['all_boxes']
    if 'all_segms' in dets:
        all_segms = dets['all_segms']
    else:
        all_segms = None
    if 'all_keyps' in dets:
        all_keyps = dets['all_keyps']
    else:
        all_keyps = None

    def id_or_index(ix, val):
        if len(val) == 0:
            return val
        else:
            return val[ix]

    for ix, entry in enumerate(roidb):
        if limit > 0 and ix >= limit:
            break
        if ix % 10 == 0:
            print('{:d}/{:d}'.format(ix + 1, len(roidb)))
        im = cv2.imread(entry['image'])
        im_name = os.path.splitext(os.path.basename(entry['image']))[0]
        cls_boxes_i = [
            id_or_index(ix, all_boxes[j]) for j in range(len(all_boxes))
        ]
        if all_segms is not None:
            cls_segms_i = [
                id_or_index(ix, all_segms[j]) for j in range(len(all_segms))
            ]
        else:
            cls_segms_i = None
        if all_keyps is not None:
            cls_keyps_i = [
                id_or_index(ix, all_keyps[j]) for j in range(len(all_keyps))
            ]
        else:
            cls_keyps_i = None
        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            '{:d}_{:s}'.format(ix, im_name),
            os.path.join(output_dir, 'vis'),
            cls_boxes_i,
            segms=cls_segms_i,
            keypoints=cls_keyps_i,
            thresh=thresh,
            box_alpha=0.8,
            dataset=ds,
            show_class=True
        )
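Finally, a hedged driver sketch for the vis() helpers in Examples 4 and 7; the dataset name, pickle path, and output directory are placeholders and assume a Detectron-style environment with the corresponding annotations available.

# Visualize the first 50 entries of a detections file produced by test_engine.py.
vis(
    dataset='coco_2014_minival',     # placeholder JsonDataset name
    detections_pkl='detections.pkl', # placeholder path to the saved detections
    thresh=0.7,
    output_dir='vis_out',
    limit=50
)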