本文整理汇总了Python中datasets.dataset_catalog.DATASETS属性的典型用法代码示例。如果您正苦于以下问题:Python dataset_catalog.DATASETS属性的具体用法?Python dataset_catalog.DATASETS怎么用?Python dataset_catalog.DATASETS使用的例子?那么恭喜您, 这里精选的属性代码示例或许可以为您提供帮助。您也可以进一步了解该属性所在模块datasets.dataset_catalog的用法示例。
在下文中一共展示了dataset_catalog.DATASETS属性的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: voc_info
# 需要导入模块: from datasets import dataset_catalog [as 别名]
# 或者: from datasets.dataset_catalog import DATASETS [as 别名]
def voc_info(json_dataset):
    """Collect PASCAL VOC paths and metadata for *json_dataset*.

    The dataset name is expected to look like ``voc_<year>_<image_set>``
    (e.g. ``voc_2007_test``): characters 4-7 hold the year and everything
    from position 9 on is the image set.

    Returns a dict with the year, image set, devkit root, an annotation
    path template (``{:s}`` placeholder for the image id), and the image
    set list file path.
    """
    name = json_dataset.name
    year = name[4:8]
    image_set = name[9:]
    devkit_path = DATASETS[name][DEVKIT_DIR]
    assert os.path.exists(devkit_path), \
        'Devkit directory {} not found'.format(devkit_path)
    voc_root = os.path.join(devkit_path, 'VOC' + year)
    return dict(
        year=year,
        image_set=image_set,
        devkit_path=devkit_path,
        anno_path=os.path.join(voc_root, 'Annotations', '{:s}.xml'),
        image_set_path=os.path.join(
            voc_root, 'ImageSets', 'Main', image_set + '.txt'),
    )
示例2: convert
# 需要导入模块: from datasets import dataset_catalog [as 别名]
# 或者: from datasets.dataset_catalog import DATASETS [as 别名]
def convert(json_file, output_dir):
    """Filter a COCO 2017 *test* detection result file down to *test-dev*.

    Reads the detections in ``json_file``, keeps only those whose
    ``image_id`` belongs to the ``coco_2017_test-dev`` image set, and
    writes them to ``<output_dir>/<name>_test-dev<ext>``.

    Args:
        json_file: path to a COCO-format detection results json.
        output_dir: directory that receives the filtered results file.
    """
    print('Reading: {}'.format(json_file))
    with open(json_file, 'r') as fid:
        dt = json.load(fid)
    print('done!')
    test_image_info = DATASETS['coco_2017_test'][ANN_FN]
    with open(test_image_info, 'r') as fid:
        info_test = json.load(fid)
    image_test_id = [i['id'] for i in info_test['images']]
    print('{} has {} images'.format(test_image_info, len(image_test_id)))
    test_dev_image_info = DATASETS['coco_2017_test-dev'][ANN_FN]
    with open(test_dev_image_info, 'r') as fid:
        info_testdev = json.load(fid)
    image_testdev_id = [i['id'] for i in info_testdev['images']]
    print('{} has {} images'.format(
        test_dev_image_info, len(image_testdev_id)))
    # Membership tests against a list are O(n) per detection, which made
    # the filter loop quadratic for large result files; a set is O(1).
    testdev_ids = set(image_testdev_id)
    dt_testdev = []
    print('Filtering test-dev from test...')
    t = Timer()
    t.tic()
    for i, det in enumerate(dt):
        if i % 1000 == 0:
            print('{}/{}'.format(i, len(dt)))
        if det['image_id'] in testdev_ids:
            dt_testdev.append(det)
    # '{:.2f}' — the original '{:2}' only set a minimum field width and
    # printed the full float repr of the elapsed seconds.
    print('Done filtering ({:.2f}s)!'.format(t.toc()))
    filename, file_extension = os.path.splitext(os.path.basename(json_file))
    filename = filename + '_test-dev'
    filename = os.path.join(output_dir, filename + file_extension)
    with open(filename, 'w') as fid:
        # json.dump returns None; the original pointlessly bound it.
        json.dump(dt_testdev, fid)
    print('Done writing: {}!'.format(filename))
示例3: evaluate_masks
# 需要导入模块: from datasets import dataset_catalog [as 别名]
# 或者: from datasets.dataset_catalog import DATASETS [as 别名]
def evaluate_masks(
    json_dataset,
    all_boxes,
    all_segms,
    output_dir,
    use_salt=True,
    cleanup=False
):
    """Dump per-image predictions in Cityscapes format and run the official
    Cityscapes instance-level segmentation evaluation script.

    Args:
        json_dataset: dataset wrapper; its ``name`` keys into ``DATASETS``
            and ``get_roidb()`` supplies the ordered image list.
        all_boxes: ``all_boxes[class][image]`` is an array of detections
            whose last column is the score.
        all_segms: ``all_segms[class][image]`` is a list of encoded masks
            parallel to ``all_boxes``.
        output_dir: directory receiving the per-image ``*pred.txt`` files
            and a ``results/`` subdirectory of mask PNGs.
        use_salt: append a random UUID to the result filename to avoid
            collisions between concurrent runs.
        cleanup: NOTE(review): never read in this function — dead
            parameter kept for interface compatibility; confirm callers.

    Returns:
        None; the Cityscapes script prints the aggregated metrics itself.
    """
    if cfg.CLUSTER.ON_CLUSTER:
        # On the cluster avoid saving these files in the job directory
        output_dir = '/tmp'
    res_file = os.path.join(
        output_dir, 'segmentations_' + json_dataset.name + '_results')
    if use_salt:
        res_file += '_{}'.format(str(uuid.uuid4()))
    res_file += '.json'
    # NOTE(review): res_file is built but never written below — looks
    # vestigial; confirm before removing.
    results_dir = os.path.join(output_dir, 'results')
    if not os.path.exists(results_dir):
        os.mkdir(results_dir)
    os.environ['CITYSCAPES_DATASET'] = DATASETS[json_dataset.name][RAW_DIR]
    os.environ['CITYSCAPES_RESULTS'] = output_dir
    # Load the Cityscapes eval script *after* setting the required env vars,
    # since the script reads their values into global variables (at load time).
    import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling \
        as cityscapes_eval
    roidb = json_dataset.get_roidb()
    for i, entry in enumerate(roidb):
        im_name = entry['image']
        basename = os.path.splitext(os.path.basename(im_name))[0]
        txtname = os.path.join(output_dir, basename + 'pred.txt')
        with open(txtname, 'w') as fid_txt:
            if i % 10 == 0:
                logger.info('i: {}: {}'.format(i, basename))
            # Class index 0 is skipped — presumably background; confirm
            # against json_dataset.classes.
            for j in range(1, len(all_segms)):
                clss = json_dataset.classes[j]
                clss_id = cityscapes_eval.name2label[clss].id
                segms = all_segms[j][i]
                boxes = all_boxes[j][i]
                if segms == []:
                    continue
                masks = mask_util.decode(segms)
                for k in range(boxes.shape[0]):
                    score = boxes[k, -1]
                    mask = masks[:, :, k]
                    # Path is relative ('results/...') in the txt file;
                    # the PNG itself is written under output_dir below.
                    pngname = os.path.join(
                        'results',
                        basename + '_' + clss + '_{}.png'.format(k))
                    # write txt
                    fid_txt.write('{} {} {}\n'.format(pngname, clss_id, score))
                    # save mask
                    cv2.imwrite(os.path.join(output_dir, pngname), mask * 255)
    logger.info('Evaluating...')
    cityscapes_eval.main([])
    return None
示例4: eval_json
# 需要导入模块: from datasets import dataset_catalog [as 别名]
# 或者: from datasets.dataset_catalog import DATASETS [as 别名]
def eval_json(det_json,gt_json):
    """Evaluate detections in ``det_json`` against COCO-format ground truth.

    Converts the flat detection json into the per-class/per-image
    ``all_boxes`` structure, pickles it to ``detections.pkl``, runs the
    COCO box evaluation, and prints metrics at IoU 0.5, 0.75 and 0.5:0.95.

    NOTE(review): the ``gt_json`` argument is immediately overwritten from
    ``dataset_catalog`` below, so the caller's value is ignored.
    ``gt_dataset_name`` and ``output_dir`` are read from enclosing/module
    scope — confirm they are defined wherever this function is used.
    """
    json_dataset = JsonDataset(gt_dataset_name)
    gt_json = dataset_catalog.DATASETS[gt_dataset_name]['annotation_file']
    with open(det_json,'rb') as f:
        det = json.load(f)
    f.close()  # redundant: the `with` block already closed the file
    with open(gt_json,'rb') as f:
        gt = json.load(f)
    f.close()  # redundant: the `with` block already closed the file
    # convert det to the all_boxes list
    num_images = len(gt['images'])
    # Hard-coded two classes — presumably background + one foreground
    # class; confirm against the dataset.
    num_classes = 2
    print('Total number of images:',len(det['images']))
    all_boxes, all_segms, all_keyps = empty_results(num_classes,num_images)
    for cls in range(num_classes):
        for image in range(num_images):
            filename = gt['images'][image]['file_name']
            fid = gt['images'][image]['id']  # NOTE(review): never used
            # Match the detection entry to this ground-truth image by name.
            img_prop = get_by_filename(det,filename)
            if not (img_prop is None):
                img_id,det_prop = img_prop
                boxes = get_boxes_by_img_id(det,img_id)
                if image%100 == 0:
                    print('Reading detections for:',filename,'--',det_prop['file_name'])
                    print('Det json:',det_json)
                # Detections carry [x, y, w, h] plus an optional score.
                if 'score' in boxes[0]:
                    boxes = np.array([b['bbox']+[b['score']] for b in boxes])
                else:
                    boxes = np.array([b['bbox'] for b in boxes])
                if len(boxes) > 0:
                    # add w, h to get (x2,y2)
                    boxes[:,2] += boxes[:,0]
                    boxes[:,3] += boxes[:,1]
                    all_boxes[cls][image] = boxes
                else:
                    all_boxes[cls][image] = []
    # save detections
    with open(os.path.join(output_dir,'detections.pkl'),'wb') as f:
        pickle.dump(dict(all_boxes=all_boxes,all_segms=all_segms,all_keyps=all_keyps),f)
    f.close()  # redundant: the `with` block already closed the file
    #input(len(all_boxes[0]))
    coco_eval = evaluate_boxes(json_dataset,all_boxes,output_dir)
    #coco_eval = task_evaluation.evaluate_all(json_dataset,all_boxes,all_segms,all_keyps,output_dir)
    disp_detection_eval_metrics(json_dataset, coco_eval, iou_low=0.5, iou_high=0.5, output_dir=output_dir)
    disp_detection_eval_metrics(json_dataset, coco_eval, iou_low=0.75, iou_high=0.75, output_dir=output_dir)
    disp_detection_eval_metrics(json_dataset, coco_eval, iou_low=0.5, iou_high=0.95, output_dir=output_dir)