This article collects typical usage examples of gluoncv.data.VOCDetection in Python. If you are wondering what data.VOCDetection does, how to call it, or where to find working examples of it, the curated code samples below should help. You can also read further about the enclosing module, gluoncv.data.
The 13 code examples of data.VOCDetection shown below are sorted by popularity by default.
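As a quick orientation before the numbered examples, here is a minimal sketch of constructing the dataset and inspecting one sample. It assumes the PASCAL VOC archives have already been downloaded to the default ~/.mxnet/datasets/voc location (for instance via GluonCV's dataset preparation scripts).

from gluoncv import data

# Load the VOC2007 test split; assumes ~/.mxnet/datasets/voc is already populated.
val = data.VOCDetection(splits=[(2007, 'test')])
print('images:', len(val))
print('classes:', val.classes)

# Each sample is an (image, label) pair: the image as an MXNet NDArray and the
# label as a numpy array whose columns are xmin, ymin, xmax, ymax, class id
# and the VOC "difficult" flag.
img, label = val[0]
print(img.shape, label.shape)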
Example 1: get_dataset
# Required import: from gluoncv import data as gdata
# Or: from gluoncv.data import VOCDetection
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        train_dataset = gdata.VOCDetection(
            splits=[(2007, 'trainval'), (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(
            splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(splits='instances_train2017', use_crowd=False)
        val_dataset = gdata.COCODetection(splits='instances_val2017', skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset, args.save_prefix + '_eval', cleanup=True)
    elif dataset.lower() == 'visualgenome':
        train_dataset = VGObject(root=os.path.join('~', '.mxnet', 'datasets', 'visualgenome'),
                                 splits='detections_train', use_crowd=False)
        val_dataset = VGObject(root=os.path.join('~', '.mxnet', 'datasets', 'visualgenome'),
                               splits='detections_val', skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset, args.save_prefix + '_eval', cleanup=True)
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    if args.mixup:
        from gluoncv.data.mixup import detection
        train_dataset = detection.MixupDetection(train_dataset)
    return train_dataset, val_dataset, val_metric
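For context, get_dataset is called from a training script whose argparse options supply the fields used above; a minimal hedged sketch of such a call (the field values and SimpleNamespace stand-in are hypothetical):

from types import SimpleNamespace

# Hypothetical stand-in for the training script's parsed arguments.
args = SimpleNamespace(save_prefix='ssd_512_voc', mixup=False)
train_dataset, val_dataset, val_metric = get_dataset('voc', args)
print(len(train_dataset), len(val_dataset), type(val_metric).__name__)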
Example 2: test_pascal_voc_detection
# Required import: from gluoncv import data
# Or: from gluoncv.data import VOCDetection
def test_pascal_voc_detection():
    if not osp.isdir(osp.expanduser('~/.mxnet/datasets/voc')):
        return
    train = data.VOCDetection(splits=((2007, 'trainval'), (2012, 'trainval')))
    name = str(train)
    val = data.VOCDetection(splits=((2007, 'test'), ))
    name = str(val)
    assert train.classes == val.classes
    for _ in range(10):
        index = np.random.randint(0, len(train))
        _ = train[index]
    for _ in range(10):
        index = np.random.randint(0, len(val))
        _ = val[index]
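Beyond randomly indexing samples as the test does, it is often useful to eyeball one of them; a sketch using GluonCV's bounding-box plotting helper, assuming matplotlib is installed:

import matplotlib.pyplot as plt
from gluoncv import data
from gluoncv.utils import viz

train = data.VOCDetection(splits=((2007, 'trainval'),))
img, label = train[0]
# Columns of label: xmin, ymin, xmax, ymax, class id, difficult flag.
viz.plot_bbox(img, label[:, :4], labels=label[:, 4:5], class_names=train.classes)
plt.show()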
Example 3: get_dataset
# Required import: from gluoncv import data as gdata
# Or: from gluoncv.data import VOCDetection
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        train_dataset = gdata.VOCDetection(
            splits=[(2007, 'trainval'), (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(
            splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(root=args.dataset_root + "/coco", splits='instances_train2017')
        val_dataset = gdata.COCODetection(root=args.dataset_root + "/coco", splits='instances_val2017', skip_empty=False)
        val_metric = COCODetectionMetric(
            val_dataset, args.save_prefix + '_eval', cleanup=True,
            data_shape=(args.data_shape, args.data_shape), post_affine=get_post_transform)
        # COCO validation is slow, so consider increasing the validation interval.
        if args.val_interval == 1:
            args.val_interval = 10
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    if args.num_samples < 0:
        args.num_samples = len(train_dataset)
    return train_dataset, val_dataset, val_metric
Example 4: get_dataset
# Required import: from gluoncv import data as gdata
# Or: from gluoncv.data import VOCDetection
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        train_dataset = gdata.VOCDetection(
            splits=[(2007, 'trainval'), (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(
            splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(root=args.dataset_root + "/coco", splits='instances_train2017')
        val_dataset = gdata.COCODetection(root=args.dataset_root + "/coco", splits='instances_val2017', skip_empty=False)
        val_metric = COCODetectionMetric(
            val_dataset, args.save_prefix + '_eval', cleanup=True,
            data_shape=(args.data_shape, args.data_shape))
        # COCO validation is slow, so consider increasing the validation interval.
        if args.val_interval == 1:
            args.val_interval = 10
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    return train_dataset, val_dataset, val_metric
Example 5: get_dataset
# Required import: from gluoncv import data as gdata
# Or: from gluoncv.data import VOCDetection
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        train_dataset = gdata.VOCDetection(
            splits=[(2007, 'trainval'), (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(
            splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
    elif dataset.lower() in ['clipart', 'comic', 'watercolor']:
        root = os.path.join('~', '.mxnet', 'datasets', dataset.lower())
        train_dataset = gdata.CustomVOCDetection(root=root, splits=[('', 'train')],
                                                 generate_classes=True)
        val_dataset = gdata.CustomVOCDetection(root=root, splits=[('', 'test')],
                                               generate_classes=True)
        val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(splits='instances_train2017', use_crowd=False)
        val_dataset = gdata.COCODetection(splits='instances_val2017', skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset, args.save_prefix + '_eval', cleanup=True)
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    if args.mixup:
        from gluoncv.data.mixup import detection
        train_dataset = detection.MixupDetection(train_dataset)
    return train_dataset, val_dataset, val_metric
Example 6: get_dataset
# Required import: from gluoncv import data as gdata
# Or: from gluoncv.data import VOCDetection
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        train_dataset = gdata.VOCDetection(
            splits=[(2007, 'trainval'), (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(
            splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(splits='instances_train2017')
        val_dataset = gdata.COCODetection(splits='instances_val2017', skip_empty=False)
        val_metric = COCODetectionMetric(
            val_dataset, args.save_prefix + '_eval', cleanup=True,
            data_shape=(args.data_shape, args.data_shape))
        # COCO validation is slow, so consider increasing the validation interval.
        if args.val_interval == 1:
            args.val_interval = 10
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    return train_dataset, val_dataset, val_metric
Example 7: get_dataset
# Required import: from gluoncv import data as gdata
# Or: from gluoncv.data import VOCDetection
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        train_dataset = gdata.VOCDetection(
            splits=[(2007, 'trainval'), (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(
            splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(splits='instances_train2017', use_crowd=False)
        val_dataset = gdata.COCODetection(splits='instances_val2017', skip_empty=False)
        val_metric = COCODetectionMetric(
            val_dataset, args.save_prefix + '_eval', cleanup=True,
            data_shape=(args.data_shape, args.data_shape))
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    if args.num_samples < 0:
        args.num_samples = len(train_dataset)
    if args.mixup:
        from gluoncv.data import MixupDetection
        train_dataset = MixupDetection(train_dataset)
    return train_dataset, val_dataset, val_metric
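Note that MixupDetection only wraps the training set; nothing is actually mixed until a sampling distribution is set. The reference training scripts enable and later disable it roughly as in the hedged sketch below (the Beta parameters are illustrative):

import numpy as np

# Enable mixup: each sample is now blended with a randomly chosen partner
# image using a Beta(1.5, 1.5) mixing weight.
train_dataset.set_mixup(np.random.beta, 1.5, 1.5)
# ... train ...
# Disable mixup for the final epochs, as the reference scripts do.
train_dataset.set_mixup(None)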
Example 8: get_dataset
# Required import: from gluoncv import data as gdata
# Or: from gluoncv.data import VOCDetection
def get_dataset(dataset, data_shape):
    # Note: `args` is a module-level argparse namespace in the original script.
    if dataset.lower() == 'voc':
        val_dataset = gdata.VOCDetection(splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        val_dataset = gdata.COCODetection(splits='instances_val2017', skip_empty=False)
        val_metric = COCODetectionMetric(
            val_dataset, args.save_prefix + '_eval', cleanup=True,
            data_shape=(data_shape, data_shape), post_affine=get_post_transform)
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    return val_dataset, val_metric
Example 9: get_dataset
# Required import: from gluoncv import data as gdata
# Or: from gluoncv.data import VOCDetection
def get_dataset(dataset, data_shape):
    # Note: `args` is a module-level argparse namespace in the original script.
    if dataset.lower() == 'voc':
        val_dataset = gdata.VOCDetection(splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        val_dataset = gdata.COCODetection(splits='instances_val2017', skip_empty=False)
        val_metric = COCODetectionMetric(
            val_dataset, args.save_prefix + '_eval', cleanup=True,
            data_shape=(data_shape, data_shape))
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    return val_dataset, val_metric
Example 10: get_dataset
# Required import: from gluoncv import data as gdata
# Or: from gluoncv.data import VOCDetection
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        val_dataset = gdata.VOCDetection(
            splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        val_dataset = gdata.COCODetection(splits='instances_val2017', skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset, args.save_prefix + '_eval',
                                         cleanup=not args.save_json)
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    return val_dataset, val_metric
Example 11: get_dataset
# Required import: from gluoncv import data as gdata
# Or: from gluoncv.data import VOCDetection
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        train_dataset = gdata.VOCDetection(
            splits=[(2007, 'trainval'), (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(
            splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(splits='instances_train2017')
        val_dataset = gdata.COCODetection(splits='instances_val2017', skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset, args.save_prefix + '_eval', cleanup=True)
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    return train_dataset, val_dataset, val_metric
Example 12: get_dataset
# Required import: from gluoncv import data as gdata
# Or: from gluoncv.data import VOCDetection
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        val_dataset = gdata.VOCDetection(
            splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.75, class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        val_dataset = gdata.COCODetection(splits='instances_val2017', skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset, args.save_prefix + '_eval',
                                         cleanup=not args.save_json)
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    return val_dataset, val_metric
Example 13: get_dataset
# Required import: from gluoncv import data as gdata
# Or: from gluoncv.data import VOCDetection
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        train_dataset = gdata.VOCDetection(
            splits=[(2007, 'trainval'), (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(
            splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(splits='instances_train2017', use_crowd=False)
        val_dataset = gdata.COCODetection(splits='instances_val2017', skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset, args.save_prefix + '_eval', cleanup=True)
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    return train_dataset, val_dataset, val_metric
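Finally, a hedged sketch of how the returned val_dataset and val_metric are typically consumed during validation; net, val_loader and ctx are hypothetical placeholders for objects created elsewhere in the script, and the label column layout assumes the validation transform keeps the VOC difficult flag in column 5:

val_metric.reset()
for batch in val_loader:  # hypothetical DataLoader built over val_dataset
    x = batch[0].as_in_context(ctx)
    y = batch[1].as_in_context(ctx)
    ids, scores, bboxes = net(x)
    gt_bboxes = y.slice_axis(axis=-1, begin=0, end=4)
    gt_ids = y.slice_axis(axis=-1, begin=4, end=5)
    gt_difficults = y.slice_axis(axis=-1, begin=5, end=6) if y.shape[-1] > 5 else None
    val_metric.update(bboxes, ids, scores, gt_bboxes, gt_ids, gt_difficults)
names, values = val_metric.get()
print(dict(zip(names, values)))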