This article collects typical usage examples of the Python method utils.augmentations.SSDAugmentation. If you are looking for how to use augmentations.SSDAugmentation, or for concrete examples of it in practice, the curated code samples below may help. You can also explore further usage examples from the module it belongs to, utils.augmentations.
Two code examples of the augmentations.SSDAugmentation method are shown below.
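Before the two examples, here is a minimal sketch of calling the transform on its own. It assumes the common SSD-style interface in which an SSDAugmentation instance is constructed with a target size and a per-channel mean and is then called with (image, boxes, labels), returning the augmented triple; the size and mean values, the dummy image, and the normalized box format are illustrative assumptions, not taken from the examples below.

import numpy as np
from utils.augmentations import SSDAugmentation

# Build the transform the same way the examples below do: target size plus per-channel mean.
# (300 and (104, 117, 123) are assumed placeholder values.)
transform = SSDAugmentation(300, mean=(104, 117, 123))

# Dummy inputs (assumed format): one HxWx3 float image, boxes as normalized
# (xmin, ymin, xmax, ymax) rows, and one integer class label per box.
image = np.random.randint(0, 255, (480, 640, 3)).astype(np.float32)
boxes = np.array([[0.10, 0.20, 0.50, 0.60]], dtype=np.float32)
labels = np.array([1])

aug_image, aug_boxes, aug_labels = transform(image, boxes, labels)
print(aug_image.shape, aug_boxes.shape, aug_labels.shape)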
Example 1: DatasetSync
# Required import: from utils import augmentations [as alias]
# Or: from utils.augmentations import SSDAugmentation [as alias]
def DatasetSync(dataset='VOC', split='training'):
    if dataset == 'VOC':
        # DataRoot = os.path.join(args.data_root, 'VOCdevkit')
        DataRoot = args.data_root
        dataset = VOCDetection(DataRoot, train_sets,
                               SSDAugmentation(args.dim, means),
                               AnnotationTransform())
    elif dataset == 'kitti':
        DataRoot = os.path.join(args.data_root, 'kitti')
        dataset = KittiLoader(DataRoot, split=split, img_size=(1000, 300),
                              transforms=SSDAugmentation((1000, 300), means),
                              target_transform=AnnotationTransform_kitti())
    return dataset
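A hypothetical call site for Example 1 might look like the sketch below. It assumes the surrounding training script has already defined the module-level args, means, and train_sets that DatasetSync reads, and that detection_collate (the collate function used in Example 2) is importable; the batch size and worker count are placeholder values.

import torch.utils.data as data

train_dataset = DatasetSync(dataset='VOC', split='training')
train_loader = data.DataLoader(train_dataset,
                               batch_size=32,       # placeholder value
                               shuffle=True,
                               num_workers=4,       # placeholder value
                               collate_fn=detection_collate,
                               pin_memory=True)

for batch in train_loader:
    pass  # each batch is whatever detection_collate produces, e.g. (images, targets)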
Example 2: validate
# Required import: from utils import augmentations [as alias]
# Or: from utils.augmentations import SSDAugmentation [as alias]
def validate(args, net, criterion, cfg):
    validation_batch_size = 1
    try:
        # Turn off learning. Go to testing phase
        net.eval()
        dataset = GTDBDetection(args, args.validation_data, split='validate',
                                transform=SSDAugmentation(cfg['min_dim'], mean=MEANS))
        data_loader = data.DataLoader(dataset, validation_batch_size,
                                      num_workers=args.num_workers,
                                      shuffle=False, collate_fn=detection_collate,
                                      pin_memory=True)
        total = len(dataset)
        done = 0
        loc_loss = 0
        conf_loss = 0
        start = time.time()
        for batch_idx, (images, targets, ids) in enumerate(data_loader):
            done = done + len(images)
            logging.debug('processing {}/{}'.format(done, total))
            if args.cuda:
                images = images.cuda()
                targets = [ann.cuda() for ann in targets]
            else:
                images = Variable(images)
                targets = [Variable(ann, volatile=True) for ann in targets]
            y = net(images)  # forward pass
            loss_l, loss_c = criterion(y, targets)
            loc_loss += loss_l.item()  # data[0]
            conf_loss += loss_c.item()  # data[0]
        end = time.time()
        logging.debug('Time taken for validation ' + str(datetime.timedelta(seconds=end - start)))
        return (loc_loss + conf_loss) / (total / validation_batch_size)
    except Exception as e:
        logging.error("Could not validate", exc_info=True)
        return 0
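As a usage note for Example 2, a sketch of how validate might be wired into a checkpointing loop follows. The epoch count, the elided training step, and the checkpoint path are hypothetical placeholders; net, criterion, cfg, and args are assumed to come from the surrounding training script.

import logging
import torch

best_loss = float('inf')
for epoch in range(10):  # placeholder epoch count
    # ... run one epoch of training here (training step elided) ...
    val_loss = validate(args, net, criterion, cfg)
    logging.info('epoch {}: validation loss {:.4f}'.format(epoch, val_loss))
    if val_loss < best_loss:
        best_loss = val_loss
        torch.save(net.state_dict(), 'best_model.pth')  # placeholder path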