This article collects typical usage examples of the Python method data.AnnotationTransform. If you are wondering what data.AnnotationTransform does or how to use it, the curated code examples here may help. You can also explore further usage examples from the data module that the method belongs to.
The following presents 4 code examples of data.AnnotationTransform, sorted by popularity by default.
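For orientation: in the detector repositories these examples come from, AnnotationTransform turns one VOC-style XML annotation into a list of [xmin, ymin, xmax, ymax, label_index] rows with coordinates normalized to [0, 1]. Below is a minimal sketch assuming an SSD-style implementation; the class name and the class_to_ind mapping are illustrative, not the exact source.

# Minimal sketch of a VOC-style AnnotationTransform (assumed, not the exact source).
# Maps an ElementTree annotation to [[xmin, ymin, xmax, ymax, label_idx], ...],
# scaling x by image width and y by image height.
class AnnotationTransformSketch:
    def __init__(self, class_to_ind):
        self.class_to_ind = class_to_ind  # e.g. {'hand': 0} or the 20 VOC classes

    def __call__(self, target, width, height):
        res = []
        for obj in target.iter('object'):
            name = obj.find('name').text.lower().strip()
            bbox = obj.find('bndbox')
            box = []
            for i, pt in enumerate(['xmin', 'ymin', 'xmax', 'ymax']):
                cur = int(bbox.find(pt).text) - 1
                # even indices are x coordinates, odd indices are y coordinates
                box.append(cur / width if i % 2 == 0 else cur / height)
            box.append(self.class_to_ind[name])
            res.append(box)
        return res  # one row per annotated object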
Example 1: DatasetSync
# Required import: import data [as alias]
# Or: from data import AnnotationTransform [as alias]
def DatasetSync(dataset='VOC', split='training'):
    if dataset == 'VOC':
        # DataRoot = os.path.join(args.data_root, 'VOCdevkit')
        DataRoot = args.data_root
        dataset = VOCDetection(DataRoot, train_sets,
                               SSDAugmentation(args.dim, means),
                               AnnotationTransform())
    elif dataset == 'kitti':
        DataRoot = os.path.join(args.data_root, 'kitti')
        dataset = KittiLoader(DataRoot, split=split, img_size=(1000, 300),
                              transforms=SSDAugmentation((1000, 300), means),
                              target_transform=AnnotationTransform_kitti())
    return dataset
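A typical way to consume DatasetSync is to hand the returned dataset to a DataLoader, as the training loops below do. A hedged usage sketch (the batch size and worker count are illustrative; detection_collate is the collate function used throughout this page):

# Hypothetical usage of DatasetSync, consistent with the training loops below.
dataset = DatasetSync(dataset='VOC', split='training')
loader = data.DataLoader(dataset, batch_size=32, shuffle=True,
                         num_workers=4, collate_fn=detection_collate)
images, targets = next(iter(loader))  # images: (N, 3, H, W); targets: list of per-image box tensors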
Example 2: train
# Required import: import data [as alias]
# Or: from data import AnnotationTransform [as alias]
def train():
    net.train()
    epoch = 0 + args.resume_epoch
    print('Loading Dataset...')

    dataset = VOCDetection(args.training_dataset, preproc(img_dim, rgb_means), AnnotationTransform())

    epoch_size = math.ceil(len(dataset) / args.batch_size)
    max_iter = args.max_epoch * epoch_size

    stepvalues = (200 * epoch_size, 250 * epoch_size)
    step_index = 0

    if args.resume_epoch > 0:
        start_iter = args.resume_epoch * epoch_size
    else:
        start_iter = 0

    for iteration in range(start_iter, max_iter):
        if iteration % epoch_size == 0:
            # create batch iterator
            batch_iterator = iter(data.DataLoader(dataset, batch_size, shuffle=True,
                                                  num_workers=args.num_workers,
                                                  collate_fn=detection_collate))
            if (epoch % 10 == 0 and epoch > 0) or (epoch % 5 == 0 and epoch > 200):
                torch.save(net.state_dict(), args.save_folder + 'HandBoxes_epoch_' + repr(epoch) + '.pth')
            epoch += 1

        load_t0 = time.time()
        if iteration in stepvalues:
            step_index += 1
        lr = adjust_learning_rate(optimizer, args.gamma, epoch, step_index, iteration, epoch_size)

        # load train data
        images, targets = next(batch_iterator)
        if gpu_train:
            images = Variable(images.cuda())
            targets = [Variable(anno.cuda()) for anno in targets]
        else:
            images = Variable(images)
            targets = [Variable(anno) for anno in targets]

        # forward
        out = net(images)

        # backprop
        optimizer.zero_grad()
        loss_l, loss_c = criterion(out, priors, targets)
        loss = cfg['loc_weight'] * loss_l + loss_c
        loss.backward()
        optimizer.step()
        load_t1 = time.time()

        print('Epoch:' + repr(epoch) + ' || epochiter: ' + repr(iteration % epoch_size) + '/' + repr(epoch_size) +
              ' || Total iter ' + repr(iteration) + ' || L: %.4f C: %.4f ||' % (cfg['loc_weight'] * loss_l.item(), loss_c.item()) +
              ' Batch time: %.4f sec. || ' % (load_t1 - load_t0) + 'LR: %.8f' % lr)

    torch.save(net.state_dict(), args.save_folder + 'Final_HandBoxes.pth')
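All of the train() examples on this page call adjust_learning_rate with the same signature but never define it. A minimal step-decay sketch consistent with that call site; initial_lr (assumed to come from args.lr) and the optional linear warmup are assumptions, not the exact source:

def adjust_learning_rate(optimizer, gamma, epoch, step_index, iteration, epoch_size):
    """Sketch: step decay with optional linear warmup (assumed, not the exact source)."""
    initial_lr = args.lr   # assumed argparse option, as in these training scripts
    warmup_epoch = 0       # set > 0 to ramp the LR up linearly over the first epochs
    if epoch < warmup_epoch:
        lr = 1e-6 + (initial_lr - 1e-6) * iteration / (epoch_size * warmup_epoch)
    else:
        lr = initial_lr * (gamma ** step_index)  # stepvalues increments step_index
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr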
Example 3: train
# Required import: import data [as alias]
# Or: from data import AnnotationTransform [as alias]
def train():
    net.train()
    epoch = 0 + args.resume_epoch
    print('Loading Dataset...')

    dataset = VOCDetection(args.training_dataset,
                           preproc_s3fd(img_dim, rgb_means, cfg['max_expand_ratio']),
                           AnnotationTransform())

    epoch_size = math.ceil(len(dataset) / args.batch_size)
    max_iter = args.max_epoch * epoch_size

    stepvalues = (200 * epoch_size, 250 * epoch_size)
    step_index = 0

    if args.resume_epoch > 0:
        start_iter = args.resume_epoch * epoch_size
    else:
        start_iter = 0

    for iteration in range(start_iter, max_iter):
        if iteration % epoch_size == 0:
            # create batch iterator
            batch_iterator = iter(data.DataLoader(dataset, batch_size, shuffle=True,
                                                  num_workers=args.num_workers,
                                                  collate_fn=detection_collate, pin_memory=True))
            if (epoch % 10 == 0 and epoch > 0) or (epoch % 5 == 0 and epoch > 200):
                torch.save(net.state_dict(), args.save_folder + 'S3FD_epoch_' + repr(epoch) + '.pth')
            epoch += 1

        load_t0 = time.time()
        if iteration in stepvalues:
            step_index += 1
        lr = adjust_learning_rate(optimizer, args.gamma, epoch, step_index, iteration, epoch_size)

        # load train data
        images, targets = next(batch_iterator)
        if args.cuda:
            images = Variable(images.cuda())
            targets = [Variable(anno.cuda()) for anno in targets]
        else:
            images = Variable(images)
            targets = [Variable(anno) for anno in targets]

        # forward
        out = net(images)

        # backprop
        optimizer.zero_grad()
        loss_l, loss_c = criterion(out, priors, targets)
        loss = loss_l + cfg['conf_weight'] * loss_c
        loss.backward()
        optimizer.step()
        load_t1 = time.time()

        print('Epoch:' + repr(epoch) + ' || epochiter: ' + repr(iteration % epoch_size) + '/' + repr(epoch_size) +
              ' || Total iter ' + repr(iteration) + ' || L: %.4f C: %.4f ||' % (loss_l.item(), cfg['conf_weight'] * loss_c.item()) +
              ' Batch time: %.4f sec. || ' % (load_t1 - load_t0) + 'LR: %.8f' % lr)

        if writer is not None:
            writer.add_scalar('train/loss_l', loss_l.item(), iteration)
            writer.add_scalar('train/loss_c', cfg['conf_weight'] * loss_c.item(), iteration)
            writer.add_scalar('train/lr', lr, iteration)

    torch.save(net.state_dict(), args.save_folder + 'Final_S3FD.pth')
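Every DataLoader above passes collate_fn=detection_collate because the images in a batch can be stacked into one tensor while each image carries a different number of annotated boxes. A minimal sketch, assuming the SSD-style collate these repositories use (not shown in the source):

def detection_collate(batch):
    """Sketch: stack images into one tensor; keep per-image box arrays in a list,
    since the number of annotations varies from image to image."""
    imgs, targets = [], []
    for img, boxes in batch:
        imgs.append(img)
        targets.append(torch.FloatTensor(boxes))
    return torch.stack(imgs, 0), targets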
Example 4: train
# Required import: import data [as alias]
# Or: from data import AnnotationTransform [as alias]
def train():
    net.train()
    epoch = 0 + args.resume_epoch
    print('Loading Dataset...')

    dataset = VOCDetection(training_dataset, preproc(img_dim, rgb_mean), AnnotationTransform())

    epoch_size = math.ceil(len(dataset) / batch_size)
    max_iter = max_epoch * epoch_size

    stepvalues = (200 * epoch_size, 250 * epoch_size)
    step_index = 0

    if args.resume_epoch > 0:
        start_iter = args.resume_epoch * epoch_size
    else:
        start_iter = 0

    for iteration in range(start_iter, max_iter):
        if iteration % epoch_size == 0:
            # create batch iterator
            batch_iterator = iter(data.DataLoader(dataset, batch_size, shuffle=True,
                                                  num_workers=num_workers,
                                                  collate_fn=detection_collate))
            if (epoch % 10 == 0 and epoch > 0) or (epoch % 5 == 0 and epoch > 200):
                torch.save(net.state_dict(), save_folder + 'FaceBoxes_epoch_' + str(epoch) + '.pth')
            epoch += 1

        load_t0 = time.time()
        if iteration in stepvalues:
            step_index += 1
        lr = adjust_learning_rate(optimizer, gamma, epoch, step_index, iteration, epoch_size)

        # load train data
        images, targets = next(batch_iterator)
        images = images.to(device)
        targets = [anno.to(device) for anno in targets]

        # forward
        out = net(images)

        # backprop
        optimizer.zero_grad()
        loss_l, loss_c = criterion(out, priors, targets)
        loss = cfg['loc_weight'] * loss_l + loss_c
        loss.backward()
        optimizer.step()
        load_t1 = time.time()

        batch_time = load_t1 - load_t0
        eta = int(batch_time * (max_iter - iteration))
        print('Epoch:{}/{} || Epochiter: {}/{} || Iter: {}/{} || L: {:.4f} C: {:.4f} || LR: {:.8f} || Batchtime: {:.4f} s || ETA: {}'
              .format(epoch, max_epoch, (iteration % epoch_size) + 1, epoch_size, iteration + 1, max_iter,
                      loss_l.item(), loss_c.item(), lr, batch_time, str(datetime.timedelta(seconds=eta))))

    torch.save(net.state_dict(), save_folder + 'Final_FaceBoxes.pth')
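After training, the saved state dict can be restored for evaluation. A minimal sketch; the checkpoint path mirrors the save call above, and map_location uses the same device object as Example 4:

# Hypothetical: reload the final checkpoint before running inference.
net.load_state_dict(torch.load(save_folder + 'Final_FaceBoxes.pth', map_location=device))
net.eval()  # switch dropout/batchnorm layers to inference behaviour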