

Python data.VOCDetection Method Code Examples

This article collects typical usage examples of Python's data.VOCDetection. If you have been wondering what data.VOCDetection does and how it is used in practice, the curated code examples below may help. You can also explore further usage examples from the data module it belongs to.


Seven code examples of data.VOCDetection are shown below, sorted by default from most to least popular.
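Before diving into the examples, here is a minimal sketch (not taken from any single project) of how a VOC detection dataset of this kind is typically constructed and batched. It assumes the SSD-style signature VOCDetection(root, image_sets, transform, target_transform) and that VOCAnnotationTransform, BaseTransform, and detection_collate can be imported from the same data package, as in the examples below; the dataset path is a placeholder.

# Minimal sketch: build a PASCAL VOC detection dataset and pull one batch.
# Assumes the SSD-style VOCDetection(root, image_sets, transform, target_transform)
# signature and that the helper classes/functions live in the repo's data package.
from torch.utils.data import DataLoader
from data import VOCDetection, VOCAnnotationTransform, BaseTransform, detection_collate

dataset = VOCDetection('/path/to/VOCdevkit', [('2007', 'trainval')],
                       BaseTransform(300, (104, 117, 123)),
                       VOCAnnotationTransform())
loader = DataLoader(dataset, batch_size=32, shuffle=True,
                    num_workers=4, collate_fn=detection_collate)
images, targets = next(iter(loader))  # images: (N, 3, 300, 300); targets: list of box tensors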

Example 1: test_voc

# Required module: import data [as alias]
# Or: from data import VOCDetection [as alias]
def test_voc():
    # load net
    num_classes = len(VOC_CLASSES) + 1 # +1 background
    net = build_ssd('test', 300, num_classes) # initialize SSD
    net.load_state_dict(torch.load(args.trained_model))
    net.eval()
    print('Finished loading model!')
    # load data
    testset = VOCDetection(args.voc_root, [('2007', 'test')], None, VOCAnnotationTransform())
    if args.cuda:
        net = net.cuda()
        cudnn.benchmark = True
    # evaluation
    test_net(args.save_folder, net, args.cuda, testset,
             BaseTransform(net.size, (104, 117, 123)),
             thresh=args.visual_threshold) 
Author: soo89 | Project: CSD-SSD | Lines: 18 | Source: test.py
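test_voc relies on several command-line options parsed elsewhere in test.py. The sketch below is illustrative only: the flag names are taken from the snippet, while the default values are assumptions.

# Illustrative argparse block matching the args.* attributes used above.
# Flag names come from the snippet; the defaults are assumptions.
import argparse

parser = argparse.ArgumentParser(description='SSD VOC07 test')
parser.add_argument('--trained_model', default='weights/ssd300_voc.pth', type=str)
parser.add_argument('--save_folder', default='eval/', type=str)
parser.add_argument('--voc_root', default='data/VOCdevkit/', type=str)
parser.add_argument('--visual_threshold', default=0.6, type=float)
parser.add_argument('--cuda', action='store_true', help='evaluate on GPU')
args = parser.parse_args()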

Example 2: test_voc

# Required module: import data [as alias]
# Or: from data import VOCDetection [as alias]
def test_voc():
    # load net
    num_classes = len(VOC_CLASSES) + 1 # +1 background
    net = build_ssd('test', args.model, 300, num_classes) # initialize SSD
    net.load_state_dict(torch.load(args.trained_model))
    net.eval()
    print('Finished loading model!')
    # load data
    testset = VOCDetection(args.voc_root, [('2007', 'test')], None, VOCAnnotationTransform())
    if args.cuda:
        net = net.cuda()
        cudnn.benchmark = True
    # evaluation
    test_net(args.save_folder, net, args.cuda, testset,
             BaseTransform(net.size, (104, 117, 123)),
             thresh=args.visual_threshold) 
Author: yczhang1017 | Project: SSD_resnet_pytorch | Lines: 18 | Source: test.py

Example 3: get_dataloader

# Required module: import data [as alias]
# Or: from data import VOCDetection [as alias]
def get_dataloader(cfg, dataset, setname='train_sets'):
    _preproc = preproc(cfg.model.input_size, cfg.model.rgb_means, cfg.model.p)
    Dataloader_function = {'VOC': VOCDetection, 'COCO':COCODetection}
    _Dataloader_function = Dataloader_function[dataset]
    if setname == 'train_sets':
        dataset = _Dataloader_function(cfg.COCOroot if dataset == 'COCO' else cfg.VOCroot,
                                   getattr(cfg.dataset, dataset)[setname], _preproc)
    else:
        dataset = _Dataloader_function(cfg.COCOroot if dataset == 'COCO' else cfg.VOCroot,
                                   getattr(cfg.dataset, dataset)[setname], None)
    return dataset 
Author: qijiezhao | Project: M2Det | Lines: 13 | Source: core.py
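Despite its name, get_dataloader returns a dataset rather than a torch DataLoader, so the caller still has to wrap it. A hedged usage sketch, assuming an M2Det-style cfg object and the repository's detection_collate function:

# Hypothetical call site; cfg is assumed to expose the model.*, dataset.*,
# VOCroot and COCOroot fields used inside get_dataloader above.
from torch.utils.data import DataLoader

train_dataset = get_dataloader(cfg, 'VOC', setname='train_sets')
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True,
                          num_workers=4, collate_fn=detection_collate)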

Example 4: DatasetSync

# Required module: import data [as alias]
# Or: from data import VOCDetection [as alias]
def DatasetSync(dataset='VOC',split='training'):


    if dataset=='VOC':
        #DataRoot=os.path.join(args.data_root,'VOCdevkit')
        DataRoot=args.data_root
        dataset = VOCDetection(DataRoot, train_sets,
                               SSDAugmentation(args.dim, means),
                               AnnotationTransform())
    elif dataset=='kitti':
        DataRoot=os.path.join(args.data_root,'kitti')
        dataset = KittiLoader(DataRoot, split=split,img_size=(1000,300),
                  transforms=SSDAugmentation((1000,300),means),
                  target_transform=AnnotationTransform_kitti())
    return dataset 
Author: qijiezhao | Project: pytorch-ssd | Lines: 16 | Source: train.py
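A short, hypothetical call site; train_sets, means, and args.data_root are assumed to be defined at module level in train.py, as the function body implies.

# Hypothetical usage of DatasetSync; names mirror the snippet above.
voc_dataset = DatasetSync(dataset='VOC', split='training')
print('Loaded %d VOC training samples' % len(voc_dataset))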

Example 5: train

# Required module: import data [as alias]
# Or: from data import VOCDetection [as alias]
def train():
    net.train()
    epoch = 0 + args.resume_epoch
    print('Loading Dataset...')

    dataset = VOCDetection(args.training_dataset, preproc(img_dim, rgb_means), AnnotationTransform())

    epoch_size = math.ceil(len(dataset) / args.batch_size)
    max_iter = args.max_epoch * epoch_size

    stepvalues = (200 * epoch_size, 250 * epoch_size)
    step_index = 0

    if args.resume_epoch > 0:
        start_iter = args.resume_epoch * epoch_size
    else:
        start_iter = 0

    for iteration in range(start_iter, max_iter):
        if iteration % epoch_size == 0:
            # create batch iterator
            batch_iterator = iter(data.DataLoader(dataset, batch_size, shuffle=True, num_workers=args.num_workers, collate_fn=detection_collate))
            if (epoch % 10 == 0 and epoch > 0) or (epoch % 5 == 0 and epoch > 200):
                torch.save(net.state_dict(), args.save_folder + 'HandBoxes_epoch_' + repr(epoch) + '.pth')
            epoch += 1

        load_t0 = time.time()
        if iteration in stepvalues:
            step_index += 1
        lr = adjust_learning_rate(optimizer, args.gamma, epoch, step_index, iteration, epoch_size)

        # load train data
        images, targets = next(batch_iterator)
        if gpu_train:
            images = Variable(images.cuda())
            targets = [Variable(anno.cuda()) for anno in targets]
        else:
            images = Variable(images)
            targets = [Variable(anno) for anno in targets]

        # forward
        out = net(images)
        
        # backprop
        optimizer.zero_grad()
        loss_l, loss_c = criterion(out, priors, targets)
        loss = cfg['loc_weight'] * loss_l + loss_c
        loss.backward()
        optimizer.step()
        load_t1 = time.time()
        print('Epoch:' + repr(epoch) + ' || epochiter: ' + repr(iteration % epoch_size) + '/' + repr(epoch_size) +
              ' || Total iter ' + repr(iteration) + ' || L: %.4f C: %.4f || ' % (cfg['loc_weight'] * loss_l.item(), loss_c.item()) +
              'Batch time: %.4f sec. || ' % (load_t1 - load_t0) + 'LR: %.8f' % (lr))

    torch.save(net.state_dict(), args.save_folder + 'Final_HandBoxes.pth') 
Author: zllrunning | Project: hand-detection.PyTorch | Lines: 57 | Source: train.py
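The DataLoader above needs a custom collate_fn because each image carries a different number of ground-truth boxes. The following is a minimal sketch of what an SSD-style detection_collate typically does; the actual implementation lives in the repository's data package.

# Sketch of an SSD-style detection_collate: stack images into a single tensor
# and keep the variable-length annotation arrays as a list of tensors.
import torch

def detection_collate(batch):
    images, targets = [], []
    for image, boxes in batch:
        images.append(image)
        targets.append(torch.FloatTensor(boxes))
    return torch.stack(images, 0), targets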

Example 6: train

# Required module: import data [as alias]
# Or: from data import VOCDetection [as alias]
def train():
    net.train()
    epoch = 0 + args.resume_epoch
    print('Loading Dataset...')

    dataset = VOCDetection(args.training_dataset, preproc_s3fd(img_dim, rgb_means, cfg['max_expand_ratio']), AnnotationTransform())

    epoch_size = math.ceil(len(dataset) / args.batch_size)
    max_iter = args.max_epoch * epoch_size

    stepvalues = (200 * epoch_size, 250 * epoch_size)
    step_index = 0

    if args.resume_epoch > 0:
        start_iter = args.resume_epoch * epoch_size
    else:
        start_iter = 0

    for iteration in range(start_iter, max_iter):
        if iteration % epoch_size == 0:
            # create batch iterator
            batch_iterator = iter(data.DataLoader(dataset, batch_size, shuffle=True, num_workers=args.num_workers, collate_fn=detection_collate, pin_memory=True))
            if (epoch % 10 == 0 and epoch > 0) or (epoch % 5 == 0 and epoch > 200):
                torch.save(net.state_dict(), args.save_folder + 'S3FD_epoch_' + repr(epoch) + '.pth')
            epoch += 1

        load_t0 = time.time()
        if iteration in stepvalues:
            step_index += 1
        lr = adjust_learning_rate(optimizer, args.gamma, epoch, step_index, iteration, epoch_size)

        # load train data
        images, targets = next(batch_iterator)
        if args.cuda:
            images = Variable(images.cuda())
            targets = [Variable(anno.cuda()) for anno in targets]
        else:
            images = Variable(images)
            targets = [Variable(anno) for anno in targets]

        # forward
        out = net(images)
        
        # backprop
        optimizer.zero_grad()
        loss_l, loss_c = criterion(out, priors, targets)
        loss = loss_l + cfg['conf_weight'] * loss_c
        loss.backward()
        optimizer.step()
        load_t1 = time.time()
        print('Epoch:' + repr(epoch) + ' || epochiter: ' + repr(iteration % epoch_size) + '/' + repr(epoch_size) +
              ' || Total iter ' + repr(iteration) + ' || L: %.4f C: %.4f || ' % (loss_l.item(), cfg['conf_weight'] * loss_c.item()) +
              'Batch time: %.4f sec. || ' % (load_t1 - load_t0) + 'LR: %.8f' % (lr))
        if writer is not None:
            writer.add_scalar('train/loss_l', loss_l.item(), iteration)
            writer.add_scalar('train/loss_c', cfg['conf_weight'] * loss_c.item(), iteration)
            writer.add_scalar('train/lr', lr, iteration)

    torch.save(net.state_dict(), args.save_folder + 'Final_S3FD.pth') 
Author: luuuyi | Project: S3FD.PyTorch | Lines: 61 | Source: train_s3fd.py
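The learning-rate schedule itself is not shown in examples 5-7. Below is a hedged sketch of a warmup-plus-step-decay scheme that matches the adjust_learning_rate call signature used above; initial_lr and warmup_epoch are assumed hyperparameters, not values taken from the repositories.

# Sketch of a warmup + step-decay schedule matching the call signature above.
# initial_lr and warmup_epoch are assumptions, not values from the repos.
def adjust_learning_rate(optimizer, gamma, epoch, step_index, iteration, epoch_size,
                         initial_lr=1e-3, warmup_epoch=1):
    if epoch <= warmup_epoch:
        # ramp up linearly from a tiny learning rate during the warmup epoch(s)
        lr = 1e-6 + (initial_lr - 1e-6) * iteration / (epoch_size * warmup_epoch)
    else:
        # decay by gamma each time a milestone in stepvalues has been passed
        lr = initial_lr * (gamma ** step_index)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr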

Example 7: train

# Required module: import data [as alias]
# Or: from data import VOCDetection [as alias]
def train():
    net.train()
    epoch = 0 + args.resume_epoch
    print('Loading Dataset...')

    dataset = VOCDetection(training_dataset, preproc(img_dim, rgb_mean), AnnotationTransform())

    epoch_size = math.ceil(len(dataset) / batch_size)
    max_iter = max_epoch * epoch_size

    stepvalues = (200 * epoch_size, 250 * epoch_size)
    step_index = 0

    if args.resume_epoch > 0:
        start_iter = args.resume_epoch * epoch_size
    else:
        start_iter = 0

    for iteration in range(start_iter, max_iter):
        if iteration % epoch_size == 0:
            # create batch iterator
            batch_iterator = iter(data.DataLoader(dataset, batch_size, shuffle=True, num_workers=num_workers, collate_fn=detection_collate))
            if (epoch % 10 == 0 and epoch > 0) or (epoch % 5 == 0 and epoch > 200):
                torch.save(net.state_dict(), save_folder + 'FaceBoxes_epoch_' + str(epoch) + '.pth')
            epoch += 1

        load_t0 = time.time()
        if iteration in stepvalues:
            step_index += 1
        lr = adjust_learning_rate(optimizer, gamma, epoch, step_index, iteration, epoch_size)

        # load train data
        images, targets = next(batch_iterator)
        images = images.to(device)
        targets = [anno.to(device) for anno in targets]

        # forward
        out = net(images)

        # backprop
        optimizer.zero_grad()
        loss_l, loss_c = criterion(out, priors, targets)
        loss = cfg['loc_weight'] * loss_l + loss_c
        loss.backward()
        optimizer.step()
        load_t1 = time.time()
        batch_time = load_t1 - load_t0
        eta = int(batch_time * (max_iter - iteration))
        print('Epoch:{}/{} || Epochiter: {}/{} || Iter: {}/{} || L: {:.4f} C: {:.4f} || LR: {:.8f} || Batchtime: {:.4f} s || ETA: {}'.format(epoch, max_epoch, (iteration % epoch_size) + 1, epoch_size, iteration + 1, max_iter, loss_l.item(), loss_c.item(), lr, batch_time, str(datetime.timedelta(seconds=eta))))

    torch.save(net.state_dict(), save_folder + 'Final_FaceBoxes.pth') 
Author: zisianw | Project: FaceBoxes.PyTorch | Lines: 53 | Source: train.py


Note: The data.VOCDetection examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult each project's license before distributing or reusing the code; do not republish without permission.