

Python datasets.create Method Code Examples

This article collects typical usage examples of the Python method reid.datasets.create. If you are wondering what datasets.create does, how to call it, or how it is used in practice, the curated examples below should help. You can also explore further usage examples from the reid.datasets module.


Nine code examples of datasets.create are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
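Before the numbered examples, here is a minimal sketch of the common calling pattern, assuming the open-reid style API that all of the snippets below rely on: datasets.create(name, root, ...) returns a dataset object exposing images_dir, the train/query/gallery splits, and identity counts. The data root path and the 'market1501' dataset name are hypothetical placeholders.

import os.path as osp

from reid import datasets

data_dir = '/path/to/data'  # hypothetical data root
name = 'market1501'         # assumed to be a dataset name registered in reid.datasets

# create() builds the dataset object; split_id selects one of the predefined splits
dataset = datasets.create(name, osp.join(data_dir, name), split_id=0)

print(dataset.images_dir)                         # directory containing the images
print(len(dataset.train), dataset.num_train_ids)  # training samples and identity count
print(len(dataset.query), len(dataset.gallery))   # evaluation splits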

Example 1: main

# Required import: from reid import datasets [as alias]
# Or: from reid.datasets import create [as alias]
def main(args):
    cudnn.benchmark = True
    cudnn.enabled = True
    
    save_path = args.logs_dir
    sys.stdout = Logger(osp.join(args.logs_dir, 'log' + str(args.merge_percent) + time.strftime(".%m_%d_%H:%M:%S") + '.txt'))

    # get all unlabeled data for training
    dataset_all = datasets.create(args.dataset, osp.join(args.data_dir, args.dataset))
    new_train_data, cluster_id_labels = change_to_unlabel(dataset_all)

    num_train_ids = len(np.unique(np.array(cluster_id_labels)))
    nums_to_merge = int(num_train_ids * args.merge_percent)

    BuMain = Bottom_up(model_name=args.arch, batch_size=args.batch_size, 
            num_classes=num_train_ids,
            dataset=dataset_all,
            u_data=new_train_data, save_path=args.logs_dir, max_frames=args.max_frames,
            embeding_fea_size=args.fea)


    # iteratively merge clusters until the target number of identities is reached
    for step in range(int(1 / args.merge_percent) - 1):
        print('step: ', step)

        BuMain.train(new_train_data, step, loss=args.loss)

        BuMain.evaluate(dataset_all.query, dataset_all.gallery)

        # get new train data for the next iteration
        print('----------------------------------------bottom-up clustering------------------------------------------------')
        cluster_id_labels, new_train_data = BuMain.get_new_train_data_v2(cluster_id_labels, nums_to_merge, step, penalty=args.size_penalty)
        print('\n\n') 
Developer: gddingcs, Project: Dispersion-based-Clustering, Lines: 35, Source: run.py

Example 2: main

# Required import: from reid import datasets [as alias]
# Or: from reid.datasets import create [as alias]
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.benchmark = True

    sys.stdout = Logger(osp.join(args.log_dir, 'log_test.txt'))

    args.num_classes = 1
    
    # Create data loaders
    dataset = {}
    dataset['dataset'] = datasets.create(args.name, args.data_dir)
    dataset['train_loader'], dataset['query_loader'], dataset['gallery_loader'] \
        = create_test_data_loader(args, args.name, dataset['dataset'])
       
    if args.evaluate:
        cls_params = None
        trainer = PCBTrainer(args, cls_params=cls_params)
        evaluator = trainer.test()
        scores = {}

        scores['cmc_scores'], scores['mAP'], q_f, g_f, _ = \
            evaluator.evaluate(args.name, dataset['query_loader'], dataset['gallery_loader'],
                               dataset['dataset'].query, dataset['dataset'].gallery, isevaluate=True)
        
        print('Cross Domain CMC Scores')
        print('Source\t Target\t Top1\t Top5\t Top10\t MAP')
        print('{}->{}: {:6.2%} {:6.2%} {:6.2%} ({:.2%})'.format(args.s_name, args.name,
                                                                scores['cmc_scores'][0],
                                                                scores['cmc_scores'][1],
                                                                scores['cmc_scores'][2],
                                                                scores['mAP']))

        # optionally re-rank the retrieval results
        if args.rerank:
            rerankor = Rerankor()
            rerankor.rerank(q_f, g_f,
                            savepath=os.path.join(args.save_dir, 'rerank'),
                            save=False, isevaluate=True,
                            dataset=dataset['dataset'])
Developer: zhangxinyu-xyz, Project: PAST-ReID, Lines: 43, Source: test.py

Example 3: main

# Required import: from reid import datasets [as alias]
# Or: from reid.datasets import create [as alias]
def main(args):
    cudnn.benchmark = True
    cudnn.enabled = True
    
    save_path = args.logs_dir
    sys.stdout = Logger(osp.join(args.logs_dir, 'log' + str(args.merge_percent) + time.strftime(".%m_%d_%H:%M:%S") + '.txt'))

    # get all unlabeled data for training
    dataset_all = datasets.create(args.dataset, osp.join(args.data_dir, args.dataset))
    new_train_data, cluster_id_labels = change_to_unlabel(dataset_all)

    num_train_ids = len(np.unique(np.array(cluster_id_labels)))
    nums_to_merge = int(num_train_ids * args.merge_percent)

    BuMain = Bottom_up(model_name=args.arch, batch_size=args.batch_size, 
            num_classes=num_train_ids,
            dataset=dataset_all,
            u_data=new_train_data, save_path=args.logs_dir, max_frames=args.max_frames,
            embeding_fea_size=args.fea)


    # iteratively merge clusters until the target number of identities is reached
    for step in range(int(1 / args.merge_percent) - 1):
        print('step: ', step)

        BuMain.train(new_train_data, step, loss=args.loss)

        BuMain.evaluate(dataset_all.query, dataset_all.gallery)

        # get new train data for the next iteration
        print('----------------------------------------bottom-up clustering------------------------------------------------')
        cluster_id_labels, new_train_data = BuMain.get_new_train_data(cluster_id_labels, nums_to_merge, size_penalty=args.size_penalty)
        print('\n\n') 
Developer: vana77, Project: Bottom-up-Clustering-Person-Re-identification, Lines: 34, Source: run.py

Example 4: get_data

# Required import: from reid import datasets [as alias]
# Or: from reid.datasets import create [as alias]
def get_data(name, split_id, data_dir, height, width, batch_size, workers,
             combine_trainval):
    root = osp.join(data_dir, name)

    dataset = datasets.create(name, root, split_id=split_id)

    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])

    train_set = dataset.trainval if combine_trainval else dataset.train
    num_classes = (dataset.num_trainval_ids if combine_trainval
                   else dataset.num_train_ids)

    train_transformer = T.Compose([
        T.RandomSizedRectCrop(height, width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        normalizer,
    ])

    test_transformer = T.Compose([
        T.RectScale(height, width),
        T.ToTensor(),
        normalizer,
    ])

    train_loader = DataLoader(
        Preprocessor(train_set, root=dataset.images_dir,
                     transform=train_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=True, pin_memory=True, drop_last=True)

    val_loader = DataLoader(
        Preprocessor(dataset.val, root=dataset.images_dir,
                     transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)

    test_loader = DataLoader(
        Preprocessor(list(set(dataset.query) | set(dataset.gallery)),
                     root=dataset.images_dir, transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)

    return dataset, num_classes, train_loader, val_loader, test_loader 
Developer: Cysu, Project: open-reid, Lines: 47, Source: softmax_loss.py

Example 5: get_data

# Required import: from reid import datasets [as alias]
# Or: from reid.datasets import create [as alias]
def get_data(name, split_id, data_dir, height, width, batch_size, num_instances,
             workers, combine_trainval):
    root = osp.join(data_dir, name)

    dataset = datasets.create(name, root, split_id=split_id)

    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])

    train_set = dataset.trainval if combine_trainval else dataset.train
    num_classes = (dataset.num_trainval_ids if combine_trainval
                   else dataset.num_train_ids)

    train_transformer = T.Compose([
        T.RandomSizedRectCrop(height, width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        normalizer,
    ])

    test_transformer = T.Compose([
        T.RectScale(height, width),
        T.ToTensor(),
        normalizer,
    ])

    train_loader = DataLoader(
        Preprocessor(train_set, root=dataset.images_dir,
                     transform=train_transformer),
        batch_size=batch_size, num_workers=workers,
        sampler=RandomIdentitySampler(train_set, num_instances),
        pin_memory=True, drop_last=True)

    val_loader = DataLoader(
        Preprocessor(dataset.val, root=dataset.images_dir,
                     transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)

    test_loader = DataLoader(
        Preprocessor(list(set(dataset.query) | set(dataset.gallery)),
                     root=dataset.images_dir, transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)

    return dataset, num_classes, train_loader, val_loader, test_loader 
Developer: Cysu, Project: open-reid, Lines: 48, Source: triplet_loss.py

Example 6: get_data

# Required import: from reid import datasets [as alias]
# Or: from reid.datasets import create [as alias]
def get_data(name, split_id, data_dir, height, width, batch_size, num_instances,
             workers, combine_trainval):
    root = osp.join(data_dir, name)

    dataset = datasets.create(name, root, split_id=split_id)

    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])

    train_set = dataset.trainval if combine_trainval else dataset.train
    num_classes = (dataset.num_trainval_ids if combine_trainval
                   else dataset.num_train_ids)

    train_transformer = T.Compose([
        T.RectScale(height, width),
        T.RandomSizedEarser(),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        normalizer,
    ])

    test_transformer = T.Compose([
        T.RectScale(height, width),
        T.ToTensor(),
        normalizer,
    ])

    train_loader = DataLoader(
        Preprocessor(train_set, root=dataset.images_dir,
                     transform=train_transformer),
        batch_size=batch_size, num_workers=workers,
        sampler=RandomMultipleGallerySampler(train_set, num_instances),
        pin_memory=True, drop_last=True)

    val_loader = DataLoader(
        Preprocessor(dataset.val, root=dataset.images_dir,
                     transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)

    test_loader = DataLoader(
        Preprocessor(list(set(dataset.query) | set(dataset.gallery)),
                     root=dataset.images_dir, transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)

    return dataset, num_classes, train_loader, val_loader, test_loader 
Developer: YantaoShen, Project: kpm_rw_person_reid, Lines: 49, Source: main.py

Example 7: get_data

# Required import: from reid import datasets [as alias]
# Or: from reid.datasets import create [as alias]
def get_data(name, split_id, data_dir, batch_size, num_instances, workers):
    root = osp.join(data_dir, name)

    dataset = datasets.create(name, root, split_id=split_id)
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])

    train_set = dataset.trainval
    num_classes = dataset.num_trainval_ids

    train_transformer = T.Compose([
        T.RandomSizedRectCrop(256, 128),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        normalizer,
    ])

    test_transformer = T.Compose([
        T.RectScale(256, 128),
        T.ToTensor(),
        normalizer,
    ])

    train_loader = DataLoader(
        Preprocessor(train_set, root=dataset.images_dir,
                     transform=train_transformer),
        batch_size=batch_size, num_workers=workers,
        sampler=RandomIdentitySampler(train_set, num_instances),
        pin_memory=True, drop_last=True)

    val_loader = DataLoader(
        Preprocessor(dataset.val, root=dataset.images_dir,
                     transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)

    test_loader = DataLoader(
        Preprocessor(list(set(dataset.query) | set(dataset.gallery)),
                     root=dataset.images_dir, transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)

    return dataset, num_classes, train_loader, val_loader, test_loader 
Developer: zydou, Project: Deep-Person, Lines: 45, Source: deep.py

Example 8: get_data

# Required import: from reid import datasets [as alias]
# Or: from reid.datasets import create [as alias]
def get_data(name, data_dir, height, width, batch_size, workers):
    # the per-dataset subdirectory is not used here; images live directly under data_dir
    root = data_dir
    dataset = datasets.create(name, root)

    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])

    num_classes = dataset.num_train_ids

    train_transformer = T.Compose([
        T.RectScale(height, width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        normalizer,
    ])

    test_transformer = T.Compose([
        T.RectScale(height, width),
        T.ToTensor(),
        normalizer,
    ])

    train_loader = DataLoader(
        Preprocessor(dataset.train, root=osp.join(dataset.images_dir, dataset.train_path),
                     transform=train_transformer, random_mask=False),
        batch_size=batch_size, num_workers=workers,
        shuffle=True, pin_memory=True, drop_last=True)

    query_loader = DataLoader(
        Preprocessor(dataset.query, root=osp.join(dataset.images_dir, dataset.query_path),
                     transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)

    gallery_loader = DataLoader(
        Preprocessor(dataset.gallery, root=osp.join(dataset.images_dir, dataset.gallery_path),
                     transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)

    return dataset, num_classes, train_loader, query_loader, gallery_loader
Developer: HRanWang, Project: Spatial-Attention, Lines: 45, Source: main.py

Example 9: main

# Required import: from reid import datasets [as alias]
# Or: from reid.datasets import create [as alias]
def main(args):
    cudnn.benchmark = True
    cudnn.enabled = True
    save_path = args.logs_dir
    total_step = 100 // args.EF + 1
    sys.stdout = Logger(osp.join(args.logs_dir, 'log' + str(args.EF) + time.strftime(".%m_%d_%H:%M:%S") + '.txt'))

    # get all the labeled and unlabeled data for training
    dataset_all = datasets.create(args.dataset, osp.join(args.data_dir, args.dataset))
    num_all_examples = len(dataset_all.train)
    l_data, u_data = get_one_shot_in_cam1(dataset_all, load_path="./examples/oneshot_{}_used_in_paper.pickle".format(dataset_all.name))
    
    resume_step, ckpt_file = -1, ''
    if args.resume:
        resume_step, ckpt_file = resume(args) 

    # initialize the EUG algorithm
    eug = EUG(model_name=args.arch, batch_size=args.batch_size, mode=args.mode, num_classes=dataset_all.num_train_ids, 
            data_dir=dataset_all.images_dir, l_data=l_data, u_data=u_data, save_path=args.logs_dir, max_frames=args.max_frames)


    new_train_data = l_data 
    for step in range(total_step):
        # for resume
        if step < resume_step: 
            continue

        nums_to_select = min(int(len(u_data) * (step + 1) * args.EF / 100), len(u_data))
        print("This is running {} with EF={}%, step {}:\t nums_to_select {}, \t logs-dir {}".format(
                args.mode, args.EF, step, nums_to_select, save_path))

        # train the model or load ckpt        
        eug.train(new_train_data, step, epochs=70, step_size=55, init_lr=0.1) if step != resume_step else eug.resume(ckpt_file, step)

        # pseudo-label and confidence score
        pred_y, pred_score = eug.estimate_label()

        # select data
        selected_idx = eug.select_top_data(pred_score, nums_to_select)

        # add new data
        new_train_data = eug.generate_new_train_data(selected_idx, pred_y)

        # evaluate
        eug.evaluate(dataset_all.query, dataset_all.gallery) 
Developer: Yu-Wu, Project: Exploit-Unknown-Gradually, Lines: 47, Source: run.py


Note: The reid.datasets.create method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by various developers, and copyright belongs to the original authors. Please refer to the corresponding project's license before distributing or using the code; do not reproduce this article without permission.