Python parallel.MMDataParallel method code examples

This article collects typical usage examples of the Python method mmcv.parallel.MMDataParallel. If you are wondering what parallel.MMDataParallel does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from its containing module, mmcv.parallel.


The following 15 code examples of parallel.MMDataParallel are shown, sorted by popularity by default.
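
Before the project-specific examples, here is a minimal sketch of the pattern they all share: wrap the model in MMDataParallel, pass the target GPUs via device_ids, and move it to CUDA. The SimpleModel class and the GPU count below are hypothetical placeholders, not taken from any of the examples.

import torch.nn as nn
from mmcv.parallel import MMDataParallel

class SimpleModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(8, 2)

    def forward(self, x):
        return self.fc(x)

num_gpus = 1  # device_ids=range(1) iterates over [0], i.e. a single GPU
model = MMDataParallel(SimpleModel(), device_ids=range(num_gpus)).cuda()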

Example 1: _non_dist_train

# Required import: from mmcv import parallel [as alias]
# Or: from mmcv.parallel import MMDataParallel [as alias]
def _non_dist_train(model, dataset, cfg, validate=False):
    # prepare data loaders
    data_loaders = [
        build_dataloader(
            dataset,
            cfg.data.imgs_per_gpu,
            cfg.data.workers_per_gpu,
            cfg.gpus,
            dist=False)
    ]
    # put model on gpus
    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()
    # build runner
    optimizer = build_optimizer(model, cfg.optimizer)
    runner = Runner(model, batch_processor, optimizer, cfg.work_dir,
                    cfg.log_level)
    runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config)

    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs) 
Author: dingjiansw101, Project: AerialDetection, Lines: 26, Source: train.py
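
All of the training examples follow this same skeleton: build data loaders, wrap the model in MMDataParallel, build an optimizer from the config, construct a Runner, register hooks, optionally resume or load a checkpoint, and run. A hypothetical cfg.optimizer fragment of the kind build_optimizer accepts (the values are placeholders, not from AerialDetection):

# in the config file
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)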

Example 2: _non_dist_test

# Required import: from mmcv import parallel [as alias]
# Or: from mmcv.parallel import MMDataParallel [as alias]
def _non_dist_test(model, dataset, cfg, validate=False):
    model = MMDataParallel(model, device_ids=cfg.gpus.test).cuda()
    model.eval()

    embeds = _process_embeds(dataset, model, cfg)

    metric = model.module.triplet_net.metric_branch

    # compatibility auc
    auc = dataset.test_compatibility(embeds, metric)

    # fill-in-blank accuracy
    acc = dataset.test_fitb(embeds, metric)

    print('Compat AUC: {:.2f} FITB: {:.1f}\n'.format(
        round(auc, 2), round(acc * 100, 1))) 
Author: open-mmlab, Project: mmfashion, Lines: 18, Source: test_fashion_recommender.py
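
A detail worth noting in this example: like torch.nn.DataParallel, MMDataParallel exposes the wrapped network as its .module attribute, which is why the metric branch is reached through model.module rather than model. A minimal illustration, with a hypothetical stand-in network:

import torch.nn as nn
from mmcv.parallel import MMDataParallel

net = nn.Linear(4, 4)  # stand-in for the fashion recommender model
model = MMDataParallel(net, device_ids=[0])
assert model.module is net  # attributes of the original model live on .module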

Example 3: test_runner_with_parallel

# Required import: from mmcv import parallel [as alias]
# Or: from mmcv.parallel import MMDataParallel [as alias]
def test_runner_with_parallel():

    def batch_processor():
        pass

    model = MMDataParallel(OldStyleModel())
    _ = EpochBasedRunner(model, batch_processor, logger=logging.getLogger())

    model = MMDataParallel(Model())
    _ = EpochBasedRunner(model, logger=logging.getLogger())

    with pytest.raises(RuntimeError):
        # batch_processor and train_step() cannot be both set

        def batch_processor():
            pass

        model = MMDataParallel(Model())
        _ = EpochBasedRunner(
            model, batch_processor, logger=logging.getLogger()) 
Author: open-mmlab, Project: mmcv, Lines: 22, Source: test_runner.py
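
For context, the test distinguishes an old-style model, which has no train_step() and therefore needs an external batch_processor, from a new-style model that implements train_step() itself; supplying both triggers the RuntimeError asserted above. A sketch of what such fixtures might look like (these class bodies are assumptions, not the actual fixtures from the mmcv test suite):

import torch.nn as nn

class OldStyleModel(nn.Module):
    # no train_step(): EpochBasedRunner requires an external batch_processor
    def forward(self, x):
        return x

class Model(OldStyleModel):
    # defines train_step(): EpochBasedRunner must be created without batch_processor
    def train_step(self, data_batch, optimizer, **kwargs):
        return dict(loss=0.0)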

Example 4: _non_dist_train

# Required import: from mmcv import parallel [as alias]
# Or: from mmcv.parallel import MMDataParallel [as alias]
def _non_dist_train(model, dataset, cfg, validate=False):
    # prepare data loaders
    data_loaders = [
        build_dataloader(
            dataset,
            cfg.data.imgs_per_gpu,
            cfg.data.workers_per_gpu,
            cfg.gpus,
            dist=False)
    ]
    # put model on gpus
    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()
    # build runner
    runner = Runner(model, batch_processor, cfg.optimizer, cfg.work_dir,
                    cfg.log_level)
    runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config)

    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs) 
Author: chanyn, Project: Reasoning-RCNN, Lines: 25, Source: train.py

Example 5: _non_dist_train

# Required import: from mmcv import parallel [as alias]
# Or: from mmcv.parallel import MMDataParallel [as alias]
def _non_dist_train(model, datasets, cfg, validate=False, logger=None):
    # prepare data loaders
    data_loaders = [
        build_dataloader(
            dataset,
            cfg.data.imgs_per_gpu,
            cfg.data.workers_per_gpu,
            cfg.gpus,
            dist=False) for dataset in datasets
    ]
    # put model on gpus
    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()
    # build runner
    runner = NASRunner(model, batch_processor, None, cfg.work_dir, cfg.log_level, cfg=cfg, logger=logger)
    runner.register_training_hooks(cfg.lr_config, cfg.optimizer.weight_optim.optimizer_config,
                                    cfg.optimizer.arch_optim.optimizer_config, 
                                    cfg.checkpoint_config, cfg.log_config)

    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs, cfg.arch_update_epoch) 
Author: JaminFong, Project: FNA, Lines: 25, Source: fna_search_apis.py

Example 6: _non_dist_train

# Required import: from mmcv import parallel [as alias]
# Or: from mmcv.parallel import MMDataParallel [as alias]
def _non_dist_train(model, dataset, cfg, validate=False):
    # prepare data loaders
    data_loaders = [
        build_dataloader(
            dataset,
            cfg.data.videos_per_gpu,
            cfg.data.workers_per_gpu,
            cfg.gpus,
            dist=False)
    ]
    # put model on gpus
    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()
    # build runner
    runner = Runner(model, batch_processor, cfg.optimizer, cfg.work_dir,
                    cfg.log_level)
    runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config)

    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs) 
Author: open-mmlab, Project: mmaction, Lines: 25, Source: train.py

Example 7: _single_train

# Required import: from mmcv import parallel [as alias]
# Or: from mmcv.parallel import MMDataParallel [as alias]
def _single_train(model, data_loaders, cfg):
    if cfg.gpus > 1:
        raise NotImplementedError
    # put model on gpus
    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()
    # build runner
    optimizer = build_optimizer(model, cfg.optimizer)
    runner = Runner(model, batch_processor, optimizer, cfg.work_dir,
                    cfg.log_level)
    runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config)

    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs) 
Author: yl-1993, Project: learn-to-cluster, Lines: 19, Source: train_lgcn.py

Example 8: _single_train

# Required import: from mmcv import parallel [as alias]
# Or: from mmcv.parallel import MMDataParallel [as alias]
def _single_train(model, data_loaders, cfg):
    if cfg.gpus > 1:
        raise NotImplementedError
    # put model on gpus
    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()
    # build runner
    optimizer = build_optimizer(model, cfg.optimizer)
    runner = Runner(model,
                    batch_processor,
                    optimizer,
                    cfg.work_dir,
                    cfg.log_level,
                    iter_size=cfg.iter_size)
    runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config)

    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs) 
Author: yl-1993, Project: learn-to-cluster, Lines: 23, Source: train_gcn_e.py

Example 9: _single_train

# Required import: from mmcv import parallel [as alias]
# Or: from mmcv.parallel import MMDataParallel [as alias]
def _single_train(model, data_loaders, batch_processor, cfg):
    if cfg.gpus > 1:
        raise NotImplementedError
    # put model on gpus
    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()
    # build runner
    optimizer = build_optimizer(model, cfg.optimizer)
    runner = Runner(model,
                    batch_processor,
                    optimizer,
                    cfg.work_dir,
                    cfg.log_level,
                    iter_size=cfg.iter_size)
    runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config)

    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs) 
Author: yl-1993, Project: learn-to-cluster, Lines: 23, Source: train.py

Example 10: _non_dist_train

# Required import: from mmcv import parallel [as alias]
# Or: from mmcv.parallel import MMDataParallel [as alias]
def _non_dist_train(
        model, train_dataset, cfg,
        eval_dataset=None, vis_dataset=None, validate=False, logger=None
):
    # prepare data loaders
    data_loaders = [
        build_data_loader(
            train_dataset,
            cfg.data.imgs_per_gpu,
            cfg.data.workers_per_gpu,
            cfg.gpus,
            dist=False)
    ]
    # put model on gpus
    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()
    # build runner
    optimizer = build_optimizer(model, cfg.optimizer)
    runner = Runner(
        model, batch_processor, optimizer, cfg.work_dir, cfg.log_level, logger
    )
    logger.info("Register Optimizer Hook...")
    runner.register_training_hooks(
        cfg.lr_config, cfg.optimizer_config, cfg.checkpoint_config, cfg.log_config
    )
    logger.info("Register EmptyCache Hook...")
    runner.register_hook(
        EmptyCacheHook(before_epoch=True, after_iter=False, after_epoch=True),
        priority='VERY_LOW'
    )

    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)

    runner.run(data_loaders, cfg.workflow, cfg.total_epochs) 
Author: DeepMotionAIResearch, Project: DenseMatchingBenchmark, Lines: 38, Source: train.py

Example 11: _non_dist_train

# Required import: from mmcv import parallel [as alias]
# Or: from mmcv.parallel import MMDataParallel [as alias]
def _non_dist_train(model, dataset, cfg, validate=False):
    # prepare data loaders
    data_loaders = [
        build_dataloader(
            dataset,
            cfg.data.imgs_per_gpu,
            cfg.data.workers_per_gpu,
            cfg.gpus,
            dist=False)
    ]
    # put model on gpus
    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()

    # build runner
    optimizer = build_optimizer(model, cfg.optimizer)
    runner = Runner(model, batch_processor, optimizer, cfg.work_dir,
                    cfg.log_level)
    # fp16 setting
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        optimizer_config = Fp16OptimizerHook(
            **cfg.optimizer_config, **fp16_cfg, distributed=False)
    else:
        optimizer_config = cfg.optimizer_config
    runner.register_training_hooks(cfg.lr_config, optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config)

    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs) 
Author: xvjiarui, Project: GCNet, Lines: 34, Source: train.py
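
The fp16 branch above is driven entirely by the config: cfg.get('fp16', None) returns a dict whose keys are forwarded to Fp16OptimizerHook alongside the usual optimizer_config. A plausible config fragment that would enable it (the loss_scale value is a placeholder, though loss_scale is a real Fp16OptimizerHook parameter):

# in the config file
fp16 = dict(loss_scale=512.)
optimizer_config = dict(grad_clip=None)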

Example 12: _non_dist_train

# Required import: from mmcv import parallel [as alias]
# Or: from mmcv.parallel import MMDataParallel [as alias]
def _non_dist_train(model, dataset, cfg, validate=False):
    # prepare data loaders
    # returns a list of dataloaders; the dataset is wrapped with PyTorch's DataLoader
    data_loaders = [
        build_dataloader(
            dataset,
            cfg.data.imgs_per_gpu,
            cfg.data.workers_per_gpu,
            cfg.gpus,
            dist=False)
    ]
    # put model on gpus; the device ids are passed as a range iterator rather than
    # a list; with a single GPU this is range(0, 1), which yields only device 0
    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()

    # build runner 
    optimizer = build_optimizer(model, cfg.optimizer)
    runner = Runner(model, batch_processor, optimizer, cfg.work_dir,
                    cfg.log_level)
    # fp16 setting
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        optimizer_config = Fp16OptimizerHook(
            **cfg.optimizer_config, **fp16_cfg, distributed=False)
    else:
        optimizer_config = cfg.optimizer_config

    # register training hooks
    runner.register_training_hooks(cfg.lr_config, optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config)
    # resume from a checkpoint, or load pretrained weights from a file
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs) 
Author: ming71, Project: mmdetection-annotated, Lines: 37, Source: train.py

Example 13: _non_dist_test

# Required import: from mmcv import parallel [as alias]
# Or: from mmcv.parallel import MMDataParallel [as alias]
def _non_dist_test(model, query_set, gallery_set, cfg, validate=False):
    model = MMDataParallel(model, device_ids=cfg.gpus.test).cuda()
    model.eval()

    query_embeds = _process_embeds(query_set, model, cfg)
    gallery_embeds = _process_embeds(gallery_set, model, cfg)

    query_embeds_np = np.array(query_embeds)
    gallery_embeds_np = np.array(gallery_embeds)

    e = Evaluator(
        cfg.data.query.id_file,
        cfg.data.gallery.id_file,
        extract_feature=cfg.extract_feature)
    e.evaluate(query_embeds_np, gallery_embeds_np) 
Author: open-mmlab, Project: mmfashion, Lines: 17, Source: test_retriever.py

Example 14: _non_dist_train

# Required import: from mmcv import parallel [as alias]
# Or: from mmcv.parallel import MMDataParallel [as alias]
def _non_dist_train(model, dataset, cfg, validate=False):
    # prepare data loaders
    data_loaders = [
        build_dataloader(
            dataset,
            cfg.data.imgs_per_gpu,
            cfg.data.workers_per_gpu,
            len(cfg.gpus.train),
            dist=False)
    ]
    print('dataloader built')

    model = MMDataParallel(model, device_ids=cfg.gpus.train).cuda()
    print('model paralleled')

    optimizer = build_optimizer(model, cfg.optimizer)
    runner = Runner(model, batch_processor, optimizer, cfg.work_dir,
                    cfg.log_level)

    runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config)

    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs) 
Author: open-mmlab, Project: mmfashion, Lines: 29, Source: train_retriever.py

Example 15: _non_dist_train

# Required import: from mmcv import parallel [as alias]
# Or: from mmcv.parallel import MMDataParallel [as alias]
def _non_dist_train(model, dataset, cfg, validate=False):
    # prepare data loaders
    data_loaders = [
        build_dataloader(
            dataset,
            cfg.data.imgs_per_gpu,
            cfg.data.workers_per_gpu,
            len(cfg.gpus.train),
            dist=False)
    ]
    print('dataloader built')

    # put model on gpus
    model = MMDataParallel(model, device_ids=cfg.gpus.train).cuda()
    print('model paralleled')

    optimizer = build_optimizer(model, cfg.optimizer)
    runner = Runner(model, batch_processor, optimizer, cfg.work_dir,
                    cfg.log_level)

    runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config)

    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs) 
Author: open-mmlab, Project: mmfashion, Lines: 30, Source: train_predictor.py
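
Across these fifteen examples, device_ids takes two forms: a range built from an integer GPU count (range(cfg.gpus)), or an explicit list of device indices from the config (cfg.gpus.train / cfg.gpus.test). Both reduce to a list of GPU ids; the two-GPU values below are placeholders:

import torch.nn as nn
from mmcv.parallel import MMDataParallel

net = nn.Linear(4, 4)  # placeholder model
model_a = MMDataParallel(net, device_ids=range(2)).cuda()  # from an integer GPU count
model_b = MMDataParallel(net, device_ids=[0, 1]).cuda()    # from an explicit list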


Note: The mmcv.parallel.MMDataParallel method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code. Do not reproduce without permission.