Python torch.DistributedOptimizer Method Code Examples

This article collects and summarizes typical usage examples of the horovod.torch.DistributedOptimizer method in Python. If you are wondering what torch.DistributedOptimizer does, how to call it, or what real usage looks like, the selected code examples below should help. You can also explore further usage examples from the horovod.torch module.


The following shows 9 code examples of the torch.DistributedOptimizer method, sorted by popularity by default.
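
Most of the examples below follow the same basic pattern: initialize Horovod, pin each process to one GPU, scale the learning rate by the number of workers, broadcast the initial model and optimizer state from rank 0, and wrap the optimizer with hvd.DistributedOptimizer so that optimizer.step() averages gradients across all workers. The following minimal sketch illustrates that pattern; the Net model class and train_loader are placeholders, not code from any of the projects below.

import torch
import torch.nn as nn
import torch.optim as optim
import horovod.torch as hvd

hvd.init()                                   # start Horovod
torch.cuda.set_device(hvd.local_rank())      # pin this process to one GPU

model = Net().cuda()                         # Net is a placeholder model class
base_lr = 0.01
# Scale the learning rate by the number of workers, since the effective batch size grows with hvd.size().
optimizer = optim.SGD(model.parameters(), lr=base_lr * hvd.size())

# Start all workers from the same parameters and optimizer state.
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)

# Wrap the optimizer so that optimizer.step() performs an allreduce of the gradients.
optimizer = hvd.DistributedOptimizer(
    optimizer, named_parameters=model.named_parameters())

criterion = nn.CrossEntropyLoss()
for data, target in train_loader:            # train_loader is a placeholder DataLoader
    optimizer.zero_grad()
    output = model(data.cuda())
    loss = criterion(output, target.cuda())
    loss.backward()
    optimizer.step()                         # gradients are averaged across workers here

Such a script is typically launched with horovodrun, for example "horovodrun -np 4 python train.py" to run four worker processes.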

Example 1: build_torch

# Required import: from horovod import torch [as alias]
# Or: from horovod.torch import DistributedOptimizer [as alias]
def build_torch(self, model):
        import torch
        lookup = {
            'sgd':      torch.optim.SGD,
            'adadelta': torch.optim.Adadelta,
            'rmsprop':  torch.optim.RMSprop,
            'adam':     torch.optim.Adam
            }
        if self.name not in lookup:
            logging.warning("No optimizer '{}' found, using SGD instead".format(self.name))
            self.name = 'sgd'
        opt = lookup[self.name](model.parameters(), **self.config)
        if self.horovod_wrapper:
            import horovod.torch as hvd
            opt = hvd.DistributedOptimizer(opt, named_parameters=model.named_parameters())
        return opt 
Developer ID: vlimant, Project: mpi_learn, Lines of code: 18, Source: optimizer.py

Example 2: train_main

# Required import: from horovod import torch [as alias]
# Or: from horovod.torch import DistributedOptimizer [as alias]
def train_main():
    model = Net().to(device)
    # optimizer = optim.SGD(model.parameters(), lr=0.05)

    if hvd.rank() == 0:
        print(model)

    # Horovod: broadcast parameters.
    hvd.broadcast_parameters(model.state_dict(), root_rank=0)

    # Horovod: scale learning rate by the number of GPUs.
    lr = 0.05
    optimizer = optim.SGD(model.parameters(), lr=lr * hvd.size())

    # Horovod: wrap optimizer with DistributedOptimizer.
    optimizer = hvd.DistributedOptimizer(optimizer,
                                         named_parameters=model.named_parameters())
    criterion = nn.BCELoss()

    batch_size = 25
    train_loader, train_sampler = get_train_loader(batch_size)
    validation_loader, validation_sampler = get_validation_loader(batch_size)

    log = get_tensorboard('simple_hvd')
    epochs = 50

    start_time = datetime.now()
    for epoch in range(1, epochs + 1):
        train(model, train_loader, train_sampler, criterion, optimizer, epoch, log)

        with torch.no_grad():
            if hvd.rank() == 0:
                print('\nValidation:')
            evaluate(model, validation_loader, validation_sampler, criterion, epoch, log)

    end_time = datetime.now()

    if hvd.rank() == 0:
        print('Total training time: {}.'.format(end_time - start_time))
        torch.save(model.state_dict(), model_file)
        print('Wrote model to', model_file) 
Developer ID: csc-training, Project: intro-to-dl, Lines of code: 43, Source: pytorch_dvc_cnn_simple_hvd.py

Example 3: fit

# Required import: from horovod import torch [as alias]
# Or: from horovod.torch import DistributedOptimizer [as alias]
def fit(self, module):

        # Prepare module for training
        module.trainer = self
        # Update and print module configuration
        prep_logger_and_checkpoint(module)
        print_config(module.config)

        # Send module to GPU
        module = module.to('cuda')
        # Configure optimizer and scheduler
        module.configure_optimizers()

        # Create distributed optimizer
        compression = hvd.Compression.none
        optimizer = hvd.DistributedOptimizer(module.optimizer,
            named_parameters=module.named_parameters(), compression=compression)
        scheduler = module.scheduler

        # Get train and val dataloaders
        train_dataloader = module.train_dataloader()
        val_dataloaders = module.val_dataloader()

        # Epoch loop
        for epoch in range(module.current_epoch, self.max_epochs):
            # Train
            self.train(train_dataloader, module, optimizer)
            # Validation
            validation_output = self.validate(val_dataloaders, module)
            # Check and save model
            self.check_and_save(module, validation_output)
            # Update current epoch
            module.current_epoch += 1
            # Take a scheduler step
            scheduler.step() 
Developer ID: TRI-ML, Project: packnet-sfm, Lines of code: 37, Source: horovod_trainer.py

Example 4: _init_optimizer

# Required import: from horovod import torch [as alias]
# Or: from horovod.torch import DistributedOptimizer [as alias]
def _init_optimizer(self):
        optimizer = optim.SGD(
                self.model.parameters(),
                lr=(self.base_lr * self.batches_per_allreduce * hvd.size()),
                momentum=self.momentum, weight_decay=self.wd)

        steps_per_batch = self.attack_backward_steps + 1
        if self.attack_loss == 'avg':
            steps_per_batch += 1
        self.optimizer = hvd.DistributedOptimizer(
            optimizer, named_parameters=self.model.named_parameters(),
            compression=self.compression,
            backward_passes_per_step=steps_per_batch * self.batches_per_allreduce) 
Developer ID: ddkang, Project: advex-uar, Lines of code: 15, Source: trainer.py
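
Example 4 passes backward_passes_per_step so that the wrapped optimizer only completes its allreduce after several calls to backward(), which is what multi-step adversarial training or gradient accumulation requires. Below is a minimal gradient-accumulation sketch built on that same parameter; accumulation_steps, model, criterion, and train_loader are placeholders rather than names from the advex-uar project.

accumulation_steps = 4                       # placeholder: backward passes per optimizer step
optimizer = hvd.DistributedOptimizer(
    optimizer, named_parameters=model.named_parameters(),
    backward_passes_per_step=accumulation_steps)

optimizer.zero_grad()
for i, (data, target) in enumerate(train_loader):
    loss = criterion(model(data), target) / accumulation_steps
    loss.backward()                          # gradients accumulate locally; allreduce is deferred
    if (i + 1) % accumulation_steps == 0:
        optimizer.step()                     # apply the averaged gradients
        optimizer.zero_grad()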

Example 5: build

# Required import: from horovod import torch [as alias]
# Or: from horovod.torch import DistributedOptimizer [as alias]
def build(self):
        from keras.optimizers import deserialize
        opt_config = {'class_name': self.name, 'config': self.config}
        opt = deserialize(opt_config)
        if self.horovod_wrapper:
            import horovod.keras as hvd
            if hasattr(opt, 'lr'):
                opt.lr *= hvd.size()
            opt = hvd.DistributedOptimizer(opt)
        return opt 
Developer ID: vlimant, Project: mpi_learn, Lines of code: 12, Source: optimizer.py

Example 6: __init__

# Required import: from horovod import torch [as alias]
# Or: from horovod.torch import DistributedOptimizer [as alias]
def __init__(self, model, params, update_lr_stepwise=False, parallel_mode='dp'):

        self.params = params
        self.model = model
        self.update_lr_stepwise = update_lr_stepwise
        self.parallel_mode = parallel_mode

        self.lr = self.params['lr']
        self.global_step = 1
        self.global_epoch = 0

        if params['optimizer'] == 'adam':
            self.optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=self.lr,
                                              betas=(0.9, 0.98), eps=1e-9)
        elif params['optimizer'] == 'sgd':
            self.optimizer = torch.optim.SGD(
                filter(lambda p: p.requires_grad, model.parameters()), lr=self.lr, momentum=0.9)
        elif params['optimizer'] == 'adadelta':
            self.optimizer = torch.optim.Adadelta(
                filter(lambda p: p.requires_grad, model.parameters()))
        else:
            raise NotImplementedError

        if self.parallel_mode == 'hvd':
            import horovod.torch as hvd
            self.optimizer = hvd.DistributedOptimizer(self.optimizer, named_parameters=model.named_parameters()) 
Developer ID: ZhengkunTian, Project: OpenTransformer, Lines of code: 28, Source: optim.py

Example 7: setup_horovod

# Required import: from horovod import torch [as alias]
# Or: from horovod.torch import DistributedOptimizer [as alias]
def setup_horovod(model, learning_rate):
    """ Setup for Horovod usage.

    Args:
        model(MultitaskModel): The MultitaskModel object.
        learning_rate(float): Learning rate for the model.

    Returns: hvd.DistributedOptimizer: Optimizer to use for computing
    gradients and applying updates.

    """
    # Horovod: scale learning rate by the number of GPUs.
    optimizer = optim.Adam(model.parameters(), lr=learning_rate * hvd.size())

    # Horovod: broadcast parameters & optimizer state.
    hvd.broadcast_parameters(model.state_dict(), root_rank=0)
    hvd.broadcast_optimizer_state(optimizer, root_rank=0)

    # Horovod: (optional) compression algorithm.
    compression = hvd.Compression.fp16

    # Horovod: wrap optimizer with DistributedOptimizer.
    optimizer = hvd.DistributedOptimizer(
        optimizer,
        named_parameters=model.named_parameters(),
        compression=compression,
    )

    return optimizer 
Developer ID: microsoft, Project: nlp-recipes, Lines of code: 31, Source: gensen_train.py

Example 8: horovod_train

# Required import: from horovod import torch [as alias]
# Or: from horovod.torch import DistributedOptimizer [as alias]
def horovod_train(self, model):
        # call setup after the ddp process has connected
        self.setup('fit')
        if self.is_function_implemented('setup', model):
            model.setup('fit')

        if torch.cuda.is_available() and self.on_gpu:
            # Horovod: pin GPU to local rank
            assert self.root_gpu == hvd.local_rank()
            torch.cuda.set_device(self.root_gpu)
            model.cuda(self.root_gpu)

        # avoid duplicating progress bar
        if hvd.rank() != 0 and self.progress_bar_callback is not None:
            self.progress_bar_callback.disable()

        # CHOOSE OPTIMIZER
        # allow for lr schedulers as well
        self.optimizers, self.lr_schedulers, self.optimizer_frequencies = self.init_optimizers(model)

        # Horovod: scale the learning rate by the number of workers to account for
        # increased total batch size
        for optimizer in self.optimizers:
            for param_group in optimizer.param_groups:
                param_group['lr'] *= hvd.size()

        if self.use_amp:
            # An example
            model, optimizers = model.configure_apex(amp, model, self.optimizers, self.amp_level)
            self.optimizers = optimizers
            self.reinit_scheduler_properties(self.optimizers, self.lr_schedulers)

        # Horovod: broadcast parameters & optimizer state to ensure consistent initialization
        hvd.broadcast_parameters(model.state_dict(), root_rank=0)
        for optimizer in self.optimizers:
            hvd.broadcast_optimizer_state(optimizer, root_rank=0)

        def filter_named_parameters(model, optimizer):
            opt_params = set([p for group in optimizer.param_groups for p in group.get('params', [])])
            return [(name, p) for name, p in model.named_parameters() if p in opt_params]

        # Horovod: wrap optimizers to perform gradient aggregation via allreduce
        self.optimizers = [
            hvd.DistributedOptimizer(optimizer, named_parameters=filter_named_parameters(model, optimizer))
            for optimizer in self.optimizers
        ]

        # Update logger rank info from Horovod to avoid race conditions from  different ranks
        # creating directories / writing files in the same locations.
        self.global_rank = hvd.rank()
        rank_zero_only.rank = self.global_rank

        with ExitStack() as stack:
            for optimizer in self.optimizers:
                # Synchronization will be performed explicitly following backward()
                stack.enter_context(optimizer.skip_synchronize())

            self.run_pretrain_routine(model)

        # Make sure all workers have finished training before returning to the user
        hvd.join() 
Developer ID: PyTorchLightning, Project: pytorch-lightning, Lines of code: 63, Source: distrib_parts.py
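
Example 8 enters optimizer.skip_synchronize() for the whole run and relies on synchronization being triggered explicitly after backward(). Horovod documents this pattern for cases where the gradients must be modified after the allreduce but before the update, such as gradient clipping. A minimal sketch of one such training step follows; model, criterion, train_loader, and the clipping value are placeholders.

for data, target in train_loader:
    optimizer.zero_grad()
    loss = criterion(model(data), target)
    loss.backward()
    optimizer.synchronize()                  # finish the allreduce of the gradients now
    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
    with optimizer.skip_synchronize():       # step without triggering a second synchronization
        optimizer.step()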

Example 9: create_optimizer

# Required import: from horovod import torch [as alias]
# Or: from horovod.torch import DistributedOptimizer [as alias]
def create_optimizer(
        self,
        num_train_optimization_steps,
        lr=2e-5,
        fp16_allreduce=False,
        warmup_proportion=None,
    ):

        """
        Method to create a BERT Optimizer based on the inputs from the user.

        Args:
            num_train_optimization_steps(int): Number of optimization steps.
            lr (float): learning rate of the adam optimizer. defaults to 2e-5.
            warmup_proportion (float, optional): proportion of training to
                perform linear learning rate warmup for, e.g., 0.1 = 10% of
                training. Defaults to None.
            fp16_allreduce (bool, optional): if True, use fp16 compression
                during allreduce. Defaults to False.

        Returns:
            pytorch_pretrained_bert.optimization.BertAdam  : A BertAdam optimizer with
                user specified config.

        """
        if self.use_distributed:
            lr = lr * hvd.size()

        if warmup_proportion is None:
            optimizer = BertAdam(self.optimizer_params, lr=lr)
        else:
            optimizer = BertAdam(
                self.optimizer_params,
                lr=lr,
                t_total=num_train_optimization_steps,
                warmup=warmup_proportion,
            )

        if self.use_distributed:
            compression = (
                hvd.Compression.fp16 if fp16_allreduce else hvd.Compression.none
            )
            optimizer = hvd.DistributedOptimizer(
                optimizer,
                named_parameters=self.model.named_parameters(),
                compression=compression,
            )

        return optimizer 
Developer ID: microsoft, Project: nlp-recipes, Lines of code: 51, Source: sequence_classification_distributed.py


Note: The horovod.torch.DistributedOptimizer method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not repost without permission.