

Python amp.initialize Method Code Examples

This article collects and summarizes typical usage examples of the Python method apex.amp.initialize. If you are struggling with questions such as what exactly apex.amp.initialize does, or how to use it, the curated code examples below may help. You can also explore further usage examples of the apex.amp module it belongs to.


The following presents 15 code examples of the amp.initialize method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
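
Before the examples, here is a minimal sketch of the typical apex.amp workflow: wrap the model and optimizer with amp.initialize, then route every backward pass through amp.scale_loss. The model, data, and hyperparameters below are illustrative assumptions, not taken from any example on this page; running it requires apex and a CUDA GPU.

import torch
import torch.nn as nn
from apex import amp

# Illustrative model and optimizer; any nn.Module works the same way.
model = nn.Linear(16, 1).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# "O1" patches selected ops to run in fp16 while keeping fp32 master weights.
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")

x = torch.randn(8, 16, device="cuda")
target = torch.randn(8, 1, device="cuda")

optimizer.zero_grad()
loss = nn.functional.mse_loss(model(x), target)
# Scale the loss so fp16 gradients do not underflow during backward.
with amp.scale_loss(loss, optimizer) as scaled_loss:
    scaled_loss.backward()
optimizer.step()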

Example 1: test_gradient_accumulation_with_apex_amp

# Required import: from apex import amp [as alias]
# Or: from apex.amp import initialize [as alias]
def test_gradient_accumulation_with_apex_amp(self, mocker):
        desired_bs, accum_steps = 32, 4
        real_bs = desired_bs // accum_steps
        num_iter = 10
        task = mod_task.XORTask(batch_size=real_bs)

        # Wrap the model and optimizer with `amp.initialize`. Note that `amp`
        # requires a CUDA GPU, so we have to move the model to the GPU first.
        model, optimizer, device = task.model, task.optimizer, task.device
        model = model.to(device)
        task.model, task.optimizer = amp.initialize(model, optimizer)

        lr_finder = prepare_lr_finder(task)
        spy = mocker.spy(amp, "scale_loss")

        lr_finder.range_test(
            task.train_loader, num_iter=num_iter, accumulation_steps=accum_steps
        )
        assert spy.call_count == accum_steps * num_iter 
Author: davidtvs, Project: pytorch-lr-finder, Lines of code: 21, Source: test_lr_finder.py
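
Why accum_steps * num_iter calls? With gradient accumulation, amp.scale_loss wraps every backward pass (one per batch), while optimizer.step() fires only once per accum_steps batches. A minimal sketch of that loop, assuming model, optimizer, criterion, and train_loader are already set up as in the workflow sketch above:

from apex import amp

accum_steps = 4
optimizer.zero_grad()
for step, (inputs, targets) in enumerate(train_loader):
    loss = criterion(model(inputs), targets) / accum_steps
    # One amp.scale_loss call per batch, i.e. per backward pass.
    with amp.scale_loss(loss, optimizer) as scaled_loss:
        scaled_loss.backward()
    # Step and reset only after accumulating `accum_steps` gradients.
    if (step + 1) % accum_steps == 0:
        optimizer.step()
        optimizer.zero_grad()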

Example 2: test_mixed_precision

# Required import: from apex import amp [as alias]
# Or: from apex.amp import initialize [as alias]
def test_mixed_precision(self, mocker):
        batch_size = 32
        num_iter = 10
        task = mod_task.XORTask(batch_size=batch_size)

        # Wrap the model and optimizer with `amp.initialize`. Note that `amp`
        # requires a CUDA GPU, so we have to move the model to the GPU first.
        model, optimizer, device = task.model, task.optimizer, task.device
        model = model.to(device)
        task.model, task.optimizer = amp.initialize(model, optimizer)
        assert hasattr(task.optimizer, "_amp_stash")

        lr_finder = prepare_lr_finder(task)
        spy = mocker.spy(amp, "scale_loss")

        lr_finder.range_test(task.train_loader, num_iter=num_iter)
        # NOTE: No gradient accumulation is performed here, so the call count
        # of `amp.scale_loss` should equal `num_iter`.
        assert spy.call_count == num_iter 
Author: davidtvs, Project: pytorch-lr-finder, Lines of code: 21, Source: test_lr_finder.py
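
The _amp_stash attribute asserted above is attached to the optimizer by amp.initialize, so its presence is a convenient way to detect an already-wrapped optimizer. A small helper sketch, with the caveat that this is a private apex attribute and could change between versions:

def is_amp_wrapped(optimizer):
    # `_amp_stash` is set by `amp.initialize`; treat its presence as "wrapped".
    return hasattr(optimizer, "_amp_stash")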

Example 3: initialize

# Required import: from apex import amp [as alias]
# Or: from apex.amp import initialize [as alias]
def initialize(model, optimizers):
    """Initialize mixed precision

    Arguments:
        model {nn.Module} -- The model to convert
        optimizers -- The optimizer(s) to wrap, or None to convert only the model

    Returns:
        [nn.Module, Optimizer] -- Converted model and optimizer
    """
    if is_mixed_precision():
        from apex import amp
        if optimizers is not None:
            model, optimizers = \
                amp.initialize(model, optimizers, opt_level=get_optim_level())
        else:
            model = amp.initialize(model, opt_level=get_optim_level())
    return model, optimizers 
Author: Philip-Bachman, Project: amdim-public, Lines of code: 20, Source: mixed_precision.py
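
The two branches above exist because amp.initialize mirrors its inputs: given a model and an optimizer it returns a (model, optimizer) tuple, while given a model alone it returns just the model. A hedged sketch of both call shapes, assuming `model` and `optimizer` already exist on the GPU (the "O1" opt_level is an assumption):

from apex import amp

# Either: with an optimizer, a (model, optimizer) pair comes back.
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")

# Or: without one (e.g. inference only), the model alone comes back.
model = amp.initialize(model, opt_level="O1")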

Example 4: test_larc_mixed_precision

# Required import: from apex import amp [as alias]
# Or: from apex.amp import initialize [as alias]
# (This test also uses LARC, defined in apex.parallel.LARC.)
def test_larc_mixed_precision(self):
        for opt_level in ["O0", "O1", "O2", "O3"]:
            model = MyModel(1)

            optimizer = LARC(
                torch.optim.SGD(
                    [{"params": model.parameters(), "lr": 0.25}], momentum=0.125
                )
            )

            model, optimizer = amp.initialize(
                model, optimizer, opt_level=opt_level, verbosity=0
            )

            optimizer.zero_grad()
            loss = model(self.x)
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
            optimizer.step() 
Author: NVIDIA, Project: apex, Lines of code: 21, Source: test_larc.py

Example 5: predict

# Required import: from apex import amp [as alias]
# Or: from apex.amp import initialize [as alias]
def predict(args, model):
    """Entrypoint for predict mode"""

    test_loader = dataset.get_test_loader(args)
    train_loader, val_loader = dataset.get_train_val_loader(args, predict=True)

    if args.fp16:
        model = amp.initialize(model, opt_level='O1')

    logging.info('Starting prediction')

    output = {}
    for k, loader in [('test', test_loader),
                      ('val', val_loader)]:
        output[k] = {}
        res = infer(args, model, loader)

        for i, v in res.items():
            d = loader.dataset.data[i]
            name = '{}_{}_{}'.format(d[0], d[1], d[2])
            if name not in output[k]:
                output[k][name] = []
            output[k][name].append(v)

    logging.info('Saving predictions to {}'.format(args.load + '.output' + args.pred_suffix))
    with open(args.load + '.output' + args.pred_suffix, 'wb') as file:
        pickle.dump(output, file) 
Author: maciej-sypetkowski, Project: kaggle-rcic-1st, Lines of code: 29, Source: main.py

Example 6: _init_amp

# Required import: from apex import amp [as alias]
# Or: from apex.amp import initialize [as alias]
def _init_amp(model, device, optimizer=None, use_amp=None):
    model = model.to(device)
    if use_amp and optimizer:
        if AMP_AVAILABLE:
            model, optimizer = amp.initialize(model, optimizer, opt_level=use_amp)
        else:
            logger.warning(f"Can't find AMP although you specificed to use amp with level {use_amp}. Will continue without AMP ...")

    return model, optimizer 
Author: deepset-ai, Project: FARM, Lines of code: 11, Source: optimization.py
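
The AMP_AVAILABLE flag used above is typically set once at module import time with a try/except guard; a minimal sketch of that common pattern (the logger setup is an assumption):

import logging

logger = logging.getLogger(__name__)

try:
    from apex import amp
    AMP_AVAILABLE = True
except ImportError:
    amp = None
    AMP_AVAILABLE = False
    logger.info("apex is not installed; automatic mixed precision is unavailable.")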

Example 7: setup

# Required import: from apex import amp [as alias]
# Or: from apex.amp import initialize [as alias]
def setup(rank, world_size, offset=0):
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = str(12355+offset)

    # initialize the process group
    dist.init_process_group("gloo", rank=rank, world_size=world_size)

    # Explicitly setting seed to make sure that models created in two processes
    # start from same random weights and biases.
    torch.manual_seed(42) 
Author: maxjiang93, Project: space_time_pde, Lines of code: 12, Source: train_ddp.py
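
This setup helper has no matching teardown shown; each process conventionally calls dist.destroy_process_group() when training finishes. A minimal sketch of that counterpart (the cleanup function name is an assumption, mirroring common PyTorch DDP tutorials):

import torch.distributed as dist

def cleanup():
    # Tear down the process group created in `setup`.
    dist.destroy_process_group()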

Example 8: prepare_for_training

# Required import: from apex import amp [as alias]
# Or: from apex.amp import initialize [as alias]
def prepare_for_training(args, model, checkpoint_state_dict, amp):
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
         'weight_decay': args.weight_decay},
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)

    if amp:
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
        if checkpoint_state_dict:
            amp.load_state_dict(checkpoint_state_dict['amp'])

    if checkpoint_state_dict:
        optimizer.load_state_dict(checkpoint_state_dict['optimizer'])
        model.load_state_dict(checkpoint_state_dict['model'])

    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)

    return model, optimizer 
Author: microsoft, Project: unilm, Lines of code: 30, Source: run_seq2seq.py
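
The amp.load_state_dict call above restores the loss scaler that was captured at save time with amp.state_dict(). A hedged sketch of how such a checkpoint might be written, with key names mirroring the ones read above (the output path is an assumption):

import torch
from apex import amp

# Assumes `model` and `optimizer` have already been wrapped by `amp.initialize`.
checkpoint_state_dict = {
    "model": model.state_dict(),
    "optimizer": optimizer.state_dict(),
    "amp": amp.state_dict(),  # loss-scaler state needed to resume fp16 training
}
torch.save(checkpoint_state_dict, "checkpoint.bin")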

Example 9: __init__

# Required import: from apex import amp [as alias]
# Or: from apex.amp import initialize [as alias]
def __init__(
        self, model, optimizer, criterion, metrics=None, callbacks=ConsoleLogger(), gradient_clip_val=0, accumulate_steps=1,
    ):
        super().__init__()

        if not hasattr(amp._amp_state, "opt_properties"):
            model_optimizer = amp.initialize(model, optimizer, enabled=False)
            model, optimizer = (model_optimizer, None) if optimizer is None else model_optimizer

        self.state = RunnerState(model=model, optimizer=optimizer, criterion=criterion, metrics=metrics,)
        self.callbacks = Callbacks(callbacks)
        self.callbacks.set_state(self.state)
        self.gradient_clip_val = gradient_clip_val
        self.accumulate_steps = accumulate_steps 
Author: bonlime, Project: pytorch-tools, Lines of code: 16, Source: wrapper.py

Example 10: load_checkpoint

# Required import: from apex import amp [as alias]
# Or: from apex.amp import initialize [as alias]
def load_checkpoint(self):
        try:
            map_location = "cuda:0" if torch.cuda.is_available() else "cpu"
            ckpt = load_checkpoint(self.checkpoint_dir,
                                   map_location=map_location)
            # Transition settings
            self.is_transitioning = ckpt["is_transitioning"]
            self.transition_step = ckpt["transition_step"]
            self.current_imsize = ckpt["current_imsize"]
            self.latest_switch = ckpt["latest_switch"]

            # Tracking stats
            self.global_step = ckpt["global_step"]
            self.start_time = time.time() - ckpt["total_time"] * 60
            self.num_skipped_steps = ckpt["num_skipped_steps"]

            # Models
            self.discriminator.load_state_dict(ckpt['D'])

            self.generator.load_state_dict(ckpt['G'])
            self.running_average_generator.load_state_dict(
                ckpt["running_average_generator"])
            to_cuda([self.generator, self.discriminator,
                     self.running_average_generator])
            self.running_average_generator = amp.initialize(self.running_average_generator,
                                                            None, opt_level=self.opt_level)
            self.init_optimizers()
            self.d_optimizer.load_state_dict(ckpt['d_optimizer'])
            self.g_optimizer.load_state_dict(ckpt['g_optimizer'])
            return True
        except FileNotFoundError as e:
            print(e)
            print(' [*] No checkpoint!')
            return False 
Author: hukkelas, Project: DeepPrivacy, Lines of code: 36, Source: train.py

Example 11: init_running_average_generator

# Required import: from apex import amp [as alias]
# Or: from apex.amp import initialize [as alias]
def init_running_average_generator(self):
        self.running_average_generator = Generator(self.pose_size,
                                                   self.start_channel_size,
                                                   self.image_channels)
        self.running_average_generator = wrap_models(
            self.running_average_generator)
        to_cuda(self.running_average_generator)
        self.running_average_generator = amp.initialize(self.running_average_generator,
                                                        None, opt_level=self.opt_level) 
Author: hukkelas, Project: DeepPrivacy, Lines of code: 11, Source: train.py

Example 12: extend_running_average_generator

# Required import: from apex import amp [as alias]
# Or: from apex.amp import initialize [as alias]
def extend_running_average_generator(self):
        g = self.running_average_generator
        g.extend()

        for avg_param, cur_param in zip(g.new_parameters(), self.generator.new_parameters()):
            assert avg_param.data.shape == cur_param.data.shape, "AVG param: {}, cur_param: {}".format(
                avg_param.shape, cur_param.shape)
            avg_param.data = cur_param.data
        to_cuda(g)
        self.running_average_generator = amp.initialize(
            self.running_average_generator, None, opt_level=self.opt_level) 
Author: hukkelas, Project: DeepPrivacy, Lines of code: 13, Source: train.py

Example 13: initialize_amp

# Required import: from apex import amp [as alias]
# Or: from apex.amp import initialize [as alias]
def initialize_amp(self):
        to_cuda([self.generator, self.discriminator])
        [self.generator, self.discriminator], [self.g_optimizer, self.d_optimizer] = amp.initialize(
            [self.generator, self.discriminator],
            [self.g_optimizer, self.d_optimizer],
            opt_level=self.opt_level,
            num_losses=4) 
Author: hukkelas, Project: DeepPrivacy, Lines of code: 9, Source: train.py
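
The num_losses=4 argument tells amp to maintain a separate dynamic loss scaler per loss; each backward pass then selects its scaler through the loss_id argument of amp.scale_loss. A minimal sketch of that pairing (the GAN loss and optimizer names are assumptions):

from apex import amp

# With num_losses=4, each backward pass names its own scaler slot.
with amp.scale_loss(d_loss, d_optimizer, loss_id=0) as scaled_loss:
    scaled_loss.backward()
with amp.scale_loss(g_loss, g_optimizer, loss_id=1) as scaled_loss:
    scaled_loss.backward()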

Example 14: _try_setup_apex

# Required import: from apex import amp [as alias]
# Or: from apex.amp import initialize [as alias]
def _try_setup_apex(self):
        """Sets up the model for fp16 training via apex if available."""
        if self.use_fp16 and amp:
            self.models, self.optimizers = amp.initialize(
                self.models, self.optimizers, **self.apex_args) 
Author: ray-project, Project: ray, Lines of code: 7, Source: torch_runner.py
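
Here self.apex_args is a keyword dict forwarded verbatim to amp.initialize, and amp itself is expected to be None when apex failed to import (the guard pattern shown after Example 6). A plausible value for apex_args, offered only as an assumption:

# Example keyword arguments forwarded to amp.initialize (values are assumptions):
apex_args = {"opt_level": "O1", "verbosity": 1}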

Example 15: main

# Required import: from apex import amp [as alias]
# Or: from apex.amp import initialize [as alias]
def main(mel_files, waveglow_path, sigma, output_dir, sampling_rate, is_fp16,
         denoiser_strength):
    mel_files = files_to_list(mel_files)
    waveglow = torch.load(waveglow_path)['model']
    waveglow = waveglow.remove_weightnorm(waveglow)
    waveglow.cuda().eval()
    if is_fp16:
        from apex import amp
        waveglow, _ = amp.initialize(waveglow, [], opt_level="O3")

    if denoiser_strength > 0:
        denoiser = Denoiser(waveglow).cuda()

    for i, file_path in enumerate(mel_files):
        file_name = os.path.splitext(os.path.basename(file_path))[0]
        mel = torch.load(file_path)
        mel = torch.autograd.Variable(mel.cuda())
        mel = torch.unsqueeze(mel, 0)
        mel = mel.half() if is_fp16 else mel
        with torch.no_grad():
            audio = waveglow.infer(mel, sigma=sigma)
            if denoiser_strength > 0:
                audio = denoiser(audio, denoiser_strength)
            audio = audio * MAX_WAV_VALUE
        audio = audio.squeeze()
        audio = audio.cpu().numpy()
        audio = audio.astype('int16')
        audio_path = os.path.join(
            output_dir, "{}_synthesis.wav".format(file_name))
        write(audio_path, sampling_rate, audio)
        print(audio_path) 
Author: NVIDIA, Project: waveglow, Lines of code: 33, Source: inference.py
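
Note the pairing in this example: opt_level="O3" keeps the whole model in pure fp16, which is why the input mel spectrogram is explicitly converted with mel.half() when is_fp16 is set; under "O1", amp's patched ops would cast internally and fp32 inputs would be fine. Passing an empty list for the optimizers argument also makes amp.initialize return a two-element result, hence the `waveglow, _ = ...` unpacking seen above.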


Note: The apex.amp.initialize examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and distribution or use should follow each project's license. Do not reproduce without permission.