

Python amp.initialize Method Code Examples

This article collects typical usage examples of the Python method apex.amp.initialize. If you are struggling with questions such as: What exactly does amp.initialize do? How is amp.initialize called? What does real-world amp.initialize usage look like? Then the curated code examples below may help. You can also explore further usage examples from the apex.amp module.


The following presents 15 code examples of the amp.initialize method, sorted by popularity by default.
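Before the examples, a minimal sketch of the typical call pattern may help orient you. The model and tensors below are placeholders chosen for illustration; amp.initialize and amp.scale_loss are the actual apex APIs, and opt_level "O1" is just one common setting:

import torch
from apex import amp

model = torch.nn.Linear(10, 2).cuda()  # amp requires the model on a CUDA device
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# Wrap the model and optimizer; "O1" enables mixed precision via patched functions
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")

inputs = torch.randn(4, 10).cuda()
loss = model(inputs).sum()
with amp.scale_loss(loss, optimizer) as scaled_loss:  # scale the loss to avoid fp16 underflow
    scaled_loss.backward()
optimizer.step()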

Example 1: test_gradient_accumulation_with_apex_amp

# Required import: from apex import amp [as alias]
# Or: from apex.amp import initialize [as alias]
def test_gradient_accumulation_with_apex_amp(self, mocker):
        desired_bs, accum_steps = 32, 4
        real_bs = desired_bs // accum_steps
        num_iter = 10
        task = mod_task.XORTask(batch_size=real_bs)

        # Wrap the model and optimizer with `amp.initialize`. Besides, `amp`
        # requires a CUDA GPU, so we have to move the model to GPU first.
        model, optimizer, device = task.model, task.optimizer, task.device
        model = model.to(device)
        task.model, task.optimizer = amp.initialize(model, optimizer)

        lr_finder = prepare_lr_finder(task)
        spy = mocker.spy(amp, "scale_loss")

        lr_finder.range_test(
            task.train_loader, num_iter=num_iter, accumulation_steps=accum_steps
        )
        assert spy.call_count == accum_steps * num_iter 
Developer ID: davidtvs, Project: pytorch-lr-finder, Lines: 21, Source: test_lr_finder.py
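For reference, the gradient-accumulation pattern this test exercises looks roughly like the loop below. This is a sketch assuming a generic train_loader, criterion, and accum_steps, not the lr_finder internals; amp calls amp.scale_loss once per batch, which is why the test expects accum_steps * num_iter calls:

for i, (inputs, targets) in enumerate(train_loader):
    loss = criterion(model(inputs), targets)
    loss = loss / accum_steps  # average the loss over the accumulation window
    with amp.scale_loss(loss, optimizer) as scaled_loss:
        scaled_loss.backward()  # gradients accumulate across iterations
    if (i + 1) % accum_steps == 0:
        optimizer.step()  # update once every accum_steps batches
        optimizer.zero_grad()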

Example 2: test_mixed_precision

# Required import: from apex import amp [as alias]
# Or: from apex.amp import initialize [as alias]
def test_mixed_precision(self, mocker):
        batch_size = 32
        num_iter = 10
        task = mod_task.XORTask(batch_size=batch_size)

        # Wrap the model and optimizer with `amp.initialize`. Besides, `amp`
        # requires a CUDA GPU, so we have to move the model to GPU first.
        model, optimizer, device = task.model, task.optimizer, task.device
        model = model.to(device)
        task.model, task.optimizer = amp.initialize(model, optimizer)
        assert hasattr(task.optimizer, "_amp_stash")

        lr_finder = prepare_lr_finder(task)
        spy = mocker.spy(amp, "scale_loss")

        lr_finder.range_test(task.train_loader, num_iter=num_iter)
        # NOTE: Here we did not perform gradient accumulation, so the call
        # count of `amp.scale_loss` should equal `num_iter`.
        assert spy.call_count == num_iter 
Developer ID: davidtvs, Project: pytorch-lr-finder, Lines: 21, Source: test_lr_finder.py

Example 3: initialize

# Required import: from apex import amp [as alias]
# Or: from apex.amp import initialize [as alias]
def initialize(model, optimizers):
    """Initialize mixed precision

    Arguments:
        model {nn.Module} -- The model to convert
        optimizers {Optimizer} -- The optimizer(s) to wrap; may be None

    Returns:
        [nn.Module, Optimizer] -- Converted model and optimizer
    """
    if is_mixed_precision():
        from apex import amp
        if optimizers is not None:
            model, optimizers = \
                amp.initialize(model, optimizers, opt_level=get_optim_level())
        else:
            model = amp.initialize(model, opt_level=get_optim_level())
    return model, optimizers 
Developer ID: Philip-Bachman, Project: amdim-public, Lines: 20, Source: mixed_precision.py

Example 4: test_larc_mixed_precision

# Required import: from apex import amp [as alias]
# Or: from apex.amp import initialize [as alias]
def test_larc_mixed_precision(self):
        for opt_level in ["O0", "O1", "O2", "O3"]:
            model = MyModel(1)

            optimizer = LARC(
                torch.optim.SGD(
                    [{"params": model.parameters(), "lr": 0.25}], momentum=0.125
                )
            )

            model, optimizer = amp.initialize(
                model, optimizer, opt_level=opt_level, verbosity=0
            )

            optimizer.zero_grad()
            loss = model(self.x)
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
            optimizer.step() 
Developer ID: NVIDIA, Project: apex, Lines: 21, Source: test_larc.py

Example 5: predict

# Required import: from apex import amp [as alias]
# Or: from apex.amp import initialize [as alias]
def predict(args, model):
    """Entrypoint for predict mode"""

    test_loader = dataset.get_test_loader(args)
    train_loader, val_loader = dataset.get_train_val_loader(args, predict=True)

    if args.fp16:
        model = amp.initialize(model, opt_level='O1')

    logging.info('Starting prediction')

    output = {}
    for k, loader in [('test', test_loader),
                      ('val', val_loader)]:
        output[k] = {}
        res = infer(args, model, loader)

        for i, v in res.items():
            d = loader.dataset.data[i]
            name = '{}_{}_{}'.format(d[0], d[1], d[2])
            if name not in output[k]:
                output[k][name] = []
            output[k][name].append(v)

    logging.info('Saving predictions to {}'.format(args.load + '.output' + args.pred_suffix))
    with open(args.load + '.output' + args.pred_suffix, 'wb') as file:
        pickle.dump(output, file) 
Developer ID: maciej-sypetkowski, Project: kaggle-rcic-1st, Lines: 29, Source: main.py

Example 6: _init_amp

# Required import: from apex import amp [as alias]
# Or: from apex.amp import initialize [as alias]
def _init_amp(model, device, optimizer=None, use_amp=None):
    model = model.to(device)
    if use_amp and optimizer:
        if AMP_AVAILABLE:
            model, optimizer = amp.initialize(model, optimizer, opt_level=use_amp)
        else:
            logger.warning(f"Can't find AMP although you specified to use amp with level {use_amp}. Will continue without AMP ...")

    return model, optimizer 
Developer ID: deepset-ai, Project: FARM, Lines: 11, Source: optimization.py
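The AMP_AVAILABLE flag used above is defined elsewhere in the project and not shown in this excerpt; a guarded-import pattern along the following lines is the usual way to set it (an assumption about this codebase, not a verbatim quote):

try:
    from apex import amp
    AMP_AVAILABLE = True
except ImportError:
    amp = None
    AMP_AVAILABLE = False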

Example 7: setup

# Required import: from apex import amp [as alias]
# Or: from apex.amp import initialize [as alias]
def setup(rank, world_size, offset=0):
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = str(12355+offset)

    # initialize the process group
    dist.init_process_group("gloo", rank=rank, world_size=world_size)

    # Explicitly setting seed to make sure that models created in two processes
    # start from same random weights and biases.
    torch.manual_seed(42) 
Developer ID: maxjiang93, Project: space_time_pde, Lines: 12, Source: train_ddp.py

Example 8: prepare_for_training

# Required import: from apex import amp [as alias]
# Or: from apex.amp import initialize [as alias]
def prepare_for_training(args, model, checkpoint_state_dict, amp):
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
         'weight_decay': args.weight_decay},
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)

    if amp:
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
        if checkpoint_state_dict:
            amp.load_state_dict(checkpoint_state_dict['amp'])

    if checkpoint_state_dict:
        optimizer.load_state_dict(checkpoint_state_dict['optimizer'])
        model.load_state_dict(checkpoint_state_dict['model'])

    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)

    return model, optimizer 
Developer ID: microsoft, Project: unilm, Lines: 30, Source: run_seq2seq.py
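Example 8 restores the loss-scaling state with amp.load_state_dict, but the saving side is outside this excerpt. A sketch of the matching save could look like the following; checkpoint_path is a hypothetical name, while amp.state_dict() is the real apex API:

checkpoint_state_dict = {
    'model': model.state_dict(),
    'optimizer': optimizer.state_dict(),
    'amp': amp.state_dict(),  # loss-scaler state, needed for exact resumption
}
torch.save(checkpoint_state_dict, checkpoint_path)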

Example 9: __init__

# 需要導入模塊: from apex import amp [as 別名]
# 或者: from apex.amp import initialize [as 別名]
def __init__(
        self, model, optimizer, criterion, metrics=None, callbacks=ConsoleLogger(), gradient_clip_val=0, accumulate_steps=1,
    ):
        super().__init__()

        if not hasattr(amp._amp_state, "opt_properties"):
            model_optimizer = amp.initialize(model, optimizer, enabled=False)
            model, optimizer = (model_optimizer, None) if optimizer is None else model_optimizer

        self.state = RunnerState(model=model, optimizer=optimizer, criterion=criterion, metrics=metrics,)
        self.callbacks = Callbacks(callbacks)
        self.callbacks.set_state(self.state)
        self.gradient_clip_val = gradient_clip_val
        self.accumulate_steps = accumulate_steps 
Developer ID: bonlime, Project: pytorch-tools, Lines: 16, Source: wrapper.py

Example 10: load_checkpoint

# Required import: from apex import amp [as alias]
# Or: from apex.amp import initialize [as alias]
def load_checkpoint(self):
        try:
            map_location = "cuda:0" if torch.cuda.is_available() else "cpu"
            ckpt = load_checkpoint(self.checkpoint_dir,
                                   map_location=map_location)
            # Transition settings
            self.is_transitioning = ckpt["is_transitioning"]
            self.transition_step = ckpt["transition_step"]
            self.current_imsize = ckpt["current_imsize"]
            self.latest_switch = ckpt["latest_switch"]

            # Tracking stats
            self.global_step = ckpt["global_step"]
            self.start_time = time.time() - ckpt["total_time"] * 60
            self.num_skipped_steps = ckpt["num_skipped_steps"]

            # Models
            self.discriminator.load_state_dict(ckpt['D'])

            self.generator.load_state_dict(ckpt['G'])
            self.running_average_generator.load_state_dict(
                ckpt["running_average_generator"])
            to_cuda([self.generator, self.discriminator,
                     self.running_average_generator])
            self.running_average_generator = amp.initialize(self.running_average_generator,
                                                            None, opt_level=self.opt_level)
            self.init_optimizers()
            self.d_optimizer.load_state_dict(ckpt['d_optimizer'])
            self.g_optimizer.load_state_dict(ckpt['g_optimizer'])
            return True
        except FileNotFoundError as e:
            print(e)
            print(' [*] No checkpoint!')
            return False 
Developer ID: hukkelas, Project: DeepPrivacy, Lines: 36, Source: train.py

Example 11: init_running_average_generator

# Required import: from apex import amp [as alias]
# Or: from apex.amp import initialize [as alias]
def init_running_average_generator(self):
        self.running_average_generator = Generator(self.pose_size,
                                                   self.start_channel_size,
                                                   self.image_channels)
        self.running_average_generator = wrap_models(
            self.running_average_generator)
        to_cuda(self.running_average_generator)
        self.running_average_generator = amp.initialize(self.running_average_generator,
                                                        None, opt_level=self.opt_level) 
Developer ID: hukkelas, Project: DeepPrivacy, Lines: 11, Source: train.py

Example 12: extend_running_average_generator

# Required import: from apex import amp [as alias]
# Or: from apex.amp import initialize [as alias]
def extend_running_average_generator(self):
        g = self.running_average_generator
        g.extend()

        for avg_param, cur_param in zip(g.new_parameters(), self.generator.new_parameters()):
            assert avg_param.data.shape == cur_param.data.shape, "AVG param: {}, cur_param: {}".format(
                avg_param.shape, cur_param.shape)
            avg_param.data = cur_param.data
        to_cuda(g)
        self.running_average_generator = amp.initialize(
            self.running_average_generator, None, opt_level=self.opt_level) 
Developer ID: hukkelas, Project: DeepPrivacy, Lines: 13, Source: train.py

Example 13: initialize_amp

# Required import: from apex import amp [as alias]
# Or: from apex.amp import initialize [as alias]
def initialize_amp(self):
        to_cuda([self.generator, self.discriminator])
        [self.generator, self.discriminator], [self.g_optimizer, self.d_optimizer] = amp.initialize(
            [self.generator, self.discriminator],
            [self.g_optimizer, self.d_optimizer],
            opt_level=self.opt_level,
            num_losses=4) 
Developer ID: hukkelas, Project: DeepPrivacy, Lines: 9, Source: train.py
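Passing num_losses=4 tells amp to maintain a separate loss scaler for each loss. The backward passes live elsewhere in this trainer; a sketch of how they might look is below, where the loss names are illustrative but loss_id is the real amp.scale_loss argument:

with amp.scale_loss(d_loss, self.d_optimizer, loss_id=0) as scaled_loss:
    scaled_loss.backward()
with amp.scale_loss(g_loss, self.g_optimizer, loss_id=1) as scaled_loss:
    scaled_loss.backward()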

Example 14: _try_setup_apex

# Required import: from apex import amp [as alias]
# Or: from apex.amp import initialize [as alias]
def _try_setup_apex(self):
        """Sets up the model for fp16 training via apex if available."""
        if self.use_fp16 and amp:
            self.models, self.optimizers = amp.initialize(
                self.models, self.optimizers, **self.apex_args) 
Developer ID: ray-project, Project: ray, Lines: 7, Source: torch_runner.py
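Here self.apex_args is a dict of keyword arguments forwarded to amp.initialize. Its contents are project configuration not shown in this excerpt; a plausible example (opt_level and loss_scale are real amp.initialize keywords) would be:

apex_args = {"opt_level": "O1", "loss_scale": "dynamic"}
models, optimizers = amp.initialize(models, optimizers, **apex_args)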

Example 15: main

# Required import: from apex import amp [as alias]
# Or: from apex.amp import initialize [as alias]
def main(mel_files, waveglow_path, sigma, output_dir, sampling_rate, is_fp16,
         denoiser_strength):
    mel_files = files_to_list(mel_files)
    waveglow = torch.load(waveglow_path)['model']
    waveglow = waveglow.remove_weightnorm(waveglow)
    waveglow.cuda().eval()
    if is_fp16:
        from apex import amp
        waveglow, _ = amp.initialize(waveglow, [], opt_level="O3")

    if denoiser_strength > 0:
        denoiser = Denoiser(waveglow).cuda()

    for i, file_path in enumerate(mel_files):
        file_name = os.path.splitext(os.path.basename(file_path))[0]
        mel = torch.load(file_path)
        mel = torch.autograd.Variable(mel.cuda())
        mel = torch.unsqueeze(mel, 0)
        mel = mel.half() if is_fp16 else mel
        with torch.no_grad():
            audio = waveglow.infer(mel, sigma=sigma)
            if denoiser_strength > 0:
                audio = denoiser(audio, denoiser_strength)
            audio = audio * MAX_WAV_VALUE
        audio = audio.squeeze()
        audio = audio.cpu().numpy()
        audio = audio.astype('int16')
        audio_path = os.path.join(
            output_dir, "{}_synthesis.wav".format(file_name))
        write(audio_path, sampling_rate, audio)
        print(audio_path) 
Developer ID: NVIDIA, Project: waveglow, Lines: 33, Source: inference.py


Note: The apex.amp.initialize method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers, and copyright remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce without permission.