

Python optim.LBFGS Code Examples

This article collects typical usage examples of the torch.optim.LBFGS optimizer in Python. If you are wondering what optim.LBFGS does, how to use it, or what real-world code using it looks like, the selected examples below may help. You can also explore further usage examples from the torch.optim module.


The following shows 15 code examples of optim.LBFGS, sorted by popularity by default.
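
A note on usage before the examples: unlike most optimizers in torch.optim, LBFGS.step() takes a closure that re-evaluates the model and returns the loss, because L-BFGS needs to re-compute the objective several times per step. A minimal, self-contained sketch of that pattern (the model, data, and hyperparameters below are made up for illustration and do not come from any of the projects listed here):

import torch
import torch.nn as nn
from torch import optim

# Toy data and model, purely illustrative.
x = torch.randn(64, 3)
y = torch.randn(64, 1)
model = nn.Linear(3, 1)

# L-BFGS keeps a history of past gradients; a single step() can perform
# several internal function/gradient evaluations through the closure.
optimizer = optim.LBFGS(model.parameters(), lr=1.0, max_iter=20)

def closure():
    optimizer.zero_grad()
    loss = nn.functional.mse_loss(model(x), y)
    loss.backward()
    return loss

for _ in range(10):
    loss = optimizer.step(closure)
print(loss.item())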

Example 1: fit

# Required import: from torch import optim [as alias]
# Or: from torch.optim import LBFGS [as alias]
def fit(self, observations, labels):
    def closure():
      predicted = self.predict(observations)
      loss = self.loss_fn(predicted, labels)
      self.optimizer.zero_grad()
      loss.backward()
      return loss
    old_params = parameters_to_vector(self.model.parameters())
    for lr in self.lr * .5**np.arange(10):
      self.optimizer = optim.LBFGS(self.model.parameters(), lr=lr)
      self.optimizer.step(closure)
      current_params = parameters_to_vector(self.model.parameters())
      if any(np.isnan(current_params.data.cpu().numpy())):
        print("LBFGS optimization diverged. Rolling back update...")
        vector_to_parameters(old_params, self.model.parameters())
      else:
        return 
Author: mjacar, Project: pytorch-trpo, Lines of code: 19, Source: torch_utils.py
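
Example 1 retries the update with a halved learning rate whenever L-BFGS diverges to NaN, rolling the parameters back each time. A stripped-down sketch of the same rollback pattern outside the class (the model, data, and initial learning rate are placeholders):

import numpy as np
import torch
import torch.nn as nn
from torch import optim
from torch.nn.utils import parameters_to_vector, vector_to_parameters

model = nn.Linear(4, 1)                       # placeholder model
x, y = torch.randn(32, 4), torch.randn(32, 1)  # placeholder data

def closure():
    optimizer.zero_grad()
    loss = nn.functional.mse_loss(model(x), y)
    loss.backward()
    return loss

old_params = parameters_to_vector(model.parameters())  # snapshot before the update
for lr in 1.0 * 0.5 ** np.arange(10):                   # halve the lr on each retry
    optimizer = optim.LBFGS(model.parameters(), lr=lr)
    optimizer.step(closure)
    new_params = parameters_to_vector(model.parameters())
    if torch.isnan(new_params).any():
        vector_to_parameters(old_params, model.parameters())  # roll back and retry
    else:
        break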

Example 2: polished_loss_fft_learn_perm

# Required import: from torch import optim [as alias]
# Or: from torch.optim import LBFGS [as alias]
def polished_loss_fft_learn_perm(trainable):
    model = trainable.model
    polished_model = ButterflyProduct(size=model.size, complex=model.complex, fixed_order=True)
    temperature = 1.0 / (0.3 * trainable._iteration + 1)
    trainable.perm = torch.argmax(sinkhorn(model.perm_logit / temperature), dim=1)
    if not model.fixed_order:
        prob = model.softmax_fn(model.logit)
        maxes, argmaxes = torch.max(prob, dim=-1)
        polished_model.factors = nn.ModuleList([model.factors[argmax] for argmax in argmaxes])
    else:
        polished_model.factors = model.factors
    preopt_loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm], trainable.target_matrix)
    optimizer = optim.LBFGS(polished_model.parameters())
    def closure():
        optimizer.zero_grad()
        loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm], trainable.target_matrix)
        loss.backward()
        return loss
    for i in range(N_LBFGS_STEPS_VALIDATION):
        optimizer.step(closure)
    loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm], trainable.target_matrix)
    # return loss.item() if not torch.isnan(loss) else preopt_loss.item() if not torch.isnan(preopt_loss) else float('inf')
    return loss.item() if not torch.isnan(loss) else preopt_loss.item() if not torch.isnan(preopt_loss) else 9999.0 
Author: HazyResearch, Project: learning-circuits, Lines of code: 25, Source: learning_fft_old.py
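
Most of the "polish" examples in this article share one skeleton: copy or rebuild the trained model, freeze its discrete choices, and run a fixed number of L-BFGS steps to settle into the nearest local optimum. A generic sketch of that skeleton with a plain nn.Linear standing in for ButterflyProduct (target_matrix and N_LBFGS_STEPS are placeholders here):

import copy
import torch
import torch.nn as nn
from torch import optim

N_LBFGS_STEPS = 50                     # placeholder, mirrors the constant used above
target_matrix = torch.randn(8, 8)      # placeholder target
model = nn.Linear(8, 8, bias=False)    # stand-in for the factored model

polished_model = copy.deepcopy(model)  # polish a copy so the original stays intact
optimizer = optim.LBFGS(polished_model.parameters())

def closure():
    optimizer.zero_grad()
    loss = nn.functional.mse_loss(polished_model.weight, target_matrix)
    loss.backward()
    return loss

for _ in range(N_LBFGS_STEPS):
    optimizer.step(closure)

final_loss = nn.functional.mse_loss(polished_model.weight, target_matrix)
print(final_loss.item())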

Example 3: polish_fft_blockperm

# Required import: from torch import optim [as alias]
# Or: from torch.optim import LBFGS [as alias]
def polish_fft_blockperm(trial):
    """Load model from checkpoint, then fix the order of the factor
    matrices (using the largest logits), and re-optimize using L-BFGS to find
    the nearest local optimum.
    """
    trainable = eval(trial.trainable_name)(trial.config)
    trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
    model = trainable.model
    config = trial.config
    perm = model[0].argmax()
    polished_model = Block2x2DiagProduct(size=config['size'], complex=True)
    polished_model.load_state_dict(model[1].state_dict())
    optimizer = optim.LBFGS(polished_model.parameters())
    def closure():
        optimizer.zero_grad()
        loss = nn.functional.mse_loss(polished_model(trainable.input[:, perm]), trainable.target_matrix)
        loss.backward()
        return loss
    for i in range(N_LBFGS_STEPS):
        optimizer.step(closure)
    torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
    loss = nn.functional.mse_loss(polished_model(trainable.input[:, perm]), trainable.target_matrix)
    return loss.item() 
Author: HazyResearch, Project: learning-circuits, Lines of code: 25, Source: learning_fft_old.py

Example 4: setup_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import LBFGS [as alias]
def setup_optimizer(img):
    if params.optimizer == 'lbfgs':
        print("Running optimization with L-BFGS")
        optim_state = {
            'max_iter': params.num_iterations,
            'tolerance_change': -1,
            'tolerance_grad': -1,
        }
        if params.lbfgs_num_correction != 100:
            optim_state['history_size'] = params.lbfgs_num_correction
        optimizer = optim.LBFGS([img], **optim_state)
        loopVal = 1
    elif params.optimizer == 'adam':
        print("Running optimization with ADAM")
        optimizer = optim.Adam([img], lr = params.learning_rate)
        loopVal = params.num_iterations - 1
    return optimizer, loopVal 
Author: ProGamerGov, Project: neural-style-pt, Lines of code: 19, Source: neural_style.py
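
Setting tolerance_change and tolerance_grad to -1 disables L-BFGS's internal convergence checks, and max_iter carries the full iteration budget, so a single optimizer.step(closure) runs the entire optimization; that is why loopVal is 1 for L-BFGS but num_iterations - 1 for Adam. A hedged usage sketch (the image tensor and objective below are placeholders, not the actual neural-style-pt losses):

import torch
from torch import optim

img = torch.rand(1, 3, 256, 256, requires_grad=True)  # placeholder image tensor

optim_state = {
    'max_iter': 1000,          # run the full budget inside a single step() call
    'tolerance_change': -1,    # never stop on small objective changes
    'tolerance_grad': -1,      # never stop on small gradients
    'history_size': 100,
}
optimizer = optim.LBFGS([img], **optim_state)

def feval():
    optimizer.zero_grad()
    loss = (img ** 2).mean()   # placeholder objective; the real script sums style/content losses
    loss.backward()
    return loss

optimizer.step(feval)          # one call covers the entire optimization, matching loopVal = 1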

Example 5: polish_fft_blockperm_transpose

# Required import: from torch import optim [as alias]
# Or: from torch.optim import LBFGS [as alias]
def polish_fft_blockperm_transpose(trial):
    """Load model from checkpoint, then fix the order of the factor
    matrices (using the largest logits), and re-optimize using L-BFGS to find
    the nearest local optimum.
    """
    trainable = eval(trial.trainable_name)(trial.config)
    trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
    model = trainable.model
    config = trial.config
    perm = model[1].argmax()
    polished_model = Block2x2DiagProduct(size=config['size'], complex=True, decreasing_size=False)
    polished_model.load_state_dict(model[0].state_dict())
    optimizer = optim.LBFGS(polished_model.parameters())
    def closure():
        optimizer.zero_grad()
        loss = nn.functional.mse_loss(polished_model(trainable.input)[:, perm], trainable.target_matrix)
        loss.backward()
        return loss
    for i in range(N_LBFGS_STEPS):
        optimizer.step(closure)
    torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
    loss = nn.functional.mse_loss(polished_model(trainable.input)[:, perm], trainable.target_matrix)
    return loss.item() 
Author: HazyResearch, Project: learning-circuits, Lines of code: 25, Source: learning_fft_old.py

Example 6: polish

# Required import: from torch import optim [as alias]
# Or: from torch.optim import LBFGS [as alias]
def polish(self, nmaxsteps=50, patience=5, threshold=1e-10, save_to_self_model=False):
        if not save_to_self_model:
            model_bak = self.model
            self.model = copy.deepcopy(self.model)
        self.freeze()
        optimizer = optim.LBFGS(filter(lambda p: p.requires_grad, self.model.parameters()))
        def closure():
            optimizer.zero_grad()
            loss = self.loss()
            loss.backward()
            return loss
        n_bad_steps = 0
        best_loss = float('inf')
        for i in range(nmaxsteps):
            loss = optimizer.step(closure)
            if loss.item() < best_loss - threshold:
                best_loss = loss.item()
                n_bad_steps = 0
            else:
                n_bad_steps += 1
            if n_bad_steps > patience:
                break
        if not save_to_self_model:
            self.model = model_bak
        return loss.item() 
Author: HazyResearch, Project: learning-circuits, Lines of code: 27, Source: training.py
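
The loop above counts a step as "bad" when it improves the best loss by less than threshold, and stops after patience consecutive bad steps. A compact, self-contained sketch of the same bookkeeping (the model, data, and constants are placeholders):

import torch
import torch.nn as nn
from torch import optim

model = nn.Linear(10, 10)                  # placeholder model
inputs = torch.randn(16, 10)
target = torch.randn(16, 10)
optimizer = optim.LBFGS(model.parameters())

def closure():
    optimizer.zero_grad()
    loss = nn.functional.mse_loss(model(inputs), target)
    loss.backward()
    return loss

best_loss, n_bad_steps = float('inf'), 0
patience, threshold, nmaxsteps = 5, 1e-10, 50
for _ in range(nmaxsteps):
    loss = optimizer.step(closure)
    if loss.item() < best_loss - threshold:   # meaningful improvement resets the counter
        best_loss, n_bad_steps = loss.item(), 0
    else:
        n_bad_steps += 1
    if n_bad_steps > patience:
        break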

Example 7: calibrate

# Required import: from torch import optim [as alias]
# Or: from torch.optim import LBFGS [as alias]
def calibrate(network, loader, device, indexes, calibration_type="linear"):
    """Corrects the bias for new classes.

    :param network: The logits extractor model, usually convnet+FC w/o final act.
    :param loader: The validation data loader.
    :param device: Device on which apply the computation.
    :param indexes: A list of tuples, each made of a starting and an ending index. They
                    delimit the range of targets on which to apply the calibration. If
                    several tuples are given, a different model will be used per range.
    :return: A wrapper `CalibrationWrapper`.
    """
    logits, labels = _extract_data(network, loader, device)
    calibration_wrapper = _get_calibration_model(indexes, calibration_type).to(device)

    def eval():
        optimizer.zero_grad()  # reset gradients; L-BFGS calls this closure several times per step
        corrected_logits = calibration_wrapper(logits)
        loss = F.cross_entropy(corrected_logits, labels)
        loss.backward()
        return loss

    optimizer = optim.LBFGS(calibration_wrapper.parameters(), lr=0.01, max_iter=50)
    optimizer.step(eval)

    return calibration_wrapper 
Author: arthurdouillard, Project: incremental_learning.pytorch, Lines of code: 26, Source: calibration.py
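
L-BFGS suits post-hoc calibration well because only a few parameters are fitted against a fixed set of cached logits. A minimal sketch using single-parameter temperature scaling in place of the project's CalibrationWrapper (the logits and labels below are random placeholders):

import torch
import torch.nn.functional as F
from torch import optim

logits = torch.randn(1000, 10)              # placeholder cached logits
labels = torch.randint(0, 10, (1000,))      # placeholder labels
temperature = torch.ones(1, requires_grad=True)

optimizer = optim.LBFGS([temperature], lr=0.01, max_iter=50)

def eval_closure():
    optimizer.zero_grad()
    loss = F.cross_entropy(logits / temperature, labels)  # rescale logits by a learned temperature
    loss.backward()
    return loss

optimizer.step(eval_closure)
print(temperature.item())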

Example 8: get_input_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import LBFGS [as alias]
def get_input_optimizer(input_img):
    # this line shows that the input is a parameter that requires a gradient
    optimizer = optim.LBFGS([input_img.requires_grad_()])
    return optimizer


######################################################################
# Finally, we must define a function that performs the neural transfer. For
# each iteration of the network, it is fed an updated input and computes
# new losses. We will run the ``backward`` methods of each loss module to
# dynamically compute their gradients. The optimizer requires a "closure"
# function, which reevaluates the module and returns the loss.
#
# We still have one final constraint to address. The network may try to
# optimize the input with values that exceed the 0 to 1 tensor range for
# the image. We can address this by correcting the input values to be
# between 0 and 1 each time the network is run.
# 
Author: allegroai, Project: trains, Lines of code: 20, Source: pytorch_matplotlib.py
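
A sketch of the closure described in the comments above, clamping the image back into [0, 1] on every evaluation; the single mse_loss here stands in for the tutorial's summed style and content losses, and the tensors are placeholders:

import torch
from torch import optim

input_img = torch.rand(1, 3, 128, 128)       # placeholder image
target = torch.rand(1, 3, 128, 128)          # stand-in for style/content targets
optimizer = optim.LBFGS([input_img.requires_grad_()])

def closure():
    with torch.no_grad():
        input_img.clamp_(0, 1)               # keep pixel values in [0, 1]
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(input_img, target)  # stand-in for the summed losses
    loss.backward()
    return loss

for _ in range(10):
    optimizer.step(closure)

with torch.no_grad():
    input_img.clamp_(0, 1)                   # final correction after optimization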

Example 9: _train_kf

# Required import: from torch import optim [as alias]
# Or: from torch.optim import LBFGS [as alias]
def _train_kf(self, data: torch.Tensor, num_epochs: int = 8, cls: Type['KalmanFilter'] = KalmanFilter):
        kf = cls(
            measures=['y'],
            processes=[
                LocalLevel(id='local_level').add_measure('y'),
                Season(id='day_in_week', seasonal_period=7, dt_unit='D').add_measure('y')
            ]
        )
        kf.opt = LBFGS(kf.parameters())

        start_datetimes = (
                np.zeros(self.config['num_groups'], dtype='timedelta64') + DEFAULT_START_DT
        )

        def closure():
            kf.opt.zero_grad()
            pred = kf(data, start_datetimes=start_datetimes)
            loss = -pred.log_prob(data).mean()
            loss.backward()
            return loss

        print(f"Will train for {num_epochs} epochs...")
        loss = float('nan')
        for i in range(num_epochs):
            new_loss = kf.opt.step(closure)
            print(f"EPOCH {i}, LOSS {new_loss.item()}, DELTA {loss - new_loss.item()}")
            loss = new_loss.item()

        return kf(data, start_datetimes=start_datetimes).predictions 
Author: strongio, Project: torch-kalman, Lines of code: 31, Source: test_training.py
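
Here the closure returns a negative log-likelihood rather than a pointwise loss, a common pairing with L-BFGS for probabilistic models. A hedged sketch of the same idea with a plain Gaussian in place of torch-kalman's KalmanFilter (the data and parameterization below are made up):

import torch
from torch import optim
from torch.distributions import Normal

data = torch.randn(100) * 2.0 + 3.0        # placeholder observations

# Learn a mean and a (log) standard deviation by maximum likelihood.
mu = torch.zeros(1, requires_grad=True)
log_sigma = torch.zeros(1, requires_grad=True)
opt = optim.LBFGS([mu, log_sigma])

def closure():
    opt.zero_grad()
    loss = -Normal(mu, log_sigma.exp()).log_prob(data).mean()  # negative log-likelihood
    loss.backward()
    return loss

for epoch in range(8):
    loss = opt.step(closure)
    print(f"EPOCH {epoch}, LOSS {loss.item():.4f}")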

Example 10: get_optimiser

# Required import: from torch import optim [as alias]
# Or: from torch.optim import LBFGS [as alias]
def get_optimiser(name, net_params, optim_params):
    lr = optim_params['learning_rate']
    momentum = optim_params['momentum']
    weight_decay = optim_params['weight_decay']
    if(name == "SGD"):
        return optim.SGD(net_params, lr, 
            momentum = momentum, weight_decay = weight_decay)
    elif(name == "Adam"):
        return optim.Adam(net_params, lr, weight_decay = 1e-5)
    elif(name == "SparseAdam"):
        return optim.SparseAdam(net_params, lr)
    elif(name == "Adadelta"):
        return optim.Adadelta(net_params, lr, weight_decay = weight_decay)
    elif(name == "Adagrad"):
        return optim.Adagrad(net_params, lr, weight_decay = weight_decay)
    elif(name == "Adamax"):
        return optim.Adamax(net_params, lr, weight_decay = weight_decay)
    elif(name == "ASGD"):
        return optim.ASGD(net_params, lr, weight_decay = weight_decay)
    elif(name == "LBFGS"):
        return optim.LBFGS(net_params, lr)
    elif(name == "RMSprop"):
        return optim.RMSprop(net_params, lr, momentum = momentum,
            weight_decay = weight_decay)
    elif(name == "Rprop"):
        return optim.Rprop(net_params, lr)
    else:
        raise ValueError("unsupported optimizer {0:}".format(name)) 
Author: HiLab-git, Project: PyMIC, Lines of code: 30, Source: get_optimizer.py
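
A hedged usage sketch for the factory above; the optim_params values are illustrative, not PyMIC's actual configuration (note that momentum and weight_decay are read by get_optimiser but ignored when the L-BFGS branch is taken):

import torch.nn as nn

net = nn.Conv2d(1, 16, kernel_size=3)   # placeholder network
optim_params = {
    'learning_rate': 1.0,               # L-BFGS usually tolerates a learning rate near 1
    'momentum': 0.9,                    # required by the dict, unused for L-BFGS
    'weight_decay': 1e-4,               # likewise unused for L-BFGS
}
optimizer = get_optimiser("LBFGS", net.parameters(), optim_params)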

Example 11: polish_hadamard

# Required import: from torch import optim [as alias]
# Or: from torch.optim import LBFGS [as alias]
def polish_hadamard(trial):
    """Load model from checkpoint, then fix the order of the factor
    matrices (using the largest logits), and re-optimize using L-BFGS to find
    the nearest local optimum.
    """
    trainable = eval(trial.trainable_name)(trial.config)
    trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
    model = trainable.model
    config = trial.config
    polished_model = ButterflyProduct(size=config['size'], complex=model.complex, fixed_order=True)
    if not model.fixed_order:
        prob = model.softmax_fn(model.logit)
        maxes, argmaxes = torch.max(prob, dim=-1)
        polished_model.factors = nn.ModuleList([model.factors[argmax] for argmax in argmaxes])
    else:
        polished_model.factors = model.factors
    optimizer = optim.LBFGS(polished_model.parameters())
    def closure():
        optimizer.zero_grad()
        loss = nn.functional.mse_loss(polished_model.matrix(), trainable.target_matrix)
        loss.backward()
        return loss
    for i in range(N_LBFGS_STEPS):
        optimizer.step(closure)
    torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
    loss = nn.functional.mse_loss(polished_model.matrix(), trainable.target_matrix)
    return loss.item() 
Author: HazyResearch, Project: learning-circuits, Lines of code: 29, Source: learning_hadamard.py

Example 12: polish_ops

# Required import: from torch import optim [as alias]
# Or: from torch.optim import LBFGS [as alias]
def polish_ops(trial):
    """Load model from checkpoint, and re-optimize using L-BFGS to find
    the nearest local optimum.
    """
    trainable = eval(trial.trainable_name)(trial.config)
    trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
    model = trainable.model
    config = trial.config
    polished_model = HstackDiagProduct(size=config['size'])
    polished_model.factors = model.factors
    polished_model.P_init = model.P_init
    optimizer = optim.LBFGS(polished_model.parameters())
    def closure():
        optimizer.zero_grad()
        eye = torch.eye(polished_model.size)
        x = (eye[:, :, None, None] * torch.eye(2)).unsqueeze(-1)
        y = polished_model(x[:, trainable.br_perm])
        loss = nn.functional.mse_loss(y, trainable.target_matrix)
        loss.backward()
        return loss
    for i in range(N_LBFGS_STEPS):
        optimizer.step(closure)
    torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
    eye = torch.eye(polished_model.size)
    x = (eye[:, :, None, None] * torch.eye(2)).unsqueeze(-1)
    y = polished_model(x[:, trainable.br_perm])
    loss = nn.functional.mse_loss(y, trainable.target_matrix)
    return loss.item() 
Author: HazyResearch, Project: learning-circuits, Lines of code: 30, Source: learning_ops.py

Example 13: polish_fft

# Required import: from torch import optim [as alias]
# Or: from torch.optim import LBFGS [as alias]
def polish_fft(trial):
    """Load model from checkpoint, then fix the order of the factor
    matrices (using the largest logits), and re-optimize using L-BFGS to find
    the nearest local optimum.
    """
    trainable = eval(trial.trainable_name)(trial.config)
    trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
    model = trainable.model
    config = trial.config
    polished_model = ButterflyProduct(size=config['size'], complex=model.complex, fixed_order=True)
    if not model.fixed_order:
        prob = model.softmax_fn(model.logit)
        maxes, argmaxes = torch.max(prob, dim=-1)
        polished_model.factors = nn.ModuleList([model.factors[argmax] for argmax in argmaxes])
    else:
        polished_model.factors = model.factors
    optimizer = optim.LBFGS(polished_model.parameters())
    def closure():
        optimizer.zero_grad()
        loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.br_perm], trainable.target_matrix)
        loss.backward()
        return loss
    for i in range(N_LBFGS_STEPS):
        optimizer.step(closure)
    torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
    loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.br_perm], trainable.target_matrix)
    return loss.item() 
Author: HazyResearch, Project: learning-circuits, Lines of code: 29, Source: learning_fft_old.py

Example 14: polish_fft_learn_perm

# Required import: from torch import optim [as alias]
# Or: from torch.optim import LBFGS [as alias]
def polish_fft_learn_perm(trial):
    """Load model from checkpoint, then fix the order of the factor
    matrices (using the largest logits), and re-optimize using L-BFGS to find
    the nearest local optimum.
    """
    trainable = eval(trial.trainable_name)(trial.config)
    trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
    model = trainable.model
    config = trial.config
    polished_model = ButterflyProduct(size=config['size'], complex=model.complex, fixed_order=True)
    temperature = 1.0 / (0.3 * trainable._iteration + 1)
    trainable.perm = torch.argmax(sinkhorn(model.perm_logit / temperature), dim=1)
    if not model.fixed_order:
        prob = model.softmax_fn(model.logit)
        maxes, argmaxes = torch.max(prob, dim=-1)
        polished_model.factors = nn.ModuleList([model.factors[argmax] for argmax in argmaxes])
    else:
        polished_model.factors = model.factors
    optimizer = optim.LBFGS(polished_model.parameters())
    def closure():
        optimizer.zero_grad()
        loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm], trainable.target_matrix)
        loss.backward()
        return loss
    for i in range(N_LBFGS_STEPS):
        optimizer.step(closure)
    torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
    loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm], trainable.target_matrix)
    return loss.item() 
Author: HazyResearch, Project: learning-circuits, Lines of code: 31, Source: learning_fft_old.py

Example 15: polish_dct_real

# Required import: from torch import optim [as alias]
# Or: from torch.optim import LBFGS [as alias]
def polish_dct_real(trial):
    """Load model from checkpoint, then fix the order of the factor
    matrices (using the largest logits), and re-optimize using L-BFGS to find
    the nearest local optimum.
    """
    trainable = eval(trial.trainable_name)(trial.config)
    trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
    model = trainable.model
    config = trial.config
    polished_model = ButterflyProduct(size=config['size'], complex=model.complex, fixed_order=True)
    if not model.fixed_order:
        prob = model.softmax_fn(model.logit)
        maxes, argmaxes = torch.max(prob, dim=-1)
        polished_model.factors = nn.ModuleList([model.factors[argmax] for argmax in argmaxes])
    else:
        polished_model.factors = model.factors
    optimizer = optim.LBFGS(polished_model.parameters())
    def closure():
        optimizer.zero_grad()
        loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm], trainable.target_matrix)
        loss.backward()
        return loss
    for i in range(N_LBFGS_STEPS):
        optimizer.step(closure)
    torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
    loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm], trainable.target_matrix)
    return loss.item() 
Author: HazyResearch, Project: learning-circuits, Lines of code: 29, Source: learning_vandermonde.py


Note: The torch.optim.LBFGS examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their original authors, who retain copyright of the source code; please consult each project's license before distributing or using it. Do not reproduce this article without permission.