

Python optim.LBFGS Code Examples

This article compiles typical usage examples of torch.optim.LBFGS (the L-BFGS optimizer class) in Python. If you are wondering what optim.LBFGS does, how to use it, or what real code that uses it looks like, the curated examples below may help. You can also explore other usage examples from the torch.optim module.


The following presents 15 code examples of optim.LBFGS, sorted by popularity by default.
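
Before the examples, here is a minimal, self-contained sketch of the calling convention torch.optim.LBFGS expects: unlike first-order optimizers such as SGD or Adam, LBFGS.step() takes a closure that re-evaluates the loss and gradients, because L-BFGS may evaluate the objective several times per step. The toy model, data, and hyperparameters below are illustrative assumptions, not taken from any of the examples that follow.

import torch
from torch import nn, optim

torch.manual_seed(0)
x = torch.randn(64, 3)   # toy inputs (assumed for illustration)
y = torch.randn(64, 1)   # toy targets
model = nn.Linear(3, 1)
loss_fn = nn.MSELoss()

# L-BFGS may call the closure multiple times per step, so the closure must
# recompute the forward pass, reset gradients, and backprop on every call.
optimizer = optim.LBFGS(model.parameters(), lr=0.5, max_iter=20, history_size=10)

def closure():
    optimizer.zero_grad()
    loss = loss_fn(model(x), y)
    loss.backward()
    return loss

for epoch in range(5):
    loss = optimizer.step(closure)   # step() returns the loss computed by the closure
    print(f"epoch {epoch}: loss {loss.item():.6f}")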

Example 1: fit

# Required import: from torch import optim [as alias]
# Or: from torch.optim import LBFGS [as alias]
def fit(self, observations, labels):
    def closure():
      predicted = self.predict(observations)
      loss = self.loss_fn(predicted, labels)
      self.optimizer.zero_grad()
      loss.backward()
      return loss
    old_params = parameters_to_vector(self.model.parameters())
    for lr in self.lr * .5**np.arange(10):
      self.optimizer = optim.LBFGS(self.model.parameters(), lr=lr)
      self.optimizer.step(closure)
      current_params = parameters_to_vector(self.model.parameters())
      if any(np.isnan(current_params.data.cpu().numpy())):
        print("LBFGS optimization diverged. Rolling back update...")
        vector_to_parameters(old_params, self.model.parameters())
      else:
        return 
Developer: mjacar, Project: pytorch-trpo, Lines: 19, Source: torch_utils.py

Example 2: polished_loss_fft_learn_perm

# Required import: from torch import optim [as alias]
# Or: from torch.optim import LBFGS [as alias]
def polished_loss_fft_learn_perm(trainable):
    model = trainable.model
    polished_model = ButterflyProduct(size=model.size, complex=model.complex, fixed_order=True)
    temperature = 1.0 / (0.3 * trainable._iteration + 1)
    trainable.perm = torch.argmax(sinkhorn(model.perm_logit / temperature), dim=1)
    if not model.fixed_order:
        prob = model.softmax_fn(model.logit)
        maxes, argmaxes = torch.max(prob, dim=-1)
        polished_model.factors = nn.ModuleList([model.factors[argmax] for argmax in argmaxes])
    else:
        polished_model.factors = model.factors
    preopt_loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm], trainable.target_matrix)
    optimizer = optim.LBFGS(polished_model.parameters())
    def closure():
        optimizer.zero_grad()
        loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm], trainable.target_matrix)
        loss.backward()
        return loss
    for i in range(N_LBFGS_STEPS_VALIDATION):
        optimizer.step(closure)
    loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm], trainable.target_matrix)
    # return loss.item() if not torch.isnan(loss) else preopt_loss.item() if not torch.isnan(preopt_loss) else float('inf')
    return loss.item() if not torch.isnan(loss) else preopt_loss.item() if not torch.isnan(preopt_loss) else 9999.0 
Developer: HazyResearch, Project: learning-circuits, Lines: 25, Source: learning_fft_old.py

Example 3: polish_fft_blockperm

# Required import: from torch import optim [as alias]
# Or: from torch.optim import LBFGS [as alias]
def polish_fft_blockperm(trial):
    """Load model from checkpoint, then fix the order of the factor
    matrices (using the largest logits), and re-optimize using L-BFGS to find
    the nearest local optimum.
    """
    trainable = eval(trial.trainable_name)(trial.config)
    trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
    model = trainable.model
    config = trial.config
    perm = model[0].argmax()
    polished_model = Block2x2DiagProduct(size=config['size'], complex=True)
    polished_model.load_state_dict(model[1].state_dict())
    optimizer = optim.LBFGS(polished_model.parameters())
    def closure():
        optimizer.zero_grad()
        loss = nn.functional.mse_loss(polished_model(trainable.input[:, perm]), trainable.target_matrix)
        loss.backward()
        return loss
    for i in range(N_LBFGS_STEPS):
        optimizer.step(closure)
    torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
    loss = nn.functional.mse_loss(polished_model(trainable.input[:, perm]), trainable.target_matrix)
    return loss.item() 
Developer: HazyResearch, Project: learning-circuits, Lines: 25, Source: learning_fft_old.py

Example 4: setup_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import LBFGS [as alias]
def setup_optimizer(img):
    if params.optimizer == 'lbfgs':
        print("Running optimization with L-BFGS")
        optim_state = {
            'max_iter': params.num_iterations,
            'tolerance_change': -1,
            'tolerance_grad': -1,
        }
        if params.lbfgs_num_correction != 100:
            optim_state['history_size'] = params.lbfgs_num_correction
        optimizer = optim.LBFGS([img], **optim_state)
        loopVal = 1
    elif params.optimizer == 'adam':
        print("Running optimization with ADAM")
        optimizer = optim.Adam([img], lr = params.learning_rate)
        loopVal = params.num_iterations - 1
    return optimizer, loopVal 
Developer: ProGamerGov, Project: neural-style-pt, Lines: 19, Source: neural_style.py

Example 5: polish_fft_blockperm_transpose

# Required import: from torch import optim [as alias]
# Or: from torch.optim import LBFGS [as alias]
def polish_fft_blockperm_transpose(trial):
    """Load model from checkpoint, then fix the order of the factor
    matrices (using the largest logits), and re-optimize using L-BFGS to find
    the nearest local optimum.
    """
    trainable = eval(trial.trainable_name)(trial.config)
    trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
    model = trainable.model
    config = trial.config
    perm = model[1].argmax()
    polished_model = Block2x2DiagProduct(size=config['size'], complex=True, decreasing_size=False)
    polished_model.load_state_dict(model[0].state_dict())
    optimizer = optim.LBFGS(polished_model.parameters())
    def closure():
        optimizer.zero_grad()
        loss = nn.functional.mse_loss(polished_model(trainable.input)[:, perm], trainable.target_matrix)
        loss.backward()
        return loss
    for i in range(N_LBFGS_STEPS):
        optimizer.step(closure)
    torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
    loss = nn.functional.mse_loss(polished_model(trainable.input)[:, perm], trainable.target_matrix)
    return loss.item() 
Developer: HazyResearch, Project: learning-circuits, Lines: 25, Source: learning_fft_old.py

Example 6: polish

# Required import: from torch import optim [as alias]
# Or: from torch.optim import LBFGS [as alias]
def polish(self, nmaxsteps=50, patience=5, threshold=1e-10, save_to_self_model=False):
    if not save_to_self_model:
        model_bak = self.model
        self.model = copy.deepcopy(self.model)
    self.freeze()
    optimizer = optim.LBFGS(filter(lambda p: p.requires_grad, self.model.parameters()))
    def closure():
        optimizer.zero_grad()
        loss = self.loss()
        loss.backward()
        return loss
    n_bad_steps = 0
    best_loss = float('inf')
    for i in range(nmaxsteps):
        loss = optimizer.step(closure)
        if loss.item() < best_loss - threshold:
            best_loss = loss.item()
            n_bad_steps = 0
        else:
            n_bad_steps += 1
        if n_bad_steps > patience:
            break
    if not save_to_self_model:
        self.model = model_bak
    return loss.item()
Developer: HazyResearch, Project: learning-circuits, Lines: 27, Source: training.py

Example 7: calibrate

# Required import: from torch import optim [as alias]
# Or: from torch.optim import LBFGS [as alias]
def calibrate(network, loader, device, indexes, calibration_type="linear"):
    """Corrects the bias for new classes.

    :param network: The logits extractor model, usually convnet+FC w/o final act.
    :param loader: The validation data loader.
    :param device: Device on which apply the computation.
    :param indexes: A list of tuple made a starting and ending indexes. They delimit
                    on which range of targets to apply the calibration. If given
                    several tuples, different models will be used per range.
    :return: A wrapper `CalibrationWrapper`.
    """
    logits, labels = _extract_data(network, loader, device)
    calibration_wrapper = _get_calibration_model(indexes, calibration_type).to(device)

    def eval():
        corrected_logits = calibration_wrapper(logits)
        loss = F.cross_entropy(corrected_logits, labels)
        loss.backward()
        return loss

    optimizer = optim.LBFGS(calibration_wrapper.parameters(), lr=0.01, max_iter=50)
    optimizer.step(eval)

    return calibration_wrapper 
Developer: arthurdouillard, Project: incremental_learning.pytorch, Lines: 26, Source: calibration.py

Example 8: get_input_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import LBFGS [as alias]
def get_input_optimizer(input_img):
    # this line to show that input is a parameter that requires a gradient
    optimizer = optim.LBFGS([input_img.requires_grad_()])
    return optimizer


######################################################################
# Finally, we must define a function that performs the neural transfer. For
# each iteration of the networks, it is fed an updated input and computes
# new losses. We will run the ``backward`` methods of each loss module to
# dynamically compute their gradients. The optimizer requires a "closure"
# function, which reevaluates the module and returns the loss.
#
# We still have one final constraint to address. The network may try to
# optimize the input with values that exceed the 0 to 1 tensor range for
# the image. We can address this by correcting the input values to be
# between 0 and 1 each time the network is run.
# (A hedged sketch of such a closure follows this example.)
Developer: allegroai, Project: trains, Lines: 20, Source: pytorch_matplotlib.py
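
The comment block above, from the neural style transfer example, describes a closure that re-clamps the optimized image into the [0, 1] range on every evaluation in addition to computing the loss. Below is a hedged, self-contained sketch of that pattern; the random target image and the plain MSE objective are placeholders standing in for the real weighted style and content losses.

import torch
from torch import optim

# Placeholder data, assumed for illustration: a random "target" image and
# the image tensor being optimized directly by L-BFGS.
target = torch.rand(1, 3, 64, 64)
input_img = torch.rand(1, 3, 64, 64).requires_grad_()
optimizer = optim.LBFGS([input_img])

def closure():
    # Re-project the image into the valid [0, 1] range before each evaluation.
    with torch.no_grad():
        input_img.clamp_(0, 1)
    optimizer.zero_grad()
    # Placeholder objective; the real example sums weighted style and content losses.
    loss = torch.nn.functional.mse_loss(input_img, target)
    loss.backward()
    return loss

for step in range(10):
    optimizer.step(closure)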

Example 9: _train_kf

# Required import: from torch import optim [as alias]
# Or: from torch.optim import LBFGS [as alias]
def _train_kf(self, data: torch.Tensor, num_epochs: int = 8, cls: Type['KalmanFilter'] = KalmanFilter):
    kf = cls(
        measures=['y'],
        processes=[
            LocalLevel(id='local_level').add_measure('y'),
            Season(id='day_in_week', seasonal_period=7, dt_unit='D').add_measure('y')
        ]
    )
    kf.opt = LBFGS(kf.parameters())

    start_datetimes = (
        np.zeros(self.config['num_groups'], dtype='timedelta64') + DEFAULT_START_DT
    )

    def closure():
        kf.opt.zero_grad()
        pred = kf(data, start_datetimes=start_datetimes)
        loss = -pred.log_prob(data).mean()
        loss.backward()
        return loss

    print(f"Will train for {num_epochs} epochs...")
    loss = float('nan')
    for i in range(num_epochs):
        new_loss = kf.opt.step(closure)
        print(f"EPOCH {i}, LOSS {new_loss.item()}, DELTA {loss - new_loss.item()}")
        loss = new_loss.item()

    return kf(data, start_datetimes=start_datetimes).predictions
Developer: strongio, Project: torch-kalman, Lines: 31, Source: test_training.py

Example 10: get_optimiser

# Required import: from torch import optim [as alias]
# Or: from torch.optim import LBFGS [as alias]
def get_optimiser(name, net_params, optim_params):
    lr = optim_params['learning_rate']
    momentum = optim_params['momentum']
    weight_decay = optim_params['weight_decay']
    if(name == "SGD"):
        return optim.SGD(net_params, lr, 
            momentum = momentum, weight_decay = weight_decay)
    elif(name == "Adam"):
        return optim.Adam(net_params, lr, weight_decay = 1e-5)
    elif(name == "SparseAdam"):
        return optim.SparseAdam(net_params, lr)
    elif(name == "Adadelta"):
        return optim.Adadelta(net_params, lr, weight_decay = weight_decay)
    elif(name == "Adagrad"):
        return optim.Adagrad(net_params, lr, weight_decay = weight_decay)
    elif(name == "Adamax"):
        return optim.Adamax(net_params, lr, weight_decay = weight_decay)
    elif(name == "ASGD"):
        return optim.ASGD(net_params, lr, weight_decay = weight_decay)
    elif(name == "LBFGS"):
        return optim.LBFGS(net_params, lr)
    elif(name == "RMSprop"):
        return optim.RMSprop(net_params, lr, momentum = momentum,
            weight_decay = weight_decay)
    elif(name == "Rprop"):
        return optim.Rprop(net_params, lr)
    else:
        raise ValueError("unsupported optimizer {0:}".format(name)) 
Developer: HiLab-git, Project: PyMIC, Lines: 30, Source: get_optimizer.py

Example 11: polish_hadamard

# Required import: from torch import optim [as alias]
# Or: from torch.optim import LBFGS [as alias]
def polish_hadamard(trial):
    """Load model from checkpoint, then fix the order of the factor
    matrices (using the largest logits), and re-optimize using L-BFGS to find
    the nearest local optimum.
    """
    trainable = eval(trial.trainable_name)(trial.config)
    trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
    model = trainable.model
    config = trial.config
    polished_model = ButterflyProduct(size=config['size'], complex=model.complex, fixed_order=True)
    if not model.fixed_order:
        prob = model.softmax_fn(model.logit)
        maxes, argmaxes = torch.max(prob, dim=-1)
        polished_model.factors = nn.ModuleList([model.factors[argmax] for argmax in argmaxes])
    else:
        polished_model.factors = model.factors
    optimizer = optim.LBFGS(polished_model.parameters())
    def closure():
        optimizer.zero_grad()
        loss = nn.functional.mse_loss(polished_model.matrix(), trainable.target_matrix)
        loss.backward()
        return loss
    for i in range(N_LBFGS_STEPS):
        optimizer.step(closure)
    torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
    loss = nn.functional.mse_loss(polished_model.matrix(), trainable.target_matrix)
    return loss.item() 
Developer: HazyResearch, Project: learning-circuits, Lines: 29, Source: learning_hadamard.py

Example 12: polish_ops

# Required import: from torch import optim [as alias]
# Or: from torch.optim import LBFGS [as alias]
def polish_ops(trial):
    """Load model from checkpoint, and re-optimize using L-BFGS to find
    the nearest local optimum.
    """
    trainable = eval(trial.trainable_name)(trial.config)
    trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
    model = trainable.model
    config = trial.config
    polished_model = HstackDiagProduct(size=config['size'])
    polished_model.factors = model.factors
    polished_model.P_init = model.P_init
    optimizer = optim.LBFGS(polished_model.parameters())
    def closure():
        optimizer.zero_grad()
        eye = torch.eye(polished_model.size)
        x = (eye[:, :, None, None] * torch.eye(2)).unsqueeze(-1)
        y = polished_model(x[:, trainable.br_perm])
        loss = nn.functional.mse_loss(y, trainable.target_matrix)
        loss.backward()
        return loss
    for i in range(N_LBFGS_STEPS):
        optimizer.step(closure)
    torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
    eye = torch.eye(polished_model.size)
    x = (eye[:, :, None, None] * torch.eye(2)).unsqueeze(-1)
    y = polished_model(x[:, trainable.br_perm])
    loss = nn.functional.mse_loss(y, trainable.target_matrix)
    return loss.item() 
Developer: HazyResearch, Project: learning-circuits, Lines: 30, Source: learning_ops.py

Example 13: polish_fft

# Required import: from torch import optim [as alias]
# Or: from torch.optim import LBFGS [as alias]
def polish_fft(trial):
    """Load model from checkpoint, then fix the order of the factor
    matrices (using the largest logits), and re-optimize using L-BFGS to find
    the nearest local optimum.
    """
    trainable = eval(trial.trainable_name)(trial.config)
    trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
    model = trainable.model
    config = trial.config
    polished_model = ButterflyProduct(size=config['size'], complex=model.complex, fixed_order=True)
    if not model.fixed_order:
        prob = model.softmax_fn(model.logit)
        maxes, argmaxes = torch.max(prob, dim=-1)
        polished_model.factors = nn.ModuleList([model.factors[argmax] for argmax in argmaxes])
    else:
        polished_model.factors = model.factors
    optimizer = optim.LBFGS(polished_model.parameters())
    def closure():
        optimizer.zero_grad()
        loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.br_perm], trainable.target_matrix)
        loss.backward()
        return loss
    for i in range(N_LBFGS_STEPS):
        optimizer.step(closure)
    torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
    loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.br_perm], trainable.target_matrix)
    return loss.item() 
Developer: HazyResearch, Project: learning-circuits, Lines: 29, Source: learning_fft_old.py

Example 14: polish_fft_learn_perm

# Required import: from torch import optim [as alias]
# Or: from torch.optim import LBFGS [as alias]
def polish_fft_learn_perm(trial):
    """Load model from checkpoint, then fix the order of the factor
    matrices (using the largest logits), and re-optimize using L-BFGS to find
    the nearest local optimum.
    """
    trainable = eval(trial.trainable_name)(trial.config)
    trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
    model = trainable.model
    config = trial.config
    polished_model = ButterflyProduct(size=config['size'], complex=model.complex, fixed_order=True)
    temperature = 1.0 / (0.3 * trainable._iteration + 1)
    trainable.perm = torch.argmax(sinkhorn(model.perm_logit / temperature), dim=1)
    if not model.fixed_order:
        prob = model.softmax_fn(model.logit)
        maxes, argmaxes = torch.max(prob, dim=-1)
        polished_model.factors = nn.ModuleList([model.factors[argmax] for argmax in argmaxes])
    else:
        polished_model.factors = model.factors
    optimizer = optim.LBFGS(polished_model.parameters())
    def closure():
        optimizer.zero_grad()
        loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm], trainable.target_matrix)
        loss.backward()
        return loss
    for i in range(N_LBFGS_STEPS):
        optimizer.step(closure)
    torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
    loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm], trainable.target_matrix)
    return loss.item() 
Developer: HazyResearch, Project: learning-circuits, Lines: 31, Source: learning_fft_old.py

Example 15: polish_dct_real

# Required import: from torch import optim [as alias]
# Or: from torch.optim import LBFGS [as alias]
def polish_dct_real(trial):
    """Load model from checkpoint, then fix the order of the factor
    matrices (using the largest logits), and re-optimize using L-BFGS to find
    the nearest local optimum.
    """
    trainable = eval(trial.trainable_name)(trial.config)
    trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
    model = trainable.model
    config = trial.config
    polished_model = ButterflyProduct(size=config['size'], complex=model.complex, fixed_order=True)
    if not model.fixed_order:
        prob = model.softmax_fn(model.logit)
        maxes, argmaxes = torch.max(prob, dim=-1)
        polished_model.factors = nn.ModuleList([model.factors[argmax] for argmax in argmaxes])
    else:
        polished_model.factors = model.factors
    optimizer = optim.LBFGS(polished_model.parameters())
    def closure():
        optimizer.zero_grad()
        loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm], trainable.target_matrix)
        loss.backward()
        return loss
    for i in range(N_LBFGS_STEPS):
        optimizer.step(closure)
    torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
    loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm], trainable.target_matrix)
    return loss.item() 
Developer: HazyResearch, Project: learning-circuits, Lines: 29, Source: learning_vandermonde.py


Note: The torch.optim.LBFGS examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers; copyright remains with the original authors. Please consult each project's license before distributing or using the code, and do not reproduce this article without permission.