

Python utils.parameters_to_vector Method Code Examples

This article collects typical usage examples of the torch.nn.utils.parameters_to_vector method in Python. If you are wondering what utils.parameters_to_vector does, how to call it, or where to find real-world examples, the curated code samples below may help. You can also explore further usage examples from its containing module, torch.nn.utils.


The following presents 13 code examples of the utils.parameters_to_vector method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.

Example 1: get_master

# Required import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import parameters_to_vector [as alias]
def get_master(layer_groups, flat_master: bool = False):
    "Return two lists, one for the model parameters in FP16 and one for the master parameters in FP32."
    split_groups = split_bn_bias(layer_groups)
    model_params = [[param for param in lg.parameters() if param.requires_grad] for lg in split_groups]
    if flat_master:
        master_params = []
        for lg in model_params:
            if len(lg) != 0:
                mp = parameters_to_vector([param.data.float() for param in lg])
                mp = torch.nn.Parameter(mp, requires_grad=True)
                if mp.grad is None: mp.grad = mp.new(*mp.size())
                master_params.append([mp])
            else:
                master_params.append([])
        return model_params, master_params
    else:
        master_params = [[param.clone().float().detach() for param in lg] for lg in model_params]
        for mp in master_params:
            for param in mp: param.requires_grad = True
        return model_params, master_params 
Developer: sshaoshuai, Project: PointRCNN, Lines: 22, Source: fastai_optim.py
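
For orientation, here is a minimal, self-contained sketch of the `flat_master` idea in isolation. The toy `Linear` model is an assumption for illustration; `split_bn_bias` is a fastai helper and is not reproduced here.

import torch
from torch.nn.utils import parameters_to_vector

# Hypothetical FP16 model (illustration only).
model = torch.nn.Linear(4, 2).half()
fp16_params = [p for p in model.parameters() if p.requires_grad]

# Flatten all FP16 parameters into one FP32 master parameter,
# mirroring the flat_master branch of get_master above.
master = torch.nn.Parameter(
    parameters_to_vector([p.data.float() for p in fp16_params]))
print(master.shape)  # torch.Size([10]): 8 weights + 2 biases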

Example 2: get_master

# Required import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import parameters_to_vector [as alias]
def get_master(layer_groups, flat_master: bool = False):
    "Return two lists, one for the model parameters in FP16 and one for the master parameters in FP32."
    split_groups = split_bn_bias(layer_groups)
    model_params = [[
        param for param in lg.parameters() if param.requires_grad
    ] for lg in split_groups]
    if flat_master:
        master_params = []
        for lg in model_params:
            if len(lg) != 0:
                mp = parameters_to_vector([param.data.float() for param in lg])
                mp = torch.nn.Parameter(mp, requires_grad=True)
                if mp.grad is None: mp.grad = mp.new(*mp.size())
                master_params.append([mp])
            else:
                master_params.append([])
        return model_params, master_params
    else:
        master_params = [[param.clone().float().detach() for param in lg]
                         for lg in model_params]
        for mp in master_params:
            for param in mp:
                param.requires_grad = True
        return model_params, master_params 
Developer: traveller59, Project: second.pytorch, Lines: 26, Source: fastai_optim.py

Example 3: model_g2master_g

# Required import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import parameters_to_vector [as alias]
def model_g2master_g(model_params, master_params,
                     flat_master: bool = False) -> None:
    "Copy the `model_params` gradients to `master_params` for the optimizer step."
    if flat_master:
        for model_group, master_group in zip(model_params, master_params):
            if len(master_group) != 0:
                master_group[0].grad.data.copy_(
                    parameters_to_vector(
                        [p.grad.data.float() for p in model_group]))
    else:
        for model_group, master_group in zip(model_params, master_params):
            for model, master in zip(model_group, master_group):
                if model.grad is not None:
                    if master.grad is None:
                        master.grad = master.data.new(*master.data.size())
                    master.grad.data.copy_(model.grad.data)
                else:
                    master.grad = None 
Developer: traveller59, Project: second.pytorch, Lines: 20, Source: fastai_optim.py
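
A hedged sketch of the single copy performed by the `flat_master` branch, with hypothetical gradient tensors standing in for the FP16 model gradients:

import torch
from torch.nn.utils import parameters_to_vector

# Hypothetical FP16 gradients and a pre-flattened FP32 master parameter.
grads = [torch.randn(2, 2).half(), torch.randn(2).half()]
master = torch.nn.Parameter(torch.zeros(6))
master.grad = master.new(*master.size())

# The one-shot gradient hand-off done above when flat_master=True:
master.grad.data.copy_(parameters_to_vector([g.float() for g in grads]))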

Example 4: model_g2master_g

# Required import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import parameters_to_vector [as alias]
def model_g2master_g(model_params, master_params, flat_master: bool = False) -> None:
    "Copy the `model_params` gradients to `master_params` for the optimizer step."
    if flat_master:
        for model_group, master_group in zip(model_params, master_params):
            if len(master_group) != 0:
                master_group[0].grad.data.copy_(
                    parameters_to_vector([p.grad.data.float() for p in model_group])
                )
    else:
        for model_group, master_group in zip(model_params, master_params):
            for model, master in zip(model_group, master_group):
                if model.grad is not None:
                    if master.grad is None:
                        master.grad = master.data.new(*master.data.size())
                    master.grad.data.copy_(model.grad.data)
                else:
                    master.grad = None 
Developer: poodarchu, Project: Det3D, Lines: 19, Source: fastai_optim.py

Example 5: model_g2master_g

# Required import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import parameters_to_vector [as alias]
def model_g2master_g(model_params, master_params, flat_master: bool = False) -> None:
    "Copy the `model_params` gradients to `master_params` for the optimizer step."
    if flat_master:
        for model_group, master_group in zip(model_params, master_params):
            if len(master_group) != 0:
                master_group[0].grad.data.copy_(parameters_to_vector([p.grad.data.float() for p in model_group]))
    else:
        for model_group, master_group in zip(model_params, master_params):
            for model, master in zip(model_group, master_group):
                if model.grad is not None:
                    if master.grad is None: master.grad = master.data.new(*master.data.size())
                    master.grad.data.copy_(model.grad.data)
                else:
                    master.grad = None 
Developer: sshaoshuai, Project: PointRCNN, Lines: 16, Source: fastai_optim.py

Example 6: add_weight_noise

# Required import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import parameters_to_vector [as alias]
def add_weight_noise(self, std=0.075):
        """Add variational weight noise to weight parametesr.

        Args:
            std (float): standard deviation

        """
        with torch.no_grad():
            param_vector = parameters_to_vector(self.parameters())
            normal_dist = torch.distributions.Normal(loc=torch.tensor([0.]), scale=torch.tensor([std]))
            noise = normal_dist.sample(param_vector.size())
            if self.device_id >= 0:
                noise = noise.cuda(self.device_id)
            param_vector.add_(noise[0])
        vector_to_parameters(param_vector, self.parameters()) 
Developer: hirofumi0810, Project: neural_sp, Lines: 17, Source: base.py
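
The same flatten → perturb → unflatten round trip can be reproduced on a plain module; this sketch assumes a toy `Linear` layer and the default `std=0.075`:

import torch
from torch.nn.utils import parameters_to_vector, vector_to_parameters

model = torch.nn.Linear(8, 8)
with torch.no_grad():
    param_vector = parameters_to_vector(model.parameters())
    param_vector.add_(torch.randn_like(param_vector) * 0.075)  # Gaussian noise
vector_to_parameters(param_vector, model.parameters())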

Example 7: flat_grad

# Required import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import parameters_to_vector [as alias]
def flat_grad(f, param, **kwargs):
    return parameters_to_vector(torch.autograd.grad(f, param, **kwargs)) 
Developer: kashif, Project: firedup, Lines: 4, Source: core.py
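
A usage sketch under assumed inputs (a toy model and a squared-error loss); `flat_grad` simply concatenates the per-parameter gradients into one vector:

import torch
from torch.nn.utils import parameters_to_vector

model = torch.nn.Linear(4, 1)
loss = model(torch.randn(16, 4)).pow(2).mean()

# Equivalent to flat_grad(loss, list(model.parameters())):
g = parameters_to_vector(torch.autograd.grad(loss, list(model.parameters())))
print(g.shape)  # torch.Size([5]): 4 weights + 1 bias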

Example 8: hessian_vector_product

# Required import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import parameters_to_vector [as alias]
def hessian_vector_product(d_kl, x):
  g = parameters_to_vector(autograd.grad(d_kl, agent.actor.parameters(), create_graph=True))
  return parameters_to_vector(autograd.grad((g * x.detach()).sum(), agent.actor.parameters(), retain_graph=True)) + DAMPING_COEFF * x 
Developer: Kaixhin, Project: spinning-up-basic, Lines: 5, Source: trpo.py
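
The snippet above relies on the globals `agent` and `DAMPING_COEFF` from its surrounding script. A self-contained variant might look like the following sketch; the toy model, loss, and damping value are assumptions:

import torch
from torch import autograd
from torch.nn.utils import parameters_to_vector

model = torch.nn.Linear(3, 1)
params = list(model.parameters())
loss = model(torch.randn(5, 3)).pow(2).mean()

def hvp(x, damping=0.1):
    # First backward pass builds the gradient graph; the second
    # differentiates the directional derivative (g . x).
    g = parameters_to_vector(autograd.grad(loss, params, create_graph=True))
    return parameters_to_vector(
        autograd.grad((g * x.detach()).sum(), params, retain_graph=True)
    ) + damping * x

x = torch.randn(sum(p.numel() for p in params))
print(hvp(x).shape)  # torch.Size([4])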

Example 9: step

# Required import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import parameters_to_vector [as alias]
def step(self, closure=None, thr=1e-2, eps=1e-9):
        loss = None
        if closure is not None:
            loss = closure()
            world_size = self.dist.get_world_size()
            grads = [p.grad for p in self.model.parameters()]
            # pack
            packed_tensor = parameters_to_vector(grads)
            # all reduce
            self.dist.all_reduce(packed_tensor)
            # unpack
            vector_to_parameters(packed_tensor.div_(world_size), grads)

        if self.lars:
            for group in self.param_groups:
                for p in group['params']:
                    setattr(p, 'data_pre', p.data.detach().clone())

        self.actual_optimizer.step(closure=None)

        if self.lars:
            for group in self.param_groups:
                for p in group['params']:
                    d_norm_pre = p.data_pre.norm()
                    if d_norm_pre > thr:
                        upd = p.data - p.data_pre
                        upd_norm = upd.norm()
                        rate = group['lr'] * d_norm_pre / (upd_norm + eps)
                        p.data = p.data_pre.add(rate, upd)

        return loss 
Developer: cybertronai, Project: pytorch-sso, Lines: 33, Source: firstorder.py
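
The pack/all-reduce/unpack pattern can be exercised without a process group. The sketch below simulates the averaging step over two hypothetical workers' gradients:

import torch
from torch.nn.utils import parameters_to_vector, vector_to_parameters

grads_a = [torch.ones(2, 2), torch.ones(3)]          # worker 0 gradients
grads_b = [3 * torch.ones(2, 2), 3 * torch.ones(3)]  # worker 1 gradients

# pack -> (simulated) all_reduce -> unpack, as in `step` above
packed = parameters_to_vector(grads_a) + parameters_to_vector(grads_b)
vector_to_parameters(packed.div_(2), grads_a)
print(grads_a[0])  # all 2.0 -- grads_a now holds the worker average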

Example 10: get_master

# Required import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import parameters_to_vector [as alias]
def get_master(layer_groups, flat_master: bool = False):
    "Return two lists, one for the model parameters in FP16 and one for the master parameters in FP32."
    split_groups = split_bn_bias(layer_groups)
    model_params = [
        [param for param in lg.parameters() if param.requires_grad]
        for lg in split_groups
    ]
    if flat_master:
        master_params = []
        for lg in model_params:
            if len(lg) != 0:
                mp = parameters_to_vector([param.data.float() for param in lg])
                mp = torch.nn.Parameter(mp, requires_grad=True)
                if mp.grad is None:
                    mp.grad = mp.new(*mp.size())
                master_params.append([mp])
            else:
                master_params.append([])
        return model_params, master_params
    else:
        master_params = [
            [param.clone().float().detach() for param in lg] for lg in model_params
        ]
        for mp in master_params:
            for param in mp:
                param.requires_grad = True
        return model_params, master_params 
Developer: poodarchu, Project: Det3D, Lines: 29, Source: fastai_optim.py

Example 11: vector_to_parameter_list

# Required import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import parameters_to_vector [as alias]
def vector_to_parameter_list(vec, parameters):
    """
    Convert the vector `vec` to a parameter-list format matching `parameters`.

    This function is the inverse of `parameters_to_vector` from the
    pytorch module `torch.nn.utils.convert_parameters`.
    Contrary to `vector_to_parameters`, which replaces the value
    of the parameters, this function leaves the parameters unchanged and
    returns a list of parameter views of the vector.

    ```
    from torch.nn.utils import parameters_to_vector

    vector_view = parameters_to_vector(parameters)
    param_list_view = vector_to_parameter_list(vector_view, parameters)

    for a, b in zip(parameters, param_list_view):
        assert torch.allclose(a, b)
    ```

    Parameters:
    -----------
        vec: Tensor
            a single vector representing the parameters of a model
        parameters: (Iterable[Tensor])
            an iterable of Tensors with the desired shapes.
    """
    # Ensure vec of type Tensor
    if not isinstance(vec, torch.Tensor):
        raise TypeError(
            "expected torch.Tensor, but got: {}".format(torch.typename(vec))
        )
    params_new = []
    # Pointer for slicing the vector for each parameter
    pointer = 0
    for param in parameters:
        # The length of the parameter
        num_param = param.numel()
        # Slice the vector, reshape it
        param_new = vec[pointer : pointer + num_param].view_as(param).data
        params_new.append(param_new)
        # Increment the pointer
        pointer += num_param

    return params_new 
Developer: f-dangel, Project: backpack, Lines: 47, Source: convert_parameters.py
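
A round-trip usage sketch, assuming the function above is in scope and using a toy model:

import torch
from torch.nn.utils import parameters_to_vector

model = torch.nn.Linear(4, 2)
params = list(model.parameters())

vec = parameters_to_vector(params)
views = vector_to_parameter_list(vec, params)

# The views reproduce each parameter without modifying it.
for p, v in zip(params, views):
    assert torch.allclose(p, v)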

Example 12: trpo_update

# Required import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import parameters_to_vector [as alias]
def trpo_update(replay, policy, baseline):
    gamma = 0.99
    tau = 0.95
    max_kl = 0.01
    ls_max_steps = 15
    backtrack_factor = 0.5
    old_policy = deepcopy(policy)
    for step in range(10):
        states = replay.state()
        actions = replay.action()
        rewards = replay.reward()
        dones = replay.done()
        next_states = replay.next_state()
        returns = ch.td.discount(gamma, rewards, dones)
        baseline.fit(states, returns)
        values = baseline(states)
        next_values = baseline(next_states)

        # Compute KL
        with th.no_grad():
            old_density = old_policy.density(states)
        new_density = policy.density(states)
        kl = kl_divergence(old_density, new_density).mean()

        # Compute surrogate loss
        old_log_probs = old_density.log_prob(actions).mean(dim=1, keepdim=True)
        new_log_probs = new_density.log_prob(actions).mean(dim=1, keepdim=True)
        bootstraps = values * (1.0 - dones) + next_values * dones
        advantages = ch.pg.generalized_advantage(gamma, tau, rewards,
                                                 dones, bootstraps, th.zeros(1))
        advantages = ch.normalize(advantages).detach()
        surr_loss = trpo.policy_loss(new_log_probs, old_log_probs, advantages)

        # Compute the update
        grad = autograd.grad(surr_loss,
                             policy.parameters(),
                             retain_graph=True)
        Fvp = trpo.hessian_vector_product(kl, policy.parameters())
        grad = parameters_to_vector(grad).detach()
        step = trpo.conjugate_gradient(Fvp, grad)
        lagrange_mult = 0.5 * th.dot(step, Fvp(step)) / max_kl
        step = step / lagrange_mult
        step_ = [th.zeros_like(p.data) for p in policy.parameters()]
        vector_to_parameters(step, step_)
        step = step_

        #  Line-search
        for ls_step in range(ls_max_steps):
            stepsize = backtrack_factor**ls_step
            clone = deepcopy(policy)
            for c, u in zip(clone.parameters(), step):
                c.data.add_(-stepsize, u.data)
            new_density = clone.density(states)
            new_kl = kl_divergence(old_density, new_density).mean()
            new_log_probs = new_density.log_prob(actions).mean(dim=1, keepdim=True)
            new_loss = trpo.policy_loss(new_log_probs, old_log_probs, advantages)
            if new_loss < surr_loss and new_kl < max_kl:
                for p, c in zip(policy.parameters(), clone.parameters()):
                    p.data[:] = c.data[:]
                break 
Developer: learnables, Project: cherry, Lines: 62, Source: trpo_v_random.py
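
The unflatten-and-apply step at the heart of the update (flat CG solution → per-parameter tensors → line-search step) can be sketched in isolation; the stand-in policy and stepsize are assumptions:

import torch
from torch.nn.utils import vector_to_parameters

policy = torch.nn.Linear(4, 2)
flat_step = torch.randn(sum(p.numel() for p in policy.parameters()))

# Unflatten into tensors shaped like each parameter, then apply.
step = [torch.zeros_like(p.data) for p in policy.parameters()]
vector_to_parameters(flat_step, step)
with torch.no_grad():
    for p, u in zip(policy.parameters(), step):
        p.add_(-0.5 * u)  # first backtracking stepsize, as in the line search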

Example 13: conjugate_gradient

# Required import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import parameters_to_vector [as alias]
def conjugate_gradient(Ax, b, num_iterations=10, tol=1e-10, eps=1e-8):
    """
    [[Source]](https://github.com/seba-1511/cherry/blob/master/cherry/algorithms/trpo.py)

    **Description**

    Computes \\(x = A^{-1}b\\) using the conjugate gradient algorithm.

    **Credit**

    Adapted from Kai Arulkumaran's implementation, with additions inspired by John Schulman's implementation.

    **References**

    1. Nocedal and Wright. 2006. "Numerical Optimization, 2nd edition". Springer.
    2. Shewchuk. 1994. “An Introduction to the Conjugate Gradient Method without the Agonizing Pain.” CMU.

    **Arguments**

    * **Ax** (callable) - Given a vector x, computes A@x.
    * **b** (tensor or list) - The reference vector.
    * **num_iterations** (int, *optional*, default=10) - Number of conjugate gradient iterations.
    * **tol** (float, *optional*, default=1e-10) - Tolerance for proposed solution.
    * **eps** (float, *optional*, default=1e-8) - Numerical stability constant.

    **Returns**

    * **x** (tensor or list) - The solution to Ax = b, as a list if b is a list else a tensor.

    **Example**

    ~~~python
    pass
    ~~~
    """
    shape = None
    if not isinstance(b, th.Tensor):
        shape = [th.zeros_like(b_i) for b_i in b]
        b = parameters_to_vector(b)
    x = th.zeros_like(b)
    r = b
    p = r
    r_dot_old = th.dot(r, r)
    for _ in range(num_iterations):
        Ap = Ax(p)
        alpha = r_dot_old / (th.dot(p, Ap) + eps)
        x += alpha * p
        r -= alpha * Ap
        r_dot_new = th.dot(r, r)
        p = r + (r_dot_new / r_dot_old) * p
        r_dot_old = r_dot_new
        if r_dot_new.item() < tol:
            break
    if shape is not None:
        vector_to_parameters(x, shape)
        x = shape
    return x 
Developer: learnables, Project: cherry, Lines: 59, Source: trpo.py
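
A hedged usage sketch under assumed inputs, with the function above in scope. Since `r` initially aliases `b` and is updated in place, a clone is passed to keep `b` intact:

import torch as th

A = th.tensor([[4.0, 1.0], [1.0, 3.0]])  # symmetric positive definite (assumed)
b = th.tensor([1.0, 2.0])

x = conjugate_gradient(lambda v: A @ v, b.clone(), num_iterations=25)
print(x)      # ~ tensor([0.0909, 0.6364])
print(A @ x)  # ~ b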


Note: The torch.nn.utils.parameters_to_vector method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.