

Python utils.parameters_to_vector Method Code Examples

This article collects typical usage examples of the torch.nn.utils.parameters_to_vector method in Python. If you are unsure what utils.parameters_to_vector does, how to call it, or where it is useful, the curated examples below should help. You can also explore further usage examples from torch.nn.utils.


The 13 code examples of utils.parameters_to_vector below are sorted by popularity.
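Before the examples, here is a minimal sketch (not taken from any of the projects below) of what the method does: it concatenates every parameter of a module into a single flat 1-D tensor, and vector_to_parameters performs the inverse copy.

import torch
from torch import nn
from torch.nn.utils import parameters_to_vector, vector_to_parameters

model = nn.Linear(4, 2)

with torch.no_grad():
    # Flatten all parameters into one 1-D tensor (4*2 weights + 2 biases = 10).
    vec = parameters_to_vector(model.parameters())
    assert vec.numel() == sum(p.numel() for p in model.parameters())
    # Copy (shifted) values back into the individual parameters.
    vector_to_parameters(vec + 0.01, model.parameters())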

Example 1: get_master

# Required import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import parameters_to_vector [as alias]
def get_master(layer_groups, flat_master: bool = False):
    "Return two lists, one for the model parameters in FP16 and one for the master parameters in FP32."
    split_groups = split_bn_bias(layer_groups)
    model_params = [[param for param in lg.parameters() if param.requires_grad] for lg in split_groups]
    if flat_master:
        master_params = []
        for lg in model_params:
            if len(lg) != 0:
                mp = parameters_to_vector([param.data.float() for param in lg])
                mp = torch.nn.Parameter(mp, requires_grad=True)
                if mp.grad is None: mp.grad = mp.new(*mp.size())
                master_params.append([mp])
            else:
                master_params.append([])
        return model_params, master_params
    else:
        master_params = [[param.clone().float().detach() for param in lg] for lg in model_params]
        for mp in master_params:
            for param in mp: param.requires_grad = True
        return model_params, master_params 
Developer: sshaoshuai, Project: PointRCNN, Lines: 22, Source: fastai_optim.py
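The heart of the flat_master branch above is a single FP32 vector standing in for all FP16 weights. A hypothetical sketch of that round trip (the tiny model below is illustrative, not from the fastai source):

import torch
from torch import nn
from torch.nn.utils import parameters_to_vector

model = nn.Linear(4, 2).half()
model_params = [p for p in model.parameters() if p.requires_grad]

# One flat, trainable FP32 master copy of all FP16 parameters.
master = nn.Parameter(parameters_to_vector([p.data.float() for p in model_params]))

# After an optimizer step on `master`, copy slices back into the FP16 weights.
offset = 0
for p in model_params:
    n = p.numel()
    p.data.copy_(master.data[offset:offset + n].view_as(p))  # FP32 -> FP16 cast
    offset += n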

Example 2: get_master

# Required import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import parameters_to_vector [as alias]
def get_master(layer_groups, flat_master: bool = False):
    "Return two lists, one for the model parameters in FP16 and one for the master parameters in FP32."
    split_groups = split_bn_bias(layer_groups)
    model_params = [[
        param for param in lg.parameters() if param.requires_grad
    ] for lg in split_groups]
    if flat_master:
        master_params = []
        for lg in model_params:
            if len(lg) != 0:
                mp = parameters_to_vector([param.data.float() for param in lg])
                mp = torch.nn.Parameter(mp, requires_grad=True)
                if mp.grad is None: mp.grad = mp.new(*mp.size())
                master_params.append([mp])
            else:
                master_params.append([])
        return model_params, master_params
    else:
        master_params = [[param.clone().float().detach() for param in lg]
                         for lg in model_params]
        for mp in master_params:
            for param in mp:
                param.requires_grad = True
        return model_params, master_params 
Developer: traveller59, Project: second.pytorch, Lines: 26, Source: fastai_optim.py

Example 3: model_g2master_g

# Required import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import parameters_to_vector [as alias]
def model_g2master_g(model_params, master_params,
                     flat_master: bool = False) -> None:
    "Copy the `model_params` gradients to `master_params` for the optimizer step."
    if flat_master:
        for model_group, master_group in zip(model_params, master_params):
            if len(master_group) != 0:
                master_group[0].grad.data.copy_(
                    parameters_to_vector(
                        [p.grad.data.float() for p in model_group]))
    else:
        for model_group, master_group in zip(model_params, master_params):
            for model, master in zip(model_group, master_group):
                if model.grad is not None:
                    if master.grad is None:
                        master.grad = master.data.new(*master.data.size())
                    master.grad.data.copy_(model.grad.data)
                else:
                    master.grad = None 
Developer: traveller59, Project: second.pytorch, Lines: 20, Source: fastai_optim.py
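A small standalone sketch of the flat_master gradient copy (assumed shapes, not from the repo): the per-parameter FP16 gradients are flattened and written into the single FP32 master gradient buffer.

import torch
from torch.nn.utils import parameters_to_vector

grads = [torch.randn(2, 3).half(), torch.randn(4).half()]  # FP16 model grads
master_grad = torch.zeros(10)                              # flat FP32 buffer

# The same copy as master_group[0].grad.data.copy_(...) above.
master_grad.copy_(parameters_to_vector([g.float() for g in grads]))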

Example 4: model_g2master_g

# Required import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import parameters_to_vector [as alias]
def model_g2master_g(model_params, master_params, flat_master: bool = False) -> None:
    "Copy the `model_params` gradients to `master_params` for the optimizer step."
    if flat_master:
        for model_group, master_group in zip(model_params, master_params):
            if len(master_group) != 0:
                master_group[0].grad.data.copy_(
                    parameters_to_vector([p.grad.data.float() for p in model_group])
                )
    else:
        for model_group, master_group in zip(model_params, master_params):
            for model, master in zip(model_group, master_group):
                if model.grad is not None:
                    if master.grad is None:
                        master.grad = master.data.new(*master.data.size())
                    master.grad.data.copy_(model.grad.data)
                else:
                    master.grad = None 
Developer: poodarchu, Project: Det3D, Lines: 19, Source: fastai_optim.py

Example 5: model_g2master_g

# Required import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import parameters_to_vector [as alias]
def model_g2master_g(model_params, master_params, flat_master: bool = False) -> None:
    "Copy the `model_params` gradients to `master_params` for the optimizer step."
    if flat_master:
        for model_group, master_group in zip(model_params, master_params):
            if len(master_group) != 0:
                master_group[0].grad.data.copy_(parameters_to_vector([p.grad.data.float() for p in model_group]))
    else:
        for model_group, master_group in zip(model_params, master_params):
            for model, master in zip(model_group, master_group):
                if model.grad is not None:
                    if master.grad is None: master.grad = master.data.new(*master.data.size())
                    master.grad.data.copy_(model.grad.data)
                else:
                    master.grad = None 
Developer: sshaoshuai, Project: PointRCNN, Lines: 16, Source: fastai_optim.py

Example 6: add_weight_noise

# Required import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import parameters_to_vector [as alias]
def add_weight_noise(self, std=0.075):
    """Add variational weight noise to weight parameters.

    Args:
        std (float): standard deviation

    """
    with torch.no_grad():
        param_vector = parameters_to_vector(self.parameters())
        normal_dist = torch.distributions.Normal(loc=torch.tensor([0.]), scale=torch.tensor([std]))
        # sample() returns shape (n_params, 1); squeeze to one i.i.d. draw per weight
        noise = normal_dist.sample(param_vector.size()).squeeze(-1)
        if self.device_id >= 0:
            noise = noise.cuda(self.device_id)
        param_vector.add_(noise)
    vector_to_parameters(param_vector, self.parameters())
Developer: hirofumi0810, Project: neural_sp, Lines: 17, Source: base.py
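A rough standalone version of the same idea, with assumed names and without the device_id handling: draw i.i.d. Gaussian noise for every weight and write the perturbed vector back.

import torch
from torch import nn
from torch.nn.utils import parameters_to_vector, vector_to_parameters

model = nn.Linear(4, 2)
with torch.no_grad():
    vec = parameters_to_vector(model.parameters())
    vec += torch.randn_like(vec) * 0.075  # std as in the example above
    vector_to_parameters(vec, model.parameters())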

Example 7: flat_grad

# Required import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import parameters_to_vector [as alias]
def flat_grad(f, param, **kwargs):
    return parameters_to_vector(torch.autograd.grad(f, param, **kwargs)) 
Developer: kashif, Project: firedup, Lines: 4, Source: core.py

Example 8: hessian_vector_product

# Required import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import parameters_to_vector [as alias]
def hessian_vector_product(d_kl, x):
  g = parameters_to_vector(autograd.grad(d_kl, agent.actor.parameters(), create_graph=True))
  return parameters_to_vector(autograd.grad((g * x.detach()).sum(), agent.actor.parameters(), retain_graph=True)) + DAMPING_COEFF * x 
Developer: Kaixhin, Project: spinning-up-basic, Lines: 5, Source: trpo.py
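The same Pearlmutter double-backward trick in self-contained form; net, the random inputs, and v below are hypothetical stand-ins for the agent's actor, its observations, and the direction vector:

import torch
from torch import autograd, nn
from torch.nn.utils import parameters_to_vector

net = nn.Linear(3, 1)
loss = net(torch.randn(8, 3)).pow(2).mean()

# First backward pass keeps the graph so g stays differentiable.
g = parameters_to_vector(autograd.grad(loss, net.parameters(), create_graph=True))
v = torch.randn_like(g)  # direction vector (detached)
# Differentiating g.v gives the Hessian-vector product H @ v.
hvp = parameters_to_vector(autograd.grad((g * v).sum(), net.parameters()))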

Example 9: step

# Required import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import parameters_to_vector [as alias]
def step(self, closure=None, thr=1e-2, eps=1e-9):
    loss = None
    if closure is not None:
        loss = closure()
        world_size = self.dist.get_world_size()
        grads = [p.grad for p in self.model.parameters()]
        # pack
        packed_tensor = parameters_to_vector(grads)
        # all reduce
        self.dist.all_reduce(packed_tensor)
        # unpack
        vector_to_parameters(packed_tensor.div_(world_size), grads)

    if self.lars:
        for group in self.param_groups:
            for p in group['params']:
                setattr(p, 'data_pre', p.data.detach().clone())

    self.actual_optimizer.step(closure=None)

    if self.lars:
        for group in self.param_groups:
            for p in group['params']:
                d_norm_pre = p.data_pre.norm()
                if d_norm_pre > thr:
                    upd = p.data - p.data_pre
                    upd_norm = upd.norm()
                    rate = group['lr'] * d_norm_pre / (upd_norm + eps)
                    p.data = p.data_pre.add(rate, upd)

    return loss
Developer: cybertronai, Project: pytorch-sso, Lines: 33, Source: firstorder.py
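The pack / all-reduce / unpack pattern in isolation, with the collective replaced by a local sum of two gradient sets so the sketch runs without a torch.distributed process group:

import torch
from torch.nn.utils import parameters_to_vector, vector_to_parameters

grads_a = [torch.ones(2, 2), torch.ones(3)]
grads_b = [torch.zeros(2, 2), torch.zeros(3)]

packed = parameters_to_vector(grads_a)
packed += parameters_to_vector(grads_b)        # stand-in for dist.all_reduce (SUM)
vector_to_parameters(packed.div_(2), grads_a)  # unpack the average
print(grads_a[0])                              # tensor of 0.5s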

Example 10: get_master

# Required import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import parameters_to_vector [as alias]
def get_master(layer_groups, flat_master: bool = False):
    "Return two lists, one for the model parameters in FP16 and one for the master parameters in FP32."
    split_groups = split_bn_bias(layer_groups)
    model_params = [
        [param for param in lg.parameters() if param.requires_grad]
        for lg in split_groups
    ]
    if flat_master:
        master_params = []
        for lg in model_params:
            if len(lg) != 0:
                mp = parameters_to_vector([param.data.float() for param in lg])
                mp = torch.nn.Parameter(mp, requires_grad=True)
                if mp.grad is None:
                    mp.grad = mp.new(*mp.size())
                master_params.append([mp])
            else:
                master_params.append([])
        return model_params, master_params
    else:
        master_params = [
            [param.clone().float().detach() for param in lg] for lg in model_params
        ]
        for mp in master_params:
            for param in mp:
                param.requires_grad = True
        return model_params, master_params 
Developer: poodarchu, Project: Det3D, Lines: 29, Source: fastai_optim.py

Example 11: vector_to_parameter_list

# Required import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import parameters_to_vector [as alias]
def vector_to_parameter_list(vec, parameters):
    """
    Convert the vector `vec` to a parameter-list format matching `parameters`.

    This function is the inverse of `parameters_to_vector` from the
    pytorch module `torch.nn.utils.convert_parameters`.
    Contrary to `vector_to_parameters`, which replaces the value
    of the parameters, this function leaves the parameters unchanged and
    returns a list of parameter views of the vector.

    ```
    from torch.nn.utils import parameters_to_vector

    vector_view = parameters_to_vector(parameters)
    param_list_view = vector_to_parameter_list(vector_view, parameters)

    for a, b in zip(parameters, param_list_view):
        assert torch.allclose(a, b)
    ```

    Parameters:
    -----------
        vec: Tensor
            a single vector representing the parameters of a model
        parameters: (Iterable[Tensor])
            an iterable of Tensors with the desired shapes.
    """
    # Ensure vec of type Tensor
    if not isinstance(vec, torch.Tensor):
        raise TypeError(
            "expected torch.Tensor, but got: {}".format(torch.typename(vec))
        )
    params_new = []
    # Pointer for slicing the vector for each parameter
    pointer = 0
    for param in parameters:
        # The length of the parameter
        num_param = param.numel()
        # Slice the vector, reshape it
        param_new = vec[pointer : pointer + num_param].view_as(param).data
        params_new.append(param_new)
        # Increment the pointer
        pointer += num_param

    return params_new 
Developer: f-dangel, Project: backpack, Lines: 47, Source: convert_parameters.py
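As the docstring notes, the returned entries are views into vec; a quick check of that claim, assuming the vector_to_parameter_list definition above is in scope:

import torch

params = [torch.zeros(2, 3), torch.zeros(4)]
vec = torch.arange(10.0)

views = vector_to_parameter_list(vec, params)
assert views[0].shape == (2, 3) and views[1].shape == (4,)

views[0][0, 0] = -1.0          # the views share storage with vec...
assert vec[0].item() == -1.0   # ...so writes propagate back to the vector
assert params[0][0, 0] == 0.0  # while the original parameters stay untouched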

Example 12: trpo_update

# Required import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import parameters_to_vector [as alias]
def trpo_update(replay, policy, baseline):
    gamma = 0.99
    tau = 0.95
    max_kl = 0.01
    ls_max_steps = 15
    backtrack_factor = 0.5
    old_policy = deepcopy(policy)
    for step in range(10):
        states = replay.state()
        actions = replay.action()
        rewards = replay.reward()
        dones = replay.done()
        next_states = replay.next_state()
        returns = ch.td.discount(gamma, rewards, dones)
        baseline.fit(states, returns)
        values = baseline(states)
        next_values = baseline(next_states)

        # Compute KL
        with th.no_grad():
            old_density = old_policy.density(states)
        new_density = policy.density(states)
        kl = kl_divergence(old_density, new_density).mean()

        # Compute surrogate loss
        old_log_probs = old_density.log_prob(actions).mean(dim=1, keepdim=True)
        new_log_probs = new_density.log_prob(actions).mean(dim=1, keepdim=True)
        bootstraps = values * (1.0 - dones) + next_values * dones
        advantages = ch.pg.generalized_advantage(gamma, tau, rewards,
                                                 dones, bootstraps, th.zeros(1))
        advantages = ch.normalize(advantages).detach()
        surr_loss = trpo.policy_loss(new_log_probs, old_log_probs, advantages)

        # Compute the update
        grad = autograd.grad(surr_loss,
                             policy.parameters(),
                             retain_graph=True)
        Fvp = trpo.hessian_vector_product(kl, policy.parameters())
        grad = parameters_to_vector(grad).detach()
        step = trpo.conjugate_gradient(Fvp, grad)
        lagrange_mult = 0.5 * th.dot(step, Fvp(step)) / max_kl
        step = step / lagrange_mult
        step_ = [th.zeros_like(p.data) for p in policy.parameters()]
        vector_to_parameters(step, step_)
        step = step_

        #  Line-search
        for ls_step in range(ls_max_steps):
            stepsize = backtrack_factor**ls_step
            clone = deepcopy(policy)
            for c, u in zip(clone.parameters(), step):
                c.data.add_(-stepsize, u.data)
            new_density = clone.density(states)
            new_kl = kl_divergence(old_density, new_density).mean()
            new_log_probs = new_density.log_prob(actions).mean(dim=1, keepdim=True)
            new_loss = trpo.policy_loss(new_log_probs, old_log_probs, advantages)
            if new_loss < surr_loss and new_kl < max_kl:
                for p, c in zip(policy.parameters(), clone.parameters()):
                    p.data[:] = c.data[:]
                break 
Developer: learnables, Project: cherry, Lines: 62, Source: trpo_v_random.py

Example 13: conjugate_gradient

# Required import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import parameters_to_vector [as alias]
def conjugate_gradient(Ax, b, num_iterations=10, tol=1e-10, eps=1e-8):
    """
    [[Source]](https://github.com/seba-1511/cherry/blob/master/cherry/algorithms/trpo.py)

    **Description**

    Computes \\(x = A^{-1}b\\) using the conjugate gradient algorithm.

    **Credit**

    Adapted from Kai Arulkumaran's implementation, with additions inspired by John Schulman's implementation.

    **References**

    1. Nocedal and Wright. 2006. "Numerical Optimization, 2nd edition". Springer.
    2. Shewchuk. 1994. “An Introduction to the Conjugate Gradient Method without the Agonizing Pain.” CMU.

    **Arguments**

    * **Ax** (callable) - Given a vector x, computes A@x.
    * **b** (tensor or list) - The reference vector.
    * **num_iterations** (int, *optional*, default=10) - Number of conjugate gradient iterations.
    * **tol** (float, *optional*, default=1e-10) - Tolerance for proposed solution.
    * **eps** (float, *optional*, default=1e-8) - Numerical stability constant.

    **Returns**

    * **x** (tensor or list) - The solution to Ax = b, as a list if b is a list else a tensor.

    **Example**

    ~~~python
    pass
    ~~~
    """
    shape = None
    if not isinstance(b, th.Tensor):
        shape = [th.zeros_like(b_i) for b_i in b]
        b = parameters_to_vector(b)
    x = th.zeros_like(b)
    r = b.clone()  # clone so the in-place update below does not mutate b
    p = r.clone()  # and so p keeps the old residual while r is updated
    r_dot_old = th.dot(r, r)
    for _ in range(num_iterations):
        Ap = Ax(p)
        alpha = r_dot_old / (th.dot(p, Ap) + eps)
        x += alpha * p
        r -= alpha * Ap
        r_dot_new = th.dot(r, r)
        p = r + (r_dot_new / r_dot_old) * p
        r_dot_old = r_dot_new
        if r_dot_new.item() < tol:
            break
    if shape is not None:
        vector_to_parameters(x, shape)
        x = shape
    return x 
Developer: learnables, Project: cherry, Lines: 59, Source: trpo.py
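A quick sanity check of the solver on a small symmetric positive-definite system (hypothetical usage, not from the cherry repo; assumes the conjugate_gradient definition above):

import torch as th

A = th.tensor([[4.0, 1.0],
               [1.0, 3.0]])
b = th.tensor([1.0, 2.0])

# Solve A x = b without ever forming A^{-1}.
x = conjugate_gradient(lambda v: A @ v, b, num_iterations=25)
assert th.allclose(x, th.linalg.solve(A, b), atol=1e-4)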


Note: The torch.nn.utils.parameters_to_vector examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers; copyright remains with the original authors, and each project's License governs redistribution and use. Do not reproduce this compilation without permission.