

Python torch.numel Method Code Examples

This article collects typical usage examples of the Python torch.numel method. If you have been wondering how exactly to use torch.numel, or are looking for working examples of it, the curated code samples below may help. You can also explore further usage examples from the torch module that the method belongs to.


The following presents 15 code examples of torch.numel, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
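Before the project code, here is a minimal sketch of what torch.numel actually does (an illustration of my own, not taken from any of the projects below): it returns the total number of elements in a tensor, equivalent to the Tensor.numel() method that several examples call directly.

import torch

x = torch.zeros(4, 3, 2)
print(torch.numel(x))                  # 24: the product of all dimension sizes (4 * 3 * 2)
print(x.numel())                       # 24: the equivalent method form
print(torch.numel(torch.tensor(5.0))) # 1: a 0-dim scalar tensor still holds one element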

Example 1: grad_sparsity

# Required import: import torch [as alias]
# Or: from torch import numel [as alias]
def grad_sparsity(self):
    global_state = self._global_state
    if self._iter == 0:
      global_state["sparsity_avg"] = 0.0
    non_zero_cnt = 0.0
    all_entry_cnt = 0.0
    for group in self._optimizer.param_groups:
      for p in group['params']:
        if p.grad is None:
          continue
        grad = p.grad.data
        grad_non_zero = grad.nonzero()
        if grad_non_zero.dim() > 0:
          non_zero_cnt += grad_non_zero.size()[0]
        all_entry_cnt += torch.numel(grad)
    beta = self._beta
    global_state["sparsity_avg"] = beta * global_state["sparsity_avg"] \
      + (1 - beta) * non_zero_cnt / float(all_entry_cnt)
    self._sparsity_avg = \
      global_state["sparsity_avg"] / self.zero_debias_factor()
    
    if DEBUG:
      logging.debug("sparsity %f, sparsity avg %f", non_zero_cnt / float(all_entry_cnt), self._sparsity_avg)

    return 
Author: JianGoForIt | Project: YellowFin_Pytorch | Lines: 27 | Source: yellowfin_backup.py

Example 2: grad_sparsity

# Required import: import torch [as alias]
# Or: from torch import numel [as alias]
def grad_sparsity(self):
    global_state = self._global_state
    if self._iter == 0:
      global_state["sparsity_avg"] = 0.0
    non_zero_cnt = 0.0
    all_entry_cnt = 0.0
    for group in self._optimizer.param_groups:
      for p in group['params']:
        if p.grad is None:
          continue
        grad = p.grad.data
        grad_non_zero = grad.nonzero()
        if grad_non_zero.dim() > 0:
          non_zero_cnt += grad_non_zero.size()[0]
        all_entry_cnt += torch.numel(grad)
    beta = self._beta
    global_state["sparsity_avg"] = beta * global_state["sparsity_avg"] \
      + (1 - beta) * non_zero_cnt / float(all_entry_cnt)
    self._sparsity_avg = \
      global_state["sparsity_avg"] / self.zero_debias_factor()
    
    if self._verbose:
      logging.debug("sparsity %f, sparsity avg %f", non_zero_cnt / float(all_entry_cnt), self._sparsity_avg)

    return 
Author: JianGoForIt | Project: YellowFin_Pytorch | Lines: 27 | Source: yellowfin.py

Example 3: grad_sparsity

# Required import: import torch [as alias]
# Or: from torch import numel [as alias]
def grad_sparsity(self):
        global_state = self._global_state
        if self._iter == 0:
            global_state["sparsity_avg"] = 0.0
        non_zero_cnt = 0.0
        all_entry_cnt = 0.0
        for group in self._optimizer.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                grad_non_zero = grad.nonzero()
                if grad_non_zero.dim() > 0:
                    non_zero_cnt += grad_non_zero.size()[0]
                all_entry_cnt += torch.numel(grad)
        beta = self._beta
        global_state["sparsity_avg"] = beta * global_state["sparsity_avg"] \
                                       + (1 - beta) * non_zero_cnt / float(all_entry_cnt)
        self._sparsity_avg = \
            global_state["sparsity_avg"] / self.zero_debias_factor()

        if self._verbose:
            logging.debug("sparsity %f, sparsity avg %f", non_zero_cnt / float(all_entry_cnt), self._sparsity_avg)

        return 
Author: ansleliu | Project: LightNet | Lines: 27 | Source: yellowfin.py

Example 4: density

# Required import: import torch [as alias]
# Or: from torch import numel [as alias]
def density(tensor):
    """Computes the density of a tensor.

    Density is the fraction of non-zero elements in a tensor.
    If a tensor has a density of 1.0, then it has no zero elements.

    Args:
        tensor: the tensor for which we compute the density.

    Returns:
        density (float)
    """
    nonzero = torch.nonzero(tensor)
    if nonzero.dim() == 0:
        return 0.0
    return nonzero.size(0) / float(torch.numel(tensor)) 
Author: cornell-zhang | Project: dnn-quant-ocs | Lines: 18 | Source: utils.py
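As a quick sanity check of density above (a hypothetical call of my own, not part of the dnn-quant-ocs repository):

import torch

t = torch.tensor([[0.0, 1.0], [2.0, 0.0]])
print(density(t))                  # 0.5: two of the four entries are non-zero
print(density(torch.zeros(3, 3)))  # 0.0: an all-zero tensor has density zero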

Example 5: log_weights_sparsity

# Required import: import torch [as alias]
# Or: from torch import numel [as alias]
def log_weights_sparsity(self, model, epoch):
        params_size = 0
        sparse_params_size = 0

        for name, param in model.state_dict().items():
            if param.dim() in [2, 4]:
                _density = density(param)
                params_size += torch.numel(param)
                sparse_params_size += param.numel() * _density
                self.tblogger.scalar_summary('sparsity/weights/' + name,
                                             sparsity(param)*100, epoch)
                self.tblogger.scalar_summary('sparsity-2D/weights/' + name,
                                             sparsity_2D(param)*100, epoch)

        self.tblogger.scalar_summary("sprasity/weights/total", 100*(1 - sparse_params_size/params_size), epoch)
        self.tblogger.sync_to_file() 
Author: cornell-zhang | Project: dnn-quant-ocs | Lines: 18 | Source: logger.py

Example 6: _calc_apoz

# Required import: import torch [as alias]
# Or: from torch import numel [as alias]
def _calc_apoz(self, activations):
        """
        Calculate APoZ(average percentage of zeros) of activations.

        Parameters
        ----------
        activations : list
            Layer's output activations

        Returns
        -------
        torch.Tensor
            Filter's APoZ(average percentage of zeros) of the activations
        """
        activations = torch.cat(activations, 0)
        _eq_zero = torch.eq(activations, torch.zeros_like(activations))
        _apoz = torch.sum(_eq_zero, dim=(0, 2, 3)) / torch.numel(_eq_zero[:, 0, :, :])
        return _apoz 
Author: microsoft | Project: nni | Lines: 20 | Source: structured_pruning.py
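To make the APoZ arithmetic concrete, here is a standalone sketch with made-up NCHW activations (the shapes and names are my own, not from nni):

import torch

activations = [torch.relu(torch.randn(2, 8, 5, 5)) for _ in range(3)]  # three NCHW batches
acts = torch.cat(activations, 0)                  # shape (6, 8, 5, 5)
eq_zero = torch.eq(acts, torch.zeros_like(acts))  # True wherever an activation is exactly zero
# Per-filter zero fraction: sum zeros over batch and spatial dims, divide by entries per channel
apoz = torch.sum(eq_zero, dim=(0, 2, 3)) / torch.numel(eq_zero[:, 0, :, :])
print(apoz.shape)  # torch.Size([8]): one APoZ value per filter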

Example 7: grad_sparsity

# Required import: import torch [as alias]
# Or: from torch import numel [as alias]
def grad_sparsity(self):
    global_state = self._global_state
    if self._iter == 0:
      global_state["sparsity_avg"] = 0.0
    non_zero_cnt = 0.0
    all_entry_cnt = 0.0
    for group in self._optimizer.param_groups:
      for p in group['params']:
        if p.grad is None:
          continue
        grad = p.grad.data
        grad_non_zero = grad.nonzero()
        if grad_non_zero.dim() > 0:
          non_zero_cnt += grad_non_zero.size()[0]
        all_entry_cnt += torch.numel(grad)
    beta = self._beta
    global_state["sparsity_avg"] = beta * global_state["sparsity_avg"] \
      + (1 - beta) * non_zero_cnt / float(all_entry_cnt)
    self._sparsity_avg = \
      global_state["sparsity_avg"] / self.zero_debias_factor()
    return 
Author: eric-xw | Project: AREL | Lines: 23 | Source: yellowfin.py

Example 8: to_float

# Required import: import torch [as alias]
# Or: from torch import numel [as alias]
def to_float(val):
    """ Check that val is one of the following:
    - pytorch autograd Variable with one element
    - pytorch tensor with one element
    - numpy array with one element
    - any type supporting float() operation
    And convert val to float
    """

    n_elements = 1
    if isinstance(val, np.ndarray):
        n_elements = val.size
    elif torch is not None and (isinstance(val, torch_autograd.Variable) or torch.is_tensor(val)):
        n_elements = torch.numel(val)

    assert n_elements == 1, \
        "val should have one element (got {})".format(n_elements)
    try:
        return float(val)
    except (TypeError, ValueError):
        raise TypeError("Unsupported type for val ({})".format(type(val))) 
Author: oval-group | Project: mlogger | Lines: 23 | Source: to_float.py
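A few hedged example calls showing what to_float accepts and rejects (assuming numpy and torch are both importable):

import numpy as np
import torch

print(to_float(torch.tensor([3.5])))  # 3.5: single-element tensor
print(to_float(np.array([2])))        # 2.0: single-element ndarray
print(to_float(7))                    # 7.0: anything float() understands
# to_float(torch.ones(2, 2)) would raise AssertionError: val should have one element (got 4)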

Example 9: _perturb_func

# Required import: import torch [as alias]
# Or: from torch import numel [as alias]
def _perturb_func(inputs):
    def perturb_ratio(input):
        return (
            torch.arange(-torch.numel(input[0]) // 2, torch.numel(input[0]) // 2)
            .view(input[0].shape)
            .float()
            / 100
        )

    if isinstance(inputs, tuple):
        input1 = inputs[0]
        input2 = inputs[1]
    else:
        input1 = inputs
        input2 = None

    perturbed_input1 = input1 + perturb_ratio(input1)

    if input2 is None:
        return perturbed_input1

    return perturbed_input1, input2 + perturb_ratio(input2) 
Author: pytorch | Project: captum | Lines: 24 | Source: test_sensitivity.py

Example 10: construct_feature_mask

# Required import: import torch [as alias]
# Or: from torch import numel [as alias]
def construct_feature_mask(
        self, inputs: Tuple[Tensor, ...]
    ) -> Tuple[Tuple[Tensor, ...], int]:
        feature_mask = []
        current_num_features = 0
        for i in range(len(inputs)):
            num_features = torch.numel(inputs[i][0])
            feature_mask.append(
                current_num_features
                + torch.reshape(
                    torch.arange(num_features, device=inputs[i].device),
                    inputs[i][0:1].shape,
                )
            )
            current_num_features += num_features
        total_features = current_num_features
        feature_mask = tuple(feature_mask)
        return feature_mask, total_features 
Author: pytorch | Project: captum | Lines: 20 | Source: shapley_value.py

Example 11: l2_loss

# Required import: import torch [as alias]
# Or: from torch import numel [as alias]
def l2_loss(pred_traj, pred_traj_gt, loss_mask, random=0, mode='average'):
    """
    Input:
    - pred_traj: Tensor of shape (seq_len, batch, 2). Predicted trajectory.
    - pred_traj_gt: Tensor of shape (seq_len, batch, 2). Ground truth
    predictions.
    - loss_mask: Tensor of shape (batch, seq_len)
    - mode: Can be one of sum, average, raw
    Output:
    - loss: l2 loss depending on mode
    """
    seq_len, batch, _ = pred_traj.size()
    loss = (loss_mask.unsqueeze(dim=2) *
            (pred_traj_gt.permute(1, 0, 2) - pred_traj.permute(1, 0, 2))**2)
    if mode == 'sum':
        return torch.sum(loss)
    elif mode == 'average':
        return torch.sum(loss) / torch.numel(loss_mask.data)
    elif mode == 'raw':
        return loss.sum(dim=2).sum(dim=1) 
Author: agrimgupta92 | Project: sgan | Lines: 22 | Source: losses.py
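A hedged smoke test for l2_loss with made-up shapes (seq_len=8, batch=4, matching the docstring):

import torch

pred = torch.randn(8, 4, 2)   # (seq_len, batch, 2)
gt = torch.randn(8, 4, 2)
mask = torch.ones(4, 8)       # (batch, seq_len); 1 means the position contributes to the loss
print(l2_loss(pred, gt, mask, mode='average').item())  # scalar: total loss / numel(mask)
print(l2_loss(pred, gt, mask, mode='raw').shape)       # torch.Size([4]): one loss per sample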

Example 12: var_loss_function_joint

# Required import: import torch [as alias]
# Or: from torch import numel [as alias]
def var_loss_function_joint(output_samples_classification, target, output_samples_recon, inp, mu, std, device):
    recon_loss = nn.BCEWithLogitsLoss(reduction='sum')
    class_loss = nn.CrossEntropyLoss(reduction='sum')

    # Place-holders for the final loss values over all latent space samples
    recon_losses = torch.zeros(output_samples_recon.size(0)).to(device)
    cl_losses = torch.zeros(output_samples_classification.size(0)).to(device)

    # small constant for numerical stability of the log computation
    eps = 1e-8

    # Loop through each latent-space sample for each input, compute the corresponding loss, and normalize it.
    for i in range(output_samples_classification.size(0)):
        cl_losses[i] = class_loss(output_samples_classification[i], target) / torch.numel(target)
        recon_losses[i] = recon_loss(output_samples_recon[i], inp) / torch.numel(inp)

    # average the loss over all samples per input
    cl = torch.mean(cl_losses, dim=0)
    rl = torch.mean(recon_losses, dim=0)

    # Compute the KL divergence, normalized by latent dimensionality
    kld = -0.5 * torch.sum(1 + torch.log(eps + std ** 2) - (mu ** 2) - (std ** 2)) / torch.numel(mu)

    return cl, rl, kld 
Author: MrtnMndt | Project: Deep_Openset_Recognition_through_Uncertainty | Lines: 26 | Source: loss_functions.py

Example 13: forward

# Required import: import torch [as alias]
# Or: from torch import numel [as alias]
def forward(self, x):
    bsn = 1
    batchSize, dim, h, w = x.data.shape
    x_flat = x.permute(0, 2, 3, 1).contiguous().view(-1, dim)  # (batchSize, h, w, dim) flattened
    y = torch.ones(batchSize, self.output_dim, device=x.device)

    for img in range(batchSize // bsn):
        segLen = bsn * h * w
        upper = batchSize * h * w
        interLarge = torch.arange(img * segLen, min(upper, (img + 1) * segLen), dtype=torch.long)
        interSmall = torch.arange(img * bsn, min(upper, (img + 1) * bsn), dtype=torch.long)
        batch_x = x_flat[interLarge, :]

        # Note: torch.fft/torch.ifft below are the function-style API removed in PyTorch 1.8;
        # this snippet targets older PyTorch releases.
        sketch1 = batch_x.mm(self.sparseM[0].to(x.device)).unsqueeze(2)
        sketch1 = torch.fft(torch.cat((sketch1, torch.zeros(sketch1.size(), device=x.device)), dim=2), 1)

        sketch2 = batch_x.mm(self.sparseM[1].to(x.device)).unsqueeze(2)
        sketch2 = torch.fft(torch.cat((sketch2, torch.zeros(sketch2.size(), device=x.device)), dim=2), 1)

        # Complex multiplication of the two sketches (real/imaginary parts kept in the last dim)
        Re = sketch1[:, :, 0].mul(sketch2[:, :, 0]) - sketch1[:, :, 1].mul(sketch2[:, :, 1])
        Im = sketch1[:, :, 0].mul(sketch2[:, :, 1]) + sketch1[:, :, 1].mul(sketch2[:, :, 0])

        tmp_y = torch.ifft(torch.cat((Re.unsqueeze(2), Im.unsqueeze(2)), dim=2), 1)[:, :, 0]

        y[interSmall, :] = tmp_y.view(torch.numel(interSmall), h, w, self.output_dim).sum(dim=1).sum(dim=1)

    y = self._signed_sqrt(y)
    y = self._l2norm(y)
    return y 
Author: jiangtaoxie | Project: fast-MPN-COV | Lines: 31 | Source: CBP.py

Example 14: get_num_parameters

# Required import: import torch [as alias]
# Or: from torch import numel [as alias]
def get_num_parameters(model):
    """
    Returns the number of trainable parameters in a model of type nn.Module
    :param model: nn.Module containing trainable parameters
    :return: number of trainable parameters in model
    """
    num_parameters = 0
    for parameter in model.parameters():
        num_parameters += torch.numel(parameter)
    return num_parameters 
Author: bayesiains | Project: nsf | Lines: 12 | Source: torchutils.py
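For instance (a quick check of my own, not part of the nsf repository):

import torch.nn as nn

model = nn.Linear(10, 5)          # 10*5 weights + 5 biases = 55 parameters
print(get_num_parameters(model))  # 55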

Example 15: loss

# Required import: import torch [as alias]
# Or: from torch import numel [as alias]
def loss(self, proposal_classes: Tensor, gt_proposal_classes: Tensor, batch_size, batch_indices) -> Tensor:
    cross_entropies = torch.zeros(batch_size, dtype=torch.float, device=proposal_classes.device)
    for batch_index in range(batch_size):
        selected_indices = (batch_indices == batch_index).nonzero().view(-1)
        input = proposal_classes[selected_indices]
        target = gt_proposal_classes[selected_indices]
        if torch.numel(input) == 0 or torch.numel(target) == 0:
            # No proposals fell into this batch slot; leave its loss at zero.
            continue
        assert torch.numel(input) == torch.numel(target)
        cross_entropy = F.multilabel_soft_margin_loss(input=input, target=target, reduction="mean")
        cross_entropies[batch_index] = cross_entropy
    return cross_entropies 
Author: MagicChuyi | Project: SlowFast-Network-pytorch | Lines: 32 | Source: model.py


Note: The torch.numel examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, who retain copyright of the source code; please consult each project's license before redistributing or using it. Do not reproduce this article without permission.