

Python torch.abs Method Code Examples

This article collects and summarizes typical usage examples of the torch.abs method in Python. If you are wondering how torch.abs is used in practice, how to call it, or what real-world examples look like, the curated code examples below may help. You can also explore further usage examples from the torch module that this method belongs to.


The following presents 15 code examples of the torch.abs method, ordered by popularity.
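Before the examples, a minimal sketch of the basic call: torch.abs returns the element-wise absolute value of its input tensor (the method form Tensor.abs() is equivalent).

import torch

x = torch.tensor([-1.5, 0.0, 2.0])
print(torch.abs(x))   # tensor([1.5000, 0.0000, 2.0000])
print(x.abs())        # method form, same result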

Example 1: _get_uncertainty

# Required import: import torch
# Or: from torch import abs
def _get_uncertainty(self, mask_pred, labels):
        """Estimate uncertainty based on pred logits.

        We estimate uncertainty as L1 distance between 0.0 and the logits
        prediction in 'mask_pred' for the foreground class in `labels`.

        Args:
            mask_pred (Tensor): mask prediction logits, shape (num_rois,
                num_classes, mask_height, mask_width).

            labels (list[Tensor]): Either predicted or ground truth label for
                each predicted mask, of length num_rois.

        Returns:
            scores (Tensor): Uncertainty scores with the most uncertain
                locations having the highest uncertainty score,
                shape (num_rois, 1, mask_height, mask_width)
        """
        if mask_pred.shape[1] == 1:
            gt_class_logits = mask_pred.clone()
        else:
            inds = torch.arange(mask_pred.shape[0], device=mask_pred.device)
            gt_class_logits = mask_pred[inds, labels].unsqueeze(1)
        return -torch.abs(gt_class_logits) 
Developer: open-mmlab | Project: mmdetection | Lines: 26 | Source: mask_point_head.py
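For illustration only, a standalone sketch of the same idea outside the class, with made-up shapes and labels: the uncertainty score is the negated absolute logit of the assigned class, so logits close to zero are scored as most uncertain.

import torch

mask_pred = torch.randn(2, 3, 7, 7)                    # (num_rois, num_classes, mask_height, mask_width)
labels = torch.tensor([0, 2])                          # one class index per RoI
inds = torch.arange(mask_pred.shape[0])
gt_class_logits = mask_pred[inds, labels].unsqueeze(1)
uncertainty = -torch.abs(gt_class_logits)              # (num_rois, 1, mask_height, mask_width)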

Example 2: smooth_l1_loss

# Required import: import torch
# Or: from torch import abs
def smooth_l1_loss(pred, target, beta=1.0):
    """Smooth L1 loss.

    Args:
        pred (torch.Tensor): The prediction.
        target (torch.Tensor): The learning target of the prediction.
        beta (float, optional): The threshold in the piecewise function.
            Defaults to 1.0.

    Returns:
        torch.Tensor: Calculated loss
    """
    assert beta > 0
    assert pred.size() == target.size() and target.numel() > 0
    diff = torch.abs(pred - target)
    loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
                       diff - 0.5 * beta)
    return loss 
Developer: open-mmlab | Project: mmdetection | Lines: 20 | Source: smooth_l1_loss.py
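A minimal usage sketch of the function above, with made-up shapes; the function returns an element-wise loss, so a reduction is applied before backpropagation.

import torch

pred = torch.randn(8, 4, requires_grad=True)
target = torch.randn(8, 4)
loss = smooth_l1_loss(pred, target, beta=1.0)   # element-wise loss, same shape as pred
loss.mean().backward()                          # reduce to a scalar before calling backward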

Example 3: plot_examples

# Required import: import torch
# Or: from torch import abs
def plot_examples(data_loader, model, epoch, plotter, ind = [0, 10, 20]):

    # switch to evaluate mode
    model.eval()

    for i, (g, h, e, target) in enumerate(data_loader):
        if i in ind:
            subfolder_path = 'batch_' + str(i) + '_t_' + str(int(target[0][0])) + '/epoch_' + str(epoch) + '/'
            if not os.path.isdir(args.plotPath + subfolder_path):
                os.makedirs(args.plotPath + subfolder_path)

            num_nodes = torch.sum(torch.sum(torch.abs(h[0, :, :]), 1) > 0)
            am = g[0, 0:num_nodes, 0:num_nodes].numpy()
            pos = h[0, 0:num_nodes, :].numpy()

            plotter.plot_graph(am, position=pos, fig_name=subfolder_path+str(i) + '_input.png')

            # Prepare input data
            if args.cuda:
                g, h, e, target = g.cuda(), h.cuda(), e.cuda(), target.cuda()
            g, h, e, target = Variable(g), Variable(h), Variable(e), Variable(target)

            # Compute output
            model(g, h, e, lambda cls, id: plotter.plot_graph(am, position=pos, cls=cls,
                                                          fig_name=subfolder_path+ id)) 
Developer: priba | Project: nmp_qc | Lines: 27 | Source: demo_letter_duvenaud.py

Example 4: forward

# Required import: import torch
# Or: from torch import abs
def forward(self, images):
        """Extract image feature vectors."""
        features = self.cnn(images)

        # normalization in the image embedding space
        features = l2norm(features)

        # linear projection to the joint embedding space
        features = self.fc(features)

        # normalization in the joint embedding space
        if not self.no_imgnorm:
            features = l2norm(features)

        # take the absolute value of the embedding (used in order embeddings)
        if self.use_abs:
            features = torch.abs(features)

        return features 
Developer: ExplorerFreda | Project: VSE-C | Lines: 21 | Source: model.py

Example 5: _smooth_l1_loss

# Required import: import torch
# Or: from torch import abs
def _smooth_l1_loss(bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights, sigma=1.0, dim=[1]):

    sigma_2 = sigma ** 2
    box_diff = bbox_pred - bbox_targets
    in_box_diff = bbox_inside_weights * box_diff
    abs_in_box_diff = torch.abs(in_box_diff)
    smoothL1_sign = (abs_in_box_diff < 1. / sigma_2).detach().float()
    in_loss_box = torch.pow(in_box_diff, 2) * (sigma_2 / 2.) * smoothL1_sign \
                  + (abs_in_box_diff - (0.5 / sigma_2)) * (1. - smoothL1_sign)
    out_loss_box = bbox_outside_weights * in_loss_box
    loss_box = out_loss_box

    s = loss_box.size(0)
    loss_box = loss_box.view(s, -1).sum(1).mean()
    # for i in sorted(dim, reverse=True):
    #   loss_box = loss_box.sum(i)
    # loss_box = loss_box.mean()
    return loss_box 
Developer: guoruoqian | Project: cascade-rcnn_Pytorch | Lines: 20 | Source: net_utils.py
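A possible way to call the function above, with made-up shapes and weights; setting both weight tensors to ones reduces it to a plain smooth L1 loss averaged over the batch.

import torch

bbox_pred = torch.randn(16, 4, requires_grad=True)
bbox_targets = torch.randn(16, 4)
weights = torch.ones_like(bbox_targets)          # stands in for bbox_inside_weights / bbox_outside_weights
loss = _smooth_l1_loss(bbox_pred, bbox_targets, weights, weights, sigma=3.0)
loss.backward()                                  # loss is already reduced to a scalar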

Example 6: mu_law_encoding

# Required import: import torch
# Or: from torch import abs
def mu_law_encoding(
        x: Tensor,
        quantization_channels: int
) -> Tensor:
    r"""Encode signal based on mu-law companding.  For more info see the
    `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_

    This algorithm assumes the signal has been scaled to between -1 and 1 and
    returns a signal encoded with values from 0 to quantization_channels - 1.

    Args:
        x (Tensor): Input tensor
        quantization_channels (int): Number of channels

    Returns:
        Tensor: Input after mu-law encoding
    """
    mu = quantization_channels - 1.0
    if not x.is_floating_point():
        x = x.to(torch.float)
    mu = torch.tensor(mu, dtype=x.dtype)
    x_mu = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(mu)
    x_mu = ((x_mu + 1) / 2 * mu + 0.5).to(torch.int64)
    return x_mu 
Developer: pytorch | Project: audio | Lines: 26 | Source: functional.py
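A usage sketch under the function's own assumption that the input is already scaled to [-1, 1]; the toy waveform below is made up for illustration.

import math
import torch

waveform = torch.sin(torch.linspace(0, 2 * math.pi, 1000))    # toy signal in [-1, 1]
codes = mu_law_encoding(waveform, quantization_channels=256)
print(codes.dtype, int(codes.min()), int(codes.max()))        # torch.int64, codes in [0, 255]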

Example 7: mu_law_decoding

# Required import: import torch
# Or: from torch import abs
def mu_law_decoding(
        x_mu: Tensor,
        quantization_channels: int
) -> Tensor:
    r"""Decode mu-law encoded signal.  For more info see the
    `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_

    This expects an input with values between 0 and quantization_channels - 1
    and returns a signal scaled between -1 and 1.

    Args:
        x_mu (Tensor): Input tensor
        quantization_channels (int): Number of channels

    Returns:
        Tensor: Input after mu-law decoding
    """
    mu = quantization_channels - 1.0
    if not x_mu.is_floating_point():
        x_mu = x_mu.to(torch.float)
    mu = torch.tensor(mu, dtype=x_mu.dtype)
    x = ((x_mu) / mu) * 2 - 1.0
    x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.0) / mu
    return x 
Developer: pytorch | Project: audio | Lines: 26 | Source: functional.py
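A round-trip sketch combining the two functions above; mu-law quantization is lossy, so the reconstruction error is small but non-zero.

import torch

waveform = torch.rand(1, 16000) * 2 - 1               # synthetic signal in [-1, 1]
codes = mu_law_encoding(waveform, 256)
restored = mu_law_decoding(codes, 256)
print(torch.abs(waveform - restored).max().item())    # small quantization error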

Example 8: test_mu_law_companding

# Required import: import torch
# Or: from torch import abs
def test_mu_law_companding(self):

        quantization_channels = 256

        waveform = self.waveform.clone()
        if not waveform.is_floating_point():
            waveform = waveform.to(torch.get_default_dtype())
        waveform /= torch.abs(waveform).max()

        self.assertTrue(waveform.min() >= -1. and waveform.max() <= 1.)

        waveform_mu = transforms.MuLawEncoding(quantization_channels)(waveform)
        self.assertTrue(waveform_mu.min() >= 0. and waveform_mu.max() <= quantization_channels)

        waveform_exp = transforms.MuLawDecoding(quantization_channels)(waveform_mu)
        self.assertTrue(waveform_exp.min() >= -1. and waveform_exp.max() <= 1.) 
Developer: pytorch | Project: audio | Lines: 18 | Source: test_transforms.py

Example 9: huber_loss

# Required import: import torch
# Or: from torch import abs
def huber_loss(error, delta=1.0):
    """
    Args:
        error: Torch tensor (d1,d2,...,dk)
    Returns:
        loss: Torch tensor (d1,d2,...,dk)

    x = error = pred - gt or dist(pred,gt)
    0.5 * |x|^2                 if |x|<=d
    0.5 * d^2 + d * (|x|-d)     if |x|>d
    Ref: https://github.com/charlesq34/frustum-pointnets/blob/master/models/model_util.py
    """
    abs_error = torch.abs(error)
    #quadratic = torch.min(abs_error, torch.FloatTensor([delta]))
    quadratic = torch.clamp(abs_error, max=delta)
    linear = (abs_error - quadratic)
    loss = 0.5 * quadratic**2 + delta * linear
    return loss 
Developer: zaiweizhang | Project: H3DNet | Lines: 20 | Source: nn_distance.py
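A minimal usage sketch with a made-up error tensor; the loss keeps the input shape and can be reduced as needed.

import torch

error = torch.randn(32, 3)             # stands in for pred - gt
loss = huber_loss(error, delta=1.0)    # element-wise, same shape as error
print(loss.mean().item())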

Example 10: __init__

# Required import: import torch
# Or: from torch import abs
def __init__(self,in_channel):
        super(InvConv,self).__init__()

        weight=np.random.randn(in_channel,in_channel)
        q,_=linalg.qr(weight)
        w_p,w_l,w_u=linalg.lu(q.astype(np.float32))
        w_s=np.diag(w_u)
        w_u=np.triu(w_u,1)
        u_mask=np.triu(np.ones_like(w_u),1)
        l_mask=u_mask.T

        self.register_buffer('w_p',torch.from_numpy(w_p))
        self.register_buffer('u_mask',torch.from_numpy(u_mask))
        self.register_buffer('l_mask',torch.from_numpy(l_mask))
        self.register_buffer('l_eye',torch.eye(l_mask.shape[0]))
        self.register_buffer('s_sign',torch.sign(torch.from_numpy(w_s)))
        self.w_l=torch.nn.Parameter(torch.from_numpy(w_l))
        self.w_s=torch.nn.Parameter(torch.log(1e-7+torch.abs(torch.from_numpy(w_s))))
        self.w_u=torch.nn.Parameter(torch.from_numpy(w_u))

        self.weight=None
        self.invweight=None

        return 
Developer: joansj | Project: blow | Lines: 26 | Source: blow.py

Example 11: tforward

# Required import: import torch
# Or: from torch import abs
def tforward(self, disp, edge=None):
    self.sobel=self.sobel.to(disp.device)

    if edge is not None:
      grad = self.sobel(disp)
      grad = torch.sqrt(grad[:,0:1,...]**2 + grad[:,1:2,...]**2 + 1e-8)
      pdf = (1-edge)/self.b0 * torch.exp(-torch.abs(grad)/self.b0) + \
            edge/self.b1 * torch.exp(-torch.abs(grad)/self.b1)
      val = torch.mean(-torch.log(pdf.clamp(min=1e-4)))
    else:
      # on qifeng's data we don't have ambient info
      # therefore we suppress edge everywhere
      grad = self.sobel(disp)
      grad = torch.sqrt(grad[:,0:1,...]**2 + grad[:,1:2,...]**2 + 1e-8)
      grad= torch.clamp(grad, 0, 1.0)
      val = torch.mean(grad)

    return val 
Developer: autonomousvision | Project: connecting_the_dots | Lines: 20 | Source: networks.py

Example 12: fwd

# Required import: import torch
# Or: from torch import abs
def fwd(self, depth0, depth1, R0, t0, R1, t1):
    uv1, d1 = super().tforward(depth0, R0, t0, R1, t1)

    uv1[..., 0] = 2 * (uv1[..., 0] / (self.im_width-1) - 0.5)
    uv1[..., 1] = 2 * (uv1[..., 1] / (self.im_height-1) - 0.5)
    uv1 = uv1.view(-1, self.im_height, self.im_width, 2).clone()

    depth10 = torch.nn.functional.grid_sample(depth1, uv1, padding_mode='border')

    diff = torch.abs(d1.view(-1) - depth10.view(-1))

    if self.clamp > 0:
      diff = torch.clamp(diff, 0, self.clamp)

    # mean (optionally clamped) depth difference
    return diff.mean()
Developer: autonomousvision | Project: connecting_the_dots | Lines: 18 | Source: networks.py

Example 13: smooth_l1_loss

# Required import: import torch
# Or: from torch import abs
def smooth_l1_loss(input, target, beta=1. / 9, size_average=True):
    """
    very similar to the smooth_l1_loss from pytorch, but with
    the extra beta parameter

    Modified according to detectron2's fvcore,
    refer to https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
    """
    if beta < 1e-5:
        # if beta == 0, then torch.where will result in nan gradients when
        # the chain rule is applied due to pytorch implementation details
        # (the False branch "0.5 * n ** 2 / 0" has an incoming gradient of
        # zeros, rather than "no gradient"). To avoid this issue, we define
        # small values of beta to be exactly l1 loss.
        loss = torch.abs(input - target)
    else:
        n = torch.abs(input - target)
        cond = n < beta
        loss = torch.where(cond, 0.5 * n ** 2 / beta, n - 0.5 * beta)

    if size_average:
        return loss.mean()
    return loss.sum() 
Developer: soeaver | Project: Parsing-R-CNN | Lines: 25 | Source: smooth_l1_loss.py
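A short sketch of how the beta parameter changes the behavior of the function above (inputs are made up): a beta below 1e-5 falls back to plain L1, while a larger beta keeps the loss quadratic near zero.

import torch

input = torch.randn(8, 4, requires_grad=True)
target = torch.randn(8, 4)
l1_loss = smooth_l1_loss(input, target, beta=0.0)       # beta < 1e-5 branch: plain L1, mean-reduced
smooth = smooth_l1_loss(input, target, beta=1.0 / 9)    # default beta: quadratic near zero
smooth.backward()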

Example 14: smooth_l1_loss_LW

# Required import: import torch
# Or: from torch import abs
def smooth_l1_loss_LW(bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights, beta=1.0):
    """
    SmoothL1(x) = 0.5 * x^2 / beta      if |x| < beta
                  |x| - 0.5 * beta      otherwise.
    1 / N * sum_i alpha_out[i] * SmoothL1(alpha_in[i] * (y_hat[i] - y[i])).
    N is the number of batch elements in the input predictions
    """
    box_diff = bbox_pred - bbox_targets
    in_box_diff = bbox_inside_weights * box_diff
    abs_in_box_diff = torch.abs(in_box_diff)
    smoothL1_sign = (abs_in_box_diff < beta).detach().float()
    in_loss_box = smoothL1_sign * 0.5 * torch.pow(in_box_diff, 2) / beta + \
                  (1 - smoothL1_sign) * (abs_in_box_diff - (0.5 * beta))
    out_loss_box = bbox_outside_weights * in_loss_box
    loss_box = out_loss_box
    N = loss_box.size(0)  # batch size
    loss_box = loss_box.view(-1).sum(0) / N
    return loss_box 
Developer: soeaver | Project: Parsing-R-CNN | Lines: 20 | Source: smooth_l1_loss.py
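A quick cross-check sketch, assuming both functions from this source file are in scope and using made-up inputs: with both weight tensors set to ones, the weighted variant above matches the smooth_l1_loss of Example 13 summed per element and averaged over the batch.

import torch

pred = torch.randn(8, 4)
target = torch.randn(8, 4)
ones = torch.ones_like(pred)
a = smooth_l1_loss_LW(pred, target, ones, ones, beta=1.0)
b = smooth_l1_loss(pred, target, beta=1.0, size_average=False) / pred.size(0)
print(torch.allclose(a, b))   # True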

Example 15: forward

# Required import: import torch
# Or: from torch import abs
def forward(self, inputs, target, size_average=True):

        n = torch.abs(inputs - target)
        with torch.no_grad():
            if torch.isnan(n.var(dim=0)).sum().item() == 0:
                self.running_mean = self.running_mean.to(n.device)
                self.running_mean *= (1 - self.momentum)
                self.running_mean += (self.momentum * n.mean(dim=0))
                self.running_var = self.running_var.to(n.device)
                self.running_var *= (1 - self.momentum)
                self.running_var += (self.momentum * n.var(dim=0))


        beta = (self.running_mean - self.running_var)
        beta = beta.clamp(max=self.beta, min=1e-3)

        beta = beta.view(-1, self.num_features).to(n.device)
        cond = n < beta.expand_as(n)
        loss = torch.where(cond, 0.5 * n ** 2 / beta, n - 0.5 * beta)
        if size_average:
            return loss.mean()
        return loss.sum() 
Developer: soeaver | Project: Parsing-R-CNN | Lines: 24 | Source: adjust_smooth_l1_loss.py


Note: The torch.abs examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the License of the corresponding project; do not republish without permission.