

Python torch.dot Method Code Examples

This article collects typical usage examples of the torch.dot method in Python. If you are unsure what torch.dot does or how it is used in practice, the selected examples below may help. You can also explore further usage examples from the torch module.


The following presents 15 code examples of the torch.dot method, ordered by popularity by default.
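
Before the examples, a minimal sketch of torch.dot itself may help: it accepts two 1-D tensors of equal length and returns their inner product as a 0-dimensional tensor (the tensors below are made up for illustration).

import torch

a = torch.tensor([1.0, 2.0, 3.0])
b = torch.tensor([4.0, 5.0, 6.0])

# torch.dot only accepts 1-D tensors; the result is a 0-dim tensor.
s = torch.dot(a, b)   # 1*4 + 2*5 + 3*6 = 32
print(s.item())       # 32.0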

Example 1: test

# Required import: import torch [as alias]
# Or: from torch import dot [as alias]
def test(self, dataset):
        self.model.eval()
        with torch.no_grad():
            total_loss = 0.0
            predictions = torch.zeros(len(dataset), dtype=torch.float, device='cpu')
            indices = torch.arange(1, dataset.num_classes + 1, dtype=torch.float, device='cpu')
            for idx in tqdm(range(len(dataset)), desc='Testing epoch  ' + str(self.epoch) + ''):
                ltree, linput, rtree, rinput, label = dataset[idx]
                target = utils.map_label_to_target(label, dataset.num_classes)
                linput, rinput = linput.to(self.device), rinput.to(self.device)
                target = target.to(self.device)
                output = self.model(ltree, linput, rtree, rinput)
                loss = self.criterion(output, target)
                total_loss += loss.item()
                output = output.squeeze().to('cpu')
                predictions[idx] = torch.dot(indices, torch.exp(output))
        return total_loss / len(dataset), predictions 
Developer: dasguptar, Project: treelstm.pytorch, Lines: 19, Source: trainer.py
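
The torch.dot call above converts the model's log-probability output over similarity classes into a single expected score. A standalone sketch of just that step, with a made-up class count and a random stand-in for the model output:

import torch

num_classes = 5
indices = torch.arange(1, num_classes + 1, dtype=torch.float)    # class values 1..5
log_probs = torch.log_softmax(torch.randn(num_classes), dim=0)   # stand-in for the model's log-probabilities

# Expected class value: sum over k of k * p(k), recovering p via exp.
prediction = torch.dot(indices, torch.exp(log_probs))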

Example 2: lovasz_hinge_flat

# Required import: import torch [as alias]
# Or: from torch import dot [as alias]
def lovasz_hinge_flat(logits, labels):
    """
    Binary Lovasz hinge loss
      logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
      labels: [P] Tensor, binary ground truth labels (0 or 1)
      ignore: label to ignore
    """
    if len(labels) == 0:
        # only void pixels, the gradients should be 0
        return logits.sum() * 0.
    signs = 2. * labels.float() - 1.
    errors = (1. - logits * Variable(signs))
    errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
    perm = perm.data
    gt_sorted = labels[perm]
    grad = lovasz_grad(gt_sorted)
    loss = torch.dot(F.relu(errors_sorted), Variable(grad))
    return loss 
Developer: edwardzhou130, Project: PolarSeg, Lines: 20, Source: lovasz_losses.py
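
A hypothetical call sketch for the function above; lovasz_grad (and Variable) come from the same module in the original project, and the logits and labels here are made up:

import torch

logits = torch.tensor([2.1, -0.5, 0.3, -1.7], requires_grad=True)
labels = torch.tensor([1, 0, 1, 0])

loss = lovasz_hinge_flat(logits, labels)   # assumes lovasz_grad is in scope, as in the repo
loss.backward()                            # gradients flow through torch.sort and torch.dot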

Example 3: forward

# Required import: import torch [as alias]
# Or: from torch import dot [as alias]
def forward(self, inputs, targets):

        N, C, H, W = inputs.size()
        masks = torch.zeros(N, C, H, W).to(targets.device).scatter_(1, targets.view(N, 1, H, W), 1)

        loss = 0.

        for mask, input in zip(masks.view(N, -1), inputs.view(N, -1)):

            max_margin_errors = 1. - ((mask * 2 - 1) * input)
            errors_sorted, indices = torch.sort(max_margin_errors, descending=True)
            labels_sorted = mask[indices.data]

            inter = labels_sorted.sum() - labels_sorted.cumsum(0)
            union = labels_sorted.sum() + (1. - labels_sorted).cumsum(0)
            iou = 1. - inter / union

            p = len(labels_sorted)
            if p > 1:
                iou[1:p] = iou[1:p] - iou[0:-1]

            loss += torch.dot(nn.functional.relu(errors_sorted), iou)

        return loss / N 
Developer: mapbox, Project: robosat, Lines: 26, Source: losses.py

Example 4: compute_global_norm

# Required import: import torch [as alias]
# Or: from torch import dot [as alias]
def compute_global_norm(curr_state, prev_state, d_loss):
    """Compute the norm of the line segment between current parameters and previous parameters.

    Arguments:
        curr_state (OrderedDict): the state dict at current iteration.
        prev_state (OrderedDict): the state dict at previous iteration.
        d_loss (torch.Tensor, float): the loss delta between current at previous iteration (optional).
    """
    norm = d_loss * d_loss if d_loss is not None else 0

    for name, curr_param in curr_state.items():
        if not curr_param.requires_grad:
            continue

        curr_param = curr_param.detach()
        prev_param = prev_state[name].detach()
        param_delta = curr_param.data.view(-1) - prev_param.data.view(-1)
        norm += torch.dot(param_delta, param_delta)
    norm = norm.sqrt()
    return norm 
Developer: amzn, Project: metalearn-leap, Lines: 22, Source: utils.py
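
A small usage sketch, assuming the two state dicts are built with keep_vars=True so that the requires_grad check inside compute_global_norm sees the actual parameters (how the original project builds them is not shown in this excerpt):

import copy
import torch
import torch.nn as nn

model = nn.Linear(4, 2)
prev_state = copy.deepcopy(model.state_dict(keep_vars=True))

with torch.no_grad():                  # stand-in for one optimisation step
    for p in model.parameters():
        p.add_(0.01 * torch.randn_like(p))

curr_state = model.state_dict(keep_vars=True)
d_loss = torch.tensor(0.05)            # hypothetical loss difference
norm = compute_global_norm(curr_state, prev_state, d_loss)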

Example 5: lovasz_hinge_flat

# Required import: import torch [as alias]
# Or: from torch import dot [as alias]
def lovasz_hinge_flat(self, logits, labels):
        """
        Binary Lovasz hinge loss
          logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
          labels: [P] Tensor, binary ground truth labels (0 or 1)
          ignore: label to ignore
        """
        if len(labels) == 0:
            # only void pixels, the gradients should be 0
            return logits.sum() * 0.
        signs = 2. * labels.float() - 1.
        errors = (1. - logits * Variable(signs))
        errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
        perm = perm.data
        gt_sorted = labels[perm]
        grad = lovasz_grad(gt_sorted)
        loss = torch.dot(F.relu(errors_sorted), Variable(grad))
        return loss 
Developer: soeaver, Project: Parsing-R-CNN, Lines: 20, Source: lovasz_hinge_loss.py

Example 6: lovasz_hinge_flat

# Required import: import torch [as alias]
# Or: from torch import dot [as alias]
def lovasz_hinge_flat(logits, labels):
    """
    Binary Lovasz hinge loss
      logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
      labels: [P] Tensor, binary ground truth labels (0 or 1)
      ignore: label to ignore
    """
    if len(labels) == 0:
        # only void pixels, the gradients should be 0
        return logits.sum() * 0.
    signs = 2. * labels.float() - 1.
    errors = (1. - logits * signs)

    errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
    perm = perm.data
    gt_sorted = labels[perm]
    grad = lovasz_grad(gt_sorted)
    loss = torch.dot(F.elu(errors_sorted), grad)
    return loss 
Developer: neptune-ai, Project: open-solution-salt-identification, Lines: 21, Source: lovasz_losses.py

Example 7: lovasz_softmax_flat

# Required import: import torch [as alias]
# Or: from torch import dot [as alias]
def lovasz_softmax_flat(probas, labels, only_present=False):
    """
    Multi-class Lovasz-Softmax loss
      probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)
      labels: [P] Tensor, ground truth labels (between 0 and C - 1)
      only_present: average only on classes present in ground truth
    """
    C = probas.size(1)
    losses = []
    for c in range(C):
        fg = (labels == c).float()  # foreground for class c
        if only_present and fg.sum() == 0:
            continue

        errors = (fg - probas[:, c]).abs()
        errors_sorted, perm = torch.sort(errors, 0, descending=True)
        perm = perm.data
        fg_sorted = fg[perm]
        losses.append(torch.dot(errors_sorted, lovasz_grad(fg_sorted)))
    return mean(losses) 
Developer: neptune-ai, Project: open-solution-salt-identification, Lines: 22, Source: lovasz_losses.py
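
A hypothetical call sketch for the multi-class variant above; probas would normally come from a softmax over network logits, and lovasz_grad and mean are helpers defined in the same file:

import torch

P, C = 6, 3                                       # made-up pixel and class counts
probas = torch.softmax(torch.randn(P, C), dim=1)  # class probabilities per pixel
labels = torch.randint(0, C, (P,))                # ground-truth class per pixel

loss = lovasz_softmax_flat(probas, labels, only_present=True)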

Example 8: lovasz_softmax_flat

# Required import: import torch [as alias]
# Or: from torch import dot [as alias]
def lovasz_softmax_flat(probas, labels, only_present=False):
    """
    Multi-class Lovasz-Softmax loss
      probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)
      labels: [P] Tensor, ground truth labels (between 0 and C - 1)
      only_present: average only on classes present in ground truth
    """
    C = probas.size(1)
    losses = []
    for c in range(C):
        fg = (labels == c).float() # foreground for class c
        if only_present and fg.sum() == 0:
            continue

        errors = (fg - probas[:, c]).abs()
        errors_sorted, perm = torch.sort(errors, 0, descending=True)
        perm = perm.data
        fg_sorted = fg[perm]
        losses.append(torch.dot(errors_sorted, lovasz_grad(fg_sorted)))
    return mean(losses) 
Developer: neptune-ai, Project: open-solution-salt-identification, Lines: 22, Source: lovash_losses.py

Example 9: lovasz_hinge_flat

# Required import: import torch [as alias]
# Or: from torch import dot [as alias]
def lovasz_hinge_flat(logits, labels):
    """
    Binary Lovasz hinge loss
      logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
      labels: [P] Tensor, binary ground truth labels (0 or 1)
      ignore: label to ignore
    """
    if len(labels) == 0:
        # only void pixels, the gradients should be 0
        return logits.sum() * 0.
    signs = 2. * labels.float() - 1.
    errors = (1. - logits * Variable(signs))
    errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
    perm = perm.data
    gt_sorted = labels[perm]
    grad = lovasz_grad(gt_sorted)
    loss = torch.dot(F.elu(errors_sorted) + 1, Variable(grad))
    return loss 
Developer: lRomul, Project: argus-tgs-salt, Lines: 20, Source: lovasz.py

Example 10: lovasz_softmax_flat

# Required import: import torch [as alias]
# Or: from torch import dot [as alias]
def lovasz_softmax_flat(probas, labels, only_present=False):
    """
    Multi-class Lovasz-Softmax loss
      probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)
      labels: [P] Tensor, ground truth labels (between 0 and C - 1)
      only_present: average only on classes present in ground truth
    """
    C = probas.size(1)
    losses = []
    for c in range(C):
        fg = (labels == c).float() # foreground for class c
        if only_present and fg.sum() == 0:
            continue
        errors = (Variable(fg) - probas[:, c]).abs()
        errors_sorted, perm = torch.sort(errors, 0, descending=True)
        perm = perm.data
        fg_sorted = fg[perm]
        losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted))))
    return mean(losses) 
Developer: lRomul, Project: argus-tgs-salt, Lines: 21, Source: lovasz.py

Example 11: compute_weight

# Required import: import torch [as alias]
# Or: from torch import dot [as alias]
def compute_weight(self, module):
        weight = getattr(module, self.name + '_org')
        u = getattr(module, self.name + '_u')
        height = weight.size(0)
        weight_mat = weight.view(height, -1)
        with torch.no_grad():
            for _ in range(self.n_power_iterations):
                # Spectral norm of weight equals to `u^T W v`, where `u` and `v`
                # are the first left and right singular vectors.
                # This power iteration produces approximations of `u` and `v`.
                v = normalize(torch.matmul(weight_mat.t(), u), dim=0, eps=self.eps)
                u = normalize(torch.matmul(weight_mat, v), dim=0, eps=self.eps)

            sigma = torch.dot(u, torch.matmul(weight_mat, v))
        weight = weight / sigma
        return weight, u 
Developer: Lotayou, Project: everybody_dance_now_pytorch, Lines: 18, Source: spectral_norm.py
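
The torch.dot call above is the Rayleigh-quotient estimate of the spectral norm, sigma ≈ u^T W v. A standalone sketch of the same power iteration, independent of the weight-hook machinery in the repo:

import torch
from torch.nn.functional import normalize

W = torch.randn(8, 5)
u = normalize(torch.randn(8), dim=0)

for _ in range(5):                    # a few power-iteration steps
    v = normalize(W.t() @ u, dim=0)
    u = normalize(W @ v, dim=0)

sigma = torch.dot(u, W @ v)           # approximates the largest singular value of W
# For comparison, the exact value would be torch.linalg.svdvals(W)[0].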

Example 12: get_barycentric_coords

# Required import: import torch [as alias]
# Or: from torch import dot [as alias]
def get_barycentric_coords(point, verts):
        if len(verts) == 2:
            diff = verts[1] - verts[0]
            diff_norm = torch.norm(diff)
            normalized_diff = diff / diff_norm
            u = torch.dot(verts[1] - point, normalized_diff) / diff_norm
            v = torch.dot(point - verts[0], normalized_diff) / diff_norm
            return u, v
        elif len(verts) == 3:
            # TODO Area method instead of LinAlg
            M = torch.cat([
                torch.cat([verts[0], verts[0].new_ones(1)]).unsqueeze(1),
                torch.cat([verts[1], verts[1].new_ones(1)]).unsqueeze(1),
                torch.cat([verts[2], verts[2].new_ones(1)]).unsqueeze(1),
            ], dim=1)
            invM = torch.inverse(M)
            uvw = torch.matmul(invM, torch.cat([point, point.new_ones(1)]).unsqueeze(1))
            return uvw
        else:
            raise ValueError('Barycentric coords only works for 2 or 3 points') 
Developer: locuslab, Project: lcp-physics, Lines: 22, Source: contacts.py
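
A small check of the two-vertex branch above, calling it as a plain function (in the repo it sits on a class); for a point on the segment, the returned coefficients should sum to 1:

import torch

v0 = torch.tensor([0.0, 0.0])
v1 = torch.tensor([2.0, 0.0])
point = torch.tensor([0.5, 0.0])      # a quarter of the way from v0 to v1

u, v = get_barycentric_coords(point, [v0, v1])
# Expected here: u = 0.75, v = 0.25, so that u*v0 + v*v1 reconstructs the point.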

Example 13: lovasz_loss_flat

# Required import: import torch [as alias]
# Or: from torch import dot [as alias]
def lovasz_loss_flat(logits, labels, error_func):
    """
    Binary Lovasz hinge loss
      logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
      labels: [P] Tensor, binary ground truth labels (0 or 1)
      ignore: label to ignore
    """
    if len(labels) == 0:
        # only void pixels, the gradients should be 0
        return logits.sum() * 0.

    errors = error_func(logits, labels)

    errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
    perm = perm.data
    gt_sorted = labels[perm]
    grad = lovasz_grad(gt_sorted)
    #loss = torch.dot(F.relu(errors_sorted), Variable(grad))
    loss = torch.dot(F.elu(errors_sorted) + 1, Variable(grad))
    return loss 
Developer: tugstugi, Project: pytorch-saltnet, Lines: 22, Source: lovasz_losses.py

Example 14: dice_error

# Required import: import torch [as alias]
# Or: from torch import dot [as alias]
def dice_error(input, target):
    eps = 0.000001
    _, result_ = input.max(1)
    result_ = torch.squeeze(result_)
    if input.is_cuda:
        result = torch.cuda.FloatTensor(result_.size())
        target_ = torch.cuda.FloatTensor(target.size())
    else:
        result = torch.FloatTensor(result_.size())
        target_ = torch.FloatTensor(target.size())
    result.copy_(result_.data)
    target_.copy_(target.data)
    target = target_
    intersect = torch.dot(result, target)

    result_sum = torch.sum(result)
    target_sum = torch.sum(target)
    union = result_sum + target_sum + 2*eps
    intersect = np.max([eps, intersect])
    # the target volume can be empty - so we still want to
    # end up with a score of 1 if the result is 0/0
    IoU = intersect / union
#    print('union: {:.3f}\t intersect: {:.6f}\t target_sum: {:.0f} IoU: result_sum: {:.0f} IoU {:.7f}'.format(
#        union, intersect, target_sum, result_sum, 2*IoU))
    return 2*IoU 
Developer: pykao, Project: Modified-3D-UNet-Pytorch, Lines: 27, Source: loss.py
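
A toy call for the function above; note that the original source mixes numpy (np.max) with torch tensors, so numpy must be imported as np in the same module:

import torch
import numpy as np   # required by the np.max call inside dice_error

logits = torch.randn(6, 2)                       # hypothetical 2-class scores for 6 voxels
target = torch.tensor([0., 1., 1., 0., 1., 0.])  # ground-truth labels

score = dice_error(logits, target)               # Dice score: 2 * intersection / union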

Example 15: forward

# Required import: import torch [as alias]
# Or: from torch import dot [as alias]
def forward(self, input:Tensor, target:Tensor) -> Tensor:
        r'''
        Evaluate loss for given predictions

        Arguments:
            input: prediction tensor
            target: target tensor
        
        Returns:
            (weighted) loss
        '''

        input, target = input.squeeze(), target.squeeze()
        # Reweight according to batch size
        sig_wgt = (target*self.weight)*self.sig_wgt/torch.dot(target, self.weight)
        bkg_wgt = ((1-target)*self.weight)*self.bkg_wgt/torch.dot(1-target, self.weight)
        # Compute Signal and background weights without a hard cut
        s = torch.dot(sig_wgt*input, target)
        b = torch.dot(bkg_wgt*input, (1-target))
        return 1/self.func(s, b)  # Return inverse of significance (would negative work better?) 
Developer: GilesStrong, Project: lumin, Lines: 22, Source: hep_losses.py
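
The core of the loss above is soft event counting with torch.dot: weighted predictions are summed separately over signal and background rows. A stripped-down sketch of that counting step, with made-up predictions and weights and the significance function self.func left out:

import torch

preds   = torch.tensor([0.9, 0.2, 0.7, 0.1])   # hypothetical per-event predictions
targets = torch.tensor([1.0, 0.0, 1.0, 0.0])   # 1 = signal, 0 = background
weights = torch.tensor([1.0, 2.0, 1.0, 2.0])   # hypothetical event weights

s = torch.dot(weights * preds, targets)        # soft signal count
b = torch.dot(weights * preds, 1 - targets)    # soft background count
# A common significance choice would then be s / torch.sqrt(s + b).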


Note: The torch.dot examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and copyright remains with the original authors. Please refer to each project's license before distributing or using the code; do not reproduce without permission.