

Python functional.soft_margin_loss Method Code Examples

This article collects typical usage examples of the Python method torch.nn.functional.soft_margin_loss. If you are wondering what functional.soft_margin_loss does, how to call it, or how it is used in practice, the curated examples below should help. You can also browse other usage examples from torch.nn.functional.


Six code examples of the functional.soft_margin_loss method are shown below, sorted by popularity by default.
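
Before the examples, a minimal usage sketch may help (tensor names here are illustrative, not taken from the projects below): F.soft_margin_loss optimizes the two-class logistic loss log(1 + exp(-y * x)) and expects targets in {-1, +1}.

import torch
import torch.nn.functional as F

logits = torch.randn(8, requires_grad=True)            # raw scores x
targets = torch.randint(0, 2, (8,)).float() * 2 - 1    # labels in {-1, +1}
loss = F.soft_margin_loss(logits, targets)             # default reduction='mean'
loss.backward()
print(loss.item())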

Example 1: select_mask_logistic_loss

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import soft_margin_loss [as alias]
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable  # legacy API, kept as in the original project

# iou_measure is a SiamMask helper defined alongside this function in the project.
def select_mask_logistic_loss(p_m, mask, weight, o_sz=63, g_sz=127):
    weight = weight.view(-1)
    # indices of anchors labelled positive (weight == 1)
    pos = Variable(weight.data.eq(1).nonzero().squeeze())
    if pos.nelement() == 0:
        return p_m.sum() * 0, p_m.sum() * 0, p_m.sum() * 0, p_m.sum() * 0

    if len(p_m.shape) == 4:
        # reshape raw mask logits to one o_sz x o_sz map per anchor, keep only
        # the positive anchors, and upsample to the ground-truth size
        p_m = p_m.permute(0, 2, 3, 1).contiguous().view(-1, 1, o_sz, o_sz)
        p_m = torch.index_select(p_m, 0, pos)
        p_m = nn.UpsamplingBilinear2d(size=[g_sz, g_sz])(p_m)
        p_m = p_m.view(-1, g_sz * g_sz)
    else:
        p_m = torch.index_select(p_m, 0, pos)

    # slide a g_sz x g_sz window over the ground-truth mask to get one target
    # patch per anchor position, then keep the patches for positive anchors
    mask_uf = F.unfold(mask, (g_sz, g_sz), padding=0, stride=8)
    mask_uf = torch.transpose(mask_uf, 1, 2).contiguous().view(-1, g_sz * g_sz)
    mask_uf = torch.index_select(mask_uf, 0, pos)

    loss = F.soft_margin_loss(p_m, mask_uf)
    iou_m, iou_5, iou_7 = iou_measure(p_m, mask_uf)
    return loss, iou_m, iou_5, iou_7
Author: foolwood | Project: SiamMask | Source: siammask_sharp.py (22 lines)
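
The F.unfold call is what pairs each positive anchor with its own g_sz x g_sz ground-truth window. A quick shape check (a sketch; the 2 x 1 x 255 x 255 mask size is illustrative, not taken from SiamMask):

import torch
import torch.nn.functional as F

mask = torch.randn(2, 1, 255, 255).sign()                  # mask values in {-1, +1}
windows = F.unfold(mask, (127, 127), padding=0, stride=8)
print(windows.shape)   # torch.Size([2, 16129, 289]): 127*127 pixels per window, 17*17 positions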

Example 2: select_mask_logistic_loss

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import soft_margin_loss [as alias]
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable  # legacy API, kept as in the original project

# Variant of Example 1 (iou_measure again comes from the SiamMask project);
# this version always expects 4-D logits and unfolds the ground truth with padding=32.
def select_mask_logistic_loss(p_m, mask, weight, o_sz=63, g_sz=127):
    weight = weight.view(-1)
    pos = Variable(weight.data.eq(1).nonzero().squeeze())
    if pos.nelement() == 0:
        return p_m.sum() * 0, p_m.sum() * 0, p_m.sum() * 0, p_m.sum() * 0

    p_m = p_m.permute(0, 2, 3, 1).contiguous().view(-1, 1, o_sz, o_sz)
    p_m = torch.index_select(p_m, 0, pos)
    p_m = nn.UpsamplingBilinear2d(size=[g_sz, g_sz])(p_m)
    p_m = p_m.view(-1, g_sz * g_sz)

    mask_uf = F.unfold(mask, (g_sz, g_sz), padding=32, stride=8)
    mask_uf = torch.transpose(mask_uf, 1, 2).contiguous().view(-1, g_sz * g_sz)
    mask_uf = torch.index_select(mask_uf, 0, pos)

    loss = F.soft_margin_loss(p_m, mask_uf)
    iou_m, iou_5, iou_7 = iou_measure(p_m, mask_uf)
    return loss, iou_m, iou_5, iou_7
Author: foolwood | Project: SiamMask | Source: siammask.py (19 lines)

Example 3: loss_per_level

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import soft_margin_loss [as alias]
import torch
import torch.nn.functional as F

# A method of the project's relative-loss class: self.scale_func, self.start_disp
# and self.max_disp are attributes set elsewhere in relative_loss.py.
def loss_per_level(self, estDisp, gtDisp, label):
    N, C, H, W = estDisp.shape
    scaled_gtDisp = gtDisp
    scale = 1.0
    if gtDisp.shape[-2] != H or gtDisp.shape[-1] != W:
        # compute the scale for this level and rescale gtDisp to match
        scale = gtDisp.shape[-1] / (W * 1.0)
        scaled_gtDisp = gtDisp / scale
        scaled_gtDisp = self.scale_func(scaled_gtDisp, (H, W))

    # mask for valid disparity in (start disparity, max disparity / scale);
    # note: KITTI marks invalid disparity as 0, so be sure to mask it out
    mask = (scaled_gtDisp > self.start_disp) & (scaled_gtDisp < (self.max_disp / scale))
    if mask.sum() < 1.0:
        print('Relative loss: no pixel has a disparity in ({}, {})!'.format(
            self.start_disp, self.max_disp / scale))
        loss = (torch.abs(estDisp - scaled_gtDisp) * mask.float()).mean()
        return loss

    # relative loss
    valid_pixel_number = mask.float().sum()
    diff = scaled_gtDisp[mask] - estDisp[mask]
    label = label[mask]
    # entries whose magnitude exceeds 66 would overflow torch.exp() inside the
    # soft margin loss, so they are split out and penalized by their absolute
    # value instead (for a disagreeing label, log(1 + exp(|diff|)) ~ |diff|)
    over_large_mask = torch.gt(torch.abs(diff), 66)
    over_large_diff = diff[over_large_mask]
    # entries with absolute value at most 66 go through the soft margin loss
    proper_mask = torch.le(torch.abs(diff), 66)
    proper_diff = diff[proper_mask]
    # keep the labels that correspond to the well-behaved entries
    label = label[proper_mask]
    loss = F.soft_margin_loss(proper_diff, label, reduction='sum') + torch.abs(over_large_diff).sum()
    loss = loss / valid_pixel_number

    return loss
Author: DeepMotionAIResearch | Project: DenseMatchingBenchmark | Source: relative_loss.py (39 lines)
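
The 66 cutoff guards against exactly this overflow: soft_margin_loss(x, y) computes log(1 + exp(-y * x)), and exp() overflows float32 well before the exponent reaches 100. A small sketch with made-up values:

import torch
import torch.nn.functional as F

x = torch.tensor([100.0])
y = torch.tensor([-1.0])                  # label disagrees with the sign of x
print(F.soft_margin_loss(x, y))           # tensor(inf): exp(100) overflows float32
print(x.abs())                            # ~100, the value log(1 + exp(100)) would take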

Example 4: __call__

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import soft_margin_loss [as alias]
import torch
import torch.nn.functional as F

# A method of fast-reid's triplet loss class: normalize, euclidean_dist,
# hard_example_mining and weighted_example_mining are helpers from the project.
def __call__(self, _, global_features, targets):
    if self._normalize_feature:
        global_features = normalize(global_features, axis=-1)

    dist_mat = euclidean_dist(global_features, global_features)

    # boolean masks marking same-identity (positive) and
    # different-identity (negative) pairs
    N = dist_mat.size(0)
    is_pos = targets.expand(N, N).eq(targets.expand(N, N).t())
    is_neg = targets.expand(N, N).ne(targets.expand(N, N).t())

    if self._hard_mining:
        dist_ap, dist_an = hard_example_mining(dist_mat, is_pos, is_neg)
    else:
        dist_ap, dist_an = weighted_example_mining(dist_mat, is_pos, is_neg)

    y = dist_an.new().resize_as_(dist_an).fill_(1)

    if self._margin > 0:
        loss = F.margin_ranking_loss(dist_an, dist_ap, y, margin=self._margin)
    else:
        # soft-margin variant of the triplet loss; if it overflows to inf,
        # fall back to a plain margin ranking loss
        loss = F.soft_margin_loss(dist_an - dist_ap, y)
        if loss == float('Inf'):
            loss = F.margin_ranking_loss(dist_an, dist_ap, y, margin=0.3)

    return {
        "loss_triplet": loss * self._scale,
    }
Author: JDAI-CV | Project: fast-reid | Source: metric_loss.py (28 lines)
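
With y fixed to 1, the soft-margin branch above is the "soft" triplet loss: F.soft_margin_loss(dist_an - dist_ap, y) equals softplus(-(dist_an - dist_ap)). A quick check with made-up distances:

import torch
import torch.nn.functional as F

dist_ap = torch.tensor([0.8, 1.2])                  # anchor-positive distances
dist_an = torch.tensor([1.5, 0.9])                  # anchor-negative distances
y = torch.ones_like(dist_an)
print(F.soft_margin_loss(dist_an - dist_ap, y))     # mean of log(1 + exp(-(an - ap)))
print(F.softplus(-(dist_an - dist_ap)).mean())      # same value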

Example 5: soft_margin

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import soft_margin_loss [as alias]
import torch.nn.functional as F

def soft_margin(y_pred, y_true):
    return F.soft_margin_loss(y_pred, y_true)
Author: GRAAL-Research | Project: poutyne | Source: batch_metrics.py (4 lines)

Example 6: test_soft_margin_loss

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import soft_margin_loss [as alias]
import torch
import torch.nn.functional as F

# A method of an apex test class: self.dtype is set by the test harness.
def test_soft_margin_loss(self):
    inp = torch.randn(32, 128, device='cuda', dtype=self.dtype, requires_grad=True)
    target = torch.randn(32, 128, device='cuda', dtype=self.dtype, requires_grad=False)
    output = F.soft_margin_loss(inp, target, size_average=None, reduce=None, reduction='mean')
Author: NVIDIA | Project: apex | Source: test_pyprof_nvtx.py (6 lines)
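
size_average and reduce are the legacy reduction flags, deprecated in favor of reduction; passing None for both, as above, simply defers to reduction. A CPU sketch of the equivalent modern call (tensor shapes are illustrative):

import torch
import torch.nn.functional as F

inp = torch.randn(32, 128, requires_grad=True)
target = torch.randn(32, 128).sign()                # soft margin targets are usually +/-1
loss = F.soft_margin_loss(inp, target, reduction='mean')
loss.backward()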


Note: The torch.nn.functional.soft_margin_loss examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from community-contributed open-source projects, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code, and do not republish without permission.