

Python functional.margin_ranking_loss method code examples

This article collects typical usage examples of the torch.nn.functional.margin_ranking_loss method in Python. If you are unsure what functional.margin_ranking_loss does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples from the containing module, torch.nn.functional.


Nine code examples of the functional.margin_ranking_loss method are shown below, sorted by popularity by default.
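Before the individual examples, here is a minimal, self-contained sketch of the call signature itself (not taken from any of the projects below; the values are made up for illustration):

import torch
import torch.nn.functional as F

# Two batches of scores to be ranked against each other.
input1 = torch.tensor([0.8, 0.2, 0.5])
input2 = torch.tensor([0.3, 0.6, 0.4])
# target = 1 means input1 should rank above input2; target = -1 means the reverse.
target = torch.ones(3)

# Element-wise: loss = max(0, -target * (input1 - input2) + margin)
loss = F.margin_ranking_loss(input1, input2, target, margin=0.2, reduction='mean')
print(loss)  # mean of [0.0, 0.6, 0.1] ≈ 0.2333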

Example 1: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import margin_ranking_loss [as alias]
def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor):
        """
        Calculate rank hinge loss.

        :param y_pred: Predicted result.
        :param y_true: Label.
        :return: Hinge loss computed by user-defined margin.
        """
        y_pos = y_pred[::(self.num_neg + 1), :]
        y_neg = []
        for neg_idx in range(self.num_neg):
            neg = y_pred[(neg_idx + 1)::(self.num_neg + 1), :]
            y_neg.append(neg)
        y_neg = torch.cat(y_neg, dim=-1)
        y_neg = torch.mean(y_neg, dim=-1, keepdim=True)
        y_true = torch.ones_like(y_pos)
        return F.margin_ranking_loss(
            y_pos, y_neg, y_true,
            margin=self.margin,
            reduction=self.reduction
        ) 
Developer: NTMC-Community, Project: MatchZoo-py, Lines: 23, Source: rank_hinge_loss.py
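The forward above expects predictions interleaved as one positive row followed by num_neg negative rows per query, which is what the strided slicing recovers. A hypothetical usage sketch (the interleaved layout and the num_neg and margin values are assumptions for illustration, not taken from the project):

import torch
import torch.nn.functional as F

num_neg = 2
# Rows alternate as [pos_0, neg_0a, neg_0b, pos_1, neg_1a, neg_1b]
y_pred = torch.tensor([[0.9], [0.1], [0.2],
                       [0.7], [0.6], [0.3]])

y_pos = y_pred[::(num_neg + 1), :]                     # rows 0 and 3: the positives
y_neg = torch.cat([y_pred[(i + 1)::(num_neg + 1), :]   # rows 1,4 and 2,5: the negatives
                   for i in range(num_neg)], dim=-1)
y_neg = y_neg.mean(dim=-1, keepdim=True)               # average negative score per query
loss = F.margin_ranking_loss(y_pos, y_neg, torch.ones_like(y_pos), margin=1.0)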

Example 2: ucir_ranking

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import margin_ranking_loss [as alias]
def ucir_ranking(logits, targets, n_classes, task_size, nb_negatives=2, margin=0.2):
    # Delegates to the reference GitHub implementation; because of this early
    # return, the code below is kept for reference only and is never executed.
    return github_ucir_ranking_mr(logits, targets, n_classes, task_size, nb_negatives, margin)

    # Ranking loss maximizing the inter-class separation between old & new:

    # 1. Fetching from the batch only samples from the batch that belongs
    #    to old classes:
    old_indexes = targets.lt(n_classes - 1)
    old_logits = logits[old_indexes]
    old_targets = targets[old_indexes]

    # 2. Getting positive values, aka ground-truth's logit predictions:
    old_values = old_logits[torch.arange(len(old_logits)), old_targets]
    old_values = old_values.repeat(nb_negatives, 1).t().contiguous().view(-1)

    # 3. Getting top-k negative values:
    nb_old_classes = n_classes - task_size
    negative_indexes = old_logits[..., nb_old_classes:].argsort(dim=1, descending=True)[
        ..., :nb_negatives] + nb_old_classes
    new_values = old_logits[torch.arange(len(old_logits)).view(-1, 1), negative_indexes].view(-1)

    return F.margin_ranking_loss(
        old_values, new_values, -torch.ones(len(old_values)).to(logits.device), margin=margin
    ) 
Developer: arthurdouillard, Project: incremental_learning.pytorch, Lines: 25, Source: base.py
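For reference, the sign of the target tensor decides which input the loss pushes above the other; a quick stand-alone check (illustrative values only, independent of the function above):

import torch
import torch.nn.functional as F

x1 = torch.tensor([2.0])
x2 = torch.tensor([1.0])

# target = +1: loss = max(0, -(x1 - x2) + margin), zero once x1 exceeds x2 by the margin
print(F.margin_ranking_loss(x1, x2, torch.ones(1), margin=0.5))    # tensor(0.)
# target = -1: loss = max(0, (x1 - x2) + margin), penalised until x2 exceeds x1 by the margin
print(F.margin_ranking_loss(x1, x2, -torch.ones(1), margin=0.5))   # tensor(1.5000)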

Example 3: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import margin_ranking_loss [as alias]
def forward(self, input1, input2, target):
        return self.loss_weight * F.margin_ranking_loss(
            input1,
            input2,
            target,
            margin=self.margin,
            reduction=self.reduction) 
Developer: open-mmlab, Project: mmfashion, Lines: 9, Source: margin_ranking_loss.py

Example 4: loss

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import margin_ranking_loss [as alias]
def loss(self, predictions, data):
        """
        Args:
            predictions (List): 
                - output (Tensor, shape [2*T, 2]): Positive and negative attention weights for each sample
                - loss_weigh (Tensor, shape [2*T, 1]): Loss weighting applied to each sampled frame
            data        (None) 

            T: number of sampled frames from video (default: 5)
        Return:
            Frame-wise weighting loss 
        """
        output, loss_weigh = predictions

        if self.loss_weighting or self.obj_interact: 
            rank_batch = F.margin_ranking_loss(output[:,0:1], output[:,1:2], 
                torch.ones(output.size()).type(output.data.type()), margin=self.ranking_margin, reduction='none')
            if self.loss_weighting and self.obj_interact:
                loss_weigh = (output[:, 0:1]+loss_weigh)/2. # avg
            elif self.loss_weighting:
                loss_weigh = output[:,0:1]
            else:
                loss_weigh = loss_weigh.unsqueeze(1)
            # ranking loss
            cls_loss = self.loss_factor*(rank_batch*loss_weigh).mean()+ \
                        (1-self.loss_factor)*-torch.log(2*loss_weigh).mean()
        else:
            # ranking loss
            cls_loss = F.margin_ranking_loss(output[:,0:1], output[:,1:2],
                torch.Tensor([[1],[1]]).type(output.data.type()), margin=self.ranking_margin)


        return cls_loss 
Developer: MichiganCOG, Project: ViP, Lines: 35, Source: losses.py
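The first branch above uses reduction='none' so that each sampled frame keeps its own hinge term, which is then reweighted before averaging. A stripped-down sketch of that pattern (shapes and weights are invented for illustration and do not come from the project):

import torch
import torch.nn.functional as F

pos = torch.tensor([[0.9], [0.4], [0.7]])      # positive attention score per frame
neg = torch.tensor([[0.2], [0.5], [0.1]])      # negative attention score per frame
weights = torch.tensor([[1.0], [0.5], [2.0]])  # per-frame loss weights

# reduction='none' keeps one hinge value per frame instead of averaging them
per_frame = F.margin_ranking_loss(pos, neg, torch.ones_like(pos),
                                  margin=0.1, reduction='none')
weighted_loss = (per_frame * weights).mean()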

Example 5: __call__

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import margin_ranking_loss [as alias]
def __call__(self, _, global_features, targets):
        if self._normalize_feature:
            global_features = normalize(global_features, axis=-1)

        dist_mat = euclidean_dist(global_features, global_features)

        N = dist_mat.size(0)
        is_pos = targets.expand(N, N).eq(targets.expand(N, N).t())
        is_neg = targets.expand(N, N).ne(targets.expand(N, N).t())

        if self._hard_mining:
            dist_ap, dist_an = hard_example_mining(dist_mat, is_pos, is_neg)
        else:
            dist_ap, dist_an = weighted_example_mining(dist_mat, is_pos, is_neg)

        y = dist_an.new().resize_as_(dist_an).fill_(1)

        if self._margin > 0:
            loss = F.margin_ranking_loss(dist_an, dist_ap, y, margin=self._margin)
        else:
            loss = F.soft_margin_loss(dist_an - dist_ap, y)
            if loss == float('Inf'): loss = F.margin_ranking_loss(dist_an, dist_ap, y, margin=0.3)

        return {
            "loss_triplet": loss * self._scale,
        } 
Developer: JDAI-CV, Project: fast-reid, Lines: 28, Source: metric_loss.py
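Because the target is all ones, the call above is the familiar triplet hinge max(0, dist_ap - dist_an + margin). A minimal stand-alone check (the distances are dummy values, not produced by euclidean_dist or hard_example_mining):

import torch
import torch.nn.functional as F

dist_an = torch.tensor([1.2, 0.8])   # anchor-negative distances
dist_ap = torch.tensor([0.9, 0.7])   # anchor-positive distances
y = torch.ones_like(dist_an)

loss = F.margin_ranking_loss(dist_an, dist_ap, y, margin=0.3)
# Equivalent formulation of the same quantity
same = torch.clamp(dist_ap - dist_an + 0.3, min=0).mean()
assert torch.allclose(loss, same)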

Example 6: validate

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import margin_ranking_loss [as alias]
def validate(epoch):
    t = time.time()
    model.eval()
    torch.set_grad_enabled(False)
    eloss = 0
    for batch_idx, instance in enumerate(valid_generator):
        pos, neg, pht_bef, ptt_bef, nht_bef, ntt_bef = instance
        pos = pos.to(device)
        neg = neg.to(device)
        # text information
        pht = list(map(lambda x:x.to(device),pht_bef[0:3]))
        ptt = list(map(lambda x:x.to(device),ptt_bef[0:3]))
        nht = list(map(lambda x:x.to(device),nht_bef[0:3]))
        ntt = list(map(lambda x:x.to(device),ntt_bef[0:3]))
        batch_nodes, batch_adj = get_subgraph(pos, train_triple_dict, graph)
        # get relative location according to the batch_nodes
        shifted_pos, shifted_neg = convert_index([pos, neg], batch_nodes)
        batch_nodes = torch.LongTensor(batch_nodes.tolist()).to(device)
        batch_adj = torch.from_numpy(batch_adj).to(device)
        shifted_pos = torch.LongTensor(shifted_pos).to(device)
        shifted_neg = torch.LongTensor(shifted_neg).to(device)
        score_pos = model(batch_nodes, batch_adj, pos, shifted_pos, pht[0], pht[1], pht[2],
                    ptt[0], ptt[1], ptt[2])
        score_neg = model(batch_nodes, batch_adj, neg, shifted_neg, nht[0], nht[1], nht[2],
                    ntt[0], ntt[1], ntt[2])
        loss_train = F.margin_ranking_loss(score_pos, score_neg, y, margin=args.margin)
        sys.stdout.write(
            '%d batches processed. current valid batch loss: %f\r' %
            (batch_idx, loss_train.item())
        )
        eloss += loss_train.item()
        del batch_nodes, batch_adj, shifted_pos, shifted_neg, pos, neg, pht_bef, ptt_bef, nht_bef, ntt_bef
        if batch_idx%500==0:
            gc.collect()
    print('Epoch: {:04d}'.format(epoch+1),
          'loss_valid: {:.4f}'.format(eloss/(batch_idx+1)),
          'time: {:.4f}s'.format(time.time() - t))

    return eloss 
Developer: EagleW, Project: PaperRobot, Lines: 41, Source: train.py

Example 7: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import margin_ranking_loss [as alias]
def forward(self, input, target):
        y_true = target.int().unsqueeze(-1)
        same_id = torch.eq(y_true, y_true.t()).type_as(input)

        pos_mask = same_id
        neg_mask = 1 - same_id

        def _mask_max(input_tensor, mask, axis=None, keepdims=False):
            input_tensor = input_tensor - 1e6 * (1 - mask)
            _max, _idx = torch.max(input_tensor, dim=axis, keepdim=keepdims)
            return _max, _idx

        def _mask_min(input_tensor, mask, axis=None, keepdims=False):
            input_tensor = input_tensor + 1e6 * (1 - mask)
            _min, _idx = torch.min(input_tensor, dim=axis, keepdim=keepdims)
            return _min, _idx

        # output[i, j] = || feature[i, :] - feature[j, :] ||_2
        dist_squared = torch.sum(input ** 2, dim=1, keepdim=True) + \
                       torch.sum(input.t() ** 2, dim=0, keepdim=True) - \
                       2.0 * torch.matmul(input, input.t())
        dist = dist_squared.clamp(min=1e-16).sqrt()

        pos_max, pos_idx = _mask_max(dist, pos_mask, axis=-1)
        neg_min, neg_idx = _mask_min(dist, neg_mask, axis=-1)

        # loss(x, y) = max(0, -y * (x1 - x2) + margin)
        y = torch.ones(same_id.size()[0]).to(DEVICE)
        return F.margin_ranking_loss(neg_min.float(),
                                     pos_max.float(),
                                     y,
                                     self.margin,
                                     self.size_average) 
Developer: levyfan, Project: reid-mgn, Lines: 35, Source: triplet.py
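The distance block above relies on the expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a·b. A quick sanity check of that identity against torch.cdist (a sketch on random data, independent of the class above):

import torch

feat = torch.randn(8, 16)
sq = feat.pow(2).sum(dim=1, keepdim=True)
dist_squared = sq + sq.t() - 2.0 * (feat @ feat.t())
dist = dist_squared.clamp(min=1e-16).sqrt()

assert torch.allclose(dist, torch.cdist(feat, feat), atol=1e-3)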

Example 8: test_margin_ranking_loss

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import margin_ranking_loss [as alias]
def test_margin_ranking_loss(self):
        inp1 = torch.randn(32, 128, device='cuda', dtype=self.dtype, requires_grad=True)
        inp2 = torch.randn(32, 128, device='cuda', dtype=self.dtype, requires_grad=True)
        target = (torch.randint(0, 1, (128,), device='cuda') - 1).type_as(inp1)
        output = F.margin_ranking_loss(inp1, inp2, target, margin=0, size_average=None, reduce=None, reduction='mean') 
Developer: NVIDIA, Project: apex, Lines: 7, Source: test_pyprof_nvtx.py

Example 9: train

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import margin_ranking_loss [as alias]
def train(epoch):
    print("Epoch", epoch)
    t = time.time()
    model.train(True)
    torch.set_grad_enabled(True)
    eloss = 0
    for batch_idx, instance in enumerate(train_generator):
        pos, neg, pht_bef, ptt_bef, nht_bef, ntt_bef = instance
        pos = pos.to(device)
        neg = neg.to(device)
        # text information
        pht = list(map(lambda x:x.to(device),pht_bef[0:3]))
        ptt = list(map(lambda x:x.to(device),ptt_bef[0:3]))
        nht = list(map(lambda x:x.to(device),nht_bef[0:3]))
        ntt = list(map(lambda x:x.to(device),ntt_bef[0:3]))
        batch_nodes, batch_adj = get_subgraph(pos, train_triple_dict, graph)
        # get relative location according to the batch_nodes
        shifted_pos, shifted_neg = convert_index([pos, neg], batch_nodes)
        batch_nodes = torch.LongTensor(batch_nodes.tolist()).to(device)
        batch_adj = torch.from_numpy(batch_adj).to(device)
        shifted_pos = torch.LongTensor(shifted_pos).to(device)
        shifted_neg = torch.LongTensor(shifted_neg).to(device)
        score_pos = model(batch_nodes, batch_adj, pos, shifted_pos, pht[0], pht[1], pht[2],
                    ptt[0], ptt[1], ptt[2])
        score_neg = model(batch_nodes, batch_adj, neg, shifted_neg, nht[0], nht[1], nht[2],
                    ntt[0], ntt[1], ntt[2])
        loss_train = F.margin_ranking_loss(score_pos, score_neg, y, margin=args.margin)
        sys.stdout.write(
            '%d batches processed. current train batch loss: %f\r' %
            (batch_idx, loss_train.item())
        )
        eloss += loss_train.item()
        loss_train.backward()
        del batch_nodes, batch_adj, shifted_pos, shifted_neg, pos, neg, pht_bef, ptt_bef, nht_bef, ntt_bef
        optimizer.step()
        if batch_idx%500==0:
            gc.collect()
    print('\n')
    print('Epoch: {:04d}'.format(epoch+1),
          'loss_train: {:.4f}'.format(eloss/(batch_idx+1)),
          'time: {:.4f}s'.format(time.time() - t))

    return eloss


# Valid 
Developer: EagleW, Project: PaperRobot, Lines: 48, Source: train.py


Note: The torch.nn.functional.margin_ranking_loss examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution and use should follow the corresponding project's license. Do not reproduce without permission.