This article collects typical usage examples of the Python method torch.nn.functional.multi_margin_loss. If you are unsure how functional.multi_margin_loss is used in practice, or want to see it in real code, the curated examples below should help. You can also explore further usage examples of the torch.nn.functional module that this method belongs to.
The following shows 5 code examples of functional.multi_margin_loss, ordered by popularity by default.
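As a quick orientation before the examples, here is a minimal, self-contained sketch of the basic call; the tensor shapes and values are illustrative assumptions, not taken from any example below. F.multi_margin_loss expects class scores of shape (N, C) and integer class targets of shape (N,), and by default returns the mean multi-class hinge loss over the batch.

import torch
import torch.nn.functional as F

scores = torch.tensor([[0.1, 0.8, 0.3],
                       [0.6, 0.2, 0.9]])   # (N, C) class scores
target = torch.tensor([1, 2])              # (N,) correct class indices

# mean over the batch of max(0, margin - score[target] + score[other]) terms
loss = F.multi_margin_loss(scores, target, p=1, margin=1.0)
print(loss.item())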
Example 1: loss
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import multi_margin_loss [as alias]
def loss(self, scores, true_pos, lamb=1e-7):
    loss = F.multi_margin_loss(scores, true_pos, margin=self.margin)
    if self.use_local_only:
        return loss

    # regularization
    X = F.normalize(self.rel_embs)
    diff = (X.view(self.n_rels, 1, -1) - X.view(1, self.n_rels, -1)).pow(2).sum(dim=2).add_(1e-5).sqrt()
    diff = diff * (diff < 1).float()
    loss -= torch.sum(diff).mul(lamb)

    X = F.normalize(self.ew_embs)
    diff = (X.view(self.n_rels, 1, -1) - X.view(1, self.n_rels, -1)).pow(2).sum(dim=2).add_(1e-5).sqrt()
    diff = diff * (diff < 1).float()
    loss -= torch.sum(diff).mul(lamb)

    return loss
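The second half of Example 1 is an embedding-spread regularizer rather than part of the margin loss itself: the sum of pairwise distances between normalized embedding rows, clipped to pairs closer than 1, is subtracted from the loss, so minimizing the loss pushes nearby embeddings apart while pairs at distance 1 or more contribute nothing. The following standalone sketch shows that term in isolation; n_rels, dim, and lamb are chosen only for illustration.

import torch
import torch.nn.functional as F

n_rels, dim, lamb = 4, 16, 1e-7
rel_embs = torch.randn(n_rels, dim)          # stand-in for self.rel_embs

X = F.normalize(rel_embs)                                    # unit-norm rows
diff = (X.view(n_rels, 1, -1) - X.view(1, n_rels, -1)) \
    .pow(2).sum(dim=2).add_(1e-5).sqrt()                     # pairwise distances
diff = diff * (diff < 1).float()                             # keep only close pairs
penalty = -lamb * diff.sum()   # subtracted from the loss: larger spread, lower loss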
Example 2: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import multi_margin_loss [as alias]
def forward(self, predictions, target):
    loss = Variable(torch.zeros(1))
    target_index_var = Variable(torch.LongTensor([0]))
    if torch.cuda.is_available():
        loss = loss.cuda()
        target_index_var = target_index_var.cuda()

    target_sorted, target_indices = torch.sort(target, dim=-1, descending=True)
    predictions = predictions.gather(1, target_indices)
    margins = DEFAULT_MARGIN * target_sorted.data
    # margins = margins.clamp(max=1.0, min=0.5)
    for sample_index in range(target_indices.size(0)):
        target_index = 0
        while target_index < min(target_indices.size(1), 10) and \
                (target_sorted[sample_index, target_index].data[0] > MIN_TARGET_VALUE):
            loss += F.multi_margin_loss(predictions[sample_index, target_index:],
                                        target_index_var,
                                        margin=margins[sample_index, target_index],
                                        size_average=False)
            target_index += 1
    return loss
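Example 2 builds a list-wise ranking loss: targets are sorted in descending order, predictions are permuted to match, and for each of the top-ranked positions (at most 10, and only while the sorted target value exceeds MIN_TARGET_VALUE) a multi_margin_loss is applied that treats the current rank as the correct class among all lower-ranked candidates, with a per-position margin scaled by the target value. The sketch below reproduces that idea for a single sample; the tensors, the 0.01 cutoff, and the 0.5 margin scale are illustrative assumptions standing in for MIN_TARGET_VALUE and DEFAULT_MARGIN.

import torch
import torch.nn.functional as F

preds = torch.randn(1, 6)                   # one sample, 6 candidates
target = torch.rand(1, 6)                   # soft relevance scores

target_sorted, order = torch.sort(target, dim=-1, descending=True)
preds = preds.gather(1, order)              # align predictions with rank order
zero = torch.zeros(1, dtype=torch.long)     # "class 0" = current rank position

loss = preds.new_zeros(1)
for k in range(min(order.size(1), 10)):
    if target_sorted[0, k].item() <= 0.01:  # stand-in for MIN_TARGET_VALUE
        break
    # rank k must beat every lower-ranked candidate by a target-scaled margin
    loss = loss + F.multi_margin_loss(preds[:, k:], zero,
                                      margin=0.5 * target_sorted[0, k].item(),
                                      reduction='sum')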
Example 3: loss
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import multi_margin_loss [as alias]
def loss(self, scores, true_pos):
    loss = F.multi_margin_loss(scores, true_pos, margin=self.margin)
    return loss
Example 4: multi_margin
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import multi_margin_loss [as alias]
def multi_margin(y_pred, y_true):
    return F.multi_margin_loss(y_pred, y_true)
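Example 4 is a thin wrapper around the default call (p=1, margin=1.0, mean reduction). A hypothetical call, with shapes chosen purely for illustration, would look like this:

import torch

scores = torch.randn(8, 5)            # 8 samples, 5 classes
labels = torch.randint(0, 5, (8,))    # integer class targets
print(multi_margin(scores, labels))   # scalar mean hinge loss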
Example 5: fast_rcnn_losses
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import multi_margin_loss [as alias]
def fast_rcnn_losses(cls_score, bbox_pred, label_int32, bbox_targets,
                     bbox_inside_weights, bbox_outside_weights):
    device_id = cls_score.get_device()
    rois_label = Variable(torch.from_numpy(label_int32.astype('int64'))).cuda(device_id)

    if cfg.FAST_RCNN.LOSS_TYPE in ['cross_entropy', 'triplet_softmax']:
        if cfg.FAST_RCNN.LOSS_TYPE == 'triplet_softmax':
            cls_score = cls_score * 3  # This scaling is borrowed from Ji Zhang's large-scale relationship detection
        if not cfg.MODEL.TAGGING:
            loss_cls = F.cross_entropy(cls_score, rois_label)
        else:
            loss_cls = F.cross_entropy(cls_score, rois_label, ignore_index=0)
        if cfg.FAST_RCNN.LOSS_TYPE == 'triplet_softmax':
            cls_score = cls_score / 3
    else:
        if cfg.FAST_RCNN.LOSS_TYPE == 'multi_margin':
            loss_cls = F.multi_margin_loss(cls_score, rois_label,
                                           margin=cfg.FAST_RCNN.MARGIN,
                                           reduction='none')
        elif cfg.FAST_RCNN.LOSS_TYPE == 'max_margin':
            cls_score_with_high_target = cls_score.clone()
            cls_score_with_high_target.scatter_(1, rois_label.view(-1, 1), 1e10)  # This makes sure the following variable always has the target in the first column
            target_and_offender_index = cls_score_with_high_target.sort(1, True)[1][:, :2]  # Target and the largest score except the target
            loss_cls = F.multi_margin_loss(cls_score.gather(1, target_and_offender_index), rois_label.data * 0,
                                           margin=cfg.FAST_RCNN.MARGIN,
                                           reduction='none')
        loss_cls = loss_cls[rois_label > 0]
        loss_cls = loss_cls.mean() if loss_cls.numel() > 0 else loss_cls.new_tensor(0)

    # Secretly log the mean similarity!
    if cfg.FAST_RCNN.LOSS_TYPE in ['triplet_softmax', 'max_margin', 'multi_margin']:
        loss_cls.mean_similarity = cls_score[rois_label > 0].gather(1, rois_label[rois_label > 0].unsqueeze(1)).mean().detach() / 3

    bbox_targets = Variable(torch.from_numpy(bbox_targets)).cuda(device_id)
    bbox_inside_weights = Variable(torch.from_numpy(bbox_inside_weights)).cuda(device_id)
    bbox_outside_weights = Variable(torch.from_numpy(bbox_outside_weights)).cuda(device_id)
    loss_bbox = net_utils.smooth_l1_loss(
        bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights)
    if cfg.MODEL.TAGGING:
        loss_bbox = torch.zeros_like(loss_bbox)

    # class accuracy
    cls_preds = cls_score.max(dim=1)[1].type_as(rois_label)
    if not cfg.MODEL.TAGGING:
        accuracy_cls = cls_preds.eq(rois_label).float().mean(dim=0)
    else:
        accuracy_cls = cls_preds[rois_label > 0].eq(rois_label[rois_label > 0]).float().mean(dim=0)  # Ignore index 0

    return loss_cls, loss_bbox, accuracy_cls
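To make the 'max_margin' branch in Example 5 easier to follow, here is a minimal standalone sketch of the trick; the shapes and the 0.5 margin are assumptions, with cfg.FAST_RCNN.MARGIN playing that role in the original. Writing a huge value into the target column guarantees that, after sorting, column 0 of the index tensor is the target itself and column 1 is the highest-scoring non-target class, so the margin loss is computed over just those two columns with a target index of 0.

import torch
import torch.nn.functional as F

cls_score = torch.randn(4, 10)               # 4 RoIs, 10 classes
rois_label = torch.randint(0, 10, (4,))      # ground-truth class per RoI

boosted = cls_score.clone()
boosted.scatter_(1, rois_label.view(-1, 1), 1e10)      # target column wins the sort
top2 = boosted.sort(1, descending=True)[1][:, :2]      # [target, hardest offender]
pair = cls_score.gather(1, top2)                       # shape (4, 2), original scores
loss = F.multi_margin_loss(pair, torch.zeros_like(rois_label),
                           margin=0.5, reduction='none')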