This article collects typical usage examples of the Python function torch.nn.functional.multilabel_soft_margin_loss. If you are unsure what functional.multilabel_soft_margin_loss does, how to call it, or how it is used in practice, the curated examples below should help. You can also browse further usage examples from its containing module, torch.nn.functional.
Seven code examples of functional.multilabel_soft_margin_loss are shown below, drawn from open-source projects and sorted by popularity by default.
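Before the project examples, here is a minimal, self-contained sketch of the call itself. multilabel_soft_margin_loss takes raw logits and 0/1 multi-label targets of the same (N, C) shape and averages a sigmoid cross-entropy over the C classes; the shapes below are made up purely for illustration.

import torch
import torch.nn.functional as F

# Multi-label setup: each of the N samples may belong to several of C classes.
logits = torch.randn(4, 5)                     # raw scores, shape (N, C)
targets = torch.randint(0, 2, (4, 5)).float()  # 0/1 labels, shape (N, C)

# Equivalent to -1/C * sum_c [y_c * log(sigmoid(x_c)) + (1 - y_c) * log(1 - sigmoid(x_c))],
# averaged over the batch.
loss = F.multilabel_soft_margin_loss(logits, targets)
print(loss.item())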
Example 1: validate
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import multilabel_soft_margin_loss [as alias]
def validate(model, data_loader):
    print('\nvalidating ... ', flush=True, end='')

    val_loss_meter = pyutils.AverageMeter('loss')

    model.eval()

    with torch.no_grad():
        for pack in data_loader:
            img = pack[1]
            label = pack[2].cuda(non_blocking=True)
            # broadcast image-level labels to match the pooled (N, C, 1, 1) logits
            label = label.unsqueeze(2).unsqueeze(3)

            x = model(img)
            x = F.adaptive_avg_pool2d(x, (1, 1))
            loss = F.multilabel_soft_margin_loss(x, label)

            val_loss_meter.add({'loss': loss.item()})

    model.train()

    print('loss:', val_loss_meter.pop('loss'))
    return
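The unsqueeze/pooling pair in this example is a common pattern in weakly supervised segmentation code: the model emits a per-class activation map, which is average-pooled to (N, C, 1, 1), and the image-level labels are reshaped to match. A small sketch with hypothetical shapes (the loss applies elementwise, so the trailing singleton dimensions are harmless):

# Hypothetical shapes: 2 images, 20 classes, 7x7 activation maps.
x = torch.randn(2, 20, 7, 7)                  # per-class activation maps (N, C, H, W)
label = torch.randint(0, 2, (2, 20)).float()  # image-level labels (N, C)

x = F.adaptive_avg_pool2d(x, (1, 1))          # -> (N, C, 1, 1) pooled logits
label = label.unsqueeze(2).unsqueeze(3)       # -> (N, C, 1, 1), matching x
loss = F.multilabel_soft_margin_loss(x, label)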
Example 2: calculate_loss
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import multilabel_soft_margin_loss [as alias]
def calculate_loss(answer, pred, method):
    """
    answer = [batch, 3129]
    pred = [batch, 3129]
    """
    if method == 'binary_cross_entropy_with_logits':
        loss = F.binary_cross_entropy_with_logits(pred, answer) * config.max_answers
    elif method == 'soft_cross_entropy':
        nll = -F.log_softmax(pred, dim=1)
        loss = (nll * answer).sum(dim=1).mean()  # this is worse than binary_cross_entropy_with_logits
    elif method == 'KL_divergence':
        pred = F.softmax(pred, dim=1)
        kl = ((answer / (pred + 1e-12)) + 1e-12).log()
        loss = (kl * answer).sum(1).mean()
    elif method == 'multi_label_soft_margin':
        loss = F.multilabel_soft_margin_loss(pred, answer)
    else:
        # fail loudly instead of falling through and returning an undefined `loss`
        raise ValueError('undefined loss function: %s' % method)
    return loss
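A hypothetical driver for calculate_loss. The function reads config.max_answers as a module-level name; here it is assumed to be the VQA answer-vocabulary size (3129, per the docstring) and is stubbed with a SimpleNamespace, which works when calculate_loss is defined in the same script:

import types

# stand-in for the project's config module
config = types.SimpleNamespace(max_answers=3129)

pred = torch.randn(8, 3129)    # raw answer logits
answer = torch.rand(8, 3129)   # soft answer scores in [0, 1]

for method in ('binary_cross_entropy_with_logits',
               'soft_cross_entropy',
               'multi_label_soft_margin'):
    print(method, calculate_loss(answer, pred, method).item())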
Example 3: validate
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import multilabel_soft_margin_loss [as alias]
def validate(model, data_loader):
    print('\nvalidating ... ', flush=True, end='')

    val_loss_meter = pyutils.AverageMeter('loss')

    model.eval()

    with torch.no_grad():
        for pack in data_loader:
            img = pack[1]
            label = pack[2].cuda(non_blocking=True)

            x = model(img)
            loss = F.multilabel_soft_margin_loss(x, label)

            val_loss_meter.add({'loss': loss.item()})

    model.train()

    print('loss:', val_loss_meter.pop('loss'))
    return
Example 4: validate
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import multilabel_soft_margin_loss [as alias]
def validate(model, data_loader):
    print('validating ... ', flush=True, end='')

    val_loss_meter = pyutils.AverageMeter('loss1', 'loss2')

    model.eval()

    with torch.no_grad():
        for pack in data_loader:
            img = pack['img']
            label = pack['label'].cuda(non_blocking=True)

            x = model(img)
            loss1 = F.multilabel_soft_margin_loss(x, label)

            val_loss_meter.add({'loss1': loss1.item()})

    model.train()

    print('loss: %.4f' % (val_loss_meter.pop('loss1')))
    return
Example 5: multilabel_soft_margin_loss
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import multilabel_soft_margin_loss [as alias]
def multilabel_soft_margin_loss(
        input: Tensor,
        target: Tensor,
        weight: Optional[Tensor] = None,
        size_average: bool = True,
        reduce: bool = True,
        difficult_samples: bool = False) -> Tensor:
    """Multilabel soft margin loss."""
    if difficult_samples:
        # label  1: positive samples
        # label  0: difficult samples
        # label -1: negative samples
        gt_label = target.clone()
        gt_label[gt_label == 0] = 1
        gt_label[gt_label == -1] = 0
    else:
        gt_label = target
    return F.multilabel_soft_margin_loss(input, gt_label, weight, size_average, reduce)
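Note that size_average and reduce are the pre-1.0 reduction flags; recent PyTorch versions deprecate them in favor of a single reduction argument. The difficult_samples branch maps a three-way label encoding onto the 0/1 targets the underlying loss expects, as this small sketch shows (the ordering of the two assignments matters: zeros are promoted to 1 before the -1 labels are mapped to 0):

# Three-way encoding: 1 = positive, 0 = difficult, -1 = negative.
target = torch.tensor([[1., 0., -1., 1.]])

gt_label = target.clone()
gt_label[gt_label == 0] = 1    # difficult samples are counted as positives
gt_label[gt_label == -1] = 0   # negatives become 0
print(gt_label)                # tensor([[1., 1., 0., 1.]])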
Example 6: loss
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import multilabel_soft_margin_loss [as alias]
def loss(self, proposal_classes: Tensor, gt_proposal_classes: Tensor,
         batch_size, batch_indices) -> Tensor:
    # one multi-label loss value per image in the batch
    cross_entropies = torch.zeros(batch_size, dtype=torch.float,
                                  device=proposal_classes.device)
    for batch_index in range(batch_size):
        selected_indices = (batch_indices == batch_index).nonzero().view(-1)
        input = proposal_classes[selected_indices]
        target = gt_proposal_classes[selected_indices]
        if torch.numel(input) == 0 or torch.numel(target) == 0:
            # no proposals for this image; leave its loss at zero
            continue
        assert torch.numel(input) == torch.numel(target)
        cross_entropy = F.multilabel_soft_margin_loss(input=input, target=target,
                                                      reduction="mean")
        cross_entropies[batch_index] = cross_entropy
    return cross_entropies
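A sketch of how the method groups proposals by image, with made-up inputs (5 proposals spread over a batch of 2 images, each scored against 4 classes); batch_indices maps every proposal to its image:

proposal_classes = torch.randn(5, 4)                       # per-proposal class logits
gt_proposal_classes = torch.randint(0, 2, (5, 4)).float()  # per-proposal 0/1 labels
batch_indices = torch.tensor([0, 0, 0, 1, 1])              # proposal -> image mapping

for batch_index in range(2):
    selected = (batch_indices == batch_index).nonzero().view(-1)
    per_image = F.multilabel_soft_margin_loss(proposal_classes[selected],
                                              gt_proposal_classes[selected],
                                              reduction="mean")
    print(batch_index, per_image.item())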
Example 7: multilabel_soft_margin
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import multilabel_soft_margin_loss [as alias]
def multilabel_soft_margin(y_pred, y_true):
    return F.multilabel_soft_margin_loss(y_pred, y_true)
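As a closing check, the functional form should agree with the nn.MultiLabelSoftMarginLoss module and with a manually averaged sigmoid cross-entropy:

y_pred = torch.randn(3, 6)
y_true = torch.randint(0, 2, (3, 6)).float()

functional = F.multilabel_soft_margin_loss(y_pred, y_true)
module = torch.nn.MultiLabelSoftMarginLoss()(y_pred, y_true)
manual = -(y_true * F.logsigmoid(y_pred)
           + (1 - y_true) * F.logsigmoid(-y_pred)).mean()

assert torch.allclose(functional, module)
assert torch.allclose(functional, manual)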