This page collects typical usage examples of torch.nn.functional.binary_cross_entropy_with_logits in Python. If you have been wondering what functional.binary_cross_entropy_with_logits does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also read further about the containing module, torch.nn.functional.
Below are 15 code examples of functional.binary_cross_entropy_with_logits, sorted by popularity by default.
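As a baseline before the examples, here is a minimal, self-contained sketch of the function itself; it fuses the sigmoid and the binary cross-entropy into a single numerically stable call:

import torch
import torch.nn.functional as F

logits = torch.randn(4, 3)                      # raw scores; no sigmoid applied
targets = torch.randint(0, 2, (4, 3)).float()   # binary targets in {0, 1}

loss = F.binary_cross_entropy_with_logits(logits, targets)

# Numerically less stable two-step equivalent:
reference = F.binary_cross_entropy(torch.sigmoid(logits), targets)
assert torch.allclose(loss, reference, atol=1e-6)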
Example 1: _weighted_cross_entropy_loss

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import binary_cross_entropy_with_logits [as alias]
def _weighted_cross_entropy_loss(preds, edges):
    """Weighted cross-entropy loss, summed over pixels and averaged over the batch."""
    # Reference:
    #   hed/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp
    #   https://github.com/s9xie/hed/issues/7
    mask = (edges > 0.5).float()
    b, c, h, w = mask.shape
    num_pos = torch.sum(mask, dim=[1, 2, 3], keepdim=True).float()  # Shape: [b, 1, 1, 1].
    num_neg = c * h * w - num_pos                                   # Shape: [b, 1, 1, 1].
    # Class-balancing weights per pixel: positives get num_neg / N, negatives
    # num_pos / N; torch.where broadcasts the per-batch weights spatially.
    weight = torch.where(edges > 0.5,
                         num_neg / (num_pos + num_neg),
                         num_pos / (num_pos + num_neg))
    # Calculate loss.
    losses = F.binary_cross_entropy_with_logits(
        preds.float(), edges.float(), weight=weight, reduction='none')
    loss = torch.sum(losses) / b
    return loss
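A quick smoke test for the loss above (the tensor shapes here are assumed for illustration, not taken from the original repository):

preds = torch.randn(2, 1, 32, 32)                  # raw edge logits
edges = (torch.rand(2, 1, 32, 32) > 0.9).float()   # sparse binary edge map
print(_weighted_cross_entropy_loss(preds, edges))  # scalar tensor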
Example 2: weighted_cross_entropy_loss

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import binary_cross_entropy_with_logits [as alias]
def weighted_cross_entropy_loss(preds, edges):
    """Weighted cross-entropy loss, summed over pixels and averaged over the batch."""
    # Reference:
    #   hed/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp
    #   https://github.com/s9xie/hed/issues/7
    mask = (edges > 0.5).float()
    b, c, h, w = mask.shape
    num_pos = torch.sum(mask, dim=[1, 2, 3], keepdim=True).float()  # Shape: [b, 1, 1, 1].
    num_neg = c * h * w - num_pos                                   # Shape: [b, 1, 1, 1].
    # Per-batch class-balancing weights, broadcast over the spatial dims.
    # Note: edges must stay as raw binary maps; do not sigmoid preds here,
    # since binary_cross_entropy_with_logits expects raw logits.
    weight = torch.where(edges > 0.5,
                         num_neg / (num_pos + num_neg),
                         num_pos / (num_pos + num_neg))
    # Calculate loss.
    losses = F.binary_cross_entropy_with_logits(
        preds.float(), edges.float(), weight=weight, reduction='none')
    loss = torch.sum(losses) / b
    return loss
Example 3: py_sigmoid_focal_loss

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import binary_cross_entropy_with_logits [as alias]
def py_sigmoid_focal_loss(pred,
                          target,
                          weight,
                          gamma=2.0,
                          alpha=0.25,
                          reduction='mean'):
    pred_sigmoid = pred.sigmoid()
    target = target.type_as(pred)
    # pt here is (1 - p_t) in the focal loss paper's notation, so pt.pow(gamma)
    # is the modulating factor.
    pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
    weight = (alpha * target + (1 - alpha) * (1 - target)) * weight
    weight = weight * pt.pow(gamma)
    loss = F.binary_cross_entropy_with_logits(
        pred, target, reduction='none') * weight
    reduction_enum = F._Reduction.get_enum(reduction)
    # none: 0, mean: 1, sum: 2
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()
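For a single positive element the loss above reduces to the closed form -alpha * (1 - sigmoid(x))**gamma * log(sigmoid(x)); a short sanity check of that identity (values chosen arbitrarily):

x, t, w = torch.tensor([0.3]), torch.tensor([1.0]), torch.ones(1)
manual = -0.25 * (1 - torch.sigmoid(x)) ** 2 * torch.log(torch.sigmoid(x))
assert torch.allclose(py_sigmoid_focal_loss(x, t, w, reduction='sum'), manual)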
Example 4: train

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import binary_cross_entropy_with_logits [as alias]
def train():
    model.train()
    optimizer.zero_grad()
    x, pos_edge_index = data.x, data.train_pos_edge_index

    _edge_index, _ = remove_self_loops(pos_edge_index)
    pos_edge_index_with_self_loops, _ = add_self_loops(_edge_index,
                                                       num_nodes=x.size(0))
    # Sample one negative edge per positive edge; self-loops are added first so
    # that negative sampling never returns a self-loop.
    neg_edge_index = negative_sampling(
        edge_index=pos_edge_index_with_self_loops, num_nodes=x.size(0),
        num_neg_samples=pos_edge_index.size(1))

    link_logits = model(pos_edge_index, neg_edge_index)
    link_labels = get_link_labels(pos_edge_index, neg_edge_index)

    loss = F.binary_cross_entropy_with_logits(link_logits, link_labels)
    loss.backward()
    optimizer.step()

    return loss
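get_link_labels is used above but not shown; a plausible implementation, following the PyTorch Geometric link-prediction example this snippet comes from, is:

def get_link_labels(pos_edge_index, neg_edge_index):
    # 1 for every positive edge, 0 for every sampled negative edge.
    num_links = pos_edge_index.size(1) + neg_edge_index.size(1)
    link_labels = torch.zeros(num_links, dtype=torch.float)
    link_labels[:pos_edge_index.size(1)] = 1.
    return link_labels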
Example 5: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import binary_cross_entropy_with_logits [as alias]
def forward(self, inputs, dx_labels=None, rx_labels=None):
    # inputs: (B, 2, max_len); bert_pool: (B, hidden).
    # The all-zeros second argument serves as the token type ids.
    _, dx_bert_pool = self.bert(inputs[:, 0, :], torch.zeros(
        (inputs.size(0), inputs.size(2))).long().to(inputs.device))
    _, rx_bert_pool = self.bert(inputs[:, 1, :], torch.zeros(
        (inputs.size(0), inputs.size(2))).long().to(inputs.device))

    dx2dx, rx2dx, dx2rx, rx2rx = self.cls(dx_bert_pool, rx_bert_pool)
    # Return probabilities when no labels are given, otherwise the summed
    # multi-label BCE loss plus the probabilities.
    if rx_labels is None or dx_labels is None:
        return torch.sigmoid(dx2dx), torch.sigmoid(rx2dx), torch.sigmoid(dx2rx), torch.sigmoid(rx2rx)
    else:
        loss = F.binary_cross_entropy_with_logits(dx2dx, dx_labels) + \
               F.binary_cross_entropy_with_logits(rx2dx, dx_labels) + \
               F.binary_cross_entropy_with_logits(dx2rx, rx_labels) + \
               F.binary_cross_entropy_with_logits(rx2rx, rx_labels)
        return loss, torch.sigmoid(dx2dx), torch.sigmoid(rx2dx), torch.sigmoid(dx2rx), torch.sigmoid(rx2rx)
Example 6: focal_loss

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import binary_cross_entropy_with_logits [as alias]
def focal_loss(self, x, y):
    '''Focal loss.

    Args:
      x: (tensor) sized [N, D].
      y: (tensor) sized [N,].

    Return:
      (tensor) focal loss.
    '''
    alpha = 0.25
    gamma = 2
    # One-hot encode and cast to float, since BCE-with-logits expects
    # floating-point targets.
    t = F.one_hot(y, 1 + self.num_classes).float()  # [N, 21]
    t = t[:, 1:]                   # exclude background
    p = x.sigmoid()
    pt = p*t + (1-p)*(1-t)         # pt = p if t > 0 else 1-p
    w = alpha*t + (1-alpha)*(1-t)  # w = alpha if t > 0 else 1-alpha
    w = w * (1-pt).pow(gamma)
    return F.binary_cross_entropy_with_logits(x, t, w, reduction='sum')
Example 7: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import binary_cross_entropy_with_logits [as alias]
def forward(self, input, target):
    # Inputs and targets are assumed to be Batch x Classes.
    assert len(input.shape) == len(target.shape)
    assert input.size(0) == target.size(0)
    assert input.size(1) == target.size(1)

    # Compute the negative log-likelihood: BCE-with-logits returns -log(pt).
    logpt = - F.binary_cross_entropy_with_logits(input, target,
                                                 pos_weight=self.weight,
                                                 reduction=self.reduction)
    pt = torch.exp(logpt)

    # Compute the focal loss and apply the balance factor.
    focal_loss = -((1 - pt) ** self.gamma) * logpt
    balanced_focal_loss = self.balance_param * focal_loss
    return balanced_focal_loss
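The pt = exp(logpt) step relies on binary_cross_entropy_with_logits returning -log(pt) per element; a small standalone check of that identity with reduction='none' (note that with reduction='mean', as above, pt becomes the exponential of a batch-averaged log-likelihood rather than a per-element probability):

x = torch.tensor([0.5, -1.0])
t = torch.tensor([1.0, 0.0])
bce = F.binary_cross_entropy_with_logits(x, t, reduction='none')
pt = torch.sigmoid(x) * t + (1 - torch.sigmoid(x)) * (1 - t)
assert torch.allclose(torch.exp(-bce), pt)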
Example 8: binary_cross_entropy

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import binary_cross_entropy_with_logits [as alias]
def binary_cross_entropy(pred,
                         label,
                         weight=None,
                         reduction='mean',
                         avg_factor=None):
    if pred.dim() != label.dim():
        label, weight = _expand_binary_labels(label, weight, pred.size(-1))

    # Weighted element-wise losses.
    if weight is not None:
        weight = weight.float()
    loss = F.binary_cross_entropy_with_logits(
        pred, label.float(), weight, reduction='none')
    # Do the reduction for the weighted loss.
    loss = weight_reduce_loss(loss, reduction=reduction, avg_factor=avg_factor)

    return loss
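weight_reduce_loss is not shown on this page; a sketch consistent with mmdetection's helper of the same name, for reference:

def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    # Apply element-wise weights, then reduce.
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        if reduction == 'mean':
            return loss.mean()
        elif reduction == 'sum':
            return loss.sum()
        return loss
    # With avg_factor, only 'mean' (sum / avg_factor) and 'none' make sense.
    if reduction == 'mean':
        return loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss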
Example 9: py_sigmoid_focal_loss

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import binary_cross_entropy_with_logits [as alias]
def py_sigmoid_focal_loss(pred,
                          target,
                          weight=None,
                          gamma=2.0,
                          alpha=0.25,
                          reduction='mean',
                          avg_factor=None):
    pred_sigmoid = pred.sigmoid()
    target = target.type_as(pred)
    # pt here is (1 - p_t) in the paper's notation, so pt.pow(gamma) is the
    # modulating factor.
    pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
    focal_weight = (alpha * target + (1 - alpha) *
                    (1 - target)) * pt.pow(gamma)
    loss = F.binary_cross_entropy_with_logits(
        pred, target, reduction='none') * focal_weight
    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
    return loss
Example 10: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import binary_cross_entropy_with_logits [as alias]
def forward(self, input, target_is_real):
    if self.gan_type == 'LSGAN':
        if target_is_real:
            return torch.pow(torch.sigmoid(input) - 1, 2).mean()
        else:
            return torch.pow(torch.sigmoid(input), 2).mean()
    elif self.gan_type == 'vanillaGAN':
        input = input.view(-1)
        if target_is_real:
            return F.binary_cross_entropy_with_logits(
                input, gpu_wrapper(torch.ones(input.shape[0])))
        else:
            return F.binary_cross_entropy_with_logits(
                input, gpu_wrapper(torch.zeros(input.shape[0])))
    elif self.gan_type == 'WGAN_hinge':
        if target_is_real:
            return F.relu(1.0 - input).mean()
        else:
            return F.relu(input + 1.0).mean()
    else:
        raise ValueError('unsupported gan_type: {}'.format(self.gan_type))
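The 'vanillaGAN' branch is the standard non-saturating GAN discriminator loss; stripped of the repo-specific gpu_wrapper helper, it is just BCE-with-logits against constant targets:

logits = torch.randn(8)   # discriminator outputs for one batch
loss_real = F.binary_cross_entropy_with_logits(logits, torch.ones_like(logits))
loss_fake = F.binary_cross_entropy_with_logits(logits, torch.zeros_like(logits))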
Example 11: semantic_segmentation_loss

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import binary_cross_entropy_with_logits [as alias]
def semantic_segmentation_loss(self, segment_data, mask_t, class_t,
                               interpolation_mode='bilinear'):
    # Note: num_classes here is without the background class, i.e.
    # cfg.num_classes - 1.
    batch_size, num_classes, mask_h, mask_w = segment_data.size()
    loss_s = 0

    for idx in range(batch_size):
        cur_segment = segment_data[idx]
        cur_class_t = class_t[idx]

        with torch.no_grad():
            downsampled_masks = F.interpolate(
                mask_t[idx].unsqueeze(0), (mask_h, mask_w),
                mode=interpolation_mode, align_corners=False).squeeze(0)
            downsampled_masks = downsampled_masks.gt(0.5).float()

            # Construct the semantic segmentation target: per-class max over
            # all instance masks of that class.
            segment_t = torch.zeros_like(cur_segment, requires_grad=False)
            for obj_idx in range(downsampled_masks.size(0)):
                segment_t[cur_class_t[obj_idx]] = torch.max(
                    segment_t[cur_class_t[obj_idx]], downsampled_masks[obj_idx])

        loss_s += F.binary_cross_entropy_with_logits(
            cur_segment, segment_t, reduction='sum')

    return loss_s / mask_h / mask_w * cfg.semantic_segmentation_alpha
Example 12: binary_cross_entropy

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import binary_cross_entropy_with_logits [as alias]
def binary_cross_entropy(pred,
                         label,
                         weight=None,
                         reduction='mean',
                         avg_factor=None,
                         class_weight=None):
    """Calculate the binary CrossEntropy loss.

    Args:
        pred (torch.Tensor): The prediction with shape (N, 1).
        label (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        reduction (str, optional): The method used to reduce the loss.
            Options are "none", "mean" and "sum".
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        class_weight (list[float], optional): The weight for each class.

    Returns:
        torch.Tensor: The calculated loss.
    """
    if pred.dim() != label.dim():
        label, weight = _expand_binary_labels(label, weight, pred.size(-1))

    # Weighted element-wise losses: class_weight goes into BCE itself, while
    # the sample-wise weight is applied during reduction.
    if weight is not None:
        weight = weight.float()
    loss = F.binary_cross_entropy_with_logits(
        pred, label.float(), weight=class_weight, reduction='none')
    # Do the reduction for the weighted loss.
    loss = weight_reduce_loss(
        loss, weight, reduction=reduction, avg_factor=avg_factor)

    return loss
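_expand_binary_labels (used when pred and label ranks differ) is also not shown here; a sketch in the spirit of mmdetection's implementation, where class index 0 is treated as background:

def _expand_binary_labels(labels, label_weights, label_channels):
    # Turn (N,) class indices into (N, label_channels) one-hot binary targets.
    bin_labels = labels.new_zeros((labels.size(0), label_channels))
    inds = torch.nonzero(labels >= 1).squeeze()
    if inds.numel() > 0:
        bin_labels[inds, labels[inds] - 1] = 1
    if label_weights is None:
        bin_label_weights = None
    else:
        bin_label_weights = label_weights.view(-1, 1).expand(
            label_weights.size(0), label_channels)
    return bin_labels, bin_label_weights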
Example 13: mask_cross_entropy

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import binary_cross_entropy_with_logits [as alias]
def mask_cross_entropy(pred,
                       target,
                       label,
                       reduction='mean',
                       avg_factor=None,
                       class_weight=None):
    """Calculate the CrossEntropy loss for masks.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the number
            of classes.
        target (torch.Tensor): The learning label of the prediction.
        label (torch.Tensor): ``label`` indicates the class label of the mask's
            corresponding object. It is used to select the mask of the class
            the object belongs to when the mask prediction is not
            class-agnostic.
        reduction (str, optional): The method used to reduce the loss.
            Options are "none", "mean" and "sum".
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        class_weight (list[float], optional): The weight for each class.

    Returns:
        torch.Tensor: The calculated loss.
    """
    # TODO: handle these two reserved arguments.
    assert reduction == 'mean' and avg_factor is None
    num_rois = pred.size()[0]
    inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
    # Pick each ROI's mask prediction for its ground-truth class.
    pred_slice = pred[inds, label].squeeze(1)
    return F.binary_cross_entropy_with_logits(
        pred_slice, target, weight=class_weight, reduction='mean')[None]
Example 14: quality_focal_loss

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import binary_cross_entropy_with_logits [as alias]
def quality_focal_loss(pred, target, beta=2.0):
    r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning
    Qualified and Distributed Bounding Boxes for Dense Object Detection
    <https://arxiv.org/abs/2006.04388>`_.

    Args:
        pred (torch.Tensor): Predicted joint representation of classification
            and quality (IoU) estimation with shape (N, C), C is the number of
            classes.
        target (tuple([torch.Tensor])): Target category label with shape (N,)
            and target quality label with shape (N,).
        beta (float): The beta parameter for calculating the modulating factor.
            Defaults to 2.0.

    Returns:
        torch.Tensor: Loss tensor with shape (N,).
    """
    assert len(target) == 2, """target for QFL must be a tuple of two elements,
        including category label and quality label, respectively"""
    # label denotes the category id, score denotes the quality score.
    label, score = target

    # Negatives are supervised by a 0 quality score.
    pred_sigmoid = pred.sigmoid()
    scale_factor = pred_sigmoid
    zerolabel = scale_factor.new_zeros(pred.shape)
    loss = F.binary_cross_entropy_with_logits(
        pred, zerolabel, reduction='none') * scale_factor.pow(beta)

    # FG cat_id: [0, num_classes - 1], BG cat_id: num_classes.
    bg_class_ind = pred.size(1)
    pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)
    pos_label = label[pos].long()
    # Positives are supervised by the bbox quality (IoU) score.
    scale_factor = score[pos] - pred_sigmoid[pos, pos_label]
    loss[pos, pos_label] = F.binary_cross_entropy_with_logits(
        pred[pos, pos_label], score[pos],
        reduction='none') * scale_factor.abs().pow(beta)

    loss = loss.sum(dim=1, keepdim=False)
    return loss
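A shape-level usage sketch for quality_focal_loss (all values random and purely illustrative; bg_class_ind equals the number of classes C):

N, C = 8, 20
pred = torch.randn(N, C)
label = torch.randint(0, C + 1, (N,))  # label == C marks background
score = torch.rand(N)                  # IoU quality targets for the positives
loss = quality_focal_loss(pred, (label, score))  # shape (N,)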
Example 15: py_sigmoid_focal_loss

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import binary_cross_entropy_with_logits [as alias]
def py_sigmoid_focal_loss(pred,
                          target,
                          weight=None,
                          gamma=2.0,
                          alpha=0.25,
                          reduction='mean',
                          avg_factor=None):
    """PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the
            number of classes.
        target (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 2.0.
        alpha (float, optional): A balanced form for Focal Loss.
            Defaults to 0.25.
        reduction (str, optional): The method used to reduce the loss into
            a scalar. Defaults to 'mean'.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
    """
    pred_sigmoid = pred.sigmoid()
    target = target.type_as(pred)
    # pt here is (1 - p_t) in the paper's notation.
    pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
    focal_weight = (alpha * target + (1 - alpha) *
                    (1 - target)) * pt.pow(gamma)
    loss = F.binary_cross_entropy_with_logits(
        pred, target, reduction='none') * focal_weight
    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
    return loss