This article collects typical usage examples of the Python method torch.nn.functional.cross_entropy. If you are unsure what functional.cross_entropy does, how to call it, or want worked examples, the curated code samples below should help. You can also explore further usage of the containing module, torch.nn.functional.
The following 15 code examples of functional.cross_entropy are shown, ordered by popularity by default.
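As a quick orientation before the examples: F.cross_entropy takes raw, unnormalized logits of shape (N, C) and integer class indices of shape (N,), fusing log-softmax and negative log-likelihood in one call. A minimal sketch with random tensors (not taken from any of the projects below):

import torch
import torch.nn.functional as F

logits = torch.randn(4, 10)            # (N, C): raw scores, no softmax applied
targets = torch.randint(0, 10, (4,))   # (N,): integer class indices in [0, C)

loss = F.cross_entropy(logits, targets)                          # scalar, averaged over the batch
per_sample = F.cross_entropy(logits, targets, reduction='none')  # shape (4,), one loss per sample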
Example 1: _add_losses
# Required import: from torch.nn import functional [as alias]
# or: from torch.nn.functional import cross_entropy [as alias]
def _add_losses(self, sigma_rpn=3.0):
    # classification loss
    image_prob = self._predictions["image_prob"]
    # assert ((image_prob.data>=0).sum()+(image_prob.data<=1).sum())==image_prob.data.size(1)*2, image_prob
    # assert ((self._labels.data>=0).sum()+(self._labels.data<=1).sum())==self._labels.data.size(1)*2, self._labels
    cross_entropy = F.binary_cross_entropy(image_prob.clamp(0, 1), self._labels)
    fast_loss = self._add_losses_fast()

    self._losses['wsddn_loss'] = cross_entropy
    self._losses['fast_loss'] = fast_loss

    loss = cross_entropy + fast_loss
    self._losses['total_loss'] = loss

    for k in self._losses.keys():
        self._event_summaries[k] = self._losses[k]

    return loss
Author: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, File: network.py
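A note on the clamp(0, 1) above: F.binary_cross_entropy expects probabilities in [0, 1], and the clamp presumably guards against small floating-point overshoots in image_prob, which would otherwise raise an error. A toy sketch with made-up values illustrating the guard:

import torch
import torch.nn.functional as F

image_prob = torch.tensor([0.3, 1.0 + 1e-6, -1e-6])            # tiny overshoots outside [0, 1]
labels = torch.tensor([0., 1., 0.])
loss = F.binary_cross_entropy(image_prob.clamp(0, 1), labels)  # clamping makes the call valid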
Example 2: distribution_focal_loss
# Required import: from torch.nn import functional [as alias]
# or: from torch.nn.functional import cross_entropy [as alias]
def distribution_focal_loss(pred, label):
    r"""Distribution Focal Loss (DFL) is from `Generalized Focal Loss: Learning
    Qualified and Distributed Bounding Boxes for Dense Object Detection
    <https://arxiv.org/abs/2006.04388>`_.

    Args:
        pred (torch.Tensor): Predicted general distribution of bounding boxes
            (before softmax) with shape (N, n+1), n is the max value of the
            integral set `{0, ..., n}` in paper.
        label (torch.Tensor): Target distance label for bounding boxes with
            shape (N,).

    Returns:
        torch.Tensor: Loss tensor with shape (N,).
    """
    dis_left = label.long()
    dis_right = dis_left + 1
    weight_left = dis_right.float() - label
    weight_right = label - dis_left.float()
    loss = F.cross_entropy(pred, dis_left, reduction='none') * weight_left \
        + F.cross_entropy(pred, dis_right, reduction='none') * weight_right
    return loss
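A hypothetical usage sketch for the function above (shapes and values assumed, not from the paper's codebase). Each continuous target y in [0, n) is split between its two neighbouring integers, and the loss is the correspondingly weighted sum of two cross-entropy terms:

import torch
import torch.nn.functional as F

n = 16                           # max value of the integral set {0, ..., n}
pred = torch.randn(4, n + 1)     # (N, n+1) raw logits over the integral set
label = torch.rand(4) * n        # (N,) continuous distance targets in [0, n)
loss = distribution_focal_loss(pred, label)
print(loss.shape)                # torch.Size([4]): one loss per sample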
Example 3: loss
# Required import: from torch.nn import functional [as alias]
# or: from torch.nn.functional import cross_entropy [as alias]
def loss(self, anchor_objectnesses: Tensor, anchor_transformers: Tensor,
         gt_anchor_objectnesses: Tensor, gt_anchor_transformers: Tensor,
         batch_size: int, batch_indices: Tensor) -> Tuple[Tensor, Tensor]:
    cross_entropies = torch.empty(batch_size, dtype=torch.float, device=anchor_objectnesses.device)
    smooth_l1_losses = torch.empty(batch_size, dtype=torch.float, device=anchor_transformers.device)

    for batch_index in range(batch_size):
        selected_indices = (batch_indices == batch_index).nonzero().view(-1)

        cross_entropy = F.cross_entropy(input=anchor_objectnesses[selected_indices],
                                        target=gt_anchor_objectnesses[selected_indices])

        fg_indices = gt_anchor_objectnesses[selected_indices].nonzero().view(-1)
        smooth_l1_loss = beta_smooth_l1_loss(input=anchor_transformers[selected_indices][fg_indices],
                                             target=gt_anchor_transformers[selected_indices][fg_indices],
                                             beta=self._anchor_smooth_l1_loss_beta)

        cross_entropies[batch_index] = cross_entropy
        smooth_l1_losses[batch_index] = smooth_l1_loss

    return cross_entropies, smooth_l1_losses
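The per-image bookkeeping above reduces to selecting each image's anchors via batch_indices before calling F.cross_entropy. A stripped-down sketch with dummy data (all shapes and counts assumed):

import torch
import torch.nn.functional as F

anchor_objectnesses = torch.randn(10, 2)         # 10 anchors, 2 classes (background / foreground)
gt_objectnesses = torch.randint(0, 2, (10,))     # per-anchor objectness labels
batch_indices = torch.tensor([0] * 6 + [1] * 4)  # which image each anchor came from

for b in range(2):
    sel = (batch_indices == b).nonzero().view(-1)
    ce = F.cross_entropy(anchor_objectnesses[sel], gt_objectnesses[sel])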
Example 4: nll
# Required import: from torch.nn import functional [as alias]
# or: from torch.nn.functional import cross_entropy [as alias]
def nll(self, logits, data):
    """Calculates -log p(data), given logits (the conditionals).

    Args:
        logits: [batch size, ncols+1, d_model].
        data: [batch size, ncols].

    Returns:
        nll: [batch size].
    """
    if data.dtype != torch.long:
        data = data.long()
    nll = torch.zeros(logits.size()[0], device=logits.device)
    for i in range(self.nin):
        logits_i = self.logits_for_col(i, logits)
        ce = F.cross_entropy(logits_i, data[:, i], reduction='none')
        nll += ce
    return nll
Example 5: nll
# Required import: from torch.nn import functional [as alias]
# or: from torch.nn.functional import cross_entropy [as alias]
def nll(self, logits, data):
    """Calculates -log p(data), given logits (the conditionals).

    Args:
        logits: [batch size, hidden] where hidden can either be sum(dom
            sizes), or emb_dims.
        data: [batch size, nin].

    Returns:
        nll: [batch size].
    """
    if data.dtype != torch.long:
        data = data.long()
    nll = torch.zeros(logits.size()[0], device=logits.device)
    for i in range(self.nin):
        logits_i = self.logits_for_col(i, logits)
        nll += F.cross_entropy(logits_i, data[:, i], reduction='none')
    return nll
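Both nll variants rely on the same identity: for a factorized model p(x) = prod_i p(x_i | x_<i), the negative log-likelihood of a row is the sum of its per-column cross-entropies, which is why the reduction='none' losses are accumulated per sample. A self-contained sketch with dummy logits standing in for logits_for_col:

import torch
import torch.nn.functional as F

data = torch.randint(0, 5, (8, 3))                   # 8 rows, 3 columns, domain size 5
col_logits = [torch.randn(8, 5) for _ in range(3)]   # stand-in for logits_for_col(i, logits)

nll = torch.zeros(8)
for i, logits_i in enumerate(col_logits):
    nll += F.cross_entropy(logits_i, data[:, i], reduction='none')
print(nll.shape)                                     # torch.Size([8])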
Example 6: __call__
# Required import: from torch.nn import functional [as alias]
# or: from torch.nn.functional import cross_entropy [as alias]
def __call__(self, proposals, keypoint_logits):
    heatmaps = []
    valid = []
    for proposals_per_image in proposals:
        kp = proposals_per_image.get_field("keypoints")
        heatmaps_per_image, valid_per_image = project_keypoints_to_heatmap(
            kp, proposals_per_image, self.discretization_size
        )
        heatmaps.append(heatmaps_per_image.view(-1))
        valid.append(valid_per_image.view(-1))

    keypoint_targets = cat(heatmaps, dim=0)
    valid = cat(valid, dim=0).to(dtype=torch.uint8)
    valid = torch.nonzero(valid).squeeze(1)

    # torch.mean (in binary_cross_entropy_with_logits) doesn't
    # accept empty tensors, so handle it separately
    if keypoint_targets.numel() == 0 or len(valid) == 0:
        return keypoint_logits.sum() * 0

    N, K, H, W = keypoint_logits.shape
    keypoint_logits = keypoint_logits.view(N * K, H * W)

    keypoint_loss = F.cross_entropy(keypoint_logits[valid], keypoint_targets[valid])
    return keypoint_loss
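The reshape at the end is the key step: each of the N * K keypoints becomes a classification problem over the H * W heatmap locations, with the target being the flattened index of the ground-truth cell. A toy sketch with assumed shapes:

import torch
import torch.nn.functional as F

N, K, H, W = 2, 17, 56, 56                            # e.g. 17 COCO keypoints on a 56x56 heatmap
keypoint_logits = torch.randn(N, K, H, W).view(N * K, H * W)
keypoint_targets = torch.randint(0, H * W, (N * K,))  # flattened index of the GT location
loss = F.cross_entropy(keypoint_logits, keypoint_targets)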
Example 7: loss_single
# Required import: from torch.nn import functional [as alias]
# or: from torch.nn.functional import cross_entropy [as alias]
def loss_single(self, cls_score, bbox_pred, labels, label_weights,
                bbox_targets, bbox_weights, num_total_samples, cfg):
    loss_cls_all = F.cross_entropy(
        cls_score, labels, reduction='none') * label_weights
    pos_inds = (labels > 0).nonzero().view(-1)
    neg_inds = (labels == 0).nonzero().view(-1)

    num_pos_samples = pos_inds.size(0)
    num_neg_samples = cfg.neg_pos_ratio * num_pos_samples
    if num_neg_samples > neg_inds.size(0):
        num_neg_samples = neg_inds.size(0)
    topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples)
    loss_cls_pos = loss_cls_all[pos_inds].sum()
    loss_cls_neg = topk_loss_cls_neg.sum()
    loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples

    loss_bbox = weighted_smoothl1(
        bbox_pred,
        bbox_targets,
        bbox_weights,
        beta=cfg.smoothl1_beta,
        avg_factor=num_total_samples)
    return loss_cls[None], loss_bbox
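The classification branch above is SSD-style online hard negative mining: every positive anchor contributes, but only the neg_pos_ratio hardest negatives (those with the largest per-anchor losses, selected with topk) are kept. A toy sketch assuming neg_pos_ratio = 3:

import torch
import torch.nn.functional as F

cls_score = torch.randn(20, 4)                 # 20 anchors, 4 classes (0 = background)
labels = torch.randint(0, 4, (20,))
loss_all = F.cross_entropy(cls_score, labels, reduction='none')

pos_inds = (labels > 0).nonzero().view(-1)
neg_inds = (labels == 0).nonzero().view(-1)
num_neg = min(3 * pos_inds.numel(), neg_inds.numel())  # cap hard negatives at 3x the positives
hard_neg_loss, _ = loss_all[neg_inds].topk(num_neg)
loss_cls = (loss_all[pos_inds].sum() + hard_neg_loss.sum()) / max(pos_inds.numel(), 1)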
Example 8: train
# Required import: from torch.nn import functional [as alias]
# or: from torch.nn.functional import cross_entropy [as alias]
def train(epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        output = model(data)
        loss = F.cross_entropy(output, target)
        pred = output.data.max(1, keepdim=True)[1]
        loss.backward()
        if args.sr:
            updateBN()
        BN_grad_zero()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.1f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.data[0]))
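Note that this example targets pre-0.4 PyTorch: Variable wrapping is no longer needed, and loss.data[0] raises an error on current versions. A minimal modern equivalent of the core step, assuming model, optimizer, and train_loader are defined as above:

for data, target in train_loader:
    optimizer.zero_grad()
    loss = F.cross_entropy(model(data), target)
    loss.backward()
    optimizer.step()
    running_loss = loss.item()   # .item() replaces the old .data[0]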
Example 9: test
# Required import: from torch.nn import functional [as alias]
# or: from torch.nn.functional import cross_entropy [as alias]
def test():
    model.eval()
    test_loss = 0
    correct = 0
    for data, target in test_loader:
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data, volatile=True), Variable(target)
        output = model(data)
        test_loss += F.cross_entropy(output, target, size_average=False).data[0]  # sum up batch loss
        pred = output.data.max(1, keepdim=True)[1]  # get the index of the max log-probability
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.1f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
    return correct / float(len(test_loader.dataset))
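The same vintage caveat applies here: volatile=True, size_average=False, and .data[0] are all pre-0.4 idioms. A sketch of the modern equivalents for the evaluation loop:

with torch.no_grad():   # replaces Variable(..., volatile=True)
    output = model(data)
    # reduction='sum' replaces size_average=False; .item() replaces .data[0]
    test_loss += F.cross_entropy(output, target, reduction='sum').item()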
Example 10: train
# Required import: from torch.nn import functional [as alias]
# or: from torch.nn.functional import cross_entropy [as alias]
def train(epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        output = model(data)
        loss = F.cross_entropy(output, target)
        pred = output.data.max(1, keepdim=True)[1]
        loss.backward()
        if args.sr:
            updateBN()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.1f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.data[0]))
Example 11: __call__
# Required import: from torch.nn import functional [as alias]
# or: from torch.nn.functional import cross_entropy [as alias]
def __call__(self, proposals, keypoint_logits):
    heatmaps = []
    valid = []
    for proposals_per_image in proposals:
        kp = proposals_per_image.get_field("keypoints")
        heatmaps_per_image, valid_per_image = project_keypoints_to_heatmap(
            kp, proposals_per_image, self.discretization_size
        )
        heatmaps.append(heatmaps_per_image.view(-1))
        valid.append(valid_per_image.view(-1))

    keypoint_targets = cat(heatmaps, dim=0)
    valid = cat(valid, dim=0).to(dtype=torch.bool)
    valid = torch.nonzero(valid).squeeze(1)

    # torch.mean (in binary_cross_entropy_with_logits) doesn't
    # accept empty tensors, so handle it separately
    if keypoint_targets.numel() == 0 or len(valid) == 0:
        return keypoint_logits.sum() * 0

    N, K, H, W = keypoint_logits.shape
    keypoint_logits = keypoint_logits.view(N * K, H * W)

    keypoint_loss = F.cross_entropy(keypoint_logits[valid], keypoint_targets[valid])
    return keypoint_loss
Example 12: test
# Required import: from torch.nn import functional [as alias]
# or: from torch.nn.functional import cross_entropy [as alias]
def test():
    net.eval()
    loss_avg = 0.0
    correct = 0
    for batch_idx, (data, target) in enumerate(test_loader):
        data, target = V(data.cuda(), volatile=True), \
                       V(target.cuda(), volatile=True)

        # forward
        output = net(data)
        loss = F.cross_entropy(output, target)

        # accuracy
        pred = output.data.max(1)[1]
        correct += pred.eq(target.data).sum()

        # test loss average
        loss_avg += loss.data[0]

    state['test_loss'] = loss_avg / len(test_loader)
    state['test_accuracy'] = correct / len(test_loader.dataset)
Example 13: test
# Required import: from torch.nn import functional [as alias]
# or: from torch.nn.functional import cross_entropy [as alias]
def test():
    net.eval()
    loss_avg = 0.0
    correct = 0
    for batch_idx, (data, target) in enumerate(test_loader):
        data, target = torch.autograd.Variable(data.cuda(), volatile=True), \
                       torch.autograd.Variable(target.cuda(), volatile=True)

        # forward
        output = net(data)
        loss = F.cross_entropy(output, target)

        # accuracy
        pred = output.data.max(1)[1]
        correct += pred.eq(target.data).sum()

        # test loss average
        loss_avg += loss.data[0]

    state['test_loss'] = loss_avg / len(test_loader)
    state['test_accuracy'] = correct / len(test_loader.dataset)
Example 14: train_step
# Required import: from torch.nn import functional [as alias]
# or: from torch.nn.functional import cross_entropy [as alias]
def train_step(self, blobs, train_op):
    self.forward(blobs['data'], blobs['im_info'], blobs['boxes'], blobs['labels'])
    cross_entropy, total_loss = self._losses['wsddn_loss'].data[0], \
                                self._losses['total_loss'].data[0]
    # utils.timer.timer.tic('backward')
    train_op.zero_grad()
    self._losses['total_loss'].backward()
    # utils.timer.timer.toc('backward')
    train_op.step()

    self.delete_intermediate_states()

    return cross_entropy, total_loss
Author: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, File: network.py
Example 15: train_step_with_summary
# Required import: from torch.nn import functional [as alias]
# or: from torch.nn.functional import cross_entropy [as alias]
def train_step_with_summary(self, blobs, train_op):
    self.forward(blobs['data'], blobs['im_info'], blobs['boxes'], blobs['labels'])
    cross_entropy, total_loss = self._losses['wsddn_loss'].data[0], \
                                self._losses['total_loss'].data[0]
    train_op.zero_grad()
    self._losses['total_loss'].backward()
    train_op.step()
    summary = self._run_summary_op()

    self.delete_intermediate_states()

    return cross_entropy, total_loss, summary
Author: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, File: network.py