This page collects typical usage examples of the Python method torch.nn.functional.smooth_l1_loss. If you have been wondering what functional.smooth_l1_loss does, how to call it, and where it applies, the curated code examples below should help. You can also explore further usage examples from the module it belongs to, torch.nn.functional.
The following presents 15 code examples of functional.smooth_l1_loss, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
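As a quick refresher before the examples: smooth L1 (Huber-style) loss is quadratic for small residuals and linear for large ones. With PyTorch's default beta=1.0 it equals 0.5*x^2 when |x| < 1 and |x| - 0.5 otherwise. The following minimal sketch (hypothetical tensors, not taken from any example below) checks that piecewise definition against F.smooth_l1_loss:

import torch
import torch.nn.functional as F

pred = torch.tensor([0.2, 1.5, -3.0])
target = torch.zeros(3)

# Manual piecewise definition (beta = 1.0, PyTorch's default)
diff = (pred - target).abs()
manual = torch.where(diff < 1.0, 0.5 * diff ** 2, diff - 0.5)

assert torch.allclose(manual.mean(), F.smooth_l1_loss(pred, target))            # reduction='mean' (default)
assert torch.allclose(manual, F.smooth_l1_loss(pred, target, reduction='none')) # per-element losses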
Example 1: compute_rpn_bbox_loss
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import smooth_l1_loss [as alias]
def compute_rpn_bbox_loss(rpn_target_deltas, rpn_pred_deltas, rpn_match):
"""
:param rpn_target_deltas: (b, n_positive_anchors, (dy, dx, (dz), log(dh), log(dw), (log(dd)))).
    Uses 0 padding to fill in unused bbox deltas.
:param rpn_pred_deltas: predicted deltas from RPN. (b, n_anchors, (dy, dx, (dz), log(dh), log(dw), (log(dd))))
:param rpn_match: (n_anchors). [-1, 0, 1] for negative, neutral, and positive matched anchors.
:return: loss: torch 1D tensor.
"""
    # torch.nonzero(...).size() contains a 0 iff there are no positive anchors
    if 0 not in torch.nonzero(rpn_match == 1).size():
indices = torch.nonzero(rpn_match == 1).squeeze(1)
# Pick bbox deltas that contribute to the loss
rpn_pred_deltas = rpn_pred_deltas[indices]
# Trim target bounding box deltas to the same length as rpn_bbox.
target_deltas = rpn_target_deltas[:rpn_pred_deltas.size()[0], :]
# Smooth L1 loss
loss = F.smooth_l1_loss(rpn_pred_deltas, target_deltas)
else:
loss = torch.FloatTensor([0]).cuda()
return loss
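A hedged usage sketch with made-up shapes (it assumes, as in this repo's calling code, that the deltas have already been flattened over the batch dimension; it stays on CPU, so only the positive-anchor branch is exercised):

rpn_match = torch.tensor([1, -1, 0, 1, 0])     # 5 anchors, 2 positive
rpn_pred_deltas = torch.randn(5, 4)            # predicted deltas per anchor
rpn_target_deltas = torch.zeros(5, 4)          # zero-padded target deltas
rpn_target_deltas[:2] = torch.randn(2, 4)      # deltas for the 2 positives
loss = compute_rpn_bbox_loss(rpn_target_deltas, rpn_pred_deltas, rpn_match)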
Example 2: compute_mrcnn_bbox_loss
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import smooth_l1_loss [as alias]
def compute_mrcnn_bbox_loss(mrcnn_target_deltas, mrcnn_pred_deltas, target_class_ids):
"""
    :param mrcnn_target_deltas: (n_sampled_rois, (dy, dx, (dz), log(dh), log(dw), (log(dd))))
    :param mrcnn_pred_deltas: (n_sampled_rois, n_classes, (dy, dx, (dz), log(dh), log(dw), (log(dd))))
:param target_class_ids: (n_sampled_rois)
:return: loss: torch 1D tensor.
"""
if 0 not in torch.nonzero(target_class_ids > 0).size():
positive_roi_ix = torch.nonzero(target_class_ids > 0)[:, 0]
positive_roi_class_ids = target_class_ids[positive_roi_ix].long()
target_bbox = mrcnn_target_deltas[positive_roi_ix, :].detach()
pred_bbox = mrcnn_pred_deltas[positive_roi_ix, positive_roi_class_ids, :]
loss = F.smooth_l1_loss(pred_bbox, target_bbox)
else:
loss = torch.FloatTensor([0]).cuda()
return loss
Example 3: calc_priorities
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import smooth_l1_loss [as alias]
def calc_priorities(self, target_net, transitions, alpha=0.6, gamma=0.999,
device=torch.device("cpu")):
batch = utils.Transition(*zip(*transitions))
next_state_batch = torch.stack(batch.next_state).to(device)
state_batch = torch.stack(batch.state).to(device)
action_batch = torch.stack(batch.action).to(device)
reward_batch = torch.stack(batch.reward).to(device)
done_batch = torch.stack(batch.done).to(device)
state_action_values = self.forward(state_batch).gather(1, action_batch)
next_action = self.forward(next_state_batch).argmax(dim=1).unsqueeze(1)
next_state_values = target_net(next_state_batch).gather(1, next_action).detach()
expected_state_action_values = (next_state_values * gamma * (1.0 - done_batch)) \
+ reward_batch
    delta = F.smooth_l1_loss(state_action_values, expected_state_action_values, reduction='none')
prios = (delta.abs() + 1e-5).pow(alpha)
return delta, prios.detach()
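Note the design choice here: reduction='none' keeps one smooth L1 value per transition, so the returned delta can be turned directly into per-sample priorities for prioritized experience replay (priority proportional to (|delta| + epsilon)^alpha), while the scalar training loss is computed elsewhere.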
Example 4: loss_per_level
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import smooth_l1_loss [as alias]
def loss_per_level(self, estDisp, gtDisp):
N, C, H, W = estDisp.shape
scaled_gtDisp = gtDisp
scale = 1.0
if gtDisp.shape[-2] != H or gtDisp.shape[-1] != W:
# compute scale per level and scale gtDisp
scale = gtDisp.shape[-1] / (W * 1.0)
scaled_gtDisp = gtDisp / scale
scaled_gtDisp = self.scale_func(scaled_gtDisp, (H, W))
# mask for valid disparity
# (start disparity, max disparity / scale)
# Attention: the invalid disparity of KITTI is set as 0, be sure to mask it out
mask = (scaled_gtDisp > self.start_disp) & (scaled_gtDisp < (self.max_disp / scale))
if mask.sum() < 1.0:
        print('SmoothL1 loss: no point has a disparity within ({}, {})!'.format(self.start_disp,
                                                                                self.max_disp / scale))
loss = (torch.abs(estDisp - scaled_gtDisp) * mask.float()).mean()
return loss
# smooth l1 loss
loss = F.smooth_l1_loss(estDisp[mask], scaled_gtDisp[mask], reduction='mean')
return loss
Example 5: finish_episode
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import smooth_l1_loss [as alias]
def finish_episode():
    # NOTE: this example uses the legacy stochastic-Variable API (action.reinforce,
    # autograd.Variable), which was removed in PyTorch 0.4; it is kept as it
    # appears in the original source and will not run on modern PyTorch.
R = 0
saved_actions = model.saved_actions
value_loss = 0
rewards = []
for r in model.rewards[::-1]:
R = r + args.gamma * R
rewards.insert(0, R)
rewards = torch.Tensor(rewards)
rewards = (rewards - rewards.mean()) / (rewards.std() + np.finfo(np.float32).eps)
for (action, value), r in zip(saved_actions, rewards):
reward = r - value.data[0,0]
action.reinforce(reward)
value_loss += F.smooth_l1_loss(value, Variable(torch.Tensor([r])))
optimizer.zero_grad()
final_nodes = [value_loss] + list(map(lambda p: p.action, saved_actions))
gradients = [torch.ones(1)] + [None] * len(saved_actions)
autograd.backward(final_nodes, gradients)
optimizer.step()
del model.rewards[:]
del model.saved_actions[:]
Example 6: _reg_loss
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import smooth_l1_loss [as alias]
def _reg_loss(regr, gt_regr, mask):
    ''' Smooth L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
'''
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
regr = regr * mask
gt_regr = gt_regr * mask
    regr_loss = nn.functional.smooth_l1_loss(regr, gt_regr, reduction='sum')
regr_loss = regr_loss / (num + 1e-4)
return regr_loss
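A quick sanity-check call with hypothetical tensors, using the shapes documented in the docstring (assumes torch and torch.nn as nn are imported):

regr = torch.randn(2, 3, 4)                   # (batch, max_objects, dim)
gt_regr = torch.randn(2, 3, 4)
mask = torch.tensor([[1, 1, 0], [1, 0, 0]])   # 3 annotated objects in total
loss = _reg_loss(regr, gt_regr, mask)         # summed masked loss / (3 + 1e-4)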
Example 7: train
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import smooth_l1_loss [as alias]
def train(self, replay_buffer):
# Sample replay buffer
state, action, next_state, reward, done = replay_buffer.sample()
# Compute the target Q value
    with torch.no_grad():
        # NOTE: `done` is assumed to act as a not-done mask here (0 at terminal
        # states), matching the replay buffer convention of the original repo.
        target_Q = reward + done * self.discount * self.Q_target(next_state).max(1, keepdim=True)[0]
# Get current Q estimate
current_Q = self.Q(state).gather(1, action)
# Compute Q loss
Q_loss = F.smooth_l1_loss(current_Q, target_Q)
# Optimize the Q
self.Q_optimizer.zero_grad()
Q_loss.backward()
self.Q_optimizer.step()
# Update target network by polyak or full copy every X iterations.
self.iterations += 1
self.maybe_update_target()
Example 8: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import smooth_l1_loss [as alias]
def forward(self, predictions, targets):
print('+++++++++++++++++++++++++++++++++++')
cout, rout = predictions
""" class """
class_pred = cout.squeeze().permute(1,2,0).reshape(-1, 2)
class_target = targets[:, 0].long()
pos_index = list(np.where(class_target == 1)[0])
neg_index = list(np.where(class_target == 0)[0])
class_target = class_target[pos_index + neg_index]
class_pred = class_pred[pos_index + neg_index]
    closs = F.cross_entropy(class_pred, class_target, reduction='none')
closs = torch.div(torch.sum(closs[np.where(class_target != -100)]), 64)
reg_pred = rout.view(-1, 4)
reg_target = targets[:, 1:] #[1445, 4]
    rloss = F.smooth_l1_loss(reg_pred, reg_target, reduction='none')
rloss = torch.div(torch.sum(rloss[np.where(class_target == 1)]), 16)
#debug vis pos anchor
loss = closs + rloss
return closs, rloss, loss, reg_pred, reg_target, pos_index, neg_index
Example 9: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import smooth_l1_loss [as alias]
def forward(self, predictions, targets):
print('+++++++++++++++++++++++++++++++++++++++++++++++++++')
cout, rout = predictions
""" class """
class_pred, class_target = cout, targets[:, 0].long()
pos_index , neg_index = list(np.where(class_target == 1)[0]), list(np.where(class_target == 0)[0])
pos_num, neg_num = len(pos_index), len(neg_index)
class_pred, class_target = class_pred[pos_index + neg_index], class_target[pos_index + neg_index]
    closs = F.cross_entropy(class_pred, class_target, reduction='none')
closs = torch.div(torch.sum(closs), 64)
""" regression """
reg_pred = rout
reg_target = targets[:, 1:]
    rloss = F.smooth_l1_loss(reg_pred, reg_target, reduction='none')  # (1445, 4)
rloss = torch.div(torch.sum(rloss, dim = 1), 4)
rloss = torch.div(torch.sum(rloss[pos_index]), 16)
loss = closs + rloss
return closs, rloss, loss, reg_pred, reg_target, pos_index, neg_index
Example 10: compute_rpn_bbox_loss
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import smooth_l1_loss [as alias]
def compute_rpn_bbox_loss(rpn_pred_deltas, rpn_target_deltas, rpn_match):
"""
:param rpn_target_deltas: (b, n_positive_anchors, (dy, dx, (dz), log(dh), log(dw), (log(dd)))).
    Uses 0 padding to fill in unused bbox deltas.
:param rpn_pred_deltas: predicted deltas from RPN. (b, n_anchors, (dy, dx, (dz), log(dh), log(dw), (log(dd))))
:param rpn_match: (n_anchors). [-1, 0, 1] for negative, neutral, and positive matched anchors.
:return: loss: torch 1D tensor.
"""
    if 0 not in torch.nonzero(rpn_match == 1).size():
indices = torch.nonzero(rpn_match == 1).squeeze(1)
# Pick bbox deltas that contribute to the loss
rpn_pred_deltas = rpn_pred_deltas[indices]
# Trim target bounding box deltas to the same length as rpn_bbox.
target_deltas = rpn_target_deltas[:rpn_pred_deltas.size()[0], :]
# Smooth L1 loss
loss = F.smooth_l1_loss(rpn_pred_deltas, target_deltas)
else:
loss = torch.FloatTensor([0]).cuda()
return loss
Example 11: compute_mrcnn_bbox_loss
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import smooth_l1_loss [as alias]
def compute_mrcnn_bbox_loss(mrcnn_pred_deltas, mrcnn_target_deltas, target_class_ids):
"""
    :param mrcnn_target_deltas: (n_sampled_rois, (dy, dx, (dz), log(dh), log(dw), (log(dd))))
    :param mrcnn_pred_deltas: (n_sampled_rois, n_classes, (dy, dx, (dz), log(dh), log(dw), (log(dd))))
:param target_class_ids: (n_sampled_rois)
:return: loss: torch 1D tensor.
"""
    if 0 not in torch.nonzero(target_class_ids > 0).size():
positive_roi_ix = torch.nonzero(target_class_ids > 0)[:, 0]
positive_roi_class_ids = target_class_ids[positive_roi_ix].long()
target_bbox = mrcnn_target_deltas[positive_roi_ix, :].detach()
pred_bbox = mrcnn_pred_deltas[positive_roi_ix, positive_roi_class_ids, :]
loss = F.smooth_l1_loss(pred_bbox, target_bbox)
else:
loss = torch.FloatTensor([0]).cuda()
return loss
Example 12: compute_mrcnn_regression_loss
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import smooth_l1_loss [as alias]
def compute_mrcnn_regression_loss(tasks, pred, target, target_class_ids):
"""regression loss is a distance metric between target vector and predicted regression vector.
    :param pred: (n_sampled_rois, n_classes, [n_rg_feats if real regression, n_rg_bins if rg_bin task])
:param target: (n_sampled_rois, [n_rg_feats or n_rg_bins])
:return: differentiable loss, torch 1D tensor on cuda
"""
    if 0 not in target.shape and 0 not in torch.nonzero(target_class_ids > 0).shape:
positive_roi_ix = torch.nonzero(target_class_ids > 0)[:, 0]
positive_roi_class_ids = target_class_ids[positive_roi_ix].long()
target = target[positive_roi_ix].detach()
pred = pred[positive_roi_ix, positive_roi_class_ids]
if "regression_bin" in tasks:
loss = F.cross_entropy(pred, target.long())
else:
loss = F.smooth_l1_loss(pred, target)
#loss = F.mse_loss(pred, target)
else:
loss = torch.FloatTensor([0.]).cuda()
return loss
############################################################
# Detection Layer
############################################################
Example 13: compute_bbox_loss
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import smooth_l1_loss [as alias]
def compute_bbox_loss(target_deltas, pred_deltas, anchor_matches):
"""
:param target_deltas: (b, n_positive_anchors, (dy, dx, (dz), log(dh), log(dw), (log(dd)))).
Uses 0 padding to fill in unused bbox deltas.
:param pred_deltas: predicted deltas from bbox regression head. (b, n_anchors, (dy, dx, (dz), log(dh), log(dw), (log(dd))))
:param anchor_matches: tensor (n_anchors). value in [-1, 0, class_ids] for negative, neutral, and positive matched anchors.
i.e., positively matched anchors are marked by class_id >0
:return: loss: torch 1D tensor.
"""
    if 0 not in torch.nonzero(anchor_matches > 0).shape:
        indices = torch.nonzero(anchor_matches > 0).squeeze(1)
# Pick bbox deltas that contribute to the loss
pred_deltas = pred_deltas[indices]
# Trim target bounding box deltas to the same length as pred_deltas.
target_deltas = target_deltas[:pred_deltas.shape[0], :].detach()
# Smooth L1 loss
loss = F.smooth_l1_loss(pred_deltas, target_deltas)
else:
loss = torch.FloatTensor([0]).cuda()
return loss
Example 14: compute_rg_loss
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import smooth_l1_loss [as alias]
def compute_rg_loss(tasks, target, pred, anchor_matches):
"""
    :param target: (b, n_positive_anchors, (dy, dx, (dz), log(dh), log(dw), (log(dd)))).
    Uses 0 padding to fill in unused bbox deltas.
    :param pred: predicted deltas from the bbox regression head. (b, n_anchors, (dy, dx, (dz), log(dh), log(dw), (log(dd))))
:param anchor_matches: (n_anchors). [-1, 0, 1] for negative, neutral, and positive matched anchors.
:return: loss: torch 1D tensor.
"""
    if 0 not in target.shape and 0 not in torch.nonzero(anchor_matches > 0).shape:
        indices = torch.nonzero(anchor_matches > 0).squeeze(1)
# Pick rgs that contribute to the loss
pred = pred[indices]
# Trim target
target = target[:pred.shape[0]].detach()
if 'regression_bin' in tasks:
loss = F.cross_entropy(pred, target.long())
else:
loss = F.smooth_l1_loss(pred, target)
else:
loss = torch.FloatTensor([0]).cuda()
return loss
Example 15: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import smooth_l1_loss [as alias]
def forward(self, confidence, predicted_locations, labels, gt_locations):
"""Compute classification loss and smooth l1 loss.
Args:
        confidence (batch_size, num_priors, num_classes): class predictions.
        predicted_locations (batch_size, num_priors, 4): predicted box locations.
        labels (batch_size, num_priors): real labels of all the priors.
        gt_locations (batch_size, num_priors, 4): real boxes corresponding to all the priors.
"""
num_classes = confidence.size(2)
with torch.no_grad():
        # background-class loss, -log p(class 0); used to rank negatives for hard negative mining
loss = -F.log_softmax(confidence, dim=2)[:, :, 0]
mask = box_utils.hard_negative_mining(loss, labels, self.neg_pos_ratio)
confidence = confidence[mask, :]
    classification_loss = F.cross_entropy(confidence.reshape(-1, num_classes), labels[mask], reduction='sum')
pos_mask = labels > 0
predicted_locations = predicted_locations[pos_mask, :].reshape(-1, 4)
gt_locations = gt_locations[pos_mask, :].reshape(-1, 4)
    smooth_l1_loss = F.smooth_l1_loss(predicted_locations, gt_locations, reduction='sum')
num_pos = gt_locations.size(0)
return smooth_l1_loss/num_pos, classification_loss/num_pos
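box_utils.hard_negative_mining is not shown on this page. As a minimal sketch of what SSD implementations commonly assume it does (keep every positive prior plus the neg_pos_ratio hardest negatives per image, ranked by their background loss), it might look like this:

def hard_negative_mining(loss, labels, neg_pos_ratio):
    # loss: (batch_size, num_priors) background-class loss used for ranking
    # labels: (batch_size, num_priors) ground-truth class ids
    pos_mask = labels > 0
    num_pos = pos_mask.long().sum(dim=1, keepdim=True)
    num_neg = num_pos * neg_pos_ratio
    loss[pos_mask] = -float('inf')              # exclude positives from the ranking
    _, indexes = loss.sort(dim=1, descending=True)
    _, orders = indexes.sort(dim=1)
    neg_mask = orders < num_neg                 # the num_neg highest-loss negatives
    return pos_mask | neg_mask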