This article collects typical usage examples of torch.nn.functional.l1_loss in Python. If you are wondering what functional.l1_loss does and how to use it, the curated code examples below may help. You can also browse further usage examples of torch.nn.functional, the module this function lives in.
Below are 15 code examples of functional.l1_loss, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
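Before the examples, here is a minimal, self-contained sketch of the call itself (tensor shapes are illustrative): F.l1_loss computes the mean absolute error between an input and a target of the same shape, and its reduction argument selects mean, sum, or per-element output.

import torch
import torch.nn.functional as F

pred = torch.randn(4, 10)
target = torch.randn(4, 10)

loss = F.l1_loss(pred, target)                        # scalar, reduction='mean' (default)
loss_sum = F.l1_loss(pred, target, reduction='sum')   # scalar, summed over all elements
loss_map = F.l1_loss(pred, target, reduction='none')  # elementwise, shape (4, 10)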
Example 1: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import l1_loss [as alias]
def forward(self, x, y=None):
    h1 = self.h1_weights(x)
    h1 = F.relu(h1)
    if self.with_dropout:
        h1 = F.dropout(h1, training=self.training)
    pred = self.h2_weights(h1)[:, 0]
    if y is not None:
        y = Variable(y)
        mse = F.mse_loss(pred, y)
        mae = F.l1_loss(pred, y)
        mae = mae.cpu().detach()
        return pred, mae, mse
    else:
        return pred
Example 2: linear_motion_loss
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import l1_loss [as alias]
def linear_motion_loss(outputs, mask):
    # batch_size = outputs.shape[0]
    s_len = outputs.shape[1]
    loss = outputs.new_zeros(1)
    for idx in range(2, s_len, 1):
        # restrict the loss to valid outputs
        # motion_mask: (B, 1), the mask of the current frame
        motion_mask = mask[:, idx].view(mask.shape[0], 1)
        # Loss: |(loc_t - loc_t-1) - (loc_t-1 - loc_t-2)|_1 for t in [2, s_len)
        # If loc_t is empty, mask it out via motion_mask
        curr_motion = (outputs[:, idx] - outputs[:, idx - 1]) * motion_mask
        past_motion = (outputs[:, idx - 1] - outputs[:, idx - 2]) * motion_mask
        loss += torch.mean(1.0 - F.cosine_similarity(past_motion, curr_motion))
        loss += F.l1_loss(past_motion, curr_motion)
    return loss / (torch.sum(mask))
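A quick sketch of how this might be invoked (the shapes are assumptions: a batch of 8 trajectories, each with 10 two-dimensional locations, and an all-valid mask):

import torch

outputs = torch.randn(8, 10, 2)  # (B, s_len, 2) predicted locations per frame
mask = torch.ones(8, 10)         # (B, s_len), 1 marks a valid frame
loss = linear_motion_loss(outputs, mask)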
Example 3: regression_loss
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import l1_loss [as alias]
def regression_loss(logit, target, loss_type='l1', weight=None):
    """
    Alpha reconstruction loss
    :param logit: predicted values
    :param target: ground-truth values
    :param loss_type: "l1" or "l2"
    :param weight: tensor with shape [N,1,H,W], a weight for each pixel
    :return: scalar loss
    """
    if weight is None:
        if loss_type == 'l1':
            return F.l1_loss(logit, target)
        elif loss_type == 'l2':
            return F.mse_loss(logit, target)
        else:
            raise NotImplementedError("Unimplemented loss type {}".format(loss_type))
    else:
        if loss_type == 'l1':
            return F.l1_loss(logit * weight, target * weight, reduction='sum') / (torch.sum(weight) + 1e-8)
        elif loss_type == 'l2':
            return F.mse_loss(logit * weight, target * weight, reduction='sum') / (torch.sum(weight) + 1e-8)
        else:
            raise NotImplementedError("Unimplemented loss type {}".format(loss_type))
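As a usage sketch (the tensor names, shapes, and the 0.1 threshold are illustrative assumptions), the weighted branch confines the loss to a region of interest:

import torch

pred = torch.rand(2, 1, 64, 64)  # predicted alpha, [N,1,H,W]
gt = torch.rand(2, 1, 64, 64)    # ground-truth alpha
weight = (gt > 0.1).float()      # hypothetical per-pixel weight map
loss = regression_loss(pred, gt, loss_type='l1', weight=weight)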
Example 4: photometricLossgray
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import l1_loss [as alias]
def photometricLossgray(colorImg_gray, depthImg, albedoImg_gray,
                        mask, lighting_est, device, K, thres):
    N, C, H, W = colorImg_gray.size()
    # color loss: estimate per-pixel shading from depth normals and SH lighting
    normals, _ = lighting.depthToNormalBatch(depthImg, device, K, thres)
    SHs = lighting.normalToSHBatch(normals, device)
    SHs = torch.reshape(SHs, (N, H * W, 9))
    lighting_est = torch.reshape(lighting_est, (N, 9, 1))
    # SHs [N, H*W, 9] x lighting [N, 9, 1] -> shading [N, H*W, 1] -> [N, H, W]
    color_shading = torch.bmm(SHs, lighting_est)
    color_shading = torch.reshape(color_shading, (N, H, W))
    mask1 = torch.reshape(mask[:, 0, :, :], (N, H, W))  # single-channel mask
    color_pre = mask1 * (color_shading * albedoImg_gray)  # N x H x W
    colorImg_gray_mask = mask1 * colorImg_gray  # masked target
    colorloss = F.l1_loss(color_pre, colorImg_gray_mask)  # mean over N*H*W
    return colorloss, color_pre

# comes from hmr-src/util/image.py
Example 5: heatmap_loss
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import l1_loss [as alias]
def heatmap_loss(scores, labels, pos_weight=100):
    labels = labels.float()
    # loss = F.binary_cross_entropy_with_logits(scores, labels, reduction='none')
    loss = F.l1_loss(scores, labels, reduction='none')
    weighted = loss * (1. + (pos_weight - 1.) * labels)
    return weighted.sum()

# def uncertainty_loss(logvar, sqr_dists):
#     sqr_dists = sqr_dists.clamp(min=1. + 1e-6)
#     c = (1 + torch.log(sqr_dists)) / sqr_dists
#     loss = torch.log1p(logvar.exp()) / sqr_dists + torch.sigmoid(-logvar) - c
#     print('dists', float(sqr_dists.min()), float(sqr_dists.max()))
#     print('logvar', float(logvar.min()), float(logvar.max()))
#     print('loss', float(loss.min()), float(loss.max()))
#     def hook(grad):
#         print('grad', float(grad.min()), float(grad.max()), float(grad.sum()))
#     logvar.register_hook(hook)
#     return loss.mean()
Example 6: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import l1_loss [as alias]
def forward(self, cbhg_outs, spcs, olens):
    """Calculate forward propagation.

    Args:
        cbhg_outs (Tensor): Batch of CBHG outputs (B, Lmax, spc_dim).
        spcs (Tensor): Batch of ground-truth spectrograms (B, Lmax, spc_dim).
        olens (LongTensor): Batch of the lengths of each sequence (B,).

    Returns:
        Tensor: L1 loss value.
        Tensor: Mean squared error loss value.

    """
    # perform masking for padded values
    if self.use_masking:
        mask = make_non_pad_mask(olens).unsqueeze(-1).to(spcs.device)
        spcs = spcs.masked_select(mask)
        cbhg_outs = cbhg_outs.masked_select(mask)
    # calculate loss
    cbhg_l1_loss = F.l1_loss(cbhg_outs, spcs)
    cbhg_mse_loss = F.mse_loss(cbhg_outs, spcs)
    return cbhg_l1_loss, cbhg_mse_loss
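The masking step relies on espnet's make_non_pad_mask helper. Conceptually, it keeps only the un-padded frames before computing the losses; a generic equivalent (a sketch under that assumption, not espnet's implementation) looks like this:

import torch
import torch.nn.functional as F

B, Lmax, D = 2, 5, 3
olens = torch.tensor([5, 3])          # actual sequence lengths
outs = torch.randn(B, Lmax, D)
spcs = torch.randn(B, Lmax, D)
# non-pad mask: True for time steps t < olen, broadcast over the feature dim
mask = (torch.arange(Lmax).unsqueeze(0) < olens.unsqueeze(1)).unsqueeze(-1)
l1 = F.l1_loss(outs.masked_select(mask), spcs.masked_select(mask))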
Example 7: train
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import l1_loss [as alias]
def train(self, inputs, labels, learning_rate=None):
    for opt in self.opts.values():
        if learning_rate:
            for param_group in opt.param_groups:
                param_group["lr"] = learning_rate
    lr = inputs[0]  # here lr is the input image, not a learning rate
    # add Gaussian noise with a random level sigma in [0, 75/255]
    sigma = torch.rand(1, device=lr.device) * 75 / 255
    noise = torch.randn_like(lr) * sigma
    hr = self.ffdnet((lr + noise).clamp(0, 1), sigma)
    loss = F.l1_loss(hr, labels[0])
    self.opt.zero_grad()
    loss.backward()
    self.opt.step()
    return {
        'loss': loss.detach().cpu().numpy()
    }
Example 8: train
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import l1_loss [as alias]
def train(self, inputs, labels, learning_rate=None):
    for opt in self.opts.values():
        if learning_rate:
            for param_group in opt.param_groups:
                param_group["lr"] = learning_rate
    lr = inputs[0]
    batch = lr.shape[0]
    noise, stddev = self.gen_random_noise(lr.shape)
    kernel = [self.gen_random_kernel() for _ in range(batch)]
    degpar = torch.tensor([pca.get_degradation(k) for k in kernel],
                          dtype=lr.dtype, device=lr.device)
    kernel = torch.tensor(kernel, dtype=lr.dtype, device=lr.device)
    noise = torch.tensor(noise, dtype=lr.dtype, device=lr.device)
    stddev = torch.tensor(stddev, dtype=lr.dtype, device=lr.device)
    lr = imfilter(lr, kernel) + noise
    sr = self.srmd(lr, degpar, stddev)
    loss = F.l1_loss(sr, labels[0])
    self.opt.zero_grad()
    loss.backward()
    self.opt.step()
    return {
        'loss': loss.detach().cpu().numpy()
    }
Example 9: test
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import l1_loss [as alias]
def test(imgL, imgR, disp_true):
    model.eval()
    if args.cuda:
        imgL, imgR, disp_true = imgL.cuda(), imgR.cuda(), disp_true.cuda()
    mask = disp_true < 192
    # pad height and width up to the next multiple of 16
    if imgL.shape[2] % 16 != 0:
        times = imgL.shape[2] // 16
        top_pad = (times + 1) * 16 - imgL.shape[2]
    else:
        top_pad = 0
    if imgL.shape[3] % 16 != 0:
        times = imgL.shape[3] // 16
        right_pad = (times + 1) * 16 - imgL.shape[3]
    else:
        right_pad = 0
    imgL = F.pad(imgL, (0, right_pad, top_pad, 0))
    imgR = F.pad(imgR, (0, right_pad, top_pad, 0))
    with torch.no_grad():
        output3 = model(imgL, imgR)
        output3 = torch.squeeze(output3)
    if top_pad != 0:
        img = output3[:, top_pad:, :]
    else:
        img = output3
    if len(disp_true[mask]) == 0:
        loss = torch.zeros(1)  # no valid pixels; return a tensor so .cpu() below works
    else:
        # end-point error: torch.mean(torch.abs(img[mask] - disp_true[mask]))
        loss = F.l1_loss(img[mask], disp_true[mask])
    return loss.data.cpu()
Example 10: feed_forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import l1_loss [as alias]
def feed_forward(net, x, y_bon, y_cor):
    x = x.to(device)
    y_bon = y_bon.to(device)
    y_cor = y_cor.to(device)
    losses = {}
    y_bon_, y_cor_ = net(x)
    losses['bon'] = F.l1_loss(y_bon_, y_bon)
    losses['cor'] = F.binary_cross_entropy_with_logits(y_cor_, y_cor)
    losses['total'] = losses['bon'] + losses['cor']
    return losses
Example 11: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import l1_loss [as alias]
def forward(self, x, y=None):
    h1 = self.h1_weights(x)
    h1 = F.relu(h1)
    pred = self.h2_weights(h1)
    if y is not None:
        y = Variable(y)
        mse = F.mse_loss(pred, y)
        mae = F.l1_loss(pred, y)
        return pred, mae, mse
    else:
        return pred
Example 12: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import l1_loss [as alias]
def forward(self, outputs, target, delta=5):
    # elementwise terms (reduce=False is deprecated; reduction='none' is equivalent)
    l1_loss = F.l1_loss(outputs, target, reduction='none')
    mse_loss = F.mse_loss(outputs, target, reduction='none')
    # Huber loss: quadratic where |error| < delta, linear elsewhere
    mask = (l1_loss < delta).float()
    loss = (0.5 * mse_loss) * mask + delta * (l1_loss - 0.5 * delta) * (1 - mask)
    return torch.mean(loss)
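This forward reproduces the Huber loss; on PyTorch 1.9+ the same quantity is available directly as F.huber_loss, which makes a handy sanity check (a sketch under that version assumption):

import torch
import torch.nn.functional as F

outputs = torch.randn(16, 4)
target = torch.randn(16, 4)
delta = 5.0
builtin = F.huber_loss(outputs, target, delta=delta)  # should match the custom forward above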
Example 13: _get_loss_func
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import l1_loss [as alias]
def _get_loss_func(self, loss):
    if isinstance(loss, str):
        if loss == "binary_crossentropy":
            loss_func = F.binary_cross_entropy
        elif loss == "mse":
            loss_func = F.mse_loss
        elif loss == "mae":
            loss_func = F.l1_loss
        else:
            raise NotImplementedError
    else:
        loss_func = loss
    return loss_func
Example 14: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import l1_loss [as alias]
def forward(self, output, mask, ind, target):
    pred = _transpose_and_gather_feat(output, ind)
    mask = mask.unsqueeze(2).expand_as(pred).float()
    # loss = F.l1_loss(pred * mask, target * mask, reduction='mean')
    loss = F.l1_loss(pred * mask, target * mask, reduction='sum')  # size_average=False is deprecated
    loss = loss / (mask.sum() + 1e-4)
    return loss
Example 15: compute_cen_loss
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import l1_loss [as alias]
def compute_cen_loss(output, target):
    return F.l1_loss(output, target)