This article collects typical usage examples of the Python method torch.nn.NLLLoss2d. If you have been asking yourself what nn.NLLLoss2d does, how to use it, or where to find examples of it, the curated code samples below should help. You can also read further about the containing module, torch.nn.
The following presents 15 code examples of nn.NLLLoss2d, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
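One caveat worth stating before the examples: nn.NLLLoss2d has been deprecated since PyTorch 0.4, when nn.NLLLoss gained support for higher-dimensional inputs, and it may be absent from recent releases. A minimal sketch (not taken from the examples below) of the drop-in replacement:

import torch
import torch.nn as nn
import torch.nn.functional as F

logits = torch.randn(2, 3, 4, 4)          # N x C x H x W raw scores
target = torch.randint(0, 3, (2, 4, 4))   # N x H x W class indices
# nn.NLLLoss now accepts 4D input with a 3D target, which is exactly
# what nn.NLLLoss2d used to compute.
loss = nn.NLLLoss()(F.log_softmax(logits, dim=1), target)
print(loss)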
Example 1: test
# Required import: from torch import nn [as alias]
# Or: from torch.nn import NLLLoss2d [as alias]
import torch
import torch.nn as nn
import torch.nn.functional as F
# FocalLoss is defined elsewhere in the source repository.

def test():
    loss_nll = nn.NLLLoss2d()
    loss_focal = FocalLoss(gamma=0)
    # target: N x H x W class indices; data: N x C x H x W scores
    target = torch.Tensor(2, 1, 5).random_(3).long()
    data = torch.rand(2, 3, 1, 5)
    # torch.Tensor(data, requires_grad=True) is not a valid call;
    # clone the data and flag it for autograd instead.
    input1 = data.clone().requires_grad_(True)
    loss1 = loss_nll(F.log_softmax(input1, dim=1), target)
    loss1.backward()
    print(loss1)
    print(input1.grad)
    input2 = data.clone().requires_grad_(True)
    loss2 = loss_focal(F.log_softmax(input2, dim=1), target)
    loss2.backward()
    print(loss2)
    print(input2.grad)
#test()
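With gamma=0 the focal-loss term -(1 - p)^gamma * log(p) reduces to the plain negative log-likelihood, so loss1 and loss2, and the two printed gradients, should coincide; that equality is exactly what this smoke test checks.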
Example 2: loss_calc
# Required import: from torch import nn [as alias]
# Or: from torch.nn import NLLLoss2d [as alias]
def loss_calc(out, label, gpu0):
    """
    This function returns the cross-entropy loss for semantic segmentation.
    `useGPU` and `onlyLesions` are module-level flags set elsewhere.
    """
    # out shape:   batch_size x channels x h x w (unchanged)
    # label shape: h x w x 1 x batch_size -> batch_size x h x w
    label = label[:, :, 0, :].transpose(2, 0, 1)
    label = torch.from_numpy(label).long()
    if useGPU:
        label = Variable(label).cuda(gpu0)
        # Heavily up-weight the lesion classes to counter class imbalance.
        if onlyLesions:
            criterion = nn.NLLLoss2d(weight=torch.cuda.FloatTensor([1, 100000]))
        else:
            criterion = nn.NLLLoss2d(weight=torch.cuda.FloatTensor([1, 100000, 100000]))
    else:
        label = Variable(label)
        if onlyLesions:
            criterion = nn.NLLLoss2d(weight=torch.FloatTensor([1, 100000]))
        else:
            criterion = nn.NLLLoss2d(weight=torch.FloatTensor([1, 100000, 100000]))
    m = nn.LogSoftmax(dim=1)  # log-probabilities over the channel dimension
    out = m(out)
    return criterion(out, label)
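A hypothetical invocation of loss_calc, assuming a PyTorch version that still ships nn.NLLLoss2d and the module-level useGPU/onlyLesions flags the function relies on (all values below are illustrative):

import numpy as np
import torch

useGPU, onlyLesions = False, True                # flags loss_calc reads from module scope
out = torch.randn(4, 2, 64, 64)                  # batch x classes x h x w network scores
label = np.random.randint(0, 2, (64, 64, 1, 4))  # h x w x 1 x batch ground truth
print(loss_calc(out, label, gpu0=0))             # gpu0 is ignored when useGPU is False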
Example 3: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import NLLLoss2d [as alias]
def __init__(self, weight=None, ignore=-100):
    '''
    :param weight: 1D weight vector to deal with the class-imbalance
    :param ignore: target value to exclude from the loss (-100 is the NLLLoss
                   default; the original signature defaulted to None, which
                   NLLLoss rejects at call time)
    '''
    super().__init__()
    # Note: torch.__version__[2] inspects a single character, so this probe
    # only distinguishes 0.x releases (e.g. '0.3.1' vs '0.4.0') correctly.
    if int(torch.__version__[2]) < 4:
        self.loss = nn.NLLLoss2d(weight, ignore_index=ignore)
    else:
        self.loss = nn.NLLLoss(weight, ignore_index=ignore)
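The single-character probe above misreads any 1.x or 2.x version string (for example, '1.13.0'[2] is '1'). A sturdier check, as a minimal sketch with an illustrative helper name:

import torch
import torch.nn as nn

def torch_older_than(major, minor):
    # Strip any local suffix such as '+cu121' before parsing.
    parts = torch.__version__.split('+')[0].split('.')
    return (int(parts[0]), int(parts[1])) < (major, minor)

# Only the chosen branch is evaluated, so the NLLLoss2d attribute is never
# touched on versions that have removed it.
loss_cls = nn.NLLLoss2d if torch_older_than(0, 4) else nn.NLLLoss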
Example 4: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import NLLLoss2d [as alias]
def __init__(self, weight=None):
    super().__init__()
    self.loss = nn.NLLLoss2d(weight)
Example 5: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import NLLLoss2d [as alias]
def __init__(self, weight=None):
    super().__init__()
    self.loss = nn.NLLLoss2d(weight)
Example 6: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import NLLLoss2d [as alias]
def __init__(self, weight=None):
    super().__init__()
    self.loss = nn.NLLLoss2d(weight)
Example 7: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import NLLLoss2d [as alias]
def __init__(self, weight=None, size_average=True, ignore_index=255):
    super(CrossEntropyLoss2d, self).__init__()
    # weight, size_average and ignore_index are forwarded positionally;
    # size_average was deprecated in favor of reduction= in PyTorch 0.4.1+.
    self.nll_loss = nn.NLLLoss2d(weight, size_average, ignore_index)
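A present-day rendering of this wrapper, as a sketch using the non-deprecated API (the class name is illustrative):

import torch.nn as nn
import torch.nn.functional as F

class CrossEntropyLoss2dModern(nn.Module):
    def __init__(self, weight=None, ignore_index=255):
        super().__init__()
        # reduction='mean' replaces size_average=True from the old API.
        self.nll_loss = nn.NLLLoss(weight, ignore_index=ignore_index,
                                   reduction='mean')

    def forward(self, outputs, targets):
        # outputs: N x C x H x W raw scores; targets: N x H x W indices
        return self.nll_loss(F.log_softmax(outputs, dim=1), targets)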
Example 8: loss_calc
# Required import: from torch import nn [as alias]
# Or: from torch.nn import NLLLoss2d [as alias]
def loss_calc(out, label, gpu0):
    """
    This function returns the cross-entropy loss for semantic segmentation.
    """
    # out shape:   batch_size x channels x h x w (unchanged)
    # label shape: h x w x 1 x batch_size -> batch_size x h x w
    label = label[:, :, 0, :].transpose(2, 0, 1)
    label = torch.from_numpy(label).long()
    label = Variable(label).cuda(gpu0)
    m = nn.LogSoftmax(dim=1)
    criterion = nn.NLLLoss2d()
    out = m(out)
    return criterion(out, label)
Example 9: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import NLLLoss2d [as alias]
def __init__(self, weight=None, size_average=False, ignore_index=255):
    super(CrossEntropyLoss2d, self).__init__()
    # size_average=False sums the per-pixel losses instead of averaging them.
    self.nll_loss = nn.NLLLoss2d(weight, size_average, ignore_index)
Example 10: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import NLLLoss2d [as alias]
def __init__(self, weights=None):
    super(CrossEntropyLoss2d, self).__init__()
    self.loss = nn.NLLLoss2d(weight=weights)
    self.loss.cuda()  # moves the class-weight tensor, if given, onto the default GPU
Example 11: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import NLLLoss2d [as alias]
def __init__(self, weight=None, size_average=True):
    super(CrossEntropyLoss2d, self).__init__()
    self.nll_loss = nn.NLLLoss2d(weight, size_average)
Example 12: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import NLLLoss2d [as alias]
def __init__(self, weight=None, ignore_label=255):
    '''
    :param weight: 1D weight vector to deal with the class-imbalance

    Obtaining log-probabilities in a network is easily achieved by adding a
    LogSoftmax layer as the last layer of the network. You may use
    CrossEntropyLoss instead, if you prefer not to add an extra layer.
    '''
    super().__init__()
    # self.loss = nn.NLLLoss2d(weight, ignore_index=255)
    self.loss = nn.NLLLoss(weight, ignore_index=ignore_label)
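This wrapper stores a plain nn.NLLLoss, so it must be fed log-probabilities; the matching forward pass appears in Example 15. A hedged usage sketch that calls the stored loss directly, with illustrative shapes and names:

import torch
import torch.nn.functional as F

criterion = CrossEntropyLoss2d(ignore_label=255)
scores = torch.randn(2, 19, 32, 32)          # N x C x H x W raw network scores
labels = torch.randint(0, 19, (2, 32, 32))   # N x H x W class indices
print(criterion.loss(F.log_softmax(scores, dim=1), labels))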
Example 13: test
# Required import: from torch import nn [as alias]
# Or: from torch.nn import NLLLoss2d [as alias]
def test(model, data_loader):
    model.eval()
    epoch_loss_obj = 0
    epoch_loss_dist = 0
    epoch_accuracy_obj = 0
    epoch_accuracy_dist = 0
    batch = 0
    with torch.no_grad():  # no gradients are needed during evaluation
        for batch, (screens, distances, objects) in enumerate(data_loader):
            screens, distances, objects = screens.to(device), distances.to(device), objects.to(device)
            pred_objects, pred_distances = model(screens)
            loss_obj = objects_criterion(pred_objects, objects)
            loss_dist = distances_criterion(pred_distances, distances)
            epoch_loss_obj += loss_obj.item()
            epoch_loss_dist += loss_dist.item()
            _, pred_objects = pred_objects.max(1)
            accuracy = (pred_objects == objects).float().mean()
            epoch_accuracy_obj += accuracy.item()  # .item() keeps the running sum a plain float
            _, pred_distances = pred_distances.max(1)
            accuracy = (pred_distances == distances).float().mean()
            epoch_accuracy_dist += accuracy.item()
    batch_num = batch + 1
    epoch_loss_obj /= batch_num
    epoch_loss_dist /= batch_num
    epoch_accuracy_obj /= batch_num
    epoch_accuracy_dist /= batch_num
    model.train()
    return (epoch_loss_obj, epoch_loss_dist), (epoch_accuracy_obj, epoch_accuracy_dist)
#objects_criterion = nn.NLLLoss2d()
#distances_criterion = nn.NLLLoss2d()
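test() reads device, objects_criterion, and distances_criterion from module scope; the two commented lines above hint at the intended criteria. A hedged setup sketch, substituting nn.NLLLoss for the deprecated nn.NLLLoss2d:

import torch
import torch.nn as nn

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
objects_criterion = nn.NLLLoss()    # stands in for the deprecated nn.NLLLoss2d
distances_criterion = nn.NLLLoss()
# (losses, accuracies) = test(model, data_loader)  # supply your own model and loader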
Example 14: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import NLLLoss2d [as alias]
def __init__(self, weight=None):
    '''
    :param weight: 1D weight vector to deal with the class-imbalance
    '''
    super().__init__()
    self.loss = nn.NLLLoss2d(weight)
Example 15: forward
# Required import: from torch import nn [as alias]
# Or: from torch.nn import NLLLoss2d [as alias]
def forward(self, output, target):
    """
    Forward pass
    :param output: torch.tensor of log-probabilities, (N, C) or (N, C, H, W)
    :param target: torch.tensor of class indices, (N) or (N, H, W)
    :return: scalar loss
    """
    return self.nll_loss(output, target)
# class CrossEntropyLoss2d(nn.Module):
#     '''
#     This file defines a cross entropy loss for 2D images
#     '''
#
#     def __init__(self, weight=None, ignore_label=255):
#         '''
#         :param weight: 1D weight vector to deal with the class-imbalance
#
#         Obtaining log-probabilities in a network is easily achieved by adding a
#         LogSoftmax layer as the last layer of the network. You may use
#         CrossEntropyLoss instead, if you prefer not to add an extra layer.
#         '''
#         super().__init__()
#
#         # self.loss = nn.NLLLoss2d(weight, ignore_index=255)
#         self.loss = nn.NLLLoss(weight, ignore_index=ignore_label)
#
#     def forward(self, outputs, targets):
#         return self.loss(F.log_softmax(outputs, dim=1), targets)
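Putting the recurring pattern together, a self-contained sketch (all shapes are illustrative) of the CrossEntropyLoss2d idiom on a modern PyTorch, where nn.NLLLoss replaces the deprecated nn.NLLLoss2d:

import torch
import torch.nn as nn
import torch.nn.functional as F

class CrossEntropyLoss2d(nn.Module):
    '''Cross-entropy loss for 2D images: the modern rendering of the
    pattern shared by the examples above.'''
    def __init__(self, weight=None, ignore_label=255):
        super().__init__()
        self.loss = nn.NLLLoss(weight, ignore_index=ignore_label)

    def forward(self, outputs, targets):
        # Apply LogSoftmax over the channel dim, then the weighted NLL.
        return self.loss(F.log_softmax(outputs, dim=1), targets)

# Smoke test on random data: 2 images, 3 classes, 8x8 resolution.
criterion = CrossEntropyLoss2d()
outputs = torch.randn(2, 3, 8, 8, requires_grad=True)
targets = torch.randint(0, 3, (2, 8, 8))
loss = criterion(outputs, targets)
loss.backward()
print(loss.item())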