本文整理汇总了Python中torch.nn.functional.nll_loss方法的典型用法代码示例。如果您正苦于以下问题：Python functional.nll_loss方法的具体用法？Python functional.nll_loss怎么用？Python functional.nll_loss使用的例子？那么，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类torch.nn.functional的用法示例。
在下文中一共展示了functional.nll_loss方法的15个代码示例，这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞，您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: mle_steps
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import nll_loss [as 别名]
def mle_steps(key, model, input_, targets, attention_mask,
              loss_reduction="mean", i=None):
    """Run one maximum-likelihood scoring step.

    Decodes activations for ``input_``, converts them into a
    log-probability distribution and scores it against ``targets``
    with NLL loss.  Returns ``(loss, word_dist)``; when
    ``loss_reduction`` is not ``"mean"`` the per-token losses are
    reshaped back to ``(batch, -1)``.
    """
    # Decode raw activations for the (batch, 1, ...) shaped input.
    activations = decode(model, input_.unsqueeze(1), attention_mask, i)
    # Turn activations into log-probabilities suitable for NLL scoring.
    word_dist = train_utils.modify_output_for_loss_fn(
        "nll", activations, dim=-1)
    flat_dist = word_dist.view(-1, word_dist.size(-1))
    loss = F.nll_loss(flat_dist, targets, reduction=loss_reduction)
    if loss_reduction == "mean":
        return loss, word_dist
    # Unreduced case: restore the batch dimension on the loss tensor.
    return loss.view(word_dist.size(0), -1), word_dist
示例2: forward
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import nll_loss [as 别名]
def forward(self, input, target): # input = Variable(logits), target = labels
    """Combined classification loss.

    Sums three terms: a leave-one-out "novel class" term weighted by
    ``self.loo``, a standard NLL loss on the known classes, and an
    optional label-smoothing regularizer weighted by
    ``self.label_smooth``.
    """
    # Loss accumulator on the right device (legacy Variable API).
    loss = Variable(torch.zeros(1).cuda()) if self.gpu else Variable(torch.zeros(1))
    # novel loss
    if self.loo > 0.:
        # Rows of `labels_relevant` flag, per target label, which novel
        # columns apply to each sample in the batch.
        target_novel = self.labels_relevant[target]
        for i, rel in enumerate(self.relevant):
            if target_novel[:,i].any():
                # Batch positions for which novel group i is active.
                relevant_loc = target_novel[:,i].nonzero().view(-1)
                # NOTE(review): assumes index 0 of `rel` selects the
                # held-out (novel) class within the restricted softmax
                # — confirm against where `self.relevant` is built.
                loss += -F.log_softmax(input[relevant_loc][:, rel], dim=1)[:,0].mean() * self.class_weight[i]
        loss *= self.loo
    # known loss
    log_probs = F.log_softmax(input, dim=1)
    loss += F.nll_loss(log_probs, Variable(target))
    # regularization: label smoothing towards the uniform distribution
    # (`kld_u_const` is presumably the constant KL term — verify).
    if self.label_smooth > 0.:
        loss -= (log_probs.mean() + self.kld_u_const) * self.label_smooth
    return loss
示例3: test
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import nll_loss [as 别名]
def test(args, model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, size_average=False).item() # sum up batch loss
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
示例4: train_one_epoch
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import nll_loss [as 别名]
def train_one_epoch(self):
    """
    One epoch of training
    :return:
    """
    self.model.train()
    train_loader = self.data_loader.train_loader
    for batch_idx, (data, target) in enumerate(train_loader):
        data = data.to(self.device)
        target = target.to(self.device)
        self.optimizer.zero_grad()
        loss = F.nll_loss(self.model(data), target)
        loss.backward()
        self.optimizer.step()
        # Periodic progress logging.
        if batch_idx % self.config.log_interval == 0:
            seen = batch_idx * len(data)
            total = len(train_loader.dataset)
            self.logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                self.current_epoch, seen, total,
                100. * batch_idx / len(train_loader), loss.item()))
        self.current_iteration += 1
示例5: validate
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import nll_loss [as 别名]
def validate(self):
    """
    One cycle of model validation: average NLL loss and accuracy over
    the test loader, reported through ``self.logger``.
    :return:
    """
    self.model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in self.data_loader.test_loader:
            data, target = data.to(self.device), target.to(self.device)
            output = self.model(data)
            # reduction='sum' replaces the deprecated size_average=False:
            # accumulate the total loss, then average over the dataset.
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.max(1, keepdim=True)[1]  # index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(self.data_loader.test_loader.dataset)
    self.logger.info('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(self.data_loader.test_loader.dataset),
        100. * correct / len(self.data_loader.test_loader.dataset)))
示例6: test
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import nll_loss [as 别名]
def test(model, test_loader):
    """Evaluate a two-branch (domain-adaptation) model on ``test_loader``.

    Relies on module-level ``DEVICE`` and ``args`` globals.  Scores the
    first element of the model's output tuple (source-branch logits) and
    returns the number of correct predictions.
    """
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(DEVICE), target.to(DEVICE)
            out = model(data, data, target, DEVICE)
            s_output = out[0]  # source-branch logits
            # reduction='sum' replaces the deprecated size_average=False.
            test_loss += F.nll_loss(F.log_softmax(s_output, dim=1), target, reduction='sum').item()
            pred = s_output.data.max(1)[1]  # index of the max log-probability
            correct += pred.eq(target.data.view_as(pred)).cpu().sum()
    test_loss /= len(test_loader.dataset)
    print(args.test_dir, '\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
    return correct
示例7: test
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import nll_loss [as 别名]
def test(self):
    """Evaluate the model on ``self.target_test_loader`` on the GPU.

    Relies on a module-level ``target_name`` for the summary line;
    returns the number of correct predictions.
    """
    self.model.eval()
    test_loss = 0
    correct = 0
    # torch.no_grad() replaces the removed Variable(..., volatile=True) API.
    with torch.no_grad():
        for data, target in self.target_test_loader:
            data, target = data.cuda(), target.cuda()
            s_output, t_output = self.model(data, data)
            # reduction='sum' replaces the deprecated size_average=False.
            test_loss += F.nll_loss(F.log_softmax(s_output, dim=1), target, reduction='sum').item()
            pred = s_output.data.max(1)[1]  # index of the max log-probability
            correct += pred.eq(target.data.view_as(pred)).cpu().sum()
    test_loss /= self.len_target_dataset
    print('\n{} set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        target_name, test_loss, correct, self.len_target_dataset,
        100. * correct / self.len_target_dataset))
    return correct
示例8: test
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import nll_loss [as 别名]
def test(model):
    """Evaluate ``model`` on the global ``tgt_test_loader``.

    Prints the per-sample average NLL loss and accuracy (using the
    globals ``tgt_dataset_len`` and ``tgt_name``) and returns the
    number of correct predictions.
    """
    model.eval()
    total_loss = 0
    n_correct = 0
    with torch.no_grad():
        for batch, labels in tgt_test_loader:
            if cuda:
                batch, labels = batch.cuda(), labels.cuda()
            batch, labels = Variable(batch), Variable(labels)
            preds, mmd_loss = model(batch, batch)
            # Sum batch losses; averaged over the dataset below.
            total_loss += F.nll_loss(
                F.log_softmax(preds, dim=1), labels, reduction='sum').item()
            top1 = preds.data.max(1)[1]  # index of the max log-probability
            n_correct += top1.eq(labels.data.view_as(top1)).cpu().sum()
    avg_loss = total_loss / tgt_dataset_len
    print('\n{} set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        tgt_name, avg_loss, n_correct, tgt_dataset_len,
        100. * n_correct / tgt_dataset_len))
    return n_correct
示例9: test
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import nll_loss [as 别名]
def test(model, test_loader):
    """Evaluate the source branch of a two-branch model.

    Uses the module-level ``args`` for CUDA placement and for the
    ``test_dir`` tag in the printed summary; returns the number of
    correct predictions.
    """
    model.eval()
    running_loss = 0
    hits = 0
    with torch.no_grad():
        for inputs, labels in test_loader:
            if args.cuda:
                inputs, labels = inputs.cuda(), labels.cuda()
            s_output, t_output = model(inputs, inputs, labels)
            # Sum batch losses; averaged over the dataset below.
            running_loss += F.nll_loss(
                F.log_softmax(s_output, dim=1), labels, reduction='sum').item()
            predicted = s_output.data.max(1)[1]  # index of the max log-probability
            hits += predicted.eq(labels.data.view_as(predicted)).cpu().sum()
    n_samples = len(test_loader.dataset)
    avg_loss = running_loss / n_samples
    print(args.test_dir, '\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        avg_loss, hits, n_samples,
        100. * hits / n_samples))
    return hits
示例10: test
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import nll_loss [as 别名]
def test(model):
    """Evaluate ``model`` on the global ``target_test_loader``.

    Relies on module-level ``target_test_loader``, ``cuda``,
    ``len_target_dataset`` and ``target_name``; returns the number of
    correct predictions.
    """
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in target_test_loader:
            if cuda:
                data, target = data.cuda(), target.cuda()
            data, target = Variable(data), Variable(target)
            s_output, t_output = model(data, data, target)
            # BUG FIX: the default reduction is 'mean', yet the running
            # total is divided by the dataset size below, so the printed
            # "average loss" was wrong. Sum batch losses instead.
            test_loss += F.nll_loss(F.log_softmax(s_output, dim=1), target, reduction='sum').item()
            pred = s_output.data.max(1)[1]  # index of the max log-probability
            correct += pred.eq(target.data.view_as(pred)).cpu().sum()
    test_loss /= len_target_dataset
    print('\n{} set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        target_name, test_loss, correct, len_target_dataset,
        100. * correct / len_target_dataset))
    return correct
示例11: test
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import nll_loss [as 别名]
def test():
    """Evaluate the global ``model`` on the global ``test_loader``
    (MNIST-style input flattened to 28*28) and print loss/accuracy.
    """
    model.eval()
    test_loss = 0
    correct = 0
    # torch.no_grad() replaces the removed Variable(..., volatile=True) API.
    with torch.no_grad():
        for data, target in test_loader:
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            # resize data from (batch_size, 1, 28, 28) to (batch_size, 28*28)
            data = data.view(-1, 28*28)
            output = model(data)
            # .item() replaces `.data[0]`, which errors on 0-dim tensors
            # in modern torch; reduction='sum' replaces the deprecated
            # size_average=False.
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.data.max(1, keepdim=True)[1]  # index of the max log-probability
            correct += pred.eq(target.data.view_as(pred)).long().cpu().sum()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
示例12: test
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import nll_loss [as 别名]
def test():
    """Evaluate the global ``model`` on the global ``test_loader``
    (input flattened to 28*28) and print loss/accuracy.
    """
    model.eval()
    test_loss = 0
    correct = 0
    # torch.no_grad() replaces the removed Variable(..., volatile=True) API.
    with torch.no_grad():
        for data, target in test_loader:
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            # Resize data from (batch_size, 1, 28, 28) to (batch_size, 28*28)
            data = data.view(-1, 28*28)
            output = model(data)
            # reduction='sum' replaces the deprecated size_average=False.
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # Sum up batch loss
            pred = output.data.max(1, keepdim=True)[1]  # Get the index of the max log-probability
            correct += pred.eq(target.data.view_as(pred)).long().cpu().sum()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
示例13: test
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import nll_loss [as 别名]
def test():
    """Evaluate the global ``model`` on the global ``test_loader``
    and print average loss and accuracy.
    """
    model.eval()
    test_loss = 0
    correct = 0
    # torch.no_grad() replaces the removed Variable(..., volatile=True) API.
    with torch.no_grad():
        for data, target in test_loader:
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            output = model(data)
            # .item() replaces `.data[0]`, which errors on 0-dim tensors
            # in modern torch; reduction='sum' replaces the deprecated
            # size_average=False.
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.data.max(1, keepdim=True)[1]  # index of the max log-probability
            correct += pred.eq(target.data.view_as(pred)).cpu().sum()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
示例14: test
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import nll_loss [as 别名]
def test():
    """Evaluate the global ``model`` on the global ``test_loader``
    (input flattened to 28*28) and print loss/accuracy.
    """
    model.eval()
    test_loss = 0
    correct = 0
    # torch.no_grad() replaces the removed Variable(..., volatile=True) API.
    with torch.no_grad():
        for data, target in test_loader:
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            # Resize data from (batch_size, 1, 28, 28) to (batch_size, 28*28)
            data = data.view(-1, 28*28)
            output = model(data)
            # BUG FIX: the original wrote `.data[0]()`, which calls the
            # scalar and raises TypeError. Use .item(); reduction='sum'
            # replaces the deprecated size_average=False.
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # Sum up batch loss
            pred = output.data.max(1, keepdim=True)[1]  # Get the index of the max log-probability
            correct += pred.eq(target.data.view_as(pred)).long().cpu().sum()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
示例15: train
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import nll_loss [as 别名]
def train(model, optimizer, loader, device, regression=False, ARR=0):
    """Train ``model`` for one epoch and return the dataset-averaged loss.

    ``regression`` switches between MSE and NLL on ``data.y``; a nonzero
    ``ARR`` adds an adjacent-relation regularizer over each conv layer's
    basis-decomposed relation weights.
    """
    model.train()
    running = 0
    for batch in loader:
        optimizer.zero_grad()
        batch = batch.to(device)
        pred = model(batch)
        target = batch.y.view(-1)
        if regression:
            loss = F.mse_loss(pred, target)
        else:
            loss = F.nll_loss(pred, target)
        if ARR != 0:
            for gconv in model.convs:
                # Reconstruct per-relation weights from the basis decomposition.
                w = torch.matmul(
                    gconv.att,
                    gconv.basis.view(gconv.num_bases, -1)
                ).view(gconv.num_relations, gconv.in_channels, gconv.out_channels)
                # Penalize differences between adjacent relations' weights.
                loss = loss + ARR * torch.sum((w[1:, :, :] - w[:-1, :, :]) ** 2)
        loss.backward()
        running += loss.item() * num_graphs(batch)
        optimizer.step()
        torch.cuda.empty_cache()
    return running / len(loader.dataset)