This article collects typical usage examples of the Python method torch.log_softmax. If you are wondering what exactly torch.log_softmax does, how to use it, or want to see it in practice, the selected code samples below may help. You can also explore further usage examples of the torch module that this method belongs to.
The following 15 code examples of torch.log_softmax are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
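Before diving into the examples, here is a minimal standalone sketch (the tensor below is made up for illustration) of what torch.log_softmax computes: it is mathematically equivalent to taking the softmax and then the logarithm, but it is evaluated in one numerically stable step.

import torch

x = torch.tensor([[1.0, 2.0, 3.0],
                  [10.0, 0.0, -10.0]])
log_p = torch.log_softmax(x, dim=1)
# Equivalent to log(softmax(x)), but more stable for very large or small logits
print(torch.allclose(log_p, torch.log(torch.softmax(x, dim=1))))  # True
print(log_p.exp().sum(dim=1))  # each row of probabilities sums to 1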
Example 1: forward
# Required import: import torch [as alias]
# Alternatively: from torch import log_softmax [as alias]
def forward(self, x):
    # Compute the linear part first
    linear_part = self.linear(x)
    # Compute the pairwise (field-aware) interaction part
    interaction_part = 0.0
    for i in range(self.fea_num):
        for j in range(i + 1, self.fea_num):
            v_ifj = self.v[i, self.field_map_dict[j], :, :]
            v_jfi = self.v[j, self.field_map_dict[i], :, :]
            xij = torch.unsqueeze(x[:, i] * x[:, j], dim=1)
            v_ijji = torch.unsqueeze(torch.sum(v_ifj * v_jfi, dim=0), dim=0)
            interaction_part += torch.mm(xij, v_ijji)
    output = linear_part + interaction_part
    output = torch.log_softmax(output, dim=1)
    return output
Example 2: train_one_epoch
# Required import: import torch [as alias]
# Alternatively: from torch import log_softmax [as alias]
def train_one_epoch(model, criterion, optimizer, data_loader, device, epoch, args):
    epoch_loss = 0.0
    for image, target, input_len, target_len in tqdm(data_loader):
        image = image.to(device)
        # print(target, target_len, input_len)
        outputs = model(image.to(torch.float32))  # [B, N, C]
        outputs = torch.log_softmax(outputs, dim=2)
        outputs = outputs.permute([1, 0, 2])  # [N, B, C]
        loss = criterion(outputs, target, input_len, target_len)
        # Gradient update
        model.zero_grad()
        loss.backward()
        optimizer.step()
        # Accumulate the loss for the current epoch
        epoch_loss += loss.item() * image.size(0)
        if np.isnan(loss.item()):
            print(target, input_len, target_len)
    epoch_loss = epoch_loss / len(data_loader.dataset)
    # Log the epoch loss
    print('Epoch: {}/{} loss: {:03f}'.format(epoch + 1, args.epochs, epoch_loss))
    return epoch_loss
Example 3: forward
# Required import: import torch [as alias]
# Alternatively: from torch import log_softmax [as alias]
def forward(self, task_id, x, y, seq_len):
    words_emb = self.embedding(x)
    char_emb = self.char(x)
    x = torch.cat([words_emb, char_emb], dim=-1)
    x, _ = self.lstm(x, seq_len)
    x = self.dropout(x)  # assign the result so dropout is actually applied
    logit = self.out[task_id[0]](x)
    seq_mask = seq_len_to_mask(seq_len, x.size(1))
    if self.crf is not None:
        logit = torch.log_softmax(logit, dim=-1)
        loss = self.crf[task_id[0]](logit, y, seq_mask).mean()
        pred = self.crf[task_id[0]].viterbi_decode(logit, seq_mask)[0]
    else:
        loss = ce_loss(logit, y, seq_mask)
        pred = torch.argmax(logit, dim=2)
    return {"loss": loss, "pred": pred}
Example 4: distillation
# Required import: import torch [as alias]
# Alternatively: from torch import log_softmax [as alias]
def distillation(logits_student, logits_teacher, ylens, temperature=5.0):
    """Compute cross entropy loss for knowledge distillation of sequence-to-sequence models.
    Args:
        logits_student (FloatTensor): `[B, T, vocab]`
        logits_teacher (FloatTensor): `[B, T, vocab]`
        ylens (IntTensor): `[B]`
        temperature (float):
    Returns:
        loss_mean (FloatTensor): `[1]`
    """
    bs, _, vocab = logits_student.size()
    log_probs_student = torch.log_softmax(logits_student, dim=-1)
    probs_teacher = torch.softmax(logits_teacher / temperature, dim=-1).data
    loss = -torch.mul(probs_teacher, log_probs_student)
    loss_mean = np.sum([loss[b, :ylens[b], :].sum() for b in range(bs)]) / ylens.sum()
    return loss_mean
Example 5: kldiv_lsm_ctc
# Required import: import torch [as alias]
# Alternatively: from torch import log_softmax [as alias]
def kldiv_lsm_ctc(logits, ylens):
    """Compute KL divergence loss for label smoothing of CTC and Transducer models.
    Args:
        logits (FloatTensor): `[B, T, vocab]`
        ylens (IntTensor): `[B]`
    Returns:
        loss_mean (FloatTensor): `[1]`
    """
    bs, _, vocab = logits.size()
    log_uniform = logits.new_zeros(logits.size()).fill_(math.log(1 / (vocab - 1)))
    probs = torch.softmax(logits, dim=-1)
    log_probs = torch.log_softmax(logits, dim=-1)
    loss = torch.mul(probs, log_probs - log_uniform)
    loss_mean = np.sum([loss[b, :ylens[b], :].sum() for b in range(bs)]) / ylens.sum()
    # assert loss_mean >= 0
    return loss_mean
Example 6: focal_loss
# Required import: import torch [as alias]
# Alternatively: from torch import log_softmax [as alias]
def focal_loss(logits, ys, ylens, alpha, gamma):
    """Compute focal loss.
    Args:
        logits (FloatTensor): `[B, T, vocab]`
        ys (LongTensor): Indices of labels. `[B, L]`
        ylens (IntTensor): `[B]`
        alpha (float):
        gamma (float):
    Returns:
        loss_mean (FloatTensor): `[1]`
    """
    bs = ys.size(0)
    log_probs = torch.log_softmax(logits, dim=-1)
    probs_inv = -torch.softmax(logits, dim=-1) + 1
    loss = -alpha * torch.mul(torch.pow(probs_inv, gamma), log_probs)
    loss_mean = np.sum([loss[b, :ylens[b], :].sum() for b in range(bs)]) / ylens.sum()
    return loss_mean
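As with the distillation helper, a small illustrative call (dummy tensors, assumed names, not from the source repository) shows the expected shapes; gamma > 0 down-weights frames the model already classifies confidently, and this particular implementation only reads the batch size from ys.

import numpy as np
import torch

bs, T, vocab = 2, 5, 8
logits = torch.randn(bs, T, vocab)     # [B, T, vocab]
ys = torch.randint(0, vocab, (bs, 3))  # [B, L]; only ys.size(0) is used here
ylens = torch.tensor([5, 4])           # valid lengths per sequence
loss = focal_loss(logits, ys, ylens, alpha=0.5, gamma=2.0)
print(loss)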
Example 7: greedy
# Required import: import torch [as alias]
# Alternatively: from torch import log_softmax [as alias]
def greedy(self, eouts, elens):
    """Greedy decoding.
    Args:
        eouts (FloatTensor): `[B, T, enc_n_units]`
        elens (np.ndarray): `[B]`
    Returns:
        hyps (np.ndarray): Best path hypothesis. `[B, L]`
    """
    log_probs = torch.log_softmax(self.output(eouts), dim=-1)
    best_paths = log_probs.argmax(-1)  # `[B, L]`
    hyps = []
    for b in range(eouts.size(0)):
        indices = [best_paths[b, t].item() for t in range(elens[b])]
        # Step 1. Collapse repeated labels
        collapsed_indices = [x[0] for x in groupby(indices)]
        # Step 2. Remove all blank labels
        best_hyp = [x for x in filter(lambda x: x != self.blank, collapsed_indices)]
        hyps.append(np.array(best_hyp))
    return np.array(hyps)
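The two post-processing steps in the loop (collapse repeated labels, then drop blanks) can be reproduced standalone; the blank id and best path below are made-up values for illustration only.

from itertools import groupby

blank = 0
best_path = [0, 3, 3, 0, 0, 5, 5, 5, 0, 2]         # per-frame argmax indices
collapsed = [k for k, _ in groupby(best_path)]     # [0, 3, 0, 5, 0, 2]
hypothesis = [i for i in collapsed if i != blank]  # [3, 5, 2]
print(hypothesis)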
Example 8: test_log_softmax
# Required import: import torch [as alias]
# Alternatively: from torch import log_softmax [as alias]
def test_log_softmax():
    src = torch.tensor([0.2, 0, 0.2, -2.1, 3.2, 7, -1, float('-inf')])
    src.requires_grad_()
    index = torch.tensor([0, 1, 0, 1, 1, 2, 4, 4])
    out = scatter_log_softmax(src, index)
    out0 = torch.log_softmax(torch.tensor([0.2, 0.2]), dim=-1)
    out1 = torch.log_softmax(torch.tensor([0, -2.1, 3.2]), dim=-1)
    out2 = torch.log_softmax(torch.tensor([7], dtype=torch.float), dim=-1)
    out4 = torch.log_softmax(torch.tensor([-1, float('-inf')]), dim=-1)
    expected = torch.stack([
        out0[0], out1[0], out0[1], out1[1], out1[2], out2[0], out4[0], out4[1]
    ], dim=0)
    assert torch.allclose(out, expected)
    out.backward(torch.randn_like(out))
Example 9: forward
# Required import: import torch [as alias]
# Alternatively: from torch import log_softmax [as alias]
def forward(self, x, target):
    """Compute loss between x and target.
    :param torch.Tensor x: prediction (batch, seqlen, class)
    :param torch.Tensor target: target signal masked with self.padding_id (batch, seqlen)
    :return: scalar float value
    :rtype torch.Tensor
    """
    assert x.size(2) == self.size
    batch_size = x.size(0)
    x = x.view(-1, self.size)
    target = target.view(-1)
    with torch.no_grad():
        true_dist = x.clone()
        true_dist.fill_(self.smoothing / (self.size - 1))
        ignore = target == self.padding_idx  # (B,)
        total = len(target) - ignore.sum().item()
        target = target.masked_fill(ignore, 0)  # avoid -1 index
        true_dist.scatter_(1, target.unsqueeze(1), self.confidence)
    kl = self.criterion(torch.log_softmax(x, dim=1), true_dist)
    denom = total if self.normalize_length else batch_size
    return kl.masked_fill(ignore.unsqueeze(1), 0).sum() / denom
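Here is a minimal standalone sketch of the smoothing step used above (vocabulary size, smoothing value and targets are made up): each row of the target distribution gets confidence = 1 - smoothing on the gold index and smoothing / (size - 1) everywhere else, and the loss is the KL divergence against the model's log-softmax output.

import torch

size, smoothing = 5, 0.1
confidence = 1.0 - smoothing
x = torch.randn(3, size)          # (batch * seqlen, class) logits
target = torch.tensor([2, 0, 4])  # gold class indices

true_dist = torch.full_like(x, smoothing / (size - 1))
true_dist.scatter_(1, target.unsqueeze(1), confidence)

criterion = torch.nn.KLDivLoss(reduction="none")
kl = criterion(torch.log_softmax(x, dim=1), true_dist)
print(kl.sum(dim=1))              # per-token loss before padding mask / normalization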
Example 10: forward
# Required import: import torch [as alias]
# Alternatively: from torch import log_softmax [as alias]
def forward(self, x, y, get_scores=False):
    """
    Compute the loss, and optionally the scores.
    """
    assert (y == self.pad_index).sum().item() == 0
    if self.asm is False:
        scores = self.proj(x).view(-1, self.n_words)
        if self.label_smoothing == 0.0:
            loss = F.cross_entropy(scores, y, reduction='mean')
        else:
            lprobs = torch.log_softmax(scores, dim=1)
            nll_loss = -lprobs.gather(dim=-1, index=y.unsqueeze(1))
            smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
            nll_loss, smooth_loss = nll_loss.sum(), smooth_loss.sum()
            eps_i = self.label_smoothing / lprobs.size(-1)
            loss = (1. - self.label_smoothing) * nll_loss + eps_i * smooth_loss
            loss = loss / x.shape[0]
    else:
        _, loss = self.proj(x, y)
        scores = self.proj.log_prob(x) if get_scores else None
    return scores, loss
Example 11: init_step
# Required import: import torch [as alias]
# Alternatively: from torch import log_softmax [as alias]
def init_step(self, beam, expected_len_pen):
    # init_preds: [4, 3, 5, 6, 7] - no EOS's
    init_scores = torch.log_softmax(torch.tensor(
        [[0, 0, 0, 4, 5, 3, 2, 1]], dtype=torch.float), dim=1)
    init_scores = deepcopy(init_scores.repeat(
        self.BATCH_SZ * self.BEAM_SZ, 1))
    new_scores = init_scores + beam.topk_log_probs.view(-1).unsqueeze(1)
    expected_beam_scores, expected_preds_0 = new_scores \
        .view(self.BATCH_SZ, self.BEAM_SZ * self.N_WORDS) \
        .topk(self.BEAM_SZ, dim=-1)
    beam.advance(deepcopy(init_scores), self.random_attn())
    self.assertTrue(beam.topk_log_probs.allclose(expected_beam_scores))
    self.assertTrue(beam.topk_ids.equal(expected_preds_0))
    self.assertFalse(beam.is_finished.any())
    self.assertFalse(beam.done)
    return expected_beam_scores
Example 12: forward
# Required import: import torch [as alias]
# Alternatively: from torch import log_softmax [as alias]
def forward(self, x, target):
    """Compute loss between x and target.
    :param torch.Tensor x: prediction (batch, seqlen, class)
    :param torch.Tensor target: target signal masked with self.padding_id (batch, seqlen)
    :return: scalar float value
    :rtype torch.Tensor
    """
    assert x.size(2) == self.size
    batch_size = x.size(0)
    x = x.view(-1, self.size)
    target = target.view(-1)
    with torch.no_grad():
        true_dist = x.clone()
        true_dist.fill_(self.smoothing / (self.size - 1))
        ignore = target == self.padding_idx  # (B,)
        total = len(target) - ignore.sum().item()
        target = target.masked_fill(ignore, 0)  # avoid -1 index
        true_dist.scatter_(1, target.unsqueeze(1), self.confidence)
    kl = self.criterion(torch.log_softmax(x, dim=1), true_dist)
    denom = total if self.normalize_length else batch_size
    return kl.masked_fill(ignore.unsqueeze(1), 0).sum() / denom
Example 13: forward
# Required import: import torch [as alias]
# Alternatively: from torch import log_softmax [as alias]
def forward(ctx, logits, label, lb_smooth, lb_ignore):
    # prepare label
    num_classes = logits.size(1)
    lb_pos, lb_neg = 1. - lb_smooth, lb_smooth / num_classes
    label = label.clone().detach()
    ignore = label == lb_ignore
    n_valid = (label != lb_ignore).sum()
    label[ignore] = 0
    lb_one_hot = torch.empty_like(logits).fill_(
        lb_neg).scatter_(1, label.unsqueeze(1), lb_pos).detach()
    ignore = ignore.nonzero()
    _, M = ignore.size()
    a, *b = ignore.chunk(M, dim=1)
    mask = [a, torch.arange(logits.size(1)), *b]
    lb_one_hot[mask] = 0
    coeff = (num_classes - 1) * lb_neg + lb_pos
    ctx.variables = coeff, mask, logits, lb_one_hot
    loss = torch.log_softmax(logits, dim=1).neg_().mul_(lb_one_hot).sum(dim=1)
    return loss
Example 14: forward
# Required import: import torch [as alias]
# Alternatively: from torch import log_softmax [as alias]
def forward(self, x, target):
    """Compute loss between x and target.
    :param torch.Tensor x: prediction (batch, seqlen, class)
    :param torch.Tensor target: target signal masked with self.padding_id (batch, seqlen)
    :return: scalar float value
    :rtype torch.Tensor
    """
    assert x.size(2) == self.size
    batch_size = x.size(0)
    x = x.view(-1, self.size)
    target = target.reshape(-1)
    with torch.no_grad():
        true_dist = x.clone()
        true_dist.fill_(self.smoothing / (self.size - 1))
        ignore = target == self.padding_idx  # (B,)
        total = len(target) - ignore.sum().item()
        target = target.masked_fill(ignore, 0)  # avoid -1 index
        true_dist.scatter_(1, target.unsqueeze(1), self.confidence)
    kl = self.criterion(torch.log_softmax(x, dim=1), true_dist)
    denom = total if self.normalize_length else batch_size
    return kl.masked_fill(ignore.unsqueeze(1), 0).sum() / denom
Example 15: discriminate
# Required import: import torch [as alias]
# Alternatively: from torch import log_softmax [as alias]
def discriminate(self, z, edge_index):
    """Given node embeddings :obj:`z`, classifies the link relation
    between node pairs :obj:`edge_index` to be either positive,
    negative or non-existent.
    Args:
        z (Tensor): The node embeddings.
        edge_index (LongTensor): The edge indices.
    """
    value = torch.cat([z[edge_index[0]], z[edge_index[1]]], dim=1)
    value = self.lin(value)
    return torch.log_softmax(value, dim=1)