This article collects typical usage examples of the torch.gather method in Python. If you have been wondering how exactly torch.gather is used, or what working torch.gather code looks like, the curated examples here may help. You can also browse further usage examples from the torch module, where this method lives.
The following shows 15 code examples of the torch.gather method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
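Before the examples, here is a minimal self-contained sketch of what torch.gather does along dim=1, which is the pattern most examples below rely on: picking one column per row.

import torch

scores = torch.tensor([[0.1, 0.7, 0.2],
                       [0.6, 0.3, 0.1]])
targets = torch.tensor([[1], [0]])
# out[i][j] = scores[i][targets[i][j]] when gathering along dim=1
picked = torch.gather(scores, 1, targets)
print(picked)  # tensor([[0.7000], [0.6000]])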
Example 1: _get_body
# Required import: import torch [as alias]
# Or: from torch import gather [as alias]
def _get_body(self, x, target):
    cos_t = torch.gather(x, 1, target.unsqueeze(1))  # cos(theta_yi)
    if self.easy_margin:
        cond = torch.relu(cos_t)
    else:
        cond_v = cos_t - self.threshold
        cond = torch.relu(cond_v)
    cond = cond.bool()
    # Apex would convert FP16 to FP32 here
    # cos(theta_yi + m)
    new_zy = torch.cos(torch.acos(cos_t) + self.m).type(cos_t.dtype)
    if self.easy_margin:
        zy_keep = cos_t
    else:
        zy_keep = cos_t - self.mm  # (cos(theta_yi) - sin(pi - m)*m)
    new_zy = torch.where(cond, new_zy, zy_keep)
    diff = new_zy - cos_t  # cos(theta_yi + m) - cos(theta_yi)
    gt_one_hot = F.one_hot(target, num_classes=self.classes)
    body = gt_one_hot * diff
    return body
Example 2: masked_cross_entropy_
# Required import: import torch [as alias]
# Or: from torch import gather [as alias]
def masked_cross_entropy_(logits, target, length, take_log=False):
    if USE_CUDA:
        length = Variable(torch.LongTensor(length)).cuda()
    else:
        length = Variable(torch.LongTensor(length))
    # logits_flat: (batch * max_len, num_classes); -1 is inferred from the other dimensions
    logits_flat = logits.view(-1, logits.size(-1))
    if take_log:
        logits_flat = torch.log(logits_flat)
    # target_flat: (batch * max_len, 1)
    target_flat = target.view(-1, 1)
    # losses_flat: (batch * max_len, 1)
    losses_flat = -torch.gather(logits_flat, dim=1, index=target_flat)
    # losses: (batch, max_len)
    losses = losses_flat.view(*target.size())
    # mask: (batch, max_len)
    mask = sequence_mask(sequence_length=length, max_len=target.size(1))
    losses = losses * mask.float()
    loss = losses.sum() / length.float().sum()
    return loss
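Note: this snippet assumes a sequence_mask helper that is not shown on this page. A typical implementation (an assumption, not the original code) looks like this:

def sequence_mask(sequence_length, max_len=None):
    # boolean mask: True for positions before each sequence's length
    if max_len is None:
        max_len = sequence_length.max().item()
    seq_range = torch.arange(max_len, device=sequence_length.device)
    return seq_range.unsqueeze(0) < sequence_length.unsqueeze(1)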
Example 3: masked_cross_entropy_for_slot
# Required import: import torch [as alias]
# Or: from torch import gather [as alias]
def masked_cross_entropy_for_slot(logits, target, mask, use_softmax=True):
    # logits_flat: (batch * |slots|, num_classes); -1 is inferred from the other dimensions
    logits_flat = logits.view(-1, logits.size(-1))
    if use_softmax:
        log_probs_flat = functional.log_softmax(logits_flat, dim=1)
    else:
        log_probs_flat = logits_flat  # inputs are assumed to be log-probabilities already
    # target_flat: (batch * |slots|, 1)
    target_flat = target.view(-1, 1)
    losses_flat = -torch.gather(log_probs_flat, dim=1, index=target_flat)
    losses = losses_flat.view(*target.size())  # (batch, |slots|)
    losses = losses * mask.float()
    loss = losses.sum() / (losses.size(0) * losses.size(1))
    return loss
Example 4: get_ranking_loss
# Required import: import torch [as alias]
# Or: from torch import gather [as alias]
def get_ranking_loss(n):
    def ranking_loss(scores, targets):
        """Slack-rescaled max-margin loss."""
        costs = targets[1]
        true_ants = targets[2]
        weights = targets[4] if len(targets) == 5 else None
        true_ant_score = torch.gather(scores, 1, true_ants)
        top_true, _ = true_ant_score.max(dim=1)
        tmp_loss = scores.add(1).add(
            top_true.unsqueeze(1).neg()
        )  # 1 + scores - top_true
        if weights is not None:
            tmp_loss = tmp_loss.mul(weights)
        tmp_loss = tmp_loss.mul(costs)
        loss, _ = tmp_loss.max(dim=1)
        out_score = torch.sum(loss)
        return out_score / n
    return ranking_loss
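A hypothetical call, just to pin down the expected shapes (all names and values here are illustrative, not from the original project):

scores = torch.randn(4, 6)               # candidate antecedent scores
costs = torch.rand(4, 6)                 # per-candidate mistake costs
true_ants = torch.randint(0, 6, (4, 2))  # indices of true antecedents
loss_fn = get_ranking_loss(n=4)
loss = loss_fn(scores, (None, costs, true_ants, None))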
Example 5: _compute_loss
# Required import: import torch [as alias]
# Or: from torch import gather [as alias]
def _compute_loss(self, batch, output, target):
    scores = self.generator(self._bottle(output))
    gtruth = target.view(-1)
    if self.confidence < 1:
        tdata = gtruth.data
        mask = torch.nonzero(tdata.eq(self.padding_idx)).squeeze()
        log_likelihood = torch.gather(scores.data, 1, tdata.unsqueeze(1))
        tmp_ = self.one_hot.repeat(gtruth.size(0), 1)
        tmp_.scatter_(1, tdata.unsqueeze(1), self.confidence)
        if mask.dim() > 0:
            log_likelihood.index_fill_(0, mask, 0)
            tmp_.index_fill_(0, mask, 0)
        gtruth = Variable(tmp_, requires_grad=False)
    loss = self.criterion(scores, gtruth)
    if self.confidence < 1:
        # Default: report smoothed ppl.
        # loss_data = -log_likelihood.sum(0)
        loss_data = loss.data.clone()
    else:
        loss_data = loss.data.clone()
    stats = self._stats(loss_data, scores.data, target.view(-1).data)
    return loss, stats
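The one_hot/scatter_ combination above builds the smoothed target distribution. A toy sketch of the idea follows, using a generic epsilon split, which may differ from how the original handles the padding index:

vocab, eps = 5, 0.1
tgt = torch.tensor([2, 0])
smoothed = torch.full((2, vocab), eps / (vocab - 1))
smoothed.scatter_(1, tgt.unsqueeze(1), 1.0 - eps)
# each row now sums to ~1, with mass 1-eps on the gold token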
Example 6: get_ranking
# Required import: import torch [as alias]
# Or: from torch import gather [as alias]
def get_ranking(predictions, labels, num_guesses=5):
    """
    Given a matrix of predictions and labels for the correct ones, get the number
    of guesses required to get the prediction right per example.
    :param predictions: [batch_size, range_size] predictions
    :param labels: [batch_size] array of labels
    :param num_guesses: number of guesses to return
    :return: ranks of the ground-truth labels, and the top num_guesses guesses
    """
    assert labels.size(0) == predictions.size(0)
    assert labels.dim() == 1
    assert predictions.dim() == 2
    values, full_guesses = predictions.topk(predictions.size(1), dim=1)
    _, ranking = full_guesses.topk(full_guesses.size(1), dim=1, largest=False)
    gt_ranks = torch.gather(ranking.data, 1, labels[:, None]).squeeze()
    guesses = full_guesses[:, :num_guesses]
    return gt_ranks, guesses
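Hypothetical usage, shapes only (values illustrative):

predictions = torch.randn(4, 10)
labels = torch.randint(0, 10, (4,))
gt_ranks, guesses = get_ranking(predictions, labels, num_guesses=3)
# gt_ranks[i] == 0 means the correct label was the top prediction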
Example 7: forward
# Required import: import torch [as alias]
# Or: from torch import gather [as alias]
def forward(self, x, y=None, batch=256):
    if y is not None:
        x, y = self.cr(x, y)
    else:
        x, y = self.cr(x)
    x = torch.nn.utils.rnn.pad_packed_sequence(x)
    r = torch.transpose(x[0], 0, 1)  # (batch, max_len, hidden)
    y = y.view(batch, self.hidden_size)
    ind = x[1].view(batch, 1, 1)  # sequence lengths
    ind = ind - 1  # index of the last valid timestep
    ind = ind.expand(-1, -1, self.hidden_size)
    t = torch.gather(r, 1, ind)  # last hidden state of each sequence
    t = t.view(batch, self.hidden_size)
    t = self.fn(t)
    y = self.fn2(y)
    t = t + y
    t = self.sig(t)
    return t
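The gather here implements the common "select the last valid timestep of a padded RNN output" trick. A standalone sketch of just that step, with made-up sizes:

r = torch.randn(3, 5, 8)            # (batch, max_len, hidden)
lengths = torch.tensor([5, 2, 4])
ind = (lengths - 1).view(3, 1, 1).expand(-1, -1, 8)
last = torch.gather(r, 1, ind).view(3, 8)  # hidden state at each sequence's last step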
Example 8: calculate_loss
# Required import: import torch [as alias]
# Or: from torch import gather [as alias]
def calculate_loss(
        self, states, actions, old_policy, old_values, returns, advantages):
    """
    All parameters ought to be tensors on the appropriate compute device.
    """
    values, policy = self.model(states)
    # also requires `import numpy as np`; np.newaxis is equivalent to None here
    a_policy = torch.gather(policy, -1, actions[..., np.newaxis])[..., 0]
    prob_diff = advantages.sign() * (1 - a_policy / old_policy)
    policy_loss = advantages.abs() * torch.clamp(prob_diff, min=-self.eps_policy)
    policy_loss = policy_loss.mean()
    v_clip = old_values + torch.clamp(
        values - old_values, min=-self.eps_value, max=+self.eps_value)
    value_loss = torch.max((v_clip - returns)**2, (values - returns)**2)
    value_loss = value_loss.mean()
    entropy = torch.sum(-policy * torch.log(policy + 1e-12), dim=-1)
    entropy_loss = torch.clamp(entropy.mean(), max=self.entropy_clip)
    entropy_loss *= -self.entropy_reg
    return entropy, policy_loss + value_loss * self.vf_coef + entropy_loss
Example 9: eval
# Required import: import torch [as alias]
# Or: from torch import gather [as alias]
def eval(b, x, *k_b):
    original_shape = x.shape
    x = x.reshape(-1)
    n_values = x.shape[0]
    x = bit_decomposition(x)
    s, t = Array(n + 1, λ, n_values), Array(n + 1, 1, n_values)
    s[0] = k_b[0]
    # here k[1:] is (CW, CW_n)
    CW = k_b[1].unbind() + (k_b[2],)
    t[0] = b
    for i in range(0, n):
        τ = G(s[i]) ^ (t[i] * CW[i])
        τ = τ.reshape(2, λ + 1, n_values)
        x_i = x[i].unsqueeze(0).expand(λ + 1, n_values).unsqueeze(0).long()
        filtered_τ = th.gather(τ, 0, x_i).squeeze(0)
        s[i + 1], t[i + 1] = split(filtered_τ, [λ, 1])
    flat_result = (-1) ** b * (Convert(s[n]) + t[n].squeeze() * CW[n])
    return flat_result.reshape(original_shape)
Example 10: masked_cross_entropy
# Required import: import torch [as alias]
# Or: from torch import gather [as alias]
def masked_cross_entropy(logits, target, length, use_cuda=True):
    if use_cuda:
        length = Variable(torch.LongTensor(length)).cuda()
        target_flat = target.view(-1, 1).cuda()
    else:
        length = Variable(torch.LongTensor(length))
        target_flat = target.view(-1, 1)
    # logits_flat: (batch * max_len, num_classes)
    logits_flat = logits.view(-1, logits.size(-1))
    # log_probs_flat: (batch * max_len, num_classes)
    log_probs_flat = functional.log_softmax(logits_flat, dim=1)
    # losses_flat: (batch * max_len, 1)
    losses_flat = -torch.gather(log_probs_flat, dim=1, index=target_flat)
    # losses: (batch, max_len)
    losses = losses_flat.view(*target.size())
    # mask: (batch, max_len)
    mask = sequence_mask(sequence_length=length, max_len=target.size(1))
    losses = losses * mask.float()
    loss = losses.sum() / length.float().sum()
    return loss
Example 11: _expand_visual
# Required import: import torch [as alias]
# Or: from torch import gather [as alias]
def _expand_visual(self, visual: utils.TensorOrSequence, cur_beam_size: int, selected_beam: torch.Tensor):
    if isinstance(visual, torch.Tensor):
        visual_shape = visual.shape
        visual_exp_shape = (self.b_s, cur_beam_size) + visual_shape[1:]
        visual_red_shape = (self.b_s * self.beam_size,) + visual_shape[1:]
        selected_beam_red_size = (self.b_s, self.beam_size) + tuple(1 for _ in range(len(visual_exp_shape) - 2))
        selected_beam_exp_size = (self.b_s, self.beam_size) + visual_exp_shape[2:]
        visual_exp = visual.view(visual_exp_shape)
        selected_beam_exp = selected_beam.view(selected_beam_red_size).expand(selected_beam_exp_size)
        visual = torch.gather(visual_exp, 1, selected_beam_exp).view(visual_red_shape)
    else:
        new_visual = []
        for im in visual:
            visual_shape = im.shape
            visual_exp_shape = (self.b_s, cur_beam_size) + visual_shape[1:]
            visual_red_shape = (self.b_s * self.beam_size,) + visual_shape[1:]
            selected_beam_red_size = (self.b_s, self.beam_size) + tuple(1 for _ in range(len(visual_exp_shape) - 2))
            selected_beam_exp_size = (self.b_s, self.beam_size) + visual_exp_shape[2:]
            visual_exp = im.view(visual_exp_shape)
            selected_beam_exp = selected_beam.view(selected_beam_red_size).expand(selected_beam_exp_size)
            new_im = torch.gather(visual_exp, 1, selected_beam_exp).view(visual_red_shape)
            new_visual.append(new_im)
        visual = tuple(new_visual)
    return visual
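A toy illustration of the beam reordering this method performs, with made-up sizes:

b_s, beam_size, feat = 2, 3, 4
visual_exp = torch.randn(b_s, beam_size, feat)
selected_beam = torch.tensor([[2, 0, 1], [1, 1, 0]])
idx = selected_beam.unsqueeze(-1).expand(b_s, beam_size, feat)
reordered = torch.gather(visual_exp, 1, idx)  # row i, slot j now holds beam selected_beam[i, j]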
Example 12: forward
# Required import: import torch [as alias]
# Or: from torch import gather [as alias]
def forward(self, input, target, th=1.0):
    log_p = F.log_softmax(input, dim=1)
    if th < 1:  # hard mining; not used for our model training
        mask = F.softmax(input, dim=1) > th
        mask = mask.data
        new_target = target.data.clone()
        new_target[new_target == self.ignore] = 0
        indx = torch.gather(mask, 1, new_target.unsqueeze(1))
        indx = indx.squeeze(1)
        mod_target = target.clone()
        mod_target[indx] = self.ignore
        target = mod_target
    loss = self.nll_loss(log_p, target)
    total_valid_pixel = torch.sum(target.data != self.ignore)
    return loss, Variable(torch.FloatTensor([total_valid_pixel]).cuda())
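A tiny illustration of the boolean gather used for the hard-mining mask (made-up values):

probs = torch.tensor([[0.9, 0.1], [0.4, 0.6]])
target = torch.tensor([0, 0])
mask = probs > 0.8
confident = torch.gather(mask, 1, target.unsqueeze(1)).squeeze(1)
# confident -> tensor([True, False]): only the first pixel is already well classified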
Example 13: update
# Required import: import torch [as alias]
# Or: from torch import gather [as alias]
def update(self, model_output, target, **kwargs):
    """
    args:
        model_output: tensor of shape (B, C) where each value is
            either a logit or a class probability.
        target: tensor of shape (B, C) that is one-hot /
            multi-label encoded, or a tensor of shape (B) /
            (B, 1) that is integer encoded
    """
    # Convert target to 0/1 encoding if it isn't already
    target = maybe_convert_to_one_hot(target, model_output)
    _, pred = model_output.topk(max(self._topk), dim=1, largest=True, sorted=True)
    for i, k in enumerate(self._topk):
        self._curr_correct_predictions_k[i] += (
            torch.gather(target, dim=1, index=pred[:, :k])
            .max(dim=1)
            .values.sum()
            .item()
        )
    self._curr_sample_count += model_output.shape[0]
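The gather-then-max idiom counts a hit whenever the true class appears among the top-k predictions. A small worked example:

target = torch.tensor([[0., 1., 0.], [1., 0., 0.]])             # one-hot
model_output = torch.tensor([[0.1, 0.7, 0.2], [0.2, 0.3, 0.5]])
_, pred = model_output.topk(2, dim=1)
hits = torch.gather(target, 1, pred).max(dim=1).values
# hits -> tensor([1., 0.]): the second sample's true class is outside the top-2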
Example 14: select_next_words
# Required import: import torch [as alias]
# Or: from torch import gather [as alias]
def select_next_words(
    self, word_scores, bsz, beam_size, possible_translation_tokens
):
    cand_scores, cand_indices = torch.topk(word_scores.view(bsz, -1), k=beam_size)
    possible_tokens_size = self.vocab_size
    if possible_translation_tokens is not None:
        possible_tokens_size = possible_translation_tokens.size(0)
    cand_beams = torch.div(cand_indices, possible_tokens_size)  # beam index of each candidate
    cand_indices.fmod_(possible_tokens_size)  # token index within the (possibly reduced) vocab
    # Handle vocab reduction
    if possible_translation_tokens is not None:
        possible_translation_tokens = possible_translation_tokens.view(
            1, possible_tokens_size
        ).expand(cand_indices.size(0), possible_tokens_size)
        cand_indices = torch.gather(
            possible_translation_tokens, dim=1, index=cand_indices, out=cand_indices
        )
    return cand_scores, cand_indices, cand_beams
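A toy sketch of decomposing a flat top-k index back into (beam, token), written with the modern rounding_mode spelling of integer division (an assumption; older PyTorch versions allowed plain torch.div as above):

bsz, beam, vocab = 1, 2, 4
word_scores = torch.randn(bsz, beam, vocab)
cand_scores, cand_indices = torch.topk(word_scores.view(bsz, -1), k=beam)
cand_beams = torch.div(cand_indices, vocab, rounding_mode='floor')
cand_tokens = cand_indices.fmod(vocab)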
Example 15: get_kd_loss
# Required import: import torch [as alias]
# Or: from torch import gather [as alias]
def get_kd_loss(self, sample, student_lprobs, lprobs):
    """
    The second return argument is used for unit testing.
    Args:
        * sample: batched sample that has teacher score keys (top_k_scores and
            top_k_indices)
        * student_lprobs: tensor of student log probabilities
        * lprobs: flat version of student_lprobs
    """
    top_k_teacher_probs_normalized = sample["top_k_scores"]
    indices = sample["top_k_indices"]
    assert indices.shape[0:1] == student_lprobs.shape[0:1]
    kd_loss = -torch.sum(
        torch.gather(student_lprobs, 2, indices)
        * top_k_teacher_probs_normalized.float()
    )
    return kd_loss
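Shapes for the knowledge-distillation term, as a hypothetical example (B=2 sentences, T=3 steps, V=6 vocab, teacher top-4; all values illustrative):

student_lprobs = torch.log_softmax(torch.randn(2, 3, 6), dim=-1)  # (B, T, V)
indices = torch.randint(0, 6, (2, 3, 4))                          # teacher top-4 token ids
teacher_probs = torch.softmax(torch.randn(2, 3, 4), dim=-1)       # normalized teacher scores
kd_loss = -(torch.gather(student_lprobs, 2, indices) * teacher_probs).sum()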