本文整理匯總了Python中torch.masked_select方法的典型用法代碼示例。如果您正苦於以下問題:Python torch.masked_select方法的具體用法?Python torch.masked_select怎麽用?Python torch.masked_select使用的例子?那麽,這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在模塊 torch
的用法示例。
在下文中一共展示了torch.masked_select方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: forward
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import masked_select [as 別名]
def forward(self, input_ids, token_type_ids=None, attention_mask=None,
            labels=None, input_mask=None):
    """Token-level classification head on top of BERT.

    Args:
        input_ids: token id tensor fed to ``self.bert``.
        token_type_ids: optional segment ids, passed through to BERT.
        attention_mask: optional attention mask, passed through to BERT.
        labels: optional gold label ids; when given, the loss is returned
            instead of the logits.
        input_mask: optional 0/1 mask over token positions; positions with
            value 0 are excluded from the loss.

    Returns:
        Cross-entropy loss tensor when ``labels`` is given, otherwise the
        per-token logits of shape (num_tokens, num_labels).
    """
    last_bert_layer, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, \
        output_all_encoded_layers=False)
    last_bert_layer = last_bert_layer.view(-1, self.hidden_size)
    last_bert_layer = self.dropout(last_bert_layer)
    logits = self.classifier(last_bert_layer)
    if labels is not None:
        loss_fct = CrossEntropyLoss()
        if input_mask is not None:
            # BUG FIX: the original computed `masked_logits` with
            # torch.masked_select and then ignored it, so `input_mask` had no
            # effect. Restrict both logits and labels to active positions.
            active = input_mask.view(-1) == 1
            loss = loss_fct(logits.view(-1, self.num_labels)[active],
                            labels.view(-1)[active])
        else:
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        return loss
    else:
        return logits
示例2: forward
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import masked_select [as 別名]
def forward(self, seq, bn_seq, fg_seq, seqLogprobs, bnLogprobs, fgLogprobs, reward):
    """Reward-weighted (self-critical RL) loss over three output streams.

    Computes a masked, reward-weighted negative log-probability loss for the
    word sequence (`seq`), the bn stream (`bn_seq`) and the fine-grained
    stream (`fg_seq`). Each loss is normalized by the number of unmasked
    positions.

    Returns:
        (seq_out, bn_out, fg_out): three scalar loss tensors.
    """
    seqLogprobs = seqLogprobs.view(-1)
    reward = reward.view(-1)
    fg_seq = fg_seq.squeeze()
    # Word mask: always count the first token of each row, then every
    # position whose PREVIOUS token is non-zero (i.e. include the first
    # end-of-sequence/pad token, exclude everything after it).
    seq_mask = torch.cat((seq.new(seq.size(0),1).fill_(1).byte(), seq.gt(0)[:,:-1]),1).view(-1) #& fg_seq.eq(0)).view(-1)
    seq_mask = Variable(seq_mask)
    seq_out = - torch.masked_select(seqLogprobs * reward, seq_mask)
    seq_out = torch.sum(seq_out) / torch.sum(seq_mask.data)
    bnLogprobs = bnLogprobs.view(-1)
    # bn/fg losses only count positions that carry a fine-grained token (> 0).
    bn_mask = fg_seq.gt(0).view(-1)
    bn_mask = Variable(bn_mask)
    bn_out = - torch.masked_select(bnLogprobs * reward, bn_mask)
    # max(..., 1) guards against division by zero when the mask is empty.
    bn_out = torch.sum(bn_out) / max(torch.sum(bn_mask.data),1)
    fgLogprobs = fgLogprobs.view(-1)
    # fg stream reuses bn_mask: fg log-probs are counted at the same positions.
    fg_out = - torch.masked_select(fgLogprobs * reward, bn_mask)
    fg_out = torch.sum(fg_out) / max(torch.sum(bn_mask.data),1)
    return seq_out, bn_out, fg_out
示例3: compute_masked_likelihood
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import masked_select [as 別名]
def compute_masked_likelihood(mu, data, mask, likelihood_func):
    """Evaluate `likelihood_func` per (sample, trajectory, dimension) on
    observed entries only, so patients with more measurements are not
    prioritized.

    Args:
        mu: predicted means, same shape as `data`.
        data: observations, shape (n_traj_samples, n_traj, n_timepoints, n_dims).
        mask: 0/1 observation mask, same shape as `data`.
        likelihood_func: callable(mu_masked, data_masked, indices=(i, k, j)).

    Returns:
        Tensor of shape (n_traj, n_traj_samples): per-trajectory log-prob,
        averaged over dimensions.
    """
    n_traj_samples, n_traj, n_timepoints, n_dims = data.size()
    per_dim_logprobs = []
    for i in range(n_traj_samples):
        for k in range(n_traj):
            for j in range(n_dims):
                dim_mask = mask[i, k, :, j].byte()
                observed_data = torch.masked_select(data[i, k, :, j], dim_mask)
                #assert(torch.sum(observed_data == 0.) < 10)
                observed_mu = torch.masked_select(mu[i, k, :, j], dim_mask)
                per_dim_logprobs.append(likelihood_func(observed_mu, observed_data, indices=(i, k, j)))
    # shape: [n_traj * n_traj_samples, 1]
    stacked = torch.stack(per_dim_logprobs, 0).to(get_device(data))
    stacked = stacked.reshape((n_traj_samples, n_traj, n_dims))
    # Mean (not sum) over dimensions, so dimensionality does not scale the loss.
    return torch.mean(stacked, -1).transpose(0, 1)
示例4: forward
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import masked_select [as 別名]
def forward(self, displacement):
    """Masked mean-squared-error image similarity for registration.

    Args:
        displacement: predicted displacement field; added to the identity
            grid before warping.

    Returns:
        Result of ``self.return_loss`` over the masked squared intensity
        differences between the warped moving image and the fixed image.
    """
    # compute absolute sampling grid = identity grid + predicted offsets
    displacement = self._grid + displacement
    # compute current mask of valid (in-domain) sample locations
    mask = super(MSE, self).GetCurrentMask(displacement)
    # warp moving image with displacement field
    # NOTE(review): F.grid_sample's default `align_corners` changed in
    # torch 1.3 — confirm the intended setting.
    self.warped_moving_image = F.grid_sample(self._moving_image.image, displacement)
    # compute squared differences against the fixed image
    value = (self.warped_moving_image - self._fixed_image.image).pow(2)
    # keep only values inside the valid region
    value = th.masked_select(value, mask)
    return self.return_loss(value)
示例5: compute_stage_loss
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import masked_select [as 別名]
def compute_stage_loss(criterion, target_var, outputs, mask_var, total_labeled_cpm, weight_of_idt):
    """Sum the per-stage criterion over masked outputs, optionally adding an
    identity (pairwise-consistency) penalty over the first three stages.

    Args:
        criterion: loss callable taking (masked_output, masked_target).
        target_var: target tensor, same shape as each stage output.
        outputs: iterable of per-stage output tensors.
        mask_var: boolean mask selecting labeled entries.
        total_labeled_cpm: normalizer; each stage loss is divided by 2x this.
        weight_of_idt: if a positive number, weight of the pairwise L1
            consistency term between the first three masked stage outputs.

    Returns:
        (total_loss, each_stage_loss): summed loss tensor and the list of
        per-stage (plus optional identity) loss values as Python floats.
    """
    total_loss = 0
    each_stage_loss = []
    mask_outputs = []
    # The masked target is the same for every stage; select it once.
    masked_target = torch.masked_select(target_var, mask_var)
    for stage_output in outputs:
        masked_output = torch.masked_select(stage_output, mask_var)
        mask_outputs.append(masked_output)
        stage_loss = criterion(masked_output, masked_target) / (total_labeled_cpm * 2)
        total_loss = total_loss + stage_loss
        each_stage_loss.append(stage_loss.item())
    if weight_of_idt is not None and weight_of_idt > 0:
        # Pairwise L1 consistency between the first three stages' outputs.
        first, second, third = mask_outputs[0], mask_outputs[1], mask_outputs[2]
        pair_sum = (torch.abs(first - second).sum()
                    + torch.abs(first - third).sum()
                    + torch.abs(second - third).sum())
        identity_loss = weight_of_idt * pair_sum / 3
        each_stage_loss.append(identity_loss.item())
        total_loss = total_loss + identity_loss
    return total_loss, each_stage_loss
示例6: postprocess
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import masked_select [as 別名]
def postprocess(self, pred):
    """Decode raw network output into box corners and scores, then run NMS.

    Args:
        pred: prediction tensor; channel 0 holds classification scores and
            channels 1..8 hold the four (x, y) corner coordinates.
            NOTE(review): `cls_pred` indexes the LAST axis (`pred[..., 0]`)
            while the corner loop indexes the FIRST axis (`pred[i, ...]`) —
            confirm the intended tensor layout; these look inconsistent.

    Returns:
        (corners, scores): numpy arrays after score thresholding and
        non-maximum suppression, or ([], []) when nothing passes the
        threshold.
    """
    cls_pred = pred[..., 0]
    # Boolean mask of cells whose score clears the configured threshold.
    activation = cls_pred > self.config['cls_threshold']
    num_boxes = int(activation.sum())
    if num_boxes == 0:
        print("No bounding box found")
        return [], []
    # Gather the 8 corner coordinates for every activated cell.
    corners = torch.zeros((num_boxes, 8))
    for i in range(1, 9):
        corners[:, i - 1] = torch.masked_select(pred[i, ...], activation)
    corners = corners.view(-1, 4, 2).numpy()
    scores = torch.masked_select(cls_pred, activation).cpu().numpy()
    # NMS
    selected_ids = non_max_suppression(corners, scores, self.config['nms_iou_threshold'])
    corners = corners[selected_ids]
    scores = scores[selected_ids]
    return corners, scores
示例7: forward
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import masked_select [as 別名]
def forward(self, prob, target, reward):
    """Reward-weighted policy-gradient loss.

    Picks, for every row of `prob`, the probability assigned to the action
    in `target`, scales it by `reward`, and returns the negated sum.

    Args:
        prob: (N, C), torch Variable
        target : (N, ), torch Variable
        reward : (N, ), torch Variable
    """
    num_rows = target.size(0)
    num_classes = prob.size(1)
    # Build a one-hot selector for the taken action of each row.
    one_hot = torch.zeros((num_rows, num_classes))
    if prob.is_cuda:
        one_hot = one_hot.cuda()
    one_hot.scatter_(1, target.data.view((-1,1)), 1)
    # Byte mask so it can drive masked_select below.
    one_hot = Variable(one_hot.type(torch.ByteTensor))
    if prob.is_cuda:
        one_hot = one_hot.cuda()
    picked = torch.masked_select(prob, one_hot)
    return -torch.sum(picked * reward)
示例8: forward
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import masked_select [as 別名]
def forward(self, preds, targets):
    """Mean absolute error over entries whose RELATIVE error is at least
    `self.threshold`; `self.eps` stabilizes the denominator.

    Args:
        inputs:(n, h, w, d)
        targets:(n, h, w, d)
    """
    assert not targets.requires_grad
    assert preds.shape == targets.shape,'dim of preds and targets are different'
    abs_err = torch.abs(preds - targets).view(-1)
    # Relative error with eps-regularized denominator.
    denom = torch.abs(targets.view(-1) + self.eps)
    relative_err = torch.div(abs_err, denom)
    outlier_mask = relative_err.ge(self.threshold)
    # Average absolute error over the flagged (large relative error) entries.
    outlier_errs = torch.masked_select(abs_err, outlier_mask)
    return torch.mean(outlier_errs)
示例9: sentencewise_scores2paragraph_tokenwise_scores
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import masked_select [as 別名]
def sentencewise_scores2paragraph_tokenwise_scores(sentences_scores, sentences_mask):
    """Broadcast each sentence's scalar score onto its (masked) tokens and
    concatenate per instance, padding to the longest paragraph.

    # Input:
    # sentences_mask: (batch_size X num_sentences X sent_seq_len)
    # sentences_scores: (batch_size X num_sentences)
    # Output:
    # paragraph_tokenwise_scores: (batch_size X max_para_seq_len)
    """
    per_instance_scores = []
    for inst_scores, inst_mask in zip(torch.unbind(sentences_scores, dim=0),
                                      torch.unbind(sentences_mask, dim=0)):
        # masked_select broadcasts the (num_sentences, 1) scores against the
        # (num_sentences, sent_seq_len) mask, repeating each sentence score
        # once per unmasked token.
        tokenwise = torch.masked_select(inst_scores.unsqueeze(-1), inst_mask.byte())
        per_instance_scores.append(tokenwise)
    return torch.nn.utils.rnn.pad_sequence(per_instance_scores, batch_first=True)
示例10: _cam_loss
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import masked_select [as 別名]
def _cam_loss(key, targets_hot, decay):
    """Build a torchbearer loss closure: maximize the activations selected by
    `targets_hot` in `state[key]` while L2-penalizing the input image with
    weight `decay`."""
    def loss(state):
        img = state[torchbearer.MODEL].input_image
        activation_term = torch.masked_select(state[key], targets_hot).sum()
        decay_term = decay * img.pow(2).sum()
        return -activation_term + decay_term
    return loss
示例11: forward
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import masked_select [as 別名]
def forward(self, input, target):
    """Masked negative log-likelihood over gathered target log-probs.

    Args:
        input: log-probability tensor gathered along dim 1 by `target`
            (presumably (N, vocab) — TODO confirm against caller).
        target: index tensor aligned with `input`; positions equal to 0 are
            treated as padding and excluded from the loss.

    Returns:
        Scalar SUMMED (not averaged) negative log-likelihood.
    """
    logprob_select = torch.gather(input, 1, target)
    mask = target.data.gt(0)  # generate the mask: True where target is non-pad
    if isinstance(input, Variable):
        # Legacy pre-0.4 autograd API: propagate `volatile` from the input.
        mask = Variable(mask, volatile=input.volatile)
    out = torch.masked_select(logprob_select, mask)
    # NOTE(review): despite the old "average" comment, this is a negated SUM.
    loss = -torch.sum(out)
    return loss
示例12: split_on_targets
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import masked_select [as 別名]
def split_on_targets(self, hiddens, targets):
    """Partition `targets` (and the matching rows of `hiddens`) into the
    head/tail splits of an adaptive softmax, using `self.splits` boundaries.

    Returns:
        (split_targets, split_hiddens): per-split lists; empty splits are
        represented by plain empty lists.
    """
    split_targets = []
    split_hiddens = []
    # Assign each target a split id: count how many split boundaries it
    # meets or exceeds (adds 1 per boundary passed).
    # This scales linearly but is as fast as the stacked-comparison
    # alternative for small split counts.
    split_ids = None
    for boundary_idx in range(1, self.nsplits):
        passed_boundary = targets >= self.splits[boundary_idx]
        split_ids = passed_boundary if split_ids is None else split_ids + passed_boundary
    for split_idx in range(self.nsplits):
        # Single-split model: skip the costly masked selects entirely.
        if self.nsplits == 1:
            split_targets, split_hiddens = [targets], [hiddens]
            continue
        # All words already covered by earlier splits: append empties so
        # later stages don't choke on missing entries.
        if sum(len(t) for t in split_targets) == len(targets):
            split_targets.append([])
            split_hiddens.append([])
            continue
        # Select the members of this split.
        in_split = split_ids == split_idx
        split_targets.append(torch.masked_select(targets, in_split))
        row_mask = in_split.unsqueeze(1).expand_as(hiddens)
        split_hiddens.append(hiddens.masked_select(row_mask).view(-1, hiddens.size(1)))
    return split_targets, split_hiddens
示例13: forward
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import masked_select [as 別名]
def forward(self, images, labels):
    r"""
    Overridden.

    Runs each attack in `self.attacks` in sequence over the images that are
    still classified correctly, keeping successful adversarial examples and
    stopping early once every image has been perturbed.
    """
    # Global indices of images not yet successfully attacked.
    fails = torch.arange(images.shape[0]).to(self.device)
    final_images = images.clone().to(self.device)
    labels = labels.to(self.device)
    for i, attack in enumerate(self.attacks) :
        print('- Multi Attack Progress [%d / %d] ' %((i+1), len(self.attacks)), end='\r')
        adv_images = attack(images[fails], labels[fails])
        outputs = self.model(adv_images)
        _, pre = torch.max(outputs.data, 1)
        # Global indices where this attack flipped the prediction...
        succeeds = torch.masked_select(fails, pre != labels[fails])
        # ...and the corresponding positions within the current fails batch,
        # needed to index into `adv_images`.
        succeeds_of_fails = torch.masked_select(torch.arange(fails.shape[0]).to(self.device), pre != labels[fails])
        final_images[succeeds] = adv_images[succeeds_of_fails]
        # Keep only still-correctly-classified images for the next attack.
        fails = torch.masked_select(fails, pre == labels[fails])
        if len(fails) == 0 :
            warnings.warn("\n * Ealry Stopped cause all images are successfully perturbed.", Warning)
            break
    return final_images
示例14: split_on_targets
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import masked_select [as 別名]
def split_on_targets(self, hiddens, targets):
    """Partition `targets` (and the matching rows of `hiddens`) into the
    head/tail splits of an adaptive softmax, using `self.splits` boundaries.

    Returns:
        (split_targets, split_hiddens): per-split lists; exhausted splits
        are represented by plain empty lists.
    """
    # Split the targets into those in the head and in the tail
    split_targets = []
    split_hiddens = []
    # Determine to which split each element belongs (for each start split value, add 1 if equal or greater)
    # This method appears slower at least for WT-103 values for approx softmax
    # masks = [(targets >= self.splits[idx]).view(1, -1) for idx in range(1, self.nsplits)]
    # mask = torch.sum(torch.cat(masks, dim=0), dim=0)
    ###
    # This is equally fast for smaller splits as method below but scales linearly
    # NOTE(review): with modern torch the comparisons yield bool tensors, so
    # the `mask + partial_mask` accumulation for nsplits > 2 relies on
    # bool/byte addition semantics — confirm on the targeted torch version.
    mask = None
    for idx in range(1, self.nsplits):
        partial_mask = targets >= self.splits[idx]
        mask = mask + partial_mask if mask is not None else partial_mask
    ###
    # masks = torch.stack([targets] * (self.nsplits - 1))
    # mask = torch.sum(masks >= self.split_starts, dim=0)
    for idx in range(self.nsplits):
        # If there are no splits, avoid costly masked select
        if self.nsplits == 1:
            split_targets, split_hiddens = [targets], [hiddens]
            continue
        # If all the words are covered by earlier targets, we have empties so later stages don't freak out
        if sum(len(t) for t in split_targets) == len(targets):
            split_targets.append([])
            split_hiddens.append([])
            continue
        # Are you in our split?
        tmp_mask = mask == idx
        split_targets.append(torch.masked_select(targets, tmp_mask))
        # Expand the 1-D membership mask across the hidden dimension, then
        # reshape the flat selection back to (num_selected, hidden_size).
        split_hiddens.append(
            hiddens.masked_select(tmp_mask.unsqueeze(1).expand_as(hiddens)).view(-1, hiddens.size(1)))
    return split_targets, split_hiddens
示例15: multi_nll_loss
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import masked_select [as 別名]
def multi_nll_loss(scores, target_mask):
    """
    Select actions with sampling at train-time, argmax at test-time:
    """
    # Work in probability space; each row is normalized by its own total mass.
    probs = scores.exp()
    loss = 0
    for row_probs, row_mask in zip(probs, target_mask):
        selected_mass = torch.masked_select(row_probs, row_mask).sum()
        loss += torch.neg(torch.log(selected_mass / row_probs.sum()))
    return loss