

Python torch.masked_select Method Code Examples

This article collects typical usage examples of the torch.masked_select method in Python. If you are unsure what torch.masked_select does or how to call it, the curated examples below should help. You can also explore other usage examples from the torch module.


The following presents 15 code examples of the torch.masked_select method, ordered by popularity.
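
Before diving into the examples, here is a minimal, self-contained sketch of what torch.masked_select does (the tensor values below are made up for illustration): it takes an input tensor and a boolean mask that is broadcastable to the input's shape, and returns a flat 1-D tensor containing the elements where the mask is True.

import torch

x = torch.tensor([[1., 2., 3.],
                  [4., 5., 6.]])
mask = x > 3                                 # boolean mask with the same shape as x
print(torch.masked_select(x, mask))          # tensor([4., 5., 6.]) -- result is always 1-D

# The mask only needs to be broadcastable to the input:
row_mask = torch.tensor([[False], [True]])   # shape (2, 1) broadcasts across the columns
print(torch.masked_select(x, row_mask))      # tensor([4., 5., 6.])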

Example 1: forward

# Required import: import torch [as alias]
# Or: from torch import masked_select [as alias]
def forward(self, input_ids, token_type_ids=None, attention_mask=None, 
        labels=None, input_mask=None):

        last_bert_layer, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, \
            output_all_encoded_layers=False)
        last_bert_layer = last_bert_layer.view(-1, self.hidden_size)
        last_bert_layer = self.dropout(last_bert_layer)
        logits = self.classifier(last_bert_layer) 

        if labels is not None:
            loss_fct = CrossEntropyLoss()
            if input_mask is not None:
                # Restrict the loss to the positions marked valid by input_mask.
                # masked_select returns a flat tensor, so reshape the selected
                # logits back to (num_valid, num_labels).
                mask = input_mask.view(-1, 1).bool()
                masked_logits = torch.masked_select(
                    logits.view(-1, self.num_labels), mask).view(-1, self.num_labels)
                masked_labels = torch.masked_select(labels.view(-1), mask.view(-1))
                loss = loss_fct(masked_logits, masked_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            return loss
        else:
            return logits 
Author: pranciskus | Project: mrc-for-flat-nested-ner | Lines: 21 | Source: bert_tagger.py
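
As a side note to Example 1, here is a minimal standalone sketch of the masking pattern the mask branch aims for: keeping only valid token positions before computing the cross-entropy loss. It is not taken from the repository; all shapes, names, and values are made up for illustration.

import torch
from torch.nn import CrossEntropyLoss

num_labels = 5
logits = torch.randn(2, 4, num_labels)          # (batch, seq_len, num_labels), dummy values
labels = torch.randint(0, num_labels, (2, 4))   # dummy gold labels
input_mask = torch.tensor([[1, 1, 1, 0],
                           [1, 1, 0, 0]])       # 1 = real token, 0 = padding

active = input_mask.view(-1).bool()
flat_logits = logits.view(-1, num_labels)
# masked_select flattens its output, so reshape back to (num_valid, num_labels);
# plain boolean indexing (flat_logits[active]) would give the same result.
active_logits = torch.masked_select(flat_logits, active.unsqueeze(1)).view(-1, num_labels)
active_labels = torch.masked_select(labels.view(-1), active)

loss = CrossEntropyLoss()(active_logits, active_labels)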

Example 2: forward

# Required import: import torch [as alias]
# Or: from torch import masked_select [as alias]
def forward(self, seq, bn_seq, fg_seq, seqLogprobs, bnLogprobs, fgLogprobs, reward):

        seqLogprobs = seqLogprobs.view(-1)
        reward = reward.view(-1)
        fg_seq = fg_seq.squeeze()
        # Keep the first timestep of every sequence, plus timestep t whenever token t-1 is non-zero (not padding).
        seq_mask = torch.cat((seq.new(seq.size(0), 1).fill_(1).byte(), seq.gt(0)[:, :-1]), 1).view(-1)
        seq_mask = Variable(seq_mask)
        seq_out = - torch.masked_select(seqLogprobs * reward, seq_mask)
        seq_out = torch.sum(seq_out) / torch.sum(seq_mask.data)

        bnLogprobs = bnLogprobs.view(-1)
        bn_mask = fg_seq.gt(0).view(-1)
        bn_mask = Variable(bn_mask)

        bn_out = - torch.masked_select(bnLogprobs * reward, bn_mask)
        bn_out = torch.sum(bn_out) / max(torch.sum(bn_mask.data),1)

        fgLogprobs = fgLogprobs.view(-1)
        fg_out = - torch.masked_select(fgLogprobs * reward, bn_mask)
        fg_out = torch.sum(fg_out) / max(torch.sum(bn_mask.data),1)

        return seq_out, bn_out, fg_out 
Author: jiasenlu | Project: NeuralBabyTalk | Lines: 24 | Source: utils.py

Example 3: compute_masked_likelihood

# Required import: import torch [as alias]
# Or: from torch import masked_select [as alias]
def compute_masked_likelihood(mu, data, mask, likelihood_func):
	# Compute the likelihood per patient and per attribute so that we don't priorize patients with more measurements
	n_traj_samples, n_traj, n_timepoints, n_dims = data.size()

	res = []
	for i in range(n_traj_samples):
		for k in range(n_traj):
			for j in range(n_dims):
				data_masked = torch.masked_select(data[i,k,:,j], mask[i,k,:,j].byte())
				
				#assert(torch.sum(data_masked == 0.) < 10)

				mu_masked = torch.masked_select(mu[i,k,:,j], mask[i,k,:,j].byte())
				log_prob = likelihood_func(mu_masked, data_masked, indices = (i,k,j))
				res.append(log_prob)
	# shape: [n_traj*n_traj_samples, 1]

	res = torch.stack(res, 0).to(get_device(data))
	res = res.reshape((n_traj_samples, n_traj, n_dims))
	# Take mean over the number of dimensions
	res = torch.mean(res, -1) # !!!!!!!!!!! changed from sum to mean
	res = res.transpose(0,1)
	return res 
Author: YuliaRubanova | Project: latent_ode | Lines: 25 | Source: likelihood_eval.py

Example 4: forward

# Required import: import torch [as alias]
# Or: from torch import masked_select [as alias]
def forward(self, displacement):

        # compute displacement field
        displacement = self._grid + displacement

        # compute current mask
        mask = super(MSE, self).GetCurrentMask(displacement)

        # warp the moving image with the displacement field
        self.warped_moving_image = F.grid_sample(self._moving_image.image, displacement)

        # compute squared differences
        value = (self.warped_moving_image - self._fixed_image.image).pow(2)

        # mask values
        value = th.masked_select(value, mask)

        return self.return_loss(value) 
Author: airlab-unibas | Project: airlab | Lines: 20 | Source: pairwise.py

Example 5: compute_stage_loss

# Required import: import torch [as alias]
# Or: from torch import masked_select [as alias]
def compute_stage_loss(criterion, target_var, outputs, mask_var, total_labeled_cpm, weight_of_idt):

  total_loss = 0
  each_stage_loss = []
  mask_outputs = []
  for output_var in outputs:
    stage_loss = 0
    output = torch.masked_select(output_var, mask_var)
    target = torch.masked_select(target_var, mask_var)
    mask_outputs.append(output)

    stage_loss = criterion(output, target) / (total_labeled_cpm*2)
    total_loss = total_loss + stage_loss
    each_stage_loss.append(stage_loss.item())
  if weight_of_idt is not None and weight_of_idt > 0:
    pair_loss_a = torch.sum( torch.abs(mask_outputs[0] - mask_outputs[1]) )
    pair_loss_b = torch.sum( torch.abs(mask_outputs[0] - mask_outputs[2]) )
    pair_loss_c = torch.sum( torch.abs(mask_outputs[1] - mask_outputs[2]) )
    identity_loss = weight_of_idt * (pair_loss_a + pair_loss_b + pair_loss_c) / 3
    each_stage_loss.append(identity_loss.item())
    total_loss = total_loss + identity_loss
  return total_loss, each_stage_loss 
Author: D-X-Y | Project: landmark-detection | Lines: 24 | Source: cpm_loss.py

Example 6: postprocess

# Required import: import torch [as alias]
# Or: from torch import masked_select [as alias]
def postprocess(self, pred):
        cls_pred = pred[..., 0]
        activation = cls_pred > self.config['cls_threshold']
        num_boxes = int(activation.sum())

        if num_boxes == 0:
            print("No bounding box found")
            return [], []

        corners = torch.zeros((num_boxes, 8))
        for i in range(1, 9):
            # The corner channels share the layout of cls_pred above, so index the last dimension.
            corners[:, i - 1] = torch.masked_select(pred[..., i], activation)
        corners = corners.view(-1, 4, 2).numpy()
        scores = torch.masked_select(cls_pred, activation).cpu().numpy()

        # NMS
        selected_ids = non_max_suppression(corners, scores, self.config['nms_iou_threshold'])
        corners = corners[selected_ids]
        scores = scores[selected_ids]

        return corners, scores 
Author: philip-huang | Project: PIXOR | Lines: 23 | Source: run_kitti.py

Example 7: forward

# Required import: import torch [as alias]
# Or: from torch import masked_select [as alias]
def forward(self, prob, target, reward):
        """
        Args:
            prob: (N, C), torch Variable 
            target : (N, ), torch Variable
            reward : (N, ), torch Variable
        """
        N = target.size(0)
        C = prob.size(1)
        one_hot = torch.zeros((N, C))
        if prob.is_cuda:
            one_hot = one_hot.cuda()
        one_hot.scatter_(1, target.data.view((-1,1)), 1)
        one_hot = one_hot.type(torch.ByteTensor)
        one_hot = Variable(one_hot)
        if prob.is_cuda:
            one_hot = one_hot.cuda()
        loss = torch.masked_select(prob, one_hot)
        loss = loss * reward
        loss =  -torch.sum(loss)
        return loss 
Author: IBM | Project: gan-toolkit | Lines: 23 | Source: training_fucntion_generator.py

Example 8: forward

# Required import: import torch [as alias]
# Or: from torch import masked_select [as alias]
def forward(self, preds, targets):
        """
            Args:
                preds: (n, h, w, d)
                targets: (n, h, w, d)
        """
        assert not targets.requires_grad
        assert preds.shape == targets.shape, 'dim of preds and targets are different'

        # Element-wise absolute error, relative to the (eps-stabilized) target magnitude
        dist = torch.abs(preds - targets).view(-1)
        baseV = torch.abs(targets.view(-1) + self.eps)
        relativeDist = torch.div(dist, baseV)

        # Only penalize elements whose relative error exceeds the threshold
        mask = relativeDist.ge(self.threshold)
        largerLossVec = torch.masked_select(dist, mask)
        loss = torch.mean(largerLossVec)

        return loss
Author: ginobilinie | Project: medSynthesisV1 | Lines: 27 | Source: nnBuildUnits.py

Example 9: sentencewise_scores2paragraph_tokenwise_scores

# Required import: import torch [as alias]
# Or: from torch import masked_select [as alias]
def sentencewise_scores2paragraph_tokenwise_scores(sentences_scores, sentences_mask):
    """
    # Input:
    # sentences_mask: (batch_size X num_sentences X sent_seq_len)
    # sentences_scores: (batch_size X num_sentences)

    # Output:
    # paragraph_tokenwise_scores: (batch_size X max_para_seq_len)
    """
    paragraph_tokenwise_scores = []
    for instance_sentences_scores, instance_sentences_mask in zip(torch.unbind(sentences_scores, dim=0),
                                                                  torch.unbind(sentences_mask, dim=0)):
        instance_paragraph_tokenwise_scores = torch.masked_select(instance_sentences_scores.unsqueeze(-1),
                                                                    instance_sentences_mask.byte())
        paragraph_tokenwise_scores.append(instance_paragraph_tokenwise_scores)
    paragraph_tokenwise_scores = torch.nn.utils.rnn.pad_sequence(paragraph_tokenwise_scores, batch_first=True)
    return paragraph_tokenwise_scores 
Author: StonyBrookNLP | Project: multee | Lines: 19 | Source: util.py

Example 10: _cam_loss

# Required import: import torch [as alias]
# Or: from torch import masked_select [as alias]
def _cam_loss(key, targets_hot, decay):
    def loss(state):
        img = state[torchbearer.MODEL].input_image
        return - torch.masked_select(state[key], targets_hot).sum() + decay * img.pow(2).sum()
    return loss 
Author: pytorchbearer | Project: torchbearer | Lines: 7 | Source: inside_cnns.py

Example 11: forward

# Required import: import torch [as alias]
# Or: from torch import masked_select [as alias]
def forward(self, input, target):
        logprob_select = torch.gather(input, 1, target)

        mask = target.data.gt(0)  # generate the mask
        if isinstance(input, Variable):
            mask = Variable(mask, volatile=input.volatile)
        
        out = torch.masked_select(logprob_select, mask)

        loss = -torch.sum(out)  # negated sum of the selected log-probabilities
        return loss 
Author: jiasenlu | Project: visDial.pytorch | Lines: 13 | Source: model.py

Example 12: split_on_targets

# Required import: import torch [as alias]
# Or: from torch import masked_select [as alias]
def split_on_targets(self, hiddens, targets):
        # Split the targets into those in the head and in the tail
        split_targets = []
        split_hiddens = []

        # Determine to which split each element belongs (for each start split value, add 1 if equal or greater)
        # This method appears slower at least for WT-103 values for approx softmax
        #masks = [(targets >= self.splits[idx]).view(1, -1) for idx in range(1, self.nsplits)]
        #mask = torch.sum(torch.cat(masks, dim=0), dim=0)
        ###
        # This is equally fast for smaller splits as method below but scales linearly
        mask = None
        for idx in range(1, self.nsplits):
            partial_mask = targets >= self.splits[idx]
            mask = mask + partial_mask if mask is not None else partial_mask
        ###
        #masks = torch.stack([targets] * (self.nsplits - 1))
        #mask = torch.sum(masks >= self.split_starts, dim=0)
        for idx in range(self.nsplits):
            # If there are no splits, avoid costly masked select
            if self.nsplits == 1:
                split_targets, split_hiddens = [targets], [hiddens]
                continue
            # If all the words are covered by earlier targets, we have empties so later stages don't freak out
            if sum(len(t) for t in split_targets) == len(targets):
                split_targets.append([])
                split_hiddens.append([])
                continue
            # Are you in our split?
            tmp_mask = mask == idx
            split_targets.append(torch.masked_select(targets, tmp_mask))
            split_hiddens.append(hiddens.masked_select(tmp_mask.unsqueeze(1).expand_as(hiddens)).view(-1, hiddens.size(1)))
        return split_targets, split_hiddens 
Author: ChenWu98 | Project: Point-Then-Operate | Lines: 35 | Source: splitcross.py
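
Here is a minimal sketch (with made-up tensors) of the row-selection idiom used in Example 12: a per-row mask is expanded to the hidden size, masked_select flattens the kept elements, and view() restores the (num_kept, hidden_size) shape. Plain boolean indexing gives an equivalent result.

import torch

hiddens = torch.randn(6, 4)                  # (num_tokens, hidden_size), dummy values
targets = torch.tensor([0, 3, 1, 7, 2, 9])
mask = targets >= 3                          # rows that fall into this split

kept = hiddens.masked_select(mask.unsqueeze(1).expand_as(hiddens)).view(-1, hiddens.size(1))
assert torch.equal(kept, hiddens[mask])      # same rows via boolean indexing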

Example 13: forward

# Required import: import torch [as alias]
# Or: from torch import masked_select [as alias]
def forward(self, images, labels):
        r"""
        Overridden.
        """
        fails = torch.arange(images.shape[0]).to(self.device)
        final_images = images.clone().to(self.device)
        labels = labels.to(self.device)
        
        for i, attack in enumerate(self.attacks) :    
            print('- Multi Attack Progress [%d / %d]             ' %((i+1), len(self.attacks)), end='\r')
            adv_images = attack(images[fails], labels[fails])
            
            outputs = self.model(adv_images)
            _, pre = torch.max(outputs.data, 1)

            succeeds = torch.masked_select(fails, pre != labels[fails])
            succeeds_of_fails = torch.masked_select(torch.arange(fails.shape[0]).to(self.device), pre != labels[fails])
            
            final_images[succeeds] = adv_images[succeeds_of_fails]
            
            fails = torch.masked_select(fails, pre == labels[fails])
            
            if len(fails) == 0 :
                warnings.warn("\n * Ealry Stopped cause all images are successfully perturbed.", Warning)
                break
            
        return final_images 
Author: Harry24k | Project: adversarial-attacks-pytorch | Lines: 29 | Source: multiattack.py

Example 14: split_on_targets

# Required import: import torch [as alias]
# Or: from torch import masked_select [as alias]
def split_on_targets(self, hiddens, targets):
        # Split the targets into those in the head and in the tail
        split_targets = []
        split_hiddens = []

        # Determine to which split each element belongs (for each start split value, add 1 if equal or greater)
        # This method appears slower at least for WT-103 values for approx softmax
        # masks = [(targets >= self.splits[idx]).view(1, -1) for idx in range(1, self.nsplits)]
        # mask = torch.sum(torch.cat(masks, dim=0), dim=0)
        ###
        # This is equally fast for smaller splits as method below but scales linearly
        mask = None
        for idx in range(1, self.nsplits):
            partial_mask = targets >= self.splits[idx]
            mask = mask + partial_mask if mask is not None else partial_mask
        ###
        # masks = torch.stack([targets] * (self.nsplits - 1))
        # mask = torch.sum(masks >= self.split_starts, dim=0)
        for idx in range(self.nsplits):
            # If there are no splits, avoid costly masked select
            if self.nsplits == 1:
                split_targets, split_hiddens = [targets], [hiddens]
                continue
            # If all the words are covered by earlier targets, we have empties so later stages don't freak out
            if sum(len(t) for t in split_targets) == len(targets):
                split_targets.append([])
                split_hiddens.append([])
                continue
            # Are you in our split?
            tmp_mask = mask == idx
            split_targets.append(torch.masked_select(targets, tmp_mask))
            split_hiddens.append(
                hiddens.masked_select(tmp_mask.unsqueeze(1).expand_as(hiddens)).view(-1, hiddens.size(1)))
        return split_targets, split_hiddens 
Author: KnowingNothing | Project: FlexTensor | Lines: 36 | Source: train-language-modeling.py

Example 15: multi_nll_loss

# Required import: import torch [as alias]
# Or: from torch import masked_select [as alias]
def multi_nll_loss(scores, target_mask):
    """
    Select actions with sampling at train-time, argmax at test-time:
    """
    scores = scores.exp()
    loss = 0
    for i in range(scores.size(0)):
        loss += torch.neg(torch.log(torch.masked_select(scores[i], target_mask[i]).sum() / scores[i].sum()))
    return loss 
Author: stanfordnlp | Project: coqa-baselines | Lines: 11 | Source: layers.py


Note: The torch.masked_select examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their original authors, who retain all copyrights; please consult each project's license before redistributing or reusing the code. Do not repost without permission.