

Python functional.cross_entropy Function Code Examples

This article collects and summarizes typical usage examples of the Python function torch.nn.functional.cross_entropy. If you have been wondering how exactly to use cross_entropy, how to call it, or what it looks like in real code, the curated examples below may help.


The following presents 15 code examples of the cross_entropy function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
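
Before the examples, here is a minimal, self-contained sketch of the basic call (shapes chosen purely for illustration): F.cross_entropy expects raw, unnormalized logits of shape (N, C) and integer class indices of shape (N,), and applies log-softmax internally, so no softmax should be applied beforehand.

import torch
import torch.nn.functional as F

logits = torch.randn(4, 10)           # batch of 4 samples, 10 classes; raw scores
targets = torch.tensor([1, 0, 9, 3])  # integer class indices, one per sample

loss = F.cross_entropy(logits, targets)                          # scalar, mean over the batch
per_sample = F.cross_entropy(logits, targets, reduction='none')  # shape (4,)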

Example 1: forward

    def forward(self, sentences, lengths, cat_in=0, cat_out=0):
        # cat_in = cat_out = (n_categories)
        # sentences = (B, maxlen)
        # lengths = (B)

        # Compute Thought Vectors for each sentence. Also get the actual word embeddings for teacher forcing.
        thoughts, word_embeddings = self.encoder(sentences, cat_in)  # thoughts = (B, thought_size), word_embeddings = (B, maxlen, word_size)

        # Predict the words for previous and next sentences.
        prev_pred, next_pred = self.decoders(thoughts, word_embeddings, cat_out)  # both = (batch-1, maxlen, VOCAB_SIZE)

        # Mask the predictions so that the loss from word predictions beyond EOS is cancelled.
        prev_mask = self.create_mask(prev_pred, lengths[:-1])
        next_mask = self.create_mask(next_pred, lengths[1:])
        
        masked_prev_pred = prev_pred * prev_mask
        masked_next_pred = next_pred * next_mask
        
        prev_loss = F.cross_entropy(masked_prev_pred.view(-1, VOCAB_SIZE), sentences[:-1, :].view(-1))
        next_loss = F.cross_entropy(masked_next_pred.view(-1, VOCAB_SIZE), sentences[1:, :].view(-1))

        loss = prev_loss + next_loss
        
        _, prev_pred_ids = prev_pred[0].max(1)
        _, next_pred_ids = next_pred[0].max(1)

        return loss, sentences[0], sentences[1], prev_pred_ids, next_pred_ids
Developer: TheLortex, Project: DL-NLP-Transfer, Lines: 27, Source: model.py
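
A detail worth flagging in this example: multiplying the predictions by a 0/1 mask does not actually remove those positions from the loss, since a row of all-zero logits still yields a uniform softmax and contributes log(VOCAB_SIZE). A common alternative is cross_entropy's ignore_index argument; a minimal sketch, assuming padded/beyond-EOS targets carry a dedicated PAD index (an assumption, not part of the project above):

import torch
import torch.nn.functional as F

VOCAB_SIZE, PAD_IDX = 100, 0                 # illustrative values
pred = torch.randn(2, 5, VOCAB_SIZE)         # (batch, maxlen, vocab) logits
tgt = torch.randint(1, VOCAB_SIZE, (2, 5))
tgt[:, 3:] = PAD_IDX                         # pretend positions past EOS are padding

# Positions whose target equals PAD_IDX are excluded from the mean entirely.
loss = F.cross_entropy(pred.view(-1, VOCAB_SIZE), tgt.view(-1), ignore_index=PAD_IDX)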

Example 2: forward

 def forward(self, input, target):
     assert not target.requires_grad
     if len(input.shape) == 4:
         input = input.permute(0, 2, 3, 1).contiguous()
     input = input.view(-1, self.n_classes)
     target = target.view(-1)
     assert input.shape[:1] == target.shape
     if not self.size_average:
         return F.cross_entropy(input, target, size_average=False).mul_(1.0 / target.size(0))
     else:
         return F.cross_entropy(input, target, size_average=True)
Developer: wangdingkang, Project: RoadDetector, Lines: 11, Source: pytorch_utils.py
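
size_average was deprecated in favor of the reduction argument in PyTorch 1.0; a rough modern equivalent of the branch above, as a sketch (not the project's own code):

import torch.nn.functional as F

def cross_entropy_flat(input, target, size_average=True):
    # size_average=False above: sum the losses, then divide by the target count.
    if not size_average:
        return F.cross_entropy(input, target, reduction='sum') / target.size(0)
    # size_average=True: plain mean over targets.
    return F.cross_entropy(input, target, reduction='mean')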

Example 3: _add_losses

    def _add_losses(self, sigma_rpn=3.0):
        # RPN, class loss
        rpn_cls_score = self._predictions['rpn_cls_score_reshape'].view(-1, 2)
        rpn_label = self._anchor_targets['rpn_labels'].view(-1)
        rpn_select = (rpn_label.data != -1).nonzero().view(-1)
        rpn_cls_score = rpn_cls_score.index_select(
            0, rpn_select).contiguous().view(-1, 2)
        rpn_label = rpn_label.index_select(0, rpn_select).contiguous().view(-1)
        rpn_cross_entropy = F.cross_entropy(rpn_cls_score, rpn_label)

        # RPN, bbox loss
        rpn_bbox_pred = self._predictions['rpn_bbox_pred']
        rpn_bbox_targets = self._anchor_targets['rpn_bbox_targets']
        rpn_bbox_inside_weights = self._anchor_targets[
            'rpn_bbox_inside_weights']
        rpn_bbox_outside_weights = self._anchor_targets[
            'rpn_bbox_outside_weights']
        rpn_loss_box = self._smooth_l1_loss(
            rpn_bbox_pred,
            rpn_bbox_targets,
            rpn_bbox_inside_weights,
            rpn_bbox_outside_weights,
            sigma=sigma_rpn,
            dim=[1, 2, 3])

        # RCNN, class loss
        cls_score = self._predictions["cls_score"]
        label = self._proposal_targets["labels"].view(-1)
        cross_entropy = F.cross_entropy(
            cls_score.view(-1, self._num_classes), label)

        # RCNN, bbox loss
        bbox_pred = self._predictions['bbox_pred']
        bbox_targets = self._proposal_targets['bbox_targets']
        bbox_inside_weights = self._proposal_targets['bbox_inside_weights']
        bbox_outside_weights = self._proposal_targets['bbox_outside_weights']
        loss_box = self._smooth_l1_loss(
            bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights)

        self._losses['cross_entropy'] = cross_entropy
        self._losses['loss_box'] = loss_box
        self._losses['rpn_cross_entropy'] = rpn_cross_entropy
        self._losses['rpn_loss_box'] = rpn_loss_box

        loss = cross_entropy + loss_box + rpn_cross_entropy + rpn_loss_box
        self._losses['total_loss'] = loss

        for k in self._losses.keys():
            self._event_summaries[k] = self._losses[k]

        return loss
Developer: zvant, Project: pytorch-faster-rcnn, Lines: 51, Source: network.py
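
The helper self._smooth_l1_loss is not shown in this snippet. A sketch of the formulation commonly used in Faster R-CNN ports (following the tf-faster-rcnn lineage this code descends from; the project's actual implementation may differ), written here as a free function:

import torch

def smooth_l1_loss(bbox_pred, bbox_targets, bbox_inside_weights,
                   bbox_outside_weights, sigma=1.0, dim=[1]):
    sigma_2 = sigma ** 2
    box_diff = bbox_inside_weights * (bbox_pred - bbox_targets)
    abs_diff = torch.abs(box_diff)
    # Quadratic branch for |x| < 1/sigma^2, linear branch beyond it.
    smooth_sign = (abs_diff < 1. / sigma_2).detach().float()
    loss = (box_diff ** 2) * (sigma_2 / 2.) * smooth_sign \
        + (abs_diff - 0.5 / sigma_2) * (1. - smooth_sign)
    loss = bbox_outside_weights * loss
    for d in sorted(dim, reverse=True):
        loss = loss.sum(d)
    return loss.mean()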

Example 4: forward

    def forward(self, task=None, input1=None, input2=None, label=None):
        '''
        Predict through model and task-specific prediction layer

        Args:
            - inputs (tuple(TODO))
            - pred_layer (nn.Module)
            - pair_input (int)

        Returns:
            - logits (TODO)
        '''
        pair_input = task.pair_input
        pred_layer = getattr(self, '%s_pred_layer' % task.name)
        if pair_input:
            if self.pair_enc_type == 'bow':
                sent1 = self.sent_encoder(input1)
                sent2 = self.sent_encoder(input2) # causes a bug with BiDAF
                logits = pred_layer(torch.cat([sent1, sent2, torch.abs(sent1 - sent2),
                                               sent1 * sent2], 1))
            else:
                pair_emb = self.pair_encoder(input1, input2)
                logits = pred_layer(pair_emb)

        else:
            sent_emb = self.sent_encoder(input1)
            logits = pred_layer(sent_emb)
        out = {'logits': logits}
        if label is not None:
            if isinstance(task, (STS14Task, STSBTask)):
                loss = F.mse_loss(logits, label)
                label = label.squeeze(-1).data.cpu().numpy()
                logits = logits.squeeze(-1).data.cpu().numpy()
                task.scorer1(pearsonr(logits, label)[0])
                task.scorer2(spearmanr(logits, label)[0])
            elif isinstance(task, CoLATask):
                label = label.squeeze(-1)
                loss = F.cross_entropy(logits, label)
                task.scorer2(logits, label)
                label = label.data.cpu().numpy()
                _, preds = logits.max(dim=1)
                task.scorer1(matthews_corrcoef(label, preds.data.cpu().numpy()))
            else:
                label = label.squeeze(-1)
                loss = F.cross_entropy(logits, label)
                task.scorer1(logits, label)
                if task.scorer2 is not None:
                    task.scorer2(logits, label)
            out['loss'] = loss
        return out
Developer: cyzhangAThit, Project: GLUE-baselines, Lines: 50, Source: models.py

Example 5: forward

    def forward(self, loc_preds, loc_targets, cls_preds, cls_targets):
        """Compute loss between (loc_preds, loc_targets) and (cls_preds, cls_targets).

        Args:
          loc_preds: (tensor) predicted locations, sized [N, #anchors, 4].
          loc_targets: (tensor) encoded target locations, sized [N, #anchors, 4].
          cls_preds: (tensor) predicted class confidences, sized [N, #anchors, #classes].
          cls_targets: (tensor) encoded target labels, sized [N, #anchors].

        Returns:
          (tensor) loss = SmoothL1Loss(loc_preds, loc_targets) + CrossEntropyLoss(cls_preds, cls_targets).
        """
        pos = cls_targets > 0  # [N,#anchors]
        batch_size = pos.size(0)
        num_pos = pos.sum().item()

        # ===============================================================
        # loc_loss = SmoothL1Loss(pos_loc_preds, pos_loc_targets)
        # ===============================================================
        mask = pos.unsqueeze(2).expand_as(loc_preds)  # [N,#anchors,4]
        loc_loss = F.smooth_l1_loss(loc_preds[mask], loc_targets[mask], size_average=False)

        # ===============================================================
        # cls_loss = CrossEntropyLoss(cls_preds, cls_targets)
        # ===============================================================
        cls_loss = F.cross_entropy(cls_preds.view(-1, self.num_classes), cls_targets.view(-1), reduce=False)  # [N*#anchors,]
        cls_loss = cls_loss.view(batch_size, -1)
        cls_loss[cls_targets < 0] = 0  # set ignored loss to 0
        neg = self._hard_negative_mining(cls_loss, pos)  # [N,#anchors]
        cls_loss = cls_loss[pos | neg].sum()

        print('loc_loss: {} | cls_loss: {}'.format(loc_loss.item() / num_pos, cls_loss.item() / num_pos))
        loss = (loc_loss + cls_loss) / num_pos
        return loss
Developer: zhliue, Project: objdet, Lines: 34, Source: ssd.py
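
The helper self._hard_negative_mining is not shown either. A common implementation (a sketch assuming the usual 3:1 negative-to-positive ratio from the SSD paper; the project's actual code may differ) ranks negative anchors by classification loss and keeps only the hardest:

def hard_negative_mining(cls_loss, pos, neg_ratio=3):
    """cls_loss: (N, #anchors) per-anchor loss; pos: (N, #anchors) bool mask."""
    cls_loss = cls_loss * (~pos).float()            # zero positives before ranking
    _, idx = cls_loss.sort(1, descending=True)      # sort anchors by loss per image
    _, rank = idx.sort(1)                           # rank of each anchor in that order
    num_neg = neg_ratio * pos.sum(1, keepdim=True)  # keep 3 negatives per positive
    return rank < num_neg                           # bool mask of the hardest negatives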

Example 6: avg_cross_entropy_loss

def avg_cross_entropy_loss(predicted, targets):
    """ Helper function for computing the simple mean
        cross entropy loss between the predicted one-hot
        and the target class.
    """
    losses = []
    length = len(predicted)
    
    for i in range(length):
        target = np.array(targets[i], dtype=np.float32)
        target = torch.from_numpy(target)
        target = Variable(target).long()
        
        loss = F.cross_entropy(predicted[i], target)
        
        losses.append(loss)

    loss = losses[0]
    
    for i in range(1, length):
        loss += losses[i]
    
    loss = loss / length

    return loss
Developer: bage79, Project: multitask_sentiment_analysis, Lines: 25, Source: utils.py
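
The explicit accumulation loop can be written more compactly without changing the result (still a mean of per-item mean losses); a behavior-preserving sketch, assuming each targets[i] is an array-like of integer class indices:

import torch
import torch.nn.functional as F

def avg_cross_entropy_loss_compact(predicted, targets):
    losses = [F.cross_entropy(p, torch.as_tensor(t, dtype=torch.long))
              for p, t in zip(predicted, targets)]
    return torch.stack(losses).mean()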

Example 7: single_scale_rpn_losses

def single_scale_rpn_losses(
        rpn_cls_logits, rpn_bbox_pred,
        rpn_labels_int32_wide, rpn_bbox_targets_wide,
        rpn_bbox_inside_weights_wide, rpn_bbox_outside_weights_wide):
    """Add losses for a single scale RPN model (i.e., no FPN)."""
    h, w = rpn_cls_logits.shape[2:]
    rpn_labels_int32 = rpn_labels_int32_wide[:, :, :h, :w]   # -1 means ignore
    h, w = rpn_bbox_pred.shape[2:]
    rpn_bbox_targets = rpn_bbox_targets_wide[:, :, :h, :w]
    rpn_bbox_inside_weights = rpn_bbox_inside_weights_wide[:, :, :h, :w]
    rpn_bbox_outside_weights = rpn_bbox_outside_weights_wide[:, :, :h, :w]

    if cfg.RPN.CLS_ACTIVATION == 'softmax':
        B, C, H, W = rpn_cls_logits.size()
        rpn_cls_logits = rpn_cls_logits.view(
            B, 2, C // 2, H, W).permute(0, 2, 3, 4, 1).contiguous().view(-1, 2)
        rpn_labels_int32 = rpn_labels_int32.contiguous().view(-1).long()
        # the loss is averaged over non-ignored targets
        loss_rpn_cls = F.cross_entropy(
            rpn_cls_logits, rpn_labels_int32, ignore_index=-1)
    else:
        weight = (rpn_labels_int32 >= 0).float()
        loss_rpn_cls = F.binary_cross_entropy_with_logits(
            rpn_cls_logits, rpn_labels_int32.float(), weight, size_average=False)
        loss_rpn_cls /= weight.sum()

    loss_rpn_bbox = net_utils.smooth_l1_loss(
        rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights,
        beta=1/9)

    return loss_rpn_cls, loss_rpn_bbox
Developer: chenyilun95, Project: PANet, Lines: 31, Source: rpn_heads.py

Example 8: keypoint_losses

def keypoint_losses(kps_pred, keypoint_locations_int32, keypoint_weights,
                    keypoint_loss_normalizer=None):
    """Mask R-CNN keypoint specific losses."""
    device_id = kps_pred.get_device()
    kps_target = Variable(torch.from_numpy(
        keypoint_locations_int32.astype('int64'))).cuda(device_id)
    keypoint_weights = Variable(torch.from_numpy(keypoint_weights)).cuda(device_id)
    # Softmax across **space** (woahh....space!)
    # Note: this is not what is commonly called "spatial softmax"
    # (i.e., softmax applied along the channel dimension at each spatial
    # location); This is softmax applied over a set of spatial locations (i.e.,
    # each spatial location is a "class").
    loss = F.cross_entropy(
        kps_pred.view(-1, cfg.KRCNN.HEATMAP_SIZE**2), kps_target, reduce=False)
    loss = torch.sum(loss * keypoint_weights) / torch.sum(keypoint_weights)
    loss *= cfg.KRCNN.LOSS_WEIGHT

    if not cfg.KRCNN.NORMALIZE_BY_VISIBLE_KEYPOINTS:
        # Discussion: the softmax loss above will average the loss by the sum of
        # keypoint_weights, i.e. the total number of visible keypoints. Since
        # the number of visible keypoints can vary significantly between
        # minibatches, this has the effect of up-weighting the importance of
        # minibatches with few visible keypoints. (Imagine the extreme case of
        # only one visible keypoint versus N: in the case of N, each one
        # contributes 1/N to the gradient compared to the single keypoint
        # determining the gradient direction). Instead, we can normalize the
        # loss by the total number of keypoints, if it were the case that all
        # keypoints were visible in a full minibatch. (Returning to the example,
        # this means that the one visible keypoint contributes as much as each
        # of the N keypoints.)
        loss *= keypoint_loss_normalizer.item() # np.float32 to float
    return loss
Developer: chenyilun95, Project: PANet, Lines: 32, Source: keypoint_rcnn_heads.py

Example 9: validate

def validate():
    softmaxer = torch.nn.Softmax(dim=1)
    model.eval()
    correct = total = 0
    precisionmat = (1/np.arange(1,21))[::-1].cumsum()[::-1]
    precisionmat = torch.cuda.FloatTensor(precisionmat.copy())
    precision = 0
    crossentropy = 0
    hidden = model.initHidden()
    for batch in iter(val_iter):
        sentences = batch.text # n=32,bs
        if torch.cuda.is_available():
            sentences = sentences.cuda()
        out, hidden = model(sentences, hidden)
        for j in range(sentences.size(0)-1):
            outj = out[j] # bs,|V|
            labelsj = sentences[j+1] # bs
            # cross entropy
            crossentropy += F.cross_entropy(outj,labelsj,size_average=False,ignore_index=padidx)
            # precision
            outj, labelsj = softmaxer(outj).data, labelsj.data
            _, outsort = torch.sort(outj,dim=1,descending=True)
            outsort = outsort[:,:20]
            inds = (outsort-labelsj.unsqueeze(1)==0)
            inds = inds.sum(dim=0).type(torch.cuda.FloatTensor)
            precision += inds.dot(precisionmat)
            # plain ol accuracy
            _, predicted = torch.max(outj, 1)
            total += labelsj.ne(padidx).int().sum()
            correct += (predicted==labelsj).sum()
            # DEBUGGING: see the rest in trigram.py
        hidden = repackage_hidden(hidden)
    return correct/total, precision/total, torch.exp(crossentropy/total).data[0]
Developer: anihamde, Project: cs287-s18, Lines: 33, Source: lstm.py

Example 10: forward

 def forward(self, input, target, kl_weight=1.0):
     assert not target.requires_grad
     kl = 0.0
     for module in self.net.modules():
         if hasattr(module, 'kl_reg'):
             kl = kl + module.kl_reg()
     return F.cross_entropy(input, target, size_average=True) * self.train_size + kl_weight * kl
Developer: AlliedToasters, Project: elko_den, Lines: 7, Source: metrics.py

Example 11: forward

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """

        assert hasattr(model.decoder, 'adaptive_softmax') and model.decoder.adaptive_softmax is not None
        adaptive_softmax = model.decoder.adaptive_softmax

        net_output = model(**sample['net_input'])
        target = model.get_targets(sample, net_output).view(-1)

        bsz = target.size(0)

        logits, target = adaptive_softmax(net_output[0], target)
        assert len(target) == len(logits)

        loss = net_output[0].new(1 if reduce else bsz).zero_()

        for i in range(len(target)):
            if target[i] is not None:
                assert (target[i].min() >= 0 and target[i].max() <= logits[i].size(1))
                loss += F.cross_entropy(logits[i], target[i], size_average=False, ignore_index=self.padding_idx,
                                        reduce=reduce)

        sample_size = sample['target'].size(0) if self.args.sentence_avg else sample['ntokens']
        logging_output = {
            'loss': utils.item(loss.data) if reduce else loss.data,
            'ntokens': sample['ntokens'],
            'sample_size': sample_size,
        }
        return loss, sample_size, logging_output
Developer: fyabc, Project: fairseq, Lines: 35, Source: adaptive_loss.py

Example 12: test

def test(model, device, test_loader):
    model.to(device)
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        y_pred = []
        y_true = []
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            output = torch.mean(output.view(output.size(0), output.size(1), -1), dim=2)
            test_loss += F.cross_entropy(output, target)
            output = F.softmax(output, dim=1)
            confidence, pred = output.max(1)
            print('confidence: {}, prediction: {}, ground truth: {}'.format(confidence.cpu().numpy(), pred.cpu().numpy(), target.cpu().numpy()))
            y_pred += pred.data.tolist()
            y_true += target.data.tolist()
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader)  # average the accumulated per-batch mean losses
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
    print(metrics.classification_report(np.asarray(y_true), np.asarray(y_pred)))
    print('confusion matrix: \n', metrics.confusion_matrix(np.asarray(y_true), np.asarray(y_pred)))
    print('\n')
Developer: hubutui, Project: SonoNet-weights, Lines: 26, Source: example-pytorch.py

Example 13: forward

 def forward(self, predict, target, weight=None):
     """
         Args:
             predict:(n, c, h, w)
             target:(n, h, w)
             weight (Tensor, optional): a manual rescaling weight given to each class.
                                        If given, has to be a Tensor of size "nclasses"
     """
     assert not target.requires_grad
     assert predict.dim() == 4
     assert target.dim() == 3
     assert predict.size(0) == target.size(0), "{0} vs {1} ".format(predict.size(0), target.size(0))
     assert predict.size(2) == target.size(1), "{0} vs {1} ".format(predict.size(2), target.size(1))
     assert predict.size(3) == target.size(2), "{0} vs {1} ".format(predict.size(3), target.size(2))
     n, c, h, w = predict.size()
     target_mask = (target >= 0) * (target != self.ignore_label)
     target = target[target_mask]
     if not target.data.dim():
         return Variable(torch.zeros(1))
     predict = predict.transpose(1, 2).transpose(2, 3).contiguous()
     # contiguous(): returns a tensor laid out contiguously in memory
     predict = predict[target_mask.view(n, h, w, 1).repeat(1, 1, 1, c)].view(-1, c)
     # target [N] predict [N,C]
     loss = F.cross_entropy(predict, target, weight=weight, size_average=self.size_average)
     return loss
Developer: MrtBian, Project: AdvSemiSeg, Lines: 25, Source: loss.py

Example 14: ohem_detect_loss

    def ohem_detect_loss(self, cls_score, rois_label, bbox_pred, rois_target, rois_inside_ws, rois_outside_ws):

        def log_sum_exp(x):
            x_max = x.data.max()
            return torch.log(torch.sum(torch.exp(x - x_max), dim=1, keepdim=True)) + x_max

        num_hard = cfg.TRAIN.BATCH_SIZE * self.batch_size
        pos_idx = rois_label > 0
        num_pos = pos_idx.int().sum()

        # classification loss
        num_classes = cls_score.size(1)
        weight = cls_score.data.new(num_classes).fill_(1.)
        weight[0] = num_pos.data[0] / num_hard

        conf_p = cls_score.detach()
        conf_t = rois_label.detach()

        # rank on cross_entropy loss
        loss_c = log_sum_exp(conf_p) - conf_p.gather(1, conf_t.view(-1,1))
        loss_c[pos_idx] = 100. # include all positive samples
        _, topk_idx = torch.topk(loss_c.view(-1), num_hard)
        loss_cls = F.cross_entropy(cls_score[topk_idx], rois_label[topk_idx], weight=weight)

        # bounding box regression L1 loss
        pos_idx = pos_idx.unsqueeze(1).expand_as(bbox_pred)
        loc_p = bbox_pred[pos_idx].view(-1, 4)
        loc_t = rois_target[pos_idx].view(-1, 4)
        loss_box = F.smooth_l1_loss(loc_p, loc_t)

        return loss_cls, loss_box
Developer: lianDaniel, Project: R-FCN.pytorch, Lines: 31, Source: rfcn.py
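
The ranking score log_sum_exp(x) - x[label] used above is exactly the per-sample cross-entropy computed by hand, which is why it can stand in for F.cross_entropy when selecting hard examples. A quick sketch verifying the identity on random data (shapes are illustrative):

import torch
import torch.nn.functional as F

scores = torch.randn(8, 21)              # e.g. 8 RoIs, 21 classes
labels = torch.randint(0, 21, (8,))

manual = torch.logsumexp(scores, dim=1) - scores.gather(1, labels.view(-1, 1)).squeeze(1)
builtin = F.cross_entropy(scores, labels, reduction='none')
assert torch.allclose(manual, builtin, atol=1e-6)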

Example 15: cross_entropy_loss

def cross_entropy_loss(input, target):
    total_loss = torch.tensor(0.0)
    for i in range(input.size(1)):
        cls_idx = torch.full((input.size(0),), i, dtype=torch.long)
        loss = F.cross_entropy(input, cls_idx, reduce=False)
        total_loss += target[:, i].dot(loss)
    return total_loss / input.shape[0]
Developer: HazyResearch, Project: snorkel, Lines: 7, Source: noise_aware_model.py
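
Because F.cross_entropy(input, i) per row is just -log_softmax(input)[:, i], the per-class loop above collapses into a single expression; an equivalent vectorized sketch, assuming each row of target is a probability distribution over the classes:

import torch.nn.functional as F

def soft_label_cross_entropy(input, target):
    # Expected cross-entropy under the soft labels, averaged over the batch.
    return -(target * F.log_softmax(input, dim=1)).sum(dim=1).mean()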


Note: The torch.nn.functional.cross_entropy function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; the copyright of the source code belongs to the original authors. Please refer to the corresponding project's License for distribution and use; do not reproduce without permission.