

Python functional.binary_cross_entropy method code examples

This article collects typical usage examples of the Python method torch.nn.functional.binary_cross_entropy. If you have been wondering how functional.binary_cross_entropy is called in practice, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the torch.nn.functional module.


Fifteen code examples of the functional.binary_cross_entropy method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
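
Before the individual examples, here is a minimal, self-contained sketch of the call itself (tensor shapes and values are illustrative): F.binary_cross_entropy expects probabilities in [0, 1] and float targets of the same shape, and its reduction argument selects the mean (the default), the sum, or per-element output.

import torch
import torch.nn.functional as F

probs = torch.sigmoid(torch.randn(4, 3))       # probabilities in [0, 1]
targets = torch.randint(0, 2, (4, 3)).float()  # float targets, same shape

loss_mean = F.binary_cross_entropy(probs, targets)                   # scalar (default: mean)
loss_sum = F.binary_cross_entropy(probs, targets, reduction='sum')   # scalar sum
per_elem = F.binary_cross_entropy(probs, targets, reduction='none')  # shape (4, 3)
print(loss_mean.item(), loss_sum.item(), per_elem.shape)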

Example 1: _add_losses

# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import binary_cross_entropy [as an alias]
def _add_losses(self, sigma_rpn=3.0):

    # classification loss
    image_prob = self._predictions["image_prob"]

    # image_prob and self._labels are expected to lie in [0, 1]; the clamp
    # guards against small numeric overshoots before the BCE call.
    cross_entropy = F.binary_cross_entropy(image_prob.clamp(0, 1), self._labels)

    fast_loss = self._add_losses_fast()
    self._losses['wsddn_loss'] = cross_entropy
    self._losses['fast_loss'] = fast_loss

    loss = cross_entropy + fast_loss
    self._losses['total_loss'] = loss

    for k in self._losses.keys():
        self._event_summaries[k] = self._losses[k]
    return loss
Developer: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, Lines: 22, Source: network.py
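
The clamp(0, 1) guard above is worth a note: the image-level probability in this WSDDN-style model is aggregated from per-region scores, and while it is nominally in [0, 1], floating-point error can push it slightly outside that range, which recent PyTorch versions reject in F.binary_cross_entropy. A minimal sketch of the failure mode and the guard (values are made up):

import torch
import torch.nn.functional as F

image_prob = torch.tensor([[0.3, 1.0000002, -1e-7]])  # tiny numeric overshoots
labels = torch.tensor([[0., 1., 0.]])

# F.binary_cross_entropy(image_prob, labels) would raise on recent PyTorch,
# since inputs must lie in [0, 1]; clamping first keeps the call valid.
loss = F.binary_cross_entropy(image_prob.clamp(0, 1), labels)
print(loss.item())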

Example 2: negative_bag_loss

# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import binary_cross_entropy [as an alias]
def negative_bag_loss(self, cls_prob, box_prob):
    """Compute negative bag loss.

    :math:`FL((1 - P_{a_{j} \in A_{+}}) * (1 - P_{j}^{bg}))`.

    :math:`P_{a_{j} \in A_{+}}`: Box probability of matched samples.

    :math:`P_{j}^{bg}`: Classification probability of negative samples.

    Args:
        cls_prob (Tensor): Classification probability, in shape
            (num_img, num_anchors, num_classes).
        box_prob (Tensor): Box probability, in shape
            (num_img, num_anchors, num_classes).

    Returns:
        Tensor: Negative bag loss in shape (num_img, num_anchors, num_classes).
    """  # noqa: E501, W605
    prob = cls_prob * (1 - box_prob)
    negative_bag_loss = prob**self.gamma * F.binary_cross_entropy(
        prob, torch.zeros_like(prob), reduction='none')
    return (1 - self.alpha) * negative_bag_loss
Developer: open-mmlab, Project: mmdetection, Lines: 24, Source: free_anchor_retina_head.py
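
Since F.binary_cross_entropy(p, 0, reduction='none') reduces to -log(1 - p), the expression above is exactly the focal loss for the background class, p**gamma * (-log(1 - p)). A quick numeric check of that identity (the gamma value is illustrative):

import torch
import torch.nn.functional as F

gamma = 2.0
prob = torch.rand(5).clamp(1e-6, 1 - 1e-6)

bce_form = prob**gamma * F.binary_cross_entropy(prob, torch.zeros_like(prob), reduction='none')
closed_form = prob**gamma * (-torch.log(1 - prob))
print(torch.allclose(bce_form, closed_form))  # True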

Example 3: compute_mrcnn_mask_loss

# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import binary_cross_entropy [as an alias]
def compute_mrcnn_mask_loss(target_masks, pred_masks, target_class_ids):
    """
    :param target_masks: (n_sampled_rois, y, x, (z)) A float32 tensor of values 0 or 1. Uses zero padding to fill array.
    :param pred_masks: (n_sampled_rois, n_classes, y, x, (z)) float32 tensor with values between [0, 1].
    :param target_class_ids: (n_sampled_rois)
    :return: loss: torch 1D tensor.
    """
    if 0 not in torch.nonzero(target_class_ids > 0).size():  # i.e. at least one positive ROI
        # Only positive ROIs contribute to the loss. And only
        # the class specific mask of each ROI.
        positive_ix = torch.nonzero(target_class_ids > 0)[:, 0]
        positive_class_ids = target_class_ids[positive_ix].long()
        y_true = target_masks[positive_ix, :, :].detach()
        y_pred = pred_masks[positive_ix, positive_class_ids, :, :]
        loss = F.binary_cross_entropy(y_pred, y_true)
    else:
        loss = torch.FloatTensor([0]).cuda()

    return loss


############################################################
#  Helper Layers
############################################################ 
Developer: MIC-DKFZ, Project: medicaldetectiontoolkit, Lines: 26, Source: mrcnn.py
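
The `0 not in torch.nonzero(...).size()` test is an old-style way of asking whether any positive ROI exists (torch.nonzero returns an (N, dims) index tensor, so an empty result puts a 0 in the size). A small sketch of the selection logic on toy tensors (shapes and values are illustrative):

import torch

target_class_ids = torch.tensor([0, 2, 0, 1])               # 0 = background / padding
positive_ix = torch.nonzero(target_class_ids > 0)[:, 0]     # tensor([1, 3])
positive_class_ids = target_class_ids[positive_ix].long()   # tensor([2, 1])

pred_masks = torch.rand(4, 3, 8, 8)                         # (rois, classes, y, x)
# For each positive ROI, pick the mask predicted for its ground-truth class.
y_pred = pred_masks[positive_ix, positive_class_ids]        # shape (2, 8, 8)
print(positive_ix.tolist(), y_pred.shape)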

Example 4: compute_loss

# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import binary_cross_entropy [as an alias]
def compute_loss(model, device, data_loader):
    model.eval()
    loss = 0
    scores = []

    with torch.no_grad():
        for X1, X2, target in data_loader:
            X1, X2, target = X1.to(device), X2.to(device), target.to(device)
            target = target.view(-1,1).float()
            y = model(X1, X2)
            loss += F.binary_cross_entropy(y, target, size_average=False)
            scores.append(y.data.cpu().numpy())

    loss /= len(data_loader.dataset) # average loss
    scores = np.vstack(scores) # scores per utterance

    return loss, scores 
Developer: jefflai108, Project: Attentive-Filtering-Network, Lines: 19, Source: v7_validation.py
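
One caveat that applies here and to Examples 5 through 9: size_average=False was deprecated in PyTorch 0.4.1, and the modern equivalent is reduction='sum'. A minimal sketch of the updated accumulation pattern (tensor shapes are illustrative):

import torch
import torch.nn.functional as F

y = torch.sigmoid(torch.randn(8, 1))
target = torch.randint(0, 2, (8, 1)).float()

# Deprecated spelling:  F.binary_cross_entropy(y, target, size_average=False)
# Current equivalent:
loss_sum = F.binary_cross_entropy(y, target, reduction='sum')
print(loss_sum.item())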

Example 5: compute_loss

# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import binary_cross_entropy [as an alias]
def compute_loss(model, device, data_loader):
    model.eval()
    loss = 0
    scores = {}

    with torch.no_grad():
        for id_list, X1, X2, target in data_loader:
            X1, X2, target = X1.to(device), X2.to(device), target.to(device)
            target = target.view(-1,1).float()
            y = model(X1, X2)
            loss += F.binary_cross_entropy(y, target, size_average=False)
            for i, utt_id in enumerate(id_list):  # avoid shadowing the id() builtin
                scores[utt_id] = y[i].data.cpu().numpy()

    loss /= len(data_loader.dataset) # average loss

    return loss, scores 
Developer: jefflai108, Project: Attentive-Filtering-Network, Lines: 19, Source: v8_validation.py

Example 6: train

# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import binary_cross_entropy [as an alias]
def train(args, model, device, train_loader, optimizer, epoch, rnn=False):
    model.train()
    for batch_idx, (_, data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        target = target.view(-1,1).float()
        optimizer.zero_grad()
        if rnn:
            model.hidden = model.init_hidden(data.size()[0])  # clear out the hidden state of the LSTM
        output = model(data)
        loss = F.binary_cross_entropy(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item())) 
Developer: jefflai108, Project: Attentive-Filtering-Network, Lines: 18, Source: v1_training.py

Example 7: compute_loss

# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import binary_cross_entropy [as an alias]
def compute_loss(model, device, data_loader):
    model.eval()
    loss = 0
    scores = []

    with torch.no_grad():
        for data, target in data_loader:
            data, target = data.to(device), target.to(device)
            target = target.view(-1,1).float()
            output = model(data)
            loss += F.binary_cross_entropy(output, target, size_average=False)

            scores.append(output.data.cpu().numpy())

    loss /= len(data_loader.dataset) # average loss
    scores = np.vstack(scores) # scores per frame

    return loss, scores 
Developer: jefflai108, Project: Attentive-Filtering-Network, Lines: 22, Source: v3_validation.py

Example 8: compute_loss

# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import binary_cross_entropy [as an alias]
def compute_loss(model, device, data_loader, rnn):
    model.eval()
    loss = 0
    scores = {}

    with torch.no_grad():
        for id_list, data, target in data_loader:
            data, target = data.to(device), target.to(device)
            target = target.view(-1,1).float()
            if rnn:
                model.hidden = model.init_hidden(data.size()[0])  # clear out the hidden state of the LSTM
            output = model(data)
            loss += F.binary_cross_entropy(output, target, size_average=False)
            for i, utt_id in enumerate(id_list):  # avoid shadowing the id() builtin
                scores[utt_id] = output[i].data.cpu().numpy()

    loss /= len(data_loader.dataset) # average loss

    return loss, scores 
Developer: jefflai108, Project: Attentive-Filtering-Network, Lines: 21, Source: v4_validation.py

Example 9: compute_loss

# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import binary_cross_entropy [as an alias]
def compute_loss(model, device, data_loader, threshold=0.5):
    model.eval()
    loss = 0
    correct = 0
    scores = []

    with torch.no_grad():
        for data, target in data_loader:
            data, target = data.to(device), target.to(device)
            target = target.view(-1,1).float()
            output = model(data)
            loss += F.binary_cross_entropy(output, target, size_average=False)
            pred = output > threshold
            correct += pred.byte().eq(target.byte()).sum().item()  # not really meaningful

            scores.append(output.data.cpu().numpy())

    loss /= len(data_loader.dataset) # average loss
    scores = np.vstack(scores) # scores per frame

    return loss, scores, correct 
Developer: jefflai108, Project: Attentive-Filtering-Network, Lines: 24, Source: v2_validation.py

Example 10: train_step_projection

# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import binary_cross_entropy [as an alias]
def train_step_projection(self, h, r, t, hr_t, tr_h):
    if self.model.model_name.lower() == "conve" or self.model.model_name.lower() == "tucker":
        if hasattr(self.config, 'label_smoothing'):
            hr_t = hr_t * (1.0 - self.config.label_smoothing) + 1.0 / self.config.tot_entity
            tr_h = tr_h * (1.0 - self.config.label_smoothing) + 1.0 / self.config.tot_entity

        pred_tails = self.model(h, r, direction="tail")  # (h, r) -> hr_t forward
        pred_heads = self.model(t, r, direction="head")  # (t, r) -> tr_h backward

        loss_tails = torch.mean(F.binary_cross_entropy(pred_tails, hr_t))
        loss_heads = torch.mean(F.binary_cross_entropy(pred_heads, tr_h))

        loss = loss_tails + loss_heads

    else:
        loss_tails = self.model(h, r, hr_t, direction="tail")  # (h, r) -> hr_t forward
        loss_heads = self.model(t, r, tr_h, direction="head")  # (t, r) -> tr_h backward

        loss = loss_tails + loss_heads

        if hasattr(self.model, 'get_reg'):
            # currently only ComplEx and DistMult use a regularizer
            loss += self.model.get_reg()

    return loss
Developer: Sujit-O, Project: pykg2vec, Lines: 27, Source: trainer.py
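
The label-smoothing step above is the standard y * (1 - ε) + ε / K transform, with K = tot_entity candidate entities: hard 0/1 targets are pulled slightly toward the uniform distribution, which regularizes ConvE/TuckER-style 1-N scoring. A toy illustration (ε and K are made-up values):

import torch

eps, K = 0.1, 5                             # smoothing factor and entity count (illustrative)
hr_t = torch.tensor([0., 1., 0., 0., 1.])   # multi-hot tail targets for one (h, r) pair

smoothed = hr_t * (1.0 - eps) + eps / K
print(smoothed)  # tensor([0.0200, 0.9200, 0.0200, 0.0200, 0.9200])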

Example 11: loss

# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import binary_cross_entropy [as an alias]
def loss(self, mu, logvar, G_true, beta=0.005):
    # G_true: [batch_size, max_n-1, xs]
    z = self.reparameterize(mu, logvar)
    type_scores, edge_scores = self._decode(z)
    res = 0
    true_types = torch.LongTensor([[g_true.vs[v_true]['type'] if v_true < g_true.vcount()
                                    else self.START_TYPE for v_true in range(1, self.max_n)]
                                   for g_true in G_true]).to(self.get_device())
    res += F.cross_entropy(type_scores.transpose(1, 2), true_types, reduction='sum')
    true_edges = torch.FloatTensor([np.pad(np.array(g_true.get_adjacency().data).transpose()[1:, :-1],
                                           ((0, self.max_n - g_true.vcount()), (0, self.max_n - g_true.vcount())),
                                           mode='constant', constant_values=(0, 0))
                                    for g_true in G_true]).to(self.get_device())
    res += F.binary_cross_entropy(edge_scores, true_edges, reduction='sum')
    kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return res + beta * kld, res, kld
Developer: muhanzhang, Project: D-VAE, Lines: 18, Source: models.py
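
The kld term is the closed-form KL divergence between the encoder's diagonal Gaussian N(mu, sigma^2) and the standard normal prior, KL = -0.5 * sum(1 + log sigma^2 - mu^2 - sigma^2), from Kingma and Welling (2014). A quick Monte-Carlo sanity check of the formula (the sample count is arbitrary):

import math
import torch

mu, logvar = torch.tensor([0.5]), torch.tensor([-0.3])
closed = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())

# Monte-Carlo estimate of E_q[log q(z) - log p(z)] with q = N(mu, sigma^2), p = N(0, 1):
std = (0.5 * logvar).exp()
z = mu + std * torch.randn(200_000)
log_q = -0.5 * (((z - mu) / std) ** 2 + logvar + math.log(2 * math.pi))
log_p = -0.5 * (z ** 2 + math.log(2 * math.pi))
print(closed.item(), (log_q - log_p).mean().item())  # the two numbers should roughly agree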

Example 12: fool_dis

# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import binary_cross_entropy [as an alias]
def fool_dis(self, wordsEn, pos1En, pos2En, wordsZh, pos1Zh, pos2Zh):
    if dis_lambda == 0:
        return
    self.model.D.eval()
    self.model.share_encoder.train()
    x, y = self.get_dis_xy(wordsEn, pos1En, pos2En, wordsZh, pos1Zh, pos2Zh)
    pred = self.model.D(x)
    loss = F.binary_cross_entropy(pred, 1 - y)  # flipped labels: train the encoder to fool D
    loss = dis_lambda * loss
    if (loss != loss).data.any():  # NaN check
        print("NaN Loss (fooling discriminator)")
        exit()
    self.encoder_optim.zero_grad()
    loss.backward()
    self.encoder_optim.step()
    return loss.item()  # loss.data[0] in the original; deprecated since PyTorch 0.4
Developer: thunlp, Project: AMNRE, Lines: 18, Source: trainer.py
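
The 1 - y target flip is the usual adversarial trick: the discriminator D is trained to predict the true domain labels, while the shared encoder is updated against inverted labels so its features become domain-invariant. A tiny sketch of the two objectives (tensor names and the label convention are illustrative):

import torch
import torch.nn.functional as F

d_out = torch.sigmoid(torch.randn(6, 1))                 # discriminator probabilities
y = torch.tensor([[1.], [1.], [1.], [0.], [0.], [0.]])   # 1 = English, 0 = Chinese (say)

d_loss = F.binary_cross_entropy(d_out, y)        # discriminator: predict the true domain
enc_loss = F.binary_cross_entropy(d_out, 1 - y)  # encoder: fool D with flipped labels
print(d_loss.item(), enc_loss.item())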

Example 13: binary_cross_entropy_weight

# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import binary_cross_entropy [as an alias]
def binary_cross_entropy_weight(y_pred, y, has_weight=False, weight_length=1, weight_max=10):
    '''
    :param y_pred: predicted probabilities, same shape as y
    :param y: binary targets, shape (batch, seq_len, features)
    :param has_weight: whether to up-weight the end of each sequence
    :param weight_length: how long until the end of sequence shall we add weight
    :param weight_max: the magnitude that the weight is enhanced
    :return: scalar BCE loss
    '''
    if has_weight:
        weight = torch.ones(y.size(0), y.size(1), y.size(2))
        weight_linear = torch.arange(1, weight_length + 1) / weight_length * weight_max
        weight_linear = weight_linear.view(1, weight_length, 1).repeat(y.size(0), 1, y.size(2))
        weight[:, -1 * weight_length:, :] = weight_linear
        loss = F.binary_cross_entropy(y_pred, y, weight=weight.cuda())
    else:
        loss = F.binary_cross_entropy(y_pred, y)
    return loss
Developer: JiaxuanYou, Project: graph-generation, Lines: 20, Source: model.py
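
A short CPU-side usage sketch of the weighting idea above (the original sends the weight tensor to .cuda(); the shapes and weight settings here are illustrative):

import torch
import torch.nn.functional as F

# Emphasize the last `weight_length` steps of each sequence, as in the function above.
y_pred = torch.sigmoid(torch.randn(2, 8, 4))   # (batch, seq_len, features)
y = torch.randint(0, 2, (2, 8, 4)).float()

weight_length, weight_max = 3, 10
weight = torch.ones_like(y)
weight_linear = torch.arange(1, weight_length + 1).float() / weight_length * weight_max
weight[:, -weight_length:, :] = weight_linear.view(1, weight_length, 1)
loss = F.binary_cross_entropy(y_pred, y, weight=weight)
print(loss.item())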

Example 14: extract_grads

# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import binary_cross_entropy [as an alias]
def extract_grads(self, X, X_recon, t_mean, t_log_var):
        eps = np.finfo(float).eps
        X = torchify(X, requires_grad=False)
        X_recon = torchify(np.clip(X_recon, eps, 1 - eps))
        t_mean = torchify(t_mean)
        t_log_var = torchify(t_log_var)

        BCE = torch.sum(F.binary_cross_entropy(X_recon, X, reduction="none"), dim=1)

        # see Appendix B from VAE paper:
        # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
        # https://arxiv.org/abs/1312.6114
        # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
        KLD = -0.5 * torch.sum(1 + t_log_var - t_mean.pow(2) - t_log_var.exp(), dim=1)

        loss = torch.mean(BCE + KLD)
        loss.backward()

        grads = {
            "loss": loss.detach().numpy(),
            "dX_recon": X_recon.grad.numpy(),
            "dt_mean": t_mean.grad.numpy(),
            "dt_log_var": t_log_var.grad.numpy(),
        }
        return grads 
Developer: ddbourgin, Project: numpy-ml, Lines: 27, Source: nn_torch_models.py

Example 15: compute_mrcnn_mask_loss

# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import binary_cross_entropy [as an alias]
def compute_mrcnn_mask_loss(pred_masks, target_masks, target_class_ids):
    """
    :param pred_masks: (n_sampled_rois, n_classes, y, x, (z)) float32 tensor with values between [0, 1].
    :param target_masks: (n_sampled_rois, y, x, (z)) A float32 tensor of values 0 or 1. Uses zero padding to fill array.
    :param target_class_ids: (n_sampled_rois)
    :return: loss: torch 1D tensor.
    """
    if 0 not in torch.nonzero(target_class_ids > 0).size():  # i.e. at least one positive ROI
        # Only positive ROIs contribute to the loss. And only
        # the class-specific mask of each ROI.
        positive_ix = torch.nonzero(target_class_ids > 0)[:, 0]
        positive_class_ids = target_class_ids[positive_ix].long()
        y_true = target_masks[positive_ix, :, :].detach()
        y_pred = pred_masks[positive_ix, positive_class_ids, :, :]
        loss = F.binary_cross_entropy(y_pred, y_true)
    else:
        loss = torch.FloatTensor([0]).cuda()

    return loss
Developer: MIC-DKFZ, Project: RegRCNN, Lines: 22, Source: mrcnn.py


Note: The torch.nn.functional.binary_cross_entropy examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's License. Do not reproduce without permission.