

Python torch.neg Method Code Examples

This article collects typical usage examples of the torch.neg method in Python. If you are wondering what torch.neg does, how to call it, or want to see it used in real code, the curated examples below may help. You can also explore further usage examples from torch, the module this method belongs to.


The following shows 10 code examples of the torch.neg method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
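Before the examples, here is a minimal, self-contained sketch of torch.neg itself: it returns the elementwise negation of its input, equivalent to the unary `-` operator. The tensor values below are illustrative only.

import torch

x = torch.tensor([1.0, -2.0, 3.0])
y = torch.neg(x)   # elementwise negation, equivalent to -x or x.neg()
print(y)           # tensor([-1.,  2., -3.])
# x.neg_() is the in-place variant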

Example 1: get_loss

# Required module import: import torch [as alias]
# Or: from torch import neg [as alias]
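# Note: this snippet also uses Variable, so running it stand-alone additionally requires `from torch.autograd import Variable`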
def get_loss(pred, y, criterion, mtr, a=0.5):
    """
    To calculate loss
    :param pred: predicted value
    :param y: actual value
    :param criterion: nn.CrossEntropyLoss
    :param mtr: beta matrix
    """
    mtr_t = torch.transpose(mtr, 1, 2)
    aa = torch.bmm(mtr, mtr_t)
    loss_fn = 0
    for i in range(aa.size()[0]):
        aai = torch.add(aa[i, ], Variable(torch.neg(torch.eye(mtr.size()[1]))))
        loss_fn += torch.trace(torch.mul(aai, aai).data)
    loss_fn /= aa.size()[0]
    loss = torch.add(criterion(pred, y), Variable(torch.FloatTensor([loss_fn * a])))
    return loss 
Developer ID: BarnesLab, Project: Patient2Vec, Lines of code: 19, Source file: Patient2Vec.py

Example 2: forward

# Required module import: import torch [as alias]
# Or: from torch import neg [as alias]
def forward(self, Hr, Hr_mask, h_0=None):
        Hr = self.dropout.forward(Hr)

        left_beta, _ = self.left_ptr_rnn.forward(Hr, Hr_mask, h_0)
        rtn_beta = left_beta
        if self.bidirectional:
            right_beta_inv, _ = self.right_ptr_rnn.forward(Hr, Hr_mask, h_0)
            right_beta = right_beta_inv[[1, 0], :]

            rtn_beta = (left_beta + right_beta) / 2

        # todo: unexplainable
        # replace the zeros at masked positions with 1e-6 to avoid gradient explosion
        new_mask = torch.neg((Hr_mask - 1) * 1e-6)
        rtn_beta = rtn_beta + new_mask.unsqueeze(0)

        return rtn_beta 
Developer ID: laddie132, Project: Match-LSTM, Lines of code: 18, Source file: layers.py

Example 3: weighted_binary_cross_entropy_interaction

# Required module import: import torch [as alias]
# Or: from torch import neg [as alias]
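# Note: this snippet also calls F.sigmoid, so it additionally requires `import torch.nn.functional as F`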
def weighted_binary_cross_entropy_interaction(output, target, weights=None):
    '''
    weights: (A, 2); column 0 holds the weight for the negative class, column 1 for the positive class
    output: (N, A)
    target: (N, A)
    A is the number of actions
    '''
    output = F.sigmoid(output)
    if weights is not None:
        assert len(weights.shape) == 2
        loss = weights[:, 1].unsqueeze(dim=0) * (target * torch.log(output+1e-8)) + \
               weights[:, 0].unsqueeze(dim=0) * ((1 - target) * torch.log(1 - output+1e-8))
    else:
        loss = target * torch.log(output+1e-8) + (1 - target) * torch.log(1 - output+1e-8)

    return torch.neg(torch.mean(loss)) 
Developer ID: bobwan1995, Project: PMFNet, Lines of code: 18, Source file: net.py

Example 4: E_Step

# Required module import: import torch [as alias]
# Or: from torch import neg [as alias]
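# Note: `Global` and `my_help` are modules defined elsewhere in the BASS project; they are not part of torch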
def E_Step(X, logdet, c1_temp, pi_temp, SigmaXY, X_C_SIGMA, sum, c_idx, c_idx_9, c_idx_25, distances2, r_ik_5, neig, sumP, X_C, X_C_SIGMA_buf):

    """
    Computes the distances of the Data points for each centroid and normalize it,

    """
    torch.add(X.unsqueeze(1), torch.neg(c1_temp.reshape(-1, Global.neig_num, Global.D_)), out=X_C)
    torch.mul(X_C[:, :, 0].unsqueeze(2), SigmaXY[:, :, 0:2], out=X_C_SIGMA_buf)
    torch.addcmul(X_C_SIGMA_buf, 1, X_C[:, :, 1].unsqueeze(2), SigmaXY[:, :, 2:4], out=X_C_SIGMA[:, :, 0:2])
    X_C_SIGMA[:, :, 2:] = torch.mul(X_C[:, :, 2:], Global.SIGMA_INT)

    torch.mul(-X_C.view(-1, Global.neig_num, Global.D_), X_C_SIGMA.view(-1, Global.neig_num, Global.D_), out=distances2)
    distances2 = distances2.view(-1, Global.neig_num, Global.D_)
    torch.sum(distances2, 2, out=r_ik_5)

    r_ik_5.add_(torch.neg(logdet.reshape(-1, Global.neig_num)))
    r_ik_5.add_(torch.log(pi_temp.reshape(-1, Global.neig_num)))
    c_neig = c_idx_25.reshape(-1, Global.potts_area).float()
    torch.add(c_neig.unsqueeze(1), -c_idx.reshape(-1, Global.neig_num).unsqueeze(2).float(), out=neig)
    torch.sum((neig != 0).float(), 2, out=sumP)
    r_ik_5.add_(-(Global.Beta_P * sumP))
    my_help.softmaxTF(r_ik_5, 1, sum) 
Developer ID: BGU-CS-VIL, Project: BASS, Lines of code: 24, Source file: BASS.py

Example 5: forward

# Required module import: import torch [as alias]
# Or: from torch import neg [as alias]
def forward(self, input, embs, label):
        n = input.shape[0]
        log_target = torch.log(
            torch.sigmoid(torch.sum(torch.mul(embs, self.weights[label]), 1))
        )
        negs = torch.multinomial(
            self.sample_weights, self.num_sampled * n, replacement=True
        ).view(n, self.num_sampled)
        noise = torch.neg(self.weights[negs])
        sum_log_sampled = torch.sum(
            torch.log(torch.sigmoid(torch.bmm(noise, embs.unsqueeze(2)))), 1
        ).squeeze()

        loss = log_target + sum_log_sampled
        return -loss.sum() / n 
Developer ID: dmlc, Project: dgl, Lines of code: 17, Source file: main.py

Example 6: forward

# Required module import: import torch [as alias]
# Or: from torch import neg [as alias]
def forward(self, e, r, er_e2, direction="tail"):
        emb_hr_e = self.ent_embeddings(e)  # [m, k]
        emb_hr_r = self.rel_embeddings(r)  # [m, k]
        
        if direction == "tail":
            ere2_sigmoid = self.g(torch.dropout(self.f1(emb_hr_e, emb_hr_r), p=self.hidden_dropout, train=True), self.ent_embeddings.weight)
        else:
            ere2_sigmoid = self.g(torch.dropout(self.f2(emb_hr_e, emb_hr_r), p=self.hidden_dropout, train=True), self.ent_embeddings.weight)

        ere2_loss_left = -torch.sum((torch.log(torch.clamp(ere2_sigmoid, 1e-10, 1.0)) * torch.max(torch.FloatTensor([0]).to(self.device), er_e2)))
        ere2_loss_right = -torch.sum((torch.log(torch.clamp(1 - ere2_sigmoid, 1e-10, 1.0)) * torch.max(torch.FloatTensor([0]).to(self.device), torch.neg(er_e2))))

        hrt_loss = ere2_loss_left + ere2_loss_right

        return hrt_loss 
Developer ID: Sujit-O, Project: pykg2vec, Lines of code: 17, Source file: projection.py

Example 7: multi_nll_loss

# Required module import: import torch [as alias]
# Or: from torch import neg [as alias]
def multi_nll_loss(scores, target_mask):
    """
    Select actions with sampling at train-time, argmax at test-time:
    """
    scores = scores.exp()
    loss = 0
    for i in range(scores.size(0)):
        loss += torch.neg(torch.log(torch.masked_select(scores[i], target_mask[i]).sum() / scores[i].sum()))
    return loss 
Developer ID: stanfordnlp, Project: coqa-baselines, Lines of code: 11, Source file: layers.py

Example 8: weighted_binary_cross_entropy

# Required module import: import torch [as alias]
# Or: from torch import neg [as alias]
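# Note: `epsilon` is assumed to be a small constant (e.g. 1e-8) defined at module level in the original loss_functions.py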
def weighted_binary_cross_entropy(output, target, weights=None):
    if weights is not None:
        assert len(weights) == 2

        loss = weights[1] * (target * torch.log(output + epsilon)) + \
               weights[0] * ((1 - target) * torch.log(1 - output + epsilon))
    else:
        loss = target * torch.log(output + epsilon) + (1 - target) * torch.log(1 - output + epsilon)

    return torch.neg(torch.mean(loss)) 
Developer ID: anuragranj, Project: cc, Lines of code: 12, Source file: loss_functions.py

Example 9: __call__

# Required module import: import torch [as alias]
# Or: from torch import neg [as alias]
def __call__(self, output, target):
        if self.weights is not None:
            assert len(self.weights) == 2
            loss = self.weights[1] * (target * self.logsigmoid(output)) + \
                self.weights[0] * ((1 - target) * self.logsigmoid(-output))
        else:
            loss = target * self.logsigmoid(output) + (1 - target) * self.logsigmoid(-output)
        return torch.neg(torch.mean(loss)) 
Developer ID: Arseha, Project: peakonly, Lines of code: 10, Source file: training.py

Example 10: aten_neg

# Required module import: import torch [as alias]
# Or: from torch import neg [as alias]
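# Note: current_context, has_trt_tensor, has_tvm_tensor, trt, and _op are helpers/imports defined elsewhere in the torch2trt project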
def aten_neg(inputs, attributes, scope):
    inp = inputs[0]
    ctx = current_context()
    net = ctx.network
    if ctx.is_tensorrt and has_trt_tensor(inputs):
        layer = net.add_unary(inp, trt.UnaryOperation.NEG)
        output = layer.get_output(0)
        output.name = scope
        layer.name = scope
        return [output]
    elif ctx.is_tvm and has_tvm_tensor(inputs):
        return [_op.negative(inp)]

    return [torch.neg(inp)] 
Developer ID: traveller59, Project: torch2trt, Lines of code: 16, Source file: unary.py


Note: The torch.neg method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors. Please refer to the corresponding project's license before distributing or using the code; do not reproduce without permission.