

Python torch.neg Method Code Examples

This article collects typical usage examples of the Python torch.neg method. If you are wondering how torch.neg is called in practice, the curated examples below should help. You can also explore further usage examples from the torch module.


Ten code examples of the torch.neg method are shown below, ordered by popularity by default.
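Before the project examples, a minimal illustration of the call itself: torch.neg(input) returns the element-wise negation of a tensor, equivalent to the unary minus operator, and an in-place variant Tensor.neg_ also exists.

import torch

x = torch.tensor([1.0, -2.0, 3.0])
y = torch.neg(x)   # tensor([-1., 2., -3.]); same result as -x or x.neg()
x.neg_()           # in-place variant: x itself becomes tensor([-1., 2., -3.])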

Example 1: get_loss

# Required import: import torch
# Or: from torch import neg
def get_loss(pred, y, criterion, mtr, a=0.5):
    """
    Calculate the combined loss.
    :param pred: predicted values
    :param y: actual values
    :param criterion: nn.CrossEntropyLoss
    :param mtr: beta matrix
    :param a: weight of the penalty term
    """
    # Note: `Variable` comes from torch.autograd in the original project.
    mtr_t = torch.transpose(mtr, 1, 2)
    aa = torch.bmm(mtr, mtr_t)
    loss_fn = 0
    for i in range(aa.size()[0]):
        # subtract the identity from M·Mᵀ, then accumulate the squared diagonal entries
        aai = torch.add(aa[i], Variable(torch.neg(torch.eye(mtr.size()[1]))))
        loss_fn += torch.trace(torch.mul(aai, aai).data)
    loss_fn /= aa.size()[0]
    loss = torch.add(criterion(pred, y), Variable(torch.FloatTensor([loss_fn * a])))
    return loss
Author: BarnesLab, Project: Patient2Vec, Lines: 19, Source: Patient2Vec.py
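For context, the loop above is a per-batch orthogonality-style penalty on the attention matrix `mtr`. A compact vectorized restatement of exactly what it computes (my own sketch, not part of Patient2Vec):

import torch

def diag_orthogonality_penalty(mtr):
    # For each batch element, sum the squared diagonal of (M·Mᵀ - I); note that
    # trace(X∘X) in the loop only sees the diagonal, whereas the full Frobenius
    # penalty would be trace(X·Xᵀ).
    aa = torch.bmm(mtr, mtr.transpose(1, 2))          # (B, L, L)
    diag = torch.diagonal(aa, dim1=1, dim2=2) - 1.0   # (B, L)
    return (diag * diag).sum(dim=1).mean()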

Example 2: forward

# Required import: import torch
# Or: from torch import neg
def forward(self, Hr, Hr_mask, h_0=None):
        Hr = self.dropout.forward(Hr)

        left_beta, _ = self.left_ptr_rnn.forward(Hr, Hr_mask, h_0)
        rtn_beta = left_beta
        if self.bidirectional:
            right_beta_inv, _ = self.right_ptr_rnn.forward(Hr, Hr_mask, h_0)
            right_beta = right_beta_inv[[1, 0], :]

            rtn_beta = (left_beta + right_beta) / 2

        # todo: unexplainable
        new_mask = torch.neg((Hr_mask - 1) * 1e-6)  # give masked-out positions a small 1e-6 offset instead of an exact zero, to avoid gradient explosion
        rtn_beta = rtn_beta + new_mask.unsqueeze(0)

        return rtn_beta 
Author: laddie132, Project: Match-LSTM, Lines: 18, Source: layers.py
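The expression tagged `todo: unexplainable` is plain arithmetic: for a 0/1 float mask, torch.neg((Hr_mask - 1) * 1e-6) equals (1 - Hr_mask) * 1e-6, so padded positions receive a tiny positive offset while valid positions stay at zero. A quick check (my own illustration, assuming a 0/1 float mask):

import torch

Hr_mask = torch.tensor([1., 1., 0., 0.])
new_mask = torch.neg((Hr_mask - 1) * 1e-6)
print(new_mask)   # -> [0, 0, 1e-06, 1e-06]: only the masked positions are shifted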

Example 3: weighted_binary_cross_entropy_interaction

# Required import: import torch
# Or: from torch import neg
def weighted_binary_cross_entropy_interaction(output, target, weights=None):
    '''
    weights: (A, 2), column 0 weights the negative class, column 1 the positive class
    output: (N, A) raw scores (a sigmoid is applied inside)
    target: (N, A) binary labels
    A is the number of actions
    '''
    output = F.sigmoid(output)  # requires: import torch.nn.functional as F
    if weights is not None:
        assert len(weights.shape) == 2
        loss = weights[:, 1].unsqueeze(dim=0) * (target * torch.log(output+1e-8)) + \
               weights[:, 0].unsqueeze(dim=0) * ((1 - target) * torch.log(1 - output+1e-8))
    else:
        loss = target * torch.log(output+1e-8) + (1 - target) * torch.log(1 - output+1e-8)

    return torch.neg(torch.mean(loss)) 
Author: bobwan1995, Project: PMFNet, Lines: 18, Source: net.py
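A hypothetical call for reference; shapes and values are illustrative only and not taken from PMFNet:

import torch

N, A = 4, 6                                    # batch size, number of actions
logits  = torch.randn(N, A)                    # raw scores; the function applies sigmoid itself
targets = torch.randint(0, 2, (N, A)).float()  # multi-label 0/1 targets
weights = torch.ones(A, 2)                     # per-action [negative, positive] class weights
loss = weighted_binary_cross_entropy_interaction(logits, targets, weights)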

Example 4: E_Step

# Required import: import torch
# Or: from torch import neg
def E_Step(X, logdet, c1_temp, pi_temp, SigmaXY, X_C_SIGMA, sum, c_idx, c_idx_9, c_idx_25, distances2, r_ik_5, neig, sumP, X_C, X_C_SIGMA_buf):

    """
    Computes the distances of the Data points for each centroid and normalize it,

    """
    torch.add(X.unsqueeze(1), torch.neg(c1_temp.reshape(-1, Global.neig_num, Global.D_)), out=X_C)
    torch.mul(X_C[:, :, 0].unsqueeze(2), SigmaXY[:, :, 0:2], out=X_C_SIGMA_buf)
    torch.addcmul(X_C_SIGMA_buf, 1, X_C[:, :, 1].unsqueeze(2), SigmaXY[:, :, 2:4], out=X_C_SIGMA[:, :, 0:2])
    X_C_SIGMA[:, :, 2:] = torch.mul(X_C[:, :, 2:], Global.SIGMA_INT)

    torch.mul(-X_C.view(-1, Global.neig_num, Global.D_), X_C_SIGMA.view(-1, Global.neig_num, Global.D_), out=distances2)
    distances2 = distances2.view(-1, Global.neig_num, Global.D_)
    torch.sum(distances2, 2, out=r_ik_5)

    r_ik_5.add_(torch.neg(logdet.reshape(-1, Global.neig_num)))
    r_ik_5.add_(torch.log(pi_temp.reshape(-1, Global.neig_num)))
    c_neig = c_idx_25.reshape(-1, Global.potts_area).float()
    torch.add(c_neig.unsqueeze(1), -c_idx.reshape(-1, Global.neig_num).unsqueeze(2).float(), out=neig)
    torch.sum((neig != 0).float(), 2, out=sumP)
    r_ik_5.add_(-(Global.Beta_P * sumP))
    my_help.softmaxTF(r_ik_5, 1, sum)
Author: BGU-CS-VIL, Project: BASS, Lines: 24, Source: BASS.py
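The function above is tightly coupled to BASS-specific buffers, so here is only a generic illustration of the docstring's idea (squared distances to each centroid, shifted by a log-prior, normalized with a softmax); this is my own sketch, not the BASS implementation:

import torch

def soft_assignments(X, centroids, log_pi):
    # X: (N, D) data, centroids: (K, D), log_pi: (K,) log mixture weights
    d2 = ((X.unsqueeze(1) - centroids.unsqueeze(0)) ** 2).sum(-1)  # (N, K) squared distances
    logits = -d2 + log_pi                                          # unnormalized log-responsibilities
    return torch.softmax(logits, dim=1)                            # each row sums to 1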

Example 5: forward

# Required import: import torch
# Or: from torch import neg
def forward(self, input, embs, label):
        n = input.shape[0]
        # log-probability assigned to the true label for each embedding
        log_target = torch.log(
            torch.sigmoid(torch.sum(torch.mul(embs, self.weights[label]), 1))
        )
        # draw `num_sampled` negatives per example and negate their weight rows
        negs = torch.multinomial(
            self.sample_weights, self.num_sampled * n, replacement=True
        ).view(n, self.num_sampled)
        noise = torch.neg(self.weights[negs])
        # log-probability that the sampled negatives are *not* predicted
        sum_log_sampled = torch.sum(
            torch.log(torch.sigmoid(torch.bmm(noise, embs.unsqueeze(2)))), 1
        ).squeeze()

        loss = log_target + sum_log_sampled
        return -loss.sum() / n
Author: dmlc, Project: dgl, Lines: 17, Source: main.py
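This forward pass implements the standard negative-sampling objective; in notation of my own choosing, with e_i the embedding, w_{y_i} the weight row of the true label, and w_{n_{ik}} the K sampled negative rows:

\mathcal{L} = -\frac{1}{n}\sum_{i=1}^{n}\Big[\log \sigma\big(w_{y_i}^{\top} e_i\big) + \sum_{k=1}^{K} \log \sigma\big(-w_{n_{ik}}^{\top} e_i\big)\Big]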

Example 6: forward

# Required import: import torch
# Or: from torch import neg
def forward(self, e, r, er_e2, direction="tail"):
        emb_hr_e = self.ent_embeddings(e)  # [m, k]
        emb_hr_r = self.rel_embeddings(r)  # [m, k]
        
        if direction == "tail":
            ere2_sigmoid = self.g(
                torch.dropout(self.f1(emb_hr_e, emb_hr_r), p=self.hidden_dropout, train=True),
                self.ent_embeddings.weight)
        else:
            ere2_sigmoid = self.g(
                torch.dropout(self.f2(emb_hr_e, emb_hr_r), p=self.hidden_dropout, train=True),
                self.ent_embeddings.weight)

        ere2_loss_left = -torch.sum(
            torch.log(torch.clamp(ere2_sigmoid, 1e-10, 1.0))
            * torch.max(torch.FloatTensor([0]).to(self.device), er_e2))
        ere2_loss_right = -torch.sum(
            torch.log(torch.clamp(1 - ere2_sigmoid, 1e-10, 1.0))
            * torch.max(torch.FloatTensor([0]).to(self.device), torch.neg(er_e2)))

        hrt_loss = ere2_loss_left + ere2_loss_right

        return hrt_loss 
Author: Sujit-O, Project: pykg2vec, Lines: 17, Source: projection.py
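Written out (notation mine): with s_j the sigmoid score for candidate entity j and t_j the corresponding entry of er_e2 (positive for observed tails, negative for sampled negatives), the two clamped terms above sum to

\mathcal{L} = -\sum_{j}\Big[\max(0, t_j)\,\log s_j + \max(0, -t_j)\,\log\big(1 - s_j\big)\Big]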

Example 7: multi_nll_loss

# Required import: import torch
# Or: from torch import neg
def multi_nll_loss(scores, target_mask):
    """
    Negative log-likelihood with multiple acceptable targets: for each row, take
    the probability mass on the masked positions, normalize by the total mass,
    and accumulate -log over the batch.
    """
    scores = scores.exp()
    loss = 0
    for i in range(scores.size(0)):
        loss += torch.neg(torch.log(torch.masked_select(scores[i], target_mask[i]).sum() / scores[i].sum()))
    return loss
Author: stanfordnlp, Project: coqa-baselines, Lines: 11, Source: layers.py
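A hypothetical call (shapes are illustrative): scores holds log-probabilities over candidates and target_mask is a boolean mask of the acceptable targets per row.

import torch

scores = torch.log_softmax(torch.randn(3, 5), dim=1)   # (batch, candidates) log-probabilities
target_mask = torch.zeros(3, 5, dtype=torch.bool)
target_mask[0, 1] = True
target_mask[1, 3] = True
target_mask[2, 0] = True
loss = multi_nll_loss(scores, target_mask)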

Example 8: weighted_binary_cross_entropy

# Required import: import torch
# Or: from torch import neg
def weighted_binary_cross_entropy(output, target, weights=None):
    # `epsilon` is a small module-level constant in the original file (it guards the log)
    if weights is not None:
        assert len(weights) == 2

        loss = weights[1] * (target * torch.log(output + epsilon)) + \
               weights[0] * ((1 - target) * torch.log(1 - output + epsilon))
    else:
        loss = target * torch.log(output + epsilon) + (1 - target) * torch.log(1 - output + epsilon)

    return torch.neg(torch.mean(loss)) 
Author: anuragranj, Project: cc, Lines: 12, Source: loss_functions.py
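A hypothetical call; `epsilon` is defined at module level in the original file, so it is stubbed here alongside the function above, and `output` is expected to already be a probability (e.g. after a sigmoid):

import torch

epsilon = 1e-8                                   # stand-in for the module-level constant
output = torch.sigmoid(torch.randn(4, 1))        # probabilities in (0, 1)
target = torch.randint(0, 2, (4, 1)).float()
loss = weighted_binary_cross_entropy(output, target, weights=[0.3, 0.7])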

Example 9: __call__

# Required import: import torch
# Or: from torch import neg
def __call__(self, output, target):
        if self.weights is not None:
            assert len(self.weights) == 2
            loss = self.weights[1] * (target * self.logsigmoid(output)) + \
                self.weights[0] * ((1 - target) * self.logsigmoid(-output))
        else:
            loss = target * self.logsigmoid(output) + (1 - target) * self.logsigmoid(-output)
        return torch.neg(torch.mean(loss)) 
Author: Arseha, Project: peakonly, Lines: 10, Source: training.py

Example 10: aten_neg

# Required import: import torch
# Or: from torch import neg
def aten_neg(inputs, attributes, scope):
    inp = inputs[0]
    ctx = current_context()
    net = ctx.network
    if ctx.is_tensorrt and has_trt_tensor(inputs):
        # TensorRT path: emit a unary NEG layer
        layer = net.add_unary(inp, trt.UnaryOperation.NEG)
        output = layer.get_output(0)
        output.name = scope
        layer.name = scope
        return [output]
    elif ctx.is_tvm and has_tvm_tensor(inputs):
        # TVM path: map to the negative op
        return [_op.negative(inp)]

    # fallback: plain PyTorch
    return [torch.neg(inp)]
Author: traveller59, Project: torch2trt, Lines: 16, Source: unary.py


Note: the torch.neg examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their original authors; copyright remains with those authors, and any use or redistribution should follow each project's license. Do not reproduce without permission.