

Python functional.logsigmoid Method Code Examples

This article collects typical usage examples of the Python method torch.nn.functional.logsigmoid. If you are wondering how functional.logsigmoid is used in Python, what it is good for, or want to see it in real code, the curated examples below should help. You can also explore further usage examples from its containing module, torch.nn.functional.


Below are 15 code examples of functional.logsigmoid, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
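Before diving into the examples, a minimal sketch of why F.logsigmoid exists at all: composing torch.log with torch.sigmoid underflows for large negative inputs, while F.logsigmoid evaluates log(sigmoid(x)) stably. The values in the comments are what float32 evaluation should produce:

import torch
import torch.nn.functional as F

x = torch.tensor([-200.0, 0.0, 200.0])
# Naive composition: sigmoid(-200) underflows to 0, so the log becomes -inf
print(torch.log(torch.sigmoid(x)))  # tensor([-inf, -0.6931, 0.])
# Stable evaluation: log(sigmoid(-200)) is simply -200
print(F.logsigmoid(x))              # tensor([-200.0000, -0.6931, 0.])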

Example 1: loss_per_level

# Module import required: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import logsigmoid [as alias]
def loss_per_level(self, estConf, gtDisp):
        N, C, H, W = estConf.shape
        scaled_gtDisp = gtDisp
        scale = 1.0
        if gtDisp.shape[-2] != H or gtDisp.shape[-1] != W:
            # compute scale per level and scale gtDisp
            scale = gtDisp.shape[-1] / (W * 1.0)
            scaled_gtDisp = gtDisp / scale
            scaled_gtDisp = self.scale_func(scaled_gtDisp, (H, W))

        # mask for valid disparity
        # greater than start_disp and less than the (scaled) max disparity
        mask = (scaled_gtDisp > self.start_disp) & (scaled_gtDisp < (self.max_disp / scale))
        mask = mask.detach_().type_as(gtDisp)

        # NLL loss
        valid_pixel_number = mask.float().sum()
        if valid_pixel_number < 1.0:
            valid_pixel_number = 1.0
        loss = (-1.0 * F.logsigmoid(estConf) * mask).sum() / valid_pixel_number

        return loss 
Developer ID: DeepMotionAIResearch, Project: DenseMatchingBenchmark, Lines: 24, Source: conf_nll_loss.py

Example 2: forward

# Module import required: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import logsigmoid [as alias]
def forward(self, pos_u, pos_v, neg_v):
        ''' Do forward and backward. Designed for future use. '''
        emb_u = self.u_embeddings(pos_u)
        emb_v = self.v_embeddings(pos_v)
        emb_neg_v = self.v_embeddings(neg_v)

        score = torch.sum(torch.mul(emb_u, emb_v), dim=1)
        score = torch.clamp(score, max=6, min=-6)
        score = -F.logsigmoid(score)

        neg_score = torch.bmm(emb_neg_v, emb_u.unsqueeze(2)).squeeze()
        neg_score = torch.clamp(neg_score, max=6, min=-6)
        neg_score = -torch.sum(F.logsigmoid(-neg_score), dim=1)

        #return torch.mean(score + neg_score)
        return torch.sum(score), torch.sum(neg_score) 
Developer ID: dmlc, Project: dgl, Lines: 18, Source: model.py
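An aside on the shapes in this example (a hedged sketch; the batch size, negative count, and embedding dimension below are illustrative, not from the dgl project): pos_u and pos_v are (B,) index tensors and neg_v is (B, K) with K negatives per positive pair, so emb_neg_v is (B, K, D) and the bmm against emb_u.unsqueeze(2) produces the (B, K) matrix of negative scores:

import torch

B, K, D = 4, 5, 16                  # batch size, negatives per pair, embedding dim
emb_u = torch.randn(B, D)           # center-word embeddings
emb_neg_v = torch.randn(B, K, D)    # K negative context embeddings per center word
# (B, K, D) @ (B, D, 1) -> (B, K, 1) -> (B, K)
neg_score = torch.bmm(emb_neg_v, emb_u.unsqueeze(2)).squeeze()
assert neg_score.shape == (B, K)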

Example 3: train_step_pairwise

# Module import required: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import logsigmoid [as alias]
def train_step_pairwise(self, pos_h, pos_r, pos_t, neg_h, neg_r, neg_t):
        pos_preds = self.model(pos_h, pos_r, pos_t)
        neg_preds = self.model(neg_h, neg_r, neg_t)

        if self.config.sampling == 'adversarial_negative_sampling':
            # RotatE-style self-adversarial negative sampling; alpha is the temperature.
            pos_preds = -pos_preds
            neg_preds = -neg_preds
            pos_preds = F.logsigmoid(pos_preds)
            neg_preds = neg_preds.view((-1, self.config.neg_rate))
            softmax = nn.Softmax(dim=1)(neg_preds*self.config.alpha).detach()
            neg_preds = torch.sum(softmax * (F.logsigmoid(-neg_preds)), dim=-1)
            loss = -neg_preds.mean() - pos_preds.mean()
        else:
            # others that use margin-based & pairwise loss function. (uniform or bern)
            loss = pos_preds + self.config.margin - neg_preds
            loss = torch.max(loss, torch.zeros_like(loss)).sum()
            
        if hasattr(self.model, 'get_reg'):
            # currently only NTN uses a regularizer;
            # other pairwise KGE methods regularize their parameters via normalization instead.
            loss += self.model.get_reg()

        return loss 
Developer ID: Sujit-O, Project: pykg2vec, Lines: 26, Source: trainer.py
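The adversarial branch above implements RotatE-style self-adversarial negative sampling: each negative's log-sigmoid term is weighted by a softmax over the negatives' own scores, detached so no gradient flows through the weights. A hedged, self-contained sketch of just that weighting (the scores and temperature below are illustrative):

import torch
import torch.nn as nn
import torch.nn.functional as F

neg_preds = torch.randn(8, 4)    # (batch, neg_rate) negative scores, illustrative
alpha = 1.0                      # sampling temperature
weights = nn.Softmax(dim=1)(neg_preds * alpha).detach()  # no gradient through weights
neg_term = torch.sum(weights * F.logsigmoid(-neg_preds), dim=-1)  # (batch,)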

Example 4: forward

# Module import required: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import logsigmoid [as alias]
def forward(self, u, i, j):
        """Return loss value.
        
        Args:
            u(torch.LongTensor): tensor stored user indexes. [batch_size,]
            i(torch.LongTensor): tensor stored item indexes which is prefered by user. [batch_size,]
            j(torch.LongTensor): tensor stored item indexes which is not prefered by user. [batch_size,]
        
        Returns:
            torch.FloatTensor
        """
        u = self.W[u, :]
        i = self.H[i, :]
        j = self.H[j, :]
        x_ui = torch.mul(u, i).sum(dim=1)
        x_uj = torch.mul(u, j).sum(dim=1)
        x_uij = x_ui - x_uj
        log_prob = F.logsigmoid(x_uij).sum()
        regularization = self.weight_decay * (u.norm(dim=1).pow(2).sum() + i.norm(dim=1).pow(2).sum() + j.norm(dim=1).pow(2).sum())
        return -log_prob + regularization 
Developer ID: sh0416, Project: bpr, Lines: 22, Source: train.py
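This is the classic BPR-Opt objective: maximize log sigmoid(x_ui - x_uj) over sampled (user, preferred item, non-preferred item) triples, plus L2 regularization. A hedged sketch of the surrounding state this forward assumes (the attribute names follow the snippet; class name, sizes, and initialization are illustrative, not taken from the project):

import torch
import torch.nn as nn

class BPR(nn.Module):
    # Minimal container matching the attributes used in the forward above.
    def __init__(self, n_users, n_items, dim=32, weight_decay=1e-4):
        super().__init__()
        self.W = nn.Parameter(torch.randn(n_users, dim) * 0.01)  # user factors
        self.H = nn.Parameter(torch.randn(n_items, dim) * 0.01)  # item factors
        self.weight_decay = weight_decay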

Example 5: get_activation_function

# Module import required: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import logsigmoid [as alias]
def get_activation_function(activation):
    if activation == "relu":
        return torch.relu
    elif activation == "tanh":
        return torch.tanh
    elif activation == "sigmoid":
        return torch.relu
    elif activation == "lrelu":
        return F.leaky_relu
    elif activation == "rrelu":
        return torch.rrelu
    elif activation == "prelu":
        return torch.prelu
    elif activation == "elu":
        return F.elu
    elif activation == "selu":
        return torch.selu
    elif activation == "log_sigmoid":
        return F.logsigmoid
    elif activation == "softplus":
        return F.softplus
    else:
        raise ValueError("Activation function %s unknown" % activation)
Developer ID: diana-hep, Project: madminer, Lines: 25, Source: utils.py
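Usage is a simple lookup; a hedged one-liner using the function above (input values are illustrative):

import torch

act = get_activation_function("log_sigmoid")
y = act(torch.randn(2, 3))   # elementwise log(sigmoid(x)), same shape as the input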

Example 6: forward

# Module import required: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import logsigmoid [as alias]
def forward(self, u_pos, v_pos, v_neg):
        batch_size = u_pos.size(0)
        positive_size = v_pos.size(1)
        negative_size = v_neg.size(1)

        embed_u = self.embedding(u_pos)
        embed_v = self.embedding(v_pos)

        score = torch.bmm(embed_v, embed_u.unsqueeze(2)).squeeze(-1)
        score = torch.sum(score, dim=1) / positive_size
        log_target = fnn.logsigmoid(score).squeeze()

        neg_embed_v = self.embedding(v_neg)

        neg_score = torch.bmm(neg_embed_v, embed_u.unsqueeze(2)).squeeze(-1)
        neg_score = torch.sum(neg_score, dim=1) / negative_size
        sum_log_sampled = fnn.logsigmoid(-1 * neg_score).squeeze()

        loss = log_target + sum_log_sampled

        return -1 * loss.sum() / batch_size 
Developer ID: naver, Project: kor2vec, Lines: 23, Source: skip_gram.py

Example 7: forward

# Module import required: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import logsigmoid [as alias]
def forward(self, pos_u, pos_v, neg_v):
        """Forward process.

        As pytorch designed, all variables must be batch format, so all input of this method is a list of word id.

        Args:
            pos_u: list of center word ids for positive word pairs.
            pos_v: list of neibor word ids for positive word pairs.
            neg_u: list of center word ids for negative word pairs.
            neg_v: list of neibor word ids for negative word pairs.

        Returns:
            Loss of this process, a pytorch variable.
        """
        emb_u = self.u_embeddings(pos_u)
        emb_v = self.v_embeddings(pos_v)
        score = torch.mul(emb_u, emb_v).squeeze()
        score = torch.sum(score, dim=1)
        score = F.logsigmoid(score)
        neg_emb_v = self.v_embeddings(neg_v)
        neg_score = torch.bmm(neg_emb_v, emb_u.unsqueeze(2)).squeeze()
        neg_score = F.logsigmoid(-1 * neg_score)
        return -1 * (torch.sum(score)+torch.sum(neg_score)) 
Developer ID: Adoni, Project: word2vec_pytorch, Lines: 25, Source: model.py

Example 8: forward

# Module import required: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import logsigmoid [as alias]
def forward(self, pos_u, pos_v, neg_u, neg_v):
        losses = []
        emb_u = []
        for i in range(len(pos_u)):
            emb_ui = self.u_embeddings(Variable(torch.LongTensor(pos_u[i])))
            emb_u.append(np.sum(emb_ui.data.numpy(), axis=0).tolist())
        emb_u = Variable(torch.FloatTensor(emb_u))
        emb_v = self.v_embeddings(Variable(torch.LongTensor(pos_v)))
        score = torch.mul(emb_u, emb_v)
        score = torch.sum(score, dim=1)
        score = F.logsigmoid(score)
        losses.append(sum(score))

        neg_emb_u = []
        for i in range(len(neg_u)):
            neg_emb_ui = self.u_embeddings(Variable(torch.LongTensor(neg_u[i])))
            neg_emb_u.append(np.sum(neg_emb_ui.data.numpy(), axis=0).tolist())
        neg_emb_u = Variable(torch.FloatTensor(neg_emb_u))
        neg_emb_v = self.v_embeddings(Variable(torch.LongTensor(neg_v)))
        neg_score = torch.mul(neg_emb_u, neg_emb_v)
        neg_score = torch.sum(neg_score, dim=1)
        neg_score = F.logsigmoid(-1 * neg_score)
        losses.append(sum(neg_score))

        return -1 * sum(losses) 
Developer ID: endymecy, Project: pytorch-nlp, Lines: 27, Source: model.py
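The Python-level loop above sums sub-token embeddings per example by round-tripping through NumPy, which breaks the autograd graph. As a hedged aside, nn.EmbeddingBag with mode='sum' performs the same bag-of-ids aggregation in a single differentiable call (the ids and offsets below are illustrative):

import torch
import torch.nn as nn

bag = nn.EmbeddingBag(num_embeddings=1000, embedding_dim=64, mode='sum')
flat_ids = torch.tensor([3, 7, 9, 2, 5])  # all examples' sub-token ids, concatenated
offsets = torch.tensor([0, 3])            # example 0 owns ids[0:3], example 1 owns ids[3:]
emb_u = bag(flat_ids, offsets)            # (2, 64) summed embeddings, gradients intact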

Example 9: compute_pseudo_rews

# Module import required: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import logsigmoid [as alias]
def compute_pseudo_rews(data, rew_giver, state_only=False):
    if isinstance(data, Traj):
        epis = data.current_epis
    else:
        epis = data

    for epi in epis:
        obs = torch.tensor(epi['obs'], dtype=torch.float, device=get_device())
        if state_only:
            logits, _ = rew_giver(obs)
        else:
            acs = torch.tensor(
                epi['acs'], dtype=torch.float, device=get_device())
            logits, _ = rew_giver(obs, acs)
        with torch.no_grad():
            rews = -F.logsigmoid(-logits).cpu().numpy()
        epi['real_rews'] = copy.deepcopy(epi['rews'])
        epi['rews'] = rews

    return data 
Developer ID: DeepX-inc, Project: machina, Lines: 22, Source: epi_functional.py
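The pseudo-reward here, -log(sigmoid(-logits)), is mathematically identical to softplus(logits), which keeps rewards positive and grows linearly with confident discriminator outputs. A quick hedged check of the identity:

import torch
import torch.nn.functional as F

logits = torch.randn(5)
assert torch.allclose(-F.logsigmoid(-logits), F.softplus(logits), atol=1e-6)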

Example 10: step

# Module import required: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import logsigmoid [as alias]
def step(self, x, n, total_computes=None, hard_decision=False, **kwargs):
        """
        n is the index of the previous block
        returns the binary decision, the halting signal and the logits
        """
        if self.detach_before_classifier:
            x = x.detach()

        # If adding an embedding of the total computes:
        if self.shift_block_input:
            computes_embed = F.embedding(total_computes, self.input_shifters)
            x = x + computes_embed
        x = self.halting_predictors[n if self.separate_halting_predictors else 0](x)
        if self.use_skewed_sigmoid:
            halt = F.logsigmoid(self.skewness * x)  # the log-p of halting
            halt_logits = torch.cat((halt, halt - self.skewness * x), dim=-1)  # log-p of halting v. computing
        else:
            halt = F.logsigmoid(x)  # the log-p of halting
            halt_logits = torch.cat((halt, halt-x), dim=-1)  # log-p of halting v. computing
        if hard_decision:
            halt = torch.exp(halt.squeeze(-1))
            return halt.ge(self.thresholds[n])
        return halt_logits  # T, B, 2 
Developer ID: elbayadm, Project: attn2d, Lines: 25, Source: dynamic_halters.py
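The concatenation relies on the identity log(sigmoid(-x)) = log(sigmoid(x)) - x, so the log-probability of continuing to compute is obtained without a second logsigmoid call. A quick hedged check:

import torch
import torch.nn.functional as F

x = torch.randn(6)
assert torch.allclose(F.logsigmoid(-x), F.logsigmoid(x) - x, atol=1e-6)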

Example 11: forward

# Module import required: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import logsigmoid [as alias]
def forward(self, pred, target):
        zt = BF.logits_distribution(pred, target, self.classes)
        return BF.logits_nll_loss(- F.logsigmoid(zt),
                                  target, self.weight, self.reduction) 
Developer ID: PistonY, Project: torch-toolbox, Lines: 6, Source: loss.py

Example 12: forward

# Module import required: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import logsigmoid [as alias]
def forward(self, input, target):
        input = self.flatten_images(input)
        target = self.flatten_images(target)
        weights = torch.where(target > 0, torch.ones_like(target) * self.words_weights,  # words are 1
                              torch.ones_like(target) * self.background_weights)
        pt = F.logsigmoid(-input * (target * 2 - 1))
        loss = F.binary_cross_entropy_with_logits(input, target, weight=weights, reduction='none')

        loss = (pt * self.gamma).exp() * loss
        return loss.mean() 
Developer ID: yu45020, Project: Text_Segmentation_Image_Inpainting, Lines: 12, Source: loss.py
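The modulating term here is a focal-loss weight in disguise: F.logsigmoid(-input * (target * 2 - 1)) equals log(1 - p_t), where p_t is the probability assigned to the true class, so (pt * self.gamma).exp() is exactly (1 - p_t)^gamma. A hedged numerical check (logits and targets are illustrative):

import torch
import torch.nn.functional as F

x = torch.randn(4)                          # logits
t = torch.tensor([0., 1., 1., 0.])          # binary targets
p_t = torch.where(t > 0, torch.sigmoid(x), 1 - torch.sigmoid(x))
gamma = 2.0
focal_w = (F.logsigmoid(-x * (t * 2 - 1)) * gamma).exp()
assert torch.allclose(focal_w, (1 - p_t) ** gamma, atol=1e-6)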

Example 13: forward

# Module import required: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import logsigmoid [as alias]
def forward(self, pos_u, pos_v, neg_v):
        emb_u = self.u_embeddings(pos_u)
        emb_v = self.v_embeddings(pos_v)
        emb_neg_v = self.v_embeddings(neg_v)

        score = torch.sum(torch.mul(emb_u, emb_v), dim=1)
        score = torch.clamp(score, max=10, min=-10)
        score = -F.logsigmoid(score)

        neg_score = torch.bmm(emb_neg_v, emb_u.unsqueeze(2)).squeeze()
        neg_score = torch.clamp(neg_score, max=10, min=-10)
        neg_score = -torch.sum(F.logsigmoid(-neg_score), dim=1)

        return torch.mean(score + neg_score) 
Developer ID: dmlc, Project: dgl, Lines: 16, Source: model.py

Example 14: fast_logsigmoid

# Module import required: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import logsigmoid [as alias]
def fast_logsigmoid(self, score):
        """ do fast logsigmoid by looking up in a pre-defined table """
        idx = torch.floor((score + 6.01) / 0.01).long()
        return self.logsigmoid_table[idx] 
Developer ID: dmlc, Project: dgl, Lines: 6, Source: model.py
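The lookup assumes a precomputed self.logsigmoid_table. A hedged sketch of how such a table could be built so it stays consistent with the indexing above (scores are presumed clamped to [-6, 6] beforehand, as in the other dgl examples):

import torch
import torch.nn.functional as F

# idx = floor((score + 6.01) / 0.01) maps [-6, 6] roughly onto [1, 1201],
# so allocate a little slack at both ends.
grid = torch.arange(0, 1203, dtype=torch.float) * 0.01 - 6.01
logsigmoid_table = F.logsigmoid(grid)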

Example 15: bernoulli_action_log_prob

# Module import required: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import logsigmoid [as alias]
def bernoulli_action_log_prob(logit, action):
    """Calculate the log p of an action with respect to a Bernoulli
    distribution. Use logit rather than prob for numerical stability."""
    if action == 0:
        return F.logsigmoid(-logit)
    else:
        return F.logsigmoid(logit) 
Developer ID: dmlc, Project: dgl, Lines: 9, Source: model.py
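A quick hedged sanity check of this helper against torch.distributions (the logit value is illustrative):

import torch
import torch.nn.functional as F
from torch.distributions import Bernoulli

logit = torch.tensor(0.3)
dist = Bernoulli(logits=logit)
for action in (0, 1):
    lp = bernoulli_action_log_prob(logit, action)
    assert torch.allclose(lp, dist.log_prob(torch.tensor(float(action))), atol=1e-6)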


Note: the torch.nn.functional.logsigmoid examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Refer to each project's License before distributing or using the code, and do not repost without permission.