Python torch.logsumexp Method Code Examples

This article collects typical usage examples of the Python method torch.logsumexp. If you are wondering what torch.logsumexp does, how to call it, or where to find working examples, the curated code samples below should help. You can also explore further usage examples from the torch module.


Fifteen code examples of torch.logsumexp are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
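
Before diving into the examples, a quick self-contained illustration of the method itself: torch.logsumexp(input, dim, keepdim=False) computes log(sum(exp(input))) along the given dimension in a numerically stable way. A minimal sketch:

import torch

x = torch.tensor([1000.0, 1000.0])      # large logits overflow a naive computation
naive = torch.log(torch.exp(x).sum())   # inf, because exp(1000) overflows float32
stable = torch.logsumexp(x, dim=0)      # 1000 + log(2) ~ 1000.6931, computed stably
print(naive, stable)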

Example 1: forward

# Required import: import torch [as alias]
# Alternatively: from torch import logsumexp [as alias]
def forward(self, x, target):
        similarity_matrix = x @ x.T  # gradient must flow through the similarities here (no detach)
        label_matrix = target.unsqueeze(1) == target.unsqueeze(0)
        negative_matrix = label_matrix.logical_not()
        positive_matrix = label_matrix.fill_diagonal_(False)

        sp = torch.where(positive_matrix, similarity_matrix,
                         torch.zeros_like(similarity_matrix))
        sn = torch.where(negative_matrix, similarity_matrix,
                         torch.zeros_like(similarity_matrix))

        ap = torch.clamp_min(1 + self.m - sp.detach(), min=0.)
        an = torch.clamp_min(sn.detach() + self.m, min=0.)

        logit_p = -self.gamma * ap * (sp - self.dp)
        logit_n = self.gamma * an * (sn - self.dn)

        logit_p = torch.where(positive_matrix, logit_p,
                              torch.zeros_like(logit_p))
        logit_n = torch.where(negative_matrix, logit_n,
                              torch.zeros_like(logit_n))

        loss = F.softplus(torch.logsumexp(logit_p, dim=1) +
                          torch.logsumexp(logit_n, dim=1)).mean()
        return loss 
Developer: PistonY, Project: torch-toolbox, Lines: 27, Source: loss.py
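
A hedged usage sketch for this example: the attributes self.m, self.gamma, self.dp and self.dn are defined elsewhere in loss.py and not shown above, so the standalone function below fills them in with the Circle Loss paper's conventions (dp = 1 - m, dn = m); the default values of m and gamma are illustrative assumptions, not copied from the project.

import torch
import torch.nn.functional as F

def circle_loss_sketch(x, target, m=0.25, gamma=64.0):
    # m/gamma defaults and dp = 1 - m, dn = m are assumptions from the paper
    dp, dn = 1.0 - m, m
    sim = x @ x.T
    eq = target.unsqueeze(1) == target.unsqueeze(0)
    neg = eq.logical_not()
    pos = eq.fill_diagonal_(False)
    sp = torch.where(pos, sim, torch.zeros_like(sim))
    sn = torch.where(neg, sim, torch.zeros_like(sim))
    ap = torch.clamp_min(1 + m - sp.detach(), 0.0)
    an = torch.clamp_min(sn.detach() + m, 0.0)
    logit_p = torch.where(pos, -gamma * ap * (sp - dp), torch.zeros_like(sim))
    logit_n = torch.where(neg, gamma * an * (sn - dn), torch.zeros_like(sim))
    return F.softplus(torch.logsumexp(logit_p, dim=1) +
                      torch.logsumexp(logit_n, dim=1)).mean()

embeddings = F.normalize(torch.randn(8, 64), dim=1)  # toy batch of unit-norm embeddings
labels = torch.randint(0, 4, (8,))
print(circle_loss_sketch(embeddings, labels))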

Example 2: forward

# Required import: import torch [as alias]
# Alternatively: from torch import logsumexp [as alias]
def forward(self, batch, labels):
        """
        Args:
            batch:   torch.Tensor() [(BS x embed_dim)], batch of embeddings
            labels:  np.ndarray [(BS x 1)], for each element of the batch assigns a class [0,...,C-1]
        Returns:
            proxynca loss (torch.Tensor(), batch-averaged)
        """
        #Normalize batch in case it is not normalized (which should never be the case for ProxyNCA, but still).
        #Same for the PROXIES. Note that the multiplication by 3 seems arbitrary, but helps the actual training.
        batch       = 3*torch.nn.functional.normalize(batch, dim=1)
        PROXIES     = 3*torch.nn.functional.normalize(self.PROXIES, dim=1)
        #Group required proxies
        pos_proxies = torch.stack([PROXIES[pos_label:pos_label+1,:] for pos_label in labels])
        neg_proxies = torch.stack([torch.cat([self.all_classes[:class_label],self.all_classes[class_label+1:]]) for class_label in labels])
        neg_proxies = torch.stack([PROXIES[neg_labels,:] for neg_labels in neg_proxies])
        #Compute Proxy-distances
        dist_to_neg_proxies = torch.sum((batch[:,None,:]-neg_proxies).pow(2),dim=-1)
        dist_to_pos_proxies = torch.sum((batch[:,None,:]-pos_proxies).pow(2),dim=-1)
        #Compute final proxy-based NCA loss
        negative_log_proxy_nca_loss = torch.mean(dist_to_pos_proxies[:,0] + torch.logsumexp(-dist_to_neg_proxies, dim=1))
        return negative_log_proxy_nca_loss 
Developer: Confusezius, Project: Deep-Metric-Learning-Baselines, Lines: 24, Source: losses.py
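
The torch.logsumexp(-dist, dim=1) term above acts as a smooth maximum of the negated distances, i.e. a differentiable stand-in for the distance to the closest negative proxy. A small numeric sketch of that effect:

import torch

d = torch.tensor([4.0, 9.0, 1.5])      # squared distances to negative proxies
soft_max = torch.logsumexp(-d, dim=0)  # ~ -1.42, close to max(-d) = -1.5
print(soft_max, (-d).max())            # the soft version stays differentiable everywhere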

Example 3: forward

# Required import: import torch [as alias]
# Alternatively: from torch import logsumexp [as alias]
def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss, as a Variable
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        translations, bleu_scores = self.generate_translations(model, sample)
        nll_loss = self.compute_nll(model, sample, translations)
        loss = nll_loss[:, 0] + torch.logsumexp(-nll_loss, 1)
        if reduce:
            loss = loss.sum()
        sample_size = (
            sample["target"].size(0) if self.args.sentence_avg else sample["ntokens"]
        )
        logging_output = {
            "loss": utils.item(loss.data) if reduce else loss.data,
            "ntokens": sample["ntokens"],
            "nsentences": sample["target"].size(0),
            "sample_size": sample_size,
        }
        return loss, sample_size, logging_output 
Developer: pytorch, Project: translate, Lines: 25, Source: sequence_criterions.py

Example 4: forward

# Required import: import torch [as alias]
# Alternatively: from torch import logsumexp [as alias]
def forward(self, tensor):
        tensor = self.blur(tensor)
        tensor = self.nonlinearity(tensor)

        centerbias = self.centerbias(tensor)
        if self.nonlinearity_target == 'density':
            tensor *= centerbias
        elif self.nonlinearity_target == 'logdensity':
            tensor += centerbias
        else:
            raise ValueError(self.nonlinearity_target)

        if self.nonlinearity_target == 'density':
            sums = torch.sum(tensor, dim=(2, 3), keepdim=True)
            tensor = tensor / sums
            tensor = torch.log(tensor)
        elif self.nonlinearity_target == 'logdensity':
            logsums = torch.logsumexp(tensor, dim=(2, 3), keepdim=True)
            tensor = tensor - logsums
        else:
            raise ValueError(self.nonlinearity_target)

        return tensor 
Developer: matthias-k, Project: pysaliency, Lines: 25, Source: saliency_map_conversion_torch.py
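
Subtracting the spatial logsumexp, as in the 'logdensity' branch above, renormalizes each log-density map so that its exponential sums to one. A minimal check with the same [batch, channel, H, W] layout:

import torch

logdensity = torch.randn(2, 1, 4, 4)
logdensity = logdensity - torch.logsumexp(logdensity, dim=(2, 3), keepdim=True)
print(logdensity.exp().sum(dim=(2, 3)))  # each map now sums to 1 (up to float error)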

Example 5: _log_density

# Required import: import torch [as alias]
# Alternatively: from torch import logsumexp [as alias]
def _log_density(self, stimulus):
        smap = self.parent_model.log_density(stimulus)

        target_shape = (stimulus.shape[0],
                        stimulus.shape[1])

        if smap.shape != target_shape:
            if self.verbose:
                print("Resizing saliency map", smap.shape, target_shape)
            x_factor = target_shape[1] / smap.shape[1]
            y_factor = target_shape[0] / smap.shape[0]

            smap = zoom(smap, [y_factor, x_factor], order=1, mode='nearest')

            smap -= logsumexp(smap)  # NumPy-level logsumexp (e.g. scipy.special.logsumexp); renormalizes the log density

            assert smap.shape == target_shape

        return smap 
Developer: matthias-k, Project: pysaliency, Lines: 21, Source: models.py

Example 6: conditional_log_density

# Required import: import torch [as alias]
# Alternatively: from torch import logsumexp [as alias]
def conditional_log_density(self, stimulus, x_hist, y_hist, t_hist, attributes=None, out=None):
        smap = self.parent_model.conditional_log_density(stimulus, x_hist, y_hist, t_hist, attributes=attributes, out=out)

        target_shape = (stimulus.shape[0],
                        stimulus.shape[1])

        if smap.shape != target_shape:
            if self.verbose:
                print("Resizing saliency map", smap.shape, target_shape)
            x_factor = target_shape[1] / smap.shape[1]
            y_factor = target_shape[0] / smap.shape[0]

            smap = zoom(smap, [y_factor, x_factor], order=1, mode='nearest')

            smap -= logsumexp(smap)  # NumPy-level logsumexp (e.g. scipy.special.logsumexp); renormalizes the log density

            assert smap.shape == target_shape

        return smap 
Developer: matthias-k, Project: pysaliency, Lines: 21, Source: models.py

Example 7: _prior_log_likelihood

# Required import: import torch [as alias]
# Alternatively: from torch import logsumexp [as alias]
def _prior_log_likelihood(self, z):
        """Computes the log likelihood of the prior, p(z), for different priors."""
        if self.prior == "weak":
            return self._sample_log_likelihood(z, torch.tensor([1.], device=self.device), 1)
        elif self.prior == "mog":
            mu, var = self._get_mixture_parameters()
            log_k = F.log_softmax(self.mixture_weights, dim=0)  # explicit dim for the 1-D weight vector
            # [batch_size, num_mixture]
            mixture_log = self._sample_log_likelihood(z.unsqueeze(
                1), torch.tensor([[1.]], device=self.device), dim=2, mu=mu, var=var)
            # [batch_size]
            return torch.logsumexp(mixture_log + log_k, dim=1)
        elif self.prior == "vamp":
            # [num_pseudo, z_dim]
            mu, var = self.encoder(self.pseudo_inputs, self.pseudo_lengths)
            # [num_pseudo, ]
            log_k = F.log_softmax(self.pseudo_weights, dim=0)  # explicit dim for the 1-D weight vector

            # [batch_size, num_pseudo]
            pseudo_log = self._sample_log_likelihood(z.unsqueeze(1), torch.tensor(
                [[1.]], device=self.device), dim=2, mu=mu, var=var)
            # [batch_size, ]
            return torch.logsumexp(pseudo_log + log_k, dim=1) 
Developer: tom-pelsmaeker, Project: deep-generative-lm, Lines: 25, Source: bowman_decoder.py
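
The "mog" branch computes the standard mixture marginal log p(z) = logsumexp_k(log w_k + log N(z | mu_k, var_k)). A self-contained sketch with torch.distributions, where all shapes and values are illustrative assumptions mirroring the comments above:

import torch
import torch.nn.functional as F
from torch.distributions import Normal

B, K, D = 4, 3, 2                                  # batch, mixture components, latent dim
z = torch.randn(B, D)
mu, log_var = torch.randn(K, D), torch.randn(K, D)
log_k = F.log_softmax(torch.randn(K), dim=0)       # mixture log weights

components = Normal(mu, (0.5 * log_var).exp())     # std from log variance
mixture_log = components.log_prob(z.unsqueeze(1)).sum(dim=2)  # [B, K]
log_pz = torch.logsumexp(mixture_log + log_k, dim=1)          # [B]
print(log_pz.shape)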

Example 8: _computes_transition

# Required import: import torch [as alias]
# Alternatively: from torch import logsumexp [as alias]
def _computes_transition(self, prev_log_prob, path, path_lens, cum_log_prob, y, skip_accum=False):
        bs, max_path_len = path.size()
        mat = prev_log_prob.new_zeros(3, bs, max_path_len).fill_(self.log0)
        mat[0, :, :] = prev_log_prob
        mat[1, :, 1:] = prev_log_prob[:, :-1]
        mat[2, :, 2:] = prev_log_prob[:, :-2]
        # disable transition between the same symbols
        # (including blank-to-blank)
        same_transition = (path[:, :-2] == path[:, 2:])
        mat[2, :, 2:][same_transition] = self.log0
        log_prob = torch.logsumexp(mat, dim=0)
        outside = torch.arange(max_path_len, dtype=torch.int64) >= path_lens.unsqueeze(1)
        log_prob[outside] = self.log0
        if not skip_accum:
            cum_log_prob += log_prob
        batch_index = torch.arange(bs, dtype=torch.int64).unsqueeze(1)
        log_prob += y[batch_index, path]
        return log_prob 
Developer: hirofumi0810, Project: neural_sp, Lines: 20, Source: ctc.py
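
In the method above, torch.logsumexp(mat, dim=0) merges the three transitions CTC allows at each step (stay in the same state, advance one state, or skip over a blank between distinct labels). A toy illustration with one sequence, where log0 is an assumed stand-in for the self.log0 constant:

import torch

log0 = -1e10                                    # assumed value of self.log0 (effectively log 0)
prev = torch.tensor([[0.0, -1.0, -2.0, -3.0]])  # [bs=1, path_len=4] previous log probs
mat = prev.new_full((3, 1, 4), log0)
mat[0] = prev                                   # stay in the same state
mat[1, :, 1:] = prev[:, :-1]                    # advance by one state
mat[2, :, 2:] = prev[:, :-2]                    # skip a blank between different labels
print(torch.logsumexp(mat, dim=0))              # per-state total over the three paths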

Example 9: test_logsumexp

# Required import: import torch [as alias]
# Alternatively: from torch import logsumexp [as alias]
def test_logsumexp():
    inputs = torch.tensor([
        0.5, 0.5, 0.0, -2.1, 3.2, 7.0, -1.0, -100.0,
        float('-inf'),
        float('-inf'), 0.0
    ])
    inputs.requires_grad_()
    index = torch.tensor([0, 0, 1, 1, 1, 2, 4, 4, 5, 6, 6])
    splits = [2, 3, 1, 0, 2, 1, 2]

    outputs = scatter_logsumexp(inputs, index)

    for src, out in zip(inputs.split(splits), outputs.unbind()):
        assert out.tolist() == torch.logsumexp(src, dim=0).tolist()

    outputs.backward(torch.randn_like(outputs)) 
Developer: rusty1s, Project: pytorch_scatter, Lines: 18, Source: test_logsumexp.py

Example 10: semantic_loss_exactly_one

# Required import: import torch [as alias]
# Alternatively: from torch import logsumexp [as alias]
def semantic_loss_exactly_one(log_prob):
    """Semantic loss to encourage the multinomial probability to be "peaked",
    i.e. only one class is picked.
    The loss has the form -log sum_{i=1}^n p_i prod_{j=1, j!=i}^n (1 - p_j).
    Paper: http://web.cs.ucla.edu/~guyvdb/papers/XuICML18.pdf
    Code: https://github.com/UCLA-StarAI/Semantic-Loss/blob/master/semi_supervised/semantic.py
    Parameters:
        log_prob: log probability of a multinomial distribution, shape (batch_size, n)
    Returns:
        semantic_loss: shape (batch_size)
    """
    _, argmaxes = torch.max(log_prob, dim=-1)
    # Compute log(1-p) separately for the largest probabilities, by doing
    # logsumexp on the rest of the log probabilities.
    log_prob_temp = log_prob.clone()
    log_prob_temp[range(log_prob.shape[0]), argmaxes] = torch.tensor(float('-inf'))
    log_1mprob_max = torch.logsumexp(log_prob_temp, dim=-1)
    # Compute log(1-p) normally for the rest of the probabilities
    log_1mprob = torch.log1p(-torch.exp(log_prob_temp))
    log_1mprob[range(log_prob.shape[0]), argmaxes] = log_1mprob_max
    loss = -(log_1mprob.sum(dim=-1) + torch.logsumexp(log_prob - log_1mprob, dim=-1))
    return loss 
Developer: HazyResearch, Project: learning-circuits, Lines: 24, Source: semantic_loss.py

Example 11: test_semantic_loss_exactly_one

# Required import: import torch [as alias]
# Alternatively: from torch import logsumexp [as alias]
def test_semantic_loss_exactly_one():
    m = 5
    logit = torch.randn(m)
    p = nn.functional.softmax(logit, dim=-1)
    # Compute manually
    result = 0.0
    for i in range(m):
        prod = p[i].clone()
        for j in range(m):
            if j != i:
                prod *= 1 - p[j]
        result += prod
    result = -torch.log(result)
    result1 = -torch.logsumexp(torch.log(1 - p).sum() + torch.log(p / (1 - p)), dim=-1)
    result2 = semantic_loss_exactly_one(p.unsqueeze(0)).squeeze()
    assert torch.allclose(result, result1)
    assert torch.allclose(result, result2) 
Developer: HazyResearch, Project: learning-circuits, Lines: 19, Source: semantic_loss.py

Example 12: compute_diag_log_prob

# Required import: import torch [as alias]
# Alternatively: from torch import logsumexp [as alias]
def compute_diag_log_prob(preds_mean, preds_log_cov, true_outputs, n_samples):
    '''
        Compute log prob assuming diagonal Gaussian with some mean and log cov
    '''
    preds_cov = torch.exp(preds_log_cov)

    # assumed: tile the targets across the sample dimension; the original
    # snippet referenced `repeated_true_outputs` without defining it
    repeated_true_outputs = true_outputs.repeat(n_samples, 1)

    log_prob = -0.5 * torch.sum(
        (preds_mean - repeated_true_outputs)**2 / preds_cov
    )

    # torch.logsumexp requires an explicit dim; reducing over dim 0 is assumed
    log_det = torch.logsumexp(torch.sum(preds_log_cov, 1), dim=0)
    log_det += log_2pi  # log_2pi: module-level constant in the source file
    log_det *= -0.5
    log_prob += log_det

    log_prob /= float(n_samples)
    return log_prob 
Developer: KamyarGh, Project: rl_swiss, Lines: 19, Source: enc_free_neural_process.py
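
As a cross-check on the diagonal-Gaussian algebra, the per-sample log density can be compared against torch.distributions.Normal; this sketch is independent of the snippet's sampling and averaging conventions:

import math
import torch
from torch.distributions import Normal

mean, log_cov, x = torch.randn(5, 3), torch.randn(5, 3), torch.randn(5, 3)
manual = -0.5 * (((x - mean) ** 2) / log_cov.exp()
                 + log_cov + math.log(2 * math.pi)).sum(dim=1)
reference = Normal(mean, (0.5 * log_cov).exp()).log_prob(x).sum(dim=1)
print(torch.allclose(manual, reference, atol=1e-5))  # True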

Example 13: _forward_alpha

# Required import: import torch [as alias]
# Alternatively: from torch import logsumexp [as alias]
def _forward_alpha(self, emissions, M):
        Tt, B, Ts = emissions.size()
        alpha = utils.fill_with_neg_inf(torch.empty_like(emissions))  # Tt, B, Ts
        # initialization  t=1
        initial = torch.empty_like(alpha[0]).fill_(-math.log(Ts))  # log(1/Ts)
        # initial = utils.fill_with_neg_inf(torch.empty_like(alpha[0])) 
        # initial[:, 0] = 0
        alpha[0] = emissions[0] + initial
        # print('Initialize alpha:', alpha[0])
        # induction
        for i in range(1, Tt):
            alpha[i] = torch.logsumexp(alpha[i-1].unsqueeze(-1) + M[i-1], dim=1)
            alpha[i] = alpha[i] + emissions[i]
            # print('Emissions@', i, emissions[i])
            # print('alpha@',i, alpha[i])
        return alpha 
Developer: elbayadm, Project: attn2d, Lines: 18, Source: hmm_controls.py

Example 14: _forward_alpha

# Required import: import torch [as alias]
# Alternatively: from torch import logsumexp [as alias]
def _forward_alpha(self, emissions, M):
        Tt, B, Ts = emissions.size()
        alpha = utils.fill_with_neg_inf(torch.empty_like(emissions))  # Tt, B, Ts
        # initialization  t=1
        # initial = torch.empty_like(alpha[0]).fill_(-math.log(Ts))  # log(1/Ts)
        initial = utils.fill_with_neg_inf(torch.empty_like(alpha[0])) 
        initial[:, 0] = 0
        alpha[0] = emissions[0] + initial
        # print('Initialize alpha:', alpha[0])
        # induction
        for i in range(1, Tt):
            alpha[i] = torch.logsumexp(alpha[i-1].unsqueeze(-1) + M[i-1], dim=1)
            alpha[i] = alpha[i] + emissions[i]
            # print('Emissions@', i, emissions[i])
            # print('alpha@',i, alpha[i])
        return alpha 
Developer: elbayadm, Project: attn2d, Lines: 18, Source: hmm_controls3.py
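
Examples 13 and 14 differ only in their initialization (uniform over source positions vs. forcing position 0); the induction step is the classical HMM forward recursion alpha_t = emissions_t + logsumexp_j(alpha_{t-1, j} + M_{t-1, j, :}). A tiny self-contained run with the same [Tt, B, Ts] layout and illustrative random inputs:

import math
import torch

Tt, B, Ts = 3, 1, 4
emissions = torch.randn(Tt, B, Ts).log_softmax(dim=-1)
M = torch.randn(Tt - 1, B, Ts, Ts).log_softmax(dim=-1)  # per-step transition log probs

alpha = torch.full_like(emissions, float('-inf'))
alpha[0] = emissions[0] - math.log(Ts)                  # uniform start, log(1/Ts)
for i in range(1, Tt):
    alpha[i] = emissions[i] + torch.logsumexp(alpha[i - 1].unsqueeze(-1) + M[i - 1], dim=1)
print(alpha[-1].logsumexp(dim=-1))                      # total log likelihood per batch element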

Example 15: log_prob

# Required import: import torch [as alias]
# Alternatively: from torch import logsumexp [as alias]
def log_prob(self, value):
        # pad value for evaluation under component density
        value = value.permute(2, 0, 1) # [S, B, D]
        value = value[..., None].repeat(1, 1, 1, self.batch_shape[-1])  # [S, B, D, M]
        log_prob_components = self.components_distribution.log_prob(value).permute(1, 2, 3, 0)

        # calculate numerically stable log coefficients, and pad
        log_prob_mixture = self.mixture_distribution.logits
        log_prob_mixture = log_prob_mixture[..., None]
        return torch.logsumexp(log_prob_mixture + log_prob_components, dim=-2) 
Developer: bayesiains, Project: nsf, Lines: 12, Source: mixture.py


Note: The torch.logsumexp examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and any use or redistribution must follow the corresponding project's license. Please do not repost without permission.