

Python distributions.kl_divergence Method Code Examples

This article collects typical usage examples of the Python method torch.distributions.kl_divergence. If you are wondering what distributions.kl_divergence does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples from its containing module, torch.distributions.


Below are 15 code examples of distributions.kl_divergence, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
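Before diving into the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the basic call. kl_divergence dispatches on the types of its two arguments and returns the elementwise KL divergence over the broadcast batch shape:

import torch
from torch.distributions import Normal, kl_divergence

# KL between two diagonal Gaussians; a closed form is registered for the
# (Normal, Normal) pair, so no sampling is involved.
p = Normal(torch.zeros(3), torch.ones(3))                  # N(0, 1) per dimension
q = Normal(torch.full((3,), 0.5), torch.full((3,), 2.0))   # N(0.5, 2^2) per dimension

kl = kl_divergence(p, q)   # shape (3,): one KL term per dimension
total = kl.sum()           # KL of the factorized joint distributions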

Example 1: test_kernel_symkl

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import kl_divergence [as alias]
def test_kernel_symkl(self):
        kernel = GaussianSymmetrizedKLKernel()
        kernel.lengthscale = 1.0

        values = torch.rand(100, 20)
        base_value = torch.zeros(1, 20)
        kernel_output = kernel(values, base_value)
        self.assertEqual(kernel_output.shape, torch.Size((100, 1)))

        # Each input row packs a mean (first 10 dims) and a log-variance
        # (last 10 dims) of a diagonal Gaussian.
        value_means = values[..., :10]
        value_stds = (1e-8 + values[..., 10:].exp()) ** 0.5
        value_dist = Normal(value_means.unsqueeze(0), value_stds.unsqueeze(0))

        base_dist = Normal(torch.zeros(1, 10), torch.ones(1, 10))

        result = -(kl_divergence(value_dist, base_dist) + kl_divergence(base_dist, value_dist)).sum(-1)
        self.assertLessEqual((kernel_output.evaluate() - result.exp().transpose(-2, -1)).norm(), 1e-5) 
Developer ID: cornellius-gp, Project: gpytorch, Lines of code: 19, Source file: test_gaussian_symmetrized_kl_kernel.py

Example 2: compute_elbo

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import kl_divergence [as alias]
def compute_elbo(self, p, occ, inputs, **kwargs):
        ''' Computes the evidence lower bound (ELBO).

        Args:
            p (tensor): sampled points
            occ (tensor): occupancy values for p
            inputs (tensor): conditioning input
        '''
        c = self.encode_inputs(inputs)
        q_z = self.infer_z(p, occ, c, **kwargs)
        z = q_z.rsample()
        p_r = self.decode(p, z, c, **kwargs)

        rec_error = -p_r.log_prob(occ).sum(dim=-1)
        kl = dist.kl_divergence(q_z, self.p0_z).sum(dim=-1)
        elbo = -rec_error - kl

        return elbo, rec_error, kl 
Developer ID: autonomousvision, Project: occupancy_networks, Lines of code: 20, Source file: __init__.py

Example 3: test_kl_divergence_diag_gaussian

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import kl_divergence [as alias]
def test_kl_divergence_diag_gaussian():
    """
    Test kl divergence between multivariate gaussian distributions with a diagonal covariance matrix
    """
    head = DiagGaussianActionHead(1, 5)

    distrib1 = d.MultivariateNormal(torch.tensor([1.0, -1.0]), covariance_matrix=torch.tensor([[2.0, 0.0], [0.0, 0.5]]))
    distrib2 = d.MultivariateNormal(torch.tensor([0.3, 0.7]), covariance_matrix=torch.tensor([[1.8, 0.0], [0.0, 5.5]]))

    # Head parameters are (mean, log_std) pairs with log_std = log(sqrt(var)),
    # transposed to the (action, parameter) layout the head expects.
    pd_params1 = torch.tensor([[1.0, -1.0], [np.log(np.sqrt(2.0)), np.log(np.sqrt(0.5))]]).t()
    pd_params2 = torch.tensor([[0.3, 0.7], [np.log(np.sqrt(1.8)), np.log(np.sqrt(5.5))]]).t()

    kl_div_1 = d.kl_divergence(distrib1, distrib2)
    kl_div_2 = head.kl_divergence(pd_params1[None], pd_params2[None])

    assert kl_div_1.item() == pytest.approx(kl_div_2.item(), 0.001) 
Developer ID: MillionIntegrals, Project: vel, Lines of code: 18, Source file: test_action_head.py

Example 4: test_kl_divergence_categorical

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import kl_divergence [as alias]
def test_kl_divergence_categorical():
    """
    Test KL divergence between categorical distributions
    """
    head = CategoricalActionHead(1, 5)

    logits1 = F.log_softmax(torch.tensor([0.0, 1.0, 2.0, 3.0, 4.0]), dim=0)
    logits2 = F.log_softmax(torch.tensor([-1.0, 0.2, 5.0, 2.0, 8.0]), dim=0)

    distrib1 = d.Categorical(logits=logits1)
    distrib2 = d.Categorical(logits=logits2)

    kl_div_1 = d.kl_divergence(distrib1, distrib2)
    kl_div_2 = head.kl_divergence(logits1[None], logits2[None])

    nt.assert_allclose(kl_div_1.item(), kl_div_2.item(), rtol=1e-5) 
Developer ID: MillionIntegrals, Project: vel, Lines of code: 18, Source file: test_action_head.py

Example 5: get_kl

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import kl_divergence [as alias]
def get_kl(self, q_z):
        ''' Returns the KL divergence.

        Args:
            q_z (distribution): predicted distribution over latent codes
        '''
        loss_kl = dist.kl_divergence(q_z, self.model.p0_z).mean()
        if torch.isnan(loss_kl):
            loss_kl = torch.tensor([0.]).to(self.device)
        return loss_kl 
Developer ID: autonomousvision, Project: occupancy_flow, Lines of code: 12, Source file: training.py

Example 6: compute_global_kl_divergence

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import kl_divergence [as alias]
def compute_global_kl_divergence(self) -> torch.Tensor:

        outputs = self.get_alphas_betas(as_numpy=False)
        alpha_posterior = outputs["alpha_posterior"]
        beta_posterior = outputs["beta_posterior"]
        alpha_prior = outputs["alpha_prior"]
        beta_prior = outputs["beta_prior"]

        return kl(
            Beta(alpha_posterior, beta_posterior), Beta(alpha_prior, beta_prior)
        ).sum() 
Developer ID: YosefLab, Project: scVI, Lines of code: 13, Source file: autozivae.py

Example 7: batch_vlb

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import kl_divergence [as alias]
def batch_vlb(self, batch, mask):
        """
        Compute differentiable lower bound for the given batch of objects
        and mask.
        """
        proposal, prior = self.make_latent_distributions(batch, mask)
        prior_regularization = self.prior_regularization(prior)
        latent = proposal.rsample()
        rec_params = self.generative_network(latent)
        rec_loss = self.rec_log_prob(batch, rec_params, mask)
        kl = kl_divergence(proposal, prior).view(batch.shape[0], -1).sum(-1)
        return rec_loss - kl + prior_regularization 
Developer ID: tigvarts, Project: vaeac, Lines of code: 14, Source file: VAEAC.py

Example 8: normal_kl_loss

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import kl_divergence [as alias]
def normal_kl_loss(mean, logvar, r_mean=None, r_logvar=None):
  if r_mean is None or r_logvar is None:
    # Closed-form KL to a standard normal prior, averaged over the batch.
    result = -0.5 * torch.mean(1 + logvar - mean.pow(2) - logvar.exp(), dim=0)
  else:
    distribution = Normal(mean, torch.exp(0.5 * logvar))
    reference = Normal(r_mean, torch.exp(0.5 * r_logvar))
    result = kl_divergence(distribution, reference)
  return result.sum() 
Developer ID: mjendrusch, Project: torchsupport, Lines of code: 10, Source file: vae.py

Example 9: gumbel_kl_loss

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import kl_divergence [as alias]
def gumbel_kl_loss(category, r_category=None):
  if r_category is None:
    # KL to a uniform prior: negative entropy plus log of the category count.
    result = torch.sum(category * torch.log(category + 1e-20), dim=1)
    result = result.mean(dim=0)
    result += torch.log(torch.tensor(category.size(-1), dtype=result.dtype))
  else:
    distribution = Categorical(category)
    reference = Categorical(r_category)
    result = kl_divergence(distribution, reference)
  return result 
Developer ID: mjendrusch, Project: torchsupport, Lines of code: 12, Source file: vae.py

Example 10: _vmf_kl_divergence

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import kl_divergence [as alias]
def _vmf_kl_divergence(self, location, kappa):
        """Get the estimated KL between the VMF function with a uniform hyperspherical prior."""
        return kl_divergence(VonMisesFisher(location, kappa), HypersphericalUniform(self.z_dim - 1, device=self.device)) 
Developer ID: tom-pelsmaeker, Project: deep-generative-lm, Lines of code: 5, Source file: base_decoder.py

Example 11: elbo

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import kl_divergence [as alias]
def elbo(self, image_real, depth, cam_K, cam_W, geometry):
        batch_size, _, N, M = depth.size()

        assert(depth.size(1) == 1)
        assert(cam_K.size() == (batch_size, 3, 4))
        assert(cam_W.size() == (batch_size, 3, 4))

        loc3d, mask = self.depth_map_to_3d(depth, cam_K, cam_W)
        geom_descr = self.encode_geometry(geometry)

        q_z = self.infer_z(image_real, geom_descr)
        z = q_z.rsample()

        loc3d = loc3d.view(batch_size, 3, N * M)
        x = self.decode(loc3d, geom_descr, z)
        x = x.view(batch_size, 3, N, M)

        if self.white_bg is False:
            x_bg = torch.zeros_like(x)
        else:
            x_bg = torch.ones_like(x)

        image_fake = (mask * x).permute(0, 1, 3, 2) + (1 - mask.permute(0, 1, 3, 2)) * x_bg

        recon_loss = F.mse_loss(image_fake, image_real).sum(dim=-1)
        kl = dist.kl_divergence(q_z, self.p0_z).sum(dim=-1)
        elbo = recon_loss.mean() + kl.mean()/float(N*M*3)
        return elbo, recon_loss.mean(), kl.mean()/float(N*M*3), image_fake 
Developer ID: autonomousvision, Project: texture_fields, Lines of code: 30, Source file: __init__.py

Example 12: compute_kl

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import kl_divergence [as alias]
def compute_kl(self, q_z):
        ''' Compute the KL divergence between the predicted and prior distributions.

        Args:
            q_z (dist): predicted distribution
        '''
        if q_z.mean.shape[-1] != 0:
            loss_kl = self.vae_beta * dist.kl_divergence(
                q_z, self.model.p0_z).mean()
            if torch.isnan(loss_kl):
                loss_kl = torch.tensor([0.]).to(self.device)
        else:
            loss_kl = torch.tensor([0.]).to(self.device)
        return loss_kl 
Developer ID: autonomousvision, Project: occupancy_flow, Lines of code: 16, Source file: training.py

Example 13: vae_objective

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import kl_divergence [as alias]
def vae_objective(model, x, K=1, beta=1.0, components=False, analytical_kl=False, **kwargs):
    """Computes E_{p(x)}[ELBO] """
    qz_x, px_z, zs = model(x, K)
    _, B, D = zs.size()
    flat_rest = torch.Size([*px_z.batch_shape[:2], -1])
    lpx_z = px_z.log_prob(x.expand(px_z.batch_shape)).view(flat_rest).sum(-1)

    pz = model.pz(*model.pz_params)
    # Use the analytic KL when one is registered for this pair of distribution
    # types; otherwise fall back to a single-sample Monte Carlo estimate.
    kld = dist.kl_divergence(qz_x, pz).unsqueeze(0).sum(-1) if \
        has_analytic_kl(type(qz_x), model.pz) and analytical_kl else \
        qz_x.log_prob(zs).sum(-1) - pz.log_prob(zs).sum(-1)

    obj = -lpx_z.mean(0).sum() + beta * kld.mean(0).sum()
    return (qz_x, px_z, lpx_z, kld, obj) if components else obj 
Developer ID: emilemathieu, Project: pvae, Lines of code: 16, Source file: objectives.py

Example 14: kl_pq

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import kl_divergence [as alias]
def kl_pq(self, p_params, q_params):
        p_pi = p_params['pi']
        q_pi = q_params['pi']
        return kl_divergence(Categorical(p_pi), Categorical(q_pi)) 
Developer ID: DeepX-inc, Project: machina, Lines of code: 6, Source file: categorical_pd.py

Example 15: kl_pq

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import kl_divergence [as alias]
def kl_pq(self, p_params, q_params):
        p_mean, p_log_std = p_params['mean'], p_params['log_std']
        q_mean, q_log_std = q_params['mean'], q_params['log_std']
        p_std = torch.exp(p_log_std)
        q_std = torch.exp(q_log_std)
        return torch.sum(kl_divergence(Normal(loc=p_mean, scale=p_std), Normal(loc=q_mean, scale=q_std)), dim=-1) 
Developer ID: DeepX-inc, Project: machina, Lines of code: 8, Source file: gaussian_pd.py
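A closing note on dispatch, with a hedged sketch (MyNormal is a hypothetical class, not taken from the examples above): kl_divergence looks up the pair of distribution types in a registry and raises NotImplementedError when no KL is registered for that pair; torch.distributions.register_kl adds an implementation for a new pair.

import torch
from torch.distributions import Normal, kl_divergence, register_kl

class MyNormal(Normal):
    """Hypothetical distribution used only to illustrate registration."""

@register_kl(MyNormal, Normal)
def _kl_mynormal_normal(p, q):
    # Delegate to the closed form already registered for (Normal, Normal).
    return kl_divergence(Normal(p.loc, p.scale), q)

p = MyNormal(torch.zeros(2), torch.ones(2))
q = Normal(torch.ones(2), torch.ones(2))
print(kl_divergence(p, q))  # tensor([0.5000, 0.5000])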


Note: the torch.distributions.kl_divergence examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by their authors; copyright in the source code remains with the original authors, and distribution and use are governed by each project's License. Do not republish without permission.