

Python distributions.MultivariateNormal Code Examples

This article collects typical usage examples of torch.distributions.MultivariateNormal in Python. If you are wondering what distributions.MultivariateNormal is for, how to use it, or are looking for concrete examples, the curated code samples below may help. You can also browse further usage examples from the torch.distributions module.


The following presents 15 code examples of distributions.MultivariateNormal, sorted by popularity by default.
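
Before the examples, here is a minimal stand-alone sketch of the class itself (the mean, covariance, and sample counts below are illustrative, not drawn from any of the projects):

import torch
from torch.distributions import MultivariateNormal

# An illustrative 2-D Gaussian: a mean vector and a positive-definite covariance matrix.
mean = torch.tensor([0.0, 1.0])
cov = torch.tensor([[2.0, 0.3],
                    [0.3, 0.5]])

dist = MultivariateNormal(mean, covariance_matrix=cov)

x = dist.sample((5,))      # 5 draws, shape (5, 2)
logp = dist.log_prob(x)    # log-density of each draw, shape (5,)

# The lower Cholesky factor can be passed via scale_tril instead of the full
# covariance matrix, which skips the internal factorization:
dist2 = MultivariateNormal(mean, scale_tril=torch.cholesky(cov))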

Example 1: _kernel_2d

# Required module import: from torch import distributions [as alias]
# Or: from torch.distributions import MultivariateNormal [as alias]
def _kernel_2d(self, y, loc, h_var_inv, o_var_inv, c):
        tc = c if self._model.obs_ndim > 0 else c.unsqueeze(-2)

        # ===== Define covariance ===== #
        ttc = tc.transpose(-2, -1)
        diag_o_var_inv = construct_diag(o_var_inv if self._model.observable.ndim > 0 else o_var_inv.unsqueeze(-1))
        t2 = torch.matmul(ttc, torch.matmul(diag_o_var_inv, tc))

        cov = (construct_diag(h_var_inv) + t2).inverse()

        # ===== Get mean ===== #
        t1 = h_var_inv * loc

        t2 = torch.matmul(diag_o_var_inv, y if y.dim() > 0 else y.unsqueeze(-1))
        t3 = torch.matmul(ttc, t2.unsqueeze(-1))[..., 0]

        m = torch.matmul(cov, (t1 + t3).unsqueeze(-1))[..., 0]

        return MultivariateNormal(m, scale_tril=torch.cholesky(cov)) 
Developer: tingiskhan, Project: pyfilter, Lines: 21, Source: linear.py

Example 2: test_UnscentedTransform2D

# Required module import: from torch import distributions [as alias]
# Or: from torch.distributions import MultivariateNormal [as alias]
def test_UnscentedTransform2D(self):
        # ===== 2D model ===== #
        mat = torch.eye(2)
        scale = torch.diag(mat)

        norm = Normal(0., 1.)
        mvn = MultivariateNormal(torch.zeros(2), torch.eye(2))
        mvnlinear = AffineProcess((fmvn, g), (mat, scale), mvn, mvn)
        mvnoblinear = AffineObservations((fomvn, gomvn), (1.,), norm)

        mvnmodel = StateSpaceModel(mvnlinear, mvnoblinear)

        # ===== Perform unscented transform ===== #
        uft = UnscentedFilterTransform(mvnmodel)
        res = uft.initialize(3000)
        p = uft.predict(res)
        c = uft.correct(0., p)

        assert isinstance(c.x_dist(), MultivariateNormal) and c.x_dist().mean.shape == torch.Size([3000, 2]) 
Developer: tingiskhan, Project: pyfilter, Lines: 21, Source: utils.py

Example 3: test_log_prob

# Required module import: from torch import distributions [as alias]
# Or: from torch.distributions import MultivariateNormal [as alias]
def test_log_prob(self, cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")
        for dtype in (torch.float, torch.double):
            mean = torch.randn(4, device=device, dtype=dtype)
            var = torch.randn(4, device=device, dtype=dtype).abs_()
            values = torch.randn(4, device=device, dtype=dtype)

            res = MultivariateNormal(mean, DiagLazyTensor(var)).log_prob(values)
            actual = TMultivariateNormal(mean, torch.eye(4, device=device, dtype=dtype) * var).log_prob(values)
            self.assertLess((res - actual).div(res).abs().item(), 1e-2)

            mean = torch.randn(3, 4, device=device, dtype=dtype)
            var = torch.randn(3, 4, device=device, dtype=dtype).abs_()
            values = torch.randn(3, 4, device=device, dtype=dtype)

            res = MultivariateNormal(mean, DiagLazyTensor(var)).log_prob(values)
            actual = TMultivariateNormal(
                mean, var.unsqueeze(-1) * torch.eye(4, device=device, dtype=dtype).repeat(3, 1, 1)
            ).log_prob(values)
            self.assertLess((res - actual).div(res).abs().norm(), 1e-2) 
Developer: cornellius-gp, Project: gpytorch, Lines: 22, Source: test_multivariate_normal.py

Example 4: __init__

# Required module import: from torch import distributions [as alias]
# Or: from torch.distributions import MultivariateNormal [as alias]
def __init__(self, mean, covariance_matrix, validate_args=False):
        self._islazy = isinstance(mean, LazyTensor) or isinstance(covariance_matrix, LazyTensor)
        if self._islazy:
            if validate_args:
                ms = mean.size(-1)
                cs1 = covariance_matrix.size(-1)
                cs2 = covariance_matrix.size(-2)
                if not (ms == cs1 and ms == cs2):
                    raise ValueError(f"Wrong shapes in {self._repr_sizes(mean, covariance_matrix)}")
            self.loc = mean
            self._covar = covariance_matrix
            self.__unbroadcasted_scale_tril = None
            self._validate_args = validate_args
            batch_shape = _mul_broadcast_shape(self.loc.shape[:-1], covariance_matrix.shape[:-2])
            event_shape = self.loc.shape[-1:]
            # TODO: Integrate argument validation for LazyTensors into torch.distribution validation logic
            super(TMultivariateNormal, self).__init__(batch_shape, event_shape, validate_args=False)
        else:
            super().__init__(loc=mean, covariance_matrix=covariance_matrix, validate_args=validate_args) 
Developer: cornellius-gp, Project: gpytorch, Lines: 21, Source: multivariate_normal.py

Example 5: _fitting_multivari

# Required module import: from torch import distributions [as alias]
# Or: from torch.distributions import MultivariateNormal [as alias]
def _fitting_multivari(self, best_samples):
        """
        Fit multivariate gaussian and sampling from it

        Parameters
        ----------
        best_samples : torch.Tensor
            shape (self.cem_batch_size, self.num_best_sampling, self.dim_ac)

        Returns
        -------
        samples : torch.Tensor
        """
        def fitting(best_samples):
            mean = best_samples.mean(dim=0)
            fs_m = best_samples.sub(mean.expand_as(best_samples))
            cov_mat = fs_m.transpose(0, 1).mm(fs_m) / (self.num_sampling - 1)
            cov_mat = cov_mat + self.delta * torch.eye(cov_mat.shape[0])
            pd = MultivariateNormal(mean, cov_mat)
            samples = pd.sample((self.num_sampling,))
            return samples
        samples = torch.cat([fitting(best_sample)
                             for best_sample in best_samples], dim=0)
        return samples 
Developer: DeepX-inc, Project: machina, Lines: 26, Source: cem_state_action_vfunc.py
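
The inner fitting closure above can also be exercised on its own; the following is a stand-alone sketch under assumed shapes (the sample counts, the delta jitter, and the helper name fit_and_resample are illustrative, not part of the machina project):

import torch
from torch.distributions import MultivariateNormal

def fit_and_resample(best_samples, num_sampling=100, delta=1e-4):
    # best_samples: (num_best_sampling, dim_ac)
    mean = best_samples.mean(dim=0)
    centered = best_samples - mean
    cov = centered.t().mm(centered) / (best_samples.shape[0] - 1)
    cov = cov + delta * torch.eye(cov.shape[0])   # jitter keeps cov positive definite
    return MultivariateNormal(mean, cov).sample((num_sampling,))

resampled = fit_and_resample(torch.randn(20, 3))   # shape (100, 3)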

Example 6: test_kl_divergence_diag_gaussian

# Required module import: from torch import distributions [as alias]
# Or: from torch.distributions import MultivariateNormal [as alias]
def test_kl_divergence_diag_gaussian():
    """
    Test kl divergence between multivariate gaussian distributions with a diagonal covariance matrix
    """
    head = DiagGaussianActionHead(1, 5)

    distrib1 = d.MultivariateNormal(torch.tensor([1.0, -1.0]), covariance_matrix=torch.tensor([[2.0, 0.0], [0.0, 0.5]]))
    distrib2 = d.MultivariateNormal(torch.tensor([0.3, 0.7]), covariance_matrix=torch.tensor([[1.8, 0.0], [0.0, 5.5]]))

    pd_params1 = torch.tensor([[1.0, -1.0], [np.log(np.sqrt(2.0)), np.log(np.sqrt(0.5))]]).t()
    pd_params2 = torch.tensor([[0.3, 0.7], [np.log(np.sqrt(1.8)), np.log(np.sqrt(5.5))]]).t()

    kl_div_1 = d.kl_divergence(distrib1, distrib2)
    kl_div_2 = head.kl_divergence(pd_params1[None], pd_params2[None])

    assert kl_div_1.item() == pytest.approx(kl_div_2.item(), 0.001) 
Developer: MillionIntegrals, Project: vel, Lines: 18, Source: test_action_head.py

Example 7: _helper

# Required module import: from torch import distributions [as alias]
# Or: from torch.distributions import MultivariateNormal [as alias]
def _helper(m, c):
        if m.shape[-1] > 1:
            return MultivariateNormal(m, c)

        return Normal(m[..., 0], c[..., 0, 0].sqrt()) 
Developer: tingiskhan, Project: pyfilter, Lines: 7, Source: uft.py

Example 8: _construct_mvn

# Required module import: from torch import distributions [as alias]
# Or: from torch.distributions import MultivariateNormal [as alias]
def _construct_mvn(x: torch.Tensor, w: torch.Tensor):
    """
    Constructs a multivariate normal distribution of weighted samples.
    :param x: The samples
    :param w: The weights
    """

    mean = (x * w.unsqueeze(-1)).sum(0)
    centralized = x - mean
    cov = torch.matmul(w * centralized.t(), centralized)

    return MultivariateNormal(mean, scale_tril=torch.cholesky(cov)) 
Developer: tingiskhan, Project: pyfilter, Lines: 14, Source: utils.py
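
A possible call site for _construct_mvn, assuming the weights have already been normalized to sum to one (the shapes and tensors are made up for illustration):

import torch

x = torch.randn(500, 3)          # 500 samples of a 3-dimensional quantity
logw = torch.randn(500)          # unnormalized log-weights
w = torch.softmax(logw, dim=0)   # normalize so the weighted mean and covariance are valid

approx = _construct_mvn(x, w)
print(approx.mean.shape, approx.covariance_matrix.shape)   # torch.Size([3]) torch.Size([3, 3])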

Example 9: pre_weight

# Required module import: from torch import distributions [as alias]
# Or: from torch.distributions import MultivariateNormal [as alias]
def pre_weight(self, y, x):
        hloc, hscale = self._model.hidden.mean_scale(x)
        oloc, oscale = self._model.observable.mean_scale(hloc)

        c = self._model.observable.theta_vals[0]
        ovar = oscale ** 2
        hvar = hscale ** 2

        if self._model.obs_ndim < 1:
            if self._model.hidden_ndim < 1:
                cov = ovar + c ** 2 * hvar
            else:
                tc = c.unsqueeze(-2)
                cov = (ovar + tc.matmul(tc.transpose(-2, -1)) * hvar)[..., 0, 0]

            return Normal(oloc, cov.sqrt()).log_prob(y)

        if self._model.hidden_ndim < 1:
            tc = c.unsqueeze(-2)
            cov = (ovar + tc.matmul(tc.transpose(-2, -1)) * hvar)[..., 0, 0]
        else:
            diag_ovar = construct_diag(ovar)
            diag_hvar = construct_diag(hvar)
            cov = diag_ovar + c.matmul(diag_hvar).matmul(c.transpose(-2, -1))

        return MultivariateNormal(oloc, cov).log_prob(y) 
Developer: tingiskhan, Project: pyfilter, Lines: 28, Source: linear.py
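
The multivariate branch above is the marginal distribution of a linear-Gaussian observation y = C x + ε, whose covariance is diag(ovar) + C · diag(hvar) · Cᵀ. A small numeric sketch of that formula (the dimensions and tensors are illustrative):

import torch
from torch.distributions import MultivariateNormal

hvar = torch.tensor([0.5, 2.0])        # hidden-state variances, diag(hvar)
ovar = torch.tensor([0.1, 0.1, 0.1])   # observation noise variances, diag(ovar)
c = torch.randn(3, 2)                  # observation matrix C mapping hidden -> observed
oloc = torch.zeros(3)                  # predicted observation mean

cov = torch.diag(ovar) + c @ torch.diag(hvar) @ c.t()
logp = MultivariateNormal(oloc, cov).log_prob(torch.zeros(3))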

Example 10: __init__

# Required module import: from torch import distributions [as alias]
# Or: from torch.distributions import MultivariateNormal [as alias]
def __init__(self, dim, ill_conditioned):
        cov = torch.eye(dim)
        # cov = torch.range(1, dim).diag()
        if ill_conditioned:
            cov[dim // 2:, dim // 2:] = 0.0001 * torch.eye(dim // 2)
        # mean = 0 * torch.ones(dim)
        mean = torch.range(1, dim) / 10
        m = MultivariateNormal(mean, cov)
        self.gmm = m 
Developer: ermongroup, Project: ncsn, Lines: 11, Source: gmm.py
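
Note that torch.range is deprecated; a present-day equivalent of the construction above would use torch.arange, whose end point is exclusive, so the upper bound shifts by one (dim and ill_conditioned below are illustrative values):

import torch
from torch.distributions import MultivariateNormal

dim, ill_conditioned = 8, True

cov = torch.eye(dim)
if ill_conditioned:
    cov[dim // 2:, dim // 2:] = 0.0001 * torch.eye(dim // 2)

# torch.range(1, dim) / 10 produced [0.1, ..., dim / 10] including the end point;
# torch.arange needs dim + 1 as its (exclusive) bound to match.
mean = torch.arange(1, dim + 1, dtype=torch.float32) / 10

m = MultivariateNormal(mean, cov)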

Example 11: deterministic_sample_mvnorm

# Required module import: from torch import distributions [as alias]
# Or: from torch.distributions import MultivariateNormal [as alias]
def deterministic_sample_mvnorm(distribution: MultivariateNormal, eps: Optional[Tensor] = None) -> Tensor:
    if isinstance(eps, Tensor):
        if eps.shape[-len(distribution.event_shape):] != distribution.event_shape:
            raise RuntimeError(f"Expected shape ending in {distribution.event_shape}, got {eps.shape}.")

    else:
        shape = distribution.batch_shape + distribution.event_shape
        if eps is None:
            eps = 1.0
        eps *= _standard_normal(shape, dtype=distribution.loc.dtype, device=distribution.loc.device)
    return distribution.loc + _batch_mv(distribution._unbroadcasted_scale_tril, eps) 
Developer: strongio, Project: torch-kalman, Lines: 13, Source: utils.py
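
A short usage sketch of the helper (the shapes are illustrative): passing the same eps twice makes the draw reproducible, and an all-zero eps returns the distribution mean, since the scale_tril is multiplied by zero.

import torch
from torch.distributions import MultivariateNormal

dist = MultivariateNormal(torch.zeros(4, 2), torch.eye(2))   # batch of 4 bivariate normals

noise = torch.randn(4, 2)
a = deterministic_sample_mvnorm(dist, eps=noise)   # deterministic given this noise
b = deterministic_sample_mvnorm(dist, eps=noise)   # identical to a

mean_draw = deterministic_sample_mvnorm(dist, eps=torch.zeros(4, 2))   # equals dist.loc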

Example 12: sample_transition

# Required module import: from torch import distributions [as alias]
# Or: from torch.distributions import MultivariateNormal [as alias]
def sample_transition(self, eps: Optional[Tensor] = None) -> Tensor:
        distribution = MultivariateNormal(loc=self.means, covariance_matrix=self.covs)
        return deterministic_sample_mvnorm(distribution, eps=eps) 
Developer: strongio, Project: torch-kalman, Lines: 5, Source: gaussian.py

Example 13: sample_measurements

# Required module import: from torch import distributions [as alias]
# Or: from torch.distributions import MultivariateNormal [as alias]
def sample_measurements(self, eps: Optional[Tensor] = None) -> Tensor:
        distribution = MultivariateNormal(self.predictions, self.prediction_uncertainty)
        return deterministic_sample_mvnorm(distribution, eps=eps) 
Developer: strongio, Project: torch-kalman, Lines: 5, Source: gaussian.py

Example 14: _log_prob_with_subsetting

# Required module import: from torch import distributions [as alias]
# Or: from torch.distributions import MultivariateNormal [as alias]
def _log_prob_with_subsetting(self,
                                  obs: Tensor,
                                  group_idx: Selector,
                                  time_idx: Selector,
                                  measure_idx: Selector,
                                  **kwargs) -> Tensor:
        self._check_lp_sub_input(group_idx, time_idx)

        idx_3d = bmat_idx(group_idx, time_idx, measure_idx)
        idx_4d = bmat_idx(group_idx, time_idx, measure_idx, measure_idx)

        dist = MultivariateNormal(self.predictions[idx_3d], self.prediction_uncertainty[idx_4d])
        return dist.log_prob(obs[idx_3d]) 
Developer: strongio, Project: torch-kalman, Lines: 15, Source: gaussian.py

Example 15: act

# Required module import: from torch import distributions [as alias]
# Or: from torch.distributions import MultivariateNormal [as alias]
def act(self, state, memory):
        action_mean = self.actor(state)
        cov_mat = torch.diag(self.action_var).to(device)
        
        dist = MultivariateNormal(action_mean, cov_mat)
        action = dist.sample()
        action_logprob = dist.log_prob(action)
        
        memory.states.append(state)
        memory.actions.append(action)
        memory.logprobs.append(action_logprob)
        
        return action.detach() 
Developer: nikhilbarhate99, Project: PPO-PyTorch, Lines: 15, Source: PPO_continuous.py
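
During the PPO update the same distribution is typically rebuilt to score the stored actions under the current policy; the following is a hedged sketch of that evaluation step (actor, action_var, and the tensor shapes are placeholders, not the project's exact code):

import torch
from torch.distributions import MultivariateNormal

def evaluate(actor, action_var, states, actions):
    action_mean = actor(states)                                    # (batch, action_dim)
    cov_mat = torch.diag_embed(action_var.expand_as(action_mean))  # (batch, action_dim, action_dim)
    dist = MultivariateNormal(action_mean, cov_mat)
    logprobs = dist.log_prob(actions)                              # (batch,)
    entropy = dist.entropy()                                       # (batch,)
    return logprobs, entropy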


Note: The torch.distributions.MultivariateNormal examples in this article were compiled by 純淨天空 from open-source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and the copyright of the source code remains with those authors. For redistribution and use, please refer to the license of the corresponding project; do not republish without permission.