This article collects typical usage examples of torch.distributions.MultivariateNormal in Python. If you are unsure what distributions.MultivariateNormal does or how to call it, the curated code examples below may help. You can also read further about the containing module, torch.distributions.
The following 15 code examples of distributions.MultivariateNormal are shown, ordered by popularity by default.
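As a minimal, self-contained starting point (the numbers below are arbitrary and only for illustration), a MultivariateNormal can be built either from a full covariance matrix or from its lower-triangular Cholesky factor:

import torch
from torch.distributions import MultivariateNormal

loc = torch.zeros(3)
cov = torch.eye(3)

# Equivalent constructions: full covariance vs. its Cholesky factor.
dist_cov = MultivariateNormal(loc, covariance_matrix=cov)
dist_tril = MultivariateNormal(loc, scale_tril=torch.linalg.cholesky(cov))

x = dist_cov.sample((5,))       # shape (5, 3)
print(dist_cov.log_prob(x))     # shape (5,)
print(dist_tril.log_prob(x))    # same values, up to numerical error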
Example 1: _kernel_2d

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import MultivariateNormal [as alias]
def _kernel_2d(self, y, loc, h_var_inv, o_var_inv, c):
    tc = c if self._model.obs_ndim > 0 else c.unsqueeze(-2)

    # ===== Define covariance ===== #
    ttc = tc.transpose(-2, -1)
    diag_o_var_inv = construct_diag(o_var_inv if self._model.observable.ndim > 0 else o_var_inv.unsqueeze(-1))
    t2 = torch.matmul(ttc, torch.matmul(diag_o_var_inv, tc))
    cov = (construct_diag(h_var_inv) + t2).inverse()

    # ===== Get mean ===== #
    t1 = h_var_inv * loc
    t2 = torch.matmul(diag_o_var_inv, y if y.dim() > 0 else y.unsqueeze(-1))
    t3 = torch.matmul(ttc, t2.unsqueeze(-1))[..., 0]

    m = torch.matmul(cov, (t1 + t3).unsqueeze(-1))[..., 0]

    return MultivariateNormal(m, scale_tril=torch.cholesky(cov))
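Note that torch.cholesky, used above, is deprecated in recent PyTorch releases; on newer versions the final line would typically be written with torch.linalg.cholesky instead (a drop-in assumption, not a change to the original source):

    scale_tril = torch.linalg.cholesky(cov)   # same lower-triangular factor as torch.cholesky(cov)
    return MultivariateNormal(m, scale_tril=scale_tril)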
Example 2: test_UnscentedTransform2D

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import MultivariateNormal [as alias]
def test_UnscentedTransform2D(self):
    # ===== 2D model ===== #
    mat = torch.eye(2)
    scale = torch.diag(mat)

    norm = Normal(0., 1.)
    mvn = MultivariateNormal(torch.zeros(2), torch.eye(2))
    mvnlinear = AffineProcess((fmvn, g), (mat, scale), mvn, mvn)
    mvnoblinear = AffineObservations((fomvn, gomvn), (1.,), norm)

    mvnmodel = StateSpaceModel(mvnlinear, mvnoblinear)

    # ===== Perform unscented transform ===== #
    uft = UnscentedFilterTransform(mvnmodel)
    res = uft.initialize(3000)
    p = uft.predict(res)
    c = uft.correct(0., p)

    assert isinstance(c.x_dist(), MultivariateNormal) and c.x_dist().mean.shape == torch.Size([3000, 2])
Example 3: test_log_prob

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import MultivariateNormal [as alias]
def test_log_prob(self, cuda=False):
    device = torch.device("cuda") if cuda else torch.device("cpu")
    for dtype in (torch.float, torch.double):
        mean = torch.randn(4, device=device, dtype=dtype)
        var = torch.randn(4, device=device, dtype=dtype).abs_()
        values = torch.randn(4, device=device, dtype=dtype)

        res = MultivariateNormal(mean, DiagLazyTensor(var)).log_prob(values)
        actual = TMultivariateNormal(mean, torch.eye(4, device=device, dtype=dtype) * var).log_prob(values)
        self.assertLess((res - actual).div(res).abs().item(), 1e-2)

        mean = torch.randn(3, 4, device=device, dtype=dtype)
        var = torch.randn(3, 4, device=device, dtype=dtype).abs_()
        values = torch.randn(3, 4, device=device, dtype=dtype)

        res = MultivariateNormal(mean, DiagLazyTensor(var)).log_prob(values)
        actual = TMultivariateNormal(
            mean, var.unsqueeze(-1) * torch.eye(4, device=device, dtype=dtype).repeat(3, 1, 1)
        ).log_prob(values)
        self.assertLess((res - actual).div(res).abs().norm(), 1e-2)
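The test above compares a lazily represented diagonal covariance against a dense torch distribution. The same diagonal-versus-dense agreement can be checked with stock PyTorch alone; a minimal sketch (shapes chosen to mirror the test):

import torch
from torch.distributions import Independent, MultivariateNormal, Normal

mean = torch.randn(4)
var = torch.randn(4).abs_()
values = torch.randn(4)

diag_mvn = MultivariateNormal(mean, covariance_matrix=torch.diag(var))
factored = Independent(Normal(mean, var.sqrt()), 1)  # same density, stored as 4 independent 1D normals

# Both log-densities agree up to numerical error.
assert torch.allclose(diag_mvn.log_prob(values), factored.log_prob(values), atol=1e-4)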
Example 4: __init__

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import MultivariateNormal [as alias]
def __init__(self, mean, covariance_matrix, validate_args=False):
    self._islazy = isinstance(mean, LazyTensor) or isinstance(covariance_matrix, LazyTensor)
    if self._islazy:
        if validate_args:
            ms = mean.size(-1)
            cs1 = covariance_matrix.size(-1)
            cs2 = covariance_matrix.size(-2)
            if not (ms == cs1 and ms == cs2):
                raise ValueError(f"Wrong shapes in {self._repr_sizes(mean, covariance_matrix)}")

        self.loc = mean
        self._covar = covariance_matrix
        self.__unbroadcasted_scale_tril = None
        self._validate_args = validate_args
        batch_shape = _mul_broadcast_shape(self.loc.shape[:-1], covariance_matrix.shape[:-2])
        event_shape = self.loc.shape[-1:]

        # TODO: Integrate argument validation for LazyTensors into torch.distribution validation logic
        super(TMultivariateNormal, self).__init__(batch_shape, event_shape, validate_args=False)
    else:
        super().__init__(loc=mean, covariance_matrix=covariance_matrix, validate_args=validate_args)
Example 5: _fitting_multivari

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import MultivariateNormal [as alias]
def _fitting_multivari(self, best_samples):
    """
    Fit a multivariate Gaussian to the best samples and sample from it.

    Parameters
    ----------
    best_samples : torch.Tensor
        shape (self.cem_batch_size, self.num_best_sampling, self.dim_ac)

    Returns
    -------
    samples : torch.Tensor
    """
    def fitting(best_samples):
        mean = best_samples.mean(dim=0)
        fs_m = best_samples.sub(mean.expand_as(best_samples))
        cov_mat = fs_m.transpose(0, 1).mm(fs_m) / (self.num_sampling - 1)
        cov_mat = cov_mat + self.delta * torch.eye(cov_mat.shape[0])
        pd = MultivariateNormal(mean, cov_mat)
        samples = pd.sample((self.num_sampling,))
        return samples

    samples = torch.cat([fitting(best_sample)
                         for best_sample in best_samples], dim=0)
    return samples
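Stripped of the surrounding class, the fitting step amounts to estimating a mean and a jittered sample covariance, then resampling from the resulting Gaussian. A self-contained sketch, with num_sampling and delta as assumed stand-ins for the class attributes and dummy data:

import torch
from torch.distributions import MultivariateNormal

best_samples = torch.randn(100, 3)           # (num_best_sampling, dim_ac), dummy data
num_sampling, delta = 100, 1e-4

mean = best_samples.mean(dim=0)
centred = best_samples - mean
cov = centred.t().mm(centred) / (num_sampling - 1)
cov = cov + delta * torch.eye(cov.shape[0])  # jitter keeps the matrix positive definite

pd = MultivariateNormal(mean, covariance_matrix=cov)
new_samples = pd.sample((num_sampling,))     # shape (100, 3)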
Example 6: test_kl_divergence_diag_gaussian

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import MultivariateNormal [as alias]
def test_kl_divergence_diag_gaussian():
    """
    Test the KL divergence between multivariate Gaussian distributions with diagonal covariance matrices.
    """
    head = DiagGaussianActionHead(1, 5)

    distrib1 = d.MultivariateNormal(torch.tensor([1.0, -1.0]), covariance_matrix=torch.tensor([[2.0, 0.0], [0.0, 0.5]]))
    distrib2 = d.MultivariateNormal(torch.tensor([0.3, 0.7]), covariance_matrix=torch.tensor([[1.8, 0.0], [0.0, 5.5]]))

    pd_params1 = torch.tensor([[1.0, -1.0], [np.log(np.sqrt(2.0)), np.log(np.sqrt(0.5))]]).t()
    pd_params2 = torch.tensor([[0.3, 0.7], [np.log(np.sqrt(1.8)), np.log(np.sqrt(5.5))]]).t()

    kl_div_1 = d.kl_divergence(distrib1, distrib2)
    kl_div_2 = head.kl_divergence(pd_params1[None], pd_params2[None])

    assert kl_div_1.item() == pytest.approx(kl_div_2.item(), 0.001)
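For reference, kl_divergence implements the closed-form Gaussian KL, 0.5 * (tr(Sq^-1 Sp) + (mq - mp)^T Sq^-1 (mq - mp) - k + ln(det Sq / det Sp)). A small sketch checking the built-in value against that formula for the two distributions used in the test:

import torch
from torch.distributions import MultivariateNormal, kl_divergence

mu_p, cov_p = torch.tensor([1.0, -1.0]), torch.diag(torch.tensor([2.0, 0.5]))
mu_q, cov_q = torch.tensor([0.3, 0.7]), torch.diag(torch.tensor([1.8, 5.5]))

p = MultivariateNormal(mu_p, covariance_matrix=cov_p)
q = MultivariateNormal(mu_q, covariance_matrix=cov_q)

q_inv = torch.inverse(cov_q)
diff = mu_q - mu_p
manual = 0.5 * (torch.trace(q_inv @ cov_p)
                + diff @ q_inv @ diff
                - mu_p.numel()
                + torch.logdet(cov_q) - torch.logdet(cov_p))

assert torch.isclose(kl_divergence(p, q), manual, atol=1e-5)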
Example 7: _helper

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import MultivariateNormal [as alias]
def _helper(m, c):
    if m.shape[-1] > 1:
        return MultivariateNormal(m, c)

    return Normal(m[..., 0], c[..., 0, 0].sqrt())
Example 8: _construct_mvn

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import MultivariateNormal [as alias]
def _construct_mvn(x: torch.Tensor, w: torch.Tensor):
    """
    Constructs a multivariate normal distribution from weighted samples.
    :param x: The samples
    :param w: The weights
    """
    mean = (x * w.unsqueeze(-1)).sum(0)
    centralized = x - mean
    cov = torch.matmul(w * centralized.t(), centralized)

    return MultivariateNormal(mean, scale_tril=torch.cholesky(cov))
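The helper assumes the weights have already been normalized to sum to one (as importance weights typically are); with uniform weights it reduces to the plain sample mean and a covariance without Bessel's correction. A brief usage sketch under that assumption:

import torch

x = torch.randn(500, 2)                  # particles
w = torch.full((500,), 1.0 / 500)        # normalized (here uniform) weights

proposal = _construct_mvn(x, w)          # the helper defined above
new_particles = proposal.sample((500,))  # resample from the fitted Gaussian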
Example 9: pre_weight

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import MultivariateNormal [as alias]
def pre_weight(self, y, x):
    hloc, hscale = self._model.hidden.mean_scale(x)
    oloc, oscale = self._model.observable.mean_scale(hloc)

    c = self._model.observable.theta_vals[0]
    ovar = oscale ** 2
    hvar = hscale ** 2

    if self._model.obs_ndim < 1:
        if self._model.hidden_ndim < 1:
            cov = ovar + c ** 2 * hvar
        else:
            tc = c.unsqueeze(-2)
            cov = (ovar + tc.matmul(tc.transpose(-2, -1)) * hvar)[..., 0, 0]

        return Normal(oloc, cov.sqrt()).log_prob(y)

    if self._model.hidden_ndim < 1:
        tc = c.unsqueeze(-2)
        cov = (ovar + tc.matmul(tc.transpose(-2, -1)) * hvar)[..., 0, 0]
    else:
        diag_ovar = construct_diag(ovar)
        diag_hvar = construct_diag(hvar)

        cov = diag_ovar + c.matmul(diag_hvar).matmul(c.transpose(-2, -1))

    return MultivariateNormal(oloc, cov).log_prob(y)
Example 10: __init__

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import MultivariateNormal [as alias]
def __init__(self, dim, ill_conditioned):
    cov = torch.eye(dim)
    # cov = torch.range(1, dim).diag()
    if ill_conditioned:
        cov[dim // 2:, dim // 2:] = 0.0001 * torch.eye(dim // 2)

    # mean = 0 * torch.ones(dim)
    mean = torch.range(1, dim) / 10

    m = MultivariateNormal(mean, cov)
    self.gmm = m
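torch.range is deprecated (and, unlike torch.arange, includes its end point); on current PyTorch the mean above would typically be written as:

    mean = torch.arange(1, dim + 1, dtype=torch.float32) / 10   # same values as torch.range(1, dim) / 10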
Example 11: deterministic_sample_mvnorm

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import MultivariateNormal [as alias]
def deterministic_sample_mvnorm(distribution: MultivariateNormal, eps: Optional[Tensor] = None) -> Tensor:
    if isinstance(eps, Tensor):
        if eps.shape[-len(distribution.event_shape):] != distribution.event_shape:
            raise RuntimeError(f"Expected shape ending in {distribution.event_shape}, got {eps.shape}.")
    else:
        shape = distribution.batch_shape + distribution.event_shape
        if eps is None:
            eps = 1.0
        eps *= _standard_normal(shape, dtype=distribution.loc.dtype, device=distribution.loc.device)

    return distribution.loc + _batch_mv(distribution._unbroadcasted_scale_tril, eps)
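The helper relies on the private _unbroadcasted_scale_tril and _batch_mv utilities; the same reparameterization can be written against public attributes only. A minimal sketch with a fixed noise tensor (eps), which is what makes the draw deterministic:

import torch
from torch.distributions import MultivariateNormal

dist = MultivariateNormal(torch.zeros(2), covariance_matrix=torch.eye(2))
eps = torch.tensor([0.1, -0.3])          # fixed standard-normal noise

# loc + L @ eps, where L is the lower Cholesky factor of the covariance
sample = dist.loc + dist.scale_tril @ eps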
Example 12: sample_transition

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import MultivariateNormal [as alias]
def sample_transition(self, eps: Optional[Tensor] = None) -> Tensor:
    distribution = MultivariateNormal(loc=self.means, covariance_matrix=self.covs)
    return deterministic_sample_mvnorm(distribution, eps=eps)
Example 13: sample_measurements

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import MultivariateNormal [as alias]
def sample_measurements(self, eps: Optional[Tensor] = None) -> Tensor:
    distribution = MultivariateNormal(self.predictions, self.prediction_uncertainty)
    return deterministic_sample_mvnorm(distribution, eps=eps)
Example 14: _log_prob_with_subsetting

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import MultivariateNormal [as alias]
def _log_prob_with_subsetting(self,
                              obs: Tensor,
                              group_idx: Selector,
                              time_idx: Selector,
                              measure_idx: Selector,
                              **kwargs) -> Tensor:
    self._check_lp_sub_input(group_idx, time_idx)

    idx_3d = bmat_idx(group_idx, time_idx, measure_idx)
    idx_4d = bmat_idx(group_idx, time_idx, measure_idx, measure_idx)

    dist = MultivariateNormal(self.predictions[idx_3d], self.prediction_uncertainty[idx_4d])
    return dist.log_prob(obs[idx_3d])
Example 15: act

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import MultivariateNormal [as alias]
def act(self, state, memory):
    action_mean = self.actor(state)
    cov_mat = torch.diag(self.action_var).to(device)

    dist = MultivariateNormal(action_mean, cov_mat)
    action = dist.sample()
    action_logprob = dist.log_prob(action)

    memory.states.append(state)
    memory.actions.append(action)
    memory.logprobs.append(action_logprob)

    return action.detach()
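Outside of the class, the core of act is sampling a continuous action from a diagonal-covariance Gaussian centred on the policy output. A minimal, self-contained sketch with made-up dimensions (action_mean and action_var here are assumed placeholders, not the original model's attributes):

import torch
from torch.distributions import MultivariateNormal

action_mean = torch.zeros(4)                 # e.g. output of a policy network
action_var = torch.full((4,), 0.25)          # fixed exploration variance per action dimension

dist = MultivariateNormal(action_mean, torch.diag(action_var))
action = dist.sample()
action_logprob = dist.log_prob(action)       # scalar log-probability used for the policy update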