

Python distributions.Independent Method Code Examples

This article collects typical usage examples of Python's torch.distributions.Independent. If you have been wondering what distributions.Independent does, how to use it, or what working examples look like, the curated snippets below should help. You can also explore further usage examples from the torch.distributions module.


The following presents 15 code examples of the distributions.Independent method, sorted by popularity by default.
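Before diving into the project-specific examples, here is a minimal, self-contained sketch (written for this article, not taken from any project below) of what Independent does: it reinterprets some of a distribution's rightmost batch dimensions as event dimensions, so that log_prob sums over them instead of returning one value per dimension.

import torch
from torch.distributions import Normal, Independent

loc, scale = torch.zeros(3), torch.ones(3)

# A Normal with batch_shape (3,): log_prob returns one value per component.
normal = Normal(loc, scale)
print(normal.batch_shape, normal.event_shape)      # torch.Size([3]) torch.Size([])

# Independent(..., 1) folds the last batch dimension into the event shape,
# turning it into a diagonal-covariance multivariate normal.
diag_mvn = Independent(normal, 1)
print(diag_mvn.batch_shape, diag_mvn.event_shape)  # torch.Size([]) torch.Size([3])

x = diag_mvn.sample()
assert torch.allclose(diag_mvn.log_prob(x), normal.log_prob(x).sum(-1))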

Example 1: __init__

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Independent [as alias]
def __init__(self, hidden, a=1., scale=1.):
        """
        Implements a State Space model that's linear in the observation equation but has arbitrary dynamics in the
        state process.
        :param hidden: The hidden dynamics
        :param a: The A-matrix
        :param scale: The variance of the observations
        """

        # ===== Convoluted way to decide number of dimensions ===== #
        dim, is_1d = _get_shape(a)

        # ====== Define distributions ===== #
        n = dists.Normal(0., 1.) if is_1d else dists.Independent(dists.Normal(torch.zeros(dim), torch.ones(dim)), 1)

        if not isinstance(scale, (torch.Tensor, float, dists.Distribution)):
            raise ValueError(f'`scale` parameter must be numeric type!')

        super().__init__(hidden, a, scale, n) 
Author: tingiskhan, Project: pyfilter, Lines of code: 21, Source file: linear.py

Example 2: __init__

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Independent [as alias]
def __init__(self, theta, initial_dist, dt, num_steps=10):
        """
        Similar to `OneFactorFractionalStochasticSIR`, but we now have two sources of randomness originating from shocks
        to both of the parameters `beta` and `gamma`.
        :param theta: The parameters (beta, gamma, sigma, eta)
        """

        if initial_dist.event_shape != torch.Size([3]):
            raise NotImplementedError('Must be of size 3!')

        def g(x, gamma, beta, sigma, eps):
            s = torch.zeros((*x.shape[:-1], 3, 2), device=x.device)

            s[..., 0, 0] = -sigma * x[..., 0] * x[..., 1]
            s[..., 1, 0] = -s[..., 0, 0]
            s[..., 1, 1] = -eps * x[..., 1]
            s[..., 2, 1] = -s[..., 1, 1]

            return s

        f_ = lambda u, beta, gamma, sigma, eps: f(u, beta, gamma, sigma)
        inc_dist = Independent(Normal(torch.zeros(2), math.sqrt(dt) * torch.ones(2)), 1)

        super().__init__((f_, g), theta, initial_dist, inc_dist, dt=dt, num_steps=num_steps) 
Author: tingiskhan, Project: pyfilter, Lines of code: 26, Source file: sir.py

Example 3: __init__

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Independent [as alias]
def __init__(self, kappa, gamma, sigma, ndim: int, dt: float):
        """
        Implements the Ornstein-Uhlenbeck process.
        :param kappa: The reversion parameter
        :param gamma: The mean parameter
        :param sigma: The standard deviation
        :param ndim: The number of dimensions for the Brownian motion
        :param dt: The time step of the discretization
        """

        def f(x: torch.Tensor, reversion: object, level: object, std: object):
            return level + (x - level) * torch.exp(-reversion * dt)

        def g(x: torch.Tensor, reversion: object, level: object, std: object):
            return std / (2 * reversion).sqrt() * (1 - torch.exp(-2 * reversion * dt)).sqrt()

        if ndim > 1:
            dist = Independent(Normal(torch.zeros(ndim), torch.ones(ndim)), 1)
        else:
            dist = Normal(0., 1)

        super().__init__((f, g), (kappa, gamma, sigma), dist, dist) 
Author: tingiskhan, Project: pyfilter, Lines of code: 23, Source file: ou.py

Example 4: test_MultiDimensional

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Independent [as alias]
def test_MultiDimensional(self):
        mu = torch.zeros(2)
        scale = torch.ones_like(mu)

        shape = 1000, 100

        mvn = Independent(Normal(mu, scale), 1)
        mvn = AffineProcess((f, g), (1., 1.), mvn, mvn)

        # ===== Initialize ===== #
        x = mvn.i_sample(shape)

        # ===== Propagate ===== #
        num = 100
        samps = [x]
        for t in range(num):
            samps.append(mvn.propagate(samps[-1]))

        samps = torch.stack(samps)
        self.assertEqual(samps.size(), torch.Size([num + 1, *shape, *mu.shape]))

        # ===== Sample path ===== #
        path = mvn.sample_path(num + 1, shape)
        self.assertEqual(samps.shape, path.shape) 
Author: tingiskhan, Project: pyfilter, Lines of code: 26, Source file: timeseries.py

Example 5: forward

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Independent [as alias]
def forward(self, input, params=None):
        if params is None:
            params = OrderedDict(self.named_parameters())

        output = input
        for i in range(1, self.num_layers):
            output = F.linear(output,
                              weight=params['layer{0}.weight'.format(i)],
                              bias=params['layer{0}.bias'.format(i)])
            output = self.nonlinearity(output)

        mu = F.linear(output,
                      weight=params['mu.weight'],
                      bias=params['mu.bias'])
        scale = torch.exp(torch.clamp(params['sigma'], min=self.min_log_std))

        return Independent(Normal(loc=mu, scale=scale), 1) 
Author: tristandeleu, Project: pytorch-maml-rl, Lines of code: 19, Source file: normal_mlp.py

Example 6: dist

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Independent [as alias]
def dist(self):
        return Independent(Normal(self._mean, self._log_std.exp()), self._model.ndim + 1) 
Author: tingiskhan, Project: pyfilter, Lines of code: 4, Source file: meanfield.py
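The dist property above reinterprets more than one trailing dimension (self._model.ndim + 1). Below is a standalone sketch of the same pattern, with shapes invented here, where a mean-field approximation over a matrix of parameter values is treated as a single event:

import torch
from torch.distributions import Normal, Independent

mean = torch.zeros(3, 2)      # hypothetical: 3 parameters, each 2-dimensional
log_std = torch.zeros(3, 2)

# reinterpreted_batch_ndims=2 folds both trailing dimensions into the event,
# so log_prob sums over all six values and returns a scalar.
q = Independent(Normal(mean, log_std.exp()), 2)
print(q.event_shape)                  # torch.Size([3, 2])
print(q.log_prob(q.sample()).shape)   # torch.Size([])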

Example 7: __init__

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Independent [as alias]
def __init__(self, std: Union[torch.Tensor, float, Distribution]):
        """
        Defines a random walk.
        :param std: The vector of standard deviations
        :type std: torch.Tensor|float|Distribution
        """

        if not isinstance(std, torch.Tensor):
            normal = Normal(0., 1.)
        else:
            normal = Normal(0., 1.) if std.shape[-1] < 2 else Independent(Normal(torch.zeros_like(std), std), 1)

        super().__init__((_f, _g), (std,), normal, normal) 
Author: tingiskhan, Project: pyfilter, Lines of code: 15, Source file: affine.py

Example 8: prop_state

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Independent [as alias]
def prop_state(x, beta, gamma, eta, dt):
    f = _f(x, beta, gamma, eta, dt)

    bins = Independent(Binomial(x[..., :-1], f), 1)
    samp = bins.sample()

    s = x[..., 0] - samp[..., 0]
    i = x[..., 1] + samp[..., 0] - samp[..., 1]
    r = x[..., 2] + samp[..., 1]

    return concater(s, i, r) 
Author: tingiskhan, Project: pyfilter, Lines of code: 13, Source file: sir.py

Example 9: construct

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Independent [as alias]
def construct(self, y, x):
        # ===== Mean of propagated dist ===== #
        h_loc, h_scale = self._model.hidden.mean_scale(x)
        h_loc.requires_grad_(True)

        # ===== Get gradients ===== #
        logl = self._model.observable.log_prob(y, h_loc) + self._model.hidden.log_prob(h_loc, x)
        g = grad(logl, h_loc, grad_outputs=torch.ones_like(logl), create_graph=self._alpha is None)[-1]

        # ===== Define mean and scale ===== #
        if self._alpha is None:
            step = -1 / grad(g, h_loc, grad_outputs=torch.ones_like(g))[-1]
            std = step.sqrt()
        else:
            std = h_scale.detach()
            step = self._alpha

        mean = h_loc.detach() + step * g.detach()
        x.detach_()

        if self._model.hidden_ndim == 0:
            self._kernel = Normal(mean, std)
        else:
            self._kernel = Independent(Normal(mean, std), self._model.hidden_ndim)

        return self 
Author: tingiskhan, Project: pyfilter, Lines of code: 28, Source file: linearized.py

Example 10: test_StochasticSIR

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Independent [as alias]
def test_StochasticSIR(self):
        dist = Independent(Binomial(torch.tensor([1000, 1, 0]), torch.tensor([1, 1, 1e-6])), 1)
        sir = m.StochasticSIR((0.1, 0.05, 0.01), dist, 1e-1)

        x = sir.sample_path(1000, 10)

        self.assertEqual(x.shape, torch.Size([1000, 10, 3])) 
Author: tingiskhan, Project: pyfilter, Lines of code: 9, Source file: timeseries.py

Example 11: forward

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Independent [as alias]
def forward(self, input, segm=None):

        # If segmentation is not None, concatenate the mask to the channel axis of the input
        if segm is not None:
            self.show_img = input
            self.show_seg = segm
            input = torch.cat((input, segm), dim=1)
            self.show_concat = input
            self.sum_input = torch.sum(input)

        encoding = self.encoder(input)
        self.show_enc = encoding

        #We only want the mean of the resulting hxw image
        encoding = torch.mean(encoding, dim=2, keepdim=True)
        encoding = torch.mean(encoding, dim=3, keepdim=True)

        #Convert encoding to 2 x latent dim and split up for mu and log_sigma
        mu_log_sigma = self.conv_layer(encoding)

        #We squeeze the second dimension twice, since otherwise it won't work when batch size is equal to 1
        mu_log_sigma = torch.squeeze(mu_log_sigma, dim=2)
        mu_log_sigma = torch.squeeze(mu_log_sigma, dim=2)

        mu = mu_log_sigma[:,:self.latent_dim]
        log_sigma = mu_log_sigma[:,self.latent_dim:]

        #This is a multivariate normal with diagonal covariance matrix sigma
        #https://github.com/pytorch/pytorch/pull/11178
        dist = Independent(Normal(loc=mu, scale=torch.exp(log_sigma)),1)
        return dist 
Author: stefanknegt, Project: Probabilistic-Unet-Pytorch, Lines of code: 33, Source file: probabilistic_unet.py
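For context, the distribution returned by the forward pass above supports reparameterized sampling, which is what lets the latent code be trained end to end. A tiny sketch with shapes invented here (a batch of 4 and latent_dim assumed to be 6):

import torch
from torch.distributions import Normal, Independent

mu = torch.randn(4, 6, requires_grad=True)         # hypothetical batch of 4, latent_dim = 6
log_sigma = torch.zeros(4, 6, requires_grad=True)

dist = Independent(Normal(loc=mu, scale=torch.exp(log_sigma)), 1)
z = dist.rsample()              # differentiable sample, shape (4, 6)
log_q = dist.log_prob(z)        # one log-density per batch element, shape (4,)
log_q.sum().backward()          # gradients flow back to mu and log_sigma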

Example 12: gaussian_log_likelihood

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Independent [as alias]
def gaussian_log_likelihood(mu_2d, data_2d, obsrv_std, indices = None):
	n_data_points = mu_2d.size()[-1]

	if n_data_points > 0:
		gaussian = Independent(Normal(loc = mu_2d, scale = obsrv_std.repeat(n_data_points)), 1)
		log_prob = gaussian.log_prob(data_2d) 
		log_prob = log_prob / n_data_points 
	else:
		log_prob = torch.zeros([1]).to(get_device(data_2d)).squeeze()
	return log_prob 
Author: YuliaRubanova, Project: latent_ode, Lines of code: 12, Source file: likelihood_eval.py
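A hypothetical call (inputs invented here, not from latent_ode) that mirrors what the function above builds: mu_2d and data_2d share the same trailing dimension, and obsrv_std is broadcast across all data points via repeat:

import torch
from torch.distributions import Normal, Independent

mu_2d = torch.randn(32, 10)        # hypothetical predicted means: 32 samples, 10 data points each
data_2d = torch.randn(32, 10)      # hypothetical observations with matching shape
obsrv_std = torch.tensor([0.01])   # scalar observation noise

gaussian = Independent(Normal(loc=mu_2d, scale=obsrv_std.repeat(10)), 1)
log_prob = gaussian.log_prob(data_2d) / 10   # averaged per data point, shape (32,)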

Example 13: detach_distribution

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Independent [as alias]
def detach_distribution(pi):
    if isinstance(pi, Independent):
        distribution = Independent(detach_distribution(pi.base_dist),
                                   pi.reinterpreted_batch_ndims)
    elif isinstance(pi, Categorical):
        distribution = Categorical(logits=pi.logits.detach())
    elif isinstance(pi, Normal):
        distribution = Normal(loc=pi.loc.detach(), scale=pi.scale.detach())
    else:
        raise NotImplementedError('Only `Categorical`, `Independent` and '
                                  '`Normal` policies are valid policies. Got '
                                  '`{0}`.'.format(type(pi)))
    return distribution 
Author: tristandeleu, Project: pytorch-maml-rl, Lines of code: 15, Source file: torch_utils.py
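A minimal usage sketch (the policy parameters below are invented, and detach_distribution plus its imports from the snippet above are assumed to be in scope). In a TRPO/MAML-style update, the old policy distribution is detached so that the KL term only back-propagates through the new policy:

import torch
from torch.distributions import Normal, Independent
from torch.distributions.kl import kl_divergence

# Hypothetical policy distribution whose parameters still carry gradients.
loc = torch.zeros(4, 2, requires_grad=True)
scale = torch.ones(4, 2, requires_grad=True)
pi = Independent(Normal(loc=loc, scale=scale), 1)

old_pi = detach_distribution(pi)   # same distribution, parameters cut from the graph

# The KL divergence only back-propagates through `pi`, not through `old_pi`.
kl = kl_divergence(old_pi, pi).mean()
kl.backward()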

Example 14: MultivariateNormalDiag

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Independent [as alias]
def MultivariateNormalDiag(loc, scale_diag):
    if loc.dim() < 1:
        raise ValueError("loc must be at least one-dimensional.")
    return Independent(Normal(loc, scale_diag), 1) 
Author: asyml, Project: texar-pytorch, Lines of code: 6, Source file: distributions.py
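A small usage sketch (shapes invented here; the MultivariateNormalDiag helper defined above is assumed to be in scope). The helper behaves like a diagonal-covariance multivariate normal: the last dimension of loc becomes the event shape, and log_prob returns one value per batch element:

import torch

dist = MultivariateNormalDiag(torch.zeros(5, 3), torch.ones(5, 3))
print(dist.batch_shape, dist.event_shape)    # torch.Size([5]) torch.Size([3])
print(dist.log_prob(dist.sample()).shape)    # torch.Size([5])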

Example 15: test_Inference

# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Independent [as alias]
def test_Inference(self):
        # ===== Distributions ===== #
        dist = Normal(0., 1.)
        mvn = Independent(Normal(torch.zeros(2), torch.ones(2)), 1)

        # ===== Define model ===== #
        linear = AffineProcess((f, g), (1., 0.25), dist, dist)
        model = LinearGaussianObservations(linear, scale=0.1)

        mv_linear = AffineProcess((fmvn, gmvn), (0.5, 0.25), mvn, mvn)
        mvnmodel = LinearGaussianObservations(mv_linear, torch.eye(2), scale=0.1)

        # ===== Test for multiple models ===== #
        priors = Exponential(1.), LogNormal(0., 1.)

        hidden1d = AffineProcess((f, g), priors, dist, dist)
        oned = LinearGaussianObservations(hidden1d, 1., scale=0.1)

        hidden2d = AffineProcess((fmvn, gmvn), priors, mvn, mvn)
        twod = LinearGaussianObservations(hidden2d, torch.eye(2), scale=0.1 * torch.ones(2))

        particles = 1000
        # ====== Run inference ===== #
        for trumod, model in [(model, oned), (mvnmodel, twod)]:
            x, y = trumod.sample_path(1000)

            algs = [
                (NESS, {'particles': particles, 'filter_': APF(model.copy(), 200)}),
                (NESS, {'particles': particles, 'filter_': UKF(model.copy())}),
                (SMC2, {'particles': particles, 'filter_': APF(model.copy(), 200)}),
                (SMC2FW, {'particles': particles, 'filter_': APF(model.copy(), 200)}),
                (NESSMC2, {'particles': particles, 'filter_': APF(model.copy(), 200)})
            ]

            for alg, props in algs:
                alg = alg(**props).initialize()

                alg = alg.fit(y)

                w = normalize(alg._w_rec if hasattr(alg, '_w_rec') else torch.ones(particles))

                tru_params = trumod.hidden.theta._cont + trumod.observable.theta._cont
                inf_params = alg.filter.ssm.hidden.theta._cont + alg.filter.ssm.observable.theta._cont

                for trup, p in zip(tru_params, inf_params):
                    if not p.trainable:
                        continue

                    kde = p.get_kde(weights=w)

                    transed = p.bijection.inv(trup)
                    densval = kde.logpdf(transed.numpy().reshape(-1, 1))
                    priorval = p.distr.log_prob(trup)

                    assert (densval > priorval.numpy()).all() 
Author: tingiskhan, Project: pyfilter, Lines of code: 57, Source file: inference.py


Note: The torch.distributions.Independent examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors. Please refer to the License of the corresponding project before distributing or using the code; do not reproduce without permission.