This page collects typical usage examples of the positive attribute of Python's torch.distributions.constraints module. If you have been wondering what constraints.positive is, how to use it, or where to find working examples, the curated code below may help. You can also explore the containing module, torch.distributions.constraints, for related functionality.
Six code examples of constraints.positive are shown below, sorted by popularity by default.
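Before the examples, a minimal sketch of what constraints.positive provides on its own: check tests elementwise membership in (0, ∞), and transform_to returns a bijection from unconstrained reals into the positive reals, which is also how libraries such as Pyro keep constrained parameters positive during optimization.

import torch
from torch.distributions import constraints, transform_to

# Elementwise membership test: True iff every element is strictly positive.
print(constraints.positive.check(torch.tensor([0.5, 2.0])))  # tensor([True, True])
print(constraints.positive.check(torch.tensor([-1.0])))      # tensor([False])

# transform_to maps unconstrained reals into (0, inf) via an exp-based bijection.
to_positive = transform_to(constraints.positive)
print(to_positive(torch.tensor(-3.0)))  # exp(-3) ~= 0.0498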
Example 1: test_constraints
# Required import: from torch.distributions import constraints [as alias]
# Or: from torch.distributions.constraints import positive [as alias]
def test_constraints(backend, jit):
    data = torch.tensor(0.5)

    def model():
        locs = pyro.param("locs", torch.randn(3), constraint=constraints.real)
        scales = pyro.param("scales", torch.randn(3).exp(), constraint=constraints.positive)
        p = torch.tensor([0.5, 0.3, 0.2])
        x = pyro.sample("x", dist.Categorical(p))
        pyro.sample("obs", dist.Normal(locs[x], scales[x]), obs=data)

    def guide():
        q = pyro.param("q", torch.randn(3).exp(), constraint=constraints.simplex)
        pyro.sample("x", dist.Categorical(q))

    with pyro_backend(backend):
        Elbo = infer.JitTrace_ELBO if jit else infer.Trace_ELBO
        elbo = Elbo(ignore_jit_warnings=True)
        assert_ok(model, guide, elbo)
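For context, a minimal sketch (using plain pyro-ppl rather than the multi-backend test harness above) of what constraint=constraints.positive guarantees here: the parameter is stored and optimized in unconstrained space and transformed on read, so the value returned by pyro.param is always strictly positive.

import torch
import pyro
from torch.distributions import constraints

pyro.clear_param_store()
# The init value must already satisfy the constraint; .exp() makes the random draws positive.
scales = pyro.param("scales", torch.randn(3).exp(), constraint=constraints.positive)
assert bool(constraints.positive.check(scales).all())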
Example 2: __init__
# Required import: from torch.distributions import constraints [as alias]
# Or: from torch.distributions.constraints import positive [as alias]
def __init__(self, n, eta, validate_args=False):
    TModule.__init__(self)
    if not isinstance(n, int) or n < 1:
        raise ValueError("n must be a positive integer")
    if isinstance(eta, Number):
        eta = torch.tensor(float(eta))
    self.n = torch.tensor(n, dtype=torch.long, device=eta.device)
    batch_shape = eta.shape
    event_shape = torch.Size([n, n])
    # Normalization constant(s)
    i = torch.arange(n, dtype=eta.dtype, device=eta.device)
    C = (((2 * eta.view(-1, 1) - 2 + i) * i).sum(1) * math.log(2)).view_as(eta)
    C += n * torch.sum(2 * torch.lgamma(i / 2 + 1) - torch.lgamma(i + 2))
    # need to assign values before registering as buffers to make argument validation work
    self.eta = eta
    self.C = C
    super(LKJPrior, self).__init__(batch_shape, event_shape, validate_args=validate_args)
    # now delete the plain attributes so they can be registered as buffers
    del self.eta, self.C
    self.register_buffer("eta", eta)
    self.register_buffer("C", C)
Example 3: _is_valid_correlation_matrix
# Required import: from torch.distributions import constraints [as alias]
# Or: from torch.distributions.constraints import positive [as alias]
def _is_valid_correlation_matrix(Sigma, tol=1e-6):
    """Check if the supplied matrix is a valid correlation matrix.

    A matrix is a valid correlation matrix if it is positive definite and
    all of its diagonal elements are equal to 1.

    Args:
        Sigma: An n x n correlation matrix, or a batch of b correlation
            matrices with shape b x n x n
        tol: The tolerance with which to check that the diagonal elements are 1

    Returns:
        True if Sigma is a valid correlation matrix, False otherwise (in batch
        mode, all matrices in the batch need to be valid correlation matrices)
    """
    pdef = torch.all(constraints.positive_definite.check(Sigma))
    return pdef and all(
        torch.all(torch.abs(S.diag() - 1) < tol)
        for S in Sigma.view(-1, *Sigma.shape[-2:])
    )
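A quick sanity check of this helper (a sketch; it assumes the function above is in scope): the identity matrix is a valid correlation matrix, while scaling it breaks the unit-diagonal requirement.

import torch

assert _is_valid_correlation_matrix(torch.eye(3))
assert not _is_valid_correlation_matrix(2.0 * torch.eye(3))  # diagonal is 2, not 1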
Example 4: _is_valid_correlation_matrix_cholesky_factor
# Required import: from torch.distributions import constraints [as alias]
# Or: from torch.distributions.constraints import positive [as alias]
def _is_valid_correlation_matrix_cholesky_factor(L, tol=1e-6):
    """Check if the supplied matrix is a Cholesky factor of a valid correlation matrix.

    A matrix is a Cholesky factor of a valid correlation matrix if it is lower
    triangular, has a positive diagonal, and all of its rows have unit norm.

    Args:
        L: An n x n lower-triangular matrix, or a batch of b lower-triangular
            matrices with shape b x n x n
        tol: The tolerance with which to check positivity of the diagonal and
            the unit norm of the rows

    Returns:
        True if L is a Cholesky factor of a valid correlation matrix, False
        otherwise (in batch mode, all matrices in the batch need to be
        Cholesky factors of valid correlation matrices)
    """
    unit_row_length = torch.all((torch.norm(L, dim=-1) - 1).abs() < tol)
    return unit_row_length and torch.all(constraints.lower_cholesky.check(L))
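And the analogous sanity check for the Cholesky-factor variant (again a sketch assuming the function above is in scope): the identity is its own Cholesky factor with unit row norms, whereas scaling it violates the unit-row-norm property.

import torch

assert _is_valid_correlation_matrix_cholesky_factor(torch.eye(3))
assert not _is_valid_correlation_matrix_cholesky_factor(0.5 * torch.eye(3))  # row norms are 0.5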
Example 5: initialize_params
# Required import: from torch.distributions import constraints [as alias]
# Or: from torch.distributions.constraints import positive [as alias]
def initialize_params(self):
    # dictionary of guide random effect parameters
    params = {
        "eps_g": {},
        "eps_i": {},
    }
    N_state = self.config["sizes"]["state"]
    # initialize group-level parameters
    if self.config["group"]["random"] == "continuous":
        params["eps_g"]["loc"] = Tensor(
            pyro.param("loc_group",
                       lambda: torch.zeros((N_state, N_state))),
            OrderedDict([("y_prev", bint(N_state))]),
        )
        params["eps_g"]["scale"] = Tensor(
            pyro.param("scale_group",
                       lambda: torch.ones((N_state, N_state)),
                       constraint=constraints.positive),
            OrderedDict([("y_prev", bint(N_state))]),
        )
    # initialize individual-level random effect parameters
    N_c = self.config["sizes"]["group"]
    if self.config["individual"]["random"] == "continuous":
        params["eps_i"]["loc"] = Tensor(
            pyro.param("loc_individual",
                       lambda: torch.zeros((N_c, N_state, N_state))),
            OrderedDict([("g", bint(N_c)), ("y_prev", bint(N_state))]),
        )
        params["eps_i"]["scale"] = Tensor(
            pyro.param("scale_individual",
                       lambda: torch.ones((N_c, N_state, N_state)),
                       constraint=constraints.positive),
            OrderedDict([("g", bint(N_c)), ("y_prev", bint(N_state))]),
        )
    self.params = params
    return self.params
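The recurring pattern here, shown in isolation (a sketch with hypothetical parameter names; the funsor-specific Tensor/bint wrappers are omitted): each loc parameter is left unconstrained and initialized at zero, while its paired scale parameter is initialized at one and kept positive via constraints.positive.

import torch
import pyro
from torch.distributions import constraints

N_state = 4  # assumption: number of hidden states
loc = pyro.param("loc_demo", lambda: torch.zeros((N_state, N_state)))
scale = pyro.param("scale_demo",
                   lambda: torch.ones((N_state, N_state)),
                   constraint=constraints.positive)
assert bool((scale > 0).all())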
Example 6: main
# Required import: from torch.distributions import constraints [as alias]
# Or: from torch.distributions.constraints import positive [as alias]
def main(args):
    # Define a basic model with a single Normal latent random variable `loc`
    # and a batch of Normally distributed observations.
    def model(data):
        loc = pyro.sample("loc", dist.Normal(0., 1.))
        with pyro.plate("data", len(data), dim=-1):
            pyro.sample("obs", dist.Normal(loc, 1.), obs=data)

    # Define a guide (i.e. variational distribution) with a Normal
    # distribution over the latent random variable `loc`.
    def guide(data):
        guide_loc = pyro.param("guide_loc", torch.tensor(0.))
        guide_scale = pyro.param("guide_scale", torch.tensor(1.),
                                 constraint=constraints.positive)
        pyro.sample("loc", dist.Normal(guide_loc, guide_scale))

    # Generate some data.
    torch.manual_seed(0)
    data = torch.randn(100) + 3.0

    # Because the API in minipyro matches that of Pyro proper,
    # the training code works with generic Pyro implementations.
    with pyro_backend(args.backend), interpretation(monte_carlo):
        # Construct an SVI object so we can do variational inference on our
        # model/guide pair.
        Elbo = infer.JitTrace_ELBO if args.jit else infer.Trace_ELBO
        elbo = Elbo()
        adam = optim.Adam({"lr": args.learning_rate})
        svi = infer.SVI(model, guide, adam, elbo)

        # Basic training loop
        pyro.get_param_store().clear()
        for step in range(args.num_steps):
            loss = svi.step(data)
            if args.verbose and step % 100 == 0:
                print("step {} loss = {}".format(step, loss))

        # Report the final values of the variational parameters
        # in the guide after training.
        if args.verbose:
            for name in pyro.get_param_store():
                value = pyro.param(name).data
                print("{} = {}".format(name, value.detach().cpu().numpy()))

        # For this simple (conjugate) model we know the exact posterior. In
        # particular we know that the variational distribution should be
        # centered near 3.0. So let's check this explicitly.
        assert (pyro.param("guide_loc") - 3.0).abs() < 0.1
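A hedged sketch of how main(args) might be driven; the flag names below are inferred from the attributes used above (backend, jit, learning_rate, num_steps, verbose) and are not taken from the original script's argument parser.

import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="minipyro constraints.positive demo")
    parser.add_argument("--backend", default="pyro")
    parser.add_argument("--jit", action="store_true")
    parser.add_argument("--learning-rate", type=float, default=0.02)
    parser.add_argument("--num-steps", type=int, default=1000)
    parser.add_argument("--verbose", action="store_true")
    main(parser.parse_args())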