This article collects typical usage examples of the pyro.clear_param_store function in Python. If you are wondering what clear_param_store does, how to call it, or how it is used in real code, the curated examples here should help.
The 15 code examples below are drawn from open-source projects and are shown in order of popularity.
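Before diving in, a minimal sketch of the function's contract may help (an illustration only, assuming a recent Pyro release; the parameter name "loc" is made up for this example): clear_param_store() empties Pyro's global parameter store, which is why nearly every example below calls it first so the test starts from a clean slate.

import torch
import pyro

pyro.clear_param_store()                  # begin with an empty global store
pyro.param("loc", torch.zeros(2))         # register a parameter by name
assert "loc" in pyro.get_param_store().get_all_param_names()

pyro.clear_param_store()                  # wipe every registered parameter
assert not list(pyro.get_param_store().get_all_param_names())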
Example 1: test_elbo_nonreparameterized
def test_elbo_nonreparameterized(self):
    pyro.clear_param_store()

    # conjugate Beta-Bernoulli model, so the guide should recover the
    # known analytic posterior parameters (log_alpha_n, log_beta_n)
    def model():
        p_latent = pyro.sample("p_latent", dist.beta, self.alpha0, self.beta0)
        pyro.map_data("aaa",
                      self.data, lambda i, x: pyro.observe(
                          "obs_{}".format(i), dist.bernoulli, x, p_latent),
                      batch_size=self.batch_size)
        return p_latent

    def guide():
        alpha_q_log = pyro.param("alpha_q_log",
                                 Variable(self.log_alpha_n.data + 0.17, requires_grad=True))
        beta_q_log = pyro.param("beta_q_log",
                                Variable(self.log_beta_n.data - 0.143, requires_grad=True))
        alpha_q, beta_q = torch.exp(alpha_q_log), torch.exp(beta_q_log)
        pyro.sample("p_latent", dist.beta, alpha_q, beta_q)
        pyro.map_data("aaa", self.data, lambda i, x: None, batch_size=self.batch_size)

    adam = optim.Adam({"lr": .001, "betas": (0.97, 0.999)})
    svi = SVI(model, guide, adam, loss="ELBO", trace_graph=False)
    for k in range(10001):
        svi.step()

    alpha_error = param_abs_error("alpha_q_log", self.log_alpha_n)
    beta_error = param_abs_error("beta_q_log", self.log_beta_n)
    self.assertEqual(0.0, alpha_error, prec=0.08)
    self.assertEqual(0.0, beta_error, prec=0.08)
Example 2: do_elbo_test
def do_elbo_test(self, reparameterized, n_steps):
    pyro.clear_param_store()
    pt_guide = LogNormalNormalGuide(self.log_mu_n.data + 0.17,
                                    self.log_tau_n.data - 0.143)

    def model():
        mu_latent = pyro.sample("mu_latent", dist.normal,
                                self.mu0, torch.pow(self.tau0, -0.5))
        sigma = torch.pow(self.tau, -0.5)
        pyro.observe("obs0", dist.lognormal, self.data[0], mu_latent, sigma)
        pyro.observe("obs1", dist.lognormal, self.data[1], mu_latent, sigma)
        return mu_latent

    def guide():
        # the guide's parameters live inside pt_guide and are registered
        # with the param store via pyro.module("mymodule", ...)
        pyro.module("mymodule", pt_guide)
        mu_q, tau_q = torch.exp(pt_guide.mu_q_log), torch.exp(pt_guide.tau_q_log)
        sigma = torch.pow(tau_q, -0.5)
        pyro.sample("mu_latent", dist.Normal(mu_q, sigma, reparameterized=reparameterized))

    adam = optim.Adam({"lr": .0005, "betas": (0.96, 0.999)})
    svi = SVI(model, guide, adam, loss="ELBO", trace_graph=False)
    for k in range(n_steps):
        svi.step()

    mu_error = param_abs_error("mymodule$$$mu_q_log", self.log_mu_n)
    tau_error = param_abs_error("mymodule$$$tau_q_log", self.log_tau_n)
    self.assertEqual(0.0, mu_error, prec=0.07)
    self.assertEqual(0.0, tau_error, prec=0.07)
Example 3: test_module_nn
def test_module_nn(nn_module):
    pyro.clear_param_store()
    nn_module = nn_module()
    assert pyro.get_param_store()._params == {}
    pyro.module("module", nn_module)
    for name in pyro.get_param_store().get_all_param_names():
        assert pyro.params.user_param_name(name) in nn_module.state_dict().keys()
Example 4: test_gmm_iter_discrete_traces
def test_gmm_iter_discrete_traces(data_size, graph_type, model):
    pyro.clear_param_store()
    data = torch.arange(0, data_size)
    model = config_enumerate(model)
    traces = list(iter_discrete_traces(graph_type, model, data=data, verbose=True))
    # This non-vectorized version is exponential in data_size:
    assert len(traces) == 2**data_size
Example 5: main
def main(args):
    pyro.clear_param_store()
    data = build_linear_dataset(N, p)
    if args.cuda:
        # make tensors and modules CUDA
        data = data.cuda()
        softplus.cuda()
        regression_model.cuda()
    for j in range(args.num_epochs):
        if args.batch_size == N:
            # use the entire data set
            epoch_loss = svi.step(data)
        else:
            # mini batch
            epoch_loss = 0.0
            perm = torch.randperm(N) if not args.cuda else torch.randperm(N).cuda()
            # shuffle data
            data = data[perm]
            # get indices of each batch
            all_batches = get_batch_indices(N, args.batch_size)
            for ix, batch_start in enumerate(all_batches[:-1]):
                batch_end = all_batches[ix + 1]
                batch_data = data[batch_start: batch_end]
                epoch_loss += svi.step(batch_data)
        if j % 100 == 0:
            print("epoch avg loss {}".format(epoch_loss / float(N)))
Example 6: test_bern_elbo_gradient
def test_bern_elbo_gradient(enum_discrete, trace_graph):
    pyro.clear_param_store()
    num_particles = 2000

    def model():
        p = Variable(torch.Tensor([0.25]))
        pyro.sample("z", dist.Bernoulli(p))

    def guide():
        p = pyro.param("p", Variable(torch.Tensor([0.5]), requires_grad=True))
        pyro.sample("z", dist.Bernoulli(p))

    print("Computing gradients using surrogate loss")
    Elbo = TraceGraph_ELBO if trace_graph else Trace_ELBO
    elbo = Elbo(enum_discrete=enum_discrete,
                num_particles=(1 if enum_discrete else num_particles))
    with xfail_if_not_implemented():
        elbo.loss_and_grads(model, guide)
    params = sorted(pyro.get_param_store().get_all_param_names())
    assert params, "no params found"
    actual_grads = {name: pyro.param(name).grad.clone() for name in params}

    print("Computing gradients using finite difference")
    elbo = Trace_ELBO(num_particles=num_particles)
    expected_grads = finite_difference(lambda: elbo.loss(model, guide))

    for name in params:
        print("{} {}{}{}".format(name, "-" * 30, actual_grads[name].data,
                                 expected_grads[name].data))
    assert_equal(actual_grads, expected_grads, prec=0.1)
Example 7: test_iter_discrete_traces_vector
def test_iter_discrete_traces_vector(graph_type):
    pyro.clear_param_store()

    def model():
        p = pyro.param("p", Variable(torch.Tensor([[0.05], [0.15]])))
        ps = pyro.param("ps", Variable(torch.Tensor([[0.1, 0.2, 0.3, 0.4],
                                                     [0.4, 0.3, 0.2, 0.1]])))
        x = pyro.sample("x", dist.Bernoulli(p))
        y = pyro.sample("y", dist.Categorical(ps, one_hot=False))
        assert x.size() == (2, 1)
        assert y.size() == (2, 1)
        return dict(x=x, y=y)

    traces = list(iter_discrete_traces(graph_type, model))

    p = pyro.param("p").data
    ps = pyro.param("ps").data
    assert len(traces) == 2 * ps.size(-1)

    for scale, trace in traces:
        x = trace.nodes["x"]["value"].data.squeeze().long()[0]
        y = trace.nodes["y"]["value"].data.squeeze().long()[0]
        # joint probability of the enumerated values: exp(log p(x) + log p(y))
        expected_scale = torch.exp(dist.Bernoulli(p).log_pdf(x) +
                                   dist.Categorical(ps, one_hot=False).log_pdf(y))
        expected_scale = expected_scale.data.view(-1)[0]
        assert_equal(scale, expected_scale)
Example 8: assert_ok
def assert_ok(model, guide, elbo):
    """
    Assert that inference works without warnings or errors.
    """
    pyro.clear_param_store()
    inference = SVI(model, guide, Adam({"lr": 1e-6}), elbo)
    inference.step()
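Since this helper is generic, a usage sketch may help. The toy_model/toy_guide pair below is hypothetical (not part of the original test suite) and assumes the Trace_ELBO and param-store APIs used elsewhere on this page:

import torch
import pyro
import pyro.distributions as dist
from pyro.infer import Trace_ELBO

def toy_model():
    # hypothetical one-site model used only to exercise assert_ok
    pyro.sample("z", dist.Normal(0., 1.))

def toy_guide():
    loc = pyro.param("loc", torch.tensor(0.))
    pyro.sample("z", dist.Normal(loc, 1.))

assert_ok(toy_model, toy_guide, Trace_ELBO())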
Example 9: test_elbo_bern
def test_elbo_bern(quantity, enumerate1):
    pyro.clear_param_store()
    num_particles = 1 if enumerate1 else 10000
    prec = 0.001 if enumerate1 else 0.1
    q = pyro.param("q", torch.tensor(0.5, requires_grad=True))
    # with no observations, the ELBO loss equals KL(guide || model) exactly
    kl = kl_divergence(dist.Bernoulli(q), dist.Bernoulli(0.25))

    def model():
        with pyro.iarange("particles", num_particles):
            pyro.sample("z", dist.Bernoulli(0.25).expand_by([num_particles]))

    @config_enumerate(default=enumerate1)
    def guide():
        q = pyro.param("q")
        with pyro.iarange("particles", num_particles):
            pyro.sample("z", dist.Bernoulli(q).expand_by([num_particles]))

    elbo = TraceEnum_ELBO(max_iarange_nesting=1,
                          strict_enumeration_warning=any([enumerate1]))

    if quantity == "loss":
        actual = elbo.loss(model, guide) / num_particles
        expected = kl.item()
        assert_equal(actual, expected, prec=prec, msg="".join([
            "\nexpected = {}".format(expected),
            "\n  actual = {}".format(actual),
        ]))
    else:
        elbo.loss_and_grads(model, guide)
        actual = q.grad / num_particles
        expected = grad(kl, [q])[0]
        assert_equal(actual, expected, prec=prec, msg="".join([
            "\nexpected = {}".format(expected.detach().cpu().numpy()),
            "\n  actual = {}".format(actual.detach().cpu().numpy()),
        ]))
Example 10: test_dynamic_lr
def test_dynamic_lr(scheduler, num_steps):
    pyro.clear_param_store()

    def model():
        sample = pyro.sample('latent', Normal(torch.tensor(0.), torch.tensor(0.3)))
        return pyro.sample('obs', Normal(sample, torch.tensor(0.2)), obs=torch.tensor(0.1))

    def guide():
        loc = pyro.param('loc', torch.tensor(0.))
        scale = pyro.param('scale', torch.tensor(0.5))
        pyro.sample('latent', Normal(loc, scale))

    svi = SVI(model, guide, scheduler, loss=TraceGraph_ELBO())
    for epoch in range(2):
        scheduler.set_epoch(epoch)
        for _ in range(num_steps):
            svi.step()
        if epoch == 1:
            # after set_epoch(1) each per-parameter optimizer should have
            # stepped its learning rate from the initial 0.01 to 0.02
            loc = pyro.param('loc')
            scale = pyro.param('scale')
            opt = scheduler.optim_objs[loc].optimizer
            assert opt.state_dict()['param_groups'][0]['lr'] == 0.02
            assert opt.state_dict()['param_groups'][0]['initial_lr'] == 0.01
            opt = scheduler.optim_objs[scale].optimizer
            assert opt.state_dict()['param_groups'][0]['lr'] == 0.02
            assert opt.state_dict()['param_groups'][0]['initial_lr'] == 0.01
Example 11: test_dirichlet_bernoulli
def test_dirichlet_bernoulli(Elbo, vectorized):
    pyro.clear_param_store()
    data = torch.tensor([1.0] * 6 + [0.0] * 4)

    def model1(data):
        concentration0 = torch.tensor([10.0, 10.0])
        f = pyro.sample("latent_fairness", dist.Dirichlet(concentration0))[1]
        for i in pyro.irange("irange", len(data)):
            pyro.sample("obs_{}".format(i), dist.Bernoulli(f), obs=data[i])

    def model2(data):
        concentration0 = torch.tensor([10.0, 10.0])
        f = pyro.sample("latent_fairness", dist.Dirichlet(concentration0))[1]
        pyro.sample("obs", dist.Bernoulli(f).expand_by(data.shape).independent(1),
                    obs=data)

    model = model2 if vectorized else model1

    def guide(data):
        concentration_q = pyro.param("concentration_q", torch.tensor([15.0, 15.0]),
                                     constraint=constraints.positive)
        pyro.sample("latent_fairness", dist.Dirichlet(concentration_q))

    elbo = Elbo(num_particles=7, strict_enumeration_warning=False)
    optim = Adam({"lr": 0.0005, "betas": (0.90, 0.999)})
    svi = SVI(model, guide, optim, elbo)
    for step in range(40):
        svi.step(data)
Example 12: test_gmm_batch_iter_discrete_traces
def test_gmm_batch_iter_discrete_traces(model, data_size, graph_type):
    pyro.clear_param_store()
    data = torch.arange(0, data_size)
    model = config_enumerate(model)
    traces = list(iter_discrete_traces(graph_type, model, data=data))
    # This vectorized version is independent of data_size:
    assert len(traces) == 2
Example 13: test_random_module
def test_random_module(self):
    pyro.clear_param_store()
    lifted_tr = poutine.trace(pyro.random_module("name", self.model, prior=self.prior)).get_trace()
    for name in lifted_tr.nodes.keys():
        node = lifted_tr.nodes[name]
        # random_module lifts every nn.Module parameter to a latent sample
        # drawn from the prior, so no "param" sites should survive in the trace
        assert node["type"] != "param"
        if node["type"] == "sample":
            assert not node["is_observed"]
Example 14: test_elbo_hmm_in_guide
def test_elbo_hmm_in_guide(enumerate1, num_steps):
    pyro.clear_param_store()
    data = torch.ones(num_steps)
    init_probs = torch.tensor([0.5, 0.5])

    def model(data):
        transition_probs = pyro.param("transition_probs",
                                      torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
                                      constraint=constraints.simplex)
        emission_probs = pyro.param("emission_probs",
                                    torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
                                    constraint=constraints.simplex)
        x = None
        for i, y in enumerate(data):
            probs = init_probs if x is None else transition_probs[x]
            x = pyro.sample("x_{}".format(i), dist.Categorical(probs))
            pyro.sample("y_{}".format(i), dist.Categorical(emission_probs[x]), obs=y)

    @config_enumerate(default=enumerate1)
    def guide(data):
        transition_probs = pyro.param("transition_probs",
                                      torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
                                      constraint=constraints.simplex)
        x = None
        for i, y in enumerate(data):
            probs = init_probs if x is None else transition_probs[x]
            x = pyro.sample("x_{}".format(i), dist.Categorical(probs))

    elbo = TraceEnum_ELBO(max_iarange_nesting=0)
    elbo.loss_and_grads(model, guide, data)

    # These golden values simply test agreement between parallel and sequential.
    expected_grads = {
        2: {
            "transition_probs": [[0.1029949, -0.1029949], [0.1029949, -0.1029949]],
            "emission_probs": [[0.75, -0.75], [0.25, -0.25]],
        },
        3: {
            "transition_probs": [[0.25748726, -0.25748726], [0.25748726, -0.25748726]],
            "emission_probs": [[1.125, -1.125], [0.375, -0.375]],
        },
        10: {
            "transition_probs": [[1.64832076, -1.64832076], [1.64832076, -1.64832076]],
            "emission_probs": [[3.75, -3.75], [1.25, -1.25]],
        },
        20: {
            "transition_probs": [[3.70781687, -3.70781687], [3.70781687, -3.70781687]],
            "emission_probs": [[7.5, -7.5], [2.5, -2.5]],
        },
    }
    for name, value in pyro.get_param_store().named_parameters():
        actual = value.grad
        expected = torch.tensor(expected_grads[num_steps][name])
        assert_equal(actual, expected, msg=''.join([
            '\nexpected {}.grad = {}'.format(name, expected.cpu().numpy()),
            '\n  actual {}.grad = {}'.format(name, actual.detach().cpu().numpy()),
        ]))
Example 15: assert_error
def assert_error(model, guide, elbo):
    """
    Assert that inference fails with an error.
    """
    pyro.clear_param_store()
    inference = SVI(model, guide, Adam({"lr": 1e-6}), elbo)
    with pytest.raises((NotImplementedError, UserWarning, KeyError, ValueError, RuntimeError)):
        inference.step()
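A matching usage sketch, under the same assumptions as the assert_ok example above. The bad_model below is hypothetical; sampling two sites with the same name is used as the deliberate bug, on the assumption that Pyro reports duplicate site names as a RuntimeError while tracing:

import torch
import pyro
import pyro.distributions as dist
from pyro.infer import Trace_ELBO

def bad_model():
    pyro.sample("z", dist.Normal(0., 1.))
    pyro.sample("z", dist.Normal(0., 1.))  # duplicate site name -> error

def bad_guide():
    loc = pyro.param("loc", torch.tensor(0.))
    pyro.sample("z", dist.Normal(loc, 1.))

assert_error(bad_model, bad_guide, Trace_ELBO())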