This article collects typical usage examples of the Python class edward.models.Normal. If you have been wondering what exactly the Normal class does, how to use it, or want to see it in real code, the curated class examples below should help.
The following shows 15 code examples of the Normal class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
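Before diving in, here is a minimal sketch of constructing and evaluating a Normal random variable (assuming Edward 1.x on TensorFlow 1.x; note that older Edward releases named the parameters mu/sigma rather than loc/scale, and both spellings appear in the examples below):

import tensorflow as tf
from edward.models import Normal

# A scalar standard normal random variable.
x = Normal(loc=0.0, scale=1.0)

with tf.Session() as sess:
  # x.value() is the tensor holding one sample of x; mean() and stddev()
  # are the distribution's moments.
  print(sess.run([x.value(), x.mean(), x.stddev()]))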
Example 1: _test_normal_normal
def _test_normal_normal(self, Inference, default, *args, **kwargs):
  with self.test_session() as sess:
    x_data = np.array([0.0] * 50, dtype=np.float32)

    mu = Normal(loc=0.0, scale=1.0)
    x = Normal(loc=mu, scale=1.0, sample_shape=50)

    if not default:
      qmu_loc = tf.Variable(tf.random_normal([]))
      qmu_scale = tf.nn.softplus(tf.Variable(tf.random_normal([])))
      qmu = Normal(loc=qmu_loc, scale=qmu_scale)

      # analytic solution: N(loc=0.0, scale=sqrt(1/51) ≈ 0.140)
      inference = Inference({mu: qmu}, data={x: x_data})
    else:
      inference = Inference([mu], data={x: x_data})
      qmu = inference.latent_vars[mu]

    inference.run(*args, **kwargs)

    self.assertAllClose(qmu.mean().eval(), 0, rtol=0.1, atol=0.6)
    self.assertAllClose(qmu.stddev().eval(), np.sqrt(1 / 51),
                        rtol=0.15, atol=0.5)

    variables = tf.get_collection(
        tf.GraphKeys.GLOBAL_VARIABLES, scope='optimizer')
    old_t, old_variables = sess.run([inference.t, variables])
    self.assertEqual(old_t, inference.n_iter)
    sess.run(inference.reset)
    new_t, new_variables = sess.run([inference.t, variables])
    self.assertEqual(new_t, 0)
    self.assertNotEqual(old_variables, new_variables)
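For reference, the analytic posterior cited in the comment follows from the standard conjugate normal-normal update. With prior $\mu \sim \mathcal{N}(0, 1)$, likelihood $x_i \sim \mathcal{N}(\mu, 1)$, and $n = 50$ observations all equal to zero:

$$\sigma_{\text{post}}^{2} = \left(\frac{1}{\sigma_0^{2}} + \frac{n}{\sigma^{2}}\right)^{-1} = \frac{1}{1 + 50} = \frac{1}{51}, \qquad \mu_{\text{post}} = \sigma_{\text{post}}^{2}\left(\frac{\mu_0}{\sigma_0^{2}} + \frac{n\bar{x}}{\sigma^{2}}\right) = 0,$$

so the exact posterior is $\mathcal{N}(0, \sqrt{1/51}) \approx \mathcal{N}(0, 0.140)$, which the assertions check up to loose tolerances.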
Example 2: build_update
def build_update(self):
  """Simulate Langevin dynamics using a discretized integrator. Its
  discretization error goes to zero as the learning rate decreases.

  #### Notes

  The updates assume each Empirical random variable is directly
  parameterized by `tf.Variable`s.
  """
  old_sample = {z: tf.gather(qz.params, tf.maximum(self.t - 1, 0))
                for z, qz in six.iteritems(self.latent_vars)}

  # Simulate Langevin dynamics.
  learning_rate = self.step_size / tf.cast(self.t + 1, tf.float32)
  grad_log_joint = tf.gradients(self._log_joint(old_sample),
                                list(six.itervalues(old_sample)))
  sample = {}
  for z, grad_log_p in zip(six.iterkeys(old_sample), grad_log_joint):
    qz = self.latent_vars[z]
    event_shape = qz.event_shape
    normal = Normal(loc=tf.zeros(event_shape),
                    scale=learning_rate * tf.ones(event_shape))
    sample[z] = old_sample[z] + \
        0.5 * learning_rate * tf.convert_to_tensor(grad_log_p) + \
        normal.sample()

  # Update Empirical random variables.
  assign_ops = []
  for z, qz in six.iteritems(self.latent_vars):
    variable = qz.get_variables()[0]
    assign_ops.append(tf.scatter_update(variable, self.t, sample[z]))

  # Increment n_accept.
  assign_ops.append(self.n_accept.assign_add(1))
  return tf.group(*assign_ops)
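In equations, the loop performs the Langevin update with decaying step size $\epsilon_t = \text{step\_size}/(t + 1)$:

$$z_t = z_{t-1} + \frac{\epsilon_t}{2} \nabla_z \log p(x, z_{t-1}) + \eta_t, \qquad \eta_t \sim \mathcal{N}(0, \epsilon_t^{2} I).$$

One caveat worth noting: this snippet passes learning_rate directly as the Normal scale (a standard deviation), whereas textbook Langevin dynamics injects noise with variance $\epsilon_t$, i.e. scale $\sqrt{\epsilon_t}$.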
Example 3: test_neg
def test_neg(self):
  with self.test_session() as sess:
    x = Normal(0.0, 1.0)
    z = -x
    z_value = -x.value()
    z_eval, z_value_eval = sess.run([z, z_value])
    self.assertAllEqual(z_eval, z_value_eval)
Example 4: test_abs
def test_abs(self):
  with self.test_session() as sess:
    x = Normal(0.0, 1.0)
    z = abs(x)
    z_value = abs(x.value())
    z_eval, z_value_eval = sess.run([z, z_value])
    self.assertAllEqual(z_eval, z_value_eval)
Example 5: build_update
def build_update(self):
  """Simulate Langevin dynamics using a discretized integrator. Its
  discretization error goes to zero as the learning rate decreases.
  """
  old_sample = {z: tf.gather(qz.params, tf.maximum(self.t - 1, 0))
                for z, qz in six.iteritems(self.latent_vars)}

  # Simulate Langevin dynamics.
  learning_rate = self.step_size / tf.cast(self.t + 1, tf.float32)
  grad_log_joint = tf.gradients(self._log_joint(old_sample),
                                list(six.itervalues(old_sample)))
  sample = {}
  for z, qz, grad_log_p in zip(six.iterkeys(self.latent_vars),
                               six.itervalues(self.latent_vars),
                               grad_log_joint):
    event_shape = qz.get_event_shape()
    normal = Normal(mu=tf.zeros(event_shape),
                    sigma=learning_rate * tf.ones(event_shape))
    sample[z] = old_sample[z] + 0.5 * learning_rate * grad_log_p + \
        normal.sample()

  # Update Empirical random variables.
  assign_ops = []
  variables = {x.name: x for x in
               tf.get_default_graph().get_collection(tf.GraphKeys.VARIABLES)}
  for z, qz in six.iteritems(self.latent_vars):
    variable = variables[qz.params.op.inputs[0].op.inputs[0].name]
    assign_ops.append(tf.scatter_update(variable, self.t, sample[z]))

  # Increment n_accept.
  assign_ops.append(self.n_accept.assign_add(1))
  return tf.group(*assign_ops)
Example 6: test_getitem
def test_getitem(self):
  with self.test_session() as sess:
    x = Normal(tf.zeros([3, 4]), tf.ones([3, 4]))
    z = x[0:2, 2:3]
    z_value = x.value()[0:2, 2:3]
    z_eval, z_value_eval = sess.run([z, z_value])
    self.assertAllEqual(z_eval, z_value_eval)
Example 7: AutoRegressive
class AutoRegressive(RandomVariable, Distribution):
  # a 1-D AR(1) process: z[t + 1] = a * z[t] + eps, with eps ~ N(0, sig**2)
  def __init__(self, T, a, sig, *args, **kwargs):
    self.a = a
    self.sig = sig
    self.T = T
    self.shocks = Normal(tf.zeros(T), scale=sig)
    self.z = tf.scan(lambda acc, x: self.a * acc + x, self.shocks)

    if 'dtype' not in kwargs:
      kwargs['dtype'] = tf.float32
    if 'allow_nan_stats' not in kwargs:
      kwargs['allow_nan_stats'] = False
    if 'reparameterization_type' not in kwargs:
      kwargs['reparameterization_type'] = FULLY_REPARAMETERIZED
    if 'validate_args' not in kwargs:
      kwargs['validate_args'] = False
    if 'name' not in kwargs:
      kwargs['name'] = 'AutoRegressive'

    super(AutoRegressive, self).__init__(*args, **kwargs)
    self._args = (T, a, sig)

  def _log_prob(self, value):
    # subtract the AR(1) prediction a * z[t - 1] to recover the shocks
    err = value - self.a * tf.pad(value[:-1], [[1, 0]], 'CONSTANT')
    lpdf = self.shocks._log_prob(err)
    return tf.reduce_sum(lpdf)

  def _sample_n(self, n, seed=None):
    return tf.scan(lambda acc, x: self.a * acc + x,
                   self.shocks._sample_n(n, seed))
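A hypothetical usage sketch for the class above (the argument values here are made up for illustration and are not part of the original example):

# Score an all-zero trajectory under a hypothetical AR(1) process with
# T=10 steps, coefficient a=0.5, and shock stddev sig=1.0.
ar = AutoRegressive(10, 0.5, 1.0)
path = np.zeros(10, dtype=np.float32)

with tf.Session() as sess:
  print(sess.run(ar.log_prob(path)))  # scalar log-density of the path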
Example 8: main
def main(_):
  ed.set_seed(42)

  N = 5000  # number of data points
  D = 10  # number of features

  # DATA
  w_true = np.random.randn(D)
  X_data = np.random.randn(N, D)
  p = expit(np.dot(X_data, w_true))
  y_data = np.array([np.random.binomial(1, i) for i in p])

  # MODEL
  X = tf.placeholder(tf.float32, [N, D])
  w = Normal(loc=tf.zeros(D), scale=tf.ones(D))
  y = Bernoulli(logits=ed.dot(X, w))

  # INFERENCE
  qw = Normal(loc=tf.get_variable("qw/loc", [D]),
              scale=tf.nn.softplus(tf.get_variable("qw/scale", [D])))

  inference = IWVI({w: qw}, data={X: X_data, y: y_data})
  inference.run(K=5, n_iter=1000)

  # CRITICISM
  print("Mean squared error in true values to inferred posterior mean:")
  print(tf.reduce_mean(tf.square(w_true - qw.mean())).eval())
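In math, the model being fit is Bayesian logistic regression, where $\sigma$ is the logistic function (expit above):

$$w \sim \mathcal{N}(0, I_D), \qquad y_n \mid x_n, w \sim \text{Bernoulli}\big(\sigma(x_n^{\top} w)\big), \quad n = 1, \dots, N,$$

and IWVI fits the variational approximation qw by maximizing an importance-weighted bound, here with K = 5 importance samples per gradient step.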
Example 9: test_rfloordiv
def test_rfloordiv(self):
  with self.test_session() as sess:
    x = Normal(0.0, 1.0)
    y = 5.0
    z = y // x
    z_value = y // x.value()
    z_eval, z_value_eval = sess.run([z, z_value])
    self.assertAllEqual(z_eval, z_value_eval)
Example 10: test_div
def test_div(self):
  with self.test_session() as sess:
    x = Normal(0.0, 1.0)
    y = 5.0
    z = x / y
    z_value = x.value() / y
    z_eval, z_value_eval = sess.run([z, z_value])
    self.assertAllEqual(z_eval, z_value_eval)
Example 11: test_swap_tensor_rv
def test_swap_tensor_rv(self):
  with self.test_session():
    ed.set_seed(95258)
    x = Normal(0.0, 0.1)
    y = tf.constant(1.0)
    z = x * y
    qx = Normal(10.0, 0.1)
    z_new = ed.copy(z, {x.value(): qx})
    self.assertGreater(z_new.eval(), 5.0)
Example 12: test_dict_tensor_rv
def test_dict_tensor_rv(self):
  with self.test_session():
    set_seed(95258)
    x = Normal(mu=0.0, sigma=0.1)
    y = tf.constant(1.0)
    z = x * y
    qx = Normal(mu=10.0, sigma=0.1)
    z_new = copy(z, {x.value(): qx})
    self.assertGreater(z_new.eval(), 5.0)
Example 13: _test
def _test(mu, sigma, n):
  rv = Normal(mu=mu, sigma=sigma)
  rv_sample = rv.sample(n)
  x = rv_sample.eval()
  x_tf = tf.constant(x, dtype=tf.float32)
  mu = mu.eval()
  sigma = sigma.eval()
  assert np.allclose(rv.log_prob(x_tf).eval(),
                     stats.norm.logpdf(x, mu, sigma))
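Both sides of the assertion evaluate the same Gaussian log-density, so this checks Edward's log_prob against scipy's reference implementation:

$$\log \mathcal{N}(x; \mu, \sigma) = -\frac{1}{2}\log(2\pi\sigma^{2}) - \frac{(x - \mu)^{2}}{2\sigma^{2}}.$$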
Example 14: test_list
def test_list(self):
  with self.test_session() as sess:
    x = Normal(tf.constant(0.0), tf.constant(0.1))
    y = Normal(tf.constant(10.0), tf.constant(0.1))
    cat = Categorical(logits=tf.zeros(5))
    components = [Normal(x, tf.constant(0.1))
                  for _ in range(5)]
    z = Mixture(cat=cat, components=components)
    z_new = ed.copy(z, {x: y.value()})
    self.assertGreater(z_new.value().eval(), 5.0)
Example 15: build_update
def build_update(self):
  """Simulate Hamiltonian dynamics using a numerical integrator.
  Correct for the integrator's discretization error using an
  acceptance ratio.

  #### Notes

  The updates assume each Empirical random variable is directly
  parameterized by `tf.Variable`s.
  """
  old_sample = {z: tf.gather(qz.params, tf.maximum(self.t - 1, 0))
                for z, qz in six.iteritems(self.latent_vars)}
  old_sample = OrderedDict(old_sample)

  # Sample momentum.
  old_r_sample = OrderedDict()
  for z, qz in six.iteritems(self.latent_vars):
    event_shape = qz.event_shape
    normal = Normal(loc=tf.zeros(event_shape), scale=tf.ones(event_shape))
    old_r_sample[z] = normal.sample()

  # Simulate Hamiltonian dynamics.
  new_sample, new_r_sample = leapfrog(old_sample, old_r_sample,
                                      self.step_size, self._log_joint,
                                      self.n_steps)

  # Calculate acceptance ratio.
  ratio = tf.reduce_sum([0.5 * tf.reduce_sum(tf.square(r))
                         for r in six.itervalues(old_r_sample)])
  ratio -= tf.reduce_sum([0.5 * tf.reduce_sum(tf.square(r))
                          for r in six.itervalues(new_r_sample)])
  ratio += self._log_joint(new_sample)
  ratio -= self._log_joint(old_sample)

  # Accept or reject sample.
  u = Uniform().sample()
  accept = tf.log(u) < ratio
  sample_values = tf.cond(accept, lambda: list(six.itervalues(new_sample)),
                          lambda: list(six.itervalues(old_sample)))
  if not isinstance(sample_values, list):
    # `tf.cond` returns tf.Tensor if output is a list of size 1.
    sample_values = [sample_values]

  sample = {z: sample_value for z, sample_value in
            zip(six.iterkeys(new_sample), sample_values)}

  # Update Empirical random variables.
  assign_ops = []
  for z, qz in six.iteritems(self.latent_vars):
    variable = qz.get_variables()[0]
    assign_ops.append(tf.scatter_update(variable, self.t, sample[z]))

  # Increment n_accept (if accepted).
  assign_ops.append(self.n_accept.assign_add(tf.where(accept, 1, 0)))
  return tf.group(*assign_ops)
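The ratio computed above is the log Metropolis-Hastings acceptance ratio for HMC, with kinetic energy $\frac{1}{2}\lVert r \rVert^{2}$ for the standard normal momenta:

$$\log \alpha = \Big(\log p(x, z') - \tfrac{1}{2}\lVert r' \rVert^{2}\Big) - \Big(\log p(x, z) - \tfrac{1}{2}\lVert r \rVert^{2}\Big),$$

and the leapfrog proposal $(z', r')$ is accepted when $\log u < \log \alpha$ for $u \sim \text{Uniform}(0, 1)$.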