This page collects typical usage examples of the Python method bayespy.nodes.GaussianARD.observe. If you have been wondering what GaussianARD.observe does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore the usage of its containing class, bayespy.nodes.GaussianARD.
Below, 15 code examples of GaussianARD.observe are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
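Before the examples, a minimal sketch of the method itself may help (this snippet was written for this page and is not one of the examples below; the data values are made up): observe() clamps a stochastic node to given data so that inference treats it as observed evidence.

import numpy as np
from bayespy.nodes import GaussianARD, Gamma
from bayespy.inference import VB

mu = GaussianARD(0, 1e-6)                        # unknown mean with a vague prior
tau = Gamma(1e-6, 1e-6)                          # unknown noise precision
y = GaussianARD(mu, tau, plates=(5,))            # five scalar observations
y.observe(np.array([4.9, 5.1, 5.0, 4.8, 5.2]))   # clamp y to (made-up) data
VB(y, mu, tau).update(repeat=10)                 # mu and tau now hold posterior approximations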
Example 1: check_lower_bound
# Required import: from bayespy.nodes import GaussianARD [as alias]
# Or: from bayespy.nodes.GaussianARD import observe [as alias]
def check_lower_bound(shape_mu, shape_alpha, plates_mu=(), **kwargs):
    M = GaussianARD(np.ones(plates_mu + shape_mu),
                    np.ones(plates_mu + shape_mu),
                    shape=shape_mu,
                    plates=plates_mu)
    if not ('ndim' in kwargs or 'shape' in kwargs):
        kwargs['ndim'] = len(shape_mu)
    X = GaussianARD(M,
                    2*np.ones(shape_alpha),
                    **kwargs)
    Y = GaussianARD(X,
                    3*np.ones(X.get_shape(0)),
                    **kwargs)
    Y.observe(4*np.ones(Y.get_shape(0)))
    X.update()
    Cov = 1/(2+3)
    mu = Cov * (2*1 + 3*4)
    x2 = mu**2 + Cov
    logH_X = (+ 0.5*(1+np.log(2*np.pi))
              + 0.5*np.log(Cov))
    logp_X = (- 0.5*np.log(2*np.pi)
              + 0.5*np.log(2)
              - 0.5*2*(x2 - 2*mu*1 + 1**2+1))
    r = np.prod(X.get_shape(0))
    self.assertAllClose(r * (logp_X + logH_X),
                        X.lower_bound_contribution())
Example 2: _setup_linear_regression
# Required import: from bayespy.nodes import GaussianARD [as alias]
# Or: from bayespy.nodes.GaussianARD import observe [as alias]
def _setup_linear_regression():
    """
    Setup code for the pdf and contour tests.

    This code is from http://www.bayespy.org/examples/regression.html
    """
    np.random.seed(1)
    k = 2  # slope
    c = 5  # bias
    s = 2  # noise standard deviation
    x = np.arange(10)
    y = k*x + c + s*np.random.randn(10)
    X = np.vstack([x, np.ones(len(x))]).T
    B = GaussianARD(0, 1e-6, shape=(2,))
    F = SumMultiply('i,i', B, X)
    tau = Gamma(1e-3, 1e-3)
    Y = GaussianARD(F, tau)
    Y.observe(y)
    Q = VB(Y, B, tau)
    Q.update(repeat=1000)
    xh = np.linspace(-5, 15, 100)
    Xh = np.vstack([xh, np.ones(len(xh))]).T
    Fh = SumMultiply('i,i', B, Xh)
    return locals()
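A hedged sketch of how the returned environment might then be consumed by the pdf and contour tests the docstring mentions (the grid endpoints below are illustrative assumptions, not taken from the original tests):

env = _setup_linear_regression()
# Marginal posterior density of the noise precision on an assumed grid
bpplt.pdf(env['tau'], np.linspace(1e-6, 1, 100))
# Joint posterior of the regression weights (slope, bias) on an assumed grid
bpplt.contour(env['B'], np.linspace(1, 3, 100), np.linspace(3, 7, 100))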
Example 3: run
# Required import: from bayespy.nodes import GaussianARD [as alias]
# Or: from bayespy.nodes.GaussianARD import observe [as alias]
def run():
    k = 2
    c = 5
    s = 2
    x = np.arange(10)
    y = k * x + c + s * np.random.randn(10)
    X = np.vstack([x, np.ones(len(x))]).T
    B = GaussianARD(0, 1e-6, shape=(2,))
    F = SumMultiply('i,i', B, X)
    tau = Gamma(1e-3, 1e-3)
    Y = GaussianARD(F, tau)
    Y.observe(y)

    from bayespy.inference import VB
    Q = VB(Y, B, tau)
    Q.update(repeat=1000)

    xh = np.linspace(-5, 15, 100)
    Xh = np.vstack([xh, np.ones(len(xh))]).T
    Fh = SumMultiply('i,i', B, Xh)

    bpplt.pyplot.figure()
    bpplt.plot(Fh, x=xh, scale=2)
    bpplt.plot(y, x=x, color='r', marker='x', linestyle='None')
    bpplt.plot(k*xh + c, x=xh, color='r')
    bpplt.pyplot.show()
Example 4: test_mask_to_parent
# Required import: from bayespy.nodes import GaussianARD [as alias]
# Or: from bayespy.nodes.GaussianARD import observe [as alias]
def test_mask_to_parent(self):
    """
    Test the mask handling in Mixture node
    """
    K = 3
    Z = Categorical(np.ones(K)/K,
                    plates=(4,5))
    Mu = GaussianARD(0, 1,
                     shape=(2,),
                     plates=(4,K,5))
    Alpha = Gamma(1, 1,
                  plates=(4,K,5,2))
    X = Mixture(Z, GaussianARD, Mu, Alpha, cluster_plate=-2)
    Y = GaussianARD(X, 1)
    mask = np.reshape((np.mod(np.arange(4*5), 2) == 0),
                      (4,5))
    Y.observe(np.ones((4,5,2)),
              mask=mask)
    self.assertArrayEqual(Z.mask,
                          mask)
    self.assertArrayEqual(Mu.mask,
                          mask[:,None,:])
    self.assertArrayEqual(Alpha.mask,
                          mask[:,None,:,None])
    pass
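The expected masks in the assertions can be reproduced with plain NumPy broadcasting; a small standalone sketch (illustrative only, not BayesPy internals):

import numpy as np

mask = np.reshape(np.mod(np.arange(4*5), 2) == 0, (4, 5))
print(mask[:, None, :].shape)        # (4, 1, 5): a length-1 axis inserted for the K cluster plate of Mu
print(mask[:, None, :, None].shape)  # (4, 1, 5, 1): plus a trailing axis for Alpha's extra plate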
Example 5: test_riemannian_gradient
# Required import: from bayespy.nodes import GaussianARD [as alias]
# Or: from bayespy.nodes.GaussianARD import observe [as alias]
def test_riemannian_gradient(self):
    """Test Riemannian gradient of a Gamma node."""

    #
    # Without observations
    #

    # Construct model
    a = np.random.rand()
    b = np.random.rand()
    tau = Gamma(a, b)
    # Random initialization
    tau.initialize_from_parameters(np.random.rand(),
                                   np.random.rand())
    # Initial parameters
    phi0 = tau.phi
    # Gradient
    g = tau.get_riemannian_gradient()
    # Parameters after VB-EM update
    tau.update()
    phi1 = tau.phi
    # Check
    self.assertAllClose(g[0],
                        phi1[0] - phi0[0])
    self.assertAllClose(g[1],
                        phi1[1] - phi0[1])

    #
    # With observations
    #

    # Construct model
    a = np.random.rand()
    b = np.random.rand()
    tau = Gamma(a, b)
    mu = np.random.randn()
    Y = GaussianARD(mu, tau)
    Y.observe(np.random.randn())
    # Random initialization
    tau.initialize_from_parameters(np.random.rand(),
                                   np.random.rand())
    # Initial parameters
    phi0 = tau.phi
    # Gradient
    g = tau.get_riemannian_gradient()
    # Parameters after VB-EM update
    tau.update()
    phi1 = tau.phi
    # Check
    self.assertAllClose(g[0],
                        phi1[0] - phi0[0])
    self.assertAllClose(g[1],
                        phi1[1] - phi0[1])

    pass
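The identity this test exercises: for a conjugate-exponential-family node, the Riemannian (natural) gradient of the VB lower bound at the current natural parameters $\phi_0$ equals the step taken by a single VB-EM update,

$$\tilde{\nabla} \mathcal{L}(\phi_0) = \phi_1 - \phi_0,$$

where $\phi_1$ is the parameter vector after tau.update(); this is exactly what the two assertAllClose calls compare, both with and without observations.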
Example 6: test_lower_bound_contribution
# Required import: from bayespy.nodes import GaussianARD [as alias]
# Or: from bayespy.nodes.GaussianARD import observe [as alias]
def test_lower_bound_contribution(self):
    a = 15
    b = 21
    y = 4
    x = Gamma(a, b)
    x.observe(y)
    testing.assert_allclose(
        x.lower_bound_contribution(),
        (
            a * np.log(b) +
            (a - 1) * np.log(y) -
            b * y -
            special.gammaln(a)
        )
    )

    # Just one latent node so we'll get the exact marginal likelihood
    #
    # p(Y) = p(Y,X)/p(X|Y) = p(Y|X) * p(X) / p(X|Y)
    a = 2.3
    b = 4.1
    x = 1.9
    y = 4.8
    tau = Gamma(a, b)
    Y = GaussianARD(x, tau)
    Y.observe(y)
    mu = x
    nu = 2 * a
    s2 = b / a
    a_post = a + 0.5
    b_post = b + 0.5*(y - x)**2
    tau.update()
    testing.assert_allclose(
        [-b_post, a_post],
        tau.phi
    )
    testing.assert_allclose(
        Y.lower_bound_contribution() + tau.lower_bound_contribution(),
        (
            special.gammaln((nu+1)/2)
            - special.gammaln(nu/2)
            - 0.5 * np.log(nu)
            - 0.5 * np.log(np.pi)
            - 0.5 * np.log(s2)
            - 0.5 * (nu + 1) * np.log(
                1 + (y - mu)**2 / (nu * s2)
            )
        )
    )
    return
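The closed form being verified: marginalizing the Gamma-distributed precision out of the Gaussian likelihood yields a Student-t distribution, so the summed lower-bound contributions must equal

$$\log p(y) = \log\Gamma\!\left(\frac{\nu+1}{2}\right) - \log\Gamma\!\left(\frac{\nu}{2}\right) - \frac{1}{2}\log(\nu \pi s^2) - \frac{\nu+1}{2}\log\!\left(1 + \frac{(y-\mu)^2}{\nu s^2}\right)$$

with degrees of freedom $\nu = 2a$, location $\mu = x$ and scale $s^2 = b/a$, matching the expression inside the final assertion.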
Example 7: test_message_to_parents
# Required import: from bayespy.nodes import GaussianARD [as alias]
# Or: from bayespy.nodes.GaussianARD import observe [as alias]
def test_message_to_parents(self):
    """Check the gradient passed to the parent nodes."""
    D = 3
    X = Gaussian(np.random.randn(D), random.covariance(D))
    a = Gamma(np.random.rand(D), np.random.rand(D))
    Y = GaussianARD(X, a)
    Y.observe(np.random.randn(D))
    self.assert_message_to_parent(Y, X)
    self.assert_message_to_parent(Y, a)
    pass
Example 8: pca
# Required import: from bayespy.nodes import GaussianARD [as alias]
# Or: from bayespy.nodes.GaussianARD import observe [as alias]
def pca():
    np.random.seed(41)

    M = 10
    N = 3000
    D = 5

    # Construct the PCA model
    alpha = Gamma(1e-3, 1e-3, plates=(D,), name='alpha')
    W = GaussianARD(0, alpha, plates=(M,1), shape=(D,), name='W')
    X = GaussianARD(0, 1, plates=(1,N), shape=(D,), name='X')
    tau = Gamma(1e-3, 1e-3, name='tau')
    W.initialize_from_random()
    F = SumMultiply('d,d->', W, X)
    Y = GaussianARD(F, tau, name='Y')

    # Observe data
    data = np.sum(np.random.randn(M,1,D-1) * np.random.randn(1,N,D-1), axis=-1) + 1e-1 * np.random.randn(M,N)
    Y.observe(data)

    # Initialize VB engine
    Q = VB(Y, X, W, alpha, tau)

    # Take one update step (so phi is ok)
    Q.update(repeat=1)
    Q.save()

    # Run VB-EM
    Q.update(repeat=200)
    bpplt.pyplot.plot(np.cumsum(Q.cputime), Q.L, 'k-')

    # Restore the state
    Q.load()

    # Run Riemannian conjugate gradient
    #Q.optimize(X, alpha, maxiter=100, collapsed=[W, tau])
    Q.optimize(W, tau, maxiter=100, collapsed=[X, alpha])
    bpplt.pyplot.plot(np.cumsum(Q.cputime), Q.L, 'r:')

    bpplt.pyplot.show()
Example 9: test_message_to_parent
# Required import: from bayespy.nodes import GaussianARD [as alias]
# Or: from bayespy.nodes.GaussianARD import observe [as alias]
def test_message_to_parent(self):
    """
    Test the message to parents of Gate node.
    """

    # Unobserved and broadcasting
    Z = 2
    X = GaussianARD(0, 1, shape=(), plates=(3,))
    F = Gate(Z, X)
    Y = GaussianARD(F, 1)
    m = F._message_to_parent(0)
    self.assertEqual(len(m), 1)
    self.assertAllClose(m[0], 0*np.ones(3))
    m = F._message_to_parent(1)
    self.assertEqual(len(m), 2)
    self.assertAllClose(m[0]*np.ones(3), [0, 0, 0])
    self.assertAllClose(m[1]*np.ones(3), [0, 0, 0])

    # Gating scalar node
    Z = 2
    X = GaussianARD([1,2,3], 1, shape=(), plates=(3,))
    F = Gate(Z, X)
    Y = GaussianARD(F, 1)
    Y.observe(10)
    m = F._message_to_parent(0)
    self.assertAllClose(m[0], [10*1-0.5*2, 10*2-0.5*5, 10*3-0.5*10])
    m = F._message_to_parent(1)
    self.assertAllClose(m[0], [0, 0, 10])
    self.assertAllClose(m[1], [0, 0, -0.5])

    # Fixed X
    Z = 2
    X = [1,2,3]
    F = Gate(Z, X, moments=GaussianMoments(0))
    Y = GaussianARD(F, 1)
    Y.observe(10)
    m = F._message_to_parent(0)
    self.assertAllClose(m[0], [10*1-0.5*1, 10*2-0.5*4, 10*3-0.5*9])
    m = F._message_to_parent(1)
    self.assertAllClose(m[0], [0, 0, 10])
    self.assertAllClose(m[1], [0, 0, -0.5])

    # Uncertain gating
    Z = Categorical([0.2, 0.3, 0.5])
    X = GaussianARD([1,2,3], 1, shape=(), plates=(3,))
    F = Gate(Z, X)
    Y = GaussianARD(F, 1)
    Y.observe(10)
    m = F._message_to_parent(0)
    self.assertAllClose(m[0], [10*1-0.5*2, 10*2-0.5*5, 10*3-0.5*10])
    m = F._message_to_parent(1)
    self.assertAllClose(m[0], [0.2*10, 0.3*10, 0.5*10])
    self.assertAllClose(m[1], [-0.5*0.2, -0.5*0.3, -0.5*0.5])

    # Plates in Z
    Z = [2, 0]
    X = GaussianARD([1,2,3], 1, shape=(), plates=(3,))
    F = Gate(Z, X)
    Y = GaussianARD(F, 1)
    Y.observe([10, 20])
    m = F._message_to_parent(0)
    self.assertAllClose(m[0], [[10*1-0.5*2, 10*2-0.5*5, 10*3-0.5*10],
                               [20*1-0.5*2, 20*2-0.5*5, 20*3-0.5*10]])
    m = F._message_to_parent(1)
    self.assertAllClose(m[0], [20, 0, 10])
    self.assertAllClose(m[1], [-0.5, 0, -0.5])

    # Plates in X
    Z = 2
    X = GaussianARD([[1,2,3], [4,5,6]], 1, shape=(), plates=(2,3,))
    F = Gate(Z, X)
    Y = GaussianARD(F, 1)
    Y.observe([10, 20])
    m = F._message_to_parent(0)
    self.assertAllClose(m[0], [10*1-0.5*2 + 20*4-0.5*17,
                               10*2-0.5*5 + 20*5-0.5*26,
                               10*3-0.5*10 + 20*6-0.5*37])
    m = F._message_to_parent(1)
    self.assertAllClose(m[0], [[0, 0, 10],
                               [0, 0, 20]])
    self.assertAllClose(m[1]*np.ones((2,3)), [[0, 0, -0.5],
                                              [0, 0, -0.5]])

    # Gating non-default plate
    Z = 2
    X = GaussianARD([[1],[2],[3]], 1, shape=(), plates=(3,1))
    F = Gate(Z, X, gated_plate=-2)
    Y = GaussianARD(F, 1)
    Y.observe([10])
    m = F._message_to_parent(0)
    self.assertAllClose(m[0], [10*1-0.5*2, 10*2-0.5*5, 10*3-0.5*10])
    m = F._message_to_parent(1)
    self.assertAllClose(m[0], [[0], [0], [10]])
    self.assertAllClose(m[1], [[0], [0], [-0.5]])

    # Gating non-scalar node
    Z = 2
    X = GaussianARD([[1,4],[2,5],[3,6]], 1, shape=(2,), plates=(3,))
    F = Gate(Z, X)
    Y = GaussianARD(F, 1)
#......... (remaining code omitted) .........
Example 10: test_message_to_child
# Required import: from bayespy.nodes import GaussianARD [as alias]
# Or: from bayespy.nodes.GaussianARD import observe [as alias]
def test_message_to_child(self):
    """
    Test moments of GaussianARD.
    """

    # Check that moments have full shape when broadcasting
    X = GaussianARD(np.zeros((2,)),
                    np.ones((3,2)),
                    shape=(4,3,2))
    (u0, u1) = X._message_to_child()
    self.assertEqual(np.shape(u0),
                     (4,3,2))
    self.assertEqual(np.shape(u1),
                     (4,3,2,4,3,2))

    # Check the formula
    X = GaussianARD(2, 3)
    (u0, u1) = X._message_to_child()
    self.assertAllClose(u0, 2)
    self.assertAllClose(u1, 2**2 + 1/3)

    # Check the formula for multidimensional arrays
    X = GaussianARD(2*np.ones((2,1,4)),
                    3*np.ones((2,3,1)),
                    ndim=3)
    (u0, u1) = X._message_to_child()
    self.assertAllClose(u0, 2*np.ones((2,3,4)))
    self.assertAllClose(u1,
                        2**2 * np.ones((2,3,4,2,3,4))
                        + 1/3 * misc.identity(2,3,4))

    # Check the formula for dim-broadcasted mu
    X = GaussianARD(2*np.ones((3,1)),
                    3*np.ones((2,3,4)),
                    ndim=3)
    (u0, u1) = X._message_to_child()
    self.assertAllClose(u0, 2*np.ones((2,3,4)))
    self.assertAllClose(u1,
                        2**2 * np.ones((2,3,4,2,3,4))
                        + 1/3 * misc.identity(2,3,4))

    # Check the formula for dim-broadcasted alpha
    X = GaussianARD(2*np.ones((2,3,4)),
                    3*np.ones((3,1)),
                    ndim=3)
    (u0, u1) = X._message_to_child()
    self.assertAllClose(u0, 2*np.ones((2,3,4)))
    self.assertAllClose(u1,
                        2**2 * np.ones((2,3,4,2,3,4))
                        + 1/3 * misc.identity(2,3,4))

    # Check the formula for dim-broadcasted mu and alpha
    X = GaussianARD(2*np.ones((3,1)),
                    3*np.ones((3,1)),
                    shape=(2,3,4))
    (u0, u1) = X._message_to_child()
    self.assertAllClose(u0, 2*np.ones((2,3,4)))
    self.assertAllClose(u1,
                        2**2 * np.ones((2,3,4,2,3,4))
                        + 1/3 * misc.identity(2,3,4))

    # Check the formula for dim-broadcasted mu with plates
    mu = GaussianARD(2*np.ones((5,1,3,4)),
                     np.ones((5,1,3,4)),
                     shape=(3,4),
                     plates=(5,1))
    X = GaussianARD(mu,
                    3*np.ones((5,2,3,4)),
                    shape=(2,3,4),
                    plates=(5,))
    (u0, u1) = X._message_to_child()
    self.assertAllClose(u0, 2*np.ones((5,2,3,4)))
    self.assertAllClose(u1,
                        2**2 * np.ones((5,2,3,4,2,3,4))
                        + 1/3 * misc.identity(2,3,4))

    # Check posterior
    X = GaussianARD(2, 3)
    Y = GaussianARD(X, 1)
    Y.observe(10)
    X.update()
    (u0, u1) = X._message_to_child()
    self.assertAllClose(u0,
                        1/(3+1) * (3*2 + 1*10))
    self.assertAllClose(u1,
                        (1/(3+1) * (3*2 + 1*10))**2 + 1/(3+1))
    pass
Example 11: check
# Required import: from bayespy.nodes import GaussianARD [as alias]
# Or: from bayespy.nodes.GaussianARD import observe [as alias]
def check(indices, plates, shape, axis=-1, use_mask=False):
    mu = np.random.rand(*(plates+shape))
    alpha = np.random.rand(*(plates+shape))
    X = GaussianARD(mu, alpha, shape=shape, plates=plates)
    Y = Take(X, indices, plate_axis=axis)
    Z = GaussianARD(Y, 1, shape=shape)
    z = np.random.randn(*(Z.get_shape(0)))
    if use_mask:
        mask = np.mod(np.reshape(np.arange(np.prod(Z.plates)), Z.plates), 2) != 0
    else:
        mask = True
    Z.observe(z, mask=mask)
    X.update()
    (x0, x1) = X.get_moments()

    # For comparison, build the same model brute force
    X = GaussianARD(mu, alpha, shape=shape, plates=plates)
    # Number of trailing plate axes before the take axis
    N = len(X.plates) + axis
    # Reshape the take axes into a single axis
    z_shape = X.plates[:axis] + (-1,)
    if axis < -1:
        z_shape = z_shape + X.plates[(axis+1):]
    z_shape = z_shape + shape
    z = np.reshape(z, z_shape)
    # Reshape the take axes into a single axis
    if use_mask:
        mask_shape = X.plates[:axis] + (-1,)
        if axis < -1:
            mask_shape = mask_shape + X.plates[(axis+1):]
        mask = np.reshape(mask, mask_shape)
    for (j, i) in enumerate(range(np.size(indices))):
        ind = np.array(indices).flatten()[i]
        index_x = N*(slice(None),) + (ind,)
        index_z = N*(slice(None),) + (j,)
        # print(index)
        Xi = X[index_x]
        zi = z[index_z]
        Zi = GaussianARD(Xi, 1, ndim=len(shape))
        if use_mask:
            maski = mask[index_z]
        else:
            maski = True
        Zi.observe(zi, mask=maski)
    X.update()

    self.assertAllClose(
        x0,
        X.get_moments()[0],
    )
    self.assertAllClose(
        x1,
        X.get_moments()[1],
    )

    return
Example 12: test_gradient
# Required import: from bayespy.nodes import GaussianARD [as alias]
# Or: from bayespy.nodes.GaussianARD import observe [as alias]
def test_gradient(self):
    """Test standard gradient of a Gamma node."""
    D = 3

    np.random.seed(42)

    #
    # Without observations
    #

    # Construct model
    a = np.random.rand(D)
    b = np.random.rand(D)
    tau = Gamma(a, b)
    Q = VB(tau)
    # Random initialization
    tau.initialize_from_parameters(np.random.rand(D),
                                   np.random.rand(D))
    # Initial parameters
    phi0 = tau.phi
    # Gradient
    rg = tau.get_riemannian_gradient()
    g = tau.get_gradient(rg)
    # Numerical gradient
    eps = 1e-8
    p0 = tau.get_parameters()
    l0 = Q.compute_lowerbound(ignore_masked=False)
    g_num = [np.zeros(D), np.zeros(D)]
    for i in range(D):
        e = np.zeros(D)
        e[i] = eps
        p1 = p0[0] + e
        tau.set_parameters([p1, p0[1]])
        l1 = Q.compute_lowerbound(ignore_masked=False)
        g_num[0][i] = (l1 - l0) / eps
    for i in range(D):
        e = np.zeros(D)
        e[i] = eps
        p1 = p0[1] + e
        tau.set_parameters([p0[0], p1])
        l1 = Q.compute_lowerbound(ignore_masked=False)
        g_num[1][i] = (l1 - l0) / eps
    # Check
    self.assertAllClose(g[0],
                        g_num[0])
    self.assertAllClose(g[1],
                        g_num[1])

    #
    # With observations
    #

    # Construct model
    a = np.random.rand(D)
    b = np.random.rand(D)
    tau = Gamma(a, b)
    mu = np.random.randn(D)
    Y = GaussianARD(mu, tau)
    Y.observe(np.random.randn(D))
    Q = VB(Y, tau)
    # Random initialization
    tau.initialize_from_parameters(np.random.rand(D),
                                   np.random.rand(D))
    # Initial parameters
    phi0 = tau.phi
    # Gradient
    rg = tau.get_riemannian_gradient()
    g = tau.get_gradient(rg)
    # Numerical gradient
    eps = 1e-8
    p0 = tau.get_parameters()
    l0 = Q.compute_lowerbound(ignore_masked=False)
    g_num = [np.zeros(D), np.zeros(D)]
    for i in range(D):
        e = np.zeros(D)
        e[i] = eps
        p1 = p0[0] + e
        tau.set_parameters([p1, p0[1]])
        l1 = Q.compute_lowerbound(ignore_masked=False)
        g_num[0][i] = (l1 - l0) / eps
    for i in range(D):
        e = np.zeros(D)
        e[i] = eps
        p1 = p0[1] + e
        tau.set_parameters([p0[0], p1])
        l1 = Q.compute_lowerbound(ignore_masked=False)
        g_num[1][i] = (l1 - l0) / eps
    # Check
    self.assertAllClose(g[0],
                        g_num[0])
    self.assertAllClose(g[1],
                        g_num[1])

    pass
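The numerical reference here is a one-sided forward difference: each gradient component is approximated as

$$g_i \approx \frac{\mathcal{L}(\phi + \varepsilon e_i) - \mathcal{L}(\phi)}{\varepsilon}, \qquad \varepsilon = 10^{-8},$$

computed once per component for each of the two parameter blocks returned by tau.get_parameters().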
Example 13: precision
# Required import: from bayespy.nodes import GaussianARD [as alias]
# Or: from bayespy.nodes.GaussianARD import observe [as alias]
# The model (the equations preceding this comment were lost in extraction; reconstructed from the code below):
#
# $y_n \sim \mathcal{N}(\mu, \tau)$ for $n = 1, \ldots, 10$, with priors $\mu \sim \mathcal{N}(0, 10^{-6})$ and $\tau \sim \mathcal{G}(10^{-6}, 10^{-6})$,
#
# where $\mathcal{N}$ is the Gaussian distribution parameterized by its mean and precision (i.e., inverse variance), and $\mathcal{G}$ is the gamma distribution parameterized by its shape and rate parameters. Note that we have given quite uninformative priors to the variables $\mu$ and $\tau$. This simple model can also be shown as a directed factor graph (figure not included in this excerpt).

# This model can be constructed in BayesPy as follows:

# In[2]:
from bayespy.nodes import GaussianARD, Gamma
mu = GaussianARD(0, 1e-6)
tau = Gamma(1e-6, 1e-6)
y = GaussianARD(mu, tau, plates=(10,))

# In[3]:
y.observe(data)  # `data` is assumed to be a length-10 array of observations defined earlier in the notebook

# Next we want to estimate the posterior distribution. In principle, we could use different inference engines (e.g., MCMC or EP), but currently only the variational Bayesian (VB) engine is implemented. The engine is initialized by giving it all the nodes of the model:

# In[4]:
from bayespy.inference import VB
Q = VB(mu, tau, y)

# The inference algorithm can be run for as long as desired (at most 20 iterations in this case):

# In[5]:
Q.update(repeat=20)
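A hedged sketch of what one might do after the VB run (the node API calls are standard BayesPy, but this continuation is not part of the original notebook):

# Inspect the approximate posteriors of the unknowns
print(mu.get_moments()[0])    # posterior mean of mu
print(tau.get_moments()[0])   # posterior mean of tau
Q.update(repeat=100)          # or keep iterating until the lower bound converges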
Example 14: test_message_to_parent
# Required import: from bayespy.nodes import GaussianARD [as alias]
# Or: from bayespy.nodes.GaussianARD import observe [as alias]
def test_message_to_parent(self):
    """
    Test the message to parents of Concatenate node.
    """

    # Two parents without shapes
    X1 = GaussianARD(0, 1, plates=(2,), shape=())
    X2 = GaussianARD(0, 1, plates=(3,), shape=())
    Z = Concatenate(X1, X2)
    Y = GaussianARD(Z, 1)
    Y.observe(np.random.randn(*Y.get_shape(0)))
    m1 = X1._message_from_children()
    m2 = X2._message_from_children()
    m = Z._message_from_children()
    self.assertAllClose((m[0]*np.ones((5,)))[:2],
                        m1[0]*np.ones((2,)))
    self.assertAllClose((m[1]*np.ones((5,)))[:2],
                        m1[1]*np.ones((2,)))
    self.assertAllClose((m[0]*np.ones((5,)))[2:],
                        m2[0]*np.ones((3,)))
    self.assertAllClose((m[1]*np.ones((5,)))[2:],
                        m2[1]*np.ones((3,)))

    # Two parents with shapes
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", FutureWarning)
        X1 = GaussianARD(0, 1, plates=(2,), shape=(4,6))
        X2 = GaussianARD(0, 1, plates=(3,), shape=(4,6))
    Z = Concatenate(X1, X2)
    Y = GaussianARD(Z, 1)
    Y.observe(np.random.randn(*Y.get_shape(0)))
    m1 = X1._message_from_children()
    m2 = X2._message_from_children()
    m = Z._message_from_children()
    self.assertAllClose((m[0]*np.ones((5,4,6)))[:2],
                        m1[0]*np.ones((2,4,6)))
    self.assertAllClose((m[1]*np.ones((5,4,6,4,6)))[:2],
                        m1[1]*np.ones((2,4,6,4,6)))
    self.assertAllClose((m[0]*np.ones((5,4,6)))[2:],
                        m2[0]*np.ones((3,4,6)))
    self.assertAllClose((m[1]*np.ones((5,4,6,4,6)))[2:],
                        m2[1]*np.ones((3,4,6,4,6)))

    # Two parents with non-default concatenation axis
    X1 = GaussianARD(0, 1, plates=(2,4), shape=())
    X2 = GaussianARD(0, 1, plates=(3,4), shape=())
    Z = Concatenate(X1, X2, axis=-2)
    Y = GaussianARD(Z, 1)
    Y.observe(np.random.randn(*Y.get_shape(0)))
    m1 = X1._message_from_children()
    m2 = X2._message_from_children()
    m = Z._message_from_children()
    self.assertAllClose((m[0]*np.ones((5,4)))[:2],
                        m1[0]*np.ones((2,4)))
    self.assertAllClose((m[1]*np.ones((5,4)))[:2],
                        m1[1]*np.ones((2,4)))
    self.assertAllClose((m[0]*np.ones((5,4)))[2:],
                        m2[0]*np.ones((3,4)))
    self.assertAllClose((m[1]*np.ones((5,4)))[2:],
                        m2[1]*np.ones((3,4)))

    # Constant parent
    X1 = np.random.randn(2,4,6)
    X2 = GaussianARD(0, 1, plates=(3,), shape=(4,6))
    Z = Concatenate(X1, X2)
    Y = GaussianARD(Z, 1)
    Y.observe(np.random.randn(*Y.get_shape(0)))
    m1 = Z._message_to_parent(0)
    m2 = X2._message_from_children()
    m = Z._message_from_children()
    self.assertAllClose((m[0]*np.ones((5,4,6)))[:2],
                        m1[0]*np.ones((2,4,6)))
    self.assertAllClose((m[1]*np.ones((5,4,6,4,6)))[:2],
                        m1[1]*np.ones((2,4,6,4,6)))
    self.assertAllClose((m[0]*np.ones((5,4,6)))[2:],
                        m2[0]*np.ones((3,4,6)))
    self.assertAllClose((m[1]*np.ones((5,4,6,4,6)))[2:],
                        m2[1]*np.ones((3,4,6,4,6)))

    pass
Example 15: test_message_to_parent
# Required import: from bayespy.nodes import GaussianARD [as alias]
# Or: from bayespy.nodes.GaussianARD import observe [as alias]
def test_message_to_parent(self):
    """
    Test the message to parents of Mixture node.
    """
    K = 3

    # Broadcasting the moments on the cluster axis
    Mu = GaussianARD(2, 1,
                     ndim=0,
                     plates=(K,))
    (mu, mumu) = Mu._message_to_child()
    Alpha = Gamma(3, 1,
                  plates=(K,))
    (alpha, logalpha) = Alpha._message_to_child()
    z = Categorical(np.ones(K)/K)
    X = Mixture(z, GaussianARD, Mu, Alpha)
    tau = 4
    Y = GaussianARD(X, tau)
    y = 5
    Y.observe(y)
    (x, xx) = X._message_to_child()
    m = X._message_to_parent(0)
    self.assertAllClose(m[0],
                        random.gaussian_logpdf(xx*alpha,
                                               x*alpha*mu,
                                               mumu*alpha,
                                               logalpha,
                                               0))
    m = X._message_to_parent(1)
    self.assertAllClose(m[0],
                        1/K * (alpha*x) * np.ones(3))
    self.assertAllClose(m[1],
                        -0.5 * 1/K * alpha * np.ones(3))

    # Some parameters do not have cluster plate axis
    Mu = GaussianARD(2, 1,
                     ndim=0,
                     plates=(K,))
    (mu, mumu) = Mu._message_to_child()
    Alpha = Gamma(3, 1)  # Note: no cluster plate axis!
    (alpha, logalpha) = Alpha._message_to_child()
    z = Categorical(np.ones(K)/K)
    X = Mixture(z, GaussianARD, Mu, Alpha)
    tau = 4
    Y = GaussianARD(X, tau)
    y = 5
    Y.observe(y)
    (x, xx) = X._message_to_child()
    m = X._message_to_parent(0)
    self.assertAllClose(m[0],
                        random.gaussian_logpdf(xx*alpha,
                                               x*alpha*mu,
                                               mumu*alpha,
                                               logalpha,
                                               0))
    m = X._message_to_parent(1)
    self.assertAllClose(m[0],
                        1/K * (alpha*x) * np.ones(3))
    self.assertAllClose(m[1],
                        -0.5 * 1/K * alpha * np.ones(3))

    # Cluster assignments do not have as many plate axes as parameters.
    M = 2
    Mu = GaussianARD(2, 1,
                     ndim=0,
                     plates=(K,M))
    (mu, mumu) = Mu._message_to_child()
    Alpha = Gamma(3, 1,
                  plates=(K,M))
    (alpha, logalpha) = Alpha._message_to_child()
    z = Categorical(np.ones(K)/K)
    X = Mixture(z, GaussianARD, Mu, Alpha, cluster_plate=-2)
    tau = 4
    Y = GaussianARD(X, tau)
    y = 5 * np.ones(M)
    Y.observe(y)
    (x, xx) = X._message_to_child()
    m = X._message_to_parent(0)
    self.assertAllClose(m[0]*np.ones(K),
                        np.sum(random.gaussian_logpdf(xx*alpha,
                                                      x*alpha*mu,
                                                      mumu*alpha,
                                                      logalpha,
                                                      0) *
                               np.ones((K,M)),
                               axis=-1))
    m = X._message_to_parent(1)
    self.assertAllClose(m[0] * np.ones((K,M)),
                        1/K * (alpha*x) * np.ones((K,M)))
    self.assertAllClose(m[1] * np.ones((K,M)),
                        -0.5 * 1/K * alpha * np.ones((K,M)))

    pass