This article collects typical usage examples of the Python method bayespy.nodes.GaussianARD.initialize_from_parameters. If you are unsure what GaussianARD.initialize_from_parameters does or how to use it, the selected code examples below may help. You can also read more about its containing class, bayespy.nodes.GaussianARD.
The following presents 2 code examples of GaussianARD.initialize_from_parameters, sorted by popularity by default.
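Before the examples, here is a minimal usage sketch (not taken from the examples below) showing where initialize_from_parameters typically fits: it sets the starting point of a node's approximate posterior from explicit values before running VB inference. The toy model, data, and variable names here are illustrative assumptions, not part of the examples.

    # Minimal usage sketch (illustrative; toy model and data are assumptions)
    import numpy as np
    from bayespy.nodes import GaussianARD
    from bayespy.inference import VB

    mu = GaussianARD(0, 1e-3)               # prior on an unknown mean
    y = GaussianARD(mu, 1, plates=(10,))    # ten unit-precision observations
    y.observe(np.random.randn(10) + 2.0)    # hypothetical data

    # Initialize the posterior of mu from explicit (mean, precision) values
    # instead of from the prior, then run VB updates as usual.
    mu.initialize_from_parameters(2.0, 10.0)
    Q = VB(y, mu)
    Q.update(repeat=20)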
Example 1: test_initialization
# Required import: from bayespy.nodes import GaussianARD
# Or: from bayespy.nodes.GaussianARD import initialize_from_parameters
# (The example also uses numpy as np and the linalg and misc helpers from bayespy.utils.)
def test_initialization(self):
    """
    Test initialization methods of GaussianARD
    """

    X = GaussianARD(1, 2, shape=(2,), plates=(3,))

    # Prior initialization
    mu = 1 * np.ones((3, 2))
    alpha = 2 * np.ones((3, 2))
    X.initialize_from_prior()
    u = X._message_to_child()
    self.assertAllClose(u[0]*np.ones((3,2)),
                        mu)
    self.assertAllClose(u[1]*np.ones((3,2,2)),
                        linalg.outer(mu, mu, ndim=1) +
                        misc.diag(1/alpha, ndim=1))

    # Parameter initialization
    mu = np.random.randn(3, 2)
    alpha = np.random.rand(3, 2)
    X.initialize_from_parameters(mu, alpha)
    u = X._message_to_child()
    self.assertAllClose(u[0], mu)
    self.assertAllClose(u[1], linalg.outer(mu, mu, ndim=1) +
                              misc.diag(1/alpha, ndim=1))

    # Value initialization
    x = np.random.randn(3, 2)
    X.initialize_from_value(x)
    u = X._message_to_child()
    self.assertAllClose(u[0], x)
    self.assertAllClose(u[1], linalg.outer(x, x, ndim=1))

    # Random initialization
    X.initialize_from_random()

    pass
Example 2: test_annealing
# Required import: from bayespy.nodes import GaussianARD
# Or: from bayespy.nodes.GaussianARD import initialize_from_parameters
# (The example also uses the VB inference engine from bayespy.inference.)
def test_annealing(self):

    X = GaussianARD(3, 4)
    X.initialize_from_parameters(-1, 6)
    Q = VB(X)
    Q.set_annealing(0.1)

    #
    # Check that the gradient is correct
    #

    # Initial parameters
    phi0 = X.phi

    # Gradient
    rg = X.get_riemannian_gradient()
    g = X.get_gradient(rg)

    # Numerical gradient of the first parameter
    eps = 1e-6
    p0 = X.get_parameters()
    l0 = Q.compute_lowerbound(ignore_masked=False)
    g_num = [(), ()]
    e = eps
    p1 = p0[0] + e
    X.set_parameters([p1, p0[1]])
    l1 = Q.compute_lowerbound(ignore_masked=False)
    g_num[0] = (l1 - l0) / eps

    # Numerical gradient of the second parameter
    p1 = p0[1] + e
    X.set_parameters([p0[0], p1])
    l1 = Q.compute_lowerbound(ignore_masked=False)
    g_num[1] = (l1 - l0) / eps

    # Check
    self.assertAllClose(g[0],
                        g_num[0])
    self.assertAllClose(g[1],
                        g_num[1])

    #
    # Gradient should be zero after updating
    #
    X.update()

    # Initial parameters
    phi0 = X.phi

    # Numerical gradient of the first parameter
    eps = 1e-8
    p0 = X.get_parameters()
    l0 = Q.compute_lowerbound(ignore_masked=False)
    g_num = [(), ()]
    e = eps
    p1 = p0[0] + e
    X.set_parameters([p1, p0[1]])
    l1 = Q.compute_lowerbound(ignore_masked=False)
    g_num[0] = (l1 - l0) / eps

    # Numerical gradient of the second parameter
    p1 = p0[1] + e
    X.set_parameters([p0[0], p1])
    l1 = Q.compute_lowerbound(ignore_masked=False)
    g_num[1] = (l1 - l0) / eps

    # Check
    self.assertAllClose(0,
                        g_num[0],
                        atol=1e-5)
    self.assertAllClose(0,
                        g_num[1],
                        atol=1e-5)

    # Not at the optimum
    X.initialize_from_parameters(-1, 6)

    # Initial parameters
    phi0 = X.phi

    # Gradient
    g = X.get_riemannian_gradient()

    # Parameters after VB-EM update
    X.update()
    phi1 = X.phi

    # Check
    self.assertAllClose(g[0],
                        phi1[0] - phi0[0])
    self.assertAllClose(g[1],
                        phi1[1] - phi0[1])

    pass
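For reference, the finite-difference check repeated in Example 2 can be factored into a small helper. This is only a sketch built from the same calls that appear above (get_parameters, set_parameters, compute_lowerbound); the helper name and signature are hypothetical, not part of BayesPy.

    def numerical_gradient_entry(Q, X, index, eps=1e-6):
        # Perturb one natural parameter, recompute the VB lower bound, and
        # return the forward difference quotient; restore the parameters after.
        p0 = X.get_parameters()
        l0 = Q.compute_lowerbound(ignore_masked=False)
        p = list(p0)
        p[index] = p0[index] + eps
        X.set_parameters(p)
        l1 = Q.compute_lowerbound(ignore_masked=False)
        X.set_parameters(p0)
        return (l1 - l0) / eps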