This article collects typical usage examples of the Python function tensorflow.matrix_diag. If you have been wondering what exactly matrix_diag does, how to use it, or where to find examples of it, the curated code samples below should help.
The following 15 code examples of the matrix_diag function are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
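Before the examples, a quick orientation (a minimal sketch using the TF 1.x API; in TensorFlow 2 the same op is exposed as tf.linalg.diag): tf.matrix_diag takes a tensor of diagonals with shape [..., N] and returns square matrices of shape [..., N, N], treating all leading dimensions as batch dimensions.

import tensorflow as tf

# A vector becomes a single diagonal matrix ...
v = tf.constant([1., 2., 3.])
m = tf.matrix_diag(v)            # shape (3, 3)

# ... and leading dimensions are treated as batch dimensions.
batch = tf.zeros([2, 5, 3])
batched = tf.matrix_diag(batch)  # shape (2, 5, 3, 3)

with tf.Session() as sess:
    print(sess.run(m))
    # [[1. 0. 0.]
    #  [0. 2. 0.]
    #  [0. 0. 3.]]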
Example 1: _expand_independent_outputs
def _expand_independent_outputs(fvar, full_cov, full_output_cov):
    """
    Reshapes fvar to the correct shape, specified by `full_cov` and `full_output_cov`.

    :param fvar: has shape N x P (full_cov = False) or P x N x N (full_cov = True).
    :return:
    1. full_cov: True and full_output_cov: True
       fvar N x P x N x P
    2. full_cov: True and full_output_cov: False
       fvar P x N x N
    3. full_cov: False and full_output_cov: True
       fvar N x P x P
    4. full_cov: False and full_output_cov: False
       fvar N x P
    """
    if full_cov and full_output_cov:
        fvar = tf.matrix_diag(tf.transpose(fvar))   # N x N x P x P
        fvar = tf.transpose(fvar, [0, 2, 1, 3])  # N x P x N x P
    if not full_cov and full_output_cov:
        fvar = tf.matrix_diag(fvar)   # N x P x P
    if full_cov and not full_output_cov:
        pass  # P x N x N
    if not full_cov and not full_output_cov:
        pass  # N x P
    return fvar
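A quick shape check of the four cases above (a minimal sketch with illustrative sizes; N is the number of inputs, P the number of outputs):

import tensorflow as tf

N, P = 5, 3
fvar_marginal = tf.zeros([N, P])   # full_cov=False input
fvar_full = tf.zeros([P, N, N])    # full_cov=True input

# Case 1: full_cov=True, full_output_cov=True -> N x P x N x P
case1 = tf.transpose(tf.matrix_diag(tf.transpose(fvar_full)), [0, 2, 1, 3])
assert case1.shape.as_list() == [N, P, N, P]

# Case 3: full_cov=False, full_output_cov=True -> N x P x P
case3 = tf.matrix_diag(fvar_marginal)
assert case3.shape.as_list() == [N, P, P]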
Example 2: _expectation
def _expectation(p, rbf_kern, feat1, lin_kern, feat2, nghp=None):
    """
    Compute the expectation:
    expectation[n] = <Ka_{Z1, x_n} Kb_{x_n, Z2}>_p(x_n)
        - Ka_{.,.} :: RBF kernel
        - Kb_{.,.} :: Linear kernel
    Different Z1 and Z2 are handled if p is diagonal and the RBF and Linear kernels
    have disjoint active_dims, in which case the joint expectations simplify into a
    product of expectations.

    :return: NxM1xM2
    """
    if rbf_kern.on_separate_dims(lin_kern) and isinstance(p, DiagonalGaussian):  # no joint expectations required
        eKxz1 = expectation(p, (rbf_kern, feat1))
        eKxz2 = expectation(p, (lin_kern, feat2))
        return eKxz1[:, :, None] * eKxz2[:, None, :]

    if feat1 != feat2:
        raise NotImplementedError("Features have to be the same for both kernels.")

    if rbf_kern.active_dims != lin_kern.active_dims:
        raise NotImplementedError("active_dims have to be the same for both kernels.")

    with params_as_tensors_for(rbf_kern), params_as_tensors_for(lin_kern), \
         params_as_tensors_for(feat1), params_as_tensors_for(feat2):
        # use only active dimensions
        Xcov = rbf_kern._slice_cov(tf.matrix_diag(p.cov) if isinstance(p, DiagonalGaussian) else p.cov)
        Z, Xmu = rbf_kern._slice(feat1.Z, p.mu)

        N = tf.shape(Xmu)[0]
        D = tf.shape(Xmu)[1]

        lin_kern_variances = lin_kern.variance if lin_kern.ARD \
            else tf.zeros((D,), dtype=settings.tf_float) + lin_kern.variance

        rbf_kern_lengthscales = rbf_kern.lengthscales if rbf_kern.ARD \
            else tf.zeros((D,), dtype=settings.tf_float) + rbf_kern.lengthscales

        ## Begin RBF eKxz code:
        chol_L_plus_Xcov = tf.cholesky(tf.matrix_diag(rbf_kern_lengthscales ** 2) + Xcov)  # NxDxD

        Z_transpose = tf.transpose(Z)
        all_diffs = Z_transpose - tf.expand_dims(Xmu, 2)  # NxDxM
        exponent_mahalanobis = tf.matrix_triangular_solve(chol_L_plus_Xcov, all_diffs, lower=True)  # NxDxM
        exponent_mahalanobis = tf.reduce_sum(tf.square(exponent_mahalanobis), 1)  # NxM
        exponent_mahalanobis = tf.exp(-0.5 * exponent_mahalanobis)  # NxM

        sqrt_det_L = tf.reduce_prod(rbf_kern_lengthscales)
        sqrt_det_L_plus_Xcov = tf.exp(tf.reduce_sum(tf.log(tf.matrix_diag_part(chol_L_plus_Xcov)), axis=1))
        determinants = sqrt_det_L / sqrt_det_L_plus_Xcov  # N

        eKxz_rbf = rbf_kern.variance * (determinants[:, None] * exponent_mahalanobis)  # NxM
        ## End RBF eKxz code.

        tiled_Z = tf.tile(tf.expand_dims(Z_transpose, 0), (N, 1, 1))  # NxDxM
        z_L_inv_Xcov = tf.matmul(tiled_Z, Xcov / rbf_kern_lengthscales[:, None] ** 2., transpose_a=True)  # NxMxD

        cross_eKzxKxz = tf.cholesky_solve(
            chol_L_plus_Xcov, (lin_kern_variances * rbf_kern_lengthscales ** 2.)[..., None] * tiled_Z)  # NxDxM

        cross_eKzxKxz = tf.matmul((z_L_inv_Xcov + Xmu[:, None, :]) * eKxz_rbf[..., None], cross_eKzxKxz)  # NxMxM
        return cross_eKzxKxz
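When the kernels act on disjoint active_dims, the early-return branch builds the NxM1xM2 result as a broadcast outer product of two independent NxM expectations. A minimal NumPy sketch of that broadcasting pattern:

import numpy as np

N, M1, M2 = 4, 6, 7
eKxz1 = np.random.rand(N, M1)
eKxz2 = np.random.rand(N, M2)

out = eKxz1[:, :, None] * eKxz2[:, None, :]  # NxM1xM2
assert out.shape == (N, M1, M2)
assert np.isclose(out[1, 2, 3], eKxz1[1, 2] * eKxz2[1, 3])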
Example 3: Test
def Test(self):
    np.random.seed(1)
    n = shape_[-1]
    batch_shape = shape_[:-2]
    a = np.random.uniform(
        low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(dtype_)
    a += a.T
    a = np.tile(a, batch_shape + (1, 1))
    if dtype_ == np.float32:
        atol = 1e-4
    else:
        atol = 1e-12
    for compute_v in False, True:
        np_e, np_v = np.linalg.eig(a)
        with self.test_session():
            if compute_v:
                tf_e, tf_v = tf.self_adjoint_eig(tf.constant(a))

                # Check that V*diag(E)*V^T is close to A.
                a_ev = tf.batch_matmul(
                    tf.batch_matmul(tf_v, tf.matrix_diag(tf_e)), tf_v, adj_y=True)
                self.assertAllClose(a_ev.eval(), a, atol=atol)

                # Compare to numpy.linalg.eig.
                CompareEigenDecompositions(self, np_e, np_v, tf_e.eval(), tf_v.eval(),
                                           atol)
            else:
                tf_e = tf.self_adjoint_eigvals(tf.constant(a))
                self.assertAllClose(
                    np.sort(np_e, -1), np.sort(tf_e.eval(), -1), atol=atol)
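Note that tf.batch_matmul and its adj_y argument come from the pre-1.0 TensorFlow API; in TF 1.x the same product is written tf.matmul(..., adjoint_b=True). The reconstruction being checked is the eigendecomposition identity for symmetric matrices, A = V diag(E) V^T. The same check in plain NumPy (a sketch):

import numpy as np

a = np.random.uniform(-1.0, 1.0, size=(4, 4))
a += a.T  # symmetrize
e, v = np.linalg.eigh(a)
assert np.allclose(np.matmul(np.matmul(v, np.diag(e)), v.T), a)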
Example 4: testSampleWithBroadcastScale
def testSampleWithBroadcastScale(self):
    # mu corresponds to a 2-batch of 3-variate normals
    mu = np.zeros([2, 3])
    # diag corresponds to no batches of 3-variate normals
    diag = np.ones([3])
    with self.test_session():
        dist = tfd.VectorExponentialDiag(mu, diag, validate_args=True)

        mean = dist.mean()
        self.assertAllEqual([2, 3], mean.get_shape())
        self.assertAllClose(mu + diag, mean.eval())

        n = int(1e4)
        samps = dist.sample(n, seed=0).eval()
        samps_centered = samps - samps.mean(axis=0)
        cov_mat = tf.matrix_diag(diag).eval()**2
        sample_cov = np.matmul(samps_centered.transpose([1, 2, 0]),
                               samps_centered.transpose([1, 0, 2])) / n

        self.assertAllClose(mu + diag, samps.mean(axis=0),
                            atol=0.10, rtol=0.05)
        self.assertAllClose([cov_mat, cov_mat], sample_cov,
                            atol=0.10, rtol=0.05)
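The sample_cov line computes one covariance matrix per batch member with a batched matmul; the two transposes move the sample axis into the contraction position. An equivalent einsum formulation (a sketch, assuming samps has shape [n, batch, dim]):

import numpy as np

n = 10000
samps = np.random.randn(n, 2, 3)
centered = samps - samps.mean(axis=0)

cov_a = np.matmul(centered.transpose([1, 2, 0]),
                  centered.transpose([1, 0, 2])) / n
cov_b = np.einsum('nbi,nbj->bij', centered, centered) / n
assert np.allclose(cov_a, cov_b)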
Example 5: test_broadcast_apply_and_solve
def test_broadcast_apply_and_solve(self):
    # These cannot be done in the automated (base test class) tests since they
    # test shapes that tf.matmul cannot handle.
    # In particular, tf.matmul does not broadcast.
    with self.test_session() as sess:
        x = tf.random_normal(shape=(2, 2, 3, 4))

        # This LinearOperatorDiag will be broadcast to (2, 2, 3, 3) during solve
        # and apply with 'x' as the argument.
        diag = tf.random_uniform(shape=(2, 1, 3))
        operator = linalg.LinearOperatorDiag(diag)
        self.assertAllEqual((2, 1, 3, 3), operator.shape)

        # Create a batch matrix with the broadcast shape of operator.
        diag_broadcast = tf.concat(1, (diag, diag))
        mat = tf.matrix_diag(diag_broadcast)
        self.assertAllEqual((2, 2, 3, 3), mat.get_shape())  # being pedantic.

        operator_apply = operator.apply(x)
        mat_apply = tf.matmul(mat, x)
        self.assertAllEqual(operator_apply.get_shape(), mat_apply.get_shape())
        self.assertAllClose(*sess.run([operator_apply, mat_apply]))

        operator_solve = operator.solve(x)
        mat_solve = tf.matrix_solve(mat, x)
        self.assertAllEqual(operator_solve.get_shape(), mat_solve.get_shape())
        self.assertAllClose(*sess.run([operator_solve, mat_solve]))
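(Note that tf.concat(1, ...) above uses the pre-1.0 argument order; in TF 1.x it is tf.concat(values, axis=1).) Why the operator broadcasts where this version of tf.matmul does not: applying a diagonal matrix never requires materializing it; it is elementwise scaling along the row dimension, and elementwise ops broadcast freely. A NumPy sketch of the equivalence, with illustrative shapes matching the test:

import numpy as np

diag = np.random.rand(2, 1, 3)
x = np.random.rand(2, 2, 3, 4)

# Dense route: build the batch of diagonal matrices, then matmul
# (np.matmul broadcasts batch dimensions).
mat = np.zeros((2, 1, 3, 3))
idx = np.arange(3)
mat[..., idx, idx] = diag
dense = np.matmul(mat, x)

# What LinearOperatorDiag effectively does: scale the rows of x.
fast = diag[..., :, None] * x

assert np.allclose(dense, fast)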
Example 6: K
def K(self, X, X2=None, presliced=False):
    if X2 is None:
        d = tf.fill(tf.stack([tf.shape(X)[0]]), tf.squeeze(self.variance))
        return tf.matrix_diag(d)
    else:
        shape = tf.stack([tf.shape(X)[0], tf.shape(X2)[0]])
        return tf.zeros(shape, settings.float_type)
Example 7: _build_predict
def _build_predict(self, Xnew, full_cov=False):
    """
    The posterior variance of F is given by

        q(f) = N(f | K alpha + mean, [K^-1 + diag(lambda**2)]^-1)

    Here we project this to F*, the values of the GP at Xnew which is given
    by

        q(F*) = N(F* | K_{*F} alpha + mean,
                  K_{**} - K_{*f}[K_{ff} + diag(lambda**-2)]^-1 K_{f*})
    """
    # compute kernel things
    Kx = self.kern.K(self.X, Xnew)
    K = self.kern.K(self.X)

    # predictive mean
    f_mean = tf.matmul(Kx, self.q_alpha, transpose_a=True) + self.mean_function(Xnew)

    # predictive var
    A = K + tf.matrix_diag(tf.transpose(1. / tf.square(self.q_lambda)))
    L = tf.cholesky(A)
    Kx_tiled = tf.tile(tf.expand_dims(Kx, 0), [self.num_latent, 1, 1])
    LiKx = tf.matrix_triangular_solve(L, Kx_tiled)
    if full_cov:
        f_var = self.kern.K(Xnew) - tf.matmul(LiKx, LiKx, transpose_a=True)
    else:
        f_var = self.kern.Kdiag(Xnew) - tf.reduce_sum(tf.square(LiKx), 1)
    return f_mean, tf.transpose(f_var)
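A shape note on the predictive-variance block above: q_lambda has shape [N, num_latent], so its transpose is [num_latent, N] and tf.matrix_diag lifts that to a [num_latent, N, N] batch of diagonal matrices, which broadcasts against the [N, N] kernel matrix, giving one linear system per latent dimension. A minimal sketch with illustrative sizes:

import tensorflow as tf

N, num_latent = 4, 2
q_lambda = tf.ones([N, num_latent])

prec = tf.matrix_diag(tf.transpose(1. / tf.square(q_lambda)))  # [num_latent, N, N]
K = tf.eye(N)                                                  # [N, N]
A = K + prec  # broadcasts to [num_latent, N, N]
assert A.shape.as_list() == [num_latent, N, N]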
Example 8: testVector
def testVector(self):
    with self.test_session(use_gpu=self._use_gpu):
        v = np.array([1.0, 2.0, 3.0])
        mat = np.diag(v)
        v_diag = tf.matrix_diag(v)
        self.assertEqual((3, 3), v_diag.get_shape())
        self.assertAllEqual(v_diag.eval(), mat)
Example 9: _quadrature_expectation
def _quadrature_expectation(p, obj1, feature1, obj2, feature2, num_gauss_hermite_points):
    """
    General handling of quadrature expectations for Gaussians and DiagonalGaussians.
    Fallback method for missing analytic expectations.
    """
    num_gauss_hermite_points = 100 if num_gauss_hermite_points is None else num_gauss_hermite_points

    warnings.warn("Quadrature is used to calculate the expectation. This means that "
                  "an analytical implementation is not available for the given combination.")

    if obj2 is None:
        eval_func = lambda x: get_eval_func(obj1, feature1)(x)
    elif obj1 is None:
        raise NotImplementedError("First object cannot be None.")
    else:
        eval_func = lambda x: (get_eval_func(obj1, feature1, np.s_[:, :, None])(x) *
                               get_eval_func(obj2, feature2, np.s_[:, None, :])(x))

    if isinstance(p, DiagonalGaussian):
        if isinstance(obj1, kernels.Kernel) and isinstance(obj2, kernels.Kernel) \
                and obj1.on_separate_dims(obj2):  # no joint expectations required
            eKxz1 = quadrature_expectation(p, (obj1, feature1),
                                           num_gauss_hermite_points=num_gauss_hermite_points)
            eKxz2 = quadrature_expectation(p, (obj2, feature2),
                                           num_gauss_hermite_points=num_gauss_hermite_points)
            return eKxz1[:, :, None] * eKxz2[:, None, :]
        else:
            cov = tf.matrix_diag(p.cov)
    else:
        cov = p.cov
    return mvnquad(eval_func, p.mu, cov, num_gauss_hermite_points)
Example 10: K
def K(self, X, X2=None, full_output_cov=True):
    K = self.kern.K(X, X2)  # N x N2
    if full_output_cov:
        Ks = tf.tile(K[..., None], [1, 1, self.P])  # N x N2 x P
        return tf.transpose(tf.matrix_diag(Ks), [0, 2, 1, 3])  # N x P x N2 x P
    else:
        return tf.tile(K[None, ...], [self.P, 1, 1])  # P x N x N2
Example 11: _operator_and_mat_and_feed_dict
def _operator_and_mat_and_feed_dict(self, shape, dtype, use_placeholder):
    shape = list(shape)
    diag_shape = shape[:-1]

    diag = tf.random_normal(diag_shape, dtype=dtype.real_dtype)
    if dtype.is_complex:
        diag = tf.complex(
            diag, tf.random_normal(diag_shape, dtype=dtype.real_dtype))

    diag_ph = tf.placeholder(dtype=dtype)

    if use_placeholder:
        # Evaluate the diag here because (i) you cannot feed a tensor, and (ii)
        # diag is random and we want the same value used for both mat and
        # feed_dict.
        diag = diag.eval()
        operator = linalg.LinearOperatorDiag(diag_ph)
        feed_dict = {diag_ph: diag}
    else:
        operator = linalg.LinearOperatorDiag(diag)
        feed_dict = None

    mat = tf.matrix_diag(diag)
    return operator, mat, feed_dict
Example 12: testSample
def testSample(self):
    mu = [-1., 1]
    diag = [1., -2]
    dist = tfd.VectorLaplaceDiag(mu, diag, validate_args=True)
    samps = self.evaluate(dist.sample(int(1e4), seed=0))
    cov_mat = 2. * self.evaluate(tf.matrix_diag(diag))**2
    self.assertAllClose(mu, samps.mean(axis=0), atol=0., rtol=0.05)
    self.assertAllClose(cov_mat, np.cov(samps.T), atol=0.05, rtol=0.05)
Example 13: testGrad
def testGrad(self):
    shapes = ((3,), (7, 4))
    with self.test_session(use_gpu=self._use_gpu):
        for shape in shapes:
            x = tf.constant(np.random.rand(*shape), np.float32)
            y = tf.matrix_diag(x)
            error = tf.test.compute_gradient_error(x, x.get_shape().as_list(),
                                                   y, y.get_shape().as_list())
            self.assertLess(error, 1e-4)
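matrix_diag is linear, so the gradient flowing back to its input is simply the diagonal part of the upstream gradient, which is what the numerical check above verifies. A direct sketch (TF 1.x session API):

import tensorflow as tf

x = tf.constant([1., 2., 3.])
y = tf.matrix_diag(x)
# d/dx sum(y * y) = 2x, since only the diagonal of y depends on x.
g = tf.gradients(tf.reduce_sum(y * y), x)[0]

with tf.Session() as sess:
    print(sess.run(g))  # [2. 4. 6.]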
Example 14: K
def K(self, X, X2=None, presliced=False):
    if X2 is None:
        d = tf.fill(tf.shape(X)[:-1], tf.squeeze(self.variance))
        return tf.matrix_diag(d)
    else:
        shape = tf.concat([tf.shape(X)[:-2],
                           tf.reshape(tf.shape(X)[-2], [1]),
                           tf.reshape(tf.shape(X2)[-2], [1])], 0)
        return tf.zeros(shape, settings.float_type)
Example 15: testMultivariateNormalDiagWithSoftplusStDev
def testMultivariateNormalDiagWithSoftplusStDev(self):
    mu = [-1.0, 1.0]
    diag = [-1.0, -2.0]
    with self.test_session():
        dist = distributions.MultivariateNormalDiagWithSoftplusStDev(mu, diag)
        samps = dist.sample(1000, seed=0).eval()
        cov_mat = tf.matrix_diag(tf.nn.softplus(diag)).eval()**2
        self.assertAllClose(mu, samps.mean(axis=0), atol=0.1)
        self.assertAllClose(cov_mat, np.cov(samps.T), atol=0.1)