This article collects typical usage examples of the tensorflow.cholesky method in Python. If you are wondering what tensorflow.cholesky does, how to call it, or what real-world code that uses it looks like, the curated examples below should help. You can also explore the other methods of the tensorflow module.
The following presents 15 code examples of tensorflow.cholesky, sorted by popularity by default.
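For context: tf.cholesky (tf.linalg.cholesky in TensorFlow 2.x) returns the lower-triangular factor L of a symmetric positive-definite matrix A, so that A = L @ L^T; the factor is typically reused to solve linear systems or compute log-determinants cheaply. Below is a minimal sketch of the basic call in the TF 1.x graph style used by the examples on this page; the matrix values are made up for illustration.

import numpy as np
import tensorflow as tf

A = np.array([[4., 2.], [2., 3.]], dtype=np.float32)  # symmetric positive-definite
b = np.array([[1.], [2.]], dtype=np.float32)

L = tf.cholesky(A)            # lower-triangular factor, A = L @ L^T
x = tf.cholesky_solve(L, b)   # solves A @ x = b using the factor

with tf.Session() as sess:
    L_val, x_val = sess.run([L, x])
    print(L_val)              # lower triangular, positive diagonal
    print(x_val)              # A @ x_val is approximately b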
Example 1: build_model
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import cholesky [as alias]
def build_model(hps, kernel, z_pos, x, n_particles, full_cov=False):
    """
    Build the SVGP model.
    Note that for inference we only need the diagonal part of Cov[Y], as
    the ELBO decomposes into a sum over individual observations.
    For visualization, etc., we may want the full covariance; hence the
    `full_cov` argument.
    """
    bn = zs.BayesianNet()
    Kzz_chol = tf.cholesky(kernel(z_pos, z_pos))
    fz = bn.multivariate_normal_cholesky(
        'fz', tf.zeros([hps.n_z], dtype=hps.dtype), Kzz_chol,
        n_samples=n_particles)
    # f(X) | f(Z) follows GP(0, K); see gp_conditional below
    fx_given_fz = bn.stochastic(
        'fx', gp_conditional(z_pos, fz, x, full_cov, kernel, Kzz_chol))
    # Y | f(X) ~ N(f(X), noise_level * I)
    noise_level = tf.get_variable(
        'noise_level', shape=[], dtype=hps.dtype,
        initializer=tf.constant_initializer(0.05))
    noise_level = tf.nn.softplus(noise_level)
    bn.normal('y', mean=fx_given_fz, std=noise_level, group_ndims=1)
    return bn
Example 2: build_backward_variance
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import cholesky [as alias]
def build_backward_variance(self, Yvar):
    """
    Additional method for scaling variances backward (used in :class:`.Normalizer`). Can process both the diagonal
    variances returned by predict_f and full covariance matrices.

    :param Yvar: size N x N x P or size N x P
    :return: Yvar scaled, same rank and size as input
    """
    rank = tf.rank(Yvar)
    # Because TensorFlow evaluates both fn1 and fn2, the transpose can't be in the same line. If a full cov
    # matrix is provided, fn1 turns it into a rank-4 tensor and then tries to transpose it as rank 3.
    # Splitting it into two steps, however, works fine.
    Yvar = tf.cond(tf.equal(rank, 2), lambda: tf.matrix_diag(tf.transpose(Yvar)), lambda: Yvar)
    Yvar = tf.cond(tf.equal(rank, 2), lambda: tf.transpose(Yvar, perm=[1, 2, 0]), lambda: Yvar)

    N = tf.shape(Yvar)[0]
    D = tf.shape(Yvar)[2]
    L = tf.cholesky(tf.square(tf.transpose(self.A)))
    Yvar = tf.reshape(Yvar, [N * N, D])
    scaled_var = tf.reshape(tf.transpose(tf.cholesky_solve(L, tf.transpose(Yvar))), [N, N, D])
    return tf.cond(tf.equal(rank, 2), lambda: tf.reduce_sum(scaled_var, axis=1), lambda: scaled_var)
Example 3: _define_full_covariance_probs
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import cholesky [as alias]
def _define_full_covariance_probs(self, shard_id, shard):
    """Defines the full covariance probabilities per example in a class.

    Updates a matrix with dimension num_examples X num_classes.

    Args:
      shard_id: id of the current shard.
      shard: current data shard, 1 X num_examples X dimensions.
    """
    diff = shard - self._means
    cholesky = tf.cholesky(self._covs + self._min_var)
    log_det_covs = 2.0 * tf.reduce_sum(tf.log(tf.matrix_diag_part(cholesky)), 1)
    x_mu_cov = tf.square(
        tf.matrix_triangular_solve(
            cholesky, tf.transpose(
                diff, perm=[0, 2, 1]), lower=True))
    diag_m = tf.transpose(tf.reduce_sum(x_mu_cov, 1))
    self._probs[shard_id] = -0.5 * (
        diag_m + tf.to_float(self._dimensions) * tf.log(2 * np.pi) +
        log_det_covs)
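Example 3 relies on two standard Cholesky identities for the Gaussian log-density: log|Sigma| = 2 * sum(log(diag(chol(Sigma)))), and the Mahalanobis term (x - mu)^T Sigma^{-1} (x - mu) equals the squared norm of a triangular solve against the factor. A quick NumPy check of both identities with made-up values (not part of the original code):

import numpy as np
from scipy.linalg import solve_triangular

S = np.array([[2.0, 0.5], [0.5, 1.0]])  # covariance, assumed positive-definite
diff = np.array([0.3, -1.2])            # x - mu

L = np.linalg.cholesky(S)
logdet = 2.0 * np.sum(np.log(np.diag(L)))
maha = np.sum(solve_triangular(L, diff, lower=True) ** 2)

assert np.isclose(logdet, np.linalg.slogdet(S)[1])
assert np.isclose(maha, diff @ np.linalg.solve(S, diff))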
Example 4: __init__
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import cholesky [as alias]
def __init__(self, x, y, kern, mean_function=gpflow.mean_functions.Zero(),
             scale=1., name='GPRCached'):
    """Initialize the GP and its Cholesky decomposition."""
    # Make sure gpflow is imported
    if not isinstance(gpflow, ModuleType):
        raise gpflow
    # self.scope_name = scope.original_name_scope
    gpflow.gpr.GPR.__init__(self, x, y, kern, mean_function, name)
    # Create new dataholders for the cached data
    # TODO: zero-dim dataholders cause strange allocator errors in
    # tensorflow with MKL
    dtype = config.np_dtype
    self.cholesky = gpflow.param.DataHolder(np.empty((0, 0), dtype=dtype),
                                            on_shape_change='pass')
    self.alpha = gpflow.param.DataHolder(np.empty((0, 0), dtype=dtype),
                                         on_shape_change='pass')
    self._scale = scale
    self.update_cache()
Example 5: build_likelihood_terms
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import cholesky [as alias]
def build_likelihood_terms(self):
    Kdiag = self.kern.Kdiag(self.X)
    Kuu = make_Kuu(self.kern, self.a, self.b, self.ms)
    sigma2 = self.likelihood.variance
    # Compute intermediate matrices
    P = self.KufKfu / sigma2 + Kuu.get()
    L = tf.cholesky(P)
    log_det_P = tf.reduce_sum(tf.log(tf.square(tf.diag_part(L))))
    c = tf.matrix_triangular_solve(L, self.KufY) / sigma2
    # Compute the log marginal bound
    ND = tf.cast(tf.size(self.Y), float_type)
    D = tf.cast(tf.shape(self.Y)[1], float_type)
    return (-0.5 * ND * tf.log(2 * np.pi * sigma2),
            -0.5 * D * log_det_P,
            0.5 * D * Kuu.logdet(),
            -0.5 * self.tr_YTY / sigma2,
            0.5 * tf.reduce_sum(tf.square(c)),
            -0.5 * tf.reduce_sum(Kdiag) / sigma2,
            0.5 * Kuu.trace_KiX(self.KufKfu) / sigma2)
Example 6: __init__
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import cholesky [as alias]
def __init__(self, kern, X, num_outputs, mean_function, input_prop_dim=None, **kwargs):
    """
    A dense layer with fixed inputs. N.B. X does not change here and must be the full set of
    inputs; minibatches are not possible.
    """
    Layer.__init__(self, input_prop_dim, **kwargs)
    self.num_data = X.shape[0]
    q_mu = np.zeros((self.num_data, num_outputs))
    self.q_mu = Parameter(q_mu)
    self.q_mu.prior = Gaussian_prior(0., 1.)
    self.kern = kern
    self.mean_function = mean_function
    self.num_outputs = num_outputs

    Ku = self.kern.compute_K_symm(X) + np.eye(self.num_data) * settings.jitter
    self.Lu = tf.constant(np.linalg.cholesky(Ku))
    self.X = tf.constant(X)
Example 7: conditional_ND
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import cholesky [as alias]
def conditional_ND(self, Xnew, full_cov=False):
    # Modified from GPR
    Kx = self.kern.K(self._X_mean, Xnew)
    K = self.kern.K(self._X_mean) + tf.eye(tf.shape(self._X_mean)[0], dtype=settings.float_type) * self._lik_variance
    L = tf.cholesky(K)
    A = tf.matrix_triangular_solve(L, Kx, lower=True)
    V = tf.matrix_triangular_solve(L, self._Y - self.mean_function(self._X_mean))
    fmean = tf.matmul(A, V, transpose_a=True) + self.mean_function(Xnew)
    if full_cov:
        fvar = self.kern.K(Xnew) - tf.matmul(A, A, transpose_a=True)
        shape = tf.stack([1, 1, tf.shape(self._Y)[1]])
        fvar = tf.tile(tf.expand_dims(fvar, 2), shape)
    else:
        fvar = self.kern.Kdiag(Xnew) - tf.reduce_sum(tf.square(A), 0)
        fvar = tf.tile(tf.reshape(fvar, (-1, 1)), [1, tf.shape(self._Y)[1]])
    return fmean, fvar
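The conditional in Example 7 is the standard exact GP regression posterior: with L = chol(K + sigma^2 * I), A = L^{-1} Kx and V = L^{-1} (Y - m(X)), the mean is A^T V + m(Xnew) and the marginal variance is Kdiag(Xnew) - sum(A^2, axis=0). Below is a plain NumPy transcription of the same computation, a sketch only; the kernel callable `k` and the zero mean function are assumptions for illustration, not part of the original class.

import numpy as np

def gpr_conditional_np(X, Y, Xnew, k, noise_var):
    # k(A, B) is assumed to return the kernel matrix between the rows of A and B
    K = k(X, X) + noise_var * np.eye(len(X))
    L = np.linalg.cholesky(K)
    A = np.linalg.solve(L, k(X, Xnew))   # L^{-1} K(X, Xnew)
    V = np.linalg.solve(L, Y)            # L^{-1} Y  (zero mean function)
    fmean = A.T @ V                      # K(Xnew, X) K^{-1} Y
    fvar = np.diag(k(Xnew, Xnew)) - np.sum(A ** 2, axis=0)
    return fmean, fvar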
Example 8: gp_conditional
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import cholesky [as alias]
def gp_conditional(z, fz, x, full_cov, kernel, Kzz_chol=None):
    '''
    GP conditional distribution of f(x) given f(z) == fz.

    :param z: shape [n_z, n_covariates]
    :param fz: shape [n_particles, n_z]
    :param x: shape [n_x, n_covariates]
    :return: a distribution with shape [n_particles, n_x]
    '''
    n_z = int(z.shape[0])
    n_particles = tf.shape(fz)[0]
    if Kzz_chol is None:
        Kzz_chol = tf.cholesky(kernel(z, z))

    # Mean[fx|fz] = Kxz @ inv(Kzz) @ fz; Cov[fx|fz] = Kxx - Kxz @ inv(Kzz) @ Kzx
    # With an ill-conditioned Kzz, the inverse is often asymmetric, which
    # breaks the subsequent Cholesky decomposition. We compute a symmetric one.
    Kzz_chol_inv = tf.matrix_triangular_solve(Kzz_chol, tf.eye(n_z))
    Kzz_inv = tf.matmul(tf.transpose(Kzz_chol_inv), Kzz_chol_inv)
    Kxz = kernel(x, z)  # [n_x, n_z]
    Kxziz = tf.matmul(Kxz, Kzz_inv)
    mean_fx_given_fz = tf.matmul(fz, tf.matrix_transpose(Kxziz))

    if full_cov:
        cov_fx_given_fz = kernel(x, x) - tf.matmul(Kxziz, tf.transpose(Kxz))
        cov_fx_given_fz = tf.tile(
            tf.expand_dims(tf.cholesky(cov_fx_given_fz), 0),
            [n_particles, 1, 1])
        fx_given_fz = zs.distributions.MultivariateNormalCholesky(
            mean_fx_given_fz, cov_fx_given_fz)
    else:
        # diag(A @ A^T) = sum(A ** 2, axis=-1)
        var = kernel.Kdiag(x) - \
            tf.reduce_sum(tf.matmul(
                Kxz, tf.matrix_transpose(Kzz_chol_inv)) ** 2, axis=-1)
        std = tf.sqrt(var)
        fx_given_fz = zs.distributions.Normal(
            mean=mean_fx_given_fz, std=std, group_ndims=1)
    return fx_given_fz
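The comment about an ill-conditioned Kzz is also why GP code routinely adds a small "jitter" to the diagonal before factoring (Example 6 above does this with settings.jitter). A hedged sketch of that pattern; the function name and the 1e-6 value are illustrative, not from the original code.

import tensorflow as tf

def jittered_cholesky(K, jitter=1e-6):
    # Add a small multiple of the identity so the factorization succeeds
    # even when K is numerically close to singular.
    n = tf.shape(K)[0]
    return tf.cholesky(K + jitter * tf.eye(n, dtype=K.dtype))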
Example 9: test_Cholesky
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import cholesky [as alias]
def test_Cholesky(self):
    t = tf.cholesky(np.array(3 * [8, 3, 3, 8]).reshape(3, 2, 2).astype("float32"))
    self.check(t)
Example 10: build_backward
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import cholesky [as alias]
def build_backward(self, Y):
    """
    TensorFlow implementation of the inverse mapping
    """
    L = tf.cholesky(tf.transpose(self.A))
    XT = tf.cholesky_solve(L, tf.transpose(Y - self.b))
    return tf.transpose(XT)
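This inverse mapping appears to rely on self.A being a symmetric positive-definite scaling matrix (in the Normalizer it is a diagonal scale), so factoring tf.transpose(self.A) is valid and cholesky_solve effectively computes (Y - b) A^{-1}, undoing the forward map Y = X A + b. A NumPy sketch with made-up values; the names here are hypothetical:

import numpy as np
from scipy.linalg import cho_factor, cho_solve

A = np.diag([2.0, 0.5])                 # illustrative positive diagonal scaling
b = np.array([1.0, -1.0])
Y = np.array([[3.0, 0.0], [5.0, 2.0]])  # "normalized" data, one row per point

XT = cho_solve(cho_factor(A.T, lower=True), (Y - b).T)  # (A^T)^{-1} (Y - b)^T
X = XT.T                                                # data on the original scale
assert np.allclose(X @ A + b, Y)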
Example 11: _to_normal2d
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import cholesky [as alias]
def _to_normal2d(output_batch) -> ds.MultivariateNormalTriL:
    """
    :param output_batch: (n_samples, 5)
    :return: a 2D MultivariateNormalTriL distribution built from the 5 outputs
    """
    # mean of x and y
    x_mean = Lambda(lambda o: o[:, 0])(output_batch)
    y_mean = Lambda(lambda o: o[:, 1])(output_batch)
    # std of x and y
    # std must be positive, hence the exp transform
    x_std = Lambda(lambda o: K.exp(o[:, 2]))(output_batch)
    y_std = Lambda(lambda o: K.exp(o[:, 3]))(output_batch)
    # correlation coefficient, constrained to [-1, 1] by tanh
    cor = Lambda(lambda o: K.tanh(o[:, 4]))(output_batch)

    loc = Concatenate()([
        Lambda(lambda x_mean: K.expand_dims(x_mean, 1))(x_mean),
        Lambda(lambda y_mean: K.expand_dims(y_mean, 1))(y_mean)
    ])

    x_var = Lambda(lambda x_std: K.square(x_std))(x_std)
    y_var = Lambda(lambda y_std: K.square(y_std))(y_std)
    xy_cor = Multiply()([x_std, y_std, cor])
    cov = Lambda(lambda inputs: K.stack(inputs, axis=0))(
        [x_var, xy_cor, xy_cor, y_var])
    cov = Lambda(lambda cov: K.permute_dimensions(cov, (1, 0)))(cov)
    cov = Reshape((2, 2))(cov)

    scale_tril = Lambda(lambda cov: tf.cholesky(cov))(cov)
    mvn = ds.MultivariateNormalTriL(loc, scale_tril)
    return mvn
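For the 2x2 covariance assembled here from (x_std, y_std, cor), the Cholesky factor also has a simple closed form, [[x_std, 0], [cor * y_std, y_std * sqrt(1 - cor^2)]], which is handy for sanity-checking the tf.cholesky output. A NumPy sketch with illustrative parameter values:

import numpy as np

x_std, y_std, cor = 1.5, 0.7, 0.3
cov = np.array([[x_std ** 2,          cor * x_std * y_std],
                [cor * x_std * y_std, y_std ** 2]])
closed_form = np.array([[x_std,       0.0],
                        [cor * y_std, y_std * np.sqrt(1.0 - cor ** 2)]])
assert np.allclose(np.linalg.cholesky(cov), closed_form)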
Example 12: _verifyCholeskyBase
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import cholesky [as alias]
def _verifyCholeskyBase(self, sess, x, chol, verification):
    chol_np, verification_np = sess.run([chol, verification])
    self.assertAllClose(x, verification_np)
    self.assertShapeEqual(x, chol)
    # Check that the Cholesky factor is lower triangular and has positive
    # diagonal elements.
    if chol_np.shape[-1] > 0:
        chol_reshaped = np.reshape(chol_np, (-1, chol_np.shape[-2],
                                             chol_np.shape[-1]))
        for chol_matrix in chol_reshaped:
            self.assertAllClose(chol_matrix, np.tril(chol_matrix))
            self.assertTrue((np.diag(chol_matrix) > 0.0).all())
Example 13: _verifyCholesky
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import cholesky [as alias]
def _verifyCholesky(self, x):
    # Verify that L L^T == x.
    with self.test_session() as sess:
        chol = tf.cholesky(x)
        verification = tf.batch_matmul(chol, chol, adj_x=False, adj_y=True)
        self._verifyCholeskyBase(sess, x, chol, verification)
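Note that tf.batch_matmul dates from pre-1.0 TensorFlow and was later removed; tf.matmul batches over leading dimensions automatically, so on newer versions the same verification could be written as below (a sketch, not part of the original test):

import numpy as np
import tensorflow as tf

x = np.array([[4., 2.], [2., 3.]], dtype=np.float32)
chol = tf.cholesky(x)
verification = tf.matmul(chol, chol, adjoint_b=True)  # L @ L^T, equivalent to the tf.batch_matmul call above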
Example 14: testNonSquareMatrix
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import cholesky [as alias]
def testNonSquareMatrix(self):
    with self.assertRaises(ValueError):
        tf.cholesky(np.array([[1., 2., 3.], [3., 4., 5.]]))
    with self.assertRaises(ValueError):
        tf.cholesky(
            np.array([[[1., 2., 3.], [3., 4., 5.]],
                      [[1., 2., 3.], [3., 4., 5.]]]))
Example 15: testWrongDimensions
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import cholesky [as alias]
def testWrongDimensions(self):
    tensor3 = tf.constant([1., 2.])
    with self.assertRaises(ValueError):
        tf.cholesky(tensor3)