This article collects typical usage examples of the tensorflow.matrix_diag_part function in Python. If you are wondering what matrix_diag_part does, how to call it, or where to find real-world examples, the hand-picked code samples below may help.
The following sections show 15 code examples of the matrix_diag_part function, sorted by popularity by default.
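Before the collected examples, here is a minimal self-contained sketch of the op itself (a sketch only, assuming the TensorFlow 1.x API where the op is exposed as tf.matrix_diag_part; in TensorFlow 2.x the same functionality lives under tf.linalg.diag_part). It extracts the main diagonal of each innermost matrix of a possibly batched, possibly rectangular tensor:

import numpy as np
import tensorflow as tf

# A batch of two 2x3 matrices; matrix_diag_part returns the main diagonal
# of each innermost matrix, so the result has shape (2, 2).
batch = np.array([[[1., 2., 3.],
                   [4., 5., 6.]],
                  [[7., 8., 9.],
                   [10., 11., 12.]]])
diags = tf.matrix_diag_part(batch)

with tf.Session() as sess:
    print(sess.run(diags))  # [[ 1.  5.]
                            #  [ 7. 11.]]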
Example 1: testSampleWithSameSeed
def testSampleWithSameSeed(self):
    if tf.executing_eagerly():
        return
    scale = make_pd(1., 2)
    df = 4

    chol_w = tfd.Wishart(
        df, scale_tril=chol(scale), input_output_cholesky=False)
    x = self.evaluate(chol_w.sample(1, seed=42))
    chol_x = [chol(x[0])]

    full_w = tfd.Wishart(df, scale, input_output_cholesky=False)
    self.assertAllClose(x, self.evaluate(full_w.sample(1, seed=42)))

    chol_w_chol = tfd.Wishart(
        df, scale_tril=chol(scale), input_output_cholesky=True)
    self.assertAllClose(chol_x, self.evaluate(chol_w_chol.sample(1, seed=42)))
    eigen_values = tf.matrix_diag_part(chol_w_chol.sample(1000, seed=42))
    np.testing.assert_array_less(0., self.evaluate(eigen_values))

    full_w_chol = tfd.Wishart(df, scale=scale, input_output_cholesky=True)
    self.assertAllClose(chol_x, self.evaluate(full_w_chol.sample(1, seed=42)))
    eigen_values = tf.matrix_diag_part(full_w_chol.sample(1000, seed=42))
    np.testing.assert_array_less(0., self.evaluate(eigen_values))
Example 2: testInvalidShapeAtEval
def testInvalidShapeAtEval(self):
    with self.test_session(use_gpu=self._use_gpu):
        v = tf.placeholder(dtype=tf.float32)
        with self.assertRaisesOpError("input must be at least 2-dim"):
            tf.matrix_diag_part(v).eval(feed_dict={v: 0.0})
        with self.assertRaisesOpError("last two dimensions must be equal"):
            tf.matrix_diag_part(v).eval(feed_dict={v: [[0, 1], [1, 0], [0, 0]]})
Example 3: testRectangular
def testRectangular(self):
    with self.test_session(use_gpu=self._use_gpu):
        mat = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
        mat_diag = tf.matrix_diag_part(mat)
        self.assertAllEqual(mat_diag.eval(), np.array([1.0, 5.0]))
        mat = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
        mat_diag = tf.matrix_diag_part(mat)
        self.assertAllEqual(mat_diag.eval(), np.array([1.0, 4.0]))
Example 4: _variance
def _variance(self):
    if distribution_util.is_diagonal_scale(self.scale):
        return 2. * tf.square(self.scale.diag_part())
    elif (isinstance(self.scale, tf.linalg.LinearOperatorLowRankUpdate) and
          self.scale.is_self_adjoint):
        return tf.matrix_diag_part(2. * self.scale.matmul(self.scale.to_dense()))
    else:
        return 2. * tf.matrix_diag_part(
            self.scale.matmul(self.scale.to_dense(), adjoint_arg=True))
Example 5: _maybe_attach_assertion
def _maybe_attach_assertion(x):
    if not validate_args:
        return x
    if assert_positive:
        return control_flow_ops.with_dependencies([
            tf.assert_positive(
                tf.matrix_diag_part(x), message="diagonal part must be positive"),
        ], x)
    return control_flow_ops.with_dependencies([
        tf.assert_none_equal(
            tf.matrix_diag_part(x),
            tf.zeros([], x.dtype),
            message="diagonal part must be non-zero"),
    ], x)
Example 6: testSample
def testSample(self):
    with self.test_session():
        scale = make_pd(1., 2)
        df = 4

        chol_w = distributions.WishartCholesky(
            df, chol(scale), cholesky_input_output_matrices=False)
        x = chol_w.sample_n(1, seed=42).eval()
        chol_x = [chol(x[0])]

        full_w = distributions.WishartFull(
            df, scale, cholesky_input_output_matrices=False)
        self.assertAllClose(x, full_w.sample_n(1, seed=42).eval())

        chol_w_chol = distributions.WishartCholesky(
            df, chol(scale), cholesky_input_output_matrices=True)
        self.assertAllClose(chol_x, chol_w_chol.sample_n(1, seed=42).eval())
        eigen_values = tf.matrix_diag_part(chol_w_chol.sample_n(1000, seed=42))
        np.testing.assert_array_less(0., eigen_values.eval())

        full_w_chol = distributions.WishartFull(
            df, scale, cholesky_input_output_matrices=True)
        self.assertAllClose(chol_x, full_w_chol.sample_n(1, seed=42).eval())
        eigen_values = tf.matrix_diag_part(full_w_chol.sample_n(1000, seed=42))
        np.testing.assert_array_less(0., eigen_values.eval())

        # Check first and second moments.
        df = 4.
        chol_w = distributions.WishartCholesky(
            df=df,
            scale=chol(make_pd(1., 3)),
            cholesky_input_output_matrices=False)
        x = chol_w.sample_n(10000, seed=42)
        self.assertAllEqual((10000, 3, 3), x.get_shape())

        moment1_estimate = tf.reduce_mean(x, reduction_indices=[0]).eval()
        self.assertAllClose(chol_w.mean().eval(),
                            moment1_estimate,
                            rtol=0.05)

        # The Variance estimate uses the squares rather than outer-products
        # because Wishart.Variance is the diagonal of the Wishart covariance
        # matrix.
        variance_estimate = (
            tf.reduce_mean(tf.square(x), reduction_indices=[0]) -
            tf.square(moment1_estimate)).eval()
        self.assertAllClose(chol_w.variance().eval(),
                            variance_estimate,
                            rtol=0.05)
Example 7: _expectation
def _expectation(p, mean, none, kern, feat, nghp=None):
    """
    Compute the expectation:
    expectation[n] = <x_n K_{x_n, Z}>_p(x_n)
        - K_{.,.} :: RBF kernel
    :return: NxDxM
    """
    Xmu, Xcov = p.mu, p.cov

    with tf.control_dependencies([tf.assert_equal(
            tf.shape(Xmu)[1], tf.constant(kern.input_dim, settings.tf_int),
            message="Currently cannot handle slicing in exKxz.")]):
        Xmu = tf.identity(Xmu)

    with params_as_tensors_for(kern), params_as_tensors_for(feat):
        D = tf.shape(Xmu)[1]
        lengthscales = kern.lengthscales if kern.ARD \
            else tf.zeros((D,), dtype=settings.float_type) + kern.lengthscales

        chol_L_plus_Xcov = tf.cholesky(tf.matrix_diag(lengthscales ** 2) + Xcov)  # NxDxD
        all_diffs = tf.transpose(feat.Z) - tf.expand_dims(Xmu, 2)  # NxDxM

        sqrt_det_L = tf.reduce_prod(lengthscales)
        sqrt_det_L_plus_Xcov = tf.exp(tf.reduce_sum(tf.log(tf.matrix_diag_part(chol_L_plus_Xcov)), axis=1))
        determinants = sqrt_det_L / sqrt_det_L_plus_Xcov  # N

        exponent_mahalanobis = tf.cholesky_solve(chol_L_plus_Xcov, all_diffs)  # NxDxM
        non_exponent_term = tf.matmul(Xcov, exponent_mahalanobis, transpose_a=True)
        non_exponent_term = tf.expand_dims(Xmu, 2) + non_exponent_term  # NxDxM

        exponent_mahalanobis = tf.reduce_sum(all_diffs * exponent_mahalanobis, 1)  # NxM
        exponent_mahalanobis = tf.exp(-0.5 * exponent_mahalanobis)  # NxM

        return kern.variance * (determinants[:, None] * exponent_mahalanobis)[:, None, :] * non_exponent_term
Example 8: _build_likelihood
def _build_likelihood(self):
    """
    q_alpha, q_lambda are variational parameters, size N x R
    This method computes the variational lower bound on the likelihood,
    which is:
        E_{q(F)} [ \log p(Y|F) ] - KL[ q(F) || p(F)]
    with
        q(f) = N(f | K alpha + mean, [K^-1 + diag(square(lambda))]^-1) .
    """
    K = self.kern.K(self.X)
    K_alpha = tf.matmul(K, self.q_alpha)
    f_mean = K_alpha + self.mean_function(self.X)

    # compute the variance for each of the outputs
    I = tf.tile(tf.expand_dims(tf.eye(self.num_data, dtype=settings.float_type), 0),
                [self.num_latent, 1, 1])
    A = I + tf.expand_dims(tf.transpose(self.q_lambda), 1) * \
        tf.expand_dims(tf.transpose(self.q_lambda), 2) * K
    L = tf.cholesky(A)
    Li = tf.matrix_triangular_solve(L, I)
    tmp = Li / tf.expand_dims(tf.transpose(self.q_lambda), 1)
    f_var = 1. / tf.square(self.q_lambda) - tf.transpose(tf.reduce_sum(tf.square(tmp), 1))

    # some statistics about A are used in the KL
    A_logdet = 2.0 * tf.reduce_sum(tf.log(tf.matrix_diag_part(L)))
    trAi = tf.reduce_sum(tf.square(Li))

    KL = 0.5 * (A_logdet + trAi - self.num_data * self.num_latent +
                tf.reduce_sum(K_alpha * self.q_alpha))

    v_exp = self.likelihood.variational_expectations(f_mean, f_var, self.Y)
    return tf.reduce_sum(v_exp) - KL
Example 9: multivariate_normal
def multivariate_normal(x, mu, L):
    """
    Computes the log-density of a multivariate normal.
    :param x  : Dx1 or DxN sample(s) for which we want the density
    :param mu : Dx1 or DxN mean(s) of the normal distribution
    :param L  : DxD Cholesky decomposition of the covariance matrix
    :return p : (1,) or (N,) vector of log densities for each of the N x's and/or mu's

    x and mu are either vectors or matrices. If both are vectors (N,1):
    p[0] = log pdf(x) where x ~ N(mu, LL^T)
    If at least one is a matrix, we assume independence over the *columns*:
    the number of rows must match the size of L. Broadcasting behaviour:
    p[n] = log pdf of:
    x[n] ~ N(mu, LL^T) or x ~ N(mu[n], LL^T) or x[n] ~ N(mu[n], LL^T)
    """
    if x.shape.ndims is None:
        warnings.warn('Shape of x must be 2D at computation.')
    elif x.shape.ndims != 2:
        raise ValueError('Shape of x must be 2D.')
    if mu.shape.ndims is None:
        warnings.warn('Shape of mu may be unknown or not 2D.')
    elif mu.shape.ndims != 2:
        raise ValueError('Shape of mu must be 2D.')

    d = x - mu
    alpha = tf.matrix_triangular_solve(L, d, lower=True)
    num_dims = tf.cast(tf.shape(d)[0], L.dtype)
    p = - 0.5 * tf.reduce_sum(tf.square(alpha), 0)
    p -= 0.5 * num_dims * np.log(2 * np.pi)
    p -= tf.reduce_sum(tf.log(tf.matrix_diag_part(L)))
    return p
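As a rough usage sketch for the function above (illustrative only; the data, the float64 dtype, and the session setup are assumptions, not part of the original source), the log-density of a single D-dimensional sample could be evaluated like this:

import numpy as np
import tensorflow as tf

cov = np.array([[2.0, 0.3],
                [0.3, 1.0]])                        # DxD covariance
L = tf.constant(np.linalg.cholesky(cov))            # DxD Cholesky factor
x = tf.constant([[0.5], [-1.0]], dtype=tf.float64)  # Dx1 sample
mu = tf.constant([[0.0], [0.0]], dtype=tf.float64)  # Dx1 mean

log_p = multivariate_normal(x, mu, L)  # shape (1,)
with tf.Session() as sess:
    print(sess.run(log_p))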
Example 10: _forward_log_det_jacobian
def _forward_log_det_jacobian(self, x):
    # We formulate the Jacobian with respect to the flattened matrices
    # `vec(x)` and `vec(y)`. Suppose for notational convenience that
    # the first `n` entries of `vec(x)` are the diagonal of `x`, and
    # the remaining `n**2-n` entries are the off-diagonals in
    # arbitrary order. Then the Jacobian is a block-diagonal matrix,
    # with the Jacobian of the diagonal bijector in the first block,
    # and the identity Jacobian for the remaining entries (since this
    # bijector acts as the identity on non-diagonal entries):
    #
    #   J_vec(x) (vec(y)) =
    #   -------------------------------
    #   | J_diag(x) (diag(y))    0    |  n entries
    #   |                             |
    #   |          0             I    |  n**2-n entries
    #   -------------------------------
    #              n          n**2-n
    #
    # Since the log-det of the second (identity) block is zero, the
    # overall log-det-jacobian is just the log-det of first block,
    # from the diagonal bijector.
    #
    # Note that for elementwise operations (exp, softplus, etc) the
    # first block of the Jacobian will itself be a diagonal matrix,
    # but our implementation does not require this to be true.
    return self._diag_bijector.forward_log_det_jacobian(
        tf.matrix_diag_part(x), event_ndims=1)
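As a tiny numeric illustration (hypothetical; the source does not name the enclosing class, but the method matches tfp.bijectors.TransformDiagonal, which is assumed here together with an Exp diagonal bijector): transforming only the diagonal with exp gives a forward log-det-Jacobian equal to the sum of the diagonal entries of x, since d/dx exp(x) = exp(x).

import numpy as np
import tensorflow_probability as tfp

b = tfp.bijectors.TransformDiagonal(diag_bijector=tfp.bijectors.Exp())
x = np.array([[1.0, 2.0],
              [3.0, 4.0]])
# The off-diagonals pass through unchanged, so the log-det comes only from
# the diagonal bijector: for this x it evaluates to 1.0 + 4.0 = 5.0.
fldj = b.forward_log_det_jacobian(x, event_ndims=2)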
Example 11: testMatrix
def testMatrix(self):
    with self.test_session(use_gpu=self._use_gpu):
        v = np.array([1.0, 2.0, 3.0])
        mat = np.diag(v)
        mat_diag = tf.matrix_diag_part(mat)
        self.assertEqual((3,), mat_diag.get_shape())
        self.assertAllEqual(mat_diag.eval(), v)
Example 12: gauss_kl
def gauss_kl(q_mu, q_sqrt, K):
    """
    Compute the KL divergence from

        q(x) = N(q_mu, q_sqrt^2)
    to
        p(x) = N(0, K)

    We assume multiple independent distributions, given by the columns of
    q_mu and the last dimension of q_sqrt.

    q_mu is a matrix, each column contains a mean.

    q_sqrt is a 3D tensor, each matrix within is a lower triangular square-root
    matrix of the covariance of q.

    K is a positive definite matrix: the covariance of p.
    """
    L = tf.cholesky(K)
    alpha = tf.matrix_triangular_solve(L, q_mu, lower=True)
    KL = 0.5 * tf.reduce_sum(tf.square(alpha))  # Mahalanobis term.
    num_latent = tf.cast(tf.shape(q_sqrt)[2], float_type)
    KL += num_latent * 0.5 * tf.reduce_sum(tf.log(tf.square(tf.diag_part(L))))  # Prior log-det term.
    KL += -0.5 * tf.cast(tf.reduce_prod(tf.shape(q_sqrt)[1:]), float_type)  # constant term
    Lq = tf.matrix_band_part(tf.transpose(q_sqrt, (2, 0, 1)), -1, 0)  # force lower triangle
    KL += -0.5 * tf.reduce_sum(tf.log(tf.square(tf.matrix_diag_part(Lq))))  # logdet
    L_tiled = tf.tile(tf.expand_dims(L, 0), tf.pack([tf.shape(Lq)[0], 1, 1]))
    LiLq = tf.matrix_triangular_solve(L_tiled, Lq, lower=True)
    KL += 0.5 * tf.reduce_sum(tf.square(LiLq))  # Trace term
    return KL
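For reference (a standard identity, not part of the original snippet): writing alpha = L^{-1} q_mu[:, k] and Lq_k = q_sqrt[:, :, k], each of the independent distributions contributes the usual closed-form Gaussian KL

    KL[q_k || p] = 0.5 * ( alpha^T alpha + tr(K^{-1} Lq_k Lq_k^T) - D
                           + log det K - log det(Lq_k Lq_k^T) ),

which the code above accumulates term by term, using log det K = 2 * sum(log(diag(L))) and log det(Lq_k Lq_k^T) = 2 * sum(log(diag(Lq_k))); tf.matrix_diag_part supplies the diagonals of the batched lower-triangular factors Lq. Note also that tf.pack in the tiling line is the pre-1.0 name of what later became tf.stack, so this particular example targets a very old TensorFlow release.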
Example 13: _variance
def _variance(self):
    # Because df is a scalar, we need to expand dimensions to match
    # scale_operator. We use ellipses notation (...) to select all dimensions
    # and add two dimensions to the end.
    df = self.df[..., tf.newaxis, tf.newaxis]
    x = tf.sqrt(df) * self._square_scale_operator()
    d = tf.expand_dims(tf.matrix_diag_part(x), -1)
    v = tf.square(x) + tf.matmul(d, d, adjoint_b=True)
    return v
Example 14: testGrad
def testGrad(self):
    shapes = ((3, 3), (5, 3, 3))
    with self.test_session(use_gpu=self._use_gpu):
        for shape in shapes:
            x = tf.constant(np.random.rand(*shape), dtype=np.float32)
            y = tf.matrix_diag_part(x)
            error = tf.test.compute_gradient_error(x, x.get_shape().as_list(),
                                                   y, y.get_shape().as_list())
            self.assertLess(error, 1e-4)
Example 15: testRectangularBatch
def testRectangularBatch(self):
    with self.test_session(use_gpu=self._use_gpu):
        v_batch = np.array([[1.0, 2.0],
                            [4.0, 5.0]])
        mat_batch = np.array(
            [[[1.0, 0.0, 0.0],
              [0.0, 2.0, 0.0]],
             [[4.0, 0.0, 0.0],
              [0.0, 5.0, 0.0]]])
        self.assertEqual(mat_batch.shape, (2, 2, 3))
        mat_batch_diag = tf.matrix_diag_part(mat_batch)
        self.assertEqual((2, 2), mat_batch_diag.get_shape())
        self.assertAllEqual(mat_batch_diag.eval(), v_batch)