This article collects typical usage examples of the tensorflow.matrix_determinant function in Python. If you have been wondering what matrix_determinant does, how to call it, and what real usage looks like, the curated code examples below should help.
The following section shows 15 code examples of matrix_determinant, drawn from open-source projects and listed roughly in order of popularity.
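As a quick orientation before the examples (this snippet is not taken from any of the projects below, and the exact batching behaviour varies between TensorFlow releases): tf.matrix_determinant takes a square matrix, or a batch of square matrices in the last two dimensions, and returns the determinant(s). Older releases routed the batched case through tf.batch_matrix_determinant, which several examples below still use.

import numpy as np
import tensorflow as tf

# A single 2x2 matrix and a batch of two 2x2 matrices.
single = tf.constant([[1.0, 2.0], [3.0, 4.0]])
batch = tf.constant(np.random.randn(2, 2, 2).astype(np.float32))

det_single = tf.matrix_determinant(single)   # scalar tensor, equals -2.0
det_batch = tf.matrix_determinant(batch)     # shape [2], one determinant per matrix

with tf.Session() as sess:
    print(sess.run([det_single, det_batch]))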
Example 1: invertible_1x1_conv
def invertible_1x1_conv(z, logdet, reverse=False, name=None, use_bias=False):
    with tf.variable_scope(name, "invconv"):
        shape = z.get_shape().as_list()
        w_shape = [shape[3], shape[3]]

        # Sample a random orthogonal matrix:
        w_init = np.linalg.qr(np.random.randn(*w_shape))[0].astype('float32')
        w = tf.get_variable("W", dtype=tf.float32, initializer=w_init)

        det_w = tf.matrix_determinant(tf.cast(w, 'float64'))
        dlogdet = tf.cast(tf.log(abs(det_w)), 'float32') * shape[1] * shape[2]

        if use_bias:
            b = tf.get_variable("bias", [1, 1, 1, shape[3]])

        if not reverse:
            _w = w[tf.newaxis, tf.newaxis, ...]
            z = tf.nn.conv2d(z, _w, [1, 1, 1, 1], 'SAME', data_format='NHWC')
            logdet += dlogdet
            if use_bias:
                z += b
        else:
            if use_bias:
                z -= b
            w_inv = tf.matrix_inverse(w)
            _w = w_inv[tf.newaxis, tf.newaxis, ...]
            z = tf.nn.conv2d(z, _w, [1, 1, 1, 1], 'SAME', data_format='NHWC')
            logdet -= dlogdet

        return z, logdet
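A side note on the dlogdet line above (this check is not part of the example): the 1x1 convolution applies the same W at every spatial position of an H x W feature map, so its Jacobian is block-diagonal with H*W copies of W and the log-determinant is H*W*log|det W|, which is why dlogdet is multiplied by shape[1] * shape[2]. A rough NumPy sanity check of that identity with hypothetical small sizes:

import numpy as np

np.random.seed(0)
h, w, c = 3, 4, 2                      # hypothetical spatial size and channel count
W = np.random.randn(c, c)              # any invertible channel-mixing matrix

# The Jacobian of the 1x1 conv is block-diagonal with h*w copies of W.
full_jacobian = np.kron(np.eye(h * w), W)
lhs = np.log(abs(np.linalg.det(full_jacobian)))
rhs = h * w * np.log(abs(np.linalg.det(W)))
print(np.allclose(lhs, rhs))           # True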
Example 2: _get_fldj_numerical
def _get_fldj_numerical(self, bijector, x, event_ndims,
                        eps=1.e-6,
                        input_to_vector=tfb.Identity,
                        output_to_vector=tfb.Identity):
    """Numerically approximate the forward log det Jacobian of a bijector.

    Args:
      bijector: the bijector whose Jacobian we wish to approximate
      x: the value for which we want to approximate the Jacobian
      event_ndims: number of dimensions in an event
      eps: epsilon to add when forming (f(x+eps)-f(x)) / eps
      input_to_vector: a bijector that maps the input value to a vector
      output_to_vector: a bijector that maps the output value to a vector

    Returns:
      A numerical approximation to the log det Jacobian of bijector.forward
      evaluated at x.
    """
    x_vector = input_to_vector.forward(x)
    n = tf.shape(x_vector)[-1]
    x_plus_eps_vector = x_vector + eps * tf.eye(n, dtype=x_vector.dtype)
    x_plus_eps = input_to_vector.inverse(x_plus_eps_vector)

    f_x = bijector.forward(x)
    f_x_vector = output_to_vector.forward(f_x)
    f_x_plus_eps = bijector.forward(x_plus_eps)
    f_x_plus_eps_vector = output_to_vector.forward(f_x_plus_eps)

    jacobian_numerical = (f_x_plus_eps_vector - f_x_vector) / eps
    return (
        tf.log(tf.abs(tf.matrix_determinant(jacobian_numerical))) +
        input_to_vector.forward_log_det_jacobian(x, event_ndims=event_ndims) -
        output_to_vector.forward_log_det_jacobian(f_x, event_ndims=event_ndims))
Example 3: test_logjac
def test_logjac(self):
    """
    We have hand-crafted the log-jacobians for speed. Check they're correct
    wrt a tensorflow derived version
    """
    # there is no jacobian op: loop manually
    def jacobian(f):
        return tf.pack([tf.gradients(f(self.x)[i], self.x)[0] for i in range(10)])

    tf_jacs = [
        tf.log(tf.matrix_determinant(jacobian(t.tf_forward)))
        for t in self.transforms
        if type(t) is not GPflow.transforms.LowerTriangular
    ]
    hand_jacs = [
        t.tf_log_jacobian(self.x)
        for t in self.transforms
        if type(t) is not GPflow.transforms.LowerTriangular
    ]

    for j1, j2 in zip(tf_jacs, hand_jacs):
        self.assertTrue(
            np.allclose(
                self.session.run(j1, feed_dict={self.x: self.x_np}),
                self.session.run(j2, feed_dict={self.x: self.x_np}),
            )
        )
Example 4: _det_large_enough_mask
def _det_large_enough_mask(x, det_bounds):
    """Returns whether the input matches the given determinant limit.

    Args:
      x: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`.
      det_bounds: A floating-point `Tensor` that must broadcast to shape
        `[B1, ..., Bn]`, giving the desired lower bound on the
        determinants in `x`.

    Returns:
      mask: A floating-point `Tensor` of shape [B1, ..., Bn].  Each
        scalar is 1 if the corresponding matrix had determinant above
        the corresponding bound, otherwise 0.
    """
    # For the curious: I wonder whether it is possible and desirable to
    # use a Cholesky decomposition-based algorithm for this, since the
    # only matrices whose determinant this code cares about will be PSD.
    # Didn't figure out how to code that in TensorFlow.
    #
    # Expert opinion is that it would be about twice as fast since
    # Cholesky is roughly half the cost of Gaussian Elimination with
    # Partial Pivoting.  But this is less of an impact than the switch in
    # _psd_mask.
    return tf.cast(
        tf.matrix_determinant(x) > det_bounds, dtype=x.dtype)
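Regarding the Cholesky idea in the comment above: for a symmetric positive-definite matrix the standard identity is log det(X) = 2 * sum_i log(L_ii) with X = L L^T. A hedged sketch of what a Cholesky-based variant might look like (this is an assumption about the shape of such a function, not code from the original project, and it presumes det_bounds is positive so the comparison can be done in log space):

import tensorflow as tf

def _log_det_large_enough_mask(x, det_bounds):
    # Assumes x is symmetric positive definite, so the Cholesky factor exists.
    chol = tf.cholesky(x)
    log_det = 2.0 * tf.reduce_sum(tf.log(tf.matrix_diag_part(chol)), axis=-1)
    return tf.cast(log_det > tf.log(det_bounds), dtype=x.dtype)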
Example 5: Test
def Test(self):
    with self.test_session():
        np.random.seed(1)
        m = np.random.uniform(low=1.0,
                              high=100.0,
                              size=np.prod(shape_)).reshape(shape_).astype(dtype_)
        a = tf.constant(m)
        epsilon = np.finfo(dtype_).eps
        # Optimal stepsize for central difference is O(epsilon^{1/3}).
        delta = epsilon**(1.0 / 3.0)
        # tolerance obtained by looking at actual differences using
        # np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
        tol = 1e-3
        if len(shape_) == 2:
            c = tf.matrix_determinant(a)
        else:
            c = tf.batch_matrix_determinant(a)
        out_shape = shape_[:-2]  # last two dimensions hold matrices
        theoretical, numerical = tf.test.compute_gradient(a,
                                                          shape_,
                                                          c,
                                                          out_shape,
                                                          delta=delta)
        self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
Example 6: entropy
def entropy(self, mean=None, cov=1):
    """
    Note entropy does not depend on its mean.

    Arguments
    ----------
    mean: tf.Tensor, optional
        vector. Defaults to zero mean.
    cov: tf.Tensor, optional
        vector or matrix. Defaults to identity.

    Returns
    -------
    tf.Tensor
        scalar
    """
    if cov == 1:
        d = 1
        det_cov = 1.0
    else:
        cov = tf.cast(cov, dtype=tf.float32)
        d = get_dims(cov)[0]
        if len(cov.get_shape()) == 1:
            det_cov = tf.reduce_prod(cov)
        else:
            det_cov = tf.matrix_determinant(cov)

    return 0.5 * (d + d*tf.log(2*np.pi) + tf.log(det_cov))
Example 7: entropy
def entropy(self, mean=None, cov=1):
    """Entropy of probability distribution.

    This is not vectorized with respect to any arguments.

    Parameters
    ----------
    mean : tf.Tensor, optional
        A 1-D tensor. Defaults to zero mean.
    cov : tf.Tensor, optional
        A 1-D or 2-D tensor. Defaults to identity matrix.

    Returns
    -------
    tf.Tensor
        A tensor of one dimension less than the input.
    """
    if cov is 1:
        d = 1
        det_cov = 1.0
    else:
        cov = tf.cast(cov, dtype=tf.float32)
        d = get_dims(cov)[0]
        if len(cov.get_shape()) == 1:
            det_cov = tf.reduce_prod(cov)
        else:
            det_cov = tf.matrix_determinant(cov)

    return 0.5 * (d + d*tf.log(2*np.pi) + tf.log(det_cov))
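Examples 6 and 7 both use the closed form H = 0.5 * (d + d*log(2*pi) + log det(cov)) for the entropy of a d-dimensional Gaussian. A quick cross-check of that formula in NumPy/SciPy (hypothetical values, assuming SciPy is available; not part of either example):

import numpy as np
from scipy.stats import multivariate_normal

d = 3
A = np.random.randn(d, d)
cov = A @ A.T + d * np.eye(d)          # a random positive-definite covariance

closed_form = 0.5 * (d + d * np.log(2 * np.pi) + np.log(np.linalg.det(cov)))
scipy_value = multivariate_normal(mean=np.zeros(d), cov=cov).entropy()
print(np.allclose(closed_form, scipy_value))   # True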
Example 8: _compareDeterminant
def _compareDeterminant(self, matrix_x):
    with self.test_session():
        # Check the batch version, which should work for ndim >= 2
        self._compareDeterminantBase(
            matrix_x, tf.batch_matrix_determinant(matrix_x))
        if matrix_x.ndim == 2:
            # Check the simple version
            self._compareDeterminantBase(matrix_x, tf.matrix_determinant(matrix_x))
Example 9: testBatchGradientUnknownSize
def testBatchGradientUnknownSize(self):
    with self.test_session():
        batch_size = tf.constant(3)
        matrix_size = tf.constant(4)
        batch_identity = tf.tile(
            tf.expand_dims(tf.diag(tf.ones([matrix_size])), 0),
            [batch_size, 1, 1])
        determinants = tf.matrix_determinant(batch_identity)
        reduced = tf.reduce_sum(determinants)
        sum_grad = tf.gradients(reduced, batch_identity)[0]
        self.assertAllClose(batch_identity.eval(), sum_grad.eval())
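The expected gradient in this test follows from the matrix-calculus identity d det(A) / dA = det(A) * A^{-T}; at the identity matrix that expression is again the identity, which is what the assertAllClose checks. A small NumPy finite-difference sanity check of the identity (hypothetical, not part of the test suite):

import numpy as np

np.random.seed(0)
A = np.random.randn(4, 4) + 4 * np.eye(4)    # a well-conditioned test matrix
analytic = np.linalg.det(A) * np.linalg.inv(A).T

eps = 1e-6
numeric = np.zeros_like(A)
for i in range(4):
    for j in range(4):
        dA = np.zeros_like(A)
        dA[i, j] = eps
        numeric[i, j] = (np.linalg.det(A + dA) - np.linalg.det(A - dA)) / (2 * eps)

print(np.allclose(analytic, numeric, atol=1e-4))   # True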
Example 10: test_determinants
def test_determinants(self):
    with self.test_session():
        for batch_shape in [(), (2, 3,)]:
            for k in [1, 4]:
                operator, mat = self._build_operator_and_mat(batch_shape, k)
                expected_det = tf.matrix_determinant(mat).eval()

                self._compare_results(expected_det, operator.det())
                self._compare_results(np.log(expected_det), operator.log_det())
Example 11: test_det
def test_det(self):
    with self.test_session() as sess:
        for shape in self._shapes_to_test:
            for dtype in self._dtypes_to_test:
                operator, mat, _ = self._operator_and_mat_and_feed_dict(
                    shape, dtype, use_placeholder=False)
                op_det = operator.determinant()
                self.assertAllEqual(shape[:-2], op_det.get_shape())
                op_det_v, mat_det_v = sess.run([op_det, tf.matrix_determinant(mat)])
                self.assertAllClose(op_det_v, mat_det_v)
Example 12: test_det_dynamic
def test_det_dynamic(self):
    with self.test_session() as sess:
        for shape in self._shapes_to_test:
            for dtype in self._dtypes_to_test:
                operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                    shape, dtype, use_placeholder=True)
                op_det_v, mat_det_v = sess.run(
                    [operator.determinant(), tf.matrix_determinant(mat)],
                    feed_dict=feed_dict)
                self.assertAllClose(op_det_v, mat_det_v)
Example 13: construct_loss_graph
def construct_loss_graph(self):
    x = self.xp
    y = self.yp
    xs = x / self.ls
    K, Ki = self.construct_covariance_graph(xs)
    yT = tf.transpose(y)
    Kiy = tf.matmul(Ki, y)
    lK = tf.log(tf.matrix_determinant(K))
    L = tf.matmul(yT, Kiy) + lK
    ones = tf.ones(tf.pack([tf.shape(xs)[0]]), dtype=tf.float64)
    L = L / tf.reduce_sum(ones) * 0.5
    return L
Example 14: logpdf
def logpdf(self, x, mean=None, cov=1):
    """
    Parameters
    ----------
    x : np.array or tf.Tensor
        vector or matrix
    mean : np.array or tf.Tensor, optional
        vector. Defaults to zero mean.
    cov : np.array or tf.Tensor, optional
        vector or matrix. Defaults to identity.
    """
    x = tf.cast(tf.convert_to_tensor(x), dtype=tf.float32)
    x_shape = get_dims(x)
    if len(x_shape) == 1:
        d = x_shape[0]
    else:
        d = x_shape[1]

    if mean is None:
        r = x
    else:
        mean = tf.cast(tf.convert_to_tensor(mean), dtype=tf.float32)
        r = x - mean

    if cov is 1:
        cov_inv = tf.diag(tf.ones([d]))
        det_cov = tf.constant(1.0)
    else:
        cov = tf.cast(tf.convert_to_tensor(cov), dtype=tf.float32)
        if len(cov.get_shape()) == 1:  # vector
            cov_inv = tf.diag(1.0 / cov)
            det_cov = tf.reduce_prod(cov)
        else:  # matrix
            cov_inv = tf.matrix_inverse(cov)
            det_cov = tf.matrix_determinant(cov)

    lps = -0.5*d*tf.log(2*np.pi) - 0.5*tf.log(det_cov)
    if len(x_shape) == 1:
        r = tf.reshape(r, shape=(d, 1))
        lps -= 0.5 * tf.matmul(tf.matmul(r, cov_inv, transpose_a=True), r)
        return tf.squeeze(lps)
    else:
        # TODO vectorize further
        out = []
        for r_vec in tf.unpack(r):
            r_vec = tf.reshape(r_vec, shape=(d, 1))
            out += [tf.squeeze(lps - 0.5 * tf.matmul(
                tf.matmul(r_vec, cov_inv, transpose_a=True),
                r_vec))]
        return tf.pack(out)
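The logpdf above is the standard multivariate normal log-density log p(x) = -0.5*d*log(2*pi) - 0.5*log det(cov) - 0.5*(x - mean)^T cov^{-1} (x - mean). A hedged SciPy cross-check of that formula, independent of the TensorFlow code (hypothetical values, assuming SciPy is available):

import numpy as np
from scipy.stats import multivariate_normal

d = 3
x = np.random.randn(d)
mean = np.random.randn(d)
A = np.random.randn(d, d)
cov = A @ A.T + d * np.eye(d)          # a random positive-definite covariance

r = x - mean
by_hand = (-0.5 * d * np.log(2 * np.pi)
           - 0.5 * np.log(np.linalg.det(cov))
           - 0.5 * r @ np.linalg.inv(cov) @ r)
print(np.allclose(by_hand, multivariate_normal(mean, cov).logpdf(x)))   # True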
"""
Example 15: _compareDeterminant
def _compareDeterminant(self, matrix_x):
    with self.test_session():
        if matrix_x.ndim == 2:
            tf_ans = tf.matrix_determinant(matrix_x)
        else:
            tf_ans = tf.batch_matrix_determinant(matrix_x)
        out = tf_ans.eval()
        shape = matrix_x.shape
        if shape[-1] == 0 and shape[-2] == 0:
            np_ans = np.ones(shape[:-2]).astype(matrix_x.dtype)
        else:
            np_ans = np.array(np.linalg.det(matrix_x)).astype(matrix_x.dtype)
        self.assertAllClose(np_ans, out)
        self.assertShapeEqual(np_ans, tf_ans)