This article collects typical usage examples of the Python method tensorflow.matrix_determinant. If you are unsure what tensorflow.matrix_determinant does or how to use it in Python, the curated code examples below should help. You can also explore further usage examples from the tensorflow module that this method belongs to.
The following presents 12 code examples of tensorflow.matrix_determinant, sorted by popularity by default.
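Before turning to the examples, here is a minimal usage sketch, assuming the TensorFlow 1.x graph-mode API (in TensorFlow 2.x the same op is exposed as tf.linalg.det). tf.matrix_determinant takes one or more square matrices and computes the determinant over the last two dimensions:

import tensorflow as tf

# A batch of two 2 x 2 matrices; the determinant is taken over the last two dims.
mats = tf.constant([[[2., 0.], [0., 3.]],
                    [[1., 2.], [3., 4.]]])
dets = tf.matrix_determinant(mats)  # shape (2,)

with tf.Session() as sess:
    print(sess.run(dets))  # -> [ 6. -2.]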
Example 1: internal_novelty_loss
# Required module: import tensorflow [as alias]
# Or: from tensorflow import matrix_determinant [as alias]
def internal_novelty_loss(grams, mul=1.0):
    gram = keras.layers.Concatenate(axis=0)(grams)
    # gram will be something like (5, 64, 64)
    flat = keras.layers.Flatten()(gram)
    flat = PrintLayerShape("flat shape")(flat)
    # ~ (5, 4096)
    covar = Lambda(lambda x: K.dot(x, K.transpose(x)),
                   output_shape=lambda input_shape: [input_shape[0], input_shape[0]])(flat)
    covar = PrintLayer("covar")(covar)
    # ~ (5, 5)
    # det = Lambda(lambda x: -tf.matrix_determinant(x),
    #              output_shape=lambda input_shape: [1])(covar)
    # det = Lambda(lambda x: -2 * tf.reduce_sum(tf.log(tf.diag(tf.cholesky(x)))),
    #              output_shape=lambda input_shape: [1])(covar)

    def eye_diff(x):
        shape = K.shape(x)
        return x - mul * tf.eye(shape[0], shape[1])

    det = Lambda(lambda x: K.sum(K.square(eye_diff(x))),
                 output_shape=lambda input_shape: [1])(covar)
    det = PrintLayer("det")(det)
    return det
Example 2: dpp_style
# Required module: import tensorflow [as alias]
# Or: from tensorflow import matrix_determinant [as alias]
def dpp_style(self, submethod):
    """Computes the DPP of a matrix."""
    det_entries = []
    if submethod == "inverse_dist":
        for i in range(self.total_CFs):
            for j in range(self.total_CFs):
                det_temp_entry = tf.divide(1.0, tf.add(
                    1.0, self.compute_dist(self.cfs_frozen[i], self.cfs_frozen[j])))
                if i == j:
                    # add a small constant on the diagonal for numerical stability
                    det_temp_entry = tf.add(det_temp_entry, 0.0001)
                det_entries.append(det_temp_entry)
    elif submethod == "exponential_dist":
        for i in range(self.total_CFs):
            for j in range(self.total_CFs):
                det_temp_entry = tf.divide(1.0, tf.exp(
                    self.compute_dist(self.cfs_frozen[i], self.cfs_frozen[j])))
                det_entries.append(det_temp_entry)

    det_entries = tf.reshape(det_entries, [self.total_CFs, self.total_CFs])
    diversity_loss = tf.matrix_determinant(det_entries)
    return diversity_loss
Example 3: test_MatrixDeterminant
# Required module: import tensorflow [as alias]
# Or: from tensorflow import matrix_determinant [as alias]
def test_MatrixDeterminant(self):
    t = tf.matrix_determinant(self.random(2, 3, 4, 3, 3))
    self.check(t)
Example 4: get_log_det_jacobian
# Required module: import tensorflow [as alias]
# Or: from tensorflow import matrix_determinant [as alias]
def get_log_det_jacobian(self, o):
    J = self.compute_jacobian(o)

    def step(J_i):
        # log |det J_i| for each Jacobian in the batch
        return tf.log(tf.abs(tf.matrix_determinant(J_i)))

    return tf.map_fn(step, J)
Example 5: testBatchGradientUnknownSize
# Required module: import tensorflow [as alias]
# Or: from tensorflow import matrix_determinant [as alias]
def testBatchGradientUnknownSize(self):
    with self.test_session():
        batch_size = tf.constant(3)
        matrix_size = tf.constant(4)
        batch_identity = tf.tile(
            tf.expand_dims(
                tf.diag(tf.ones([matrix_size])), 0), [batch_size, 1, 1])
        determinants = tf.matrix_determinant(batch_identity)
        reduced = tf.reduce_sum(determinants)
        sum_grad = tf.gradients(reduced, batch_identity)[0]
        self.assertAllClose(batch_identity.eval(), sum_grad.eval())
Example 6: _compareDeterminant
# Required module: import tensorflow [as alias]
# Or: from tensorflow import matrix_determinant [as alias]
def _compareDeterminant(self, matrix_x):
    with self.test_session():
        self._compareDeterminantBase(matrix_x, tf.matrix_determinant(matrix_x))
Example 7: testNonSquareMatrix
# Required module: import tensorflow [as alias]
# Or: from tensorflow import matrix_determinant [as alias]
def testNonSquareMatrix(self):
    # Attempting the determinant of a non-square matrix should raise an error.
    with self.assertRaises(ValueError):
        tf.matrix_determinant(
            np.array([[1., 2., 3.], [3., 5., 4.]]).astype(np.float32))
Example 8: testWrongDimensions
# Required module: import tensorflow [as alias]
# Or: from tensorflow import matrix_determinant [as alias]
def testWrongDimensions(self):
    # The input to the determinant should be at least a 2-dimensional tensor.
    tensor1 = tf.constant([1., 2.])
    with self.assertRaises(ValueError):
        tf.matrix_determinant(tensor1)
Example 9: testDeterminants
# Required module: import tensorflow [as alias]
# Or: from tensorflow import matrix_determinant [as alias]
def testDeterminants(self):
    with self.test_session():
        for batch_shape in [(), (2, 3,)]:
            for k in [1, 4]:
                operator, mat = self._build_operator_and_mat(batch_shape, k)
                expected_det = tf.matrix_determinant(mat).eval()
                self._compare_results(expected_det, operator.det())
                self._compare_results(np.log(expected_det), operator.log_det())
Example 10: novelty_loss
# Required module: import tensorflow [as alias]
# Or: from tensorflow import matrix_determinant [as alias]
def novelty_loss(grams, mul=1.0):
    dets = []
    for gram in grams:
        # gram will be something like (5, 64, 64)
        flat = keras.layers.Flatten()(gram)
        # ~ (5, 4096)
        covar = Lambda(lambda x: K.dot(x, K.transpose(x)),
                       output_shape=lambda input_shape: [input_shape[0], input_shape[0]])(flat)
        covar = PrintLayer("covar")(covar)
        # ~ (5, 5)
        # det = Lambda(lambda x: -tf.matrix_determinant(x),
        #              output_shape=lambda input_shape: [1])(covar)
        # det = Lambda(lambda x: -2 * tf.reduce_sum(tf.log(tf.diag(tf.cholesky(x)))),
        #              output_shape=lambda input_shape: [1])(covar)

        def eye_diff(x):
            shape = K.shape(x)
            return x - mul * tf.eye(shape[0], shape[1])

        det = Lambda(lambda x: K.sum(K.square(eye_diff(x))),
                     output_shape=lambda input_shape: [1])(covar)
        det = PrintLayer("det")(det)
        dets.append(det)
    if len(dets) > 1:
        return keras.layers.add(dets)
    else:
        return dets[0]
Example 11: _calc_component_density
# Required module: import tensorflow [as alias]
# Or: from tensorflow import matrix_determinant [as alias]
def _calc_component_density(z, phi, mu, sigma):
    # Weighted multivariate Gaussian density: phi * N(z; mu, sigma).
    sig_inv = tf.matrix_inverse(sigma)
    sig_sqrt_det = K.sqrt(tf.matrix_determinant(2 * np.pi * sigma) + K.epsilon())
    density = phi * (K.exp(-0.5 * K.sum(K.dot(z - mu, sig_inv) * (z - mu),
                                        axis=-1,
                                        keepdims=True)) / sig_sqrt_det) + K.epsilon()
    return density
Example 12: estimate_rotation
# Required module: import tensorflow [as alias]
# Or: from tensorflow import matrix_determinant [as alias]
def estimate_rotation(xyz0, xyz1, pconf, noise):
    """Estimates the rotation between two sets of keypoints.

    The rotation is estimated by first subtracting the mean from each set of
    keypoints and then computing the SVD of the covariance matrix.

    Args:
      xyz0: [batch, num_kp, 3] The first set of keypoints.
      xyz1: [batch, num_kp, 3] The second set of keypoints.
      pconf: [batch, num_kp] The weights used to compute the rotation estimate.
      noise: A number indicating the noise added to the keypoints.

    Returns:
      [batch, 3, 3] A batch of transposed 3 x 3 rotation matrices.
    """
    xyz0 += tf.random_normal(tf.shape(xyz0), mean=0, stddev=noise)
    xyz1 += tf.random_normal(tf.shape(xyz1), mean=0, stddev=noise)
    pconf2 = tf.expand_dims(pconf, 2)
    cen0 = tf.reduce_sum(xyz0 * pconf2, 1, keepdims=True)
    cen1 = tf.reduce_sum(xyz1 * pconf2, 1, keepdims=True)
    x = xyz0 - cen0
    y = xyz1 - cen1
    cov = tf.matmul(tf.matmul(x, tf.matrix_diag(pconf), transpose_a=True), y)
    _, u, v = tf.svd(cov, full_matrices=True)
    # det(V U^T) is +/-1; scaling the last column of U by this sign ensures the
    # result is a proper rotation rather than a reflection (Kabsch algorithm).
    d = tf.matrix_determinant(tf.matmul(v, u, transpose_b=True))
    ud = tf.concat(
        [u[:, :, :-1], u[:, :, -1:] * tf.expand_dims(tf.expand_dims(d, 1), 1)],
        axis=2)
    return tf.matmul(ud, v, transpose_b=True)