This article collects typical usage examples of the Python method tensorflow.python.ops.linalg_ops.cholesky. If you are wondering what linalg_ops.cholesky does or how to use it in Python, the curated code examples below may help. You can also explore the containing module, tensorflow.python.ops.linalg_ops, for related usage.
The 13 code examples of linalg_ops.cholesky shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps surface better Python code examples.
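Before the examples, here is a minimal, self-contained sketch of what linalg_ops.cholesky computes: for a symmetric positive-definite matrix A it returns the lower-triangular factor L with A = L L^T. The snippet assumes a TF 1.x-style session (use tf.compat.v1 in TF 2) and uses made-up matrix values purely for illustration.

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import linalg_ops

# Build a small symmetric positive-definite matrix A = B B^T + eps * I.
b = np.random.randn(4, 4)
a = b.dot(b.T) + 1e-3 * np.eye(4)

chol = linalg_ops.cholesky(tf.constant(a))  # lower-triangular L with A = L L^T

with tf.Session() as sess:                  # tf.compat.v1.Session in TF 2
  l = sess.run(chol)
print(np.allclose(l.dot(l.T), a))           # True, up to numerical error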
Example 1: _define_full_covariance_probs

# Required import: from tensorflow.python.ops import linalg_ops [as alias]
# Or: from tensorflow.python.ops.linalg_ops import cholesky [as alias]
def _define_full_covariance_probs(self, shard_id, shard):
  """Defines the full covariance probabilities per example in a class.

  Updates a matrix with dimension num_examples X num_classes.

  Args:
    shard_id: id of the current shard.
    shard: current data shard, 1 X num_examples X dimensions.
  """
  diff = shard - self._means
  cholesky = linalg_ops.cholesky(self._covs + self._min_var)
  log_det_covs = 2.0 * math_ops.reduce_sum(
      math_ops.log(array_ops.matrix_diag_part(cholesky)), 1)
  x_mu_cov = math_ops.square(
      linalg_ops.matrix_triangular_solve(
          cholesky, array_ops.transpose(
              diff, perm=[0, 2, 1]), lower=True))
  diag_m = array_ops.transpose(math_ops.reduce_sum(x_mu_cov, 1))
  self._probs[shard_id] = -0.5 * (diag_m + math_ops.to_float(self._dimensions)
                                  * math_ops.log(2 * np.pi) + log_det_covs)
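As a quick sanity check of the identities this example relies on, namely log|cov| = 2 * sum(log diag(L)) for cov = L L^T, plus the Mahalanobis term obtained from a triangular solve, here is a NumPy-only sketch with a fabricated covariance:

import numpy as np

rng = np.random.RandomState(0)
b = rng.randn(3, 3)
cov = b.dot(b.T) + 1e-3 * np.eye(3)   # fabricated SPD covariance
diff = rng.randn(3)                   # plays the role of x - mu

chol = np.linalg.cholesky(cov)
# log|cov| from the Cholesky diagonal, as in log_det_covs above.
log_det = 2.0 * np.sum(np.log(np.diag(chol)))
print(np.allclose(log_det, np.linalg.slogdet(cov)[1]))  # True

# Mahalanobis term: solve L z = diff, then sum(z**2) equals diff^T cov^{-1} diff.
z = np.linalg.solve(chol, diff)
print(np.allclose(np.sum(z ** 2), diff.dot(np.linalg.solve(cov, diff))))  # True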
Example 2: __init__

# Required import: from tensorflow.python.ops import linalg_ops [as alias]
# Or: from tensorflow.python.ops.linalg_ops import cholesky [as alias]
def __init__(self, matrix, verify_pd=True, name="OperatorPDFull"):
  """Initialize an OperatorPDFull.

  Args:
    matrix: Shape `[N1,...,Nb, k, k]` tensor with `b >= 0`, `k >= 1`.  The
      last two dimensions should be `k x k` symmetric positive definite
      matrices.
    verify_pd: Whether to check that `matrix` is symmetric positive definite.
      If `verify_pd` is `False`, correct behavior is not guaranteed.
    name: A name to prepend to all ops created by this class.
  """
  with ops.name_scope(name):
    with ops.name_scope("init", values=[matrix]):
      matrix = ops.convert_to_tensor(matrix)
      # Check symmetry here.  Positivity will be verified by checking the
      # diagonal of the Cholesky factor inside the parent class.  The
      # Cholesky factorization linalg_ops.cholesky() does not always fail
      # for non-PSD matrices, so don't rely on that alone.
      if verify_pd:
        matrix = distribution_util.assert_symmetric(matrix)
      chol = linalg_ops.cholesky(matrix)
      super(OperatorPDFull, self).__init__(chol, verify_pd=verify_pd)
Example 3: _chol_capacitance

# Required import: from tensorflow.python.ops import linalg_ops [as alias]
# Or: from tensorflow.python.ops.linalg_ops import cholesky [as alias]
def _chol_capacitance(self, batch_mode):
  """Cholesky factorization of the capacitance term."""
  # Cholesky factor of (D^{-1} + V^T M^{-1} V), which is sometimes known as
  # the "capacitance" matrix.
  # A Cholesky decomposition is possible because M is a priori a
  # positive-definite Hermitian matrix, which makes the capacitance
  # positive-definite Hermitian as well, so it has a Cholesky decomposition.
  # self._operator will use batch mode automatically if needed; we cannot
  # force that here.
  # M^{-1} V
  minv_v = self._operator.solve(self._v)
  # V^T M^{-1} V
  vt_minv_v = math_ops.matmul(self._v, minv_v, adjoint_a=True)
  # D^{-1} + V^T M^{-1} V
  capacitance = self._diag_inv_operator.add_to_tensor(vt_minv_v)
  # Cholesky[D^{-1} + V^T M^{-1} V]
  return linalg_ops.cholesky(capacitance)
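For context, the capacitance matrix C = D^{-1} + V^T M^{-1} V is the term that appears in the Woodbury identity (M + V D V^T)^{-1} = M^{-1} - M^{-1} V C^{-1} V^T M^{-1}, which is presumably why its Cholesky factor is computed here. A NumPy sketch of that identity, with all matrices fabricated:

import numpy as np

rng = np.random.RandomState(1)
n, k = 5, 2
m = 2.0 * np.eye(n)              # M: SPD base matrix
v = rng.randn(n, k)              # V: low-rank update directions
d = np.diag(rng.rand(k) + 0.5)   # D: SPD diagonal

m_inv = np.linalg.inv(m)
capacitance = np.linalg.inv(d) + v.T.dot(m_inv).dot(v)
correction = m_inv.dot(v).dot(np.linalg.inv(capacitance)).dot(v.T).dot(m_inv)
print(np.allclose(m_inv - correction,
                  np.linalg.inv(m + v.dot(d).dot(v.T))))  # True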
Example 4: _chol_capacitance

# Required import: from tensorflow.python.ops import linalg_ops [as alias]
# Or: from tensorflow.python.ops.linalg_ops import cholesky [as alias]
def _chol_capacitance(self, batch_mode):
  """Cholesky factorization of the capacitance term."""
  # Cholesky factor of (D^{-1} + V^T M^{-1} V), which is sometimes known as
  # the "capacitance" matrix.
  # self._operator will use batch mode automatically if needed; we cannot
  # force that here.
  # M^{-1} V
  minv_v = self._operator.solve(self._v)
  # V^T M^{-1} V
  if batch_mode:
    vt_minv_v = math_ops.batch_matmul(self._v, minv_v, adj_x=True)
  else:
    vt_minv_v = math_ops.matmul(self._v, minv_v, transpose_a=True)
  # D^{-1} + V^T M^{-1} V
  capacitance = self._diag_inv_operator.add_to_tensor(vt_minv_v)
  # Cholesky[D^{-1} + V^T M^{-1} V]
  return linalg_ops.cholesky(capacitance)
Example 5: _inverse

# Required import: from tensorflow.python.ops import linalg_ops [as alias]
# Or: from tensorflow.python.ops.linalg_ops import cholesky [as alias]
def _inverse(self, y):
  return (math_ops.sqrt(y) if self._static_event_ndims == 0
          else linalg_ops.cholesky(y))
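This _inverse appears to belong to a bijector whose forward map is x -> x^2 for scalars and X -> X X^T for matrices, so the inverse is the square root or the Cholesky factor, respectively. A NumPy check that Cholesky really inverts the outer product for a lower-triangular X with positive diagonal (X fabricated):

import numpy as np

rng = np.random.RandomState(2)
x = np.tril(rng.randn(4, 4))
np.fill_diagonal(x, np.abs(np.diag(x)) + 0.1)  # enforce a positive diagonal

y = x.dot(x.T)                                 # forward map: X X^T
print(np.allclose(np.linalg.cholesky(y), x))   # True: Cholesky recovers X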
Example 6: _variance

# Required import: from tensorflow.python.ops import linalg_ops [as alias]
# Or: from tensorflow.python.ops.linalg_ops import cholesky [as alias]
def _variance(self):
  x = math_ops.sqrt(self.df) * self.scale_operator_pd.to_dense()
  d = array_ops.expand_dims(array_ops.matrix_diag_part(x), -1)
  v = math_ops.square(x) + math_ops.matmul(d, d, adjoint_b=True)
  if self.cholesky_input_output_matrices:
    return linalg_ops.cholesky(v)
  return v
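This looks like the Wishart variance, Var(X_ij) = df * (sigma_ij^2 + sigma_ii * sigma_jj), computed elementwise: math_ops.square(x) supplies the first term and the outer product d d^T the second, with an optional Cholesky of the result. A NumPy spot check with a fabricated scale matrix (the distributional reading is my interpretation of the snippet, not stated in it):

import numpy as np

rng = np.random.RandomState(3)
b = rng.randn(3, 3)
sigma = b.dot(b.T) + np.eye(3)   # fabricated scale matrix
df = 7.0

x = np.sqrt(df) * sigma
d = np.diag(x)[:, None]
v = x ** 2 + d.dot(d.T)          # as in the example above

expected = df * (sigma ** 2 + np.outer(np.diag(sigma), np.diag(sigma)))
print(np.allclose(v, expected))  # True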
Example 7: _can_use_cholesky

# Required import: from tensorflow.python.ops import linalg_ops [as alias]
# Or: from tensorflow.python.ops.linalg_ops import cholesky [as alias]
def _can_use_cholesky(self):
  # TODO(langmore) Add complex types when tf.cholesky can use them.
  return (not self.dtype.is_complex and self.is_self_adjoint and
          self.is_positive_definite)
Example 8: _get_cached_chol

# Required import: from tensorflow.python.ops import linalg_ops [as alias]
# Or: from tensorflow.python.ops.linalg_ops import cholesky [as alias]
def _get_cached_chol(self):
  if not hasattr(self, "_cached_chol"):
    self._cached_chol = linalg_ops.cholesky(self._get_cached_dense_matrix())
  return self._cached_chol
Example 9: _inverse_and_inverse_log_det_jacobian

# Required import: from tensorflow.python.ops import linalg_ops [as alias]
# Or: from tensorflow.python.ops.linalg_ops import cholesky [as alias]
def _inverse_and_inverse_log_det_jacobian(self, y):
  x = (math_ops.sqrt(y) if self._static_event_ndims == 0
       else linalg_ops.cholesky(y))
  return x, -self._forward_log_det_jacobian(x)
Example 10: _variance

# Required import: from tensorflow.python.ops import linalg_ops [as alias]
# Or: from tensorflow.python.ops.linalg_ops import cholesky [as alias]
def _variance(self):
  x = math_ops.sqrt(self.df) * self.scale_operator_pd.to_dense()
  d = array_ops.expand_dims(array_ops.matrix_diag_part(x), -1)
  v = math_ops.square(x) + math_ops.batch_matmul(d, d, adj_y=True)
  if self.cholesky_input_output_matrices:
    return linalg_ops.cholesky(v)
  return v
Example 11: _std

# Required import: from tensorflow.python.ops import linalg_ops [as alias]
# Or: from tensorflow.python.ops.linalg_ops import cholesky [as alias]
def _std(self):
  if self.cholesky_input_output_matrices:
    raise ValueError(
        "Computing std. dev. when cholesky_input_output_matrices=True "
        "does not make sense.")
  return linalg_ops.cholesky(self.variance())
Example 12: posdef_inv_cholesky

# Required import: from tensorflow.python.ops import linalg_ops [as alias]
# Or: from tensorflow.python.ops.linalg_ops import cholesky [as alias]
def posdef_inv_cholesky(tensor, identity, damping):
  """Computes inverse(tensor + damping * identity) with Cholesky."""
  chol = linalg_ops.cholesky(tensor + damping * identity)
  return linalg_ops.cholesky_solve(chol, identity)
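Since cholesky_solve(chol, rhs) solves A x = rhs given the Cholesky factor of A, passing the identity as the right-hand side yields A^{-1}. A small check of the helper above, assuming a TF 1.x-style session and made-up values for the matrix and damping:

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import linalg_ops

b = np.random.randn(3, 3)
a = b.dot(b.T)                          # symmetric PSD matrix to be damped
identity = tf.eye(3, dtype=tf.float64)
damping = 0.1

chol = linalg_ops.cholesky(tf.constant(a) + damping * identity)
inv = linalg_ops.cholesky_solve(chol, identity)  # solves (A + damping*I) X = I

with tf.Session() as sess:              # tf.compat.v1.Session in TF 2
  print(np.allclose(sess.run(inv),
                    np.linalg.inv(a + damping * np.eye(3))))  # True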
Example 13: __init__

# Required import: from tensorflow.python.ops import linalg_ops [as alias]
# Or: from tensorflow.python.ops.linalg_ops import cholesky [as alias]
def __init__(self,
             matrix,
             is_non_singular=None,
             is_self_adjoint=None,
             is_positive_definite=None,
             name="LinearOperatorMatrix"):
  """Initialize a `LinearOperatorMatrix`.

  Args:
    matrix: Shape `[B1,...,Bb, M, N]` with `b >= 0`, `M, N >= 0`.
      Allowed dtypes: `float32`, `float64`, `complex64`, `complex128`.
    is_non_singular: Expect that this operator is non-singular.
    is_self_adjoint: Expect that this operator is equal to its hermitian
      transpose.
    is_positive_definite: Expect that this operator is positive definite,
      meaning the real part of all eigenvalues is positive.  We do not require
      the operator to be self-adjoint to be positive-definite.  See:
      https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non_symmetric_matrices
    name: A name for this `LinearOperator`.

  Raises:
    TypeError: If `matrix.dtype` is not an allowed type.
  """
  allowed_dtypes = [
      dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]

  with ops.name_scope(name, values=[matrix]):
    self._matrix = ops.convert_to_tensor(matrix, name="matrix")

    dtype = self._matrix.dtype
    if dtype not in allowed_dtypes:
      raise TypeError(
          "Argument matrix must have dtype in %s.  Found: %s"
          % (allowed_dtypes, dtype))

    # Special treatment for (real) symmetric positive definite matrices:
    # pre-compute the Cholesky factor up front.
    self._is_spd = (
        (not dtype.is_complex) and is_self_adjoint and is_positive_definite)
    if self._is_spd:
      self._chol = linalg_ops.cholesky(self._matrix)

    super(LinearOperatorMatrix, self).__init__(
        dtype=self._matrix.dtype,
        graph_parents=[self._matrix],
        is_non_singular=is_non_singular,
        is_self_adjoint=is_self_adjoint,
        is_positive_definite=is_positive_definite,
        name=name)