This article collects typical usage examples of the scipy.linalg.cholesky method in Python. If you have been wondering what linalg.cholesky does, how to call it, or what its usage looks like in practice, the curated examples below may help. You can also explore the containing module, scipy.linalg, for further reference.
The following presents 15 code examples of linalg.cholesky, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
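Before the examples, a minimal sketch of the basic call itself may be useful: scipy.linalg.cholesky returns the upper triangular factor by default, and the lower triangular factor with lower=True.

import numpy as np
from scipy import linalg

A = np.array([[4., 2.],
              [2., 3.]])                # symmetric positive definite
U = linalg.cholesky(A)                  # upper factor (default): A == U.T @ U
L = linalg.cholesky(A, lower=True)      # lower factor: A == L @ L.T
assert np.allclose(U.T @ U, A)
assert np.allclose(L @ L.T, A)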
Example 1: linear_hotelling_test
# Required import: from scipy import linalg [as alias]
# Or: from scipy.linalg import cholesky [as alias]
def linear_hotelling_test(X, Y, reg=0):
    n, p = X.shape
    Z = X - Y
    Z_bar = Z.mean(axis=0)
    Z -= Z_bar
    S = Z.T.dot(Z)
    S /= (n - 1)
    if reg:
        # Add reg to the diagonal of S (indexing the flattened array
        # with step p + 1 walks down the diagonal).
        S.flat[::p + 1] += reg
    # z' inv(S) z = z' inv(L L') z = z' inv(L)' inv(L) z = ||inv(L) z||^2
    L = linalg.cholesky(S, lower=True, overwrite_a=True)
    Linv_Z_bar = linalg.solve_triangular(L, Z_bar, lower=True, overwrite_b=True)
    stat = n * Linv_Z_bar.dot(Linv_Z_bar)
    p_val = stats.chi2.sf(stat, p)
    return p_val, stat
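A minimal usage sketch, assuming import numpy as np plus from scipy import linalg, stats as in the header comments: under the null hypothesis, where X and Y are paired samples from the same distribution, the returned p-values should be roughly uniform.

import numpy as np
rng = np.random.default_rng(0)
X = rng.standard_normal((200, 5))
Y = rng.standard_normal((200, 5))
p_val, stat = linear_hotelling_test(X, Y)
print(p_val, stat)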
Example 2: cho_log_det
# Required import: from scipy import linalg [as alias]
# Or: from scipy.linalg import cholesky [as alias]
def cho_log_det(L):
    """
    Compute the log of the determinant of :math:`A`, given its (upper or
    lower) Cholesky factorization :math:`LL^T`.

    Parameters
    ----------
    L: ndarray
        an upper or lower Cholesky factor

    Examples
    --------
    >>> A = np.array([[ 2, -1,  0],
    ...               [-1,  2, -1],
    ...               [ 0, -1,  2]])
    >>> Lt = cholesky(A)
    >>> np.isclose(cho_log_det(Lt), np.log(np.linalg.det(A)))
    True
    >>> L = cholesky(A, lower=True)
    >>> np.isclose(cho_log_det(L), np.log(np.linalg.det(A)))
    True
    """
    return 2 * np.sum(np.log(L.diagonal()))
Example 3: _b_orthonormalize
# Required import: from scipy import linalg [as alias]
# Or: from scipy.linalg import cholesky [as alias]
def _b_orthonormalize(B, blockVectorV, blockVectorBV=None, retInvR=False):
    if blockVectorBV is None:
        if B is not None:
            blockVectorBV = B(blockVectorV)
        else:
            blockVectorBV = blockVectorV  # Shared data!!!
    gramVBV = np.dot(blockVectorV.T, blockVectorBV)
    gramVBV = cholesky(gramVBV)
    gramVBV = inv(gramVBV, overwrite_a=True)
    # gramVBV is now R^{-1}.
    blockVectorV = np.dot(blockVectorV, gramVBV)
    if B is not None:
        blockVectorBV = np.dot(blockVectorBV, gramVBV)
    if retInvR:
        return blockVectorV, blockVectorBV, gramVBV
    else:
        return blockVectorV, blockVectorBV
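With B=None this reduces to plain orthonormalization of the block: V^T V = R^T R, so V @ inv(R) has orthonormal columns. A quick check, assuming import numpy as np and from scipy.linalg import cholesky, inv as in the header comments:

rng = np.random.default_rng(3)
V = rng.standard_normal((20, 4))
Vo, _ = _b_orthonormalize(None, V)
assert np.allclose(Vo.T @ Vo, np.eye(4))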
Example 4: log_multivariate_normal_density
# Required import: from scipy import linalg [as alias]
# Or: from scipy.linalg import cholesky [as alias]
def log_multivariate_normal_density(X, means, covars, min_covar=1.e-7):
    """Log probability for full covariance matrices."""
    if hasattr(linalg, 'solve_triangular'):
        # only in scipy since 0.9
        solve_triangular = linalg.solve_triangular
    else:
        # slower, but works
        solve_triangular = linalg.solve
    n_samples, n_dim = X.shape
    nmix = len(means)
    log_prob = np.empty((n_samples, nmix))
    for c, (mu, cv) in enumerate(zip(means, covars)):
        try:
            cv_chol = linalg.cholesky(cv, lower=True)
        except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component.
            cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
                                      lower=True)
        cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
        cv_sol = solve_triangular(cv_chol, (X - mu).T, lower=True).T
        log_prob[:, c] = -.5 * (np.sum(cv_sol ** 2, axis=1) +
                                n_dim * np.log(2 * np.pi) + cv_log_det)
    return log_prob
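A sanity check, assuming import numpy as np and from scipy import linalg as above: for a single component, each column should match scipy.stats.multivariate_normal.logpdf.

from scipy.stats import multivariate_normal
rng = np.random.default_rng(1)
X = rng.standard_normal((10, 3))
mu = np.zeros(3)
cv = np.eye(3)
lp = log_multivariate_normal_density(X, [mu], [cv])
ref = multivariate_normal(mean=mu, cov=cv).logpdf(X)
assert np.allclose(lp[:, 0], ref)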
Example 5: b_orthonormalize
# Required import: from scipy import linalg [as alias]
# Or: from scipy.linalg import cholesky [as alias]
def b_orthonormalize(B, blockVectorV,
                     blockVectorBV=None, retInvR=False):
    """Internal."""
    import scipy.linalg as sla
    if blockVectorBV is None:
        if B is not None:
            blockVectorBV = B(blockVectorV)
        else:
            blockVectorBV = blockVectorV  # Shared data!!!
    gramVBV = sp.dot(blockVectorV.T, blockVectorBV)
    gramVBV = sla.cholesky(gramVBV)
    gramVBV = sla.inv(gramVBV, overwrite_a=True)
    # gramVBV is now R^{-1}.
    blockVectorV = sp.dot(blockVectorV, gramVBV)
    if B is not None:
        blockVectorBV = sp.dot(blockVectorBV, gramVBV)
    if retInvR:
        return blockVectorV, blockVectorBV, gramVBV
    else:
        return blockVectorV, blockVectorBV
Example 6: _log_multivariate_normal_density_full
# Required import: from scipy import linalg [as alias]
# Or: from scipy.linalg import cholesky [as alias]
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
    """Log probability for full covariance matrices."""
    n_samples, n_dim = X.shape
    nmix = len(means)
    log_prob = np.empty((n_samples, nmix))
    for c, (mu, cv) in enumerate(zip(means, covars)):
        try:
            cv_chol = linalg.cholesky(cv, lower=True)
        except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component.
            cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
                                      lower=True)
        cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
        cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
        log_prob[:, c] = -.5 * (np.sum(cv_sol ** 2, axis=1) +
                                n_dim * np.log(2 * np.pi) + cv_log_det)
    return log_prob
Example 7: ols_cholesky
# Required import: from scipy import linalg [as alias]
# Or: from scipy.linalg import cholesky [as alias]
def ols_cholesky(A, b):
    """
    Ordinary least-squares regression coefficient estimation.

    If (A.T @ A) @ x = A.T @ b and A has full rank, then there exists
    an upper triangular matrix R such that:

        (R.T @ R) @ x = A.T @ b
        R.T @ w = A.T @ b
        R @ x = w

    Find R using the Cholesky decomposition.
    """
    R = cholesky(A.T @ A)
    w = solve_triangular(R, A.T @ b, trans='T')
    return solve_triangular(R, w)
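A quick check against numpy's least-squares solver on synthetic data, assuming import numpy as np and from scipy.linalg import cholesky, solve_triangular:

rng = np.random.default_rng(2)
A = rng.standard_normal((50, 3))
b = rng.standard_normal(50)
x_chol = ols_cholesky(A, b)
x_ref, *_ = np.linalg.lstsq(A, b, rcond=None)
assert np.allclose(x_chol, x_ref)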
Example 8: predict
# Required import: from scipy import linalg [as alias]
# Or: from scipy.linalg import cholesky [as alias]
def predict(self, a_hist, t):
    """
    This function implements the prediction formula discussed in section 6 (1.59).
    It takes a realization for a^N, and the period in which the prediction is formed.

    Output: E[abar | a_t, a_{t-1}, ..., a_1, a_0]
    """
    N = np.asarray(a_hist).shape[0] - 1
    a_hist = np.asarray(a_hist).reshape(N + 1, 1)
    V = self.construct_V(N + 1)

    aux_matrix = np.zeros((N + 1, N + 1))
    aux_matrix[:(t + 1), :(t + 1)] = np.eye(t + 1)
    L = la.cholesky(V).T
    Ea_hist = la.inv(L) @ aux_matrix @ L @ a_hist

    return Ea_hist
Example 9: _b_orthonormalize
# Required import: from scipy import linalg [as alias]
# Or: from scipy.linalg import cholesky [as alias]
def _b_orthonormalize(B, blockVectorV, blockVectorBV=None, retInvR=False):
    if blockVectorBV is None:
        if B is not None:
            blockVectorBV = B(blockVectorV)
        else:
            blockVectorBV = blockVectorV  # Shared data!!!
    gramVBV = np.dot(blockVectorV.T.conj(), blockVectorBV)
    gramVBV = cholesky(gramVBV)
    gramVBV = inv(gramVBV, overwrite_a=True)
    # gramVBV is now R^{-1}.
    blockVectorV = np.dot(blockVectorV, gramVBV)
    if B is not None:
        blockVectorBV = np.dot(blockVectorBV, gramVBV)
    if retInvR:
        return blockVectorV, blockVectorBV, gramVBV
    else:
        return blockVectorV, blockVectorBV
Example 10: _log_multivariate_normal_density_full
# Required import: from scipy import linalg [as alias]
# Or: from scipy.linalg import cholesky [as alias]
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
    '''Log probability for full covariance matrices.'''
    n_samples, n_dimensions = X.shape
    nmix = len(means)
    log_prob = np.empty((n_samples, nmix))
    for c, (mu, cv) in enumerate(zip(means, covars)):
        try:
            cv_chol = linalg.cholesky(cv, lower=True)
        except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component.
            cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dimensions),
                                      lower=True)
        cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
        cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
        log_prob[:, c] = -.5 * (np.sum(cv_sol ** 2, axis=1) +
                                n_dimensions * np.log(2 * np.pi) + cv_log_det)
    return log_prob
Example 11: log_determinant
# Required import: from scipy import linalg [as alias]
# Or: from scipy.linalg import cholesky [as alias]
def log_determinant(A, inverse=False):
    """
    Calculate the natural logarithm of the determinant of the given matrix,
    using the properties of a triangular matrix.

    Parameters
    ----------
    A : n x n :class:`numpy.ndarray`
    inverse : boolean
        If True, calculate the log determinant of the inverse of the Cholesky
        decomposition, which is equivalent to taking the determinant of the
        inverse of the matrix.

            A = L @ L.T                     (inverse=False)
            inv(A) = inv(L).T @ inv(L)      (inverse=True)

    Returns
    -------
    float
        Logarithm of the determinant of the input matrix A.
    """
    cholesky = linalg.cholesky(A, lower=True)
    if inverse:
        cholesky = num.linalg.inv(cholesky)
    return num.log(num.diag(cholesky)).sum() * 2.
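A quick consistency check, assuming num is numpy and linalg is scipy.linalg, matching the aliases in the snippet:

A = num.array([[2., -1.], [-1., 2.]])
sign, ref = num.linalg.slogdet(A)
assert num.isclose(log_determinant(A), ref)
assert num.isclose(log_determinant(A, inverse=True), -ref)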
Example 12: pred_constraint_voilation
# Required import: from scipy import linalg [as alias]
# Or: from scipy.linalg import cholesky [as alias]
def pred_constraint_voilation(self, cand, comp, vals):
    # The primary covariances for prediction.
    comp_cov = self.cov(self.constraint_amp2, self.constraint_ls, comp)
    cand_cross = self.cov(self.constraint_amp2, self.constraint_ls, comp,
                          cand)

    # Compute the required Cholesky.
    obsv_cov = comp_cov + self.constraint_noise*np.eye(comp.shape[0])
    obsv_chol = spla.cholesky(obsv_cov, lower=True)

    cov_grad_func = getattr(gp, 'grad_' + self.cov_func.__name__)
    cand_cross_grad = cov_grad_func(self.constraint_ls, comp, cand)

    # Predictive things.
    # Solve the linear systems.
    alpha = spla.cho_solve((obsv_chol, True), self.ff)
    beta = spla.solve_triangular(obsv_chol, cand_cross, lower=True)

    # Predict the marginal means and variances at candidates.
    func_m = np.dot(cand_cross.T, alpha)  # + self.constraint_mean
    func_m = sps.norm.cdf(func_m*self.constraint_gain)

    return func_m

# Compute EI over hyperparameter samples
Example 13: sample_constraint_hypers
# Required import: from scipy import linalg [as alias]
# Or: from scipy.linalg import cholesky [as alias]
def sample_constraint_hypers(self, comp, labels):
    # The latent GP projection.
    if (self.ff is None or self.ff.shape[0] < comp.shape[0]):
        self.ff_samples = []
        comp_cov = self.cov(self.constraint_amp2, self.constraint_ls, comp)
        obsv_cov = comp_cov + 1e-6*np.eye(comp.shape[0])
        obsv_chol = spla.cholesky(obsv_cov, lower=True)
        self.ff = np.dot(obsv_chol, npr.randn(obsv_chol.shape[0]))

    self._sample_constraint_noisy(comp, labels)
    self._sample_constraint_ls(comp, labels)
    self.constraint_hyper_samples.append((self.constraint_mean,
                                          self.constraint_gain,
                                          self.constraint_amp2,
                                          self.constraint_ls))
    self.ff_samples.append(self.ff)
Example 14: load_2d_hard
# Required import: from scipy import linalg [as alias]
# Or: from scipy.linalg import cholesky [as alias]
def load_2d_hard():
    """
    Returns non-isotropic data to motivate the use of non-Euclidean norms
    (as well as the ground truth).
    """
    centres = np.array([[3., -1.], [-2., 1.], [2., 5.]])
    covs = []
    covs.append(np.array([[4., 2.], [2., 1.5]]))
    covs.append(np.array([[1, -1.5], [-1.5, 3.]]))
    covs.append(np.array([[1., 0.], [0., 1.]]))
    N = [1000, 500, 300]
    # Rows z @ L.T have covariance L @ L.T == c (note the transpose on the
    # lower factor, needed to realize the intended covariance).
    X = [np.random.randn(n, 2).dot(la.cholesky(c, lower=True).T) + m
         for n, m, c in zip(N, centres, covs)]
    X = np.vstack(X)
    labels = np.concatenate((np.zeros(N[0]), np.ones(N[1]), 2*np.ones(N[2])))
    return X, labels
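The colouring transform used above can be verified empirically: for a lower Cholesky factor L of c, rows of Z @ L.T have covariance L @ L.T == c. A small sketch, assuming import numpy as np and import scipy.linalg as la:

rng = np.random.default_rng(4)
c = np.array([[4., 2.], [2., 1.5]])
L = la.cholesky(c, lower=True)
Z = rng.standard_normal((100_000, 2)) @ L.T
print(np.cov(Z, rowvar=False))  # should be close to c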
Example 15: first_fit
# Required import: from scipy import linalg [as alias]
# Or: from scipy.linalg import cholesky [as alias]
def first_fit(self, train_x, train_y):
    """Fit the regressor for the first time."""
    train_x, train_y = np.array(train_x), np.array(train_y)

    self._x = np.copy(train_x)
    self._y = np.copy(train_y)

    self._distance_matrix = edit_distance_matrix(self._x)
    k_matrix = bourgain_embedding_matrix(self._distance_matrix)
    k_matrix[np.diag_indices_from(k_matrix)] += self.alpha

    self._l_matrix = cholesky(k_matrix, lower=True)  # Line 2
    self._alpha_vector = cho_solve(
        (self._l_matrix, True), self._y)  # Line 3

    self._first_fitted = True
    return self