This article collects typical usage examples of the scipy.linalg.cholesky function in Python. If you have been wondering what cholesky does, how to call it, or what real code that uses it looks like, the curated examples below should help.
A total of 15 code examples of the cholesky function are shown, drawn from open-source projects and ordered roughly by popularity.
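As a quick primer before the examples (a minimal sketch with made-up values): cholesky returns the upper-triangular factor by default, lower=True returns the lower one, and the factor can be reused with cho_solve to solve linear systems.
import numpy as np
from scipy.linalg import cholesky, cho_solve

# A small symmetric positive-definite matrix (illustrative values).
A = np.array([[4.0, 2.0, 0.6],
              [2.0, 5.0, 1.5],
              [0.6, 1.5, 3.0]])

U = cholesky(A)              # upper triangular: A == U.T @ U
L = cholesky(A, lower=True)  # lower triangular: A == L @ L.T
assert np.allclose(U.T @ U, A) and np.allclose(L @ L.T, A)

# Reuse the factor to solve A x = b, as cho_factor/cho_solve would.
b = np.array([1.0, 2.0, 3.0])
x = cho_solve((L, True), b)
assert np.allclose(A @ x, b)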
Example 1: balreal
def balreal(sys):
"""Computes the balanced realization of sys and returns its eigenvalues.
References:
[1] http://www.mathworks.com/help/control/ref/balreal.html
[2] Laub, A.J., M.T. Heath, C.C. Paige, and R.C. Ward, "Computation of
System Balancing Transformations and Other Applications of
Simultaneous Diagonalization Algorithms," *IEEE Trans. Automatic
Control*, AC-32 (1987), pp. 115-122.
"""
sys = LinearSystem(sys) # cast first to memoize sys2ss
if not sys.analog:
raise NotImplementedError("balanced digital filters not supported")
R = control_gram(sys)
O = observe_gram(sys)
LR = cholesky(R, lower=True)
LO = cholesky(O, lower=True)
U, S, V = svd(np.dot(LO.T, LR))
T = np.dot(LR, V.T) * S ** (-1. / 2)
Tinv = (S ** (-1. / 2))[:, None] * np.dot(U.T, LO.T)
return similarity_transform(sys, T, Tinv), S
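The helpers above (LinearSystem, control_gram, observe_gram, similarity_transform) come from the surrounding library; the Cholesky-plus-SVD core can be sketched on its own with two toy SPD Gramians (an illustrative sketch, not the library's API; names and values are made up):
import numpy as np
from scipy.linalg import cholesky, svd

rng = np.random.default_rng(0)
n = 4
# Toy SPD "Gramians" standing in for the outputs of control_gram/observe_gram.
R = rng.standard_normal((n, n))
R = R @ R.T + n * np.eye(n)
O = rng.standard_normal((n, n))
O = O @ O.T + n * np.eye(n)

LR = cholesky(R, lower=True)
LO = cholesky(O, lower=True)
U, S, V = svd(LO.T @ LR)            # S holds the Hankel singular values
T = (LR @ V.T) * S ** (-0.5)        # balancing transform
Tinv = (S ** (-0.5))[:, None] * (U.T @ LO.T)

# In balanced coordinates both Gramians become diag(S).
assert np.allclose(Tinv @ R @ Tinv.T, np.diag(S))
assert np.allclose(T.T @ O @ T, np.diag(S))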
Example 2: log_mvnpdf
def log_mvnpdf(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; reinitialize this component's covariance.
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T,
lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
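A hedged usage sketch for the function above, with made-up data and parameters, cross-checked against scipy.stats.multivariate_normal:
import numpy as np
from scipy import linalg
from scipy.stats import multivariate_normal

rng = np.random.default_rng(1)
X = rng.standard_normal((5, 3))
means = [np.zeros(3), np.ones(3)]
covars = [np.eye(3), np.diag([1.0, 2.0, 3.0])]

lp = log_mvnpdf(X, means, covars)            # shape (5, 2): one column per component
ref = np.column_stack([multivariate_normal(m, c).logpdf(X)
                       for m, c in zip(means, covars)])
assert np.allclose(lp, ref)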
Example 3: update
def update(self, ncov):
"""updates the covariance matrix and recalculates internals
:Parameters:
ncov : ndarray
            symmetric matrix, noise covariance
"""
# checks
if ncov.ndim != 2 or ncov.shape[0] != ncov.shape[1]:
raise ValueError('noise covariance is not a symmetric, '
'pos. definite matrix')
# inits
self.input_dim = ncov.shape[0]
self._ncov = ncov
self._chol_ncov = None
self._inv_chol_ncov = None
# compute cholesky decomposition
try:
self._chol_ncov = sp_la.cholesky(self._ncov)
    except sp_la.LinAlgError:
self._ncov = coloured_loading(self._ncov, 50)
self._chol_ncov = sp_la.cholesky(self._ncov)
# invert
self._inv_chol_ncov = sp_la.inv(self._chol_ncov)
# set ready flag
self._is_ready = True
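The inverse Cholesky factor stored above is the usual ingredient for prewhitening data against the noise covariance; a standalone sketch of that idea with a toy covariance (not this class's API):
import numpy as np
from scipy import linalg as sp_la

rng = np.random.default_rng(2)
ncov = np.array([[2.0, 0.3], [0.3, 1.0]])     # toy noise covariance
chol_ncov = sp_la.cholesky(ncov)              # upper triangular: ncov == C.T @ C
inv_chol_ncov = sp_la.inv(chol_ncov)

data = rng.multivariate_normal(np.zeros(2), ncov, size=50000)
white = data @ inv_chol_ncov                  # whitened samples
print(np.cov(white.T))                        # close to the identity matrix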
Example 4: __init__
def __init__(self, mean, sigma):
    self.mean = mean
    self.sigma = sigma
    self.sigmainv = linalg.inv(sigma)
    self.cholsigma = linalg.cholesky(sigma)
    # the following makes it lower triangular with increasing time
    self.cholsigmainv = linalg.cholesky(self.sigmainv)[::-1, ::-1]
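The [::-1, ::-1] reversal turns the upper Cholesky factor of sigmainv into a lower-triangular matrix (ordered with increasing time); a small check of what that factor satisfies, with toy values:
import numpy as np
from scipy import linalg

# Toy symmetric positive-definite sigma (illustrative only).
sigma = np.array([[2.0, 0.5, 0.0],
                  [0.5, 2.0, 0.5],
                  [0.0, 0.5, 2.0]])
sigmainv = linalg.inv(sigma)

U = linalg.cholesky(sigmainv)        # upper triangular: sigmainv == U.T @ U
L = U[::-1, ::-1]                    # reversed copy is lower triangular
assert np.allclose(U.T @ U, sigmainv)
assert np.allclose(np.tril(L), L)
assert np.allclose(U @ sigma @ U.T, np.eye(3))   # U whitens sigma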
Example 5: LMLdebug
def LMLdebug(self):
"""
LML function for debug
"""
assert self.N*self.P<5000, 'gp2kronSum:: N*P>=5000'
y = SP.reshape(self.Y,(self.N*self.P), order='F')
V = SP.kron(SP.eye(self.P),self.F)
XX = SP.dot(self.Xr,self.Xr.T)
K = SP.kron(self.Cr.K(),XX)
K += SP.kron(self.Cn.K()+self.offset*SP.eye(self.P),SP.eye(self.N))
# inverse of K
cholK = LA.cholesky(K)
Ki = LA.cho_solve((cholK,False),SP.eye(self.N*self.P))
# Areml and inverse
Areml = SP.dot(V.T,SP.dot(Ki,V))
cholAreml = LA.cholesky(Areml)
Areml_i = LA.cho_solve((cholAreml,False),SP.eye(self.K*self.P))
# effect sizes and z
b = SP.dot(Areml_i,SP.dot(V.T,SP.dot(Ki,y)))
z = y-SP.dot(V,b)
Kiz = SP.dot(Ki,z)
# lml
lml = y.shape[0]*SP.log(2*SP.pi)
lml += 2*SP.log(SP.diag(cholK)).sum()
lml += 2*SP.log(SP.diag(cholAreml)).sum()
lml += SP.dot(z,Kiz)
lml *= 0.5
return lml
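The two 2*log(diag(...)) sums above rely on the identity log|K| = 2·sum(log(diag(chol(K)))); a quick standalone check of that identity and of the cho_solve call pattern used here, on a toy SPD matrix:
import numpy as np
import scipy.linalg as LA

rng = np.random.default_rng(3)
B = rng.standard_normal((6, 6))
K = B @ B.T + 6 * np.eye(6)             # toy SPD matrix

cholK = LA.cholesky(K)                  # upper triangular
logdet = 2 * np.log(np.diag(cholK)).sum()
assert np.allclose(logdet, np.linalg.slogdet(K)[1])

# cho_solve with (factor, lower=False) matches the usage in LMLdebug.
Ki = LA.cho_solve((cholK, False), np.eye(6))
assert np.allclose(K @ Ki, np.eye(6))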
Example 6: fitPairwiseModel
def fitPairwiseModel(Y,XX=None,S_XX=None,U_XX=None,verbose=False):
N,P = Y.shape
""" initilizes parameters """
RV = fitSingleTraitModel(Y,XX=XX,S_XX=S_XX,U_XX=U_XX,verbose=verbose)
Cg = covariance.freeform(2)
Cn = covariance.freeform(2)
gp = gp2kronSum(mean(Y[:,0:2]),Cg,Cn,XX=XX,S_XX=S_XX,U_XX=U_XX)
conv2 = SP.ones((P,P),dtype=bool)
rho_g = SP.ones((P,P))
rho_n = SP.ones((P,P))
for p1 in range(P):
for p2 in range(p1):
            if verbose:
                print('.. fitting correlation (%d,%d)' % (p1, p2))
gp.setY(Y[:,[p1,p2]])
Cg_params0 = SP.array([SP.sqrt(RV['varST'][p1,0]),1e-6*SP.randn(),SP.sqrt(RV['varST'][p2,0])])
Cn_params0 = SP.array([SP.sqrt(RV['varST'][p1,1]),1e-6*SP.randn(),SP.sqrt(RV['varST'][p2,1])])
params0 = {'Cg':Cg_params0,'Cn':Cn_params0}
conv2[p1,p2],info = OPT.opt_hyper(gp,params0,factr=1e3)
rho_g[p1,p2] = Cg.K()[0,1]/SP.sqrt(Cg.K().diagonal().prod())
rho_n[p1,p2] = Cn.K()[0,1]/SP.sqrt(Cn.K().diagonal().prod())
conv2[p2,p1] = conv2[p1,p2]; rho_g[p2,p1] = rho_g[p1,p2]; rho_n[p2,p1] = rho_n[p1,p2]
RV['Cg0'] = rho_g*SP.dot(SP.sqrt(RV['varST'][:,0:1]),SP.sqrt(RV['varST'][:,0:1].T))
RV['Cn0'] = rho_n*SP.dot(SP.sqrt(RV['varST'][:,1:2]),SP.sqrt(RV['varST'][:,1:2].T))
RV['conv2'] = conv2
#3. regularizes covariance matrices
offset_g = abs(SP.minimum(LA.eigh(RV['Cg0'])[0].min(),0))+1e-4
offset_n = abs(SP.minimum(LA.eigh(RV['Cn0'])[0].min(),0))+1e-4
RV['Cg0_reg'] = RV['Cg0']+offset_g*SP.eye(P)
RV['Cn0_reg'] = RV['Cn0']+offset_n*SP.eye(P)
RV['params0_Cg']=LA.cholesky(RV['Cg0_reg'])[SP.tril_indices(P)]
RV['params0_Cn']=LA.cholesky(RV['Cn0_reg'])[SP.tril_indices(P)]
return RV
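The final step shifts each estimated covariance by |smallest eigenvalue| + 1e-4 so the Cholesky factorization succeeds, then keeps the lower-triangular entries as free-form initial parameters; a toy sketch of that pattern (taking the lower factor explicitly so the extracted triangle carries the nonzero entries):
import numpy as np
import scipy.linalg as LA

P = 3
# A symmetric matrix that is not positive definite (toy values).
C0 = np.array([[ 1.0,  0.9, -0.9],
               [ 0.9,  1.0,  0.9],
               [-0.9,  0.9,  1.0]])

offset = abs(min(LA.eigh(C0)[0].min(), 0.0)) + 1e-4
C0_reg = C0 + offset * np.eye(P)
L0 = LA.cholesky(C0_reg, lower=True)            # now succeeds
params0 = L0[np.tril_indices(P)]                # P*(P+1)/2 free parameters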
Example 7: simulate_lu_decom
def simulate_lu_decom(sim_locations,sample_locations,vmodel):
c11 = fill_cova(sample_locations,None,vmodel)
c21 = fill_cova(sim_locations,sample_locations,vmodel)
c22 = fill_cova(sim_locations,None,vmodel)
u11 = cholesky(c11)
l11 = u11.T
u11_inv = inv(u11)
l21 = c21 @ u11_inv
u12 = l21.T
    l22 = cholesky(c22 - l21 @ l21.T, lower=True)   # Cholesky of the Schur complement of c11
return u11_inv.T,l21,l22
    # The remainder is unreachable: an alternative factorization using an LU
    # decomposition instead of Cholesky.
    l11, u11 = lu(c11, permute_l=True)
    l11_inv = inv(l11)
    a21t = l11_inv @ c21.T
    a21 = a21t.T
    b12 = a21t
    l22, u22 = lu(c22 - a21 @ b12, permute_l=True)
    return a21, l11_inv, l22
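fill_cova and vmodel belong to the surrounding geostatistics code; the sketch below shows how the returned factors would drive a conditional simulation, substituting a simple exponential covariance for fill_cova (all names, locations and observed values here are illustrative assumptions):
import numpy as np
from scipy.linalg import cholesky, inv

def expcov(a, b, range_=2.0):
    # Toy stand-in for fill_cova: exponential covariance between 1-D locations.
    d = np.abs(a[:, None] - b[None, :])
    return np.exp(-d / range_)

sample_locations = np.array([0.0, 1.0, 3.0])
sim_locations = np.linspace(0.25, 3.75, 8)       # distinct from the samples
y_obs = np.array([0.5, -0.2, 1.0])               # observed values (made up)

c11 = expcov(sample_locations, sample_locations)
c21 = expcov(sim_locations, sample_locations)
c22 = expcov(sim_locations, sim_locations)

u11 = cholesky(c11)                              # upper: c11 == u11.T @ u11
l11_inv = inv(u11).T                             # inverse of the lower factor
l21 = c21 @ inv(u11)
l22 = cholesky(c22 - l21 @ l21.T, lower=True)    # conditional covariance factor

# Conditional simulation: kriging mean plus correlated random residual.
rng = np.random.default_rng(4)
sim = l21 @ (l11_inv @ y_obs) + l22 @ rng.standard_normal(len(sim_locations))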
Example 8: genBrownianMotion
def genBrownianMotion (n, tMax=10.0):
tSeq = np.arange (tMax/float(500),
tMax*(1+1/float(500)), tMax/float(500));
sig = np.zeros ((500,500), dtype='float64');
for i in range (500):
sig[i,0:i] = tSeq[0:i];
sig[i,i:] = tSeq[i];
sigSqrt = LA.cholesky (sig, lower=True);
    for j in range(n // 500):
z = np.dot (sigSqrt, nr.randn (500));
if j == 0:
zTot = np.insert (z, 0, 0);
else:
z = z + zTot[-1]
zTot = np.append(zTot, z)
m = n % 500 - 1
tSeq = np.arange (tMax/float(m),
tMax*(1+1/float(m)), tMax/float(m));
sig = np.zeros ((m,m), dtype='float64');
for i in range (m):
sig[i,0:i] = tSeq[0:i];
sig[i,i:] = tSeq[i];
print(sig)
sigSqrt = LA.cholesky (sig, lower=True);
z = np.dot (sigSqrt, nr.randn (m));
z = z + zTot[-1]
zTot = np.append(zTot, z)
return zTot
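The core trick in genBrownianMotion is that a Brownian path sampled at times t can be drawn as L @ z, where L is the Cholesky factor of the covariance Cov(B_s, B_t) = min(s, t) and z is standard normal; a minimal standalone version of that step:
import numpy as np
from scipy import linalg as LA

m, tMax = 200, 10.0
t = np.linspace(tMax / m, tMax, m)
cov = np.minimum.outer(t, t)              # Cov(B_s, B_t) = min(s, t)
L = LA.cholesky(cov, lower=True)
path = L @ np.random.randn(m)             # one Brownian path sampled at t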
Example 9: test_check_finite
def test_check_finite(self):
a = [[8, 2, 3], [2, 9, 3], [3, 3, 6]]
c = cholesky(a, check_finite=False)
assert_array_almost_equal(dot(transpose(c), c), a)
c = transpose(c)
a = dot(c, transpose(c))
assert_array_almost_equal(cholesky(a, lower=1, check_finite=False), c)
Example 10: log_multivariate_normal_density
def log_multivariate_normal_density(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices. """
if hasattr(linalg, 'solve_triangular'):
# only in scipy since 0.9
solve_triangular = linalg.solve_triangular
else:
# slower, but works
solve_triangular = linalg.solve
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
    for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; reinitialize this component's covariance.
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) + \
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
Example 11: choleskyjitter
def choleskyjitter(A, overwrite_a = False, check_finite = True):
"""Add jitter stochastically until a positive definite matrix occurs"""
# Avoid preparing for jittering if we can already find the cholesky
# with no problem
try:
return la.cholesky(A, lower = True, overwrite_a = overwrite_a,
check_finite = check_finite)
except Exception:
pass
    # Prepare for jittering (all the magic numbers here are arbitrary...)
n = A.shape[0]
maxscale = 1e10
minscale = 1e-4
scale = minscale
# Keep jittering stochastically, increasing the jitter magnitude along
# the way, until it's all good
while scale < maxscale:
try:
jitA = scale * np.diag(np.random.rand(n))
L = la.cholesky(A + jitA, lower = True, overwrite_a = overwrite_a,
check_finite = check_finite)
return L
except la.LinAlgError:
scale *= 1.01
log.warning('Jitter added stochastically. Scale: %f!' % scale)
raise la.LinAlgError("Max value of jitter reached")
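A usage sketch for choleskyjitter on a rank-deficient Gram matrix (the function above expects a module-level log object, so one is created here; matrix values are illustrative):
import logging
import numpy as np
import scipy.linalg as la

log = logging.getLogger(__name__)    # the function above expects a module-level `log`

X = np.random.randn(6, 3)
A = X @ X.T                          # rank-deficient Gram matrix; a plain cholesky usually fails
L = choleskyjitter(A)
print(np.max(np.abs(L @ L.T - A)))   # small: at most the added diagonal jitter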
Example 12: isposdef
def isposdef(X):
"Return if matrix is positive definite. Relies on cholesky decomp"
try:
la.cholesky(X) # will raise LinAlgError if not positive def
return True
except la.LinAlgError:
return False
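A quick check of isposdef on obvious cases (toy matrices):
import numpy as np
import scipy.linalg as la

print(isposdef(np.eye(3)))                           # True
print(isposdef(np.array([[1.0, 2.0], [2.0, 1.0]])))  # False: eigenvalues 3 and -1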
Example 13: contour_2d
def contour_2d(mu, cov=None, prec=None,
n=100, radius=[1, np.sqrt(6)]):
"""
Assuming a bivariate normal
distribution, draw contours,
given 'radius' information.
Note:
sqrt(6) covers roughly 95% of probability
mass (in 2d), given the fact that the mahalanobis
distribution is chi-squared distributed.
"""
mu = mu.reshape(2,1)
t = np.linspace(0, 2*np.pi, n)
circle = np.array([np.cos(t), np.sin(t)])
if prec is None:
        L = la.cholesky(cov, lower=True)   # lower factor so that L @ L.T == cov
ellipse = np.dot(L, circle)
else:
L = la.cholesky(prec)
ellipse = la.solve_triangular(L, circle)
# FIXME: not correct yet
plots = {}
for r in radius:
plots[r] = (r*ellipse[0,:] + mu[0], r*ellipse[1,:] + mu[1])
return plots
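A usage sketch that plots the returned contours with matplotlib (mean and covariance are made up):
import numpy as np
import scipy.linalg as la
import matplotlib.pyplot as plt

mu = np.array([1.0, -0.5])
cov = np.array([[2.0, 0.8],
                [0.8, 1.0]])

plots = contour_2d(mu, cov=cov)
for r, (x, y) in plots.items():
    plt.plot(x, y, label='radius %.2f' % r)
plt.legend()
plt.show()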
Example 14: _initialize
def _initialize(self, X, resp):
"""Initialization of the Gaussian mixture parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
resp : array-like, shape (n_samples, n_components)
"""
n_samples, _ = X.shape
weights, means, covariances = _estimate_gaussian_parameters(
X, resp, self.reg_covar, self.covariance_type)
weights /= n_samples
self.weights_ = (weights if self.weights_init is None
else self.weights_init)
self.means_ = means if self.means_init is None else self.means_init
if self.precisions_init is None:
self.covariances_ = covariances
self.precisions_cholesky_ = _compute_precision_cholesky(
covariances, self.covariance_type)
elif self.covariance_type == 'full':
self.precisions_cholesky_ = np.array(
[linalg.cholesky(prec_init, lower=True)
for prec_init in self.precisions_init])
elif self.covariance_type == 'tied':
self.precisions_cholesky_ = linalg.cholesky(self.precisions_init,
lower=True)
else:
self.precisions_cholesky_ = self.precisions_init
Example 15: jitchol_old
def jitchol_old(A, maxtries=5):
"""
:param A: An almost pd square matrix
:rval L: the Cholesky decomposition of A
.. note:
    Adds jitter to A, to enforce positive-definiteness
if stuff breaks, please check:
np.allclose(sp.linalg.cholesky(XXT, lower = True), np.triu(sp.linalg.cho_factor(XXT)[0]).T)
"""
try:
return linalg.cholesky(A, lower=True)
except linalg.LinAlgError:
diagA = np.diag(A)
if np.any(diagA < 0.):
            raise linalg.LinAlgError("not pd: negative diagonal elements")
jitter = diagA.mean() * 1e-6
for i in range(1, maxtries + 1):
            print('\rWarning: adding jitter of {:.10e} '.format(jitter), end='')
            try:
                return linalg.cholesky(A + np.eye(A.shape[0]) * jitter, lower=True)
            except linalg.LinAlgError:
jitter *= 10
    raise linalg.LinAlgError("not positive definite, even with jitter.")