This page collects typical usage examples of the scipy.linalg.cho_solve function in Python. If you are wondering what cho_solve does, how to call it, or what real-world usage looks like, the curated examples below may help.
The following shows 15 code examples of cho_solve drawn from open-source projects, ordered by popularity.
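Before the project-specific listings, here is a minimal, self-contained sketch of the basic cho_factor/cho_solve pattern that most of the examples below build on (the matrix and right-hand side are arbitrary illustration data):

import numpy as np
from scipy.linalg import cho_factor, cho_solve

# Build a symmetric positive-definite matrix and a right-hand side.
rng = np.random.default_rng(0)
X = rng.standard_normal((5, 5))
A = X @ X.T + 5 * np.eye(5)     # SPD by construction
b = rng.standard_normal(5)

c, low = cho_factor(A)          # factor once ...
x = cho_solve((c, low), b)      # ... then solve A x = b cheaply
print(np.allclose(A @ x, b))    # True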

Example 1: cho_solve_ATAI
def cho_solve_ATAI(A, rho, b, c, lwr, check_finite=True):
    r"""
    Solve the linear system :math:`(A^T A + \rho I)\mathbf{x} = \mathbf{b}`
    or :math:`(A^T A + \rho I)X = B` using :func:`scipy.linalg.cho_solve`.

    Parameters
    ----------
    A : array_like
      Matrix :math:`A`
    rho : float
      Scalar :math:`\rho`
    b : array_like
      Vector :math:`\mathbf{b}` or matrix :math:`B`
    c : array_like
      Matrix containing lower or upper triangular Cholesky factor,
      as returned by :func:`scipy.linalg.cho_factor`
    lwr : bool
      Flag indicating whether the factor is lower or upper triangular

    Returns
    -------
    x : ndarray
      Solution to the linear system
    """

    N, M = A.shape
    if N >= M:
        x = linalg.cho_solve((c, lwr), b, check_finite=check_finite)
    else:
        x = (b - A.T.dot(linalg.cho_solve((c, lwr), A.dot(b),
                                          check_finite=check_finite))) / rho
    return x
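The else branch above relies on the matrix inversion lemma, (A^T A + rho*I)^{-1} = (1/rho) * (I - A^T (A A^T + rho*I)^{-1} A), so only the smaller N x N system has to be factorized when N < M. A minimal usage sketch (assuming the function above is in scope with linalg being scipy.linalg; the data below is made up) could look like:

import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
A = rng.standard_normal((8, 20))      # N < M, so the lemma branch is taken
rho = 0.5
b = rng.standard_normal(20)

# For N < M the factor must be of the N x N matrix A A^T + rho*I.
c, lwr = linalg.cho_factor(A @ A.T + rho * np.eye(A.shape[0]))
x = cho_solve_ATAI(A, rho, b, c, lwr)
print(np.allclose((A.T @ A + rho * np.eye(A.shape[1])) @ x, b))   # True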

Example 2: LMLdebug
def LMLdebug(self):
    """
    LML function for debug
    """
    assert self.N*self.P<5000, 'gp2kronSum:: N*P>=5000'

    y = SP.reshape(self.Y,(self.N*self.P), order='F')
    V = SP.kron(SP.eye(self.P),self.F)

    XX = SP.dot(self.Xr,self.Xr.T)
    K = SP.kron(self.Cr.K(),XX)
    K += SP.kron(self.Cn.K()+self.offset*SP.eye(self.P),SP.eye(self.N))

    # inverse of K
    cholK = LA.cholesky(K)
    Ki = LA.cho_solve((cholK,False),SP.eye(self.N*self.P))

    # Areml and inverse
    Areml = SP.dot(V.T,SP.dot(Ki,V))
    cholAreml = LA.cholesky(Areml)
    Areml_i = LA.cho_solve((cholAreml,False),SP.eye(self.K*self.P))

    # effect sizes and z
    b = SP.dot(Areml_i,SP.dot(V.T,SP.dot(Ki,y)))
    z = y-SP.dot(V,b)
    Kiz = SP.dot(Ki,z)

    # lml
    lml = y.shape[0]*SP.log(2*SP.pi)
    lml += 2*SP.log(SP.diag(cholK)).sum()
    lml += 2*SP.log(SP.diag(cholAreml)).sum()
    lml += SP.dot(z,Kiz)
    lml *= 0.5

    return lml

Example 3: compute_logprod_derivative
def compute_logprod_derivative(Alup, dA, B, dB):
    """ I = logdet(A) + Tr(inv(A)*B)
        dI/dx = Tr(inv(A)*(dA - dA*inv(A)*B + dB)) """

    tmp = lalg.cho_solve(Alup, B, check_finite=False)
    tmp2 = dA + dB - dA.dot(tmp)
    return np.trace(lalg.cho_solve(Alup, tmp2, check_finite=False))
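A quick finite-difference check of the identity in the docstring (the parameterization of A(x) and B(x) below is invented for illustration, and compute_logprod_derivative from the listing above is assumed to be in scope):

import numpy as np
from scipy import linalg as lalg

def make(x):
    # Simple SPD A(x) and symmetric B(x) with easy analytic derivatives.
    A = np.diag([2.0 + x, 3.0, 1.0 + 2*x])
    B = np.full((3, 3), x)
    dA = np.diag([1.0, 0.0, 2.0])
    dB = np.ones((3, 3))
    return A, B, dA, dB

def I(x):
    A, B, _, _ = make(x)
    return np.linalg.slogdet(A)[1] + np.trace(np.linalg.solve(A, B))

x0, eps = 0.7, 1e-6
A, B, dA, dB = make(x0)
analytic = compute_logprod_derivative(lalg.cho_factor(A), dA, B, dB)
numeric = (I(x0 + eps) - I(x0 - eps)) / (2*eps)
print(analytic, numeric)   # the two values should agree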

Example 4: loglik_full
def loglik_full(self, l_a, l_rho, Agw, gammagw):
    """
    Given all these parameters, calculate the full likelihood

    @param l_a:     List of Fourier coefficient arrays for all pulsars
    @param l_rho:   List of arrays of log10(PSD) amplitudes for all pulsars
    @param Agw:     log10(GW amplitude)
    @param gammagw: GWB spectral index

    @return: Log-likelihood
    """
    # Transform the GWB parameters to PSD coefficients (pc)
    pc_gw = self.gwPSD(Agw, gammagw)

    rv = 0.0
    for ii, freq in enumerate(self.freqs):
        a_cos = l_a[:,2*ii]     # Cosine modes for f=freq
        a_sin = l_a[:,2*ii+1]   # Sine modes for f=freq
        rho = l_rho[:,ii]       # PSD amp for f=freq

        # Covariance matrix is the same for sine and cosine modes
        cov = np.diag(10**rho) + self.hdmat * pc_gw[ii]
        cf = sl.cho_factor(cov)
        logdet = 2*np.sum(np.log(np.diag(cf[0])))

        # Add the log-likelihood for the cosine and the sine modes
        rv += -0.5 * np.dot(a_cos, sl.cho_solve(cf, a_cos)) - \
               0.5 * np.dot(a_sin, sl.cho_solve(cf, a_sin)) - \
               2*self.Npsr*np.log(2*np.pi) - logdet

    return rv

Example 5: get_covariances
def get_covariances(self, hyperparams):
    """
    INPUT:
    hyperparams:  dictionary
    OUTPUT: dictionary with the fields
    K:      kernel
    Kinv:   inverse of the kernel
    L:      chol(K)
    alpha:  solve(K,y)
    W:      D*Kinv - alpha*alpha^T
    """
    if self._is_cached(hyperparams):
        return self._covar_cache
    K = self.covar.K(hyperparams['covar'])

    if self.likelihood is not None:
        Knoise = self.likelihood.K(hyperparams['lik'],self.n)
        K += Knoise
    L = LA.cholesky(K).T  # lower triangular
    alpha = LA.cho_solve((L,True),self.Y)
    Kinv = LA.cho_solve((L,True),SP.eye(L.shape[0]))
    W = self.t*Kinv - SP.dot(alpha,alpha.T)
    self._covar_cache = {}
    self._covar_cache['K'] = K
    self._covar_cache['Kinv'] = Kinv
    self._covar_cache['L'] = L
    self._covar_cache['alpha'] = alpha
    self._covar_cache['W'] = W
    self._covar_cache['hyperparams'] = copy.deepcopy(hyperparams)
    return self._covar_cache

Example 6: finalize
def finalize(self):
    """Finalize the fit, utilizing the already inverted Sigma matrix"""
    # Calculate the prediction quantities
    if len(self.dtp) > 0:
        self.MNt_n = np.dot(self.Mp_n.T, sl.cho_solve(self.Np_cf, self.dtp))
        dtNdt = np.dot(self.dtp, sl.cho_solve(self.Np_cf, self.dtp))
    else:
        self.MNt_n = np.zeros(self.Mp_n.shape[1])
        dtNdt = 0.0

    self.dpars_n = np.dot(self.Sigma_n, self.MNt_n + self.phipar_n)

    # TODO: should use dpars, instead of MNt below here???
    self.rp = np.dot(self.Mtot_n, np.dot(self.Sigma_n, self.MNt_n))   # Should be approx ~0.0
    self.rr = np.dot(self.Mtot_n, np.dot(self.Sigma_n, self.Mtot_n.T))

    # Calculate the log-likelihood
    logdetN2 = np.sum(np.log(np.diag(self.Np_cf[0])))
    logdetphi2 = 0.5*np.sum(np.log(self.Phivec_n))
    chi2dt = 0.5*dtNdt
    chi2phi = 0.5*np.sum(self.prpars_delta_n**2/self.Phivec_n)
    chi2phi1 = 0.5*np.dot(self.dpars_n, np.dot(self.Sigma_inv_n, self.dpars_n))
    chi2_active = 0.5*np.dot(self.dpars_n, np.dot(self.Sigma_inv_n, self.dpars_n))

    # NOTE: chi2_active is zero _if_ we move to the ML solution. We are dpars
    #       away from there. That's why we subtract it from loglik.
    #       Note also that, now, chi2phi1 and chi2_active are the same in
    #       this rescaling
    self.loglik = -logdetN2-logdetphi2-chi2dt-chi2phi+chi2phi1-chi2_active
    self.loglik_ml = -logdetN2-logdetphi2-chi2dt-chi2phi+chi2phi1

Example 7: _update_cache
def _update_cache(self):
    """
    INPUT:
    hyperparams:  dictionary
    OUTPUT: dictionary with the fields
    K:      kernel
    Kinv:   inverse of the kernel
    L:      chol(K)
    alpha:  solve(K,y)
    W:      D*Kinv - alpha*alpha^T
    """
    cov_params_have_changed = self.covar.params_have_changed

    if cov_params_have_changed or self.Y_has_changed:
        K = self.covar.K()
        L = LA.cholesky(K).T  # lower triangular
        Kinv = LA.cho_solve((L,True),SP.eye(L.shape[0]))
        alpha = LA.cho_solve((L,True),self.Y)
        W = self.t*Kinv - SP.dot(alpha,alpha.T)
        self._covar_cache = {}
        self._covar_cache['K'] = K
        self._covar_cache['Kinv'] = Kinv
        self._covar_cache['L'] = L
        self._covar_cache['alpha'] = alpha
        self._covar_cache['W'] = W

    return self._covar_cache

Example 8: predict
def predict(self, y, t):
    """
    Compute the conditional predictive distribution of the model.

    :param y: ``(nsamples,)``
        The observations to condition the model on.

    :param t: ``(ntest,)`` or ``(ntest, ndim)``
        The coordinates where the predictive distribution should be
        computed.

    Returns a tuple ``(mu, cov)`` where

    * **mu** ``(ntest,)`` is the mean of the predictive distribution, and
    * **cov** ``(ntest, ntest)`` is the predictive covariance.

    """
    self.recompute()
    r = self._check_dimensions(y)[self.inds] - self.mean(self._x)
    xs, i = self.parse_samples(t, False)
    alpha = cho_solve(self._factor, r)

    # Compute the predictive mean.
    Kxs = self.kernel(self._x[None, :], xs[:, None])
    mu = np.dot(Kxs, alpha) + self.mean(xs)

    # Compute the predictive covariance.
    cov = self.kernel(xs[:, None], xs[None, :])
    cov -= np.dot(Kxs, cho_solve(self._factor, Kxs.T))

    return mu, cov
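The predict method above leans on state cached by the class (self._factor, self.kernel, self.mean). A self-contained sketch of the same predictive equations with plain scipy calls (squared-exponential kernel, zero mean, synthetic data; none of this comes from the project above) might look like:

import numpy as np
from scipy.linalg import cho_factor, cho_solve

def sqexp(a, b, ell=1.0):
    return np.exp(-0.5 * (a[:, None] - b[None, :])**2 / ell**2)

rng = np.random.default_rng(0)
x = np.sort(rng.uniform(0, 10, 25))
y = np.sin(x) + 0.1 * rng.standard_normal(x.size)
xs = np.linspace(0, 10, 100)

K = sqexp(x, x) + 0.1**2 * np.eye(x.size)               # training covariance + noise
factor = cho_factor(K)
alpha = cho_solve(factor, y)                            # K^{-1} y

Kxs = sqexp(xs, x)                                      # cross-covariance
mu = Kxs @ alpha                                        # predictive mean
cov = sqexp(xs, xs) - Kxs @ cho_solve(factor, Kxs.T)    # predictive covariance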

Example 9: _calculate_log_likelihood
def _calculate_log_likelihood(self):
    #if self.m == None:
    #    Give error message
    R = zeros((self.n, self.n))
    X, Y = array(self.X), array(self.Y)
    thetas = 10.**self.thetas
    for i in range(self.n):
        for j in arange(i+1, self.n):
            R[i,j] = (1-self.nugget)*e**(-sum(thetas*(X[i]-X[j])**2.))  # weighted distance formula
    R = R + R.T + eye(self.n)
    self.R = R
    one = ones(self.n)
    try:
        self.R_fact = cho_factor(R)
        rhs = vstack([Y, one]).T
        R_fact = (self.R_fact[0].T, not self.R_fact[1])
        cho = cho_solve(R_fact, rhs).T

        self.mu = dot(one,cho[0])/dot(one,cho[1])
        self.sig2 = dot(Y-dot(one,self.mu), cho_solve(self.R_fact, (Y-dot(one,self.mu))))/self.n
        #self.log_likelihood = -self.n/2.*log(self.sig2)-1./2.*log(abs(det(self.R)+1.e-16))-sum(thetas)
        self.log_likelihood = -self.n/2.*log(self.sig2)-1./2.*log(abs(det(self.R)+1.e-16))
    except (linalg.LinAlgError, ValueError):
        #------LSTSQ---------
        self.R_fact = None  # reset this to None, so we know not to use Cholesky
        #self.R = self.R+diag([10e-6]*self.n)  # improve conditioning [Booker et al., 1999]
        rhs = vstack([Y, one]).T
        lsq = lstsq(self.R.T, rhs)[0].T

        self.mu = dot(one,lsq[0])/dot(one,lsq[1])
        self.sig2 = dot(Y-dot(one,self.mu), lstsq(self.R, Y-dot(one,self.mu))[0])/self.n
        self.log_likelihood = -self.n/2.*log(self.sig2)-1./2.*log(abs(det(self.R)+1.e-16))

Example 10: grad_nlogprob
def grad_nlogprob(hypers):
    amp2 = np.exp(hypers[0])
    noise = np.exp(hypers[1])
    ls = np.exp(hypers[2:])

    chol, corr, grad_corr = memoize(amp2, noise, ls)
    solve = spla.cho_solve((chol, True), diffs)
    inv_cov = spla.cho_solve((chol, True), np.eye(chol.shape[0]))

    jacobian = np.outer(solve, solve) - inv_cov

    grad = np.zeros(self.D + 2)

    # Log amplitude gradient.
    grad[0] = 0.5 * np.trace(np.dot(jacobian, corr + 1e-6*np.eye(chol.shape[0]))) * amp2

    # Log noise gradient.
    grad[1] = 0.5 * np.trace(np.dot(jacobian, np.eye(chol.shape[0]))) * noise

    # Log length scale gradients.
    for dd in xrange(self.D):
        grad[dd+2] = 1 * np.trace(np.dot(jacobian, -amp2*grad_corr[:,:,dd]*comp[:,dd][:,np.newaxis]/(np.exp(ls[dd]))))*np.exp(ls[dd])

    # Roll in the prior variance.
    #grad -= 2*hypers/self.hyper_prior

    return -grad
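The jacobian term above, np.outer(solve, solve) - inv_cov, is the standard identity behind the gradient of a Gaussian negative log likelihood: with nll = 0.5*y^T K^{-1} y + 0.5*logdet K + const and alpha = K^{-1} y, one has d(nll)/d(theta) = 0.5 * Tr((K^{-1} - alpha alpha^T) dK/dtheta). A self-contained sketch (toy RBF kernel and data, unrelated to the project above) that checks this against a finite difference:

import numpy as np
from scipy import linalg as spla

rng = np.random.default_rng(1)
x = rng.uniform(0, 5, size=20)
y = np.sin(x) + 0.1 * rng.standard_normal(20)
r2 = (x[:, None] - x[None, :])**2

def kernel(ls):
    return np.exp(-0.5 * r2 / ls**2) + 1e-6 * np.eye(x.size)

def nll(ls):
    chol = spla.cho_factor(kernel(ls), lower=True)
    alpha = spla.cho_solve(chol, y)
    return 0.5 * y @ alpha + np.log(np.diag(chol[0])).sum()

def grad_nll(ls):
    K = kernel(ls)
    dK = (K - 1e-6 * np.eye(x.size)) * r2 / ls**3     # d/d(ls) of the RBF part
    chol = spla.cho_factor(K, lower=True)
    alpha = spla.cho_solve(chol, y)
    Kinv = spla.cho_solve(chol, np.eye(x.size))
    return 0.5 * np.trace((Kinv - np.outer(alpha, alpha)) @ dK)

ls, eps = 1.3, 1e-6
print(grad_nll(ls), (nll(ls + eps) - nll(ls - eps)) / (2*eps))   # should agree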

Example 11: elbo
def elbo(params, mask, *args):
    """ELBO with full posterior covariance matrix"""
    t, mu, post_cov = args
    K, dK = kernel(t, params)
    dK *= mask[np.newaxis, np.newaxis, :]

    try:
        L = cholesky(K, lower=True)
    except LinAlgError:
        return -np.inf, np.zeros_like(params)

    Kinv = cho_solve((L, True), np.eye(K.shape[0]))  # K inverse

    if mu.ndim == 1:
        mu = mu[:, np.newaxis]

    alpha = cho_solve((L, True), mu)
    ll_dims = -0.5 * np.einsum("ik,ik->k", mu, alpha)
    tmp = np.einsum("ik,jk->ijk", alpha, alpha)
    tmp -= Kinv[:, :, np.newaxis]

    for i in range(post_cov.shape[-1]):
        KinvSigma = cho_solve((L, True), post_cov[:, :, i])
        ll_dims[i] -= 0.5 * np.trace(KinvSigma)
        tmp[:, :, i] += KinvSigma @ Kinv

    ll_dims -= np.log(np.diag(L)).sum()
    ll = ll_dims.sum(-1)

    dll_dims = 0.5 * np.einsum("ijl,ijk->kl", tmp, dK)
    dll = dll_dims.sum(-1)

    return ll, dll

Example 12: rakeDistortionlessFilters
def rakeDistortionlessFilters(self, source, interferer, R_n, delay=0.03, epsilon=5e-3):
    '''
    Compute time-domain filters of a beamformer minimizing noise and interference
    while forcing a distortionless response towards the source.
    '''
    H = buildRIRMatrix(self.R, (source, interferer), self.Lg, self.Fs, epsilon=epsilon, unit_damping=True)
    L = H.shape[1] // 2  # integer division so L can be used as a slice index below

    # We first assume the samples are uncorrelated
    K_nq = np.dot(H[:,L:], H[:,L:].T) + R_n

    # constraint
    kappa = int(delay*self.Fs)
    A = H[:,:L]
    b = np.zeros((L,1))
    b[kappa,0] = 1

    # filter computation
    C = la.cho_factor(K_nq, overwrite_a=True, check_finite=False)
    B = la.cho_solve(C, A)
    D = np.dot(A.T, B)
    C = la.cho_factor(D, overwrite_a=True, check_finite=False)
    x = la.cho_solve(C, b)
    g_val = np.dot(B, x)

    # reshape and store
    self.filters = g_val.reshape((self.M, self.Lg))

    # compute and return SNR
    A = np.dot(g_val.T, H[:,:L])
    num = np.dot(A, A.T)
    denom = np.dot(np.dot(g_val.T, K_nq), g_val)

    return num/denom
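The filter computation above chains two factor/solve steps: one Cholesky factorization gives B = K_nq^{-1} A and D = A^T K_nq^{-1} A, and a second one solves the small constrained system D x = b. A stripped-down sketch of that pattern with synthetic matrices (the names mirror the listing, but nothing here comes from pyroomacoustics):

import numpy as np
from scipy import linalg as la

rng = np.random.default_rng(0)
M, L = 40, 5
A = rng.standard_normal((M, L))             # constraint matrix
S = rng.standard_normal((M, 200))
K_nq = S @ S.T / 200 + 1e-3 * np.eye(M)     # SPD noise covariance
b = np.zeros((L, 1)); b[2, 0] = 1.0         # distortionless constraint

C = la.cho_factor(K_nq, check_finite=False)
B = la.cho_solve(C, A)                      # K_nq^{-1} A
D = A.T @ B                                 # A^T K_nq^{-1} A (SPD)
x = la.cho_solve(la.cho_factor(D), b)       # solve D x = b
g = B @ x                                   # the resulting filter weights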

Example 13: _LMLgrad_covar_debug
def _LMLgrad_covar_debug(self, covar):
    assert self.N*self.P<2000, 'gp2kronSum:: N*P>=2000'

    y = SP.reshape(self.Y,(self.N*self.P), order='F')

    K = SP.kron(self.Cg.K(),self.XX)
    K += SP.kron(self.Cn.K()+self.offset*SP.eye(self.P),SP.eye(self.N))

    cholK = LA.cholesky(K).T
    Ki = LA.cho_solve((cholK,True),SP.eye(y.shape[0]))
    Kiy = LA.cho_solve((cholK,True),y)

    if covar=='Cr':     n_params = self.Cr.getNumberParams()
    elif covar=='Cg':   n_params = self.Cg.getNumberParams()
    elif covar=='Cn':   n_params = self.Cn.getNumberParams()

    RV = SP.zeros(n_params)

    for i in range(n_params):
        #0. calc grad_i
        if covar=='Cg':
            C = self.Cg.Kgrad_param(i)
            Kgrad = SP.kron(C,self.XX)
        elif covar=='Cn':
            C = self.Cn.Kgrad_param(i)
            Kgrad = SP.kron(C,SP.eye(self.N))

        #1. der of log det
        RV[i] = 0.5*(Ki*Kgrad).sum()

        #2. der of quad form
        RV[i] -= 0.5*(Kiy*SP.dot(Kgrad,Kiy)).sum()

    return RV

Example 14: predict
def predict(self, y, t):
    """
    Compute the conditional predictive distribution of the model.

    :param y: ``(nsamples, )``
        The observations to condition the model on.

    :param t: ``(ntest, )``
        The coordinates where the predictive distribution should be
        computed.

    :returns mu: ``(ntest, )``
        The mean of the predictive distribution.

    :returns cov: ``(ntest, ntest)``
        The predictive covariance.

    """
    r = self._check_dimensions(y)
    xs, i = self._parse_samples(t, False)
    alpha = cho_solve(self._factor, r)

    # Compute the predictive mean.
    Kxs = self._kernel(self._x[None, :], xs[:, None])
    mu = np.dot(Kxs, alpha)

    # Compute the predictive covariance.
    cov = self._kernel(xs[:, None], xs[None, :])
    cov -= np.dot(Kxs, cho_solve(self._factor, Kxs.T))

    return mu, cov

Example 15: find_likelihood_der
def find_likelihood_der(self, X, y):
    """
    Find the negative log likelihood and its partial derivatives.

    Parameters
    ----------

    Returns
    -------
    """
    n = len(X)
    K = self.cf.eval(X)
    #if len(self.krnds)!=K.shape[0]:
    #    print "Created new self.krnds!"
    #    self.krnds = np.random.randn(K.shape[0])*10**-6
    #K = K + np.eye(K.shape[0])*self.krnds
    L = np.linalg.cholesky(K)  # Problems using this on the cluster - bad scaling! Running time becomes really bad with large N. Solution: Update ATLAS
    #L = la.cholesky(K)
    #print np.linalg.solve(L.T, np.linalg.solve(L, y))
    #a = np.linalg.solve(L.T, np.linalg.solve(L, y))
    a = la.cho_solve((L, True), y)
    nll = 0.5*np.dot(y.T, a) + np.sum(np.log(np.diag(L))) + 0.5*n*np.log(2*np.pi)

    ders = np.zeros(len(self.cf.get_params()))
    #W = np.linalg.solve(L.T, np.linalg.solve(L, np.eye(n))) - a*a.T
    W = la.cho_solve((L, True), np.eye(n)) - a*a.T
    for i in range(len(self.cf.get_params())):
        ders[i] = np.sum(W*self.cf.derivative(X, i))/2

    return nll[0,0], ders