This article collects typical usage examples of Python's numpy.linalg.solve function. If you are wondering how exactly to use solve, what it does, or want to see it in real code, the hand-picked examples below should help.
Below, 15 code examples of the solve function are shown, ordered by popularity by default.
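Before the project examples, here is a minimal self-contained sketch (values invented for illustration) of what numpy.linalg.solve does: it solves the square linear system A x = b directly, without forming inv(A).

import numpy as np
A = np.array([[3.0, 1.0],
              [1.0, 2.0]])
b = np.array([9.0, 8.0])
x = np.linalg.solve(A, b)   # solves A @ x = b
print(x)                    # [2. 3.]
print(np.allclose(A @ x, b))  # True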
Example 1: MdcNE
# assumed imports for this standalone snippet
import numpy as np
import numpy.linalg as la

def MdcNE(A, b):
    """
    Solve the linear least-squares problem
        Min_{alpha} || b - A*alpha ||^2
    via a Cholesky factorization of the normal equations.
    Parameters
    ----------
    A : np.array or np.matrix
    b : np.array or np.matrix
    Returns
    -------
    alpha : np.array (in all cases)
        solution of the linear least-squares problem
    """
    S = np.matrix(A).T * np.matrix(A)
    # Check beforehand the conditioning of the system and the numerical
    # stability of the solve that follows
    c = la.cond(S)
    if c > 1e16:
        print('Warning: the condition number of the normal-equations matrix')
        print('         is very large ---> %0.5g' % c)
    # Factorization followed by the two triangular solves
    L = la.cholesky(S)  # lower-triangular matrix
    m = b.size
    bvect = np.matrix(b.reshape(m, 1))
    y = A.T * bvect
    z = la.solve(L, y)
    alpha = np.array(la.solve(L.T, z))
    return alpha
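As a quick, hedged illustration (data invented here), MdcNE can be used to fit a straight line in the least-squares sense:

import numpy as np
t = np.array([0.0, 1.0, 2.0, 3.0])
A = np.column_stack([np.ones_like(t), t])   # 4x2 design matrix
b = np.array([1.0, 2.1, 2.9, 4.2])
alpha = MdcNE(A, b)
print(alpha.ravel())   # approximately [0.99, 1.04]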
Example 2: rbf_int
# assumed imports for this standalone snippet
from numpy import zeros
from numpy.linalg import norm, cholesky, solve, LinAlgError

def rbf_int(tocke, rbf, z):
    """
    rbf_int solves the interpolation problem
        F(tocke[i]) = z[i]
    with radial basis functions of the form
        rbf(norm(x - tocke[i]))
    Input:
        tocke ... n x k table of n points in R^k
        rbf   ... function defining the shape of the basis
        z     ... prescribed values at the points
    Result:
        alpha ... coefficients of the expansion in the RBFs
    """
    # build the system matrix
    n, k = tocke.shape
    A = zeros((n, n))
    for i in range(n):
        for j in range(n):
            A[i, j] = rbf(norm(tocke[i, :] - tocke[j, :])**2)
    # Cholesky factorization
    try:
        R = cholesky(A)
        # forward substitution R*y = z
        y = solve(R, z)
        # back substitution R^T*alpha = y
        alpha = solve(R.T, y)
    except LinAlgError:
        # matrix is not positive definite, fall back to a direct solve
        alpha = solve(A, z)
    return alpha
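A hedged usage sketch (points, values, and the Gaussian basis are invented for illustration); note that rbf_int passes the squared distance to the basis function:

import numpy as np
tocke = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
z = np.array([0.0, 1.0, 1.0, 0.5])
gauss = lambda r2: np.exp(-r2)   # receives norm(...)**2
alpha = rbf_int(tocke, gauss, z)
print(alpha)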
Example 3: show_solution
# assumed imports; `frobenius` is a Frobenius-norm helper defined elsewhere in the project
from numpy.linalg import solve, inv

def show_solution(A, b, bp, ls):
    # "real" solution
    x = solve(A, b)
    for l in ls:
        bc = l * bp   # current b matrix
        db = abs(bc - b)
        print("With %f: " % l)
        print("\t║𝚫b║₂ = %f" % frobenius(db))
        # solve it
        xc = solve(A, l * bp)
        dx = abs(xc - x)
        # Calculate the errors
        Ainv = inv(A)
        cond = frobenius(A) * frobenius(Ainv)
        abserr = frobenius(Ainv) * frobenius(db)
        relerr = cond * (frobenius(db) / frobenius(b))
        print("\t Expected max error %f" % abserr)
        print("\t║𝚫x║₂ = %f" % frobenius(dx))
        print("\t Expected rel error %f" % relerr)
        print("\t║𝚫x║₂/║x║₂ = %f" % (frobenius(dx) / frobenius(x)))
        print("")
Example 4: get_sdrgain_upd
# assumed imports for this standalone snippet
import numpy as np
import numpy.linalg as npla
import scipy.linalg as spla

def get_sdrgain_upd(amat, wnrm='fro', maxeps=None,
                    baseA=None, baseZ=None, baseGain=None,
                    maxfac=None):
    deltaA = amat - baseA
    epsP = spla.solve_sylvester(amat, -baseZ, -deltaA)
    eps = npla.norm(epsP, ord=wnrm)
    print('|amat - baseA|: {0} -- |E|: {1}'.
          format(npla.norm(deltaA, ord=wnrm), eps))
    if maxeps is not None:
        if eps < maxeps:
            updGaint = npla.solve(epsP + np.eye(epsP.shape[0]), baseGain.T)
            return updGaint.T, True
    elif maxfac is not None:
        if (1 + eps)/(1 - eps) < maxfac and eps < 1:
            updGaint = npla.solve(epsP + np.eye(epsP.shape[0]), baseGain.T)
            return updGaint.T, True
    return None, False
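In matrix terms (my reading of the code, not the original author's wording): solve(epsP + np.eye(...), baseGain.T) computes (I + E)^{-1} * baseGain^T without forming the inverse, so the returned gain equals baseGain @ inv(I + E).T; the maxeps / maxfac checks only allow the update when the perturbation E is small enough for (I + E) to be safely invertible.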
Example 5: qzswitch
# assumed imports for this standalone snippet
from numpy import mat, sqrt, c_, r_
from numpy.linalg import solve

def qzswitch(i, A2, B2, Q, Z):
    Aout = A2.copy(); Bout = B2.copy(); Qout = Q.copy(); Zout = Z.copy()
    ix = i - 1  # from 1-based to 0-based indexing...
    # use all 1x1-matrices for convenient conjugate-transpose even if real:
    a = mat(A2[ix, ix]); d = mat(B2[ix, ix]); b = mat(A2[ix, ix+1])
    e = mat(B2[ix, ix+1]); c = mat(A2[ix+1, ix+1]); f = mat(B2[ix+1, ix+1])
    wz = c_[c*e - f*b, (c*d - f*a).H]
    xy = c_[(b*d - e*a).H, (c*d - f*a).H]
    n = sqrt(wz*wz.H)
    m = sqrt(xy*xy.H)
    if n[0, 0] == 0:
        return (Aout, Bout, Qout, Zout)
    wz = solve(n, wz)
    xy = solve(m, xy)
    wz = r_[wz,
            c_[-wz[:, 1].H, wz[:, 0].H]]
    xy = r_[xy,
            c_[-xy[:, 1].H, xy[:, 0].H]]
    Aout[ix:ix+2, :] = xy * Aout[ix:ix+2, :]
    Bout[ix:ix+2, :] = xy * Bout[ix:ix+2, :]
    Aout[:, ix:ix+2] = Aout[:, ix:ix+2] * wz
    Bout[:, ix:ix+2] = Bout[:, ix:ix+2] * wz
    Zout[:, ix:ix+2] = Zout[:, ix:ix+2] * wz
    Qout[ix:ix+2, :] = xy * Qout[ix:ix+2, :]
    return (Aout, Bout, Qout, Zout)
Example 6: normal_eq_comb
# assumed imports; `column_group` is a helper from the same project that
# groups identical columns of PassSet
import numpy as np
import numpy.linalg as nla

def normal_eq_comb(AtA, AtB, PassSet=None):
    num_cholesky = 0
    num_eq = 0
    if AtB.size == 0:
        Z = np.zeros([])
    elif (PassSet is None) or np.all(PassSet):
        Z = nla.solve(AtA, AtB)
        num_cholesky = 1
        num_eq = AtB.shape[1]
    else:
        Z = np.zeros(AtB.shape)  # (n, k)
        if PassSet.shape[1] == 1:
            if np.any(PassSet):
                cols = np.nonzero(PassSet)[0]
                Z[cols] = nla.solve(AtA[np.ix_(cols, cols)], AtB[cols])
                num_cholesky = 1
                num_eq = 1
        else:
            groups = column_group(PassSet)
            for g in groups:
                cols = np.nonzero(PassSet[:, g[0]])[0]
                if cols.size > 0:
                    ix1 = np.ix_(cols, g)
                    ix2 = np.ix_(cols, cols)
                    Z[ix1] = nla.solve(AtA[ix2], AtB[ix1])
                    num_cholesky += 1
                    num_eq += len(g)
    return Z, num_cholesky, num_eq
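For illustration (matrices invented here), the single-column PassSet branch solves the normal equations only on the rows flagged True and leaves the others at zero:

import numpy as np
AtA = np.array([[4.0, 1.0, 0.0],
                [1.0, 3.0, 1.0],
                [0.0, 1.0, 2.0]])
AtB = np.array([[1.0], [2.0], [3.0]])
PassSet = np.array([[True], [False], [True]])
Z, n_chol, n_eq = normal_eq_comb(AtA, AtB, PassSet)
# Z == [[0.25], [0.0], [1.5]]: rows 0 and 2 solve the 2x2 subsystem, row 1 stays 0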
Example 7: ridge_regression
# assumed imports for this standalone snippet
import numpy as np
from numpy.linalg import solve

def ridge_regression(X, Y, c1=0.0, c2=0.0, offset=None):
    """
    Also known as Tikhonov regularization. This solves the minimization problem:
        min_{beta} ||X beta - Y||^2 + c1||beta||^2 + c2||beta - offset||^2
    One can find more information here: http://en.wikipedia.org/wiki/Tikhonov_regularization
    Parameters:
        X: a (n,d) numpy array
        Y: a (n,) numpy array
        c1: a scalar
        c2: a scalar
        offset: a (d,) numpy array.
    Returns:
        beta_hat: the solution to the minimization problem.
        V = (X^T X + (c1+c2)I)^{-1} X^T
    """
    n, d = X.shape
    X = X.astype(float)
    penalizer_matrix = (c1 + c2) * np.eye(d)
    if offset is None:
        offset = np.zeros((d,))
    A = np.dot(X.T, X) + penalizer_matrix
    b = np.dot(X.T, Y) + c2 * offset
    # rather than explicitly computing the inverse, just solve the system of equations
    return (solve(A, b), solve(A, X.T))
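A hedged usage sketch with made-up data, showing that the penalty c1 shrinks the estimate toward zero:

import numpy as np
rng = np.random.default_rng(0)
X = rng.normal(size=(50, 3))
beta_true = np.array([1.0, -2.0, 0.5])
Y = X @ beta_true + 0.1 * rng.normal(size=50)
beta_hat, V = ridge_regression(X, Y, c1=1.0)
print(beta_hat)   # close to beta_true, slightly shrunk by the penalty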
Example 8: inverse_chol
# method of a class that works with covariance matrices; assumes
# `import numpy as np` and `from numpy.linalg import solve`,
# with `self.calc_chol` defined elsewhere in the class
def inverse_chol(self, K=None, Chl=None):
    """
    Calculate the inverse of K through its Cholesky decomposition.
    Parameters
    ----------
    K: ndarray
        covariance matrix K
    Chl: ndarray
        Cholesky decomposition of K
    Returns
    -------
    ndarray
        the inverse of K
    """
    if Chl is not None:
        chol = Chl
    else:
        chol = self.calc_chol(K)
    if chol is None:
        return None
    inve = 0
    error_k = 1e-25
    while True:
        try:
            # add a growing jitter to the diagonal until the two triangular solves succeed
            choly = chol + error_k * np.eye(chol.shape[0])
            inve = solve(choly.T, solve(choly, np.eye(choly.shape[0])))
            break
        except np.linalg.LinAlgError:
            error_k *= 10
    return inve
Example 9: marginalLikelihood
# assumed imports for this standalone snippet; `logBadParams` is a helper
# from the same project
from numpy import eye, dot, log, diag, outer, array, pi, sum
from numpy.linalg import cholesky, solve, LinAlgError
import pdb

def marginalLikelihood(kernel, X, Y, nhyper, computeGradient=True, useCholesky=True, noise=1e-3):
    """
    Get the negative log marginal likelihood and its partial derivatives wrt
    each hyperparameter.
    """
    NX = len(X)
    assert NX == len(Y)
    # compute covariance matrix
    K = kernel.covMatrix(X) + eye(NX)*noise
    if useCholesky:
        # avoid inversion by using Cholesky decomp.
        try:
            L = cholesky(K)
            alpha = solve(L.T, solve(L, Y))
        except LinAlgError:
            print('\n ================ error in matrix')
            print('\thyper =', kernel.hyperparams)
            print('====================================')
            logBadParams(kernel)
            pdb.set_trace()
    # this excerpt only covers the Cholesky path (useCholesky=True)
    nlml = 0.5 * dot(Y, alpha) + sum(log(diag(L))) + 0.5 * NX * log(2.0*pi)
    if computeGradient:
        W = solve(L.T, solve(L, eye(NX))) - outer(alpha, alpha)
        dnlml = array([sum(W*kernel.derivative(X, i)) / 2.0 for i in range(nhyper)])
        return nlml, dnlml
    else:
        return nlml
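For reference, this is the standard Gaussian-process negative log marginal likelihood: with K = L L^T one has log|K| = 2 * sum(log(diag(L))) and alpha = K^{-1} Y, so the expression above equals 0.5 * Y^T K^{-1} Y + 0.5 * log|K| + (N/2) * log(2*pi).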
Example 10: als_step
# method of a matrix-factorization class; assumes `import numpy as np`
# and `from numpy.linalg import solve`
def als_step(self,
             latent_vectors,
             fixed_vecs,
             ratings,
             _lambda,
             type='user'):
    """
    One of the two ALS steps. Solve for the latent vectors
    specified by type.
    """
    if type == 'user':
        # Precompute
        YTY = fixed_vecs.T.dot(fixed_vecs)
        lambdaI = np.eye(YTY.shape[0]) * _lambda
        for u in range(latent_vectors.shape[0]):
            latent_vectors[u, :] = solve((YTY + lambdaI),
                                         ratings[u, :].dot(fixed_vecs))
    elif type == 'item':
        # Precompute
        XTX = fixed_vecs.T.dot(fixed_vecs)
        lambdaI = np.eye(XTX.shape[0]) * _lambda
        for i in range(latent_vectors.shape[0]):
            latent_vectors[i, :] = solve((XTX + lambdaI),
                                         ratings[:, i].T.dot(fixed_vecs))
    return latent_vectors
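The core of each branch is a ridge-style normal-equation solve per row; a standalone, hedged sketch of the user step (shapes and names invented):

import numpy as np
from numpy.linalg import solve

rng = np.random.default_rng(1)
ratings = rng.random((5, 4))      # 5 users x 4 items
item_vecs = rng.random((4, 2))    # 4 items x 2 latent factors
user_vecs = np.zeros((5, 2))
reg = 0.1
YTY = item_vecs.T @ item_vecs
for u in range(user_vecs.shape[0]):
    # same solve as in als_step above
    user_vecs[u, :] = solve(YTY + reg * np.eye(2), ratings[u, :] @ item_vecs)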
Example 11: observables2parameters
# method of a statistical analysis class; assumes `import numpy as np`
# and `from numpy.linalg import solve`
def observables2parameters(self, features_covariance=None):
    """
    Computes the conversion matrix M that allows to match a feature vector V to its best fit parameters P, in the sense P = P[fiducial] + MV

    :param features_covariance: covariance matrix of the simulated features, must be provided!
    :type features_covariance: 2 dimensional array (or 1 dimensional if diagonal)

    :returns: the (p,N) conversion matrix
    :rtype: array
    """
    # Safety checks
    assert features_covariance is not None, "No science without the covariance matrix, you must provide one!"
    assert features_covariance.shape in [self.training_set.shape[-1:], self.training_set.shape[-1:]*2]

    # Check if derivatives are already computed
    if not hasattr(self, "derivatives"):
        self.compute_derivatives()

    # Linear algebra manipulations (parameters = M x features)
    if features_covariance.shape == self.training_set.shape[1:] * 2:
        Y = solve(features_covariance, self.derivatives.transpose())
    else:
        Y = (1/features_covariance[:, np.newaxis]) * self.derivatives.transpose()
    XY = np.dot(self.derivatives, Y)
    return solve(XY, Y.transpose())
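Written out (my reading of the linear algebra, with D = self.derivatives and C = features_covariance, C symmetric): Y = C^{-1} D^T, XY = D C^{-1} D^T, and the returned matrix is M = (D C^{-1} D^T)^{-1} D C^{-1}, the generalized least-squares operator that maps a feature residual to the best-fit parameter shift.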
Example 12: unteraufgabe_c
# assumed imports; x10, x20 and unteraufgabe_b() come from the same exercise module
import numpy as np
import matplotlib.pyplot as plt
from numpy.linalg import solve

def unteraufgabe_c():
    f = lambda x: np.sin(10*x*np.cos(x))
    [A10, A20] = unteraufgabe_b()
    b10 = np.zeros_like(x10)
    b20 = np.zeros_like(x20)
    alpha10 = np.zeros_like(x10)
    alpha20 = np.zeros_like(x20)
    b10 = f(x10)
    b20 = f(x20)
    alpha10 = solve(A10, b10)
    alpha20 = solve(A20, b20)
    x = np.linspace(0.0, 1.0, 100)
    pi10 = np.zeros_like(x)
    pi20 = np.zeros_like(x)
    pi10 = np.polyval(alpha10, x)
    pi20 = np.polyval(alpha20, x)
    plt.figure()
    plt.plot(x, f(x), "-b", label=r"$f(x)$")
    plt.plot(x, pi10, "-g", label=r"$p_{10}(x)$")
    plt.plot(x10, b10, "dg")
    plt.plot(x, pi20, "-r", label=r"$p_{20}(x)$")
    plt.plot(x20, b20, "or")
    plt.grid(True)
    plt.xlabel(r"$x$")
    plt.ylabel(r"$y$")
    plt.legend()
    plt.savefig("interpolation.eps")
Example 13: setCoefs
# method of a well element class in an analytic groundwater model; assumes
# `from numpy import array, zeros, ones, hstack, vstack, pi, sum` and
# `from numpy import linalg`
def setCoefs(self):
    self.aquiferParent = self.modelParent.aq.findAquiferData(self.xw, self.yw)
    self.rwsq = self.rw**2
    self.pylayers = self.layers - 1
    self.NscreenedLayers = len(self.layers)
    if self.NscreenedLayers == 1:  # Screened in only one layer, no unknown parameters
        self.parameters = array([[self.discharge]])
    else:
        self.parameters = zeros((self.NscreenedLayers, 1), 'd')
    self.coef = ones((self.NscreenedLayers, self.aquiferParent.Naquifers), 'd')
    if self.aquiferParent.Naquifers > 1:  # Multiple aquifers, must compute coefficients
        if self.aquiferParent.type == self.aquiferParent.conf:
            for i in range(self.NscreenedLayers):
                pylayer = self.pylayers[i]
                ramat = self.aquiferParent.eigvec[:, 1:]  # All eigenvectors, but not first normalized transmissivity
                ramat = vstack((ramat[0:pylayer, :], ramat[pylayer+1:, :]))  # Remove row pylayer
                rb = self.aquiferParent.eigvec[:, 0] / (2.0*pi)  # Store Tn vector in rb
                rb = hstack((rb[0:pylayer], rb[pylayer+1:]))  # solve takes row vector
                self.coef[i, 1:self.aquiferParent.Naquifers] = linalg.solve(ramat, rb)
        elif self.aquiferParent.type == self.aquiferParent.semi:
            for i in range(self.NscreenedLayers):
                pylayer = self.pylayers[i]
                ramat = self.aquiferParent.eigvec
                rb = zeros(self.aquiferParent.Naquifers, 'd')
                rb[pylayer] = -1.0 / (2.0 * pi)
                self.coef[i, :] = linalg.solve(ramat, rb)
    if self.aquiferParent.Naquifers == 1 and self.aquiferParent.type == self.aquiferParent.semi:
        self.coef[0, 0] = -1.0 / (2.0 * pi)
    self.paramxcoef = sum(self.parameters * self.coef, 0)  # Parameters times coefficients
Example 14: nf2
# helper used inside an iterative (trust-region style) solver; LV, UV, g,
# sigma, delta and D come from the enclosing scope; assumes
# `from numpy import identity, dot` and `from numpy.linalg import solve, norm`
def nf2(l):
    # Find x(l)
    xl = solve((LV + (l + sigma * delta) * identity(D)), dot(-UV.T, g))
    # Calculate |xl| - delta (for Newton stopping rule)
    xlmd = norm(xl) - delta
    # Calculate f(l) for p = -1
    fl = 1 / norm(xl) - 1 / delta
    # Find x'(l)
    xlp = solve((LV + (l + sigma * delta) * identity(D)), (-xl))
    # Calculate f'(l) for p = -1
    flp = -dot(xl, xlp) * (dot(xl, xl) ** (-1.5))
    # Calculate increment
    dl = fl / flp
    # Set Delta
    Delta = delta
    return xlmd, dl, Delta
Example 15: lu_inv
def lu_inv(L):
    # Invert K = L @ L.T (L a lower-triangular Cholesky factor) via two
    # triangular solves, avoiding an explicit call to inv()
    from numpy.linalg import solve
    from numpy import eye
    n = L.shape[0]
    K_inv = solve(L.T, solve(L, eye(n)))
    return K_inv
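A hedged check of the idea (matrix invented here): applied to a Cholesky factor, lu_inv reproduces the inverse of the original matrix.

import numpy as np
K = np.array([[4.0, 2.0],
              [2.0, 3.0]])
L = np.linalg.cholesky(K)   # lower-triangular factor
print(np.allclose(lu_inv(L), np.linalg.inv(K)))   # True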