本文整理汇总了Python中pyamg.util.linalg.norm函数的典型用法代码示例。如果您正苦于以下问题:Python norm函数的具体用法?Python norm怎么用?Python norm使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了norm函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_gmres
def test_gmres(self):
    """Check agreement between the GMRES variants and CR.

    Householder GMRES and MGS GMRES should match on these small systems;
    on symmetric (possibly indefinite) systems, CR and GMRES should also
    agree.  Indentation restored — the original paste had lost all block
    structure.
    """
    # Ensure repeatability
    random.seed(0)
    # For these small matrices, Householder and MGS GMRES should give the
    # same result, and for symmetric (but possibly indefinite) matrices
    # CR and GMRES should give the same result.
    for maxiter in [1, 2, 3]:
        for case, symm_case in zip(self.cases, self.symm_cases):
            A = case['A']
            b = case['b']
            x0 = case['x0']
            A_symm = symm_case['A']
            b_symm = symm_case['b']
            x0_symm = symm_case['x0']
            # Test agreement between Householder and MGS GMRES.
            # Compare normalized iterates, since overall scaling of the
            # two implementations' internals may differ.
            (x, flag) = gmres_householder(A, b, x0=x0,
                                          maxiter=min(A.shape[0], maxiter))
            (x2, flag2) = gmres_mgs(A, b, x0=x0,
                                    maxiter=min(A.shape[0], maxiter))
            assert_array_almost_equal(
                x/norm(x), x2/norm(x2),
                err_msg='Householder GMRES and MGS GMRES gave different results for small matrix')
            assert_equal(
                flag, flag2,
                err_msg='Householder GMRES and MGS GMRES returned different convergence flags for small matrix')
            # Test agreement between GMRES and CR (symmetric systems only,
            # and only when the system is larger than 1x1)
            if A_symm.shape[0] > 1:
                residuals2 = []
                (x2, flag2) = gmres_mgs(A_symm, b_symm, x0=x0_symm,
                                        maxiter=min(A.shape[0], maxiter),
                                        residuals=residuals2)
                residuals3 = []
                (x3, flag2) = cr(A_symm, b_symm, x0=x0_symm,
                                 maxiter=min(A.shape[0], maxiter),
                                 residuals=residuals3)
                residuals2 = array(residuals2)
                residuals3 = array(residuals3)
                assert_array_almost_equal(
                    residuals3/norm(residuals3), residuals2/norm(residuals2),
                    err_msg='CR and GMRES yield different residual vectors')
                assert_array_almost_equal(
                    x2/norm(x2), x3/norm(x3),
                    err_msg='CR and GMRES yield different answers')
示例2: solver_diagnostic
def solver_diagnostic(A):
    """Run a diagnostic smoothed-aggregation solve on matrix ``A``.

    Solves A x = 0 from a random initial guess with GMRES-accelerated
    V-cycles, prints convergence statistics, and plots the residual
    history.  Indentation restored and Python 2 ``print`` statements
    converted to ``print()`` calls (output is unchanged).
    """
    # Generate near-null-space candidate B: the constant vector
    B = ones((A.shape[0], 1), dtype=A.dtype)
    BH = B.copy()

    # Random initial guess, zero right-hand side (seeded for repeatability)
    random.seed(0)
    b = zeros((A.shape[0], 1))
    x0 = rand(A.shape[0], 1)

    # Create solver
    ml = smoothed_aggregation_solver(A, B=B, BH=BH,
        strength=('symmetric', {'theta': 0.0}),
        smooth=('energy', {'weighting': 'local', 'krylov': 'gmres', 'degree': 1, 'maxiter': 2}),
        improve_candidates=[('gauss_seidel_nr', {'sweep': 'symmetric', 'iterations': 4}), None],
        aggregate="standard",
        presmoother=('gauss_seidel_nr', {'sweep': 'symmetric', 'iterations': 2}),
        postsmoother=('gauss_seidel_nr', {'sweep': 'symmetric', 'iterations': 2}),
        max_levels=15,
        max_coarse=300,
        coarse_solver="pinv")

    # Solve system
    res = []
    x = ml.solve(b, x0=x0, tol=1e-08, residuals=res, accel="gmres",
                 maxiter=300, cycle="V")
    # Geometric-mean residual reduction per iteration
    res_rate = (res[-1]/res[0])**(1.0/(len(res)-1.))
    normr0 = norm(ravel(b) - ravel(A*x0))
    print(" ")
    print(ml)
    print("System size: " + str(A.shape))
    print("Avg. Resid Reduction: %1.2f" % res_rate)
    print("Iterations: %d" % len(res))
    print("Operator Complexity: %1.2f" % ml.operator_complexity())
    print("Work per DOA: %1.2f" % (ml.cycle_complexity()/abs(log10(res_rate))))
    print("Relative residual norm: %1.2e" % (norm(ravel(b) - ravel(A*x))/normr0))

    # Plot residual history
    pylab.semilogy(array(res)/normr0)
    pylab.title('Residual Histories')
    pylab.xlabel('Iteration')
    pylab.ylabel('Relative Residual Norm')
    pylab.show()
示例3: test_krylov
def test_krylov(self):
    """Each Krylov method must reduce the residual (oblique projectors)
    or the error (orthogonal projectors) by the case's reduction factor.

    Indentation restored — the original paste had lost all block
    structure.
    """
    # Oblique projectors reduce the residual
    for method in self.oblique:
        for case in self.cases:
            A = case['A']
            b = case['b']
            x0 = case['x0']
            (xNew, flag) = method(A, b, x0=x0, tol=case['tol'],
                                  maxiter=case['maxiter'])
            xNew = xNew.reshape(-1, 1)
            assert_equal((norm(b - A*xNew)/norm(b - A*x0)) < case['reduction_factor'],
                         True, err_msg='Oblique Krylov Method Failed Test')
    # Oblique projectors reduce the residual; here we consider oblique
    # projectors for symmetric matrices
    for method in self.symm_oblique:
        for case in self.symm_cases:
            A = case['A']
            b = case['b']
            x0 = case['x0']
            (xNew, flag) = method(A, b, x0=x0, tol=case['tol'],
                                  maxiter=case['maxiter'])
            xNew = xNew.reshape(-1, 1)
            assert_equal((norm(b - A*xNew)/norm(b - A*x0)) < case['reduction_factor'],
                         True, err_msg='Symmetric oblique Krylov Method Failed Test')
    # Orthogonal projectors reduce the error
    for method in self.orth:
        for case in self.cases:
            A = case['A']
            b = case['b']
            x0 = case['x0']
            (xNew, flag) = method(A, b, x0=x0, tol=case['tol'],
                                  maxiter=case['maxiter'])
            xNew = xNew.reshape(-1, 1)
            soln = solve(A, b)
            assert_equal((norm(soln - xNew)/norm(soln - x0)) < case['reduction_factor'],
                         True, err_msg='Orthogonal Krylov Method Failed Test')
    # SPD Orthogonal projectors reduce the error
    for method in self.spd_orth:
        for case in self.spd_cases:
            A = case['A']
            b = case['b']
            x0 = case['x0']
            (xNew, flag) = method(A, b, x0=x0, tol=case['tol'],
                                  maxiter=case['maxiter'])
            xNew = xNew.reshape(-1, 1)
            soln = solve(A, b)
            assert_equal((norm(soln - xNew)/norm(soln - x0)) < case['reduction_factor'],
                         True, err_msg='Orthogonal Krylov Method Failed Test')
    # Assume that Inexact Methods reduce the residual for these examples
    for method in self.inexact:
        for case in self.cases:
            A = case['A']
            b = case['b']
            x0 = case['x0']
            (xNew, flag) = method(A, b, x0=x0, tol=case['tol'],
                                  maxiter=A.shape[0])
            xNew = xNew.reshape(-1, 1)
            assert_equal((norm(b - A*xNew)/norm(b - A*x0)) < 0.15,
                         True, err_msg='Inexact Krylov Method Failed Test')
示例4: test_norm
def test_norm(self):
    """pyamg's norm must agree with linalg.norm on scalars (real and
    complex) and on small complex vectors.

    Indentation restored; the repeated ``cases.append`` calls collapsed
    into a single list literal.
    """
    cases = [
        4,
        -1,
        2.5,
        3 + 5j,
        7 - 2j,
        [1 + 3j, 6],
        [1 + 3j, 6 - 2j],
    ]
    for A in cases:
        assert_almost_equal(norm(A), linalg.norm(A))
示例5: stencil_grid
# Benchmark script: compare pyamg's CR and GMRES on a 2D Laplace problem.
# Python 2 `print` statements converted to `print()` calls (output
# unchanged).
import time
from pyamg.krylov._gmres import gmres

# 5-point finite-difference Laplacian on a 100 x 100 grid
A = stencil_grid([[0, -1, 0], [-1, 4, -1], [0, -1, 0]], (100, 100),
                 dtype=float, format='csr')
b = random((A.shape[0],))
x0 = random((A.shape[0],))

print('\n\nTesting CR with %d x %d 2D Laplace Matrix' %
      (A.shape[0], A.shape[0]))
t1 = time.time()
r = []
(x, flag) = cr(A, b, x0, tol=1e-8, maxiter=100, residuals=r)
t2 = time.time()
print('%s took %0.3f ms' % ('cr', (t2-t1)*1000.0))
print('norm = %g' % (norm(b - A*x)))
print('info flag = %d' % (flag))

t1 = time.time()
r2 = []
(x, flag) = gmres(A, b, x0, tol=1e-8, maxiter=100, residuals=r2)
t2 = time.time()
print('%s took %0.3f ms' % ('gmres', (t2-t1)*1000.0))
print('norm = %g' % (norm(b - A*x)))
print('info flag = %d' % (flag))

# Optional comparison against SciPy's CG (left disabled):
# from scipy.sparse.linalg.isolve import cg as icg
# t1 = time.time()
# (y, flag) = icg(A, b, x0, tol=1e-8, maxiter=100)
# t2 = time.time()
# print('\n%s took %0.3f ms' % ('linalg cg', (t2-t1)*1000.0))
示例6: gmres_mgs
def gmres_mgs(A, b, x0=None, tol=1e-5, restrt=None, maxiter=None, xtype=None,
M=None, callback=None, residuals=None, reorth=False):
'''
Generalized Minimum Residual Method (GMRES)
GMRES iteratively refines the initial solution guess to the system
Ax = b
Modified Gram-Schmidt version
Parameters
----------
A : {array, matrix, sparse matrix, LinearOperator}
n x n, linear system to solve
b : {array, matrix}
right hand side, shape is (n,) or (n,1)
x0 : {array, matrix}
initial guess, default is a vector of zeros
tol : float
relative convergence tolerance, i.e. tol is scaled by the norm
of the initial preconditioned residual
restrt : {None, int}
- if int, restrt is max number of inner iterations
and maxiter is the max number of outer iterations
- if None, do not restart GMRES, and max number of inner iterations
is maxiter
maxiter : {None, int}
- if restrt is None, maxiter is the max number of inner iterations
and GMRES does not restart
- if restrt is int, maxiter is the max number of outer iterations,
and restrt is the max number of inner iterations
xtype : type
dtype for the solution, default is automatic type detection
M : {array, matrix, sparse matrix, LinearOperator}
n x n, inverted preconditioner, i.e. solve M A x = M b.
callback : function
User-supplied function is called after each iteration as
callback( ||rk||_2 ), where rk is the current preconditioned residual
vector
residuals : list
residuals contains the preconditioned residual norm history,
including the initial residual.
reorth : boolean
If True, then a check is made whether to re-orthogonalize the Krylov
space each GMRES iteration
Returns
-------
(xNew, info)
xNew : an updated guess to the solution of Ax = b
info : halting status of gmres
== =============================================
0 successful exit
>0 convergence to tolerance not achieved,
return iteration count instead. This value
is precisely the order of the Krylov space.
<0 numerical breakdown, or illegal input
== =============================================
Notes
-----
- The LinearOperator class is in scipy.sparse.linalg.interface.
Use this class if you prefer to define A or M as a mat-vec routine
as opposed to explicitly constructing the matrix. A.psolve(..) is
still supported as a legacy.
- For robustness, modified Gram-Schmidt is used to orthogonalize the
Krylov Space Givens Rotations are used to provide the residual norm
each iteration
Examples
--------
>>> from pyamg.krylov import gmres
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> from pyamg.gallery import poisson
>>> A = poisson((10,10))
>>> b = np.ones((A.shape[0],))
>>> (x,flag) = gmres(A,b, maxiter=2, tol=1e-8, orthog='mgs')
>>> print norm(b - A*x)
>>> 6.5428213057
References
----------
.. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
Second Edition", SIAM, pp. 151-172, pp. 272-275, 2003
http://www-users.cs.umn.edu/~saad/books.html
.. [2] C. T. Kelley, http://www4.ncsu.edu/~ctk/matlab_roots.html
'''
# Convert inputs to linear system, with error checking
A, M, x, b, postprocess = make_system(A, M, x0, b)
dimen = A.shape[0]
# Ensure that warnings are always reissued from this function
import warnings
warnings.filterwarnings('always', module='pyamg\.krylov\._gmres_mgs')
# Choose type: upcast only when A carries no dtype of its own
if not hasattr(A, 'dtype'):
Atype = upcast(x.dtype, b.dtype)
else:
# ......... (remainder of this function is omitted in this excerpt) .........
示例7: random
# Benchmark script: time pyamg's MGS-GMRES against SciPy's GMRES on a
# 2D Poisson (Laplace) problem and report final residual norms.
# x0 = random((4,1))
# %timeit -n 15 (x,flag) = gmres(A,b,x0,tol=1e-8,maxiter=100)
from pyamg.gallery import poisson
from numpy.random import random
from pyamg.util.linalg import norm

# Build the test problem: 2D Poisson matrix, random rhs and initial guess
A = poisson((125, 125), dtype=float, format='csr')
# A.data = A.data + 0.001j*rand(A.data.shape[0])
b = random((A.shape[0],))
x0 = random((A.shape[0],))

import time
from scipy.sparse.linalg.isolve import gmres as igmres

print('\n\nTesting GMRES with %d x %d 2D Laplace Matrix' %
      (A.shape[0], A.shape[0]))

# --- pyamg MGS-GMRES ---
t_start = time.time()
(x, flag) = gmres_mgs(A, b, x0, tol=1e-8, maxiter=500)
elapsed = time.time() - t_start
print('%s took %0.3f ms' % ('gmres', elapsed*1000.0))
print('norm = %g' % (norm(b - A*x)))
print('info flag = %d' % (flag))

# --- SciPy GMRES ---
# Do not pass maxiter here: SciPy's gmres can't handle it correctly.
t_start = time.time()
(y, flag) = igmres(A, b, x0, tol=1e-8)
elapsed = time.time() - t_start
print('\n%s took %0.3f ms' % ('linalg gmres', elapsed*1000.0))
print('norm = %g' % (norm(b - A*y)))
print('info flag = %d' % (flag))
示例8: test_krylov
def test_krylov(self):
    """Each Krylov method must reduce the residual (oblique projectors)
    or the error (orthogonal projectors) by the case's reduction factor.

    Indentation restored — the original paste had lost all block
    structure.
    """
    # Oblique projectors reduce the residual
    for method in self.oblique:
        for case in self.cases:
            A = case["A"]
            b = case["b"]
            x0 = case["x0"]
            (xNew, flag) = method(A, b, x0=x0, tol=case["tol"], maxiter=case["maxiter"])
            xNew = xNew.reshape(-1, 1)
            assert_equal(
                (norm(b - A * xNew) / norm(b - A * x0)) < case["reduction_factor"],
                True,
                err_msg="Oblique Krylov Method Failed Test",
            )
    # Oblique projectors reduce the residual, here we consider oblique
    # projectors for symmetric matrices
    for method in self.symm_oblique:
        for case in self.symm_cases:
            A = case["A"]
            b = case["b"]
            x0 = case["x0"]
            (xNew, flag) = method(A, b, x0=x0, tol=case["tol"], maxiter=case["maxiter"])
            xNew = xNew.reshape(-1, 1)
            assert_equal(
                (norm(b - A * xNew) / norm(b - A * x0)) < case["reduction_factor"],
                True,
                err_msg="Symmetric oblique Krylov Method Failed",
            )
    # Orthogonal projectors reduce the error
    for method in self.orth:
        for case in self.cases:
            A = case["A"]
            b = case["b"]
            x0 = case["x0"]
            (xNew, flag) = method(A, b, x0=x0, tol=case["tol"], maxiter=case["maxiter"])
            xNew = xNew.reshape(-1, 1)
            soln = solve(A, b)
            assert_equal(
                (norm(soln - xNew) / norm(soln - x0)) < case["reduction_factor"],
                True,
                err_msg="Orthogonal Krylov Method Failed Test",
            )
    # SPD Orthogonal projectors reduce the error
    for method in self.spd_orth:
        for case in self.spd_cases:
            A = case["A"]
            b = case["b"]
            x0 = case["x0"]
            (xNew, flag) = method(A, b, x0=x0, tol=case["tol"], maxiter=case["maxiter"])
            xNew = xNew.reshape(-1, 1)
            soln = solve(A, b)
            assert_equal(
                (norm(soln - xNew) / norm(soln - x0)) < case["reduction_factor"],
                True,
                err_msg="Orthogonal Krylov Method Failed Test",
            )
    # Assume that Inexact Methods reduce the residual for these examples
    for method in self.inexact:
        for case in self.cases:
            A = case["A"]
            b = case["b"]
            x0 = case["x0"]
            (xNew, flag) = method(A, b, x0=x0, tol=case["tol"], maxiter=A.shape[0])
            xNew = xNew.reshape(-1, 1)
            assert_equal(
                (norm(b - A * xNew) / norm(b - A * x0)) < 0.35, True, err_msg="Inexact Krylov Method Failed Test"
            )
示例9: cgnr
def cgnr(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None,
callback=None, residuals=None):
'''Conjugate Gradient, Normal Residual algorithm
Applies CG to the normal equations, A.H A x = b. Left preconditioning
is supported. Note that unless A is well-conditioned, the use of
CGNR is inadvisable
Parameters
----------
A : {array, matrix, sparse matrix, LinearOperator}
n x n, linear system to solve
b : {array, matrix}
right hand side, shape is (n,) or (n,1)
x0 : {array, matrix}
initial guess, default is a vector of zeros
tol : float
relative convergence tolerance, i.e. tol is scaled by ||r_0||_2
maxiter : int
maximum number of allowed iterations
xtype : type
dtype for the solution, default is automatic type detection
M : {array, matrix, sparse matrix, LinearOperator}
n x n, inverted preconditioner, i.e. solve M A.H A x = b.
callback : function
User-supplied function is called after each iteration as
callback(xk), where xk is the current solution vector
residuals : list
residuals has the residual norm history,
including the initial residual, appended to it
Returns
-------
(xNew, info)
xNew : an updated guess to the solution of Ax = b
info : halting status of cgnr
== =======================================
0 successful exit
>0 convergence to tolerance not achieved,
return iteration count instead.
<0 numerical breakdown, or illegal input
== =======================================
Notes
-----
The LinearOperator class is in scipy.sparse.linalg.interface.
Use this class if you prefer to define A or M as a mat-vec routine
as opposed to explicitly constructing the matrix. A.psolve(..) is
still supported as a legacy.
Examples
--------
>>> from pyamg.krylov.cgnr import cgnr
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> from pyamg.gallery import poisson
>>> A = poisson((10,10))
>>> b = np.ones((A.shape[0],))
>>> (x,flag) = cgnr(A,b, maxiter=2, tol=1e-8)
>>> print norm(b - A*x)
9.3910201849
References
----------
.. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
Second Edition", SIAM, pp. 276-7, 2003
http://www-users.cs.umn.edu/~saad/books.html
'''
# Store the conjugate transpose explicitly as it will be used much later on
if isspmatrix(A):
AH = A.H
else:
# TODO avoid doing this since A may be a different sparse type
AH = aslinearoperator(asmatrix(A).H)
# Convert inputs to linear system, with error checking
A, M, x, b, postprocess = make_system(A, M, x0, b)
dimen = A.shape[0]
# Ensure that warnings are always reissued from this function
import warnings
warnings.filterwarnings('always', module='pyamg\.krylov\._cgnr')
# Choose type: upcast only when A (or M) carries no dtype of its own
if not hasattr(A, 'dtype'):
Atype = upcast(x.dtype, b.dtype)
else:
Atype = A.dtype
if not hasattr(M, 'dtype'):
Mtype = upcast(x.dtype, b.dtype)
else:
Mtype = M.dtype
xtype = upcast(Atype, x.dtype, b.dtype, Mtype)
# Should norm(r) be kept
if residuals == []:
# ......... (remainder of this function is omitted in this excerpt) .........
示例10: solve
def solve(A, b, x0=None, tol=1e-5, maxiter=400, return_solver=False,
existing_solver=None, verb=True):
"""
Solve the arbitrary system Ax=b with the best out-of-the box choice for a
solver. The matrix A can be non-Hermitian, indefinite, Hermitian
positive-definite, complex, etc... Generic and robust settings for
smoothed_aggregation_solver(..) are used to invert A.
Parameters
----------
A : {array, matrix, csr_matrix, bsr_matrix}
Matrix to invert, CSR or BSR format preferred for efficiency
b : {array}
Right hand side.
x0 : {array} : default random vector
Initial guess
tol : {float} : default 1e-5
Stopping criteria: relative residual r[k]/r[0] tolerance
maxiter : {int} : default 400
Stopping criteria: maximum number of allowable iterations
return_solver : {bool} : default False
True: return the solver generated
existing_solver : {smoothed_aggregation_solver} : default None
If instance of a multilevel solver, then existing_solver is used
to invert A, thus saving time on setup cost.
verb : {bool}
If True, print verbose output during runtime
Returns
-------
x : {array}
Solution to Ax = b
ml : multilevel_solver
Optional return of the multilevel structure used for the solve
Notes
-----
If calling solve(...) multiple times for the same matrix, A, solver reuse
is easy and efficient. Set "return_solver=True", and the return value will
be a tuple, (x,ml), where ml is the solver used to invert A, and x is the
solution to Ax=b. Then, the next time solve(...) is called, set
"existing_solver=ml".
Examples
--------
>>> from numpy import arange, array
>>> from pyamg import solve
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> A = poisson((40,40),format='csr')
>>> b = array(arange(A.shape[0]), dtype=float)
>>> x = solve(A,b,verb=False)
>>> print "%1.2e"%(norm(b - A*x)/norm(b))
6.28e-06
"""
# Convert A to acceptable CSR/BSR format
A = make_csr(A)
# Generate solver if necessary
if existing_solver is None:
# Parameter dictionary for smoothed_aggregation_solver
config = solver_configuration(A, B=None, verb=verb)
# Generate solver
existing_solver = solver(A, config)
else:
# Reusing a solver: its finest-level matrix must match A's size
if existing_solver.levels[0].A.shape[0] != A.shape[0]:
raise TypeError('Argument existing_solver must have level 0 matrix\
of same size as A')
# Krylov acceleration depends on symmetry of A
if existing_solver.levels[0].A.symmetry == 'hermitian':
accel = 'cg'
else:
accel = 'gmres'
# Initial guess: random when none supplied
if x0 is None:
x0 = np.array(sp.rand(A.shape[0],), dtype=A.dtype)
# Callback function to print iteration number
# NOTE(review): Python 2 print statements below -- this excerpt predates py3
if verb:
iteration = np.zeros((1,))
print " maxiter = %d" % maxiter
def callback(x, iteration):
iteration[0] = iteration[0] + 1
print " iteration %d" % iteration[0]
callback2 = lambda x: callback(x, iteration)
else:
callback2 = None
# Solve with accelerated Krylov method
x = existing_solver.solve(b, x0=x0, accel=accel, tol=tol, maxiter=maxiter,
callback=callback2)
if verb:
# ......... (remainder of this function is omitted in this excerpt) .........
示例11: solve
def solve(self, b, x0=None, tol=1e-5, maxiter=100, cycle='V', accel=None,
callback=None, residuals=None, return_residuals=False):
"""Execute multigrid cycling.
Parameters
----------
b : array
Right hand side.
x0 : array
Initial guess.
tol : float
Stopping criteria: relative residual r[k]/r[0] tolerance.
maxiter : int
Stopping criteria: maximum number of allowable iterations.
cycle : {'V','W','F','AMLI'}
Type of multigrid cycle to perform in each iteration.
accel : string, function
Defines acceleration method. Can be a string such as 'cg'
or 'gmres' which is the name of an iterative solver in
pyamg.krylov (preferred) or scipy.sparse.linalg.isolve.
If accel is not a string, it will be treated like a function
with the same interface provided by the iterative solvers in SciPy.
callback : function
User-defined function called after each iteration. It is
called as callback(xk) where xk is the k-th iterate vector.
residuals : list
List to contain residual norms at each iteration.
Returns
-------
x : array
Approximate solution to Ax=b
See Also
--------
aspreconditioner
Examples
--------
>>> from numpy import ones
>>> from pyamg import ruge_stuben_solver
>>> from pyamg.gallery import poisson
>>> A = poisson((100, 100), format='csr')
>>> b = A * ones(A.shape[0])
>>> ml = ruge_stuben_solver(A, max_coarse=10)
>>> residuals = []
>>> x = ml.solve(b, tol=1e-12, residuals=residuals) # standalone solver
"""
from pyamg.util.linalg import residual_norm, norm
if x0 is None:
x = np.zeros_like(b)
else:
x = np.array(x0) # copy
cycle = str(cycle).upper()
# AMLI cycles require hermitian matrix
if (cycle == 'AMLI') and hasattr(self.levels[0].A, 'symmetry'):
if self.levels[0].A.symmetry != 'hermitian':
raise ValueError('AMLI cycles require \
symmetry to be hermitian')
if accel is not None:
# Check for symmetric smoothing scheme when using CG
# NOTE(review): `accel is 'cg'` relies on string interning; should be
# `accel == 'cg'` -- flagged only, code kept byte-identical here.
if (accel is 'cg') and (not self.symmetric_smoothing):
warn('Incompatible non-symmetric multigrid preconditioner '
'detected, due to presmoother/postsmoother combination. '
'CG requires SPD preconditioner, not just SPD matrix.')
# Check for AMLI compatability
if (accel != 'fgmres') and (cycle == 'AMLI'):
raise ValueError('AMLI cycles require acceleration (accel) '
'to be fgmres, or no acceleration')
# py23 compatibility:
try:
basestring
except NameError:
basestring = str
# Acceleration is being used
kwargs = {}
if isinstance(accel, basestring):
from pyamg import krylov
from scipy.sparse.linalg import isolve
kwargs = {}
# Prefer pyamg's Krylov solvers; fall back to SciPy's isolve
if hasattr(krylov, accel):
accel = getattr(krylov, accel)
else:
accel = getattr(isolve, accel)
kwargs['atol'] = 'legacy'
A = self.levels[0].A
M = self.aspreconditioner(cycle=cycle)
try: # try PyAMG style interface which has a residuals parameter
return accel(A, b, x0=x0, tol=tol, maxiter=maxiter, M=M,
# ......... (remainder of this method is omitted in this excerpt) .........
示例12: general_setup_stage
# ......... (beginning of general_setup_stage is omitted in this excerpt) .........
# construct R
if symmetry == 'symmetric': # R should reflect A's structure
levels[i].R = levels[i].P.T.asformat(levels[i].P.format)
elif symmetry == 'hermitian':
levels[i].R = levels[i].P.H.asformat(levels[i].P.format)
# construct coarse A via the Galerkin triple product R * A * P
levels[i+1].A = levels[i].R * levels[i].A * levels[i].P
# construct bridging P
T_bridge = make_bridge(levels[i+1].T)
R_bridge = levels[i+2].B
# smooth bridging P
fn, kwargs = unpack_arg(smooth[i+1])
if fn == 'jacobi':
levels[i+1].P = jacobi_prolongation_smoother(levels[i+1].A,
T_bridge,
levels[i+1].C,
R_bridge, **kwargs)
elif fn == 'richardson':
levels[i+1].P = richardson_prolongation_smoother(levels[i+1].A,
T_bridge,
**kwargs)
elif fn == 'energy':
levels[i+1].P = energy_prolongation_smoother(levels[i+1].A,
T_bridge,
levels[i+1].C,
R_bridge, None,
(False, {}), **kwargs)
elif fn is None:
levels[i+1].P = T_bridge
else:
raise ValueError('unrecognized prolongation smoother method %s' %
str(fn))
# construct the "bridging" R
if symmetry == 'symmetric': # R should reflect A's structure
levels[i+1].R = levels[i+1].P.T.asformat(levels[i+1].P.format)
elif symmetry == 'hermitian':
levels[i+1].R = levels[i+1].P.H.asformat(levels[i+1].P.format)
# run solver on candidate
solver = multilevel_solver(levels[i+1:], coarse_solver=coarse_solver)
change_smoothers(solver, presmoother=prepostsmoother,
postsmoother=prepostsmoother)
x = solver.solve(np.zeros_like(x), x0=x,
tol=float(np.finfo(np.float).tiny),
maxiter=candidate_iters)
work[:] += 2 * solver.operator_complexity() * solver.levels[0].A.nnz *\
candidate_iters*2
# update values on next level
levels[i+1].B = R[:, :-1].copy()
levels[i+1].T = T_bridge
# note that we only use the x from the second coarsest level
fn, kwargs = unpack_arg(prepostsmoother)
for lvl in reversed(levels[:-2]):
x = lvl.P * x
work[:] += lvl.A.nnz*candidate_iters*2
if fn == 'gauss_seidel':
# only relax at nonzeros, so as not to mess up any locally dropped
# candidates
indices = np.ravel(x).nonzero()[0]
gauss_seidel_indexed(lvl.A, x, np.zeros_like(x), indices,
iterations=candidate_iters, sweep='symmetric')
elif fn == 'gauss_seidel_ne':
gauss_seidel_ne(lvl.A, x, np.zeros_like(x),
iterations=candidate_iters, sweep='symmetric')
elif fn == 'gauss_seidel_nr':
gauss_seidel_nr(lvl.A, x, np.zeros_like(x),
iterations=candidate_iters, sweep='symmetric')
elif fn == 'jacobi':
jacobi(lvl.A, x, np.zeros_like(x), iterations=1,
omega=1.0 / rho_D_inv_A(lvl.A))
elif fn == 'richardson':
polynomial(lvl.A, x, np.zeros_like(x), iterations=1,
coefficients=[1.0/approximate_spectral_radius(lvl.A)])
elif fn == 'gmres':
x[:] = (gmres(lvl.A, np.zeros_like(x), x0=x,
maxiter=candidate_iters)[0]).reshape(x.shape)
else:
raise TypeError('Unrecognized smoother')
# x will be dense again, so we have to drop locally again
elim, elim_kwargs = unpack_arg(eliminate_local)
if elim is True:
# normalize by the infinity norm before eliminating local candidates
x = x/norm(x, 'inf')
eliminate_local_candidates(x, levels[0].AggOp, levels[0].A, T0,
**elim_kwargs)
return x.reshape(-1, 1)
示例13: adaptive_sa_solver
# ......... (beginning of adaptive_sa_solver is omitted in this excerpt) .........
"""
if not (isspmatrix_csr(A) or isspmatrix_bsr(A)):
try:
A = csr_matrix(A)
warn("Implicit conversion of A to CSR", SparseEfficiencyWarning)
except BaseException:
raise TypeError('Argument A must have type csr_matrix or\
bsr_matrix, or be convertible to csr_matrix')
A = A.asfptype()
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix')
# Track work in terms of relaxation
work = np.zeros((1,))
# Levelize the user parameters, so that they become lists describing the
# desired user option on each level.
max_levels, max_coarse, strength =\
levelize_strength_or_aggregation(strength, max_levels, max_coarse)
max_levels, max_coarse, aggregate =\
levelize_strength_or_aggregation(aggregate, max_levels, max_coarse)
smooth = levelize_smooth_or_improve_candidates(smooth, max_levels)
# Develop initial candidate(s). Note that any predefined aggregation is
# preserved.
if initial_candidates is None:
B, aggregate, strength =\
initial_setup_stage(A, symmetry, pdef, candidate_iters, epsilon,
max_levels, max_coarse, aggregate,
prepostsmoother, smooth, strength, work)
# Normalize B
B = (1.0/norm(B, 'inf')) * B
num_candidates -= 1
else:
# Otherwise, use predefined candidates
B = initial_candidates
num_candidates -= B.shape[1]
# Generate Aggregation and Strength Operators (the brute force way)
sa = smoothed_aggregation_solver(A, B=B, symmetry=symmetry,
presmoother=prepostsmoother,
postsmoother=prepostsmoother,
smooth=smooth, strength=strength,
max_levels=max_levels,
max_coarse=max_coarse,
aggregate=aggregate,
coarse_solver=coarse_solver,
improve_candidates=None, keep=True,
**kwargs)
if len(sa.levels) > 1:
# Set strength-of-connection and aggregation
aggregate = [('predefined', {'AggOp': sa.levels[i].AggOp.tocsr()})
for i in range(len(sa.levels) - 1)]
strength = [('predefined', {'C': sa.levels[i].C.tocsr()})
for i in range(len(sa.levels) - 1)]
# Develop additional candidates
for i in range(num_candidates):
x = general_setup_stage(
smoothed_aggregation_solver(A, B=B, symmetry=symmetry,
presmoother=prepostsmoother,
postsmoother=prepostsmoother,
smooth=smooth,
coarse_solver=coarse_solver,
aggregate=aggregate,
示例14: cr
def cr(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None,
callback=None, residuals=None):
'''Conjugate Residual algorithm
Solves the linear system Ax = b. Left preconditioning is supported.
The matrix A must be Hermitian symmetric (but not necessarily definite).
Parameters
----------
A : {array, matrix, sparse matrix, LinearOperator}
n x n, linear system to solve
b : {array, matrix}
right hand side, shape is (n,) or (n,1)
x0 : {array, matrix}
initial guess, default is a vector of zeros
tol : float
relative convergence tolerance, i.e. tol is scaled by the
preconditioner norm of r_0, or ||r_0||_M.
maxiter : int
maximum number of allowed iterations
xtype : type
dtype for the solution, default is automatic type detection
M : {array, matrix, sparse matrix, LinearOperator}
n x n, inverted preconditioner, i.e. solve M A x = M b.
callback : function
User-supplied function is called after each iteration as
callback(xk), where xk is the current solution vector
residuals : list
residuals contains the residual norm history,
including the initial residual. The preconditioner norm
is used, instead of the Euclidean norm.
Returns
-------
(xNew, info)
xNew : an updated guess to the solution of Ax = b
info : halting status of cr
== =======================================
0 successful exit
>0 convergence to tolerance not achieved,
return iteration count instead.
<0 numerical breakdown, or illegal input
== =======================================
Notes
-----
The LinearOperator class is in scipy.sparse.linalg.interface.
Use this class if you prefer to define A or M as a mat-vec routine
as opposed to explicitly constructing the matrix. A.psolve(..) is
still supported as a legacy.
The 2-norm of the preconditioned residual is used both for halting and
returned in the residuals list.
Examples
--------
>>> from pyamg.krylov.cr import cr
>>> from pyamg.util.linalg import norm
>>> import numpy
>>> from pyamg.gallery import poisson
>>> A = poisson((10,10))
>>> b = numpy.ones((A.shape[0],))
>>> (x,flag) = cr(A,b, maxiter=2, tol=1e-8)
>>> print norm(b - A*x)
10.9370700187
References
----------
.. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
Second Edition", SIAM, pp. 262-67, 2003
http://www-users.cs.umn.edu/~saad/books.html
'''
A, M, x, b, postprocess = make_system(A, M, x0, b, xtype=None)
n = len(b)
##
# Ensure that warnings are always reissued from this function
import warnings
warnings.filterwarnings('always', module='pyamg\.krylov\._cr')
# determine maxiter
if maxiter is None:
maxiter = int(1.3*len(b)) + 2
elif maxiter < 1:
raise ValueError('Number of iterations must be positive')
# choose tolerance for numerically zero values
# NOTE(review): numpy.float / numpy.longfloat are removed in modern NumPy
t = A.dtype.char
eps = numpy.finfo(numpy.float).eps
feps = numpy.finfo(numpy.single).eps
geps = numpy.finfo(numpy.longfloat).eps
_array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
numerically_zero = {0: feps*1e3, 1: eps*1e6,
2: geps*1e6}[_array_precision[t]]
# setup method: initial residual and preconditioned residual
r = b - A*x
z = M*r
# ......... (remainder of this function is omitted in this excerpt) .........
示例15: polynomial
def polynomial(A, x, b, coefficients, iterations=1):
    """Apply a polynomial smoother to the system Ax=b

    Parameters
    ----------
    A : sparse matrix
        Sparse NxN matrix
    x : ndarray
        Approximate solution (length N)
    b : ndarray
        Right-hand side (length N)
    coefficients : {array_like}
        Coefficients of the polynomial.  See Notes section for details.
    iterations : int
        Number of iterations to perform

    Returns
    -------
    Nothing, x will be modified in place.

    Notes
    -----
    The smoother has the form x[:] = x + p(A) (b - A*x) where p(A) is a
    polynomial in A whose scalar coefficients are specified (in descending
    order) by argument 'coefficients'.

    - Richardson iteration p(A) = c_0:
        polynomial_smoother(A, x, b, [c_0])
    - Linear smoother p(A) = c_1*A + c_0:
        polynomial_smoother(A, x, b, [c_1, c_0])
    - Quadratic smoother p(A) = c_2*A^2 + c_1*A + c_0:
        polynomial_smoother(A, x, b, [c_2, c_1, c_0])

    Here, Horner's Rule is applied to avoid computing A^k directly.

    For efficiency, the case x = 0 is detected, in which one
    matrix-vector product is avoided (since (b - A*x) is simply b).

    Examples
    --------
    >>> ## The polynomial smoother is not currently used directly
    >>> ## in PyAMG. It is only used by the chebyshev smoothing option,
    >>> ## which automatically calculates the correct coefficients.
    >>> from pyamg.gallery import poisson
    >>> from pyamg.util.linalg import norm
    >>> import numpy
    >>> from pyamg.aggregation import smoothed_aggregation_solver
    >>> A = poisson((10,10), format='csr')
    >>> b = numpy.ones((A.shape[0],1))
    >>> sa = smoothed_aggregation_solver(A, B=numpy.ones((A.shape[0],1)),
    ...         coarse_solver='pinv2', max_coarse=50,
    ...         presmoother=('chebyshev', {'degree':3, 'iterations':1}),
    ...         postsmoother=('chebyshev', {'degree':3, 'iterations':1}))
    >>> x0=numpy.zeros((A.shape[0],1))
    >>> residuals=[]
    >>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)
    """
    # Hoisted out of the loop: the original re-imported norm every iteration
    from pyamg.util.linalg import norm

    A, x, b = make_system(A, x, b, formats=None)
    for _ in range(iterations):
        # When x is identically zero, the residual is just b -- skip the matvec
        if norm(x) == 0:
            residual = b
        else:
            residual = (b - A*x)
        # Horner's rule: evaluate p(A)*residual without forming powers of A
        h = coefficients[0]*residual
        for c in coefficients[1:]:
            h = c*residual + A*h
        x += h