本文整理汇总了Golang中github.com/cpmech/gosl/mpi.Size函数的典型用法代码示例。如果您正苦于以下问题:Golang Size函数的具体用法?Golang Size怎么用?Golang Size使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了Size函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: main
// main tests mpi.IntAllReduceMax: each of 3 processors fills its own
// contiguous share of an 11-entry vector (initialised with -1), then the
// element-wise maximum is joined across processors so that every rank
// ends up holding the complete vector {0,1,...,10}.
func main() {
	mpi.Start(false)
	defer func() {
		mpi.Stop(false)
	}()
	rank, nproc := mpi.Rank(), mpi.Size()
	if rank == 0 {
		io.PfYel("\nTest MPI 03\n")
	}
	if nproc != 3 {
		chk.Panic("this test needs 3 processors")
	}
	// unfilled entries stay at -1 and therefore lose the max-reduction
	vals := make([]int, 11)
	for i := range vals {
		vals[i] = -1
	}
	ntot := len(vals)
	// this processor's contiguous share [lo, hi)
	lo, hi := (rank*ntot)/nproc, ((rank+1)*ntot)/nproc
	for i := lo; i < hi; i++ {
		vals[i] = i
	}
	// element-wise max across processors; the result is written back into vals
	buf := make([]int, ntot)
	mpi.IntAllReduceMax(vals, buf)
	var tst testing.T
	chk.Ints(&tst, fmt.Sprintf("IntAllReduceMax: x @ proc # %d", rank), vals, []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10})
}
示例2: main
// main tests the MPI-parallel Jacobian comparison (TestJacobian 02b):
// the analytical Jacobian of a 6-equation nonlinear system is assembled
// with its rows distributed among up to 6 processors and checked against
// a numerical approximation by num.CompareJac.
// NOTE(review): sin, cos and pi appear to be package-level aliases of
// math.Sin, math.Cos and math.Pi — confirm against the rest of the file.
func main() {
mpi.Start(false)
defer func() {
mpi.Stop(false)
}()
if mpi.Rank() == 0 {
chk.PrintTitle("TestJacobian 02b (MPI)")
}
// skip (do not fail) when there are more processors than matrix rows
if mpi.Size() > 6 {
io.Pf("this tests works with 6 or less MPI processors\n")
return
}
// ffcn computes f(x) for the 6-dimensional nonlinear system
ffcn := func(fx, x []float64) error {
fx[0] = 2.0*x[0] - x[1] + sin(x[2]) - cos(x[3]) - x[5]*x[5] - 1.0 // 0
fx[1] = -x[0] + 2.0*x[1] + cos(x[2]) - sin(x[3]) + x[5] - 1.0 // 1
fx[2] = x[0] + 3.0*x[1] + sin(x[3]) - cos(x[4]) - x[5]*x[5] - 1.0 // 2
fx[3] = 2.0*x[0] + 4.0*x[1] + cos(x[3]) - cos(x[4]) + x[5] - 1.0 // 3
fx[4] = x[0] + 5.0*x[1] - sin(x[2]) + sin(x[4]) - x[5]*x[5]*x[5] - 1.0 // 4
fx[5] = x[0] + 6.0*x[1] - cos(x[2]) + cos(x[4]) + x[5] - 1.0 // 5
return nil
}
// Jfcn assembles the analytical Jacobian; each processor puts only the
// rows in its own range [start, endp1), so the distributed pieces
// together cover the full 6x6 matrix
Jfcn := func(dfdx *la.Triplet, x []float64) error {
dfdx.Start()
J := [][]float64{
{2.0, -1.0, cos(x[2]), sin(x[3]), 0.0, -2.0 * x[5]},
{-1.0, 2.0, -sin(x[2]), -cos(x[3]), 0.0, 1.0},
{1.0, 3.0, 0.0, cos(x[3]), sin(x[4]), -2.0 * x[5]},
{2.0, 4.0, 0.0, -sin(x[3]), sin(x[4]), 1.0},
{1.0, 5.0, -cos(x[2]), 0.0, cos(x[4]), -3.0 * x[5] * x[5]},
{1.0, 6.0, sin(x[2]), 0.0, -sin(x[4]), 1.0},
}
// rows handled by this processor: [start, endp1)
id, sz, ndim := mpi.Rank(), mpi.Size(), 6
start, endp1 := (id*ndim)/sz, ((id+1)*ndim)/sz
for col := 0; col < 6; col++ {
for row := start; row < endp1; row++ {
dfdx.Put(row, col, J[row][col])
}
}
//la.PrintMat(fmt.Sprintf("J @ %d",mpi.Rank()), dfdx.ToMatrix(nil).ToDense(), "%12.6f", false)
return nil
}
// compare analytical vs numerical Jacobian at station x
x := []float64{5.0, 5.0, pi, pi, pi, 5.0}
var tst testing.T
num.CompareJac(&tst, ffcn, Jfcn, x, 1e-6, true)
}
示例3: main
// main tests the MUMPS solver on a complex-valued diagonal system
// (Test MUMPS Sol 05): each processor assembles its own share of the
// diagonal and right-hand side; the solution must match x_correct.
func main() {
mpi.Start(false)
defer func() {
mpi.Stop(false)
}()
myrank := mpi.Rank()
if myrank == 0 {
chk.PrintTitle("Test MUMPS Sol 05")
}
ndim := 10
// rows assembled by this processor: [start, endp1)
id, sz := mpi.Rank(), mpi.Size()
start, endp1 := (id*ndim)/sz, ((id+1)*ndim)/sz
if mpi.Size() > ndim {
chk.Panic("the number of processors must be smaller than or equal to %d", ndim)
}
n := 10
b := make([]complex128, n)
x_correct := make([]complex128, n)
// exact solution: x[i] = (i+1) + ((i+1)/10)i
for i := 0; i < ndim; i++ {
x_correct[i] = complex(float64(i+1), float64(i+1)/10.0)
}
var t la.TripletC
t.Init(ndim, ndim, ndim, true)
// assemble a and b
for i := start; i < endp1; i++ {
// fabricated diagonal entry ar + ac*i, varying with the row index
ar := 10.0 + float64(i)/(float64(ndim)/10.0)
ac := 10.0 - float64(i)/(float64(ndim)/10.0)
t.Put(i, i, ar, ac)
// RHS chosen so that A*x_correct == b (complex product, per row)
b[i] = complex(ar*real(x_correct[i])-ac*imag(x_correct[i]),
ar*imag(x_correct[i])+ac*real(x_correct[i]))
}
sum_b_to_root := true
la.RunMumpsTestC(&t, 1e-14, b, x_correct, sum_b_to_root)
}
示例4: init_mpi
// init_mpi reads the MPI state: it marks this solver as root when running
// on rank 0 and raises the distributed-processing flag when more than one
// processor is active. Without MPI the solver is left untouched.
func (o *Solver) init_mpi() {
	if !mpi.IsOn() {
		return
	}
	o.root = mpi.Rank() == 0
	// only set (never clear) the distributed flag
	if mpi.Size() > 1 {
		o.Distr = true
	}
}
示例5: main
// main tests the MUMPS solver on a small real 5x5 system
// (Test MUMPS Sol 01a), assembled either completely on a single
// processor or split between two processors.
func main() {
mpi.Start(false)
defer func() {
mpi.Stop(false)
}()
myrank := mpi.Rank()
if myrank == 0 {
chk.PrintTitle("Test MUMPS Sol 01a")
}
var t la.Triplet
switch mpi.Size() {
case 1:
// full matrix on one processor; the duplicate (0,0) entries are
// intentional: triplet duplicates are summed, giving A[0][0] = 2
t.Init(5, 5, 13)
t.Put(0, 0, 1.0)
t.Put(0, 0, 1.0)
t.Put(1, 0, 3.0)
t.Put(0, 1, 3.0)
t.Put(2, 1, -1.0)
t.Put(4, 1, 4.0)
t.Put(1, 2, 4.0)
t.Put(2, 2, -3.0)
t.Put(3, 2, 1.0)
t.Put(4, 2, 2.0)
t.Put(2, 3, 2.0)
t.Put(1, 4, 6.0)
t.Put(4, 4, 1.0)
case 2:
// split assembly: rank 0 holds the entries of columns 0-1,
// rank 1 holds the entries of columns 2-4
if myrank == 0 {
t.Init(5, 5, 6)
t.Put(0, 0, 1.0)
t.Put(0, 0, 1.0)
t.Put(1, 0, 3.0)
t.Put(0, 1, 3.0)
t.Put(2, 1, -1.0)
t.Put(4, 1, 4.0)
} else {
t.Init(5, 5, 7)
t.Put(1, 2, 4.0)
t.Put(2, 2, -3.0)
t.Put(3, 2, 1.0)
t.Put(4, 2, 2.0)
t.Put(2, 3, 2.0)
t.Put(1, 4, 6.0)
t.Put(4, 4, 1.0)
}
default:
chk.Panic("this test needs 1 or 2 procs")
}
// known solution x_correct satisfies A*x = b
b := []float64{8.0, 45.0, -3.0, 3.0, 19.0}
x_correct := []float64{1, 2, 3, 4, 5}
sum_b_to_root := false
la.RunMumpsTestR(&t, 1e-14, b, x_correct, sum_b_to_root)
}
示例6: PostProcess
// PostProcess performs a post-processing of the just read json file:
// parallel MPI runs (more than one processor) get the "mumps" solver;
// non-MPI runs fall back to "umfpack"; a single-processor MPI run keeps
// whatever name was read from the file.
func (o *LinSolData) PostProcess() {
	switch {
	case !mpi.IsOn():
		o.Name = "umfpack"
	case mpi.Size() > 1:
		o.Name = "mumps"
	}
}
示例7: main
// main tests the MUMPS solver with a complex-typed (zero imaginary part)
// 10x10 matrix (Test MUMPS Sol 04): each processor assembles the rows in
// its own range; the solution must match x_correct.
func main() {
mpi.Start(false)
defer func() {
mpi.Stop(false)
}()
myrank := mpi.Rank()
if myrank == 0 {
chk.PrintTitle("Test MUMPS Sol 04")
}
ndim := 10
// rows assembled by this processor: [start, endp1)
id, sz := mpi.Rank(), mpi.Size()
start, endp1 := (id*ndim)/sz, ((id+1)*ndim)/sz
if mpi.Size() > ndim {
chk.Panic("the number of processors must be smaller than or equal to %d", ndim)
}
b := make([]complex128, ndim)
var t la.TripletC
t.Init(ndim, ndim, ndim*ndim, true)
for i := start; i < endp1; i++ {
// row i has entries from column i-1 (sub-diagonal) up to the last column
j := i
if i > 0 {
j = i - 1
}
for ; j < 10; j++ { // NOTE(review): the literal 10 hard-codes ndim
val := 10.0 - float64(j)
if i > j {
val -= 1.0 // sub-diagonal entry is one less than the column value
}
t.Put(i, j, val, 0) // imaginary part is zero
}
b[i] = complex(float64(i+1), 0.0)
}
x_correct := []complex128{-1, 8, -65, 454, -2725, 13624, -54497, 163490, -326981, 326991}
sum_b_to_root := true
la.RunMumpsTestC(&t, 1e-4, b, x_correct, sum_b_to_root)
}
示例8: Start
// Start initialises 'global' and starts logging.
//  Input:
//   simfilepath -- path to the simulation (json) input file
//   erasefiles  -- erase previous output files before starting
//   verbose     -- print messages (forced off on non-root processors)
//  Returns:
//   startisok -- true on success; false if reading the simulation or
//                initialising the auxiliary structures failed
func Start(simfilepath string, erasefiles, verbose bool) (startisok bool) {
// multiprocessing data: defaults describe a serial (single-process) run
Global.Rank = 0
Global.Nproc = 1
Global.Root = true
Global.Distr = false
if mpi.IsOn() {
Global.Rank = mpi.Rank()
Global.Nproc = mpi.Size()
Global.Root = Global.Rank == 0
Global.Distr = Global.Nproc > 1
}
// only the root processor may be verbose
Global.Verbose = verbose
if !Global.Root {
Global.Verbose = false
}
// per-processor workspaces
Global.WspcStop = make([]int, Global.Nproc)
Global.WspcInum = make([]int, Global.Nproc)
// simulation and convenience variables
dir := filepath.Dir(simfilepath)
fn := filepath.Base(simfilepath)
Global.Sim = inp.ReadSim(dir, fn, Global.LogPrefix, erasefiles)
LogErrCond(Global.Sim == nil, "ReadSim failed\n")
// bail out (returning the zero value, false) if any processor logged an error
if Stop() {
return
}
Global.Ndim = Global.Sim.Ndim
Global.Dirout = Global.Sim.Data.DirOut
Global.Fnkey = Global.Sim.Data.FnameKey
Global.Enc = Global.Sim.Data.Encoder
Global.Stat = Global.Sim.Data.Stat
Global.LogBcs = Global.Sim.Data.LogBcs
Global.Debug = Global.Sim.Data.Debug
// fix show residual flag: only root prints residuals
if !Global.Root {
Global.Sim.Data.ShowR = false
}
// auxiliar structures
Global.DynCoefs = new(DynCoefs)
if !Global.DynCoefs.Init(&Global.Sim.Solver) {
return
}
Global.HydroSt = new(HydroStatic)
Global.HydroSt.Init()
// success
return true
}
示例9: Jacobian
/* Jacobian
   ========
   Calculates (with N=n-1):
        df0dx0, df0dx1, df0dx2, ... df0dxN
        df1dx0, df1dx1, df1dx2, ... df1dxN
             . . . . . . . . . . . . .
        dfNdx0, dfNdx1, dfNdx2, ... dfNdxN
   by forward finite differences, one column (one perturbed x entry) at a time.
   INPUT:
        ffcn  : f(x) function
        x     : station where dfdx has to be calculated
        fx    : f @ x
        w     : workspace with size == n == len(x)
        distr : distribute rows among MPI processors
   RETURNS:
        J : dfdx @ x [must be pre-allocated] */
func Jacobian(J *la.Triplet, ffcn Cb_f, x, fx, w []float64, distr bool) (err error) {
ndim := len(x)
// rows put into J by this call: all rows, or this processor's share when distributed
start, endp1 := 0, ndim
if distr {
id, sz := mpi.Rank(), mpi.Size()
start, endp1 = (id*ndim)/sz, ((id+1)*ndim)/sz
// lazily size the triplet for only this processor's rows
if J.Max() == 0 {
J.Init(ndim, ndim, (endp1-start)*ndim)
}
} else {
if J.Max() == 0 {
J.Init(ndim, ndim, ndim*ndim)
}
}
J.Start()
// NOTE: cannot split calculation by columns unless the f function is
//       independently calculated by each MPI processor.
//       Otherwise, the AllReduce in f calculation would
//       join pieces of f from different processors calculated for
//       different x values (δx[col] from different columns).
/*
for col := start; col < endp1; col++ {
xsafe := x[col]
delta := math.Sqrt(EPS * max(CTE1, math.Abs(xsafe)))
x[col] = xsafe + delta
ffcn(w, x) // fnew
io.Pforan("x = %v, f = %v\n", x, w)
for row := 0; row < ndim; row++ {
J.Put(row, col, (w[row]-fx[row])/delta)
}
x[col] = xsafe
}
*/
var df float64
// loop over ALL columns (see note above); every processor perturbs the
// same x entry, but stores only its own rows of the resulting column
for col := 0; col < ndim; col++ {
// save, perturb, then restore x[col] so x is unchanged on return
xsafe := x[col]
delta := math.Sqrt(EPS * max(CTE1, math.Abs(xsafe)))
x[col] = xsafe + delta
err = ffcn(w, x) // w := f(x+δx[col])
if err != nil {
return
}
for row := start; row < endp1; row++ {
df = w[row] - fx[row]
//if math.Abs(df) > EPS {
J.Put(row, col, df/delta)
//}
}
x[col] = xsafe
}
return
}
示例10: main
// main tests la.SpTriSumToRoot: each processor assembles only its own
// rows of a 6x6 matrix into a sparse triplet; after summing to root,
// rank 0 must hold the complete matrix.
func main() {
	mpi.Start(false)
	defer func() {
		mpi.Stop(false)
	}()
	if mpi.Rank() == 0 {
		chk.PrintTitle("Test SumToRoot 01")
	}
	M := [][]float64{
		{1000, 1000, 1000, 1011, 1021, 1000},
		{1000, 1000, 1000, 1012, 1022, 1000},
		{1000, 1000, 1000, 1013, 1023, 1000},
		{1011, 1012, 1013, 1000, 1000, 1000},
		{1021, 1022, 1023, 1000, 1000, 1000},
		{1000, 1000, 1000, 1000, 1000, 1000},
	}
	rank, nproc, ndim := mpi.Rank(), mpi.Size(), len(M)
	if nproc > 6 {
		chk.Panic("this test works with at most 6 processors")
	}
	// rows assembled by this processor: [rowA, rowB)
	rowA, rowB := (rank*ndim)/nproc, ((rank+1)*ndim)/nproc
	var J la.Triplet
	J.Init(ndim, ndim, ndim*ndim)
	for row := rowA; row < rowB; row++ {
		for col := 0; col < ndim; col++ {
			J.Put(row, col, M[row][col])
		}
	}
	la.PrintMat(fmt.Sprintf("J @ proc # %d", rank), J.ToMatrix(nil).ToDense(), "%10.1f", false)
	// join all pieces on the root processor
	la.SpTriSumToRoot(&J)
	if mpi.Rank() == 0 {
		// root must now hold the full matrix M
		var tst testing.T
		chk.Matrix(&tst, "J @ proc 0", 1.0e-17, J.ToMatrix(nil).ToDense(), M)
	}
}
示例11: main
// main tests num.CompareJacMpi with exactly 2 processors
// (TestJacobian 01b): the analytical Jacobian of a 2-equation nonlinear
// system is assembled with its entries split between the two ranks and
// compared against a numerical approximation.
func main() {
mpi.Start(false)
defer func() {
mpi.Stop(false)
}()
if mpi.Rank() == 0 {
chk.PrintTitle("TestJacobian 01b (MPI)")
}
// skip (do not fail) unless exactly 2 processors are available
if mpi.Size() != 2 {
io.Pf("this tests needs MPI 2 processors\n")
return
}
// ffcn computes f(x) for the 2-dimensional system
ffcn := func(fx, x []float64) error {
fx[0] = math.Pow(x[0], 3.0) + x[1] - 1.0
fx[1] = -x[0] + math.Pow(x[1], 3.0) + 1.0
return nil
}
// Jfcn assembles the analytical Jacobian. The dead `if false` branch
// keeps an alternative per-column split for reference; the active
// branch splits the entries per row between the two ranks.
Jfcn := func(dfdx *la.Triplet, x []float64) error {
dfdx.Start()
if false {
if mpi.Rank() == 0 {
dfdx.Put(0, 0, 3.0*x[0]*x[0])
dfdx.Put(1, 0, -1.0)
} else {
dfdx.Put(0, 1, 1.0)
dfdx.Put(1, 1, 3.0*x[1]*x[1])
}
} else {
if mpi.Rank() == 0 {
dfdx.Put(0, 0, 3.0*x[0]*x[0])
dfdx.Put(0, 1, 1.0)
} else {
dfdx.Put(1, 0, -1.0)
dfdx.Put(1, 1, 3.0*x[1]*x[1])
}
}
return nil
}
// compare analytical vs numerical Jacobian at station x
x := []float64{0.5, 0.5}
var tst testing.T
num.CompareJacMpi(&tst, ffcn, Jfcn, x, 1e-8, true)
}
示例12: SpTriSumToRoot
// SpTriSumToRoot joins (MPI) parallel triplets on the root (Rank == 0)
// processor: each non-root processor sends its triplet entries to root,
// which appends them to its own J. Since triplet duplicates are summed
// when the matrix is built, appending implements the sum.
// NOTE: root's own entries in J are kept; the result lives in root's J.
func SpTriSumToRoot(J *Triplet) {
if mpi.Rank() == 0 {
// receive and append each non-root processor's entries; the message
// order must mirror the sender: nnz, i-indices, j-indices, values
for proc := 1; proc < mpi.Size(); proc++ {
nnz := mpi.SingleIntRecv(proc)
irec := make([]int, nnz)
drec := make([]float64, nnz)
mpi.IntRecv(irec, proc)
J.i = append(J.i, irec...)
mpi.IntRecv(irec, proc) // irec buffer is reused for the j-indices
J.j = append(J.j, irec...)
mpi.DblRecv(drec, proc)
J.x = append(J.x, drec...)
}
// update position and capacity so root's triplet reflects the joined data
J.pos = len(J.x)
J.max = J.pos
} else {
// non-root: send this processor's piece to root (same order as above)
mpi.SingleIntSend(J.max, 0)
mpi.IntSend(J.i, 0)
mpi.IntSend(J.j, 0)
mpi.DblSend(J.x, 0)
}
}
示例13: main
func main() {
mpi.Start(false)
defer func() {
mpi.Stop(false)
}()
if mpi.Rank() == 0 {
chk.PrintTitle("ode02: Hairer-Wanner VII-p5 Eq.(1.5) Van der Pol's Equation")
}
if mpi.Size() != 2 {
chk.Panic(">> error: this test requires 2 MPI processors\n")
return
}
eps := 1.0e-6
w := make([]float64, 2) // workspace
fcn := func(f []float64, dx, x float64, y []float64, args ...interface{}) error {
f[0], f[1] = 0, 0
switch mpi.Rank() {
case 0:
f[0] = y[1]
case 1:
f[1] = ((1.0-y[0]*y[0])*y[1] - y[0]) / eps
}
// join all f
mpi.AllReduceSum(f, w)
return nil
}
jac := func(dfdy *la.Triplet, dx, x float64, y []float64, args ...interface{}) error {
if dfdy.Max() == 0 {
dfdy.Init(2, 2, 4)
}
dfdy.Start()
if false { // per column
switch mpi.Rank() {
case 0:
dfdy.Put(0, 0, 0.0)
dfdy.Put(1, 0, (-2.0*y[0]*y[1]-1.0)/eps)
case 1:
dfdy.Put(0, 1, 1.0)
dfdy.Put(1, 1, (1.0-y[0]*y[0])/eps)
}
} else { // per row
switch mpi.Rank() {
case 0:
dfdy.Put(0, 0, 0.0)
dfdy.Put(0, 1, 1.0)
case 1:
dfdy.Put(1, 0, (-2.0*y[0]*y[1]-1.0)/eps)
dfdy.Put(1, 1, (1.0-y[0]*y[0])/eps)
}
}
return nil
}
// method and flags
silent := false
fixstp := false
//method := "Dopri5"
method := "Radau5"
numjac := false
xa, xb := 0.0, 2.0
ya := []float64{2.0, -0.6}
ndim := len(ya)
// structure to hold numerical results
res := ode.Results{Method: method}
// allocate ODE object
var o ode.Solver
o.Distr = true
if numjac {
o.Init(method, ndim, fcn, nil, nil, ode.SimpleOutput, silent)
} else {
o.Init(method, ndim, fcn, jac, nil, ode.SimpleOutput, silent)
}
// tolerances and initial step size
rtol := 1e-4
atol := rtol
o.IniH = 1.0e-4
o.SetTol(atol, rtol)
//o.NmaxSS = 2
// solve problem
y := make([]float64, ndim)
copy(y, ya)
t0 := time.Now()
if fixstp {
o.Solve(y, xa, xb, 0.05, fixstp, &res)
} else {
o.Solve(y, xa, xb, xb-xa, fixstp, &res)
}
// plot
if mpi.Rank() == 0 {
io.Pfmag("elapsed time = %v\n", time.Now().Sub(t0))
plt.SetForEps(1.5, 400)
args := "'b-', marker='.', lw=1, ms=4, clip_on=0"
//.........这里部分代码省略.........
示例14: main
func main() {
mpi.Start(false)
defer func() {
mpi.Stop(false)
}()
if mpi.Rank() == 0 {
chk.PrintTitle("Test ODE 02b")
io.Pfcyan("Hairer-Wanner VII-p5 Eq.(1.5) Van der Pol's Equation (MPI)\n")
}
if mpi.Size() != 2 {
chk.Panic(">> error: this test requires 2 MPI processors\n")
return
}
eps := 1.0e-6
w := make([]float64, 2) // workspace
fcn := func(f []float64, x float64, y []float64, args ...interface{}) error {
f[0], f[1] = 0, 0
switch mpi.Rank() {
case 0:
f[0] = y[1]
case 1:
f[1] = ((1.0-y[0]*y[0])*y[1] - y[0]) / eps
}
// join all f
mpi.AllReduceSum(f, w)
return nil
}
jac := func(dfdy *la.Triplet, x float64, y []float64, args ...interface{}) error {
if dfdy.Max() == 0 {
dfdy.Init(2, 2, 4)
}
dfdy.Start()
if false { // per column
switch mpi.Rank() {
case 0:
dfdy.Put(0, 0, 0.0)
dfdy.Put(1, 0, (-2.0*y[0]*y[1]-1.0)/eps)
case 1:
dfdy.Put(0, 1, 1.0)
dfdy.Put(1, 1, (1.0-y[0]*y[0])/eps)
}
} else { // per row
switch mpi.Rank() {
case 0:
dfdy.Put(0, 0, 0.0)
dfdy.Put(0, 1, 1.0)
case 1:
dfdy.Put(1, 0, (-2.0*y[0]*y[1]-1.0)/eps)
dfdy.Put(1, 1, (1.0-y[0]*y[0])/eps)
}
}
return nil
}
// data
silent := false
fixstp := false
//method := "Dopri5"
method := "Radau5"
xa, xb := 0.0, 2.0
ya := []float64{2.0, -0.6}
ndim := len(ya)
// output
var b bytes.Buffer
out := func(first bool, dx, x float64, y []float64, args ...interface{}) error {
if mpi.Rank() == 0 {
if first {
fmt.Fprintf(&b, "%23s %23s %23s %23s\n", "dx", "x", "y0", "y1")
}
fmt.Fprintf(&b, "%23.15E %23.15E %23.15E %23.15E\n", dx, x, y[0], y[1])
}
return nil
}
defer func() {
if mpi.Rank() == 0 {
extra := "d2 = Read('data/vdpol_radau5_for.dat')\n" +
"subplot(3,1,1)\n" +
"plot(d2['x'],d2['y0'],'k+',label='res',ms=10)\n" +
"subplot(3,1,2)\n" +
"plot(d2['x'],d2['y1'],'k+',label='res',ms=10)\n"
ode.Plot("/tmp/gosl", "vdpolB", method, &b, []int{0, 1}, ndim, nil, xa, xb, true, false, extra)
}
}()
// one run
var o ode.ODE
o.Distr = true
//numjac := true
numjac := false
if numjac {
o.Init(method, ndim, fcn, nil, nil, out, silent)
} else {
o.Init(method, ndim, fcn, jac, nil, out, silent)
}
// tolerances and initial step size
//.........这里部分代码省略.........
示例15: main
func main() {
mpi.Start(false)
defer func() {
mpi.Stop(false)
}()
if mpi.Rank() == 0 {
chk.PrintTitle("Test ODE 04b (MPI)")
io.Pfcyan("Hairer-Wanner VII-p376 Transistor Amplifier (MPI)\n")
io.Pfcyan("(from E Hairer's website, not the system in the book)\n")
}
if mpi.Size() != 3 {
chk.Panic(">> error: this test requires 3 MPI processors\n")
return
}
// RIGHT-HAND SIDE OF THE AMPLIFIER PROBLEM
w := make([]float64, 8) // workspace
fcn := func(f []float64, x float64, y []float64, args ...interface{}) error {
d := args[0].(*HWtransData)
UET := d.UE * math.Sin(d.W*x)
FAC1 := d.BETA * (math.Exp((y[3]-y[2])/d.UF) - 1.0)
FAC2 := d.BETA * (math.Exp((y[6]-y[5])/d.UF) - 1.0)
la.VecFill(f, 0)
switch mpi.Rank() {
case 0:
f[0] = y[0] / d.R9
case 1:
f[1] = (y[1]-d.UB)/d.R8 + d.ALPHA*FAC1
f[2] = y[2]/d.R7 - FAC1
case 2:
f[3] = y[3]/d.R5 + (y[3]-d.UB)/d.R6 + (1.0-d.ALPHA)*FAC1
f[4] = (y[4]-d.UB)/d.R4 + d.ALPHA*FAC2
f[5] = y[5]/d.R3 - FAC2
f[6] = y[6]/d.R1 + (y[6]-d.UB)/d.R2 + (1.0-d.ALPHA)*FAC2
f[7] = (y[7] - UET) / d.R0
}
mpi.AllReduceSum(f, w)
return nil
}
// JACOBIAN OF THE AMPLIFIER PROBLEM
jac := func(dfdy *la.Triplet, x float64, y []float64, args ...interface{}) error {
d := args[0].(*HWtransData)
FAC14 := d.BETA * math.Exp((y[3]-y[2])/d.UF) / d.UF
FAC27 := d.BETA * math.Exp((y[6]-y[5])/d.UF) / d.UF
if dfdy.Max() == 0 {
dfdy.Init(8, 8, 16)
}
NU := 2
dfdy.Start()
switch mpi.Rank() {
case 0:
dfdy.Put(2+0-NU, 0, 1.0/d.R9)
dfdy.Put(2+1-NU, 1, 1.0/d.R8)
dfdy.Put(1+2-NU, 2, -d.ALPHA*FAC14)
dfdy.Put(0+3-NU, 3, d.ALPHA*FAC14)
dfdy.Put(2+2-NU, 2, 1.0/d.R7+FAC14)
case 1:
dfdy.Put(1+3-NU, 3, -FAC14)
dfdy.Put(2+3-NU, 3, 1.0/d.R5+1.0/d.R6+(1.0-d.ALPHA)*FAC14)
dfdy.Put(3+2-NU, 2, -(1.0-d.ALPHA)*FAC14)
dfdy.Put(2+4-NU, 4, 1.0/d.R4)
dfdy.Put(1+5-NU, 5, -d.ALPHA*FAC27)
case 2:
dfdy.Put(0+6-NU, 6, d.ALPHA*FAC27)
dfdy.Put(2+5-NU, 5, 1.0/d.R3+FAC27)
dfdy.Put(1+6-NU, 6, -FAC27)
dfdy.Put(2+6-NU, 6, 1.0/d.R1+1.0/d.R2+(1.0-d.ALPHA)*FAC27)
dfdy.Put(3+5-NU, 5, -(1.0-d.ALPHA)*FAC27)
dfdy.Put(2+7-NU, 7, 1.0/d.R0)
}
return nil
}
// MATRIX "M"
c1, c2, c3, c4, c5 := 1.0e-6, 2.0e-6, 3.0e-6, 4.0e-6, 5.0e-6
var M la.Triplet
M.Init(8, 8, 14)
M.Start()
NU := 1
switch mpi.Rank() {
case 0:
M.Put(1+0-NU, 0, -c5)
M.Put(0+1-NU, 1, c5)
M.Put(2+0-NU, 0, c5)
M.Put(1+1-NU, 1, -c5)
M.Put(1+2-NU, 2, -c4)
M.Put(1+3-NU, 3, -c3)
case 1:
M.Put(0+4-NU, 4, c3)
M.Put(2+3-NU, 3, c3)
M.Put(1+4-NU, 4, -c3)
case 2:
M.Put(1+5-NU, 5, -c2)
M.Put(1+6-NU, 6, -c1)
M.Put(0+7-NU, 7, c1)
M.Put(2+6-NU, 6, c1)
M.Put(1+7-NU, 7, -c1)
//.........这里部分代码省略.........