This article collects typical usage examples of the Xoffset field of cmd/internal/gc.Node in Go. If you have been wondering what Node.Xoffset is for, how it is used, or what real code that uses it looks like, the hand-picked examples below should help. You can also explore further usage examples of the enclosing type, cmd/internal/gc.Node.
The section below shows 15 code examples of Node.Xoffset, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Go code examples.
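Before reading the examples, the following standalone sketch may help. It does not use the real compiler packages; the Node and Op types below are simplified stand-ins invented for illustration. It shows the pattern most of the examples share: an OINDREG-style node addresses memory at Xoffset bytes from a base register, and consecutive machine words are reached by bumping Xoffset.

package main

import "fmt"

type Op int

const (
	OREGISTER Op = iota // value lives in a register
	OINDREG             // value lives at Xoffset(register)
)

type Node struct {
	Op      Op
	Reg     string // base register, e.g. "SP"
	Xoffset int64  // byte offset from the base register
}

func (n Node) String() string {
	if n.Op == OINDREG {
		return fmt.Sprintf("%d(%s)", n.Xoffset, n.Reg)
	}
	return n.Reg
}

func main() {
	const widthptr = 8

	// Like cgen_callret/cgen_aret below: a call's result lives on the stack,
	// so the node that refers to it is simply SP plus the result's offset.
	ret := Node{Op: OINDREG, Reg: "SP", Xoffset: 16}
	fmt.Println("return value at", ret) // return value at 16(SP)

	// Like componentgen/clearfat below: walk an object word by word by
	// advancing Xoffset on both the source and the destination node.
	src := Node{Op: OINDREG, Reg: "SI", Xoffset: 0}
	dst := Node{Op: OINDREG, Reg: "DI", Xoffset: 0}
	for i := 0; i < 3; i++ {
		fmt.Printf("MOVQ %v, %v\n", src, dst)
		src.Xoffset += widthptr
		dst.Xoffset += widthptr
	}
}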
Example 1: cgen_aret
/*
 * call to n has already been generated.
 * generate:
 *	res = &return value from call.
 */
func cgen_aret(n *gc.Node, res *gc.Node) {
	t := n.Left.Type
	if gc.Isptr[t.Etype] {
		t = t.Type
	}

	var flist gc.Iter
	fp := gc.Structfirst(&flist, gc.Getoutarg(t))
	if fp == nil {
		gc.Fatal("cgen_aret: nil")
	}

	var nod1 gc.Node
	nod1.Op = gc.OINDREG
	nod1.Val.U.Reg = x86.REG_SP
	nod1.Addable = 1
	nod1.Xoffset = fp.Width
	nod1.Type = fp.Type

	if res.Op != gc.OREGISTER {
		var nod2 gc.Node
		regalloc(&nod2, gc.Types[gc.Tptr], res)
		gins(x86.ALEAL, &nod1, &nod2)
		gins(x86.AMOVL, &nod2, res)
		regfree(&nod2)
	} else {
		gins(x86.ALEAL, &nod1, res)
	}
}
Example 2: split64
/*
 * n is a 64-bit value. fill in lo and hi to refer to its 32-bit halves.
 */
func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
	if !gc.Is64(n.Type) {
		gc.Fatal("split64 %v", gc.Tconv(n.Type, 0))
	}

	if nsclean >= len(sclean) {
		gc.Fatal("split64 clean")
	}
	sclean[nsclean].Op = gc.OEMPTY
	nsclean++

	switch n.Op {
	default:
		switch n.Op {
		default:
			var n1 gc.Node
			if !dotaddable(n, &n1) {
				gc.Igen(n, &n1, nil)
				sclean[nsclean-1] = n1
			}

			n = &n1

		case gc.ONAME:
			if n.Class == gc.PPARAMREF {
				var n1 gc.Node
				gc.Cgen(n.Heapaddr, &n1)
				sclean[nsclean-1] = n1
				n = &n1
			}

			// nothing
		case gc.OINDREG:
			break
		}

		*lo = *n
		*hi = *n
		lo.Type = gc.Types[gc.TUINT32]
		if n.Type.Etype == gc.TINT64 {
			hi.Type = gc.Types[gc.TINT32]
		} else {
			hi.Type = gc.Types[gc.TUINT32]
		}
		hi.Xoffset += 4

	case gc.OLITERAL:
		var n1 gc.Node
		gc.Convconst(&n1, n.Type, &n.Val)
		i := gc.Mpgetfix(n1.Val.U.Xval)
		gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
		i >>= 32
		if n.Type.Etype == gc.TINT64 {
			gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i)))
		} else {
			gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i)))
		}
	}
}
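To make the arithmetic in split64's OLITERAL branch concrete, here is a small standalone sketch (plain Go, not using the compiler packages) that reproduces the lo/hi computation: the low half is always an unsigned 32-bit word, while the high half keeps the signedness of the original type. For memory operands, split64 instead reuses the same node and simply adds 4 to hi's Xoffset, because the two halves are adjacent 32-bit words.

package main

import "fmt"

// splitConst64 reproduces the arithmetic of split64's OLITERAL branch.
func splitConst64(v int64, signed bool) (lo uint32, hi int64) {
	lo = uint32(v) // low word is always TUINT32
	top := v >> 32 // like "i >>= 32" above
	if signed {
		hi = int64(int32(top)) // TINT64: high word is TINT32
	} else {
		hi = int64(uint32(top)) // TUINT64: high word is TUINT32
	}
	return lo, hi
}

func main() {
	lo, hi := splitConst64(-1, true)
	fmt.Printf("lo=%#x hi=%d\n", lo, hi) // lo=0xffffffff hi=-1

	lo, hi = splitConst64(1<<40, false)
	fmt.Printf("lo=%#x hi=%d\n", lo, hi) // lo=0x0 hi=256
}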
Example 3: dotaddable
func dotaddable(n *gc.Node, n1 *gc.Node) bool {
	if n.Op != gc.ODOT {
		return false
	}

	var oary [10]int64
	var nn *gc.Node
	o := gc.Dotoffset(n, oary[:], &nn)
	if nn != nil && nn.Addable && o == 1 && oary[0] >= 0 {
		*n1 = *nn
		n1.Type = n.Type
		n1.Xoffset += oary[0]
		return true
	}

	return false
}
Example 4: fixlargeoffset
func fixlargeoffset(n *gc.Node) {
	if n == nil {
		return
	}
	if n.Op != gc.OINDREG {
		return
	}
	if -4096 <= n.Xoffset && n.Xoffset < 4096 {
		return
	}

	a := gc.Node(*n)
	a.Op = gc.OREGISTER
	a.Type = gc.Types[gc.Tptr]
	a.Xoffset = 0
	gc.Cgen_checknil(&a)
	ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, &a)
	n.Xoffset = 0
}
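The decision fixlargeoffset makes can be illustrated with a standalone sketch: if an OINDREG offset lies outside the range the function accepts for an addressing-mode immediate (here plus or minus 4096, as in the code above), the offset is folded into the base register with an explicit add and Xoffset is reset to zero; the nil check happens first because the explicit add would otherwise hide a nil dereference. The memRef type and the textual pseudo-instructions below are hypothetical stand-ins, not the compiler's API.

package main

import "fmt"

// memRef is a hypothetical stand-in for an OINDREG node: Xoffset(Reg).
type memRef struct {
	Reg     string
	Xoffset int64
}

// fixLargeOffset mirrors the decision above: small offsets stay in the
// addressing mode; large ones are added into the base register.
func fixLargeOffset(n *memRef) []string {
	if -4096 <= n.Xoffset && n.Xoffset < 4096 {
		return nil // fits in the addressing mode, nothing to emit
	}
	ins := []string{
		fmt.Sprintf("CHECKNIL %s", n.Reg), // nil check before the add hides it
		fmt.Sprintf("ADD $%d, %s", n.Xoffset, n.Reg),
	}
	n.Xoffset = 0
	return ins
}

func main() {
	small := memRef{Reg: "R3", Xoffset: 8}
	fmt.Println(fixLargeOffset(&small), small) // [] {R3 8}

	big := memRef{Reg: "R3", Xoffset: 65536}
	fmt.Println(fixLargeOffset(&big), big) // [CHECKNIL R3 ADD $65536, R3] {R3 0}
}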
Example 5: cgen_callret
/*
 * call to n has already been generated.
 * generate:
 *	res = return value from call.
 */
func cgen_callret(n *gc.Node, res *gc.Node) {
	t := n.Left.Type
	if t.Etype == gc.TPTR32 || t.Etype == gc.TPTR64 {
		t = t.Type
	}

	var flist gc.Iter
	fp := gc.Structfirst(&flist, gc.Getoutarg(t))
	if fp == nil {
		gc.Fatal("cgen_callret: nil")
	}

	var nod gc.Node
	nod.Op = gc.OINDREG
	nod.Val.U.Reg = x86.REG_SP
	nod.Addable = 1
	nod.Xoffset = fp.Width
	nod.Type = fp.Type
	gc.Cgen_as(res, &nod)
}
Example 6: bgen
//......... part of the code omitted here .........
			p2 := gc.Gbranch(ppc64.ABR, nil, 0)
			gc.Patch(p1, gc.Pc)
			ll := n.Ninit // avoid re-genning ninit
			n.Ninit = nil
			bgen(n, true, -likely, p2)
			n.Ninit = ll
			gc.Patch(gc.Gbranch(ppc64.ABR, nil, 0), to)
			gc.Patch(p2, gc.Pc)
			return
		}

		a = gc.Brcom(a)
		true_ = !true_
	}

	// make simplest on right
	if nl.Op == gc.OLITERAL || (nl.Ullman < nr.Ullman && nl.Ullman < gc.UINF) {
		a = gc.Brrev(a)
		r := nl
		nl = nr
		nr = r
	}

	if gc.Isslice(nl.Type) {
		// front end should only leave cmp to literal nil
		if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
			gc.Yyerror("illegal slice comparison")
			break
		}

		a = optoas(a, gc.Types[gc.Tptr])
		var n1 gc.Node
		igen(nl, &n1, nil)
		n1.Xoffset += int64(gc.Array_array)
		n1.Type = gc.Types[gc.Tptr]
		var tmp gc.Node
		gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
		var n2 gc.Node
		regalloc(&n2, gc.Types[gc.Tptr], &n1)
		gmove(&n1, &n2)
		gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n2, &tmp)
		regfree(&n2)
		gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
		regfree(&n1)
		break
	}

	if gc.Isinter(nl.Type) {
		// front end should only leave cmp to literal nil
		if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
			gc.Yyerror("illegal interface comparison")
			break
		}

		a = optoas(a, gc.Types[gc.Tptr])
		var n1 gc.Node
		igen(nl, &n1, nil)
		n1.Type = gc.Types[gc.Tptr]
		var tmp gc.Node
		gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
		var n2 gc.Node
		regalloc(&n2, gc.Types[gc.Tptr], &n1)
		gmove(&n1, &n2)
		gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n2, &tmp)
		regfree(&n2)
		gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
Example 7: componentgen
//......... part of the code omitted here .........
		// When zeroing, prepare a register containing zero.
		var tmp gc.Node
		gc.Nodconst(&tmp, nl.Type, 0)
		regalloc(&nodr, gc.Types[gc.TUINT], nil)
		gmove(&tmp, &nodr)
		freer = 1
	}

	// nl and nr are 'cadable' which basically means they are names (variables) now.
	// If they are the same variable, don't generate any code, because the
	// VARDEF we generate will mark the old value as dead incorrectly.
	// (And also the assignments are useless.)
	if nr != nil && nl.Op == gc.ONAME && nr.Op == gc.ONAME && nl == nr {
		goto yes
	}

	switch nl.Type.Etype {
	// componentgen for arrays.
	case gc.TARRAY:
		if nl.Op == gc.ONAME {
			gc.Gvardef(nl)
		}
		t := nl.Type
		if !gc.Isslice(t) {
			nodl.Type = t.Type
			nodr.Type = nodl.Type
			for fldcount := int64(0); fldcount < t.Bound; fldcount++ {
				if nr == nil {
					gc.Clearslim(&nodl)
				} else {
					gmove(&nodr, &nodl)
				}
				nodl.Xoffset += t.Type.Width
				nodr.Xoffset += t.Type.Width
			}

			goto yes
		}

		// componentgen for slices.
		nodl.Xoffset += int64(gc.Array_array)
		nodl.Type = gc.Ptrto(nl.Type.Type)

		if nr != nil {
			nodr.Xoffset += int64(gc.Array_array)
			nodr.Type = nodl.Type
		}

		gmove(&nodr, &nodl)

		nodl.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
		nodl.Type = gc.Types[gc.Simtype[gc.TUINT]]

		if nr != nil {
			nodr.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
			nodr.Type = nodl.Type
		}

		gmove(&nodr, &nodl)

		nodl.Xoffset += int64(gc.Array_cap) - int64(gc.Array_nel)
		nodl.Type = gc.Types[gc.Simtype[gc.TUINT]]

		if nr != nil {
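The Array_array, Array_nel and Array_cap values that componentgen adds to Xoffset are just the offsets of the data pointer, length and capacity words of a slice header. Here is a standalone sketch (independent of the compiler packages; the sliceHeader struct is assumed to mirror that layout) that computes the same offsets:

package main

import (
	"fmt"
	"unsafe"
)

// sliceHeader is assumed to mirror the layout the compiler uses for a slice
// value: data pointer, then length, then capacity.
type sliceHeader struct {
	Data unsafe.Pointer
	Len  int
	Cap  int
}

func main() {
	arrayOff := unsafe.Offsetof(sliceHeader{}.Data) // plays the role of Array_array
	nelOff := unsafe.Offsetof(sliceHeader{}.Len)    // Array_nel
	capOff := unsafe.Offsetof(sliceHeader{}.Cap)    // Array_cap

	fmt.Println("Array_array =", arrayOff) // 0
	fmt.Println("Array_nel   =", nelOff)   // 8 on a 64-bit platform
	fmt.Println("Array_cap   =", capOff)   // 16 on a 64-bit platform

	// componentgen walks these words by bumping Xoffset:
	// += Array_array, then += Array_nel-Array_array, then += Array_cap-Array_nel.
	fmt.Println("step to len:", nelOff-arrayOff, "step to cap:", capOff-nelOff)
}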
Example 8: igen
/*
 * generate:
 *	newreg = &n;
 *	res = newreg
 *
 * on exit, a has been changed to be *newreg.
 * caller must regfree(a).
 * The generated code checks that the result is not *nil.
 */
func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
	if gc.Debug['g'] != 0 {
		gc.Dump("\nigen-n", n)
	}

	switch n.Op {
	case gc.ONAME:
		if (n.Class&gc.PHEAP != 0) || n.Class == gc.PPARAMREF {
			break
		}
		*a = *n
		return

	// Increase the refcount of the register so that igen's caller
	// has to call regfree.
	case gc.OINDREG:
		if n.Val.U.Reg != ppc64.REGSP {
			reg[n.Val.U.Reg]++
		}
		*a = *n
		return

	case gc.ODOT:
		igen(n.Left, a, res)
		a.Xoffset += n.Xoffset
		a.Type = n.Type
		fixlargeoffset(a)
		return

	case gc.ODOTPTR:
		cgenr(n.Left, a, res)
		gc.Cgen_checknil(a)
		a.Op = gc.OINDREG
		a.Xoffset += n.Xoffset
		a.Type = n.Type
		fixlargeoffset(a)
		return

	case gc.OCALLFUNC,
		gc.OCALLMETH,
		gc.OCALLINTER:
		switch n.Op {
		case gc.OCALLFUNC:
			cgen_call(n, 0)

		case gc.OCALLMETH:
			gc.Cgen_callmeth(n, 0)

		case gc.OCALLINTER:
			cgen_callinter(n, nil, 0)
		}

		var flist gc.Iter
		fp := gc.Structfirst(&flist, gc.Getoutarg(n.Left.Type))
		*a = gc.Node{}
		a.Op = gc.OINDREG
		a.Val.U.Reg = ppc64.REGSP
		a.Addable = 1
		a.Xoffset = fp.Width + int64(gc.Widthptr) // +widthptr: saved lr at 0(SP)
		a.Type = n.Type
		return

	// Index of fixed-size array by constant can
	// put the offset in the addressing.
	// Could do the same for slice except that we need
	// to use the real index for the bounds checking.
	case gc.OINDEX:
		if gc.Isfixedarray(n.Left.Type) || (gc.Isptr[n.Left.Type.Etype] && gc.Isfixedarray(n.Left.Left.Type)) {
			if gc.Isconst(n.Right, gc.CTINT) {
				// Compute &a.
				if !gc.Isptr[n.Left.Type.Etype] {
					igen(n.Left, a, res)
				} else {
					var n1 gc.Node
					igen(n.Left, &n1, res)
					gc.Cgen_checknil(&n1)
					regalloc(a, gc.Types[gc.Tptr], res)
					gmove(&n1, a)
					regfree(&n1)
					a.Op = gc.OINDREG
				}

				// Compute &a[i] as &a + i*width.
				a.Type = n.Type
				a.Xoffset += gc.Mpgetfix(n.Right.Val.U.Xval) * n.Type.Width
				fixlargeoffset(a)
				return
			}
		}
	}

//......... part of the code omitted here .........
Example 9: clearfat
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		gc.Dump("\nclearfat", nl)
	}

	w := nl.Type.Width

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	c := w % 8 // bytes
	q := w / 8 // quads

	if q < 4 {
		// Write sequence of MOV 0, off(base) instead of using STOSQ.
		// The hope is that although the code will be slightly longer,
		// the MOVs will have no dependencies and pipeline better
		// than the unrolled STOSQ loop.
		// NOTE: Must use agen, not igen, so that optimizer sees address
		// being taken. We are not writing on field boundaries.
		var n1 gc.Node
		gc.Agenr(nl, &n1, nil)

		n1.Op = gc.OINDREG
		var z gc.Node
		gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
		for {
			tmp14 := q
			q--
			if tmp14 <= 0 {
				break
			}
			n1.Type = z.Type
			gins(x86.AMOVQ, &z, &n1)
			n1.Xoffset += 8
		}

		if c >= 4 {
			gc.Nodconst(&z, gc.Types[gc.TUINT32], 0)
			n1.Type = z.Type
			gins(x86.AMOVL, &z, &n1)
			n1.Xoffset += 4
			c -= 4
		}

		gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
		for {
			tmp15 := c
			c--
			if tmp15 <= 0 {
				break
			}
			n1.Type = z.Type
			gins(x86.AMOVB, &z, &n1)
			n1.Xoffset++
		}

		gc.Regfree(&n1)
		return
	}

	var oldn1 gc.Node
	var n1 gc.Node
	savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
	gc.Agen(nl, &n1)

	var ax gc.Node
	var oldax gc.Node
	savex(x86.REG_AX, &ax, &oldax, nil, gc.Types[gc.Tptr])
	gconreg(x86.AMOVL, 0, x86.REG_AX)

	if q > 128 || gc.Nacl {
		gconreg(movptr, q, x86.REG_CX)
		gins(x86.AREP, nil, nil)   // repeat
		gins(x86.ASTOSQ, nil, nil) // STOQ AL,*(DI)+
	} else {
		if di := dzDI(q); di != 0 {
			gconreg(addptr, di, x86.REG_DI)
		}
		p := gins(obj.ADUFFZERO, nil, nil)
		p.To.Type = obj.TYPE_ADDR
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
		p.To.Offset = dzOff(q)
	}

	z := ax
	di := n1
	if w >= 8 && c >= 4 {
		di.Op = gc.OINDREG
		z.Type = gc.Types[gc.TINT64]
		di.Type = z.Type
		p := gins(x86.AMOVQ, &z, &di)
		p.To.Scale = 1
		p.To.Offset = c - 8
	} else if c >= 4 {
		di.Op = gc.OINDREG
		z.Type = gc.Types[gc.TINT32]
//......... part of the code omitted here .........
Example 10: stackcopy
func stackcopy(n, ns *gc.Node, osrc, odst, w int64) {
	var noddi gc.Node
	gc.Nodreg(&noddi, gc.Types[gc.Tptr], x86.REG_DI)
	var nodsi gc.Node
	gc.Nodreg(&nodsi, gc.Types[gc.Tptr], x86.REG_SI)

	var nodl gc.Node
	var nodr gc.Node
	if n.Ullman >= ns.Ullman {
		gc.Agenr(n, &nodr, &nodsi)
		if ns.Op == gc.ONAME {
			gc.Gvardef(ns)
		}
		gc.Agenr(ns, &nodl, &noddi)
	} else {
		if ns.Op == gc.ONAME {
			gc.Gvardef(ns)
		}
		gc.Agenr(ns, &nodl, &noddi)
		gc.Agenr(n, &nodr, &nodsi)
	}

	if nodl.Val.U.Reg != x86.REG_DI {
		gmove(&nodl, &noddi)
	}
	if nodr.Val.U.Reg != x86.REG_SI {
		gmove(&nodr, &nodsi)
	}
	gc.Regfree(&nodl)
	gc.Regfree(&nodr)

	c := w % 8 // bytes
	q := w / 8 // quads

	var oldcx gc.Node
	var cx gc.Node
	savex(x86.REG_CX, &cx, &oldcx, nil, gc.Types[gc.TINT64])

	// if we are copying forward on the stack and
	// the src and dst overlap, then reverse direction
	if osrc < odst && odst < osrc+w {
		// reverse direction
		gins(x86.ASTD, nil, nil) // set direction flag
		if c > 0 {
			gconreg(addptr, w-1, x86.REG_SI)
			gconreg(addptr, w-1, x86.REG_DI)

			gconreg(movptr, c, x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSB, nil, nil) // MOVB *(SI)-,*(DI)-
		}

		if q > 0 {
			if c > 0 {
				gconreg(addptr, -7, x86.REG_SI)
				gconreg(addptr, -7, x86.REG_DI)
			} else {
				gconreg(addptr, w-8, x86.REG_SI)
				gconreg(addptr, w-8, x86.REG_DI)
			}

			gconreg(movptr, q, x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)-,*(DI)-
		}

		// we leave with the flag clear
		gins(x86.ACLD, nil, nil)
	} else {
		// normal direction
		if q > 128 || (gc.Nacl && q >= 4) {
			gconreg(movptr, q, x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
		} else if q >= 4 {
			p := gins(obj.ADUFFCOPY, nil, nil)
			p.To.Type = obj.TYPE_ADDR
			p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))

			// 14 and 128 = magic constants: see ../../runtime/asm_amd64.s
			p.To.Offset = 14 * (128 - q)
		} else if !gc.Nacl && c == 0 {
			// We don't need the MOVSQ side-effect of updating SI and DI,
			// and issuing a sequence of MOVQs directly is faster.
			nodsi.Op = gc.OINDREG
			noddi.Op = gc.OINDREG
			for q > 0 {
				gmove(&nodsi, &cx) // MOVQ x+(SI),CX
				gmove(&cx, &noddi) // MOVQ CX,x+(DI)
				nodsi.Xoffset += 8
				noddi.Xoffset += 8
				q--
			}
		} else {
			for q > 0 {
				gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
				q--
			}
		}
//......... part of the code omitted here .........
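The overlap test near the top of stackcopy decides the copy direction: if the destination starts inside the source range (osrc < odst < osrc+w), a forward copy would overwrite source bytes before they are read, so the code sets the direction flag and copies backwards. Below is a small standalone sketch of that decision, operating on a byte slice instead of emitting instructions:

package main

import "fmt"

func copyOverlapAware(mem []byte, osrc, odst, w int) {
	if osrc < odst && odst < osrc+w {
		// reverse direction: copy from the top down so each source byte is
		// read before the copy overwrites it
		for i := w - 1; i >= 0; i-- {
			mem[odst+i] = mem[osrc+i]
		}
		return
	}
	// normal direction
	for i := 0; i < w; i++ {
		mem[odst+i] = mem[osrc+i]
	}
}

func main() {
	mem := []byte("abcdefgh")
	copyOverlapAware(mem, 0, 2, 4) // overlapping: dst starts inside src
	fmt.Println(string(mem))       // "ababcdgh": bytes a-d arrive intact
}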
Example 11: ginscall
/*
 * generate:
 *	call f
 *	proc=-1	normal call but no return
 *	proc=0	normal call
 *	proc=1	goroutine run in new proc
 *	proc=2	defer call save away stack
 *	proc=3	normal call to C pointer (not Go func value)
 */
func ginscall(f *gc.Node, proc int) {
	if f.Type != nil {
		extra := int32(0)
		if proc == 1 || proc == 2 {
			extra = 2 * int32(gc.Widthptr)
		}
		gc.Setmaxarg(f.Type, extra)
	}

	switch proc {
	default:
		gc.Fatal("ginscall: bad proc %d", proc)

	case 0, // normal call
		-1: // normal call but no return
		if f.Op == gc.ONAME && f.Class == gc.PFUNC {
			if f == gc.Deferreturn {
				// Deferred calls will appear to be returning to
				// the CALL deferreturn(SB) that we are about to emit.
				// However, the stack trace code will show the line
				// of the instruction byte before the return PC.
				// To avoid that being an unrelated instruction,
				// insert an x86 NOP that we will have the right line number.
				// x86 NOP 0x90 is really XCHG AX, AX; use that description
				// because the NOP pseudo-instruction will be removed by
				// the linker.
				var reg gc.Node
				gc.Nodreg(&reg, gc.Types[gc.TINT], x86.REG_AX)
				gins(x86.AXCHGL, &reg, &reg)
			}

			p := gins(obj.ACALL, nil, f)
			gc.Afunclit(&p.To, f)
			if proc == -1 || gc.Noreturn(p) {
				gins(obj.AUNDEF, nil, nil)
			}
			break
		}

		var reg gc.Node
		gc.Nodreg(&reg, gc.Types[gc.Tptr], x86.REG_DX)
		var r1 gc.Node
		gc.Nodreg(&r1, gc.Types[gc.Tptr], x86.REG_BX)
		gmove(f, &reg)
		reg.Op = gc.OINDREG
		gmove(&reg, &r1)
		reg.Op = gc.OREGISTER
		gins(obj.ACALL, &reg, &r1)

	case 3: // normal call of c function pointer
		gins(obj.ACALL, nil, f)

	case 1, // call in new proc (go)
		2: // deferred call (defer)
		var stk gc.Node

		stk.Op = gc.OINDREG
		stk.Val.U.Reg = x86.REG_SP
		stk.Xoffset = 0

		// size of arguments at 0(SP)
		var con gc.Node
		gc.Nodconst(&con, gc.Types[gc.TINT32], int64(gc.Argsize(f.Type)))
		gins(x86.AMOVL, &con, &stk)

		// FuncVal* at 4(SP)
		stk.Xoffset = int64(gc.Widthptr)
		gins(x86.AMOVL, f, &stk)

		if proc == 1 {
			ginscall(gc.Newproc, 0)
		} else {
			ginscall(gc.Deferproc, 0)
		}

		if proc == 2 {
			var reg gc.Node
			gc.Nodreg(&reg, gc.Types[gc.TINT32], x86.REG_AX)
			gins(x86.ATESTL, &reg, &reg)
			p := gc.Gbranch(x86.AJEQ, nil, +1)
			cgen_ret(nil)
			gc.Patch(p, gc.Pc)
		}
	}
}
Example 12: cgen_callinter
/*
 * n is call to interface method.
 * generate res = n.
 */
func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
	i := n.Left
	if i.Op != gc.ODOTINTER {
		gc.Fatal("cgen_callinter: not ODOTINTER %v", gc.Oconv(int(i.Op), 0))
	}

	f := i.Right // field
	if f.Op != gc.ONAME {
		gc.Fatal("cgen_callinter: not ONAME %v", gc.Oconv(int(f.Op), 0))
	}

	i = i.Left // interface

	// Release res register during genlist and cgen,
	// which might have their own function calls.
	r := -1

	if res != nil && (res.Op == gc.OREGISTER || res.Op == gc.OINDREG) {
		r = int(res.Val.U.Reg)
		reg[r]--
	}

	if i.Addable == 0 {
		var tmpi gc.Node
		gc.Tempname(&tmpi, i.Type)
		cgen(i, &tmpi)
		i = &tmpi
	}

	gc.Genlist(n.List) // args
	if r >= 0 {
		reg[r]++
	}

	var nodr gc.Node
	regalloc(&nodr, gc.Types[gc.Tptr], res)
	var nodo gc.Node
	regalloc(&nodo, gc.Types[gc.Tptr], &nodr)
	nodo.Op = gc.OINDREG

	agen(i, &nodr) // REG = &inter

	var nodsp gc.Node
	gc.Nodindreg(&nodsp, gc.Types[gc.Tptr], arm.REGSP)
	nodsp.Xoffset = int64(gc.Widthptr)
	if proc != 0 {
		nodsp.Xoffset += 2 * int64(gc.Widthptr) // leave room for size & fn
	}
	nodo.Xoffset += int64(gc.Widthptr)
	cgen(&nodo, &nodsp) // {4 or 12}(SP) = 4(REG) -- i.data

	nodo.Xoffset -= int64(gc.Widthptr)
	cgen(&nodo, &nodr)      // REG = 0(REG) -- i.tab
	gc.Cgen_checknil(&nodr) // in case offset is huge

	nodo.Xoffset = n.Left.Xoffset + 3*int64(gc.Widthptr) + 8
	if proc == 0 {
		// plain call: use direct c function pointer - more efficient
		cgen(&nodo, &nodr) // REG = 20+offset(REG) -- i.tab->fun[f]
		nodr.Op = gc.OINDREG
		proc = 3
	} else {
		// go/defer. generate go func value.
		p := gins(arm.AMOVW, &nodo, &nodr)
		p.From.Type = obj.TYPE_ADDR // REG = &(20+offset(REG)) -- i.tab->fun[f]
	}

	nodr.Type = n.Left.Type
	ginscall(&nodr, proc)

	regfree(&nodr)
	regfree(&nodo)
}
Example 13: agenr
/*
 * allocate a register (reusing res if possible) and generate
 *	a = &n
 * The caller must call regfree(a).
 * The generated code checks that the result is not nil.
 */
func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
	if gc.Debug['g'] != 0 {
		gc.Dump("\nagenr-n", n)
	}

	nl := n.Left
	nr := n.Right

	switch n.Op {
	case gc.ODOT,
		gc.ODOTPTR,
		gc.OCALLFUNC,
		gc.OCALLMETH,
		gc.OCALLINTER:
		var n1 gc.Node
		igen(n, &n1, res)
		regalloc(a, gc.Types[gc.Tptr], &n1)
		agen(&n1, a)
		regfree(&n1)

	case gc.OIND:
		cgenr(n.Left, a, res)
		gc.Cgen_checknil(a)

	case gc.OINDEX:
		freelen := 0
		w := uint64(n.Type.Width)

		// Generate the non-addressable child first.
		var n3 gc.Node
		var nlen gc.Node
		var tmp gc.Node
		var n1 gc.Node
		if nr.Addable != 0 {
			goto irad
		}
		if nl.Addable != 0 {
			cgenr(nr, &n1, nil)
			if !gc.Isconst(nl, gc.CTSTR) {
				if gc.Isfixedarray(nl.Type) {
					agenr(nl, &n3, res)
				} else {
					igen(nl, &nlen, res)
					freelen = 1
					nlen.Type = gc.Types[gc.Tptr]
					nlen.Xoffset += int64(gc.Array_array)
					regalloc(&n3, gc.Types[gc.Tptr], res)
					gmove(&nlen, &n3)
					nlen.Type = gc.Types[gc.Simtype[gc.TUINT]]
					nlen.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
				}
			}

			goto index
		}

		gc.Tempname(&tmp, nr.Type)
		cgen(nr, &tmp)
		nr = &tmp

	irad:
		if !gc.Isconst(nl, gc.CTSTR) {
			if gc.Isfixedarray(nl.Type) {
				agenr(nl, &n3, res)
			} else {
				if nl.Addable == 0 {
					// igen will need an addressable node.
					var tmp2 gc.Node
					gc.Tempname(&tmp2, nl.Type)
					cgen(nl, &tmp2)
					nl = &tmp2
				}

				igen(nl, &nlen, res)
				freelen = 1
				nlen.Type = gc.Types[gc.Tptr]
				nlen.Xoffset += int64(gc.Array_array)
				regalloc(&n3, gc.Types[gc.Tptr], res)
				gmove(&nlen, &n3)
				nlen.Type = gc.Types[gc.Simtype[gc.TUINT]]
				nlen.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
			}
		}

		if !gc.Isconst(nr, gc.CTINT) {
			cgenr(nr, &n1, nil)
		}

		goto index

		// &a is in &n3 (allocated in res)
		// i is in &n1 (if not constant)
		// len(a) is in nlen (if needed)
//......... part of the code omitted here .........
Example 14: sgen
//......... part of the code omitted here .........
	// the src and dst overlap, then reverse direction
	if osrc < odst && odst < osrc+w {
		// reverse direction
		gins(x86.ASTD, nil, nil) // set direction flag
		if c > 0 {
			gconreg(addptr, w-1, x86.REG_SI)
			gconreg(addptr, w-1, x86.REG_DI)

			gconreg(movptr, c, x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSB, nil, nil) // MOVB *(SI)-,*(DI)-
		}

		if q > 0 {
			if c > 0 {
				gconreg(addptr, -7, x86.REG_SI)
				gconreg(addptr, -7, x86.REG_DI)
			} else {
				gconreg(addptr, w-8, x86.REG_SI)
				gconreg(addptr, w-8, x86.REG_DI)
			}

			gconreg(movptr, q, x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)-,*(DI)-
		}

		// we leave with the flag clear
		gins(x86.ACLD, nil, nil)
	} else {
		// normal direction
		if q > 128 || (gc.Nacl && q >= 4) {
			gconreg(movptr, q, x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
		} else if q >= 4 {
			p := gins(obj.ADUFFCOPY, nil, nil)
			p.To.Type = obj.TYPE_ADDR
			p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))

			// 14 and 128 = magic constants: see ../../runtime/asm_amd64.s
			p.To.Offset = 14 * (128 - q)
		} else if !gc.Nacl && c == 0 {
			// We don't need the MOVSQ side-effect of updating SI and DI,
			// and issuing a sequence of MOVQs directly is faster.
			nodsi.Op = gc.OINDREG
			noddi.Op = gc.OINDREG
			for q > 0 {
				gmove(&nodsi, &cx) // MOVQ x+(SI),CX
				gmove(&cx, &noddi) // MOVQ CX,x+(DI)
				nodsi.Xoffset += 8
				noddi.Xoffset += 8
				q--
			}
		} else {
			for q > 0 {
				gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
				q--
			}
		}

		// copy the remaining c bytes
		if w < 4 || c <= 1 || (odst < osrc && osrc < odst+w) {
			for c > 0 {
				gins(x86.AMOVSB, nil, nil) // MOVB *(SI)+,*(DI)+
				c--
			}
		} else if w < 8 || c <= 4 {
			nodsi.Op = gc.OINDREG
			noddi.Op = gc.OINDREG
			cx.Type = gc.Types[gc.TINT32]
			nodsi.Type = gc.Types[gc.TINT32]
			noddi.Type = gc.Types[gc.TINT32]
			if c > 4 {
				nodsi.Xoffset = 0
				noddi.Xoffset = 0
				gmove(&nodsi, &cx)
				gmove(&cx, &noddi)
			}

			nodsi.Xoffset = c - 4
			noddi.Xoffset = c - 4
			gmove(&nodsi, &cx)
			gmove(&cx, &noddi)
		} else {
			nodsi.Op = gc.OINDREG
			noddi.Op = gc.OINDREG
			cx.Type = gc.Types[gc.TINT64]
			nodsi.Type = gc.Types[gc.TINT64]
			noddi.Type = gc.Types[gc.TINT64]
			nodsi.Xoffset = c - 8
			noddi.Xoffset = c - 8
			gmove(&nodsi, &cx)
			gmove(&cx, &noddi)
		}
	}

	restx(&cx, &oldcx)
}
Example 15: cgen
//......... part of the code omitted here .........
	case gc.OLEN:
		if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
			// map and chan have len in the first int-sized word.
			// a zero pointer means zero length
			var n1 gc.Node
			regalloc(&n1, gc.Types[gc.Tptr], res)
			cgen(nl, &n1)

			var n2 gc.Node
			gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
			gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
			p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)

			n2 = n1
			n2.Op = gc.OINDREG
			n2.Type = gc.Types[gc.Simtype[gc.TINT]]
			gmove(&n2, &n1)

			gc.Patch(p1, gc.Pc)

			gmove(&n1, res)
			regfree(&n1)
			break
		}

		if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
			// both slice and string have len one pointer into the struct.
			// a zero pointer means zero length
			var n1 gc.Node
			igen(nl, &n1, res)
			n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
			n1.Xoffset += int64(gc.Array_nel)
			gmove(&n1, res)
			regfree(&n1)
			break
		}

		gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))

	case gc.OCAP:
		if gc.Istype(nl.Type, gc.TCHAN) {
			// chan has cap in the second int-sized word.
			// a zero pointer means zero length
			var n1 gc.Node
			regalloc(&n1, gc.Types[gc.Tptr], res)
			cgen(nl, &n1)

			var n2 gc.Node
			gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
			gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
			p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)

			n2 = n1
			n2.Op = gc.OINDREG
			n2.Xoffset = int64(gc.Widthint)
			n2.Type = gc.Types[gc.Simtype[gc.TINT]]
			gmove(&n2, &n1)

			gc.Patch(p1, gc.Pc)

			gmove(&n1, res)
			regfree(&n1)
			break