This article collects typical usage examples of the Dump function from the Go package cmd/internal/gc. If you are wondering what the Dump function does, how to call it, or what real calls to it look like, the selected code examples below may help.
A total of 15 code examples of the Dump function are shown below, sorted by popularity by default.
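Every example that follows uses gc.Dump the same way: it writes a labelled, human-readable dump of a compiler syntax-tree node (*gc.Node), and the call is almost always guarded by the compiler's 'g' debug flag. A minimal sketch of that pattern, where the wrapper function and the node label are purely illustrative (it assumes the cmd/internal/gc import available inside the compiler backends):

func debugDumpExample(n *gc.Node) {
	// Only dump when the compiler was run with the 'g' debug flag set;
	// gc.Dump prints the given label followed by the node tree.
	if gc.Debug['g'] != 0 {
		gc.Dump("example-n", n)
	}
}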
Example 1: cgenr
/*
* allocate a register (reusing res if possible) and generate
* a = n
* The caller must call regfree(a).
*/
func cgenr(n *gc.Node, a *gc.Node, res *gc.Node) {
if gc.Debug['g'] != 0 {
gc.Dump("cgenr-n", n)
}
if gc.Isfat(n.Type) {
gc.Fatal("cgenr on fat node")
}
if n.Addable != 0 {
regalloc(a, n.Type, res)
gmove(n, a)
return
}
switch n.Op {
case gc.ONAME,
gc.ODOT,
gc.ODOTPTR,
gc.OINDEX,
gc.OCALLFUNC,
gc.OCALLMETH,
gc.OCALLINTER:
var n1 gc.Node
igen(n, &n1, res)
regalloc(a, gc.Types[gc.Tptr], &n1)
gmove(&n1, a)
regfree(&n1)
default:
regalloc(a, n.Type, res)
cgen(n, a)
}
}
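The comment above also spells out the caller-side contract: whoever calls cgenr must release the register with regfree once it is done. A hedged sketch of a typical call site, using the same backend helpers that appear in these examples (the node n and destination res are illustrative):

// Hypothetical caller of cgenr: generate n into a freshly allocated
// register, move the result to res, then free the register as required.
var r gc.Node
cgenr(n, &r, nil) // r now names a register holding the value of n
gmove(&r, res)    // use the register
regfree(&r)       // the caller must call regfree, per the contract above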
Example 2: bgen
/*
* branch gen
* if(n == true) goto to;
*/
func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
if gc.Debug['g'] != 0 {
gc.Dump("\nbgen", n)
}
if n == nil {
n = gc.Nodbool(true)
}
if n.Ninit != nil {
gc.Genlist(n.Ninit)
}
if n.Type == nil {
gc.Convlit(&n, gc.Types[gc.TBOOL])
if n.Type == nil {
return
}
}
et := int(n.Type.Etype)
if et != gc.TBOOL {
gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0))
gc.Patch(gins(obj.AEND, nil, nil), to)
return
}
for n.Op == gc.OCONVNOP {
n = n.Left
if n.Ninit != nil {
gc.Genlist(n.Ninit)
}
}
nl := n.Left
var nr *gc.Node
if nl != nil && gc.Isfloat[nl.Type.Etype] {
bgen_float(n, bool2int(true_), likely, to)
return
}
switch n.Op {
default:
goto def
// need to ask if it is bool?
case gc.OLITERAL:
if !true_ == (n.Val.U.Bval == 0) {
gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
}
return
case gc.ONAME:
if n.Addable == 0 {
goto def
}
var n1 gc.Node
gc.Nodconst(&n1, n.Type, 0)
gins(optoas(gc.OCMP, n.Type), n, &n1)
a := x86.AJNE
if !true_ {
a = x86.AJEQ
}
gc.Patch(gc.Gbranch(a, n.Type, likely), to)
return
case gc.OANDAND,
gc.OOROR:
if (n.Op == gc.OANDAND) == true_ {
p1 := gc.Gbranch(obj.AJMP, nil, 0)
p2 := gc.Gbranch(obj.AJMP, nil, 0)
gc.Patch(p1, gc.Pc)
bgen(n.Left, !true_, -likely, p2)
bgen(n.Right, !true_, -likely, p2)
p1 = gc.Gbranch(obj.AJMP, nil, 0)
gc.Patch(p1, to)
gc.Patch(p2, gc.Pc)
} else {
bgen(n.Left, true_, likely, to)
bgen(n.Right, true_, likely, to)
}
return
case gc.OEQ,
gc.ONE,
gc.OLT,
gc.OGT,
gc.OLE,
gc.OGE:
nr = n.Right
if nr == nil || nr.Type == nil {
return
}
fallthrough
// ... part of the code omitted here ...
Example 3: clearfat
func clearfat(nl *gc.Node) {
/* clear a fat object */
if gc.Debug['g'] != 0 {
gc.Dump("\nclearfat", nl)
}
w := uint32(nl.Type.Width)
// Avoid taking the address for simple enough types.
if gc.Componentgen(nil, nl) {
return
}
c := w % 4 // bytes
q := w / 4 // quads
if q < 4 {
// Write sequence of MOV 0, off(base) instead of using STOSL.
// The hope is that although the code will be slightly longer,
// the MOVs will have no dependencies and pipeline better
// than the unrolled STOSL loop.
// NOTE: Must use agen, not igen, so that optimizer sees address
// being taken. We are not writing on field boundaries.
var n1 gc.Node
gc.Regalloc(&n1, gc.Types[gc.Tptr], nil)
gc.Agen(nl, &n1)
n1.Op = gc.OINDREG
var z gc.Node
gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
for {
tmp14 := q
q--
if tmp14 <= 0 {
break
}
n1.Type = z.Type
gins(x86.AMOVL, &z, &n1)
n1.Xoffset += 4
}
gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
for {
tmp15 := c
c--
if tmp15 <= 0 {
break
}
n1.Type = z.Type
gins(x86.AMOVB, &z, &n1)
n1.Xoffset++
}
gc.Regfree(&n1)
return
}
var n1 gc.Node
gc.Nodreg(&n1, gc.Types[gc.Tptr], x86.REG_DI)
gc.Agen(nl, &n1)
gconreg(x86.AMOVL, 0, x86.REG_AX)
if q > 128 || (q >= 4 && gc.Nacl) {
gconreg(x86.AMOVL, int64(q), x86.REG_CX)
gins(x86.AREP, nil, nil) // repeat
gins(x86.ASTOSL, nil, nil) // STOL AL,*(DI)+
} else if q >= 4 {
p := gins(obj.ADUFFZERO, nil, nil)
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
// 1 and 128 = magic constants: see ../../runtime/asm_386.s
p.To.Offset = 1 * (128 - int64(q))
} else {
for q > 0 {
gins(x86.ASTOSL, nil, nil) // STOL AL,*(DI)+
q--
}
}
for c > 0 {
gins(x86.ASTOSB, nil, nil) // STOB AL,*(DI)+
c--
}
}
Example 4: rawgins
/*
* generate one instruction:
* as f, t
*/
func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
// TODO(austin): Add self-move test like in 6g (but be careful
// of truncation moves)
p := gc.Prog(as)
gc.Naddr(&p.From, f)
gc.Naddr(&p.To, t)
switch as {
case obj.ACALL:
if p.To.Type == obj.TYPE_REG && p.To.Reg != ppc64.REG_CTR {
// Allow front end to emit CALL REG, and rewrite into MOV REG, CTR; CALL CTR.
pp := gc.Prog(as)
pp.From = p.From
pp.To.Type = obj.TYPE_REG
pp.To.Reg = ppc64.REG_CTR
p.As = ppc64.AMOVD
p.From = p.To
p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REG_CTR
if gc.Debug['g'] != 0 {
fmt.Printf("%v\n", p)
fmt.Printf("%v\n", pp)
}
return pp
}
// Bad things the front end has done to us. Crash to find call stack.
case ppc64.AAND, ppc64.AMULLD:
if p.From.Type == obj.TYPE_CONST {
gc.Debug['h'] = 1
gc.Fatal("bad inst: %v", p)
}
case ppc64.ACMP, ppc64.ACMPU:
if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM {
gc.Debug['h'] = 1
gc.Fatal("bad inst: %v", p)
}
}
if gc.Debug['g'] != 0 {
fmt.Printf("%v\n", p)
}
w := int32(0)
switch as {
case ppc64.AMOVB,
ppc64.AMOVBU,
ppc64.AMOVBZ,
ppc64.AMOVBZU:
w = 1
case ppc64.AMOVH,
ppc64.AMOVHU,
ppc64.AMOVHZ,
ppc64.AMOVHZU:
w = 2
case ppc64.AMOVW,
ppc64.AMOVWU,
ppc64.AMOVWZ,
ppc64.AMOVWZU:
w = 4
case ppc64.AMOVD,
ppc64.AMOVDU:
if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_ADDR {
break
}
w = 8
}
if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Type != obj.TYPE_REG && p.To.Width > int64(w))) {
gc.Dump("f", f)
gc.Dump("t", t)
gc.Fatal("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
}
return p
}
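Besides the gc.Debug['g'] tracing seen in the other examples, rawgins also uses gc.Dump as an error diagnostic: just before aborting on an operand-width mismatch it dumps both operands and then calls gc.Fatal. A condensed sketch of that second pattern (the badWidth condition stands in for the full width check above):

// Hypothetical error path, mirroring the end of rawgins: dump both
// operands so the offending nodes are visible, then crash with gc.Fatal.
if badWidth {
	gc.Dump("f", f)
	gc.Dump("t", t)
	gc.Fatal("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
}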
Example 5: agen
/*
* generate:
* res = &n;
* The generated code checks that the result is not nil.
*/
func agen(n *gc.Node, res *gc.Node) {
if gc.Debug['g'] != 0 {
gc.Dump("\nagen-res", res)
gc.Dump("agen-r", n)
}
if n == nil || n.Type == nil {
return
}
for n.Op == gc.OCONVNOP {
n = n.Left
}
if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
// Use of a nil interface or nil slice.
// Create a temporary we can take the address of and read.
// The generated code is just going to panic, so it need not
// be terribly efficient. See issue 3670.
var n1 gc.Node
gc.Tempname(&n1, n.Type)
gc.Gvardef(&n1)
clearfat(&n1)
var n2 gc.Node
regalloc(&n2, gc.Types[gc.Tptr], res)
var n3 gc.Node
n3.Op = gc.OADDR
n3.Left = &n1
gins(ppc64.AMOVD, &n3, &n2)
gmove(&n2, res)
regfree(&n2)
return
}
if n.Addable != 0 {
var n1 gc.Node
n1.Op = gc.OADDR
n1.Left = n
var n2 gc.Node
regalloc(&n2, gc.Types[gc.Tptr], res)
gins(ppc64.AMOVD, &n1, &n2)
gmove(&n2, res)
regfree(&n2)
return
}
nl := n.Left
switch n.Op {
default:
gc.Fatal("agen: unknown op %v", gc.Nconv(n, obj.FmtShort|obj.FmtSign))
// TODO(minux): 5g has this: Release res so that it is available for cgen_call.
// Pick it up again after the call for OCALLMETH and OCALLFUNC.
case gc.OCALLMETH:
gc.Cgen_callmeth(n, 0)
cgen_aret(n, res)
case gc.OCALLINTER:
cgen_callinter(n, res, 0)
cgen_aret(n, res)
case gc.OCALLFUNC:
cgen_call(n, 0)
cgen_aret(n, res)
case gc.OSLICE,
gc.OSLICEARR,
gc.OSLICESTR,
gc.OSLICE3,
gc.OSLICE3ARR:
var n1 gc.Node
gc.Tempname(&n1, n.Type)
gc.Cgen_slice(n, &n1)
agen(&n1, res)
case gc.OEFACE:
var n1 gc.Node
gc.Tempname(&n1, n.Type)
gc.Cgen_eface(n, &n1)
agen(&n1, res)
case gc.OINDEX:
var n1 gc.Node
agenr(n, &n1, res)
gmove(&n1, res)
regfree(&n1)
// should only get here with names in this func.
case gc.ONAME:
if n.Funcdepth > 0 && n.Funcdepth != gc.Funcdepth {
gc.Dump("bad agen", n)
gc.Fatal("agen: bad ONAME funcdepth %d != %d", n.Funcdepth, gc.Funcdepth)
// ... part of the code omitted here ...
Example 6: igen
/*
* generate:
* newreg = &n;
* res = newreg
*
* on exit, a has been changed to be *newreg.
* caller must regfree(a).
* The generated code checks that the result is not *nil.
*/
func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
if gc.Debug['g'] != 0 {
gc.Dump("\nigen-n", n)
}
switch n.Op {
case gc.ONAME:
if (n.Class&gc.PHEAP != 0) || n.Class == gc.PPARAMREF {
break
}
*a = *n
return
// Increase the refcount of the register so that igen's caller
// has to call regfree.
case gc.OINDREG:
if n.Val.U.Reg != ppc64.REGSP {
reg[n.Val.U.Reg]++
}
*a = *n
return
case gc.ODOT:
igen(n.Left, a, res)
a.Xoffset += n.Xoffset
a.Type = n.Type
fixlargeoffset(a)
return
case gc.ODOTPTR:
cgenr(n.Left, a, res)
gc.Cgen_checknil(a)
a.Op = gc.OINDREG
a.Xoffset += n.Xoffset
a.Type = n.Type
fixlargeoffset(a)
return
case gc.OCALLFUNC,
gc.OCALLMETH,
gc.OCALLINTER:
switch n.Op {
case gc.OCALLFUNC:
cgen_call(n, 0)
case gc.OCALLMETH:
gc.Cgen_callmeth(n, 0)
case gc.OCALLINTER:
cgen_callinter(n, nil, 0)
}
var flist gc.Iter
fp := gc.Structfirst(&flist, gc.Getoutarg(n.Left.Type))
*a = gc.Node{}
a.Op = gc.OINDREG
a.Val.U.Reg = ppc64.REGSP
a.Addable = 1
a.Xoffset = fp.Width + int64(gc.Widthptr) // +widthptr: saved lr at 0(SP)
a.Type = n.Type
return
// Index of fixed-size array by constant can
// put the offset in the addressing.
// Could do the same for slice except that we need
// to use the real index for the bounds checking.
case gc.OINDEX:
if gc.Isfixedarray(n.Left.Type) || (gc.Isptr[n.Left.Type.Etype] && gc.Isfixedarray(n.Left.Left.Type)) {
if gc.Isconst(n.Right, gc.CTINT) {
// Compute &a.
if !gc.Isptr[n.Left.Type.Etype] {
igen(n.Left, a, res)
} else {
var n1 gc.Node
igen(n.Left, &n1, res)
gc.Cgen_checknil(&n1)
regalloc(a, gc.Types[gc.Tptr], res)
gmove(&n1, a)
regfree(&n1)
a.Op = gc.OINDREG
}
// Compute &a[i] as &a + i*width.
a.Type = n.Type
a.Xoffset += gc.Mpgetfix(n.Right.Val.U.Xval) * n.Type.Width
fixlargeoffset(a)
return
}
}
}
// ... part of the code omitted here ...
Example 7: sgen
/*
* block copy:
* memmove(&ns, &n, w);
*/
func sgen(n *gc.Node, ns *gc.Node, w int64) {
var res *gc.Node = ns
if gc.Debug['g'] != 0 {
fmt.Printf("\nsgen w=%d\n", w)
gc.Dump("r", n)
gc.Dump("res", ns)
}
if n.Ullman >= gc.UINF && ns.Ullman >= gc.UINF {
gc.Fatal("sgen UINF")
}
if w < 0 {
gc.Fatal("sgen copy %d", w)
}
// If copying .args, that's all the results, so record definition sites
// for them for the liveness analysis.
if ns.Op == gc.ONAME && ns.Sym.Name == ".args" {
for l := gc.Curfn.Dcl; l != nil; l = l.Next {
if l.N.Class == gc.PPARAMOUT {
gc.Gvardef(l.N)
}
}
}
// Avoid taking the address for simple enough types.
//if(componentgen(n, ns))
// return;
if w == 0 {
// evaluate side effects only.
var dst gc.Node
regalloc(&dst, gc.Types[gc.Tptr], nil)
agen(res, &dst)
agen(n, &dst)
regfree(&dst)
return
}
// determine alignment.
// want to avoid unaligned access, so have to use
// smaller operations for less aligned types.
// for example moving [4]byte must use 4 MOVB not 1 MOVW.
align := int(n.Type.Align)
var op int
switch align {
default:
gc.Fatal("sgen: invalid alignment %d for %v", align, gc.Tconv(n.Type, 0))
case 1:
op = ppc64.AMOVBU
case 2:
op = ppc64.AMOVHU
case 4:
op = ppc64.AMOVWZU // there is no lwau, only lwaux
case 8:
op = ppc64.AMOVDU
}
if w%int64(align) != 0 {
gc.Fatal("sgen: unaligned size %d (align=%d) for %v", w, align, gc.Tconv(n.Type, 0))
}
c := int32(w / int64(align))
// offset on the stack
osrc := int32(stkof(n))
odst := int32(stkof(res))
if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) {
// osrc and odst both on stack, and at least one is in
// an unknown position. Could generate code to test
// for forward/backward copy, but instead just copy
// to a temporary location first.
var tmp gc.Node
gc.Tempname(&tmp, n.Type)
sgen(n, &tmp, w)
sgen(&tmp, res, w)
return
}
if osrc%int32(align) != 0 || odst%int32(align) != 0 {
gc.Fatal("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
}
// if we are copying forward on the stack and
// the src and dst overlap, then reverse direction
dir := align
if osrc < odst && int64(odst) < int64(osrc)+w {
// ... part of the code omitted here ...
Example 8: anyregalloc
// ... part of the code omitted here ...
if gc.Debug['g'] != 0 {
fmt.Printf("%v\n", p)
}
w := int32(0)
switch as {
case ppc64.AMOVB,
ppc64.AMOVBU,
ppc64.AMOVBZ,
ppc64.AMOVBZU:
w = 1
case ppc64.AMOVH,
ppc64.AMOVHU,
ppc64.AMOVHZ,
ppc64.AMOVHZU:
w = 2
case ppc64.AMOVW,
ppc64.AMOVWU,
ppc64.AMOVWZ,
ppc64.AMOVWZU:
w = 4
case ppc64.AMOVD,
ppc64.AMOVDU:
if af.Type == obj.TYPE_CONST || af.Type == obj.TYPE_ADDR {
break
}
w = 8
}
if w != 0 && ((f != nil && af.Width < int64(w)) || (t != nil && at.Type != obj.TYPE_REG && at.Width > int64(w))) {
gc.Dump("f", f)
gc.Dump("t", t)
gc.Fatal("bad width: %v (%d, %d)\n", p, af.Width, at.Width)
}
return p
}
func fixlargeoffset(n *gc.Node) {
if n == nil {
return
}
if n.Op != gc.OINDREG {
return
}
if n.Val.U.Reg == ppc64.REGSP { // stack offset cannot be large
return
}
if n.Xoffset != int64(int32(n.Xoffset)) {
// TODO(minux): offset too large, move into R31 and add to R31 instead.
// this is used only in test/fixedbugs/issue6036.go.
gc.Fatal("offset too large: %v", gc.Nconv(n, 0))
a := gc.Node(*n)
a.Op = gc.OREGISTER
a.Type = gc.Types[gc.Tptr]
a.Xoffset = 0
gc.Cgen_checknil(&a)
ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, &a)
n.Xoffset = 0
}
}
Example 9: agenr
/*
* generate:
* newreg = &n;
*
* caller must regfree(a).
* The generated code checks that the result is not nil.
*/
func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
if gc.Debug['g'] != 0 {
gc.Dump("agenr-n", n)
}
nl := n.Left
nr := n.Right
switch n.Op {
case gc.ODOT,
gc.ODOTPTR,
gc.OCALLFUNC,
gc.OCALLMETH,
gc.OCALLINTER:
var n1 gc.Node
igen(n, &n1, res)
regalloc(a, gc.Types[gc.Tptr], &n1)
agen(&n1, a)
regfree(&n1)
case gc.OIND:
cgenr(n.Left, a, res)
gc.Cgen_checknil(a)
case gc.OINDEX:
var p2 *obj.Prog // to be patched to panicindex.
w := uint32(n.Type.Width)
bounded := gc.Debug['B'] != 0 || n.Bounded
var n1 gc.Node
var n3 gc.Node
if nr.Addable != 0 {
var tmp gc.Node
if !gc.Isconst(nr, gc.CTINT) {
gc.Tempname(&tmp, gc.Types[gc.TINT32])
}
if !gc.Isconst(nl, gc.CTSTR) {
agenr(nl, &n3, res)
}
if !gc.Isconst(nr, gc.CTINT) {
p2 = cgenindex(nr, &tmp, bounded)
regalloc(&n1, tmp.Type, nil)
gmove(&tmp, &n1)
}
} else if nl.Addable != 0 {
if !gc.Isconst(nr, gc.CTINT) {
var tmp gc.Node
gc.Tempname(&tmp, gc.Types[gc.TINT32])
p2 = cgenindex(nr, &tmp, bounded)
regalloc(&n1, tmp.Type, nil)
gmove(&tmp, &n1)
}
if !gc.Isconst(nl, gc.CTSTR) {
agenr(nl, &n3, res)
}
} else {
var tmp gc.Node
gc.Tempname(&tmp, gc.Types[gc.TINT32])
p2 = cgenindex(nr, &tmp, bounded)
nr = &tmp
if !gc.Isconst(nl, gc.CTSTR) {
agenr(nl, &n3, res)
}
regalloc(&n1, tmp.Type, nil)
gins(optoas(gc.OAS, tmp.Type), &tmp, &n1)
}
// &a is in &n3 (allocated in res)
// i is in &n1 (if not constant)
// w is width
// constant index
if gc.Isconst(nr, gc.CTINT) {
if gc.Isconst(nl, gc.CTSTR) {
gc.Fatal("constant string constant index")
}
v := uint64(gc.Mpgetfix(nr.Val.U.Xval))
var n2 gc.Node
if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
if gc.Debug['B'] == 0 && !n.Bounded {
n1 = n3
n1.Op = gc.OINDREG
n1.Type = gc.Types[gc.Tptr]
n1.Xoffset = int64(gc.Array_nel)
var n4 gc.Node
regalloc(&n4, n1.Type, nil)
gmove(&n1, &n4)
gc.Nodconst(&n2, gc.Types[gc.TUINT32], int64(v))
gcmp(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &n4, &n2)
regfree(&n4)
p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT32]), nil, +1)
ginscall(gc.Panicindex, 0)
gc.Patch(p1, gc.Pc)
// ... part of the code omitted here ...
Example 10: cgen
/*
* generate:
* res = n;
* simplifies and calls gmove.
*/
func cgen(n *gc.Node, res *gc.Node) {
if gc.Debug['g'] != 0 {
gc.Dump("\ncgen-n", n)
gc.Dump("cgen-res", res)
}
if n == nil || n.Type == nil {
return
}
if res == nil || res.Type == nil {
gc.Fatal("cgen: res nil")
}
switch n.Op {
case gc.OSLICE,
gc.OSLICEARR,
gc.OSLICESTR,
gc.OSLICE3,
gc.OSLICE3ARR:
if res.Op != gc.ONAME || res.Addable == 0 {
var n1 gc.Node
gc.Tempname(&n1, n.Type)
gc.Cgen_slice(n, &n1)
cgen(&n1, res)
} else {
gc.Cgen_slice(n, res)
}
return
case gc.OEFACE:
if res.Op != gc.ONAME || res.Addable == 0 {
var n1 gc.Node
gc.Tempname(&n1, n.Type)
gc.Cgen_eface(n, &n1)
cgen(&n1, res)
} else {
gc.Cgen_eface(n, res)
}
return
}
for n.Op == gc.OCONVNOP {
n = n.Left
}
if n.Ullman >= gc.UINF {
if n.Op == gc.OINDREG {
gc.Fatal("cgen: this is going to misscompile")
}
if res.Ullman >= gc.UINF {
var n1 gc.Node
gc.Tempname(&n1, n.Type)
cgen(n, &n1)
cgen(&n1, res)
return
}
}
if gc.Isfat(n.Type) {
if n.Type.Width < 0 {
gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
}
sgen(n, res, n.Type.Width)
return
}
// update addressability for string, slice
// can't do in walk because n->left->addable
// changes if n->left is an escaping local variable.
switch n.Op {
case gc.OSPTR,
gc.OLEN:
if gc.Isslice(n.Left.Type) || gc.Istype(n.Left.Type, gc.TSTRING) {
n.Addable = n.Left.Addable
}
case gc.OCAP:
if gc.Isslice(n.Left.Type) {
n.Addable = n.Left.Addable
}
case gc.OITAB:
n.Addable = n.Left.Addable
}
// if both are addressable, move
if n.Addable != 0 && res.Addable != 0 {
if gc.Is64(n.Type) || gc.Is64(res.Type) || n.Op == gc.OREGISTER || res.Op == gc.OREGISTER || gc.Iscomplex[n.Type.Etype] || gc.Iscomplex[res.Type.Etype] {
gmove(n, res)
} else {
var n1 gc.Node
regalloc(&n1, n.Type, nil)
gmove(n, &n1)
cgen(&n1, res)
// ... part of the code omitted here ...
Example 11: cgen64
/*
* attempt to generate 64-bit
* res = n
* return 1 on success, 0 if op not handled.
*/
func cgen64(n *gc.Node, res *gc.Node) {
if res.Op != gc.OINDREG && res.Op != gc.ONAME {
gc.Dump("n", n)
gc.Dump("res", res)
gc.Fatal("cgen64 %v of %v", gc.Oconv(int(n.Op), 0), gc.Oconv(int(res.Op), 0))
}
l := n.Left
var t1 gc.Node
if l.Addable == 0 {
gc.Tempname(&t1, l.Type)
gc.Cgen(l, &t1)
l = &t1
}
var hi1 gc.Node
var lo1 gc.Node
split64(l, &lo1, &hi1)
switch n.Op {
default:
gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0))
case gc.OMINUS:
var lo2 gc.Node
var hi2 gc.Node
split64(res, &lo2, &hi2)
gc.Regalloc(&t1, lo1.Type, nil)
var al gc.Node
gc.Regalloc(&al, lo1.Type, nil)
var ah gc.Node
gc.Regalloc(&ah, hi1.Type, nil)
gins(arm.AMOVW, &lo1, &al)
gins(arm.AMOVW, &hi1, &ah)
gmove(ncon(0), &t1)
p1 := gins(arm.ASUB, &al, &t1)
p1.Scond |= arm.C_SBIT
gins(arm.AMOVW, &t1, &lo2)
gmove(ncon(0), &t1)
gins(arm.ASBC, &ah, &t1)
gins(arm.AMOVW, &t1, &hi2)
gc.Regfree(&t1)
gc.Regfree(&al)
gc.Regfree(&ah)
splitclean()
splitclean()
return
case gc.OCOM:
gc.Regalloc(&t1, lo1.Type, nil)
gmove(ncon(^uint32(0)), &t1)
var lo2 gc.Node
var hi2 gc.Node
split64(res, &lo2, &hi2)
var n1 gc.Node
gc.Regalloc(&n1, lo1.Type, nil)
gins(arm.AMOVW, &lo1, &n1)
gins(arm.AEOR, &t1, &n1)
gins(arm.AMOVW, &n1, &lo2)
gins(arm.AMOVW, &hi1, &n1)
gins(arm.AEOR, &t1, &n1)
gins(arm.AMOVW, &n1, &hi2)
gc.Regfree(&t1)
gc.Regfree(&n1)
splitclean()
splitclean()
return
// binary operators.
// common setup below.
case gc.OADD,
gc.OSUB,
gc.OMUL,
gc.OLSH,
gc.ORSH,
gc.OAND,
gc.OOR,
gc.OXOR,
gc.OLROT:
break
}
// setup for binary operators
r := n.Right
if r != nil && r.Addable == 0 {
var t2 gc.Node
// ... part of the code omitted here ...
Example 12: gins
/*
* generate one instruction:
* as f, t
*/
func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
// Node nod;
// if(f != N && f->op == OINDEX) {
// gc.Regalloc(&nod, ®node, Z);
// v = constnode.vconst;
// gc.Cgen(f->right, &nod);
// constnode.vconst = v;
// idx.reg = nod.reg;
// gc.Regfree(&nod);
// }
// if(t != N && t->op == OINDEX) {
// gc.Regalloc(&nod, ®node, Z);
// v = constnode.vconst;
// gc.Cgen(t->right, &nod);
// constnode.vconst = v;
// idx.reg = nod.reg;
// gc.Regfree(&nod);
// }
if f != nil && f.Op == gc.OADDR && (as == x86.AMOVL || as == x86.AMOVQ) {
// Turn MOVL $xxx into LEAL xxx.
// These should be equivalent but most of the backend
// only expects to see LEAL, because that's what we had
// historically generated. Various hidden assumptions are baked in by now.
if as == x86.AMOVL {
as = x86.ALEAL
} else {
as = x86.ALEAQ
}
f = f.Left
}
switch as {
case x86.AMOVB,
x86.AMOVW,
x86.AMOVL,
x86.AMOVQ,
x86.AMOVSS,
x86.AMOVSD:
if f != nil && t != nil && samaddr(f, t) {
return nil
}
case x86.ALEAQ:
if f != nil && gc.Isconst(f, gc.CTNIL) {
gc.Fatal("gins LEAQ nil %v", f.Type)
}
}
p := gc.Prog(as)
gc.Naddr(&p.From, f)
gc.Naddr(&p.To, t)
if gc.Debug['g'] != 0 {
fmt.Printf("%v\n", p)
}
w := int32(0)
switch as {
case x86.AMOVB:
w = 1
case x86.AMOVW:
w = 2
case x86.AMOVL:
w = 4
case x86.AMOVQ:
w = 8
}
if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Width > int64(w))) {
gc.Dump("f", f)
gc.Dump("t", t)
gc.Fatal("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
}
if p.To.Type == obj.TYPE_ADDR && w > 0 {
gc.Fatal("bad use of addr: %v", p)
}
return p
}
Example 13: gins
/*
* generate one instruction:
* as f, t
*/
func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
if as == x86.AFMOVF && f != nil && f.Op == gc.OREGISTER && t != nil && t.Op == gc.OREGISTER {
gc.Fatal("gins MOVF reg, reg")
}
if as == x86.ACVTSD2SS && f != nil && f.Op == gc.OLITERAL {
gc.Fatal("gins CVTSD2SS const")
}
if as == x86.AMOVSD && t != nil && t.Op == gc.OREGISTER && t.Reg == x86.REG_F0 {
gc.Fatal("gins MOVSD into F0")
}
if as == x86.AMOVL && f != nil && f.Op == gc.OADDR && f.Left.Op == gc.ONAME && f.Left.Class != gc.PEXTERN && f.Left.Class != gc.PFUNC {
// Turn MOVL $xxx(FP/SP) into LEAL xxx.
// These should be equivalent but most of the backend
// only expects to see LEAL, because that's what we had
// historically generated. Various hidden assumptions are baked in by now.
as = x86.ALEAL
f = f.Left
}
switch as {
case x86.AMOVB,
x86.AMOVW,
x86.AMOVL:
if f != nil && t != nil && samaddr(f, t) {
return nil
}
case x86.ALEAL:
if f != nil && gc.Isconst(f, gc.CTNIL) {
gc.Fatal("gins LEAL nil %v", gc.Tconv(f.Type, 0))
}
}
p := gc.Prog(as)
gc.Naddr(&p.From, f)
gc.Naddr(&p.To, t)
if gc.Debug['g'] != 0 {
fmt.Printf("%v\n", p)
}
w := 0
switch as {
case x86.AMOVB:
w = 1
case x86.AMOVW:
w = 2
case x86.AMOVL:
w = 4
}
if true && w != 0 && f != nil && (p.From.Width > int64(w) || p.To.Width > int64(w)) {
gc.Dump("bad width from:", f)
gc.Dump("bad width to:", t)
gc.Fatal("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
}
if p.To.Type == obj.TYPE_ADDR && w > 0 {
gc.Fatal("bad use of addr: %v", p)
}
return p
}
Example 14: cgen64
/*
* attempt to generate 64-bit
* res = n
* return 1 on success, 0 if op not handled.
*/
func cgen64(n *gc.Node, res *gc.Node) {
if res.Op != gc.OINDREG && res.Op != gc.ONAME {
gc.Dump("n", n)
gc.Dump("res", res)
gc.Fatal("cgen64 %v of %v", gc.Oconv(int(n.Op), 0), gc.Oconv(int(res.Op), 0))
}
switch n.Op {
default:
gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0))
case gc.OMINUS:
gc.Cgen(n.Left, res)
var hi1 gc.Node
var lo1 gc.Node
split64(res, &lo1, &hi1)
gins(x86.ANEGL, nil, &lo1)
gins(x86.AADCL, ncon(0), &hi1)
gins(x86.ANEGL, nil, &hi1)
splitclean()
return
case gc.OCOM:
gc.Cgen(n.Left, res)
var lo1 gc.Node
var hi1 gc.Node
split64(res, &lo1, &hi1)
gins(x86.ANOTL, nil, &lo1)
gins(x86.ANOTL, nil, &hi1)
splitclean()
return
// binary operators.
// common setup below.
case gc.OADD,
gc.OSUB,
gc.OMUL,
gc.OLROT,
gc.OLSH,
gc.ORSH,
gc.OAND,
gc.OOR,
gc.OXOR:
break
}
l := n.Left
r := n.Right
if !l.Addable {
var t1 gc.Node
gc.Tempname(&t1, l.Type)
gc.Cgen(l, &t1)
l = &t1
}
if r != nil && !r.Addable {
var t2 gc.Node
gc.Tempname(&t2, r.Type)
gc.Cgen(r, &t2)
r = &t2
}
var ax gc.Node
gc.Nodreg(&ax, gc.Types[gc.TINT32], x86.REG_AX)
var cx gc.Node
gc.Nodreg(&cx, gc.Types[gc.TINT32], x86.REG_CX)
var dx gc.Node
gc.Nodreg(&dx, gc.Types[gc.TINT32], x86.REG_DX)
// Setup for binary operation.
var hi1 gc.Node
var lo1 gc.Node
split64(l, &lo1, &hi1)
var lo2 gc.Node
var hi2 gc.Node
if gc.Is64(r.Type) {
split64(r, &lo2, &hi2)
}
// Do op. Leave result in DX:AX.
switch n.Op {
// TODO: Constants
case gc.OADD:
gins(x86.AMOVL, &lo1, &ax)
gins(x86.AMOVL, &hi1, &dx)
gins(x86.AADDL, &lo2, &ax)
gins(x86.AADCL, &hi2, &dx)
// TODO: Constants.
case gc.OSUB:
gins(x86.AMOVL, &lo1, &ax)
gins(x86.AMOVL, &hi1, &dx)
// ... part of the code omitted here ...
Example 15: clearfat
func clearfat(nl *gc.Node) {
/* clear a fat object */
if gc.Debug['g'] != 0 {
gc.Dump("\nclearfat", nl)
}
w := nl.Type.Width
// Avoid taking the address for simple enough types.
if gc.Componentgen(nil, nl) {
return
}
c := w % 8 // bytes
q := w / 8 // quads
if q < 4 {
// Write sequence of MOV 0, off(base) instead of using STOSQ.
// The hope is that although the code will be slightly longer,
// the MOVs will have no dependencies and pipeline better
// than the unrolled STOSQ loop.
// NOTE: Must use agen, not igen, so that optimizer sees address
// being taken. We are not writing on field boundaries.
var n1 gc.Node
gc.Agenr(nl, &n1, nil)
n1.Op = gc.OINDREG
var z gc.Node
gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
for {
tmp14 := q
q--
if tmp14 <= 0 {
break
}
n1.Type = z.Type
gins(x86.AMOVQ, &z, &n1)
n1.Xoffset += 8
}
if c >= 4 {
gc.Nodconst(&z, gc.Types[gc.TUINT32], 0)
n1.Type = z.Type
gins(x86.AMOVL, &z, &n1)
n1.Xoffset += 4
c -= 4
}
gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
for {
tmp15 := c
c--
if tmp15 <= 0 {
break
}
n1.Type = z.Type
gins(x86.AMOVB, &z, &n1)
n1.Xoffset++
}
gc.Regfree(&n1)
return
}
var oldn1 gc.Node
var n1 gc.Node
savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
gc.Agen(nl, &n1)
var ax gc.Node
var oldax gc.Node
savex(x86.REG_AX, &ax, &oldax, nil, gc.Types[gc.Tptr])
gconreg(x86.AMOVL, 0, x86.REG_AX)
if q > 128 || gc.Nacl {
gconreg(movptr, q, x86.REG_CX)
gins(x86.AREP, nil, nil) // repeat
gins(x86.ASTOSQ, nil, nil) // STOQ AL,*(DI)+
} else {
if di := dzDI(q); di != 0 {
gconreg(addptr, di, x86.REG_DI)
}
p := gins(obj.ADUFFZERO, nil, nil)
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
p.To.Offset = dzOff(q)
}
z := ax
di := n1
if w >= 8 && c >= 4 {
di.Op = gc.OINDREG
z.Type = gc.Types[gc.TINT64]
di.Type = z.Type
p := gins(x86.AMOVQ, &z, &di)
p.To.Scale = 1
p.To.Offset = c - 8
} else if c >= 4 {
di.Op = gc.OINDREG
z.Type = gc.Types[gc.TINT32]
// ... part of the code omitted here ...