This page collects typical usage examples of the Node.Int method from the Go package cmd/compile/internal/gc. If you are wondering what Node.Int does, how to call it, or what real call sites look like, the curated examples below should help; you can also look further into the cmd/compile/internal/gc.Node type itself.
The sections below show 14 code examples of Node.Int, drawn from the old (pre-SSA) compiler back ends and ordered by popularity.
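One pattern recurs across every example: Node.Int returns the value of an integer constant node as an int64, so callers always guard it with gc.Isconst(n, gc.CTINT) or n.Op == gc.OLITERAL first. The following is a minimal sketch of that guard, with a hypothetical helper name; it is not taken from any one back end:

// Hypothetical helper, isolating the guard the examples below all use:
// Node.Int may only be called on an integer constant node.
func constIntValue(n *gc.Node) (int64, bool) {
	if n == nil || !gc.Isconst(n, gc.CTINT) {
		return 0, false // calling n.Int() here would be a compiler-internal error
	}
	return n.Int(), true
}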
Example 1: ginscmp

func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n1.Int() == 0 && n2.Op != gc.OLITERAL {
		// Reverse comparison to place constant zero last.
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}

	var r1, r2, g1, g2 gc.Node
	gc.Regalloc(&r1, t, n1)
	gc.Regalloc(&g1, n1.Type, &r1)
	gc.Cgen(n1, &g1)
	gmove(&g1, &r1)
	if gc.Isint[t.Etype] && n2.Op == gc.OLITERAL && n2.Int() == 0 {
		// A constant zero can be compared directly.
		gins(arm.ACMP, &r1, n2)
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
		gins(optoas(gc.OCMP, t), &r1, &r2)
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	gc.Regfree(&g1)
	gc.Regfree(&r1)
	return gc.Gbranch(optoas(op, t), nil, likely)
}
Example 2: ginscmp

func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
		// Reverse comparison to place constant last.
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}

	var r1, r2, g1, g2 gc.Node
	gc.Regalloc(&r1, t, n1)
	gc.Regalloc(&g1, n1.Type, &r1)
	gc.Cgen(n1, &g1)
	gmove(&g1, &r1)
	if gc.Isint[t.Etype] && gc.Isconst(n2, gc.CTINT) {
		// Constant operand: compare against an immediate.
		ginscon2(optoas(gc.OCMP, t), &r1, n2.Int())
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
		gcmp(optoas(gc.OCMP, t), &r1, &r2)
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	gc.Regfree(&g1)
	gc.Regfree(&r1)
	return gc.Gbranch(optoas(op, t), nil, likely)
}
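In both versions the operand swap is only sound because gc.Brrev maps a comparison to its mirror with the operands exchanged, not to its negation. The following is a sketch of the mapping the swap relies on, written as an assumption about Brrev's behavior rather than its actual source:

// Sketch of the operand-swap mapping assumed above: 0 < x becomes x > 0.
func brrev(op int) int {
	switch op {
	case gc.OLT:
		return gc.OGT
	case gc.OGT:
		return gc.OLT
	case gc.OLE:
		return gc.OGE
	case gc.OGE:
		return gc.OLE
	}
	return op // OEQ and ONE are symmetric under operand exchange
}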
Example 3: split64

/*
 * n is a 64-bit value. fill in lo and hi to refer to its 32-bit halves.
 */
func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
	if !gc.Is64(n.Type) {
		gc.Fatalf("split64 %v", n.Type)
	}

	if nsclean >= len(sclean) {
		gc.Fatalf("split64 clean")
	}
	sclean[nsclean].Op = gc.OEMPTY
	nsclean++
	switch n.Op {
	default:
		switch n.Op {
		default:
			var n1 gc.Node
			if !dotaddable(n, &n1) {
				gc.Igen(n, &n1, nil)
				sclean[nsclean-1] = n1
			}
			n = &n1

		case gc.ONAME:
			if n.Class == gc.PPARAMREF {
				var n1 gc.Node
				gc.Cgen(n.Name.Heapaddr, &n1)
				sclean[nsclean-1] = n1
				n = &n1
			}

		case gc.OINDREG:
			// nothing to do
			break
		}

		*lo = *n
		*hi = *n
		lo.Type = gc.Types[gc.TUINT32]
		if n.Type.Etype == gc.TINT64 {
			hi.Type = gc.Types[gc.TINT32]
		} else {
			hi.Type = gc.Types[gc.TUINT32]
		}
		hi.Xoffset += 4

	case gc.OLITERAL:
		var n1 gc.Node
		n.Convconst(&n1, n.Type)
		i := n1.Int()
		gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
		i >>= 32
		if n.Type.Etype == gc.TINT64 {
			gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i)))
		} else {
			gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i)))
		}
	}
}
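The gc.OLITERAL arm is the easiest to check by hand: Convconst plus Int yields the constant as an int64, the low word is always unsigned, and the high word keeps the sign only for TINT64. Here is a runnable model of just that arithmetic; the helper name is mine, not the compiler's:

package main

import "fmt"

// splitConst64 mirrors the OLITERAL case of split64 above.
func splitConst64(v int64, signed bool) (lo uint32, hi int64) {
	lo = uint32(v) // low 32 bits, always treated as unsigned
	if signed {
		hi = int64(int32(v >> 32)) // TINT64: high half keeps the sign
	} else {
		hi = int64(uint32(v >> 32)) // TUINT64: high half is unsigned too
	}
	return
}

func main() {
	lo, hi := splitConst64(-1, true)
	fmt.Printf("lo=%#x hi=%d\n", lo, hi) // lo=0xffffffff hi=-1
}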
Example 4: intLiteral

// intLiteral reports the value of n when n is an integer or boolean literal.
func intLiteral(n *gc.Node) (x int64, ok bool) {
	switch {
	case n == nil:
		return
	case gc.Isconst(n, gc.CTINT):
		return n.Int(), true
	case gc.Isconst(n, gc.CTBOOL):
		return int64(obj.Bool2int(n.Bool())), true
	}
	return
}
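A plausible call site, sketched only to show why the (value, ok) result shape is convenient: the caller can fold a literal operand and otherwise fall back to the general path. Everything here is hypothetical; the ±32000 window is borrowed from Example 12's ARM immediate check purely for illustration:

// Hypothetical use of intLiteral during instruction selection.
if v, ok := intLiteral(n.Right); ok && v > -32000 && v < 32000 {
	// operand is a small literal: emit the immediate form
} else {
	// not a literal (or out of range): load it into a register first
}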
Example 5: cgen_shift

/*
 * generate shift according to op, one of:
 *	res = nl << nr
 *	res = nl >> nr
 */
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	a := int(optoas(op, nl.Type))

	if nr.Op == gc.OLITERAL {
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		gc.Cgen(nl, &n1)
		sc := uint64(nr.Int())
		if sc >= uint64(nl.Type.Width*8) {
			// large shift gets 2 shifts by width-1
			var n3 gc.Node
			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
			gins(a, &n3, &n1)
			gins(a, &n3, &n1)
		} else {
			gins(a, nr, &n1)
		}
		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	if nl.Ullman >= gc.UINF {
		var n4 gc.Node
		gc.Tempname(&n4, nl.Type)
		gc.Cgen(nl, &n4)
		nl = &n4
	}

	if nr.Ullman >= gc.UINF {
		var n5 gc.Node
		gc.Tempname(&n5, nr.Type)
		gc.Cgen(nr, &n5)
		nr = &n5
	}

	// Allow either uint32 or uint64 as shift type,
	// to avoid unnecessary conversion from uint32 to uint64
	// just to do the comparison.
	tcount := gc.Types[gc.Simtype[nr.Type.Etype]]
	if tcount.Etype < gc.TUINT32 {
		tcount = gc.Types[gc.TUINT32]
	}

	var n1 gc.Node
	gc.Regalloc(&n1, nr.Type, nil) // to hold the shift type in CX
	var n3 gc.Node
	gc.Regalloc(&n3, tcount, &n1) // to clear high bits of CX

	var n2 gc.Node
	gc.Regalloc(&n2, nl.Type, res)

	if nl.Ullman >= nr.Ullman {
		gc.Cgen(nl, &n2)
		gc.Cgen(nr, &n1)
		gmove(&n1, &n3)
	} else {
		gc.Cgen(nr, &n1)
		gmove(&n1, &n3)
		gc.Cgen(nl, &n2)
	}

	gc.Regfree(&n3)

	// test and fix up large shifts
	if !bounded {
		gc.Nodconst(&n3, tcount, nl.Type.Width*8)
		gins(optoas(gc.OCMP, tcount), &n1, &n3)
		p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, tcount), nil, +1))
		if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
			gins(a, &n3, &n2)
		} else {
			gc.Nodconst(&n3, nl.Type, 0)
			gmove(&n3, &n2)
		}
		gc.Patch(p1, gc.Pc)
	}

	gins(a, &n1, &n2)
	gmove(&n2, res)
	gc.Regfree(&n1)
	gc.Regfree(&n2)
}
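The "large shift gets 2 shifts by width-1" trick and the !bounded fixup both exist because Go defines shifts by counts at or above the operand width: the result must be 0, or all sign bits for a signed right shift, rather than whatever the hardware's modulo-count shifter would produce. A small runnable illustration of the semantics being preserved:

package main

import "fmt"

func main() {
	var x int32 = -8
	var u uint32 = 8
	n := uint(40) // shift count >= the 32-bit operand width

	fmt.Println(x >> n) // -1: signed right shift fills with sign bits
	fmt.Println(u << n) // 0: any over-wide shift of an unsigned value is zero
}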
Example 6: dodiv

/*
 * generate division.
 * generates one of:
 *	res = nl / nr
 *	res = nl % nr
 * according to op.
 */
func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	// Have to be careful about handling
	// most negative int divided by -1 correctly.
	// The hardware will generate an undefined result.
	// Also need to explicitly trap on division by zero;
	// the hardware will silently generate an undefined result.
	// DIVW will leave an unpredictable result in the high 32 bits,
	// so always use DIVD/DIVDU.
	t := nl.Type

	t0 := t
	check := 0
	if gc.Issigned[t.Etype] {
		check = 1
		if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
			check = 0
		} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
			check = 0
		}
	}

	if t.Width < 8 {
		if gc.Issigned[t.Etype] {
			t = gc.Types[gc.TINT64]
		} else {
			t = gc.Types[gc.TUINT64]
		}
		check = 0
	}

	a := optoas(gc.ODIV, t)

	var tl gc.Node
	gc.Regalloc(&tl, t0, nil)
	var tr gc.Node
	gc.Regalloc(&tr, t0, nil)
	if nl.Ullman >= nr.Ullman {
		gc.Cgen(nl, &tl)
		gc.Cgen(nr, &tr)
	} else {
		gc.Cgen(nr, &tr)
		gc.Cgen(nl, &tl)
	}

	if t != t0 {
		// Convert
		tl2 := tl
		tr2 := tr
		tl.Type = t
		tr.Type = t
		gmove(&tl2, &tl)
		gmove(&tr2, &tr)
	}

	// Handle divide-by-zero panic.
	p1 := gins(optoas(gc.OCMP, t), &tr, nil)
	p1.To.Type = obj.TYPE_REG
	p1.To.Reg = ppc64.REGZERO
	p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
	if panicdiv == nil {
		panicdiv = gc.Sysfunc("panicdivide")
	}
	gc.Ginscall(panicdiv, -1)
	gc.Patch(p1, gc.Pc)

	var p2 *obj.Prog
	if check != 0 {
		var nm1 gc.Node
		gc.Nodconst(&nm1, t, -1)
		gins(optoas(gc.OCMP, t), &tr, &nm1)
		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
		if op == gc.ODIV {
			// a / (-1) is -a.
			gins(optoas(gc.OMINUS, t), nil, &tl)
			gmove(&tl, res)
		} else {
			// a % (-1) is 0.
			var nz gc.Node
			gc.Nodconst(&nz, t, 0)
			gmove(&nz, res)
		}

		p2 = gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)
	}

	p1 = gins(a, &tr, &tl)
	if op == gc.ODIV {
		gc.Regfree(&tr)
// ... (the rest of this code is omitted) ...
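The check guard above preserves Go's defined result for the one case where the hardware misbehaves: dividing the most negative value by -1 must wrap back to the dividend, with a remainder of 0, per the Go spec. A runnable illustration of the semantics the generated -a and 0 fixups reproduce:

package main

import (
	"fmt"
	"math"
)

func main() {
	x := int64(math.MinInt64)
	d := int64(-1) // non-constant, so the compiler's runtime check path applies

	fmt.Println(x / d) // -9223372036854775808: overflows back to x, no trap
	fmt.Println(x % d) // 0
}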
Example 7: dodiv

/*
 * generate division.
 * generates one of:
 *	res = nl / nr
 *	res = nl % nr
 * according to op.
 */
func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	// Have to be careful about handling
	// most negative int divided by -1 correctly.
	// The hardware will trap.
	// Also the byte divide instruction needs AH,
	// which we otherwise don't have to deal with.
	// Easiest way to avoid for int8, int16: use int32.
	// For int32 and int64, use explicit test.
	// Could use int64 hw for int32.
	t := nl.Type

	t0 := t
	check := 0
	if gc.Issigned[t.Etype] {
		check = 1
		if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
			check = 0
		} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
			check = 0
		}
	}

	if t.Width < 4 {
		if gc.Issigned[t.Etype] {
			t = gc.Types[gc.TINT32]
		} else {
			t = gc.Types[gc.TUINT32]
		}
		check = 0
	}

	a := optoas(op, t)

	var n3 gc.Node
	gc.Regalloc(&n3, t0, nil)
	var ax gc.Node
	var oldax gc.Node
	if nl.Ullman >= nr.Ullman {
		savex(x86.REG_AX, &ax, &oldax, res, t0)
		gc.Cgen(nl, &ax)
		gc.Regalloc(&ax, t0, &ax) // mark ax live during cgen
		gc.Cgen(nr, &n3)
		gc.Regfree(&ax)
	} else {
		gc.Cgen(nr, &n3)
		savex(x86.REG_AX, &ax, &oldax, res, t0)
		gc.Cgen(nl, &ax)
	}

	if t != t0 {
		// Convert
		ax1 := ax
		n31 := n3
		ax.Type = t
		n3.Type = t
		gmove(&ax1, &ax)
		gmove(&n31, &n3)
	}

	var n4 gc.Node
	if gc.Nacl {
		// Native Client does not relay the divide-by-zero trap
		// to the executing program, so we must insert a check
		// for ourselves.
		gc.Nodconst(&n4, t, 0)
		gins(optoas(gc.OCMP, t), &n3, &n4)
		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
		if panicdiv == nil {
			panicdiv = gc.Sysfunc("panicdivide")
		}
		gc.Ginscall(panicdiv, -1)
		gc.Patch(p1, gc.Pc)
	}

	var p2 *obj.Prog
	if check != 0 {
		gc.Nodconst(&n4, t, -1)
		gins(optoas(gc.OCMP, t), &n3, &n4)
		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
		if op == gc.ODIV {
			// a / (-1) is -a.
			gins(optoas(gc.OMINUS, t), nil, &ax)
			gmove(&ax, res)
		} else {
			// a % (-1) is 0.
			gc.Nodconst(&n4, t, 0)
			gmove(&n4, res)
		}
// ... (the rest of this code is omitted) ...
Example 8: cgen64

// ... (the beginning of this code is omitted) ...
		gins(x86.AORL, &ex, &fx)
		p1 := gc.Gbranch(x86.AJNE, nil, 0)
		gins(x86.AMULL, &gx, nil) // implicit &ax
		p2 := gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)

		// full 64x64 -> 64, from 32x32 -> 64.
		gins(x86.AIMULL, &gx, &dx)
		gins(x86.AMOVL, &ax, &fx)
		gins(x86.AIMULL, &ex, &fx)
		gins(x86.AADDL, &dx, &fx)
		gins(x86.AMOVL, &gx, &dx)
		gins(x86.AMULL, &dx, nil) // implicit &ax
		gins(x86.AADDL, &fx, &dx)
		gc.Patch(p2, gc.Pc)

		gc.Regfree(&ex)
		gc.Regfree(&fx)
		gc.Regfree(&gx)

	// We only rotate by a constant c in [0,64).
	// if c >= 32:
	//	lo, hi = hi, lo
	//	c -= 32
	// if c == 0:
	//	no-op
	// else:
	//	t = hi
	//	shld hi:lo, c
	//	shld lo:t, c
	case gc.OLROT:
		v := uint64(r.Int())
		if v >= 32 {
			// reverse during load to do the first 32 bits of rotate
			v -= 32
			gins(x86.AMOVL, &lo1, &dx)
			gins(x86.AMOVL, &hi1, &ax)
		} else {
			gins(x86.AMOVL, &lo1, &ax)
			gins(x86.AMOVL, &hi1, &dx)
		}

		if v != 0 { // v == 0 needs no further shifting
			gins(x86.AMOVL, &dx, &cx)
			p1 := gins(x86.ASHLL, ncon(uint32(v)), &dx)
			p1.From.Index = x86.REG_AX // double-width shift
			p1.From.Scale = 0
			p1 = gins(x86.ASHLL, ncon(uint32(v)), &ax)
			p1.From.Index = x86.REG_CX // double-width shift
			p1.From.Scale = 0
		}

	case gc.OLSH:
		if r.Op == gc.OLITERAL {
			v := uint64(r.Int())
			if v >= 64 {
				if gc.Is64(r.Type) {
					splitclean()
				}
				splitclean()
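Example 8 is truncated by the source site, but the OLROT arm is complete: a 64-bit rotate on a 32-bit machine first swaps the halves when the count is 32 or more, then finishes the remaining 0..31-bit rotate with two double-width shifts. A small runnable model of that strategy in plain Go; the function name is mine, not the compiler's:

package main

import "fmt"

// rotl64via32 mirrors the OLROT strategy above.
func rotl64via32(lo, hi uint32, c uint) (uint32, uint32) {
	if c >= 32 {
		lo, hi = hi, lo // the half swap handles the first 32 bits of rotation
		c -= 32
	}
	if c == 0 {
		return lo, hi // no-op, as in the v == 0 branch
	}
	// Each new half takes c bits shifted in from the other half,
	// which is exactly what the SHLD-style double-width shifts do.
	newHi := hi<<c | lo>>(32-c)
	newLo := lo<<c | hi>>(32-c)
	return newLo, newHi
}

func main() {
	lo, hi := rotl64via32(0x00000001, 0x80000000, 1)
	fmt.Printf("lo=%#x hi=%#x\n", lo, hi) // lo=0x3 hi=0x0
}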
Example 9: cgen_shift

/*
 * generate shift according to op, one of:
 *	res = nl << nr
 *	res = nl >> nr
 */
func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	if nl.Type.Width > 4 {
		gc.Fatalf("cgen_shift %v", nl.Type)
	}

	w := int(nl.Type.Width * 8)
	a := optoas(op, nl.Type)

	if nr.Op == gc.OLITERAL {
		var n2 gc.Node
		gc.Tempname(&n2, nl.Type)
		gc.Cgen(nl, &n2)
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		gmove(&n2, &n1)
		sc := uint64(nr.Int())
		if sc >= uint64(nl.Type.Width*8) {
			// large shift gets 2 shifts by width-1
			gins(a, ncon(uint32(w)-1), &n1)
			gins(a, ncon(uint32(w)-1), &n1)
		} else {
			gins(a, nr, &n1)
		}
		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	var oldcx gc.Node
	var cx gc.Node
	gc.Nodreg(&cx, gc.Types[gc.TUINT32], x86.REG_CX)
	if gc.GetReg(x86.REG_CX) > 1 && !gc.Samereg(&cx, res) {
		gc.Tempname(&oldcx, gc.Types[gc.TUINT32])
		gmove(&cx, &oldcx)
	}

	var n1 gc.Node
	var nt gc.Node
	if nr.Type.Width > 4 {
		// 64-bit shift count: compute it into a temporary first.
		gc.Tempname(&nt, nr.Type)
		n1 = nt
	} else {
		gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)
		gc.Regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
	}

	var n2 gc.Node
	if gc.Samereg(&cx, res) {
		gc.Regalloc(&n2, nl.Type, nil)
	} else {
		gc.Regalloc(&n2, nl.Type, res)
	}

	if nl.Ullman >= nr.Ullman {
		gc.Cgen(nl, &n2)
		gc.Cgen(nr, &n1)
	} else {
		gc.Cgen(nr, &n1)
		gc.Cgen(nl, &n2)
	}

	// test and fix up large shifts
	if bounded {
		if nr.Type.Width > 4 {
			// delayed reg alloc
			gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)
			gc.Regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift type in CX
			var lo gc.Node
			var hi gc.Node
			split64(&nt, &lo, &hi)
			gmove(&lo, &n1)
			splitclean()
		}
	} else {
		var p1 *obj.Prog
		if nr.Type.Width > 4 {
			// delayed reg alloc
			gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)
			gc.Regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift type in CX
			var lo gc.Node
			var hi gc.Node
			split64(&nt, &lo, &hi)
			gmove(&lo, &n1)
			gins(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &hi, ncon(0))
			p2 := gc.Gbranch(optoas(gc.ONE, gc.Types[gc.TUINT32]), nil, +1)
			gins(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &n1, ncon(uint32(w)))
			p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
			splitclean()
			gc.Patch(p2, gc.Pc)
		} else {
			gins(optoas(gc.OCMP, nr.Type), &n1, ncon(uint32(w)))
			p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
// ... (the rest of this code is omitted) ...
Example 10: dodiv

/*
 * generate division.
 * caller must set:
 *	ax = allocated AX register
 *	dx = allocated DX register
 * generates one of:
 *	res = nl / nr
 *	res = nl % nr
 * according to op.
 */
func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.Node) {
	// Have to be careful about handling
	// most negative int divided by -1 correctly.
	// The hardware will trap.
	// Also the byte divide instruction needs AH,
	// which we otherwise don't have to deal with.
	// Easiest way to avoid for int8, int16: use int32.
	// For int32 and int64, use explicit test.
	// Could use int64 hw for int32.
	t := nl.Type

	t0 := t
	check := false
	if gc.Issigned[t.Etype] {
		check = true
		if gc.Isconst(nl, gc.CTINT) && nl.Int() != -1<<uint64(t.Width*8-1) {
			check = false
		} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
			check = false
		}
	}

	if t.Width < 4 {
		if gc.Issigned[t.Etype] {
			t = gc.Types[gc.TINT32]
		} else {
			t = gc.Types[gc.TUINT32]
		}
		check = false
	}

	var t1 gc.Node
	gc.Tempname(&t1, t)
	var t2 gc.Node
	gc.Tempname(&t2, t)
	if t0 != t {
		var t3 gc.Node
		gc.Tempname(&t3, t0)
		var t4 gc.Node
		gc.Tempname(&t4, t0)
		gc.Cgen(nl, &t3)
		gc.Cgen(nr, &t4)

		// Convert.
		gmove(&t3, &t1)
		gmove(&t4, &t2)
	} else {
		gc.Cgen(nl, &t1)
		gc.Cgen(nr, &t2)
	}

	var n1 gc.Node
	if !gc.Samereg(ax, res) && !gc.Samereg(dx, res) {
		gc.Regalloc(&n1, t, res)
	} else {
		gc.Regalloc(&n1, t, nil)
	}
	gmove(&t2, &n1)
	gmove(&t1, ax)
	var p2 *obj.Prog
	var n4 gc.Node
	if gc.Nacl {
		// Native Client does not relay the divide-by-zero trap
		// to the executing program, so we must insert a check
		// for ourselves.
		gc.Nodconst(&n4, t, 0)
		gins(optoas(gc.OCMP, t), &n1, &n4)
		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
		if panicdiv == nil {
			panicdiv = gc.Sysfunc("panicdivide")
		}
		gc.Ginscall(panicdiv, -1)
		gc.Patch(p1, gc.Pc)
	}

	if check {
		gc.Nodconst(&n4, t, -1)
		gins(optoas(gc.OCMP, t), &n1, &n4)
		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
		if op == gc.ODIV {
			// a / (-1) is -a.
			gins(optoas(gc.OMINUS, t), nil, ax)
			gmove(ax, res)
		} else {
			// a % (-1) is 0.
			gc.Nodconst(&n4, t, 0)
// ... (the rest of this code is omitted) ...
Example 11: cgen_shift

/*
 * generate shift according to op, one of:
 *	res = nl << nr
 *	res = nl >> nr
 */
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	if nl.Type.Width > 4 {
		gc.Fatalf("cgen_shift %v", nl.Type)
	}

	w := int(nl.Type.Width * 8)

	if op == gc.OLROT {
		v := nr.Int()
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		if w == 32 {
			gc.Cgen(nl, &n1)
			gshift(arm.AMOVW, &n1, arm.SHIFT_RR, int32(w)-int32(v), &n1)
		} else {
			var n2 gc.Node
			gc.Regalloc(&n2, nl.Type, nil)
			gc.Cgen(nl, &n2)
			gshift(arm.AMOVW, &n2, arm.SHIFT_LL, int32(v), &n1)
			gshift(arm.AORR, &n2, arm.SHIFT_LR, int32(w)-int32(v), &n1)
			gc.Regfree(&n2)

			// Ensure sign/zero-extended result.
			gins(optoas(gc.OAS, nl.Type), &n1, &n1)
		}
		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	if nr.Op == gc.OLITERAL {
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		gc.Cgen(nl, &n1)
		sc := uint64(nr.Int())
		if sc == 0 {
			// nothing to do
		} else if sc >= uint64(nl.Type.Width*8) {
			if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
				gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)
			} else {
				gins(arm.AEOR, &n1, &n1)
			}
		} else {
			if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
				gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(sc), &n1)
			} else if op == gc.ORSH {
				gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(sc), &n1)
			} else { // OLSH
				gshift(arm.AMOVW, &n1, arm.SHIFT_LL, int32(sc), &n1)
			}
		}

		if w < 32 && op == gc.OLSH {
			gins(optoas(gc.OAS, nl.Type), &n1, &n1)
		}
		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	tr := nr.Type
	var t gc.Node
	var n1 gc.Node
	var n2 gc.Node
	var n3 gc.Node
	if tr.Width > 4 {
		var nt gc.Node
		gc.Tempname(&nt, nr.Type)
		if nl.Ullman >= nr.Ullman {
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
			gc.Cgen(nr, &nt)
			n1 = nt
		} else {
			gc.Cgen(nr, &nt)
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
		}

		var hi gc.Node
		var lo gc.Node
		split64(&nt, &lo, &hi)
		gc.Regalloc(&n1, gc.Types[gc.TUINT32], nil)
		gc.Regalloc(&n3, gc.Types[gc.TUINT32], nil)
		gmove(&lo, &n1)
		gmove(&hi, &n3)
		splitclean()
		gins(arm.ATST, &n3, nil)
		gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
		p1 := gins(arm.AMOVW, &t, &n1)
		p1.Scond = arm.C_SCOND_NE
		tr = gc.Types[gc.TUINT32]
		gc.Regfree(&n3)
// ... (the rest of this code is omitted) ...
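The w == 32 arm of the OLROT case above emits a single rotate-right by w-v, because rotating left by v is the same as rotating right by 32-v. A runnable check of that identity using the standard library:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	var x uint32 = 0x80000001
	v := 4

	fmt.Printf("%#x\n", bits.RotateLeft32(x, v))       // 0x18: rotate left by v
	fmt.Printf("%#x\n", bits.RotateLeft32(x, -(32-v))) // 0x18: rotate right by 32-v, same result
}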
Example 12: sudoaddable

/*
 * generate code to compute address of n,
 * a reference to a (perhaps nested) field inside
 * an array or struct.
 * return false on failure, true on success.
 * on success, leaves usable address in a.
 *
 * caller is responsible for calling sudoclean
 * after successful sudoaddable,
 * to release the register used for a.
 */
func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
	if n.Type == nil {
		return false
	}

	*a = obj.Addr{}

	switch n.Op {
	case gc.OLITERAL:
		if !gc.Isconst(n, gc.CTINT) {
			break
		}
		v := n.Int()
		if v >= 32000 || v <= -32000 {
			break
		}
		switch as {
		default:
			return false

		case arm.AADD,
			arm.ASUB,
			arm.AAND,
			arm.AORR,
			arm.AEOR,
			arm.AMOVB,
			arm.AMOVBS,
			arm.AMOVBU,
			arm.AMOVH,
			arm.AMOVHS,
			arm.AMOVHU,
			arm.AMOVW:
			break
		}

		cleani += 2
		reg := &clean[cleani-1]
		reg1 := &clean[cleani-2]
		reg.Op = gc.OEMPTY
		reg1.Op = gc.OEMPTY
		gc.Naddr(a, n)
		return true

	case gc.ODOT,
		gc.ODOTPTR:
		cleani += 2
		reg := &clean[cleani-1]
		reg1 := &clean[cleani-2]
		reg.Op = gc.OEMPTY
		reg1.Op = gc.OEMPTY
		var nn *gc.Node
		var oary [10]int64
		o := gc.Dotoffset(n, oary[:], &nn)
		if nn == nil {
			sudoclean()
			return false
		}

		if nn.Addable && o == 1 && oary[0] >= 0 {
			// directly addressable set of DOTs
			n1 := *nn
			n1.Type = n.Type
			n1.Xoffset += oary[0]
			gc.Naddr(a, &n1)
			return true
		}

		gc.Regalloc(reg, gc.Types[gc.Tptr], nil)
		n1 := *reg
		n1.Op = gc.OINDREG
		if oary[0] >= 0 {
			gc.Agen(nn, reg)
			n1.Xoffset = oary[0]
		} else {
			gc.Cgen(nn, reg)
			gc.Cgen_checknil(reg)
			n1.Xoffset = -(oary[0] + 1)
		}

		for i := 1; i < o; i++ {
			if oary[i] >= 0 {
				gc.Fatal("can't happen")
			}
			gins(arm.AMOVW, &n1, reg)
			gc.Cgen_checknil(reg)
			n1.Xoffset = -(oary[i] + 1)
		}
// ... (the rest of this code is omitted) ...
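Reading from the branches above, the offset array produced by gc.Dotoffset appears to use a sign-based encoding: a non-negative entry is a direct field offset, while a negative entry means "load the pointer stored here, nil-check it, then continue at offset -(entry+1)". A sketch of how that might play out for a.b.c where b is a pointer field, with invented offsets:

// Illustrative only; the field offsets are made up for the example.
//
//	oary[0] = 8        // direct: address is &a + 8 (field b)
//	oary[1] = -(16+1)  // indirect: load the pointer, nil-check, then +16 (field c)
//
// The loop treats each negative entry as "emit a load (gins AMOVW),
// emit a nil check, continue at offset -(oary[i]+1)".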
Example 13: gmove

/*
 * generate move:
 *	t = f
 * hard part is conversions.
 */
func gmove(f *gc.Node, t *gc.Node) {
	if gc.Debug['M'] != 0 {
		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
	}

	ft := gc.Simsimtype(f.Type)
	tt := gc.Simsimtype(t.Type)
	cvt := t.Type

	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
		gc.Complexmove(f, t)
		return
	}

	// cannot have two memory operands
	var a int
	if gc.Ismem(f) && gc.Ismem(t) {
		goto hard
	}

	// convert constant to desired type
	if f.Op == gc.OLITERAL {
		var con gc.Node
		f.Convconst(&con, t.Type)
		f = &con
		ft = tt // so big switch will choose a simple mov

		// some constants can't move directly to memory.
		if gc.Ismem(t) {
			// float constants come from memory.
			if gc.Isfloat[tt] {
				goto hard
			}

			// 64-bit immediates are really 32-bit sign-extended
			// unless moving into a register.
			if gc.Isint[tt] {
				if i := con.Int(); int64(int32(i)) != i {
					goto hard
				}
			}
		}
	}

	// value -> value copy, only one memory operand.
	// figure out the instruction to use.
	// break out of switch for one-instruction gins.
	// goto rdst for "destination must be register".
	// goto hard for "convert to cvt type first".
	// otherwise handle and return.
	switch uint32(ft)<<16 | uint32(tt) {
	default:
		gc.Fatal("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))

	/*
	 * integer copy and truncate
	 */
	case gc.TINT8<<16 | gc.TINT8, // same size
		gc.TINT8<<16 | gc.TUINT8,
		gc.TUINT8<<16 | gc.TINT8,
		gc.TUINT8<<16 | gc.TUINT8,
		gc.TINT16<<16 | gc.TINT8,
		// truncate
		gc.TUINT16<<16 | gc.TINT8,
		gc.TINT32<<16 | gc.TINT8,
		gc.TUINT32<<16 | gc.TINT8,
		gc.TINT64<<16 | gc.TINT8,
		gc.TUINT64<<16 | gc.TINT8,
		gc.TINT16<<16 | gc.TUINT8,
		gc.TUINT16<<16 | gc.TUINT8,
		gc.TINT32<<16 | gc.TUINT8,
		gc.TUINT32<<16 | gc.TUINT8,
		gc.TINT64<<16 | gc.TUINT8,
		gc.TUINT64<<16 | gc.TUINT8:
		a = x86.AMOVB

	case gc.TINT16<<16 | gc.TINT16, // same size
		gc.TINT16<<16 | gc.TUINT16,
		gc.TUINT16<<16 | gc.TINT16,
		gc.TUINT16<<16 | gc.TUINT16,
		gc.TINT32<<16 | gc.TINT16,
		// truncate
		gc.TUINT32<<16 | gc.TINT16,
		gc.TINT64<<16 | gc.TINT16,
		gc.TUINT64<<16 | gc.TINT16,
		gc.TINT32<<16 | gc.TUINT16,
		gc.TUINT32<<16 | gc.TUINT16,
		gc.TINT64<<16 | gc.TUINT16,
		gc.TUINT64<<16 | gc.TUINT16:
		a = x86.AMOVW

	case gc.TINT32<<16 | gc.TINT32, // same size
		gc.TINT32<<16 | gc.TUINT32,
		gc.TUINT32<<16 | gc.TINT32,
// ... (the rest of this code is omitted) ...
Example 14: gmove

/*
 * generate move:
 *	t = f
 * hard part is conversions.
 */
func gmove(f *gc.Node, t *gc.Node) {
	if gc.Debug['M'] != 0 {
		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
	}

	ft := int(gc.Simsimtype(f.Type))
	tt := int(gc.Simsimtype(t.Type))
	cvt := (*gc.Type)(t.Type)

	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
		gc.Complexmove(f, t)
		return
	}

	// cannot have two memory operands
	var a int
	if gc.Ismem(f) && gc.Ismem(t) {
		if gmvc(f, t) {
			return
		}
		goto hard
	}

	// convert constant to desired type
	if f.Op == gc.OLITERAL {
		var con gc.Node
		f.Convconst(&con, t.Type)
		f = &con
		ft = tt // so big switch will choose a simple mov

		// some constants can't move directly to memory.
		if gc.Ismem(t) {
			// float constants come from memory.
			if gc.Isfloat[tt] {
				goto hard
			}

			// all immediates are 16-bit sign-extended
			// unless moving into a register.
			if gc.Isint[tt] {
				if i := con.Int(); int64(int16(i)) != i {
					goto hard
				}
			}
		}
	}

	// a float-to-int or int-to-float conversion requires the source operand in a register
	if gc.Ismem(f) && ((gc.Isfloat[ft] && gc.Isint[tt]) || (gc.Isint[ft] && gc.Isfloat[tt])) {
		cvt = (*gc.Type)(f.Type)
		goto hard
	}

	// a float32-to-float64 or float64-to-float32 conversion requires the source operand in a register
	if gc.Ismem(f) && gc.Isfloat[ft] && gc.Isfloat[tt] && (ft != tt) {
		cvt = (*gc.Type)(f.Type)
		goto hard
	}

	// float constants come from memory.
	//if(isfloat[tt])
	//	goto hard;

	// 64-bit immediates are also from memory.
	//if(isint[tt])
	//	goto hard;
	//// 64-bit immediates are really 32-bit sign-extended
	//// unless moving into a register.
	//if(isint[tt]) {
	//	if(mpcmpfixfix(con.val.u.xval, minintval[TINT32]) < 0)
	//		goto hard;
	//	if(mpcmpfixfix(con.val.u.xval, maxintval[TINT32]) > 0)
	//		goto hard;
	//}

	// value -> value copy, only one memory operand.
	// figure out the instruction to use.
	// break out of switch for one-instruction gins.
	// goto rdst for "destination must be register".
	// goto hard for "convert to cvt type first".
	// otherwise handle and return.
	switch uint32(ft)<<16 | uint32(tt) {
	default:
		gc.Fatalf("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))

	/*
	 * integer copy and truncate
	 */
	case gc.TINT8<<16 | gc.TINT8, // same size
		gc.TUINT8<<16 | gc.TINT8,
		gc.TINT16<<16 | gc.TINT8,
		// truncate
		gc.TUINT16<<16 | gc.TINT8,
		gc.TINT32<<16 | gc.TINT8,
// ... (the rest of this code is omitted) ...
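Examples 13 and 14 differ in exactly one constant-related detail: before storing a literal straight to memory, example 13 (x86-64) requires the value to survive truncation to 32 bits and sign extension, while example 14 (ppc64) requires the same for 16 bits. A runnable distillation of the two predicates, with names of my own choosing:

package main

import "fmt"

// fits32 and fits16 mirror the int64(int32(i)) != i and int64(int16(i)) != i
// tests that gmove applies to constants headed for memory.
func fits32(v int64) bool { return int64(int32(v)) == v }
func fits16(v int64) bool { return int64(int16(v)) == v }

func main() {
	fmt.Println(fits32(1<<31-1), fits32(1<<31)) // true false
	fmt.Println(fits16(-32768), fits16(32768))  // true false
}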