This article collects typical usage examples of the Nodreg function from the Go package cmd/compile/internal/gc. If you are wondering what Nodreg does, how to call it, or want to see it used in context, the curated examples below may help.
The following shows 15 code examples of the Nodreg function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
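Read together, the examples suggest the call pattern: gc.Nodreg(&n, t, reg) initializes n in place as a node denoting the fixed machine register reg with type t; unlike gc.Regalloc, it allocates nothing. A minimal sketch under that assumption — these helpers belong to the pre-SSA compiler backend, so this does not build outside that tree:

// Sketch only: assumes the Go 1.7-era backend API used in the examples below.
var ax gc.Node
gc.Nodreg(&ax, gc.Types[gc.TINT], x86.REG_AX) // ax now denotes the AX register
gins(x86.AXCHGL, &ax, &ax)                    // emit an instruction that uses it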
Example 1: dodiv
/*
 * generate division.
 * generates one of:
 *    res = nl / nr
 *    res = nl % nr
 * according to op.
 */
func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
    t := nl.Type
    t0 := t
    if t.Width < 8 {
        if t.IsSigned() {
            t = gc.Types[gc.TINT64]
        } else {
            t = gc.Types[gc.TUINT64]
        }
    }

    a := optoas(gc.ODIV, t)

    var tl gc.Node
    gc.Regalloc(&tl, t0, nil)
    var tr gc.Node
    gc.Regalloc(&tr, t0, nil)
    if nl.Ullman >= nr.Ullman {
        gc.Cgen(nl, &tl)
        gc.Cgen(nr, &tr)
    } else {
        gc.Cgen(nr, &tr)
        gc.Cgen(nl, &tl)
    }

    if t != t0 {
        // Convert
        tl2 := tl
        tr2 := tr
        tl.Type = t
        tr.Type = t
        gmove(&tl2, &tl)
        gmove(&tr2, &tr)
    }

    // Handle divide-by-zero panic.
    p1 := ginsbranch(mips.ABNE, nil, &tr, nil, 0)
    if panicdiv == nil {
        panicdiv = gc.Sysfunc("panicdivide")
    }
    gc.Ginscall(panicdiv, -1)
    gc.Patch(p1, gc.Pc)

    gins3(a, &tr, &tl, nil)
    gc.Regfree(&tr)
    if op == gc.ODIV {
        var lo gc.Node
        gc.Nodreg(&lo, gc.Types[gc.TUINT64], mips.REG_LO)
        gins(mips.AMOVV, &lo, &tl)
    } else { // remainder in REG_HI
        var hi gc.Node
        gc.Nodreg(&hi, gc.Types[gc.TUINT64], mips.REG_HI)
        gins(mips.AMOVV, &hi, &tl)
    }

    gmove(&tl, res)
    gc.Regfree(&tl)
}
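On MIPS64 the divide instruction leaves its quotient in the special register LO and the remainder in HI, which is why dodiv reads mips.REG_LO or mips.REG_HI depending on op. What the emitted sequence computes at runtime, restated as an ordinary (illustrative, hypothetical) Go helper:

// Illustration only: the runtime behavior dodiv arranges.
func divOrMod(op gc.Op, a, b int64) int64 {
    if b == 0 {
        panic("integer divide by zero") // the guarded call to panicdivide
    }
    if op == gc.ODIV {
        return a / b // quotient: read from REG_LO
    }
    return a % b // remainder (gc.OMOD): read from REG_HI
}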
Example 2: cgen_hmul
/*
 * generate high multiply:
 *    res = (nl*nr) >> width
 */
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
    t := nl.Type
    a := optoas(gc.OHMUL, t)
    if nl.Ullman < nr.Ullman {
        nl, nr = nr, nl
    }

    var n1 gc.Node
    gc.Cgenr(nl, &n1, res)
    var n2 gc.Node
    gc.Cgenr(nr, &n2, nil)
    var ax, oldax, dx, olddx gc.Node
    savex(x86.REG_AX, &ax, &oldax, res, gc.Types[gc.TUINT64])
    savex(x86.REG_DX, &dx, &olddx, res, gc.Types[gc.TUINT64])
    gmove(&n1, &ax)
    gins(a, &n2, nil)
    gc.Regfree(&n2)
    gc.Regfree(&n1)

    if t.Width == 1 {
        // byte multiply behaves differently.
        var byteAH, byteDX gc.Node
        gc.Nodreg(&byteAH, t, x86.REG_AH)
        gc.Nodreg(&byteDX, t, x86.REG_DX)
        gmove(&byteAH, &byteDX)
    }

    gmove(&dx, res)

    restx(&ax, &oldax)
    restx(&dx, &olddx)
}
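The "high multiply" being generated is the upper half of the double-width product, which x86 leaves in DX (hence the DX:AX shuffling above). The same operation can be written portably with math/bits; this illustrates the semantics only, not compiler code:

import "math/bits"

// hmul returns the high 64 bits of the 128-bit product a*b,
// i.e. (a*b) >> 64 — the value cgen_hmul leaves in DX.
func hmul(a, b uint64) uint64 {
    hi, _ := bits.Mul64(a, b)
    return hi // e.g. hmul(1<<63, 4) == 2
}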
Example 3: gconreg
/*
 * generate
 *    as $c, reg
 */
func gconreg(as obj.As, c int64, reg int) {
    var nr gc.Node

    switch as {
    case x86.AADDL,
        x86.AMOVL,
        x86.ALEAL:
        gc.Nodreg(&nr, gc.Types[gc.TINT32], reg)
    default:
        gc.Nodreg(&nr, gc.Types[gc.TINT64], reg)
    }

    ginscon(as, c, &nr)
}
Example 4: ginsnop
func ginsnop() {
    // This is actually not the x86 NOP anymore,
    // but at the point where it gets used, AX is dead
    // so it's okay if we lose the high bits.
    var reg gc.Node
    gc.Nodreg(&reg, gc.Types[gc.TINT], x86.REG_AX)
    gins(x86.AXCHGL, &reg, &reg)
}
Example 5: cgen_float387
// floating-point. 387 (not SSE2)
func cgen_float387(n *gc.Node, res *gc.Node) {
    var f0 gc.Node
    var f1 gc.Node

    nl := n.Left
    nr := n.Right
    gc.Nodreg(&f0, nl.Type, x86.REG_F0)
    gc.Nodreg(&f1, n.Type, x86.REG_F0+1)
    if nr != nil {
        // binary
        if nl.Ullman >= nr.Ullman {
            gc.Cgen(nl, &f0)
            if nr.Addable {
                gins(foptoas(n.Op, n.Type, 0), nr, &f0)
            } else {
                gc.Cgen(nr, &f0)
                gins(foptoas(n.Op, n.Type, Fpop), &f0, &f1)
            }
        } else {
            gc.Cgen(nr, &f0)
            if nl.Addable {
                gins(foptoas(n.Op, n.Type, Frev), nl, &f0)
            } else {
                gc.Cgen(nl, &f0)
                gins(foptoas(n.Op, n.Type, Frev|Fpop), &f0, &f1)
            }
        }

        gmove(&f0, res)
        return
    }

    // unary
    gc.Cgen(nl, &f0)
    if n.Op != gc.OCONV && n.Op != gc.OPLUS {
        gins(foptoas(n.Op, n.Type, 0), nil, nil)
    }
    gmove(&f0, res)
    return
}
Example 6: cgen_bmul
/*
 * generate byte multiply:
 *    res = nl * nr
 * there is no 2-operand byte multiply instruction so
 * we do a full-width multiplication and truncate afterwards.
 */
func cgen_bmul(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
    if optoas(op, nl.Type) != x86.AIMULB {
        return false
    }

    // largest ullman on left.
    if nl.Ullman < nr.Ullman {
        nl, nr = nr, nl
    }

    // generate operands in "8-bit" registers.
    var n1b gc.Node
    gc.Regalloc(&n1b, nl.Type, res)
    gc.Cgen(nl, &n1b)
    var n2b gc.Node
    gc.Regalloc(&n2b, nr.Type, nil)
    gc.Cgen(nr, &n2b)

    // perform full-width multiplication.
    t := gc.Types[gc.TUINT64]
    if nl.Type.IsSigned() {
        t = gc.Types[gc.TINT64]
    }
    var n1 gc.Node
    gc.Nodreg(&n1, t, int(n1b.Reg))
    var n2 gc.Node
    gc.Nodreg(&n2, t, int(n2b.Reg))
    a := optoas(op, t)
    gins(a, &n2, &n1)

    // truncate.
    gmove(&n1, res)
    gc.Regfree(&n1b)
    gc.Regfree(&n2b)
    return true
}
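The strategy in the comment — widen, multiply full-width, truncate — is exactly what Go's language semantics require for byte-sized multiplication, as this standalone illustration shows:

// Byte multiply via full-width multiply and truncation, mirroring cgen_bmul:
// the product is formed in a wide register, then only the low 8 bits are kept.
func mulInt8(a, b int8) int8 {
    wide := int64(a) * int64(b) // full-width multiply
    return int8(wide)           // truncate back to a byte
}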
Example 7: savex
func savex(dr int, x *gc.Node, oldx *gc.Node, res *gc.Node, t *gc.Type) {
    r := gc.GetReg(dr)
    gc.Nodreg(x, gc.Types[gc.TINT32], dr)

    // save current ax and dx if they are live
    // and not the destination
    *oldx = gc.Node{}

    if r > 0 && !gc.Samereg(x, res) {
        gc.Tempname(oldx, gc.Types[gc.TINT32])
        gmove(x, oldx)
    }

    gc.Regalloc(x, t, x)
}
Example 8: cgen_shift
/*
 * generate shift according to op, one of:
 *    res = nl << nr
 *    res = nl >> nr
 */
func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
    a := optoas(op, nl.Type)

    if nr.Op == gc.OLITERAL {
        var n1 gc.Node
        gc.Regalloc(&n1, nl.Type, res)
        gc.Cgen(nl, &n1)
        sc := uint64(nr.Int64())
        if sc >= uint64(nl.Type.Width*8) {
            // large shift gets 2 shifts by width-1
            var n3 gc.Node
            gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)

            gins(a, &n3, &n1)
            gins(a, &n3, &n1)
        } else {
            gins(a, nr, &n1)
        }
        gmove(&n1, res)
        gc.Regfree(&n1)
        return
    }

    if nl.Ullman >= gc.UINF {
        var n4 gc.Node
        gc.Tempname(&n4, nl.Type)
        gc.Cgen(nl, &n4)
        nl = &n4
    }

    if nr.Ullman >= gc.UINF {
        var n5 gc.Node
        gc.Tempname(&n5, nr.Type)
        gc.Cgen(nr, &n5)
        nr = &n5
    }

    // Allow either uint32 or uint64 as shift type,
    // to avoid unnecessary conversion from uint32 to uint64
    // just to do the comparison.
    tcount := gc.Types[gc.Simtype[nr.Type.Etype]]

    if tcount.Etype < gc.TUINT32 {
        tcount = gc.Types[gc.TUINT32]
    }

    var n1 gc.Node
    gc.Regalloc(&n1, nr.Type, nil) // to hold the shift type in CX
    var n3 gc.Node
    gc.Regalloc(&n3, tcount, &n1) // to clear high bits of CX

    var n2 gc.Node
    gc.Regalloc(&n2, nl.Type, res)

    if nl.Ullman >= nr.Ullman {
        gc.Cgen(nl, &n2)
        gc.Cgen(nr, &n1)
        gmove(&n1, &n3)
    } else {
        gc.Cgen(nr, &n1)
        gmove(&n1, &n3)
        gc.Cgen(nl, &n2)
    }

    gc.Regfree(&n3)

    // test and fix up large shifts
    if !bounded {
        var rtmp gc.Node
        gc.Nodreg(&rtmp, tcount, mips.REGTMP)
        gc.Nodconst(&n3, tcount, nl.Type.Width*8)
        gins3(mips.ASGTU, &n3, &n1, &rtmp)
        p1 := ginsbranch(mips.ABNE, nil, &rtmp, nil, 0)
        if op == gc.ORSH && nl.Type.IsSigned() {
            gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
            gins(a, &n3, &n2)
        } else {
            gc.Nodconst(&n3, nl.Type, 0)
            gmove(&n3, &n2)
        }

        gc.Patch(p1, gc.Pc)
    }

    gins(a, &n1, &n2)
    gmove(&n2, res)
    gc.Regfree(&n1)
    gc.Regfree(&n2)
}
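The !bounded fix-up is needed because Go defines over-wide shift counts (the result is 0, or all sign bits for a signed right shift), while the raw machine shift instruction typically consumes only the low bits of the count. The semantics the fix-up enforces, restated in plain Go:

// What the generated code must compute for res = nl >> nr on a 64-bit
// signed operand, matching the op == gc.ORSH && IsSigned branch above.
func rsh64(x int64, s uint64) int64 {
    if s >= 64 { // oversized count: cannot rely on the raw instruction
        if x < 0 {
            return -1 // all sign bits, same as shifting by width-1
        }
        return 0
    }
    return x >> s
}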
Example 9: blockcopy
func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
    var dst gc.Node
    gc.Nodreg(&dst, gc.Types[gc.Tptr], x86.REG_DI)
    var src gc.Node
    gc.Nodreg(&src, gc.Types[gc.Tptr], x86.REG_SI)

    var tsrc gc.Node
    gc.Tempname(&tsrc, gc.Types[gc.Tptr])
    var tdst gc.Node
    gc.Tempname(&tdst, gc.Types[gc.Tptr])
    if !n.Addable {
        gc.Agen(n, &tsrc)
    }
    if !res.Addable {
        gc.Agen(res, &tdst)
    }
    if n.Addable {
        gc.Agen(n, &src)
    } else {
        gmove(&tsrc, &src)
    }

    if res.Op == gc.ONAME {
        gc.Gvardef(res)
    }

    if res.Addable {
        gc.Agen(res, &dst)
    } else {
        gmove(&tdst, &dst)
    }

    c := int32(w % 4) // bytes
    q := int32(w / 4) // doublewords

    // if we are copying forward on the stack and
    // the src and dst overlap, then reverse direction
    if osrc < odst && odst < osrc+w {
        // reverse direction
        gins(x86.ASTD, nil, nil) // set direction flag
        if c > 0 {
            gconreg(x86.AADDL, w-1, x86.REG_SI)
            gconreg(x86.AADDL, w-1, x86.REG_DI)

            gconreg(x86.AMOVL, int64(c), x86.REG_CX)
            gins(x86.AREP, nil, nil)   // repeat
            gins(x86.AMOVSB, nil, nil) // MOVB *(SI)-,*(DI)-
        }

        if q > 0 {
            if c > 0 {
                gconreg(x86.AADDL, -3, x86.REG_SI)
                gconreg(x86.AADDL, -3, x86.REG_DI)
            } else {
                gconreg(x86.AADDL, w-4, x86.REG_SI)
                gconreg(x86.AADDL, w-4, x86.REG_DI)
            }

            gconreg(x86.AMOVL, int64(q), x86.REG_CX)
            gins(x86.AREP, nil, nil)   // repeat
            gins(x86.AMOVSL, nil, nil) // MOVL *(SI)-,*(DI)-
        }

        // we leave with the flag clear
        gins(x86.ACLD, nil, nil)
    } else {
        gins(x86.ACLD, nil, nil) // paranoia. TODO(rsc): remove?

        // normal direction
        if q > 128 || (q >= 4 && gc.Nacl) {
            gconreg(x86.AMOVL, int64(q), x86.REG_CX)
            gins(x86.AREP, nil, nil)   // repeat
            gins(x86.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+
        } else if q >= 4 {
            p := gins(obj.ADUFFCOPY, nil, nil)
            p.To.Type = obj.TYPE_ADDR
            p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))

            // 10 and 128 = magic constants: see ../../runtime/asm_386.s
            p.To.Offset = 10 * (128 - int64(q))
        } else if !gc.Nacl && c == 0 {
            var cx gc.Node
            gc.Nodreg(&cx, gc.Types[gc.TINT32], x86.REG_CX)

            // We don't need the MOVSL side-effect of updating SI and DI,
            // and issuing a sequence of MOVLs directly is faster.
            src.Op = gc.OINDREG
            dst.Op = gc.OINDREG
            for q > 0 {
                gmove(&src, &cx) // MOVL x+(SI),CX
                gmove(&cx, &dst) // MOVL CX,x+(DI)
                src.Xoffset += 4
                dst.Xoffset += 4
                q--
            }
        } else {
            for q > 0 {
                gins(x86.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+
                q--
                //......... some code omitted here .........
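The overlap test osrc < odst && odst < osrc+w above decides whether the copy must run backwards so that source bytes are not overwritten before they are read. The same rule in isolation, on a plain byte slice (an illustrative helper, not compiler code):

// moveWithin copies n bytes from offset src to offset dst inside buf,
// choosing the copy direction the same way blockcopy does.
func moveWithin(buf []byte, dst, src, n int) {
    if src < dst && dst < src+n {
        for i := n - 1; i >= 0; i-- { // reverse direction
            buf[dst+i] = buf[src+i]
        }
    } else {
        for i := 0; i < n; i++ { // forward direction
            buf[dst+i] = buf[src+i]
        }
    }
}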
Example 10: ginsnop
func ginsnop() {
    var reg gc.Node
    gc.Nodreg(&reg, gc.Types[gc.TINT], s390x.REG_R0)
    gins(s390x.AOR, &reg, &reg)
}
Example 11: clearfat
func clearfat(nl *gc.Node) {
    /* clear a fat object */
    if gc.Debug['g'] != 0 {
        fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
    }

    w := uint64(nl.Type.Width)

    // Avoid taking the address for simple enough types.
    if gc.Componentgen(nil, nl) {
        return
    }

    c := w % 8 // bytes
    q := w / 8 // dwords

    var r0 gc.Node
    gc.Nodreg(&r0, gc.Types[gc.TUINT64], arm64.REGZERO)
    var dst gc.Node

    // REGRT1 is reserved on arm64, see arm64/gsubr.go.
    gc.Nodreg(&dst, gc.Types[gc.Tptr], arm64.REGRT1)
    gc.Agen(nl, &dst)

    var boff uint64
    if q > 128 {
        p := gins(arm64.ASUB, nil, &dst)
        p.From.Type = obj.TYPE_CONST
        p.From.Offset = 8

        var end gc.Node
        gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
        p = gins(arm64.AMOVD, &dst, &end)
        p.From.Type = obj.TYPE_ADDR
        p.From.Offset = int64(q * 8)

        p = gins(arm64.AMOVD, &r0, &dst)
        p.To.Type = obj.TYPE_MEM
        p.To.Offset = 8
        p.Scond = arm64.C_XPRE
        pl := p

        p = gcmp(arm64.ACMP, &dst, &end)
        gc.Patch(gc.Gbranch(arm64.ABNE, nil, 0), pl)

        gc.Regfree(&end)

        // The loop leaves R16 on the last zeroed dword
        boff = 8
    } else if q >= 4 && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend
        p := gins(arm64.ASUB, nil, &dst)
        p.From.Type = obj.TYPE_CONST
        p.From.Offset = 8
        f := gc.Sysfunc("duffzero")
        p = gins(obj.ADUFFZERO, nil, f)
        gc.Afunclit(&p.To, f)

        // 4 and 128 = magic constants: see ../../runtime/asm_arm64x.s
        p.To.Offset = int64(4 * (128 - q))

        // duffzero leaves R16 on the last zeroed dword
        boff = 8
    } else {
        var p *obj.Prog
        for t := uint64(0); t < q; t++ {
            p = gins(arm64.AMOVD, &r0, &dst)
            p.To.Type = obj.TYPE_MEM
            p.To.Offset = int64(8 * t)
        }

        boff = 8 * q
    }

    var p *obj.Prog
    for t := uint64(0); t < c; t++ {
        p = gins(arm64.AMOVB, &r0, &dst)
        p.To.Type = obj.TYPE_MEM
        p.To.Offset = int64(t + boff)
    }
}
Example 12: gmove
//......... some code omitted here .........
        gc.TFLOAT32<<16 | gc.TINT16,
        gc.TFLOAT32<<16 | gc.TINT8,
        gc.TFLOAT32<<16 | gc.TUINT16,
        gc.TFLOAT32<<16 | gc.TUINT8,
        gc.TFLOAT64<<16 | gc.TINT16,
        gc.TFLOAT64<<16 | gc.TINT8,
        gc.TFLOAT64<<16 | gc.TUINT16,
        gc.TFLOAT64<<16 | gc.TUINT8,
        gc.TFLOAT32<<16 | gc.TUINT32,
        gc.TFLOAT64<<16 | gc.TUINT32,
        gc.TFLOAT32<<16 | gc.TUINT64,
        gc.TFLOAT64<<16 | gc.TUINT64:
        bignodes()
        gc.Regalloc(&r1, gc.Types[gc.TFLOAT64], nil)
        gmove(f, &r1)
        if tt == gc.TUINT64 {
            gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
            gmove(&bigf, &r2)
            gins3(mips.ACMPGED, &r1, &r2, nil)
            p1 := gc.Gbranch(mips.ABFPF, nil, 0)
            gins(mips.ASUBD, &r2, &r1)
            gc.Patch(p1, gc.Pc)
            gc.Regfree(&r2)
        }

        gc.Regalloc(&r2, gc.Types[gc.TINT64], t)
        gins(mips.ATRUNCDV, &r1, &r1)
        gins(mips.AMOVV, &r1, &r2)
        gc.Regfree(&r1)

        if tt == gc.TUINT64 {
            p1 := gc.Gbranch(mips.ABFPF, nil, 0) // use FCR0 here again
            gc.Nodreg(&r1, gc.Types[gc.TINT64], mips.REGTMP)
            gmove(&bigi, &r1)
            gins(mips.AADDVU, &r1, &r2)
            gc.Patch(p1, gc.Pc)
        }

        gmove(&r2, t)
        gc.Regfree(&r2)
        return

        //warn("gmove: convert int to float not implemented: %N -> %N\n", f, t);
        //return;

    // algorithm is:
    //    if small enough, use native int64 -> float64 conversion.
    //    otherwise, halve (x -> (x>>1)|(x&1)), convert, and double.
    /*
     * integer to float
     */
    case gc.TINT32<<16 | gc.TFLOAT32,
        gc.TINT32<<16 | gc.TFLOAT64,
        gc.TINT64<<16 | gc.TFLOAT32,
        gc.TINT64<<16 | gc.TFLOAT64,
        gc.TINT16<<16 | gc.TFLOAT32,
        gc.TINT16<<16 | gc.TFLOAT64,
        gc.TINT8<<16 | gc.TFLOAT32,
        gc.TINT8<<16 | gc.TFLOAT64,
        gc.TUINT16<<16 | gc.TFLOAT32,
        gc.TUINT16<<16 | gc.TFLOAT64,
        gc.TUINT8<<16 | gc.TFLOAT32,
        gc.TUINT8<<16 | gc.TFLOAT64,
        gc.TUINT32<<16 | gc.TFLOAT32,
        gc.TUINT32<<16 | gc.TFLOAT64,
        gc.TUINT64<<16 | gc.TFLOAT32,
        //......... some code omitted here .........
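The "halve ... convert ... double" comment describes the standard trick for converting an unsigned 64-bit integer to float when only a signed conversion instruction exists; the float-to-unsigned path above (the bigf/bigi fix-up) is its mirror image: subtract 2^63 before converting, add it back after. Both restated in ordinary Go, as illustrations of the semantics only:

// uint64 -> float64 when only int64 -> float64 exists:
// halve while keeping the rounding bit, convert, then double.
func u64toF64(x uint64) float64 {
    if x < 1<<63 {
        return float64(int64(x)) // small enough: native conversion
    }
    half := (x >> 1) | (x & 1) // x -> (x>>1)|(x&1)
    return float64(int64(half)) * 2
}

// float64 -> uint64, mirroring the bigf/bigi fix-up above:
// values >= 2^63 are shifted down before truncation and back up after.
func f64toU64(f float64) uint64 {
    if f < 1<<63 {
        return uint64(int64(f)) // TRUNCDV path, fix-up branch not taken
    }
    return uint64(int64(f-1<<63)) + 1<<63 // ASUBD, then AADDVU of bigi
}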
Example 13: bgen_float
func bgen_float(n *gc.Node, wantTrue bool, likely int, to *obj.Prog) {
    nl := n.Left
    nr := n.Right
    op := n.Op

    if !wantTrue {
        // brcom is not valid on floats when NaN is involved.
        p1 := gc.Gbranch(obj.AJMP, nil, 0)
        p2 := gc.Gbranch(obj.AJMP, nil, 0)
        gc.Patch(p1, gc.Pc)

        // No need to avoid re-genning ninit.
        bgen_float(n, true, -likely, p2)

        gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
        gc.Patch(p2, gc.Pc)
        return
    }

    if gc.Thearch.Use387 {
        op = gc.Brrev(op) // because the args are stacked
        if op == gc.OGE || op == gc.OGT {
            // only < and <= work right with NaN; reverse if needed
            nl, nr = nr, nl
            op = gc.Brrev(op)
        }

        var ax, n2, tmp gc.Node
        gc.Nodreg(&tmp, nr.Type, x86.REG_F0)
        gc.Nodreg(&n2, nr.Type, x86.REG_F0+1)
        gc.Nodreg(&ax, gc.Types[gc.TUINT16], x86.REG_AX)
        if gc.Simsimtype(nr.Type) == gc.TFLOAT64 {
            if nl.Ullman > nr.Ullman {
                gc.Cgen(nl, &tmp)
                gc.Cgen(nr, &tmp)
                gins(x86.AFXCHD, &tmp, &n2)
            } else {
                gc.Cgen(nr, &tmp)
                gc.Cgen(nl, &tmp)
            }
            gins(x86.AFUCOMPP, &tmp, &n2)
        } else {
            // TODO(rsc): The moves back and forth to memory
            // here are for truncating the value to 32 bits.
            // This handles 32-bit comparison but presumably
            // all the other ops have the same problem.
            // We need to figure out what the right general
            // solution is, besides telling people to use float64.
            var t1 gc.Node
            gc.Tempname(&t1, gc.Types[gc.TFLOAT32])
            var t2 gc.Node
            gc.Tempname(&t2, gc.Types[gc.TFLOAT32])
            gc.Cgen(nr, &t1)
            gc.Cgen(nl, &t2)
            gmove(&t2, &tmp)
            gins(x86.AFCOMFP, &t1, &tmp)
        }
        gins(x86.AFSTSW, nil, &ax)
        gins(x86.ASAHF, nil, nil)
    } else {
        // Not 387
        if !nl.Addable {
            nl = gc.CgenTemp(nl)
        }
        if !nr.Addable {
            nr = gc.CgenTemp(nr)
        }

        var n2 gc.Node
        gc.Regalloc(&n2, nr.Type, nil)
        gmove(nr, &n2)
        nr = &n2

        if nl.Op != gc.OREGISTER {
            var n3 gc.Node
            gc.Regalloc(&n3, nl.Type, nil)
            gmove(nl, &n3)
            nl = &n3
        }

        if op == gc.OGE || op == gc.OGT {
            // only < and <= work right with NaN; reverse if needed
            nl, nr = nr, nl
            op = gc.Brrev(op)
        }

        gins(foptoas(gc.OCMP, nr.Type, 0), nl, nr)
        if nl.Op == gc.OREGISTER {
            gc.Regfree(nl)
        }
        gc.Regfree(nr)
    }

    switch op {
    case gc.OEQ:
        // neither NE nor P
        p1 := gc.Gbranch(x86.AJNE, nil, -likely)
        p2 := gc.Gbranch(x86.AJPS, nil, -likely)
        gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
        gc.Patch(p1, gc.Pc)
        //......... some code omitted here .........
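The repeated "only < and <= work right with NaN" comments come from IEEE 754 semantics: every ordered comparison involving NaN is false, so a branch cannot simply be complemented (which is what brcom would do). A two-line demonstration:

import (
    "fmt"
    "math"
)

func nanDemo() {
    a, b := math.NaN(), 1.0
    fmt.Println(a < b, a >= b) // false false — !(a < b) is not the same as a >= b
}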
Example 14: ginscmp
func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
    if !t.IsFloat() && (op == gc.OLT || op == gc.OGE) {
        // swap nodes to fit SGT instruction
        n1, n2 = n2, n1
    }
    if t.IsFloat() && (op == gc.OLT || op == gc.OLE) {
        // swap nodes to fit CMPGT, CMPGE instructions and reverse relation
        n1, n2 = n2, n1
        if op == gc.OLT {
            op = gc.OGT
        } else {
            op = gc.OGE
        }
    }

    var r1, r2, g1, g2 gc.Node
    gc.Regalloc(&r1, t, n1)
    gc.Regalloc(&g1, n1.Type, &r1)
    gc.Cgen(n1, &g1)
    gmove(&g1, &r1)

    gc.Regalloc(&r2, t, n2)
    gc.Regalloc(&g2, n1.Type, &r2)
    gc.Cgen(n2, &g2)
    gmove(&g2, &r2)

    var p *obj.Prog
    var ntmp gc.Node
    gc.Nodreg(&ntmp, gc.Types[gc.TINT], mips.REGTMP)

    switch gc.Simtype[t.Etype] {
    case gc.TINT8,
        gc.TINT16,
        gc.TINT32,
        gc.TINT64:
        if op == gc.OEQ || op == gc.ONE {
            p = ginsbranch(optoas(op, t), nil, &r1, &r2, likely)
        } else {
            gins3(mips.ASGT, &r1, &r2, &ntmp)
            p = ginsbranch(optoas(op, t), nil, &ntmp, nil, likely)
        }

    case gc.TBOOL,
        gc.TUINT8,
        gc.TUINT16,
        gc.TUINT32,
        gc.TUINT64,
        gc.TPTR32,
        gc.TPTR64:
        if op == gc.OEQ || op == gc.ONE {
            p = ginsbranch(optoas(op, t), nil, &r1, &r2, likely)
        } else {
            gins3(mips.ASGTU, &r1, &r2, &ntmp)
            p = ginsbranch(optoas(op, t), nil, &ntmp, nil, likely)
        }

    case gc.TFLOAT32:
        switch op {
        default:
            gc.Fatalf("ginscmp: no entry for op=%s type=%v", op, t)

        case gc.OEQ,
            gc.ONE:
            gins3(mips.ACMPEQF, &r1, &r2, nil)

        case gc.OGE:
            gins3(mips.ACMPGEF, &r1, &r2, nil)

        case gc.OGT:
            gins3(mips.ACMPGTF, &r1, &r2, nil)
        }
        p = gc.Gbranch(optoas(op, t), nil, likely)

    case gc.TFLOAT64:
        switch op {
        default:
            gc.Fatalf("ginscmp: no entry for op=%s type=%v", op, t)

        case gc.OEQ,
            gc.ONE:
            gins3(mips.ACMPEQD, &r1, &r2, nil)

        case gc.OGE:
            gins3(mips.ACMPGED, &r1, &r2, nil)

        case gc.OGT:
            gins3(mips.ACMPGTD, &r1, &r2, nil)
        }
        p = gc.Gbranch(optoas(op, t), nil, likely)
    }

    gc.Regfree(&g2)
    gc.Regfree(&r2)
    gc.Regfree(&g1)
    gc.Regfree(&r1)
    return p
}
Example 15: cgen64
/*
 * attempt to generate 64-bit
 *    res = n
 * return 1 on success, 0 if op not handled.
 */
func cgen64(n *gc.Node, res *gc.Node) {
    if res.Op != gc.OINDREG && res.Op != gc.ONAME {
        gc.Dump("n", n)
        gc.Dump("res", res)
        gc.Fatalf("cgen64 %v of %v", n.Op, res.Op)
    }

    switch n.Op {
    default:
        gc.Fatalf("cgen64 %v", n.Op)

    case gc.OMINUS:
        gc.Cgen(n.Left, res)
        var hi1 gc.Node
        var lo1 gc.Node
        split64(res, &lo1, &hi1)
        gins(x86.ANEGL, nil, &lo1)
        gins(x86.AADCL, ncon(0), &hi1)
        gins(x86.ANEGL, nil, &hi1)
        splitclean()
        return

    case gc.OCOM:
        gc.Cgen(n.Left, res)
        var lo1 gc.Node
        var hi1 gc.Node
        split64(res, &lo1, &hi1)
        gins(x86.ANOTL, nil, &lo1)
        gins(x86.ANOTL, nil, &hi1)
        splitclean()
        return

    // binary operators.
    // common setup below.
    case gc.OADD,
        gc.OSUB,
        gc.OMUL,
        gc.OLROT,
        gc.OLSH,
        gc.ORSH,
        gc.OAND,
        gc.OOR,
        gc.OXOR:
        break
    }

    l := n.Left
    r := n.Right
    if !l.Addable {
        var t1 gc.Node
        gc.Tempname(&t1, l.Type)
        gc.Cgen(l, &t1)
        l = &t1
    }

    if r != nil && !r.Addable {
        var t2 gc.Node
        gc.Tempname(&t2, r.Type)
        gc.Cgen(r, &t2)
        r = &t2
    }

    var ax gc.Node
    gc.Nodreg(&ax, gc.Types[gc.TINT32], x86.REG_AX)
    var cx gc.Node
    gc.Nodreg(&cx, gc.Types[gc.TINT32], x86.REG_CX)
    var dx gc.Node
    gc.Nodreg(&dx, gc.Types[gc.TINT32], x86.REG_DX)

    // Setup for binary operation.
    var hi1 gc.Node
    var lo1 gc.Node
    split64(l, &lo1, &hi1)

    var lo2 gc.Node
    var hi2 gc.Node
    if gc.Is64(r.Type) {
        split64(r, &lo2, &hi2)
    }

    // Do op. Leave result in DX:AX.
    switch n.Op {
    // TODO: Constants
    case gc.OADD:
        gins(x86.AMOVL, &lo1, &ax)
        gins(x86.AMOVL, &hi1, &dx)
        gins(x86.AADDL, &lo2, &ax)
        gins(x86.AADCL, &hi2, &dx)

    // TODO: Constants.
    case gc.OSUB:
        gins(x86.AMOVL, &lo1, &ax)
        gins(x86.AMOVL, &hi1, &dx)
        //......... some code omitted here .........