This page collects typical usage examples of the Golang function cmd/internal/gc.Nodreg. If you have been wondering what Nodreg does, how to call it, and what it looks like in real code, the curated examples below should help.
The following 15 code examples of Nodreg are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Golang examples.
Example 1: cgen_hmul
/*
* generate high multiply:
* res = (nl*nr) >> width
*/
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	t := nl.Type
	a := optoas(gc.OHMUL, t)
	if nl.Ullman < nr.Ullman {
		tmp := nl
		nl = nr
		nr = tmp
	}

	var n1 gc.Node
	gc.Cgenr(nl, &n1, res)
	var n2 gc.Node
	gc.Cgenr(nr, &n2, nil)
	var ax gc.Node
	gc.Nodreg(&ax, t, x86.REG_AX)
	gmove(&n1, &ax)
	gins(a, &n2, nil)
	gc.Regfree(&n2)
	gc.Regfree(&n1)

	var dx gc.Node
	if t.Width == 1 {
		// byte multiply behaves differently.
		gc.Nodreg(&ax, t, x86.REG_AH)
		gc.Nodreg(&dx, t, x86.REG_DX)
		gmove(&ax, &dx)
	}

	gc.Nodreg(&dx, t, x86.REG_DX)
	gmove(&dx, res)
}
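For readers unfamiliar with the operation itself: a high multiply yields the upper half of the double-width product, which is exactly what the one-operand x86 IMUL/MUL leaves in DX, and the code above then moves it into res. A minimal sketch of the semantics in ordinary Go using math/bits (illustration only, not compiler code):

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// hmul(x, y) for 64-bit operands is the top 64 bits
	// of the full 128-bit product.
	x := uint64(1) << 40
	y := uint64(1) << 40
	hi, lo := bits.Mul64(x, y)
	fmt.Println(hi, lo) // 65536 0, i.e. (x*y)>>64 == 1<<16
}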
Example 2: gconreg
/*
* generate
* as $c, reg
*/
func gconreg(as int, c int64, reg int) {
	var nr gc.Node

	switch as {
	case x86.AADDL,
		x86.AMOVL,
		x86.ALEAL:
		gc.Nodreg(&nr, gc.Types[gc.TINT32], reg)

	default:
		gc.Nodreg(&nr, gc.Types[gc.TINT64], reg)
	}

	ginscon(as, c, &nr)
}
Example 3: gconreg
/*
* generate
* as $c, reg
*/
func gconreg(as int, c int64, reg int) {
	var n1 gc.Node
	var n2 gc.Node
	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
	gc.Nodreg(&n2, gc.Types[gc.TINT64], reg)
	gins(as, &n1, &n2)
}
Example 4: ginsnop
func ginsnop() {
	// This is actually not the x86 NOP anymore,
	// but at the point where it gets used, AX is dead
	// so it's okay if we lose the high bits.
	var reg gc.Node
	gc.Nodreg(&reg, gc.Types[gc.TINT], x86.REG_AX)
	gins(x86.AXCHGL, &reg, &reg)
}
Example 5: cgen_bmul
/*
* generate byte multiply:
* res = nl * nr
* there is no 2-operand byte multiply instruction so
* we do a full-width multiplication and truncate afterwards.
*/
func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
	if optoas(op, nl.Type) != x86.AIMULB {
		return false
	}

	// largest ullman on left.
	if nl.Ullman < nr.Ullman {
		tmp := nl
		nl = nr
		nr = tmp
	}

	// generate operands in "8-bit" registers.
	var n1b gc.Node
	gc.Regalloc(&n1b, nl.Type, res)
	gc.Cgen(nl, &n1b)
	var n2b gc.Node
	gc.Regalloc(&n2b, nr.Type, nil)
	gc.Cgen(nr, &n2b)

	// perform full-width multiplication.
	t := gc.Types[gc.TUINT64]
	if gc.Issigned[nl.Type.Etype] {
		t = gc.Types[gc.TINT64]
	}
	var n1 gc.Node
	gc.Nodreg(&n1, t, int(n1b.Reg))
	var n2 gc.Node
	gc.Nodreg(&n2, t, int(n2b.Reg))
	a := optoas(op, t)
	gins(a, &n2, &n1)

	// truncate.
	gmove(&n1, res)
	gc.Regfree(&n1b)
	gc.Regfree(&n2b)
	return true
}
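The trick relies on a property of two's-complement arithmetic: the low 8 bits of a full-width product equal the 8-bit product. A quick illustration in plain Go (not compiler code):

package main

import "fmt"

func main() {
	a, b := int8(100), int8(3)
	wide := int64(a) * int64(b) // 300: full-width product
	fmt.Println(int8(wide))     // 44: the low 8 bits of 300
	fmt.Println(a * b)          // 44: same result via native 8-bit wraparound
}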
Example 6: cgen_float387
// floating-point. 387 (not SSE2)
func cgen_float387(n *gc.Node, res *gc.Node) {
	var f0 gc.Node
	var f1 gc.Node

	nl := n.Left
	nr := n.Right
	gc.Nodreg(&f0, nl.Type, x86.REG_F0)
	gc.Nodreg(&f1, n.Type, x86.REG_F0+1)
	if nr != nil {
		// binary
		if nl.Ullman >= nr.Ullman {
			gc.Cgen(nl, &f0)
			if nr.Addable != 0 {
				gins(foptoas(int(n.Op), n.Type, 0), nr, &f0)
			} else {
				gc.Cgen(nr, &f0)
				gins(foptoas(int(n.Op), n.Type, Fpop), &f0, &f1)
			}
		} else {
			gc.Cgen(nr, &f0)
			if nl.Addable != 0 {
				gins(foptoas(int(n.Op), n.Type, Frev), nl, &f0)
			} else {
				gc.Cgen(nl, &f0)
				gins(foptoas(int(n.Op), n.Type, Frev|Fpop), &f0, &f1)
			}
		}

		gmove(&f0, res)
		return
	}

	// unary
	gc.Cgen(nl, &f0)
	if n.Op != gc.OCONV && n.Op != gc.OPLUS {
		gins(foptoas(int(n.Op), n.Type, 0), nil, nil)
	}
	gmove(&f0, res)
	return
}
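Background for the register names: the 387 keeps floating-point values on an eight-slot hardware register stack, with F0 the top of stack and F0+1 the slot below it. The Fpop flag selects an instruction form that pops the stack after executing, and Frev swaps the operand order, which is how all four branches above manage to leave the result in F0 for the final gmove.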
Example 7: cgen_hmul
/*
* generate high multiply:
* res = (nl*nr) >> width
*/
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	var n1 gc.Node
	var n2 gc.Node
	var ax gc.Node
	var dx gc.Node

	t := nl.Type
	a := optoas(gc.OHMUL, t)

	// gen nl in n1.
	gc.Tempname(&n1, t)
	gc.Cgen(nl, &n1)

	// gen nr in n2.
	gc.Regalloc(&n2, t, res)
	gc.Cgen(nr, &n2)

	// multiply.
	gc.Nodreg(&ax, t, x86.REG_AX)
	gmove(&n2, &ax)
	gins(a, &n1, nil)
	gc.Regfree(&n2)

	if t.Width == 1 {
		// byte multiply behaves differently.
		gc.Nodreg(&ax, t, x86.REG_AH)
		gc.Nodreg(&dx, t, x86.REG_DX)
		gmove(&ax, &dx)
	}

	gc.Nodreg(&dx, t, x86.REG_DX)
	gmove(&dx, res)
}
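Compared with the amd64 version in Example 1, this 386 variant evaluates nl into a stack temporary (gc.Tempname) rather than a second register, presumably to reduce pressure on the much smaller 386 register file; the one-operand multiply can take its operand directly from memory, so gins(a, &n1, nil) works on the temporary as-is.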
Example 8: savex
func savex(dr int, x *gc.Node, oldx *gc.Node, res *gc.Node, t *gc.Type) {
	r := int(reg[dr])
	gc.Nodreg(x, gc.Types[gc.TINT32], dr)

	// save current ax and dx if they are live
	// and not the destination
	*oldx = gc.Node{}

	if r > 0 && !gc.Samereg(x, res) {
		gc.Tempname(oldx, gc.Types[gc.TINT32])
		gmove(x, oldx)
	}

	gc.Regalloc(x, t, x)
}
Example 9: savex
/*
* register dr is one of the special ones (AX, CX, DI, SI, etc.).
* we need to use it. if it is already allocated as a temporary
* (r > 1; can only happen if a routine like sgen passed a
* special as cgen's res and then cgen used regalloc to reuse
* it as its own temporary), then move it for now to another
* register. caller must call restx to move it back.
* the move is not necessary if dr == res, because res is
* known to be dead.
*/
func savex(dr int, x *gc.Node, oldx *gc.Node, res *gc.Node, t *gc.Type) {
	r := int(reg[dr])

	// save current ax and dx if they are live
	// and not the destination
	*oldx = gc.Node{}

	gc.Nodreg(x, t, dr)
	if r > 1 && !gc.Samereg(x, res) {
		gc.Regalloc(oldx, gc.Types[gc.TINT64], nil)
		x.Type = gc.Types[gc.TINT64]
		gmove(x, oldx)
		x.Type = t
		oldx.Ostk = int32(r) // squirrel away old r value
		reg[dr] = 1
	}
}
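The bookkeeping here is subtle: reg[dr] counts outstanding allocations of the fixed register, so r > 1 means some caller already holds it as a temporary. The old count is squirreled into oldx.Ostk and reset to 1; a matching restx routine (not shown in this excerpt) is expected to move the value back and restore the count.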
Example 10: ginscall
/*
* generate:
* call f
* proc=-1 normal call but no return
* proc=0 normal call
* proc=1 goroutine run in new proc
* proc=2 defer call save away stack
* proc=3 normal call to C pointer (not Go func value)
*/
func ginscall(f *gc.Node, proc int) {
	if f.Type != nil {
		extra := int32(0)
		if proc == 1 || proc == 2 {
			extra = 2 * int32(gc.Widthptr)
		}
		gc.Setmaxarg(f.Type, extra)
	}

	switch proc {
	default:
		gc.Fatal("ginscall: bad proc %d", proc)

	case 0, // normal call
		-1: // normal call but no return
		if f.Op == gc.ONAME && f.Class == gc.PFUNC {
			if f == gc.Deferreturn {
				// Deferred calls will appear to be returning to
				// the CALL deferreturn(SB) that we are about to emit.
				// However, the stack trace code will show the line
				// of the instruction byte before the return PC.
				// To avoid that being an unrelated instruction,
				// insert a ppc64 NOP that we will have the right line number.
				// The ppc64 NOP is really or r0, r0, r0; use that description
				// because the NOP pseudo-instruction would be removed by
				// the linker.
				var reg gc.Node
				gc.Nodreg(&reg, gc.Types[gc.TINT], ppc64.REG_R0)
				gins(ppc64.AOR, &reg, &reg)
			}

			p := gins(ppc64.ABL, nil, f)
			gc.Afunclit(&p.To, f)
			if proc == -1 || gc.Noreturn(p) {
				gins(obj.AUNDEF, nil, nil)
			}
			break
		}

		var reg gc.Node
		gc.Nodreg(&reg, gc.Types[gc.Tptr], ppc64.REGCTXT)
		var r1 gc.Node
		gc.Nodreg(&r1, gc.Types[gc.Tptr], ppc64.REG_R3)
		gmove(f, &reg)
		reg.Op = gc.OINDREG
		gmove(&reg, &r1)
		reg.Op = gc.OREGISTER
		ginsBL(&reg, &r1)

	case 3: // normal call of c function pointer
		ginsBL(nil, f)

	case 1, // call in new proc (go)
		2: // deferred call (defer)
		var con gc.Node
		gc.Nodconst(&con, gc.Types[gc.TINT64], int64(gc.Argsize(f.Type)))

		var reg gc.Node
		gc.Nodreg(&reg, gc.Types[gc.TINT64], ppc64.REG_R3)
		var reg2 gc.Node
		gc.Nodreg(&reg2, gc.Types[gc.TINT64], ppc64.REG_R4)
		gmove(f, &reg)

		gmove(&con, &reg2)
		p := gins(ppc64.AMOVW, &reg2, nil)
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = ppc64.REGSP
		p.To.Offset = 8

		p = gins(ppc64.AMOVD, &reg, nil)
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = ppc64.REGSP
		p.To.Offset = 16

		if proc == 1 {
			ginscall(gc.Newproc, 0)
		} else {
			if gc.Hasdefer == 0 {
				gc.Fatal("hasdefer=0 but has defer")
			}
			ginscall(gc.Deferproc, 0)
		}

		if proc == 2 {
			gc.Nodreg(&reg, gc.Types[gc.TINT64], ppc64.REG_R3)
			p := gins(ppc64.ACMP, &reg, nil)
			p.To.Type = obj.TYPE_REG
			p.To.Reg = ppc64.REGZERO
			p = gc.Gbranch(ppc64.ABEQ, nil, +1)
			cgen_ret(nil)
//......... (remainder of code omitted) .........
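The tail of the defer case, just before the excerpt cuts off, compares Deferproc's result in R3 against zero and, when it is nonzero, returns from the function immediately via cgen_ret(nil); this is the mechanism by which a deferred call that recovers unwinds its caller's frame.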
Example 11: cgen_shift
/*
* generate shift according to op, one of:
* res = nl << nr
* res = nl >> nr
*/
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	a := optoas(op, nl.Type)

	if nr.Op == gc.OLITERAL {
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		gc.Cgen(nl, &n1)
		sc := uint64(gc.Mpgetfix(nr.Val.U.Xval))
		if sc >= uint64(nl.Type.Width*8) {
			// large shift gets 2 shifts by width-1
			var n3 gc.Node
			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)

			gins(a, &n3, &n1)
			gins(a, &n3, &n1)
		} else {
			gins(a, nr, &n1)
		}
		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	if nl.Ullman >= gc.UINF {
		var n4 gc.Node
		gc.Tempname(&n4, nl.Type)
		gc.Cgen(nl, &n4)
		nl = &n4
	}

	if nr.Ullman >= gc.UINF {
		var n5 gc.Node
		gc.Tempname(&n5, nr.Type)
		gc.Cgen(nr, &n5)
		nr = &n5
	}

	rcx := int(reg[x86.REG_CX])
	var n1 gc.Node
	gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)

	// Allow either uint32 or uint64 as shift type,
	// to avoid unnecessary conversion from uint32 to uint64
	// just to do the comparison.
	tcount := gc.Types[gc.Simtype[nr.Type.Etype]]

	if tcount.Etype < gc.TUINT32 {
		tcount = gc.Types[gc.TUINT32]
	}

	gc.Regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
	var n3 gc.Node
	gc.Regalloc(&n3, tcount, &n1) // to clear high bits of CX

	var cx gc.Node
	gc.Nodreg(&cx, gc.Types[gc.TUINT64], x86.REG_CX)

	var oldcx gc.Node
	if rcx > 0 && !gc.Samereg(&cx, res) {
		gc.Regalloc(&oldcx, gc.Types[gc.TUINT64], nil)
		gmove(&cx, &oldcx)
	}

	cx.Type = tcount

	var n2 gc.Node
	if gc.Samereg(&cx, res) {
		gc.Regalloc(&n2, nl.Type, nil)
	} else {
		gc.Regalloc(&n2, nl.Type, res)
	}
	if nl.Ullman >= nr.Ullman {
		gc.Cgen(nl, &n2)
		gc.Cgen(nr, &n1)
		gmove(&n1, &n3)
	} else {
		gc.Cgen(nr, &n1)
		gmove(&n1, &n3)
		gc.Cgen(nl, &n2)
	}

	gc.Regfree(&n3)

	// test and fix up large shifts
	if !bounded {
		gc.Nodconst(&n3, tcount, nl.Type.Width*8)
		gins(optoas(gc.OCMP, tcount), &n1, &n3)
		p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
		if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
			gins(a, &n3, &n2)
		} else {
			gc.Nodconst(&n3, nl.Type, 0)
			gmove(&n3, &n2)
		}
//......... (remainder of code omitted) .........
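The fixup code exists because Go defines the result of shifts by counts at or beyond the operand width, while the hardware masks the shift count to 5 or 6 bits. The semantics the generated code must reproduce, in ordinary Go (illustration only):

package main

import "fmt"

func main() {
	var x int32 = -8
	fmt.Println(x >> 40) // -1: over-wide signed right shift fills with sign bits
	var y uint32 = 8
	fmt.Println(y << 40) // 0: over-wide left shift produces zero
	fmt.Println(y >> 40) // 0: likewise for unsigned right shift
}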
Example 12: cgen64
/*
 * generate 64-bit
 *	res = n
 * ops this routine does not handle abort the compile via gc.Fatal.
 */
func cgen64(n *gc.Node, res *gc.Node) {
	if res.Op != gc.OINDREG && res.Op != gc.ONAME {
		gc.Dump("n", n)
		gc.Dump("res", res)
		gc.Fatal("cgen64 %v of %v", gc.Oconv(int(n.Op), 0), gc.Oconv(int(res.Op), 0))
	}

	switch n.Op {
	default:
		gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0))

	case gc.OMINUS:
		gc.Cgen(n.Left, res)
		var hi1 gc.Node
		var lo1 gc.Node
		split64(res, &lo1, &hi1)
		gins(x86.ANEGL, nil, &lo1)
		gins(x86.AADCL, ncon(0), &hi1)
		gins(x86.ANEGL, nil, &hi1)
		splitclean()
		return

	case gc.OCOM:
		gc.Cgen(n.Left, res)
		var lo1 gc.Node
		var hi1 gc.Node
		split64(res, &lo1, &hi1)
		gins(x86.ANOTL, nil, &lo1)
		gins(x86.ANOTL, nil, &hi1)
		splitclean()
		return

		// binary operators.
		// common setup below.
	case gc.OADD,
		gc.OSUB,
		gc.OMUL,
		gc.OLROT,
		gc.OLSH,
		gc.ORSH,
		gc.OAND,
		gc.OOR,
		gc.OXOR:
		break
	}

	l := n.Left
	r := n.Right
	if !l.Addable {
		var t1 gc.Node
		gc.Tempname(&t1, l.Type)
		gc.Cgen(l, &t1)
		l = &t1
	}

	if r != nil && !r.Addable {
		var t2 gc.Node
		gc.Tempname(&t2, r.Type)
		gc.Cgen(r, &t2)
		r = &t2
	}

	var ax gc.Node
	gc.Nodreg(&ax, gc.Types[gc.TINT32], x86.REG_AX)
	var cx gc.Node
	gc.Nodreg(&cx, gc.Types[gc.TINT32], x86.REG_CX)
	var dx gc.Node
	gc.Nodreg(&dx, gc.Types[gc.TINT32], x86.REG_DX)

	// Setup for binary operation.
	var hi1 gc.Node
	var lo1 gc.Node
	split64(l, &lo1, &hi1)

	var lo2 gc.Node
	var hi2 gc.Node
	if gc.Is64(r.Type) {
		split64(r, &lo2, &hi2)
	}

	// Do op. Leave result in DX:AX.
	switch n.Op {
	// TODO: Constants
	case gc.OADD:
		gins(x86.AMOVL, &lo1, &ax)
		gins(x86.AMOVL, &hi1, &dx)
		gins(x86.AADDL, &lo2, &ax)
		gins(x86.AADCL, &hi2, &dx)

	// TODO: Constants.
	case gc.OSUB:
		gins(x86.AMOVL, &lo1, &ax)
		gins(x86.AMOVL, &hi1, &dx)
//......... (remainder of code omitted) .........
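The OADD case shows the standard split-carry pattern: add the low 32-bit halves, then add the high halves plus the carry, which is exactly what the ADDL/ADCL pair does in two instructions. The same computation sketched in plain Go (illustration only):

package main

import "fmt"

func add64(lo1, hi1, lo2, hi2 uint32) (lo, hi uint32) {
	lo = lo1 + lo2
	carry := uint32(0)
	if lo < lo1 { // unsigned overflow of the low word
		carry = 1
	}
	hi = hi1 + hi2 + carry // what ADCL does in one instruction
	return
}

func main() {
	lo, hi := add64(0xFFFFFFFF, 0, 1, 0) // (1<<32 - 1) + 1
	fmt.Printf("hi=%d lo=%d\n", hi, lo)  // hi=1 lo=0
}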
Example 13: gmove
//......... (preceding code omitted) .........
		gc.TFLOAT32<<16 | gc.TUINT64,
		gc.TFLOAT64<<16 | gc.TUINT64:
		bignodes()

		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[ft], f)
		gmove(f, &r1)
		if tt == gc.TUINT64 {
			gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
			gmove(&bigf, &r2)
			gins(ppc64.AFCMPU, &r1, &r2)
			p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1))
			gins(ppc64.AFSUB, &r2, &r1)
			gc.Patch(p1, gc.Pc)
			gc.Regfree(&r2)
		}

		gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
		var r3 gc.Node
		gc.Regalloc(&r3, gc.Types[gc.TINT64], t)
		gins(ppc64.AFCTIDZ, &r1, &r2)
		p1 := (*obj.Prog)(gins(ppc64.AFMOVD, &r2, nil))
		p1.To.Type = obj.TYPE_MEM
		p1.To.Reg = ppc64.REGSP
		p1.To.Offset = -8
		p1 = gins(ppc64.AMOVD, nil, &r3)
		p1.From.Type = obj.TYPE_MEM
		p1.From.Reg = ppc64.REGSP
		p1.From.Offset = -8
		gc.Regfree(&r2)
		gc.Regfree(&r1)
		if tt == gc.TUINT64 {
			p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1)) // use CR0 here again
			gc.Nodreg(&r1, gc.Types[gc.TINT64], ppc64.REGTMP)
			gins(ppc64.AMOVD, &bigi, &r1)
			gins(ppc64.AADD, &r1, &r3)
			gc.Patch(p1, gc.Pc)
		}

		gmove(&r3, t)
		gc.Regfree(&r3)
		return

		//warn("gmove: convert int to float not implemented: %N -> %N\n", f, t);
		//return;

		// algorithm is:
		//	if small enough, use native int64 -> uint64 conversion.
		//	otherwise, halve (rounding to odd?), convert, and double.
	/*
	 * integer to float
	 */
	case gc.TINT32<<16 | gc.TFLOAT32,
		gc.TINT32<<16 | gc.TFLOAT64,
		gc.TINT64<<16 | gc.TFLOAT32,
		gc.TINT64<<16 | gc.TFLOAT64,
		gc.TINT16<<16 | gc.TFLOAT32,
		gc.TINT16<<16 | gc.TFLOAT64,
		gc.TINT8<<16 | gc.TFLOAT32,
		gc.TINT8<<16 | gc.TFLOAT64,
		gc.TUINT16<<16 | gc.TFLOAT32,
		gc.TUINT16<<16 | gc.TFLOAT64,
		gc.TUINT8<<16 | gc.TFLOAT32,
		gc.TUINT8<<16 | gc.TFLOAT64,
		gc.TUINT32<<16 | gc.TFLOAT32,
		gc.TUINT32<<16 | gc.TFLOAT64,
		gc.TUINT64<<16 | gc.TFLOAT32,
//......... (remainder of code omitted) .........
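What the float-to-uint64 branch implements: FCTIDZ converts only to a signed 64-bit integer, so the code first compares against bigf (2^63 as a float, set up by bignodes) and subtracts it when the value is too large for the signed range, converts, and then adds bigi (2^63 as an integer) back through REGTMP. The converted value also round-trips through a stack slot at offset -8 from SP, presumably because this ppc64 target has no direct floating-point-to-integer register move.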
Example 14: stackcopy
func stackcopy(n, res *gc.Node, osrc, odst, w int64) {
	var dst gc.Node
	gc.Nodreg(&dst, gc.Types[gc.Tptr], x86.REG_DI)
	var src gc.Node
	gc.Nodreg(&src, gc.Types[gc.Tptr], x86.REG_SI)

	var tsrc gc.Node
	gc.Tempname(&tsrc, gc.Types[gc.Tptr])
	var tdst gc.Node
	gc.Tempname(&tdst, gc.Types[gc.Tptr])

	if n.Addable == 0 {
		gc.Agen(n, &tsrc)
	}

	if res.Addable == 0 {
		gc.Agen(res, &tdst)
	}

	if n.Addable != 0 {
		gc.Agen(n, &src)
	} else {
		gmove(&tsrc, &src)
	}

	if res.Op == gc.ONAME {
		gc.Gvardef(res)
	}

	if res.Addable != 0 {
		gc.Agen(res, &dst)
	} else {
		gmove(&tdst, &dst)
	}

	c := int32(w % 4) // bytes
	q := int32(w / 4) // doublewords

	// if we are copying forward on the stack and
	// the src and dst overlap, then reverse direction
	if osrc < odst && int64(odst) < int64(osrc)+w {
		// reverse direction
		gins(x86.ASTD, nil, nil) // set direction flag
		if c > 0 {
			gconreg(x86.AADDL, w-1, x86.REG_SI)
			gconreg(x86.AADDL, w-1, x86.REG_DI)

			gconreg(x86.AMOVL, int64(c), x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSB, nil, nil) // MOVB *(SI)-,*(DI)-
		}

		if q > 0 {
			if c > 0 {
				gconreg(x86.AADDL, -3, x86.REG_SI)
				gconreg(x86.AADDL, -3, x86.REG_DI)
			} else {
				gconreg(x86.AADDL, w-4, x86.REG_SI)
				gconreg(x86.AADDL, w-4, x86.REG_DI)
			}

			gconreg(x86.AMOVL, int64(q), x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSL, nil, nil) // MOVL *(SI)-,*(DI)-
		}

		// we leave with the flag clear
		gins(x86.ACLD, nil, nil)
	} else {
		gins(x86.ACLD, nil, nil) // paranoia. TODO(rsc): remove?

		// normal direction
		if q > 128 || (q >= 4 && gc.Nacl) {
			gconreg(x86.AMOVL, int64(q), x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+
		} else if q >= 4 {
			p := gins(obj.ADUFFCOPY, nil, nil)
			p.To.Type = obj.TYPE_ADDR
			p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))

			// 10 and 128 = magic constants: see ../../runtime/asm_386.s
			p.To.Offset = 10 * (128 - int64(q))
		} else if !gc.Nacl && c == 0 {
			var cx gc.Node
			gc.Nodreg(&cx, gc.Types[gc.TINT32], x86.REG_CX)

			// We don't need the MOVSL side-effect of updating SI and DI,
			// and issuing a sequence of MOVLs directly is faster.
			src.Op = gc.OINDREG

			dst.Op = gc.OINDREG
			for q > 0 {
				gmove(&src, &cx) // MOVL x+(SI),CX
				gmove(&cx, &dst) // MOVL CX,x+(DI)
				src.Xoffset += 4
				dst.Xoffset += 4
				q--
			}
		} else {
			for q > 0 {
				gins(x86.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+
				q--
//......... (remainder of code omitted) .........
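The osrc < odst && odst < osrc+w test is the classic memmove overlap check: copying forward would overwrite source bytes before they are read, so the code sets the x86 direction flag (STD) and copies backward. The hazard, illustrated in ordinary Go (the built-in copy already has memmove semantics):

package main

import "fmt"

func main() {
	buf := []byte("abcdef")

	// Naive forward byte loop with overlapping dst = buf[2:], src = buf[:4].
	naive := append([]byte(nil), buf...)
	for i := 0; i < 4; i++ {
		naive[2+i] = naive[i] // reads bytes it already clobbered
	}
	fmt.Println(string(naive)) // "ababab", not the intended result

	// Go's copy handles the overlap correctly, like memmove.
	ok := append([]byte(nil), buf...)
	copy(ok[2:], ok[:4])
	fmt.Println(string(ok)) // "ababcd"
}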
Example 15: gmove
func gmove(f *gc.Node, t *gc.Node) {
	if gc.Debug['M'] != 0 {
		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, 0), gc.Nconv(t, 0))
	}

	ft := gc.Simsimtype(f.Type)
	tt := gc.Simsimtype(t.Type)
	cvt := t.Type

	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
		gc.Complexmove(f, t)
		return
	}

	if gc.Isfloat[ft] || gc.Isfloat[tt] {
		floatmove(f, t)
		return
	}

	// cannot have two integer memory operands;
	// except 64-bit, which always copies via registers anyway.
	var r1 gc.Node
	var a int
	if gc.Isint[ft] && gc.Isint[tt] && !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
		goto hard
	}

	// convert constant to desired type
	if f.Op == gc.OLITERAL {
		var con gc.Node
		gc.Convconst(&con, t.Type, &f.Val)
		f = &con
		ft = gc.Simsimtype(con.Type)
	}

	// value -> value copy, only one memory operand.
	// figure out the instruction to use.
	// break out of switch for one-instruction gins.
	// goto rdst for "destination must be register".
	// goto hard for "convert to cvt type first".
	// otherwise handle and return.
	switch uint32(ft)<<16 | uint32(tt) {
	default:
		// should not happen
		gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0))
		return

		/*
		 * integer copy and truncate
		 */
	case gc.TINT8<<16 | gc.TINT8, // same size
		gc.TINT8<<16 | gc.TUINT8,
		gc.TUINT8<<16 | gc.TINT8,
		gc.TUINT8<<16 | gc.TUINT8:
		a = x86.AMOVB

	case gc.TINT16<<16 | gc.TINT8, // truncate
		gc.TUINT16<<16 | gc.TINT8,
		gc.TINT32<<16 | gc.TINT8,
		gc.TUINT32<<16 | gc.TINT8,
		gc.TINT16<<16 | gc.TUINT8,
		gc.TUINT16<<16 | gc.TUINT8,
		gc.TINT32<<16 | gc.TUINT8,
		gc.TUINT32<<16 | gc.TUINT8:
		a = x86.AMOVB

		goto rsrc

	case gc.TINT64<<16 | gc.TINT8, // truncate low word
		gc.TUINT64<<16 | gc.TINT8,
		gc.TINT64<<16 | gc.TUINT8,
		gc.TUINT64<<16 | gc.TUINT8:
		var flo gc.Node
		var fhi gc.Node
		split64(f, &flo, &fhi)

		var r1 gc.Node
		gc.Nodreg(&r1, t.Type, x86.REG_AX)
		gmove(&flo, &r1)
		gins(x86.AMOVB, &r1, t)
		splitclean()
		return

	case gc.TINT16<<16 | gc.TINT16, // same size
		gc.TINT16<<16 | gc.TUINT16,
		gc.TUINT16<<16 | gc.TINT16,
		gc.TUINT16<<16 | gc.TUINT16:
		a = x86.AMOVW

	case gc.TINT32<<16 | gc.TINT16, // truncate
		gc.TUINT32<<16 | gc.TINT16,
		gc.TINT32<<16 | gc.TUINT16,
		gc.TUINT32<<16 | gc.TUINT16:
		a = x86.AMOVW

		goto rsrc

	case gc.TINT64<<16 | gc.TINT16, // truncate low word
		gc.TUINT64<<16 | gc.TINT16,
//......... (remainder of code omitted) .........
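One idiom worth calling out, since it appears in every gmove port: the source and destination type codes are packed into a single integer key so one switch can dispatch on (from, to) pairs. The pattern in isolation, with hypothetical type constants (illustration only):

package main

import "fmt"

const (
	TINT8 = iota + 1
	TINT16
	TINT32
)

func main() {
	ft, tt := TINT32, TINT8
	switch uint32(ft)<<16 | uint32(tt) {
	case TINT32<<16 | TINT8: // a truncating 32 -> 8 move
		fmt.Println("would select MOVB and route the source through a register")
	}
}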