This article collects typical usage examples of the Golang function cmd/compile/internal/gc.Tempname. If you have been wondering what Tempname does, how to call it, or what real uses look like, the curated examples here should help.
Fifteen Tempname code examples are shown below, ordered by popularity.
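As the examples below show, the recurring pattern is to declare a gc.Node, let Tempname bind it to a fresh, addressable stack temporary of the requested type, and then generate code into it. A minimal sketch of that pattern, distilled from the examples (compiler-internal context assumed; not a standalone program):

var tmp gc.Node
gc.Tempname(&tmp, gc.Types[gc.TINT64]) // fresh, addressable stack temporary
gc.Cgen(n, &tmp)                       // evaluate n into the temporary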
Example 1: igenindex
/*
 * generate an addressable node in res, containing the value of n.
 * n is an array index, and might be any size; res width is <= 32-bit.
 * returns Prog* to patch to panic call.
 */
func igenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
	if !gc.Is64(n.Type) {
		if n.Addable && (gc.Simtype[n.Etype] == gc.TUINT32 || gc.Simtype[n.Etype] == gc.TINT32) {
			// nothing to do.
			*res = *n
		} else {
			gc.Tempname(res, gc.Types[gc.TUINT32])
			gc.Cgen(n, res)
		}
		return nil
	}

	var tmp gc.Node
	gc.Tempname(&tmp, gc.Types[gc.TINT64])
	gc.Cgen(n, &tmp)

	var lo gc.Node
	var hi gc.Node
	split64(&tmp, &lo, &hi)
	gc.Tempname(res, gc.Types[gc.TUINT32])
	gmove(&lo, res)

	if bounded {
		splitclean()
		return nil
	}

	var zero gc.Node
	gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
	gins(x86.ACMPL, &hi, &zero)
	splitclean()
	return gc.Gbranch(x86.AJNE, nil, +1)
}
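For context, the *obj.Prog returned by igenindex is the branch taken when a 64-bit index has a non-zero high word. A hypothetical caller sketch of how that branch might be patched into a bounds-check failure path; ginscall and gc.Panicindex are assumptions modeled on the Go 1.5-era backend, not taken from the examples on this page:

var idx gc.Node
p := igenindex(nr, &idx, false) // p != nil only when the index was 64-bit and unbounded
// ... compare idx against the array length ...
ok := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
if p != nil {
	gc.Patch(p, gc.Pc) // high word non-zero: fall into the panic call
}
ginscall(gc.Panicindex, -1) // assumed helper that calls the runtime index panic
gc.Patch(ok, gc.Pc) // in range: execution continues here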
Example 2: cgen_bmul
/*
 * generate byte multiply:
 *	res = nl * nr
 * there is no 2-operand byte multiply instruction so
 * we do a full-width multiplication and truncate afterwards.
 */
func cgen_bmul(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
	if optoas(op, nl.Type) != x86.AIMULB {
		return false
	}

	// copy from byte to full registers
	t := gc.Types[gc.TUINT32]
	if gc.Issigned[nl.Type.Etype] {
		t = gc.Types[gc.TINT32]
	}

	// largest ullman on left.
	if nl.Ullman < nr.Ullman {
		nl, nr = nr, nl
	}

	var nt gc.Node
	gc.Tempname(&nt, nl.Type)
	gc.Cgen(nl, &nt)
	var n1 gc.Node
	gc.Regalloc(&n1, t, res)
	gc.Cgen(nr, &n1)
	var n2 gc.Node
	gc.Regalloc(&n2, t, nil)
	gmove(&nt, &n2)
	a := optoas(op, t)
	gins(a, &n2, &n1)
	gc.Regfree(&n2)
	gmove(&n1, res)
	gc.Regfree(&n1)
	return true
}
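Why the truncation in cgen_bmul is safe: the low 8 bits of a full-width product depend only on the low 8 bits of the operands. A standalone check in ordinary Go, independent of the compiler internals:

package main

import "fmt"

func main() {
	a, b := int8(100), int8(100)
	wide := int32(a) * int32(b)  // full-width multiply: 10000
	fmt.Println(int8(wide), a*b) // both 16: truncating the wide product matches the wrapped byte product
}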
Example 3: cgenindex
/*
 * generate array index into res.
 * n might be any size; res is 32-bit.
 * returns Prog* to patch to panic call.
 */
func cgenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
	if !gc.Is64(n.Type) {
		gc.Cgen(n, res)
		return nil
	}

	var tmp gc.Node
	gc.Tempname(&tmp, gc.Types[gc.TINT64])
	gc.Cgen(n, &tmp)

	var lo gc.Node
	var hi gc.Node
	split64(&tmp, &lo, &hi)
	gmove(&lo, res)

	if bounded {
		splitclean()
		return nil
	}

	var n1 gc.Node
	gc.Regalloc(&n1, gc.Types[gc.TINT32], nil)
	var n2 gc.Node
	gc.Regalloc(&n2, gc.Types[gc.TINT32], nil)
	var zero gc.Node
	gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
	gmove(&hi, &n1)
	gmove(&zero, &n2)
	gins(arm.ACMP, &n1, &n2)
	gc.Regfree(&n2)
	gc.Regfree(&n1)
	splitclean()
	return gc.Gbranch(arm.ABNE, nil, -1)
}
Example 4: savex
func savex(dr int, x *gc.Node, oldx *gc.Node, res *gc.Node, t *gc.Type) {
	r := gc.GetReg(dr)
	gc.Nodreg(x, gc.Types[gc.TINT32], dr)

	// save current ax and dx if they are live
	// and not the destination
	*oldx = gc.Node{}

	if r > 0 && !gc.Samereg(x, res) {
		gc.Tempname(oldx, gc.Types[gc.TINT32])
		gmove(x, oldx)
	}

	gc.Regalloc(x, t, x)
}
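Example 7 below pairs savex with a matching restx to put the saved value back afterwards. restx itself is not among the selected examples; the body below is a sketch inferred from savex, an assumption rather than a verified excerpt:

// Sketch (assumption): free the working register and, if savex spilled the
// old value into a stack temporary, move it back.
func restx(x *gc.Node, oldx *gc.Node) {
	gc.Regfree(x)

	if oldx.Op != 0 {
		x.Type = gc.Types[gc.TINT32]
		gmove(oldx, x)
	}
}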
Example 5: cgen_float
/*
 * generate floating-point operation.
 */
func cgen_float(n *gc.Node, res *gc.Node) {
	nl := n.Left
	switch n.Op {
	case gc.OEQ,
		gc.ONE,
		gc.OLT,
		gc.OLE,
		gc.OGE:
		p1 := gc.Gbranch(obj.AJMP, nil, 0)
		p2 := gc.Pc
		gmove(gc.Nodbool(true), res)
		p3 := gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)
		gc.Bgen(n, true, 0, p2)
		gmove(gc.Nodbool(false), res)
		gc.Patch(p3, gc.Pc)
		return

	case gc.OPLUS:
		gc.Cgen(nl, res)
		return

	case gc.OCONV:
		if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) {
			gc.Cgen(nl, res)
			return
		}

		var n2 gc.Node
		gc.Tempname(&n2, n.Type)
		var n1 gc.Node
		gc.Mgen(nl, &n1, res)
		gmove(&n1, &n2)
		gmove(&n2, res)
		gc.Mfree(&n1)
		return
	}

	if gc.Thearch.Use387 {
		cgen_float387(n, res)
	} else {
		cgen_floatsse(n, res)
	}
}
Example 6: cgen_hmul
/*
 * generate high multiply:
 *	res = (nl*nr) >> width
 */
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	var n1 gc.Node
	var n2 gc.Node
	var ax gc.Node
	var dx gc.Node

	t := nl.Type
	a := optoas(gc.OHMUL, t)

	// gen nl in n1.
	gc.Tempname(&n1, t)
	gc.Cgen(nl, &n1)

	// gen nr in n2.
	gc.Regalloc(&n2, t, res)
	gc.Cgen(nr, &n2)

	// multiply.
	gc.Nodreg(&ax, t, x86.REG_AX)
	gmove(&n2, &ax)
	gins(a, &n1, nil)
	gc.Regfree(&n2)

	if t.Width == 1 {
		// byte multiply behaves differently.
		gc.Nodreg(&ax, t, x86.REG_AH)
		gc.Nodreg(&dx, t, x86.REG_DX)
		gmove(&ax, &dx)
	}

	gc.Nodreg(&dx, t, x86.REG_DX)
	gmove(&dx, res)
}
Example 7: cgen_hmul
/*
 * generate high multiply:
 *	res = (nl*nr) >> width
 */
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	var n1 gc.Node
	var n2 gc.Node

	t := nl.Type
	a := optoas(gc.OHMUL, t)

	// gen nl in n1.
	gc.Tempname(&n1, t)
	gc.Cgen(nl, &n1)

	// gen nr in n2.
	gc.Regalloc(&n2, t, res)
	gc.Cgen(nr, &n2)

	var ax, oldax, dx, olddx gc.Node
	savex(x86.REG_AX, &ax, &oldax, res, gc.Types[gc.TUINT32])
	savex(x86.REG_DX, &dx, &olddx, res, gc.Types[gc.TUINT32])

	gmove(&n2, &ax)
	gins(a, &n1, nil)
	gc.Regfree(&n2)

	if t.Width == 1 {
		// byte multiply behaves differently.
		var byteAH, byteDX gc.Node
		gc.Nodreg(&byteAH, t, x86.REG_AH)
		gc.Nodreg(&byteDX, t, x86.REG_DX)
		gmove(&byteAH, &byteDX)
	}

	gmove(&dx, res)
	restx(&ax, &oldax)
	restx(&dx, &olddx)
}
Example 8: blockcopy
func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
	var dst gc.Node
	gc.Nodreg(&dst, gc.Types[gc.Tptr], x86.REG_DI)
	var src gc.Node
	gc.Nodreg(&src, gc.Types[gc.Tptr], x86.REG_SI)

	var tsrc gc.Node
	gc.Tempname(&tsrc, gc.Types[gc.Tptr])
	var tdst gc.Node
	gc.Tempname(&tdst, gc.Types[gc.Tptr])
	if !n.Addable {
		gc.Agen(n, &tsrc)
	}
	if !res.Addable {
		gc.Agen(res, &tdst)
	}
	if n.Addable {
		gc.Agen(n, &src)
	} else {
		gmove(&tsrc, &src)
	}

	if res.Op == gc.ONAME {
		gc.Gvardef(res)
	}

	if res.Addable {
		gc.Agen(res, &dst)
	} else {
		gmove(&tdst, &dst)
	}

	c := int32(w % 4) // bytes
	q := int32(w / 4) // doublewords

	// if we are copying forward on the stack and
	// the src and dst overlap, then reverse direction
	if osrc < odst && odst < osrc+w {
		// reverse direction
		gins(x86.ASTD, nil, nil) // set direction flag
		if c > 0 {
			gconreg(x86.AADDL, w-1, x86.REG_SI)
			gconreg(x86.AADDL, w-1, x86.REG_DI)

			gconreg(x86.AMOVL, int64(c), x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSB, nil, nil) // MOVB *(SI)-,*(DI)-
		}

		if q > 0 {
			if c > 0 {
				gconreg(x86.AADDL, -3, x86.REG_SI)
				gconreg(x86.AADDL, -3, x86.REG_DI)
			} else {
				gconreg(x86.AADDL, w-4, x86.REG_SI)
				gconreg(x86.AADDL, w-4, x86.REG_DI)
			}

			gconreg(x86.AMOVL, int64(q), x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSL, nil, nil) // MOVL *(SI)-,*(DI)-
		}

		// we leave with the flag clear
		gins(x86.ACLD, nil, nil)
	} else {
		gins(x86.ACLD, nil, nil) // paranoia. TODO(rsc): remove?

		// normal direction
		if q > 128 || (q >= 4 && gc.Nacl) {
			gconreg(x86.AMOVL, int64(q), x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+
		} else if q >= 4 {
			p := gins(obj.ADUFFCOPY, nil, nil)
			p.To.Type = obj.TYPE_ADDR
			p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))

			// 10 and 128 = magic constants: see ../../runtime/asm_386.s
			p.To.Offset = 10 * (128 - int64(q))
		} else if !gc.Nacl && c == 0 {
			var cx gc.Node
			gc.Nodreg(&cx, gc.Types[gc.TINT32], x86.REG_CX)

			// We don't need the MOVSL side-effect of updating SI and DI,
			// and issuing a sequence of MOVLs directly is faster.
			src.Op = gc.OINDREG
			dst.Op = gc.OINDREG
			for q > 0 {
				gmove(&src, &cx) // MOVL x+(SI),CX
				gmove(&cx, &dst) // MOVL CX,x+(DI)
				src.Xoffset += 4
				dst.Xoffset += 4
				q--
			}
		} else {
			for q > 0 {
				gins(x86.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+
				q--
//......... (portions of code omitted) .........
Example 9: igenindex
func igenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
	gc.Tempname(res, n.Type)
	return cgenindex(n, res, bounded)
}
Example 10: floatmove_sse
//......... (portions of code omitted) .........
		gc.TFLOAT64<<16 | gc.TINT8,
		gc.TFLOAT64<<16 | gc.TUINT16,
		gc.TFLOAT64<<16 | gc.TUINT8:
		cvt = gc.Types[gc.TINT32]
		goto hard

	// convert via int64.
	case gc.TFLOAT32<<16 | gc.TUINT32,
		gc.TFLOAT64<<16 | gc.TUINT32:
		cvt = gc.Types[gc.TINT64]
		goto hardmem

	case gc.TFLOAT32<<16 | gc.TINT32:
		a = x86.ACVTTSS2SL
		goto rdst

	case gc.TFLOAT64<<16 | gc.TINT32:
		a = x86.ACVTTSD2SL
		goto rdst

	// convert via int32 memory
	/*
	 * integer to float
	 */
	case gc.TINT8<<16 | gc.TFLOAT32,
		gc.TINT8<<16 | gc.TFLOAT64,
		gc.TINT16<<16 | gc.TFLOAT32,
		gc.TINT16<<16 | gc.TFLOAT64,
		gc.TUINT16<<16 | gc.TFLOAT32,
		gc.TUINT16<<16 | gc.TFLOAT64,
		gc.TUINT8<<16 | gc.TFLOAT32,
		gc.TUINT8<<16 | gc.TFLOAT64:
		cvt = gc.Types[gc.TINT32]
		goto hard

	// convert via int64 memory
	case gc.TUINT32<<16 | gc.TFLOAT32,
		gc.TUINT32<<16 | gc.TFLOAT64:
		cvt = gc.Types[gc.TINT64]
		goto hardmem

	case gc.TINT32<<16 | gc.TFLOAT32:
		a = x86.ACVTSL2SS
		goto rdst

	case gc.TINT32<<16 | gc.TFLOAT64:
		a = x86.ACVTSL2SD
		goto rdst

	/*
	 * float to float
	 */
	case gc.TFLOAT32<<16 | gc.TFLOAT32:
		a = x86.AMOVSS

	case gc.TFLOAT64<<16 | gc.TFLOAT64:
		a = x86.AMOVSD

	case gc.TFLOAT32<<16 | gc.TFLOAT64:
		a = x86.ACVTSS2SD
		goto rdst

	case gc.TFLOAT64<<16 | gc.TFLOAT32:
		a = x86.ACVTSD2SS
		goto rdst
	}

	gins(a, f, t)
	return

	// requires register intermediate
hard:
	gc.Regalloc(&r1, cvt, t)
	gmove(f, &r1)
	gmove(&r1, t)
	gc.Regfree(&r1)
	return

	// requires memory intermediate
hardmem:
	gc.Tempname(&r1, cvt)
	gmove(f, &r1)
	gmove(&r1, t)
	return

	// requires register destination
rdst:
	gc.Regalloc(&r1, t.Type, t)
	gins(a, f, &r1)
	gmove(&r1, t)
	gc.Regfree(&r1)
	return
}
Example 11: memname
func memname(n *gc.Node, t *gc.Type) {
	gc.Tempname(n, t)
	n.Sym = gc.Lookup("." + n.Sym.Name[1:]) // keep optimizer from registerizing
	n.Orig.Sym = n.Sym
}
Example 12: floatmove
//......... (portions of code omitted) .........
		return

	/*
	 * integer to float
	 */
	case gc.TINT64<<16 | gc.TFLOAT32,
		gc.TINT64<<16 | gc.TFLOAT64:
		if t.Op == gc.OREGISTER {
			goto hardmem
		}
		var f0 gc.Node
		gc.Nodreg(&f0, t.Type, x86.REG_F0)
		gins(x86.AFMOVV, f, &f0)
		if tt == gc.TFLOAT32 {
			gins(x86.AFMOVFP, &f0, t)
		} else {
			gins(x86.AFMOVDP, &f0, t)
		}
		return

	// algorithm is:
	//	if small enough, use native int64 -> float64 conversion.
	//	otherwise, halve (rounding to odd?), convert, and double.
	case gc.TUINT64<<16 | gc.TFLOAT32,
		gc.TUINT64<<16 | gc.TFLOAT64:
		var ax gc.Node
		gc.Nodreg(&ax, gc.Types[gc.TUINT32], x86.REG_AX)
		var dx gc.Node
		gc.Nodreg(&dx, gc.Types[gc.TUINT32], x86.REG_DX)
		var cx gc.Node
		gc.Nodreg(&cx, gc.Types[gc.TUINT32], x86.REG_CX)
		var t1 gc.Node
		gc.Tempname(&t1, f.Type)

		var tlo gc.Node
		var thi gc.Node
		split64(&t1, &tlo, &thi)
		gmove(f, &t1)
		gins(x86.ACMPL, &thi, ncon(0))
		p1 := gc.Gbranch(x86.AJLT, nil, 0)

		// native
		var r1 gc.Node
		gc.Nodreg(&r1, gc.Types[tt], x86.REG_F0)
		gins(x86.AFMOVV, &t1, &r1)
		if tt == gc.TFLOAT32 {
			gins(x86.AFMOVFP, &r1, t)
		} else {
			gins(x86.AFMOVDP, &r1, t)
		}
		p2 := gc.Gbranch(obj.AJMP, nil, 0)

		// simulated
		gc.Patch(p1, gc.Pc)
		gmove(&tlo, &ax)
		gmove(&thi, &dx)
		p1 = gins(x86.ASHRL, ncon(1), &ax)
		p1.From.Index = x86.REG_DX // double-width shift DX -> AX
		p1.From.Scale = 0
		gins(x86.AMOVL, ncon(0), &cx)
		gins(x86.ASETCC, nil, &cx)
		gins(x86.AORL, &cx, &ax)
		gins(x86.ASHRL, ncon(1), &dx)
		gmove(&dx, &thi)
		//......... (portions of code omitted) .........
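For intuition, the "halve (rounding to odd), convert, and double" strategy described in the comment above, written as ordinary Go (a sketch of the idea, not the compiler's exact code):

func uint64ToFloat64(u uint64) float64 {
	if int64(u) >= 0 {
		return float64(int64(u)) // fits in int64: use the native signed conversion
	}
	half := u>>1 | u&1 // halve, keeping the lost bit sticky (round to odd)
	return 2 * float64(int64(half))
}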
Example 13: floatmove_387
func floatmove_387(f *gc.Node, t *gc.Node) {
	var r1 gc.Node
	var a int

	ft := gc.Simsimtype(f.Type)
	tt := gc.Simsimtype(t.Type)
	cvt := t.Type

	switch uint32(ft)<<16 | uint32(tt) {
	default:
		goto fatal

	/*
	 * float to integer
	 */
	case gc.TFLOAT32<<16 | gc.TINT16,
		gc.TFLOAT32<<16 | gc.TINT32,
		gc.TFLOAT32<<16 | gc.TINT64,
		gc.TFLOAT64<<16 | gc.TINT16,
		gc.TFLOAT64<<16 | gc.TINT32,
		gc.TFLOAT64<<16 | gc.TINT64:
		if t.Op == gc.OREGISTER {
			goto hardmem
		}
		var r1 gc.Node
		gc.Nodreg(&r1, gc.Types[ft], x86.REG_F0)
		if f.Op != gc.OREGISTER {
			if ft == gc.TFLOAT32 {
				gins(x86.AFMOVF, f, &r1)
			} else {
				gins(x86.AFMOVD, f, &r1)
			}
		}

		// set round to zero mode during conversion
		var t1 gc.Node
		memname(&t1, gc.Types[gc.TUINT16])

		var t2 gc.Node
		memname(&t2, gc.Types[gc.TUINT16])
		gins(x86.AFSTCW, nil, &t1)
		gins(x86.AMOVW, ncon(0xf7f), &t2)
		gins(x86.AFLDCW, &t2, nil)
		if tt == gc.TINT16 {
			gins(x86.AFMOVWP, &r1, t)
		} else if tt == gc.TINT32 {
			gins(x86.AFMOVLP, &r1, t)
		} else {
			gins(x86.AFMOVVP, &r1, t)
		}
		gins(x86.AFLDCW, &t1, nil)
		return

	// convert via int32.
	case gc.TFLOAT32<<16 | gc.TINT8,
		gc.TFLOAT32<<16 | gc.TUINT16,
		gc.TFLOAT32<<16 | gc.TUINT8,
		gc.TFLOAT64<<16 | gc.TINT8,
		gc.TFLOAT64<<16 | gc.TUINT16,
		gc.TFLOAT64<<16 | gc.TUINT8:
		var t1 gc.Node
		gc.Tempname(&t1, gc.Types[gc.TINT32])

		gmove(f, &t1)
		switch tt {
		default:
			gc.Fatalf("gmove %v", t)

		case gc.TINT8:
			gins(x86.ACMPL, &t1, ncon(-0x80&(1<<32-1)))
			p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TINT32]), nil, -1)
			gins(x86.ACMPL, &t1, ncon(0x7f))
			p2 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TINT32]), nil, -1)
			p3 := gc.Gbranch(obj.AJMP, nil, 0)
			gc.Patch(p1, gc.Pc)
			gc.Patch(p2, gc.Pc)
			gmove(ncon(-0x80&(1<<32-1)), &t1)
			gc.Patch(p3, gc.Pc)
			gmove(&t1, t)

		case gc.TUINT8:
			gins(x86.ATESTL, ncon(0xffffff00), &t1)
			p1 := gc.Gbranch(x86.AJEQ, nil, +1)
			gins(x86.AMOVL, ncon(0), &t1)
			gc.Patch(p1, gc.Pc)
			gmove(&t1, t)

		case gc.TUINT16:
			gins(x86.ATESTL, ncon(0xffff0000), &t1)
			p1 := gc.Gbranch(x86.AJEQ, nil, +1)
			gins(x86.AMOVL, ncon(0), &t1)
			gc.Patch(p1, gc.Pc)
			gmove(&t1, t)
		}

		return

	// convert via int64.
	case gc.TFLOAT32<<16 | gc.TUINT32,
		gc.TFLOAT64<<16 | gc.TUINT32:
		//......... (portions of code omitted) .........
Example 14: cgen_floatsse
func cgen_floatsse(n *gc.Node, res *gc.Node) {
	var a obj.As

	nl := n.Left
	nr := n.Right
	switch n.Op {
	default:
		gc.Dump("cgen_floatsse", n)
		gc.Fatalf("cgen_floatsse %v", gc.Oconv(n.Op, 0))
		return

	case gc.OMINUS,
		gc.OCOM:
		nr = gc.Nodintconst(-1)
		gc.Convlit(&nr, n.Type)
		a = foptoas(gc.OMUL, nl.Type, 0)
		goto sbop

	// symmetric binary
	case gc.OADD,
		gc.OMUL:
		a = foptoas(n.Op, nl.Type, 0)
		goto sbop

	// asymmetric binary
	case gc.OSUB,
		gc.OMOD,
		gc.ODIV:
		a = foptoas(n.Op, nl.Type, 0)
		goto abop
	}

sbop: // symmetric binary
	if nl.Ullman < nr.Ullman || nl.Op == gc.OLITERAL {
		nl, nr = nr, nl
	}

abop: // asymmetric binary
	if nl.Ullman >= nr.Ullman {
		var nt gc.Node
		gc.Tempname(&nt, nl.Type)
		gc.Cgen(nl, &nt)
		var n2 gc.Node
		gc.Mgen(nr, &n2, nil)
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		gmove(&nt, &n1)
		gins(a, &n2, &n1)
		gmove(&n1, res)
		gc.Regfree(&n1)
		gc.Mfree(&n2)
	} else {
		var n2 gc.Node
		gc.Regalloc(&n2, nr.Type, res)
		gc.Cgen(nr, &n2)
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, nil)
		gc.Cgen(nl, &n1)
		gins(a, &n2, &n1)
		gc.Regfree(&n2)
		gmove(&n1, res)
		gc.Regfree(&n1)
	}

	return
}
Example 15: bgen_float
func bgen_float(n *gc.Node, wantTrue bool, likely int, to *obj.Prog) {
	nl := n.Left
	nr := n.Right
	op := n.Op

	if !wantTrue {
		// brcom is not valid on floats when NaN is involved.
		p1 := gc.Gbranch(obj.AJMP, nil, 0)
		p2 := gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)

		// No need to avoid re-genning ninit.
		bgen_float(n, true, -likely, p2)

		gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
		gc.Patch(p2, gc.Pc)
		return
	}

	if gc.Thearch.Use387 {
		op = gc.Brrev(op) // because the args are stacked
		if op == gc.OGE || op == gc.OGT {
			// only < and <= work right with NaN; reverse if needed
			nl, nr = nr, nl
			op = gc.Brrev(op)
		}

		var ax, n2, tmp gc.Node
		gc.Nodreg(&tmp, nr.Type, x86.REG_F0)
		gc.Nodreg(&n2, nr.Type, x86.REG_F0+1)
		gc.Nodreg(&ax, gc.Types[gc.TUINT16], x86.REG_AX)
		if gc.Simsimtype(nr.Type) == gc.TFLOAT64 {
			if nl.Ullman > nr.Ullman {
				gc.Cgen(nl, &tmp)
				gc.Cgen(nr, &tmp)
				gins(x86.AFXCHD, &tmp, &n2)
			} else {
				gc.Cgen(nr, &tmp)
				gc.Cgen(nl, &tmp)
			}
			gins(x86.AFUCOMPP, &tmp, &n2)
		} else {
			// TODO(rsc): The moves back and forth to memory
			// here are for truncating the value to 32 bits.
			// This handles 32-bit comparison but presumably
			// all the other ops have the same problem.
			// We need to figure out what the right general
			// solution is, besides telling people to use float64.
			var t1 gc.Node
			gc.Tempname(&t1, gc.Types[gc.TFLOAT32])

			var t2 gc.Node
			gc.Tempname(&t2, gc.Types[gc.TFLOAT32])
			gc.Cgen(nr, &t1)
			gc.Cgen(nl, &t2)
			gmove(&t2, &tmp)
			gins(x86.AFCOMFP, &t1, &tmp)
		}
		gins(x86.AFSTSW, nil, &ax)
		gins(x86.ASAHF, nil, nil)
	} else {
		// Not 387
		if !nl.Addable {
			nl = gc.CgenTemp(nl)
		}
		if !nr.Addable {
			nr = gc.CgenTemp(nr)
		}

		var n2 gc.Node
		gc.Regalloc(&n2, nr.Type, nil)
		gmove(nr, &n2)
		nr = &n2

		if nl.Op != gc.OREGISTER {
			var n3 gc.Node
			gc.Regalloc(&n3, nl.Type, nil)
			gmove(nl, &n3)
			nl = &n3
		}

		if op == gc.OGE || op == gc.OGT {
			// only < and <= work right with NaN; reverse if needed
			nl, nr = nr, nl
			op = gc.Brrev(op)
		}

		gins(foptoas(gc.OCMP, nr.Type, 0), nl, nr)
		if nl.Op == gc.OREGISTER {
			gc.Regfree(nl)
		}
		gc.Regfree(nr)
	}

	switch op {
	case gc.OEQ:
		// neither NE nor P
		p1 := gc.Gbranch(x86.AJNE, nil, -likely)
		p2 := gc.Gbranch(x86.AJPS, nil, -likely)
		gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
		gc.Patch(p1, gc.Pc)
		//......... (portions of code omitted) .........