This page collects typical usage examples of the Golang function cmd/internal/gc.Patch. If you have been wondering what gc.Patch does, how to call it, and what real call sites look like, the curated examples below may help. In these sources, gc.Patch(p, to) resolves a branch instruction p whose target was left open (usually one emitted by gc.Gbranch) so that it jumps to the instruction to.
The following shows 15 code examples of the Patch function, taken from the Go toolchain's architecture backends and sorted roughly by popularity.
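Before the examples, a minimal sketch of the idiom nearly all of them follow. Gbranch emits a branch whose target is not yet known, and Patch fills the target in later (compiler-internal API, shown out of context for illustration):

	p := gc.Gbranch(obj.AJMP, nil, 0) // emit a jump with an unresolved target
	// ... generate the instructions the jump must skip ...
	gc.Patch(p, gc.Pc) // resolve p so it branches to the current instruction

Backward branches work the same way except the target already exists: record the loop head first (p1 := p) and later call gc.Patch(p, p1), as in Example 1 below.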
Example 1: zerorange
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
	cnt := hi - lo
	if cnt == 0 {
		return p
	}
	if cnt < int64(4*gc.Widthptr) {
		// Small ranges: clear one pointer-sized word at a time.
		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
			p = appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, 8+frame+lo+i)
		}
	} else if cnt <= int64(128*gc.Widthptr) {
		// Medium ranges: jump into runtime duffzero at the right entry point.
		p = appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, ppc64.REGRT1, 0)
		p.Reg = ppc64.REGSP
		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
		f := gc.Sysfunc("duffzero")
		gc.Naddr(&p.To, f)
		gc.Afunclit(&p.To, f)
		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
	} else {
		// Large ranges: an explicit store loop; the final BNE is patched
		// back to the MOVDU at p1.
		p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, ppc64.REGTMP, 0)
		p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
		p.Reg = ppc64.REGSP
		p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
		p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
		p.Reg = ppc64.REGRT1
		p = appendpp(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(gc.Widthptr))
		p1 := p
		p = appendpp(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
		p = appendpp(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
		gc.Patch(p, p1)
	}
	return p
}
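A worked example of the DUFFZERO entry offset above, assuming (as on ppc64 in this generation of the toolchain) that runtime duffzero is a run of 128 four-byte store instructions, each clearing one pointer-sized word: for cnt = 64 bytes and gc.Widthptr = 8, the offset is 4 * (128 - 64/8) = 480, which enters duffzero with exactly the last eight stores left to execute.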
Example 2: cgen_float
/*
 * generate floating-point operation.
 */
func cgen_float(n *gc.Node, res *gc.Node) {
	nl := n.Left
	switch n.Op {
	case gc.OEQ,
		gc.ONE,
		gc.OLT,
		gc.OLE,
		gc.OGE:
		p1 := gc.Gbranch(obj.AJMP, nil, 0) // jump over the "true" store to the condition test
		p2 := gc.Pc                        // address of the "true" store
		gmove(gc.Nodbool(true), res)
		p3 := gc.Gbranch(obj.AJMP, nil, 0) // after storing true, skip the "false" store
		gc.Patch(p1, gc.Pc)
		gc.Bgen(n, true, 0, p2) // branch back to p2 when the condition holds
		gmove(gc.Nodbool(false), res)
		gc.Patch(p3, gc.Pc)
		return

	case gc.OPLUS:
		gc.Cgen(nl, res)
		return

	case gc.OCONV:
		if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) {
			gc.Cgen(nl, res)
			return
		}

		var n2 gc.Node
		gc.Tempname(&n2, n.Type)
		var n1 gc.Node
		gc.Mgen(nl, &n1, res)
		gmove(&n1, &n2)
		gmove(&n2, res)
		gc.Mfree(&n1)
		return
	}

	if gc.Thearch.Use387 {
		cgen_float387(n, res)
	} else {
		cgen_floatsse(n, res)
	}
}
Example 3: gencmp0
func gencmp0(n *gc.Node, t *gc.Type, o int, likely int, to *obj.Prog) {
	var n1 gc.Node
	gc.Regalloc(&n1, t, nil)
	gc.Cgen(n, &n1)
	a := optoas(gc.OCMP, t)
	if a != arm.ACMP {
		var n2 gc.Node
		gc.Nodconst(&n2, t, 0)
		var n3 gc.Node
		gc.Regalloc(&n3, t, nil)
		gmove(&n2, &n3)
		gins(a, &n1, &n3)
		gc.Regfree(&n3)
	} else {
		gins(arm.ATST, &n1, nil)
	}
	a = optoas(o, t)
	gc.Patch(gc.Gbranch(a, t, likely), to)
	gc.Regfree(&n1)
}
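A hypothetical call site, for orientation (nl, likely, and to are assumed names, not from the source above):

	gencmp0(nl, nl.Type, gc.OEQ, likely, to) // branch to `to` when nl == 0

The comparison op is run through optoas inside gencmp0, so callers hand it the abstract operator (gc.OEQ, gc.ONE, ...) rather than a machine instruction.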
Example 4: zerorange
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, r0 *uint32) *obj.Prog {
	cnt := hi - lo
	if cnt == 0 {
		return p
	}
	if *r0 == 0 {
		// Lazily zero R0 once; *r0 records that it already holds zero.
		p = appendpp(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
		*r0 = 1
	}
	if cnt < int64(4*gc.Widthptr) {
		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
			p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, int32(4+frame+lo+i))
		}
	} else if !gc.Nacl && (cnt <= int64(128*gc.Widthptr)) {
		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(4+frame+lo), obj.TYPE_REG, arm.REG_R1, 0)
		p.Reg = arm.REGSP
		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
		f := gc.Sysfunc("duffzero")
		gc.Naddr(&p.To, f)
		gc.Afunclit(&p.To, f)
		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
	} else {
		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(4+frame+lo), obj.TYPE_REG, arm.REG_R1, 0)
		p.Reg = arm.REGSP
		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(cnt), obj.TYPE_REG, arm.REG_R2, 0)
		p.Reg = arm.REG_R1
		p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
		p1 := p
		p.Scond |= arm.C_PBIT // post-increment: store, then advance R1
		p = appendpp(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
		p.Reg = arm.REG_R2
		p = appendpp(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
		gc.Patch(p, p1) // loop back to the MOVW.P until R1 reaches R2
	}
	return p
}
Example 5: sgen
//......... part of the code omitted here .........
	var dst gc.Node
	var src gc.Node
	if n.Ullman >= res.Ullman {
		agenr(n, &dst, res) // temporarily use dst
		regalloc(&src, gc.Types[gc.Tptr], nil)
		gins(ppc64.AMOVD, &dst, &src)
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		agen(res, &dst)
	} else {
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		agenr(res, &dst, res)
		agenr(n, &src, nil)
	}

	var tmp gc.Node
	regalloc(&tmp, gc.Types[gc.Tptr], nil)

	// set up end marker
	var nend gc.Node

	// move src and dest to the end of block if necessary
	if dir < 0 {
		if c >= 4 {
			regalloc(&nend, gc.Types[gc.Tptr], nil)
			gins(ppc64.AMOVD, &src, &nend)
		}

		p := gins(ppc64.AADD, nil, &src)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = w

		p = gins(ppc64.AADD, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = w
	} else {
		p := gins(ppc64.AADD, nil, &src)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(-dir)

		p = gins(ppc64.AADD, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(-dir)

		if c >= 4 {
			regalloc(&nend, gc.Types[gc.Tptr], nil)
			p := gins(ppc64.AMOVD, &src, &nend)
			p.From.Type = obj.TYPE_ADDR
			p.From.Offset = w
		}
	}

	// move
	// TODO: enable duffcopy for larger copies.
	if c >= 4 {
		// Word-copy loop: each MOVDU auto-increments src/dst by dir, and
		// the BNE is patched back to ploop until src reaches the end marker.
		p := gins(op, &src, &tmp)
		p.From.Type = obj.TYPE_MEM
		p.From.Offset = int64(dir)
		ploop := p

		p = gins(op, &tmp, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = int64(dir)

		p = gins(ppc64.ACMP, &src, &nend)
		gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), ploop)
		regfree(&nend)
	} else {
		// TODO(austin): Instead of generating ADD $-8,R8; ADD
		// $-8,R7; n*(MOVDU 8(R8),R9; MOVDU R9,8(R7);) just
		// generate the offsets directly and eliminate the
		// ADDs. That will produce shorter, more
		// pipeline-able code.
		var p *obj.Prog
		for {
			tmp14 := c
			c--
			if tmp14 <= 0 {
				break
			}

			p = gins(op, &src, &tmp)
			p.From.Type = obj.TYPE_MEM
			p.From.Offset = int64(dir)

			p = gins(op, &tmp, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = int64(dir)
		}
	}

	regfree(&dst)
	regfree(&src)
	regfree(&tmp)
}
Example 6: cgen
//......... part of the code omitted here .........
				regfree(&n2)
			}
			sudoclean()
			return
		}
	}

	// TODO(minux): we shouldn't reverse FP comparisons, but then we need to synthesize
	// OGE, OLE, and ONE ourselves.
	// if(nl != N && isfloat[n->type->etype] && isfloat[nl->type->etype]) goto flt;

	var a int
	switch n.Op {
	default:
		gc.Dump("cgen", n)
		gc.Fatal("cgen: unknown op %v", gc.Nconv(n, obj.FmtShort|obj.FmtSign))

	// these call bgen to get a bool value
	case gc.OOROR,
		gc.OANDAND,
		gc.OEQ,
		gc.ONE,
		gc.OLT,
		gc.OLE,
		gc.OGE,
		gc.OGT,
		gc.ONOT:
		p1 := gc.Gbranch(ppc64.ABR, nil, 0)
		p2 := gc.Pc
		gmove(gc.Nodbool(true), res)
		p3 := gc.Gbranch(ppc64.ABR, nil, 0)
		gc.Patch(p1, gc.Pc)
		bgen(n, true, 0, p2)
		gmove(gc.Nodbool(false), res)
		gc.Patch(p3, gc.Pc)
		return

	case gc.OPLUS:
		cgen(nl, res)
		return

	// unary
	case gc.OCOM:
		a := optoas(gc.OXOR, nl.Type)
		var n1 gc.Node
		regalloc(&n1, nl.Type, nil)
		cgen(nl, &n1)
		var n2 gc.Node
		gc.Nodconst(&n2, nl.Type, -1)
		gins(a, &n2, &n1)
		gmove(&n1, res)
		regfree(&n1)
		return

	case gc.OMINUS:
		if gc.Isfloat[nl.Type.Etype] {
			nr = gc.Nodintconst(-1)
			gc.Convlit(&nr, n.Type)
			a = optoas(gc.OMUL, nl.Type)
			goto sbop
		}

		a := optoas(int(n.Op), nl.Type)
Example 7: gmove
//......... part of the code omitted here .........
		goto hard

	// convert via int64.
	case gc.TFLOAT32<<16 | gc.TUINT32,
		gc.TFLOAT64<<16 | gc.TUINT32:
		cvt = gc.Types[gc.TINT64]
		goto hard

	// algorithm is:
	//	if small enough, use native float64 -> int64 conversion.
	//	otherwise, subtract 2^63, convert, and add it back.
	case gc.TFLOAT32<<16 | gc.TUINT64,
		gc.TFLOAT64<<16 | gc.TUINT64:
		a := x86.ACVTTSS2SQ
		if ft == gc.TFLOAT64 {
			a = x86.ACVTTSD2SQ
		}
		bignodes()
		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[ft], nil)
		var r2 gc.Node
		gc.Regalloc(&r2, gc.Types[tt], t)
		var r3 gc.Node
		gc.Regalloc(&r3, gc.Types[ft], nil)
		var r4 gc.Node
		gc.Regalloc(&r4, gc.Types[tt], nil)
		gins(optoas(gc.OAS, f.Type), f, &r1)
		gins(optoas(gc.OCMP, f.Type), &bigf, &r1)
		p1 := gc.Gbranch(optoas(gc.OLE, f.Type), nil, +1) // taken when f >= 2^63
		gins(a, &r1, &r2)                                 // small enough: native conversion
		p2 := gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)
		gins(optoas(gc.OAS, f.Type), &bigf, &r3)
		gins(optoas(gc.OSUB, f.Type), &r3, &r1) // subtract 2^63
		gins(a, &r1, &r2)
		gins(x86.AMOVQ, &bigi, &r4)
		gins(x86.AXORQ, &r4, &r2) // add 2^63 back by flipping the top bit
		gc.Patch(p2, gc.Pc)
		gmove(&r2, t)
		gc.Regfree(&r4)
		gc.Regfree(&r3)
		gc.Regfree(&r2)
		gc.Regfree(&r1)
		return

	/*
	 * integer to float
	 */
	case gc.TINT32<<16 | gc.TFLOAT32:
		a = x86.ACVTSL2SS
		goto rdst

	case gc.TINT32<<16 | gc.TFLOAT64:
		a = x86.ACVTSL2SD
		goto rdst

	case gc.TINT64<<16 | gc.TFLOAT32:
		a = x86.ACVTSQ2SS
		goto rdst

	case gc.TINT64<<16 | gc.TFLOAT64:
		a = x86.ACVTSQ2SD
		goto rdst
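The float-to-uint64 algorithm described in the comment above can be written as ordinary Go. A minimal scalar sketch (illustrative only; it ignores the edge-case behavior of the hardware CVTTSD2SQ instruction for NaN and out-of-range inputs):

func float64ToUint64(f float64) uint64 {
	const big = 1 << 63 // 2^63, exactly representable as a float64
	if f < big {
		return uint64(int64(f)) // small enough: native float -> int64 conversion
	}
	return uint64(int64(f-big)) ^ (1 << 63) // subtract 2^63, convert, add it back
}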
Example 8: bgen
/*
 * generate:
 *	if(n == true) goto to;
 */
func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
	if gc.Debug['g'] != 0 {
		gc.Dump("\nbgen", n)
	}

	if n == nil {
		n = gc.Nodbool(true)
	}

	if n.Ninit != nil {
		gc.Genlist(n.Ninit)
	}

	if n.Type == nil {
		gc.Convlit(&n, gc.Types[gc.TBOOL])
		if n.Type == nil {
			return
		}
	}

	et := int(n.Type.Etype)
	if et != gc.TBOOL {
		gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0))
		gc.Patch(gins(obj.AEND, nil, nil), to)
		return
	}

	var nr *gc.Node

	for n.Op == gc.OCONVNOP {
		n = n.Left
		if n.Ninit != nil {
			gc.Genlist(n.Ninit)
		}
	}

	var nl *gc.Node
	switch n.Op {
	default:
		var n1 gc.Node
		regalloc(&n1, n.Type, nil)
		cgen(n, &n1)
		var n2 gc.Node
		gc.Nodconst(&n2, n.Type, 0)
		gins(optoas(gc.OCMP, n.Type), &n1, &n2)
		a := ppc64.ABNE
		if !true_ {
			a = ppc64.ABEQ
		}
		gc.Patch(gc.Gbranch(a, n.Type, likely), to)
		regfree(&n1)
		return

	// need to ask if it is bool?
	case gc.OLITERAL:
		if !true_ == (n.Val.U.Bval == 0) {
			gc.Patch(gc.Gbranch(ppc64.ABR, nil, likely), to)
		}
		return

	case gc.OANDAND,
		gc.OOROR:
		if (n.Op == gc.OANDAND) == true_ {
			// Both operands must hold: send either failure to p2.
			p1 := gc.Gbranch(obj.AJMP, nil, 0)
			p2 := gc.Gbranch(obj.AJMP, nil, 0)
			gc.Patch(p1, gc.Pc)
			bgen(n.Left, !true_, -likely, p2)
			bgen(n.Right, !true_, -likely, p2)
			p1 = gc.Gbranch(obj.AJMP, nil, 0)
			gc.Patch(p1, to)
			gc.Patch(p2, gc.Pc)
		} else {
			// Either operand suffices: branch to `to` directly.
			bgen(n.Left, true_, likely, to)
			bgen(n.Right, true_, likely, to)
		}
		return

	case gc.OEQ,
		gc.ONE,
		gc.OLT,
		gc.OGT,
		gc.OLE,
		gc.OGE:
		nr = n.Right
		if nr == nil || nr.Type == nil {
			return
		}
		fallthrough

	case gc.ONOT: // unary
		nl = n.Left
		if nl == nil || nl.Type == nil {
			return
		}
//......... part of the code omitted here .........
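The OANDAND/OOROR case above is De Morgan's laws in codegen form: to branch when a && b holds, each operand is tested with the inverted sense against the shared failure label p2. In plain Go terms (a sketch of the logic, not of the emitted instructions):

func andandHolds(a, b bool) bool {
	if !a {
		return false // bgen(n.Left, !true_, ...) branches to p2
	}
	if !b {
		return false // bgen(n.Right, !true_, ...) branches to p2
	}
	return true // the unconditional jump patched to `to`
}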
Example 9: ginscall
//......... part of the code omitted here .........
 * proc=0	normal call
 * proc=1	goroutine run in new proc
 * proc=2	defer call save away stack
 * proc=3	normal call to C pointer (not Go func value)
 */
func ginscall(f *gc.Node, proc int) {
	if f.Type != nil {
		extra := int32(0)
		if proc == 1 || proc == 2 {
			extra = 2 * int32(gc.Widthptr)
		}
		gc.Setmaxarg(f.Type, extra)
	}

	switch proc {
	default:
		gc.Fatal("ginscall: bad proc %d", proc)

	case 0, // normal call
		-1: // normal call but no return
		if f.Op == gc.ONAME && f.Class == gc.PFUNC {
			if f == gc.Deferreturn {
				// Deferred calls will appear to be returning to
				// the CALL deferreturn(SB) that we are about to emit.
				// However, the stack trace code will show the line
				// of the instruction byte before the return PC.
				// To avoid that being an unrelated instruction,
				// insert a ppc64 NOP that we will have the right line number.
				// The ppc64 NOP is really or r0, r0, r0; use that description
				// because the NOP pseudo-instruction would be removed by
				// the linker.
				var reg gc.Node
				gc.Nodreg(&reg, gc.Types[gc.TINT], ppc64.REG_R0)
				gins(ppc64.AOR, &reg, &reg)
			}

			p := gins(ppc64.ABL, nil, f)
			gc.Afunclit(&p.To, f)
			if proc == -1 || gc.Noreturn(p) {
				gins(obj.AUNDEF, nil, nil)
			}
			break
		}

		var reg gc.Node
		gc.Nodreg(&reg, gc.Types[gc.Tptr], ppc64.REGCTXT)
		var r1 gc.Node
		gc.Nodreg(&r1, gc.Types[gc.Tptr], ppc64.REG_R3)
		gmove(f, &reg)
		reg.Op = gc.OINDREG
		gmove(&reg, &r1)
		reg.Op = gc.OREGISTER
		ginsBL(&reg, &r1)

	case 3: // normal call of c function pointer
		ginsBL(nil, f)

	case 1, // call in new proc (go)
		2: // deferred call (defer)
		var con gc.Node
		gc.Nodconst(&con, gc.Types[gc.TINT64], int64(gc.Argsize(f.Type)))
		var reg gc.Node
		gc.Nodreg(&reg, gc.Types[gc.TINT64], ppc64.REG_R3)
		var reg2 gc.Node
		gc.Nodreg(&reg2, gc.Types[gc.TINT64], ppc64.REG_R4)
		gmove(f, &reg)
		gmove(&con, &reg2)
		p := gins(ppc64.AMOVW, &reg2, nil)
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = ppc64.REGSP
		p.To.Offset = 8
		p = gins(ppc64.AMOVD, &reg, nil)
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = ppc64.REGSP
		p.To.Offset = 16
		if proc == 1 {
			ginscall(gc.Newproc, 0)
		} else {
			if gc.Hasdefer == 0 {
				gc.Fatal("hasdefer=0 but has defer")
			}
			ginscall(gc.Deferproc, 0)
		}

		if proc == 2 {
			// deferproc returns 0 normally; a non-zero result means the
			// frame must return immediately (the recover path), so test
			// R3 and emit the return sequence, with BEQ patched past it.
			gc.Nodreg(&reg, gc.Types[gc.TINT64], ppc64.REG_R3)
			p := gins(ppc64.ACMP, &reg, nil)
			p.To.Type = obj.TYPE_REG
			p.To.Reg = ppc64.REGZERO
			p = gc.Gbranch(ppc64.ABEQ, nil, +1)
			cgen_ret(nil)
			gc.Patch(p, gc.Pc)
		}
	}
}
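Hypothetical call sites for the proc modes listed in the comment (f is assumed to be a func-valued node):

	ginscall(f, 0)  // plain call: f(...)
	ginscall(f, 1)  // go f(...)
	ginscall(f, 2)  // defer f(...)
	ginscall(f, -1) // call known not to return; an UNDEF is emitted after it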
Example 10: dodiv
/*
 * generate division.
 * generates one of:
 *	res = nl / nr
 *	res = nl % nr
 * according to op.
 */
func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	// Have to be careful about handling
	// most negative int divided by -1 correctly.
	// The hardware will trap.
	// Also the byte divide instruction needs AH,
	// which we otherwise don't have to deal with.
	// Easiest way to avoid for int8, int16: use int32.
	// For int32 and int64, use explicit test.
	// Could use int64 hw for int32.
	t := nl.Type

	t0 := t
	check := 0
	if gc.Issigned[t.Etype] {
		check = 1
		if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) {
			check = 0
		} else if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
			check = 0
		}
	}

	if t.Width < 4 {
		if gc.Issigned[t.Etype] {
			t = gc.Types[gc.TINT32]
		} else {
			t = gc.Types[gc.TUINT32]
		}
		check = 0
	}

	a := optoas(op, t)

	var n3 gc.Node
	gc.Regalloc(&n3, t0, nil)
	var ax gc.Node
	var oldax gc.Node
	if nl.Ullman >= nr.Ullman {
		savex(x86.REG_AX, &ax, &oldax, res, t0)
		gc.Cgen(nl, &ax)
		gc.Regalloc(&ax, t0, &ax) // mark ax live during cgen
		gc.Cgen(nr, &n3)
		gc.Regfree(&ax)
	} else {
		gc.Cgen(nr, &n3)
		savex(x86.REG_AX, &ax, &oldax, res, t0)
		gc.Cgen(nl, &ax)
	}

	if t != t0 {
		// Convert
		ax1 := ax
		n31 := n3
		ax.Type = t
		n3.Type = t
		gmove(&ax1, &ax)
		gmove(&n31, &n3)
	}

	var n4 gc.Node
	if gc.Nacl {
		// Native Client does not relay the divide-by-zero trap
		// to the executing program, so we must insert a check
		// for ourselves.
		gc.Nodconst(&n4, t, 0)
		gins(optoas(gc.OCMP, t), &n3, &n4)
		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
		if panicdiv == nil {
			panicdiv = gc.Sysfunc("panicdivide")
		}
		gc.Ginscall(panicdiv, -1)
		gc.Patch(p1, gc.Pc)
	}

	var p2 *obj.Prog
	if check != 0 {
		gc.Nodconst(&n4, t, -1)
		gins(optoas(gc.OCMP, t), &n3, &n4)
		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
		if op == gc.ODIV {
			// a / (-1) is -a.
			gins(optoas(gc.OMINUS, t), nil, &ax)
			gmove(&ax, res)
		} else {
			// a % (-1) is 0.
			gc.Nodconst(&n4, t, 0)
			gmove(&n4, res)
		}
//......... part of the code omitted here .........
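What the check path above guards against, as ordinary Go (a sketch; the generated code performs the fixup inline and skips the divide instruction entirely):

func safeDiv32(a, b int32) int32 {
	if b == -1 {
		return -a // a / -1 is -a; avoids the hardware trap on MinInt32 / -1
	}
	return a / b
}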
Example 11: dodiv
/*
 * generate division.
 * caller must set:
 *	ax = allocated AX register
 *	dx = allocated DX register
 * generates one of:
 *	res = nl / nr
 *	res = nl % nr
 * according to op.
 */
func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.Node) {
	// Have to be careful about handling
	// most negative int divided by -1 correctly.
	// The hardware will trap.
	// Also the byte divide instruction needs AH,
	// which we otherwise don't have to deal with.
	// Easiest way to avoid for int8, int16: use int32.
	// For int32 and int64, use explicit test.
	// Could use int64 hw for int32.
	t := nl.Type

	t0 := t
	check := 0
	if gc.Issigned[t.Etype] {
		check = 1
		if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -1<<uint64(t.Width*8-1) {
			check = 0
		} else if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
			check = 0
		}
	}

	if t.Width < 4 {
		if gc.Issigned[t.Etype] {
			t = gc.Types[gc.TINT32]
		} else {
			t = gc.Types[gc.TUINT32]
		}
		check = 0
	}

	var t1 gc.Node
	gc.Tempname(&t1, t)
	var t2 gc.Node
	gc.Tempname(&t2, t)
	if t0 != t {
		var t3 gc.Node
		gc.Tempname(&t3, t0)
		var t4 gc.Node
		gc.Tempname(&t4, t0)
		gc.Cgen(nl, &t3)
		gc.Cgen(nr, &t4)

		// Convert.
		gmove(&t3, &t1)
		gmove(&t4, &t2)
	} else {
		gc.Cgen(nl, &t1)
		gc.Cgen(nr, &t2)
	}

	var n1 gc.Node
	if !gc.Samereg(ax, res) && !gc.Samereg(dx, res) {
		gc.Regalloc(&n1, t, res)
	} else {
		gc.Regalloc(&n1, t, nil)
	}

	gmove(&t2, &n1)
	gmove(&t1, ax)
	var p2 *obj.Prog
	var n4 gc.Node
	if gc.Nacl {
		// Native Client does not relay the divide-by-zero trap
		// to the executing program, so we must insert a check
		// for ourselves.
		gc.Nodconst(&n4, t, 0)
		gins(optoas(gc.OCMP, t), &n1, &n4)
		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
		if panicdiv == nil {
			panicdiv = gc.Sysfunc("panicdivide")
		}
		gc.Ginscall(panicdiv, -1)
		gc.Patch(p1, gc.Pc)
	}

	if check != 0 {
		gc.Nodconst(&n4, t, -1)
		gins(optoas(gc.OCMP, t), &n1, &n4)
		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
		if op == gc.ODIV {
			// a / (-1) is -a.
			gins(optoas(gc.OMINUS, t), nil, ax)
			gmove(ax, res)
		} else {
			// a % (-1) is 0.
			gc.Nodconst(&n4, t, 0)
//......... part of the code omitted here .........
Example 12: agenr
/*
 * allocate a register (reusing res if possible) and generate
 *	a = &n
 * The caller must call regfree(a).
 * The generated code checks that the result is not nil.
 */
func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
	if gc.Debug['g'] != 0 {
		gc.Dump("agenr-n", n)
	}

	nl := n.Left
	nr := n.Right

	switch n.Op {
	case gc.ODOT,
		gc.ODOTPTR,
		gc.OCALLFUNC,
		gc.OCALLMETH,
		gc.OCALLINTER:
		var n1 gc.Node
		igen(n, &n1, res)
		regalloc(a, gc.Types[gc.Tptr], &n1)
		agen(&n1, a)
		regfree(&n1)

	case gc.OIND:
		cgenr(n.Left, a, res)
		gc.Cgen_checknil(a)

	case gc.OINDEX:
		var p2 *obj.Prog // to be patched to panicindex.
		w := uint32(n.Type.Width)

		//bounded = debug['B'] || n->bounded;
		var n3 gc.Node
		var n1 gc.Node
		if nr.Addable != 0 {
			var tmp gc.Node
			if !gc.Isconst(nr, gc.CTINT) {
				gc.Tempname(&tmp, gc.Types[gc.TINT64])
			}
			if !gc.Isconst(nl, gc.CTSTR) {
				agenr(nl, &n3, res)
			}
			if !gc.Isconst(nr, gc.CTINT) {
				cgen(nr, &tmp)
				regalloc(&n1, tmp.Type, nil)
				gmove(&tmp, &n1)
			}
		} else if nl.Addable != 0 {
			if !gc.Isconst(nr, gc.CTINT) {
				var tmp gc.Node
				gc.Tempname(&tmp, gc.Types[gc.TINT64])
				cgen(nr, &tmp)
				regalloc(&n1, tmp.Type, nil)
				gmove(&tmp, &n1)
			}

			if !gc.Isconst(nl, gc.CTSTR) {
				agenr(nl, &n3, res)
			}
		} else {
			var tmp gc.Node
			gc.Tempname(&tmp, gc.Types[gc.TINT64])
			cgen(nr, &tmp)
			nr = &tmp
			if !gc.Isconst(nl, gc.CTSTR) {
				agenr(nl, &n3, res)
			}
			regalloc(&n1, tmp.Type, nil)
			gins(optoas(gc.OAS, tmp.Type), &tmp, &n1)
		}

		// &a is in &n3 (allocated in res)
		// i is in &n1 (if not constant)
		// w is width

		// constant index
		if gc.Isconst(nr, gc.CTINT) {
			if gc.Isconst(nl, gc.CTSTR) {
				gc.Fatal("constant string constant index")
			}
			v := uint64(gc.Mpgetfix(nr.Val.U.Xval))
			if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
				if gc.Debug['B'] == 0 && !n.Bounded {
					// Constant index: compare it against the slice/string
					// length and call panicindex out of line.
					n1 = n3
					n1.Op = gc.OINDREG
					n1.Type = gc.Types[gc.Tptr]
					n1.Xoffset = int64(gc.Array_nel)
					var n4 gc.Node
					regalloc(&n4, n1.Type, nil)
					gmove(&n1, &n4)
					ginscon2(optoas(gc.OCMP, gc.Types[gc.TUINT64]), &n4, int64(v))
					regfree(&n4)
					p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT64]), nil, +1)
					ginscall(gc.Panicindex, 0)
					gc.Patch(p1, gc.Pc)
				}
//......... part of the code omitted here .........
Example 13: floatmove_387
func floatmove_387(f *gc.Node, t *gc.Node) {
	var r1 gc.Node
	var a int

	ft := gc.Simsimtype(f.Type)
	tt := gc.Simsimtype(t.Type)
	cvt := t.Type

	switch uint32(ft)<<16 | uint32(tt) {
	default:
		goto fatal

	/*
	 * float to integer
	 */
	case gc.TFLOAT32<<16 | gc.TINT16,
		gc.TFLOAT32<<16 | gc.TINT32,
		gc.TFLOAT32<<16 | gc.TINT64,
		gc.TFLOAT64<<16 | gc.TINT16,
		gc.TFLOAT64<<16 | gc.TINT32,
		gc.TFLOAT64<<16 | gc.TINT64:
		if t.Op == gc.OREGISTER {
			goto hardmem
		}
		var r1 gc.Node
		gc.Nodreg(&r1, gc.Types[ft], x86.REG_F0)
		if f.Op != gc.OREGISTER {
			if ft == gc.TFLOAT32 {
				gins(x86.AFMOVF, f, &r1)
			} else {
				gins(x86.AFMOVD, f, &r1)
			}
		}

		// set round to zero mode during conversion
		var t1 gc.Node
		memname(&t1, gc.Types[gc.TUINT16])
		var t2 gc.Node
		memname(&t2, gc.Types[gc.TUINT16])
		gins(x86.AFSTCW, nil, &t1)        // save the current control word
		gins(x86.AMOVW, ncon(0xf7f), &t2)
		gins(x86.AFLDCW, &t2, nil)        // load the truncating control word
		if tt == gc.TINT16 {
			gins(x86.AFMOVWP, &r1, t)
		} else if tt == gc.TINT32 {
			gins(x86.AFMOVLP, &r1, t)
		} else {
			gins(x86.AFMOVVP, &r1, t)
		}
		gins(x86.AFLDCW, &t1, nil) // restore the control word
		return

	// convert via int32.
	case gc.TFLOAT32<<16 | gc.TINT8,
		gc.TFLOAT32<<16 | gc.TUINT16,
		gc.TFLOAT32<<16 | gc.TUINT8,
		gc.TFLOAT64<<16 | gc.TINT8,
		gc.TFLOAT64<<16 | gc.TUINT16,
		gc.TFLOAT64<<16 | gc.TUINT8:
		var t1 gc.Node
		gc.Tempname(&t1, gc.Types[gc.TINT32])

		gmove(f, &t1)
		switch tt {
		default:
			gc.Fatal("gmove %v", gc.Nconv(t, 0))

		case gc.TINT8:
			// Range check: values outside [-0x80, 0x7f] become -0x80.
			gins(x86.ACMPL, &t1, ncon(-0x80&(1<<32-1)))
			p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TINT32]), nil, -1)
			gins(x86.ACMPL, &t1, ncon(0x7f))
			p2 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TINT32]), nil, -1)
			p3 := gc.Gbranch(obj.AJMP, nil, 0)
			gc.Patch(p1, gc.Pc)
			gc.Patch(p2, gc.Pc)
			gmove(ncon(-0x80&(1<<32-1)), &t1)
			gc.Patch(p3, gc.Pc)
			gmove(&t1, t)

		case gc.TUINT8:
			gins(x86.ATESTL, ncon(0xffffff00), &t1)
			p1 := gc.Gbranch(x86.AJEQ, nil, +1)
			gins(x86.AMOVL, ncon(0), &t1)
			gc.Patch(p1, gc.Pc)
			gmove(&t1, t)

		case gc.TUINT16:
			gins(x86.ATESTL, ncon(0xffff0000), &t1)
			p1 := gc.Gbranch(x86.AJEQ, nil, +1)
			gins(x86.AMOVL, ncon(0), &t1)
			gc.Patch(p1, gc.Pc)
			gmove(&t1, t)
		}

		return

	// convert via int64.
	case gc.TFLOAT32<<16 | gc.TUINT32,
		gc.TFLOAT64<<16 | gc.TUINT32:
//......... part of the code omitted here .........
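A note on the magic constant 0xf7f above: bits 10-11 of the x87 control word are the rounding-control field, and 0xf7f sets them to 11 (round toward zero, i.e. truncate) while keeping all exceptions masked, which is what Go/C float-to-integer conversion requires; the previous control word is saved in t1 and restored after the store.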
Example 14: clearfat
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		gc.Dump("\nclearfat", nl)
	}

	w := uint32(nl.Type.Width)

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	c := w % 4 // bytes
	q := w / 4 // quads

	var r0 gc.Node
	r0.Op = gc.OREGISTER
	r0.Reg = arm.REG_R0
	var r1 gc.Node
	r1.Op = gc.OREGISTER
	r1.Reg = arm.REG_R1
	var dst gc.Node
	gc.Regalloc(&dst, gc.Types[gc.Tptr], &r1)
	gc.Agen(nl, &dst)
	var nc gc.Node
	gc.Nodconst(&nc, gc.Types[gc.TUINT32], 0)
	var nz gc.Node
	gc.Regalloc(&nz, gc.Types[gc.TUINT32], &r0)
	gc.Cgen(&nc, &nz)

	if q > 128 {
		// Very large objects: explicit store loop, BNE patched back to pl.
		var end gc.Node
		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
		p := gins(arm.AMOVW, &dst, &end)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = int64(q) * 4

		p = gins(arm.AMOVW, &nz, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 4
		p.Scond |= arm.C_PBIT
		pl := p

		p = gins(arm.ACMP, &dst, nil)
		raddr(&end, p)
		gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), pl)
		gc.Regfree(&end)
	} else if q >= 4 && !gc.Nacl {
		f := gc.Sysfunc("duffzero")
		p := gins(obj.ADUFFZERO, nil, f)
		gc.Afunclit(&p.To, f)

		// 4 and 128 = magic constants: see ../../runtime/asm_arm.s
		p.To.Offset = 4 * (128 - int64(q))
	} else {
		var p *obj.Prog
		for q > 0 {
			p = gins(arm.AMOVW, &nz, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = 4
			p.Scond |= arm.C_PBIT
			//print("1. %P\n", p);
			q--
		}
	}

	var p *obj.Prog
	for c > 0 {
		p = gins(arm.AMOVB, &nz, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 1
		p.Scond |= arm.C_PBIT
		//print("2. %P\n", p);
		c--
	}

	gc.Regfree(&dst)
	gc.Regfree(&nz)
}
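Worked example of the quad/byte split above: for w = 18 bytes, q = 18/4 = 4 word stores and c = 18%4 = 2 trailing byte stores; since 4 <= q <= 128 (and outside NaCl), the word part is done by jumping into duffzero at offset 4 * (128 - 4) = 496.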
Example 15: cgen_shift
//......... part of the code omitted here .........
		if w < 32 && op == gc.OLSH {
			gins(optoas(gc.OAS, nl.Type), &n1, &n1)
		}
		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	tr := nr.Type
	var t gc.Node
	var n1 gc.Node
	var n2 gc.Node
	var n3 gc.Node
	if tr.Width > 4 {
		// 64-bit shift count: if the high word is non-zero the count is
		// certainly >= w, so substitute w itself as the count.
		var nt gc.Node
		gc.Tempname(&nt, nr.Type)
		if nl.Ullman >= nr.Ullman {
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
			gc.Cgen(nr, &nt)
			n1 = nt
		} else {
			gc.Cgen(nr, &nt)
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
		}

		var hi gc.Node
		var lo gc.Node
		split64(&nt, &lo, &hi)
		gc.Regalloc(&n1, gc.Types[gc.TUINT32], nil)
		gc.Regalloc(&n3, gc.Types[gc.TUINT32], nil)
		gmove(&lo, &n1)
		gmove(&hi, &n3)
		splitclean()
		gins(arm.ATST, &n3, nil)
		gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
		p1 := gins(arm.AMOVW, &t, &n1)
		p1.Scond = arm.C_SCOND_NE
		tr = gc.Types[gc.TUINT32]
		gc.Regfree(&n3)
	} else {
		if nl.Ullman >= nr.Ullman {
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
			gc.Regalloc(&n1, nr.Type, nil)
			gc.Cgen(nr, &n1)
		} else {
			gc.Regalloc(&n1, nr.Type, nil)
			gc.Cgen(nr, &n1)
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
		}
	}

	// test for shift being 0
	gins(arm.ATST, &n1, nil)
	p3 := gc.Gbranch(arm.ABEQ, nil, -1)

	// test and fix up large shifts
	// TODO: if(!bounded), don't emit some of this.
	gc.Regalloc(&n3, tr, nil)
	gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
	gmove(&t, &n3)
	gins(arm.ACMP, &n1, &n3)
	if op == gc.ORSH {
		var p1 *obj.Prog
		var p2 *obj.Prog
		if gc.Issigned[nl.Type.Etype] {
			p1 = gshift(arm.AMOVW, &n2, arm.SHIFT_AR, int32(w)-1, &n2)
			p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_AR, &n1, &n2)
		} else {
			p1 = gins(arm.AEOR, &n2, &n2)
			p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_LR, &n1, &n2)
		}

		// HS (count >= w): saturate; LO (count < w): ordinary shift.
		p1.Scond = arm.C_SCOND_HS
		p2.Scond = arm.C_SCOND_LO
	} else {
		p1 := gins(arm.AEOR, &n2, &n2)
		p2 := gregshift(arm.AMOVW, &n2, arm.SHIFT_LL, &n1, &n2)
		p1.Scond = arm.C_SCOND_HS
		p2.Scond = arm.C_SCOND_LO
	}

	gc.Regfree(&n3)

	gc.Patch(p3, gc.Pc)

	// Left-shift of smaller word must be sign/zero-extended.
	if w < 32 && op == gc.OLSH {
		gins(optoas(gc.OAS, nl.Type), &n2, &n2)
	}
	gmove(&n2, res)

	gc.Regfree(&n1)
	gc.Regfree(&n2)
}
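What the large-shift fixup implements, as plain Go for the 32-bit case (a sketch; Go itself already defines over-wide shifts this way, which is exactly why the generated ARM code cannot rely on the raw barrel-shifter semantics):

func rsh32(x int32, s uint32) int32 {
	if s >= 32 {
		return x >> 31 // saturate to sign bits: the SHIFT_AR w-1 path above
	}
	return x >> s
}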