This article collects typical usage examples of the Prog.Scond method from the Golang package cmd/internal/obj. If you are wondering what Prog.Scond does, how to call it, or where to find concrete uses of it, the curated code samples below should help. You can also explore further usage examples of the containing type, cmd/internal/obj.Prog.
The following shows 15 code examples of the Prog.Scond method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code samples.
Example 1: zerorange
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
cnt := hi - lo
if cnt == 0 {
return p
}
if cnt < int64(4*gc.Widthptr) {
for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
p = gc.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+frame+lo+i)
}
} else if cnt <= int64(128*gc.Widthptr) && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend
p = gc.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
p = gc.Appendpp(p, arm64.AADD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, arm64.REGRT1, 0)
p.Reg = arm64.REGRT1
p = gc.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
gc.Naddr(&p.To, gc.Sysfunc("duffzero"))
p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
} else {
p = gc.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, arm64.REGTMP, 0)
p = gc.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
p = gc.Appendpp(p, arm64.AADD, obj.TYPE_REG, arm64.REGTMP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
p.Reg = arm64.REGRT1
p = gc.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm64.REGTMP, 0)
p = gc.Appendpp(p, arm64.AADD, obj.TYPE_REG, arm64.REGTMP, 0, obj.TYPE_REG, arm64.REGRT2, 0)
p.Reg = arm64.REGRT1
p = gc.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(gc.Widthptr))
p.Scond = arm64.C_XPRE
p1 := p
p = gc.Appendpp(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0)
p.Reg = arm64.REGRT2
p = gc.Appendpp(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
gc.Patch(p, p1)
}
return p
}
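The pre-indexed store that the loop in the large-count branch relies on is obtained simply by setting Scond on the Prog returned from gc.Appendpp. A minimal sketch of that single step, assuming the same Go 1.5-era gc/obj/arm64 internal packages and the Appendpp signature used above (the helper name is hypothetical):

func zeroWordPre(p *obj.Prog, dst int) *obj.Prog {
	// Store the zero register through dst with an 8-byte pre-index write-back,
	// exactly as the loop body in zerorange above does.
	p = gc.Appendpp(p, arm64.AMOVD,
		obj.TYPE_REG, arm64.REGZERO, 0, // source: the zero register
		obj.TYPE_MEM, dst, int64(gc.Widthptr)) // destination: memory at dst+8
	p.Scond = arm64.C_XPRE // pre-index: dst is updated as part of the store
	return p
}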
Example 2: ARM64Suffix
// ARM64Suffix handles the special suffix for the ARM64.
// It returns a boolean to indicate success; failure means
// cond was unrecognized.
func ARM64Suffix(prog *obj.Prog, cond string) bool {
if cond == "" {
return true
}
bits, ok := ParseARM64Suffix(cond)
if !ok {
return false
}
prog.Scond = bits
return true
}
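A minimal usage sketch, assuming it lives in the same package as ARM64Suffix (so the function is directly callable) and that ctxt is an *obj.Link as elsewhere on this page; the helper name is hypothetical:

// newCondProg builds a Prog for opcode as and attaches the textual ARM64
// condition suffix (for example "EQ" from an instruction written as OP.EQ).
// On success, p.Scond holds the parsed condition bits.
func newCondProg(ctxt *obj.Link, as int16, cond string) (*obj.Prog, bool) {
	p := ctxt.NewProg() // fresh Prog owned by ctxt
	p.As = as
	if !ARM64Suffix(p, cond) { // sets p.Scond, or reports an unrecognized suffix
		return nil, false
	}
	return p, true
}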
Example 3: ARMConditionCodes
// ARMConditionCodes handles the special condition code situation for the ARM.
// It returns a boolean to indicate success; failure means cond was unrecognized.
func ARMConditionCodes(prog *obj.Prog, cond string) bool {
if cond == "" {
return true
}
bits, ok := ParseARMCondition(cond)
if !ok {
return false
}
/* hack to make B.NE etc. work: turn it into the corresponding conditional */
if prog.As == arm.AB {
prog.As = int16(bcode[(bits^arm.C_SCOND_XOR)&0xf])
bits = (bits &^ 0xf) | arm.C_SCOND_NONE
}
prog.Scond = bits
return true
}
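The B.NE rewrite above can be exercised end to end with a small sketch, again assuming the same package (so ARMConditionCodes, bcode and the arm constants are in scope); the helper name is hypothetical:

// condBranch starts from an unconditional ARM branch and lets
// ARMConditionCodes rewrite it: for cond == "NE" the opcode is swapped for the
// matching conditional branch from bcode and only the non-condition Scond bits
// are kept, exactly as in the hack above.
func condBranch(ctxt *obj.Link, cond string) (*obj.Prog, bool) {
	p := ctxt.NewProg()
	p.As = arm.AB // plain B; the rewrite may turn it into BNE, BEQ, ...
	if !ARMConditionCodes(p, cond) {
		return nil, false
	}
	return p, true
}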
Example 4: expandchecks
// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
var reg int
var p1 *obj.Prog
for p := firstp; p != nil; p = p.Link {
if p.As != obj.ACHECKNIL {
continue
}
if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
gc.Warnl(int(p.Lineno), "generated nil check")
}
if p.From.Type != obj.TYPE_REG {
gc.Fatalf("invalid nil check %v", p)
}
reg = int(p.From.Reg)
// check is
// CMP arg, $0
// MOV.EQ arg, 0(arg)
p1 = gc.Ctxt.NewProg()
gc.Clearp(p1)
p1.Link = p.Link
p.Link = p1
p1.Lineno = p.Lineno
p1.Pc = 9999
p1.As = arm.AMOVW
p1.From.Type = obj.TYPE_REG
p1.From.Reg = int16(reg)
p1.To.Type = obj.TYPE_MEM
p1.To.Reg = int16(reg)
p1.To.Offset = 0
p1.Scond = arm.C_SCOND_EQ
p.As = arm.ACMP
p.From.Type = obj.TYPE_CONST
p.From.Reg = 0
p.From.Offset = 0
p.Reg = int16(reg)
}
}
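The conditional half of the generated pair can also be built on its own. A minimal sketch under the same assumptions as expandchecks above (gc, obj and arm imports, a *obj.Link in hand); the helper name is hypothetical:

// condNilStore builds "MOVW.EQ Rn, 0(Rn)": a store through reg that only
// executes when the preceding CMP set the EQ condition, i.e. when reg was nil.
// This is the second instruction expandchecks inserts.
func condNilStore(ctxt *obj.Link, reg int16) *obj.Prog {
	p := ctxt.NewProg()
	gc.Clearp(p)
	p.As = arm.AMOVW
	p.From.Type = obj.TYPE_REG
	p.From.Reg = reg
	p.To.Type = obj.TYPE_MEM
	p.To.Reg = reg
	p.To.Offset = 0
	p.Scond = arm.C_SCOND_EQ // predicate the store on EQ
	return p
}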
Example 5: zerorange
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, r0 *uint32) *obj.Prog {
cnt := hi - lo
if cnt == 0 {
return p
}
if *r0 == 0 {
p = appendpp(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
*r0 = 1
}
if cnt < int64(4*gc.Widthptr) {
for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, int32(4+frame+lo+i))
}
} else if !gc.Nacl && (cnt <= int64(128*gc.Widthptr)) {
p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(4+frame+lo), obj.TYPE_REG, arm.REG_R1, 0)
p.Reg = arm.REGSP
p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
f := gc.Sysfunc("duffzero")
gc.Naddr(&p.To, f)
gc.Afunclit(&p.To, f)
p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
} else {
p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(4+frame+lo), obj.TYPE_REG, arm.REG_R1, 0)
p.Reg = arm.REGSP
p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(cnt), obj.TYPE_REG, arm.REG_R2, 0)
p.Reg = arm.REG_R1
p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
p1 := p
p.Scond |= arm.C_PBIT
p = appendpp(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
p.Reg = arm.REG_R2
p = appendpp(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
gc.Patch(p, p1)
}
return p
}
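On 32-bit ARM the equivalent write-back store uses the P bit instead of the arm64 C_XPRE mode. A minimal sketch of that single step, assuming the package-local appendpp and the register assignments used by the loop above (the helper name is hypothetical):

func zeroWordPost(p *obj.Prog) *obj.Prog {
	// Store the zero held in R0 through R1, then advance R1 by 4 via the
	// P bit, exactly as the loop body in zerorange above does.
	p = appendpp(p, arm.AMOVW,
		obj.TYPE_REG, arm.REG_R0, 0, // source: R0, preloaded with zero
		obj.TYPE_MEM, arm.REG_R1, 4) // destination: (R1) with write-back of 4
	p.Scond |= arm.C_PBIT // set the P bit so R1 is updated by the store
	return p
}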
Example 6: outcode
func outcode(a, scond int32, g1 *obj.Addr, reg int32, g2 *obj.Addr) {
var p *obj.Prog
var pl *obj.Plist
/* hack to make B.NE etc. work: turn it into the corresponding conditional */
if a == arm.AB {
a = int32(bcode[(scond^arm.C_SCOND_XOR)&0xf])
scond = (scond &^ 0xf) | Always
}
if asm.Pass == 1 {
goto out
}
p = new(obj.Prog)
*p = obj.Prog{}
p.Ctxt = asm.Ctxt
p.As = int16(a)
p.Lineno = stmtline
p.Scond = uint8(scond)
p.From = *g1
p.Reg = int16(reg)
p.To = *g2
p.Pc = int64(asm.PC)
if lastpc == nil {
pl = obj.Linknewplist(asm.Ctxt)
pl.Firstpc = p
} else {
lastpc.Link = p
}
lastpc = p
out:
if a != obj.AGLOBL && a != obj.ADATA {
asm.PC++
}
}
Example 7: sgen
//......... (code omitted here) .........
agenr(n, &dst, res) // temporarily use dst
regalloc(&src, gc.Types[gc.Tptr], nil)
gins(arm64.AMOVD, &dst, &src)
if res.Op == gc.ONAME {
gc.Gvardef(res)
}
agen(res, &dst)
} else {
if res.Op == gc.ONAME {
gc.Gvardef(res)
}
agenr(res, &dst, res)
agenr(n, &src, nil)
}
var tmp gc.Node
regalloc(&tmp, gc.Types[gc.Tptr], nil)
// set up end marker
var nend gc.Node
// move src and dest to the end of block if necessary
if dir < 0 {
if c >= 4 {
regalloc(&nend, gc.Types[gc.Tptr], nil)
gins(arm64.AMOVD, &src, &nend)
}
p := gins(arm64.AADD, nil, &src)
p.From.Type = obj.TYPE_CONST
p.From.Offset = w
p = gins(arm64.AADD, nil, &dst)
p.From.Type = obj.TYPE_CONST
p.From.Offset = w
} else {
p := gins(arm64.AADD, nil, &src)
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(-dir)
p = gins(arm64.AADD, nil, &dst)
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(-dir)
if c >= 4 {
regalloc(&nend, gc.Types[gc.Tptr], nil)
p := gins(arm64.AMOVD, &src, &nend)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = w
}
}
// move
// TODO: enable duffcopy for larger copies.
if c >= 4 {
p := gins(op, &src, &tmp)
p.From.Type = obj.TYPE_MEM
p.From.Offset = int64(dir)
p.Scond = arm64.C_XPRE
ploop := p
p = gins(op, &tmp, &dst)
p.To.Type = obj.TYPE_MEM
p.To.Offset = int64(dir)
p.Scond = arm64.C_XPRE
p = gcmp(arm64.ACMP, &src, &nend)
gc.Patch(gc.Gbranch(arm64.ABNE, nil, 0), ploop)
regfree(&nend)
} else {
// TODO(austin): Instead of generating ADD $-8,R8; ADD
// $-8,R7; n*(MOVDU 8(R8),R9; MOVDU R9,8(R7);) just
// generate the offsets directly and eliminate the
// ADDs. That will produce shorter, more
// pipeline-able code.
var p *obj.Prog
for {
tmp14 := c
c--
if tmp14 <= 0 {
break
}
p = gins(op, &src, &tmp)
p.From.Type = obj.TYPE_MEM
p.From.Offset = int64(dir)
p.Scond = arm64.C_XPRE
p = gins(op, &tmp, &dst)
p.To.Type = obj.TYPE_MEM
p.To.Offset = int64(dir)
p.Scond = arm64.C_XPRE
}
}
regfree(&dst)
regfree(&src)
regfree(&tmp)
}
Example 8: clearfat
func clearfat(nl *gc.Node) {
/* clear a fat object */
if gc.Debug['g'] != 0 {
gc.Dump("\nclearfat", nl)
}
w := uint32(nl.Type.Width)
// Avoid taking the address for simple enough types.
if gc.Componentgen(nil, nl) {
return
}
c := w % 4 // bytes
q := w / 4 // quads
var r0 gc.Node
r0.Op = gc.OREGISTER
r0.Reg = arm.REG_R0
var r1 gc.Node
r1.Op = gc.OREGISTER
r1.Reg = arm.REG_R1
var dst gc.Node
gc.Regalloc(&dst, gc.Types[gc.Tptr], &r1)
gc.Agen(nl, &dst)
var nc gc.Node
gc.Nodconst(&nc, gc.Types[gc.TUINT32], 0)
var nz gc.Node
gc.Regalloc(&nz, gc.Types[gc.TUINT32], &r0)
gc.Cgen(&nc, &nz)
if q > 128 {
var end gc.Node
gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
p := gins(arm.AMOVW, &dst, &end)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = int64(q) * 4
p = gins(arm.AMOVW, &nz, &dst)
p.To.Type = obj.TYPE_MEM
p.To.Offset = 4
p.Scond |= arm.C_PBIT
pl := p
p = gins(arm.ACMP, &dst, nil)
raddr(&end, p)
gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), pl)
gc.Regfree(&end)
} else if q >= 4 && !gc.Nacl {
f := gc.Sysfunc("duffzero")
p := gins(obj.ADUFFZERO, nil, f)
gc.Afunclit(&p.To, f)
// 4 and 128 = magic constants: see ../../runtime/asm_arm.s
p.To.Offset = 4 * (128 - int64(q))
} else {
var p *obj.Prog
for q > 0 {
p = gins(arm.AMOVW, &nz, &dst)
p.To.Type = obj.TYPE_MEM
p.To.Offset = 4
p.Scond |= arm.C_PBIT
//print("1. %v\n", p);
q--
}
}
var p *obj.Prog
for c > 0 {
p = gins(arm.AMOVB, &nz, &dst)
p.To.Type = obj.TYPE_MEM
p.To.Offset = 1
p.Scond |= arm.C_PBIT
//print("2. %v\n", p);
c--
}
gc.Regfree(&dst)
gc.Regfree(&nz)
}
Example 9: cgen_shift
/*
* generate shift according to op, one of:
* res = nl << nr
* res = nl >> nr
*/
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
if nl.Type.Width > 4 {
gc.Fatalf("cgen_shift %v", nl.Type)
}
w := int(nl.Type.Width * 8)
if op == gc.OLROT {
v := nr.Int()
var n1 gc.Node
gc.Regalloc(&n1, nl.Type, res)
if w == 32 {
gc.Cgen(nl, &n1)
gshift(arm.AMOVW, &n1, arm.SHIFT_RR, int32(w)-int32(v), &n1)
} else {
var n2 gc.Node
gc.Regalloc(&n2, nl.Type, nil)
gc.Cgen(nl, &n2)
gshift(arm.AMOVW, &n2, arm.SHIFT_LL, int32(v), &n1)
gshift(arm.AORR, &n2, arm.SHIFT_LR, int32(w)-int32(v), &n1)
gc.Regfree(&n2)
// Ensure sign/zero-extended result.
gins(optoas(gc.OAS, nl.Type), &n1, &n1)
}
gmove(&n1, res)
gc.Regfree(&n1)
return
}
if nr.Op == gc.OLITERAL {
var n1 gc.Node
gc.Regalloc(&n1, nl.Type, res)
gc.Cgen(nl, &n1)
sc := uint64(nr.Int())
if sc == 0 {
// nothing to do
} else if sc >= uint64(nl.Type.Width*8) {
if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)
} else {
gins(arm.AEOR, &n1, &n1)
}
} else {
if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(sc), &n1)
} else if op == gc.ORSH {
gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(sc), &n1)
} else { // OLSH
gshift(arm.AMOVW, &n1, arm.SHIFT_LL, int32(sc), &n1)
}
}
if w < 32 && op == gc.OLSH {
gins(optoas(gc.OAS, nl.Type), &n1, &n1)
}
gmove(&n1, res)
gc.Regfree(&n1)
return
}
tr := nr.Type
var t gc.Node
var n1 gc.Node
var n2 gc.Node
var n3 gc.Node
if tr.Width > 4 {
var nt gc.Node
gc.Tempname(&nt, nr.Type)
if nl.Ullman >= nr.Ullman {
gc.Regalloc(&n2, nl.Type, res)
gc.Cgen(nl, &n2)
gc.Cgen(nr, &nt)
n1 = nt
} else {
gc.Cgen(nr, &nt)
gc.Regalloc(&n2, nl.Type, res)
gc.Cgen(nl, &n2)
}
var hi gc.Node
var lo gc.Node
split64(&nt, &lo, &hi)
gc.Regalloc(&n1, gc.Types[gc.TUINT32], nil)
gc.Regalloc(&n3, gc.Types[gc.TUINT32], nil)
gmove(&lo, &n1)
gmove(&hi, &n3)
splitclean()
gins(arm.ATST, &n3, nil)
gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
p1 := gins(arm.AMOVW, &t, &n1)
p1.Scond = arm.C_SCOND_NE
tr = gc.Types[gc.TUINT32]
gc.Regfree(&n3)
//......... (code omitted here) .........
Example 10: stacksplit
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32) *obj.Prog {
// MOVW g_stackguard(g), R1
p = obj.Appendp(ctxt, p)
p.As = AMOVW
p.From.Type = obj.TYPE_MEM
p.From.Reg = REGG
p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
if ctxt.Cursym.Cfunc != 0 {
p.From.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
}
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R1
if framesize <= obj.StackSmall {
// small stack: SP < stackguard
// CMP stackguard, SP
p = obj.Appendp(ctxt, p)
p.As = ACMP
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R1
p.Reg = REGSP
} else if framesize <= obj.StackBig {
// large stack: SP-framesize < stackguard-StackSmall
// MOVW $-framesize(SP), R2
// CMP stackguard, R2
p = obj.Appendp(ctxt, p)
p.As = AMOVW
p.From.Type = obj.TYPE_ADDR
p.From.Reg = REGSP
p.From.Offset = int64(-framesize)
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R2
p = obj.Appendp(ctxt, p)
p.As = ACMP
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R1
p.Reg = REG_R2
} else {
// Such a large stack we need to protect against wraparound
// if SP is close to zero.
// SP-stackguard+StackGuard < framesize + (StackGuard-StackSmall)
// The +StackGuard on both sides is required to keep the left side positive:
// SP is allowed to be slightly below stackguard. See stack.h.
// CMP $StackPreempt, R1
// MOVW.NE $StackGuard(SP), R2
// SUB.NE R1, R2
// MOVW.NE $(framesize+(StackGuard-StackSmall)), R3
// CMP.NE R3, R2
p = obj.Appendp(ctxt, p)
p.As = ACMP
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(uint32(obj.StackPreempt & (1<<32 - 1)))
p.Reg = REG_R1
p = obj.Appendp(ctxt, p)
p.As = AMOVW
p.From.Type = obj.TYPE_ADDR
p.From.Reg = REGSP
p.From.Offset = obj.StackGuard
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R2
p.Scond = C_SCOND_NE
p = obj.Appendp(ctxt, p)
p.As = ASUB
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R1
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R2
p.Scond = C_SCOND_NE
p = obj.Appendp(ctxt, p)
p.As = AMOVW
p.From.Type = obj.TYPE_ADDR
p.From.Offset = int64(framesize) + (obj.StackGuard - obj.StackSmall)
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R3
p.Scond = C_SCOND_NE
p = obj.Appendp(ctxt, p)
p.As = ACMP
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R3
p.Reg = REG_R2
p.Scond = C_SCOND_NE
}
// BLS call-to-morestack
bls := obj.Appendp(ctxt, p)
bls.As = ABLS
bls.To.Type = obj.TYPE_BRANCH
var last *obj.Prog
for last = ctxt.Cursym.Text; last.Link != nil; last = last.Link {
}
//......... (code omitted here) .........
Example 11: blockcopy
//......... (code omitted here) .........
dir = -dir
}
var dst gc.Node
var src gc.Node
if n.Ullman >= res.Ullman {
gc.Agenr(n, &dst, res) // temporarily use dst
gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
gins(arm64.AMOVD, &dst, &src)
if res.Op == gc.ONAME {
gc.Gvardef(res)
}
gc.Agen(res, &dst)
} else {
if res.Op == gc.ONAME {
gc.Gvardef(res)
}
gc.Agenr(res, &dst, res)
gc.Agenr(n, &src, nil)
}
var tmp gc.Node
gc.Regalloc(&tmp, gc.Types[gc.Tptr], nil)
// set up end marker
var nend gc.Node
// move src and dest to the end of block if necessary
if dir < 0 {
if c >= 4 {
gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
gins(arm64.AMOVD, &src, &nend)
}
p := gins(arm64.AADD, nil, &src)
p.From.Type = obj.TYPE_CONST
p.From.Offset = w
p = gins(arm64.AADD, nil, &dst)
p.From.Type = obj.TYPE_CONST
p.From.Offset = w
} else {
p := gins(arm64.AADD, nil, &src)
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(-dir)
p = gins(arm64.AADD, nil, &dst)
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(-dir)
if c >= 4 {
gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
p := gins(arm64.AMOVD, &src, &nend)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = w
}
}
// move
// TODO: enable duffcopy for larger copies.
if c >= 4 {
p := gins(op, &src, &tmp)
p.From.Type = obj.TYPE_MEM
p.From.Offset = int64(dir)
p.Scond = arm64.C_XPRE
ploop := p
p = gins(op, &tmp, &dst)
p.To.Type = obj.TYPE_MEM
p.To.Offset = int64(dir)
p.Scond = arm64.C_XPRE
p = gcmp(arm64.ACMP, &src, &nend)
gc.Patch(gc.Gbranch(arm64.ABNE, nil, 0), ploop)
gc.Regfree(&nend)
} else {
// TODO(austin): Instead of generating ADD $-8,R8; ADD
// $-8,R7; n*(MOVDU 8(R8),R9; MOVDU R9,8(R7);) just
// generate the offsets directly and eliminate the
// ADDs. That will produce shorter, more
// pipeline-able code.
var p *obj.Prog
for ; c > 0; c-- {
p = gins(op, &src, &tmp)
p.From.Type = obj.TYPE_MEM
p.From.Offset = int64(dir)
p.Scond = arm64.C_XPRE
p = gins(op, &tmp, &dst)
p.To.Type = obj.TYPE_MEM
p.To.Offset = int64(dir)
p.Scond = arm64.C_XPRE
}
}
gc.Regfree(&dst)
gc.Regfree(&src)
gc.Regfree(&tmp)
}
Example 12: stacksplit
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32) *obj.Prog {
// MOVW g_stackguard(g), R1
p = obj.Appendp(ctxt, p)
p.As = AMOVW
p.From.Type = obj.TYPE_MEM
p.From.Reg = REGG
p.From.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
if ctxt.Cursym.CFunc() {
p.From.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
}
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R1
if framesize <= obj.StackSmall {
// small stack: SP < stackguard
// CMP stackguard, SP
p = obj.Appendp(ctxt, p)
p.As = ACMP
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R1
p.Reg = REGSP
} else if framesize <= obj.StackBig {
// large stack: SP-framesize < stackguard-StackSmall
// MOVW $-framesize(SP), R2
// CMP stackguard, R2
p = obj.Appendp(ctxt, p)
p.As = AMOVW
p.From.Type = obj.TYPE_ADDR
p.From.Reg = REGSP
p.From.Offset = int64(-framesize)
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R2
p = obj.Appendp(ctxt, p)
p.As = ACMP
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R1
p.Reg = REG_R2
} else {
// Such a large stack we need to protect against wraparound
// if SP is close to zero.
// SP-stackguard+StackGuard < framesize + (StackGuard-StackSmall)
// The +StackGuard on both sides is required to keep the left side positive:
// SP is allowed to be slightly below stackguard. See stack.h.
// CMP $StackPreempt, R1
// MOVW.NE $StackGuard(SP), R2
// SUB.NE R1, R2
// MOVW.NE $(framesize+(StackGuard-StackSmall)), R3
// CMP.NE R3, R2
p = obj.Appendp(ctxt, p)
p.As = ACMP
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(uint32(obj.StackPreempt & (1<<32 - 1)))
p.Reg = REG_R1
p = obj.Appendp(ctxt, p)
p.As = AMOVW
p.From.Type = obj.TYPE_ADDR
p.From.Reg = REGSP
p.From.Offset = obj.StackGuard
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R2
p.Scond = C_SCOND_NE
p = obj.Appendp(ctxt, p)
p.As = ASUB
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R1
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R2
p.Scond = C_SCOND_NE
p = obj.Appendp(ctxt, p)
p.As = AMOVW
p.From.Type = obj.TYPE_ADDR
p.From.Offset = int64(framesize) + (obj.StackGuard - obj.StackSmall)
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R3
p.Scond = C_SCOND_NE
p = obj.Appendp(ctxt, p)
p.As = ACMP
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R3
p.Reg = REG_R2
p.Scond = C_SCOND_NE
}
// BLS call-to-morestack
bls := obj.Appendp(ctxt, p)
bls.As = ABLS
bls.To.Type = obj.TYPE_BRANCH
var last *obj.Prog
for last = ctxt.Cursym.Text; last.Link != nil; last = last.Link {
}
//......... (code omitted here) .........
Example 13: stacksplit
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt bool) *obj.Prog {
// MOVW g_stackguard(g), R1
p = obj.Appendp(ctxt, p)
p.As = AMOVW
p.From.Type = obj.TYPE_MEM
p.From.Reg = REGG
p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
if ctxt.Cursym.Cfunc != 0 {
p.From.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
}
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R1
if framesize <= obj.StackSmall {
// small stack: SP < stackguard
// CMP stackguard, SP
p = obj.Appendp(ctxt, p)
p.As = ACMP
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R1
p.Reg = REGSP
} else if framesize <= obj.StackBig {
// large stack: SP-framesize < stackguard-StackSmall
// MOVW $-framesize(SP), R2
// CMP stackguard, R2
p = obj.Appendp(ctxt, p)
p.As = AMOVW
p.From.Type = obj.TYPE_ADDR
p.From.Reg = REGSP
p.From.Offset = int64(-framesize)
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R2
p = obj.Appendp(ctxt, p)
p.As = ACMP
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R1
p.Reg = REG_R2
} else {
// Such a large stack we need to protect against wraparound
// if SP is close to zero.
// SP-stackguard+StackGuard < framesize + (StackGuard-StackSmall)
// The +StackGuard on both sides is required to keep the left side positive:
// SP is allowed to be slightly below stackguard. See stack.h.
// CMP $StackPreempt, R1
// MOVW.NE $StackGuard(SP), R2
// SUB.NE R1, R2
// MOVW.NE $(framesize+(StackGuard-StackSmall)), R3
// CMP.NE R3, R2
p = obj.Appendp(ctxt, p)
p.As = ACMP
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(uint32(obj.StackPreempt & (1<<32 - 1)))
p.Reg = REG_R1
p = obj.Appendp(ctxt, p)
p.As = AMOVW
p.From.Type = obj.TYPE_ADDR
p.From.Reg = REGSP
p.From.Offset = obj.StackGuard
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R2
p.Scond = C_SCOND_NE
p = obj.Appendp(ctxt, p)
p.As = ASUB
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R1
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R2
p.Scond = C_SCOND_NE
p = obj.Appendp(ctxt, p)
p.As = AMOVW
p.From.Type = obj.TYPE_ADDR
p.From.Offset = int64(framesize) + (obj.StackGuard - obj.StackSmall)
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R3
p.Scond = C_SCOND_NE
p = obj.Appendp(ctxt, p)
p.As = ACMP
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R3
p.Reg = REG_R2
p.Scond = C_SCOND_NE
}
// MOVW.LS R14, R3
p = obj.Appendp(ctxt, p)
p.As = AMOVW
p.Scond = C_SCOND_LS
p.From.Type = obj.TYPE_REG
p.From.Reg = REGLINK
p.To.Type = obj.TYPE_REG
//......... (code omitted here) .........
Example 14: preprocess
func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
autosize := int32(0)
if ctxt.Symmorestack[0] == nil {
ctxt.Symmorestack[0] = obj.Linklookup(ctxt, "runtime.morestack", 0)
ctxt.Symmorestack[1] = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
}
ctxt.Cursym = cursym
if cursym.Text == nil || cursym.Text.Link == nil {
return
}
softfloat(ctxt, cursym)
p := cursym.Text
autoffset := int32(p.To.Offset)
if autoffset < 0 {
autoffset = 0
}
cursym.Locals = autoffset
cursym.Args = p.To.U.Argsize
if ctxt.Debugzerostack != 0 {
if autoffset != 0 && p.From3.Offset&obj.NOSPLIT == 0 {
// MOVW $4(R13), R1
p = obj.Appendp(ctxt, p)
p.As = AMOVW
p.From.Type = obj.TYPE_ADDR
p.From.Reg = REG_R13
p.From.Offset = 4
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R1
// MOVW $n(R13), R2
p = obj.Appendp(ctxt, p)
p.As = AMOVW
p.From.Type = obj.TYPE_ADDR
p.From.Reg = REG_R13
p.From.Offset = 4 + int64(autoffset)
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R2
// MOVW $0, R3
p = obj.Appendp(ctxt, p)
p.As = AMOVW
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R3
// L:
// MOVW.nil R3, 0(R1) +4
// CMP R1, R2
// BNE L
pl := obj.Appendp(ctxt, p)
p := pl
p.As = AMOVW
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R3
p.To.Type = obj.TYPE_MEM
p.To.Reg = REG_R1
p.To.Offset = 4
p.Scond |= C_PBIT
p = obj.Appendp(ctxt, p)
p.As = ACMP
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R1
p.Reg = REG_R2
p = obj.Appendp(ctxt, p)
p.As = ABNE
p.To.Type = obj.TYPE_BRANCH
p.Pcond = pl
}
}
/*
* find leaf subroutines
* strip NOPs
* expand RET
* expand BECOME pseudo
*/
var q1 *obj.Prog
var q *obj.Prog
for p := cursym.Text; p != nil; p = p.Link {
switch p.As {
case ACASE:
if ctxt.Flag_shared != 0 {
linkcase(p)
}
case obj.ATEXT:
p.Mark |= LEAF
//......... (code omitted here) .........
Example 15: cgen64
/*
* attempt to generate 64-bit
* res = n
* return 1 on success, 0 if op not handled.
*/
func cgen64(n *gc.Node, res *gc.Node) {
if res.Op != gc.OINDREG && res.Op != gc.ONAME {
gc.Dump("n", n)
gc.Dump("res", res)
gc.Fatal("cgen64 %v of %v", gc.Oconv(int(n.Op), 0), gc.Oconv(int(res.Op), 0))
}
l := n.Left
var t1 gc.Node
if !l.Addable {
gc.Tempname(&t1, l.Type)
gc.Cgen(l, &t1)
l = &t1
}
var hi1 gc.Node
var lo1 gc.Node
split64(l, &lo1, &hi1)
switch n.Op {
default:
gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0))
case gc.OMINUS:
var lo2 gc.Node
var hi2 gc.Node
split64(res, &lo2, &hi2)
gc.Regalloc(&t1, lo1.Type, nil)
var al gc.Node
gc.Regalloc(&al, lo1.Type, nil)
var ah gc.Node
gc.Regalloc(&ah, hi1.Type, nil)
gins(arm.AMOVW, &lo1, &al)
gins(arm.AMOVW, &hi1, &ah)
gmove(ncon(0), &t1)
p1 := gins(arm.ASUB, &al, &t1)
p1.Scond |= arm.C_SBIT
gins(arm.AMOVW, &t1, &lo2)
gmove(ncon(0), &t1)
gins(arm.ASBC, &ah, &t1)
gins(arm.AMOVW, &t1, &hi2)
gc.Regfree(&t1)
gc.Regfree(&al)
gc.Regfree(&ah)
splitclean()
splitclean()
return
case gc.OCOM:
gc.Regalloc(&t1, lo1.Type, nil)
gmove(ncon(^uint32(0)), &t1)
var lo2 gc.Node
var hi2 gc.Node
split64(res, &lo2, &hi2)
var n1 gc.Node
gc.Regalloc(&n1, lo1.Type, nil)
gins(arm.AMOVW, &lo1, &n1)
gins(arm.AEOR, &t1, &n1)
gins(arm.AMOVW, &n1, &lo2)
gins(arm.AMOVW, &hi1, &n1)
gins(arm.AEOR, &t1, &n1)
gins(arm.AMOVW, &n1, &hi2)
gc.Regfree(&t1)
gc.Regfree(&n1)
splitclean()
splitclean()
return
// binary operators.
// common setup below.
case gc.OADD,
gc.OSUB,
gc.OMUL,
gc.OLSH,
gc.ORSH,
gc.OAND,
gc.OOR,
gc.OXOR,
gc.OLROT:
break
}
// setup for binary operators
r := n.Right
if r != nil && !r.Addable {
var t2 gc.Node
//......... (code omitted here) .........