This page collects typical usage examples of the Golang method bootstrap/internal/obj.Prog.Reg. If you have been wondering what Prog.Reg does, how it is used, and what real code that uses it looks like, the curated examples below should help. You can also read further about the containing type, bootstrap/internal/obj.Prog.
Below, 13 code examples of Prog.Reg are shown, ordered by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Golang examples.
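Before the examples, a minimal sketch of where Prog.Reg fits (written for this page, not taken from the examples below): obj.Prog describes a single machine instruction, and for three-operand forms such as "ADD R1, R2, R3" the first source operand lives in Prog.From, the destination in Prog.To, and the middle register operand in the Prog.Reg field. The opcode and register constants follow the ppc64 examples on this page; the concrete choice of registers is illustrative only.

// Build "ADD REGTMP, REGSP, REGRT1" by hand (a sketch, not compiler source).
p := new(obj.Prog)
p.As = ppc64.AADD          // opcode
p.From.Type = obj.TYPE_REG // first source operand
p.From.Reg = ppc64.REGTMP
p.Reg = ppc64.REGSP        // middle (second source) register: this is Prog.Reg
p.To.Type = obj.TYPE_REG   // destination operand
p.To.Reg = ppc64.REGRT1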
Example 1: zerorange
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
	cnt := hi - lo
	if cnt == 0 {
		return p
	}
	if cnt < int64(4*gc.Widthptr) {
		// Small ranges: emit an unrolled sequence of MOVD R0, off(SP) stores.
		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
			p = appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, gc.Ctxt.FixedFrameSize()+frame+lo+i)
		}
	} else if cnt <= int64(128*gc.Widthptr) {
		// Medium ranges: jump into the duffzero body at the right offset.
		p = appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+frame+lo-8, obj.TYPE_REG, ppc64.REGRT1, 0)
		p.Reg = ppc64.REGSP // second source operand of the ADD lives in Prog.Reg
		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
		f := gc.Sysfunc("duffzero")
		gc.Naddr(&p.To, f)
		gc.Afunclit(&p.To, f)
		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
	} else {
		// Large ranges: emit an explicit store loop bounded by REGRT1/REGRT2.
		p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+frame+lo-8, obj.TYPE_REG, ppc64.REGTMP, 0)
		p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
		p.Reg = ppc64.REGSP
		p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
		p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
		p.Reg = ppc64.REGRT1
		p = appendpp(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(gc.Widthptr))
		p1 := p
		p = appendpp(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
		p = appendpp(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
		gc.Patch(p, p1)
	}
	return p
}
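The p.To.Offset assignment in the middle branch encodes where to enter duffzero. A hedged reading of that arithmetic, as a standalone helper: duffzeroOffset is an invented name, and the figure of 128 stores of 4 bytes each is read off the expression in the code rather than from any documented constant.

// Package-level sketch; mirrors p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr)).
// duffzero is treated as a run of 128 four-byte store instructions, each
// clearing one pointer-sized word, so clearing cnt bytes means skipping the
// first 128 - cnt/widthptr of them.
func duffzeroOffset(cnt, widthptr int64) int64 {
	words := cnt / widthptr  // pointer-sized words to zero
	return 4 * (128 - words) // byte offset past the unneeded stores
}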
Example 2: raddr
/*
 * insert n into reg slot of p
 */
func raddr(n *gc.Node, p *obj.Prog) {
	var a obj.Addr
	gc.Naddr(&a, n)
	if a.Type != obj.TYPE_REG {
		if n != nil {
			gc.Fatalf("bad in raddr: %v", gc.Oconv(int(n.Op), 0))
		} else {
			gc.Fatalf("bad in raddr: <null>")
		}
		p.Reg = 0
	} else {
		p.Reg = a.Reg
	}
}
Example 3: copysub1
// copysub1 replaces v with s in p1->reg if f!=0 or indicates if it could if f==0.
// Returns 1 on failure to substitute (it always succeeds on mips).
func copysub1(p1 *obj.Prog, v *obj.Addr, s *obj.Addr, f int) int {
	if f != 0 {
		if copyau1(p1, v) {
			p1.Reg = s.Reg
		}
	}
	return 0
}
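Examples 3 and 5 follow the return-code convention of the copyu family used throughout these peephole passes. The summary below is a hedged restatement based on the comments visible in the surrounding compiler sources; the constant names are invented here for readability and are not part of any package.

// Package-level sketch of the copyu return codes; not a real API.
const (
	copyuNotTouched = 0 // p neither sets nor uses v
	copyuUsed       = 1 // p uses v (also returned when a requested substitution fails)
	copyuReadWrite  = 2 // p reads and rewrites v (read-alter-rewrite)
	copyuSet        = 3 // p sets v and does not use it
	copyuSetAndUsed = 4 // p sets v and also uses it
)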
Example 4: cgen_hmul
/*
 * generate high multiply:
 *   res = (nl * nr) >> wordsize
 */
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	if nl.Ullman < nr.Ullman {
		nl, nr = nr, nl
	}
	t := nl.Type
	w := int(t.Width * 8)
	var n1 gc.Node
	gc.Regalloc(&n1, t, res)
	gc.Cgen(nl, &n1)
	var n2 gc.Node
	gc.Regalloc(&n2, t, nil)
	gc.Cgen(nr, &n2)
	switch gc.Simtype[t.Etype] {
	case gc.TINT8,
		gc.TINT16:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)
	case gc.TUINT8,
		gc.TUINT16:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(w), &n1)
	// perform a long multiplication.
	case gc.TINT32,
		gc.TUINT32:
		var p *obj.Prog
		if gc.Issigned[t.Etype] {
			p = gins(arm.AMULL, &n2, nil)
		} else {
			p = gins(arm.AMULLU, &n2, nil)
		}
		// n2 * n1 -> (n1 n2)
		p.Reg = n1.Reg
		p.To.Type = obj.TYPE_REGREG
		p.To.Reg = n1.Reg
		p.To.Offset = int64(n2.Reg)
	default:
		gc.Fatalf("cgen_hmul %v", t)
	}
	gc.Cgen(&n1, res)
	gc.Regfree(&n1)
	gc.Regfree(&n2)
}
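Stripped of backend detail, cgen_hmul's job is just this arithmetic: widen, multiply, and keep the upper word. A self-contained illustration in plain Go, independent of the compiler code above:

package main

import "fmt"

// hmul32 returns the high 32 bits of a 32x32-bit signed multiply,
// i.e. res = (nl * nr) >> wordsize from the comment above.
func hmul32(nl, nr int32) int32 {
	return int32((int64(nl) * int64(nr)) >> 32)
}

func main() {
	fmt.Println(hmul32(1<<20, 1<<20)) // full product is 2^40; high word is 2^8 = 256
}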
示例5: copyu
//.........這裏部分代碼省略.........
mips.AADDVU,
mips.ASUBV,
mips.ASUBVU,
mips.ASLLV,
mips.ASRLV,
mips.ASRAV,
mips.AADDF,
mips.AADDD,
mips.ASUBF,
mips.ASUBD,
mips.AMULF,
mips.AMULD,
mips.ADIVF,
mips.ADIVD:
if s != nil {
if copysub(&p.From, v, s, 1) != 0 {
return 1
}
if copysub1(p, v, s, 1) != 0 {
return 1
}
// Update only indirect uses of v in p->to
if !copyas(&p.To, v) {
if copysub(&p.To, v, s, 1) != 0 {
return 1
}
}
return 0
}
if copyas(&p.To, v) {
if p.Reg == 0 {
// Fix up implicit reg (e.g., ADD
// R3,R4 -> ADD R3,R4,R4) so we can
// update reg and to separately.
p.Reg = p.To.Reg
}
if copyau(&p.From, v) {
return 4
}
if copyau1(p, v) {
return 4
}
return 3
}
if copyau(&p.From, v) {
return 1
}
if copyau1(p, v) {
return 1
}
if copyau(&p.To, v) {
return 1
}
return 0
case obj.ACHECKNIL, /* read p->from */
mips.ABEQ, /* read p->from, read p->reg */
mips.ABNE,
mips.ABGTZ,
mips.ABGEZ,
mips.ABLTZ,
Example 6: peep
// UNUSED
func peep(firstp *obj.Prog) {
	g := gc.Flowstart(firstp, nil)
	if g == nil {
		return
	}
	gactive = 0
	var r *gc.Flow
	var p *obj.Prog
	var t int
loop1:
	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
		gc.Dumpit("loop1", g.Start, 0)
	}
	t = 0
	for r = g.Start; r != nil; r = r.Link {
		p = r.Prog
		switch p.As {
		/*
		 * elide shift into TYPE_SHIFT operand of subsequent instruction
		 */
		// if(shiftprop(r)) {
		//	excise(r);
		//	t++;
		//	break;
		// }
		case arm.ASLL,
			arm.ASRL,
			arm.ASRA:
			break
		case arm.AMOVB,
			arm.AMOVH,
			arm.AMOVW,
			arm.AMOVF,
			arm.AMOVD:
			if regtyp(&p.From) {
				if p.From.Type == p.To.Type && isfloatreg(&p.From) == isfloatreg(&p.To) {
					if p.Scond == arm.C_SCOND_NONE {
						if copyprop(g, r) {
							excise(r)
							t++
							break
						}
						if subprop(r) && copyprop(g, r) {
							excise(r)
							t++
							break
						}
					}
				}
			}
		case arm.AMOVHS,
			arm.AMOVHU,
			arm.AMOVBS,
			arm.AMOVBU:
			if p.From.Type == obj.TYPE_REG {
				if shortprop(r) {
					t++
				}
			}
		}
	}
	/*
		if(p->scond == C_SCOND_NONE)
		if(regtyp(&p->to))
		if(isdconst(&p->from)) {
			constprop(&p->from, &p->to, r->s1);
		}
		break;
	*/
	if t != 0 {
		goto loop1
	}
	for r := g.Start; r != nil; r = r.Link {
		p = r.Prog
		switch p.As {
		/*
		 * EOR -1,x,y => MVN x,y
		 */
		case arm.AEOR:
			if isdconst(&p.From) && p.From.Offset == -1 {
				p.As = arm.AMVN
				p.From.Type = obj.TYPE_REG
				if p.Reg != 0 {
					p.From.Reg = p.Reg
				} else {
					p.From.Reg = p.To.Reg
				}
				p.Reg = 0
			}
		}
	}
// ... (remainder of this code omitted) ...
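The EOR rewrite at the end of Example 6 rests on a bit identity: XOR with all-ones (the constant -1) is the bitwise complement, which is exactly what MVN computes. A standalone check of that identity in plain Go, unrelated to the compiler types:

package main

import "fmt"

func main() {
	x := uint32(0x0f0f0f0f)
	fmt.Printf("%#x\n", x^0xffffffff) // EOR $-1: 0xf0f0f0f0
	fmt.Printf("%#x\n", ^x)           // MVN:     0xf0f0f0f0, identical
}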
Example 7: shiftprop
/*
 * ASLL x,y,w
 * .. (not use w, not set x y w)
 * AXXX w,a,b (a != w)
 * .. (not use w)
 * (set w)
 * ----------- changed to
 * ..
 * AXXX (x<<y),a,b
 * ..
 */
func shiftprop(r *gc.Flow) bool {
	p := r.Prog
	if p.To.Type != obj.TYPE_REG {
		if gc.Debug['P'] != 0 {
			fmt.Printf("\tBOTCH: result not reg; FAILURE\n")
		}
		return false
	}
	n := int(p.To.Reg)
	a := obj.Addr{}
	if p.Reg != 0 && p.Reg != p.To.Reg {
		a.Type = obj.TYPE_REG
		a.Reg = p.Reg
	}
	if gc.Debug['P'] != 0 {
		fmt.Printf("shiftprop\n%v", p)
	}
	r1 := r
	var p1 *obj.Prog
	for {
		/* find first use of shift result; abort if shift operands or result are changed */
		r1 = gc.Uniqs(r1)
		if r1 == nil {
			if gc.Debug['P'] != 0 {
				fmt.Printf("\tbranch; FAILURE\n")
			}
			return false
		}
		if gc.Uniqp(r1) == nil {
			if gc.Debug['P'] != 0 {
				fmt.Printf("\tmerge; FAILURE\n")
			}
			return false
		}
		p1 = r1.Prog
		if gc.Debug['P'] != 0 {
			fmt.Printf("\n%v", p1)
		}
		switch copyu(p1, &p.To, nil) {
		case 0: /* not used or set */
			if (p.From.Type == obj.TYPE_REG && copyu(p1, &p.From, nil) > 1) || (a.Type == obj.TYPE_REG && copyu(p1, &a, nil) > 1) {
				if gc.Debug['P'] != 0 {
					fmt.Printf("\targs modified; FAILURE\n")
				}
				return false
			}
			continue
		case 3: /* set, not used */
			if gc.Debug['P'] != 0 {
				fmt.Printf("\tBOTCH: noref; FAILURE\n")
			}
			return false
		}
		break
	}
	/* check whether substitution can be done */
	switch p1.As {
	default:
		if gc.Debug['P'] != 0 {
			fmt.Printf("\tnon-dpi; FAILURE\n")
		}
		return false
	case arm.AAND,
		arm.AEOR,
		arm.AADD,
		arm.AADC,
		arm.AORR,
		arm.ASUB,
		arm.ASBC,
		arm.ARSB,
		arm.ARSC:
		if int(p1.Reg) == n || (p1.Reg == 0 && p1.To.Type == obj.TYPE_REG && int(p1.To.Reg) == n) {
			if p1.From.Type != obj.TYPE_REG {
				if gc.Debug['P'] != 0 {
					fmt.Printf("\tcan't swap; FAILURE\n")
				}
				return false
			}
// ... (remainder of this code omitted) ...
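The transformation shiftprop describes is, at bottom, ordinary operand folding: if the shifted value w is consumed exactly once, the consumer can perform the shift itself. A minimal equivalence check with plain integers (ADD standing in for the AXXX data-processing instruction):

package main

import "fmt"

func main() {
	x, y, a := uint32(5), uint32(3), uint32(100)

	// before: ASLL x,y,w then AXXX w,a,b
	w := x << y
	b1 := w + a

	// after: AXXX (x<<y),a,b with the shift folded into the operand
	b2 := (x << y) + a

	fmt.Println(b1 == b2, b1) // true 140
}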
Example 8: preprocess
func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
	// TODO(minux): add morestack short-cuts with small fixed frame-size.
	ctxt.Cursym = cursym
	if cursym.Text == nil || cursym.Text.Link == nil {
		return
	}
	p := cursym.Text
	textstksiz := p.To.Offset
	if textstksiz == -8 {
		// Compatibility hack.
		p.From3.Offset |= obj.NOFRAME
		textstksiz = 0
	}
	if textstksiz%8 != 0 {
		ctxt.Diag("frame size %d not a multiple of 8", textstksiz)
	}
	if p.From3.Offset&obj.NOFRAME != 0 {
		if textstksiz != 0 {
			ctxt.Diag("NOFRAME functions must have a frame size of 0, not %d", textstksiz)
		}
	}
	cursym.Args = p.To.Val.(int32)
	cursym.Locals = int32(textstksiz)
	/*
	 * find leaf subroutines
	 * strip NOPs
	 * expand RET
	 * expand BECOME pseudo
	 */
	if ctxt.Debugvlog != 0 {
		fmt.Fprintf(ctxt.Bso, "%5.2f noops\n", obj.Cputime())
	}
	ctxt.Bso.Flush()
	var q *obj.Prog
	var q1 *obj.Prog
	for p := cursym.Text; p != nil; p = p.Link {
		switch p.As {
		/* too hard, just leave alone */
		case obj.ATEXT:
			q = p
			p.Mark |= LABEL | LEAF | SYNC
			if p.Link != nil {
				p.Link.Mark |= LABEL
			}
		case ANOR:
			q = p
			if p.To.Type == obj.TYPE_REG {
				if p.To.Reg == REGZERO {
					p.Mark |= LABEL | SYNC
				}
			}
		case ALWAR,
			ASTWCCC,
			AECIWX,
			AECOWX,
			AEIEIO,
			AICBI,
			AISYNC,
			ATLBIE,
			ATLBIEL,
			ASLBIA,
			ASLBIE,
			ASLBMFEE,
			ASLBMFEV,
			ASLBMTE,
			ADCBF,
			ADCBI,
			ADCBST,
			ADCBT,
			ADCBTST,
			ADCBZ,
			ASYNC,
			ATLBSYNC,
			APTESYNC,
			ATW,
			AWORD,
			ARFI,
			ARFCI,
			ARFID,
			AHRFID:
			q = p
			p.Mark |= LABEL | SYNC
			continue
		case AMOVW, AMOVWZ, AMOVD:
			q = p
			if p.From.Reg >= REG_SPECIAL || p.To.Reg >= REG_SPECIAL {
				p.Mark |= LABEL | SYNC
			}
			continue
		case AFABS,
// ... (remainder of this code omitted) ...
Example 9: stacksplit
/*
	// instruction scheduling
	if(debug['Q'] == 0)
		return;

	curtext = nil;
	q = nil;	// p - 1
	q1 = firstp;	// top of block
	o = 0;		// count of instructions
	for(p = firstp; p != nil; p = p1) {
		p1 = p->link;
		o++;
		if(p->mark & NOSCHED){
			if(q1 != p){
				sched(q1, q);
			}
			for(; p != nil; p = p->link){
				if(!(p->mark & NOSCHED))
					break;
				q = p;
			}
			p1 = p;
			q1 = p;
			o = 0;
			continue;
		}
		if(p->mark & (LABEL|SYNC)) {
			if(q1 != p)
				sched(q1, q);
			q1 = p;
			o = 1;
		}
		if(p->mark & (BRANCH|SYNC)) {
			sched(q1, p);
			q1 = p1;
			o = 0;
		}
		if(o >= NSCHED) {
			sched(q1, p);
			q1 = p1;
			o = 0;
		}
		q = p;
	}
*/
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32) *obj.Prog {
	// MOVD	g_stackguard(g), R3
	p = obj.Appendp(ctxt, p)
	p.As = AMOVD
	p.From.Type = obj.TYPE_MEM
	p.From.Reg = REGG
	p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
	if ctxt.Cursym.Cfunc != 0 {
		p.From.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
	}
	p.To.Type = obj.TYPE_REG
	p.To.Reg = REG_R3
	var q *obj.Prog
	if framesize <= obj.StackSmall {
		// small stack: SP < stackguard
		//	CMP	stackguard, SP
		p = obj.Appendp(ctxt, p)
		p.As = ACMPU
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R3
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REGSP
	} else if framesize <= obj.StackBig {
		// large stack: SP-framesize < stackguard-StackSmall
		//	ADD	$-framesize, SP, R4
		//	CMP	stackguard, R4
		p = obj.Appendp(ctxt, p)
		p.As = AADD
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(-framesize)
		p.Reg = REGSP
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R4
		p = obj.Appendp(ctxt, p)
		p.As = ACMPU
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R3
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R4
	} else {
		// Such a large stack we need to protect against wraparound.
		// If SP is close to zero:
		//	SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
		// The +StackGuard on both sides is required to keep the left side positive:
		// SP is allowed to be slightly below stackguard. See stack.h.
		//
		// Preemption sets stackguard to StackPreempt, a very large value.
		// That breaks the math above, so we have to check for that explicitly.
		//	// stackguard is R3
		//	CMP	R3, $StackPreempt
// ... (remainder of this code omitted) ...
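The arithmetic in the wraparound comment above can be sanity-checked with plain unsigned integers. A standalone sketch: the constants are illustrative stand-ins, not the runtime's real StackGuard/StackSmall values, and needMorestack is an invented name.

package main

import "fmt"

const (
	stackSmall uint64 = 128 // illustrative, not the runtime's value
	stackGuard uint64 = 880 // illustrative, not the runtime's value
)

// needMorestack mirrors the guarded comparison above:
//	SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
// SP may sit at most StackGuard below stackguard, so the left-hand side is
// mathematically nonnegative and the modular uint64 evaluation is exact,
// unlike the naive SP-framesize < stackguard-StackSmall, which wraps around
// when SP is close to zero.
func needMorestack(sp, stackguard, framesize uint64) bool {
	return sp-stackguard+stackGuard <= framesize+(stackGuard-stackSmall)
}

func main() {
	fmt.Println(needMorestack(64, 880, 4096))              // SP near zero: true (grow)
	fmt.Println(needMorestack(1<<20, (1<<20)-10000, 4096)) // plenty of room: false
}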
Example 10: preprocess
func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
	// TODO(minux): add morestack short-cuts with small fixed frame-size.
	ctxt.Cursym = cursym
	// a switch for enabling/disabling instruction scheduling
	nosched := true
	if cursym.Text == nil || cursym.Text.Link == nil {
		return
	}
	p := cursym.Text
	textstksiz := p.To.Offset
	cursym.Args = p.To.Val.(int32)
	cursym.Locals = int32(textstksiz)
	/*
	 * find leaf subroutines
	 * strip NOPs
	 * expand RET
	 * expand BECOME pseudo
	 */
	if ctxt.Debugvlog != 0 {
		fmt.Fprintf(ctxt.Bso, "%5.2f noops\n", obj.Cputime())
	}
	ctxt.Bso.Flush()
	var q *obj.Prog
	var q1 *obj.Prog
	for p := cursym.Text; p != nil; p = p.Link {
		switch p.As {
		/* too hard, just leave alone */
		case obj.ATEXT:
			q = p
			p.Mark |= LABEL | LEAF | SYNC
			if p.Link != nil {
				p.Link.Mark |= LABEL
			}
		/* too hard, just leave alone */
		case AMOVW,
			AMOVV:
			q = p
			if p.To.Type == obj.TYPE_REG && p.To.Reg >= REG_SPECIAL {
				p.Mark |= LABEL | SYNC
				break
			}
			if p.From.Type == obj.TYPE_REG && p.From.Reg >= REG_SPECIAL {
				p.Mark |= LABEL | SYNC
			}
		/* too hard, just leave alone */
		case ASYSCALL,
			AWORD,
			ATLBWR,
			ATLBWI,
			ATLBP,
			ATLBR:
			q = p
			p.Mark |= LABEL | SYNC
		case ANOR:
			q = p
			if p.To.Type == obj.TYPE_REG {
				if p.To.Reg == REGZERO {
					p.Mark |= LABEL | SYNC
				}
			}
		case ABGEZAL,
			ABLTZAL,
			AJAL,
			obj.ADUFFZERO,
			obj.ADUFFCOPY:
			cursym.Text.Mark &^= LEAF
			fallthrough
		case AJMP,
			ABEQ,
			ABGEZ,
			ABGTZ,
			ABLEZ,
			ABLTZ,
			ABNE,
			ABFPT, ABFPF:
			if p.As == ABFPT || p.As == ABFPF {
				// We don't treat ABFPT and ABFPF as branches here,
				// so that we will always fill nop (0x0) in their
				// delay slot during assembly.
				// This is to workaround a kernel FPU emulator bug
				// where it uses the user stack to simulate the
				// instruction in the delay slot if it's not 0x0,
				// and somehow that leads to SIGSEGV when the kernel
				// jump to the stack.
				p.Mark |= SYNC
			} else {
				p.Mark |= BRANCH
			}
// ... (remainder of this code omitted) ...
Example 11: stacksplit
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32) *obj.Prog {
	// MOVV	g_stackguard(g), R1
	p = obj.Appendp(ctxt, p)
	p.As = AMOVV
	p.From.Type = obj.TYPE_MEM
	p.From.Reg = REGG
	p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
	if ctxt.Cursym.Cfunc != 0 {
		p.From.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
	}
	p.To.Type = obj.TYPE_REG
	p.To.Reg = REG_R1
	var q *obj.Prog
	if framesize <= obj.StackSmall {
		// small stack: SP < stackguard
		//	AGTU	SP, stackguard, R1
		p = obj.Appendp(ctxt, p)
		p.As = ASGTU
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REGSP
		p.Reg = REG_R1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R1
	} else if framesize <= obj.StackBig {
		// large stack: SP-framesize < stackguard-StackSmall
		//	ADDV	$-framesize, SP, R2
		//	SGTU	R2, stackguard, R1
		p = obj.Appendp(ctxt, p)
		p.As = AADDV
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(-framesize)
		p.Reg = REGSP
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R2
		p = obj.Appendp(ctxt, p)
		p.As = ASGTU
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R2
		p.Reg = REG_R1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R1
	} else {
		// Such a large stack we need to protect against wraparound.
		// If SP is close to zero:
		//	SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
		// The +StackGuard on both sides is required to keep the left side positive:
		// SP is allowed to be slightly below stackguard. See stack.h.
		//
		// Preemption sets stackguard to StackPreempt, a very large value.
		// That breaks the math above, so we have to check for that explicitly.
		//	// stackguard is R1
		//	MOVV	$StackPreempt, R2
		//	BEQ	R1, R2, label-of-call-to-morestack
		//	ADDV	$StackGuard, SP, R2
		//	SUBVU	R1, R2
		//	MOVV	$(framesize+(StackGuard-StackSmall)), R1
		//	SGTU	R2, R1, R1
		p = obj.Appendp(ctxt, p)
		p.As = AMOVV
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = obj.StackPreempt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R2
		p = obj.Appendp(ctxt, p)
		q = p
		p.As = ABEQ
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R1
		p.Reg = REG_R2
		p.To.Type = obj.TYPE_BRANCH
		p.Mark |= BRANCH
		p = obj.Appendp(ctxt, p)
		p.As = AADDV
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = obj.StackGuard
		p.Reg = REGSP
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R2
		p = obj.Appendp(ctxt, p)
		p.As = ASUBVU
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R2
		p = obj.Appendp(ctxt, p)
		p.As = AMOVV
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(framesize) + obj.StackGuard - obj.StackSmall
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R1
// ... (remainder of this code omitted) ...
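Unlike the flag-register CMP sequences in Examples 9, 12, and 13, MIPS materializes the comparison into R1: SGTU writes 1 or 0, and the truncated tail of this function is expected to branch to morestack when R1 is zero (an assumption; the branch itself is not shown above). A hedged Go restatement of the small-stack test, with invented helper names:

// sgtu mirrors "SGTU SP, stackguard, R1": the result register holds
// 1 exactly when sp > stackguard, else 0.
func sgtu(sp, stackguard uint64) uint64 {
	if sp > stackguard {
		return 1
	}
	return 0
}

// The stack must grow when the materialized flag is 0, i.e. SP <= stackguard
// (assumption: the omitted code branches on R1 == 0).
func needMorestackSmall(sp, stackguard uint64) bool {
	return sgtu(sp, stackguard) == 0
}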
Example 12: stacksplit
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32) *obj.Prog {
	// MOVW	g_stackguard(g), R1
	p = obj.Appendp(ctxt, p)
	p.As = AMOVW
	p.From.Type = obj.TYPE_MEM
	p.From.Reg = REGG
	p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
	if ctxt.Cursym.Cfunc != 0 {
		p.From.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
	}
	p.To.Type = obj.TYPE_REG
	p.To.Reg = REG_R1
	if framesize <= obj.StackSmall {
		// small stack: SP < stackguard
		//	CMP	stackguard, SP
		p = obj.Appendp(ctxt, p)
		p.As = ACMP
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R1
		p.Reg = REGSP
	} else if framesize <= obj.StackBig {
		// large stack: SP-framesize < stackguard-StackSmall
		//	MOVW	$-framesize(SP), R2
		//	CMP	stackguard, R2
		p = obj.Appendp(ctxt, p)
		p.As = AMOVW
		p.From.Type = obj.TYPE_ADDR
		p.From.Reg = REGSP
		p.From.Offset = int64(-framesize)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R2
		p = obj.Appendp(ctxt, p)
		p.As = ACMP
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R1
		p.Reg = REG_R2
	} else {
		// Such a large stack we need to protect against wraparound
		// if SP is close to zero.
		//	SP-stackguard+StackGuard < framesize + (StackGuard-StackSmall)
		// The +StackGuard on both sides is required to keep the left side positive:
		// SP is allowed to be slightly below stackguard. See stack.h.
		//	CMP	$StackPreempt, R1
		//	MOVW.NE	$StackGuard(SP), R2
		//	SUB.NE	R1, R2
		//	MOVW.NE	$(framesize+(StackGuard-StackSmall)), R3
		//	CMP.NE	R3, R2
		p = obj.Appendp(ctxt, p)
		p.As = ACMP
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(uint32(obj.StackPreempt & (1<<32 - 1)))
		p.Reg = REG_R1
		p = obj.Appendp(ctxt, p)
		p.As = AMOVW
		p.From.Type = obj.TYPE_ADDR
		p.From.Reg = REGSP
		p.From.Offset = obj.StackGuard
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R2
		p.Scond = C_SCOND_NE
		p = obj.Appendp(ctxt, p)
		p.As = ASUB
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R2
		p.Scond = C_SCOND_NE
		p = obj.Appendp(ctxt, p)
		p.As = AMOVW
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = int64(framesize) + (obj.StackGuard - obj.StackSmall)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R3
		p.Scond = C_SCOND_NE
		p = obj.Appendp(ctxt, p)
		p.As = ACMP
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R3
		p.Reg = REG_R2
		p.Scond = C_SCOND_NE
	}
	// BLS call-to-morestack
	bls := obj.Appendp(ctxt, p)
	bls.As = ABLS
	bls.To.Type = obj.TYPE_BRANCH
	var last *obj.Prog
	for last = ctxt.Cursym.Text; last.Link != nil; last = last.Link {
	}
// ... (remainder of this code omitted) ...
Example 13: stacksplit
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32) *obj.Prog {
	// MOV	g_stackguard(g), R1
	p = obj.Appendp(ctxt, p)
	p.As = AMOVD
	p.From.Type = obj.TYPE_MEM
	p.From.Reg = REGG
	p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
	if ctxt.Cursym.Cfunc != 0 {
		p.From.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
	}
	p.To.Type = obj.TYPE_REG
	p.To.Reg = REG_R1
	var q *obj.Prog
	if framesize <= obj.StackSmall {
		// small stack: SP < stackguard
		//	MOV	SP, R2
		//	CMP	stackguard, R2
		p = obj.Appendp(ctxt, p)
		p.As = AMOVD
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REGSP
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R2
		p = obj.Appendp(ctxt, p)
		p.As = ACMP
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R1
		p.Reg = REG_R2
	} else if framesize <= obj.StackBig {
		// large stack: SP-framesize < stackguard-StackSmall
		//	SUB	$framesize, SP, R2
		//	CMP	stackguard, R2
		p = obj.Appendp(ctxt, p)
		p.As = ASUB
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(framesize)
		p.Reg = REGSP
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R2
		p = obj.Appendp(ctxt, p)
		p.As = ACMP
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R1
		p.Reg = REG_R2
	} else {
		// Such a large stack we need to protect against wraparound
		// if SP is close to zero.
		//	SP-stackguard+StackGuard < framesize + (StackGuard-StackSmall)
		// The +StackGuard on both sides is required to keep the left side positive:
		// SP is allowed to be slightly below stackguard. See stack.h.
		//	CMP	$StackPreempt, R1
		//	BEQ	label_of_call_to_morestack
		//	ADD	$StackGuard, SP, R2
		//	SUB	R1, R2
		//	MOV	$(framesize+(StackGuard-StackSmall)), R3
		//	CMP	R3, R2
		p = obj.Appendp(ctxt, p)
		p.As = ACMP
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = obj.StackPreempt
		p.Reg = REG_R1
		p = obj.Appendp(ctxt, p)
		q = p
		p.As = ABEQ
		p.To.Type = obj.TYPE_BRANCH
		p = obj.Appendp(ctxt, p)
		p.As = AADD
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = obj.StackGuard
		p.Reg = REGSP
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R2
		p = obj.Appendp(ctxt, p)
		p.As = ASUB
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R2
		p = obj.Appendp(ctxt, p)
		p.As = AMOVD
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(framesize) + (obj.StackGuard - obj.StackSmall)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R3
		p = obj.Appendp(ctxt, p)
		p.As = ACMP
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R3
// ... (remainder of this code omitted) ...