This page collects typical usage examples of Prog.Scond from the Golang package cmd/avail/obj. If you have been wondering what Prog.Scond does, how to use it, or where to find worked examples, the curated samples below should help. Scond holds an instruction's condition and suffix bits on ARM and ARM64: conditional execution (C_SCOND_EQ, C_SCOND_NE), the flag-setting S bit (C_SBIT), and pre/post-indexed addressing (C_XPRE, C_PBIT), all of which appear below. You can also explore the enclosing type, cmd/avail/obj.Prog, for more context.
The following 8 code examples of Prog.Scond are sorted by popularity by default. You can upvote the examples you find useful; your votes help the system recommend better Golang samples.
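Before the examples, here is the pattern they all share: obtain a *obj.Prog, fill in its operands, then set (or OR bits into) Scond. This is a hedged, illustrative fragment, not a standalone program: it assumes it lives inside the Go toolchain tree (the obj packages are internal) and that a *obj.Link named ctxt is in scope, as in the examples below.

p := ctxt.NewProg() // allocate a fresh instruction (the examples also use gc.Ctxt.NewProg())
p.As = arm.AMOVW
p.From.Type = obj.TYPE_REG
p.From.Reg = arm.REG_R0
p.To.Type = obj.TYPE_REG
p.To.Reg = arm.REG_R1
p.Scond = arm.C_SCOND_EQ // assembles as MOVW.EQ R0, R1: executes only when the Z flag is set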
Example 1: expandchecks
// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
    var reg int
    var p1 *obj.Prog

    for p := firstp; p != nil; p = p.Link {
        if p.As != obj.ACHECKNIL {
            continue
        }
        if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
            gc.Warnl(p.Lineno, "generated nil check")
        }
        if p.From.Type != obj.TYPE_REG {
            gc.Fatalf("invalid nil check %v", p)
        }
        reg = int(p.From.Reg)

        // check is
        //     CMP arg, $0
        //     MOV.EQ arg, 0(arg)
        p1 = gc.Ctxt.NewProg()
        gc.Clearp(p1)
        p1.Link = p.Link
        p.Link = p1
        p1.Lineno = p.Lineno
        p1.Pc = 9999
        p1.As = arm.AMOVW
        p1.From.Type = obj.TYPE_REG
        p1.From.Reg = int16(reg)
        p1.To.Type = obj.TYPE_MEM
        p1.To.Reg = int16(reg)
        p1.To.Offset = 0
        p1.Scond = arm.C_SCOND_EQ
        p.As = arm.ACMP
        p.From.Type = obj.TYPE_CONST
        p.From.Reg = 0
        p.From.Offset = 0
        p.Reg = int16(reg)
    }
}
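To read the rewrite above: CHECKNIL becomes a CMP that sets the Z flag, followed by a store whose Scond is C_SCOND_EQ, so the store executes only when the register is nil and faults at a known instruction. A hedged plain-Go rendering of that semantics (nilCheck is a hypothetical name, not compiler code):

package main

import "fmt"

// nilCheck mirrors the emitted pair
//     CMP arg, $0        // sets Z when arg is nil
//     MOV.EQ arg, 0(arg) // Scond = C_SCOND_EQ: store through arg only when Z is set
// The conditional store faults exactly when arg is nil and is a no-op otherwise.
func nilCheck(arg *int) {
    if arg == nil { // CMP arg, $0
        *arg = 0 // MOV.EQ: deliberate fault on nil (panics here)
    }
}

func main() {
    x := 42
    nilCheck(&x) // non-nil: nothing happens
    fmt.Println(x)
    // nilCheck(nil) // would panic: that fault is the whole point of the check
}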
Example 2: zerorange
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, r0 *uint32) *obj.Prog {
    cnt := hi - lo
    if cnt == 0 {
        return p
    }
    if *r0 == 0 {
        p = appendpp(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
        *r0 = 1
    }

    if cnt < int64(4*gc.Widthptr) {
        for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
            p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, int32(4+frame+lo+i))
        }
    } else if !gc.Nacl && (cnt <= int64(128*gc.Widthptr)) {
        p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(4+frame+lo), obj.TYPE_REG, arm.REG_R1, 0)
        p.Reg = arm.REGSP
        p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
        f := gc.Sysfunc("duffzero")
        gc.Naddr(&p.To, f)
        gc.Afunclit(&p.To, f)
        p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
    } else {
        p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(4+frame+lo), obj.TYPE_REG, arm.REG_R1, 0)
        p.Reg = arm.REGSP
        p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(cnt), obj.TYPE_REG, arm.REG_R2, 0)
        p.Reg = arm.REG_R1
        p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
        p1 := p
        p.Scond |= arm.C_PBIT
        p = appendpp(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
        p.Reg = arm.REG_R2
        p = appendpp(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
        gc.Patch(p, p1)
    }

    return p
}
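The final branch builds an explicit loop: R1 walks the region, R2 holds its end, and the C_PBIT OR-ed into Scond turns the MOVW into a post-incrementing store. A hedged plain-Go rendering of the loop's semantics (zeroLoop is a hypothetical name; the slice stands in for the region between R1 and R2):

// zeroLoop mirrors the emitted sequence
//     loop: MOVW.P R0, 4(R1) // store zero, then advance R1 (C_PBIT)
//           CMP    R1, R2
//           BNE    loop
func zeroLoop(region []uint32) {
    for i := 0; i != len(region); i++ { // CMP/BNE against the end pointer in R2
        region[i] = 0 // each MOVW.P stores R0 (zero) and post-increments
    }
}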
Example 3: stacksplit
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32) *obj.Prog {
    // MOVW g_stackguard(g), R1
    p = obj.Appendp(ctxt, p)

    p.As = AMOVW
    p.From.Type = obj.TYPE_MEM
    p.From.Reg = REGG
    p.From.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
    if ctxt.Cursym.Cfunc {
        p.From.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
    }
    p.To.Type = obj.TYPE_REG
    p.To.Reg = REG_R1

    if framesize <= obj.StackSmall {
        // small stack: SP < stackguard
        //     CMP stackguard, SP
        p = obj.Appendp(ctxt, p)

        p.As = ACMP
        p.From.Type = obj.TYPE_REG
        p.From.Reg = REG_R1
        p.Reg = REGSP
    } else if framesize <= obj.StackBig {
        // large stack: SP-framesize < stackguard-StackSmall
        //     MOVW $-framesize(SP), R2
        //     CMP stackguard, R2
        p = obj.Appendp(ctxt, p)

        p.As = AMOVW
        p.From.Type = obj.TYPE_ADDR
        p.From.Reg = REGSP
        p.From.Offset = int64(-framesize)
        p.To.Type = obj.TYPE_REG
        p.To.Reg = REG_R2

        p = obj.Appendp(ctxt, p)
        p.As = ACMP
        p.From.Type = obj.TYPE_REG
        p.From.Reg = REG_R1
        p.Reg = REG_R2
    } else {
        // Such a large stack we need to protect against wraparound
        // if SP is close to zero.
        //     SP-stackguard+StackGuard < framesize + (StackGuard-StackSmall)
        // The +StackGuard on both sides is required to keep the left side positive:
        // SP is allowed to be slightly below stackguard. See stack.h.
        //     CMP     $StackPreempt, R1
        //     MOVW.NE $StackGuard(SP), R2
        //     SUB.NE  R1, R2
        //     MOVW.NE $(framesize+(StackGuard-StackSmall)), R3
        //     CMP.NE  R3, R2
        p = obj.Appendp(ctxt, p)

        p.As = ACMP
        p.From.Type = obj.TYPE_CONST
        p.From.Offset = int64(uint32(obj.StackPreempt & (1<<32 - 1)))
        p.Reg = REG_R1

        p = obj.Appendp(ctxt, p)
        p.As = AMOVW
        p.From.Type = obj.TYPE_ADDR
        p.From.Reg = REGSP
        p.From.Offset = obj.StackGuard
        p.To.Type = obj.TYPE_REG
        p.To.Reg = REG_R2
        p.Scond = C_SCOND_NE

        p = obj.Appendp(ctxt, p)
        p.As = ASUB
        p.From.Type = obj.TYPE_REG
        p.From.Reg = REG_R1
        p.To.Type = obj.TYPE_REG
        p.To.Reg = REG_R2
        p.Scond = C_SCOND_NE

        p = obj.Appendp(ctxt, p)
        p.As = AMOVW
        p.From.Type = obj.TYPE_ADDR
        p.From.Offset = int64(framesize) + (obj.StackGuard - obj.StackSmall)
        p.To.Type = obj.TYPE_REG
        p.To.Reg = REG_R3
        p.Scond = C_SCOND_NE

        p = obj.Appendp(ctxt, p)
        p.As = ACMP
        p.From.Type = obj.TYPE_REG
        p.From.Reg = REG_R3
        p.Reg = REG_R2
        p.Scond = C_SCOND_NE
    }

    // BLS call-to-morestack
    bls := obj.Appendp(ctxt, p)
    bls.As = ABLS
    bls.To.Type = obj.TYPE_BRANCH

    var last *obj.Prog
    for last = ctxt.Cursym.Text; last.Link != nil; last = last.Link {
    }
//......... (remainder of code omitted) .........
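In the very-large-frame case, the CMP against $StackPreempt drives four C_SCOND_NE instructions: when the preempt sentinel is loaded the whole comparison chain is skipped, the EQ flags stand, and the final BLS fires unconditionally. A hedged plain-Go rendering of the condition the chain evaluates (needMorestack and its parameters are stand-ins; all arithmetic is uint32 to mirror 32-bit ARM registers):

// needMorestack reports whether the BLS to morestack is taken.
func needMorestack(sp, stackguard, framesize, stackGuard, stackSmall, stackPreempt uint32) bool {
    if stackguard == stackPreempt {
        return true // preemption requested: the .NE chain is skipped, branch taken
    }
    // SP-stackguard+StackGuard < framesize+(StackGuard-StackSmall);
    // +StackGuard on both sides keeps the left side positive even when
    // SP dips slightly below stackguard.
    return sp+stackGuard-stackguard < framesize+(stackGuard-stackSmall)
}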
Example 4: cgen64
/*
 * attempt to generate 64-bit
 *     res = n
 * return 1 on success, 0 if op not handled.
 */
func cgen64(n *gc.Node, res *gc.Node) {
    if res.Op != gc.OINDREG && res.Op != gc.ONAME {
        gc.Dump("n", n)
        gc.Dump("res", res)
        gc.Fatalf("cgen64 %v of %v", n.Op, res.Op)
    }

    l := n.Left
    var t1 gc.Node
    if !l.Addable {
        gc.Tempname(&t1, l.Type)
        gc.Cgen(l, &t1)
        l = &t1
    }

    var hi1 gc.Node
    var lo1 gc.Node
    split64(l, &lo1, &hi1)
    switch n.Op {
    default:
        gc.Fatalf("cgen64 %v", n.Op)

    case gc.OMINUS:
        var lo2 gc.Node
        var hi2 gc.Node
        split64(res, &lo2, &hi2)

        gc.Regalloc(&t1, lo1.Type, nil)
        var al gc.Node
        gc.Regalloc(&al, lo1.Type, nil)
        var ah gc.Node
        gc.Regalloc(&ah, hi1.Type, nil)

        gins(arm.AMOVW, &lo1, &al)
        gins(arm.AMOVW, &hi1, &ah)

        gmove(ncon(0), &t1)
        p1 := gins(arm.ASUB, &al, &t1)
        p1.Scond |= arm.C_SBIT
        gins(arm.AMOVW, &t1, &lo2)

        gmove(ncon(0), &t1)
        gins(arm.ASBC, &ah, &t1)
        gins(arm.AMOVW, &t1, &hi2)

        gc.Regfree(&t1)
        gc.Regfree(&al)
        gc.Regfree(&ah)
        splitclean()
        splitclean()
        return

    case gc.OCOM:
        gc.Regalloc(&t1, lo1.Type, nil)
        gmove(ncon(^uint32(0)), &t1)

        var lo2 gc.Node
        var hi2 gc.Node
        split64(res, &lo2, &hi2)
        var n1 gc.Node
        gc.Regalloc(&n1, lo1.Type, nil)

        gins(arm.AMOVW, &lo1, &n1)
        gins(arm.AEOR, &t1, &n1)
        gins(arm.AMOVW, &n1, &lo2)

        gins(arm.AMOVW, &hi1, &n1)
        gins(arm.AEOR, &t1, &n1)
        gins(arm.AMOVW, &n1, &hi2)

        gc.Regfree(&t1)
        gc.Regfree(&n1)
        splitclean()
        splitclean()
        return

    // binary operators.
    // common setup below.
    case gc.OADD,
        gc.OSUB,
        gc.OMUL,
        gc.OLSH,
        gc.ORSH,
        gc.OAND,
        gc.OOR,
        gc.OXOR,
        gc.OLROT:
        break
    }

    // setup for binary operators
    r := n.Right
    if r != nil && !r.Addable {
        var t2 gc.Node
//......... (remainder of code omitted) .........
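In the OMINUS case, the C_SBIT OR-ed into the SUB's Scond makes it set the carry flag, which the following SBC consumes, giving a correct two-word negation. A hedged plain-Go rendering (neg64 is a hypothetical name; lo and hi are the 32-bit halves):

// neg64 mirrors
//     SUB.S al, t1 // t1 = 0 - lo, setting carry (C_SBIT)
//     SBC   ah, t1 // t1 = 0 - hi - borrow
func neg64(lo, hi uint32) (rlo, rhi uint32) {
    rlo = 0 - lo
    var borrow uint32
    if lo != 0 { // 0-lo borrows unless lo == 0
        borrow = 1
    }
    rhi = 0 - hi - borrow
    return
}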
Example 5: preprocess
//......... (beginning of code omitted) .........
    aoffset = 0xF0
}
if cursym.Text.Mark&LEAF != 0 {
    cursym.Leaf = true
    if ctxt.Autosize == 0 {
        break
    }
    aoffset = 0
}

q = p
if ctxt.Autosize > aoffset {
    q = ctxt.NewProg()
    q.As = ASUB
    q.Lineno = p.Lineno
    q.From.Type = obj.TYPE_CONST
    q.From.Offset = int64(ctxt.Autosize) - int64(aoffset)
    q.To.Type = obj.TYPE_REG
    q.To.Reg = REGSP
    q.Spadj = int32(q.From.Offset)
    q.Link = p.Link
    p.Link = q
    if cursym.Text.Mark&LEAF != 0 {
        break
    }
}

q1 = ctxt.NewProg()
q1.As = AMOVD
q1.Lineno = p.Lineno
q1.From.Type = obj.TYPE_REG
q1.From.Reg = REGLINK
q1.To.Type = obj.TYPE_MEM
q1.Scond = C_XPRE
q1.To.Offset = int64(-aoffset)
q1.To.Reg = REGSP
q1.Link = q.Link
q1.Spadj = aoffset
q.Link = q1

if cursym.Text.From3.Offset&obj.WRAPPER != 0 {
    // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
    //
    // MOV g_panic(g), R1
    // CMP ZR, R1
    // BEQ end
    // MOV panic_argp(R1), R2
    // ADD $(autosize+8), RSP, R3
    // CMP R2, R3
    // BNE end
    // ADD $8, RSP, R4
    // MOVD R4, panic_argp(R1)
    // end:
    // NOP
    //
    // The NOP is needed to give the jumps somewhere to land.
    // It is a liblink NOP, not an ARM64 NOP: it encodes to 0 instruction bytes.
    q = q1

    q = obj.Appendp(ctxt, q)
    q.As = AMOVD
    q.From.Type = obj.TYPE_MEM
    q.From.Reg = REGG
    q.From.Offset = 4 * int64(ctxt.Arch.PtrSize) // G.panic
    q.To.Type = obj.TYPE_REG
    q.To.Reg = REG_R1
//......... (remainder of code omitted) .........
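The C_XPRE on q1 marks the store as pre-indexed with writeback, roughly MOVD.W R30, -aoffset(RSP): RSP moves down by aoffset first, then the link register is stored at the new RSP, pushing the frame in one instruction. A hedged plain-Go rendering of that semantics (pushLR and its memory map are illustrative only):

// pushLR mirrors MOVD.W R30, -aoffset(RSP) with Scond = C_XPRE.
func pushLR(sp, lr, aoffset uint64, mem map[uint64]uint64) uint64 {
    sp -= aoffset // pre-index: the base register is updated first...
    mem[sp] = lr  // ...then the store uses the updated address
    return sp
}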
Example 6: clearfat
func clearfat(nl *gc.Node) {
    /* clear a fat object */
    if gc.Debug['g'] != 0 {
        gc.Dump("\nclearfat", nl)
    }

    w := uint32(nl.Type.Width)

    // Avoid taking the address for simple enough types.
    if gc.Componentgen(nil, nl) {
        return
    }

    c := w % 4 // bytes
    q := w / 4 // quads
    if nl.Type.Align < 4 {
        q = 0
        c = w
    }

    var r0 gc.Node
    r0.Op = gc.OREGISTER
    r0.Reg = arm.REG_R0
    var r1 gc.Node
    r1.Op = gc.OREGISTER
    r1.Reg = arm.REG_R1
    var dst gc.Node
    gc.Regalloc(&dst, gc.Types[gc.Tptr], &r1)
    gc.Agen(nl, &dst)
    var nc gc.Node
    gc.Nodconst(&nc, gc.Types[gc.TUINT32], 0)
    var nz gc.Node
    gc.Regalloc(&nz, gc.Types[gc.TUINT32], &r0)
    gc.Cgen(&nc, &nz)

    if q > 128 {
        var end gc.Node
        gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
        p := gins(arm.AMOVW, &dst, &end)
        p.From.Type = obj.TYPE_ADDR
        p.From.Offset = int64(q) * 4

        p = gins(arm.AMOVW, &nz, &dst)
        p.To.Type = obj.TYPE_MEM
        p.To.Offset = 4
        p.Scond |= arm.C_PBIT
        pl := p

        p = gins(arm.ACMP, &dst, nil)
        raddr(&end, p)
        gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), pl)
        gc.Regfree(&end)
    } else if q >= 4 && !gc.Nacl {
        f := gc.Sysfunc("duffzero")
        p := gins(obj.ADUFFZERO, nil, f)
        gc.Afunclit(&p.To, f)

        // 4 and 128 = magic constants: see ../../runtime/asm_arm.s
        p.To.Offset = 4 * (128 - int64(q))
    } else {
        var p *obj.Prog
        for q > 0 {
            p = gins(arm.AMOVW, &nz, &dst)
            p.To.Type = obj.TYPE_MEM
            p.To.Offset = 4
            p.Scond |= arm.C_PBIT
            //print("1. %v\n", p);
            q--
        }
    }

    if c > 4 {
        // Loop to zero unaligned memory.
        var end gc.Node
        gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
        p := gins(arm.AMOVW, &dst, &end)
        p.From.Type = obj.TYPE_ADDR
        p.From.Offset = int64(c)

        p = gins(arm.AMOVB, &nz, &dst)
        p.To.Type = obj.TYPE_MEM
        p.To.Offset = 1
        p.Scond |= arm.C_PBIT
        pl := p

        p = gins(arm.ACMP, &dst, nil)
        raddr(&end, p)
        gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), pl)
        gc.Regfree(&end)
        c = 0
    }

    var p *obj.Prog
    for c > 0 {
        p = gins(arm.AMOVB, &nz, &dst)
        p.To.Type = obj.TYPE_MEM
//......... (remainder of code omitted) .........
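clearfat's split between 4-byte stores and trailing byte stores is pure arithmetic on the object's width and alignment; a hedged plain-Go rendering (clearSplit is a hypothetical name):

// clearSplit mirrors clearfat's partitioning of a w-byte object into
// q 4-byte stores plus c trailing bytes; objects aligned below 4
// are cleared entirely bytewise.
func clearSplit(w, align uint32) (q, c uint32) {
    c = w % 4 // bytes
    q = w / 4 // quads
    if align < 4 {
        q = 0
        c = w
    }
    return
}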
Example 7: cgen_shift
/*
 * generate shift according to op, one of:
 *     res = nl << nr
 *     res = nl >> nr
 */
func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
    if nl.Type.Width > 4 {
        gc.Fatalf("cgen_shift %v", nl.Type)
    }

    w := int(nl.Type.Width * 8)

    if op == gc.OLROT {
        v := nr.Int64()
        var n1 gc.Node
        gc.Regalloc(&n1, nl.Type, res)
        if w == 32 {
            gc.Cgen(nl, &n1)
            gshift(arm.AMOVW, &n1, arm.SHIFT_RR, int32(w)-int32(v), &n1)
        } else {
            var n2 gc.Node
            gc.Regalloc(&n2, nl.Type, nil)
            gc.Cgen(nl, &n2)
            gshift(arm.AMOVW, &n2, arm.SHIFT_LL, int32(v), &n1)
            gshift(arm.AORR, &n2, arm.SHIFT_LR, int32(w)-int32(v), &n1)
            gc.Regfree(&n2)

            // Ensure sign/zero-extended result.
            gins(optoas(gc.OAS, nl.Type), &n1, &n1)
        }
        gmove(&n1, res)
        gc.Regfree(&n1)
        return
    }

    if nr.Op == gc.OLITERAL {
        var n1 gc.Node
        gc.Regalloc(&n1, nl.Type, res)
        gc.Cgen(nl, &n1)
        sc := uint64(nr.Int64())
        if sc == 0 {
            // nothing to do
        } else if sc >= uint64(nl.Type.Width*8) {
            if op == gc.ORSH && nl.Type.IsSigned() {
                gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)
            } else {
                gins(arm.AEOR, &n1, &n1)
            }
        } else {
            if op == gc.ORSH && nl.Type.IsSigned() {
                gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(sc), &n1)
            } else if op == gc.ORSH {
                gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(sc), &n1)
            } else {
                gshift(arm.AMOVW, &n1, arm.SHIFT_LL, int32(sc), &n1) // OLSH
            }
        }

        if w < 32 && op == gc.OLSH {
            gins(optoas(gc.OAS, nl.Type), &n1, &n1)
        }
        gmove(&n1, res)
        gc.Regfree(&n1)
        return
    }
    tr := nr.Type
    var t gc.Node
    var n1 gc.Node
    var n2 gc.Node
    var n3 gc.Node
    if tr.Width > 4 {
        var nt gc.Node
        gc.Tempname(&nt, nr.Type)
        if nl.Ullman >= nr.Ullman {
            gc.Regalloc(&n2, nl.Type, res)
            gc.Cgen(nl, &n2)
            gc.Cgen(nr, &nt)
            n1 = nt
        } else {
            gc.Cgen(nr, &nt)
            gc.Regalloc(&n2, nl.Type, res)
            gc.Cgen(nl, &n2)
        }

        var hi gc.Node
        var lo gc.Node
        split64(&nt, &lo, &hi)
        gc.Regalloc(&n1, gc.Types[gc.TUINT32], nil)
        gc.Regalloc(&n3, gc.Types[gc.TUINT32], nil)
        gmove(&lo, &n1)
        gmove(&hi, &n3)
        splitclean()
        gins(arm.ATST, &n3, nil)
        gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
        p1 := gins(arm.AMOVW, &t, &n1)
        p1.Scond = arm.C_SCOND_NE
        tr = gc.Types[gc.TUINT32]
        gc.Regfree(&n3)
//......... (remainder of code omitted) .........
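When the shift count is a 64-bit value, the code tests its high word with ATST and then uses a conditional MOVW (Scond = C_SCOND_NE) to clamp the count: any nonzero high word means the count is at least 2^32, so shifting by the full register width already gives the final answer. A hedged plain-Go rendering (clampShift is a hypothetical name):

// clampShift mirrors
//     TST     hi, hi
//     MOVW.NE $w, n1 // Scond = C_SCOND_NE
func clampShift(lo, hi, w uint32) uint32 {
    if hi != 0 { // TST sets NE when any high bit is set
        return w // MOVW.NE replaces the count with the register width
    }
    return lo
}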
Example 8: blockcopy
//......... (beginning of code omitted) .........
        var tmp gc.Node
        gc.Regalloc(&tmp, gc.Types[gc.Tptr], &r0)
        f := gc.Sysfunc("duffcopy")
        p := gins(obj.ADUFFCOPY, nil, f)
        gc.Afunclit(&p.To, f)

        // 8 and 128 = magic constants: see ../../runtime/asm_arm.s
        p.To.Offset = 8 * (128 - int64(c))
        gc.Regfree(&tmp)
        gc.Regfree(&src)
        gc.Regfree(&dst)
        return
    }

    var dst gc.Node
    var src gc.Node
    if n.Ullman >= res.Ullman {
        gc.Agenr(n, &dst, res) // temporarily use dst
        gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
        gins(arm.AMOVW, &dst, &src)
        if res.Op == gc.ONAME {
            gc.Gvardef(res)
        }
        gc.Agen(res, &dst)
    } else {
        if res.Op == gc.ONAME {
            gc.Gvardef(res)
        }
        gc.Agenr(res, &dst, res)
        gc.Agenr(n, &src, nil)
    }

    var tmp gc.Node
    gc.Regalloc(&tmp, gc.Types[gc.TUINT32], nil)

    // set up end marker
    var nend gc.Node
    if c >= 4 {
        gc.Regalloc(&nend, gc.Types[gc.TUINT32], nil)
        p := gins(arm.AMOVW, &src, &nend)
        p.From.Type = obj.TYPE_ADDR
        if dir < 0 {
            p.From.Offset = int64(dir)
        } else {
            p.From.Offset = w
        }
    }

    // move src and dest to the end of block if necessary
    if dir < 0 {
        p := gins(arm.AMOVW, &src, &src)
        p.From.Type = obj.TYPE_ADDR
        p.From.Offset = w + int64(dir)

        p = gins(arm.AMOVW, &dst, &dst)
        p.From.Type = obj.TYPE_ADDR
        p.From.Offset = w + int64(dir)
    }

    // move
    if c >= 4 {
        p := gins(op, &src, &tmp)
        p.From.Type = obj.TYPE_MEM
        p.From.Offset = int64(dir)
        p.Scond |= arm.C_PBIT
        ploop := p

        p = gins(op, &tmp, &dst)
        p.To.Type = obj.TYPE_MEM
        p.To.Offset = int64(dir)
        p.Scond |= arm.C_PBIT

        p = gins(arm.ACMP, &src, nil)
        raddr(&nend, p)
        gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), ploop)
        gc.Regfree(&nend)
    } else {
        var p *obj.Prog
        for ; c > 0; c-- {
            p = gins(op, &src, &tmp)
            p.From.Type = obj.TYPE_MEM
            p.From.Offset = int64(dir)
            p.Scond |= arm.C_PBIT

            p = gins(op, &tmp, &dst)
            p.To.Type = obj.TYPE_MEM
            p.To.Offset = int64(dir)
            p.Scond |= arm.C_PBIT
        }
    }

    gc.Regfree(&dst)
    gc.Regfree(&src)
    gc.Regfree(&tmp)
}
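The copy loop pairs a post-incrementing load with a post-incrementing store (both via C_PBIT) and stops when the source pointer reaches the precomputed end marker; when source and destination overlap, the omitted prologue appears to choose dir < 0, and the pointers are first moved to the block ends. A hedged plain-Go rendering of that direction handling (overlapSafeCopy and the flat mem slice are illustrative assumptions, since the direction choice itself is in the omitted code):

// overlapSafeCopy mirrors blockcopy's two loops over flat memory:
// forward when dir > 0, backward (pointers pre-moved to the block end)
// when the destination overlaps the source from above.
func overlapSafeCopy(mem []byte, dst, src, w int) {
    if dst > src && dst < src+w {
        for i := w - 1; i >= 0; i-- { // dir < 0: walk down from the block end
            mem[dst+i] = mem[src+i]
        }
    } else {
        for i := 0; i < w; i++ { // dir > 0: MOVW.P-style post-increment walk
            mem[dst+i] = mem[src+i]
        }
    }
}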