This article collects and summarizes typical usage examples of the Golang function cmd/compile/internal/gc.Linksym. If you are wondering what Linksym does, how to call it, and what real call sites look like, the hand-picked examples below should help.
The following presents 15 code examples of the Linksym function, sorted by popularity by default.
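As a quick orientation before the examples: Linksym turns a compiler-level *gc.Sym into the linker-level *obj.LSym that instruction operands (obj.Addr) expect, so it shows up wherever generated code must reference a named symbol. A minimal sketch of the recurring pattern, assembled from the call sites below:
p := gc.Prog(obj.ADUFFZERO) // emit an instruction whose operand is a symbol address
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg)) // *gc.Sym -> *obj.LSym
p.To.Offset = 0             // entry offset into that symbol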
Example 1: scratchFpMem
// scratchFpMem initializes an Addr (field of a Prog)
// to reference the scratchpad memory for movement between
// F and G registers for FP conversions.
func scratchFpMem(s *gc.SSAGenState, a *obj.Addr) {
a.Type = obj.TYPE_MEM
a.Name = obj.NAME_AUTO
a.Node = s.ScratchFpMem
a.Sym = gc.Linksym(s.ScratchFpMem.Sym)
a.Reg = ppc64.REGSP
}
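For context, a caller passes in the Addr it is building. A sketch of how an FP-to-integer conversion might bounce a value through the scratch slot (the FMOVD/MOVD pair and the x and y registers are illustrative assumptions, not the ppc64 backend's actual conversion code):
p := gc.Prog(ppc64.AFMOVD) // store the FP register into the scratch slot
p.From.Type = obj.TYPE_REG
p.From.Reg = x             // x: source FP register, assumed chosen by the caller
scratchFpMem(s, &p.To)
q := gc.Prog(ppc64.AMOVD)  // reload the same bits into a general register
scratchFpMem(s, &q.From)
q.To.Type = obj.TYPE_REG
q.To.Reg = y               // y: destination G register, also assumed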
Example 2: zerorange
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32) *obj.Prog {
cnt := hi - lo
if cnt == 0 {
return p
}
if *ax == 0 {
p = appendpp(p, x86.AMOVL, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
*ax = 1
}
if cnt <= int64(4*gc.Widthreg) {
for i := int64(0); i < cnt; i += int64(gc.Widthreg) {
p = appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo+i)
}
} else if !gc.Nacl && cnt <= int64(128*gc.Widthreg) {
p = appendpp(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, frame+lo, obj.TYPE_REG, x86.REG_DI, 0)
p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, 1*(128-cnt/int64(gc.Widthreg)))
p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
} else {
p = appendpp(p, x86.AMOVL, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0)
p = appendpp(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, frame+lo, obj.TYPE_REG, x86.REG_DI, 0)
p = appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
p = appendpp(p, x86.ASTOSL, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
}
return p
}
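To make the branch structure concrete, this is which of the three strategies the 386 version picks for a few counts, assuming gc.Widthreg = 4 and a non-NaCl target (a worked illustration, not extra generated code):
// cnt = 12   -> three inline MOVL AX stores (cnt <= 4*Widthreg)
// cnt = 256  -> LEAL frame+lo(SP), DI; DUFFZERO at offset 1*(128-256/4) = 64
// cnt = 4096 -> MOVL $1024, CX; LEAL; REP; STOSL (the general rep-stos path)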
Example 3: zerorange
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32) *obj.Prog {
cnt := hi - lo
if cnt == 0 {
return p
}
if *ax == 0 {
p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
*ax = 1
}
if cnt%int64(gc.Widthreg) != 0 {
// should only happen with nacl
if cnt%int64(gc.Widthptr) != 0 {
gc.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
}
p = appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo)
lo += int64(gc.Widthptr)
cnt -= int64(gc.Widthptr)
}
if cnt <= int64(4*gc.Widthreg) {
for i := int64(0); i < cnt; i += int64(gc.Widthreg) {
p = appendpp(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo+i)
}
} else if !gc.Nacl && (cnt <= int64(128*gc.Widthreg)) {
q := cnt / int64(gc.Widthreg)
p = appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, frame+lo+dzDI(q), obj.TYPE_REG, x86.REG_DI, 0)
p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(q))
p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
} else {
p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0)
p = appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, frame+lo, obj.TYPE_REG, x86.REG_DI, 0)
p = appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
p = appendpp(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
}
return p
}
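zerorange is invoked by the prologue code once per contiguous run of stack slots that need clearing, and the *ax flag lets consecutive calls share one MOVQ $0, AX. A sketch of that calling pattern (the caller and the lo/hi values here are illustrative, not the compiler's actual defframe):
ax := uint32(0)                          // AX not known to hold zero yet
p = zerorange(p, frame, lo1, hi1, &ax)   // may emit MOVQ $0, AX and set ax = 1
p = zerorange(p, frame, lo2, hi2, &ax)   // reuses the already-zeroed AX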
Example 4: blockcopy
func blockcopy(n, ns *gc.Node, osrc, odst, w int64) {
var noddi gc.Node
gc.Nodreg(&noddi, gc.Types[gc.Tptr], x86.REG_DI)
var nodsi gc.Node
gc.Nodreg(&nodsi, gc.Types[gc.Tptr], x86.REG_SI)
var nodl gc.Node
var nodr gc.Node
if n.Ullman >= ns.Ullman {
gc.Agenr(n, &nodr, &nodsi)
if ns.Op == gc.ONAME {
gc.Gvardef(ns)
}
gc.Agenr(ns, &nodl, &noddi)
} else {
if ns.Op == gc.ONAME {
gc.Gvardef(ns)
}
gc.Agenr(ns, &nodl, &noddi)
gc.Agenr(n, &nodr, &nodsi)
}
if nodl.Reg != x86.REG_DI {
gmove(&nodl, &noddi)
}
if nodr.Reg != x86.REG_SI {
gmove(&nodr, &nodsi)
}
gc.Regfree(&nodl)
gc.Regfree(&nodr)
c := w % 8 // bytes
q := w / 8 // quads
var oldcx gc.Node
var cx gc.Node
savex(x86.REG_CX, &cx, &oldcx, nil, gc.Types[gc.TINT64])
// if we are copying forward on the stack and
// the src and dst overlap, then reverse direction
if osrc < odst && odst < osrc+w {
// reverse direction
gins(x86.ASTD, nil, nil) // set direction flag
if c > 0 {
gconreg(addptr, w-1, x86.REG_SI)
gconreg(addptr, w-1, x86.REG_DI)
gconreg(movptr, c, x86.REG_CX)
gins(x86.AREP, nil, nil) // repeat
gins(x86.AMOVSB, nil, nil) // MOVB *(SI)-,*(DI)-
}
if q > 0 {
if c > 0 {
gconreg(addptr, -7, x86.REG_SI)
gconreg(addptr, -7, x86.REG_DI)
} else {
gconreg(addptr, w-8, x86.REG_SI)
gconreg(addptr, w-8, x86.REG_DI)
}
gconreg(movptr, q, x86.REG_CX)
gins(x86.AREP, nil, nil) // repeat
gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)-,*(DI)-
}
// we leave with the flag clear
gins(x86.ACLD, nil, nil)
} else {
// normal direction
if q > 128 || (gc.Nacl && q >= 4) {
gconreg(movptr, q, x86.REG_CX)
gins(x86.AREP, nil, nil) // repeat
gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
} else if q >= 4 {
var oldx0 gc.Node
var x0 gc.Node
savex(x86.REG_X0, &x0, &oldx0, nil, gc.Types[gc.TFLOAT64])
p := gins(obj.ADUFFCOPY, nil, nil)
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
// 64 blocks taking 14 bytes each
// see ../../../../runtime/mkduff.go
p.To.Offset = 14 * (64 - q/2)
restx(&x0, &oldx0)
if q%2 != 0 {
gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
}
} else if !gc.Nacl && c == 0 {
// We don't need the MOVSQ side-effect of updating SI and DI,
// and issuing a sequence of MOVQs directly is faster.
nodsi.Op = gc.OINDREG
noddi.Op = gc.OINDREG
for q > 0 {
gmove(&nodsi, &cx) // MOVQ x+(SI),CX
gmove(&cx, &noddi) // MOVQ CX,x+(DI)
//......... (part of the code omitted) .........
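To make the DUFFCOPY entry offset concrete (a worked example derived from the comment above, which places 64 copy blocks of 14 bytes each in runtime duffcopy):
// q = 10 quads: Offset = 14 * (64 - 10/2) = 14*59 = 826, so only the last
// 5 blocks execute; each block moves 2 quads (16 bytes), i.e. all 10 quads.
// For odd q, the trailing MOVSQ emitted after restx handles the leftover quad.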
Example 5: ssaGenValue
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
s.SetLineno(v.Line)
switch v.Op {
case ssa.OpInitMem:
// memory arg needs no code
case ssa.OpArg:
// input args need no code
case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
// nothing to do
case ssa.OpCopy, ssa.OpARM64MOVDconvert, ssa.OpARM64MOVDreg:
if v.Type.IsMemory() {
return
}
x := gc.SSARegNum(v.Args[0])
y := gc.SSARegNum(v)
if x == y {
return
}
as := arm64.AMOVD
if v.Type.IsFloat() {
switch v.Type.Size() {
case 4:
as = arm64.AFMOVS
case 8:
as = arm64.AFMOVD
default:
panic("bad float size")
}
}
p := gc.Prog(as)
p.From.Type = obj.TYPE_REG
p.From.Reg = x
p.To.Type = obj.TYPE_REG
p.To.Reg = y
case ssa.OpARM64MOVDnop:
if gc.SSARegNum(v) != gc.SSARegNum(v.Args[0]) {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
// nothing to do
case ssa.OpLoadReg:
if v.Type.IsFlags() {
v.Unimplementedf("load flags not implemented: %v", v.LongString())
return
}
p := gc.Prog(loadByType(v.Type))
n, off := gc.AutoVar(v.Args[0])
p.From.Type = obj.TYPE_MEM
p.From.Node = n
p.From.Sym = gc.Linksym(n.Sym)
p.From.Offset = off
if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT {
p.From.Name = obj.NAME_PARAM
p.From.Offset += n.Xoffset
} else {
p.From.Name = obj.NAME_AUTO
}
p.To.Type = obj.TYPE_REG
p.To.Reg = gc.SSARegNum(v)
case ssa.OpPhi:
gc.CheckLoweredPhi(v)
case ssa.OpStoreReg:
if v.Type.IsFlags() {
v.Unimplementedf("store flags not implemented: %v", v.LongString())
return
}
p := gc.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = gc.SSARegNum(v.Args[0])
n, off := gc.AutoVar(v)
p.To.Type = obj.TYPE_MEM
p.To.Node = n
p.To.Sym = gc.Linksym(n.Sym)
p.To.Offset = off
if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT {
p.To.Name = obj.NAME_PARAM
p.To.Offset += n.Xoffset
} else {
p.To.Name = obj.NAME_AUTO
}
case ssa.OpARM64ADD,
ssa.OpARM64SUB,
ssa.OpARM64AND,
ssa.OpARM64OR,
ssa.OpARM64XOR,
ssa.OpARM64BIC,
ssa.OpARM64MUL,
ssa.OpARM64MULW,
ssa.OpARM64MULH,
ssa.OpARM64UMULH,
ssa.OpARM64MULL,
ssa.OpARM64UMULL,
ssa.OpARM64DIV,
ssa.OpARM64UDIV,
ssa.OpARM64DIVW,
ssa.OpARM64UDIVW,
ssa.OpARM64MOD,
ssa.OpARM64UMOD,
ssa.OpARM64MODW,
ssa.OpARM64UMODW,
ssa.OpARM64SLL,
//......... (part of the code omitted) .........
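The OpLoadReg and OpStoreReg cases above pick their instruction through loadByType and storeByType, which this page does not show. A plausible shape for loadByType, consistent with the AMOVD/AFMOVS/AFMOVD choices in the Copy case above (a sketch, not necessarily the arm64 backend's exact source):
func loadByType(t ssa.Type) obj.As {
	if t.IsFloat() {
		switch t.Size() {
		case 4:
			return arm64.AFMOVS
		case 8:
			return arm64.AFMOVD
		}
	} else {
		switch t.Size() {
		case 1:
			if t.IsSigned() {
				return arm64.AMOVB
			}
			return arm64.AMOVBU // unsigned loads zero-extend
		case 2:
			if t.IsSigned() {
				return arm64.AMOVH
			}
			return arm64.AMOVHU
		case 4:
			if t.IsSigned() {
				return arm64.AMOVW
			}
			return arm64.AMOVWU
		case 8:
			return arm64.AMOVD
		}
	}
	panic("bad load type")
}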
Example 6: clearfat
func clearfat(nl *gc.Node) {
/* clear a fat object */
if gc.Debug['g'] != 0 {
gc.Dump("\nclearfat", nl)
}
w := nl.Type.Width
// Avoid taking the address for simple enough types.
if gc.Componentgen(nil, nl) {
return
}
c := w % 8 // bytes
q := w / 8 // quads
if q < 4 {
// Write sequence of MOV 0, off(base) instead of using STOSQ.
// The hope is that although the code will be slightly longer,
// the MOVs will have no dependencies and pipeline better
// than the unrolled STOSQ loop.
// NOTE: Must use agen, not igen, so that optimizer sees address
// being taken. We are not writing on field boundaries.
var n1 gc.Node
gc.Agenr(nl, &n1, nil)
n1.Op = gc.OINDREG
var z gc.Node
gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
for {
tmp14 := q
q--
if tmp14 <= 0 {
break
}
n1.Type = z.Type
gins(x86.AMOVQ, &z, &n1)
n1.Xoffset += 8
}
if c >= 4 {
gc.Nodconst(&z, gc.Types[gc.TUINT32], 0)
n1.Type = z.Type
gins(x86.AMOVL, &z, &n1)
n1.Xoffset += 4
c -= 4
}
gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
for {
tmp15 := c
c--
if tmp15 <= 0 {
break
}
n1.Type = z.Type
gins(x86.AMOVB, &z, &n1)
n1.Xoffset++
}
gc.Regfree(&n1)
return
}
var oldn1 gc.Node
var n1 gc.Node
savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
gc.Agen(nl, &n1)
var ax gc.Node
var oldax gc.Node
savex(x86.REG_AX, &ax, &oldax, nil, gc.Types[gc.Tptr])
gconreg(x86.AMOVL, 0, x86.REG_AX)
if q > 128 || gc.Nacl {
gconreg(movptr, q, x86.REG_CX)
gins(x86.AREP, nil, nil) // repeat
gins(x86.ASTOSQ, nil, nil) // STOQ AL,*(DI)+
} else {
if di := dzDI(q); di != 0 {
gconreg(addptr, di, x86.REG_DI)
}
p := gins(obj.ADUFFZERO, nil, nil)
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
p.To.Offset = dzOff(q)
}
z := ax
di := n1
if w >= 8 && c >= 4 {
di.Op = gc.OINDREG
z.Type = gc.Types[gc.TINT64]
di.Type = z.Type
p := gins(x86.AMOVQ, &z, &di)
p.To.Scale = 1
p.To.Offset = c - 8
} else if c >= 4 {
di.Op = gc.OINDREG
z.Type = gc.Types[gc.TINT32]
//......... (part of the code omitted) .........
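To see what the unrolled small-object path above produces, take w = 23 (q = 2, c = 7) and assume Componentgen declined the job; the emitted sequence is roughly (a worked illustration, written as the resulting instructions):
// MOVQ $0, 0(reg)    MOVQ $0, 8(reg)                      // the two quads
// MOVL $0, 16(reg)                                        // four of the remaining seven bytes
// MOVB $0, 20(reg)   MOVB $0, 21(reg)   MOVB $0, 22(reg)  // the last three bytes
// where reg is the register gc.Agenr picked for the object's address (n1).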
Example 7: ssaGenValue
//......... (part of the code omitted) .........
p.To.Type = obj.TYPE_MEM
p.To.Reg = gc.SSARegNum(v.Args[0])
gc.AddAux2(&p.To, v, sc.Off())
case ssa.OpAMD64MOVQstoreconstidx8, ssa.OpAMD64MOVLstoreconstidx4, ssa.OpAMD64MOVWstoreconstidx2, ssa.OpAMD64MOVBstoreconstidx1:
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
sc := v.AuxValAndOff()
switch v.Op {
case ssa.OpAMD64MOVBstoreconstidx1:
p.From.Offset = int64(int8(sc.Val()))
p.To.Scale = 1
case ssa.OpAMD64MOVWstoreconstidx2:
p.From.Offset = int64(int16(sc.Val()))
p.To.Scale = 2
case ssa.OpAMD64MOVLstoreconstidx4:
p.From.Offset = int64(int32(sc.Val()))
p.To.Scale = 4
case ssa.OpAMD64MOVQstoreconstidx8:
p.From.Offset = sc.Val()
p.To.Scale = 8
}
p.To.Type = obj.TYPE_MEM
p.To.Reg = gc.SSARegNum(v.Args[0])
p.To.Index = gc.SSARegNum(v.Args[1])
gc.AddAux2(&p.To, v, sc.Off())
case ssa.OpAMD64MOVLQSX, ssa.OpAMD64MOVWQSX, ssa.OpAMD64MOVBQSX, ssa.OpAMD64MOVLQZX, ssa.OpAMD64MOVWQZX, ssa.OpAMD64MOVBQZX,
ssa.OpAMD64CVTSL2SS, ssa.OpAMD64CVTSL2SD, ssa.OpAMD64CVTSQ2SS, ssa.OpAMD64CVTSQ2SD,
ssa.OpAMD64CVTTSS2SL, ssa.OpAMD64CVTTSD2SL, ssa.OpAMD64CVTTSS2SQ, ssa.OpAMD64CVTTSD2SQ,
ssa.OpAMD64CVTSS2SD, ssa.OpAMD64CVTSD2SS:
opregreg(v.Op.Asm(), gc.SSARegNum(v), gc.SSARegNum(v.Args[0]))
case ssa.OpAMD64DUFFZERO:
p := gc.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
p.To.Offset = v.AuxInt
case ssa.OpAMD64MOVOconst:
if v.AuxInt != 0 {
v.Unimplementedf("MOVOconst can only do constant=0")
}
r := gc.SSARegNum(v)
opregreg(x86.AXORPS, r, r)
case ssa.OpAMD64DUFFCOPY:
p := gc.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
p.To.Offset = v.AuxInt
case ssa.OpCopy, ssa.OpAMD64MOVQconvert: // TODO: use MOVQreg for reg->reg copies instead of OpCopy?
if v.Type.IsMemory() {
return
}
x := gc.SSARegNum(v.Args[0])
y := gc.SSARegNum(v)
if x != y {
opregreg(moveByType(v.Type), y, x)
}
case ssa.OpLoadReg:
if v.Type.IsFlags() {
v.Unimplementedf("load flags not implemented: %v", v.LongString())
return
}
p := gc.Prog(loadByType(v.Type))
n, off := gc.AutoVar(v.Args[0])
p.From.Type = obj.TYPE_MEM
p.From.Node = n
p.From.Sym = gc.Linksym(n.Sym)
Example 8: ssaGenBlock
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
s.SetLineno(b.Line)
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockDefer:
// defer returns in R0:
// 0 if we should continue executing
// 1 if we should jump to deferreturn call
p := gc.Prog(arm.ACMP)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0
p.Reg = arm.REG_R0
p = gc.Prog(arm.ABNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
case ssa.BlockRet:
gc.Prog(obj.ARET)
case ssa.BlockRetJmp:
p := gc.Prog(obj.ARET)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Linksym(b.Aux.(*gc.Sym))
case ssa.BlockARMEQ, ssa.BlockARMNE,
ssa.BlockARMLT, ssa.BlockARMGE,
ssa.BlockARMLE, ssa.BlockARMGT,
ssa.BlockARMULT, ssa.BlockARMUGT,
ssa.BlockARMULE, ssa.BlockARMUGE:
jmp := blockJump[b.Kind]
var p *obj.Prog
switch next {
case b.Succs[0].Block():
p = gc.Prog(jmp.invasm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
case b.Succs[1].Block():
p = gc.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
default:
p = gc.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
q := gc.Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
}
default:
b.Fatalf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
}
}
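The jmp := blockJump[b.Kind] lookup assumes a table that maps each conditional block kind to its branch instruction and the inverted branch. A minimal sketch of such a table (the entries and their pairings are illustrative, not an exhaustive copy of the arm backend):
var blockJump = map[ssa.BlockKind]struct {
	asm, invasm obj.As
}{
	ssa.BlockARMEQ:  {arm.ABEQ, arm.ABNE},
	ssa.BlockARMNE:  {arm.ABNE, arm.ABEQ},
	ssa.BlockARMLT:  {arm.ABLT, arm.ABGE},
	ssa.BlockARMGE:  {arm.ABGE, arm.ABLT},
	ssa.BlockARMULT: {arm.ABLO, arm.ABHS}, // unsigned compares use the carry-based conditions
	ssa.BlockARMUGE: {arm.ABHS, arm.ABLO},
	// ... remaining kinds follow the same pattern
}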
Example 9: ssaGenBlock
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
s.SetLineno(b.Line)
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockDefer:
// defer returns in R1:
// 0 if we should continue executing
// 1 if we should jump to deferreturn call
p := gc.Prog(mips.ABNE)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGZERO
p.Reg = mips.REG_R1
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
case ssa.BlockRet:
gc.Prog(obj.ARET)
case ssa.BlockRetJmp:
p := gc.Prog(obj.ARET)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Linksym(b.Aux.(*gc.Sym))
case ssa.BlockMIPSEQ, ssa.BlockMIPSNE,
ssa.BlockMIPSLTZ, ssa.BlockMIPSGEZ,
ssa.BlockMIPSLEZ, ssa.BlockMIPSGTZ,
ssa.BlockMIPSFPT, ssa.BlockMIPSFPF:
jmp := blockJump[b.Kind]
var p *obj.Prog
switch next {
case b.Succs[0].Block():
p = gc.Prog(jmp.invasm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
case b.Succs[1].Block():
p = gc.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
default:
p = gc.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
q := gc.Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
}
if !b.Control.Type.IsFlags() {
p.From.Type = obj.TYPE_REG
p.From.Reg = b.Control.Reg()
}
default:
b.Fatalf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
}
}
Example 10: zerorange
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32, x0 *uint32) *obj.Prog {
cnt := hi - lo
if cnt == 0 {
return p
}
if cnt%int64(gc.Widthreg) != 0 {
// should only happen with nacl
if cnt%int64(gc.Widthptr) != 0 {
gc.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
}
if *ax == 0 {
p = gc.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
*ax = 1
}
p = gc.Appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo)
lo += int64(gc.Widthptr)
cnt -= int64(gc.Widthptr)
}
if cnt == 8 {
if *ax == 0 {
p = gc.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
*ax = 1
}
p = gc.Appendpp(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo)
} else if !isPlan9 && cnt <= int64(8*gc.Widthreg) {
if *x0 == 0 {
p = gc.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
*x0 = 1
}
for i := int64(0); i < cnt/16; i++ {
p = gc.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo+i*16)
}
if cnt%16 != 0 {
p = gc.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo+cnt-int64(16))
}
} else if !gc.Nacl && !isPlan9 && (cnt <= int64(128*gc.Widthreg)) {
if *x0 == 0 {
p = gc.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
*x0 = 1
}
p = gc.Appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, frame+lo+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0)
p = gc.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt))
p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
if cnt%16 != 0 {
p = gc.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8))
}
} else {
if *ax == 0 {
p = gc.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
*ax = 1
}
p = gc.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0)
p = gc.Appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, frame+lo, obj.TYPE_REG, x86.REG_DI, 0)
p = gc.Appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
p = gc.Appendpp(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
}
return p
}
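Compared with Example 3, this variant prefers 16-byte MOVUPS stores through a zeroed X0 for mid-sized ranges. For instance, cnt = 40 (with neither NaCl nor Plan 9) produces (a worked illustration):
// XORPS X0, X0 once (guarded by *x0), then:
//   MOVUPS X0, frame+lo+0(SP)
//   MOVUPS X0, frame+lo+16(SP)
//   MOVUPS X0, frame+lo+24(SP)  // cnt%16 != 0: an overlapping store covers the final 8 bytes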
Example 11: ssaGenBlock
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
s.SetLineno(b.Line)
switch b.Kind {
case ssa.BlockDefer:
// defer returns in R3:
// 0 if we should continue executing
// 1 if we should jump to deferreturn call
p := gc.Prog(ppc64.ACMP)
p.From.Type = obj.TYPE_REG
p.From.Reg = ppc64.REG_R3
p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REG_R0
p = gc.Prog(ppc64.ABNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockPlain, ssa.BlockCall, ssa.BlockCheck:
if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
case ssa.BlockRet:
gc.Prog(obj.ARET)
case ssa.BlockRetJmp:
p := gc.Prog(obj.AJMP)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Linksym(b.Aux.(*gc.Sym))
case ssa.BlockPPC64EQ, ssa.BlockPPC64NE,
ssa.BlockPPC64LT, ssa.BlockPPC64GE,
ssa.BlockPPC64LE, ssa.BlockPPC64GT,
ssa.BlockPPC64FLT, ssa.BlockPPC64FGE,
ssa.BlockPPC64FLE, ssa.BlockPPC64FGT:
jmp := blockJump[b.Kind]
likely := b.Likely
var p *obj.Prog
switch next {
case b.Succs[0].Block():
p = gc.Prog(jmp.invasm)
likely *= -1
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if jmp.invasmun {
// TODO: The second branch is probably predict-not-taken since it is for FP unordered
q := gc.Prog(ppc64.ABVS)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
}
case b.Succs[1].Block():
p = gc.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
if jmp.asmeq {
q := gc.Prog(ppc64.ABEQ)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[0].Block()})
}
default:
p = gc.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
if jmp.asmeq {
q := gc.Prog(ppc64.ABEQ)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[0].Block()})
}
q := gc.Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
}
// liblink reorders the instruction stream as it sees fit.
// Pass along what we know so liblink can make use of it.
// TODO: Once we've fully switched to SSA,
// make liblink leave our output alone.
//switch likely {
//case ssa.BranchUnlikely:
// p.From.Type = obj.TYPE_CONST
// p.From.Offset = 0
//case ssa.BranchLikely:
// p.From.Type = obj.TYPE_CONST
// p.From.Offset = 1
//}
default:
b.Unimplementedf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
}
}
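Here blockJump needs two extra flags per entry, because some ppc64 comparisons are synthesized from two branches: asmeq requests a following BEQ (for example, floating-point "less or equal" is "less" or "equal"), and invasmun requests a BVS on the inverted path so unordered operands go the right way. A sketch of the table's shape (the concrete entries and boolean values shown are illustrative):
var blockJump = [...]struct {
	asm, invasm     obj.As
	asmeq, invasmun bool
}{
	ssa.BlockPPC64EQ:  {ppc64.ABEQ, ppc64.ABNE, false, false},
	ssa.BlockPPC64GE:  {ppc64.ABGE, ppc64.ABLT, false, false},
	ssa.BlockPPC64FLE: {ppc64.ABLT, ppc64.ABGT, true, true}, // FP <=: BLT plus a BEQ; inverted path adds BVS for NaNs
	// ... remaining kinds follow the same pattern
}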
Example 12: ssaGenValue
//......... (part of the code omitted) .........
ssa.OpARM64RBIT,
ssa.OpARM64RBITW,
ssa.OpARM64CLZ,
ssa.OpARM64CLZW:
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpARM64CSELULT,
ssa.OpARM64CSELULT0:
r1 := int16(arm64.REGZERO)
if v.Op == ssa.OpARM64CSELULT {
r1 = v.Args[1].Reg()
}
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
p.From.Reg = arm64.COND_LO
p.Reg = v.Args[0].Reg()
p.From3 = &obj.Addr{Type: obj.TYPE_REG, Reg: r1}
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpARM64DUFFZERO:
// runtime.duffzero expects start address - 8 in R16
p := gc.Prog(arm64.ASUB)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 8
p.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = arm64.REG_R16
p = gc.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
p.To.Offset = v.AuxInt
case ssa.OpARM64LoweredZero:
// MOVD.P ZR, 8(R16)
// CMP Rarg1, R16
// BLE -2(PC)
// arg1 is the address of the last element to zero
p := gc.Prog(arm64.AMOVD)
p.Scond = arm64.C_XPOST
p.From.Type = obj.TYPE_REG
p.From.Reg = arm64.REGZERO
p.To.Type = obj.TYPE_MEM
p.To.Reg = arm64.REG_R16
p.To.Offset = 8
p2 := gc.Prog(arm64.ACMP)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = v.Args[1].Reg()
p2.Reg = arm64.REG_R16
p3 := gc.Prog(arm64.ABLE)
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
case ssa.OpARM64DUFFCOPY:
p := gc.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
p.To.Offset = v.AuxInt
case ssa.OpARM64LoweredMove:
// MOVD.P 8(R16), Rtmp
// MOVD.P Rtmp, 8(R17)
// CMP Rarg2, R16
// BLE -3(PC)
// arg2 is the address of the last element of src
Example 13: blockcopy
func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
var dst gc.Node
gc.Nodreg(&dst, gc.Types[gc.Tptr], x86.REG_DI)
var src gc.Node
gc.Nodreg(&src, gc.Types[gc.Tptr], x86.REG_SI)
var tsrc gc.Node
gc.Tempname(&tsrc, gc.Types[gc.Tptr])
var tdst gc.Node
gc.Tempname(&tdst, gc.Types[gc.Tptr])
if !n.Addable {
gc.Agen(n, &tsrc)
}
if !res.Addable {
gc.Agen(res, &tdst)
}
if n.Addable {
gc.Agen(n, &src)
} else {
gmove(&tsrc, &src)
}
if res.Op == gc.ONAME {
gc.Gvardef(res)
}
if res.Addable {
gc.Agen(res, &dst)
} else {
gmove(&tdst, &dst)
}
c := int32(w % 4) // bytes
q := int32(w / 4) // doublewords
// if we are copying forward on the stack and
// the src and dst overlap, then reverse direction
if osrc < odst && odst < osrc+w {
// reverse direction
gins(x86.ASTD, nil, nil) // set direction flag
if c > 0 {
gconreg(x86.AADDL, w-1, x86.REG_SI)
gconreg(x86.AADDL, w-1, x86.REG_DI)
gconreg(x86.AMOVL, int64(c), x86.REG_CX)
gins(x86.AREP, nil, nil) // repeat
gins(x86.AMOVSB, nil, nil) // MOVB *(SI)-,*(DI)-
}
if q > 0 {
if c > 0 {
gconreg(x86.AADDL, -3, x86.REG_SI)
gconreg(x86.AADDL, -3, x86.REG_DI)
} else {
gconreg(x86.AADDL, w-4, x86.REG_SI)
gconreg(x86.AADDL, w-4, x86.REG_DI)
}
gconreg(x86.AMOVL, int64(q), x86.REG_CX)
gins(x86.AREP, nil, nil) // repeat
gins(x86.AMOVSL, nil, nil) // MOVL *(SI)-,*(DI)-
}
// we leave with the flag clear
gins(x86.ACLD, nil, nil)
} else {
gins(x86.ACLD, nil, nil) // paranoia. TODO(rsc): remove?
// normal direction
if q > 128 || (q >= 4 && gc.Nacl) {
gconreg(x86.AMOVL, int64(q), x86.REG_CX)
gins(x86.AREP, nil, nil) // repeat
gins(x86.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+
} else if q >= 4 {
p := gins(obj.ADUFFCOPY, nil, nil)
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
// 10 and 128 = magic constants: see ../../runtime/asm_386.s
p.To.Offset = 10 * (128 - int64(q))
} else if !gc.Nacl && c == 0 {
var cx gc.Node
gc.Nodreg(&cx, gc.Types[gc.TINT32], x86.REG_CX)
// We don't need the MOVSL side-effect of updating SI and DI,
// and issuing a sequence of MOVLs directly is faster.
src.Op = gc.OINDREG
dst.Op = gc.OINDREG
for q > 0 {
gmove(&src, &cx) // MOVL x+(SI),CX
gmove(&cx, &dst) // MOVL CX,x+(DI)
src.Xoffset += 4
dst.Xoffset += 4
q--
}
} else {
for q > 0 {
gins(x86.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+
q--
//......... (part of the code omitted) .........
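The magic constants in the DUFFCOPY offset above work the same way as on amd64, only with 4-byte words (a worked example; the 10-byte block size comes from the comment referencing runtime/asm_386.s):
// q = 20 words: Offset = 10 * (128 - 20) = 1080, so the last 20 of the 128
// blocks execute, and each block copies one 4-byte word -> 80 bytes in total.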
Example 14: ssaGenValue
//......... (part of the code omitted) .........
p.To.Scale = 1
if i == x86.REG_SP {
r, i = i, r
}
case ssa.OpAMD64MOVWstoreconstidx2:
p.To.Scale = 2
case ssa.OpAMD64MOVLstoreconstidx4:
p.To.Scale = 4
case ssa.OpAMD64MOVQstoreconstidx8:
p.To.Scale = 8
}
p.To.Type = obj.TYPE_MEM
p.To.Reg = r
p.To.Index = i
gc.AddAux2(&p.To, v, sc.Off())
case ssa.OpAMD64MOVLQSX, ssa.OpAMD64MOVWQSX, ssa.OpAMD64MOVBQSX, ssa.OpAMD64MOVLQZX, ssa.OpAMD64MOVWQZX, ssa.OpAMD64MOVBQZX,
ssa.OpAMD64CVTSL2SS, ssa.OpAMD64CVTSL2SD, ssa.OpAMD64CVTSQ2SS, ssa.OpAMD64CVTSQ2SD,
ssa.OpAMD64CVTTSS2SL, ssa.OpAMD64CVTTSD2SL, ssa.OpAMD64CVTTSS2SQ, ssa.OpAMD64CVTTSD2SQ,
ssa.OpAMD64CVTSS2SD, ssa.OpAMD64CVTSD2SS:
opregreg(v.Op.Asm(), gc.SSARegNum(v), gc.SSARegNum(v.Args[0]))
case ssa.OpAMD64DUFFZERO:
off := duffStart(v.AuxInt)
adj := duffAdj(v.AuxInt)
var p *obj.Prog
if adj != 0 {
p = gc.Prog(x86.AADDQ)
p.From.Type = obj.TYPE_CONST
p.From.Offset = adj
p.To.Type = obj.TYPE_REG
p.To.Reg = x86.REG_DI
}
p = gc.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
p.To.Offset = off
case ssa.OpAMD64MOVOconst:
if v.AuxInt != 0 {
v.Unimplementedf("MOVOconst can only do constant=0")
}
r := gc.SSARegNum(v)
opregreg(x86.AXORPS, r, r)
case ssa.OpAMD64DUFFCOPY:
p := gc.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
p.To.Offset = v.AuxInt
case ssa.OpCopy, ssa.OpAMD64MOVQconvert, ssa.OpAMD64MOVLconvert: // TODO: use MOVQreg for reg->reg copies instead of OpCopy?
if v.Type.IsMemory() {
return
}
x := gc.SSARegNum(v.Args[0])
y := gc.SSARegNum(v)
if x != y {
opregreg(moveByType(v.Type), y, x)
}
case ssa.OpLoadReg:
if v.Type.IsFlags() {
v.Unimplementedf("load flags not implemented: %v", v.LongString())
return
}
p := gc.Prog(loadByType(v.Type))
n, off := gc.AutoVar(v.Args[0])
p.From.Type = obj.TYPE_MEM
p.From.Node = n
p.From.Sym = gc.Linksym(n.Sym)
Example 15: ssaGenValue
//......... (part of the code omitted) .........
p3.To.Type = obj.TYPE_MEM
p3.To.Reg = mips.REG_R2
p4 := gc.Prog(mips.AADDU)
p4.From.Type = obj.TYPE_CONST
p4.From.Offset = sz
p4.To.Type = obj.TYPE_REG
p4.To.Reg = mips.REG_R1
p5 := gc.Prog(mips.AADDU)
p5.From.Type = obj.TYPE_CONST
p5.From.Offset = sz
p5.To.Type = obj.TYPE_REG
p5.To.Reg = mips.REG_R2
p6 := gc.Prog(mips.ABNE)
p6.From.Type = obj.TYPE_REG
p6.From.Reg = v.Args[2].Reg()
p6.Reg = mips.REG_R1
p6.To.Type = obj.TYPE_BRANCH
gc.Patch(p6, p2)
case ssa.OpMIPSCALLstatic:
if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym {
// Deferred calls will appear to be returning to
// the CALL deferreturn(SB) that we are about to emit.
// However, the stack trace code will show the line
// of the instruction byte before the return PC.
// To avoid that being an unrelated instruction,
// insert an actual hardware NOP that will have the right line number.
// This is different from obj.ANOP, which is a virtual no-op
// that doesn't make it into the instruction stream.
ginsnop()
}
p := gc.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Linksym(v.Aux.(*gc.Sym))
if gc.Maxarg < v.AuxInt {
gc.Maxarg = v.AuxInt
}
case ssa.OpMIPSCALLclosure:
p := gc.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Offset = 0
p.To.Reg = v.Args[0].Reg()
if gc.Maxarg < v.AuxInt {
gc.Maxarg = v.AuxInt
}
case ssa.OpMIPSCALLdefer:
p := gc.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Linksym(gc.Deferproc.Sym)
if gc.Maxarg < v.AuxInt {
gc.Maxarg = v.AuxInt
}
case ssa.OpMIPSCALLgo:
p := gc.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Linksym(gc.Newproc.Sym)
if gc.Maxarg < v.AuxInt {
gc.Maxarg = v.AuxInt
}
case ssa.OpMIPSCALLinter:
p := gc.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Offset = 0
p.To.Reg = v.Args[0].Reg()