This article collects typical usage examples of the Pkglookup function from the Golang package cmd/compile/internal/gc. If you have been wondering what exactly Pkglookup does, how to call it, or where it is used, the hand-picked examples below should help.
The following shows 13 code examples of Pkglookup, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
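Before the examples, it helps to know the contract: Pkglookup(name, pkg) returns the canonical *Sym for a name interned in a package's symbol table, creating it on first lookup, so every call site asking for ("duffzero", Runtimepkg) gets the same symbol back. The toy model below illustrates that interning behavior with simplified stand-in types; it is a sketch of the idea, not the compiler's actual implementation.

package main

import "fmt"

// Sym is a toy stand-in for the compiler's symbol type.
type Sym struct {
	Name string
	Pkg  string
}

// pkgSyms models a per-package symbol table.
var pkgSyms = map[string]map[string]*Sym{}

// pkglookup interns name in pkg's table and returns the canonical *Sym,
// mirroring (in spirit) what gc.Pkglookup does.
func pkglookup(name, pkg string) *Sym {
	tab, ok := pkgSyms[pkg]
	if !ok {
		tab = map[string]*Sym{}
		pkgSyms[pkg] = tab
	}
	if s, ok := tab[name]; ok {
		return s
	}
	s := &Sym{Name: name, Pkg: pkg}
	tab[name] = s
	return s
}

func main() {
	a := pkglookup("duffzero", "runtime")
	b := pkglookup("duffzero", "runtime")
	fmt.Println(a == b) // true: repeated lookups are interned
}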
Example 1: zerorange

func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32) *obj.Prog {
	cnt := hi - lo
	if cnt == 0 {
		return p
	}
	if *ax == 0 {
		p = appendpp(p, x86.AMOVL, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
		*ax = 1
	}
	if cnt <= int64(4*gc.Widthreg) {
		for i := int64(0); i < cnt; i += int64(gc.Widthreg) {
			p = appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo+i)
		}
	} else if !gc.Nacl && cnt <= int64(128*gc.Widthreg) {
		p = appendpp(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, frame+lo, obj.TYPE_REG, x86.REG_DI, 0)
		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, 1*(128-cnt/int64(gc.Widthreg)))
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
	} else {
		p = appendpp(p, x86.AMOVL, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0)
		p = appendpp(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, frame+lo, obj.TYPE_REG, x86.REG_DI, 0)
		p = appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
		p = appendpp(p, x86.ASTOSL, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
	}
	return p
}
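The DUFFZERO branch above computes its jump target as 1*(128-cnt/Widthreg). Assuming, per the "magic constants" comment style used in these examples, that runtime.duffzero on 386 is a run of 128 one-byte STOSL instructions, entering 128-n instructions from the start executes exactly the last n stores. A small calculator makes the arithmetic concrete (the widthreg value is an assumption of this sketch):

package main

import "fmt"

// duffzeroOffset386 reproduces the entry-offset arithmetic of the DUFFZERO
// branch above: skip (128 - n) one-byte STOSLs to zero exactly n words.
func duffzeroOffset386(cnt int64) int64 {
	const widthreg = 4 // register width on 386, assumed here
	return 1 * (128 - cnt/widthreg)
}

func main() {
	fmt.Println(duffzeroOffset386(64)) // 112: runs only the final 16 STOSLs
}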
Example 2: zerorange

func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32) *obj.Prog {
	cnt := hi - lo
	if cnt == 0 {
		return p
	}
	if *ax == 0 {
		p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
		*ax = 1
	}
	if cnt%int64(gc.Widthreg) != 0 {
		// should only happen with nacl
		if cnt%int64(gc.Widthptr) != 0 {
			gc.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
		}
		p = appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo)
		lo += int64(gc.Widthptr)
		cnt -= int64(gc.Widthptr)
	}
	if cnt <= int64(4*gc.Widthreg) {
		for i := int64(0); i < cnt; i += int64(gc.Widthreg) {
			p = appendpp(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo+i)
		}
	} else if !gc.Nacl && (cnt <= int64(128*gc.Widthreg)) {
		q := cnt / int64(gc.Widthreg)
		p = appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, frame+lo+dzDI(q), obj.TYPE_REG, x86.REG_DI, 0)
		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(q))
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
	} else {
		p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0)
		p = appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, frame+lo, obj.TYPE_REG, x86.REG_DI, 0)
		p = appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
		p = appendpp(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
	}
	return p
}
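The amd64 variant layers its strategies the same way: tiny ranges get unrolled MOVQs, mid-size ranges jump into runtime.duffzero, and everything else falls back to REP STOSQ. The sketch below restates that decision ladder as plain Go, purely as a reading aid:

package main

import "fmt"

// zeroPlan mirrors the decision ladder of the amd64 zerorange above; it is
// a paraphrase for reading, not compiler code. Widthreg is 8 on amd64.
func zeroPlan(cnt int64, nacl bool) string {
	const widthreg = 8
	switch {
	case cnt == 0:
		return "nothing to do"
	case cnt <= 4*widthreg:
		return "unrolled MOVQ stores"
	case !nacl && cnt <= 128*widthreg:
		return "jump into runtime.duffzero"
	default:
		return "REP STOSQ loop"
	}
}

func main() {
	for _, n := range []int64{16, 512, 8192} {
		fmt.Printf("%5d bytes: %s\n", n, zeroPlan(n, false))
	}
}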
Example 3: ssaGenValue

//... portions of code omitted ...
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = gc.SSARegNum(v.Args[0])
		gc.AddAux2(&p.To, v, sc.Off())
	case ssa.OpAMD64MOVQstoreconstidx8, ssa.OpAMD64MOVLstoreconstidx4, ssa.OpAMD64MOVWstoreconstidx2, ssa.OpAMD64MOVBstoreconstidx1:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		sc := v.AuxValAndOff()
		switch v.Op {
		case ssa.OpAMD64MOVBstoreconstidx1:
			p.From.Offset = int64(int8(sc.Val()))
			p.To.Scale = 1
		case ssa.OpAMD64MOVWstoreconstidx2:
			p.From.Offset = int64(int16(sc.Val()))
			p.To.Scale = 2
		case ssa.OpAMD64MOVLstoreconstidx4:
			p.From.Offset = int64(int32(sc.Val()))
			p.To.Scale = 4
		case ssa.OpAMD64MOVQstoreconstidx8:
			p.From.Offset = sc.Val()
			p.To.Scale = 8
		}
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = gc.SSARegNum(v.Args[0])
		p.To.Index = gc.SSARegNum(v.Args[1])
		gc.AddAux2(&p.To, v, sc.Off())
	case ssa.OpAMD64MOVLQSX, ssa.OpAMD64MOVWQSX, ssa.OpAMD64MOVBQSX, ssa.OpAMD64MOVLQZX, ssa.OpAMD64MOVWQZX, ssa.OpAMD64MOVBQZX,
		ssa.OpAMD64CVTSL2SS, ssa.OpAMD64CVTSL2SD, ssa.OpAMD64CVTSQ2SS, ssa.OpAMD64CVTSQ2SD,
		ssa.OpAMD64CVTTSS2SL, ssa.OpAMD64CVTTSD2SL, ssa.OpAMD64CVTTSS2SQ, ssa.OpAMD64CVTTSD2SQ,
		ssa.OpAMD64CVTSS2SD, ssa.OpAMD64CVTSD2SS:
		opregreg(v.Op.Asm(), gc.SSARegNum(v), gc.SSARegNum(v.Args[0]))
	case ssa.OpAMD64DUFFZERO:
		p := gc.Prog(obj.ADUFFZERO)
		p.To.Type = obj.TYPE_ADDR
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
		p.To.Offset = v.AuxInt
	case ssa.OpAMD64MOVOconst:
		if v.AuxInt != 0 {
			v.Unimplementedf("MOVOconst can only do constant=0")
		}
		r := gc.SSARegNum(v)
		opregreg(x86.AXORPS, r, r)
	case ssa.OpAMD64DUFFCOPY:
		p := gc.Prog(obj.ADUFFCOPY)
		p.To.Type = obj.TYPE_ADDR
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
		p.To.Offset = v.AuxInt
	case ssa.OpCopy, ssa.OpAMD64MOVQconvert: // TODO: use MOVQreg for reg->reg copies instead of OpCopy?
		if v.Type.IsMemory() {
			return
		}
		x := gc.SSARegNum(v.Args[0])
		y := gc.SSARegNum(v)
		if x != y {
			opregreg(moveByType(v.Type), y, x)
		}
	case ssa.OpLoadReg:
		if v.Type.IsFlags() {
			v.Unimplementedf("load flags not implemented: %v", v.LongString())
			return
		}
		p := gc.Prog(loadByType(v.Type))
		n, off := gc.AutoVar(v.Args[0])
		p.From.Type = obj.TYPE_MEM
		p.From.Node = n
		p.From.Sym = gc.Linksym(n.Sym)
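A detail worth pausing on in the storeconstidx cases: the constant is narrowed to the store width (int8, int16, int32) and then sign-extended back to int64, so the assembler always receives the canonical form of the bytes actually being stored. A two-line demonstration of that conversion:

package main

import "fmt"

// The storeconstidx cases above narrow the constant to the store width and
// sign-extend it back, producing the canonical value of the stored bytes.
func main() {
	v := int64(0x1FF)
	fmt.Println(int64(int8(v)))  // -1: a byte store keeps only 0xFF
	fmt.Println(int64(int16(v))) // 511: a word store keeps 0x01FF
}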
Example 4: clearfat

func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		gc.Dump("\nclearfat", nl)
	}
	w := uint32(nl.Type.Width)
	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}
	c := w % 4 // bytes
	q := w / 4 // quads
	if q < 4 {
		// Write sequence of MOV 0, off(base) instead of using STOSL.
		// The hope is that although the code will be slightly longer,
		// the MOVs will have no dependencies and pipeline better
		// than the unrolled STOSL loop.
		// NOTE: Must use agen, not igen, so that optimizer sees address
		// being taken. We are not writing on field boundaries.
		var n1 gc.Node
		gc.Regalloc(&n1, gc.Types[gc.Tptr], nil)
		gc.Agen(nl, &n1)
		n1.Op = gc.OINDREG
		var z gc.Node
		gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
		for ; q > 0; q-- {
			n1.Type = z.Type
			gins(x86.AMOVL, &z, &n1)
			n1.Xoffset += 4
		}
		gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
		for ; c > 0; c-- {
			n1.Type = z.Type
			gins(x86.AMOVB, &z, &n1)
			n1.Xoffset++
		}
		gc.Regfree(&n1)
		return
	}
	var n1 gc.Node
	gc.Nodreg(&n1, gc.Types[gc.Tptr], x86.REG_DI)
	gc.Agen(nl, &n1)
	gconreg(x86.AMOVL, 0, x86.REG_AX)
	if q > 128 || (q >= 4 && gc.Nacl) {
		gconreg(x86.AMOVL, int64(q), x86.REG_CX)
		gins(x86.AREP, nil, nil)   // repeat
		gins(x86.ASTOSL, nil, nil) // STOL AL,*(DI)+
	} else if q >= 4 {
		p := gins(obj.ADUFFZERO, nil, nil)
		p.To.Type = obj.TYPE_ADDR
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
		// 1 and 128 = magic constants: see ../../runtime/asm_386.s
		p.To.Offset = 1 * (128 - int64(q))
	} else {
		for q > 0 {
			gins(x86.ASTOSL, nil, nil) // STOL AL,*(DI)+
			q--
		}
	}
	for c > 0 {
		gins(x86.ASTOSB, nil, nil) // STOB AL,*(DI)+
		c--
	}
}
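Putting the 386 clearfat policy in one place: the width is split into q four-byte quads plus c leftover bytes, the body strategy is chosen by q, and the tail is always finished bytewise. The standalone sketch below paraphrases that ladder; it mirrors the code above and nothing more:

package main

import "fmt"

// clearPlan paraphrases the 386 clearfat above: small objects get unrolled
// MOVLs, huge (or NaCl) ones REP STOSL, the middle range a DUFFZERO entry,
// and the sub-word remainder is finished bytewise.
func clearPlan(w uint32, nacl bool) string {
	q, c := w/4, w%4
	var body string
	switch {
	case q < 4:
		body = fmt.Sprintf("%d unrolled MOVLs", q)
	case q > 128 || nacl:
		body = "REP STOSL"
	default:
		body = fmt.Sprintf("DUFFZERO at offset %d", 128-q)
	}
	if c > 0 {
		body += fmt.Sprintf(" + %d tail bytes", c)
	}
	return body
}

func main() {
	fmt.Println(clearPlan(14, false))   // 3 unrolled MOVLs + 2 tail bytes
	fmt.Println(clearPlan(23, false))   // DUFFZERO at offset 123 + 3 tail bytes
	fmt.Println(clearPlan(4096, false)) // REP STOSL
}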
Example 5: ssaGenValue

//... portions of code omitted ...
		p.To.Offset = 0
		p.To.Reg = v.Args[0].Reg()
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpARMCALLdefer:
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(gc.Deferproc.Sym)
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpARMCALLgo:
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(gc.Newproc.Sym)
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpARMCALLinter:
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 0
		p.To.Reg = v.Args[0].Reg()
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpARMDUFFZERO:
		p := gc.Prog(obj.ADUFFZERO)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
		p.To.Offset = v.AuxInt
	case ssa.OpARMDUFFCOPY:
		p := gc.Prog(obj.ADUFFCOPY)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
		p.To.Offset = v.AuxInt
	case ssa.OpARMLoweredNilCheck:
		// Issue a load which will fault if arg is nil.
		p := gc.Prog(arm.AMOVB)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = arm.REGTMP
		if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
			gc.Warnl(v.Line, "generated nil check")
		}
	case ssa.OpARMLoweredZero:
		// MOVW.P Rarg2, 4(R1)
		// CMP Rarg1, R1
		// BLE -2(PC)
		// arg1 is the address of the last element to zero
		// arg2 is known to be zero
		// auxint is alignment
		var sz int64
		var mov obj.As
		switch {
		case v.AuxInt%4 == 0:
			sz = 4
			mov = arm.AMOVW
		case v.AuxInt%2 == 0:
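The alignment switch this example is cut off in the middle of picks the widest store the alignment guarantees. The sketch below fills in the presumable remaining arms (a 2-byte MOVH and a 1-byte MOVB fallback — an assumption, since they are elided above) to show the whole selection:

package main

import "fmt"

// moveSize models the alignment switch in OpARMLoweredZero above: choose
// the widest store that the alignment allows. The MOVH/MOVB arms are an
// assumption filling in the elided code.
func moveSize(align int64) (sz int64, op string) {
	switch {
	case align%4 == 0:
		return 4, "MOVW"
	case align%2 == 0:
		return 2, "MOVH"
	default:
		return 1, "MOVB"
	}
}

func main() {
	fmt.Println(moveSize(8)) // 4 MOVW
	fmt.Println(moveSize(2)) // 2 MOVH
	fmt.Println(moveSize(1)) // 1 MOVB
}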
Example 6: ssaGenValue

//... portions of code omitted ...
			p.To.Scale = 1
			if i == x86.REG_SP {
				r, i = i, r
			}
		case ssa.OpAMD64MOVWstoreconstidx2:
			p.To.Scale = 2
		case ssa.OpAMD64MOVLstoreconstidx4:
			p.To.Scale = 4
		case ssa.OpAMD64MOVQstoreconstidx8:
			p.To.Scale = 8
		}
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = r
		p.To.Index = i
		gc.AddAux2(&p.To, v, sc.Off())
	case ssa.OpAMD64MOVLQSX, ssa.OpAMD64MOVWQSX, ssa.OpAMD64MOVBQSX, ssa.OpAMD64MOVLQZX, ssa.OpAMD64MOVWQZX, ssa.OpAMD64MOVBQZX,
		ssa.OpAMD64CVTSL2SS, ssa.OpAMD64CVTSL2SD, ssa.OpAMD64CVTSQ2SS, ssa.OpAMD64CVTSQ2SD,
		ssa.OpAMD64CVTTSS2SL, ssa.OpAMD64CVTTSD2SL, ssa.OpAMD64CVTTSS2SQ, ssa.OpAMD64CVTTSD2SQ,
		ssa.OpAMD64CVTSS2SD, ssa.OpAMD64CVTSD2SS:
		opregreg(v.Op.Asm(), gc.SSARegNum(v), gc.SSARegNum(v.Args[0]))
	case ssa.OpAMD64DUFFZERO:
		off := duffStart(v.AuxInt)
		adj := duffAdj(v.AuxInt)
		var p *obj.Prog
		if adj != 0 {
			p = gc.Prog(x86.AADDQ)
			p.From.Type = obj.TYPE_CONST
			p.From.Offset = adj
			p.To.Type = obj.TYPE_REG
			p.To.Reg = x86.REG_DI
		}
		p = gc.Prog(obj.ADUFFZERO)
		p.To.Type = obj.TYPE_ADDR
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
		p.To.Offset = off
	case ssa.OpAMD64MOVOconst:
		if v.AuxInt != 0 {
			v.Unimplementedf("MOVOconst can only do constant=0")
		}
		r := gc.SSARegNum(v)
		opregreg(x86.AXORPS, r, r)
	case ssa.OpAMD64DUFFCOPY:
		p := gc.Prog(obj.ADUFFCOPY)
		p.To.Type = obj.TYPE_ADDR
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
		p.To.Offset = v.AuxInt
	case ssa.OpCopy, ssa.OpAMD64MOVQconvert, ssa.OpAMD64MOVLconvert: // TODO: use MOVQreg for reg->reg copies instead of OpCopy?
		if v.Type.IsMemory() {
			return
		}
		x := gc.SSARegNum(v.Args[0])
		y := gc.SSARegNum(v)
		if x != y {
			opregreg(moveByType(v.Type), y, x)
		}
	case ssa.OpLoadReg:
		if v.Type.IsFlags() {
			v.Unimplementedf("load flags not implemented: %v", v.LongString())
			return
		}
		p := gc.Prog(loadByType(v.Type))
		n, off := gc.AutoVar(v.Args[0])
		p.From.Type = obj.TYPE_MEM
		p.From.Node = n
		p.From.Sym = gc.Linksym(n.Sym)
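Note the small base/index swap at the top of this example: x86 addressing cannot encode SP as an index register, but with scale 1 the address base+index is symmetric, so the two registers can simply trade places. A toy model of that fixup (the register numbering is illustrative only, not the assembler's encoding):

package main

import "fmt"

// fixAddr sketches the swap above: with scale 1, base+index*1 is symmetric,
// so an SP index can be moved into the base slot, which x86 can encode.
const regSP = 4 // assumed numeric encoding, for illustration

func fixAddr(base, index, scale int) (int, int) {
	if scale == 1 && index == regSP {
		base, index = index, base
	}
	return base, index
}

func main() {
	fmt.Println(fixAddr(7, regSP, 1)) // 4 7: SP moved to the base slot
}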
Example 7: blockcopy

func blockcopy(n, ns *gc.Node, osrc, odst, w int64) {
	var noddi gc.Node
	gc.Nodreg(&noddi, gc.Types[gc.Tptr], x86.REG_DI)
	var nodsi gc.Node
	gc.Nodreg(&nodsi, gc.Types[gc.Tptr], x86.REG_SI)
	var nodl gc.Node
	var nodr gc.Node
	if n.Ullman >= ns.Ullman {
		gc.Agenr(n, &nodr, &nodsi)
		if ns.Op == gc.ONAME {
			gc.Gvardef(ns)
		}
		gc.Agenr(ns, &nodl, &noddi)
	} else {
		if ns.Op == gc.ONAME {
			gc.Gvardef(ns)
		}
		gc.Agenr(ns, &nodl, &noddi)
		gc.Agenr(n, &nodr, &nodsi)
	}
	if nodl.Reg != x86.REG_DI {
		gmove(&nodl, &noddi)
	}
	if nodr.Reg != x86.REG_SI {
		gmove(&nodr, &nodsi)
	}
	gc.Regfree(&nodl)
	gc.Regfree(&nodr)
	c := w % 8 // bytes
	q := w / 8 // quads
	var oldcx gc.Node
	var cx gc.Node
	savex(x86.REG_CX, &cx, &oldcx, nil, gc.Types[gc.TINT64])
	// if we are copying forward on the stack and
	// the src and dst overlap, then reverse direction
	if osrc < odst && odst < osrc+w {
		// reverse direction
		gins(x86.ASTD, nil, nil) // set direction flag
		if c > 0 {
			gconreg(addptr, w-1, x86.REG_SI)
			gconreg(addptr, w-1, x86.REG_DI)
			gconreg(movptr, c, x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSB, nil, nil) // MOVB *(SI)-,*(DI)-
		}
		if q > 0 {
			if c > 0 {
				gconreg(addptr, -7, x86.REG_SI)
				gconreg(addptr, -7, x86.REG_DI)
			} else {
				gconreg(addptr, w-8, x86.REG_SI)
				gconreg(addptr, w-8, x86.REG_DI)
			}
			gconreg(movptr, q, x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)-,*(DI)-
		}
		// we leave with the flag clear
		gins(x86.ACLD, nil, nil)
	} else {
		// normal direction
		if q > 128 || (gc.Nacl && q >= 4) {
			gconreg(movptr, q, x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
		} else if q >= 4 {
			var oldx0 gc.Node
			var x0 gc.Node
			savex(x86.REG_X0, &x0, &oldx0, nil, gc.Types[gc.TFLOAT64])
			p := gins(obj.ADUFFCOPY, nil, nil)
			p.To.Type = obj.TYPE_ADDR
			p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
			// 64 blocks taking 14 bytes each
			// see ../../../../runtime/mkduff.go
			p.To.Offset = 14 * (64 - q/2)
			restx(&x0, &oldx0)
			if q%2 != 0 {
				gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
			}
		} else if !gc.Nacl && c == 0 {
			// We don't need the MOVSQ side-effect of updating SI and DI,
			// and issuing a sequence of MOVQs directly is faster.
			nodsi.Op = gc.OINDREG
			noddi.Op = gc.OINDREG
			for q > 0 {
				gmove(&nodsi, &cx) // MOVQ x+(SI),CX
				gmove(&cx, &noddi) // MOVQ CX,x+(DI)
//... portions of code omitted ...
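The overlap test osrc < odst && odst < osrc+w is what forces the reversed (STD) copy: a forward copy into an overlapping higher destination would overwrite source bytes before reading them. A byte-slice demonstration of both directions:

package main

import "fmt"

// forward copies low-to-high, as MOVS does with the direction flag clear.
func forward(b []byte, src, dst, w int) {
	for i := 0; i < w; i++ {
		b[dst+i] = b[src+i]
	}
}

// backward copies high-to-low, as MOVS does after STD sets the flag.
func backward(b []byte, src, dst, w int) {
	for i := w - 1; i >= 0; i-- {
		b[dst+i] = b[src+i]
	}
}

func main() {
	a := []byte("abcdefgh")
	forward(a, 0, 2, 6)
	fmt.Println(string(a)) // "abababab": source clobbered mid-copy

	a = []byte("abcdefgh")
	backward(a, 0, 2, 6)
	fmt.Println(string(a)) // "ababcdef": the overlapping copy is correct
}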
Example 8: blockcopy

func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
	var dst gc.Node
	gc.Nodreg(&dst, gc.Types[gc.Tptr], x86.REG_DI)
	var src gc.Node
	gc.Nodreg(&src, gc.Types[gc.Tptr], x86.REG_SI)
	var tsrc gc.Node
	gc.Tempname(&tsrc, gc.Types[gc.Tptr])
	var tdst gc.Node
	gc.Tempname(&tdst, gc.Types[gc.Tptr])
	if !n.Addable {
		gc.Agen(n, &tsrc)
	}
	if !res.Addable {
		gc.Agen(res, &tdst)
	}
	if n.Addable {
		gc.Agen(n, &src)
	} else {
		gmove(&tsrc, &src)
	}
	if res.Op == gc.ONAME {
		gc.Gvardef(res)
	}
	if res.Addable {
		gc.Agen(res, &dst)
	} else {
		gmove(&tdst, &dst)
	}
	c := int32(w % 4) // bytes
	q := int32(w / 4) // doublewords
	// if we are copying forward on the stack and
	// the src and dst overlap, then reverse direction
	if osrc < odst && odst < osrc+w {
		// reverse direction
		gins(x86.ASTD, nil, nil) // set direction flag
		if c > 0 {
			gconreg(x86.AADDL, w-1, x86.REG_SI)
			gconreg(x86.AADDL, w-1, x86.REG_DI)
			gconreg(x86.AMOVL, int64(c), x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSB, nil, nil) // MOVB *(SI)-,*(DI)-
		}
		if q > 0 {
			if c > 0 {
				gconreg(x86.AADDL, -3, x86.REG_SI)
				gconreg(x86.AADDL, -3, x86.REG_DI)
			} else {
				gconreg(x86.AADDL, w-4, x86.REG_SI)
				gconreg(x86.AADDL, w-4, x86.REG_DI)
			}
			gconreg(x86.AMOVL, int64(q), x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSL, nil, nil) // MOVL *(SI)-,*(DI)-
		}
		// we leave with the flag clear
		gins(x86.ACLD, nil, nil)
	} else {
		gins(x86.ACLD, nil, nil) // paranoia. TODO(rsc): remove?
		// normal direction
		if q > 128 || (q >= 4 && gc.Nacl) {
			gconreg(x86.AMOVL, int64(q), x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+
		} else if q >= 4 {
			p := gins(obj.ADUFFCOPY, nil, nil)
			p.To.Type = obj.TYPE_ADDR
			p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
			// 10 and 128 = magic constants: see ../../runtime/asm_386.s
			p.To.Offset = 10 * (128 - int64(q))
		} else if !gc.Nacl && c == 0 {
			var cx gc.Node
			gc.Nodreg(&cx, gc.Types[gc.TINT32], x86.REG_CX)
			// We don't need the MOVSL side-effect of updating SI and DI,
			// and issuing a sequence of MOVLs directly is faster.
			src.Op = gc.OINDREG
			dst.Op = gc.OINDREG
			for q > 0 {
				gmove(&src, &cx) // MOVL x+(SI),CX
				gmove(&cx, &dst) // MOVL CX,x+(DI)
				src.Xoffset += 4
				dst.Xoffset += 4
				q--
			}
		} else {
			for q > 0 {
				gins(x86.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+
				q--
//... portions of code omitted ...
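Both blockcopy variants compute Duff entry points from the magic constants named in their comments: 128 ten-byte blocks moving one doubleword each on 386, and 64 fourteen-byte blocks moving two quadwords each on amd64 (an odd leftover quadword is finished with an explicit MOVSQ). A calculator for the two offsets, taking those comments at face value:

package main

import "fmt"

// Entry offsets into the runtime Duff copy routines, per the comments in
// the examples above (treated here as given, not independently verified).
func duffcopy386(q int64) int64   { return 10 * (128 - q) }   // q doublewords
func duffcopyAMD64(q int64) int64 { return 14 * (64 - q/2) }  // q quadwords

func main() {
	fmt.Println(duffcopy386(32))  // 960: run the last 32 blocks
	fmt.Println(duffcopyAMD64(9)) // 840: 4 blocks, plus one explicit MOVSQ
}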
Example 9: clearfat

func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		gc.Dump("\nclearfat", nl)
	}
	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}
	w := nl.Type.Width
	if w > 1024 || (w >= 64 && (gc.Nacl || isPlan9)) {
		var oldn1 gc.Node
		var n1 gc.Node
		savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
		gc.Agen(nl, &n1)
		var ax gc.Node
		var oldax gc.Node
		savex(x86.REG_AX, &ax, &oldax, nil, gc.Types[gc.Tptr])
		gconreg(x86.AMOVL, 0, x86.REG_AX)
		gconreg(movptr, w/8, x86.REG_CX)
		gins(x86.AREP, nil, nil)   // repeat
		gins(x86.ASTOSQ, nil, nil) // STOQ AL,*(DI)+
		if w%8 != 0 {
			n1.Op = gc.OINDREG
			clearfat_tail(&n1, w%8)
		}
		restx(&n1, &oldn1)
		restx(&ax, &oldax)
		return
	}
	if w >= 64 {
		var oldn1 gc.Node
		var n1 gc.Node
		savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
		gc.Agen(nl, &n1)
		var vec_zero gc.Node
		var old_x0 gc.Node
		savex(x86.REG_X0, &vec_zero, &old_x0, nil, gc.Types[gc.TFLOAT64])
		gins(x86.AXORPS, &vec_zero, &vec_zero)
		if di := dzDI(w); di != 0 {
			gconreg(addptr, di, x86.REG_DI)
		}
		p := gins(obj.ADUFFZERO, nil, nil)
		p.To.Type = obj.TYPE_ADDR
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
		p.To.Offset = dzOff(w)
		if w%16 != 0 {
			n1.Op = gc.OINDREG
			n1.Xoffset -= 16 - w%16
			gins(x86.AMOVUPS, &vec_zero, &n1)
		}
		restx(&vec_zero, &old_x0)
		restx(&n1, &oldn1)
		return
	}
	// NOTE: Must use agen, not igen, so that optimizer sees address
	// being taken. We are not writing on field boundaries.
	var n1 gc.Node
	gc.Agenr(nl, &n1, nil)
	n1.Op = gc.OINDREG
	clearfat_tail(&n1, w)
	gc.Regfree(&n1)
}
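The SSE path here finishes a non-multiple-of-16 tail with one overlapping MOVUPS (the Xoffset -= 16 - w%16 adjustment) instead of a byte loop; the trick is safe because this path requires w >= 64, so the backed-up store never underruns the object. A slice-level model of the same idea:

package main

import "fmt"

// zero16 stands in for a 16-byte MOVUPS store of zeros.
func zero16(b []byte, off int) {
	for i := 0; i < 16; i++ {
		b[off+i] = 0
	}
}

// clear zeroes b with aligned 16-byte stores, then one overlapping store
// for the tail, mirroring the Xoffset adjustment above. Requires len >= 16.
func clear(b []byte) {
	w := len(b)
	for off := 0; off+16 <= w; off += 16 {
		zero16(b, off)
	}
	if w%16 != 0 {
		zero16(b, w-16) // overlapping tail store, re-clears a few bytes
	}
}

func main() {
	b := make([]byte, 40)
	for i := range b {
		b[i] = 0xFF
	}
	clear(b)
	fmt.Println(b[39]) // 0: the tail was covered by the overlapping store
}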
Example 10: ssaGenValue387

//... portions of code omitted ...
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux(&p.To, v)
		switch v.Op {
		case ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1:
			p.To.Scale = 1
			p.To.Index = v.Args[1].Reg()
		case ssa.Op386MOVSSstoreidx4:
			p.To.Scale = 4
			p.To.Index = v.Args[1].Reg()
		case ssa.Op386MOVSDstoreidx8:
			p.To.Scale = 8
			p.To.Index = v.Args[1].Reg()
		}
		return true
	case ssa.Op386ADDSS, ssa.Op386ADDSD, ssa.Op386SUBSS, ssa.Op386SUBSD,
		ssa.Op386MULSS, ssa.Op386MULSD, ssa.Op386DIVSS, ssa.Op386DIVSD:
		if v.Reg() != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		// Push arg1 on top of stack
		push(s, v.Args[1])
		// Set precision if needed. 64 bits is the default.
		switch v.Op {
		case ssa.Op386ADDSS, ssa.Op386SUBSS, ssa.Op386MULSS, ssa.Op386DIVSS:
			p := gc.Prog(x86.AFSTCW)
			s.AddrScratch(&p.To)
			p = gc.Prog(x86.AFLDCW)
			p.From.Type = obj.TYPE_MEM
			p.From.Name = obj.NAME_EXTERN
			p.From.Sym = gc.Linksym(gc.Pkglookup("controlWord32", gc.Runtimepkg))
		}
		var op obj.As
		switch v.Op {
		case ssa.Op386ADDSS, ssa.Op386ADDSD:
			op = x86.AFADDDP
		case ssa.Op386SUBSS, ssa.Op386SUBSD:
			op = x86.AFSUBDP
		case ssa.Op386MULSS, ssa.Op386MULSD:
			op = x86.AFMULDP
		case ssa.Op386DIVSS, ssa.Op386DIVSD:
			op = x86.AFDIVDP
		}
		p := gc.Prog(op)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = x86.REG_F0
		p.To.Type = obj.TYPE_REG
		p.To.Reg = s.SSEto387[v.Reg()] + 1
		// Restore precision if needed.
		switch v.Op {
		case ssa.Op386ADDSS, ssa.Op386SUBSS, ssa.Op386MULSS, ssa.Op386DIVSS:
			p := gc.Prog(x86.AFLDCW)
			s.AddrScratch(&p.From)
		}
		return true
	case ssa.Op386UCOMISS, ssa.Op386UCOMISD:
		push(s, v.Args[0])
		// Compare.
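The FSTCW/FLDCW pair around single-precision ops exists because the x87 stack computes at a wider precision by default; loading controlWord32 forces 32-bit rounding so float32 arithmetic keeps its IEEE single-precision semantics. A quick illustration of what the wider precision would otherwise preserve:

package main

import "fmt"

// Around 2^24, float32 can no longer represent every integer, while a
// wider format still can - the gap the control word exists to close.
func main() {
	c := float32(1 << 24)
	fmt.Println(c+1 == c)                   // true: the +1 is lost in float32
	fmt.Println(float64(c)+1 == float64(c)) // false: kept at wider precision
}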
Example 11: ssaGenValue

//... portions of code omitted ...
		p.To.Offset = 0
		p.To.Reg = gc.SSARegNum(v.Args[0])
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpARMCALLdefer:
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(gc.Deferproc.Sym)
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpARMCALLgo:
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(gc.Newproc.Sym)
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpARMCALLinter:
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 0
		p.To.Reg = gc.SSARegNum(v.Args[0])
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpARMDUFFZERO:
		p := gc.Prog(obj.ADUFFZERO)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
		p.To.Offset = v.AuxInt
	case ssa.OpARMDUFFCOPY:
		p := gc.Prog(obj.ADUFFCOPY)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
		p.To.Offset = v.AuxInt
	case ssa.OpARMLoweredNilCheck:
		// Optimization - if the subsequent block has a load or store
		// at the same address, we don't need to issue this instruction.
		mem := v.Args[1]
		for _, w := range v.Block.Succs[0].Block().Values {
			if w.Op == ssa.OpPhi {
				if w.Type.IsMemory() {
					mem = w
				}
				continue
			}
			if len(w.Args) == 0 || !w.Args[len(w.Args)-1].Type.IsMemory() {
				// w doesn't use a store - can't be a memory op.
				continue
			}
			if w.Args[len(w.Args)-1] != mem {
				v.Fatalf("wrong store after nilcheck v=%s w=%s", v, w)
			}
			switch w.Op {
			case ssa.OpARMMOVBload, ssa.OpARMMOVBUload, ssa.OpARMMOVHload, ssa.OpARMMOVHUload,
				ssa.OpARMMOVWload, ssa.OpARMMOVFload, ssa.OpARMMOVDload,
				ssa.OpARMMOVBstore, ssa.OpARMMOVHstore, ssa.OpARMMOVWstore,
				ssa.OpARMMOVFstore, ssa.OpARMMOVDstore:
				// arg0 is ptr, auxint is offset
				if w.Args[0] == v.Args[0] && w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage {
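The scan above elides the explicit nil-check load when the next block already touches the same pointer at a small offset: such an access faults on nil anyway, so it doubles as the check. A compact model of the subsumption test (minZeroPage here is an assumed placeholder for the guaranteed-unmapped region at address zero):

package main

import "fmt"

const minZeroPage = 4096 // assumption: size of the unmapped low page

// subsumesNilCheck mirrors the condition above: a following memory op on
// the same base, with no aux symbol and a small non-negative offset, is
// guaranteed to fault on a nil base, so the explicit check can be dropped.
func subsumesNilCheck(sameBase, auxNil bool, auxInt int64) bool {
	return sameBase && auxNil && auxInt >= 0 && auxInt < minZeroPage
}

func main() {
	fmt.Println(subsumesNilCheck(true, true, 8))    // true: elide the check
	fmt.Println(subsumesNilCheck(true, true, 8192)) // false: keep it
}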
Example 12: zerorange

func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32, x0 *uint32) *obj.Prog {
	cnt := hi - lo
	if cnt == 0 {
		return p
	}
	if cnt%int64(gc.Widthreg) != 0 {
		// should only happen with nacl
		if cnt%int64(gc.Widthptr) != 0 {
			gc.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
		}
		if *ax == 0 {
			p = gc.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
			*ax = 1
		}
		p = gc.Appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo)
		lo += int64(gc.Widthptr)
		cnt -= int64(gc.Widthptr)
	}
	if cnt == 8 {
		if *ax == 0 {
			p = gc.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
			*ax = 1
		}
		p = gc.Appendpp(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo)
	} else if !isPlan9 && cnt <= int64(8*gc.Widthreg) {
		if *x0 == 0 {
			p = gc.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
			*x0 = 1
		}
		for i := int64(0); i < cnt/16; i++ {
			p = gc.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo+i*16)
		}
		if cnt%16 != 0 {
			p = gc.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo+cnt-int64(16))
		}
	} else if !gc.Nacl && !isPlan9 && (cnt <= int64(128*gc.Widthreg)) {
		if *x0 == 0 {
			p = gc.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
			*x0 = 1
		}
		p = gc.Appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, frame+lo+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0)
		p = gc.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt))
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
		if cnt%16 != 0 {
			p = gc.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8))
		}
	} else {
		if *ax == 0 {
			p = gc.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
			*ax = 1
		}
		p = gc.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0)
		p = gc.Appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, frame+lo, obj.TYPE_REG, x86.REG_DI, 0)
		p = gc.Appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
		p = gc.Appendpp(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
	}
	return p
}
Example 13: clearfat

func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		gc.Dump("\nclearfat", nl)
	}
	w := nl.Type.Width
	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}
	c := w % 8 // bytes
	q := w / 8 // quads
	if q < 4 {
		// Write sequence of MOV 0, off(base) instead of using STOSQ.
		// The hope is that although the code will be slightly longer,
		// the MOVs will have no dependencies and pipeline better
		// than the unrolled STOSQ loop.
		// NOTE: Must use agen, not igen, so that optimizer sees address
		// being taken. We are not writing on field boundaries.
		var n1 gc.Node
		gc.Agenr(nl, &n1, nil)
		n1.Op = gc.OINDREG
		var z gc.Node
		gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
		for {
			tmp14 := q
			q--
			if tmp14 <= 0 {
				break
			}
			n1.Type = z.Type
			gins(x86.AMOVQ, &z, &n1)
			n1.Xoffset += 8
		}
		if c >= 4 {
			gc.Nodconst(&z, gc.Types[gc.TUINT32], 0)
			n1.Type = z.Type
			gins(x86.AMOVL, &z, &n1)
			n1.Xoffset += 4
			c -= 4
		}
		gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
		for {
			tmp15 := c
			c--
			if tmp15 <= 0 {
				break
			}
			n1.Type = z.Type
			gins(x86.AMOVB, &z, &n1)
			n1.Xoffset++
		}
		gc.Regfree(&n1)
		return
	}
	var oldn1 gc.Node
	var n1 gc.Node
	savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
	gc.Agen(nl, &n1)
	var ax gc.Node
	var oldax gc.Node
	savex(x86.REG_AX, &ax, &oldax, nil, gc.Types[gc.Tptr])
	gconreg(x86.AMOVL, 0, x86.REG_AX)
	if q > 128 || gc.Nacl {
		gconreg(movptr, q, x86.REG_CX)
		gins(x86.AREP, nil, nil)   // repeat
		gins(x86.ASTOSQ, nil, nil) // STOQ AL,*(DI)+
	} else {
		if di := dzDI(q); di != 0 {
			gconreg(addptr, di, x86.REG_DI)
		}
		p := gins(obj.ADUFFZERO, nil, nil)
		p.To.Type = obj.TYPE_ADDR
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
		p.To.Offset = dzOff(q)
	}
	z := ax
	di := n1
	if w >= 8 && c >= 4 {
		di.Op = gc.OINDREG
		z.Type = gc.Types[gc.TINT64]
		di.Type = z.Type
		p := gins(x86.AMOVQ, &z, &di)
		p.To.Scale = 1
		p.To.Offset = c - 8
	} else if c >= 4 {
		di.Op = gc.OINDREG
		z.Type = gc.Types[gc.TINT32]
//... portions of code omitted ...
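Finally, the tmp14/tmp15 loops in this example are artifacts of the compiler's mechanical C-to-Go conversion of a `while(q-- > 0)` idiom; the 386 clearfat in Example 4 already writes the same countdown idiomatically. For reference:

package main

import "fmt"

// The idiomatic equivalent of the machine-translated tmp14/tmp15 loops:
// run the body q times, counting down.
func main() {
	q := 3
	for ; q > 0; q-- {
		fmt.Println("MOVQ $0, off(DI)") // one zeroing store per iteration
	}
}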