This page collects typical usage examples of the Agenr function from the Go package cmd/compile/internal/gc. If you are wondering what Agenr does, how to call it, or what real uses of it look like, the curated examples below should help.
Six code examples of Agenr are shown below, sorted by popularity by default. You can upvote the examples you find useful; your votes help the system surface better Golang code examples.
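Before the examples, a note on the shared calling pattern: every use below asks Agenr to materialize the address of a node in a freshly allocated register, uses that register as a pointer operand, and then releases it. Here is a minimal sketch of that pattern, assuming the pre-SSA gc back end API that these examples come from (an illustration only; it does not compile outside cmd/compile):

func zeroThroughPointer(nl *gc.Node) {
	var n1 gc.Node
	// Agenr evaluates &nl into a newly allocated pointer register; the third
	// argument is only an allocation hint (nil here, a result node elsewhere).
	gc.Agenr(nl, &n1, nil)
	n1.Op = gc.OINDREG // treat the register as a memory operand *(reg)
	// ... emit stores through n1 here, e.g. gins(x86.AMOVQ, &zero, &n1) ...
	gc.Regfree(&n1) // the caller of Agenr must release the register
}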
Example 1: blockcopy
func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
	// determine alignment.
	// want to avoid unaligned access, so have to use
	// smaller operations for less aligned types.
	// for example moving [4]byte must use 4 MOVB not 1 MOVW.
	align := int(n.Type.Align)
	var op int
	switch align {
	default:
		gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)
	case 1:
		op = mips.AMOVB
	case 2:
		op = mips.AMOVH
	case 4:
		op = mips.AMOVW
	case 8:
		op = mips.AMOVV
	}
	if w%int64(align) != 0 {
		gc.Fatalf("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type)
	}
	c := int32(w / int64(align))
	// if we are copying forward on the stack and
	// the src and dst overlap, then reverse direction
	dir := align
	if osrc < odst && odst < osrc+w {
		dir = -dir
	}
	var dst gc.Node
	var src gc.Node
	if n.Ullman >= res.Ullman {
		gc.Agenr(n, &dst, res) // temporarily use dst
		gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
		gins(mips.AMOVV, &dst, &src)
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agen(res, &dst)
	} else {
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agenr(res, &dst, res)
		gc.Agenr(n, &src, nil)
	}
	var tmp gc.Node
	gc.Regalloc(&tmp, gc.Types[gc.Tptr], nil)
	// set up end marker
	var nend gc.Node
	// move src and dest to the end of block if necessary
	if dir < 0 {
		if c >= 4 {
			gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
			gins(mips.AMOVV, &src, &nend)
		}
		p := gins(mips.AADDV, nil, &src)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = w
		p = gins(mips.AADDV, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = w
	} else {
		p := gins(mips.AADDV, nil, &src)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(-dir)
		p = gins(mips.AADDV, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(-dir)
		if c >= 4 {
			gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
			p := gins(mips.AMOVV, &src, &nend)
			p.From.Type = obj.TYPE_ADDR
			p.From.Offset = w
		}
	}
	// move
	// TODO: enable duffcopy for larger copies.
	if c >= 4 {
		p := gins(op, &src, &tmp)
		p.From.Type = obj.TYPE_MEM
		p.From.Offset = int64(dir)
		ploop := p
//......... the rest of this code is omitted here .........
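The dir computation in this example decides whether the block must be copied back to front. A standalone sketch of just that check (hypothetical helper names, not part of cmd/compile):

package main

import "fmt"

// copyDir mirrors the overlap test in blockcopy above: if the destination
// starts inside the source range [osrc, osrc+w), a forward copy would
// clobber bytes it has not read yet, so the direction is negated.
func copyDir(osrc, odst, w, align int64) int64 {
	dir := align
	if osrc < odst && odst < osrc+w {
		dir = -dir
	}
	return dir
}

func main() {
	fmt.Println(copyDir(0, 8, 32, 8))  // overlapping: -8, copy backwards
	fmt.Println(copyDir(64, 0, 32, 8)) // disjoint: 8, copy forwards
}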
Example 2: clearfat
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		gc.Dump("\nclearfat", nl)
	}
	w := nl.Type.Width
	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}
	c := w % 8 // bytes
	q := w / 8 // quads
	if q < 4 {
		// Write sequence of MOV 0, off(base) instead of using STOSQ.
		// The hope is that although the code will be slightly longer,
		// the MOVs will have no dependencies and pipeline better
		// than the unrolled STOSQ loop.
		// NOTE: Must use agen, not igen, so that optimizer sees address
		// being taken. We are not writing on field boundaries.
		var n1 gc.Node
		gc.Agenr(nl, &n1, nil)
		n1.Op = gc.OINDREG
		var z gc.Node
		gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
		for {
			tmp14 := q
			q--
			if tmp14 <= 0 {
				break
			}
			n1.Type = z.Type
			gins(x86.AMOVQ, &z, &n1)
			n1.Xoffset += 8
		}
		if c >= 4 {
			gc.Nodconst(&z, gc.Types[gc.TUINT32], 0)
			n1.Type = z.Type
			gins(x86.AMOVL, &z, &n1)
			n1.Xoffset += 4
			c -= 4
		}
		gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
		for {
			tmp15 := c
			c--
			if tmp15 <= 0 {
				break
			}
			n1.Type = z.Type
			gins(x86.AMOVB, &z, &n1)
			n1.Xoffset++
		}
		gc.Regfree(&n1)
		return
	}
	var oldn1 gc.Node
	var n1 gc.Node
	savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
	gc.Agen(nl, &n1)
	var ax gc.Node
	var oldax gc.Node
	savex(x86.REG_AX, &ax, &oldax, nil, gc.Types[gc.Tptr])
	gconreg(x86.AMOVL, 0, x86.REG_AX)
	if q > 128 || gc.Nacl {
		gconreg(movptr, q, x86.REG_CX)
		gins(x86.AREP, nil, nil)   // repeat
		gins(x86.ASTOSQ, nil, nil) // STOQ AL,*(DI)+
	} else {
		if di := dzDI(q); di != 0 {
			gconreg(addptr, di, x86.REG_DI)
		}
		p := gins(obj.ADUFFZERO, nil, nil)
		p.To.Type = obj.TYPE_ADDR
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
		p.To.Offset = dzOff(q)
	}
	z := ax
	di := n1
	if w >= 8 && c >= 4 {
		di.Op = gc.OINDREG
		z.Type = gc.Types[gc.TINT64]
		di.Type = z.Type
		p := gins(x86.AMOVQ, &z, &di)
		p.To.Scale = 1
		p.To.Offset = c - 8
	} else if c >= 4 {
		di.Op = gc.OINDREG
		z.Type = gc.Types[gc.TINT32]
//......... the rest of this code is omitted here .........
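The q < 4 path above unrolls the zeroing into at most a handful of stores. A standalone sketch of that decomposition (hypothetical helper that prints mnemonics instead of emitting Prog entries):

package main

import "fmt"

// unrolledZero mimics the small-object path of clearfat above: one MOVQ $0
// per 8-byte quad, an optional MOVL $0 for a 4-byte chunk, then MOVB $0 for
// each remaining byte.
func unrolledZero(w int64) []string {
	var seq []string
	c := w % 8 // tail bytes
	q := w / 8 // 8-byte quads
	off := int64(0)
	for ; q > 0; q-- {
		seq = append(seq, fmt.Sprintf("MOVQ $0, %d(base)", off))
		off += 8
	}
	if c >= 4 {
		seq = append(seq, fmt.Sprintf("MOVL $0, %d(base)", off))
		off += 4
		c -= 4
	}
	for ; c > 0; c-- {
		seq = append(seq, fmt.Sprintf("MOVB $0, %d(base)", off))
		off++
	}
	return seq
}

func main() {
	fmt.Println(unrolledZero(23)) // 2 MOVQ, 1 MOVL, 3 MOVB
}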
Example 3: blockcopy
func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
	// determine alignment.
	// want to avoid unaligned access, so have to use
	// smaller operations for less aligned types.
	// for example moving [4]byte must use 4 MOVB not 1 MOVW.
	align := int(n.Type.Align)
	var op int
	switch align {
	default:
		gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)
	case 1:
		op = arm.AMOVB
	case 2:
		op = arm.AMOVH
	case 4:
		op = arm.AMOVW
	}
	if w%int64(align) != 0 {
		gc.Fatalf("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type)
	}
	c := int32(w / int64(align))
	if osrc%int64(align) != 0 || odst%int64(align) != 0 {
		gc.Fatalf("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
	}
	// if we are copying forward on the stack and
	// the src and dst overlap, then reverse direction
	dir := align
	if osrc < odst && int64(odst) < int64(osrc)+w {
		dir = -dir
	}
	if op == arm.AMOVW && !gc.Nacl && dir > 0 && c >= 4 && c <= 128 {
		var r0 gc.Node
		r0.Op = gc.OREGISTER
		r0.Reg = arm.REG_R0
		var r1 gc.Node
		r1.Op = gc.OREGISTER
		r1.Reg = arm.REG_R0 + 1
		var r2 gc.Node
		r2.Op = gc.OREGISTER
		r2.Reg = arm.REG_R0 + 2
		var src gc.Node
		gc.Regalloc(&src, gc.Types[gc.Tptr], &r1)
		var dst gc.Node
		gc.Regalloc(&dst, gc.Types[gc.Tptr], &r2)
		if n.Ullman >= res.Ullman {
			// eval n first
			gc.Agen(n, &src)
			if res.Op == gc.ONAME {
				gc.Gvardef(res)
			}
			gc.Agen(res, &dst)
		} else {
			// eval res first
			if res.Op == gc.ONAME {
				gc.Gvardef(res)
			}
			gc.Agen(res, &dst)
			gc.Agen(n, &src)
		}
		var tmp gc.Node
		gc.Regalloc(&tmp, gc.Types[gc.Tptr], &r0)
		f := gc.Sysfunc("duffcopy")
		p := gins(obj.ADUFFCOPY, nil, f)
		gc.Afunclit(&p.To, f)
		// 8 and 128 = magic constants: see ../../runtime/asm_arm.s
		p.To.Offset = 8 * (128 - int64(c))
		gc.Regfree(&tmp)
		gc.Regfree(&src)
		gc.Regfree(&dst)
		return
	}
	var dst gc.Node
	var src gc.Node
	if n.Ullman >= res.Ullman {
		gc.Agenr(n, &dst, res) // temporarily use dst
		gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
		gins(arm.AMOVW, &dst, &src)
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agen(res, &dst)
	} else {
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agenr(res, &dst, res)
//......... the rest of this code is omitted here .........
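The ADUFFCOPY offset in this example, 8 * (128 - c), can be checked in isolation. Judging from the formula and the magic-constant comment, the ARM duffcopy body appears to consist of 128 blocks of 8 bytes, each moving one 4-byte word, so entering 128-c blocks from the start copies exactly c words. A small sketch of that arithmetic (hypothetical helper, constants taken from the example):

package main

import "fmt"

// duffcopyOffsetARM reproduces the entry-offset computation used above.
func duffcopyOffsetARM(c int64) int64 {
	return 8 * (128 - c) // skip the blocks we do not need
}

func main() {
	fmt.Println(duffcopyOffsetARM(128)) // 0: run the whole routine, 128 words
	fmt.Println(duffcopyOffsetARM(4))   // 992: skip 124 blocks, copy 4 words
}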
Example 4: blockcopy
func blockcopy(n, ns *gc.Node, osrc, odst, w int64) {
	var noddi gc.Node
	gc.Nodreg(&noddi, gc.Types[gc.Tptr], x86.REG_DI)
	var nodsi gc.Node
	gc.Nodreg(&nodsi, gc.Types[gc.Tptr], x86.REG_SI)
	var nodl gc.Node
	var nodr gc.Node
	if n.Ullman >= ns.Ullman {
		gc.Agenr(n, &nodr, &nodsi)
		if ns.Op == gc.ONAME {
			gc.Gvardef(ns)
		}
		gc.Agenr(ns, &nodl, &noddi)
	} else {
		if ns.Op == gc.ONAME {
			gc.Gvardef(ns)
		}
		gc.Agenr(ns, &nodl, &noddi)
		gc.Agenr(n, &nodr, &nodsi)
	}
	if nodl.Reg != x86.REG_DI {
		gmove(&nodl, &noddi)
	}
	if nodr.Reg != x86.REG_SI {
		gmove(&nodr, &nodsi)
	}
	gc.Regfree(&nodl)
	gc.Regfree(&nodr)
	c := w % 8 // bytes
	q := w / 8 // quads
	var oldcx gc.Node
	var cx gc.Node
	savex(x86.REG_CX, &cx, &oldcx, nil, gc.Types[gc.TINT64])
	// if we are copying forward on the stack and
	// the src and dst overlap, then reverse direction
	if osrc < odst && odst < osrc+w {
		// reverse direction
		gins(x86.ASTD, nil, nil) // set direction flag
		if c > 0 {
			gconreg(addptr, w-1, x86.REG_SI)
			gconreg(addptr, w-1, x86.REG_DI)
			gconreg(movptr, c, x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSB, nil, nil) // MOVB *(SI)-,*(DI)-
		}
		if q > 0 {
			if c > 0 {
				gconreg(addptr, -7, x86.REG_SI)
				gconreg(addptr, -7, x86.REG_DI)
			} else {
				gconreg(addptr, w-8, x86.REG_SI)
				gconreg(addptr, w-8, x86.REG_DI)
			}
			gconreg(movptr, q, x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)-,*(DI)-
		}
		// we leave with the flag clear
		gins(x86.ACLD, nil, nil)
	} else {
		// normal direction
		if q > 128 || (gc.Nacl && q >= 4) {
			gconreg(movptr, q, x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
		} else if q >= 4 {
			var oldx0 gc.Node
			var x0 gc.Node
			savex(x86.REG_X0, &x0, &oldx0, nil, gc.Types[gc.TFLOAT64])
			p := gins(obj.ADUFFCOPY, nil, nil)
			p.To.Type = obj.TYPE_ADDR
			p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
			// 64 blocks taking 14 bytes each
			// see ../../../../runtime/mkduff.go
			p.To.Offset = 14 * (64 - q/2)
			restx(&x0, &oldx0)
			if q%2 != 0 {
				gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
			}
		} else if !gc.Nacl && c == 0 {
			// We don't need the MOVSQ side-effect of updating SI and DI,
			// and issuing a sequence of MOVQs directly is faster.
			nodsi.Op = gc.OINDREG
			noddi.Op = gc.OINDREG
			for q > 0 {
				gmove(&nodsi, &cx) // MOVQ x+(SI),CX
				gmove(&cx, &noddi) // MOVQ CX,x+(DI)
//......... the rest of this code is omitted here .........
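The amd64 duffcopy entry offset works the same way: per the comment in the code, the routine is 64 blocks of 14 bytes and each block moves two 8-byte quads, so the caller skips the first 64 - q/2 blocks and mops up an odd quad with a trailing MOVSQ. A small sketch of that arithmetic (hypothetical helper, constants taken from the example):

package main

import "fmt"

// duffcopyOffsetAMD64 reproduces the entry-offset computation used above.
func duffcopyOffsetAMD64(q int64) int64 {
	return 14 * (64 - q/2) // each 14-byte block copies two quads
}

func main() {
	fmt.Println(duffcopyOffsetAMD64(128)) // 0: run all 64 blocks
	fmt.Println(duffcopyOffsetAMD64(9))   // 840: 4 blocks copy 8 quads, 1 quad left for MOVSQ
}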
Example 5: clearfat
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		gc.Dump("\nclearfat", nl)
	}
	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}
	w := nl.Type.Width
	if w > 1024 || (w >= 64 && (gc.Nacl || isPlan9)) {
		var oldn1 gc.Node
		var n1 gc.Node
		savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
		gc.Agen(nl, &n1)
		var ax gc.Node
		var oldax gc.Node
		savex(x86.REG_AX, &ax, &oldax, nil, gc.Types[gc.Tptr])
		gconreg(x86.AMOVL, 0, x86.REG_AX)
		gconreg(movptr, w/8, x86.REG_CX)
		gins(x86.AREP, nil, nil)   // repeat
		gins(x86.ASTOSQ, nil, nil) // STOQ AL,*(DI)+
		if w%8 != 0 {
			n1.Op = gc.OINDREG
			clearfat_tail(&n1, w%8)
		}
		restx(&n1, &oldn1)
		restx(&ax, &oldax)
		return
	}
	if w >= 64 {
		var oldn1 gc.Node
		var n1 gc.Node
		savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
		gc.Agen(nl, &n1)
		var vec_zero gc.Node
		var old_x0 gc.Node
		savex(x86.REG_X0, &vec_zero, &old_x0, nil, gc.Types[gc.TFLOAT64])
		gins(x86.AXORPS, &vec_zero, &vec_zero)
		if di := dzDI(w); di != 0 {
			gconreg(addptr, di, x86.REG_DI)
		}
		p := gins(obj.ADUFFZERO, nil, nil)
		p.To.Type = obj.TYPE_ADDR
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
		p.To.Offset = dzOff(w)
		if w%16 != 0 {
			n1.Op = gc.OINDREG
			n1.Xoffset -= 16 - w%16
			gins(x86.AMOVUPS, &vec_zero, &n1)
		}
		restx(&vec_zero, &old_x0)
		restx(&n1, &oldn1)
		return
	}
	// NOTE: Must use agen, not igen, so that optimizer sees address
	// being taken. We are not writing on field boundaries.
	var n1 gc.Node
	gc.Agenr(nl, &n1, nil)
	n1.Op = gc.OINDREG
	clearfat_tail(&n1, w)
	gc.Regfree(&n1)
}
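This version of clearfat picks one of three zeroing strategies by size. A sketch of just the dispatch, with gc.Nacl and isPlan9 folded into a single flag (hypothetical helper mirroring the conditions above):

package main

import "fmt"

// clearfatStrategy mirrors the size tiers in the clearfat example above.
func clearfatStrategy(w int64, restricted bool) string {
	switch {
	case w > 1024 || (w >= 64 && restricted):
		return "REP STOSQ plus clearfat_tail for the last w%8 bytes"
	case w >= 64:
		return "DUFFZERO plus a MOVUPS for the unaligned tail"
	default:
		return "unrolled stores via clearfat_tail"
	}
}

func main() {
	fmt.Println(clearfatStrategy(4096, false))
	fmt.Println(clearfatStrategy(256, false))
	fmt.Println(clearfatStrategy(24, false))
}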
Example 6: blockcopy
// blockcopy copies w bytes from &n to &res
func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
	var dst gc.Node
	var src gc.Node
	if n.Ullman >= res.Ullman {
		gc.Agenr(n, &dst, res) // temporarily use dst
		gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
		gins(s390x.AMOVD, &dst, &src)
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agen(res, &dst)
	} else {
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agenr(res, &dst, res)
		gc.Agenr(n, &src, nil)
	}
	defer gc.Regfree(&src)
	defer gc.Regfree(&dst)
	var tmp gc.Node
	gc.Regalloc(&tmp, gc.Types[gc.Tptr], nil)
	defer gc.Regfree(&tmp)
	offset := int64(0)
	dir := _FORWARDS
	if osrc < odst && odst < osrc+w {
		// Reverse. Can't use MVC, fall back onto basic moves.
		dir = _BACKWARDS
		const copiesPerIter = 2
		if w >= 8*copiesPerIter {
			cnt := w - (w % (8 * copiesPerIter))
			ginscon(s390x.AADD, w, &src)
			ginscon(s390x.AADD, w, &dst)
			var end gc.Node
			gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
			p := gins(s390x.ASUB, nil, &end)
			p.From.Type = obj.TYPE_CONST
			p.From.Offset = cnt
			p.Reg = src.Reg
			var label *obj.Prog
			for i := 0; i < copiesPerIter; i++ {
				offset := int64(-8 * (i + 1))
				p := gins(s390x.AMOVD, &src, &tmp)
				p.From.Type = obj.TYPE_MEM
				p.From.Offset = offset
				if i == 0 {
					label = p
				}
				p = gins(s390x.AMOVD, &tmp, &dst)
				p.To.Type = obj.TYPE_MEM
				p.To.Offset = offset
			}
			ginscon(s390x.ASUB, 8*copiesPerIter, &src)
			ginscon(s390x.ASUB, 8*copiesPerIter, &dst)
			gins(s390x.ACMP, &src, &end)
			gc.Patch(gc.Gbranch(s390x.ABNE, nil, 0), label)
			gc.Regfree(&end)
			w -= cnt
		} else {
			offset = w
		}
	}
	if dir == _FORWARDS && w > 1024 {
		// Loop over MVCs
		cnt := w - (w % 256)
		var end gc.Node
		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
		add := gins(s390x.AADD, nil, &end)
		add.From.Type = obj.TYPE_CONST
		add.From.Offset = cnt
		add.Reg = src.Reg
		mvc := gins(s390x.AMVC, &src, &dst)
		mvc.From.Type = obj.TYPE_MEM
		mvc.From.Offset = 0
		mvc.To.Type = obj.TYPE_MEM
		mvc.To.Offset = 0
		mvc.From3 = new(obj.Addr)
		mvc.From3.Type = obj.TYPE_CONST
		mvc.From3.Offset = 256
		ginscon(s390x.AADD, 256, &src)
		ginscon(s390x.AADD, 256, &dst)
		gins(s390x.ACMP, &src, &end)
		gc.Patch(gc.Gbranch(s390x.ABNE, nil, 0), mvc)
		gc.Regfree(&end)
		w -= cnt
	}
	for w > 0 {
//......... the rest of this code is omitted here .........
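In the forward case above, copies larger than 1024 bytes are bulk-handled by a loop of 256-byte MVC instructions, and whatever is left falls through to the final "for w > 0" loop that the listing truncates. A small sketch of that split (hypothetical helper, not part of cmd/compile):

package main

import "fmt"

// mvcChunks mirrors the forward-copy split in the s390x blockcopy above:
// it returns how many bytes the 256-byte MVC loop covers and how many
// bytes remain for the tail loop.
func mvcChunks(w int64) (loopBytes, tail int64) {
	if w > 1024 {
		loopBytes = w - w%256
		w -= loopBytes
	}
	return loopBytes, w
}

func main() {
	fmt.Println(mvcChunks(3000)) // 2816 bytes in the MVC loop, 184 for the tail
	fmt.Println(mvcChunks(600))  // 0 in the loop, all 600 handled by the tail
}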