This page collects typical usage examples of Node.Op from the Go package cmd/compile/internal/gc, drawn from real code. If you are wondering what Node.Op is for or how to use it, the selected examples below should help; for more context you can also look at the enclosing type, cmd/compile/internal/gc.Node.
Seven code examples of Node.Op are shown below, sorted by popularity by default.
Example 1: blockcopy
func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
	// determine alignment.
	// want to avoid unaligned access, so have to use
	// smaller operations for less aligned types.
	// for example moving [4]byte must use 4 MOVB not 1 MOVW.
	align := int(n.Type.Align)

	var op int
	switch align {
	default:
		gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)

	case 1:
		op = arm.AMOVB

	case 2:
		op = arm.AMOVH

	case 4:
		op = arm.AMOVW
	}

	if w%int64(align) != 0 {
		gc.Fatalf("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type)
	}
	c := int32(w / int64(align))

	if osrc%int64(align) != 0 || odst%int64(align) != 0 {
		gc.Fatalf("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
	}

	// if we are copying forward on the stack and
	// the src and dst overlap, then reverse direction
	dir := align
	if osrc < odst && odst < osrc+w {
		dir = -dir
	}

	if op == arm.AMOVW && !gc.Nacl && dir > 0 && c >= 4 && c <= 128 {
		var r0 gc.Node
		r0.Op = gc.OREGISTER
		r0.Reg = arm.REG_R0
		var r1 gc.Node
		r1.Op = gc.OREGISTER
		r1.Reg = arm.REG_R0 + 1
		var r2 gc.Node
		r2.Op = gc.OREGISTER
		r2.Reg = arm.REG_R0 + 2

		var src gc.Node
		gc.Regalloc(&src, gc.Types[gc.Tptr], &r1)
		var dst gc.Node
		gc.Regalloc(&dst, gc.Types[gc.Tptr], &r2)
		if n.Ullman >= res.Ullman {
			// eval n first
			gc.Agen(n, &src)

			if res.Op == gc.ONAME {
				gc.Gvardef(res)
			}
			gc.Agen(res, &dst)
		} else {
			// eval res first
			if res.Op == gc.ONAME {
				gc.Gvardef(res)
			}
			gc.Agen(res, &dst)
			gc.Agen(n, &src)
		}

		var tmp gc.Node
		gc.Regalloc(&tmp, gc.Types[gc.Tptr], &r0)
		f := gc.Sysfunc("duffcopy")
		p := gins(obj.ADUFFCOPY, nil, f)
		gc.Afunclit(&p.To, f)

		// 8 and 128 = magic constants: see ../../runtime/asm_arm.s
		p.To.Offset = 8 * (128 - int64(c))

		gc.Regfree(&tmp)
		gc.Regfree(&src)
		gc.Regfree(&dst)
		return
	}

	var dst gc.Node
	var src gc.Node
	if n.Ullman >= res.Ullman {
		gc.Agenr(n, &dst, res) // temporarily use dst
		gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
		gins(arm.AMOVW, &dst, &src)
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agen(res, &dst)
	} else {
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agenr(res, &dst, res)
	// ......... the rest of this example's code is omitted .........
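
The ADUFFCOPY branch above enters runtime·duffcopy at a computed offset rather than at its start. Below is a minimal standalone sketch of that arithmetic, assuming (as the "8 and 128" comment indicates) that the ARM duffcopy body consists of 128 copy units of 8 bytes of machine code each; the helper name duffcopyOffsetARM is mine, not the compiler's:

package main

import "fmt"

// duffcopyOffsetARM returns the branch offset into runtime·duffcopy for
// copying c 4-byte words on ARM: entering at 8*(128-c) skips 128-c of the
// 8-byte copy units, so exactly c units execute before the final RET.
func duffcopyOffsetARM(c int64) int64 {
	if c < 1 || c > 128 {
		panic("word count out of duffcopy range")
	}
	return 8 * (128 - c)
}

func main() {
	fmt.Println(duffcopyOffsetARM(128)) // 0: enter at the top, copy 128 words
	fmt.Println(duffcopyOffsetARM(4))   // 992: skip 124 units, copy the last 4
}

This is why the code guards the fast path with c >= 4 && c <= 128: outside that range the offset would fall outside the duffcopy body.
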
Example 2: clearfat
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		gc.Dump("\nclearfat", nl)
	}

	w := nl.Type.Width

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	c := w % 8 // bytes
	q := w / 8 // quads

	if q < 4 {
		// Write sequence of MOV 0, off(base) instead of using STOSQ.
		// The hope is that although the code will be slightly longer,
		// the MOVs will have no dependencies and pipeline better
		// than the unrolled STOSQ loop.
		// NOTE: Must use agen, not igen, so that optimizer sees address
		// being taken. We are not writing on field boundaries.
		var n1 gc.Node
		gc.Agenr(nl, &n1, nil)

		n1.Op = gc.OINDREG
		var z gc.Node
		gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
		for {
			tmp14 := q
			q--
			if tmp14 <= 0 {
				break
			}
			n1.Type = z.Type
			gins(x86.AMOVQ, &z, &n1)
			n1.Xoffset += 8
		}

		if c >= 4 {
			gc.Nodconst(&z, gc.Types[gc.TUINT32], 0)
			n1.Type = z.Type
			gins(x86.AMOVL, &z, &n1)
			n1.Xoffset += 4
			c -= 4
		}

		gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
		for {
			tmp15 := c
			c--
			if tmp15 <= 0 {
				break
			}
			n1.Type = z.Type
			gins(x86.AMOVB, &z, &n1)
			n1.Xoffset++
		}

		gc.Regfree(&n1)
		return
	}

	var oldn1 gc.Node
	var n1 gc.Node
	savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
	gc.Agen(nl, &n1)

	var ax gc.Node
	var oldax gc.Node
	savex(x86.REG_AX, &ax, &oldax, nil, gc.Types[gc.Tptr])
	gconreg(x86.AMOVL, 0, x86.REG_AX)

	if q > 128 || gc.Nacl {
		gconreg(movptr, q, x86.REG_CX)
		gins(x86.AREP, nil, nil)   // repeat
		gins(x86.ASTOSQ, nil, nil) // STOQ AL,*(DI)+
	} else {
		if di := dzDI(q); di != 0 {
			gconreg(addptr, di, x86.REG_DI)
		}
		p := gins(obj.ADUFFZERO, nil, nil)
		p.To.Type = obj.TYPE_ADDR
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
		p.To.Offset = dzOff(q)
	}

	z := ax
	di := n1
	if w >= 8 && c >= 4 {
		di.Op = gc.OINDREG
		z.Type = gc.Types[gc.TINT64]
		di.Type = z.Type
		p := gins(x86.AMOVQ, &z, &di)
		p.To.Scale = 1
		p.To.Offset = c - 8
	} else if c >= 4 {
		di.Op = gc.OINDREG
		z.Type = gc.Types[gc.TINT32]
	// ......... the rest of this example's code is omitted .........
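
Before choosing a strategy, clearfat splits the width into 8-byte quads and a byte tail, which the unrolled path above then clears with AMOVQ stores, at most one AMOVL, and AMOVB stores. Here is a standalone sketch of just that arithmetic (storePlan is a hypothetical name, not a compiler function):

package main

import "fmt"

// storePlan mirrors the split at the top of clearfat: w bytes become w/8
// quad stores, then one 4-byte store if at least 4 bytes remain, then
// single-byte stores for whatever is left.
func storePlan(w int64) (movq, movl, movb int64) {
	movq = w / 8
	rest := w % 8
	movl = rest / 4
	movb = rest % 4
	return
}

func main() {
	q, l, b := storePlan(23)
	fmt.Printf("23 bytes -> %d MOVQ, %d MOVL, %d MOVB\n", q, l, b) // 2, 1, 3
}
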
Example 3: clearfat
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		gc.Dump("\nclearfat", nl)
	}

	w := uint32(nl.Type.Width)

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	c := w % 4 // bytes
	q := w / 4 // quads

	if q < 4 {
		// Write sequence of MOV 0, off(base) instead of using STOSL.
		// The hope is that although the code will be slightly longer,
		// the MOVs will have no dependencies and pipeline better
		// than the unrolled STOSL loop.
		// NOTE: Must use agen, not igen, so that optimizer sees address
		// being taken. We are not writing on field boundaries.
		var n1 gc.Node
		gc.Regalloc(&n1, gc.Types[gc.Tptr], nil)

		gc.Agen(nl, &n1)
		n1.Op = gc.OINDREG
		var z gc.Node
		gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
		for ; q > 0; q-- {
			n1.Type = z.Type
			gins(x86.AMOVL, &z, &n1)
			n1.Xoffset += 4
		}

		gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
		for ; c > 0; c-- {
			n1.Type = z.Type
			gins(x86.AMOVB, &z, &n1)
			n1.Xoffset++
		}

		gc.Regfree(&n1)
		return
	}

	var n1 gc.Node
	gc.Nodreg(&n1, gc.Types[gc.Tptr], x86.REG_DI)
	gc.Agen(nl, &n1)
	gconreg(x86.AMOVL, 0, x86.REG_AX)

	if q > 128 || (q >= 4 && gc.Nacl) {
		gconreg(x86.AMOVL, int64(q), x86.REG_CX)
		gins(x86.AREP, nil, nil)   // repeat
		gins(x86.ASTOSL, nil, nil) // STOL AL,*(DI)+
	} else if q >= 4 {
		p := gins(obj.ADUFFZERO, nil, nil)
		p.To.Type = obj.TYPE_ADDR
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))

		// 1 and 128 = magic constants: see ../../runtime/asm_386.s
		p.To.Offset = 1 * (128 - int64(q))
	} else {
		for q > 0 {
			gins(x86.ASTOSL, nil, nil) // STOL AL,*(DI)+
			q--
		}
	}

	for c > 0 {
		gins(x86.ASTOSB, nil, nil) // STOB AL,*(DI)+
		c--
	}
}
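
The "1 and 128" magic constants above encode the layout of runtime·duffzero on 386: jumping 128-q bytes into the symbol leaves q stores to execute, assuming each STOSL unit occupies exactly one byte of code (which is what the factor 1 expresses). A toy sketch under that assumption, with duffzeroOffset386 as my name for it:

package main

import "fmt"

// duffzeroOffset386 reproduces p.To.Offset = 1 * (128 - q): skipping the
// first 128-q one-byte STOSL instructions leaves exactly q of them to run.
func duffzeroOffset386(q int64) int64 {
	return 1 * (128 - q)
}

func main() {
	fmt.Println(duffzeroOffset386(4)) // 124: four STOSLs zero 16 bytes
}
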
Example 4: clearfat
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		gc.Dump("\nclearfat", nl)
	}

	w := uint32(nl.Type.Width)

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	c := w % 4 // bytes
	q := w / 4 // quads

	var r0 gc.Node
	r0.Op = gc.OREGISTER
	r0.Reg = arm.REG_R0
	var r1 gc.Node
	r1.Op = gc.OREGISTER
	r1.Reg = arm.REG_R1
	var dst gc.Node
	gc.Regalloc(&dst, gc.Types[gc.Tptr], &r1)
	gc.Agen(nl, &dst)
	var nc gc.Node
	gc.Nodconst(&nc, gc.Types[gc.TUINT32], 0)
	var nz gc.Node
	gc.Regalloc(&nz, gc.Types[gc.TUINT32], &r0)
	gc.Cgen(&nc, &nz)

	if q > 128 {
		var end gc.Node
		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
		p := gins(arm.AMOVW, &dst, &end)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = int64(q) * 4

		p = gins(arm.AMOVW, &nz, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 4
		p.Scond |= arm.C_PBIT
		pl := p

		p = gins(arm.ACMP, &dst, nil)
		raddr(&end, p)
		gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), pl)

		gc.Regfree(&end)
	} else if q >= 4 && !gc.Nacl {
		f := gc.Sysfunc("duffzero")
		p := gins(obj.ADUFFZERO, nil, f)
		gc.Afunclit(&p.To, f)

		// 4 and 128 = magic constants: see ../../runtime/asm_arm.s
		p.To.Offset = 4 * (128 - int64(q))
	} else {
		var p *obj.Prog
		for q > 0 {
			p = gins(arm.AMOVW, &nz, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = 4
			p.Scond |= arm.C_PBIT

			//print("1. %v\n", p);
			q--
		}
	}

	var p *obj.Prog
	for c > 0 {
		p = gins(arm.AMOVB, &nz, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 1
		p.Scond |= arm.C_PBIT

		//print("2. %v\n", p);
		c--
	}

	gc.Regfree(&dst)
	gc.Regfree(&nz)
}
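
Behaviorally, the ARM clearfat above amounts to word stores through a post-incremented pointer (the C_PBIT writeback on each MOVW/MOVB) followed by byte stores for the w%4 tail. A plain-Go sketch of that behavior, with no compiler machinery (clearFat here is an illustration, not the compiler function):

package main

import (
	"encoding/binary"
	"fmt"
)

// clearFat zeroes buf the way the generated code does: four bytes at a
// time through an advancing index, then one byte at a time for the tail.
func clearFat(buf []byte) {
	i := 0
	for ; i+4 <= len(buf); i += 4 {
		binary.LittleEndian.PutUint32(buf[i:], 0) // MOVW nz, 4(dst) with writeback
	}
	for ; i < len(buf); i++ {
		buf[i] = 0 // MOVB nz, 1(dst) with writeback
	}
}

func main() {
	b := []byte{1, 2, 3, 4, 5, 6, 7}
	clearFat(b)
	fmt.Println(b) // [0 0 0 0 0 0 0]
}
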
Example 5: blockcopy
func blockcopy(n, ns *gc.Node, osrc, odst, w int64) {
	var noddi gc.Node
	gc.Nodreg(&noddi, gc.Types[gc.Tptr], x86.REG_DI)
	var nodsi gc.Node
	gc.Nodreg(&nodsi, gc.Types[gc.Tptr], x86.REG_SI)

	var nodl gc.Node
	var nodr gc.Node
	if n.Ullman >= ns.Ullman {
		gc.Agenr(n, &nodr, &nodsi)
		if ns.Op == gc.ONAME {
			gc.Gvardef(ns)
		}
		gc.Agenr(ns, &nodl, &noddi)
	} else {
		if ns.Op == gc.ONAME {
			gc.Gvardef(ns)
		}
		gc.Agenr(ns, &nodl, &noddi)
		gc.Agenr(n, &nodr, &nodsi)
	}

	if nodl.Reg != x86.REG_DI {
		gmove(&nodl, &noddi)
	}
	if nodr.Reg != x86.REG_SI {
		gmove(&nodr, &nodsi)
	}
	gc.Regfree(&nodl)
	gc.Regfree(&nodr)

	c := w % 8 // bytes
	q := w / 8 // quads

	var oldcx gc.Node
	var cx gc.Node
	savex(x86.REG_CX, &cx, &oldcx, nil, gc.Types[gc.TINT64])

	// if we are copying forward on the stack and
	// the src and dst overlap, then reverse direction
	if osrc < odst && odst < osrc+w {
		// reverse direction
		gins(x86.ASTD, nil, nil) // set direction flag
		if c > 0 {
			gconreg(addptr, w-1, x86.REG_SI)
			gconreg(addptr, w-1, x86.REG_DI)

			gconreg(movptr, c, x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSB, nil, nil) // MOVB *(SI)-,*(DI)-
		}

		if q > 0 {
			if c > 0 {
				gconreg(addptr, -7, x86.REG_SI)
				gconreg(addptr, -7, x86.REG_DI)
			} else {
				gconreg(addptr, w-8, x86.REG_SI)
				gconreg(addptr, w-8, x86.REG_DI)
			}

			gconreg(movptr, q, x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)-,*(DI)-
		}

		// we leave with the flag clear
		gins(x86.ACLD, nil, nil)
	} else {
		// normal direction
		if q > 128 || (gc.Nacl && q >= 4) {
			gconreg(movptr, q, x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
		} else if q >= 4 {
			var oldx0 gc.Node
			var x0 gc.Node
			savex(x86.REG_X0, &x0, &oldx0, nil, gc.Types[gc.TFLOAT64])

			p := gins(obj.ADUFFCOPY, nil, nil)
			p.To.Type = obj.TYPE_ADDR
			p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))

			// 64 blocks taking 14 bytes each
			// see ../../../../runtime/mkduff.go
			p.To.Offset = 14 * (64 - q/2)
			restx(&x0, &oldx0)

			if q%2 != 0 {
				gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
			}
		} else if !gc.Nacl && c == 0 {
			// We don't need the MOVSQ side-effect of updating SI and DI,
			// and issuing a sequence of MOVQs directly is faster.
			nodsi.Op = gc.OINDREG
			noddi.Op = gc.OINDREG
			for q > 0 {
				gmove(&nodsi, &cx) // MOVQ x+(SI),CX
				gmove(&cx, &noddi) // MOVQ CX,x+(DI)
	// ......... the rest of this example's code is omitted .........
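
The comment in Example 5 pins down the amd64 duffcopy layout: 64 blocks of 14 bytes of code, each moving two quads. A standalone sketch of how the entry offset and the odd-quad fixup follow from that (duffcopyPlanAMD64 is a made-up helper name):

package main

import "fmt"

// duffcopyPlanAMD64 reproduces p.To.Offset = 14 * (64 - q/2): q quads need
// q/2 two-quad blocks, and when q is odd the code above emits one trailing
// MOVSQ to move the leftover quad.
func duffcopyPlanAMD64(q int64) (entryOffset int64, trailingMOVSQ bool) {
	return 14 * (64 - q/2), q%2 != 0
}

func main() {
	off, odd := duffcopyPlanAMD64(9)
	fmt.Println(off, odd) // 840 true: 4 blocks move 8 quads, MOVSQ moves the 9th
}
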
Example 6: clearfat
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		gc.Dump("\nclearfat", nl)
	}

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	w := nl.Type.Width

	if w > 1024 || (w >= 64 && (gc.Nacl || isPlan9)) {
		var oldn1 gc.Node
		var n1 gc.Node
		savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
		gc.Agen(nl, &n1)

		var ax gc.Node
		var oldax gc.Node
		savex(x86.REG_AX, &ax, &oldax, nil, gc.Types[gc.Tptr])
		gconreg(x86.AMOVL, 0, x86.REG_AX)
		gconreg(movptr, w/8, x86.REG_CX)

		gins(x86.AREP, nil, nil)   // repeat
		gins(x86.ASTOSQ, nil, nil) // STOQ AL,*(DI)+

		if w%8 != 0 {
			n1.Op = gc.OINDREG
			clearfat_tail(&n1, w%8)
		}

		restx(&n1, &oldn1)
		restx(&ax, &oldax)
		return
	}

	if w >= 64 {
		var oldn1 gc.Node
		var n1 gc.Node
		savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
		gc.Agen(nl, &n1)

		var vec_zero gc.Node
		var old_x0 gc.Node
		savex(x86.REG_X0, &vec_zero, &old_x0, nil, gc.Types[gc.TFLOAT64])
		gins(x86.AXORPS, &vec_zero, &vec_zero)

		if di := dzDI(w); di != 0 {
			gconreg(addptr, di, x86.REG_DI)
		}
		p := gins(obj.ADUFFZERO, nil, nil)
		p.To.Type = obj.TYPE_ADDR
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
		p.To.Offset = dzOff(w)

		if w%16 != 0 {
			n1.Op = gc.OINDREG
			n1.Xoffset -= 16 - w%16
			gins(x86.AMOVUPS, &vec_zero, &n1)
		}

		restx(&vec_zero, &old_x0)
		restx(&n1, &oldn1)
		return
	}

	// NOTE: Must use agen, not igen, so that optimizer sees address
	// being taken. We are not writing on field boundaries.
	var n1 gc.Node
	gc.Agenr(nl, &n1, nil)

	n1.Op = gc.OINDREG
	clearfat_tail(&n1, w)
	gc.Regfree(&n1)
}
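
Example 6 dispatches on width alone: REP STOSQ for very large objects (or on Nacl/Plan 9, where duffzero is avoided), Duff's-device zeroing through an SSE register for mid-sized ones, and unrolled stores otherwise. A condensed restatement of that decision as ordinary Go, with thresholds copied from the code above (zeroStrategy is illustrative only, and the bool folds gc.Nacl and isPlan9 together as the original condition does):

package main

import "fmt"

// zeroStrategy names the zeroing path Example 6 would pick for a width w.
func zeroStrategy(w int64, naclOrPlan9 bool) string {
	switch {
	case w > 1024 || (w >= 64 && naclOrPlan9):
		return "REP STOSQ, then clearfat_tail for the w%8 bytes"
	case w >= 64:
		return "SSE duffzero, then one MOVUPS for the unaligned tail"
	default:
		return "unrolled stores via clearfat_tail"
	}
}

func main() {
	fmt.Println(zeroStrategy(100, false))  // SSE duffzero path
	fmt.Println(zeroStrategy(4096, false)) // REP STOSQ path
}
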
Example 7: blockcopy
func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
	var dst gc.Node
	gc.Nodreg(&dst, gc.Types[gc.Tptr], x86.REG_DI)
	var src gc.Node
	gc.Nodreg(&src, gc.Types[gc.Tptr], x86.REG_SI)

	var tsrc gc.Node
	gc.Tempname(&tsrc, gc.Types[gc.Tptr])
	var tdst gc.Node
	gc.Tempname(&tdst, gc.Types[gc.Tptr])
	if !n.Addable {
		gc.Agen(n, &tsrc)
	}
	if !res.Addable {
		gc.Agen(res, &tdst)
	}
	if n.Addable {
		gc.Agen(n, &src)
	} else {
		gmove(&tsrc, &src)
	}

	if res.Op == gc.ONAME {
		gc.Gvardef(res)
	}

	if res.Addable {
		gc.Agen(res, &dst)
	} else {
		gmove(&tdst, &dst)
	}

	c := int32(w % 4) // bytes
	q := int32(w / 4) // doublewords

	// if we are copying forward on the stack and
	// the src and dst overlap, then reverse direction
	if osrc < odst && odst < osrc+w {
		// reverse direction
		gins(x86.ASTD, nil, nil) // set direction flag
		if c > 0 {
			gconreg(x86.AADDL, w-1, x86.REG_SI)
			gconreg(x86.AADDL, w-1, x86.REG_DI)

			gconreg(x86.AMOVL, int64(c), x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSB, nil, nil) // MOVB *(SI)-,*(DI)-
		}

		if q > 0 {
			if c > 0 {
				gconreg(x86.AADDL, -3, x86.REG_SI)
				gconreg(x86.AADDL, -3, x86.REG_DI)
			} else {
				gconreg(x86.AADDL, w-4, x86.REG_SI)
				gconreg(x86.AADDL, w-4, x86.REG_DI)
			}

			gconreg(x86.AMOVL, int64(q), x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSL, nil, nil) // MOVL *(SI)-,*(DI)-
		}

		// we leave with the flag clear
		gins(x86.ACLD, nil, nil)
	} else {
		gins(x86.ACLD, nil, nil) // paranoia. TODO(rsc): remove?

		// normal direction
		if q > 128 || (q >= 4 && gc.Nacl) {
			gconreg(x86.AMOVL, int64(q), x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+
		} else if q >= 4 {
			p := gins(obj.ADUFFCOPY, nil, nil)
			p.To.Type = obj.TYPE_ADDR
			p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))

			// 10 and 128 = magic constants: see ../../runtime/asm_386.s
			p.To.Offset = 10 * (128 - int64(q))
		} else if !gc.Nacl && c == 0 {
			var cx gc.Node
			gc.Nodreg(&cx, gc.Types[gc.TINT32], x86.REG_CX)

			// We don't need the MOVSL side-effect of updating SI and DI,
			// and issuing a sequence of MOVLs directly is faster.
			src.Op = gc.OINDREG
			dst.Op = gc.OINDREG
			for q > 0 {
				gmove(&src, &cx) // MOVL x+(SI),CX
				gmove(&cx, &dst) // MOVL CX,x+(DI)
				src.Xoffset += 4
				dst.Xoffset += 4
				q--
			}
		} else {
			for q > 0 {
				gins(x86.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+
				q--
	// ......... the rest of this example's code is omitted .........
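
All of the blockcopy examples guard their forward-copy fast paths with the same overlap test: reverse direction only when the destination starts strictly inside the source range. A standalone illustration (mustCopyBackward is my name for the predicate):

package main

import "fmt"

// mustCopyBackward reports whether a forward copy of w bytes from stack
// offset osrc to odst would clobber not-yet-copied source bytes, i.e.
// whether odst lies strictly inside [osrc, osrc+w).
func mustCopyBackward(osrc, odst, w int64) bool {
	return osrc < odst && odst < osrc+w
}

func main() {
	fmt.Println(mustCopyBackward(0, 8, 16))  // true: dst starts inside src
	fmt.Println(mustCopyBackward(0, 16, 16)) // false: regions only touch
	fmt.Println(mustCopyBackward(8, 0, 16))  // false: dst below src copies safely forward
}
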