This article collects typical usage examples of the Gvardef function from the Go package cmd/compile/internal/gc. If you are wondering what Gvardef does, how to call it, or what real uses look like, the curated code examples below may help.
The following shows 11 code examples of the Gvardef function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
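Before the examples, a quick orientation. In these back ends, Gvardef emits a VARDEF pseudo-instruction for a named stack variable (an ONAME node) that is about to be overwritten wholesale, so liveness analysis can treat its previous contents as dead. The fragment below is a minimal sketch of the pattern the examples repeat, not a complete code generator; the surrounding helpers (Agen, the dst node) are assumed to be set up as in the examples.

// Minimal sketch of the recurring pattern: mark a named destination as
// freshly defined before materializing its address and copying into it.
if res.Op == gc.ONAME {
	gc.Gvardef(res) // tell liveness analysis the old value of res is dead
}
gc.Agen(res, &dst) // then take res's address and generate the block copy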
Example 1: blockcopy
func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
// determine alignment.
// want to avoid unaligned access, so have to use
// smaller operations for less aligned types.
// for example moving [4]byte must use 4 MOVB not 1 MOVW.
align := int(n.Type.Align)
var op int
switch align {
default:
gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)
case 1:
op = arm.AMOVB
case 2:
op = arm.AMOVH
case 4:
op = arm.AMOVW
}
if w%int64(align) != 0 {
gc.Fatalf("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type)
}
c := int32(w / int64(align))
if osrc%int64(align) != 0 || odst%int64(align) != 0 {
gc.Fatalf("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
}
// if we are copying forward on the stack and
// the src and dst overlap, then reverse direction
dir := align
if osrc < odst && int64(odst) < int64(osrc)+w {
dir = -dir
}
if op == arm.AMOVW && !gc.Nacl && dir > 0 && c >= 4 && c <= 128 {
var r0 gc.Node
r0.Op = gc.OREGISTER
r0.Reg = arm.REG_R0
var r1 gc.Node
r1.Op = gc.OREGISTER
r1.Reg = arm.REG_R0 + 1
var r2 gc.Node
r2.Op = gc.OREGISTER
r2.Reg = arm.REG_R0 + 2
var src gc.Node
gc.Regalloc(&src, gc.Types[gc.Tptr], &r1)
var dst gc.Node
gc.Regalloc(&dst, gc.Types[gc.Tptr], &r2)
if n.Ullman >= res.Ullman {
// eval n first
gc.Agen(n, &src)
if res.Op == gc.ONAME {
gc.Gvardef(res)
}
gc.Agen(res, &dst)
} else {
// eval res first
if res.Op == gc.ONAME {
gc.Gvardef(res)
}
gc.Agen(res, &dst)
gc.Agen(n, &src)
}
var tmp gc.Node
gc.Regalloc(&tmp, gc.Types[gc.Tptr], &r0)
f := gc.Sysfunc("duffcopy")
p := gins(obj.ADUFFCOPY, nil, f)
gc.Afunclit(&p.To, f)
// 8 and 128 = magic constants: see ../../runtime/asm_arm.s
p.To.Offset = 8 * (128 - int64(c))
gc.Regfree(&tmp)
gc.Regfree(&src)
gc.Regfree(&dst)
return
}
var dst gc.Node
var src gc.Node
if n.Ullman >= res.Ullman {
gc.Agenr(n, &dst, res) // temporarily use dst
gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
gins(arm.AMOVW, &dst, &src)
if res.Op == gc.ONAME {
gc.Gvardef(res)
}
gc.Agen(res, &dst)
} else {
if res.Op == gc.ONAME {
gc.Gvardef(res)
}
gc.Agenr(res, &dst, res)
// ... (part of the code omitted here) ...
Example 2: ssaGenValue
// ... (part of the code omitted here) ...
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = arm.REG_R1
p.To.Offset = sz
p2 := gc.Prog(arm.ACMP)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = v.Args[1].Reg()
p2.Reg = arm.REG_R1
p3 := gc.Prog(arm.ABLE)
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
case ssa.OpARMLoweredMove:
// MOVW.P 4(R1), Rtmp
// MOVW.P Rtmp, 4(R2)
// CMP Rarg2, R1
// BLE -3(PC)
// arg2 is the address of the last element of src
// auxint is alignment
var sz int64
var mov obj.As
switch {
case v.AuxInt%4 == 0:
sz = 4
mov = arm.AMOVW
case v.AuxInt%2 == 0:
sz = 2
mov = arm.AMOVH
default:
sz = 1
mov = arm.AMOVB
}
p := gc.Prog(mov)
p.Scond = arm.C_PBIT
p.From.Type = obj.TYPE_MEM
p.From.Reg = arm.REG_R1
p.From.Offset = sz
p.To.Type = obj.TYPE_REG
p.To.Reg = arm.REGTMP
p2 := gc.Prog(mov)
p2.Scond = arm.C_PBIT
p2.From.Type = obj.TYPE_REG
p2.From.Reg = arm.REGTMP
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = arm.REG_R2
p2.To.Offset = sz
p3 := gc.Prog(arm.ACMP)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = v.Args[2].Reg()
p3.Reg = arm.REG_R1
p4 := gc.Prog(arm.ABLE)
p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p)
case ssa.OpVarDef:
gc.Gvardef(v.Aux.(*gc.Node))
case ssa.OpVarKill:
gc.Gvarkill(v.Aux.(*gc.Node))
case ssa.OpVarLive:
gc.Gvarlive(v.Aux.(*gc.Node))
case ssa.OpKeepAlive:
gc.KeepAlive(v)
case ssa.OpARMEqual,
ssa.OpARMNotEqual,
ssa.OpARMLessThan,
ssa.OpARMLessEqual,
ssa.OpARMGreaterThan,
ssa.OpARMGreaterEqual,
ssa.OpARMLessThanU,
ssa.OpARMLessEqualU,
ssa.OpARMGreaterThanU,
ssa.OpARMGreaterEqualU:
// generate boolean values
// use conditional move
p := gc.Prog(arm.AMOVW)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
p = gc.Prog(arm.AMOVW)
p.Scond = condBits[v.Op]
p.From.Type = obj.TYPE_CONST
p.From.Offset = 1
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpSelect0, ssa.OpSelect1:
// nothing to do
case ssa.OpARMLoweredGetClosurePtr:
// Closure pointer is R7 (arm.REGCTXT).
gc.CheckLoweredGetClosurePtr(v)
case ssa.OpARMFlagEQ,
ssa.OpARMFlagLT_ULT,
ssa.OpARMFlagLT_UGT,
ssa.OpARMFlagGT_ULT,
ssa.OpARMFlagGT_UGT:
v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
case ssa.OpARMInvertFlags:
v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
default:
v.Fatalf("genValue not implemented: %s", v.LongString())
}
}
Example 3: ssaGenValue
// ... (part of the code omitted here) ...
p.To.Reg = gc.SSARegNum(v)
case ssa.OpAMD64SETNEF:
p := gc.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG
p.To.Reg = gc.SSARegNum(v)
q := gc.Prog(x86.ASETPS)
q.To.Type = obj.TYPE_REG
q.To.Reg = x86.REG_AX
// ORL avoids partial register write and is smaller than ORQ, used by old compiler
opregreg(x86.AORL, gc.SSARegNum(v), x86.REG_AX)
case ssa.OpAMD64SETEQF:
p := gc.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG
p.To.Reg = gc.SSARegNum(v)
q := gc.Prog(x86.ASETPC)
q.To.Type = obj.TYPE_REG
q.To.Reg = x86.REG_AX
// ANDL avoids partial register write and is smaller than ANDQ, used by old compiler
opregreg(x86.AANDL, gc.SSARegNum(v), x86.REG_AX)
case ssa.OpAMD64InvertFlags:
v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
case ssa.OpAMD64FlagEQ, ssa.OpAMD64FlagLT_ULT, ssa.OpAMD64FlagLT_UGT, ssa.OpAMD64FlagGT_ULT, ssa.OpAMD64FlagGT_UGT:
v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
case ssa.OpAMD64REPSTOSQ:
gc.Prog(x86.AREP)
gc.Prog(x86.ASTOSQ)
case ssa.OpAMD64REPMOVSQ:
gc.Prog(x86.AREP)
gc.Prog(x86.AMOVSQ)
case ssa.OpVarDef:
gc.Gvardef(v.Aux.(*gc.Node))
case ssa.OpVarKill:
gc.Gvarkill(v.Aux.(*gc.Node))
case ssa.OpVarLive:
gc.Gvarlive(v.Aux.(*gc.Node))
case ssa.OpAMD64LoweredNilCheck:
// Optimization - if the subsequent block has a load or store
// at the same address, we don't need to issue this instruction.
mem := v.Args[1]
for _, w := range v.Block.Succs[0].Values {
if w.Op == ssa.OpPhi {
if w.Type.IsMemory() {
mem = w
}
continue
}
if len(w.Args) == 0 || !w.Args[len(w.Args)-1].Type.IsMemory() {
// w doesn't use a store - can't be a memory op.
continue
}
if w.Args[len(w.Args)-1] != mem {
v.Fatalf("wrong store after nilcheck v=%s w=%s", v, w)
}
switch w.Op {
case ssa.OpAMD64MOVQload, ssa.OpAMD64MOVLload, ssa.OpAMD64MOVWload, ssa.OpAMD64MOVBload,
ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore,
ssa.OpAMD64MOVBQSXload, ssa.OpAMD64MOVWQSXload, ssa.OpAMD64MOVLQSXload,
ssa.OpAMD64MOVSSload, ssa.OpAMD64MOVSDload, ssa.OpAMD64MOVOload,
ssa.OpAMD64MOVSSstore, ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVOstore:
if w.Args[0] == v.Args[0] && w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage {
if gc.Debug_checknil != 0 && int(v.Line) > 1 {
gc.Warnl(v.Line, "removed nil check")
}
Example 4: ssaGenValue
// ... (part of the code omitted here) ...
p.To.Reg = gc.SSARegNum(v)
case ssa.OpAMD64SETNEF:
p := gc.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG
p.To.Reg = gc.SSARegNum(v)
q := gc.Prog(x86.ASETPS)
q.To.Type = obj.TYPE_REG
q.To.Reg = x86.REG_AX
// ORL avoids partial register write and is smaller than ORQ, used by old compiler
opregreg(x86.AORL, gc.SSARegNum(v), x86.REG_AX)
case ssa.OpAMD64SETEQF:
p := gc.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG
p.To.Reg = gc.SSARegNum(v)
q := gc.Prog(x86.ASETPC)
q.To.Type = obj.TYPE_REG
q.To.Reg = x86.REG_AX
// ANDL avoids partial register write and is smaller than ANDQ, used by old compiler
opregreg(x86.AANDL, gc.SSARegNum(v), x86.REG_AX)
case ssa.OpAMD64InvertFlags:
v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
case ssa.OpAMD64FlagEQ, ssa.OpAMD64FlagLT_ULT, ssa.OpAMD64FlagLT_UGT, ssa.OpAMD64FlagGT_ULT, ssa.OpAMD64FlagGT_UGT:
v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
case ssa.OpAMD64REPSTOSQ:
gc.Prog(x86.AREP)
gc.Prog(x86.ASTOSQ)
case ssa.OpAMD64REPMOVSQ:
gc.Prog(x86.AREP)
gc.Prog(x86.AMOVSQ)
case ssa.OpVarDef:
gc.Gvardef(v.Aux.(*gc.Node))
case ssa.OpVarKill:
gc.Gvarkill(v.Aux.(*gc.Node))
case ssa.OpVarLive:
gc.Gvarlive(v.Aux.(*gc.Node))
case ssa.OpKeepAlive:
if !v.Args[0].Type.IsPtrShaped() {
v.Fatalf("keeping non-pointer alive %v", v.Args[0])
}
n, off := gc.AutoVar(v.Args[0])
if n == nil {
v.Fatalf("KeepLive with non-spilled value %s %s", v, v.Args[0])
}
if off != 0 {
v.Fatalf("KeepLive with non-zero offset spill location %s:%d", n, off)
}
gc.Gvarlive(n)
case ssa.OpAMD64LoweredNilCheck:
// Optimization - if the subsequent block has a load or store
// at the same address, we don't need to issue this instruction.
mem := v.Args[1]
for _, w := range v.Block.Succs[0].Block().Values {
if w.Op == ssa.OpPhi {
if w.Type.IsMemory() {
mem = w
}
continue
}
if len(w.Args) == 0 || !w.Args[len(w.Args)-1].Type.IsMemory() {
// w doesn't use a store - can't be a memory op.
continue
}
if w.Args[len(w.Args)-1] != mem {
Example 5: ssaGenValue
// ... (part of the code omitted here) ...
case ssa.OpMIPSLoweredAtomicCas:
// MOVW $0, Rout
// SYNC
// LL (Rarg0), Rtmp
// BNE Rtmp, Rarg1, 4(PC)
// MOVW Rarg2, Rout
// SC Rout, (Rarg0)
// BEQ Rout, -4(PC)
// SYNC
p := gc.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGZERO
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
gc.Prog(mips.ASYNC)
p1 := gc.Prog(mips.ALL)
p1.From.Type = obj.TYPE_MEM
p1.From.Reg = v.Args[0].Reg()
p1.To.Type = obj.TYPE_REG
p1.To.Reg = mips.REGTMP
p2 := gc.Prog(mips.ABNE)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = v.Args[1].Reg()
p2.Reg = mips.REGTMP
p2.To.Type = obj.TYPE_BRANCH
p3 := gc.Prog(mips.AMOVW)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = v.Args[2].Reg()
p3.To.Type = obj.TYPE_REG
p3.To.Reg = v.Reg0()
p4 := gc.Prog(mips.ASC)
p4.From.Type = obj.TYPE_REG
p4.From.Reg = v.Reg0()
p4.To.Type = obj.TYPE_MEM
p4.To.Reg = v.Args[0].Reg()
p5 := gc.Prog(mips.ABEQ)
p5.From.Type = obj.TYPE_REG
p5.From.Reg = v.Reg0()
p5.To.Type = obj.TYPE_BRANCH
gc.Patch(p5, p1)
gc.Prog(mips.ASYNC)
p6 := gc.Prog(obj.ANOP)
gc.Patch(p2, p6)
case ssa.OpVarDef:
gc.Gvardef(v.Aux.(*gc.Node))
case ssa.OpVarKill:
gc.Gvarkill(v.Aux.(*gc.Node))
case ssa.OpVarLive:
gc.Gvarlive(v.Aux.(*gc.Node))
case ssa.OpKeepAlive:
gc.KeepAlive(v)
case ssa.OpPhi:
gc.CheckLoweredPhi(v)
case ssa.OpMIPSLoweredNilCheck:
// Issue a load which will fault if arg is nil.
p := gc.Prog(mips.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REGTMP
if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
gc.Warnl(v.Line, "generated nil check")
}
case ssa.OpMIPSFPFlagTrue,
ssa.OpMIPSFPFlagFalse:
// MOVW $1, r
// CMOVF R0, r
cmov := mips.ACMOVF
if v.Op == ssa.OpMIPSFPFlagFalse {
cmov = mips.ACMOVT
}
p := gc.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 1
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
p1 := gc.Prog(cmov)
p1.From.Type = obj.TYPE_REG
p1.From.Reg = mips.REGZERO
p1.To.Type = obj.TYPE_REG
p1.To.Reg = v.Reg()
case ssa.OpMIPSLoweredGetClosurePtr:
// Closure pointer is R22 (mips.REGCTXT).
gc.CheckLoweredGetClosurePtr(v)
default:
v.Fatalf("genValue not implemented: %s", v.LongString())
}
}
Example 6: ssaGenValue
// ... (part of the code omitted here) ...
ssa.OpARM64MOVWload, ssa.OpARM64MOVWUload, ssa.OpARM64MOVDload,
ssa.OpARM64FMOVSload, ssa.OpARM64FMOVDload,
ssa.OpARM64MOVBstore, ssa.OpARM64MOVHstore, ssa.OpARM64MOVWstore, ssa.OpARM64MOVDstore,
ssa.OpARM64FMOVSstore, ssa.OpARM64FMOVDstore:
// arg0 is ptr, auxint is offset
if w.Args[0] == v.Args[0] && w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage {
if gc.Debug_checknil != 0 && int(v.Line) > 1 {
gc.Warnl(v.Line, "removed nil check")
}
return
}
case ssa.OpARM64DUFFZERO, ssa.OpARM64LoweredZero:
// arg0 is ptr
if w.Args[0] == v.Args[0] {
if gc.Debug_checknil != 0 && int(v.Line) > 1 {
gc.Warnl(v.Line, "removed nil check")
}
return
}
case ssa.OpARM64LoweredMove:
// arg0 is dst ptr, arg1 is src ptr
if w.Args[0] == v.Args[0] || w.Args[1] == v.Args[0] {
if gc.Debug_checknil != 0 && int(v.Line) > 1 {
gc.Warnl(v.Line, "removed nil check")
}
return
}
default:
}
if w.Type.IsMemory() {
if w.Op == ssa.OpVarDef || w.Op == ssa.OpVarKill || w.Op == ssa.OpVarLive {
// these ops are OK
mem = w
continue
}
// We can't delay the nil check past the next store.
break
}
}
// Issue a load which will fault if arg is nil.
p := gc.Prog(arm64.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = gc.SSARegNum(v.Args[0])
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = arm64.REGTMP
if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
gc.Warnl(v.Line, "generated nil check")
}
case ssa.OpVarDef:
gc.Gvardef(v.Aux.(*gc.Node))
case ssa.OpVarKill:
gc.Gvarkill(v.Aux.(*gc.Node))
case ssa.OpVarLive:
gc.Gvarlive(v.Aux.(*gc.Node))
case ssa.OpKeepAlive:
if !v.Args[0].Type.IsPtrShaped() {
v.Fatalf("keeping non-pointer alive %v", v.Args[0])
}
n, off := gc.AutoVar(v.Args[0])
if n == nil {
v.Fatalf("KeepLive with non-spilled value %s %s", v, v.Args[0])
}
if off != 0 {
v.Fatalf("KeepLive with non-zero offset spill location %s:%d", n, off)
}
gc.Gvarlive(n)
case ssa.OpARM64Equal,
ssa.OpARM64NotEqual,
ssa.OpARM64LessThan,
ssa.OpARM64LessEqual,
ssa.OpARM64GreaterThan,
ssa.OpARM64GreaterEqual,
ssa.OpARM64LessThanU,
ssa.OpARM64LessEqualU,
ssa.OpARM64GreaterThanU,
ssa.OpARM64GreaterEqualU:
// generate boolean values using CSET
p := gc.Prog(arm64.ACSET)
p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
p.From.Reg = condBits[v.Op]
p.To.Type = obj.TYPE_REG
p.To.Reg = gc.SSARegNum(v)
case ssa.OpSelect0, ssa.OpSelect1:
// nothing to do
case ssa.OpARM64LoweredGetClosurePtr:
// Closure pointer is R26 (arm64.REGCTXT).
gc.CheckLoweredGetClosurePtr(v)
case ssa.OpARM64FlagEQ,
ssa.OpARM64FlagLT_ULT,
ssa.OpARM64FlagLT_UGT,
ssa.OpARM64FlagGT_ULT,
ssa.OpARM64FlagGT_UGT:
v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
case ssa.OpARM64InvertFlags:
v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
default:
v.Unimplementedf("genValue not implemented: %s", v.LongString())
}
}
Example 7: blockcopy
func blockcopy(n, ns *gc.Node, osrc, odst, w int64) {
var noddi gc.Node
gc.Nodreg(&noddi, gc.Types[gc.Tptr], x86.REG_DI)
var nodsi gc.Node
gc.Nodreg(&nodsi, gc.Types[gc.Tptr], x86.REG_SI)
var nodl gc.Node
var nodr gc.Node
if n.Ullman >= ns.Ullman {
gc.Agenr(n, &nodr, &nodsi)
if ns.Op == gc.ONAME {
gc.Gvardef(ns)
}
gc.Agenr(ns, &nodl, &noddi)
} else {
if ns.Op == gc.ONAME {
gc.Gvardef(ns)
}
gc.Agenr(ns, &nodl, &noddi)
gc.Agenr(n, &nodr, &nodsi)
}
if nodl.Reg != x86.REG_DI {
gmove(&nodl, &noddi)
}
if nodr.Reg != x86.REG_SI {
gmove(&nodr, &nodsi)
}
gc.Regfree(&nodl)
gc.Regfree(&nodr)
c := w % 8 // bytes
q := w / 8 // quads
var oldcx gc.Node
var cx gc.Node
savex(x86.REG_CX, &cx, &oldcx, nil, gc.Types[gc.TINT64])
// if we are copying forward on the stack and
// the src and dst overlap, then reverse direction
if osrc < odst && odst < osrc+w {
// reverse direction
gins(x86.ASTD, nil, nil) // set direction flag
if c > 0 {
gconreg(addptr, w-1, x86.REG_SI)
gconreg(addptr, w-1, x86.REG_DI)
gconreg(movptr, c, x86.REG_CX)
gins(x86.AREP, nil, nil) // repeat
gins(x86.AMOVSB, nil, nil) // MOVB *(SI)-,*(DI)-
}
if q > 0 {
if c > 0 {
gconreg(addptr, -7, x86.REG_SI)
gconreg(addptr, -7, x86.REG_DI)
} else {
gconreg(addptr, w-8, x86.REG_SI)
gconreg(addptr, w-8, x86.REG_DI)
}
gconreg(movptr, q, x86.REG_CX)
gins(x86.AREP, nil, nil) // repeat
gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)-,*(DI)-
}
// we leave with the flag clear
gins(x86.ACLD, nil, nil)
} else {
// normal direction
if q > 128 || (gc.Nacl && q >= 4) {
gconreg(movptr, q, x86.REG_CX)
gins(x86.AREP, nil, nil) // repeat
gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
} else if q >= 4 {
var oldx0 gc.Node
var x0 gc.Node
savex(x86.REG_X0, &x0, &oldx0, nil, gc.Types[gc.TFLOAT64])
p := gins(obj.ADUFFCOPY, nil, nil)
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
// 64 blocks taking 14 bytes each
// see ../../../../runtime/mkduff.go
p.To.Offset = 14 * (64 - q/2)
restx(&x0, &oldx0)
if q%2 != 0 {
gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
}
} else if !gc.Nacl && c == 0 {
// We don't need the MOVSQ side-effect of updating SI and DI,
// and issuing a sequence of MOVQs directly is faster.
nodsi.Op = gc.OINDREG
noddi.Op = gc.OINDREG
for q > 0 {
gmove(&nodsi, &cx) // MOVQ x+(SI),CX
gmove(&cx, &noddi) // MOVQ CX,x+(DI)
// ... (part of the code omitted here) ...
Example 8: ssaGenValue
// ... (part of the code omitted here) ...
gc.Maxarg = v.AuxInt
}
case ssa.OpS390XFLOGR, ssa.OpS390XNEG, ssa.OpS390XNEGW,
ssa.OpS390XMOVWBR, ssa.OpS390XMOVDBR:
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpS390XNOT, ssa.OpS390XNOTW:
v.Fatalf("NOT/NOTW generated %s", v.LongString())
case ssa.OpS390XMOVDEQ, ssa.OpS390XMOVDNE,
ssa.OpS390XMOVDLT, ssa.OpS390XMOVDLE,
ssa.OpS390XMOVDGT, ssa.OpS390XMOVDGE,
ssa.OpS390XMOVDGTnoinv, ssa.OpS390XMOVDGEnoinv:
r := v.Reg()
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.OpS390XFSQRT:
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpSP, ssa.OpSB:
// nothing to do
case ssa.OpVarDef:
gc.Gvardef(v.Aux.(*gc.Node))
case ssa.OpVarKill:
gc.Gvarkill(v.Aux.(*gc.Node))
case ssa.OpVarLive:
gc.Gvarlive(v.Aux.(*gc.Node))
case ssa.OpKeepAlive:
gc.KeepAlive(v)
case ssa.OpS390XInvertFlags:
v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
case ssa.OpS390XFlagEQ, ssa.OpS390XFlagLT, ssa.OpS390XFlagGT:
v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
case ssa.OpS390XLoweredNilCheck:
// Issue a load which will fault if the input is nil.
p := gc.Prog(s390x.AMOVBZ)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = s390x.REGTMP
if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
gc.Warnl(v.Line, "generated nil check")
}
case ssa.OpS390XMVC:
vo := v.AuxValAndOff()
p := gc.Prog(s390x.AMVC)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[1].Reg()
p.From.Offset = vo.Off()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
p.To.Offset = vo.Off()
p.From3 = new(obj.Addr)
p.From3.Type = obj.TYPE_CONST
Example 9: blockcopy
// blockcopy copies w bytes from &n to &res
func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
var dst gc.Node
var src gc.Node
if n.Ullman >= res.Ullman {
gc.Agenr(n, &dst, res) // temporarily use dst
gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
gins(s390x.AMOVD, &dst, &src)
if res.Op == gc.ONAME {
gc.Gvardef(res)
}
gc.Agen(res, &dst)
} else {
if res.Op == gc.ONAME {
gc.Gvardef(res)
}
gc.Agenr(res, &dst, res)
gc.Agenr(n, &src, nil)
}
defer gc.Regfree(&src)
defer gc.Regfree(&dst)
var tmp gc.Node
gc.Regalloc(&tmp, gc.Types[gc.Tptr], nil)
defer gc.Regfree(&tmp)
offset := int64(0)
dir := _FORWARDS
if osrc < odst && odst < osrc+w {
// Reverse. Can't use MVC, fall back onto basic moves.
dir = _BACKWARDS
const copiesPerIter = 2
if w >= 8*copiesPerIter {
cnt := w - (w % (8 * copiesPerIter))
ginscon(s390x.AADD, w, &src)
ginscon(s390x.AADD, w, &dst)
var end gc.Node
gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
p := gins(s390x.ASUB, nil, &end)
p.From.Type = obj.TYPE_CONST
p.From.Offset = cnt
p.Reg = src.Reg
var label *obj.Prog
for i := 0; i < copiesPerIter; i++ {
offset := int64(-8 * (i + 1))
p := gins(s390x.AMOVD, &src, &tmp)
p.From.Type = obj.TYPE_MEM
p.From.Offset = offset
if i == 0 {
label = p
}
p = gins(s390x.AMOVD, &tmp, &dst)
p.To.Type = obj.TYPE_MEM
p.To.Offset = offset
}
ginscon(s390x.ASUB, 8*copiesPerIter, &src)
ginscon(s390x.ASUB, 8*copiesPerIter, &dst)
gins(s390x.ACMP, &src, &end)
gc.Patch(gc.Gbranch(s390x.ABNE, nil, 0), label)
gc.Regfree(&end)
w -= cnt
} else {
offset = w
}
}
if dir == _FORWARDS && w > 1024 {
// Loop over MVCs
cnt := w - (w % 256)
var end gc.Node
gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
add := gins(s390x.AADD, nil, &end)
add.From.Type = obj.TYPE_CONST
add.From.Offset = cnt
add.Reg = src.Reg
mvc := gins(s390x.AMVC, &src, &dst)
mvc.From.Type = obj.TYPE_MEM
mvc.From.Offset = 0
mvc.To.Type = obj.TYPE_MEM
mvc.To.Offset = 0
mvc.From3 = new(obj.Addr)
mvc.From3.Type = obj.TYPE_CONST
mvc.From3.Offset = 256
ginscon(s390x.AADD, 256, &src)
ginscon(s390x.AADD, 256, &dst)
gins(s390x.ACMP, &src, &end)
gc.Patch(gc.Gbranch(s390x.ABNE, nil, 0), mvc)
gc.Regfree(&end)
w -= cnt
}
for w > 0 {
// ... (part of the code omitted here) ...
Example 10: blockcopy
func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
var dst gc.Node
gc.Nodreg(&dst, gc.Types[gc.Tptr], x86.REG_DI)
var src gc.Node
gc.Nodreg(&src, gc.Types[gc.Tptr], x86.REG_SI)
var tsrc gc.Node
gc.Tempname(&tsrc, gc.Types[gc.Tptr])
var tdst gc.Node
gc.Tempname(&tdst, gc.Types[gc.Tptr])
if !n.Addable {
gc.Agen(n, &tsrc)
}
if !res.Addable {
gc.Agen(res, &tdst)
}
if n.Addable {
gc.Agen(n, &src)
} else {
gmove(&tsrc, &src)
}
if res.Op == gc.ONAME {
gc.Gvardef(res)
}
if res.Addable {
gc.Agen(res, &dst)
} else {
gmove(&tdst, &dst)
}
c := int32(w % 4) // bytes
q := int32(w / 4) // doublewords
// if we are copying forward on the stack and
// the src and dst overlap, then reverse direction
if osrc < odst && odst < osrc+w {
// reverse direction
gins(x86.ASTD, nil, nil) // set direction flag
if c > 0 {
gconreg(x86.AADDL, w-1, x86.REG_SI)
gconreg(x86.AADDL, w-1, x86.REG_DI)
gconreg(x86.AMOVL, int64(c), x86.REG_CX)
gins(x86.AREP, nil, nil) // repeat
gins(x86.AMOVSB, nil, nil) // MOVB *(SI)-,*(DI)-
}
if q > 0 {
if c > 0 {
gconreg(x86.AADDL, -3, x86.REG_SI)
gconreg(x86.AADDL, -3, x86.REG_DI)
} else {
gconreg(x86.AADDL, w-4, x86.REG_SI)
gconreg(x86.AADDL, w-4, x86.REG_DI)
}
gconreg(x86.AMOVL, int64(q), x86.REG_CX)
gins(x86.AREP, nil, nil) // repeat
gins(x86.AMOVSL, nil, nil) // MOVL *(SI)-,*(DI)-
}
// we leave with the flag clear
gins(x86.ACLD, nil, nil)
} else {
gins(x86.ACLD, nil, nil) // paranoia. TODO(rsc): remove?
// normal direction
if q > 128 || (q >= 4 && gc.Nacl) {
gconreg(x86.AMOVL, int64(q), x86.REG_CX)
gins(x86.AREP, nil, nil) // repeat
gins(x86.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+
} else if q >= 4 {
p := gins(obj.ADUFFCOPY, nil, nil)
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
// 10 and 128 = magic constants: see ../../runtime/asm_386.s
p.To.Offset = 10 * (128 - int64(q))
} else if !gc.Nacl && c == 0 {
var cx gc.Node
gc.Nodreg(&cx, gc.Types[gc.TINT32], x86.REG_CX)
// We don't need the MOVSL side-effect of updating SI and DI,
// and issuing a sequence of MOVLs directly is faster.
src.Op = gc.OINDREG
dst.Op = gc.OINDREG
for q > 0 {
gmove(&src, &cx) // MOVL x+(SI),CX
gmove(&cx, &dst) // MOVL CX,x+(DI)
src.Xoffset += 4
dst.Xoffset += 4
q--
}
} else {
for q > 0 {
gins(x86.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+
q--
// ... (part of the code omitted here) ...
Example 11: blockcopy
func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
// determine alignment.
// want to avoid unaligned access, so have to use
// smaller operations for less aligned types.
// for example moving [4]byte must use 4 MOVB not 1 MOVW.
align := int(n.Type.Align)
var op int
switch align {
default:
gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)
case 1:
op = mips.AMOVB
case 2:
op = mips.AMOVH
case 4:
op = mips.AMOVW
case 8:
op = mips.AMOVV
}
if w%int64(align) != 0 {
gc.Fatalf("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type)
}
c := int32(w / int64(align))
// if we are copying forward on the stack and
// the src and dst overlap, then reverse direction
dir := align
if osrc < odst && odst < osrc+w {
dir = -dir
}
var dst gc.Node
var src gc.Node
if n.Ullman >= res.Ullman {
gc.Agenr(n, &dst, res) // temporarily use dst
gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
gins(mips.AMOVV, &dst, &src)
if res.Op == gc.ONAME {
gc.Gvardef(res)
}
gc.Agen(res, &dst)
} else {
if res.Op == gc.ONAME {
gc.Gvardef(res)
}
gc.Agenr(res, &dst, res)
gc.Agenr(n, &src, nil)
}
var tmp gc.Node
gc.Regalloc(&tmp, gc.Types[gc.Tptr], nil)
// set up end marker
var nend gc.Node
// move src and dest to the end of block if necessary
if dir < 0 {
if c >= 4 {
gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
gins(mips.AMOVV, &src, &nend)
}
p := gins(mips.AADDV, nil, &src)
p.From.Type = obj.TYPE_CONST
p.From.Offset = w
p = gins(mips.AADDV, nil, &dst)
p.From.Type = obj.TYPE_CONST
p.From.Offset = w
} else {
p := gins(mips.AADDV, nil, &src)
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(-dir)
p = gins(mips.AADDV, nil, &dst)
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(-dir)
if c >= 4 {
gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
p := gins(mips.AMOVV, &src, &nend)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = w
}
}
// move
// TODO: enable duffcopy for larger copies.
if c >= 4 {
p := gins(op, &src, &tmp)
p.From.Type = obj.TYPE_MEM
p.From.Offset = int64(dir)
ploop := p
// ... (part of the code omitted here) ...