This page collects typical usage examples of the C++ method MachineBasicBlock::findDebugLoc. If you are looking for concrete examples of how MachineBasicBlock::findDebugLoc is called and what it is used for, the hand-picked code samples below should help. You can also browse further usage examples of its containing class, MachineBasicBlock.
The following shows 15 code examples of MachineBasicBlock::findDebugLoc, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
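Before the examples, here is a minimal sketch of the pattern that nearly all of them follow: call MBB.findDebugLoc(InsertPoint) to obtain a DebugLoc for the position where a new instruction will be inserted (it skips debug-value instructions and returns an unknown location at the end of the block), then pass that location to BuildMI. The helper function, the use of TargetOpcode::COPY, and the register names below are illustrative assumptions, not code taken from any of the examples; header paths also vary between LLVM versions.

// Assumed headers for this sketch (paths differ across LLVM releases).
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
using namespace llvm;

// Hypothetical helper: insert a COPY before I, reusing the debug location of
// the surrounding code so the new instruction maps to a sensible source line.
static void insertCopyBefore(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I,
                             const TargetInstrInfo *TII,
                             unsigned DstReg, unsigned SrcReg) {
  // findDebugLoc(I) returns the DebugLoc of the first non-debug instruction
  // at or after I, or an unknown location if there is none.
  DebugLoc DL = MBB.findDebugLoc(I);
  BuildMI(MBB, I, DL, TII->get(TargetOpcode::COPY), DstReg)
      .addReg(SrcReg, RegState::Kill);
}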
Example 1: LowerSI_INTERP
void SITargetLowering::LowerSI_INTERP(MachineInstr *MI, MachineBasicBlock &BB,
    MachineBasicBlock::iterator I, MachineRegisterInfo &MRI) const
{
  unsigned tmp = MRI.createVirtualRegister(&AMDIL::VReg_32RegClass);
  MachineOperand dst = MI->getOperand(0);
  MachineOperand iReg = MI->getOperand(1);
  MachineOperand jReg = MI->getOperand(2);
  MachineOperand attr_chan = MI->getOperand(3);
  MachineOperand attr = MI->getOperand(4);
  MachineOperand params = MI->getOperand(5);

  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::S_MOV_B32))
      .addReg(AMDIL::M0)
      .addOperand(params);

  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::V_INTERP_P1_F32), tmp)
      .addOperand(iReg)
      .addOperand(attr_chan)
      .addOperand(attr);

  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::V_INTERP_P2_F32))
      .addOperand(dst)
      .addReg(tmp)
      .addOperand(jReg)
      .addOperand(attr_chan)
      .addOperand(attr);

  MI->eraseFromParent();
}
Example 2: storeRegToStackSlot
void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned SrcReg, bool isKill,
                                      int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  SIMachineFunctionInfo *MFI = MBB.getParent()->getInfo<SIMachineFunctionInfo>();
  DebugLoc DL = MBB.findDebugLoc(MI);
  unsigned KillFlag = isKill ? RegState::Kill : 0;

  if (TRI->getCommonSubClass(RC, &AMDGPU::SGPR_32RegClass)) {
    unsigned Lane = MFI->SpillTracker.getNextLane(MRI);
    BuildMI(MBB, MI, DL, get(AMDGPU::V_WRITELANE_B32),
            MFI->SpillTracker.LaneVGPR)
        .addReg(SrcReg, KillFlag)
        .addImm(Lane);
    MFI->SpillTracker.addSpilledReg(FrameIndex, MFI->SpillTracker.LaneVGPR,
                                    Lane);
  } else {
    for (unsigned i = 0, e = RC->getSize() / 4; i != e; ++i) {
      unsigned SubReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
      BuildMI(MBB, MI, MBB.findDebugLoc(MI), get(AMDGPU::COPY), SubReg)
          .addReg(SrcReg, 0, RI.getSubRegFromChannel(i));
      storeRegToStackSlot(MBB, MI, SubReg, isKill, FrameIndex + i,
                          &AMDGPU::SReg_32RegClass, TRI);
    }
  }
}
Example 3: LowerSI_V_CNDLT
void SITargetLowering::LowerSI_V_CNDLT(MachineInstr *MI, MachineBasicBlock &BB,
    MachineBasicBlock::iterator I, MachineRegisterInfo &MRI) const
{
  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::V_CMP_LT_F32_e32))
      .addOperand(MI->getOperand(1))
      .addReg(AMDIL::SREG_LIT_0);

  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::V_CNDMASK_B32))
      .addOperand(MI->getOperand(0))
      .addOperand(MI->getOperand(2))
      .addOperand(MI->getOperand(3));

  MI->eraseFromParent();
}
Example 4: shouldUseShadow
bool Z80FrameLowering::restoreCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    std::vector<CalleeSavedInfo> &CSI,
    const TargetRegisterInfo *TRI) const {
  const MachineFunction &MF = *MBB.getParent();
  bool UseShadow = shouldUseShadow(MF);
  DebugLoc DL = MBB.findDebugLoc(MI);
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();

    // Non-index registers can be spilled to shadow registers.
    if (UseShadow && !Z80::I24RegClass.contains(Reg) &&
        !Z80::I16RegClass.contains(Reg))
      continue;

    MachineInstrBuilder MIB;
    if (Reg == Z80::AF)
      MIB = BuildMI(MBB, MI, DL, TII.get(Is24Bit ? Z80::POP24AF
                                                 : Z80::POP16AF));
    else
      MIB = BuildMI(MBB, MI, DL, TII.get(Is24Bit ? Z80::POP24r : Z80::POP16r),
                    Reg);
    MIB.setMIFlag(MachineInstr::FrameDestroy);
  }
  if (UseShadow)
    shadowCalleeSavedRegisters(MBB, MI, DL, MachineInstr::FrameDestroy, CSI);
  return true;
}
Example 5: BuildMI
void R600TargetLowering::lowerImplicitParameter(MachineInstr *MI, MachineBasicBlock &BB,
    MachineRegisterInfo &MRI, unsigned dword_offset) const
{
  MachineBasicBlock::iterator I = *MI;
  unsigned PtrReg = MRI.createVirtualRegister(&AMDGPU::R600_TReg32_XRegClass);
  MRI.setRegClass(MI->getOperand(0).getReg(), &AMDGPU::R600_TReg32_XRegClass);

  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::MOV), PtrReg)
      .addReg(AMDGPU::ALU_LITERAL_X)
      .addImm(dword_offset * 4);

  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::VTX_READ_PARAM_i32_eg))
      .addOperand(MI->getOperand(0))
      .addReg(PtrReg)
      .addImm(0);
}
Example 6: BuildMI
bool X86FrameInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator MI,
                                               const std::vector<CalleeSavedInfo> &CSI,
                                               const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return false;

  DebugLoc DL = MBB.findDebugLoc(MI);

  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
  unsigned FPReg = TRI->getFrameRegister(MF);
  bool isWin64 = STI.isTargetWin64();
  unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    if (Reg == FPReg)
      // X86RegisterInfo::emitEpilogue will handle restoring of frame register.
      continue;
    if (!X86::VR128RegClass.contains(Reg) && !isWin64) {
      BuildMI(MBB, MI, DL, TII.get(Opc), Reg);
    } else {
      const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
      TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(),
                               RC, TRI);
    }
  }
  return true;
}
Example 7: ClauseFile
ClauseFile
MakeFetchClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator &I)
    const {
  MachineBasicBlock::iterator ClauseHead = I;
  std::vector<MachineInstr *> ClauseContent;
  unsigned AluInstCount = 0;
  bool IsTex = TII->usesTextureCache(ClauseHead);
  std::set<unsigned> DstRegs, SrcRegs;
  for (MachineBasicBlock::iterator E = MBB.end(); I != E; ++I) {
    if (IsTrivialInst(I))
      continue;
    if (AluInstCount >= MaxFetchInst)
      break;
    if ((IsTex && !TII->usesTextureCache(I)) ||
        (!IsTex && !TII->usesVertexCache(I)))
      break;
    if (!isCompatibleWithClause(I, DstRegs, SrcRegs))
      break;
    AluInstCount++;
    ClauseContent.push_back(I);
  }
  MachineInstr *MIb = BuildMI(MBB, ClauseHead, MBB.findDebugLoc(ClauseHead),
                              getHWInstrDesc(IsTex ? CF_TC : CF_VC))
                          .addImm(0)                 // ADDR
                          .addImm(AluInstCount - 1); // COUNT
  return ClauseFile(MIb, ClauseContent);
}
Example 8: if
void
AArch64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MBBI,
                                      unsigned SrcReg, bool isKill,
                                      int FrameIdx,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBB.findDebugLoc(MBBI);
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FrameIdx);

  MachineMemOperand *MMO
    = MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
                              MachineMemOperand::MOStore,
                              MFI.getObjectSize(FrameIdx),
                              Align);

  unsigned StoreOp = 0;
  if (RC->hasType(MVT::i64) || RC->hasType(MVT::i32)) {
    switch (RC->getSize()) {
    case 4: StoreOp = AArch64::LS32_STR; break;
    case 8: StoreOp = AArch64::LS64_STR; break;
    default:
      llvm_unreachable("Unknown size for regclass");
    }
  } else if (RC->hasType(MVT::f32) || RC->hasType(MVT::f64) ||
             RC->hasType(MVT::f128)) {
    switch (RC->getSize()) {
    case 4: StoreOp = AArch64::LSFP32_STR; break;
    case 8: StoreOp = AArch64::LSFP64_STR; break;
    case 16: StoreOp = AArch64::LSFP128_STR; break;
    default:
      llvm_unreachable("Unknown size for regclass");
    }
  } else { // The spill of D tuples is implemented by Q tuples
    if (RC == &AArch64::QPairRegClass)
      StoreOp = AArch64::ST1x2_16B;
    else if (RC == &AArch64::QTripleRegClass)
      StoreOp = AArch64::ST1x3_16B;
    else if (RC == &AArch64::QQuadRegClass)
      StoreOp = AArch64::ST1x4_16B;
    else
      llvm_unreachable("Unknown reg class");

    MachineInstrBuilder NewMI = BuildMI(MBB, MBBI, DL, get(StoreOp));
    // Vector store has different operands from other store instructions.
    NewMI.addFrameIndex(FrameIdx)
         .addReg(SrcReg, getKillRegState(isKill))
         .addMemOperand(MMO);
    return;
  }

  MachineInstrBuilder NewMI = BuildMI(MBB, MBBI, DL, get(StoreOp));
  NewMI.addReg(SrcReg, getKillRegState(isKill))
       .addFrameIndex(FrameIdx)
       .addImm(0)
       .addMemOperand(MMO);
}
Example 9: loadRegFromStackSlot
void ARCInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator I,
                                        unsigned DestReg, int FrameIndex,
                                        const TargetRegisterClass *RC,
                                        const TargetRegisterInfo *TRI) const {
  DebugLoc dl = MBB.findDebugLoc(I);
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FrameIndex);

  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FrameIndex),
      MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIndex), Align);

  assert(MMO && "Couldn't get MachineMemOperand for store to stack.");
  assert(TRI->getSpillSize(*RC) == 4 &&
         "Only support 4-byte loads from stack now.");
  assert(ARC::GPR32RegClass.hasSubClassEq(RC) &&
         "Only support GPR32 stores to stack now.");
  DEBUG(dbgs() << "Created load reg=" << PrintReg(DestReg, TRI)
               << " from FrameIndex=" << FrameIndex << "\n");
  BuildMI(MBB, I, dl, get(ARC::LD_rs9))
      .addReg(DestReg, RegState::Define)
      .addFrameIndex(FrameIndex)
      .addImm(0)
      .addMemOperand(MMO);
}
Example 10: getOffsetOfLocalArea
void AArch64FrameLowering::emitCalleeSavedFrameMoves(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  if (CSI.empty())
    return;

  for (const auto &Info : CSI) {
    unsigned Reg = Info.getReg();
    int64_t Offset =
        MFI->getObjectOffset(Info.getFrameIdx()) - getOffsetOfLocalArea();
    unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
    unsigned CFIIndex = MMI.addFrameInst(
        MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
    BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex)
        .setMIFlags(MachineInstr::FrameSetup);
  }
}
Example 11: loadRegFromStackSlot
void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       unsigned DestReg, int FrameIndex,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  SIMachineFunctionInfo *MFI = MBB.getParent()->getInfo<SIMachineFunctionInfo>();
  DebugLoc DL = MBB.findDebugLoc(MI);

  if (TRI->getCommonSubClass(RC, &AMDGPU::SReg_32RegClass)) {
    SIMachineFunctionInfo::SpilledReg Spill =
        MFI->SpillTracker.getSpilledReg(FrameIndex);
    assert(Spill.VGPR);
    BuildMI(MBB, MI, DL, get(AMDGPU::V_READLANE_B32), DestReg)
        .addReg(Spill.VGPR)
        .addImm(Spill.Lane);
  } else {
    for (unsigned i = 0, e = RC->getSize() / 4; i != e; ++i) {
      unsigned Flags = RegState::Define;
      if (i == 0) {
        Flags |= RegState::Undef;
      }
      unsigned SubReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
      loadRegFromStackSlot(MBB, MI, SubReg, FrameIndex + i,
                           &AMDGPU::SReg_32RegClass, TRI);
      BuildMI(MBB, MI, DL, get(AMDGPU::COPY))
          .addReg(DestReg, Flags, RI.getSubRegFromChannel(i))
          .addReg(SubReg);
    }
  }
}
Example 12: LowerSI_INTERP_CONST
void SITargetLowering::LowerSI_INTERP_CONST(MachineInstr *MI,
    MachineBasicBlock &BB, MachineBasicBlock::iterator I) const
{
  MachineOperand dst = MI->getOperand(0);
  MachineOperand attr_chan = MI->getOperand(1);
  MachineOperand attr = MI->getOperand(2);
  MachineOperand params = MI->getOperand(3);

  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::S_MOV_B32))
      .addReg(AMDIL::M0)
      .addOperand(params);

  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::V_INTERP_MOV_F32))
      .addOperand(dst)
      .addOperand(attr_chan)
      .addOperand(attr);

  MI->eraseFromParent();
}
Example 13: BuildMI
MachineBasicBlock::iterator
ARCInstrInfo::loadImmediate(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MI, unsigned Reg,
                            uint64_t Value) const {
  DebugLoc dl = MBB.findDebugLoc(MI);
  if (isInt<12>(Value)) {
    return BuildMI(MBB, MI, dl, get(ARC::MOV_rs12), Reg)
        .addImm(Value)
        .getInstr();
  }
  llvm_unreachable("Need Arc long immediate instructions.");
}
Example 14: hasFP
void AArch64FrameLowering::emitCalleeSavedFrameMoves(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    unsigned FramePtr) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  if (CSI.empty())
    return;

  const DataLayout &TD = MF.getDataLayout();
  bool HasFP = hasFP(MF);

  // Calculate amount of bytes used for return address storing.
  int stackGrowth = -TD.getPointerSize(0);

  // Calculate offsets.
  int64_t saveAreaOffset = (HasFP ? 2 : 1) * stackGrowth;
  unsigned TotalSkipped = 0;

  for (const auto &Info : CSI) {
    unsigned Reg = Info.getReg();
    int64_t Offset = MFI->getObjectOffset(Info.getFrameIdx()) -
                     getOffsetOfLocalArea() + saveAreaOffset;

    // Don't output a new CFI directive if we're re-saving the frame pointer or
    // link register. This happens when the PrologEpilogInserter has inserted an
    // extra "STP" of the frame pointer and link register -- the "emitPrologue"
    // method automatically generates the directives when frame pointers are
    // used. If we generate CFI directives for the extra "STP"s, the linker will
    // lose track of the correct values for the frame pointer and link register.
    if (HasFP && (FramePtr == Reg || Reg == AArch64::LR)) {
      TotalSkipped += stackGrowth;
      continue;
    }

    unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
    unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createOffset(
        nullptr, DwarfReg, Offset - TotalSkipped));
    BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex)
        .setMIFlags(MachineInstr::FrameSetup);
  }
}
Example 15: switch
void
EpiphanyInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MBBI,
                                       unsigned SrcReg, bool isKill,
                                       int FrameIdx,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBB.findDebugLoc(MBBI);
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FrameIdx);

  MachineMemOperand *MMO
    = MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
                              MachineMemOperand::MOStore,
                              MFI.getObjectSize(FrameIdx),
                              Align);

  unsigned StoreOp = 0;
  if (RC->hasType(MVT::i64) || RC->hasType(MVT::i32)) {
    switch (RC->getSize()) {
    case 4:
      StoreOp = Epiphany::LS32_STR;
      break;
    //case 8: StoreOp = Epiphany::LS64_STR; break;
    default:
      llvm_unreachable("Unknown size for regclass");
    }
  } else {
    assert((RC->hasType(MVT::f32) || RC->hasType(MVT::f64))
           && "Expected integer or floating type for store");
    switch (RC->getSize()) {
    case 4:
      StoreOp = Epiphany::LSFP32_STR;
      break;
    //case 8: StoreOp = Epiphany::LSFP64_STR; break;
    default:
      llvm_unreachable("Unknown size for regclass");
    }
  }

  MachineInstrBuilder NewMI = BuildMI(MBB, MBBI, DL, get(StoreOp));
  NewMI.addReg(SrcReg, getKillRegState(isKill))
       .addFrameIndex(FrameIdx)
       .addImm(0)
       .addMemOperand(MMO);
}