This article collects typical usage examples of the C++ method MachineBasicBlock::getParent. If you have been wondering what exactly MachineBasicBlock::getParent does, how to use it, or want concrete examples, the curated code samples below should help. You can also explore further usage examples of the containing class, MachineBasicBlock.
The following presents 15 code examples of the MachineBasicBlock::getParent method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
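Before the examples: all of them lean on the same parent-chain idiom, so here is a minimal orientation sketch (a hypothetical helper, not taken from the examples below; header paths assume a recent LLVM source tree). MachineInstr::getParent() returns the enclosing MachineBasicBlock, and MachineBasicBlock::getParent() returns the enclosing MachineFunction, from which the subtarget's instruction and register info can be reached:
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
using namespace llvm;
// Walk up from an instruction to the target hooks its function provides.
static void walkUpFromInstr(MachineInstr &MI) {
  MachineBasicBlock *MBB = MI.getParent();  // block containing MI
  MachineFunction *MF = MBB->getParent();   // function containing the block
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  (void)TII; (void)TRI; // silence unused-variable warnings in this sketch
}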
Example 1: findReachingDefs
bool LiveRangeCalc::findReachingDefs(LiveRange &LR, MachineBasicBlock &KillMBB,
SlotIndex Kill, unsigned PhysReg) {
unsigned KillMBBNum = KillMBB.getNumber();
// Block numbers where LR should be live-in.
SmallVector<unsigned, 16> WorkList(1, KillMBBNum);
// Remember if we have seen more than one value.
bool UniqueVNI = true;
VNInfo *TheVNI = 0;
// Using Seen as a visited set, perform a BFS for all reaching defs.
for (unsigned i = 0; i != WorkList.size(); ++i) {
MachineBasicBlock *MBB = MF->getBlockNumbered(WorkList[i]);
#ifndef NDEBUG
if (MBB->pred_empty()) {
MBB->getParent()->verify();
llvm_unreachable("Use not jointly dominated by defs.");
}
if (TargetRegisterInfo::isPhysicalRegister(PhysReg) &&
!MBB->isLiveIn(PhysReg)) {
MBB->getParent()->verify();
errs() << "The register needs to be live in to BB#" << MBB->getNumber()
<< ", but is missing from the live-in list.\n";
llvm_unreachable("Invalid global physical register");
}
#endif
for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
PE = MBB->pred_end(); PI != PE; ++PI) {
MachineBasicBlock *Pred = *PI;
// Is this a known live-out block?
if (Seen.test(Pred->getNumber())) {
if (VNInfo *VNI = LiveOut[Pred].first) {
if (TheVNI && TheVNI != VNI)
UniqueVNI = false;
TheVNI = VNI;
}
continue;
}
SlotIndex Start, End;
std::tie(Start, End) = Indexes->getMBBRange(Pred);
// First time we see Pred. Try to determine the live-out value, but set
// it as null if Pred is live-through with an unknown value.
VNInfo *VNI = LR.extendInBlock(Start, End);
setLiveOutValue(Pred, VNI);
if (VNI) {
if (TheVNI && TheVNI != VNI)
UniqueVNI = false;
TheVNI = VNI;
continue;
}
// No, we need a live-in value for Pred as well
if (Pred != &KillMBB)
WorkList.push_back(Pred->getNumber());
else
// Loopback to KillMBB, so value is really live through.
Kill = SlotIndex();
}
}
LiveIn.clear();
// Both updateSSA() and LiveRangeUpdater benefit from ordered blocks, but
// neither requires it. Skip the sorting overhead for small updates.
if (WorkList.size() > 4)
array_pod_sort(WorkList.begin(), WorkList.end());
// If a unique reaching def was found, blit in the live ranges immediately.
if (UniqueVNI) {
LiveRangeUpdater Updater(&LR);
for (SmallVectorImpl<unsigned>::const_iterator I = WorkList.begin(),
E = WorkList.end(); I != E; ++I) {
SlotIndex Start, End;
std::tie(Start, End) = Indexes->getMBBRange(*I);
// Trim the live range in KillMBB.
if (*I == KillMBBNum && Kill.isValid())
End = Kill;
else
LiveOut[MF->getBlockNumbered(*I)] =
LiveOutPair(TheVNI, (MachineDomTreeNode *)0);
Updater.add(Start, End, TheVNI);
}
return true;
}
// Multiple values were found, so transfer the work list to the LiveIn array
// where UpdateSSA will use it as a work list.
LiveIn.reserve(WorkList.size());
for (SmallVectorImpl<unsigned>::const_iterator
I = WorkList.begin(), E = WorkList.end(); I != E; ++I) {
MachineBasicBlock *MBB = MF->getBlockNumbered(*I);
addLiveInBlock(LR, DomTree->getNode(MBB));
if (MBB == &KillMBB)
//.........(part of the code omitted here).........
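The indexed for-loop over a growing SmallVector in this example is LLVM's idiomatic BFS worklist: push_back may reallocate, so indices are used instead of iterators. A hedged, generic sketch of the same traversal over predecessor edges (hypothetical helper; assumes SmallVector.h, SmallPtrSet.h, and MachineBasicBlock.h are available):
// Collect Start plus every block that can reach it through predecessors.
static void collectReaching(MachineBasicBlock &Start,
                            SmallVectorImpl<MachineBasicBlock *> &Out) {
  SmallPtrSet<MachineBasicBlock *, 16> Seen;
  Out.push_back(&Start);
  Seen.insert(&Start);
  for (unsigned i = 0; i != Out.size(); ++i) // Out grows inside the loop.
    for (MachineBasicBlock *Pred : Out[i]->predecessors())
      if (Seen.insert(Pred).second)
        Out.push_back(Pred);
}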
Example 2: assert
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
ArrayRef<unsigned> Ops, int FI,
LiveIntervals *LIS) const {
auto Flags = MachineMemOperand::MONone;
for (unsigned OpIdx : Ops)
Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
: MachineMemOperand::MOLoad;
MachineBasicBlock *MBB = MI.getParent();
assert(MBB && "foldMemoryOperand needs an inserted instruction");
MachineFunction &MF = *MBB->getParent();
// If we're not folding a load into a subreg, the size of the load is the
// size of the spill slot. But if we are, we need to figure out what the
// actual load size is.
int64_t MemSize = 0;
const MachineFrameInfo &MFI = MF.getFrameInfo();
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
if (Flags & MachineMemOperand::MOStore) {
MemSize = MFI.getObjectSize(FI);
} else {
for (unsigned OpIdx : Ops) {
int64_t OpSize = MFI.getObjectSize(FI);
if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
if (SubRegSize > 0 && !(SubRegSize % 8))
OpSize = SubRegSize / 8;
}
MemSize = std::max(MemSize, OpSize);
}
}
assert(MemSize && "Did not expect a zero-sized stack slot");
MachineInstr *NewMI = nullptr;
if (MI.getOpcode() == TargetOpcode::STACKMAP ||
MI.getOpcode() == TargetOpcode::PATCHPOINT ||
MI.getOpcode() == TargetOpcode::STATEPOINT) {
// Fold stackmap/patchpoint.
NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
if (NewMI)
MBB->insert(MI, NewMI);
} else {
// Ask the target to do the actual folding.
NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS);
}
if (NewMI) {
NewMI->setMemRefs(MF, MI.memoperands());
// Add a memory operand, foldMemoryOperandImpl doesn't do that.
assert((!(Flags & MachineMemOperand::MOStore) ||
NewMI->mayStore()) &&
"Folded a def to a non-store!");
assert((!(Flags & MachineMemOperand::MOLoad) ||
NewMI->mayLoad()) &&
"Folded a use to a non-load!");
assert(MFI.getObjectOffset(FI) != -1);
MachineMemOperand *MMO = MF.getMachineMemOperand(
MachinePointerInfo::getFixedStack(MF, FI), Flags, MemSize,
MFI.getObjectAlignment(FI));
NewMI->addMemOperand(MF, MMO);
return NewMI;
}
// Straight COPY may fold as load/store.
if (!MI.isCopy() || Ops.size() != 1)
return nullptr;
const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
if (!RC)
return nullptr;
const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
MachineBasicBlock::iterator Pos = MI;
if (Flags == MachineMemOperand::MOStore)
storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
else
loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
return &*--Pos;
}
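Worth noting in this example: MBB->getParent() is what connects the folded instruction back to the MachineFrameInfo describing its spill slot. That sub-pattern in isolation, as a hedged sketch (hypothetical helper name; in older trees getFrameInfo() returns a pointer rather than a reference):
// Report the size of the stack object a fold would load from or store to.
static int64_t spillSlotSize(const MachineBasicBlock &MBB, int FI) {
  const MachineFunction &MF = *MBB.getParent();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  return MFI.getObjectSize(FI); // same value foldMemoryOperand() consumes
}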
Example 3: expandAtomicCmpSwap
bool MipsExpandPseudo::expandAtomicCmpSwap(MachineBasicBlock &BB,
MachineBasicBlock::iterator I,
MachineBasicBlock::iterator &NMBBI) {
const unsigned Size =
I->getOpcode() == Mips::ATOMIC_CMP_SWAP_I32_POSTRA ? 4 : 8;
MachineFunction *MF = BB.getParent();
const bool ArePtrs64bit = STI->getABI().ArePtrs64bit();
DebugLoc DL = I->getDebugLoc();
unsigned LL, SC, ZERO, BNE, BEQ, MOVE;
if (Size == 4) {
if (STI->inMicroMipsMode()) {
LL = STI->hasMips32r6() ? Mips::LL_MMR6 : Mips::LL_MM;
SC = STI->hasMips32r6() ? Mips::SC_MMR6 : Mips::SC_MM;
BNE = STI->hasMips32r6() ? Mips::BNEC_MMR6 : Mips::BNE_MM;
BEQ = STI->hasMips32r6() ? Mips::BEQC_MMR6 : Mips::BEQ_MM;
} else {
LL = STI->hasMips32r6()
? (ArePtrs64bit ? Mips::LL64_R6 : Mips::LL_R6)
: (ArePtrs64bit ? Mips::LL64 : Mips::LL);
SC = STI->hasMips32r6()
? (ArePtrs64bit ? Mips::SC64_R6 : Mips::SC_R6)
: (ArePtrs64bit ? Mips::SC64 : Mips::SC);
BNE = Mips::BNE;
BEQ = Mips::BEQ;
}
ZERO = Mips::ZERO;
MOVE = Mips::OR;
} else {
LL = STI->hasMips64r6() ? Mips::LLD_R6 : Mips::LLD;
SC = STI->hasMips64r6() ? Mips::SCD_R6 : Mips::SCD;
ZERO = Mips::ZERO_64;
BNE = Mips::BNE64;
BEQ = Mips::BEQ64;
MOVE = Mips::OR64;
}
unsigned Dest = I->getOperand(0).getReg();
unsigned Ptr = I->getOperand(1).getReg();
unsigned OldVal = I->getOperand(2).getReg();
unsigned NewVal = I->getOperand(3).getReg();
unsigned Scratch = I->getOperand(4).getReg();
// insert new blocks after the current block
const BasicBlock *LLVM_BB = BB.getBasicBlock();
MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineFunction::iterator It = ++BB.getIterator();
MF->insert(It, loop1MBB);
MF->insert(It, loop2MBB);
MF->insert(It, exitMBB);
// Transfer the remainder of BB and its successor edges to exitMBB.
exitMBB->splice(exitMBB->begin(), &BB,
std::next(MachineBasicBlock::iterator(I)), BB.end());
exitMBB->transferSuccessorsAndUpdatePHIs(&BB);
// thisMBB:
// ...
// fallthrough --> loop1MBB
BB.addSuccessor(loop1MBB, BranchProbability::getOne());
loop1MBB->addSuccessor(exitMBB);
loop1MBB->addSuccessor(loop2MBB);
loop1MBB->normalizeSuccProbs();
loop2MBB->addSuccessor(loop1MBB);
loop2MBB->addSuccessor(exitMBB);
loop2MBB->normalizeSuccProbs();
// loop1MBB:
// ll dest, 0(ptr)
// bne dest, oldval, exitMBB
BuildMI(loop1MBB, DL, TII->get(LL), Dest).addReg(Ptr).addImm(0);
BuildMI(loop1MBB, DL, TII->get(BNE))
.addReg(Dest, RegState::Kill).addReg(OldVal).addMBB(exitMBB);
// loop2MBB:
// move scratch, NewVal
// sc Scratch, Scratch, 0(ptr)
// beq Scratch, $0, loop1MBB
BuildMI(loop2MBB, DL, TII->get(MOVE), Scratch).addReg(NewVal).addReg(ZERO);
BuildMI(loop2MBB, DL, TII->get(SC), Scratch)
.addReg(Scratch).addReg(Ptr).addImm(0);
BuildMI(loop2MBB, DL, TII->get(BEQ))
.addReg(Scratch, RegState::Kill).addReg(ZERO).addMBB(loop1MBB);
LivePhysRegs LiveRegs;
computeAndAddLiveIns(LiveRegs, *loop1MBB);
computeAndAddLiveIns(LiveRegs, *loop2MBB);
computeAndAddLiveIns(LiveRegs, *exitMBB);
NMBBI = BB.end();
I->eraseFromParent();
return true;
}
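The create/insert/splice/transferSuccessors boilerplate in this expansion is reusable almost verbatim whenever a pseudo must be expanded into a diamond or loop of new blocks. A condensed, hedged sketch of just that part (hypothetical helper; BB.getParent() supplies the MachineFunction that owns the new blocks):
// Split BB after I, moving the remainder into a new fall-through block.
static MachineBasicBlock *splitBlockAfter(MachineBasicBlock &BB,
                                          MachineBasicBlock::iterator I) {
  MachineFunction *MF = BB.getParent();
  MachineBasicBlock *Tail = MF->CreateMachineBasicBlock(BB.getBasicBlock());
  MF->insert(std::next(BB.getIterator()), Tail);
  // Move everything after I into the tail and rewire CFG edges/PHIs.
  Tail->splice(Tail->begin(), &BB, std::next(I), BB.end());
  Tail->transferSuccessorsAndUpdatePHIs(&BB);
  BB.addSuccessor(Tail);
  return Tail;
}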
Example 4: expandAtomicBinOp
bool MipsExpandPseudo::expandAtomicBinOp(MachineBasicBlock &BB,
MachineBasicBlock::iterator I,
MachineBasicBlock::iterator &NMBBI,
unsigned Size) {
MachineFunction *MF = BB.getParent();
const bool ArePtrs64bit = STI->getABI().ArePtrs64bit();
DebugLoc DL = I->getDebugLoc();
unsigned LL, SC, ZERO, BEQ;
if (Size == 4) {
if (STI->inMicroMipsMode()) {
LL = STI->hasMips32r6() ? Mips::LL_MMR6 : Mips::LL_MM;
SC = STI->hasMips32r6() ? Mips::SC_MMR6 : Mips::SC_MM;
BEQ = STI->hasMips32r6() ? Mips::BEQC_MMR6 : Mips::BEQ_MM;
} else {
LL = STI->hasMips32r6()
? (ArePtrs64bit ? Mips::LL64_R6 : Mips::LL_R6)
: (ArePtrs64bit ? Mips::LL64 : Mips::LL);
SC = STI->hasMips32r6()
? (ArePtrs64bit ? Mips::SC64_R6 : Mips::SC_R6)
: (ArePtrs64bit ? Mips::SC64 : Mips::SC);
BEQ = Mips::BEQ;
}
ZERO = Mips::ZERO;
} else {
LL = STI->hasMips64r6() ? Mips::LLD_R6 : Mips::LLD;
SC = STI->hasMips64r6() ? Mips::SCD_R6 : Mips::SCD;
ZERO = Mips::ZERO_64;
BEQ = Mips::BEQ64;
}
unsigned OldVal = I->getOperand(0).getReg();
unsigned Ptr = I->getOperand(1).getReg();
unsigned Incr = I->getOperand(2).getReg();
unsigned Scratch = I->getOperand(3).getReg();
unsigned Opcode = 0;
unsigned OR = 0;
unsigned AND = 0;
unsigned NOR = 0;
bool IsNand = false;
switch (I->getOpcode()) {
case Mips::ATOMIC_LOAD_ADD_I32_POSTRA:
Opcode = Mips::ADDu;
break;
case Mips::ATOMIC_LOAD_SUB_I32_POSTRA:
Opcode = Mips::SUBu;
break;
case Mips::ATOMIC_LOAD_AND_I32_POSTRA:
Opcode = Mips::AND;
break;
case Mips::ATOMIC_LOAD_OR_I32_POSTRA:
Opcode = Mips::OR;
break;
case Mips::ATOMIC_LOAD_XOR_I32_POSTRA:
Opcode = Mips::XOR;
break;
case Mips::ATOMIC_LOAD_NAND_I32_POSTRA:
IsNand = true;
AND = Mips::AND;
NOR = Mips::NOR;
break;
case Mips::ATOMIC_SWAP_I32_POSTRA:
OR = Mips::OR;
break;
case Mips::ATOMIC_LOAD_ADD_I64_POSTRA:
Opcode = Mips::DADDu;
break;
case Mips::ATOMIC_LOAD_SUB_I64_POSTRA:
Opcode = Mips::DSUBu;
break;
case Mips::ATOMIC_LOAD_AND_I64_POSTRA:
Opcode = Mips::AND64;
break;
case Mips::ATOMIC_LOAD_OR_I64_POSTRA:
Opcode = Mips::OR64;
break;
case Mips::ATOMIC_LOAD_XOR_I64_POSTRA:
Opcode = Mips::XOR64;
break;
case Mips::ATOMIC_LOAD_NAND_I64_POSTRA:
IsNand = true;
AND = Mips::AND64;
NOR = Mips::NOR64;
break;
case Mips::ATOMIC_SWAP_I64_POSTRA:
OR = Mips::OR64;
break;
default:
llvm_unreachable("Unknown pseudo atomic!");
}
const BasicBlock *LLVM_BB = BB.getBasicBlock();
MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineFunction::iterator It = ++BB.getIterator();
MF->insert(It, loopMBB);
//.........(part of the code omitted here).........
Example 5: computeCalleeSaveRegisterPairs
bool AArch64FrameLowering::restoreCalleeSavedRegisters(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo *TRI) const {
MachineFunction &MF = *MBB.getParent();
const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
DebugLoc DL;
SmallVector<RegPairInfo, 8> RegPairs;
if (MI != MBB.end())
DL = MI->getDebugLoc();
computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs);
for (auto RPII = RegPairs.begin(), RPIE = RegPairs.end(); RPII != RPIE;
++RPII) {
RegPairInfo RPI = *RPII;
unsigned Reg1 = RPI.Reg1;
unsigned Reg2 = RPI.Reg2;
// Issue sequence of non-sp increment and sp-pi restores for cs regs. Only
// the last load is sp-pi post-increment and de-allocates the stack:
// For example:
// ldp fp, lr, [sp, #32] // addImm(+4)
// ldp x20, x19, [sp, #16] // addImm(+2)
// ldp x22, x21, [sp], #48 // addImm(+6)
// Note: see comment in spillCalleeSavedRegisters()
unsigned LdrOpc;
bool BumpSP = RPII == std::prev(RegPairs.end());
if (RPI.IsGPR) {
if (BumpSP)
LdrOpc = RPI.isPaired() ? AArch64::LDPXpost : AArch64::LDRXpost;
else
LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui;
} else {
if (BumpSP)
LdrOpc = RPI.isPaired() ? AArch64::LDPDpost : AArch64::LDRDpost;
else
LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui;
}
DEBUG(dbgs() << "CSR restore: (" << TRI->getName(Reg1);
if (RPI.isPaired())
dbgs() << ", " << TRI->getName(Reg2);
dbgs() << ") -> fi#(" << RPI.FrameIdx;
if (RPI.isPaired())
dbgs() << ", " << RPI.FrameIdx+1;
dbgs() << ")\n");
const int Offset = RPI.Offset;
MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(LdrOpc));
if (BumpSP)
MIB.addReg(AArch64::SP, RegState::Define);
if (RPI.isPaired())
MIB.addReg(Reg2, getDefRegState(true))
.addReg(Reg1, getDefRegState(true))
.addReg(AArch64::SP)
.addImm(Offset) // [sp], #offset * 8 or [sp, #offset * 8]
// where the factor * 8 is implicit
.setMIFlag(MachineInstr::FrameDestroy);
else
MIB.addReg(Reg1, getDefRegState(true))
.addReg(AArch64::SP)
.addImm(BumpSP ? Offset * 8 : Offset) // post-dec version is unscaled
.setMIFlag(MachineInstr::FrameDestroy);
}
return true;
}
Example 6: finalizeBundle
/// finalizeBundle - Finalize a machine instruction bundle which includes
/// a sequence of instructions starting from FirstMI to LastMI (exclusive).
/// This routine adds a BUNDLE instruction to represent the bundle, it adds
/// IsInternalRead markers to MachineOperands which are defined inside the
/// bundle, and it copies externally visible defs and uses to the BUNDLE
/// instruction.
void llvm::finalizeBundle(MachineBasicBlock &MBB,
MachineBasicBlock::instr_iterator FirstMI,
MachineBasicBlock::instr_iterator LastMI) {
assert(FirstMI != LastMI && "Empty bundle?");
MIBundleBuilder Bundle(MBB, FirstMI, LastMI);
const TargetMachine &TM = MBB.getParent()->getTarget();
const TargetInstrInfo *TII = TM.getInstrInfo();
const TargetRegisterInfo *TRI = TM.getRegisterInfo();
MachineInstrBuilder MIB = BuildMI(*MBB.getParent(), FirstMI->getDebugLoc(),
TII->get(TargetOpcode::BUNDLE));
Bundle.prepend(MIB);
SmallVector<unsigned, 32> LocalDefs;
SmallSet<unsigned, 32> LocalDefSet;
SmallSet<unsigned, 8> DeadDefSet;
SmallSet<unsigned, 16> KilledDefSet;
SmallVector<unsigned, 8> ExternUses;
SmallSet<unsigned, 8> ExternUseSet;
SmallSet<unsigned, 8> KilledUseSet;
SmallSet<unsigned, 8> UndefUseSet;
SmallVector<MachineOperand*, 4> Defs;
for (; FirstMI != LastMI; ++FirstMI) {
for (unsigned i = 0, e = FirstMI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = FirstMI->getOperand(i);
if (!MO.isReg())
continue;
if (MO.isDef()) {
Defs.push_back(&MO);
continue;
}
unsigned Reg = MO.getReg();
if (!Reg)
continue;
assert(TargetRegisterInfo::isPhysicalRegister(Reg));
if (LocalDefSet.count(Reg)) {
MO.setIsInternalRead();
if (MO.isKill())
// Internal def is now killed.
KilledDefSet.insert(Reg);
} else {
if (ExternUseSet.insert(Reg)) {
ExternUses.push_back(Reg);
if (MO.isUndef())
UndefUseSet.insert(Reg);
}
if (MO.isKill())
// External def is now killed.
KilledUseSet.insert(Reg);
}
}
for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
MachineOperand &MO = *Defs[i];
unsigned Reg = MO.getReg();
if (!Reg)
continue;
if (LocalDefSet.insert(Reg)) {
LocalDefs.push_back(Reg);
if (MO.isDead()) {
DeadDefSet.insert(Reg);
}
} else {
// Re-defined inside the bundle, it's no longer killed.
KilledDefSet.erase(Reg);
if (!MO.isDead())
// Previously defined but dead.
DeadDefSet.erase(Reg);
}
if (!MO.isDead()) {
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
unsigned SubReg = *SubRegs;
if (LocalDefSet.insert(SubReg))
LocalDefs.push_back(SubReg);
}
}
}
Defs.clear();
}
SmallSet<unsigned, 32> Added;
for (unsigned i = 0, e = LocalDefs.size(); i != e; ++i) {
unsigned Reg = LocalDefs[i];
if (Added.insert(Reg)) {
// If it's not live beyond end of the bundle, mark it dead.
bool isDead = DeadDefSet.count(Reg) || KilledDefSet.count(Reg);
MIB.addReg(Reg, getDefRegState(true) | getDeadRegState(isDead) |
getImplRegState(true));
}
//.........(part of the code omitted here).........
Example 7: emitPushInst
void ARMFrameLowering::emitPushInst(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI,
unsigned StmOpc, unsigned StrOpc,
bool NoGap,
bool(*Func)(unsigned, bool),
unsigned MIFlags) const {
MachineFunction &MF = *MBB.getParent();
const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
DebugLoc DL;
if (MI != MBB.end()) DL = MI->getDebugLoc();
SmallVector<std::pair<unsigned,bool>, 4> Regs;
unsigned i = CSI.size();
while (i != 0) {
unsigned LastReg = 0;
for (; i != 0; --i) {
unsigned Reg = CSI[i-1].getReg();
if (!(Func)(Reg, STI.isTargetDarwin())) continue;
// Add the callee-saved register as live-in unless it's LR and
// @llvm.returnaddress is called. If LR is returned for
// @llvm.returnaddress then it's already added to the function and
// entry block live-in sets.
bool isKill = true;
if (Reg == ARM::LR) {
if (MF.getFrameInfo()->isReturnAddressTaken() &&
MF.getRegInfo().isLiveIn(Reg))
isKill = false;
}
if (isKill)
MBB.addLiveIn(Reg);
// If NoGap is true, push consecutive registers and then leave the rest
// for other instructions. e.g.
// vpush {d8, d10, d11} -> vpush {d8}, vpush {d10, d11}
if (NoGap && LastReg && LastReg != Reg-1)
break;
LastReg = Reg;
Regs.push_back(std::make_pair(Reg, isKill));
}
if (Regs.empty())
continue;
if (Regs.size() > 1 || StrOpc == 0) {
MachineInstrBuilder MIB =
AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(StmOpc), ARM::SP)
.addReg(ARM::SP).setMIFlags(MIFlags));
for (unsigned i = 0, e = Regs.size(); i < e; ++i)
MIB.addReg(Regs[i].first, getKillRegState(Regs[i].second));
} else if (Regs.size() == 1) {
MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(StrOpc),
ARM::SP)
.addReg(Regs[0].first, getKillRegState(Regs[0].second))
.addReg(ARM::SP).setMIFlags(MIFlags);
// ARM mode needs an extra reg0 here due to addrmode2. Will go away once
// that refactoring is complete (eventually).
if (StrOpc == ARM::STR_PRE) {
MIB.addReg(0);
MIB.addImm(ARM_AM::getAM2Opc(ARM_AM::sub, 4, ARM_AM::no_shift));
} else
MIB.addImm(-4);
AddDefaultPred(MIB);
}
Regs.clear();
}
}
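The NoGap handling above splits the callee-saved list into runs of consecutive register numbers, one push per run. The grouping logic itself is target-independent; a self-contained sketch of the idea (hypothetical, with plain unsigned IDs standing in for registers):
#include <vector>
// Group a register sequence into runs of consecutive IDs, mirroring how
// emitPushInst forms one store-multiple per run when NoGap is set.
static std::vector<std::vector<unsigned>>
groupConsecutive(const std::vector<unsigned> &Regs) {
  std::vector<std::vector<unsigned>> Runs;
  for (unsigned Reg : Regs) {
    if (Runs.empty() || Reg != Runs.back().back() + 1)
      Runs.emplace_back(); // gap found: start a new run
    Runs.back().push_back(Reg);
  }
  return Runs;
}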
Example 8: if
void MipsSEInstrInfo::
loadRegFromStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned DestReg, int FI, const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI, int64_t Offset) const {
DebugLoc DL;
if (I != MBB.end()) DL = I->getDebugLoc();
MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOLoad);
unsigned Opc = 0;
const Function *Func = MBB.getParent()->getFunction();
bool ReqIndirectLoad = Func->hasFnAttribute("interrupt") &&
(DestReg == Mips::LO0 || DestReg == Mips::LO0_64 ||
DestReg == Mips::HI0 || DestReg == Mips::HI0_64);
if (Mips::GPR32RegClass.hasSubClassEq(RC))
Opc = Mips::LW;
else if (Mips::GPR64RegClass.hasSubClassEq(RC))
Opc = Mips::LD;
else if (Mips::ACC64RegClass.hasSubClassEq(RC))
Opc = Mips::LOAD_ACC64;
else if (Mips::ACC64DSPRegClass.hasSubClassEq(RC))
Opc = Mips::LOAD_ACC64DSP;
else if (Mips::ACC128RegClass.hasSubClassEq(RC))
Opc = Mips::LOAD_ACC128;
else if (Mips::DSPCCRegClass.hasSubClassEq(RC))
Opc = Mips::LOAD_CCOND_DSP;
else if (Mips::FGR32RegClass.hasSubClassEq(RC))
Opc = Mips::LWC1;
else if (Mips::AFGR64RegClass.hasSubClassEq(RC))
Opc = Mips::LDC1;
else if (Mips::FGR64RegClass.hasSubClassEq(RC))
Opc = Mips::LDC164;
else if (TRI->isTypeLegalForClass(*RC, MVT::v16i8))
Opc = Mips::LD_B;
else if (TRI->isTypeLegalForClass(*RC, MVT::v8i16) ||
TRI->isTypeLegalForClass(*RC, MVT::v8f16))
Opc = Mips::LD_H;
else if (TRI->isTypeLegalForClass(*RC, MVT::v4i32) ||
TRI->isTypeLegalForClass(*RC, MVT::v4f32))
Opc = Mips::LD_W;
else if (TRI->isTypeLegalForClass(*RC, MVT::v2i64) ||
TRI->isTypeLegalForClass(*RC, MVT::v2f64))
Opc = Mips::LD_D;
else if (Mips::HI32RegClass.hasSubClassEq(RC))
Opc = Mips::LW;
else if (Mips::HI64RegClass.hasSubClassEq(RC))
Opc = Mips::LD;
else if (Mips::LO32RegClass.hasSubClassEq(RC))
Opc = Mips::LW;
else if (Mips::LO64RegClass.hasSubClassEq(RC))
Opc = Mips::LD;
assert(Opc && "Register class not handled!");
if (!ReqIndirectLoad)
BuildMI(MBB, I, DL, get(Opc), DestReg)
.addFrameIndex(FI)
.addImm(Offset)
.addMemOperand(MMO);
else {
// Load HI/LO through K0. Notably the DestReg is encoded into the
// instruction itself.
unsigned Reg = Mips::K0;
unsigned LdOp = Mips::MTLO;
if (DestReg == Mips::HI0)
LdOp = Mips::MTHI;
if (Subtarget.getABI().ArePtrs64bit()) {
Reg = Mips::K0_64;
if (DestReg == Mips::HI0_64)
LdOp = Mips::MTHI64;
else
LdOp = Mips::MTLO64;
}
BuildMI(MBB, I, DL, get(Opc), Reg)
.addFrameIndex(FI)
.addImm(Offset)
.addMemOperand(MMO);
BuildMI(MBB, I, DL, get(LdOp)).addReg(Reg);
}
}
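The non-interrupt path above shows the canonical recipe for a frame-index load: BuildMI + addFrameIndex + addImm + addMemOperand. As a hedged standalone sketch, with the GetMemOperand helper inlined via the parent chain (hypothetical function; getObjectAlignment matches the older API used elsewhere on this page):
// Emit `DestReg = load [FI + Offset]` using opcode Opc.
static void emitFrameLoad(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator I,
                          const TargetInstrInfo &TII, unsigned Opc,
                          unsigned DestReg, int FI, int64_t Offset) {
  MachineFunction &MF = *MBB.getParent();
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
      MF.getFrameInfo().getObjectSize(FI),
      MF.getFrameInfo().getObjectAlignment(FI));
  BuildMI(MBB, I, DL, TII.get(Opc), DestReg)
      .addFrameIndex(FI)
      .addImm(Offset)
      .addMemOperand(MMO);
}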
Example 9: ReplaceDominatedUses
// Replace uses of FromReg with ToReg if they are dominated by MI.
static bool ReplaceDominatedUses(MachineBasicBlock &MBB, MachineInstr &MI,
unsigned FromReg, unsigned ToReg,
const MachineRegisterInfo &MRI,
MachineDominatorTree &MDT,
LiveIntervals &LIS) {
bool Changed = false;
LiveInterval *FromLI = &LIS.getInterval(FromReg);
LiveInterval *ToLI = &LIS.getInterval(ToReg);
SlotIndex FromIdx = LIS.getInstructionIndex(MI).getRegSlot();
VNInfo *FromVNI = FromLI->getVNInfoAt(FromIdx);
SmallVector<SlotIndex, 4> Indices;
for (auto I = MRI.use_nodbg_begin(FromReg), E = MRI.use_nodbg_end();
I != E;) {
MachineOperand &O = *I++;
MachineInstr *Where = O.getParent();
// Check that MI dominates the instruction in the normal way.
if (&MI == Where || !MDT.dominates(&MI, Where))
continue;
// If this use gets a different value, skip it.
SlotIndex WhereIdx = LIS.getInstructionIndex(*Where);
VNInfo *WhereVNI = FromLI->getVNInfoAt(WhereIdx);
if (WhereVNI && WhereVNI != FromVNI)
continue;
// Make sure ToReg isn't clobbered before it gets there.
VNInfo *ToVNI = ToLI->getVNInfoAt(WhereIdx);
if (ToVNI && ToVNI != FromVNI)
continue;
Changed = true;
LLVM_DEBUG(dbgs() << "Setting operand " << O << " in " << *Where << " from "
<< MI << "\n");
O.setReg(ToReg);
// If the store's def was previously dead, it is no longer.
if (!O.isUndef()) {
MI.getOperand(0).setIsDead(false);
Indices.push_back(WhereIdx.getRegSlot());
}
}
if (Changed) {
// Extend ToReg's liveness.
LIS.extendToIndices(*ToLI, Indices);
// Shrink FromReg's liveness.
LIS.shrinkToUses(FromLI);
// If we replaced all dominated uses, FromReg is now killed at MI.
if (!FromLI->liveAt(FromIdx.getDeadSlot()))
MI.addRegisterKilled(FromReg, MBB.getParent()
->getSubtarget<WebAssemblySubtarget>()
.getRegisterInfo());
}
return Changed;
}
Example 10: print
void MIPrinter::print(const MachineBasicBlock &MBB) {
assert(MBB.getNumber() >= 0 && "Invalid MBB number");
OS << "bb." << MBB.getNumber();
bool HasAttributes = false;
if (const auto *BB = MBB.getBasicBlock()) {
if (BB->hasName()) {
OS << "." << BB->getName();
} else {
HasAttributes = true;
OS << " (";
int Slot = MST.getLocalSlot(BB);
if (Slot == -1)
OS << "<ir-block badref>";
else
OS << (Twine("%ir-block.") + Twine(Slot)).str();
}
}
if (MBB.hasAddressTaken()) {
OS << (HasAttributes ? ", " : " (");
OS << "address-taken";
HasAttributes = true;
}
if (MBB.isEHPad()) {
OS << (HasAttributes ? ", " : " (");
OS << "landing-pad";
HasAttributes = true;
}
if (MBB.getAlignment()) {
OS << (HasAttributes ? ", " : " (");
OS << "align " << MBB.getAlignment();
HasAttributes = true;
}
if (HasAttributes)
OS << ")";
OS << ":\n";
bool HasLineAttributes = false;
// Print the successors
if (!MBB.succ_empty()) {
OS.indent(2) << "successors: ";
for (auto I = MBB.succ_begin(), E = MBB.succ_end(); I != E; ++I) {
if (I != MBB.succ_begin())
OS << ", ";
printMBBReference(**I);
if (MBB.hasSuccessorProbabilities())
OS << '(' << MBB.getSuccProbability(I) << ')';
}
OS << "\n";
HasLineAttributes = true;
}
// Print the live in registers.
const auto *TRI = MBB.getParent()->getSubtarget().getRegisterInfo();
assert(TRI && "Expected target register info");
if (!MBB.livein_empty()) {
OS.indent(2) << "liveins: ";
bool First = true;
for (const auto &LI : MBB.liveins()) {
if (!First)
OS << ", ";
First = false;
printReg(LI.PhysReg, OS, TRI);
if (LI.LaneMask != ~0u)
OS << ':' << PrintLaneMask(LI.LaneMask);
}
OS << "\n";
HasLineAttributes = true;
}
if (HasLineAttributes)
OS << "\n";
bool IsInBundle = false;
for (auto I = MBB.instr_begin(), E = MBB.instr_end(); I != E; ++I) {
const MachineInstr &MI = *I;
if (IsInBundle && !MI.isInsideBundle()) {
OS.indent(2) << "}\n";
IsInBundle = false;
}
OS.indent(IsInBundle ? 4 : 2);
print(MI);
if (!IsInBundle && MI.getFlag(MachineInstr::BundledSucc)) {
OS << " {";
IsInBundle = true;
}
OS << "\n";
}
if (IsInBundle)
OS.indent(2) << "}\n";
}
Example 11: print
void MIPrinter::print(const MachineBasicBlock &MBB) {
assert(MBB.getNumber() >= 0 && "Invalid MBB number");
OS << "bb." << MBB.getNumber();
bool HasAttributes = false;
if (const auto *BB = MBB.getBasicBlock()) {
if (BB->hasName()) {
OS << "." << BB->getName();
} else {
HasAttributes = true;
OS << " (";
int Slot = MST.getLocalSlot(BB);
if (Slot == -1)
OS << "<ir-block badref>";
else
OS << (Twine("%ir-block.") + Twine(Slot)).str();
}
}
if (MBB.hasAddressTaken()) {
OS << (HasAttributes ? ", " : " (");
OS << "address-taken";
HasAttributes = true;
}
if (MBB.isEHPad()) {
OS << (HasAttributes ? ", " : " (");
OS << "landing-pad";
HasAttributes = true;
}
if (MBB.getAlignment()) {
OS << (HasAttributes ? ", " : " (");
OS << "align " << MBB.getAlignment();
HasAttributes = true;
}
if (HasAttributes)
OS << ")";
OS << ":\n";
bool HasLineAttributes = false;
// Print the successors
bool canPredictProbs = canPredictBranchProbabilities(MBB);
if (!MBB.succ_empty() && (!SimplifyMIR || !canPredictProbs ||
!canPredictSuccessors(MBB))) {
OS.indent(2) << "successors: ";
for (auto I = MBB.succ_begin(), E = MBB.succ_end(); I != E; ++I) {
if (I != MBB.succ_begin())
OS << ", ";
printMBBReference(**I);
if (!SimplifyMIR || !canPredictProbs)
OS << '('
<< format("0x%08" PRIx32, MBB.getSuccProbability(I).getNumerator())
<< ')';
}
OS << "\n";
HasLineAttributes = true;
}
// Print the live in registers.
const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
if (MRI.tracksLiveness() && !MBB.livein_empty()) {
const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
OS.indent(2) << "liveins: ";
bool First = true;
for (const auto &LI : MBB.liveins()) {
if (!First)
OS << ", ";
First = false;
printReg(LI.PhysReg, OS, &TRI);
if (!LI.LaneMask.all())
OS << ":0x" << PrintLaneMask(LI.LaneMask);
}
OS << "\n";
HasLineAttributes = true;
}
if (HasLineAttributes)
OS << "\n";
bool IsInBundle = false;
for (auto I = MBB.instr_begin(), E = MBB.instr_end(); I != E; ++I) {
const MachineInstr &MI = *I;
if (IsInBundle && !MI.isInsideBundle()) {
OS.indent(2) << "}\n";
IsInBundle = false;
}
OS.indent(IsInBundle ? 4 : 2);
print(MI);
if (!IsInBundle && MI.getFlag(MachineInstr::BundledSucc)) {
OS << " {";
IsInBundle = true;
}
OS << "\n";
}
if (IsInBundle)
OS.indent(2) << "}\n";
}
Example 12: switch
/// If \p MBBI is a pseudo instruction, this method expands
/// it to the corresponding (sequence of) actual instruction(s).
/// \returns true if \p MBBI has been expanded.
bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI) {
MachineInstr &MI = *MBBI;
unsigned Opcode = MI.getOpcode();
DebugLoc DL = MBBI->getDebugLoc();
switch (Opcode) {
default:
return false;
case X86::TCRETURNdi:
case X86::TCRETURNdicc:
case X86::TCRETURNri:
case X86::TCRETURNmi:
case X86::TCRETURNdi64:
case X86::TCRETURNdi64cc:
case X86::TCRETURNri64:
case X86::TCRETURNmi64: {
bool isMem = Opcode == X86::TCRETURNmi || Opcode == X86::TCRETURNmi64;
MachineOperand &JumpTarget = MBBI->getOperand(0);
MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);
assert(StackAdjust.isImm() && "Expecting immediate value.");
// Adjust stack pointer.
int StackAdj = StackAdjust.getImm();
int MaxTCDelta = X86FI->getTCReturnAddrDelta();
int Offset = 0;
assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");
// Incorporate the retaddr area.
Offset = StackAdj - MaxTCDelta;
assert(Offset >= 0 && "Offset should never be negative");
if (Opcode == X86::TCRETURNdicc || Opcode == X86::TCRETURNdi64cc) {
assert(Offset == 0 && "Conditional tail call cannot adjust the stack.");
}
if (Offset) {
// Check for possible merge with preceding ADD instruction.
Offset += X86FL->mergeSPUpdates(MBB, MBBI, true);
X86FL->emitSPUpdate(MBB, MBBI, Offset, /*InEpilogue=*/true);
}
// Jump to label or value in register.
bool IsWin64 = STI->isTargetWin64();
if (Opcode == X86::TCRETURNdi || Opcode == X86::TCRETURNdicc ||
Opcode == X86::TCRETURNdi64 || Opcode == X86::TCRETURNdi64cc) {
unsigned Op;
switch (Opcode) {
case X86::TCRETURNdi:
Op = X86::TAILJMPd;
break;
case X86::TCRETURNdicc:
Op = X86::TAILJMPd_CC;
break;
case X86::TCRETURNdi64cc:
assert(!IsWin64 && "Conditional tail calls confuse the Win64 unwinder.");
// TODO: We could do it for Win64 "leaf" functions though; PR30337.
Op = X86::TAILJMPd64_CC;
break;
default:
// Note: Win64 uses REX prefixes on indirect jumps out of functions, but
// not on direct ones.
Op = X86::TAILJMPd64;
break;
}
MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(Op));
if (JumpTarget.isGlobal()) {
MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
JumpTarget.getTargetFlags());
} else {
assert(JumpTarget.isSymbol());
MIB.addExternalSymbol(JumpTarget.getSymbolName(),
JumpTarget.getTargetFlags());
}
if (Op == X86::TAILJMPd_CC || Op == X86::TAILJMPd64_CC) {
MIB.addImm(MBBI->getOperand(2).getImm());
}
} else if (Opcode == X86::TCRETURNmi || Opcode == X86::TCRETURNmi64) {
unsigned Op = (Opcode == X86::TCRETURNmi)
? X86::TAILJMPm
: (IsWin64 ? X86::TAILJMPm64_REX : X86::TAILJMPm64);
MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(Op));
for (unsigned i = 0; i != 5; ++i)
MIB.addOperand(MBBI->getOperand(i));
} else if (Opcode == X86::TCRETURNri64) {
BuildMI(MBB, MBBI, DL,
TII->get(IsWin64 ? X86::TAILJMPr64_REX : X86::TAILJMPr64))
.addReg(JumpTarget.getReg(), RegState::Kill);
} else {
BuildMI(MBB, MBBI, DL, TII->get(X86::TAILJMPr))
.addReg(JumpTarget.getReg(), RegState::Kill);
}
MachineInstr &NewMI = *std::prev(MBBI);
NewMI.copyImplicitOps(*MBBI->getParent()->getParent(), *MBBI);
// Delete the pseudo instruction TCRETURN.
//.........(part of the code omitted here).........
Example 13: runOnMachineBasicBlock
/// runOnMachineBasicBlock - Fill in delay slots for the given basic block.
/// We assume there is only one delay slot per delayed instruction.
bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
bool Changed = false;
const MipsSubtarget &STI = MBB.getParent()->getSubtarget<MipsSubtarget>();
bool InMicroMipsMode = STI.inMicroMipsMode();
const MipsInstrInfo *TII = STI.getInstrInfo();
for (Iter I = MBB.begin(); I != MBB.end(); ++I) {
if (!hasUnoccupiedSlot(&*I))
continue;
++FilledSlots;
Changed = true;
// Delay slot filling is disabled at -O0.
if (!DisableDelaySlotFiller && (TM.getOptLevel() != CodeGenOpt::None)) {
bool Filled = false;
if (searchBackward(MBB, I)) {
Filled = true;
} else if (I->isTerminator()) {
if (searchSuccBBs(MBB, I)) {
Filled = true;
}
} else if (searchForward(MBB, I)) {
Filled = true;
}
if (Filled) {
// Get instruction with delay slot.
MachineBasicBlock::instr_iterator DSI(I);
if (InMicroMipsMode && TII->GetInstSizeInBytes(std::next(DSI)) == 2 &&
DSI->isCall()) {
// If the instruction in the delay slot is 16 bits, change the opcode to
// the corresponding instruction with a short delay slot.
DSI->setDesc(TII->get(getEquivalentCallShort(DSI->getOpcode())));
}
continue;
}
}
// If the instruction is a BEQ or BNE with one ZERO register, then instead
// of adding a NOP, replace it with the corresponding compact branch
// instruction, i.e. BEQZC or BNEZC.
unsigned Opcode = I->getOpcode();
if (InMicroMipsMode &&
(Opcode == Mips::BEQ || Opcode == Mips::BNE) &&
((unsigned) I->getOperand(1).getReg()) == Mips::ZERO) {
I = replaceWithCompactBranch(MBB, I, I->getDebugLoc());
} else {
// Bundle the NOP to the instruction with the delay slot.
BuildMI(MBB, std::next(I), I->getDebugLoc(), TII->get(Mips::NOP));
MIBundleBuilder(MBB, I, std::next(I, 2));
}
}
return Changed;
}
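The two-line NOP-bundling idiom near the end of this function is handy on its own: emit the filler after the branch, then wrap branch and filler into one bundle so later passes treat them as a unit. A hedged sketch (assumes I points at the instruction owning the delay slot and that Mips::NOP is the right filler for the target):
// Glue a NOP into I's delay slot as a single two-instruction bundle.
static void bundleNopAfter(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator I,
                           const TargetInstrInfo *TII) {
  BuildMI(MBB, std::next(I), I->getDebugLoc(), TII->get(Mips::NOP));
  MIBundleBuilder(MBB, I, std::next(I, 2)); // [branch, nop] -> bundle
}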
Example 14: convertToHardwareLoop
/// convertToHardwareLoop - check if the loop is a candidate for
/// converting to a hardware loop. If so, then perform the
/// transformation.
///
/// This function works on innermost loops first. A loop can
/// be converted if it is a counting loop whose trip count is either a
/// register value or an immediate.
///
/// The code makes several assumptions about the representation
/// of the loop in llvm.
bool HexagonHardwareLoops::convertToHardwareLoop(MachineLoop *L) {
bool Changed = false;
// Process nested loops first.
for (MachineLoop::iterator I = L->begin(), E = L->end(); I != E; ++I) {
Changed |= convertToHardwareLoop(*I);
}
// If a nested loop has been converted, then we can't convert this loop.
if (Changed) {
return Changed;
}
// Are we able to determine the trip count for the loop?
CountValue *TripCount = getTripCount(L);
if (TripCount == 0) {
return false;
}
// Does the loop contain any invalid instructions?
if (containsInvalidInstruction(L)) {
return false;
}
MachineBasicBlock *Preheader = L->getLoopPreheader();
// No preheader means there's no place for the loop instruction.
if (Preheader == 0) {
return false;
}
MachineBasicBlock::iterator InsertPos = Preheader->getFirstTerminator();
MachineBasicBlock *LastMBB = L->getExitingBlock();
// Don't generate hw loop if the loop has more than one exit.
if (LastMBB == 0) {
return false;
}
MachineBasicBlock::iterator LastI = LastMBB->getFirstTerminator();
// Determine the loop start.
MachineBasicBlock *LoopStart = L->getTopBlock();
if (L->getLoopLatch() != LastMBB) {
// When the exit and latch are not the same, use the latch block as the
// start.
// The loop start address is used only after the 1st iteration, and the loop
// latch may contain instructions that need to be executed after the 1st
// iteration.
LoopStart = L->getLoopLatch();
// Make sure the latch is a successor of the exit, otherwise it won't work.
if (!LastMBB->isSuccessor(LoopStart)) {
return false;
}
}
// Convert the loop to a hardware loop
DEBUG(dbgs() << "Change to hardware loop at "; L->dump());
if (TripCount->isReg()) {
// Create a copy of the loop count register.
MachineFunction *MF = LastMBB->getParent();
const TargetRegisterClass *RC =
MF->getRegInfo().getRegClass(TripCount->getReg());
unsigned CountReg = MF->getRegInfo().createVirtualRegister(RC);
BuildMI(*Preheader, InsertPos, InsertPos->getDebugLoc(),
TII->get(TargetOpcode::COPY), CountReg).addReg(TripCount->getReg());
if (TripCount->isNeg()) {
unsigned CountReg1 = CountReg;
CountReg = MF->getRegInfo().createVirtualRegister(RC);
BuildMI(*Preheader, InsertPos, InsertPos->getDebugLoc(),
TII->get(Hexagon::NEG), CountReg).addReg(CountReg1);
}
// Add the Loop instruction to the beginning of the loop.
BuildMI(*Preheader, InsertPos, InsertPos->getDebugLoc(),
TII->get(Hexagon::LOOP0_r)).addMBB(LoopStart).addReg(CountReg);
} else {
assert(TripCount->isImm() && "Expecting immediate value for trip count");
// Add the Loop immediate instruction to the beginning of the loop.
int64_t CountImm = TripCount->getImm();
BuildMI(*Preheader, InsertPos, InsertPos->getDebugLoc(),
TII->get(Hexagon::LOOP0_i)).addMBB(LoopStart).addImm(CountImm);
}
// Make sure the loop start always has a reference in the CFG. We need to
// create a BlockAddress operand to get this mechanism to work; both the
// MachineBasicBlock and BasicBlock objects need the flag set.
LoopStart->setHasAddressTaken();
// This line is needed to set the hasAddressTaken flag on the BasicBlock
// object
BlockAddress::get(const_cast<BasicBlock *>(LoopStart->getBasicBlock()));
// Replace the loop branch with an endloop instruction.
DebugLoc dl = LastI->getDebugLoc();
BuildMI(*LastMBB, LastI, dl, TII->get(Hexagon::ENDLOOP0)).addMBB(LoopStart);
// The loop ends with either:
// - a conditional branch followed by an unconditional branch, or
//.........(part of the code omitted here).........
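One reusable piece from this example: copying a value into a fresh virtual register of the same class via TargetOpcode::COPY, with everything reached through the parent chain. A hedged sketch (hypothetical helper; assumes a pre-RA pass where virtual registers may still be created):
// Copy Reg into a new virtual register of the same class at InsertPos.
static unsigned copyToNewVReg(MachineBasicBlock &Preheader,
                              MachineBasicBlock::iterator InsertPos,
                              const TargetInstrInfo *TII, unsigned Reg) {
  MachineFunction *MF = Preheader.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(Reg);
  unsigned NewReg = MRI.createVirtualRegister(RC);
  BuildMI(Preheader, InsertPos, InsertPos->getDebugLoc(),
          TII->get(TargetOpcode::COPY), NewReg)
      .addReg(Reg);
  return NewReg;
}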
Example 15: emitPopInst
void ARMFrameLowering::emitPopInst(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI,
unsigned LdmOpc, unsigned LdrOpc,
bool isVarArg, bool NoGap,
bool(*Func)(unsigned, bool)) const {
MachineFunction &MF = *MBB.getParent();
const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
DebugLoc DL = MI->getDebugLoc();
unsigned RetOpcode = MI->getOpcode();
bool isTailCall = (RetOpcode == ARM::TCRETURNdi ||
RetOpcode == ARM::TCRETURNdiND ||
RetOpcode == ARM::TCRETURNri ||
RetOpcode == ARM::TCRETURNriND);
SmallVector<unsigned, 4> Regs;
unsigned i = CSI.size();
while (i != 0) {
unsigned LastReg = 0;
bool DeleteRet = false;
for (; i != 0; --i) {
unsigned Reg = CSI[i-1].getReg();
if (!(Func)(Reg, STI.isTargetDarwin())) continue;
if (Reg == ARM::LR && !isTailCall && !isVarArg && STI.hasV5TOps()) {
Reg = ARM::PC;
LdmOpc = AFI->isThumbFunction() ? ARM::t2LDMIA_RET : ARM::LDMIA_RET;
// Fold the return instruction into the LDM.
DeleteRet = true;
}
// If NoGap is true, pop consecutive registers and then leave the rest
// for other instructions. e.g.
// vpop {d8, d10, d11} -> vpop {d8}, vpop {d10, d11}
if (NoGap && LastReg && LastReg != Reg-1)
break;
LastReg = Reg;
Regs.push_back(Reg);
}
if (Regs.empty())
continue;
if (Regs.size() > 1 || LdrOpc == 0) {
MachineInstrBuilder MIB =
AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(LdmOpc), ARM::SP)
.addReg(ARM::SP));
for (unsigned i = 0, e = Regs.size(); i < e; ++i)
MIB.addReg(Regs[i], getDefRegState(true));
if (DeleteRet)
MI->eraseFromParent();
MI = MIB;
} else if (Regs.size() == 1) {
// If we adjusted the reg to PC from LR above, switch it back here. We
// only do that for LDM.
if (Regs[0] == ARM::PC)
Regs[0] = ARM::LR;
MachineInstrBuilder MIB =
BuildMI(MBB, MI, DL, TII.get(LdrOpc), Regs[0])
.addReg(ARM::SP, RegState::Define)
.addReg(ARM::SP);
// ARM mode needs an extra reg0 here due to addrmode2. Will go away once
// that refactoring is complete (eventually).
if (LdrOpc == ARM::LDR_POST) {
MIB.addReg(0);
MIB.addImm(ARM_AM::getAM2Opc(ARM_AM::add, 4, ARM_AM::no_shift));
} else
MIB.addImm(4);
AddDefaultPred(MIB);
}
Regs.clear();
}
}