This article collects typical usage examples of the C++ method MachineFunction::iterator::end. If you are wondering how iterator::end is used in practice, or looking for concrete examples that call iterator::end, the curated code samples below may help. You can also explore further usage examples of the enclosing class, MachineFunction::iterator.
The following shows 14 code examples of the iterator::end method, sorted by popularity by default.
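Before the individual examples, here is a minimal sketch of the iteration idiom they all share: MachineFunction::begin()/end() walk the basic blocks of a function, and MachineBasicBlock::begin()/end() walk the instructions of each block, with end() acting as the one-past-the-last sentinel in both loops. The pass name (CountBlocksPass) and the counting logic are illustrative assumptions, not code taken from any example below; note also that several of the examples re-evaluate end() inside the loop condition because the instruction stream may change while iterating.

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {
struct CountBlocksPass : public MachineFunctionPass {
  static char ID;
  CountBlocksPass() : MachineFunctionPass(ID) {}

  virtual bool runOnMachineFunction(MachineFunction &MF) {
    unsigned NumBlocks = 0, NumInstrs = 0;
    // MF.end() is one past the last basic block; the loop stops when the
    // running iterator reaches it.
    for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
      ++NumBlocks;
      // The same begin()/end() pattern applies to the instructions of a block.
      for (MachineBasicBlock::iterator MII = I->begin(), MIE = I->end();
           MII != MIE; ++MII)
        ++NumInstrs;
    }
    dbgs() << MF.getName() << ": " << NumBlocks << " blocks, "
           << NumInstrs << " instructions\n";
    return false; // The function was only inspected, not modified.
  }
};
}

char CountBlocksPass::ID = 0;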
Example 1: runOnMachineFunction
bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
if (skipOptnoneFunction(*MF.getFunction()))
return false;
DEBUG(dbgs() << "********** PEEPHOLE OPTIMIZER **********\n");
DEBUG(dbgs() << "********** Function: " << MF.getName() << '\n');
if (DisablePeephole)
return false;
TM = &MF.getTarget();
TII = TM->getInstrInfo();
MRI = &MF.getRegInfo();
DT = Aggressive ? &getAnalysis<MachineDominatorTree>() : nullptr;
bool Changed = false;
for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
MachineBasicBlock *MBB = &*I;
bool SeenMoveImm = false;
SmallPtrSet<MachineInstr*, 8> LocalMIs;
SmallSet<unsigned, 4> ImmDefRegs;
DenseMap<unsigned, MachineInstr*> ImmDefMIs;
SmallSet<unsigned, 16> FoldAsLoadDefCandidates;
for (MachineBasicBlock::iterator
MII = I->begin(), MIE = I->end(); MII != MIE; ) {
MachineInstr *MI = &*MII;
// We may be erasing MI below, increment MII now.
++MII;
LocalMIs.insert(MI);
// Skip debug values. They should not affect this peephole optimization.
if (MI->isDebugValue())
continue;
// If there exists an instruction which belongs to the following
// categories, we will discard the load candidates.
if (MI->isPosition() || MI->isPHI() || MI->isImplicitDef() ||
MI->isKill() || MI->isInlineAsm() ||
MI->hasUnmodeledSideEffects()) {
FoldAsLoadDefCandidates.clear();
continue;
}
if (MI->mayStore() || MI->isCall())
FoldAsLoadDefCandidates.clear();
if (((MI->isBitcast() || MI->isCopy()) && optimizeCopyOrBitcast(MI)) ||
(MI->isCompare() && optimizeCmpInstr(MI, MBB)) ||
(MI->isSelect() && optimizeSelect(MI))) {
// MI is deleted.
LocalMIs.erase(MI);
Changed = true;
continue;
}
if (isMoveImmediate(MI, ImmDefRegs, ImmDefMIs)) {
SeenMoveImm = true;
} else {
Changed |= optimizeExtInstr(MI, MBB, LocalMIs);
// optimizeExtInstr might have created new instructions after MI
// and before the already incremented MII. Adjust MII so that the
// next iteration sees the new instructions.
MII = MI;
++MII;
if (SeenMoveImm)
Changed |= foldImmediate(MI, MBB, ImmDefRegs, ImmDefMIs);
}
// Check whether MI is a load candidate for folding into a later
// instruction. If MI is not a candidate, check whether we can fold an
// earlier load into MI.
if (!isLoadFoldable(MI, FoldAsLoadDefCandidates) &&
!FoldAsLoadDefCandidates.empty()) {
const MCInstrDesc &MIDesc = MI->getDesc();
for (unsigned i = MIDesc.getNumDefs(); i != MIDesc.getNumOperands();
++i) {
const MachineOperand &MOp = MI->getOperand(i);
if (!MOp.isReg())
continue;
unsigned FoldAsLoadDefReg = MOp.getReg();
if (FoldAsLoadDefCandidates.count(FoldAsLoadDefReg)) {
// We need to fold load after optimizeCmpInstr, since
// optimizeCmpInstr can enable folding by converting SUB to CMP.
// Save FoldAsLoadDefReg because optimizeLoadInstr() resets it and
// we need it for markUsesInDebugValueAsUndef().
unsigned FoldedReg = FoldAsLoadDefReg;
MachineInstr *DefMI = nullptr;
MachineInstr *FoldMI = TII->optimizeLoadInstr(MI, MRI,
FoldAsLoadDefReg,
DefMI);
if (FoldMI) {
// Update LocalMIs since we replaced MI with FoldMI and deleted
// DefMI.
DEBUG(dbgs() << "Replacing: " << *MI);
DEBUG(dbgs() << " With: " << *FoldMI);
LocalMIs.erase(MI);
LocalMIs.erase(DefMI);
LocalMIs.insert(FoldMI);
//......... (remaining code of this example omitted) .........
Example 2: scavengeFrameVirtualRegs
/// scavengeFrameVirtualRegs - Replace all frame index virtual registers
/// with physical registers. Use the register scavenger to find an
/// appropriate register to use.
///
/// FIXME: Iterating over the instruction stream is unnecessary. We can simply
/// iterate over the vreg use list, which at this point only contains machine
/// operands for which eliminateFrameIndex needs a new scratch reg.
void PEI::scavengeFrameVirtualRegs(MachineFunction &Fn) {
// Run through the instructions and find any virtual registers.
for (MachineFunction::iterator BB = Fn.begin(),
E = Fn.end(); BB != E; ++BB) {
RS->enterBasicBlock(BB);
int SPAdj = 0;
// The instruction stream may change in the loop, so check BB->end()
// directly.
for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ) {
// We might end up here again with a NULL iterator if we scavenged a
// register for which we inserted spill code for definition by what was
// originally the first instruction in BB.
if (I == MachineBasicBlock::iterator(NULL))
I = BB->begin();
MachineInstr *MI = I;
MachineBasicBlock::iterator J = llvm::next(I);
MachineBasicBlock::iterator P = I == BB->begin() ?
MachineBasicBlock::iterator(NULL) : llvm::prior(I);
// RS should process this instruction before we might scavenge at this
// location. This is because we might be replacing a virtual register
// defined by this instruction, and if so, registers killed by this
// instruction are available, and defined registers are not.
RS->forward(I);
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
if (MI->getOperand(i).isReg()) {
MachineOperand &MO = MI->getOperand(i);
unsigned Reg = MO.getReg();
if (Reg == 0)
continue;
if (!TargetRegisterInfo::isVirtualRegister(Reg))
continue;
// When we first encounter a new virtual register, it
// must be a definition.
assert(MI->getOperand(i).isDef() &&
"frame index virtual missing def!");
// Scavenge a new scratch register
const TargetRegisterClass *RC = Fn.getRegInfo().getRegClass(Reg);
unsigned ScratchReg = RS->scavengeRegister(RC, J, SPAdj);
++NumScavengedRegs;
// Replace this reference to the virtual register with the
// scratch register.
assert (ScratchReg && "Missing scratch register!");
Fn.getRegInfo().replaceRegWith(Reg, ScratchReg);
// Because this instruction was processed by the RS before this
// register was allocated, make sure that the RS now records the
// register as being used.
RS->setUsed(ScratchReg);
}
}
// If the scavenger needed to use one of its spill slots, the
// spill code will have been inserted in between I and J. This is a
// problem because we need the spill code before I: Move I to just
// prior to J.
if (I != llvm::prior(J)) {
BB->splice(J, BB, I);
// Before we move I, we need to prepare the RS to visit I again.
// Specifically, RS will assert if it sees uses of registers that
// it believes are undefined. Because we have already processed
// register kills in I, when it visits I again, it will believe that
// those registers are undefined. To avoid this situation, unprocess
// the instruction I.
assert(RS->getCurrentPosition() == I &&
"The register scavenger has an unexpected position");
I = P;
RS->unprocess(P);
} else
++I;
}
}
}
Example 3: runOnMachineFunction
bool StrongPHIElimination::runOnMachineFunction(MachineFunction &Fn) {
LiveIntervals& LI = getAnalysis<LiveIntervals>();
// Compute DFS numbers of each block
computeDFS(Fn);
// Determine which phi node operands need copies
for (MachineFunction::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
if (!I->empty() && I->begin()->isPHI())
processBlock(I);
// Break interferences where two different phis want to coalesce
// in the same register.
std::set<unsigned> seen;
typedef std::map<unsigned, std::map<unsigned, MachineBasicBlock*> >
RenameSetType;
for (RenameSetType::iterator I = RenameSets.begin(), E = RenameSets.end();
I != E; ++I) {
for (std::map<unsigned, MachineBasicBlock*>::iterator
OI = I->second.begin(), OE = I->second.end(); OI != OE; ) {
if (!seen.count(OI->first)) {
seen.insert(OI->first);
++OI;
} else {
Waiting[OI->second].insert(std::make_pair(OI->first, I->first));
unsigned reg = OI->first;
++OI;
I->second.erase(reg);
DEBUG(dbgs() << "Removing Renaming: " << reg << " -> " << I->first
<< "\n");
}
}
}
// Insert copies
// FIXME: This process should probably preserve LiveIntervals
SmallPtrSet<MachineBasicBlock*, 16> visited;
MachineDominatorTree& MDT = getAnalysis<MachineDominatorTree>();
InsertCopies(MDT.getRootNode(), visited);
// Perform renaming
for (RenameSetType::iterator I = RenameSets.begin(), E = RenameSets.end();
I != E; ++I)
while (I->second.size()) {
std::map<unsigned, MachineBasicBlock*>::iterator SI = I->second.begin();
DEBUG(dbgs() << "Renaming: " << SI->first << " -> " << I->first << "\n");
if (SI->first != I->first) {
if (mergeLiveIntervals(I->first, SI->first)) {
Fn.getRegInfo().replaceRegWith(SI->first, I->first);
if (RenameSets.count(SI->first)) {
I->second.insert(RenameSets[SI->first].begin(),
RenameSets[SI->first].end());
RenameSets.erase(SI->first);
}
} else {
// Insert a last-minute copy if a conflict was detected.
const TargetInstrInfo *TII = Fn.getTarget().getInstrInfo();
const TargetRegisterClass *RC = Fn.getRegInfo().getRegClass(I->first);
TII->copyRegToReg(*SI->second, SI->second->getFirstTerminator(),
I->first, SI->first, RC, RC, DebugLoc());
LI.renumber();
LiveInterval& Int = LI.getOrCreateInterval(I->first);
SlotIndex instrIdx =
LI.getInstructionIndex(--SI->second->getFirstTerminator());
if (Int.liveAt(instrIdx.getDefIndex()))
Int.removeRange(instrIdx.getDefIndex(),
LI.getMBBEndIdx(SI->second).getNextSlot(), true);
LiveRange R = LI.addLiveRangeToEndOfBlock(I->first,
--SI->second->getFirstTerminator());
R.valno->setCopy(--SI->second->getFirstTerminator());
R.valno->def = instrIdx.getDefIndex();
DEBUG(dbgs() << "Renaming failed: " << SI->first << " -> "
<< I->first << "\n");
}
}
LiveInterval& Int = LI.getOrCreateInterval(I->first);
const LiveRange* LR =
Int.getLiveRangeContaining(LI.getMBBEndIdx(SI->second));
LR->valno->setHasPHIKill(true);
I->second.erase(SI->first);
}
// Remove PHIs
std::vector<MachineInstr*> phis;
for (MachineFunction::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) {
for (MachineBasicBlock::iterator BI = I->begin(), BE = I->end();
BI != BE; ++BI)
if (BI->isPHI())
phis.push_back(BI);
}
//......... (remaining code of this example omitted) .........
Example 4: replaceFrameIndices
/// replaceFrameIndices - Replace all MO_FrameIndex operands with physical
/// register references and actual offsets.
///
void PEI::replaceFrameIndices(MachineFunction &Fn) {
if (!Fn.getFrameInfo()->hasStackObjects()) return; // Nothing to do?
const TargetMachine &TM = Fn.getTarget();
assert(TM.getRegisterInfo() && "TM::getRegisterInfo() must be implemented!");
const TargetInstrInfo &TII = *Fn.getTarget().getInstrInfo();
const TargetRegisterInfo &TRI = *TM.getRegisterInfo();
const TargetFrameLowering *TFI = TM.getFrameLowering();
bool StackGrowsDown =
TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
int FrameSetupOpcode = TII.getCallFrameSetupOpcode();
int FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();
for (MachineFunction::iterator BB = Fn.begin(),
E = Fn.end(); BB != E; ++BB) {
#ifndef NDEBUG
int SPAdjCount = 0; // frame setup / destroy count.
#endif
int SPAdj = 0; // SP offset due to call frame setup / destroy.
if (RS && !FrameIndexVirtualScavenging) RS->enterBasicBlock(BB);
for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ) {
if (I->getOpcode() == FrameSetupOpcode ||
I->getOpcode() == FrameDestroyOpcode) {
#ifndef NDEBUG
// Track whether we see even pairs of them
SPAdjCount += I->getOpcode() == FrameSetupOpcode ? 1 : -1;
#endif
// Remember how much SP has been adjusted to create the call
// frame.
int Size = I->getOperand(0).getImm();
if ((!StackGrowsDown && I->getOpcode() == FrameSetupOpcode) ||
(StackGrowsDown && I->getOpcode() == FrameDestroyOpcode))
Size = -Size;
SPAdj += Size;
MachineBasicBlock::iterator PrevI = BB->end();
if (I != BB->begin()) PrevI = prior(I);
TFI->eliminateCallFramePseudoInstr(Fn, *BB, I);
// Visit the instructions created by eliminateCallFramePseudoInstr().
if (PrevI == BB->end())
I = BB->begin(); // The replaced instr was the first in the block.
else
I = llvm::next(PrevI);
continue;
}
MachineInstr *MI = I;
bool DoIncr = true;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
if (!MI->getOperand(i).isFI())
continue;
// Some instructions (e.g. inline asm instructions) can have
// multiple frame indices and/or cause eliminateFrameIndex
// to insert more than one instruction. We need the register
// scavenger to go through all of these instructions so that
// it can update its register information. We keep the
// iterator at the point before insertion so that we can
// revisit them in full.
bool AtBeginning = (I == BB->begin());
if (!AtBeginning) --I;
// If this instruction has a FrameIndex operand, we need to
// use that target machine register info object to eliminate
// it.
TRI.eliminateFrameIndex(MI, SPAdj, i,
FrameIndexVirtualScavenging ? NULL : RS);
// Reset the iterator if we were at the beginning of the BB.
if (AtBeginning) {
I = BB->begin();
DoIncr = false;
}
MI = 0;
break;
}
if (DoIncr && I != BB->end()) ++I;
// Update register states.
if (RS && !FrameIndexVirtualScavenging && MI) RS->forward(MI);
}
// If we have evenly matched pairs of frame setup / destroy instructions,
// make sure the adjustments come out to zero. If we don't have matched
// pairs, we can't be sure the missing bit isn't in another basic block
// due to a custom inserter playing tricks, so just asserting SPAdj==0
// isn't sufficient. See tMOVCC on Thumb1, for example.
assert((SPAdjCount || SPAdj == 0) &&
"Unbalanced call frame setup / destroy pairs?");
}
//......... (remaining code of this example omitted) .........
Example 5: runOnMachineFunction
bool StrongPHIElimination::runOnMachineFunction(MachineFunction &MF) {
MRI = &MF.getRegInfo();
TII = MF.getTarget().getInstrInfo();
DT = &getAnalysis<MachineDominatorTree>();
LI = &getAnalysis<LiveIntervals>();
for (MachineFunction::iterator I = MF.begin(), E = MF.end();
I != E; ++I) {
for (MachineBasicBlock::iterator BBI = I->begin(), BBE = I->end();
BBI != BBE && BBI->isPHI(); ++BBI) {
unsigned DestReg = BBI->getOperand(0).getReg();
addReg(DestReg);
PHISrcDefs[I].push_back(BBI);
for (unsigned i = 1; i < BBI->getNumOperands(); i += 2) {
MachineOperand &SrcMO = BBI->getOperand(i);
unsigned SrcReg = SrcMO.getReg();
addReg(SrcReg);
unionRegs(DestReg, SrcReg);
MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
if (DefMI)
PHISrcDefs[DefMI->getParent()].push_back(DefMI);
}
}
}
// Perform a depth-first traversal of the dominator tree, splitting
// interferences amongst PHI-congruence classes.
DenseMap<unsigned, unsigned> CurrentDominatingParent;
DenseMap<unsigned, unsigned> ImmediateDominatingParent;
for (df_iterator<MachineDomTreeNode*> DI = df_begin(DT->getRootNode()),
DE = df_end(DT->getRootNode()); DI != DE; ++DI) {
SplitInterferencesForBasicBlock(*DI->getBlock(),
CurrentDominatingParent,
ImmediateDominatingParent);
}
// Insert copies for all PHI source and destination registers.
for (MachineFunction::iterator I = MF.begin(), E = MF.end();
I != E; ++I) {
for (MachineBasicBlock::iterator BBI = I->begin(), BBE = I->end();
BBI != BBE && BBI->isPHI(); ++BBI) {
InsertCopiesForPHI(BBI, I);
}
}
// FIXME: Preserve the equivalence classes during copy insertion and use
// the preserved equivalence classes instead of recomputing them.
RegNodeMap.clear();
for (MachineFunction::iterator I = MF.begin(), E = MF.end();
I != E; ++I) {
for (MachineBasicBlock::iterator BBI = I->begin(), BBE = I->end();
BBI != BBE && BBI->isPHI(); ++BBI) {
unsigned DestReg = BBI->getOperand(0).getReg();
addReg(DestReg);
for (unsigned i = 1; i < BBI->getNumOperands(); i += 2) {
unsigned SrcReg = BBI->getOperand(i).getReg();
addReg(SrcReg);
unionRegs(DestReg, SrcReg);
}
}
}
DenseMap<unsigned, unsigned> RegRenamingMap;
bool Changed = false;
for (MachineFunction::iterator I = MF.begin(), E = MF.end();
I != E; ++I) {
MachineBasicBlock::iterator BBI = I->begin(), BBE = I->end();
while (BBI != BBE && BBI->isPHI()) {
MachineInstr *PHI = BBI;
assert(PHI->getNumOperands() > 0);
unsigned SrcReg = PHI->getOperand(1).getReg();
unsigned SrcColor = getRegColor(SrcReg);
unsigned NewReg = RegRenamingMap[SrcColor];
if (!NewReg) {
NewReg = SrcReg;
RegRenamingMap[SrcColor] = SrcReg;
}
MergeLIsAndRename(SrcReg, NewReg);
unsigned DestReg = PHI->getOperand(0).getReg();
if (!InsertedDestCopies.count(DestReg))
MergeLIsAndRename(DestReg, NewReg);
for (unsigned i = 3; i < PHI->getNumOperands(); i += 2) {
unsigned SrcReg = PHI->getOperand(i).getReg();
MergeLIsAndRename(SrcReg, NewReg);
}
++BBI;
LI->RemoveMachineInstrFromMaps(PHI);
PHI->eraseFromParent();
Changed = true;
}
}
//......... (remaining code of this example omitted) .........
Example 6: runOnMachineFunction
bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
if (DisablePeephole)
return false;
TM = &MF.getTarget();
TII = TM->getInstrInfo();
MRI = &MF.getRegInfo();
DT = Aggressive ? &getAnalysis<MachineDominatorTree>() : 0;
bool Changed = false;
SmallPtrSet<MachineInstr*, 8> LocalMIs;
SmallSet<unsigned, 4> ImmDefRegs;
DenseMap<unsigned, MachineInstr*> ImmDefMIs;
unsigned FoldAsLoadDefReg;
for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
MachineBasicBlock *MBB = &*I;
bool SeenMoveImm = false;
LocalMIs.clear();
ImmDefRegs.clear();
ImmDefMIs.clear();
FoldAsLoadDefReg = 0;
for (MachineBasicBlock::iterator
MII = I->begin(), MIE = I->end(); MII != MIE; ) {
MachineInstr *MI = &*MII;
// We may be erasing MI below, increment MII now.
++MII;
LocalMIs.insert(MI);
// If there exists an instruction which belongs to the following
// categories, we will discard the load candidate.
if (MI->isLabel() || MI->isPHI() || MI->isImplicitDef() ||
MI->isKill() || MI->isInlineAsm() || MI->isDebugValue() ||
MI->hasUnmodeledSideEffects()) {
FoldAsLoadDefReg = 0;
continue;
}
if (MI->mayStore() || MI->isCall())
FoldAsLoadDefReg = 0;
if ((MI->isBitcast() && optimizeBitcastInstr(MI, MBB)) ||
(MI->isCompare() && optimizeCmpInstr(MI, MBB)) ||
(MI->isSelect() && optimizeSelect(MI))) {
// MI is deleted.
LocalMIs.erase(MI);
Changed = true;
continue;
}
if (isMoveImmediate(MI, ImmDefRegs, ImmDefMIs)) {
SeenMoveImm = true;
} else {
Changed |= optimizeExtInstr(MI, MBB, LocalMIs);
// optimizeExtInstr might have created new instructions after MI
// and before the already incremented MII. Adjust MII so that the
// next iteration sees the new instructions.
MII = MI;
++MII;
if (SeenMoveImm)
Changed |= foldImmediate(MI, MBB, ImmDefRegs, ImmDefMIs);
}
// Check whether MI is a load candidate for folding into a later
// instruction. If MI is not a candidate, check whether we can fold an
// earlier load into MI.
if (!isLoadFoldable(MI, FoldAsLoadDefReg) && FoldAsLoadDefReg) {
// We need to fold load after optimizeCmpInstr, since optimizeCmpInstr
// can enable folding by converting SUB to CMP.
MachineInstr *DefMI = 0;
MachineInstr *FoldMI = TII->optimizeLoadInstr(MI, MRI,
FoldAsLoadDefReg, DefMI);
if (FoldMI) {
// Update LocalMIs since we replaced MI with FoldMI and deleted DefMI.
LocalMIs.erase(MI);
LocalMIs.erase(DefMI);
LocalMIs.insert(FoldMI);
MI->eraseFromParent();
DefMI->eraseFromParent();
++NumLoadFold;
// MI is replaced with FoldMI.
Changed = true;
continue;
}
}
}
}
return Changed;
}
Example 7: runOnMachineFunction
bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
if (DisablePeephole)
return false;
TM = &MF.getTarget();
TII = TM->getInstrInfo();
MRI = &MF.getRegInfo();
DT = Aggressive ? &getAnalysis<MachineDominatorTree>() : 0;
bool Changed = false;
SmallPtrSet<MachineInstr*, 8> LocalMIs;
SmallSet<unsigned, 4> ImmDefRegs;
DenseMap<unsigned, MachineInstr*> ImmDefMIs;
for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
MachineBasicBlock *MBB = &*I;
bool SeenMoveImm = false;
LocalMIs.clear();
ImmDefRegs.clear();
ImmDefMIs.clear();
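// Remember the previously visited instruction (PMII) so that when MI is
// erased below, iteration can resume from the instruction right after it;
// First marks the case where MI was the first instruction in the block.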
bool First = true;
MachineBasicBlock::iterator PMII;
for (MachineBasicBlock::iterator
MII = I->begin(), MIE = I->end(); MII != MIE; ) {
MachineInstr *MI = &*MII;
LocalMIs.insert(MI);
if (MI->isLabel() || MI->isPHI() || MI->isImplicitDef() ||
MI->isKill() || MI->isInlineAsm() || MI->isDebugValue() ||
MI->hasUnmodeledSideEffects()) {
++MII;
continue;
}
if (MI->isBitcast()) {
if (optimizeBitcastInstr(MI, MBB)) {
// MI is deleted.
LocalMIs.erase(MI);
Changed = true;
MII = First ? I->begin() : llvm::next(PMII);
continue;
}
} else if (MI->isCompare()) {
if (optimizeCmpInstr(MI, MBB)) {
// MI is deleted.
LocalMIs.erase(MI);
Changed = true;
MII = First ? I->begin() : llvm::next(PMII);
continue;
}
}
if (isMoveImmediate(MI, ImmDefRegs, ImmDefMIs)) {
SeenMoveImm = true;
} else {
Changed |= optimizeExtInstr(MI, MBB, LocalMIs);
if (SeenMoveImm)
Changed |= foldImmediate(MI, MBB, ImmDefRegs, ImmDefMIs);
}
First = false;
PMII = MII;
++MII;
}
}
return Changed;
}
Example 8: insertFrameReferenceRegisters
bool LocalStackSlotPass::insertFrameReferenceRegisters(MachineFunction &Fn) {
// Scan the function's instructions looking for frame index references.
// For each, ask the target if it wants a virtual base register for it
// based on what we can tell it about where the local will end up in the
// stack frame. If it wants one, re-use a suitable one we've previously
// allocated, or if there isn't one that fits the bill, allocate a new one
// and ask the target to create a defining instruction for it.
bool UsedBaseReg = false;
MachineFrameInfo *MFI = Fn.getFrameInfo();
const TargetRegisterInfo *TRI = Fn.getTarget().getRegisterInfo();
const TargetFrameLowering &TFI = *Fn.getTarget().getFrameLowering();
bool StackGrowsDown =
TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
// Collect all of the instructions in the block that reference
// a frame index. Also store the frame index referenced to ease later
// lookup. (For any insn that has more than one FI reference, we arbitrarily
// choose the first one).
SmallVector<FrameRef, 64> FrameReferenceInsns;
// A base register definition is a register + offset pair.
SmallVector<std::pair<unsigned, int64_t>, 8> BaseRegisters;
for (MachineFunction::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ++I) {
MachineInstr *MI = I;
// Debug value instructions can't be out of range, so they don't need
// any updates.
if (MI->isDebugValue())
continue;
// For now, allocate the base register(s) within the basic block
// where they're used, and don't try to keep them around outside
// of that. It may be beneficial to try sharing them more broadly
// than that, but the increased register pressure makes that a
// tricky thing to balance. Investigate if re-materializing these
// becomes an issue.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
// Consider replacing all frame index operands that reference
// an object allocated in the local block.
if (MI->getOperand(i).isFI()) {
// Don't try this with values not in the local block.
if (!MFI->isObjectPreAllocated(MI->getOperand(i).getIndex()))
break;
FrameReferenceInsns.
push_back(FrameRef(MI, LocalOffsets[MI->getOperand(i).getIndex()]));
break;
}
}
}
}
// Sort the frame references by local offset
array_pod_sort(FrameReferenceInsns.begin(), FrameReferenceInsns.end());
MachineBasicBlock *Entry = Fn.begin();
// Loop through the frame references and allocate for them as necessary.
for (int ref = 0, e = FrameReferenceInsns.size(); ref < e ; ++ref) {
MachineBasicBlock::iterator I =
FrameReferenceInsns[ref].getMachineInstr();
MachineInstr *MI = I;
for (unsigned idx = 0, e = MI->getNumOperands(); idx != e; ++idx) {
// Consider replacing all frame index operands that reference
// an object allocated in the local block.
if (MI->getOperand(idx).isFI()) {
int FrameIdx = MI->getOperand(idx).getIndex();
assert(MFI->isObjectPreAllocated(FrameIdx) &&
"Only pre-allocated locals expected!");
DEBUG(dbgs() << "Considering: " << *MI);
if (TRI->needsFrameBaseReg(MI, LocalOffsets[FrameIdx])) {
unsigned BaseReg = 0;
int64_t Offset = 0;
int64_t FrameSizeAdjust =
StackGrowsDown ? MFI->getLocalFrameSize() : 0;
DEBUG(dbgs() << " Replacing FI in: " << *MI);
// If we have a suitable base register available, use it; otherwise
// create a new one. Note that any offset encoded in the
// instruction itself will be taken into account by the target,
// so we don't have to adjust for it here when reusing a base
// register.
std::pair<unsigned, int64_t> RegOffset;
if (lookupCandidateBaseReg(BaseRegisters, RegOffset,
FrameSizeAdjust,
LocalOffsets[FrameIdx],
MI, TRI)) {
DEBUG(dbgs() << " Reusing base register " <<
RegOffset.first << "\n");
// We found a register to reuse.
BaseReg = RegOffset.first;
Offset = FrameSizeAdjust + LocalOffsets[FrameIdx] -
RegOffset.second;
} else {
// No previously defined register was in range, so create a
//......... (remaining code of this example omitted) .........
Example 9: runOnMachineFunction
bool BitLevelInfo::runOnMachineFunction(MachineFunction &MF) {
MRI = &MF.getRegInfo();
VFInfo *VFI = MF.getInfo<VFInfo>();
// No need to run the pass if bitwidth information not available anymore.
if (!VFI->isBitWidthAnnotated())
return false;
// Annotate the bit width information to target flag.
for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
BI != BE; ++BI)
for (MachineBasicBlock::iterator I = BI->begin(), E = BI->end();
I != E; ++I) {
MachineInstr &Instr = *I;
bool isShifts = false;
switch (Instr.getOpcode()) {
default: break;
case VTM::IMPLICIT_DEF:
continue;
case VTM::VOpRet: {
// Setup the bit width for predicate operand.
MachineOperand &Op = Instr.getOperand(0);
VInstrInfo::setBitWidth(Op, 1);
continue;
}
case VTM::VOpToState:
case VTM::VOpToStateb: {
// Setup the bit width for predicate operand.
MachineOperand &Op = Instr.getOperand(0);
if (Op.isImm()) {
assert(Op.getImm() && "Unexpected 'never' in unconditional branch!");
Op.ChangeToRegister(0, false);
VInstrInfo::setBitWidth(Op, 1);
}
MachineOperand &Pred = Instr.getOperand(2);
if (Pred.isImm()) {
assert(Pred.getImm() && "Unexpected 'never' in unconditional branch!");
Pred.ChangeToRegister(0, false);
VInstrInfo::setBitWidth(Pred, 1);
}
continue;
}
case VTM::COPY: case VTM::PHI:
continue;
case VTM::VOpSRA: case VTM::VOpSRA_c:
case VTM::VOpSRL: case VTM::VOpSRL_c:
case VTM::VOpSHL: case VTM::VOpSHL_c:
isShifts = true;
break;
}
BitWidthAnnotator Annotator(Instr);
if (isShifts) {
// Fix the RHS operand width.
Annotator.setBitWidth(Log2_32_Ceil(Annotator.getBitWidth(1)), 2);
Annotator.updateBitWidth();
}
for (unsigned i = 0, e = Instr.getNumOperands() - 2; i < e; ++i) {
MachineOperand &MO = Instr.getOperand(i);
if (!MO.isReg() && !MO.isImm() && !MO.isSymbol()) continue;
// Do not disturb the original target flags.
if (MO.isSymbol() && MO.getTargetFlags() != 0) continue;
unsigned BitWidth = Annotator.getBitWidthOrZero(i);
if (BitWidth == 0) {
// Already have bitwidth information.
if (MO.getTargetFlags()) continue;
assert(Instr.getOpcode() == VTM::VOpInternalCall && MO.isImm()
&& "Bitwidth info not available!");
BitWidth = 64;
}
bool Changed = updateBitWidth(MO, BitWidth);
if (MO.isReg() && MO.isDef() && Changed)
propagateBitWidth(MO);
}
Annotator.changeToDefaultPred();
}
DEBUG(dbgs() << "---------- After bit width annotation.\n");
DEBUG(MF.dump());
// Tell the MachineFunctionInfo that we have changed all annotators to the
// default predicate operand.
VFI->removeBitWidthAnnotators();
return false;
}
Example 10: Packetizer
bool
MSPUPacketizer::runOnMachineFunction(MachineFunction &Fn)
{
const TargetInstrInfo *TII = Fn.getTarget().getInstrInfo();
MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
// Instantiate the packetizer.
MSPUPacketizerList Packetizer(Fn, MLI, MDT);
// DFA state table should not be empty.
assert(Packetizer.getResourceTracker() && "Empty DFA table!");
//
// Loop over all basic blocks and remove KILL pseudo-instructions
// These instructions confuse the dependence analysis. Consider:
// D0 = ... (Insn 0)
// R0 = KILL R0, D0 (Insn 1)
// R0 = ... (Insn 2)
// Here, Insn 1 will result in the dependence graph not emitting an output
// dependence between Insn 0 and Insn 2. This can lead to incorrect
// packetization
//
for(MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
MBB != MBBe; ++MBB) {
MachineBasicBlock::iterator End = MBB->end();
MachineBasicBlock::iterator MI = MBB->begin();
while(MI != End) {
if(MI->isKill()) {
MachineBasicBlock::iterator DeleteMI = MI;
++MI;
MBB->erase(DeleteMI);
End = MBB->end();
continue;
}
++MI;
}
}
// Loop over all of the basic blocks.
for(MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
MBB != MBBe; ++MBB) {
// Find scheduling regions and schedule / packetize each region.
unsigned RemainingCount = MBB->size();
for(MachineBasicBlock::iterator RegionEnd = MBB->end();
RegionEnd != MBB->begin();) {
// The next region starts above the previous region. Look backward in the
// instruction stream until we find the nearest boundary.
MachineBasicBlock::iterator I = RegionEnd;
for(; I != MBB->begin(); --I, --RemainingCount) {
if(TII->isSchedulingBoundary(llvm::prior(I), MBB, Fn))
break;
}
I = MBB->begin();
// Skip empty scheduling regions.
if(I == RegionEnd) {
RegionEnd = llvm::prior(RegionEnd);
--RemainingCount;
continue;
}
// Skip regions with one instruction.
if(I == llvm::prior(RegionEnd)) {
RegionEnd = llvm::prior(RegionEnd);
continue;
}
// PacketizeMIs() does a VLIW scheduling on MachineInstr list and packetizing.
Packetizer.PacketizeMIs(MBB, I, RegionEnd);
RegionEnd = I;
}
}
return true;
}
Example 11: rewrite
void VirtRegMap::rewrite(SlotIndexes *Indexes) {
DEBUG(dbgs() << "********** REWRITE VIRTUAL REGISTERS **********\n"
<< "********** Function: "
<< MF->getFunction()->getName() << '\n');
DEBUG(dump());
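// Physical super-registers that will need implicit <kill>, <dead>, or <def>
// operands added after rewriting an instruction that accessed a virtual
// register through a sub-register index.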
SmallVector<unsigned, 8> SuperDeads;
SmallVector<unsigned, 8> SuperDefs;
SmallVector<unsigned, 8> SuperKills;
for (MachineFunction::iterator MBBI = MF->begin(), MBBE = MF->end();
MBBI != MBBE; ++MBBI) {
DEBUG(MBBI->print(dbgs(), Indexes));
for (MachineBasicBlock::iterator MII = MBBI->begin(), MIE = MBBI->end();
MII != MIE;) {
MachineInstr *MI = MII;
++MII;
for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
MOE = MI->operands_end(); MOI != MOE; ++MOI) {
MachineOperand &MO = *MOI;
if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
continue;
unsigned VirtReg = MO.getReg();
unsigned PhysReg = getPhys(VirtReg);
assert(PhysReg != NO_PHYS_REG && "Instruction uses unmapped VirtReg");
// Preserve semantics of sub-register operands.
if (MO.getSubReg()) {
// A virtual register kill refers to the whole register, so we may
// have to add <imp-use,kill> operands for the super-register. A
// partial redef always kills and redefines the super-register.
if (MO.readsReg() && (MO.isDef() || MO.isKill()))
SuperKills.push_back(PhysReg);
if (MO.isDef()) {
// The <def,undef> flag only makes sense for sub-register defs, and
// we are substituting a full physreg. An <imp-use,kill> operand
// from the SuperKills list will represent the partial read of the
// super-register.
MO.setIsUndef(false);
// Also add implicit defs for the super-register.
if (MO.isDead())
SuperDeads.push_back(PhysReg);
else
SuperDefs.push_back(PhysReg);
}
// PhysReg operands cannot have subregister indexes.
PhysReg = TRI->getSubReg(PhysReg, MO.getSubReg());
assert(PhysReg && "Invalid SubReg for physical register");
MO.setSubReg(0);
}
// Rewrite. Note we could have used MachineOperand::substPhysReg(), but
// we need the inlining here.
MO.setReg(PhysReg);
}
// Add any missing super-register kills after rewriting the whole
// instruction.
while (!SuperKills.empty())
MI->addRegisterKilled(SuperKills.pop_back_val(), TRI, true);
while (!SuperDeads.empty())
MI->addRegisterDead(SuperDeads.pop_back_val(), TRI, true);
while (!SuperDefs.empty())
MI->addRegisterDefined(SuperDefs.pop_back_val(), TRI);
DEBUG(dbgs() << "> " << *MI);
// Finally, remove any identity copies.
if (MI->isIdentityCopy()) {
++NumIdCopies;
if (MI->getNumOperands() == 2) {
DEBUG(dbgs() << "Deleting identity copy.\n");
RemoveMachineInstrFromMaps(MI);
if (Indexes)
Indexes->removeMachineInstrFromMaps(MI);
// It's safe to erase MI because MII has already been incremented.
MI->eraseFromParent();
} else {
// Transform identity copy to a KILL to deal with subregisters.
MI->setDesc(TII->get(TargetOpcode::KILL));
DEBUG(dbgs() << "Identity copy: " << *MI);
}
}
}
}
// Tell MRI about physical registers in use.
for (unsigned Reg = 1, RegE = TRI->getNumRegs(); Reg != RegE; ++Reg)
if (!MRI->reg_nodbg_empty(Reg))
MRI->setPhysRegUsed(Reg);
}
Example 12: scavengeFrameVirtualRegs
/// scavengeFrameVirtualRegs - Replace all frame index virtual registers
/// with physical registers. Use the register scavenger to find an
/// appropriate register to use.
void PEI::scavengeFrameVirtualRegs(MachineFunction &Fn) {
// Run through the instructions and find any virtual registers.
for (MachineFunction::iterator BB = Fn.begin(),
E = Fn.end(); BB != E; ++BB) {
RS->enterBasicBlock(BB);
// FIXME: The logic flow in this function is still too convoluted.
// It needs a cleanup refactoring. Do that in preparation for tracking
// more than one scratch register value and using ranges to find
// available scratch registers.
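// Per-block scavenging state: the frame-index virtual currently being
// rewritten, the scratch register assigned to it, and (when the target
// reported one) the constant value held in that scratch register so a later
// virtual carrying the same value can simply re-use it.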
unsigned CurrentVirtReg = 0;
unsigned CurrentScratchReg = 0;
bool havePrevValue = false;
TargetRegisterInfo::FrameIndexValue PrevValue(0,0);
TargetRegisterInfo::FrameIndexValue Value(0,0);
MachineInstr *PrevLastUseMI = NULL;
unsigned PrevLastUseOp = 0;
bool trackingCurrentValue = false;
int SPAdj = 0;
// The instruction stream may change in the loop, so check BB->end()
// directly.
for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ) {
MachineInstr *MI = I;
bool isDefInsn = false;
bool isKillInsn = false;
bool clobbersScratchReg = false;
bool DoIncr = true;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
if (MI->getOperand(i).isReg()) {
MachineOperand &MO = MI->getOperand(i);
unsigned Reg = MO.getReg();
if (Reg == 0)
continue;
if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
// If we have a previous scratch reg, check and see if anything
// here kills whatever value is in there.
if (Reg == CurrentScratchReg) {
if (MO.isUse()) {
// Two-address operands implicitly kill
if (MO.isKill() || MI->isRegTiedToDefOperand(i))
clobbersScratchReg = true;
} else {
assert (MO.isDef());
clobbersScratchReg = true;
}
}
continue;
}
// If this is a def, remember that this insn defines the value.
// This lets us properly consider insns which re-use the scratch
// register, such as r2 = sub r2, #imm, in the middle of the
// scratch range.
if (MO.isDef())
isDefInsn = true;
// Have we already allocated a scratch register for this virtual?
if (Reg != CurrentVirtReg) {
// When we first encounter a new virtual register, it
// must be a definition.
assert(MI->getOperand(i).isDef() &&
"frame index virtual missing def!");
// We can't have nested virtual register live ranges because
// there's only a guarantee of one scavenged register at a time.
assert (CurrentVirtReg == 0 &&
"overlapping frame index virtual registers!");
// If the target gave us information about what's in the register,
// we can use that to re-use scratch regs.
DenseMap<unsigned, FrameConstantEntry>::iterator Entry =
FrameConstantRegMap.find(Reg);
trackingCurrentValue = Entry != FrameConstantRegMap.end();
if (trackingCurrentValue) {
SPAdj = (*Entry).second.second;
Value = (*Entry).second.first;
} else {
SPAdj = 0;
Value.first = 0;
Value.second = 0;
}
// If the scratch register from the last allocation is still
// available, see if the value matches. If it does, just re-use it.
if (trackingCurrentValue && havePrevValue && PrevValue == Value) {
// FIXME: This assumes that the instructions in the live range
// for the virtual register are exclusively for the purpose
// of populating the value in the register. That's reasonable
// for these frame index registers, but it's still a very, very
// strong assumption. rdar://7322732. Better would be to
// explicitly check each instruction in the range for references
// to the virtual register. Only delete those insns that
// touch the virtual register.
// Find the last use of the new virtual register. Remove all
// instruction between here and there, and update the current
// instruction to reference the last use insn instead.
MachineBasicBlock::iterator LastUseMI =
//......... (remaining code of this example omitted) .........
Example 13: replaceFrameIndices
/// replaceFrameIndices - Replace all MO_FrameIndex operands with physical
/// register references and actual offsets.
///
void PEI::replaceFrameIndices(MachineFunction &Fn) {
if (!Fn.getFrameInfo()->hasStackObjects()) return; // Nothing to do?
const TargetMachine &TM = Fn.getTarget();
assert(TM.getRegisterInfo() && "TM::getRegisterInfo() must be implemented!");
const TargetRegisterInfo &TRI = *TM.getRegisterInfo();
const TargetFrameInfo *TFI = TM.getFrameInfo();
bool StackGrowsDown =
TFI->getStackGrowthDirection() == TargetFrameInfo::StackGrowsDown;
int FrameSetupOpcode = TRI.getCallFrameSetupOpcode();
int FrameDestroyOpcode = TRI.getCallFrameDestroyOpcode();
for (MachineFunction::iterator BB = Fn.begin(),
E = Fn.end(); BB != E; ++BB) {
int SPAdj = 0; // SP offset due to call frame setup / destroy.
if (RS && !FrameIndexVirtualScavenging) RS->enterBasicBlock(BB);
for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ) {
if (I->getOpcode() == FrameSetupOpcode ||
I->getOpcode() == FrameDestroyOpcode) {
// Remember how much SP has been adjusted to create the call
// frame.
int Size = I->getOperand(0).getImm();
if ((!StackGrowsDown && I->getOpcode() == FrameSetupOpcode) ||
(StackGrowsDown && I->getOpcode() == FrameDestroyOpcode))
Size = -Size;
SPAdj += Size;
MachineBasicBlock::iterator PrevI = BB->end();
if (I != BB->begin()) PrevI = prior(I);
TRI.eliminateCallFramePseudoInstr(Fn, *BB, I);
// Visit the instructions created by eliminateCallFramePseudoInstr().
if (PrevI == BB->end())
I = BB->begin(); // The replaced instr was the first in the block.
else
I = llvm::next(PrevI);
continue;
}
MachineInstr *MI = I;
bool DoIncr = true;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i)
if (MI->getOperand(i).isFI()) {
// Some instructions (e.g. inline asm instructions) can have
// multiple frame indices and/or cause eliminateFrameIndex
// to insert more than one instruction. We need the register
// scavenger to go through all of these instructions so that
// it can update its register information. We keep the
// iterator at the point before insertion so that we can
// revisit them in full.
bool AtBeginning = (I == BB->begin());
if (!AtBeginning) --I;
// If this instruction has a FrameIndex operand, we need to
// use that target machine register info object to eliminate
// it.
TargetRegisterInfo::FrameIndexValue Value;
unsigned VReg =
TRI.eliminateFrameIndex(MI, SPAdj, &Value,
FrameIndexVirtualScavenging ? NULL : RS);
if (VReg) {
assert (FrameIndexVirtualScavenging &&
"Not scavenging, but virtual returned from "
"eliminateFrameIndex()!");
FrameConstantRegMap[VReg] = FrameConstantEntry(Value, SPAdj);
}
// Reset the iterator if we were at the beginning of the BB.
if (AtBeginning) {
I = BB->begin();
DoIncr = false;
}
MI = 0;
break;
}
if (DoIncr && I != BB->end()) ++I;
// Update register states.
if (RS && !FrameIndexVirtualScavenging && MI) RS->forward(MI);
}
assert(SPAdj == 0 && "Unbalanced call frame setup / destroy pairs?");
}
}
Example 14: runOnMachineFunction
bool GCMachineCodeFixup::runOnMachineFunction(MachineFunction &MF) {
// Quick exit for functions that do not use GC.
if (!MF.getFunction()->hasGC())
return false;
const TargetMachine &TM = MF.getTarget();
const TargetInstrInfo *TII = TM.getInstrInfo();
GCModuleInfo &GMI = getAnalysis<GCModuleInfo>();
GCFunctionInfo &GCFI = GMI.getFunctionInfo(*MF.getFunction());
for (MachineFunction::iterator MBBI = MF.begin(),
MBBE = MF.end(); MBBI != MBBE; ++MBBI) {
for (MachineBasicBlock::iterator MII = MBBI->begin(),
MIE = MBBI->end(); MII != MIE;) {
if (!MII->isGCRegRoot() || !MII->getOperand(0).isReg()) {
++MII;
continue;
}
// Trace the register back to its location at the site of the call (either
// a physical reg or a frame index).
bool TracingReg = true;
unsigned TracedReg = MII->getOperand(0).getReg();
int FrameIndex;
MachineBasicBlock::iterator PrevII = MII;
for (--PrevII;; --PrevII) {
if (PrevII->isGCRegRoot() && PrevII->getOperand(0).isReg())
break;
if (PrevII->isCall())
break;
int FI;
// Trace back through register reloads.
unsigned Reg =
TM.getInstrInfo()->isLoadFromStackSlotPostFE(&*PrevII, FI);
if (Reg) {
// This is a reload. If we're tracing this register, start tracing the
// frame index instead.
if (TracingReg && TracedReg == Reg) {
TracingReg = false;
FrameIndex = FI;
}
continue;
}
// Trace back through spills.
if (TM.getInstrInfo()->isStoreToStackSlotPostFE(&*PrevII, FI))
continue;
// Trace back through register-to-register copies.
if (PrevII->isCopy()) {
if (TracingReg && TracedReg == PrevII->getOperand(0).getReg())
TracedReg = PrevII->getOperand(1).getReg();
continue;
}
// Trace back through non-register GC_REG_ROOT instructions.
if (PrevII->isGCRegRoot() && !PrevII->getOperand(0).isReg())
continue;
DEBUG(dbgs() << "Bad instruction: " << *PrevII);
llvm_unreachable("GC_REG_ROOT found in an unexpected location!");
}
// Now we've reached either a call or another GC_REG_ROOT instruction.
// Move the GC_REG_ROOT instruction we're considering to the right place,
// and rewrite it if necessary.
//
// Also, tell the GCFunctionInfo about the frame index, since this is
// our only chance -- the frame indices will be deleted by the time
// GCMachineCodeAnalysis runs.
++PrevII;
unsigned RootIndex = MII->getOperand(1).getImm();
MachineInstr *NewMI;
if (TracingReg) {
MachineInstrBuilder MIB = BuildMI(MF, MII->getDebugLoc(),
TII->get(TargetOpcode::GC_REG_ROOT));
MIB.addReg(TracedReg).addImm(RootIndex);
NewMI = MIB;
} else {
NewMI = TII->emitFrameIndexGCRegRoot(MF, FrameIndex, RootIndex,
MII->getDebugLoc());
GCFI.spillRegRoot(RootIndex, FrameIndex);
}
MBBI->insert(PrevII, NewMI);
MachineBasicBlock::iterator NextII = MII;
++NextII;
MII->eraseFromParent();
MII = NextII;
}
}
return true;
}