This article collects typical usage examples of the C++ SmallSet class. If you have been wondering what exactly SmallSet is for, how to use SmallSet, or what real SmallSet usage looks like, the hand-picked class examples below may help.
A total of 15 SmallSet code examples are shown below, extracted from open-source projects (primarily LLVM) and sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
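Before the examples, here is a minimal, self-contained sketch of direct SmallSet usage. It is not taken from the examples below and only assumes LLVM's ADT headers; note that the return type of insert() has changed across LLVM releases (older releases returned a plain bool, newer ones return a pair whose .second indicates a first-time insertion), and both styles appear in the examples that follow.
#include "llvm/ADT/SmallSet.h"

void smallSetBasics() {
  // Up to 8 elements are stored inline; larger sets spill to a heap-backed set.
  llvm::SmallSet<unsigned, 8> Regs;

  Regs.insert(5);       // recent LLVM: returns a pair; .second is true if newly inserted
  Regs.insert(7);
  if (Regs.count(5))    // membership test: returns 0 or 1
    Regs.erase(5);      // remove an element
}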
Example 1: FoldImmediate
/// FoldImmediate - Try folding register operands that are defined by move
/// immediate instructions, i.e. a trivial constant folding optimization, if
/// and only if the def and use are in the same BB.
bool PeepholeOptimizer::FoldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
SmallSet<unsigned, 4> &ImmDefRegs,
DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || MO.isDef())
continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg))
continue;
if (ImmDefRegs.count(Reg) == 0)
continue;
DenseMap<unsigned, MachineInstr*>::iterator II = ImmDefMIs.find(Reg);
assert(II != ImmDefMIs.end());
if (TII->FoldImmediate(MI, II->second, Reg, MRI)) {
++NumImmFold;
return true;
}
}
return false;
}
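For context: FoldImmediate only consumes ImmDefRegs and ImmDefMIs; they are populated earlier in the pass when a move-immediate instruction is seen. The following is a hedged sketch of that companion routine (isMoveImmediate, called from examples 10 and 13) — the actual LLVM implementation may differ in detail.
bool PeepholeOptimizer::isMoveImmediate(MachineInstr *MI,
                                        SmallSet<unsigned, 4> &ImmDefRegs,
                                        DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
  const MCInstrDesc &MCID = MI->getDesc();
  if (!MI->isMoveImmediate() || MCID.getNumDefs() != 1)
    return false;
  unsigned Reg = MI->getOperand(0).getReg();
  if (TargetRegisterInfo::isVirtualRegister(Reg)) {
    // Record both the register (for the cheap membership test in
    // FoldImmediate) and the defining instruction (for the actual fold).
    ImmDefMIs.insert(std::make_pair(Reg, MI));
    ImmDefRegs.insert(Reg);
    return true;
  }
  return false;
}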
Example 2: isLoadFoldable
/// isLoadFoldable - Check whether MI is a candidate for folding into a later
/// instruction. We only fold loads into virtual registers, and only when the
/// defined virtual register has a single use.
bool PeepholeOptimizer::isLoadFoldable(
MachineInstr *MI,
SmallSet<unsigned, 16> &FoldAsLoadDefCandidates) {
if (!MI->canFoldAsLoad() || !MI->mayLoad())
return false;
const MCInstrDesc &MCID = MI->getDesc();
if (MCID.getNumDefs() != 1)
return false;
unsigned Reg = MI->getOperand(0).getReg();
// To reduce compilation time, we check MRI->hasOneNonDBGUse when inserting
// loads. It should be checked when processing uses of the load, since
// uses can be removed during peephole.
if (!MI->getOperand(0).getSubReg() &&
TargetRegisterInfo::isVirtualRegister(Reg) &&
MRI->hasOneNonDBGUse(Reg)) {
FoldAsLoadDefCandidates.insert(Reg);
return true;
}
return false;
}
Example 3: HandlePhysRegDef
void LiveVariables::HandlePhysRegDef(unsigned Reg, MachineInstr *MI,
SmallVector<unsigned, 4> &Defs) {
// What parts of the register are previously defined?
SmallSet<unsigned, 32> Live;
if (PhysRegDef[Reg] || PhysRegUse[Reg]) {
Live.insert(Reg);
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
Live.insert(*SubRegs);
} else {
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
unsigned SubReg = *SubRegs;
// If a register isn't itself defined, but all of the parts that make it
// up are defined, then consider it defined as well.
// e.g.
// AL =
// AH =
// = AX
if (Live.count(SubReg))
continue;
if (PhysRegDef[SubReg] || PhysRegUse[SubReg]) {
Live.insert(SubReg);
for (MCSubRegIterator SS(SubReg, TRI); SS.isValid(); ++SS)
Live.insert(*SS);
}
}
}
// Start from the largest piece, find the last time any part of the register
// is referenced.
HandlePhysRegKill(Reg, MI);
// Only some of the sub-registers are used.
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
unsigned SubReg = *SubRegs;
if (!Live.count(SubReg))
// Skip if this sub-register isn't defined.
continue;
HandlePhysRegKill(SubReg, MI);
}
if (MI)
Defs.push_back(Reg); // Remember this def.
}
Example 4: createTopOrderingFrom
// Find a topological ordering starting from BB, writing the result to Result and using Visited to keep track of visited blocks.
// LI is the LoopInfo object for BB's parent function and MyL is the loop context we're currently exploring.
// Child loops are handled by continuing our own top-ordering from the loop exit blocks and then independently
// ordering the loop's blocks disregarding its latch edge.
void llvm::createTopOrderingFrom(BasicBlock* BB, std::vector<BasicBlock*>& Result, SmallSet<BasicBlock*, 8>& Visited, LoopInfo* LI, const Loop* MyL) {
const Loop* BBL = LI ? LI->getLoopFor(BB) : 0;
// Drifted out of scope?
if(MyL != BBL && ((!BBL) || (BBL->contains(MyL))))
return;
// Already been here?
auto insertres = Visited.insert(BB);
if(!insertres.second)
return;
// Follow loop exiting edges if any.
// Ordering here: first the loop's successors, then the loop body (by walking to the header
// with succ_iterator below) and then this block, the preheader.
if(MyL != BBL) {
SmallVector<BasicBlock*, 4> ExitBlocks;
BBL->getExitBlocks(ExitBlocks);
for(SmallVector<BasicBlock*, 4>::iterator it = ExitBlocks.begin(), it2 = ExitBlocks.end(); it != it2; ++it) {
createTopOrderingFrom(*it, Result, Visited, LI, MyL);
}
}
// Explore all successors within this loop. This will enter a child loop if this BB is a preheader;
// note BBL may not equal MyL, but is certainly its child.
for(succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI) {
createTopOrderingFrom(*SI, Result, Visited, LI, BBL);
}
Result.push_back(BB);
}
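The Visited parameter above is the classic depth-first "seen" set. Stripped of the loop handling, the same pattern reduces to an ordinary post-order DFS; the sketch below is illustrative only and reuses LLVM's succ_begin/succ_end CFG successor iterators.
void postOrderFrom(BasicBlock *BB, SmallSet<BasicBlock*, 8> &Visited,
                   std::vector<BasicBlock*> &Result) {
  if (!Visited.insert(BB).second)
    return;                                  // already visited
  for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI)
    postOrderFrom(*SI, Visited, Result);     // visit successors first
  Result.push_back(BB);                      // then emit this block (post-order)
}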
Example 5: ProcessSourceNode
// ProcessSourceNode - Process nodes with source order numbers. These are added
// to a vector which EmitSchedule uses to determine how to insert dbg_value
// instructions in the right order.
static void ProcessSourceNode(SDNode *N, SelectionDAG *DAG,
InstrEmitter &Emitter,
DenseMap<SDValue, unsigned> &VRBaseMap,
SmallVector<std::pair<unsigned, MachineInstr*>, 32> &Orders,
SmallSet<unsigned, 8> &Seen) {
unsigned Order = DAG->GetOrdering(N);
if (!Order || !Seen.insert(Order)) {
// Process any valid SDDbgValues even if the node does not have an order
// assigned.
ProcessSDDbgValues(N, DAG, Emitter, Orders, VRBaseMap, 0);
return;
}
MachineBasicBlock *BB = Emitter.getBlock();
if (Emitter.getInsertPos() == BB->begin() || BB->back().isPHI()) {
// Did not insert any instruction.
Orders.push_back(std::make_pair(Order, (MachineInstr*)0));
return;
}
Orders.push_back(std::make_pair(Order, prior(Emitter.getInsertPos())));
ProcessSDDbgValues(N, DAG, Emitter, Orders, VRBaseMap, Order);
}
Example 6: ProcessSourceNode
// ProcessSourceNode - Process nodes with source order numbers. These are added
// to a vector which EmitSchedule uses to determine how to insert dbg_value
// instructions in the right order.
static void ProcessSourceNode(SDNode *N, SelectionDAG *DAG,
InstrEmitter &Emitter,
DenseMap<SDValue, unsigned> &VRBaseMap,
SmallVector<std::pair<unsigned, MachineInstr*>, 32> &Orders,
SmallSet<unsigned, 8> &Seen) {
unsigned Order = DAG->GetOrdering(N);
if (!Order || !Seen.insert(Order))
return;
MachineBasicBlock *BB = Emitter.getBlock();
if (Emitter.getInsertPos() == BB->begin() || BB->back().isPHI()) {
// Did not insert any instruction.
Orders.push_back(std::make_pair(Order, (MachineInstr*)0));
return;
}
Orders.push_back(std::make_pair(Order, prior(Emitter.getInsertPos())));
if (!N->getHasDebugValue())
return;
// Opportunistically insert immediate dbg_value uses, i.e. those whose
// source order number immediately follows N's.
MachineBasicBlock::iterator InsertPos = Emitter.getInsertPos();
SmallVector<SDDbgValue*,2> &DVs = DAG->GetDbgValues(N);
for (unsigned i = 0, e = DVs.size(); i != e; ++i) {
if (DVs[i]->isInvalidated())
continue;
unsigned DVOrder = DVs[i]->getOrder();
if (DVOrder == ++Order) {
MachineInstr *DbgMI = Emitter.EmitDbgValue(DVs[i], VRBaseMap);
if (DbgMI) {
Orders.push_back(std::make_pair(DVOrder, DbgMI));
BB->insert(InsertPos, DbgMI);
}
DVs[i]->setIsInvalidated();
}
}
}
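Note the `!Seen.insert(Order)` test in both versions of ProcessSourceNode: it relies on an older SmallSet::insert that returned a plain bool ("was the element newly inserted?"). In current LLVM, insert returns a pair, so the equivalent guard reads as in the small sketch below (an assumption about the newer API, shown for comparison only).
static bool isNewOrder(llvm::SmallSet<unsigned, 8> &Seen, unsigned Order) {
  // Old API:  return Seen.insert(Order);
  // New API:  the pair's .second is true only for a first-time insertion.
  return Seen.insert(Order).second;
}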
Example 7: DEBUG
// Handle special instruction operands like early clobbers and tied ops when
// there are additional physreg defines.
void RAFast::handleThroughOperands(MachineInstr *MI,
SmallVectorImpl<unsigned> &VirtDead) {
DEBUG(dbgs() << "Scanning for through registers:");
SmallSet<unsigned, 8> ThroughRegs;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg))
continue;
if (MO.isEarlyClobber() || MI->isRegTiedToDefOperand(i) ||
(MO.getSubReg() && MI->readsVirtualRegister(Reg))) {
if (ThroughRegs.insert(Reg))
DEBUG(dbgs() << ' ' << PrintReg(Reg));
}
}
// If any physreg defines collide with preallocated through registers,
// we must spill and reallocate.
DEBUG(dbgs() << "\nChecking for physdef collisions.\n");
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isDef()) continue;
unsigned Reg = MO.getReg();
if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
UsedInInstr.set(*AI);
if (ThroughRegs.count(PhysRegState[*AI]))
definePhysReg(MI, *AI, regFree);
}
}
SmallVector<unsigned, 8> PartialDefs;
DEBUG(dbgs() << "Allocating tied uses.\n");
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue;
if (MO.isUse()) {
unsigned DefIdx = 0;
if (!MI->isRegTiedToDefOperand(i, &DefIdx)) continue;
DEBUG(dbgs() << "Operand " << i << "("<< MO << ") is tied to operand "
<< DefIdx << ".\n");
LiveRegMap::iterator LRI = reloadVirtReg(MI, i, Reg, 0);
unsigned PhysReg = LRI->PhysReg;
setPhysReg(MI, i, PhysReg);
// Note: we don't update the def operand yet. That would cause the normal
// def-scan to attempt spilling.
} else if (MO.getSubReg() && MI->readsVirtualRegister(Reg)) {
DEBUG(dbgs() << "Partial redefine: " << MO << "\n");
// Reload the register, but don't assign to the operand just yet.
// That would confuse the later phys-def processing pass.
LiveRegMap::iterator LRI = reloadVirtReg(MI, i, Reg, 0);
PartialDefs.push_back(LRI->PhysReg);
}
}
DEBUG(dbgs() << "Allocating early clobbers.\n");
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue;
if (!MO.isEarlyClobber())
continue;
// Note: defineVirtReg may invalidate MO.
LiveRegMap::iterator LRI = defineVirtReg(MI, i, Reg, 0);
unsigned PhysReg = LRI->PhysReg;
if (setPhysReg(MI, i, PhysReg))
VirtDead.push_back(Reg);
}
// Restore UsedInInstr to a state usable for allocating normal virtual uses.
UsedInInstr.reset();
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || (MO.isDef() && !MO.isEarlyClobber())) continue;
unsigned Reg = MO.getReg();
if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
DEBUG(dbgs() << "\tSetting " << PrintReg(Reg, TRI)
<< " as used in instr\n");
UsedInInstr.set(Reg);
}
// Also mark PartialDefs as used to avoid reallocation.
for (unsigned i = 0, e = PartialDefs.size(); i != e; ++i)
UsedInInstr.set(PartialDefs[i]);
}
Example 8: determinePointerReadAttrs
/// Returns Attribute::None, Attribute::ReadOnly or Attribute::ReadNone.
static Attribute::AttrKind
determinePointerReadAttrs(Argument *A,
const SmallPtrSet<Argument *, 8> &SCCNodes) {
SmallVector<Use *, 32> Worklist;
SmallSet<Use *, 32> Visited;
// inalloca arguments are always clobbered by the call.
if (A->hasInAllocaAttr())
return Attribute::None;
bool IsRead = false;
// We don't need to track IsWritten. If A is written to, return immediately.
for (Use &U : A->uses()) {
Visited.insert(&U);
Worklist.push_back(&U);
}
while (!Worklist.empty()) {
Use *U = Worklist.pop_back_val();
Instruction *I = cast<Instruction>(U->getUser());
switch (I->getOpcode()) {
case Instruction::BitCast:
case Instruction::GetElementPtr:
case Instruction::PHI:
case Instruction::Select:
case Instruction::AddrSpaceCast:
// The original value is not read/written via this if the new value isn't.
for (Use &UU : I->uses())
if (Visited.insert(&UU).second)
Worklist.push_back(&UU);
break;
case Instruction::Call:
case Instruction::Invoke: {
bool Captures = true;
if (I->getType()->isVoidTy())
Captures = false;
auto AddUsersToWorklistIfCapturing = [&] {
if (Captures)
for (Use &UU : I->uses())
if (Visited.insert(&UU).second)
Worklist.push_back(&UU);
};
CallSite CS(I);
if (CS.doesNotAccessMemory()) {
AddUsersToWorklistIfCapturing();
continue;
}
Function *F = CS.getCalledFunction();
if (!F) {
if (CS.onlyReadsMemory()) {
IsRead = true;
AddUsersToWorklistIfCapturing();
continue;
}
return Attribute::None;
}
// Note: the callee and the two successor blocks *follow* the argument
// operands. This means there is no need to adjust UseIndex to account
// for these.
unsigned UseIndex = std::distance(CS.arg_begin(), U);
// U cannot be the callee operand use: since we're exploring the
// transitive uses of an Argument, having such a use be a callee would
// imply the CallSite is an indirect call or invoke; and we'd take the
// early exit above.
assert(UseIndex < CS.data_operands_size() &&
"Data operand use expected!");
bool IsOperandBundleUse = UseIndex >= CS.getNumArgOperands();
if (UseIndex >= F->arg_size() && !IsOperandBundleUse) {
assert(F->isVarArg() && "More params than args in non-varargs call");
return Attribute::None;
}
Captures &= !CS.doesNotCapture(UseIndex);
// Since the optimizer (by design) cannot see the data flow corresponding
// to an operand bundle use, these cannot participate in the optimistic SCC
// analysis. Instead, we model the operand bundle uses as arguments in a
// call to a function external to the SCC.
if (IsOperandBundleUse ||
!SCCNodes.count(&*std::next(F->arg_begin(), UseIndex))) {
// The accessors used on CallSite here do the right thing for calls and
// invokes with operand bundles.
if (!CS.onlyReadsMemory() && !CS.onlyReadsMemory(UseIndex))
return Attribute::None;
//......... (part of the code omitted here) .........
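The worklist plus Visited-set idiom that drives determinePointerReadAttrs can be isolated as below; this is a generic sketch (Item is a placeholder type, not from the example) showing why each push is guarded by Visited.insert(...).second.
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"

struct Item { llvm::SmallVector<Item*, 4> Succs; };

void walk(Item *Root) {
  llvm::SmallVector<Item*, 32> Worklist;
  llvm::SmallSet<Item*, 32> Visited;
  Visited.insert(Root);
  Worklist.push_back(Root);
  while (!Worklist.empty()) {
    Item *I = Worklist.pop_back_val();
    for (Item *S : I->Succs)
      if (Visited.insert(S).second)   // push each node at most once
        Worklist.push_back(S);
  }
}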
Example 9: getParent
//......... (part of the code omitted here) .........
if (LV) {
// Restore kills of virtual registers that were killed by the terminators.
while (!KilledRegs.empty()) {
unsigned Reg = KilledRegs.pop_back_val();
for (instr_iterator I = instr_end(), E = instr_begin(); I != E;) {
if (!(--I)->addRegisterKilled(Reg, TRI, /* addIfNotFound= */ false))
continue;
if (TargetRegisterInfo::isVirtualRegister(Reg))
LV->getVarInfo(Reg).Kills.push_back(I);
DEBUG(dbgs() << "Restored terminator kill: " << *I);
break;
}
}
// Update relevant live-through information.
LV->addNewBlock(NMBB, this, Succ);
}
if (LIS) {
// After splitting the edge and updating SlotIndexes, live intervals may be
// in one of two situations, depending on whether this block was the last in
// the function. If the original block was the last in the function, all live
// intervals will end prior to the beginning of the new split block. If the
// original block was not at the end of the function, all live intervals will
// extend to the end of the new split block.
bool isLastMBB =
std::next(MachineFunction::iterator(NMBB)) == getParent()->end();
SlotIndex StartIndex = Indexes->getMBBEndIdx(this);
SlotIndex PrevIndex = StartIndex.getPrevSlot();
SlotIndex EndIndex = Indexes->getMBBEndIdx(NMBB);
// Find the registers used from NMBB in PHIs in Succ.
SmallSet<unsigned, 8> PHISrcRegs;
for (MachineBasicBlock::instr_iterator
I = Succ->instr_begin(), E = Succ->instr_end();
I != E && I->isPHI(); ++I) {
for (unsigned ni = 1, ne = I->getNumOperands(); ni != ne; ni += 2) {
if (I->getOperand(ni+1).getMBB() == NMBB) {
MachineOperand &MO = I->getOperand(ni);
unsigned Reg = MO.getReg();
PHISrcRegs.insert(Reg);
if (MO.isUndef())
continue;
LiveInterval &LI = LIS->getInterval(Reg);
VNInfo *VNI = LI.getVNInfoAt(PrevIndex);
assert(VNI && "PHI sources should be live out of their predecessors.");
LI.addSegment(LiveInterval::Segment(StartIndex, EndIndex, VNI));
}
}
}
MachineRegisterInfo *MRI = &getParent()->getRegInfo();
for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
if (PHISrcRegs.count(Reg) || !LIS->hasInterval(Reg))
continue;
LiveInterval &LI = LIS->getInterval(Reg);
if (!LI.liveAt(PrevIndex))
continue;
bool isLiveOut = LI.liveAt(LIS->getMBBStartIdx(Succ));
if (isLiveOut && isLastMBB) {
VNInfo *VNI = LI.getVNInfoAt(PrevIndex);
//......... (part of the code omitted here) .........
Example 10: runOnMachineFunction
bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
if (DisablePeephole)
return false;
TM = &MF.getTarget();
TII = TM->getInstrInfo();
MRI = &MF.getRegInfo();
DT = Aggressive ? &getAnalysis<MachineDominatorTree>() : 0;
bool Changed = false;
SmallPtrSet<MachineInstr*, 8> LocalMIs;
SmallSet<unsigned, 4> ImmDefRegs;
DenseMap<unsigned, MachineInstr*> ImmDefMIs;
for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
MachineBasicBlock *MBB = &*I;
bool SeenMoveImm = false;
LocalMIs.clear();
ImmDefRegs.clear();
ImmDefMIs.clear();
bool First = true;
MachineBasicBlock::iterator PMII;
for (MachineBasicBlock::iterator
MII = I->begin(), MIE = I->end(); MII != MIE; ) {
MachineInstr *MI = &*MII;
LocalMIs.insert(MI);
if (MI->isLabel() || MI->isPHI() || MI->isImplicitDef() ||
MI->isKill() || MI->isInlineAsm() || MI->isDebugValue() ||
MI->hasUnmodeledSideEffects()) {
++MII;
continue;
}
if (MI->isBitcast()) {
if (optimizeBitcastInstr(MI, MBB)) {
// MI is deleted.
LocalMIs.erase(MI);
Changed = true;
MII = First ? I->begin() : llvm::next(PMII);
continue;
}
} else if (MI->isCompare()) {
if (optimizeCmpInstr(MI, MBB)) {
// MI is deleted.
LocalMIs.erase(MI);
Changed = true;
MII = First ? I->begin() : llvm::next(PMII);
continue;
}
}
if (isMoveImmediate(MI, ImmDefRegs, ImmDefMIs)) {
SeenMoveImm = true;
} else {
Changed |= optimizeExtInstr(MI, MBB, LocalMIs);
if (SeenMoveImm)
Changed |= foldImmediate(MI, MBB, ImmDefRegs, ImmDefMIs);
}
First = false;
PMII = MII;
++MII;
}
}
return Changed;
}
Example 11: HandlePhysRegKill
bool LiveVariables::HandlePhysRegKill(unsigned Reg, MachineInstr *MI) {
MachineInstr *LastDef = PhysRegDef[Reg];
MachineInstr *LastUse = PhysRegUse[Reg];
if (!LastDef && !LastUse)
return false;
MachineInstr *LastRefOrPartRef = LastUse ? LastUse : LastDef;
unsigned LastRefOrPartRefDist = DistanceMap[LastRefOrPartRef];
// The whole register is used.
// AL =
// AH =
//
// = AX
// = AL, AX<imp-use, kill>
// AX =
//
// Or whole register is defined, but not used at all.
// AX<dead> =
// ...
// AX =
//
// Or whole register is defined, but only partly used.
// AX<dead> = AL<imp-def>
// = AL<kill>
// AX =
MachineInstr *LastPartDef = nullptr;
unsigned LastPartDefDist = 0;
SmallSet<unsigned, 8> PartUses;
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
unsigned SubReg = *SubRegs;
MachineInstr *Def = PhysRegDef[SubReg];
if (Def && Def != LastDef) {
// There was a def of this sub-register in between. This is a partial
// def, keep track of the last one.
unsigned Dist = DistanceMap[Def];
if (Dist > LastPartDefDist) {
LastPartDefDist = Dist;
LastPartDef = Def;
}
continue;
}
if (MachineInstr *Use = PhysRegUse[SubReg]) {
for (MCSubRegIterator SS(SubReg, TRI, /*IncludeSelf=*/true); SS.isValid();
++SS)
PartUses.insert(*SS);
unsigned Dist = DistanceMap[Use];
if (Dist > LastRefOrPartRefDist) {
LastRefOrPartRefDist = Dist;
LastRefOrPartRef = Use;
}
}
}
if (!PhysRegUse[Reg]) {
// Partial uses. Mark register def dead and add implicit def of
// sub-registers which are used.
// EAX<dead> = op AL<imp-def>
// That is, the EAX def is dead but the AL def extends past it.
PhysRegDef[Reg]->addRegisterDead(Reg, TRI, true);
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
unsigned SubReg = *SubRegs;
if (!PartUses.count(SubReg))
continue;
bool NeedDef = true;
if (PhysRegDef[Reg] == PhysRegDef[SubReg]) {
MachineOperand *MO = PhysRegDef[Reg]->findRegisterDefOperand(SubReg);
if (MO) {
NeedDef = false;
assert(!MO->isDead());
}
}
if (NeedDef)
PhysRegDef[Reg]->addOperand(MachineOperand::CreateReg(SubReg,
true/*IsDef*/, true/*IsImp*/));
MachineInstr *LastSubRef = FindLastRefOrPartRef(SubReg);
if (LastSubRef)
LastSubRef->addRegisterKilled(SubReg, TRI, true);
else {
LastRefOrPartRef->addRegisterKilled(SubReg, TRI, true);
for (MCSubRegIterator SS(SubReg, TRI, /*IncludeSelf=*/true);
SS.isValid(); ++SS)
PhysRegUse[*SS] = LastRefOrPartRef;
}
for (MCSubRegIterator SS(SubReg, TRI); SS.isValid(); ++SS)
PartUses.erase(*SS);
}
} else if (LastRefOrPartRef == PhysRegDef[Reg] && LastRefOrPartRef != MI) {
if (LastPartDef)
// The last partial def kills the register.
LastPartDef->addOperand(MachineOperand::CreateReg(Reg, false/*IsDef*/,
true/*IsImp*/, true/*IsKill*/));
else {
MachineOperand *MO =
LastRefOrPartRef->findRegisterDefOperand(Reg, false, TRI);
bool NeedEC = MO->isEarlyClobber() && MO->getReg() != Reg;
// If the last reference is the last def, then it's not used at all.
// That is, unless we are currently processing the last reference itself.
LastRefOrPartRef->addRegisterDead(Reg, TRI, true);
if (NeedEC) {
// If we are adding a subreg def and the superreg def is marked early
//......... (part of the code omitted here) .........
Example 12: calculateFrameObjectOffsets
/// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
/// abstract stack objects.
///
void LocalStackSlotPass::calculateFrameObjectOffsets(MachineFunction &Fn) {
// Loop over all of the stack objects, assigning sequential addresses...
MachineFrameInfo *MFI = Fn.getFrameInfo();
const TargetFrameLowering &TFI = *Fn.getTarget().getFrameLowering();
bool StackGrowsDown =
TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
int64_t Offset = 0;
unsigned MaxAlign = 0;
StackProtector *SP = &getAnalysis<StackProtector>();
// Make sure that the stack protector comes before the local variables on the
// stack.
SmallSet<int, 16> ProtectedObjs;
if (MFI->getStackProtectorIndex() >= 0) {
StackObjSet LargeArrayObjs;
StackObjSet SmallArrayObjs;
StackObjSet AddrOfObjs;
AdjustStackOffset(MFI, MFI->getStackProtectorIndex(), Offset,
StackGrowsDown, MaxAlign);
// Assign large stack objects first.
for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
if (MFI->isDeadObjectIndex(i))
continue;
if (MFI->getStackProtectorIndex() == (int)i)
continue;
switch (SP->getSSPLayout(MFI->getObjectAllocation(i))) {
case StackProtector::SSPLK_None:
continue;
case StackProtector::SSPLK_SmallArray:
SmallArrayObjs.insert(i);
continue;
case StackProtector::SSPLK_AddrOf:
AddrOfObjs.insert(i);
continue;
case StackProtector::SSPLK_LargeArray:
LargeArrayObjs.insert(i);
continue;
}
llvm_unreachable("Unexpected SSPLayoutKind.");
}
AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
Offset, MaxAlign);
AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
Offset, MaxAlign);
AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
Offset, MaxAlign);
}
// Then assign frame offsets to stack objects that are not used to spill
// callee saved registers.
for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
if (MFI->isDeadObjectIndex(i))
continue;
if (MFI->getStackProtectorIndex() == (int)i)
continue;
if (ProtectedObjs.count(i))
continue;
AdjustStackOffset(MFI, i, Offset, StackGrowsDown, MaxAlign);
}
// Remember how big this blob of stack space is
MFI->setLocalFrameSize(Offset);
MFI->setLocalFrameMaxAlign(MaxAlign);
}
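ProtectedObjs is only queried with count() in the loop above; it is populated inside the AssignProtectedObjSet helper. A hedged sketch of that helper (the real LLVM version may differ slightly) makes the flow clearer:
static void AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
                                  SmallSet<int, 16> &ProtectedObjs,
                                  MachineFrameInfo *MFI, bool StackGrowsDown,
                                  int64_t &Offset, unsigned &MaxAlign) {
  for (StackObjSet::const_iterator I = UnassignedObjs.begin(),
         E = UnassignedObjs.end(); I != E; ++I) {
    AdjustStackOffset(MFI, *I, Offset, StackGrowsDown, MaxAlign);
    // Remember which frame indices have already been laid out so the main
    // loop in calculateFrameObjectOffsets can skip them.
    ProtectedObjs.insert(*I);
  }
}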
Example 13: DEBUG
bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
if (skipOptnoneFunction(*MF.getFunction()))
return false;
DEBUG(dbgs() << "********** PEEPHOLE OPTIMIZER **********\n");
DEBUG(dbgs() << "********** Function: " << MF.getName() << '\n');
if (DisablePeephole)
return false;
TM = &MF.getTarget();
TII = TM->getInstrInfo();
MRI = &MF.getRegInfo();
DT = Aggressive ? &getAnalysis<MachineDominatorTree>() : nullptr;
bool Changed = false;
for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
MachineBasicBlock *MBB = &*I;
bool SeenMoveImm = false;
SmallPtrSet<MachineInstr*, 8> LocalMIs;
SmallSet<unsigned, 4> ImmDefRegs;
DenseMap<unsigned, MachineInstr*> ImmDefMIs;
SmallSet<unsigned, 16> FoldAsLoadDefCandidates;
for (MachineBasicBlock::iterator
MII = I->begin(), MIE = I->end(); MII != MIE; ) {
MachineInstr *MI = &*MII;
// We may be erasing MI below, increment MII now.
++MII;
LocalMIs.insert(MI);
// Skip debug values. They should not affect this peephole optimization.
if (MI->isDebugValue())
continue;
// If there exists an instruction which belongs to the following
// categories, we will discard the load candidates.
if (MI->isPosition() || MI->isPHI() || MI->isImplicitDef() ||
MI->isKill() || MI->isInlineAsm() ||
MI->hasUnmodeledSideEffects()) {
FoldAsLoadDefCandidates.clear();
continue;
}
if (MI->mayStore() || MI->isCall())
FoldAsLoadDefCandidates.clear();
if (((MI->isBitcast() || MI->isCopy()) && optimizeCopyOrBitcast(MI)) ||
(MI->isCompare() && optimizeCmpInstr(MI, MBB)) ||
(MI->isSelect() && optimizeSelect(MI))) {
// MI is deleted.
LocalMIs.erase(MI);
Changed = true;
continue;
}
if (isMoveImmediate(MI, ImmDefRegs, ImmDefMIs)) {
SeenMoveImm = true;
} else {
Changed |= optimizeExtInstr(MI, MBB, LocalMIs);
// optimizeExtInstr might have created new instructions after MI
// and before the already incremented MII. Adjust MII so that the
// next iteration sees the new instructions.
MII = MI;
++MII;
if (SeenMoveImm)
Changed |= foldImmediate(MI, MBB, ImmDefRegs, ImmDefMIs);
}
// Check whether MI is a load candidate for folding into a later
// instruction. If MI is not a candidate, check whether we can fold an
// earlier load into MI.
if (!isLoadFoldable(MI, FoldAsLoadDefCandidates) &&
!FoldAsLoadDefCandidates.empty()) {
const MCInstrDesc &MIDesc = MI->getDesc();
for (unsigned i = MIDesc.getNumDefs(); i != MIDesc.getNumOperands();
++i) {
const MachineOperand &MOp = MI->getOperand(i);
if (!MOp.isReg())
continue;
unsigned FoldAsLoadDefReg = MOp.getReg();
if (FoldAsLoadDefCandidates.count(FoldAsLoadDefReg)) {
// We need to fold load after optimizeCmpInstr, since
// optimizeCmpInstr can enable folding by converting SUB to CMP.
// Save FoldAsLoadDefReg because optimizeLoadInstr() resets it and
// we need it for markUsesInDebugValueAsUndef().
unsigned FoldedReg = FoldAsLoadDefReg;
MachineInstr *DefMI = nullptr;
MachineInstr *FoldMI = TII->optimizeLoadInstr(MI, MRI,
FoldAsLoadDefReg,
DefMI);
if (FoldMI) {
// Update LocalMIs since we replaced MI with FoldMI and deleted
// DefMI.
DEBUG(dbgs() << "Replacing: " << *MI);
DEBUG(dbgs() << " With: " << *FoldMI);
LocalMIs.erase(MI);
LocalMIs.erase(DefMI);
LocalMIs.insert(FoldMI);
//......... (part of the code omitted here) .........
Example 14: verifyCTRBranch
static bool verifyCTRBranch(MachineBasicBlock *MBB,
MachineBasicBlock::iterator I) {
MachineBasicBlock::iterator BI = I;
SmallSet<MachineBasicBlock *, 16> Visited;
SmallVector<MachineBasicBlock *, 8> Preds;
bool CheckPreds;
if (I == MBB->begin()) {
Visited.insert(MBB);
goto queue_preds;
} else
--I;
check_block:
Visited.insert(MBB);
if (I == MBB->end())
goto queue_preds;
CheckPreds = true;
for (MachineBasicBlock::iterator IE = MBB->begin();; --I) {
unsigned Opc = I->getOpcode();
if (Opc == PPC::MTCTRloop || Opc == PPC::MTCTR8loop) {
CheckPreds = false;
break;
}
if (I != BI && clobbersCTR(I)) {
DEBUG(dbgs() << "BB#" << MBB->getNumber() << " (" <<
MBB->getFullName() << ") instruction " << *I <<
" clobbers CTR, invalidating " << "BB#" <<
BI->getParent()->getNumber() << " (" <<
BI->getParent()->getFullName() << ") instruction " <<
*BI << "\n");
return false;
}
if (I == IE)
break;
}
if (!CheckPreds && Preds.empty())
return true;
if (CheckPreds) {
queue_preds:
if (MachineFunction::iterator(MBB) == MBB->getParent()->begin()) {
DEBUG(dbgs() << "Unable to find a MTCTR instruction for BB#" <<
BI->getParent()->getNumber() << " (" <<
BI->getParent()->getFullName() << ") instruction " <<
*BI << "\n");
return false;
}
for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
PIE = MBB->pred_end(); PI != PIE; ++PI)
Preds.push_back(*PI);
}
do {
MBB = Preds.pop_back_val();
if (!Visited.count(MBB)) {
I = MBB->getLastNonDebugInstr();
goto check_block;
}
} while (!Preds.empty());
return true;
}
Example 15: LLVM_DEBUG
// Handles special instruction operands like early clobbers and tied ops when
// there are additional physreg defines.
void RegAllocFast::handleThroughOperands(MachineInstr &MI,
SmallVectorImpl<unsigned> &VirtDead) {
LLVM_DEBUG(dbgs() << "Scanning for through registers:");
SmallSet<unsigned, 8> ThroughRegs;
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg))
continue;
if (MO.isEarlyClobber() || (MO.isUse() && MO.isTied()) ||
(MO.getSubReg() && MI.readsVirtualRegister(Reg))) {
if (ThroughRegs.insert(Reg).second)
LLVM_DEBUG(dbgs() << ' ' << printReg(Reg));
}
}
// If any physreg defines collide with preallocated through registers,
// we must spill and reallocate.
LLVM_DEBUG(dbgs() << "\nChecking for physdef collisions.\n");
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isReg() || !MO.isDef()) continue;
unsigned Reg = MO.getReg();
if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
markRegUsedInInstr(Reg);
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
if (ThroughRegs.count(PhysRegState[*AI]))
definePhysReg(MI, *AI, regFree);
}
}
SmallVector<unsigned, 8> PartialDefs;
LLVM_DEBUG(dbgs() << "Allocating tied uses.\n");
for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
MachineOperand &MO = MI.getOperand(I);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue;
if (MO.isUse()) {
if (!MO.isTied()) continue;
LLVM_DEBUG(dbgs() << "Operand " << I << "(" << MO
<< ") is tied to operand " << MI.findTiedOperandIdx(I)
<< ".\n");
LiveReg &LR = reloadVirtReg(MI, I, Reg, 0);
MCPhysReg PhysReg = LR.PhysReg;
setPhysReg(MI, MO, PhysReg);
// Note: we don't update the def operand yet. That would cause the normal
// def-scan to attempt spilling.
} else if (MO.getSubReg() && MI.readsVirtualRegister(Reg)) {
LLVM_DEBUG(dbgs() << "Partial redefine: " << MO << '\n');
// Reload the register, but don't assign to the operand just yet.
// That would confuse the later phys-def processing pass.
LiveReg &LR = reloadVirtReg(MI, I, Reg, 0);
PartialDefs.push_back(LR.PhysReg);
}
}
LLVM_DEBUG(dbgs() << "Allocating early clobbers.\n");
for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
const MachineOperand &MO = MI.getOperand(I);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue;
if (!MO.isEarlyClobber())
continue;
// Note: defineVirtReg may invalidate MO.
MCPhysReg PhysReg = defineVirtReg(MI, I, Reg, 0);
if (setPhysReg(MI, MI.getOperand(I), PhysReg))
VirtDead.push_back(Reg);
}
// Restore UsedInInstr to a state usable for allocating normal virtual uses.
UsedInInstr.clear();
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isReg() || (MO.isDef() && !MO.isEarlyClobber())) continue;
unsigned Reg = MO.getReg();
if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
LLVM_DEBUG(dbgs() << "\tSetting " << printReg(Reg, TRI)
<< " as used in instr\n");
markRegUsedInInstr(Reg);
}
// Also mark PartialDefs as used to avoid reallocation.
for (unsigned PartialDef : PartialDefs)
markRegUsedInInstr(PartialDef);
}