This article collects typical code examples showing how the C++ method SmallSetVector::pop_back_val is used. If you have been wondering what SmallSetVector::pop_back_val does and how to call it, the hand-picked examples below should help. You can also read more about the containing class, SmallSetVector.
The following shows four code examples of SmallSetVector::pop_back_val, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps surface better C++ code examples.
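Before the examples, the short sketch below (a standalone, hypothetical worklist, not taken from any of the examples on this page) shows the basic pattern they all rely on: insert() ignores duplicate elements, and pop_back_val() removes and returns the most recently inserted element, so a SmallSetVector works as a LIFO worklist that never holds the same item twice.

#include "llvm/ADT/SetVector.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Minimal sketch: drain a deduplicating worklist with pop_back_val().
static void drainWorklist() {
  SmallSetVector<int, 8> WorkList;

  WorkList.insert(1);
  WorkList.insert(3);
  WorkList.insert(3); // duplicate, ignored: 3 is stored only once
  WorkList.insert(7);

  // pop_back_val() removes and returns the last (most recently inserted)
  // element; loop until the worklist is empty.
  while (!WorkList.empty()) {
    int V = WorkList.pop_back_val();
    errs() << "visiting " << V << "\n"; // visits 7, 3, 1
  }
}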
Example 1: propagateSiblingValue
/// propagateSiblingValue - Propagate the value in SVI to dependents if it is
/// known. Otherwise remember the dependency for later.
///
/// @param SVIIter SibValues entry to propagate.
/// @param VNI Dependent value, or NULL to propagate to all saved dependents.
void InlineSpiller::propagateSiblingValue(SibValueMap::iterator SVIIter,
                                          VNInfo *VNI) {
  SibValueMap::value_type *SVI = &*SVIIter;

  // When VNI is non-NULL, add it to SVI's deps, and only propagate to that.
  TinyPtrVector<VNInfo*> FirstDeps;
  if (VNI) {
    FirstDeps.push_back(VNI);
    SVI->second.Deps.push_back(VNI);
  }

  // Has the value been completely determined yet? If not, defer propagation.
  if (!SVI->second.hasDef())
    return;

  // Work list of values to propagate.
  SmallSetVector<SibValueMap::value_type *, 8> WorkList;
  WorkList.insert(SVI);

  do {
    SVI = WorkList.pop_back_val();
    TinyPtrVector<VNInfo*> *Deps = VNI ? &FirstDeps : &SVI->second.Deps;
    VNI = 0;

    SibValueInfo &SV = SVI->second;
    if (!SV.SpillMBB)
      SV.SpillMBB = LIS.getMBBFromIndex(SV.SpillVNI->def);

    DEBUG(dbgs() << " prop to " << Deps->size() << ": "
                 << SVI->first->id << '@' << SVI->first->def << ":\t" << SV);

    assert(SV.hasDef() && "Propagating undefined value");

    // Should this value be propagated as a preferred spill candidate? We don't
    // propagate values of registers that are about to spill.
    bool PropSpill = !DisableHoisting && !isRegToSpill(SV.SpillReg);
    unsigned SpillDepth = ~0u;

    for (TinyPtrVector<VNInfo*>::iterator DepI = Deps->begin(),
         DepE = Deps->end(); DepI != DepE; ++DepI) {
      SibValueMap::iterator DepSVI = SibValues.find(*DepI);
      assert(DepSVI != SibValues.end() && "Dependent value not in SibValues");
      SibValueInfo &DepSV = DepSVI->second;
      if (!DepSV.SpillMBB)
        DepSV.SpillMBB = LIS.getMBBFromIndex(DepSV.SpillVNI->def);
      bool Changed = false;

      // Propagate defining instruction.
      if (!DepSV.hasDef()) {
        Changed = true;
        DepSV.DefMI = SV.DefMI;
        DepSV.DefByOrigPHI = SV.DefByOrigPHI;
      }

      // Propagate AllDefsAreReloads. For PHI values, this computes an AND of
      // all predecessors.
      if (!SV.AllDefsAreReloads && DepSV.AllDefsAreReloads) {
        Changed = true;
        DepSV.AllDefsAreReloads = false;
      }

      // Propagate best spill value.
      if (PropSpill && SV.SpillVNI != DepSV.SpillVNI) {
        if (SV.SpillMBB == DepSV.SpillMBB) {
          // DepSV is in the same block. Hoist when dominated.
          if (DepSV.KillsSource && SV.SpillVNI->def < DepSV.SpillVNI->def) {
            // This is an alternative def earlier in the same MBB.
            // Hoist the spill as far as possible in SpillMBB. This can ease
            // register pressure:
            //
            //   x = def
            //   y = use x
            //   s = copy x
            //
            // Hoisting the spill of s to immediately after the def removes the
            // interference between x and y:
            //
            //   x = def
            //   spill x
            //   y = use x<kill>
            //
            // This hoist only helps when the DepSV copy kills its source.
            Changed = true;
            DepSV.SpillReg = SV.SpillReg;
            DepSV.SpillVNI = SV.SpillVNI;
            DepSV.SpillMBB = SV.SpillMBB;
          }
        } else {
          // DepSV is in a different block.
          if (SpillDepth == ~0u)
            SpillDepth = Loops.getLoopDepth(SV.SpillMBB);

          // Also hoist spills to blocks with smaller loop depth, but make sure
          // that the new value dominates. Non-phi dependents are always
//......... part of the code is omitted here .........
Example 2: Analyzer
/// \brief Figure out if the loop is worth full unrolling.
///
/// Complete loop unrolling can make some loads constant, and we need to know
/// if that would expose any further optimization opportunities. This routine
/// estimates this optimization. It computes cost of unrolled loop
/// (UnrolledCost) and dynamic cost of the original loop (RolledDynamicCost). By
/// dynamic cost we mean that we won't count costs of blocks that are known not
/// to be executed (i.e. if we have a branch in the loop and we know that at the
/// given iteration its condition would be resolved to true, we won't add up the
/// cost of the 'false'-block).
/// \returns Optional value, holding the RolledDynamicCost and UnrolledCost. If
/// the analysis failed (no benefits expected from the unrolling, or the loop is
/// too big to analyze), the returned value is None.
static Optional<EstimatedUnrollCost>
analyzeLoopUnrollCost(const Loop *L, unsigned TripCount, DominatorTree &DT,
                      ScalarEvolution &SE, const TargetTransformInfo &TTI,
                      int MaxUnrolledLoopSize) {
  // We want to be able to scale offsets by the trip count and add more offsets
  // to them without checking for overflows, and we already don't want to
  // analyze *massive* trip counts, so we force the max to be reasonably small.
  assert(UnrollMaxIterationsCountToAnalyze < (INT_MAX / 2) &&
         "The unroll iterations max is too large!");

  // Only analyze inner loops. We can't properly estimate cost of nested loops
  // and we won't visit inner loops again anyway.
  if (!L->empty())
    return None;

  // Don't simulate loops with a big or unknown tripcount
  if (!UnrollMaxIterationsCountToAnalyze || !TripCount ||
      TripCount > UnrollMaxIterationsCountToAnalyze)
    return None;

  SmallSetVector<BasicBlock *, 16> BBWorklist;
  SmallSetVector<std::pair<BasicBlock *, BasicBlock *>, 4> ExitWorklist;
  DenseMap<Value *, Constant *> SimplifiedValues;
  SmallVector<std::pair<Value *, Constant *>, 4> SimplifiedInputValues;

  // The estimated cost of the unrolled form of the loop. We try to estimate
  // this by simplifying as much as we can while computing the estimate.
  int UnrolledCost = 0;

  // We also track the estimated dynamic (that is, actually executed) cost in
  // the rolled form. This helps identify cases when the savings from unrolling
  // aren't just exposing dead control flows, but actual reduced dynamic
  // instructions due to the simplifications which we expect to occur after
  // unrolling.
  int RolledDynamicCost = 0;

  // We track the simplification of each instruction in each iteration. We use
  // this to recursively merge costs into the unrolled cost on-demand so that
  // we don't count the cost of any dead code. This is essentially a map from
  // <instruction, int> to <bool, bool>, but stored as a densely packed struct.
  DenseSet<UnrolledInstState, UnrolledInstStateKeyInfo> InstCostMap;

  // A small worklist used to accumulate cost of instructions from each
  // observable and reached root in the loop.
  SmallVector<Instruction *, 16> CostWorklist;

  // PHI-used worklist used between iterations while accumulating cost.
  SmallVector<Instruction *, 4> PHIUsedList;

  // Helper function to accumulate cost for instructions in the loop.
  auto AddCostRecursively = [&](Instruction &RootI, int Iteration) {
    assert(Iteration >= 0 && "Cannot have a negative iteration!");
    assert(CostWorklist.empty() && "Must start with an empty cost list");
    assert(PHIUsedList.empty() && "Must start with an empty phi used list");
    CostWorklist.push_back(&RootI);
    for (;; --Iteration) {
      do {
        Instruction *I = CostWorklist.pop_back_val();

        // InstCostMap only uses I and Iteration as a key, the other two values
        // don't matter here.
        auto CostIter = InstCostMap.find({I, Iteration, 0, 0});
        if (CostIter == InstCostMap.end())
          // If an input to a PHI node comes from a dead path through the loop
          // we may have no cost data for it here. What that actually means is
          // that it is free.
          continue;
        auto &Cost = *CostIter;
        if (Cost.IsCounted)
          // Already counted this instruction.
          continue;

        // Mark that we are counting the cost of this instruction now.
        Cost.IsCounted = true;

        // If this is a PHI node in the loop header, just add it to the PHI set.
        if (auto *PhiI = dyn_cast<PHINode>(I))
          if (PhiI->getParent() == L->getHeader()) {
            assert(Cost.IsFree && "Loop PHIs shouldn't be evaluated as they "
                                  "inherently simplify during unrolling.");
            if (Iteration == 0)
              continue;

            // Push the incoming value from the backedge into the PHI used list
            // if it is an in-loop instruction. We'll use this to populate the
            // cost worklist for the next iteration (as we count backwards).
            if (auto *OpI = dyn_cast<Instruction>(
//......... part of the code is omitted here .........
Example 3: SortBlocks
//......... part of the code is omitted here .........
    for (MachineBasicBlock *Succ : MBB->successors()) {
      // Ignore backedges.
      if (MachineLoop *SuccL = MLI.getLoopFor(Succ))
        if (SuccL->getHeader() == Succ && SuccL->contains(MBB))
          continue;
      // Decrement the predecessor count. If it's now zero, it's ready.
      if (--NumPredsLeft[Succ->getNumber()] == 0)
        Preferred.push(Succ);
    }

    // Determine the block to follow MBB. First try to find a preferred block,
    // to preserve the original block order when possible.
    MachineBasicBlock *Next = nullptr;
    while (!Preferred.empty()) {
      Next = Preferred.top();
      Preferred.pop();
      // If X isn't dominated by the top active loop header, defer it until that
      // loop is done.
      if (!Loops.empty() &&
          !MDT.dominates(Loops.back().Loop->getHeader(), Next)) {
        Loops.back().Deferred.push_back(Next);
        Next = nullptr;
        continue;
      }
      // If Next was originally ordered before MBB, and it isn't because it was
      // loop-rotated above the header, it's not preferred.
      if (Next->getNumber() < MBB->getNumber() &&
          (!L || !L->contains(Next) ||
           L->getHeader()->getNumber() < Next->getNumber())) {
        Ready.push(Next);
        Next = nullptr;
        continue;
      }
      break;
    }

    // If we didn't find a suitable block in the Preferred list, check the
    // general Ready list.
    if (!Next) {
      // If there are no more blocks to process, we're done.
      if (Ready.empty()) {
        MaybeUpdateTerminator(MBB);
        break;
      }
      for (;;) {
        Next = Ready.top();
        Ready.pop();
        // If Next isn't dominated by the top active loop header, defer it until
        // that loop is done.
        if (!Loops.empty() &&
            !MDT.dominates(Loops.back().Loop->getHeader(), Next)) {
          Loops.back().Deferred.push_back(Next);
          continue;
        }
        break;
      }
    }

    // Move the next block into place and iterate.
    Next->moveAfter(MBB);
    MaybeUpdateTerminator(MBB);
    MBB = Next;
  }
  assert(Loops.empty() && "Active loop list not finished");
  MF.RenumberBlocks();

#ifndef NDEBUG
  SmallSetVector<MachineLoop *, 8> OnStack;

  // Insert a sentinel representing the degenerate loop that starts at the
  // function entry block and includes the entire function as a "loop" that
  // executes once.
  OnStack.insert(nullptr);

  for (auto &MBB : MF) {
    assert(MBB.getNumber() >= 0 && "Renumbered blocks should be non-negative.");

    MachineLoop *Loop = MLI.getLoopFor(&MBB);
    if (Loop && &MBB == Loop->getHeader()) {
      // Loop header. The loop predecessor should be sorted above, and the other
      // predecessors should be backedges below.
      for (auto Pred : MBB.predecessors())
        assert(
            (Pred->getNumber() < MBB.getNumber() || Loop->contains(Pred)) &&
            "Loop header predecessors must be loop predecessors or backedges");
      assert(OnStack.insert(Loop) && "Loops should be declared at most once.");
    } else {
      // Not a loop header. All predecessors should be sorted above.
      for (auto Pred : MBB.predecessors())
        assert(Pred->getNumber() < MBB.getNumber() &&
               "Non-loop-header predecessors should be topologically sorted");
      assert(OnStack.count(MLI.getLoopFor(&MBB)) &&
             "Blocks must be nested in their loops");
    }
    while (OnStack.size() > 1 && &MBB == LoopBottom(OnStack.back()))
      OnStack.pop_back();
  }
  assert(OnStack.pop_back_val() == nullptr &&
         "The function entry block shouldn't actually be a loop header");
  assert(OnStack.empty() &&
         "Control flow stack pushes and pops should be balanced.");
#endif
}
Example 4: SortBlocks
/// Sort the blocks in RPO, taking special care to make sure that loops are
/// contiguous even in the case of split backedges.
///
/// TODO: Determine whether RPO is actually worthwhile, or whether we should
/// move to just a stable-topological-sort-based approach that would preserve
/// more of the original order.
static void SortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI) {
  // Note that we do our own RPO rather than using
  // "llvm/ADT/PostOrderIterator.h" because we want control over the order that
  // successors are visited in (see above). Also, we can sort the blocks in the
  // MachineFunction as we go.
  SmallPtrSet<MachineBasicBlock *, 16> Visited;
  SmallVector<POStackEntry, 16> Stack;

  MachineBasicBlock *EntryBlock = &*MF.begin();
  Visited.insert(EntryBlock);
  Stack.push_back(POStackEntry(EntryBlock, MF, MLI));

  for (;;) {
    POStackEntry &Entry = Stack.back();
    SmallVectorImpl<MachineBasicBlock *> &Succs = Entry.Succs;
    if (!Succs.empty()) {
      MachineBasicBlock *Succ = Succs.pop_back_val();
      if (Visited.insert(Succ).second)
        Stack.push_back(POStackEntry(Succ, MF, MLI));
      continue;
    }

    // Put the block in its position in the MachineFunction.
    MachineBasicBlock &MBB = *Entry.MBB;
    MBB.moveBefore(&*MF.begin());

    // Branch instructions may utilize a fallthrough, so update them if a
    // fallthrough has been added or removed.
    if (!MBB.empty() && MBB.back().isTerminator() && !MBB.back().isBranch() &&
        !MBB.back().isBarrier())
      report_fatal_error(
          "Non-branch terminator with fallthrough cannot yet be rewritten");
    if (MBB.empty() || !MBB.back().isTerminator() || MBB.back().isBranch())
      MBB.updateTerminator();

    Stack.pop_back();
    if (Stack.empty())
      break;
  }

  // Now that we've sorted the blocks in RPO, renumber them.
  MF.RenumberBlocks();

#ifndef NDEBUG
  SmallSetVector<MachineLoop *, 8> OnStack;

  // Insert a sentinel representing the degenerate loop that starts at the
  // function entry block and includes the entire function as a "loop" that
  // executes once.
  OnStack.insert(nullptr);

  for (auto &MBB : MF) {
    assert(MBB.getNumber() >= 0 && "Renumbered blocks should be non-negative.");

    MachineLoop *Loop = MLI.getLoopFor(&MBB);
    if (Loop && &MBB == Loop->getHeader()) {
      // Loop header. The loop predecessor should be sorted above, and the other
      // predecessors should be backedges below.
      for (auto Pred : MBB.predecessors())
        assert(
            (Pred->getNumber() < MBB.getNumber() || Loop->contains(Pred)) &&
            "Loop header predecessors must be loop predecessors or backedges");
      assert(OnStack.insert(Loop) && "Loops should be declared at most once.");
    } else {
      // Not a loop header. All predecessors should be sorted above.
      for (auto Pred : MBB.predecessors())
        assert(Pred->getNumber() < MBB.getNumber() &&
               "Non-loop-header predecessors should be topologically sorted");
      assert(OnStack.count(MLI.getLoopFor(&MBB)) &&
             "Blocks must be nested in their loops");
    }
    while (OnStack.size() > 1 && &MBB == LoopBottom(OnStack.back()))
      OnStack.pop_back();
  }
  assert(OnStack.pop_back_val() == nullptr &&
         "The function entry block shouldn't actually be a loop header");
  assert(OnStack.empty() &&
         "Control flow stack pushes and pops should be balanced.");
#endif
}
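Examples 3 and 4 end with the same #ifndef NDEBUG check, which uses a SmallSetVector as a stack: a nullptr sentinel is pushed first, each loop is pushed at most once, and pop_back_val() finally pops the sentinel to confirm the stack unwound cleanly. The fragment below is a reduced, hypothetical sketch of just that pattern, with plain data standing in for MachineLoop pointers; the function and parameter names are illustrative and not part of LLVM.

#include "llvm/ADT/SetVector.h"
#include <cassert>

using namespace llvm;

// Hypothetical stand-in for the verification loop in Examples 3 and 4.
static void checkBalancedRegions(const int *Regions, unsigned NumRegions) {
  SmallSetVector<const int *, 8> OnStack;

  // Sentinel representing the outermost "region" that spans everything.
  OnStack.insert(nullptr);

  for (unsigned I = 0; I != NumRegions; ++I)
    // insert() returns false for duplicates, so this also asserts that each
    // region is pushed at most once (mirroring the "declared at most once"
    // assert in the examples above).
    assert(OnStack.insert(&Regions[I]) && "Region pushed twice");

  // Unwind everything above the sentinel.
  while (OnStack.size() > 1)
    OnStack.pop_back();

  // pop_back_val() removes and returns the last element; only the sentinel
  // should remain, and afterwards the stack must be empty.
  assert(OnStack.pop_back_val() == nullptr && "Sentinel should be popped last");
  assert(OnStack.empty() && "Pushes and pops should be balanced");
}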