This article collects typical usage examples of the C++ SmallPtrSet::count method. If you have been wondering how SmallPtrSet::count is used in practice, the curated examples below should help. You can also explore further usage examples of the enclosing class, SmallPtrSet.
The following sections show 15 code examples of SmallPtrSet::count, sorted by popularity by default.
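Before the examples, here is a minimal sketch of the pattern they all share: insert pointers into a SmallPtrSet and test membership with count(), which returns 1 if the pointer is in the set and 0 otherwise. The demo function and the plain int pointers are illustrative only and are not taken from any of the examples that follow; only the LLVM ADT header is assumed to be available.

#include "llvm/ADT/SmallPtrSet.h"
#include <cassert>

void smallPtrSetCountDemo() {
  int A = 0, B = 0;
  // Inline storage for up to 4 pointers; larger sets spill to the heap.
  llvm::SmallPtrSet<int *, 4> Seen;
  Seen.insert(&A);              // insert() returns {iterator, bool inserted}
  assert(Seen.count(&A) == 1);  // present -> count() == 1
  assert(Seen.count(&B) == 0);  // absent  -> count() == 0
}

In the examples below this pattern usually appears as a guard such as if (!Visited.count(I)) around a worklist, or as a reachability/membership test over basic blocks.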
Example 1: findMatInsertPt
/// \brief Find an insertion point that dominates all uses.
Instruction *ConstantHoisting::
findConstantInsertionPoint(const ConstantInfo &ConstInfo) const {
  assert(!ConstInfo.RebasedConstants.empty() && "Invalid constant info entry.");

  // Collect all IDoms.
  SmallPtrSet<BasicBlock *, 8> BBs;
  for (auto const &RCI : ConstInfo.RebasedConstants)
    BBs.insert(getIDom(RCI));

  assert(!BBs.empty() && "No dominators!?");

  if (BBs.count(Entry))
    return &Entry->front();

  while (BBs.size() >= 2) {
    BasicBlock *BB, *BB1, *BB2;
    BB1 = *BBs.begin();
    BB2 = *std::next(BBs.begin());
    BB = DT->findNearestCommonDominator(BB1, BB2);
    if (BB == Entry)
      return &Entry->front();
    BBs.erase(BB1);
    BBs.erase(BB2);
    BBs.insert(BB);
  }

  assert((BBs.size() == 1) && "Expected only one element.");
  Instruction &FirstInst = (*BBs.begin())->front();
  return findMatInsertPt(&FirstInst);
}
Example 2: runOnFunction
bool UnreachableBlockElim::runOnFunction(Function &F) {
  SmallPtrSet<BasicBlock*, 8> Reachable;

  // Mark all reachable blocks.
  for (df_ext_iterator<Function*, SmallPtrSet<BasicBlock*, 8> > I =
         df_ext_begin(&F, Reachable), E = df_ext_end(&F, Reachable); I != E; ++I)
    /* Mark all reachable blocks */;

  // Loop over all dead blocks, remembering them and deleting all instructions
  // in them.
  std::vector<BasicBlock*> DeadBlocks;
  for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
    if (!Reachable.count(I)) {
      BasicBlock *BB = I;
      DeadBlocks.push_back(BB);
      while (PHINode *PN = dyn_cast<PHINode>(BB->begin())) {
        PN->replaceAllUsesWith(Constant::getNullValue(PN->getType()));
        BB->getInstList().pop_front();
      }
      for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
        (*SI)->removePredecessor(BB);
      BB->dropAllReferences();
    }

  // Actually remove the blocks now.
  ProfileInfo *PI = getAnalysisIfAvailable<ProfileInfo>();
  for (unsigned i = 0, e = DeadBlocks.size(); i != e; ++i) {
    if (PI) PI->removeBlock(DeadBlocks[i]);
    DeadBlocks[i]->eraseFromParent();
  }

  return DeadBlocks.size();
}
Example 3: checkPHIOps
// Check PHI instructions at the beginning of MBB. It is assumed that
// calcRegsPassed has been run so BBInfo::isLiveOut is valid.
void MachineVerifier::checkPHIOps(const MachineBasicBlock *MBB) {
  SmallPtrSet<const MachineBasicBlock*, 8> seen;

  for (MachineBasicBlock::const_iterator BBI = MBB->begin(), BBE = MBB->end();
       BBI != BBE && BBI->isPHI(); ++BBI) {
    seen.clear();

    for (unsigned i = 1, e = BBI->getNumOperands(); i != e; i += 2) {
      unsigned Reg = BBI->getOperand(i).getReg();
      const MachineBasicBlock *Pre = BBI->getOperand(i + 1).getMBB();
      if (!Pre->isSuccessor(MBB))
        continue;
      seen.insert(Pre);
      BBInfo &PrInfo = MBBInfoMap[Pre];
      if (PrInfo.reachable && !PrInfo.isLiveOut(Reg))
        report("PHI operand is not live-out from predecessor",
               &BBI->getOperand(i), i);
    }

    // Did we see all predecessors?
    for (MachineBasicBlock::const_pred_iterator PrI = MBB->pred_begin(),
         PrE = MBB->pred_end(); PrI != PrE; ++PrI) {
      if (!seen.count(*PrI)) {
        report("Missing PHI operand", BBI);
        *OS << "BB#" << (*PrI)->getNumber()
            << " is a predecessor according to the CFG.\n";
      }
    }
  }
}
Example 4: EliminateMultipleEntryLoops
static void EliminateMultipleEntryLoops(MachineFunction &MF,
                                        const MachineLoopInfo &MLI) {
  SmallPtrSet<MachineBasicBlock *, 8> InSet;
  for (scc_iterator<MachineFunction *> I = scc_begin(&MF), E = scc_end(&MF);
       I != E; ++I) {
    const std::vector<MachineBasicBlock *> &CurrentSCC = *I;

    // Skip trivial SCCs.
    if (CurrentSCC.size() == 1)
      continue;

    InSet.insert(CurrentSCC.begin(), CurrentSCC.end());
    MachineBasicBlock *Header = nullptr;
    for (MachineBasicBlock *MBB : CurrentSCC) {
      for (MachineBasicBlock *Pred : MBB->predecessors()) {
        if (InSet.count(Pred))
          continue;
        if (!Header) {
          Header = MBB;
          break;
        }
        // TODO: Implement multiple-entry loops.
        report_fatal_error("multiple-entry loops are not supported yet");
      }
    }
    assert(MLI.isLoopHeader(Header));
    InSet.clear();
  }
}
Example 5: isLiveInButUnusedBefore
/// isLiveInButUnusedBefore - Return true if the register is live-in to the MBB
/// and not used before it reaches the MI that defines the register.
static bool isLiveInButUnusedBefore(unsigned Reg, MachineInstr *MI,
                                    MachineBasicBlock *MBB,
                                    const TargetRegisterInfo *TRI,
                                    MachineRegisterInfo* MRI) {
  // First check if register is livein.
  bool isLiveIn = false;
  for (MachineBasicBlock::const_livein_iterator I = MBB->livein_begin(),
       E = MBB->livein_end(); I != E; ++I)
    if (Reg == *I || TRI->isSuperRegister(Reg, *I)) {
      isLiveIn = true;
      break;
    }
  if (!isLiveIn)
    return false;

  // Is there any use of it before the specified MI?
  SmallPtrSet<MachineInstr*, 4> UsesInMBB;
  for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(Reg),
       UE = MRI->use_end(); UI != UE; ++UI) {
    MachineOperand &UseMO = UI.getOperand();
    if (UseMO.isReg() && UseMO.isUndef())
      continue;
    MachineInstr *UseMI = &*UI;
    if (UseMI->getParent() == MBB)
      UsesInMBB.insert(UseMI);
  }
  if (UsesInMBB.empty())
    return true;

  for (MachineBasicBlock::iterator I = MBB->begin(), E = MI; I != E; ++I)
    if (UsesInMBB.count(&*I))
      return false;
  return true;
}
Example 6: collectCastsToIgnore
/// Collect cast instructions that can be ignored in the vectorizer's cost
/// model, given a reduction exit value and the minimal type in which the
/// reduction can be represented.
static void collectCastsToIgnore(Loop *TheLoop, Instruction *Exit,
                                 Type *RecurrenceType,
                                 SmallPtrSetImpl<Instruction *> &Casts) {
  SmallVector<Instruction *, 8> Worklist;
  SmallPtrSet<Instruction *, 8> Visited;
  Worklist.push_back(Exit);

  while (!Worklist.empty()) {
    Instruction *Val = Worklist.pop_back_val();
    Visited.insert(Val);
    if (auto *Cast = dyn_cast<CastInst>(Val))
      if (Cast->getSrcTy() == RecurrenceType) {
        // If the source type of a cast instruction is equal to the recurrence
        // type, it will be eliminated, and should be ignored in the vectorizer
        // cost model.
        Casts.insert(Cast);
        continue;
      }

    // Add all operands to the work list if they are loop-varying values that
    // we haven't yet visited.
    for (Value *O : cast<User>(Val)->operands())
      if (auto *I = dyn_cast<Instruction>(O))
        if (TheLoop->contains(I) && !Visited.count(I))
          Worklist.push_back(I);
  }
}
Example 7: InsertRootInitializers
bool LowerIntrinsics::InsertRootInitializers(Function &F, AllocaInst **Roots,
                                             unsigned Count) {
  // Scroll past alloca instructions.
  BasicBlock::iterator IP = F.getEntryBlock().begin();
  while (isa<AllocaInst>(IP)) ++IP;

  // Search for initializers in the initial BB.
  SmallPtrSet<AllocaInst*,16> InitedRoots;
  for (; !CouldBecomeSafePoint(IP); ++IP)
    if (StoreInst *SI = dyn_cast<StoreInst>(IP))
      if (AllocaInst *AI =
            dyn_cast<AllocaInst>(SI->getOperand(1)->stripPointerCasts()))
        InitedRoots.insert(AI);

  // Add root initializers.
  bool MadeChange = false;
  for (AllocaInst **I = Roots, **E = Roots + Count; I != E; ++I)
    if (!InitedRoots.count(*I)) {
      StoreInst* SI = new StoreInst(ConstantPointerNull::get(cast<PointerType>(
                        cast<PointerType>((*I)->getType())->getElementType())),
                        *I);
      SI->insertAfter(*I);
      MadeChange = true;
    }

  return MadeChange;
}
Example 8: InsertRootInitializers
bool LowerIntrinsics::InsertRootInitializers(Function &F, Instruction **Roots,
                                             unsigned Count) {
  // Scroll past alloca instructions.
  BasicBlock::iterator IP = F.getEntryBlock().begin();
  while (isa<AllocaInst>(IP)) ++IP;

  // Search for initializers in the initial BB.
  SmallPtrSet<AllocaInst*,16> InitedRoots;
  for (; !CouldBecomeSafePoint(IP); ++IP)
    if (StoreInst *SI = dyn_cast<StoreInst>(IP))
      if (AllocaInst *AI =
            dyn_cast<AllocaInst>(SI->getOperand(1)->stripPointerCasts()))
        InitedRoots.insert(AI);

  // Add root initializers.
  bool MadeChange = false;
  for (Instruction **II = Roots, **IE = Roots + Count; II != IE; ++II) {
    // Trace back through GEPs to find the actual alloca.
    Instruction *I = *II;
    while (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I))
      I = cast<Instruction>(GEP->getPointerOperand());
    AllocaInst *AI = cast<AllocaInst>(I);
    if (!InitedRoots.count(AI)) {
      Type *ElemTy = cast<PointerType>((*II)->getType())->getElementType();
      PointerType *PElemTy = cast<PointerType>(ElemTy);
      StoreInst* SI = new StoreInst(ConstantPointerNull::get(PElemTy), *II);
      SI->insertAfter(*II);
      MadeChange = true;
    }
  }

  return MadeChange;
}
Example 9: canPaddingBeAccessed
/// \brief Checks if the padding bytes of an argument could be accessed.
bool ArgPromotion::canPaddingBeAccessed(Argument *arg) {
  assert(arg->hasByValAttr());

  // Track all the pointers to the argument to make sure they are not captured.
  SmallPtrSet<Value *, 16> PtrValues;
  PtrValues.insert(arg);

  // Track all of the stores.
  SmallVector<StoreInst *, 16> Stores;

  // Scan through the uses recursively to make sure the pointer is always used
  // sanely.
  SmallVector<Value *, 16> WorkList;
  WorkList.insert(WorkList.end(), arg->user_begin(), arg->user_end());
  while (!WorkList.empty()) {
    Value *V = WorkList.back();
    WorkList.pop_back();
    if (isa<GetElementPtrInst>(V) || isa<PHINode>(V)) {
      if (PtrValues.insert(V).second)
        WorkList.insert(WorkList.end(), V->user_begin(), V->user_end());
    } else if (StoreInst *Store = dyn_cast<StoreInst>(V)) {
      Stores.push_back(Store);
    } else if (!isa<LoadInst>(V)) {
      return true;
    }
  }

  // Check to make sure the pointers aren't captured
  for (StoreInst *Store : Stores)
    if (PtrValues.count(Store->getValueOperand()))
      return true;

  return false;
}
Example 10: runOnFunction
bool CfgNaive::runOnFunction(Function &F) {
  errs() << F.getName() << "\n";
  SmallPtrSet<BasicBlock*, 8> visitedBlocks;

  // Mark all reachable blocks.
  for (df_ext_iterator<Function*, SmallPtrSet<BasicBlock*, 8>> currentBlock = df_ext_begin(&F, visitedBlocks),
       endBlock = df_ext_end(&F, visitedBlocks);
       currentBlock != endBlock;
       currentBlock++) {
    // do nothing, iterator marks visited nodes automatically
  }

  // Build set of unreachable blocks
  std::vector<BasicBlock*> unreachableBlocks;
  for (Function::iterator currentBlock = F.begin(), endBlock = F.end(); currentBlock != endBlock; currentBlock++) {
    if (visitedBlocks.count(currentBlock) == 0) {
      unreachableBlocks.push_back(currentBlock);
    }
  }

  // Remove unreachable blocks
  for (int i = 0, e = unreachableBlocks.size(); i != e; i++) {
    errs() << unreachableBlocks[i]->getName() << " is unreachable\n";
    unreachableBlocks[i]->eraseFromParent();
  }

  bool hasModifiedBlocks = (unreachableBlocks.size() > 0);
  return hasModifiedBlocks;
}
Example 11: bothUsedInPHI
static bool bothUsedInPHI(const MachineBasicBlock &A,
                          const SmallPtrSet<MachineBasicBlock *, 8> &SuccsB) {
  for (MachineBasicBlock *BB : A.successors())
    if (SuccsB.count(BB) && !BB->empty() && BB->begin()->isPHI())
      return true;
  return false;
}
Example 12: runOnMachineFunction
bool LiveVariables::runOnMachineFunction(MachineFunction &mf) {
  MF = &mf;
  MRI = &mf.getRegInfo();
  TRI = MF->getSubtarget().getRegisterInfo();

  const unsigned NumRegs = TRI->getNumRegs();
  PhysRegDef.assign(NumRegs, nullptr);
  PhysRegUse.assign(NumRegs, nullptr);
  PHIVarInfo.resize(MF->getNumBlockIDs());
  PHIJoins.clear();

  // FIXME: LiveIntervals will be updated to remove its dependence on
  // LiveVariables to improve compilation time and eliminate bizarre pass
  // dependencies. Until then, we can't change much in -O0.
  if (!MRI->isSSA())
    report_fatal_error("regalloc=... not currently supported with -O0");

  analyzePHINodes(mf);

  // Calculate live variable information in depth first order on the CFG of the
  // function. This guarantees that we will see the definition of a virtual
  // register before its uses due to dominance properties of SSA (except for PHI
  // nodes, which are treated as a special case).
  MachineBasicBlock *Entry = &MF->front();
  SmallPtrSet<MachineBasicBlock*,16> Visited;

  for (MachineBasicBlock *MBB : depth_first_ext(Entry, Visited)) {
    runOnBlock(MBB, NumRegs);

    PhysRegDef.assign(NumRegs, nullptr);
    PhysRegUse.assign(NumRegs, nullptr);
  }

  // Convert and transfer the dead / killed information we have gathered into
  // VirtRegInfo onto MI's.
  for (unsigned i = 0, e1 = VirtRegInfo.size(); i != e1; ++i) {
    const unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
    for (unsigned j = 0, e2 = VirtRegInfo[Reg].Kills.size(); j != e2; ++j)
      if (VirtRegInfo[Reg].Kills[j] == MRI->getVRegDef(Reg))
        VirtRegInfo[Reg].Kills[j]->addRegisterDead(Reg, TRI);
      else
        VirtRegInfo[Reg].Kills[j]->addRegisterKilled(Reg, TRI);
  }

  // Check to make sure there are no unreachable blocks in the MC CFG for the
  // function. If there are, it is due to a bug in the instruction selector or
  // some other part of the code generator.
#ifndef NDEBUG
  for (MachineFunction::iterator i = MF->begin(), e = MF->end(); i != e; ++i)
    assert(Visited.count(&*i) != 0 && "unreachable basic block found");
#endif

  PhysRegDef.clear();
  PhysRegUse.clear();
  PHIVarInfo.clear();

  return false;
}
Example 13: isProfitableToCSE
/// isProfitableToCSE - Return true if it's profitable to eliminate MI with a
/// common expression that defines Reg.
bool MachineCSE::isProfitableToCSE(unsigned CSReg, unsigned Reg,
                                   MachineInstr *CSMI, MachineInstr *MI) {
  // FIXME: Heuristics that work around the lack of live-range splitting.

  // Heuristics #1: Don't CSE "cheap" computations if the def is not local or in
  // an immediate predecessor. We don't want to increase register pressure and
  // end up causing other computations to be spilled.
  if (MI->getDesc().isAsCheapAsAMove()) {
    MachineBasicBlock *CSBB = CSMI->getParent();
    MachineBasicBlock *BB = MI->getParent();
    if (CSBB != BB && !CSBB->isSuccessor(BB))
      return false;
  }

  // Heuristics #2: If the expression doesn't use a virtual register and the
  // only uses of the redundant computation are copies, do not CSE.
  bool HasVRegUse = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isUse() &&
        TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
      HasVRegUse = true;
      break;
    }
  }
  if (!HasVRegUse) {
    bool HasNonCopyUse = false;
    for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(Reg),
         E = MRI->use_nodbg_end(); I != E; ++I) {
      MachineInstr *Use = &*I;
      // Ignore copies.
      if (!Use->isCopyLike()) {
        HasNonCopyUse = true;
        break;
      }
    }
    if (!HasNonCopyUse)
      return false;
  }

  // Heuristics #3: If the common subexpression is used by PHIs, do not reuse
  // it unless the defined value is already used in the BB of the new use.
  bool HasPHI = false;
  SmallPtrSet<MachineBasicBlock*, 4> CSBBs;
  for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(CSReg),
       E = MRI->use_nodbg_end(); I != E; ++I) {
    MachineInstr *Use = &*I;
    HasPHI |= Use->isPHI();
    CSBBs.insert(Use->getParent());
  }

  if (!HasPHI)
    return true;
  return CSBBs.count(MI->getParent());
}
Example 14: ComputeLiveInBlocks
/// DetermineInsertionPoint - At this point, we're committed to promoting the
/// alloca using IDF's, and the standard SSA construction algorithm. Determine
/// which blocks need phi nodes and see if we can optimize out some work by
/// avoiding insertion of dead phi nodes.
void PromoteMem2Reg::DetermineInsertionPoint(AllocaInst *AI, unsigned AllocaNum,
                                             AllocaInfo &Info) {
  // Unique the set of defining blocks for efficient lookup.
  SmallPtrSet<BasicBlock*, 32> DefBlocks;
  DefBlocks.insert(Info.DefiningBlocks.begin(), Info.DefiningBlocks.end());

  // Determine which blocks the value is live in. These are blocks which lead
  // to uses.
  SmallPtrSet<BasicBlock*, 32> LiveInBlocks;
  ComputeLiveInBlocks(AI, Info, DefBlocks, LiveInBlocks);

  // Compute the locations where PhiNodes need to be inserted. Look at the
  // dominance frontier of EACH basic-block we have a write in.
  unsigned CurrentVersion = 0;
  SmallPtrSet<PHINode*, 16> InsertedPHINodes;
  std::vector<std::pair<unsigned, BasicBlock*> > DFBlocks;
  while (!Info.DefiningBlocks.empty()) {
    BasicBlock *BB = Info.DefiningBlocks.back();
    Info.DefiningBlocks.pop_back();

    // Look up the DF for this write, add it to defining blocks.
    DominanceFrontier::const_iterator it = DF.find(BB);
    if (it == DF.end()) continue;
    const DominanceFrontier::DomSetType &S = it->second;

    // In theory we don't need the indirection through the DFBlocks vector.
    // In practice, the order of calling QueuePhiNode would depend on the
    // (unspecified) ordering of basic blocks in the dominance frontier,
    // which would give PHI nodes non-deterministic subscripts. Fix this by
    // processing blocks in order of their occurrence in the function.
    for (DominanceFrontier::DomSetType::const_iterator P = S.begin(),
         PE = S.end(); P != PE; ++P) {
      // If the frontier block is not in the live-in set for the alloca, don't
      // bother processing it.
      if (!LiveInBlocks.count(*P))
        continue;
      DFBlocks.push_back(std::make_pair(BBNumbers[*P], *P));
    }

    // Sort by the block ordering in the function.
    if (DFBlocks.size() > 1)
      std::sort(DFBlocks.begin(), DFBlocks.end());

    for (unsigned i = 0, e = DFBlocks.size(); i != e; ++i) {
      BasicBlock *BB = DFBlocks[i].second;
      if (QueuePhiNode(BB, AllocaNum, CurrentVersion, InsertedPHINodes))
        Info.DefiningBlocks.push_back(BB);
    }
    DFBlocks.clear();
  }
}
Example 15: while
/// Recursively traverse the conformance lists to determine sole conforming
/// class, struct or enum type.
NominalTypeDecl *
ProtocolConformanceAnalysis::findSoleConformingType(ProtocolDecl *Protocol) {
  /// First check in the SoleConformingTypeCache.
  auto SoleConformingTypeIt = SoleConformingTypeCache.find(Protocol);
  if (SoleConformingTypeIt != SoleConformingTypeCache.end())
    return SoleConformingTypeIt->second;

  SmallVector<ProtocolDecl *, 8> PDWorkList;
  SmallPtrSet<ProtocolDecl *, 8> VisitedPDs;
  NominalTypeDecl *SoleConformingNTD = nullptr;
  PDWorkList.push_back(Protocol);
  while (!PDWorkList.empty()) {
    auto *PD = PDWorkList.pop_back_val();

    // Protocols must have internal or lower access.
    if (PD->getEffectiveAccess() > AccessLevel::Internal) {
      return nullptr;
    }
    VisitedPDs.insert(PD);
    auto NTDList = getConformances(PD);
    for (auto *ConformingNTD : NTDList) {
      // Recurse on protocol types.
      if (auto *Proto = dyn_cast<ProtocolDecl>(ConformingNTD)) {
        // Ignore visited protocol decls.
        if (!VisitedPDs.count(Proto))
          PDWorkList.push_back(Proto);
      } else { // Classes, structs and enums are added here.
        // Bail if more than one conforming type is found.
        if (SoleConformingNTD && ConformingNTD != SoleConformingNTD) {
          return nullptr;
        } else {
          SoleConformingNTD = ConformingNTD;
        }
      }
    }
  }

  // Bail if we did not find a sole conforming type.
  if (!SoleConformingNTD)
    return nullptr;

  // Generic declarations are ignored.
  if (SoleConformingNTD->isGenericContext()) {
    return nullptr;
  }

  // Populate the SoleConformingTypeCache.
  SoleConformingTypeCache.insert(std::pair<ProtocolDecl *, NominalTypeDecl *>(
      Protocol, SoleConformingNTD));

  // Return SoleConformingNTD.
  return SoleConformingNTD;
}