This article collects typical usage examples of the C++ SmallSetVector class. If you have been wondering what SmallSetVector is for, how to use it, or what real code that uses it looks like, the examples here should help.
Fifteen SmallSetVector examples are shown below, all drawn from LLVM and ordered roughly by popularity.
Example 1: isFunctionMallocLike
/// Tests whether a function is "malloc-like".
///
/// A function is "malloc-like" if it returns either null or a pointer that
/// doesn't alias any other pointer visible to the caller.
static bool isFunctionMallocLike(Function *F, const SCCNodeSet &SCCNodes) {
SmallSetVector<Value *, 8> FlowsToReturn;
for (Function::iterator I = F->begin(), E = F->end(); I != E; ++I)
if (ReturnInst *Ret = dyn_cast<ReturnInst>(I->getTerminator()))
FlowsToReturn.insert(Ret->getReturnValue());
for (unsigned i = 0; i != FlowsToReturn.size(); ++i) {
Value *RetVal = FlowsToReturn[i];
if (Constant *C = dyn_cast<Constant>(RetVal)) {
if (!C->isNullValue() && !isa<UndefValue>(C))
return false;
continue;
}
if (isa<Argument>(RetVal))
return false;
if (Instruction *RVI = dyn_cast<Instruction>(RetVal))
switch (RVI->getOpcode()) {
// Extend the analysis by looking upwards.
case Instruction::BitCast:
case Instruction::GetElementPtr:
case Instruction::AddrSpaceCast:
FlowsToReturn.insert(RVI->getOperand(0));
continue;
case Instruction::Select: {
SelectInst *SI = cast<SelectInst>(RVI);
FlowsToReturn.insert(SI->getTrueValue());
FlowsToReturn.insert(SI->getFalseValue());
continue;
}
case Instruction::PHI: {
PHINode *PN = cast<PHINode>(RVI);
for (Value *IncValue : PN->incoming_values())
FlowsToReturn.insert(IncValue);
continue;
}
// Check whether the pointer came from an allocation.
case Instruction::Alloca:
break;
case Instruction::Call:
case Instruction::Invoke: {
CallSite CS(RVI);
if (CS.paramHasAttr(0, Attribute::NoAlias))
break;
if (CS.getCalledFunction() && SCCNodes.count(CS.getCalledFunction()))
break;
} // fall-through
default:
return false; // Did not come from an allocation.
}
if (PointerMayBeCaptured(RetVal, false, /*StoreCaptures=*/false))
return false;
}
return true;
}
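The detail worth copying from this example is the index-based worklist: SmallSetVector::insert silently drops duplicates, so scanning with a growing index visits every reachable value exactly once even while the loop keeps inserting. A minimal sketch of that idiom outside any pass machinery (it assumes only the LLVM ADT headers; the values and the "< 4" successor rule are made up for illustration):

#include "llvm/ADT/SetVector.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::SmallSetVector<int, 8> Worklist;
  Worklist.insert(0);
  // The bound is re-read each iteration, so values inserted during the
  // scan are visited too -- each exactly once, since inserts de-duplicate.
  for (unsigned i = 0; i != Worklist.size(); ++i) {
    int V = Worklist[i];
    llvm::outs() << "visiting " << V << '\n';
    if (V < 4) {
      Worklist.insert(V + 1); // extends the walk
      Worklist.insert(V + 1); // duplicate: ignored, never re-visited
    }
  }
}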
Example 2: LLVM_DEBUG
// Analyze interleaved accesses and collect them into interleaved load and
// store groups.
//
// When generating code for an interleaved load group, we effectively hoist all
// loads in the group to the location of the first load in program order. When
// generating code for an interleaved store group, we sink all stores to the
// location of the last store. This code motion can change the order of load
// and store instructions and may break dependences.
//
// The code generation strategy mentioned above ensures that we won't violate
// any write-after-read (WAR) dependences.
//
// E.g., for the WAR dependence: a = A[i]; // (1)
// A[i] = b; // (2)
//
// The store group of (2) is always inserted at or below (2), and the load
// group of (1) is always inserted at or above (1). Thus, the instructions will
// never be reordered. All other dependences are checked to ensure the
// correctness of the instruction reordering.
//
// The algorithm visits all memory accesses in the loop in bottom-up program
// order. Program order is established by traversing the blocks in the loop in
// reverse postorder when collecting the accesses.
//
// We visit the memory accesses in bottom-up order because it can simplify the
// construction of store groups in the presence of write-after-write (WAW)
// dependences.
//
// E.g., for the WAW dependence: A[i] = a; // (1)
// A[i] = b; // (2)
// A[i + 1] = c; // (3)
//
// We will first create a store group with (3) and (2). (1) can't be added to
// this group because it and (2) are dependent. However, (1) can be grouped
// with other accesses that may precede it in program order. Note that a
// bottom-up order does not imply that WAW dependences should not be checked.
void InterleavedAccessInfo::analyzeInterleaving(
bool EnablePredicatedInterleavedMemAccesses) {
LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
const ValueToValueMap &Strides = LAI->getSymbolicStrides();
// Holds all accesses with a constant stride.
MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
collectConstStrideAccesses(AccessStrideInfo, Strides);
if (AccessStrideInfo.empty())
return;
// Collect the dependences in the loop.
collectDependences();
// Holds all interleaved store groups temporarily.
SmallSetVector<InterleaveGroup *, 4> StoreGroups;
// Holds all interleaved load groups temporarily.
SmallSetVector<InterleaveGroup *, 4> LoadGroups;
// Search in bottom-up program order for pairs of accesses (A and B) that can
// form interleaved load or store groups. In the algorithm below, access A
// precedes access B in program order. We initialize a group for B in the
// outer loop of the algorithm, and then in the inner loop, we attempt to
// insert each A into B's group if:
//
// 1. A and B have the same stride,
// 2. A and B have the same memory object size, and
// 3. A belongs in B's group according to its distance from B.
//
// Special care is taken to ensure group formation will not break any
// dependences.
for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
BI != E; ++BI) {
Instruction *B = BI->first;
StrideDescriptor DesB = BI->second;
// Initialize a group for B if it has an allowable stride. Even if we don't
// create a group for B, we continue with the bottom-up algorithm to ensure
// we don't break any of B's dependences.
InterleaveGroup *Group = nullptr;
if (isStrided(DesB.Stride) &&
(!isPredicated(B->getParent()) || EnablePredicatedInterleavedMemAccesses)) {
Group = getInterleaveGroup(B);
if (!Group) {
LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B
<< '\n');
Group = createInterleaveGroup(B, DesB.Stride, DesB.Align);
}
if (B->mayWriteToMemory())
StoreGroups.insert(Group);
else
LoadGroups.insert(Group);
}
for (auto AI = std::next(BI); AI != E; ++AI) {
Instruction *A = AI->first;
StrideDescriptor DesA = AI->second;
// Our code motion strategy implies that we can't have dependences
// between accesses in an interleaved group and other accesses located
// between the first and last member of the group. Note that this also
// means that a group can't have more than one member at a given offset.
// The accesses in a group can have dependences with other accesses, but
//......... rest of the code omitted here .........
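StoreGroups and LoadGroups act here as order-preserving buckets: a group is (re-)inserted every time one of its members is visited, yet appears only once, and later phases can walk the groups in a deterministic first-insertion order. A rough sketch of that bucketing, with plain ints standing in for InterleaveGroup pointers:

#include "llvm/ADT/SetVector.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::SmallSetVector<int, 4> StoreGroups, LoadGroups;
  struct Access { int Group; bool IsStore; };
  const Access Accesses[] = {{7, true}, {3, false}, {7, true}, {3, false}};
  for (const Access &A : Accesses)
    (A.IsStore ? StoreGroups : LoadGroups).insert(A.Group);
  // Each group was inserted twice but is stored once, in first-seen order.
  llvm::outs() << "store groups: " << StoreGroups.size()
               << ", load groups: " << LoadGroups.size() << '\n';
}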
Example 3: assert
/// UpdateSuccessorsPHIs - After FromBB is tail duplicated into its predecessor
/// blocks, the successors have gained new predecessors. Update the PHI
/// instructions in them accordingly.
void
TailDuplicatePass::UpdateSuccessorsPHIs(MachineBasicBlock *FromBB, bool isDead,
SmallVector<MachineBasicBlock*, 8> &TDBBs,
SmallSetVector<MachineBasicBlock*,8> &Succs) {
for (SmallSetVector<MachineBasicBlock*, 8>::iterator SI = Succs.begin(),
SE = Succs.end(); SI != SE; ++SI) {
MachineBasicBlock *SuccBB = *SI;
for (MachineBasicBlock::iterator II = SuccBB->begin(), EE = SuccBB->end();
II != EE; ++II) {
if (!II->isPHI())
break;
unsigned Idx = 0;
for (unsigned i = 1, e = II->getNumOperands(); i != e; i += 2) {
MachineOperand &MO = II->getOperand(i+1);
if (MO.getMBB() == FromBB) {
Idx = i;
break;
}
}
assert(Idx != 0);
MachineOperand &MO0 = II->getOperand(Idx);
unsigned Reg = MO0.getReg();
if (isDead) {
// Folded into the previous BB.
// There could be duplicate phi source entries. FIXME: Should sdisel
// or an earlier pass have fixed this?
for (unsigned i = II->getNumOperands()-2; i != Idx; i -= 2) {
MachineOperand &MO = II->getOperand(i+1);
if (MO.getMBB() == FromBB) {
II->RemoveOperand(i+1);
II->RemoveOperand(i);
}
}
} else
Idx = 0;
// If Idx is set, the operands at Idx and Idx+1 must be removed.
// We reuse the location to avoid expensive RemoveOperand calls.
DenseMap<unsigned,AvailableValsTy>::iterator LI=SSAUpdateVals.find(Reg);
if (LI != SSAUpdateVals.end()) {
// This register is defined in the tail block.
for (unsigned j = 0, ee = LI->second.size(); j != ee; ++j) {
MachineBasicBlock *SrcBB = LI->second[j].first;
unsigned SrcReg = LI->second[j].second;
if (Idx != 0) {
II->getOperand(Idx).setReg(SrcReg);
II->getOperand(Idx+1).setMBB(SrcBB);
Idx = 0;
} else {
II->addOperand(MachineOperand::CreateReg(SrcReg, false));
II->addOperand(MachineOperand::CreateMBB(SrcBB));
}
}
} else {
// Live in tail block, must also be live in predecessors.
for (unsigned j = 0, ee = TDBBs.size(); j != ee; ++j) {
MachineBasicBlock *SrcBB = TDBBs[j];
if (Idx != 0) {
II->getOperand(Idx).setReg(Reg);
II->getOperand(Idx+1).setMBB(SrcBB);
Idx = 0;
} else {
II->addOperand(MachineOperand::CreateReg(Reg, false));
II->addOperand(MachineOperand::CreateMBB(SrcBB));
}
}
}
if (Idx != 0) {
II->RemoveOperand(Idx+1);
II->RemoveOperand(Idx);
}
}
}
}
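One type-level detail: unlike SmallVector, which can be passed around as a size-erased SmallVectorImpl<T> &, SmallSetVector has no such base class, which is why this function must spell out SmallSetVector<MachineBasicBlock*, 8> in its signature. A minimal sketch of passing and iterating one (ints instead of basic blocks):

#include "llvm/ADT/SetVector.h"
#include "llvm/Support/raw_ostream.h"

// The inline size N is part of the parameter type, just as in
// UpdateSuccessorsPHIs above.
static void printAll(llvm::SmallSetVector<int, 8> &Succs) {
  for (int S : Succs) // iterates in insertion order
    llvm::outs() << S << '\n';
}

int main() {
  llvm::SmallSetVector<int, 8> Succs;
  Succs.insert(10);
  Succs.insert(20);
  Succs.insert(10); // duplicate, ignored
  printAll(Succs);  // prints 10 then 20
}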
Example 4: report_fatal_error
bool MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
SmallSetVector<MachineInstr*, 8> MaybeDeadCopies; // Candidates for deletion
DenseMap<unsigned, MachineInstr*> AvailCopyMap; // Def -> available copies map
DenseMap<unsigned, MachineInstr*> CopyMap; // Def -> copies map
SourceMap SrcMap; // Src -> Def map
bool Changed = false;
for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E; ) {
MachineInstr *MI = &*I;
++I;
if (MI->isCopy()) {
unsigned Def = MI->getOperand(0).getReg();
unsigned Src = MI->getOperand(1).getReg();
if (TargetRegisterInfo::isVirtualRegister(Def) ||
TargetRegisterInfo::isVirtualRegister(Src))
report_fatal_error("MachineCopyPropagation should be run after"
" register allocation!");
DenseMap<unsigned, MachineInstr*>::iterator CI = AvailCopyMap.find(Src);
if (CI != AvailCopyMap.end()) {
MachineInstr *CopyMI = CI->second;
if (!MRI->isReserved(Def) &&
(!MRI->isReserved(Src) || NoInterveningSideEffect(CopyMI, MI)) &&
isNopCopy(CopyMI, Def, Src, TRI)) {
// The two copies cancel out and the source of the first copy
// hasn't been overridden, eliminate the second one. e.g.
// %ECX<def> = COPY %EAX<kill>
// ... nothing clobbered EAX.
// %EAX<def> = COPY %ECX
// =>
// %ECX<def> = COPY %EAX
//
// Also avoid eliminating a copy from reserved registers unless the
// definition is proven not clobbered. e.g.
// %RSP<def> = COPY %RAX
// CALL
// %RAX<def> = COPY %RSP
// Clear any kills of Def between CopyMI and MI. This extends the
// live range.
for (MachineBasicBlock::iterator I = CopyMI, E = MI; I != E; ++I)
I->clearRegisterKills(Def, TRI);
removeCopy(MI);
Changed = true;
++NumDeletes;
continue;
}
}
// If Src is defined by a previous copy, it cannot be eliminated.
for (MCRegAliasIterator AI(Src, TRI, true); AI.isValid(); ++AI) {
CI = CopyMap.find(*AI);
if (CI != CopyMap.end())
MaybeDeadCopies.remove(CI->second);
}
// Copy is now a candidate for deletion.
MaybeDeadCopies.insert(MI);
// If 'Src' is previously source of another copy, then this earlier copy's
// source is no longer available. e.g.
// %xmm9<def> = copy %xmm2
// ...
// %xmm2<def> = copy %xmm0
// ...
// %xmm2<def> = copy %xmm9
SourceNoLongerAvailable(Def, SrcMap, AvailCopyMap);
// Remember Def is defined by the copy.
// ... Make sure to clear the def maps of aliases first.
for (MCRegAliasIterator AI(Def, TRI, false); AI.isValid(); ++AI) {
CopyMap.erase(*AI);
AvailCopyMap.erase(*AI);
}
CopyMap[Def] = MI;
AvailCopyMap[Def] = MI;
for (MCSubRegIterator SR(Def, TRI); SR.isValid(); ++SR) {
CopyMap[*SR] = MI;
AvailCopyMap[*SR] = MI;
}
// Remember source that's copied to Def. Once it's clobbered, then
// it's no longer available for copy propagation.
if (std::find(SrcMap[Src].begin(), SrcMap[Src].end(), Def) ==
SrcMap[Src].end()) {
SrcMap[Src].push_back(Def);
}
continue;
}
// Not a copy.
SmallVector<unsigned, 2> Defs;
int RegMaskOpNum = -1;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (MO.isRegMask())
//......... rest of the code omitted here .........
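MaybeDeadCopies shows the insert/remove lifecycle: copies enter the set as deletion candidates, remove() rescues any whose value turns out to be needed, and the survivors can be processed in deterministic insertion order. A reduced sketch of that lifecycle, using ints in place of MachineInstr pointers:

#include "llvm/ADT/SetVector.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::SmallSetVector<int, 8> MaybeDead;
  MaybeDead.insert(1);
  MaybeDead.insert(2);
  MaybeDead.insert(3);
  // A later use "rescues" candidate 2; remove() returns true on success
  // and keeps the remaining elements in their original insertion order.
  bool Rescued = MaybeDead.remove(2);
  llvm::outs() << "rescued: " << Rescued << '\n';
  for (int C : MaybeDead) // 1, then 3
    llvm::outs() << "still dead: " << C << '\n';
}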
Example 5: formLCSSAForInstructions
/// For every instruction from the worklist, check to see if it has any uses
/// that are outside the current loop. If so, insert LCSSA PHI nodes and
/// rewrite the uses.
bool llvm::formLCSSAForInstructions(SmallVectorImpl<Instruction *> &Worklist,
DominatorTree &DT, LoopInfo &LI) {
SmallVector<Use *, 16> UsesToRewrite;
SmallSetVector<PHINode *, 16> PHIsToRemove;
PredIteratorCache PredCache;
bool Changed = false;
// Cache the Loop ExitBlocks across this loop. We expect to get a lot of
// instructions within the same loops, computing the exit blocks is
// expensive, and we're not mutating the loop structure.
SmallDenseMap<Loop*, SmallVector<BasicBlock *,1>> LoopExitBlocks;
while (!Worklist.empty()) {
UsesToRewrite.clear();
Instruction *I = Worklist.pop_back_val();
BasicBlock *InstBB = I->getParent();
Loop *L = LI.getLoopFor(InstBB);
if (!LoopExitBlocks.count(L))
L->getExitBlocks(LoopExitBlocks[L]);
assert(LoopExitBlocks.count(L));
const SmallVectorImpl<BasicBlock *> &ExitBlocks = LoopExitBlocks[L];
if (ExitBlocks.empty())
continue;
// Tokens cannot be used in PHI nodes, so we skip over them.
// We can run into tokens which are live out of a loop with catchswitch
// instructions in Windows EH if the catchswitch has one catchpad which
// is inside the loop and another which is not.
if (I->getType()->isTokenTy())
continue;
for (Use &U : I->uses()) {
Instruction *User = cast<Instruction>(U.getUser());
BasicBlock *UserBB = User->getParent();
if (PHINode *PN = dyn_cast<PHINode>(User))
UserBB = PN->getIncomingBlock(U);
if (InstBB != UserBB && !L->contains(UserBB))
UsesToRewrite.push_back(&U);
}
// If there are no uses outside the loop, exit with no change.
if (UsesToRewrite.empty())
continue;
++NumLCSSA; // We are applying the transformation
// Invoke instructions are special in that their result value is not
// available along their unwind edge. The code below tests to see whether
// DomBB dominates the value, so adjust DomBB to the normal destination
// block, which is effectively where the value is first usable.
BasicBlock *DomBB = InstBB;
if (InvokeInst *Inv = dyn_cast<InvokeInst>(I))
DomBB = Inv->getNormalDest();
DomTreeNode *DomNode = DT.getNode(DomBB);
SmallVector<PHINode *, 16> AddedPHIs;
SmallVector<PHINode *, 8> PostProcessPHIs;
SmallVector<PHINode *, 4> InsertedPHIs;
SSAUpdater SSAUpdate(&InsertedPHIs);
SSAUpdate.Initialize(I->getType(), I->getName());
// Insert the LCSSA phi's into all of the exit blocks dominated by the
// value, and add them to the Phi's map.
for (BasicBlock *ExitBB : ExitBlocks) {
if (!DT.dominates(DomNode, DT.getNode(ExitBB)))
continue;
// If we already inserted something for this BB, don't reprocess it.
if (SSAUpdate.HasValueForBlock(ExitBB))
continue;
PHINode *PN = PHINode::Create(I->getType(), PredCache.size(ExitBB),
I->getName() + ".lcssa", &ExitBB->front());
// Add inputs from inside the loop for this PHI.
for (BasicBlock *Pred : PredCache.get(ExitBB)) {
PN->addIncoming(I, Pred);
// If the exit block has a predecessor not within the loop, arrange for
// the incoming value use corresponding to that predecessor to be
// rewritten in terms of a different LCSSA PHI.
if (!L->contains(Pred))
UsesToRewrite.push_back(
&PN->getOperandUse(PN->getOperandNumForIncomingValue(
PN->getNumIncomingValues() - 1)));
}
AddedPHIs.push_back(PN);
// Remember that this phi makes the value alive in this block.
SSAUpdate.AddAvailableValue(ExitBB, PN);
//......... rest of the code omitted here .........
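PHIsToRemove is declared up front and, in the omitted tail of the function, drained only after the rewrite loop finishes -- the usual collect-now, mutate-later pattern that avoids invalidating what you are still iterating. A generic sketch of the pattern (the data and the "even values are doomed" rule are invented):

#include "llvm/ADT/SetVector.h"
#include "llvm/Support/raw_ostream.h"
#include <vector>

int main() {
  std::vector<int> Values = {1, 2, 3, 4, 5, 2, 4};
  llvm::SmallSetVector<int, 16> ToRemove;
  // Phase 1: scan and record doomed elements; duplicates collapse for free.
  for (int V : Values)
    if (V % 2 == 0)
      ToRemove.insert(V);
  // Phase 2: only now mutate the underlying data.
  for (int V : ToRemove)
    llvm::outs() << "erasing all occurrences of " << V << '\n';
}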
Example 6: runInternal
void AAEvaluator::runInternal(Function &F, AAResults &AA) {
const DataLayout &DL = F.getParent()->getDataLayout();
++FunctionCount;
SetVector<Value *> Pointers;
SmallSetVector<CallBase *, 16> Calls;
SetVector<Value *> Loads;
SetVector<Value *> Stores;
for (auto &I : F.args())
if (I.getType()->isPointerTy()) // Add all pointer arguments.
Pointers.insert(&I);
for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
if (I->getType()->isPointerTy()) // Add all pointer instructions.
Pointers.insert(&*I);
if (EvalAAMD && isa<LoadInst>(&*I))
Loads.insert(&*I);
if (EvalAAMD && isa<StoreInst>(&*I))
Stores.insert(&*I);
Instruction &Inst = *I;
if (auto *Call = dyn_cast<CallBase>(&Inst)) {
Value *Callee = Call->getCalledValue();
// Skip actual functions for direct function calls.
if (!isa<Function>(Callee) && isInterestingPointer(Callee))
Pointers.insert(Callee);
// Consider formals.
for (Use &DataOp : Call->data_ops())
if (isInterestingPointer(DataOp))
Pointers.insert(DataOp);
Calls.insert(Call);
} else {
// Consider all operands.
for (Instruction::op_iterator OI = Inst.op_begin(), OE = Inst.op_end();
OI != OE; ++OI)
if (isInterestingPointer(*OI))
Pointers.insert(*OI);
}
}
if (PrintAll || PrintNoAlias || PrintMayAlias || PrintPartialAlias ||
PrintMustAlias || PrintNoModRef || PrintMod || PrintRef || PrintModRef)
errs() << "Function: " << F.getName() << ": " << Pointers.size()
<< " pointers, " << Calls.size() << " call sites\n";
// iterate over the worklist, and run the full (n^2)/2 disambiguations
for (SetVector<Value *>::iterator I1 = Pointers.begin(), E = Pointers.end();
I1 != E; ++I1) {
auto I1Size = LocationSize::unknown();
Type *I1ElTy = cast<PointerType>((*I1)->getType())->getElementType();
if (I1ElTy->isSized())
I1Size = LocationSize::precise(DL.getTypeStoreSize(I1ElTy));
for (SetVector<Value *>::iterator I2 = Pointers.begin(); I2 != I1; ++I2) {
auto I2Size = LocationSize::unknown();
Type *I2ElTy = cast<PointerType>((*I2)->getType())->getElementType();
if (I2ElTy->isSized())
I2Size = LocationSize::precise(DL.getTypeStoreSize(I2ElTy));
AliasResult AR = AA.alias(*I1, I1Size, *I2, I2Size);
switch (AR) {
case NoAlias:
PrintResults(AR, PrintNoAlias, *I1, *I2, F.getParent());
++NoAliasCount;
break;
case MayAlias:
PrintResults(AR, PrintMayAlias, *I1, *I2, F.getParent());
++MayAliasCount;
break;
case PartialAlias:
PrintResults(AR, PrintPartialAlias, *I1, *I2, F.getParent());
++PartialAliasCount;
break;
case MustAlias:
PrintResults(AR, PrintMustAlias, *I1, *I2, F.getParent());
++MustAliasCount;
break;
}
}
}
if (EvalAAMD) {
// iterate over all pairs of load, store
for (Value *Load : Loads) {
for (Value *Store : Stores) {
AliasResult AR = AA.alias(MemoryLocation::get(cast<LoadInst>(Load)),
MemoryLocation::get(cast<StoreInst>(Store)));
switch (AR) {
case NoAlias:
PrintLoadStoreResults(AR, PrintNoAlias, Load, Store, F.getParent());
++NoAliasCount;
break;
case MayAlias:
PrintLoadStoreResults(AR, PrintMayAlias, Load, Store, F.getParent());
++MayAliasCount;
break;
case PartialAlias:
PrintLoadStoreResults(AR, PrintPartialAlias, Load, Store, F.getParent());
++PartialAliasCount;
//......... rest of the code omitted here .........
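Calls is used as a de-duplicated counter that keeps its contents: each CallBase is inserted once however many paths reach it, and Calls.size() later feeds the statistics line. A compact sketch of counting distinct pointers this way (stack ints stand in for call sites):

#include "llvm/ADT/SetVector.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  int A = 0, B = 0, C = 0;
  int *Stream[] = {&A, &B, &A, &C, &B};  // call sites, some seen twice
  llvm::SmallSetVector<int *, 16> Calls; // keyed on pointer identity
  for (int *Site : Stream)
    Calls.insert(Site);
  llvm::outs() << Calls.size() << " distinct call sites\n"; // prints 3
}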
Example 7: assert
LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
LazyCallGraph &G, LazyCallGraph::SCC &InitialC, LazyCallGraph::Node &N,
CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR) {
using Node = LazyCallGraph::Node;
using Edge = LazyCallGraph::Edge;
using SCC = LazyCallGraph::SCC;
using RefSCC = LazyCallGraph::RefSCC;
RefSCC &InitialRC = InitialC.getOuterRefSCC();
SCC *C = &InitialC;
RefSCC *RC = &InitialRC;
Function &F = N.getFunction();
// Walk the function body and build up the set of retained, promoted, and
// demoted edges.
SmallVector<Constant *, 16> Worklist;
SmallPtrSet<Constant *, 16> Visited;
SmallPtrSet<Node *, 16> RetainedEdges;
SmallSetVector<Node *, 4> PromotedRefTargets;
SmallSetVector<Node *, 4> DemotedCallTargets;
// First walk the function and handle all called functions. We do this first
// because if there is a single call edge, whether there are ref edges is
// irrelevant.
for (Instruction &I : instructions(F))
if (auto CS = CallSite(&I))
if (Function *Callee = CS.getCalledFunction())
if (Visited.insert(Callee).second && !Callee->isDeclaration()) {
Node &CalleeN = *G.lookup(*Callee);
Edge *E = N->lookup(CalleeN);
// FIXME: We should really handle adding new calls. While it will
// make downstream usage more complex, there is no fundamental
// limitation and it will allow passes within the CGSCC to be a bit
// more flexible in what transforms they can do. Until then, we
// verify that new calls haven't been introduced.
assert(E && "No function transformations should introduce *new* "
"call edges! Any new calls should be modeled as "
"promoted existing ref edges!");
bool Inserted = RetainedEdges.insert(&CalleeN).second;
(void)Inserted;
assert(Inserted && "We should never visit a function twice.");
if (!E->isCall())
PromotedRefTargets.insert(&CalleeN);
}
// Now walk all references.
for (Instruction &I : instructions(F))
for (Value *Op : I.operand_values())
if (auto *C = dyn_cast<Constant>(Op))
if (Visited.insert(C).second)
Worklist.push_back(C);
auto VisitRef = [&](Function &Referee) {
Node &RefereeN = *G.lookup(Referee);
Edge *E = N->lookup(RefereeN);
// FIXME: Similarly to new calls, we also currently preclude
// introducing new references. See above for details.
assert(E && "No function transformations should introduce *new* ref "
"edges! Any new ref edges would require IPO which "
"function passes aren't allowed to do!");
bool Inserted = RetainedEdges.insert(&RefereeN).second;
(void)Inserted;
assert(Inserted && "We should never visit a function twice.");
if (E->isCall())
DemotedCallTargets.insert(&RefereeN);
};
LazyCallGraph::visitReferences(Worklist, Visited, VisitRef);
// Include synthetic reference edges to known, defined lib functions.
for (auto *F : G.getLibFunctions())
// While the list of lib functions doesn't have repeats, don't re-visit
// anything handled above.
if (!Visited.count(F))
VisitRef(*F);
// First remove all of the edges that are no longer present in this function.
// The first step makes these edges uniformly ref edges and accumulates them
// into a separate data structure so removal doesn't invalidate anything.
SmallVector<Node *, 4> DeadTargets;
for (Edge &E : *N) {
if (RetainedEdges.count(&E.getNode()))
continue;
SCC &TargetC = *G.lookupSCC(E.getNode());
RefSCC &TargetRC = TargetC.getOuterRefSCC();
if (&TargetRC == RC && E.isCall()) {
if (C != &TargetC) {
// For separate SCCs this is trivial.
RC->switchTrivialInternalEdgeToRef(N, E.getNode());
} else {
// Now update the call graph.
C = incorporateNewSCCRange(RC->switchInternalEdgeToRef(N, E.getNode()),
G, N, C, AM, UR);
}
}
// Now that this is ready for actual removal, put it into our list.
DeadTargets.push_back(&E.getNode());
}
// Remove the easy cases quickly and actually pull them out of our list.
//......... rest of the code omitted here .........
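Note the division of labor between the containers: RetainedEdges only answers membership queries, so a SmallPtrSet suffices, while PromotedRefTargets and DemotedCallTargets are iterated later to mutate the graph, and SmallSetVector gives them a deterministic insertion order where a pointer set would iterate in address order. A sketch of the contrast (ints for graph nodes):

#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  int Nodes[4] = {0, 1, 2, 3};
  llvm::SmallPtrSet<int *, 16> Retained;   // membership queries only
  llvm::SmallSetVector<int *, 4> Promoted; // iterated later: keep order
  for (int &N : Nodes) {
    Retained.insert(&N);
    Promoted.insert(&N);
  }
  // Deterministic: always visits in insertion order.
  for (int *N : Promoted)
    llvm::outs() << "promote node " << *N << '\n';
  // Fine for queries; iterating Retained would be nondeterministic.
  llvm::outs() << "retained? " << Retained.count(&Nodes[2]) << '\n';
}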
Example 8: while
bool AArch64RedundantCopyElimination::optimizeCopy(MachineBasicBlock *MBB) {
// Check if the current basic block has a single predecessor.
if (MBB->pred_size() != 1)
return false;
MachineBasicBlock *PredMBB = *MBB->pred_begin();
MachineBasicBlock::iterator CompBr = PredMBB->getLastNonDebugInstr();
if (CompBr == PredMBB->end() || PredMBB->succ_size() != 2)
return false;
++CompBr;
do {
--CompBr;
if (guaranteesZeroRegInBlock(*CompBr, MBB))
break;
} while (CompBr != PredMBB->begin() && CompBr->isTerminator());
// We've not found a CBZ/CBNZ, time to bail out.
if (!guaranteesZeroRegInBlock(*CompBr, MBB))
return false;
unsigned TargetReg = CompBr->getOperand(0).getReg();
if (!TargetReg)
return false;
assert(TargetRegisterInfo::isPhysicalRegister(TargetReg) &&
"Expect physical register");
// Remember all registers aliasing with TargetReg.
SmallSetVector<unsigned, 8> TargetRegs;
for (MCRegAliasIterator AI(TargetReg, TRI, true); AI.isValid(); ++AI)
TargetRegs.insert(*AI);
bool Changed = false;
MachineBasicBlock::iterator LastChange = MBB->begin();
unsigned SmallestDef = TargetReg;
// Remove redundant Copy instructions unless TargetReg is modified.
for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;) {
MachineInstr *MI = &*I;
++I;
if (MI->isCopy() && MI->getOperand(0).isReg() &&
MI->getOperand(1).isReg()) {
unsigned DefReg = MI->getOperand(0).getReg();
unsigned SrcReg = MI->getOperand(1).getReg();
if ((SrcReg == AArch64::XZR || SrcReg == AArch64::WZR) &&
!MRI->isReserved(DefReg) &&
(TargetReg == DefReg || TRI->isSuperRegister(DefReg, TargetReg))) {
DEBUG(dbgs() << "Remove redundant Copy : ");
DEBUG((MI)->print(dbgs()));
MI->eraseFromParent();
Changed = true;
LastChange = I;
NumCopiesRemoved++;
SmallestDef =
TRI->isSubRegister(SmallestDef, DefReg) ? DefReg : SmallestDef;
continue;
}
}
if (MI->modifiesRegister(TargetReg, TRI))
break;
}
if (!Changed)
return false;
// Otherwise, we have to fixup the use-def chain, starting with the
// CBZ/CBNZ. Conservatively mark as much as we can live.
CompBr->clearRegisterKills(SmallestDef, TRI);
if (std::none_of(TargetRegs.begin(), TargetRegs.end(),
[&](unsigned Reg) { return MBB->isLiveIn(Reg); }))
MBB->addLiveIn(TargetReg);
// Clear any kills of TargetReg between CompBr and the last removed COPY.
for (MachineInstr &MMI :
make_range(MBB->begin()->getIterator(), LastChange->getIterator()))
MMI.clearRegisterKills(SmallestDef, TRI);
return true;
}
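TargetRegs also demonstrates that a SmallSetVector plugs directly into the standard algorithms: begin()/end() are ordinary contiguous iterators, so std::none_of works unchanged. A small sketch of the same query, with integer "register units" and a made-up live-in predicate:

#include "llvm/ADT/SetVector.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>

int main() {
  llvm::SmallSetVector<unsigned, 8> TargetRegs;
  for (unsigned Alias : {3u, 7u, 11u}) // e.g. a register and its aliases
    TargetRegs.insert(Alias);
  auto IsLiveIn = [](unsigned Reg) { return Reg == 7; }; // stand-in predicate
  if (std::none_of(TargetRegs.begin(), TargetRegs.end(), IsLiveIn))
    llvm::outs() << "no alias is live-in; add one\n";
  else
    llvm::outs() << "some alias already live-in\n"; // taken here (7)
}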
Example 9: MIB
/// After FromBB is tail duplicated into its predecessor blocks, the successors
/// have gained new predecessors. Update the PHI instructions in them
/// accordingly.
void
TailDuplicatePass::UpdateSuccessorsPHIs(MachineBasicBlock *FromBB, bool isDead,
SmallVectorImpl<MachineBasicBlock *> &TDBBs,
SmallSetVector<MachineBasicBlock*,8> &Succs) {
for (SmallSetVector<MachineBasicBlock*, 8>::iterator SI = Succs.begin(),
SE = Succs.end(); SI != SE; ++SI) {
MachineBasicBlock *SuccBB = *SI;
for (MachineBasicBlock::iterator II = SuccBB->begin(), EE = SuccBB->end();
II != EE; ++II) {
if (!II->isPHI())
break;
MachineInstrBuilder MIB(*FromBB->getParent(), II);
unsigned Idx = 0;
for (unsigned i = 1, e = II->getNumOperands(); i != e; i += 2) {
MachineOperand &MO = II->getOperand(i+1);
if (MO.getMBB() == FromBB) {
Idx = i;
break;
}
}
assert(Idx != 0);
MachineOperand &MO0 = II->getOperand(Idx);
unsigned Reg = MO0.getReg();
if (isDead) {
// Folded into the previous BB.
// There could be duplicate phi source entries. FIXME: Should sdisel
// or an earlier pass have fixed this?
for (unsigned i = II->getNumOperands()-2; i != Idx; i -= 2) {
MachineOperand &MO = II->getOperand(i+1);
if (MO.getMBB() == FromBB) {
II->RemoveOperand(i+1);
II->RemoveOperand(i);
}
}
} else
Idx = 0;
// If Idx is set, the operands at Idx and Idx+1 must be removed.
// We reuse the location to avoid expensive RemoveOperand calls.
DenseMap<unsigned,AvailableValsTy>::iterator LI=SSAUpdateVals.find(Reg);
if (LI != SSAUpdateVals.end()) {
// This register is defined in the tail block.
for (unsigned j = 0, ee = LI->second.size(); j != ee; ++j) {
MachineBasicBlock *SrcBB = LI->second[j].first;
// If we didn't duplicate a bb into a particular predecessor, we
// might still have added an entry to SSAUpdateVals to correctly
// recompute SSA. In that case, avoid adding a dummy extra argument to
// this PHI.
if (!SrcBB->isSuccessor(SuccBB))
continue;
unsigned SrcReg = LI->second[j].second;
if (Idx != 0) {
II->getOperand(Idx).setReg(SrcReg);
II->getOperand(Idx+1).setMBB(SrcBB);
Idx = 0;
} else {
MIB.addReg(SrcReg).addMBB(SrcBB);
}
}
} else {
// Live in tail block, must also be live in predecessors.
for (unsigned j = 0, ee = TDBBs.size(); j != ee; ++j) {
MachineBasicBlock *SrcBB = TDBBs[j];
if (Idx != 0) {
II->getOperand(Idx).setReg(Reg);
II->getOperand(Idx+1).setMBB(SrcBB);
Idx = 0;
} else {
MIB.addReg(Reg).addMBB(SrcBB);
}
}
}
if (Idx != 0) {
II->RemoveOperand(Idx+1);
II->RemoveOperand(Idx);
}
}
}
}
Example 10: categorizeModuleFlagNodes
/// linkModuleFlagsMetadata - Merge the linker flags in Src into the Dest
/// module.
bool ModuleLinker::linkModuleFlagsMetadata() {
const NamedMDNode *SrcModFlags = SrcM->getModuleFlagsMetadata();
if (!SrcModFlags) return false;
NamedMDNode *DstModFlags = DstM->getOrInsertModuleFlagsMetadata();
// If the destination module doesn't have module flags yet, then just copy
// over the source module's flags.
if (DstModFlags->getNumOperands() == 0) {
for (unsigned I = 0, E = SrcModFlags->getNumOperands(); I != E; ++I)
DstModFlags->addOperand(SrcModFlags->getOperand(I));
return false;
}
bool HasErr = false;
// Otherwise, we have to merge them based on their behaviors. First,
// categorize all of the nodes in the modules' module flags. If an error or
// warning occurs, then emit the appropriate message(s).
DenseMap<MDString*, MDNode*> ErrorNode;
DenseMap<MDString*, MDNode*> WarningNode;
DenseMap<MDString*, MDNode*> OverrideNode;
DenseMap<MDString*, SmallSetVector<MDNode*, 8> > RequireNodes;
SmallSetVector<MDString*, 16> SeenIDs;
HasErr |= categorizeModuleFlagNodes(SrcModFlags, ErrorNode, WarningNode,
OverrideNode, RequireNodes, SeenIDs);
HasErr |= categorizeModuleFlagNodes(DstModFlags, ErrorNode, WarningNode,
OverrideNode, RequireNodes, SeenIDs);
// Check that there isn't both an error and warning node for a flag.
for (SmallSetVector<MDString*, 16>::iterator
I = SeenIDs.begin(), E = SeenIDs.end(); I != E; ++I) {
MDString *ID = *I;
if (ErrorNode[ID] && WarningNode[ID])
HasErr = emitError("linking module flags '" + ID->getString() +
"': IDs have conflicting behaviors");
}
// Early exit if we had an error.
if (HasErr) return true;
// Get the destination's module flags ready for new operands.
DstModFlags->dropAllReferences();
// Add all of the module flags to the destination module.
DenseMap<MDString*, SmallVector<MDNode*, 4> > AddedNodes;
for (SmallSetVector<MDString*, 16>::iterator
I = SeenIDs.begin(), E = SeenIDs.end(); I != E; ++I) {
MDString *ID = *I;
if (OverrideNode[ID]) {
DstModFlags->addOperand(OverrideNode[ID]);
AddedNodes[ID].push_back(OverrideNode[ID]);
} else if (ErrorNode[ID]) {
DstModFlags->addOperand(ErrorNode[ID]);
AddedNodes[ID].push_back(ErrorNode[ID]);
} else if (WarningNode[ID]) {
DstModFlags->addOperand(WarningNode[ID]);
AddedNodes[ID].push_back(WarningNode[ID]);
}
for (SmallSetVector<MDNode*, 8>::iterator
II = RequireNodes[ID].begin(), IE = RequireNodes[ID].end();
II != IE; ++II)
DstModFlags->addOperand(*II);
}
// Now check that all of the requirements have been satisfied.
for (SmallSetVector<MDString*, 16>::iterator
I = SeenIDs.begin(), E = SeenIDs.end(); I != E; ++I) {
MDString *ID = *I;
SmallSetVector<MDNode*, 8> &Set = RequireNodes[ID];
for (SmallSetVector<MDNode*, 8>::iterator
II = Set.begin(), IE = Set.end(); II != IE; ++II) {
MDNode *Node = *II;
assert(isa<MDNode>(Node->getOperand(2)) &&
"Module flag's third operand must be an MDNode!");
MDNode *Val = cast<MDNode>(Node->getOperand(2));
MDString *ReqID = cast<MDString>(Val->getOperand(0));
Value *ReqVal = Val->getOperand(1);
bool HasValue = false;
for (SmallVectorImpl<MDNode*>::iterator
RI = AddedNodes[ReqID].begin(), RE = AddedNodes[ReqID].end();
RI != RE; ++RI) {
MDNode *ReqNode = *RI;
if (ReqNode->getOperand(2) == ReqVal) {
HasValue = true;
break;
}
}
if (!HasValue)
HasErr = emitError("linking module flags '" + ReqID->getString() +
"': does not have the required value");
//......... rest of the code omitted here .........
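RequireNodes shows SmallSetVector composing as a DenseMap value: operator[] default-constructs an empty set on first touch, and each per-key set de-duplicates independently. A sketch of that map-of-sets shape with integer stand-ins for the MDString keys and MDNode values:

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::DenseMap<int, llvm::SmallSetVector<int, 8>> RequireNodes;
  RequireNodes[1].insert(100); // operator[] creates the empty set
  RequireNodes[1].insert(100); // per-key de-duplication
  RequireNodes[1].insert(101);
  RequireNodes[2].insert(200);
  llvm::outs() << "flag 1 has " << RequireNodes[1].size()
               << " requirements\n"; // prints 2
}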
Example 11: setupEntryBlockAndCallSites
/// setupEntryBlockAndCallSites - Setup the entry block by creating and filling
/// the function context and marking the call sites with the appropriate
/// values. These values are used by the DWARF EH emitter.
bool SjLjEHPrepare::setupEntryBlockAndCallSites(Function &F) {
SmallVector<ReturnInst *, 16> Returns;
SmallVector<InvokeInst *, 16> Invokes;
SmallSetVector<LandingPadInst *, 16> LPads;
// Look through the terminators of the basic blocks to find invokes.
for (BasicBlock &BB : F)
if (auto *II = dyn_cast<InvokeInst>(BB.getTerminator())) {
if (Function *Callee = II->getCalledFunction())
if (Callee->getIntrinsicID() == Intrinsic::donothing) {
// Remove the NOP invoke.
BranchInst::Create(II->getNormalDest(), II);
II->eraseFromParent();
continue;
}
Invokes.push_back(II);
LPads.insert(II->getUnwindDest()->getLandingPadInst());
} else if (auto *RI = dyn_cast<ReturnInst>(BB.getTerminator())) {
Returns.push_back(RI);
}
if (Invokes.empty())
return false;
NumInvokes += Invokes.size();
lowerIncomingArguments(F);
lowerAcrossUnwindEdges(F, Invokes);
Value *FuncCtx =
setupFunctionContext(F, makeArrayRef(LPads.begin(), LPads.end()));
BasicBlock *EntryBB = &F.front();
IRBuilder<> Builder(EntryBB->getTerminator());
// Get a reference to the jump buffer.
Value *JBufPtr =
Builder.CreateConstGEP2_32(FunctionContextTy, FuncCtx, 0, 5, "jbuf_gep");
// Save the frame pointer.
Value *FramePtr = Builder.CreateConstGEP2_32(doubleUnderJBufTy, JBufPtr, 0, 0,
"jbuf_fp_gep");
Value *Val = Builder.CreateCall(FrameAddrFn, Builder.getInt32(0), "fp");
Builder.CreateStore(Val, FramePtr, /*isVolatile=*/true);
// Save the stack pointer.
Value *StackPtr = Builder.CreateConstGEP2_32(doubleUnderJBufTy, JBufPtr, 0, 2,
"jbuf_sp_gep");
Val = Builder.CreateCall(StackAddrFn, {}, "sp");
Builder.CreateStore(Val, StackPtr, /*isVolatile=*/true);
// Call the setup_dispatch intrinsic. It fills in the rest of the jmpbuf.
Builder.CreateCall(BuiltinSetupDispatchFn, {});
// Store a pointer to the function context so that the back-end will know
// where to look for it.
Value *FuncCtxArg = Builder.CreateBitCast(FuncCtx, Builder.getInt8PtrTy());
Builder.CreateCall(FuncCtxFn, FuncCtxArg);
// At this point, we are all set up, update the invoke instructions to mark
// their call_site values.
for (unsigned I = 0, E = Invokes.size(); I != E; ++I) {
insertCallSiteStore(Invokes[I], I + 1);
ConstantInt *CallSiteNum =
ConstantInt::get(Type::getInt32Ty(F.getContext()), I + 1);
// Record the call site value for the back end so it stays associated with
// the invoke.
CallInst::Create(CallSiteFn, CallSiteNum, "", Invokes[I]);
}
// Mark call instructions that aren't nounwind as no-action (call_site ==
// -1). Skip the entry block, as prior to then, no function context has been
// created for this function and any unexpected exceptions thrown will go
// directly to the caller's context, which is what we want anyway, so no need
// to do anything here.
for (BasicBlock &BB : F) {
if (&BB == &F.front())
continue;
for (Instruction &I : BB)
if (I.mayThrow())
insertCallSiteStore(&I, -1);
}
// Register the function context and make sure it's known to not throw
CallInst *Register =
CallInst::Create(RegisterFn, FuncCtx, "", EntryBB->getTerminator());
Register->setDoesNotThrow();
// Following any allocas not in the entry block, update the saved SP in the
// jmpbuf to the new value.
for (BasicBlock &BB : F) {
if (&BB == &F.front())
continue;
//......... rest of the code omitted here .........
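The makeArrayRef(LPads.begin(), LPads.end()) call works because a SetVector keeps its elements contiguously in the underlying SmallVector, so its iterators are plain pointers and the unique landing pads can be handed to an ArrayRef-taking API without a copy. A sketch of that handoff; consume() is a hypothetical stand-in for setupFunctionContext:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Support/raw_ostream.h"

// Hypothetical consumer, standing in for setupFunctionContext.
static void consume(llvm::ArrayRef<int> Pads) {
  llvm::outs() << Pads.size() << " unique pads\n";
}

int main() {
  llvm::SmallSetVector<int, 16> LPads;
  LPads.insert(5);
  LPads.insert(5); // several invokes may share one landing pad
  LPads.insert(9);
  // The view stays valid as long as LPads is not modified while in use.
  consume(llvm::makeArrayRef(LPads.begin(), LPads.end())); // 2 unique pads
}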
Example 12: handleEndBlock
/// Remove dead stores to stack-allocated locations in the function end block.
/// Ex:
/// %A = alloca i32
/// ...
/// store i32 1, i32* %A
/// ret void
static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
MemoryDependenceResults *MD,
const TargetLibraryInfo *TLI,
InstOverlapIntervalsTy &IOL,
DenseMap<Instruction*, size_t> *InstrOrdering) {
bool MadeChange = false;
// Keep track of all of the stack objects that are dead at the end of the
// function.
SmallSetVector<Value*, 16> DeadStackObjects;
// Find all of the alloca'd pointers in the entry block.
BasicBlock &Entry = BB.getParent()->front();
for (Instruction &I : Entry) {
if (isa<AllocaInst>(&I))
DeadStackObjects.insert(&I);
// Okay, so these are dead heap objects, but if the pointer never escapes
// then it's leaked by this function anyways.
else if (isAllocLikeFn(&I, TLI) && !PointerMayBeCaptured(&I, true, true))
DeadStackObjects.insert(&I);
}
// Treat byval or inalloca arguments the same, stores to them are dead at the
// end of the function.
for (Argument &AI : BB.getParent()->args())
if (AI.hasByValOrInAllocaAttr())
DeadStackObjects.insert(&AI);
const DataLayout &DL = BB.getModule()->getDataLayout();
// Scan the basic block backwards
for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
--BBI;
// If we find a store, check to see if it points into a dead stack value.
if (hasMemoryWrite(&*BBI, *TLI) && isRemovable(&*BBI)) {
// See through pointer-to-pointer bitcasts
SmallVector<Value *, 4> Pointers;
GetUnderlyingObjects(getStoredPointerOperand(&*BBI), Pointers, DL);
// Stores to stack values are valid candidates for removal.
bool AllDead = true;
for (Value *Pointer : Pointers)
if (!DeadStackObjects.count(Pointer)) {
AllDead = false;
break;
}
if (AllDead) {
Instruction *Dead = &*BBI;
DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n DEAD: "
<< *Dead << "\n Objects: ";
for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
E = Pointers.end(); I != E; ++I) {
dbgs() << **I;
if (std::next(I) != E)
dbgs() << ", ";
}
dbgs() << '\n');
// DCE instructions only used to calculate that store.
deleteDeadInstruction(Dead, &BBI, *MD, *TLI, IOL, InstrOrdering, &DeadStackObjects);
++NumFastStores;
MadeChange = true;
continue;
}
}
// Remove any dead non-memory-mutating instructions.
if (isInstructionTriviallyDead(&*BBI, TLI)) {
DEBUG(dbgs() << "DSE: Removing trivially dead instruction:\n DEAD: "
<< *&*BBI << '\n');
deleteDeadInstruction(&*BBI, &BBI, *MD, *TLI, IOL, InstrOrdering, &DeadStackObjects);
++NumFastOther;
MadeChange = true;
continue;
}
if (isa<AllocaInst>(BBI)) {
// Remove allocas from the list of dead stack objects; there can't be
// any references before the definition.
DeadStackObjects.remove(&*BBI);
continue;
}
if (auto CS = CallSite(&*BBI)) {
// Remove allocation function calls from the list of dead stack objects;
// there can't be any references before the definition.
if (isAllocLikeFn(&*BBI, TLI))
DeadStackObjects.remove(&*BBI);
// If this call does not access memory, it can't be loading any of our
//......... rest of the code omitted here .........
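DeadStackObjects drives the scan with two cheap operations: count() asks whether every object a store writes through is still known dead, and remove() retires an object once its definition is reached. A reduced sketch of the membership-test half:

#include "llvm/ADT/SetVector.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  int A = 0, B = 0;
  llvm::SmallSetVector<int *, 16> DeadStackObjects;
  DeadStackObjects.insert(&A);
  DeadStackObjects.insert(&B);
  int *StorePointers[] = {&A, &B};
  // A store is removable only if *all* underlying objects are dead.
  bool AllDead = true;
  for (int *P : StorePointers)
    if (!DeadStackObjects.count(P)) {
      AllDead = false;
      break;
    }
  llvm::outs() << "store removable: " << AllDead << '\n'; // prints 1
  DeadStackObjects.remove(&A); // reached A's definition: stop tracking it
}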
Example 13: SortBlocks
/// Sort the blocks in RPO, taking special care to make sure that loops are
/// contiguous even in the case of split backedges.
///
/// TODO: Determine whether RPO is actually worthwhile, or whether we should
/// move to just a stable-topological-sort-based approach that would preserve
/// more of the original order.
static void SortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI) {
// Note that we do our own RPO rather than using
// "llvm/ADT/PostOrderIterator.h" because we want control over the order that
// successors are visited in (see above). Also, we can sort the blocks in the
// MachineFunction as we go.
SmallPtrSet<MachineBasicBlock *, 16> Visited;
SmallVector<POStackEntry, 16> Stack;
MachineBasicBlock *EntryBlock = &*MF.begin();
Visited.insert(EntryBlock);
Stack.push_back(POStackEntry(EntryBlock, MF, MLI));
for (;;) {
POStackEntry &Entry = Stack.back();
SmallVectorImpl<MachineBasicBlock *> &Succs = Entry.Succs;
if (!Succs.empty()) {
MachineBasicBlock *Succ = Succs.pop_back_val();
if (Visited.insert(Succ).second)
Stack.push_back(POStackEntry(Succ, MF, MLI));
continue;
}
// Put the block in its position in the MachineFunction.
MachineBasicBlock &MBB = *Entry.MBB;
MBB.moveBefore(&*MF.begin());
// Branch instructions may utilize a fallthrough, so update them if a
// fallthrough has been added or removed.
if (!MBB.empty() && MBB.back().isTerminator() && !MBB.back().isBranch() &&
!MBB.back().isBarrier())
report_fatal_error(
"Non-branch terminator with fallthrough cannot yet be rewritten");
if (MBB.empty() || !MBB.back().isTerminator() || MBB.back().isBranch())
MBB.updateTerminator();
Stack.pop_back();
if (Stack.empty())
break;
}
// Now that we've sorted the blocks in RPO, renumber them.
MF.RenumberBlocks();
#ifndef NDEBUG
SmallSetVector<MachineLoop *, 8> OnStack;
// Insert a sentinel representing the degenerate loop that starts at the
// function entry block and includes the entire function as a "loop" that
// executes once.
OnStack.insert(nullptr);
for (auto &MBB : MF) {
assert(MBB.getNumber() >= 0 && "Renumbered blocks should be non-negative.");
MachineLoop *Loop = MLI.getLoopFor(&MBB);
if (Loop && &MBB == Loop->getHeader()) {
// Loop header. The loop predecessor should be sorted above, and the other
// predecessors should be backedges below.
for (auto Pred : MBB.predecessors())
assert(
(Pred->getNumber() < MBB.getNumber() || Loop->contains(Pred)) &&
"Loop header predecessors must be loop predecessors or backedges");
assert(OnStack.insert(Loop) && "Loops should be declared at most once.");
} else {
// Not a loop header. All predecessors should be sorted above.
for (auto Pred : MBB.predecessors())
assert(Pred->getNumber() < MBB.getNumber() &&
"Non-loop-header predecessors should be topologically sorted");
assert(OnStack.count(MLI.getLoopFor(&MBB)) &&
"Blocks must be nested in their loops");
}
while (OnStack.size() > 1 && &MBB == LoopBottom(OnStack.back()))
OnStack.pop_back();
}
assert(OnStack.pop_back_val() == nullptr &&
"The function entry block shouldn't actually be a loop header");
assert(OnStack.empty() &&
"Control flow stack pushes and pops should be balanced.");
#endif
}
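OnStack uses all three personalities of SmallSetVector at once: back()/pop_back() give LIFO nesting, count() answers "is this loop currently open", and insert()'s bool return doubles as the "declared at most once" check. Keeping a side-effecting insert inside assert is safe in the original only because the whole block is compiled under #ifndef NDEBUG; the toy version below hoists it out so it also works in release builds:

#include "llvm/ADT/SetVector.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>

int main() {
  // Nesting events: positive = enter loop N, negative = leave loop N.
  int Events[] = {1, 2, -2, -1};
  llvm::SmallSetVector<int, 8> OnStack;
  for (int E : Events) {
    if (E > 0) {
      bool FirstOpen = OnStack.insert(E); // false if already open
      assert(FirstOpen && "loop opened twice");
      (void)FirstOpen;
    } else {
      assert(OnStack.back() == -E && "loops must close innermost-first");
      OnStack.pop_back();
    }
  }
  llvm::outs() << "balanced: " << OnStack.empty() << '\n'; // prints 1
}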
Example 14: isReturnNonNull
/// Tests whether this function is known to not return null.
///
/// Requires that the function returns a pointer.
///
/// Returns true if it believes the function will not return a null, and sets
/// \p Speculative based on whether the returned conclusion is a speculative
/// conclusion due to SCC calls.
static bool isReturnNonNull(Function *F, const SCCNodeSet &SCCNodes,
const TargetLibraryInfo &TLI, bool &Speculative) {
assert(F->getReturnType()->isPointerTy() &&
"nonnull only meaningful on pointer types");
Speculative = false;
SmallSetVector<Value *, 8> FlowsToReturn;
for (BasicBlock &BB : *F)
if (auto *Ret = dyn_cast<ReturnInst>(BB.getTerminator()))
FlowsToReturn.insert(Ret->getReturnValue());
for (unsigned i = 0; i != FlowsToReturn.size(); ++i) {
Value *RetVal = FlowsToReturn[i];
// If this value is locally known to be non-null, we're good
if (isKnownNonNull(RetVal, &TLI))
continue;
// Otherwise, we need to look upwards since we can't make any local
// conclusions.
Instruction *RVI = dyn_cast<Instruction>(RetVal);
if (!RVI)
return false;
switch (RVI->getOpcode()) {
// Extend the analysis by looking upwards.
case Instruction::BitCast:
case Instruction::GetElementPtr:
case Instruction::AddrSpaceCast:
FlowsToReturn.insert(RVI->getOperand(0));
continue;
case Instruction::Select: {
SelectInst *SI = cast<SelectInst>(RVI);
FlowsToReturn.insert(SI->getTrueValue());
FlowsToReturn.insert(SI->getFalseValue());
continue;
}
case Instruction::PHI: {
PHINode *PN = cast<PHINode>(RVI);
for (int i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
FlowsToReturn.insert(PN->getIncomingValue(i));
continue;
}
case Instruction::Call:
case Instruction::Invoke: {
CallSite CS(RVI);
Function *Callee = CS.getCalledFunction();
// A call to a node within the SCC is assumed to return null until
// proven otherwise
if (Callee && SCCNodes.count(Callee)) {
Speculative = true;
continue;
}
return false;
}
default:
return false; // Unknown source, may be null
}
llvm_unreachable("should have either continued or returned");
}
return true;
}
Example 15: stringErr
/// Merge the linker flags in Src into the Dest module.
Error IRLinker::linkModuleFlagsMetadata() {
// If the source module has no module flags, we are done.
const NamedMDNode *SrcModFlags = SrcM->getModuleFlagsMetadata();
if (!SrcModFlags)
return Error::success();
// If the destination module doesn't have module flags yet, then just copy
// over the source module's flags.
NamedMDNode *DstModFlags = DstM.getOrInsertModuleFlagsMetadata();
if (DstModFlags->getNumOperands() == 0) {
for (unsigned I = 0, E = SrcModFlags->getNumOperands(); I != E; ++I)
DstModFlags->addOperand(SrcModFlags->getOperand(I));
return Error::success();
}
// First build a map of the existing module flags and requirements.
DenseMap<MDString *, std::pair<MDNode *, unsigned>> Flags;
SmallSetVector<MDNode *, 16> Requirements;
for (unsigned I = 0, E = DstModFlags->getNumOperands(); I != E; ++I) {
MDNode *Op = DstModFlags->getOperand(I);
ConstantInt *Behavior = mdconst::extract<ConstantInt>(Op->getOperand(0));
MDString *ID = cast<MDString>(Op->getOperand(1));
if (Behavior->getZExtValue() == Module::Require) {
Requirements.insert(cast<MDNode>(Op->getOperand(2)));
} else {
Flags[ID] = std::make_pair(Op, I);
}
}
// Merge in the flags from the source module, and also collect its set of
// requirements.
for (unsigned I = 0, E = SrcModFlags->getNumOperands(); I != E; ++I) {
MDNode *SrcOp = SrcModFlags->getOperand(I);
ConstantInt *SrcBehavior =
mdconst::extract<ConstantInt>(SrcOp->getOperand(0));
MDString *ID = cast<MDString>(SrcOp->getOperand(1));
MDNode *DstOp;
unsigned DstIndex;
std::tie(DstOp, DstIndex) = Flags.lookup(ID);
unsigned SrcBehaviorValue = SrcBehavior->getZExtValue();
// If this is a requirement, add it and continue.
if (SrcBehaviorValue == Module::Require) {
// If the destination module does not already have this requirement, add
// it.
if (Requirements.insert(cast<MDNode>(SrcOp->getOperand(2)))) {
DstModFlags->addOperand(SrcOp);
}
continue;
}
// If there is no existing flag with this ID, just add it.
if (!DstOp) {
Flags[ID] = std::make_pair(SrcOp, DstModFlags->getNumOperands());
DstModFlags->addOperand(SrcOp);
continue;
}
// Otherwise, perform a merge.
ConstantInt *DstBehavior =
mdconst::extract<ConstantInt>(DstOp->getOperand(0));
unsigned DstBehaviorValue = DstBehavior->getZExtValue();
// If either flag has override behavior, handle it first.
if (DstBehaviorValue == Module::Override) {
// Diagnose inconsistent flags which both have override behavior.
if (SrcBehaviorValue == Module::Override &&
SrcOp->getOperand(2) != DstOp->getOperand(2))
return stringErr("linking module flags '" + ID->getString() +
"': IDs have conflicting override values");
continue;
} else if (SrcBehaviorValue == Module::Override) {
// Update the destination flag to that of the source.
DstModFlags->setOperand(DstIndex, SrcOp);
Flags[ID].first = SrcOp;
continue;
}
// Diagnose inconsistent merge behavior types.
if (SrcBehaviorValue != DstBehaviorValue)
return stringErr("linking module flags '" + ID->getString() +
"': IDs have conflicting behaviors");
auto replaceDstValue = [&](MDNode *New) {
Metadata *FlagOps[] = {DstOp->getOperand(0), ID, New};
MDNode *Flag = MDNode::get(DstM.getContext(), FlagOps);
DstModFlags->setOperand(DstIndex, Flag);
Flags[ID].first = Flag;
};
// Perform the merge for standard behavior types.
switch (SrcBehaviorValue) {
case Module::Require:
case Module::Override:
llvm_unreachable("not possible");
case Module::Error: {
// Emit an error if the values differ.
//......... rest of the code omitted here .........
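Finally, if (Requirements.insert(...)) is the idiomatic "only on first sight" guard: insert() returns true exactly when the element was newly added. A last sketch of the guard, with invented requirement IDs:

#include "llvm/ADT/SetVector.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::SmallSetVector<int, 16> Requirements;
  int Incoming[] = {42, 7, 42, 7, 13};
  for (int Req : Incoming)
    if (Requirements.insert(Req)) // true only on first sight
      llvm::outs() << "recording new requirement " << Req << '\n';
  // prints 42, 7, 13 -- once each
}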