This article collects typical usage examples of the C++ SmallVector class. If you are wrestling with questions like: What exactly is the C++ SmallVector class used for? How do I use SmallVector? Where can I find examples of SmallVector in practice? Then the curated class code examples below may help.
The following shows 15 code examples of the SmallVector class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
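Before the examples, here is a minimal standalone sketch of the core idea (assuming LLVM's ADT headers are on the include path): SmallVector stores up to N elements inline on the stack and only falls back to the heap when it grows past N.
#include "llvm/ADT/SmallVector.h"
#include <cstdio>

int main() {
  // Up to 4 elements live in inline (stack) storage; no heap allocation yet.
  llvm::SmallVector<int, 4> V;
  for (int i = 0; i < 4; ++i)
    V.push_back(i);
  // The fifth push_back transparently spills the elements to the heap.
  V.push_back(4);
  std::printf("size=%zu back=%d\n", V.size(), V.back());
  return 0;
}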
Example 1: EmitCMP
SDValue MSP430TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
DebugLoc dl = Op.getDebugLoc();
// If we are doing an AND and testing against zero, then the CMP
// will not be generated. The AND (or BIT) will generate the condition codes,
// but they are different from CMP.
// FIXME: since we're doing a post-processing, use a pseudoinstr here, so
// lowering & isel wouldn't diverge.
bool andCC = false;
if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
if (RHSC->isNullValue() && LHS.hasOneUse() &&
(LHS.getOpcode() == ISD::AND ||
(LHS.getOpcode() == ISD::TRUNCATE &&
LHS.getOperand(0).getOpcode() == ISD::AND))) {
andCC = true;
}
}
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
SDValue TargetCC;
SDValue Flag = EmitCMP(LHS, RHS, TargetCC, CC, dl, DAG);
// Get the condition codes directly from the status register, if it's easy.
// Otherwise a branch will be generated. Note that the AND and BIT
// instructions generate different flags than CMP, the carry bit can be used
// for NE/EQ.
bool Invert = false;
bool Shift = false;
bool Convert = true;
switch (cast<ConstantSDNode>(TargetCC)->getZExtValue()) {
default:
Convert = false;
break;
case MSP430CC::COND_HS:
// Res = SRW & 1, no processing is required
break;
case MSP430CC::COND_LO:
// Res = ~(SRW & 1)
Invert = true;
break;
case MSP430CC::COND_NE:
if (andCC) {
// C = ~Z, thus Res = SRW & 1, no processing is required
} else {
// Res = ~((SRW >> 1) & 1)
Shift = true;
Invert = true;
}
break;
case MSP430CC::COND_E:
Shift = true;
// C = ~Z for AND instruction, thus we can put Res = ~(SRW & 1), however,
// Res = (SRW >> 1) & 1 is 1 word shorter.
break;
}
EVT VT = Op.getValueType();
SDValue One = DAG.getConstant(1, VT);
if (Convert) {
SDValue SR = DAG.getCopyFromReg(DAG.getEntryNode(), dl, MSP430::SRW,
MVT::i16, Flag);
if (Shift)
// FIXME: somewhere this is turned into a SRL, lower it MSP specific?
SR = DAG.getNode(ISD::SRA, dl, MVT::i16, SR, One);
SR = DAG.getNode(ISD::AND, dl, MVT::i16, SR, One);
if (Invert)
SR = DAG.getNode(ISD::XOR, dl, MVT::i16, SR, One);
return SR;
} else {
SDValue Zero = DAG.getConstant(0, VT);
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
SmallVector<SDValue, 4> Ops;
Ops.push_back(One);
Ops.push_back(Zero);
Ops.push_back(TargetCC);
Ops.push_back(Flag);
return DAG.getNode(MSP430ISD::SELECT_CC, dl, VTs, &Ops[0], Ops.size());
}
}
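The SmallVector idiom in Example 1: collect a small, bounded set of operands with push_back, then hand them to an API that expects a pointer and a count (&Ops[0], Ops.size()). A hedged standalone sketch of the same pattern; sum here is a made-up stand-in for getNode:
#include "llvm/ADT/SmallVector.h"
#include <cstdio>

// A legacy-style API that takes a raw pointer plus an element count.
static int sum(const int *Data, size_t N) {
  int S = 0;
  for (size_t I = 0; I != N; ++I)
    S += Data[I];
  return S;
}

int main() {
  llvm::SmallVector<int, 4> Ops;
  Ops.push_back(1);
  Ops.push_back(2);
  Ops.push_back(3);
  // data()/size() mirror the &Ops[0]/Ops.size() idiom in the example.
  std::printf("%d\n", sum(Ops.data(), Ops.size()));
  return 0;
}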
Example 2: switch
llvm::Constant *
CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
const VTableComponent *Components,
unsigned NumComponents,
const VTableLayout::VTableThunkTy *VTableThunks,
unsigned NumVTableThunks) {
SmallVector<llvm::Constant *, 64> Inits;
llvm::Type *Int8PtrTy = CGM.Int8PtrTy;
llvm::Type *PtrDiffTy =
CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
QualType ClassType = CGM.getContext().getTagDeclType(RD);
llvm::Constant *RTTI = CGM.GetAddrOfRTTIDescriptor(ClassType);
unsigned NextVTableThunkIndex = 0;
llvm::Constant* PureVirtualFn = 0;
for (unsigned I = 0; I != NumComponents; ++I) {
VTableComponent Component = Components[I];
llvm::Constant *Init = 0;
switch (Component.getKind()) {
case VTableComponent::CK_VCallOffset:
Init = llvm::ConstantInt::get(PtrDiffTy,
Component.getVCallOffset().getQuantity());
Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
break;
case VTableComponent::CK_VBaseOffset:
Init = llvm::ConstantInt::get(PtrDiffTy,
Component.getVBaseOffset().getQuantity());
Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
break;
case VTableComponent::CK_OffsetToTop:
Init = llvm::ConstantInt::get(PtrDiffTy,
Component.getOffsetToTop().getQuantity());
Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
break;
case VTableComponent::CK_RTTI:
Init = llvm::ConstantExpr::getBitCast(RTTI, Int8PtrTy);
break;
case VTableComponent::CK_FunctionPointer:
case VTableComponent::CK_CompleteDtorPointer:
case VTableComponent::CK_DeletingDtorPointer: {
GlobalDecl GD;
// Get the right global decl.
switch (Component.getKind()) {
default:
llvm_unreachable("Unexpected vtable component kind");
case VTableComponent::CK_FunctionPointer:
GD = Component.getFunctionDecl();
break;
case VTableComponent::CK_CompleteDtorPointer:
GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Complete);
break;
case VTableComponent::CK_DeletingDtorPointer:
GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Deleting);
break;
}
if (cast<CXXMethodDecl>(GD.getDecl())->isPure()) {
// We have a pure virtual member function.
if (!PureVirtualFn) {
llvm::FunctionType *Ty =
llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
StringRef PureCallName = CGM.getCXXABI().GetPureVirtualCallName();
PureVirtualFn = CGM.CreateRuntimeFunction(Ty, PureCallName);
PureVirtualFn = llvm::ConstantExpr::getBitCast(PureVirtualFn,
CGM.Int8PtrTy);
}
Init = PureVirtualFn;
} else {
// Check if we should use a thunk.
if (NextVTableThunkIndex < NumVTableThunks &&
VTableThunks[NextVTableThunkIndex].first == I) {
const ThunkInfo &Thunk = VTableThunks[NextVTableThunkIndex].second;
MaybeEmitThunkAvailableExternally(GD, Thunk);
Init = CGM.GetAddrOfThunk(GD, Thunk);
NextVTableThunkIndex++;
} else {
llvm::Type *Ty = CGM.getTypes().GetFunctionTypeForVTable(GD);
Init = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
}
Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
}
break;
}
case VTableComponent::CK_UnusedFunctionPointer:
Init = llvm::ConstantExpr::getNullValue(Int8PtrTy);
break;
}
//......... (portions of code omitted here) .........
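Example 2 accumulates vtable slot initializers into a SmallVector with a deliberately large inline capacity (64), sized so the common case never reallocates; in the omitted tail the collected constants are handed on as an array. A small sketch of the accumulate-then-view pattern, with ArrayRef as the cheap non-owning view; the count function is illustrative:
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include <cstdio>

static int count(llvm::ArrayRef<int> A) { return (int)A.size(); }

int main() {
  // A generous inline size (here 8) keeps the hot path allocation-free.
  llvm::SmallVector<int, 8> Inits;
  for (int I = 0; I < 5; ++I)
    Inits.push_back(I);
  // SmallVector converts implicitly to ArrayRef, a non-owning view.
  std::printf("%d\n", count(Inits));
  return 0;
}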
Example 3: emitImplicitValueConstructor
static void emitImplicitValueConstructor(SILGenFunction &SGF,
ConstructorDecl *ctor) {
RegularLocation Loc(ctor);
Loc.markAutoGenerated();
// FIXME: Handle 'self' along with the other arguments.
auto *paramList = ctor->getParameters();
auto *selfDecl = ctor->getImplicitSelfDecl();
auto selfTyCan = selfDecl->getType();
auto selfIfaceTyCan = selfDecl->getInterfaceType();
SILType selfTy = SGF.getLoweredType(selfTyCan);
// Emit the indirect return argument, if any.
SILValue resultSlot;
if (selfTy.isAddressOnly(SGF.SGM.M) && SGF.silConv.useLoweredAddresses()) {
auto &AC = SGF.getASTContext();
auto VD = new (AC) ParamDecl(VarDecl::Specifier::InOut,
SourceLoc(), SourceLoc(),
AC.getIdentifier("$return_value"),
SourceLoc(),
AC.getIdentifier("$return_value"),
ctor);
VD->setInterfaceType(selfIfaceTyCan);
resultSlot = SGF.F.begin()->createFunctionArgument(selfTy, VD);
}
// Emit the elementwise arguments.
SmallVector<RValue, 4> elements;
for (size_t i = 0, size = paramList->size(); i < size; ++i) {
auto &param = paramList->get(i);
elements.push_back(
emitImplicitValueConstructorArg(
SGF, Loc, param->getInterfaceType()->getCanonicalType(), ctor));
}
emitConstructorMetatypeArg(SGF, ctor);
auto *decl = selfTy.getStructOrBoundGenericStruct();
assert(decl && "not a struct?!");
// If we have an indirect return slot, initialize it in-place.
if (resultSlot) {
auto elti = elements.begin(), eltEnd = elements.end();
for (VarDecl *field : decl->getStoredProperties()) {
auto fieldTy = selfTy.getFieldType(field, SGF.SGM.M);
auto &fieldTL = SGF.getTypeLowering(fieldTy);
SILValue slot = SGF.B.createStructElementAddr(Loc, resultSlot, field,
fieldTL.getLoweredType().getAddressType());
InitializationPtr init(new KnownAddressInitialization(slot));
// An initialized 'let' property has a single value specified by the
// initializer - it doesn't come from an argument.
if (!field->isStatic() && field->isLet() &&
field->getParentInitializer()) {
#ifndef NDEBUG
auto fieldTy = decl->getDeclContext()->mapTypeIntoContext(
field->getInterfaceType());
assert(fieldTy->isEqual(field->getParentInitializer()->getType())
&& "Checked by sema");
#endif
// Cleanup after this initialization.
FullExpr scope(SGF.Cleanups, field->getParentPatternBinding());
SGF.emitExprInto(field->getParentInitializer(), init.get());
continue;
}
assert(elti != eltEnd && "number of args does not match number of fields");
(void)eltEnd;
std::move(*elti).forwardInto(SGF, Loc, init.get());
++elti;
}
SGF.B.createReturn(ImplicitReturnLocation::getImplicitReturnLoc(Loc),
SGF.emitEmptyTuple(Loc));
return;
}
// Otherwise, build a struct value directly from the elements.
SmallVector<SILValue, 4> eltValues;
auto elti = elements.begin(), eltEnd = elements.end();
for (VarDecl *field : decl->getStoredProperties()) {
auto fieldTy = selfTy.getFieldType(field, SGF.SGM.M);
SILValue v;
// An initialized 'let' property has a single value specified by the
// initializer - it doesn't come from an argument.
if (!field->isStatic() && field->isLet() && field->getParentInitializer()) {
// Cleanup after this initialization.
FullExpr scope(SGF.Cleanups, field->getParentPatternBinding());
v = SGF.emitRValue(field->getParentInitializer())
.forwardAsSingleStorageValue(SGF, fieldTy, Loc);
} else {
assert(elti != eltEnd && "number of args does not match number of fields");
(void)eltEnd;
v = std::move(*elti).forwardAsSingleStorageValue(SGF, fieldTy, Loc);
++elti;
}
//......... (portions of code omitted here) .........
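Example 3 stores RValue objects, which are move-only, and later moves each element out with std::move(*elti). SmallVector handles move-only element types; a sketch with std::unique_ptr standing in for RValue:
#include "llvm/ADT/SmallVector.h"
#include <cstdio>
#include <memory>

int main() {
  // SmallVector works with move-only element types, like RValue above.
  llvm::SmallVector<std::unique_ptr<int>, 4> elements;
  elements.push_back(std::make_unique<int>(10));
  elements.push_back(std::make_unique<int>(20));
  int Sum = 0;
  // Move each element out, mirroring std::move(*elti).forwardInto(...).
  for (auto &E : elements) {
    std::unique_ptr<int> V = std::move(E);
    Sum += *V;
  }
  std::printf("%d\n", Sum);
  return 0;
}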
Example 4: speculateMonomorphicTarget
//......... (portions of code omitted here) .........
// Virt is the block containing the slow virtual call.
SILBasicBlock *Virt = F->createBasicBlock();
Iden->createBBArg(SubType);
SILBasicBlock *Continue = Entry->splitBasicBlock(It);
SILBuilderWithScope Builder(Entry, AI.getInstruction());
// Create the checked_cast_branch instruction that checks at runtime if the
// class instance is identical to the SILType.
ClassMethodInst *CMI = cast<ClassMethodInst>(AI.getCallee());
CCBI = Builder.createCheckedCastBranch(AI.getLoc(), /*exact*/ true,
CMI->getOperand(), SubType, Iden,
Virt);
It = CCBI->getIterator();
SILBuilderWithScope VirtBuilder(Virt, AI.getInstruction());
SILBuilderWithScope IdenBuilder(Iden, AI.getInstruction());
// This is the class reference downcasted into subclass SubType.
SILValue DownCastedClassInstance = Iden->getBBArg(0);
// Copy the two apply instructions into the two blocks.
FullApplySite IdenAI = CloneApply(AI, IdenBuilder);
FullApplySite VirtAI = CloneApply(AI, VirtBuilder);
// See if Continue has a release on self as the instruction right after the
// apply. If it exists, move it into position in the diamond.
if (auto *Release =
dyn_cast<StrongReleaseInst>(std::next(Continue->begin()))) {
if (Release->getOperand() == CMI->getOperand()) {
VirtBuilder.createStrongRelease(Release->getLoc(), CMI->getOperand(),
Atomicity::Atomic);
IdenBuilder.createStrongRelease(
Release->getLoc(), DownCastedClassInstance, Atomicity::Atomic);
Release->eraseFromParent();
}
}
// Create a PHInode for returning the return value from both apply
// instructions.
SILArgument *Arg = Continue->createBBArg(AI.getType());
if (!isa<TryApplyInst>(AI)) {
IdenBuilder.createBranch(AI.getLoc(), Continue,
ArrayRef<SILValue>(IdenAI.getInstruction()));
VirtBuilder.createBranch(AI.getLoc(), Continue,
ArrayRef<SILValue>(VirtAI.getInstruction()));
}
// Remove the old Apply instruction.
assert(AI.getInstruction() == &Continue->front() &&
"AI should be the first instruction in the split Continue block");
if (!isa<TryApplyInst>(AI)) {
AI.getInstruction()->replaceAllUsesWith(Arg);
AI.getInstruction()->eraseFromParent();
assert(!Continue->empty() &&
"There should be at least a terminator after AI");
} else {
AI.getInstruction()->eraseFromParent();
assert(Continue->empty() &&
"There should not be an instruction after try_apply");
Continue->eraseFromParent();
}
// Update the stats.
NumTargetsPredicted++;
// Devirtualize the apply instruction on the identical path.
auto NewInstPair = devirtualizeClassMethod(IdenAI, DownCastedClassInstance);
assert(NewInstPair.first && "Expected to be able to devirtualize apply!");
replaceDeadApply(IdenAI, NewInstPair.first);
// Split critical edges resulting from VirtAI.
if (auto *TAI = dyn_cast<TryApplyInst>(VirtAI)) {
auto *ErrorBB = TAI->getFunction()->createBasicBlock();
ErrorBB->createBBArg(TAI->getErrorBB()->getBBArg(0)->getType());
Builder.setInsertionPoint(ErrorBB);
Builder.createBranch(TAI->getLoc(), TAI->getErrorBB(),
{ErrorBB->getBBArg(0)});
auto *NormalBB = TAI->getFunction()->createBasicBlock();
NormalBB->createBBArg(TAI->getNormalBB()->getBBArg(0)->getType());
Builder.setInsertionPoint(NormalBB);
Builder.createBranch(TAI->getLoc(), TAI->getNormalBB(),
{NormalBB->getBBArg(0) });
Builder.setInsertionPoint(VirtAI.getInstruction());
SmallVector<SILValue, 4> Args;
for (auto Arg : VirtAI.getArguments()) {
Args.push_back(Arg);
}
FullApplySite NewVirtAI = Builder.createTryApply(VirtAI.getLoc(), VirtAI.getCallee(),
VirtAI.getSubstCalleeSILType(), VirtAI.getSubstitutions(),
Args, NormalBB, ErrorBB);
VirtAI.getInstruction()->eraseFromParent();
VirtAI = NewVirtAI;
}
return VirtAI;
}
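Example 4 copies an argument range into the Args vector one push_back at a time. Constructing from an iterator range, or calling append, expresses the same thing more directly; a minimal sketch:
#include "llvm/ADT/SmallVector.h"
#include <cstdio>
#include <iterator>

int main() {
  int Src[] = {1, 2, 3, 4};
  // Copy an existing range into a SmallVector, as the Args loop above does.
  llvm::SmallVector<int, 4> Args(std::begin(Src), std::end(Src));
  // append() does the same onto an already-populated vector.
  Args.append(std::begin(Src), std::end(Src));
  std::printf("%zu\n", Args.size());
  return 0;
}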
Example 5: ComputeLiveInBlocks
/// DetermineInsertionPoint - At this point, we're committed to promoting the
/// alloca using IDF's, and the standard SSA construction algorithm. Determine
/// which blocks need phi nodes and see if we can optimize out some work by
/// avoiding insertion of dead phi nodes.
void PromoteMem2Reg::DetermineInsertionPoint(AllocaInst *AI, unsigned AllocaNum,
AllocaInfo &Info) {
// Unique the set of defining blocks for efficient lookup.
SmallPtrSet<BasicBlock*, 32> DefBlocks;
DefBlocks.insert(Info.DefiningBlocks.begin(), Info.DefiningBlocks.end());
// Determine which blocks the value is live in. These are blocks which lead
// to uses.
SmallPtrSet<BasicBlock*, 32> LiveInBlocks;
ComputeLiveInBlocks(AI, Info, DefBlocks, LiveInBlocks);
// Use a priority queue keyed on dominator tree level so that inserted nodes
// are handled from the bottom of the dominator tree upwards.
typedef std::priority_queue<DomTreeNodePair, SmallVector<DomTreeNodePair, 32>,
DomTreeNodeCompare> IDFPriorityQueue;
IDFPriorityQueue PQ;
for (SmallPtrSet<BasicBlock*, 32>::const_iterator I = DefBlocks.begin(),
E = DefBlocks.end(); I != E; ++I) {
if (DomTreeNode *Node = DT.getNode(*I))
PQ.push(std::make_pair(Node, DomLevels[Node]));
}
SmallVector<std::pair<unsigned, BasicBlock*>, 32> DFBlocks;
SmallPtrSet<DomTreeNode*, 32> Visited;
SmallVector<DomTreeNode*, 32> Worklist;
while (!PQ.empty()) {
DomTreeNodePair RootPair = PQ.top();
PQ.pop();
DomTreeNode *Root = RootPair.first;
unsigned RootLevel = RootPair.second;
// Walk all dominator tree children of Root, inspecting their CFG edges with
// targets elsewhere on the dominator tree. Only targets whose level is at
// most Root's level are added to the iterated dominance frontier of the
// definition set.
Worklist.clear();
Worklist.push_back(Root);
while (!Worklist.empty()) {
DomTreeNode *Node = Worklist.pop_back_val();
BasicBlock *BB = Node->getBlock();
for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE;
++SI) {
DomTreeNode *SuccNode = DT.getNode(*SI);
// Quickly skip all CFG edges that are also dominator tree edges instead
// of catching them below.
if (SuccNode->getIDom() == Node)
continue;
unsigned SuccLevel = DomLevels[SuccNode];
if (SuccLevel > RootLevel)
continue;
if (!Visited.insert(SuccNode))
continue;
BasicBlock *SuccBB = SuccNode->getBlock();
if (!LiveInBlocks.count(SuccBB))
continue;
DFBlocks.push_back(std::make_pair(BBNumbers[SuccBB], SuccBB));
if (!DefBlocks.count(SuccBB))
PQ.push(std::make_pair(SuccNode, SuccLevel));
}
for (DomTreeNode::iterator CI = Node->begin(), CE = Node->end(); CI != CE;
++CI) {
if (!Visited.count(*CI))
Worklist.push_back(*CI);
}
}
}
if (DFBlocks.size() > 1)
std::sort(DFBlocks.begin(), DFBlocks.end());
unsigned CurrentVersion = 0;
for (unsigned i = 0, e = DFBlocks.size(); i != e; ++i)
QueuePhiNode(DFBlocks[i].second, AllocaNum, CurrentVersion);
}
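Example 5 uses SmallVector in two roles: as the container underlying std::priority_queue (it meets the sequence-container requirements the adapter needs) and as an explicit worklist drained with pop_back_val(). A condensed sketch of both:
#include "llvm/ADT/SmallVector.h"
#include <cstdio>
#include <queue>

int main() {
  // SmallVector can back std::priority_queue, as IDFPriorityQueue does.
  std::priority_queue<int, llvm::SmallVector<int, 32>> PQ;
  PQ.push(3);
  PQ.push(1);
  PQ.push(2);
  // An explicit worklist drained with pop_back_val(), as in the IDF walk.
  llvm::SmallVector<int, 32> Worklist;
  while (!PQ.empty()) {
    Worklist.push_back(PQ.top());
    PQ.pop();
  }
  while (!Worklist.empty())
    std::printf("%d ", Worklist.pop_back_val());
  return 0;
}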
Example 6: assert
/// VerifyIndirectJumps - Verify whether any possible indirect jump
/// might cross a protection boundary. Unlike direct jumps, indirect
/// jumps count cleanups as protection boundaries: since there's no
/// way to know where the jump is going, we can't implicitly run the
/// right cleanups the way we can with direct jumps.
///
/// Thus, an indirect jump is "trivial" if it bypasses no
/// initializations and no teardowns. More formally, an indirect jump
/// from A to B is trivial if the path out from A to DCA(A,B) is
/// trivial and the path in from DCA(A,B) to B is trivial, where
/// DCA(A,B) is the deepest common ancestor of A and B.
/// Jump-triviality is transitive but asymmetric.
///
/// A path in is trivial if none of the entered scopes have an InDiag.
/// A path out is trivial if none of the exited scopes have an OutDiag.
///
/// Under these definitions, this function checks that the indirect
/// jump between A and B is trivial for every indirect goto statement A
/// and every label B whose address was taken in the function.
void JumpScopeChecker::VerifyIndirectJumps() {
if (IndirectJumps.empty()) return;
// If there aren't any address-of-label expressions in this function,
// complain about the first indirect goto.
if (IndirectJumpTargets.empty()) {
S.Diag(IndirectJumps[0]->getGotoLoc(),
diag::err_indirect_goto_without_addrlabel);
return;
}
// Collect a single representative of every scope containing an
// indirect goto. For most code bases, this substantially cuts
// down on the number of jump sites we'll have to consider later.
typedef std::pair<unsigned, IndirectGotoStmt*> JumpScope;
SmallVector<JumpScope, 32> JumpScopes;
{
llvm::DenseMap<unsigned, IndirectGotoStmt*> JumpScopesMap;
for (SmallVectorImpl<IndirectGotoStmt*>::iterator
I = IndirectJumps.begin(), E = IndirectJumps.end(); I != E; ++I) {
IndirectGotoStmt *IG = *I;
assert(LabelAndGotoScopes.count(IG) &&
"indirect jump didn't get added to scopes?");
unsigned IGScope = LabelAndGotoScopes[IG];
IndirectGotoStmt *&Entry = JumpScopesMap[IGScope];
if (!Entry) Entry = IG;
}
JumpScopes.reserve(JumpScopesMap.size());
for (llvm::DenseMap<unsigned, IndirectGotoStmt*>::iterator
I = JumpScopesMap.begin(), E = JumpScopesMap.end(); I != E; ++I)
JumpScopes.push_back(*I);
}
// Collect a single representative of every scope containing a
// label whose address was taken somewhere in the function.
// For most code bases, there will be only one such scope.
llvm::DenseMap<unsigned, LabelDecl*> TargetScopes;
for (SmallVectorImpl<LabelDecl*>::iterator
I = IndirectJumpTargets.begin(), E = IndirectJumpTargets.end();
I != E; ++I) {
LabelDecl *TheLabel = *I;
assert(LabelAndGotoScopes.count(TheLabel->getStmt()) &&
"Referenced label didn't get added to scopes?");
unsigned LabelScope = LabelAndGotoScopes[TheLabel->getStmt()];
LabelDecl *&Target = TargetScopes[LabelScope];
if (!Target) Target = TheLabel;
}
// For each target scope, make sure it's trivially reachable from
// every scope containing a jump site.
//
// A path between scopes always consists of exiting zero or more
// scopes, then entering zero or more scopes. We build a set
// of scopes S from which the target scope can be trivially
// entered, then verify that every jump scope can be trivially
// exited to reach a scope in S.
llvm::BitVector Reachable(Scopes.size(), false);
for (llvm::DenseMap<unsigned,LabelDecl*>::iterator
TI = TargetScopes.begin(), TE = TargetScopes.end(); TI != TE; ++TI) {
unsigned TargetScope = TI->first;
LabelDecl *TargetLabel = TI->second;
Reachable.reset();
// Mark all the enclosing scopes from which you can safely jump
// into the target scope. 'Min' will end up being the index of
// the shallowest such scope.
unsigned Min = TargetScope;
while (true) {
Reachable.set(Min);
// Don't go beyond the outermost scope.
if (Min == 0) break;
// Stop if we can't trivially enter the current scope.
if (Scopes[Min].InDiag) break;
Min = Scopes[Min].ParentScope;
}
// Walk through all the jump sites, checking that they can trivially
//......... (portions of code omitted here) .........
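Example 6 sizes JumpScopes with reserve() before copying the deduplicated map entries in, so one allocation covers the whole copy. A sketch of that reserve-then-fill pattern:
#include "llvm/ADT/SmallVector.h"
#include <cstdio>
#include <map>

int main() {
  std::map<unsigned, const char *> ScopeMap{{1u, "a"}, {2u, "b"}};
  llvm::SmallVector<std::pair<unsigned, const char *>, 32> Scopes;
  // reserve() up front avoids repeated growth, as JumpScopes.reserve() does.
  Scopes.reserve(ScopeMap.size());
  for (const auto &Entry : ScopeMap)
    Scopes.push_back(Entry);
  std::printf("%zu\n", Scopes.size());
  return 0;
}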
Example 7: collectModuleHeaderIncludes
/// \brief Collect the set of header includes needed to construct the given
/// module and update the TopHeaders file set of the module.
///
/// \param Module The module we're collecting includes from.
///
/// \param Includes Will be augmented with the set of \#includes or \#imports
/// needed to load all of the named headers.
static std::error_code
collectModuleHeaderIncludes(const LangOptions &LangOpts, FileManager &FileMgr,
ModuleMap &ModMap, clang::Module *Module,
SmallVectorImpl<char> &Includes) {
// Don't collect any headers for unavailable modules.
if (!Module->isAvailable())
return std::error_code();
// Add includes for each of these headers.
for (Module::Header &H : Module->Headers[Module::HK_Normal]) {
Module->addTopHeader(H.Entry);
// Use the path as specified in the module map file. We'll look for this
// file relative to the module build directory (the directory containing
// the module map file) so this will find the same file that we found
// while parsing the module map.
if (std::error_code Err = addHeaderInclude(H.NameAsWritten, Includes,
LangOpts, Module->IsExternC))
return Err;
}
// Note that Module->PrivateHeaders will not be a TopHeader.
if (Module::Header UmbrellaHeader = Module->getUmbrellaHeader()) {
Module->addTopHeader(UmbrellaHeader.Entry);
if (Module->Parent) {
// Include the umbrella header for submodules.
if (std::error_code Err = addHeaderInclude(UmbrellaHeader.NameAsWritten,
Includes, LangOpts,
Module->IsExternC))
return Err;
}
} else if (Module::DirectoryName UmbrellaDir = Module->getUmbrellaDir()) {
// Add all of the headers we find in this subdirectory.
std::error_code EC;
SmallString<128> DirNative;
llvm::sys::path::native(UmbrellaDir.Entry->getName(), DirNative);
for (llvm::sys::fs::recursive_directory_iterator Dir(DirNative, EC),
DirEnd;
Dir != DirEnd && !EC; Dir.increment(EC)) {
// Check whether this entry has an extension typically associated with
// headers.
if (!llvm::StringSwitch<bool>(llvm::sys::path::extension(Dir->path()))
.Cases(".h", ".H", ".hh", ".hpp", true)
.Default(false))
continue;
const FileEntry *Header = FileMgr.getFile(Dir->path());
// FIXME: This shouldn't happen unless there is a file system race. Is
// that worth diagnosing?
if (!Header)
continue;
// If this header is marked 'unavailable' in this module, don't include
// it.
if (ModMap.isHeaderUnavailableInModule(Header, Module))
continue;
// Compute the relative path from the directory to this file.
SmallVector<StringRef, 16> Components;
auto PathIt = llvm::sys::path::rbegin(Dir->path());
for (int I = 0; I != Dir.level() + 1; ++I, ++PathIt)
Components.push_back(*PathIt);
SmallString<128> RelativeHeader(UmbrellaDir.NameAsWritten);
for (auto It = Components.rbegin(), End = Components.rend(); It != End;
++It)
llvm::sys::path::append(RelativeHeader, *It);
// Include this header as part of the umbrella directory.
Module->addTopHeader(Header);
if (std::error_code Err = addHeaderInclude(RelativeHeader, Includes,
LangOpts, Module->IsExternC))
return Err;
}
if (EC)
return EC;
}
// Recurse into submodules.
for (clang::Module::submodule_iterator Sub = Module->submodule_begin(),
SubEnd = Module->submodule_end();
Sub != SubEnd; ++Sub)
if (std::error_code Err = collectModuleHeaderIncludes(
LangOpts, FileMgr, ModMap, *Sub, Includes))
return Err;
return std::error_code();
}
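Example 7 takes its output buffer as SmallVectorImpl<char>&, the size-erased base class, so the recursion works with any caller-chosen inline capacity; SmallString<N> is the string-flavored SmallVector used for the paths. A sketch, with addLine as a made-up helper:
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include <cstdio>

// Take SmallVectorImpl<char>& so callers may pass any SmallString<N>.
static void addLine(llvm::SmallVectorImpl<char> &Out, const char *Text) {
  while (*Text)
    Out.push_back(*Text++);
  Out.push_back('\n');
}

int main() {
  llvm::SmallString<128> Includes; // SmallVector<char, 128> + string helpers
  addLine(Includes, "#include \"a.h\"");
  addLine(Includes, "#include \"b.h\"");
  std::printf("%s", Includes.c_str());
  return 0;
}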
Example 8: assert
CallGraphNode* ArgumentRecovery::recoverArguments(llvm::CallGraphNode *node)
{
Function* fn = node->getFunction();
if (fn == nullptr)
{
// "theoretical nodes", whatever that is
return nullptr;
}
// quick exit if there isn't exactly one argument
if (fn->arg_size() != 1)
{
return nullptr;
}
Argument* fnArg = fn->arg_begin();
if (!isStructType(fnArg))
{
return nullptr;
}
// This is a nasty NASTY hack that relies on the AA pass being RegisterUse.
// The data should be moved to a separate helper pass that can be queried from both the AA pass and this one.
RegisterUse& regUse = getAnalysis<RegisterUse>();
CallGraph& cg = getAnalysis<CallGraphWrapperPass>().getCallGraph();
const auto* modRefInfo = regUse.getModRefInfo(fn);
assert(modRefInfo != nullptr);
// At this point we pretty much know that we're going to modify the function, so start doing that.
// Get register offsets from the old function before we start mutilating it.
auto& registerMap = exposeAllRegisters(fn);
// Create a new function prototype, asking RegisterUse for which registers should be passed in, and how.
LLVMContext& ctx = fn->getContext();
SmallVector<pair<const char*, Type*>, 16> parameters;
Type* int64 = Type::getInt64Ty(ctx);
Type* int64ptr = Type::getInt64PtrTy(ctx);
for (const auto& pair : *modRefInfo)
{
if (pair.second != RegisterUse::NoModRef)
{
Type* paramType = (pair.second & RegisterUse::Mod) == RegisterUse::Mod ? int64ptr : int64;
parameters.push_back({pair.first, paramType});
}
}
// Order parameters.
// FIXME: This could use an ABI-specific sort routine. For now, use a lexicographical sort.
sort(parameters.begin(), parameters.end(), [](const pair<const char*, Type*>& a, const pair<const char*, Type*>& b) {
return strcmp(a.first, b.first) < 0;
});
// Extract parameter types.
SmallVector<Type*, 16> parameterTypes;
for (const auto& pair : parameters)
{
parameterTypes.push_back(pair.second);
}
// Ideally, we would also do caller analysis here to figure out which output registers are never read, such that
// we can either eliminate them from the parameter list or pass them by value instead of by address.
// We would also pick a return value.
FunctionType* newFunctionType = FunctionType::get(Type::getVoidTy(ctx), parameterTypes, false);
Function* newFunc = Function::Create(newFunctionType, fn->getLinkage());
newFunc->copyAttributesFrom(fn);
fn->getParent()->getFunctionList().insert(fn, newFunc);
newFunc->takeName(fn);
fn->setName("__hollow_husk__" + newFunc->getName());
// Set argument names
size_t i = 0;
for (Argument& arg : newFunc->args())
{
arg.setName(parameters[i].first);
i++;
}
// update call graph
CallGraphNode* newFuncNode = cg.getOrInsertFunction(newFunc);
CallGraphNode* oldFuncNode = cg[fn];
// loop over callers and transform call sites.
while (!fn->use_empty())
{
CallSite cs(fn->user_back());
Instruction* call = cast<CallInst>(cs.getInstruction());
Function* caller = call->getParent()->getParent();
auto& registerPositions = exposeAllRegisters(caller);
SmallVector<Value*, 16> callParameters;
for (const auto& pair : parameters)
{
// HACKHACK: find a pointer to a 64-bit int in the set.
Value* registerPointer = nullptr;
auto range = registerPositions.equal_range(pair.first);
for (auto iter = range.first; iter != range.second; iter++)
//......... (portions of code omitted here) .........
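Example 8 sorts the collected parameters in place with a comparison lambda; SmallVector iterators are random-access, so any standard algorithm applies directly. Sketch:
#include "llvm/ADT/SmallVector.h"
#include <algorithm>
#include <cstdio>
#include <cstring>
#include <utility>

int main() {
  // Parameters sorted lexicographically by name, as in the example above.
  llvm::SmallVector<std::pair<const char *, int>, 16> parameters;
  parameters.push_back({"rsi", 1});
  parameters.push_back({"rax", 2});
  parameters.push_back({"rdx", 3});
  std::sort(parameters.begin(), parameters.end(),
            [](const auto &A, const auto &B) {
              return std::strcmp(A.first, B.first) < 0;
            });
  for (const auto &P : parameters)
    std::printf("%s ", P.first); // rax rdx rsi
  return 0;
}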
Example 9: fixBranchesAndUses
void StackAllocationPromoter::fixBranchesAndUses(BlockSet &PhiBlocks) {
// First update uses of the value.
SmallVector<LoadInst *, 4> collectedLoads;
for (auto UI = ASI->use_begin(), E = ASI->use_end(); UI != E;) {
auto *Inst = UI->getUser();
UI++;
bool removedUser = false;
collectedLoads.clear();
collectLoads(Inst, collectedLoads);
for (LoadInst *LI : collectedLoads) {
SILValue Def;
// If this block has no predecessors then nothing dominates it and
// the instruction is unreachable. If the instruction we're
// examining is a value, replace it with undef. Either way, delete
// the instruction and move on.
SILBasicBlock *BB = LI->getParent();
Def = getLiveInValue(PhiBlocks, BB);
LLVM_DEBUG(llvm::dbgs() << "*** Replacing " << *LI
<< " with Def " << *Def);
// Replace the load with the definition that we found.
replaceLoad(LI, Def, ASI);
removedUser = true;
NumInstRemoved++;
}
if (removedUser)
continue;
// If this block has no predecessors then nothing dominates it and
// the instruction is unreachable. Delete the instruction and move
// on.
SILBasicBlock *BB = Inst->getParent();
if (auto *DVAI = dyn_cast<DebugValueAddrInst>(Inst)) {
// Replace DebugValueAddr with DebugValue.
SILValue Def = getLiveInValue(PhiBlocks, BB);
promoteDebugValueAddr(DVAI, Def, B);
NumInstRemoved++;
continue;
}
// Replace destroys with a release of the value.
if (auto *DAI = dyn_cast<DestroyAddrInst>(Inst)) {
SILValue Def = getLiveInValue(PhiBlocks, BB);
replaceDestroy(DAI, Def);
continue;
}
}
// Now that all of the uses are fixed we can fix the branches that point
// to the blocks with the added arguments.
// For each Block with a new Phi argument:
for (auto Block : PhiBlocks) {
// Fix all predecessors.
for (auto PBBI = Block->getPredecessorBlocks().begin(),
E = Block->getPredecessorBlocks().end();
PBBI != E;) {
auto *PBB = *PBBI;
++PBBI;
assert(PBB && "Invalid block!");
fixPhiPredBlock(PhiBlocks, Block, PBB);
}
}
}
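Example 9 reuses one collectedLoads vector across loop iterations, calling clear() each time; clear() keeps the already-grown capacity, so later iterations do not reallocate. Sketch:
#include "llvm/ADT/SmallVector.h"
#include <cstdio>

int main() {
  llvm::SmallVector<int, 4> Collected;
  for (int Round = 0; Round < 3; ++Round) {
    // clear() keeps the allocated capacity, so reusing one vector across
    // iterations (as collectedLoads is reused above) avoids reallocation.
    Collected.clear();
    for (int I = 0; I <= Round; ++I)
      Collected.push_back(I);
    std::printf("round %d: %zu elements\n", Round, Collected.size());
  }
  return 0;
}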
Example 10: assert
/// InlineFunction - This function inlines the called function into the basic
/// block of the caller. This returns false if it is not possible to inline
/// this call. The program is still in a well defined state if this occurs
/// though.
///
/// Note that this only does one level of inlining. For example, if the
/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
/// exists in the instruction stream. Similarly this will inline a recursive
/// function by one level.
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
bool InsertLifetime) {
Instruction *TheCall = CS.getInstruction();
assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
"Instruction not in function!");
// If IFI has any state in it, zap it before we fill it in.
IFI.reset();
const Function *CalledFunc = CS.getCalledFunction();
if (CalledFunc == 0 || // Can't inline external function or indirect
CalledFunc->isDeclaration() || // call, or call to a vararg function!
CalledFunc->getFunctionType()->isVarArg()) return false;
// If the call to the callee is not a tail call, we must clear the 'tail'
// flags on any calls that we inline.
bool MustClearTailCallFlags =
!(isa<CallInst>(TheCall) && cast<CallInst>(TheCall)->isTailCall());
// If the call to the callee cannot throw, set the 'nounwind' flag on any
// calls that we inline.
bool MarkNoUnwind = CS.doesNotThrow();
BasicBlock *OrigBB = TheCall->getParent();
Function *Caller = OrigBB->getParent();
// GC poses two hazards to inlining, which only occur when the callee has GC:
// 1. If the caller has no GC, then the callee's GC must be propagated to the
// caller.
// 2. If the caller has a differing GC, it is invalid to inline.
if (CalledFunc->hasGC()) {
if (!Caller->hasGC())
Caller->setGC(CalledFunc->getGC());
else if (CalledFunc->getGC() != Caller->getGC())
return false;
}
// Get the personality function from the callee if it contains a landing pad.
Value *CalleePersonality = 0;
for (Function::const_iterator I = CalledFunc->begin(), E = CalledFunc->end();
I != E; ++I)
if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
const BasicBlock *BB = II->getUnwindDest();
const LandingPadInst *LP = BB->getLandingPadInst();
CalleePersonality = LP->getPersonalityFn();
break;
}
// Find the personality function used by the landing pads of the caller. If it
// exists, then check to see that it matches the personality function used in
// the callee.
if (CalleePersonality) {
for (Function::const_iterator I = Caller->begin(), E = Caller->end();
I != E; ++I)
if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
const BasicBlock *BB = II->getUnwindDest();
const LandingPadInst *LP = BB->getLandingPadInst();
// If the personality functions match, then we can perform the
// inlining. Otherwise, we can't inline.
// TODO: This isn't 100% true. Some personality functions are proper
// supersets of others and can be used in place of the other.
if (LP->getPersonalityFn() != CalleePersonality)
return false;
break;
}
}
// Get an iterator to the last basic block in the function, which will have
// the new function inlined after it.
Function::iterator LastBlock = &Caller->back();
// Make sure to capture all of the return instructions from the cloned
// function.
SmallVector<ReturnInst*, 8> Returns;
ClonedCodeInfo InlinedFunctionInfo;
Function::iterator FirstNewBlock;
{ // Scope to destroy VMap after cloning.
ValueToValueMapTy VMap;
assert(CalledFunc->arg_size() == CS.arg_size() &&
"No varargs calls can be inlined!");
// Calculate the vector of arguments to pass into the function cloner, which
// matches up the formal to the actual argument values.
CallSite::arg_iterator AI = CS.arg_begin();
unsigned ArgNo = 0;
for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
//......... (portions of code omitted here) .........
Example 11: normalize
std::string Triple::normalize(StringRef Str) {
// Parse into components.
SmallVector<StringRef, 4> Components;
Str.split(Components, "-");
// If the first component corresponds to a known architecture, preferentially
// use it for the architecture. If the second component corresponds to a
// known vendor, preferentially use it for the vendor, etc. This avoids silly
// component movement when a component parses as (eg) both a valid arch and a
// valid os.
ArchType Arch = UnknownArch;
if (Components.size() > 0)
Arch = parseArch(Components[0]);
VendorType Vendor = UnknownVendor;
if (Components.size() > 1)
Vendor = parseVendor(Components[1]);
OSType OS = UnknownOS;
if (Components.size() > 2)
OS = parseOS(Components[2]);
EnvironmentType Environment = UnknownEnvironment;
if (Components.size() > 3)
Environment = parseEnvironment(Components[3]);
ObjectFormatType ObjectFormat = UnknownObjectFormat;
// Note which components are already in their final position. These will not
// be moved.
bool Found[4];
Found[0] = Arch != UnknownArch;
Found[1] = Vendor != UnknownVendor;
Found[2] = OS != UnknownOS;
Found[3] = Environment != UnknownEnvironment;
// If they are not there already, permute the components into their canonical
// positions by seeing if they parse as a valid architecture, and if so moving
// the component to the architecture position etc.
for (unsigned Pos = 0; Pos != array_lengthof(Found); ++Pos) {
if (Found[Pos])
continue; // Already in the canonical position.
for (unsigned Idx = 0; Idx != Components.size(); ++Idx) {
// Do not reparse any components that already matched.
if (Idx < array_lengthof(Found) && Found[Idx])
continue;
// Does this component parse as valid for the target position?
bool Valid = false;
StringRef Comp = Components[Idx];
switch (Pos) {
default: llvm_unreachable("unexpected component type!");
case 0:
Arch = parseArch(Comp);
Valid = Arch != UnknownArch;
break;
case 1:
Vendor = parseVendor(Comp);
Valid = Vendor != UnknownVendor;
break;
case 2:
OS = parseOS(Comp);
Valid = OS != UnknownOS;
break;
case 3:
Environment = parseEnvironment(Comp);
Valid = Environment != UnknownEnvironment;
if (!Valid) {
ObjectFormat = parseFormat(Comp);
Valid = ObjectFormat != UnknownObjectFormat;
}
break;
}
if (!Valid)
continue; // Nope, try the next component.
// Move the component to the target position, pushing any non-fixed
// components that are in the way to the right. This tends to give
// good results in the common cases of a forgotten vendor component
// or a wrongly positioned environment.
if (Pos < Idx) {
// Insert left, pushing the existing components to the right. For
// example, a-b-i386 -> i386-a-b when moving i386 to the front.
StringRef CurrentComponent(""); // The empty component.
// Replace the component we are moving with an empty component.
std::swap(CurrentComponent, Components[Idx]);
// Insert the component being moved at Pos, displacing any existing
// components to the right.
for (unsigned i = Pos; !CurrentComponent.empty(); ++i) {
// Skip over any fixed components.
while (i < array_lengthof(Found) && Found[i])
++i;
// Place the component at the new position, getting the component
// that was at this position - it will be moved right.
std::swap(CurrentComponent, Components[i]);
}
} else if (Pos > Idx) {
// Push right by inserting empty components until the component at Idx
// reaches the target position Pos. For example, pc-a -> -pc-a when
// moving pc to the second position.
do {
// Insert one empty component at Idx.
StringRef CurrentComponent(""); // The empty component.
//......... (portions of code omitted here) .........
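Example 11 fills a SmallVector<StringRef> via StringRef::split, which appends one StringRef per component without copying the underlying characters. Sketch:
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include <cstdio>

int main() {
  // Split a target triple into components, as Triple::normalize does.
  llvm::StringRef Str("x86_64-pc-linux-gnu");
  llvm::SmallVector<llvm::StringRef, 4> Components;
  Str.split(Components, '-');
  for (llvm::StringRef C : Components)
    std::printf("%.*s\n", (int)C.size(), C.data());
  return 0;
}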
Example 12: switch
/// EmitAnyX86InstComments - This function decodes x86 instructions and prints
/// newline terminated strings to the specified string if desired. This
/// information is shown in disassembly dumps when verbose assembly is enabled.
void llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
const char *(*getRegName)(unsigned)) {
// If this is a shuffle operation, the switch should fill in this state.
SmallVector<int, 8> ShuffleMask;
const char *DestName = 0, *Src1Name = 0, *Src2Name = 0;
switch (MI->getOpcode()) {
case X86::INSERTPSrr:
Src1Name = getRegName(MI->getOperand(0).getReg());
Src2Name = getRegName(MI->getOperand(2).getReg());
DecodeINSERTPSMask(MI->getOperand(3).getImm(), ShuffleMask);
break;
case X86::VINSERTPSrr:
DestName = getRegName(MI->getOperand(0).getReg());
Src1Name = getRegName(MI->getOperand(1).getReg());
Src2Name = getRegName(MI->getOperand(2).getReg());
DecodeINSERTPSMask(MI->getOperand(3).getImm(), ShuffleMask);
break;
case X86::MOVLHPSrr:
Src2Name = getRegName(MI->getOperand(2).getReg());
Src1Name = getRegName(MI->getOperand(0).getReg());
DecodeMOVLHPSMask(2, ShuffleMask);
break;
case X86::VMOVLHPSrr:
Src2Name = getRegName(MI->getOperand(2).getReg());
Src1Name = getRegName(MI->getOperand(1).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
DecodeMOVLHPSMask(2, ShuffleMask);
break;
case X86::MOVHLPSrr:
Src2Name = getRegName(MI->getOperand(2).getReg());
Src1Name = getRegName(MI->getOperand(0).getReg());
DecodeMOVHLPSMask(2, ShuffleMask);
break;
case X86::VMOVHLPSrr:
Src2Name = getRegName(MI->getOperand(2).getReg());
Src1Name = getRegName(MI->getOperand(1).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
DecodeMOVHLPSMask(2, ShuffleMask);
break;
case X86::PSHUFDri:
case X86::VPSHUFDri:
Src1Name = getRegName(MI->getOperand(1).getReg());
// FALL THROUGH.
case X86::PSHUFDmi:
case X86::VPSHUFDmi:
DestName = getRegName(MI->getOperand(0).getReg());
DecodePSHUFMask(MVT::v4i32, MI->getOperand(MI->getNumOperands()-1).getImm(),
ShuffleMask);
break;
case X86::VPSHUFDYri:
Src1Name = getRegName(MI->getOperand(1).getReg());
// FALL THROUGH.
case X86::VPSHUFDYmi:
DestName = getRegName(MI->getOperand(0).getReg());
DecodePSHUFMask(MVT::v8i32, MI->getOperand(MI->getNumOperands()-1).getImm(),
ShuffleMask);
break;
case X86::PSHUFHWri:
case X86::VPSHUFHWri:
Src1Name = getRegName(MI->getOperand(1).getReg());
// FALL THROUGH.
case X86::PSHUFHWmi:
case X86::VPSHUFHWmi:
DestName = getRegName(MI->getOperand(0).getReg());
DecodePSHUFHWMask(MVT::v8i16,
MI->getOperand(MI->getNumOperands()-1).getImm(),
ShuffleMask);
break;
case X86::VPSHUFHWYri:
Src1Name = getRegName(MI->getOperand(1).getReg());
// FALL THROUGH.
case X86::VPSHUFHWYmi:
DestName = getRegName(MI->getOperand(0).getReg());
DecodePSHUFHWMask(MVT::v16i16,
MI->getOperand(MI->getNumOperands()-1).getImm(),
ShuffleMask);
break;
case X86::PSHUFLWri:
case X86::VPSHUFLWri:
Src1Name = getRegName(MI->getOperand(1).getReg());
// FALL THROUGH.
case X86::PSHUFLWmi:
case X86::VPSHUFLWmi:
DestName = getRegName(MI->getOperand(0).getReg());
DecodePSHUFLWMask(MVT::v8i16,
MI->getOperand(MI->getNumOperands()-1).getImm(),
ShuffleMask);
break;
case X86::VPSHUFLWYri:
Src1Name = getRegName(MI->getOperand(1).getReg());
// FALL THROUGH.
//......... (portions of code omitted here) .........
Example 13: Match
/// Match - Match the pattern string against the input buffer Buffer. This
/// returns the position that is matched or npos if there is no match. If
/// there is a match, the size of the matched string is returned in MatchLen.
size_t Pattern::Match(StringRef Buffer, size_t &MatchLen,
StringMap<StringRef> &VariableTable) const {
// If this is the EOF pattern, match it immediately.
if (CheckTy == Check::CheckEOF) {
MatchLen = 0;
return Buffer.size();
}
// If this is a fixed string pattern, just match it now.
if (!FixedStr.empty()) {
MatchLen = FixedStr.size();
return Buffer.find(FixedStr);
}
// Regex match.
// If there are variable uses, we need to create a temporary string with the
// actual value.
StringRef RegExToMatch = RegExStr;
std::string TmpStr;
if (!VariableUses.empty()) {
TmpStr = RegExStr;
unsigned InsertOffset = 0;
for (unsigned i = 0, e = VariableUses.size(); i != e; ++i) {
std::string Value;
if (VariableUses[i].first[0] == '@') {
if (!EvaluateExpression(VariableUses[i].first, Value))
return StringRef::npos;
} else {
StringMap<StringRef>::iterator it =
VariableTable.find(VariableUses[i].first);
// If the variable is undefined, return an error.
if (it == VariableTable.end())
return StringRef::npos;
// Look up the value and escape it so that we can put it into the regex.
Value += Regex::escape(it->second);
}
// Plop it into the regex at the adjusted offset.
TmpStr.insert(TmpStr.begin()+VariableUses[i].second+InsertOffset,
Value.begin(), Value.end());
InsertOffset += Value.size();
}
// Match the newly constructed regex.
RegExToMatch = TmpStr;
}
SmallVector<StringRef, 4> MatchInfo;
if (!Regex(RegExToMatch, Regex::Newline).match(Buffer, &MatchInfo))
return StringRef::npos;
// Successful regex match.
assert(!MatchInfo.empty() && "Didn't get any match");
StringRef FullMatch = MatchInfo[0];
// If this defines any variables, remember their values.
for (std::map<StringRef, unsigned>::const_iterator I = VariableDefs.begin(),
E = VariableDefs.end();
I != E; ++I) {
assert(I->second < MatchInfo.size() && "Internal paren error");
VariableTable[I->first] = MatchInfo[I->second];
}
MatchLen = FullMatch.size();
return FullMatch.data()-Buffer.data();
}
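Example 13 passes a SmallVector<StringRef> to llvm::Regex::match to receive the capture groups; slot 0 is the full match and later slots are the parenthesized groups. Sketch:
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Regex.h"
#include <cstdio>

int main() {
  // Capture groups land in a SmallVector<StringRef>, as MatchInfo does above.
  llvm::Regex RE("([a-z]+)=([0-9]+)");
  llvm::SmallVector<llvm::StringRef, 4> MatchInfo;
  if (RE.match("width=42", &MatchInfo))
    std::printf("key=%.*s value=%.*s\n",
                (int)MatchInfo[1].size(), MatchInfo[1].data(),
                (int)MatchInfo[2].size(), MatchInfo[2].data());
  return 0;
}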
Example 14: main
//......... (portions of code omitted here) .........
if (OptLevelO1)
AddOptimizationPasses(Passes, *FPasses, TM.get(), 1, 0);
if (OptLevelO2)
AddOptimizationPasses(Passes, *FPasses, TM.get(), 2, 0);
if (OptLevelOs)
AddOptimizationPasses(Passes, *FPasses, TM.get(), 2, 1);
if (OptLevelOz)
AddOptimizationPasses(Passes, *FPasses, TM.get(), 2, 2);
if (OptLevelO3)
AddOptimizationPasses(Passes, *FPasses, TM.get(), 3, 0);
if (FPasses) {
FPasses->doInitialization();
for (Function &F : *M)
FPasses->run(F);
FPasses->doFinalization();
}
// Check that the module is well formed on completion of optimization
if (!NoVerify && !VerifyEach)
Passes.add(createVerifierPass());
if (EnableDebugify)
Passes.add(createCheckDebugifyPass());
// In run twice mode, we want to make sure the output is bit-by-bit
// equivalent if we run the pass manager again, so setup two buffers and
// a stream to write to them. Note that llc does something similar and it
// may be worth abstracting this out in the future.
SmallVector<char, 0> Buffer;
SmallVector<char, 0> CompileTwiceBuffer;
std::unique_ptr<raw_svector_ostream> BOS;
raw_ostream *OS = nullptr;
// Write bitcode or assembly to the output as the last step...
if (!NoOutput && !AnalyzeOnly) {
assert(Out);
OS = &Out->os();
if (RunTwice) {
BOS = make_unique<raw_svector_ostream>(Buffer);
OS = BOS.get();
}
if (OutputAssembly) {
if (EmitSummaryIndex)
report_fatal_error("Text output is incompatible with -module-summary");
if (EmitModuleHash)
report_fatal_error("Text output is incompatible with -module-hash");
Passes.add(createPrintModulePass(*OS, "", PreserveAssemblyUseListOrder));
} else if (OutputThinLTOBC)
Passes.add(createWriteThinLTOBitcodePass(
*OS, ThinLinkOut ? &ThinLinkOut->os() : nullptr));
else
Passes.add(createBitcodeWriterPass(*OS, PreserveBitcodeUseListOrder,
EmitSummaryIndex, EmitModuleHash));
}
// Before executing passes, print the final values of the LLVM options.
cl::PrintOptionValues();
// If requested, run all passes again with the same pass manager to catch
// bugs caused by persistent state in the passes
if (RunTwice) {
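//......... (portions of code omitted here) .........
Even truncated, Example 14's SmallVector usage is visible: SmallVector<char, 0> declares no inline storage at all, a sensible trade-off when the buffer is usually either unused or far too large for the stack, and raw_svector_ostream streams directly into it. A minimal sketch:
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdio>

int main() {
  // Zero inline elements: all storage is heap-allocated on first use, which
  // keeps the object tiny when the buffer is often large (or often empty).
  llvm::SmallVector<char, 0> Buffer;
  llvm::raw_svector_ostream BOS(Buffer);
  BOS << "bitcode bytes would go here: " << 42;
  BOS.flush(); // a no-op on unbuffered streams; kept for older LLVMs
  std::printf("%zu bytes buffered\n", Buffer.size());
  return 0;
}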
Example 15: pred_end
/// ProcessLoop - Walk the loop structure in depth first order, ensuring that
/// all loops have preheaders.
///
bool LoopSimplify::ProcessLoop(Loop *L, LPPassManager &LPM) {
bool Changed = false;
ReprocessLoop:
// Check to see that no blocks (other than the header) in this loop have
// predecessors that are not in the loop. This is not valid for natural
// loops, but can occur if the blocks are unreachable. Since they are
// unreachable we can just shamelessly delete those CFG edges!
for (Loop::block_iterator BB = L->block_begin(), E = L->block_end();
BB != E; ++BB) {
if (*BB == L->getHeader()) continue;
SmallPtrSet<BasicBlock*, 4> BadPreds;
for (pred_iterator PI = pred_begin(*BB),
PE = pred_end(*BB); PI != PE; ++PI) {
BasicBlock *P = *PI;
if (!L->contains(P))
BadPreds.insert(P);
}
// Delete each unique out-of-loop (and thus dead) predecessor.
for (SmallPtrSet<BasicBlock*, 4>::iterator I = BadPreds.begin(),
E = BadPreds.end(); I != E; ++I) {
DEBUG(dbgs() << "LoopSimplify: Deleting edge from dead predecessor "
<< (*I)->getName() << "\n");
// Inform each successor of each dead pred.
for (succ_iterator SI = succ_begin(*I), SE = succ_end(*I); SI != SE; ++SI)
(*SI)->removePredecessor(*I);
// Zap the dead pred's terminator and replace it with unreachable.
TerminatorInst *TI = (*I)->getTerminator();
TI->replaceAllUsesWith(UndefValue::get(TI->getType()));
(*I)->getTerminator()->eraseFromParent();
new UnreachableInst((*I)->getContext(), *I);
Changed = true;
}
}
// If there are exiting blocks with branches on undef, resolve the undef in
// the direction which will exit the loop. This will help simplify loop
// trip count computations.
SmallVector<BasicBlock*, 8> ExitingBlocks;
L->getExitingBlocks(ExitingBlocks);
for (SmallVectorImpl<BasicBlock *>::iterator I = ExitingBlocks.begin(),
E = ExitingBlocks.end(); I != E; ++I)
if (BranchInst *BI = dyn_cast<BranchInst>((*I)->getTerminator()))
if (BI->isConditional()) {
if (UndefValue *Cond = dyn_cast<UndefValue>(BI->getCondition())) {
DEBUG(dbgs() << "LoopSimplify: Resolving \"br i1 undef\" to exit in "
<< (*I)->getName() << "\n");
BI->setCondition(ConstantInt::get(Cond->getType(),
!L->contains(BI->getSuccessor(0))));
// This may make the loop analyzable, force SCEV recomputation.
if (SE)
SE->forgetLoop(L);
Changed = true;
}
}
// Does the loop already have a preheader? If so, don't insert one.
BasicBlock *Preheader = L->getLoopPreheader();
if (!Preheader) {
Preheader = InsertPreheaderForLoop(L);
if (Preheader) {
++NumInserted;
Changed = true;
}
}
// Next, check to make sure that all exit nodes of the loop only have
// predecessors that are inside of the loop. This check guarantees that the
// loop preheader/header will dominate the exit blocks. If the exit block has
// predecessors from outside of the loop, split the edge now.
SmallVector<BasicBlock*, 8> ExitBlocks;
L->getExitBlocks(ExitBlocks);
SmallSetVector<BasicBlock *, 8> ExitBlockSet(ExitBlocks.begin(),
ExitBlocks.end());
for (SmallSetVector<BasicBlock *, 8>::iterator I = ExitBlockSet.begin(),
E = ExitBlockSet.end(); I != E; ++I) {
BasicBlock *ExitBlock = *I;
for (pred_iterator PI = pred_begin(ExitBlock), PE = pred_end(ExitBlock);
PI != PE; ++PI)
// Must be exactly this loop: no subloops, parent loops, or non-loop preds
// allowed.
if (!L->contains(*PI)) {
if (RewriteLoopExitBlock(L, ExitBlock)) {
++NumInserted;
Changed = true;
}
break;
}
//......... (portions of code omitted here) .........
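Example 15 copies the exit blocks into a SmallSetVector, which deduplicates while preserving insertion order; that matters because getExitBlocks can report a block once per exiting edge. Sketch:
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include <cstdio>

int main() {
  // The raw list may contain duplicates, like ExitBlocks above.
  llvm::SmallVector<int, 8> ExitBlocks = {3, 1, 3, 2, 1};
  // SmallSetVector drops duplicates but keeps first-insertion order,
  // mirroring the ExitBlockSet construction in the example.
  llvm::SmallSetVector<int, 8> ExitBlockSet(ExitBlocks.begin(),
                                            ExitBlocks.end());
  for (int BB : ExitBlockSet)
    std::printf("%d ", BB); // prints 3 1 2
  return 0;
}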