This article collects typical usage examples of the C++ SmallVector::begin method. If you have been wondering what SmallVector::begin does, how to call it, or where it is used in practice, the curated code examples below may help. You can also explore further usage examples of the enclosing class, SmallVector.
15 code examples of the SmallVector::begin method are shown below, sorted by popularity by default.
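Before diving into the examples, here is a minimal, self-contained sketch of what SmallVector::begin gives you. This code is written for this article (it is not taken from any of the examples below) and assumes LLVM's ADT headers are available; SmallVector stores its first N elements inline, and its iterators are plain pointers, so begin()/end() plug directly into standard algorithms.

#include "llvm/ADT/SmallVector.h"
#include <numeric>

int sumAll() {
  // The first 4 elements live inline; the 5th spills to the heap.
  llvm::SmallVector<int, 4> V{1, 2, 3, 4, 5};
  // begin()/end() return random-access iterators (raw pointers here),
  // so any standard algorithm works on the vector.
  return std::accumulate(V.begin(), V.end(), 0); // 15
}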
Example 1: recurseBasicBlock
// recurseBasicBlock() - This calculates the ProfileInfo estimation for a
// single block and then recurses into the successors.
// The algorithm preserves the flow condition, meaning that the sum of the
// weight of the incoming edges must equal the block weight, which must in
// turn equal the sum of the weights of the outgoing edges.
// Since the flow of a block is determined from the current state of the
// flow, once an edge has a flow assigned this flow is never changed again,
// otherwise it would be possible to violate the flow condition in another
// block.
void ProfileEstimatorPass::recurseBasicBlock(BasicBlock *BB) {
// Break the recursion if this BasicBlock was already visited.
if (BBToVisit.find(BB) == BBToVisit.end()) return;
// Read the LoopInfo for this block.
bool BBisHeader = LI->isLoopHeader(BB);
Loop* BBLoop = LI->getLoopFor(BB);
// To get the block weight, read all incoming edges.
double BBWeight = 0;
std::set<BasicBlock*> ProcessedPreds;
for ( pred_iterator bbi = pred_begin(BB), bbe = pred_end(BB);
bbi != bbe; ++bbi ) {
// If this block was not considered already, add weight.
Edge edge = getEdge(*bbi,BB);
double w = getEdgeWeight(edge);
if (ProcessedPreds.insert(*bbi).second) {
BBWeight += ignoreMissing(w);
}
// If this block is a loop header and the predecessor is contained in this
// loop (i.e. the edge is a backedge), continue and do not check whether the
// value is valid.
if (BBisHeader && BBLoop->contains(*bbi)) {
printEdgeError(edge, "but is backedge, continueing");
continue;
}
// If the edge's value is missing (and this is neither a loop header nor a
// backedge), return; this block is currently not estimatable.
if (w == MissingValue) {
printEdgeError(edge, "returning");
return;
}
}
if (getExecutionCount(BB) != MissingValue) {
BBWeight = getExecutionCount(BB);
}
// Fetch all necessary information for current block.
SmallVector<Edge, 8> ExitEdges;
SmallVector<Edge, 8> Edges;
if (BBLoop) {
BBLoop->getExitEdges(ExitEdges);
}
// If this is a loop header, consider the following:
// Exactly the flow that is entering this block must exit this block too. So
// do the following:
// *) get all the exit edges, read the flow that is already leaving this
// loop, remember the edges that do not have any flow on them right now.
// (The edges that already have flow on them are most likely exiting edges of
// other loops; do not touch those flows because the previously calculated
// loop headers would not be exact anymore.)
// *) In case there is not a single exiting edge left, create one at the loop
// latch to prevent the flow from building up in the loop.
// *) Take the flow that is not leaving the loop already and distribute it on
// the remaining exiting edges.
// (This ensures that all flow that enters the loop also leaves it.)
// *) Increase the flow into the loop by increasing the weight of this block.
// There is at least one incoming backedge that will bring us this flow later
// on. (So that the flow condition in this node is valid again.)
if (BBisHeader) {
double incoming = BBWeight;
// Subtract the flow leaving the loop.
std::set<Edge> ProcessedExits;
for (SmallVector<Edge, 8>::iterator ei = ExitEdges.begin(),
ee = ExitEdges.end(); ei != ee; ++ei) {
if (ProcessedExits.insert(*ei).second) {
double w = getEdgeWeight(*ei);
if (w == MissingValue) {
Edges.push_back(*ei);
// Check if there is a necessary minimal weight, if yes, subtract it
// from weight.
if (MinimalWeight.find(*ei) != MinimalWeight.end()) {
incoming -= MinimalWeight[*ei];
DEBUG(dbgs() << "Reserving " << format("%.20g",MinimalWeight[*ei]) << " at " << (*ei) << "\n");
}
} else {
incoming -= w;
}
}
}
// If no exit edges, create one:
if (Edges.size() == 0) {
BasicBlock *Latch = BBLoop->getLoopLatch();
if (Latch) {
Edge edge = getEdge(Latch,0);
EdgeInformation[BB->getParent()][edge] = BBWeight;
printEdgeWeight(edge);
edge = getEdge(Latch, BB);
EdgeInformation[BB->getParent()][edge] = BBWeight * ExecCount;
//......... the rest of this code is omitted .........
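A detail worth noting in Example 1 is the deduplicate-while-accumulating idiom: set.insert(x).second is true only on first insertion, so each predecessor's weight is counted exactly once. A standalone sketch of that idiom with stand-in data (illustrative only, not LLVM code):

#include "llvm/ADT/SmallVector.h"
#include <set>
#include <utility>

double sumUniqueWeights(
    const llvm::SmallVector<std::pair<int, double>, 8> &Edges) {
  double Total = 0;
  std::set<int> Seen;
  for (auto I = Edges.begin(), E = Edges.end(); I != E; ++I)
    if (Seen.insert(I->first).second) // true only the first time this key appears
      Total += I->second;
  return Total;
}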
Example 2: parseFunctionArguments
/// Parse a function definition signature.
/// func-signature:
/// func-arguments func-throws? func-signature-result?
/// func-signature-result:
/// '->' type
///
/// Note that this leaves retType as null if unspecified.
ParserStatus
Parser::parseFunctionSignature(Identifier SimpleName,
DeclName &FullName,
SmallVectorImpl<ParameterList*> &bodyParams,
DefaultArgumentInfo &defaultArgs,
SourceLoc &throwsLoc,
bool &rethrows,
TypeRepr *&retType) {
SmallVector<Identifier, 4> NamePieces;
NamePieces.push_back(SimpleName);
FullName = SimpleName;
ParserStatus Status;
// We force the first type of a func declaration to be a tuple for consistency.
if (Tok.is(tok::l_paren)) {
ParameterContextKind paramContext;
if (SimpleName.isOperator())
paramContext = ParameterContextKind::Operator;
else
paramContext = ParameterContextKind::Function;
Status = parseFunctionArguments(NamePieces, bodyParams, paramContext,
defaultArgs);
FullName = DeclName(Context, SimpleName,
llvm::makeArrayRef(NamePieces.begin() + 1,
NamePieces.end()));
if (bodyParams.empty()) {
// If we didn't get anything, add a () pattern to avoid breaking
// invariants.
assert(Status.hasCodeCompletion() || Status.isError());
bodyParams.push_back(ParameterList::createEmpty(Context));
}
} else {
diagnose(Tok, diag::func_decl_without_paren);
Status = makeParserError();
// Recover by creating a '() -> ?' signature.
bodyParams.push_back(ParameterList::createEmpty(Context, PreviousLoc,
PreviousLoc));
FullName = DeclName(Context, SimpleName, bodyParams.back());
}
// Check for the 'throws' keyword.
rethrows = false;
if (Tok.is(tok::kw_throws)) {
throwsLoc = consumeToken();
} else if (Tok.is(tok::kw_rethrows)) {
throwsLoc = consumeToken();
rethrows = true;
} else if (Tok.is(tok::kw_throw)) {
throwsLoc = consumeToken();
diagnose(throwsLoc, diag::throw_in_function_type)
.fixItReplace(throwsLoc, "throws");
}
SourceLoc arrowLoc;
// If there's a trailing arrow, parse the rest as the result type.
if (Tok.isAny(tok::arrow, tok::colon)) {
if (!consumeIf(tok::arrow, arrowLoc)) {
// FixIt ':' to '->'.
diagnose(Tok, diag::func_decl_expected_arrow)
.fixItReplace(SourceRange(Tok.getLoc()), "->");
arrowLoc = consumeToken(tok::colon);
}
ParserResult<TypeRepr> ResultType =
parseType(diag::expected_type_function_result);
if (ResultType.hasCodeCompletion())
return ResultType;
retType = ResultType.getPtrOrNull();
if (!retType) {
Status.setIsParseError();
return Status;
}
} else {
// Otherwise, we leave retType null.
retType = nullptr;
}
// Check for 'throws' and 'rethrows' after the type and correct it.
if (!throwsLoc.isValid()) {
if (Tok.is(tok::kw_throws)) {
throwsLoc = consumeToken();
} else if (Tok.is(tok::kw_rethrows)) {
throwsLoc = consumeToken();
rethrows = true;
}
if (throwsLoc.isValid()) {
assert(arrowLoc.isValid());
assert(retType);
auto diag = rethrows ? diag::rethrows_after_function_result
//......... the rest of this code is omitted .........
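Example 2 uses SmallVector::begin plus an offset to expose a tail sub-range without copying: llvm::makeArrayRef accepts an iterator pair (in newer LLVM the equivalent ArrayRef constructor is preferred over makeArrayRef). A hedged sketch of the same pattern with plain int elements:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"

llvm::ArrayRef<int> tailView(const llvm::SmallVector<int, 4> &Pieces) {
  if (Pieces.empty())
    return {};
  // Skip the first element (the base name in the parser above) and hand the
  // rest to an ArrayRef-taking API; no elements are copied.
  return llvm::makeArrayRef(Pieces.begin() + 1, Pieces.end());
}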
Example 3: adjustLoopBranches
bool LoopInterchangeTransform::adjustLoopBranches() {
DEBUG(dbgs() << "adjustLoopBranches called\n");
// Adjust the loop preheader
BasicBlock *InnerLoopHeader = InnerLoop->getHeader();
BasicBlock *OuterLoopHeader = OuterLoop->getHeader();
BasicBlock *InnerLoopLatch = InnerLoop->getLoopLatch();
BasicBlock *OuterLoopLatch = OuterLoop->getLoopLatch();
BasicBlock *OuterLoopPreHeader = OuterLoop->getLoopPreheader();
BasicBlock *InnerLoopPreHeader = InnerLoop->getLoopPreheader();
BasicBlock *OuterLoopPredecessor = OuterLoopPreHeader->getUniquePredecessor();
BasicBlock *InnerLoopLatchPredecessor =
InnerLoopLatch->getUniquePredecessor();
BasicBlock *InnerLoopLatchSuccessor;
BasicBlock *OuterLoopLatchSuccessor;
BranchInst *OuterLoopLatchBI =
dyn_cast<BranchInst>(OuterLoopLatch->getTerminator());
BranchInst *InnerLoopLatchBI =
dyn_cast<BranchInst>(InnerLoopLatch->getTerminator());
BranchInst *OuterLoopHeaderBI =
dyn_cast<BranchInst>(OuterLoopHeader->getTerminator());
BranchInst *InnerLoopHeaderBI =
dyn_cast<BranchInst>(InnerLoopHeader->getTerminator());
if (!OuterLoopPredecessor || !InnerLoopLatchPredecessor ||
!OuterLoopLatchBI || !InnerLoopLatchBI || !OuterLoopHeaderBI ||
!InnerLoopHeaderBI)
return false;
BranchInst *InnerLoopLatchPredecessorBI =
dyn_cast<BranchInst>(InnerLoopLatchPredecessor->getTerminator());
BranchInst *OuterLoopPredecessorBI =
dyn_cast<BranchInst>(OuterLoopPredecessor->getTerminator());
if (!OuterLoopPredecessorBI || !InnerLoopLatchPredecessorBI)
return false;
BasicBlock *InnerLoopHeaderSuccessor = InnerLoopHeader->getUniqueSuccessor();
if (!InnerLoopHeaderSuccessor)
return false;
// Adjust Loop Preheader and headers
unsigned NumSucc = OuterLoopPredecessorBI->getNumSuccessors();
for (unsigned i = 0; i < NumSucc; ++i) {
if (OuterLoopPredecessorBI->getSuccessor(i) == OuterLoopPreHeader)
OuterLoopPredecessorBI->setSuccessor(i, InnerLoopPreHeader);
}
NumSucc = OuterLoopHeaderBI->getNumSuccessors();
for (unsigned i = 0; i < NumSucc; ++i) {
if (OuterLoopHeaderBI->getSuccessor(i) == OuterLoopLatch)
OuterLoopHeaderBI->setSuccessor(i, LoopExit);
else if (OuterLoopHeaderBI->getSuccessor(i) == InnerLoopPreHeader)
OuterLoopHeaderBI->setSuccessor(i, InnerLoopHeaderSuccessor);
}
// Adjust reduction PHI's now that the incoming block has changed.
updateIncomingBlock(InnerLoopHeaderSuccessor, InnerLoopHeader,
OuterLoopHeader);
BranchInst::Create(OuterLoopPreHeader, InnerLoopHeaderBI);
InnerLoopHeaderBI->eraseFromParent();
// -------------Adjust loop latches-----------
if (InnerLoopLatchBI->getSuccessor(0) == InnerLoopHeader)
InnerLoopLatchSuccessor = InnerLoopLatchBI->getSuccessor(1);
else
InnerLoopLatchSuccessor = InnerLoopLatchBI->getSuccessor(0);
NumSucc = InnerLoopLatchPredecessorBI->getNumSuccessors();
for (unsigned i = 0; i < NumSucc; ++i) {
if (InnerLoopLatchPredecessorBI->getSuccessor(i) == InnerLoopLatch)
InnerLoopLatchPredecessorBI->setSuccessor(i, InnerLoopLatchSuccessor);
}
// Adjust PHI nodes in InnerLoopLatchSuccessor. Update all uses of PHI with
// the value and remove this PHI node from inner loop.
SmallVector<PHINode *, 8> LcssaVec;
for (auto I = InnerLoopLatchSuccessor->begin(); isa<PHINode>(I); ++I) {
PHINode *LcssaPhi = cast<PHINode>(I);
LcssaVec.push_back(LcssaPhi);
}
for (auto I = LcssaVec.begin(), E = LcssaVec.end(); I != E; ++I) {
PHINode *P = *I;
Value *Incoming = P->getIncomingValueForBlock(InnerLoopLatch);
P->replaceAllUsesWith(Incoming);
P->eraseFromParent();
}
if (OuterLoopLatchBI->getSuccessor(0) == OuterLoopHeader)
OuterLoopLatchSuccessor = OuterLoopLatchBI->getSuccessor(1);
else
OuterLoopLatchSuccessor = OuterLoopLatchBI->getSuccessor(0);
if (InnerLoopLatchBI->getSuccessor(1) == InnerLoopLatchSuccessor)
InnerLoopLatchBI->setSuccessor(1, OuterLoopLatchSuccessor);
else
InnerLoopLatchBI->setSuccessor(0, OuterLoopLatchSuccessor);
//......... the rest of this code is omitted .........
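Example 3 first snapshots the LCSSA PHI nodes into a SmallVector (LcssaVec) and only then erases them: deleting an instruction while walking the block would invalidate the iterator. A minimal sketch of this snapshot-then-mutate idiom, using a std::list as the stand-in container:

#include "llvm/ADT/SmallVector.h"
#include <list>

void removeNegatives(std::list<int> &L) {
  // Snapshot the nodes to remove first...
  llvm::SmallVector<std::list<int>::iterator, 8> ToErase;
  for (auto I = L.begin(), E = L.end(); I != E; ++I)
    if (*I < 0)
      ToErase.push_back(I);
  // ...then mutate, iterating the stable snapshot instead of the list itself.
  // (std::list iterators stay valid when other elements are erased.)
  for (auto I = ToErase.begin(), E = ToErase.end(); I != E; ++I)
    L.erase(*I);
}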
Example 4: InsertUnwindResumeCalls
/// InsertUnwindResumeCalls - Convert the ResumeInsts that are still present
/// into calls to the appropriate _Unwind_Resume function.
bool DwarfEHPrepare::InsertUnwindResumeCalls(Function &Fn) {
bool UsesNewEH = false;
SmallVector<ResumeInst*, 16> Resumes;
for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) {
TerminatorInst *TI = I->getTerminator();
if (ResumeInst *RI = dyn_cast<ResumeInst>(TI))
Resumes.push_back(RI);
else if (InvokeInst *II = dyn_cast<InvokeInst>(TI))
UsesNewEH = II->getUnwindDest()->isLandingPad();
}
if (Resumes.empty())
return UsesNewEH;
// Find the rewind function if we didn't already.
if (!RewindFunction) {
LLVMContext &Ctx = Resumes[0]->getContext();
FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx),
Type::getInt8PtrTy(Ctx), false);
const char *RewindName = TLI->getLibcallName(RTLIB::UNWIND_RESUME);
RewindFunction = Fn.getParent()->getOrInsertFunction(RewindName, FTy);
}
// Create the basic block where the _Unwind_Resume call will live.
LLVMContext &Ctx = Fn.getContext();
unsigned ResumesSize = Resumes.size();
if (ResumesSize == 1) {
// Instead of creating a new BB and PHI node, just append the call to
// _Unwind_Resume to the end of the single resume block.
ResumeInst *RI = Resumes.front();
BasicBlock *UnwindBB = RI->getParent();
Value *ExnObj = GetExceptionObject(RI);
// Call the _Unwind_Resume function.
CallInst *CI = CallInst::Create(RewindFunction, ExnObj, "", UnwindBB);
CI->setCallingConv(TLI->getLibcallCallingConv(RTLIB::UNWIND_RESUME));
// We never expect _Unwind_Resume to return.
new UnreachableInst(Ctx, UnwindBB);
return true;
}
BasicBlock *UnwindBB = BasicBlock::Create(Ctx, "unwind_resume", &Fn);
PHINode *PN = PHINode::Create(Type::getInt8PtrTy(Ctx), ResumesSize,
"exn.obj", UnwindBB);
// Extract the exception object from the ResumeInst and add it to the PHI node
// that feeds the _Unwind_Resume call.
for (SmallVectorImpl<ResumeInst*>::iterator
I = Resumes.begin(), E = Resumes.end(); I != E; ++I) {
ResumeInst *RI = *I;
BasicBlock *Parent = RI->getParent();
BranchInst::Create(UnwindBB, Parent);
Value *ExnObj = GetExceptionObject(RI);
PN->addIncoming(ExnObj, Parent);
++NumResumesLowered;
}
// Call the function.
CallInst *CI = CallInst::Create(RewindFunction, PN, "", UnwindBB);
CI->setCallingConv(TLI->getLibcallCallingConv(RTLIB::UNWIND_RESUME));
// We never expect _Unwind_Resume to return.
new UnreachableInst(Ctx, UnwindBB);
return true;
}
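Example 4 iterates Resumes with an explicit SmallVectorImpl<ResumeInst*>::iterator. Taking SmallVectorImpl& rather than SmallVector<T, N>& in an interface lets callers choose any inline capacity; a sketch with illustrative types:

#include "llvm/ADT/SmallVector.h"

// Accepts a SmallVector of any inline capacity.
int sum(const llvm::SmallVectorImpl<int> &V) {
  int Total = 0;
  for (llvm::SmallVectorImpl<int>::const_iterator I = V.begin(), E = V.end();
       I != E; ++I)
    Total += *I;
  return Total;
}

int demo() {
  llvm::SmallVector<int, 16> V;
  V.push_back(1);
  V.push_back(2);
  return sum(V); // 3
}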
Example 5: EmitExceptionTable
unsigned char* JITDwarfEmitter::EmitExceptionTable(MachineFunction* MF,
unsigned char* StartFunction,
unsigned char* EndFunction) const {
assert(MMI && "MachineModuleInfo not registered!");
// Map all labels and get rid of any dead landing pads.
MMI->TidyLandingPads(JCE->getLabelLocations());
const std::vector<const GlobalVariable *> &TypeInfos = MMI->getTypeInfos();
const std::vector<unsigned> &FilterIds = MMI->getFilterIds();
const std::vector<LandingPadInfo> &PadInfos = MMI->getLandingPads();
if (PadInfos.empty()) return 0;
// Sort the landing pads in order of their type ids. This is used to fold
// duplicate actions.
SmallVector<const LandingPadInfo *, 64> LandingPads;
LandingPads.reserve(PadInfos.size());
for (unsigned i = 0, N = PadInfos.size(); i != N; ++i)
LandingPads.push_back(&PadInfos[i]);
std::sort(LandingPads.begin(), LandingPads.end(), PadLT);
// Negative type ids index into FilterIds, positive type ids index into
// TypeInfos. The value written for a positive type id is just the type
// id itself. For a negative type id, however, the value written is the
// (negative) byte offset of the corresponding FilterIds entry. The byte
// offset is usually equal to the type id, because the FilterIds entries
// are written using a variable width encoding which outputs one byte per
// entry as long as the value written is not too large, but can differ.
// This kind of complication does not occur for positive type ids because
// type infos are output using a fixed width encoding.
// FilterOffsets[i] holds the byte offset corresponding to FilterIds[i].
SmallVector<int, 16> FilterOffsets;
FilterOffsets.reserve(FilterIds.size());
int Offset = -1;
for(std::vector<unsigned>::const_iterator I = FilterIds.begin(),
E = FilterIds.end(); I != E; ++I) {
FilterOffsets.push_back(Offset);
Offset -= MCAsmInfo::getULEB128Size(*I);
}
// Compute the actions table and gather the first action index for each
// landing pad site.
SmallVector<ActionEntry, 32> Actions;
SmallVector<unsigned, 64> FirstActions;
FirstActions.reserve(LandingPads.size());
int FirstAction = 0;
unsigned SizeActions = 0;
for (unsigned i = 0, N = LandingPads.size(); i != N; ++i) {
const LandingPadInfo *LP = LandingPads[i];
const std::vector<int> &TypeIds = LP->TypeIds;
const unsigned NumShared = i ? SharedTypeIds(LP, LandingPads[i-1]) : 0;
unsigned SizeSiteActions = 0;
if (NumShared < TypeIds.size()) {
unsigned SizeAction = 0;
ActionEntry *PrevAction = 0;
if (NumShared) {
const unsigned SizePrevIds = LandingPads[i-1]->TypeIds.size();
assert(Actions.size());
PrevAction = &Actions.back();
SizeAction = MCAsmInfo::getSLEB128Size(PrevAction->NextAction) +
MCAsmInfo::getSLEB128Size(PrevAction->ValueForTypeID);
for (unsigned j = NumShared; j != SizePrevIds; ++j) {
SizeAction -= MCAsmInfo::getSLEB128Size(PrevAction->ValueForTypeID);
SizeAction += -PrevAction->NextAction;
PrevAction = PrevAction->Previous;
}
}
// Compute the actions.
for (unsigned I = NumShared, M = TypeIds.size(); I != M; ++I) {
int TypeID = TypeIds[I];
assert(-1-TypeID < (int)FilterOffsets.size() && "Unknown filter id!");
int ValueForTypeID = TypeID < 0 ? FilterOffsets[-1 - TypeID] : TypeID;
unsigned SizeTypeID = MCAsmInfo::getSLEB128Size(ValueForTypeID);
int NextAction = SizeAction ? -(SizeAction + SizeTypeID) : 0;
SizeAction = SizeTypeID + MCAsmInfo::getSLEB128Size(NextAction);
SizeSiteActions += SizeAction;
ActionEntry Action = {ValueForTypeID, NextAction, PrevAction};
Actions.push_back(Action);
PrevAction = &Actions.back();
}
// Record the first action of the landing pad site.
FirstAction = SizeActions + SizeSiteActions - SizeAction + 1;
} // else identical - re-use previous FirstAction
FirstActions.push_back(FirstAction);
// Compute this site's contribution to the size.
SizeActions += SizeSiteActions;
}
// Compute the call-site table. Entries must be ordered by address.
SmallVector<CallSiteEntry, 64> CallSites;
//......... the rest of this code is omitted .........
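Example 5 hands begin()/end() straight to std::sort with a custom predicate (PadLT). Since SmallVector iterators are raw pointers, this is as cheap as sorting a plain array. An illustrative sketch with a stand-in Pad type and comparator:

#include "llvm/ADT/SmallVector.h"
#include <algorithm>

struct Pad { int TypeId; };

static bool padLT(const Pad *L, const Pad *R) { return L->TypeId < R->TypeId; }

void sortPads(llvm::SmallVector<const Pad *, 64> &Pads) {
  // Same shape as the LandingPads sort above, with a stand-in comparator.
  std::sort(Pads.begin(), Pads.end(), padLT);
}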
Example 6: lookupInModule
static void lookupInModule(ModuleDecl *module, ModuleDecl::AccessPathTy accessPath,
SmallVectorImpl<ValueDecl *> &decls,
ResolutionKind resolutionKind, bool canReturnEarly,
LazyResolver *typeResolver,
ModuleLookupCache &cache,
const DeclContext *moduleScopeContext,
bool respectAccessControl,
ArrayRef<ModuleDecl::ImportedModule> extraImports,
CallbackTy callback) {
assert(module);
assert(std::none_of(extraImports.begin(), extraImports.end(),
[](ModuleDecl::ImportedModule import) -> bool {
return !import.second;
}));
ModuleLookupCache::iterator iter;
bool isNew;
std::tie(iter, isNew) = cache.insert({{accessPath, module}, {}});
if (!isNew) {
decls.append(iter->second.begin(), iter->second.end());
return;
}
size_t initialCount = decls.size();
SmallVector<ValueDecl *, 4> localDecls;
callback(module, accessPath, localDecls);
if (respectAccessControl) {
auto newEndIter = std::remove_if(localDecls.begin(), localDecls.end(),
[=](ValueDecl *VD) {
return !VD->isAccessibleFrom(moduleScopeContext);
});
localDecls.erase(newEndIter, localDecls.end());
// This only applies to immediate imports of the top-level module.
if (moduleScopeContext && moduleScopeContext->getParentModule() != module)
moduleScopeContext = nullptr;
}
OverloadSetTy overloads;
resolutionKind = recordImportDecls(typeResolver, decls, localDecls,
overloads, resolutionKind);
bool foundDecls = decls.size() > initialCount;
if (!foundDecls || !canReturnEarly ||
resolutionKind == ResolutionKind::Overloadable) {
SmallVector<ModuleDecl::ImportedModule, 8> reexports;
module->getImportedModulesForLookup(reexports);
assert(std::none_of(reexports.begin(), reexports.end(),
[](ModuleDecl::ImportedModule import) -> bool {
return !import.second;
}));
reexports.append(extraImports.begin(), extraImports.end());
// Prefer scoped imports (import func Swift.max) to whole-module imports.
SmallVector<ValueDecl *, 8> unscopedValues;
SmallVector<ValueDecl *, 8> scopedValues;
for (auto next : reexports) {
// Filter any whole-module imports, and skip specific-decl imports if the
// import path doesn't match exactly.
ModuleDecl::AccessPathTy combinedAccessPath;
if (accessPath.empty()) {
combinedAccessPath = next.first;
} else if (!next.first.empty() &&
!ModuleDecl::isSameAccessPath(next.first, accessPath)) {
// If we ever allow importing non-top-level decls, it's possible the
// rule above isn't what we want.
assert(next.first.size() == 1 && "import of non-top-level decl");
continue;
} else {
combinedAccessPath = accessPath;
}
auto &resultSet = next.first.empty() ? unscopedValues : scopedValues;
lookupInModule<OverloadSetTy>(next.second, combinedAccessPath,
resultSet, resolutionKind, canReturnEarly,
typeResolver, cache, moduleScopeContext,
respectAccessControl, {}, callback);
}
// Add the results from scoped imports.
resolutionKind = recordImportDecls(typeResolver, decls, scopedValues,
overloads, resolutionKind);
// Add the results from unscoped imports.
foundDecls = decls.size() > initialCount;
if (!foundDecls || !canReturnEarly ||
resolutionKind == ResolutionKind::Overloadable) {
resolutionKind = recordImportDecls(typeResolver, decls, unscopedValues,
overloads, resolutionKind);
}
}
// Remove duplicated declarations.
llvm::SmallPtrSet<ValueDecl *, 4> knownDecls;
decls.erase(std::remove_if(decls.begin() + initialCount, decls.end(),
[&](ValueDecl *d) -> bool {
return !knownDecls.insert(d).second;
}),
decls.end());
//......... the rest of this code is omitted .........
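The dedup step at the end of Example 6 is the classic erase/remove_if idiom, started at begin() + initialCount so that results which were already present are left untouched. A sketch of the same shape (stand-in element type; assumes SmallPtrSet::insert returns an iterator/bool pair, as in recent LLVM):

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include <algorithm>
#include <cstddef>

void dedupTail(llvm::SmallVector<int *, 8> &Decls, size_t InitialCount) {
  llvm::SmallPtrSet<int *, 4> Known;
  // Remove every element after InitialCount that was already seen.
  Decls.erase(std::remove_if(Decls.begin() + InitialCount, Decls.end(),
                             [&](int *D) { return !Known.insert(D).second; }),
              Decls.end());
}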
Example 7: emitExceptionTable
/// Emit landing pads and actions.
///
/// The general organization of the table is complex, but the basic concepts are
/// easy. First there is a header which describes the location and organization
/// of the three components that follow.
///
/// 1. The landing pad site information describes the range of code covered by
/// the try. In our case it's an accumulation of the ranges covered by the
/// invokes in the try. There is also a reference to the landing pad that
/// handles the exception once processed. Finally an index into the actions
/// table.
/// 2. The action table, in our case, is composed of pairs of type IDs and next
/// action offset. Starting with the action index from the landing pad
/// site, each type ID is checked for a match to the current exception. If
/// it matches then the exception and type id are passed on to the landing
/// pad. Otherwise the next action is looked up. This chain is terminated
/// with a next action of zero. If no type id is found then the frame is
/// unwound and handling continues.
/// 3. Type ID table contains references to all the C++ typeinfo for all
/// catches in the function. This table is reverse indexed base 1.
void EHStreamer::emitExceptionTable() {
const std::vector<const GlobalValue *> &TypeInfos = MMI->getTypeInfos();
const std::vector<unsigned> &FilterIds = MMI->getFilterIds();
const std::vector<LandingPadInfo> &PadInfos = MMI->getLandingPads();
// Sort the landing pads in order of their type ids. This is used to fold
// duplicate actions.
SmallVector<const LandingPadInfo *, 64> LandingPads;
LandingPads.reserve(PadInfos.size());
for (unsigned i = 0, N = PadInfos.size(); i != N; ++i)
LandingPads.push_back(&PadInfos[i]);
// Order landing pads lexicographically by type id.
std::sort(LandingPads.begin(), LandingPads.end(),
[](const LandingPadInfo *L,
const LandingPadInfo *R) { return L->TypeIds < R->TypeIds; });
// Compute the actions table and gather the first action index for each
// landing pad site.
SmallVector<ActionEntry, 32> Actions;
SmallVector<unsigned, 64> FirstActions;
unsigned SizeActions =
computeActionsTable(LandingPads, Actions, FirstActions);
// Compute the call-site table.
SmallVector<CallSiteEntry, 64> CallSites;
computeCallSiteTable(CallSites, LandingPads, FirstActions);
// Final tallies.
// Call sites.
bool IsSJLJ = Asm->MAI->getExceptionHandlingType() == ExceptionHandling::SjLj;
bool HaveTTData = IsSJLJ ? (!TypeInfos.empty() || !FilterIds.empty()) : true;
unsigned CallSiteTableLength;
if (IsSJLJ)
CallSiteTableLength = 0;
else {
unsigned SiteStartSize = 4; // dwarf::DW_EH_PE_udata4
unsigned SiteLengthSize = 4; // dwarf::DW_EH_PE_udata4
unsigned LandingPadSize = 4; // dwarf::DW_EH_PE_udata4
CallSiteTableLength =
CallSites.size() * (SiteStartSize + SiteLengthSize + LandingPadSize);
}
for (unsigned i = 0, e = CallSites.size(); i < e; ++i) {
CallSiteTableLength += getULEB128Size(CallSites[i].Action);
if (IsSJLJ)
CallSiteTableLength += getULEB128Size(i);
}
// Type infos.
const MCSection *LSDASection = Asm->getObjFileLowering().getLSDASection();
unsigned TTypeEncoding;
unsigned TypeFormatSize;
if (!HaveTTData) {
// For SjLj exceptions, if there is no TypeInfo, then we just explicitly say
// that we're omitting that bit.
TTypeEncoding = dwarf::DW_EH_PE_omit;
// dwarf::DW_EH_PE_absptr
TypeFormatSize = Asm->getDataLayout().getPointerSize();
} else {
// Okay, we have actual filters or typeinfos to emit. As such, we need to
// pick a type encoding for them. We're about to emit a list of pointers to
// typeinfo objects at the end of the LSDA. However, unless we're in static
// mode, this reference will require a relocation by the dynamic linker.
//
// Because of this, we have a couple of options:
//
// 1) If we are in -static mode, we can always use an absolute reference
// from the LSDA, because the static linker will resolve it.
//
// 2) Otherwise, if the LSDA section is writable, we can output the direct
// reference to the typeinfo and allow the dynamic linker to relocate
// it. Since it is in a writable section, the dynamic linker won't
// have a problem.
//
// 3) Finally, if we're in PIC mode and the LSDA section isn't writable,
//......... the rest of this code is omitted .........
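Like Example 5, Example 7 calls reserve() before filling the SmallVector, so no reallocation happens during the push_back loop and iterators obtained afterwards stay valid. A compact sketch of that fill pattern:

#include "llvm/ADT/SmallVector.h"
#include <vector>

llvm::SmallVector<const int *, 64> collectPointers(const std::vector<int> &Infos) {
  llvm::SmallVector<const int *, 64> Ptrs;
  Ptrs.reserve(Infos.size()); // at most one allocation; no reallocation below
  for (unsigned i = 0, N = Infos.size(); i != N; ++i)
    Ptrs.push_back(&Infos[i]);
  return Ptrs;
}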
Example 8: emitImplicitValueConstructor
static void emitImplicitValueConstructor(SILGenFunction &gen,
ConstructorDecl *ctor) {
RegularLocation Loc(ctor);
Loc.markAutoGenerated();
// FIXME: Handle 'self' along with the other arguments.
auto *paramList = ctor->getParameterList(1);
auto selfTyCan = ctor->getImplicitSelfDecl()->getType()->getInOutObjectType();
SILType selfTy = gen.getLoweredType(selfTyCan);
// Emit the indirect return argument, if any.
SILValue resultSlot;
if (selfTy.isAddressOnly(gen.SGM.M)) {
auto &AC = gen.getASTContext();
auto VD = new (AC) ParamDecl(/*IsLet*/ false, SourceLoc(),
AC.getIdentifier("$return_value"),
SourceLoc(),
AC.getIdentifier("$return_value"), selfTyCan,
ctor);
resultSlot = new (gen.F.getModule()) SILArgument(gen.F.begin(), selfTy, VD);
}
// Emit the elementwise arguments.
SmallVector<RValue, 4> elements;
for (size_t i = 0, size = paramList->size(); i < size; ++i) {
auto ¶m = paramList->get(i);
elements.push_back(
emitImplicitValueConstructorArg(gen, Loc,
param.decl->getType()->getCanonicalType(),
ctor));
}
emitConstructorMetatypeArg(gen, ctor);
auto *decl = selfTy.getStructOrBoundGenericStruct();
assert(decl && "not a struct?!");
// If we have an indirect return slot, initialize it in-place.
if (resultSlot) {
auto elti = elements.begin(), eltEnd = elements.end();
for (VarDecl *field : decl->getStoredProperties()) {
auto fieldTy = selfTy.getFieldType(field, gen.SGM.M);
auto &fieldTL = gen.getTypeLowering(fieldTy);
SILValue slot = gen.B.createStructElementAddr(Loc, resultSlot, field,
fieldTL.getLoweredType().getAddressType());
InitializationPtr init(new KnownAddressInitialization(slot));
// An initialized 'let' property has a single value specified by the
// initializer - it doesn't come from an argument.
if (!field->isStatic() && field->isLet() &&
field->getParentInitializer()) {
assert(field->getType()->isEqual(field->getParentInitializer()
->getType()) && "Checked by sema");
// Cleanup after this initialization.
FullExpr scope(gen.Cleanups, field->getParentPatternBinding());
gen.emitRValue(field->getParentInitializer())
.forwardInto(gen, init.get(), Loc);
continue;
}
assert(elti != eltEnd && "number of args does not match number of fields");
(void)eltEnd;
std::move(*elti).forwardInto(gen, init.get(), Loc);
++elti;
}
gen.B.createReturn(ImplicitReturnLocation::getImplicitReturnLoc(Loc),
gen.emitEmptyTuple(Loc));
return;
}
// Otherwise, build a struct value directly from the elements.
SmallVector<SILValue, 4> eltValues;
auto elti = elements.begin(), eltEnd = elements.end();
for (VarDecl *field : decl->getStoredProperties()) {
auto fieldTy = selfTy.getFieldType(field, gen.SGM.M);
SILValue v;
// An initialized 'let' property has a single value specified by the
// initializer - it doesn't come from an argument.
if (!field->isStatic() && field->isLet() && field->getParentInitializer()) {
// Cleanup after this initialization.
FullExpr scope(gen.Cleanups, field->getParentPatternBinding());
v = gen.emitRValue(field->getParentInitializer())
.forwardAsSingleStorageValue(gen, fieldTy, Loc);
} else {
assert(elti != eltEnd && "number of args does not match number of fields");
(void)eltEnd;
v = std::move(*elti).forwardAsSingleStorageValue(gen, fieldTy, Loc);
++elti;
}
eltValues.push_back(v);
}
SILValue selfValue = gen.B.createStruct(Loc, selfTy, eltValues);
gen.B.createReturn(ImplicitReturnLocation::getImplicitReturnLoc(Loc),
selfValue);
//......... the rest of this code is omitted .........
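Example 8 advances a cursor (elti) obtained from elements.begin() alongside a second sequence, consuming one argument per stored property that does not carry its own initializer. A plain-data sketch of this cursor pattern (names are illustrative):

#include "llvm/ADT/SmallVector.h"
#include <cassert>
#include <cstddef>

// Assign one value per field; fields with a default consume no argument.
void assignArgs(const llvm::SmallVector<int, 4> &Args,
                llvm::SmallVector<int, 4> &Fields,
                const llvm::SmallVector<bool, 4> &HasDefault) {
  auto ArgI = Args.begin(), ArgE = Args.end();
  for (size_t i = 0, e = Fields.size(); i != e; ++i) {
    if (HasDefault[i]) {
      Fields[i] = 0; // stand-in for "use the field's own initializer"
      continue;
    }
    assert(ArgI != ArgE && "number of args does not match number of fields");
    Fields[i] = *ArgI++;
  }
}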
Example 9: AddReductionVar
//......... the beginning of this code is omitted .........
// A reduction operation must only have one use of the reduction value.
if (!IsAPhi && Kind != RK_IntegerMinMax && Kind != RK_FloatMinMax &&
hasMultipleUsesOf(Cur, VisitedInsts))
return false;
// All inputs to a PHI node must be a reduction value.
if (IsAPhi && Cur != Phi && !areAllUsesIn(Cur, VisitedInsts))
return false;
if (Kind == RK_IntegerMinMax &&
(isa<ICmpInst>(Cur) || isa<SelectInst>(Cur)))
++NumCmpSelectPatternInst;
if (Kind == RK_FloatMinMax && (isa<FCmpInst>(Cur) || isa<SelectInst>(Cur)))
++NumCmpSelectPatternInst;
// Check whether we found a reduction operator.
FoundReduxOp |= !IsAPhi && Cur != Start;
// Process users of current instruction. Push non-PHI nodes after PHI nodes
// onto the stack. This way we are going to have seen all inputs to PHI
// nodes once we get to them.
SmallVector<Instruction *, 8> NonPHIs;
SmallVector<Instruction *, 8> PHIs;
for (User *U : Cur->users()) {
Instruction *UI = cast<Instruction>(U);
// Check if we found the exit user.
BasicBlock *Parent = UI->getParent();
if (!TheLoop->contains(Parent)) {
// Exit if you find multiple outside users or if the header phi node is
// being used. In this case the user uses the value of the previous
// iteration, in which case we would lose "VF-1" iterations of the
// reduction operation if we vectorize.
if (ExitInstruction != nullptr || Cur == Phi)
return false;
// The instruction used by an outside user must be the last instruction
// before we feed back to the reduction phi. Otherwise, we lose VF-1
// operations on the value.
if (!is_contained(Phi->operands(), Cur))
return false;
ExitInstruction = Cur;
continue;
}
// Process instructions only once (termination). Each reduction cycle
// value must only be used once, except by phi nodes and min/max
// reductions which are represented as a cmp followed by a select.
InstDesc IgnoredVal(false, nullptr);
if (VisitedInsts.insert(UI).second) {
if (isa<PHINode>(UI))
PHIs.push_back(UI);
else
NonPHIs.push_back(UI);
} else if (!isa<PHINode>(UI) &&
((!isa<FCmpInst>(UI) && !isa<ICmpInst>(UI) &&
!isa<SelectInst>(UI)) ||
!isMinMaxSelectCmpPattern(UI, IgnoredVal).isRecurrence()))
return false;
// Remember that we completed the cycle.
if (UI == Phi)
FoundStartPHI = true;
}
Worklist.append(PHIs.begin(), PHIs.end());
Worklist.append(NonPHIs.begin(), NonPHIs.end());
}
// This means we have seen one but not the other instruction of the
// pattern or more than just a select and cmp.
if ((Kind == RK_IntegerMinMax || Kind == RK_FloatMinMax) &&
NumCmpSelectPatternInst != 2)
return false;
if (!FoundStartPHI || !FoundReduxOp || !ExitInstruction)
return false;
// If we think Phi may have been type-promoted, we also need to ensure that
// all source operands of the reduction are either SExtInsts or ZExtInsts. If
// so, we will be able to evaluate the reduction in the narrower bit width.
if (Start != Phi)
if (!getSourceExtensionKind(Start, ExitInstruction, RecurrenceType,
IsSigned, VisitedInsts, CastInsts))
return false;
// We found a reduction var if we have reached the original phi node and we
// only have a single instruction with out-of-loop users.
// The ExitInstruction (the instruction which is allowed to have out-of-loop users)
// is saved as part of the RecurrenceDescriptor.
// Save the description of this reduction variable.
RecurrenceDescriptor RD(
RdxStart, ExitInstruction, Kind, ReduxDesc.getMinMaxKind(),
ReduxDesc.getUnsafeAlgebraInst(), RecurrenceType, IsSigned, CastInsts);
RedDes = RD;
return true;
}
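Example 9 batches newly discovered users into temporary vectors (PHIs, NonPHIs) and splices them onto the worklist with append(begin, end), ensuring PHI nodes are seen only after their inputs. A sketch of the worklist idiom with integers standing in for instructions:

#include "llvm/ADT/SmallVector.h"

int drainWorklist(llvm::SmallVector<int, 8> Worklist) {
  int Visited = 0;
  while (!Worklist.empty()) {
    int Cur = Worklist.pop_back_val();
    ++Visited;
    // Collect this item's successors into a temporary batch...
    llvm::SmallVector<int, 8> Next;
    if (Cur > 1)
      Next.push_back(Cur / 2); // stand-in for "users of Cur"
    // ...then splice the whole batch onto the worklist at once.
    Worklist.append(Next.begin(), Next.end());
  }
  return Visited;
}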
Example 10: main
int main(int argc_, const char **argv_) {
llvm::sys::PrintStackTraceOnErrorSignal();
llvm::PrettyStackTraceProgram X(argc_, argv_);
std::set<std::string> SavedStrings;
SmallVector<const char*, 256> argv;
ExpandArgv(argc_, argv_, argv, SavedStrings);
// Handle -cc1 integrated tools.
if (argv.size() > 1 && StringRef(argv[1]).startswith("-cc1")) {
StringRef Tool = argv[1] + 4;
if (Tool == "")
return cc1_main(argv.data()+2, argv.data()+argv.size(), argv[0],
(void*) (intptr_t) GetExecutablePath);
if (Tool == "as")
return cc1as_main(argv.data()+2, argv.data()+argv.size(), argv[0],
(void*) (intptr_t) GetExecutablePath);
// Reject unknown tools.
llvm::errs() << "error: unknown integrated tool '" << Tool << "'\n";
return 1;
}
bool CanonicalPrefixes = true;
for (int i = 1, size = argv.size(); i < size; ++i) {
if (StringRef(argv[i]) == "-no-canonical-prefixes") {
CanonicalPrefixes = false;
break;
}
}
llvm::sys::Path Path = GetExecutablePath(argv[0], CanonicalPrefixes);
IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts = new DiagnosticOptions;
{
// Note that ParseDiagnosticArgs() uses the cc1 option table.
OwningPtr<OptTable> CC1Opts(createDriverOptTable());
unsigned MissingArgIndex, MissingArgCount;
OwningPtr<InputArgList> Args(CC1Opts->ParseArgs(argv.begin()+1, argv.end(),
MissingArgIndex, MissingArgCount));
// We ignore MissingArgCount and the return value of ParseDiagnosticArgs.
// Any errors that would be diagnosed here will also be diagnosed later,
// when the DiagnosticsEngine actually exists.
(void) ParseDiagnosticArgs(*DiagOpts, *Args);
}
// Now we can create the DiagnosticsEngine with a properly-filled-out
// DiagnosticOptions instance.
TextDiagnosticPrinter *DiagClient
= new TextDiagnosticPrinter(llvm::errs(), &*DiagOpts);
DiagClient->setPrefix(llvm::sys::path::stem(Path.str()));
IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
DiagnosticsEngine Diags(DiagID, &*DiagOpts, DiagClient);
ProcessWarningOptions(Diags, *DiagOpts);
#ifdef CLANG_IS_PRODUCTION
const bool IsProduction = true;
#else
const bool IsProduction = false;
#endif
Driver TheDriver(Path.str(), llvm::sys::getDefaultTargetTriple(),
"a.out", IsProduction, Diags);
// Attempt to find the original path used to invoke the driver, to determine
// the installed path. We do this manually, because we want to support that
// path being a symlink.
{
SmallString<128> InstalledPath(argv[0]);
// Do a PATH lookup, if there are no directory components.
if (llvm::sys::path::filename(InstalledPath) == InstalledPath) {
llvm::sys::Path Tmp = llvm::sys::Program::FindProgramByName(
llvm::sys::path::filename(InstalledPath.str()));
if (!Tmp.empty())
InstalledPath = Tmp.str();
}
llvm::sys::fs::make_absolute(InstalledPath);
InstalledPath = llvm::sys::path::parent_path(InstalledPath);
bool exists;
if (!llvm::sys::fs::exists(InstalledPath.str(), exists) && exists)
TheDriver.setInstalledDir(InstalledPath);
}
llvm::InitializeAllTargets();
ParseProgName(argv, SavedStrings, TheDriver);
// Handle CC_PRINT_OPTIONS and CC_PRINT_OPTIONS_FILE.
TheDriver.CCPrintOptions = !!::getenv("CC_PRINT_OPTIONS");
if (TheDriver.CCPrintOptions)
TheDriver.CCPrintOptionsFilename = ::getenv("CC_PRINT_OPTIONS_FILE");
// Handle CC_PRINT_HEADERS and CC_PRINT_HEADERS_FILE.
TheDriver.CCPrintHeaders = !!::getenv("CC_PRINT_HEADERS");
if (TheDriver.CCPrintHeaders)
TheDriver.CCPrintHeadersFilename = ::getenv("CC_PRINT_HEADERS_FILE");
// Handle CC_LOG_DIAGNOSTICS and CC_LOG_DIAGNOSTICS_FILE.
TheDriver.CCLogDiagnostics = !!::getenv("CC_LOG_DIAGNOSTICS");
//......... the rest of this code is omitted .........
Example 11: optimizeExtInstr
/// optimizeExtInstr - If instruction is a copy-like instruction, i.e. it reads
/// a single register and writes a single register and it does not modify the
/// source, and if the source value is preserved as a sub-register of the
/// result, then replace all reachable uses of the source with the subreg of the
/// result.
///
/// Do not generate an EXTRACT that is used only in a debug use, as this changes
/// the code. Since this code does not currently share EXTRACTs, just ignore all
/// debug uses.
bool PeepholeOptimizer::
optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
SmallPtrSet<MachineInstr*, 8> &LocalMIs) {
unsigned SrcReg, DstReg, SubIdx;
if (!TII->isCoalescableExtInstr(*MI, SrcReg, DstReg, SubIdx))
return false;
if (TargetRegisterInfo::isPhysicalRegister(DstReg) ||
TargetRegisterInfo::isPhysicalRegister(SrcReg))
return false;
MachineRegisterInfo::use_nodbg_iterator UI = MRI->use_nodbg_begin(SrcReg);
if (++UI == MRI->use_nodbg_end())
// No other uses.
return false;
// The source has other uses. See if we can replace the other uses with use of
// the result of the extension.
SmallPtrSet<MachineBasicBlock*, 4> ReachedBBs;
UI = MRI->use_nodbg_begin(DstReg);
for (MachineRegisterInfo::use_nodbg_iterator UE = MRI->use_nodbg_end();
UI != UE; ++UI)
ReachedBBs.insert(UI->getParent());
// Uses that are in the same BB of uses of the result of the instruction.
SmallVector<MachineOperand*, 8> Uses;
// Uses that the result of the instruction can reach.
SmallVector<MachineOperand*, 8> ExtendedUses;
bool ExtendLife = true;
UI = MRI->use_nodbg_begin(SrcReg);
for (MachineRegisterInfo::use_nodbg_iterator UE = MRI->use_nodbg_end();
UI != UE; ++UI) {
MachineOperand &UseMO = UI.getOperand();
MachineInstr *UseMI = &*UI;
if (UseMI == MI)
continue;
if (UseMI->isPHI()) {
ExtendLife = false;
continue;
}
// It's an error to translate this:
//
// %reg1025 = <sext> %reg1024
// ...
// %reg1026 = SUBREG_TO_REG 0, %reg1024, 4
//
// into this:
//
// %reg1025 = <sext> %reg1024
// ...
// %reg1027 = COPY %reg1025:4
// %reg1026 = SUBREG_TO_REG 0, %reg1027, 4
//
// The problem here is that SUBREG_TO_REG is there to assert that an
// implicit zext occurs. It doesn't insert a zext instruction. If we allow
// the COPY here, it will give us the value after the <sext>, not the
// original value of %reg1024 before <sext>.
if (UseMI->getOpcode() == TargetOpcode::SUBREG_TO_REG)
continue;
MachineBasicBlock *UseMBB = UseMI->getParent();
if (UseMBB == MBB) {
// Local uses that come after the extension.
if (!LocalMIs.count(UseMI))
Uses.push_back(&UseMO);
} else if (ReachedBBs.count(UseMBB)) {
// Non-local uses where the result of the extension is used. Always
// replace these unless it's a PHI.
Uses.push_back(&UseMO);
} else if (Aggressive && DT->dominates(MBB, UseMBB)) {
// We may want to extend the live range of the extension result in order
// to replace these uses.
ExtendedUses.push_back(&UseMO);
} else {
// Both will be live out of the def MBB anyway. Don't extend live range of
// the extension result.
ExtendLife = false;
break;
}
}
if (ExtendLife && !ExtendedUses.empty())
// Extend the liveness of the extension result.
std::copy(ExtendedUses.begin(), ExtendedUses.end(),
std::back_inserter(Uses));
// Now replace all uses.
//......... the rest of this code is omitted .........
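Example 11 merges one SmallVector into another via std::copy with std::back_inserter; SmallVector's own append(begin, end) does the same thing in one call. Sketch:

#include "llvm/ADT/SmallVector.h"
#include <algorithm>
#include <iterator>

void mergeUses(llvm::SmallVector<int *, 8> &Uses,
               const llvm::SmallVector<int *, 8> &ExtendedUses) {
  std::copy(ExtendedUses.begin(), ExtendedUses.end(), std::back_inserter(Uses));
  // Equivalent: Uses.append(ExtendedUses.begin(), ExtendedUses.end());
}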
Example 12: DetermineInsertionPoint
/// DetermineInsertionPoint - At this point, we're committed to promoting the
/// alloca using IDF's, and the standard SSA construction algorithm. Determine
/// which blocks need phi nodes and see if we can optimize out some work by
/// avoiding insertion of dead phi nodes.
void PromoteMem2Reg::DetermineInsertionPoint(AllocaInst *AI, unsigned AllocaNum,
AllocaInfo &Info) {
// Unique the set of defining blocks for efficient lookup.
SmallPtrSet<BasicBlock*, 32> DefBlocks;
DefBlocks.insert(Info.DefiningBlocks.begin(), Info.DefiningBlocks.end());
// Determine which blocks the value is live in. These are blocks which lead
// to uses.
SmallPtrSet<BasicBlock*, 32> LiveInBlocks;
ComputeLiveInBlocks(AI, Info, DefBlocks, LiveInBlocks);
// Use a priority queue keyed on dominator tree level so that inserted nodes
// are handled from the bottom of the dominator tree upwards.
typedef std::priority_queue<DomTreeNodePair, SmallVector<DomTreeNodePair, 32>,
DomTreeNodeCompare> IDFPriorityQueue;
IDFPriorityQueue PQ;
for (SmallPtrSet<BasicBlock*, 32>::const_iterator I = DefBlocks.begin(),
E = DefBlocks.end(); I != E; ++I) {
if (DomTreeNode *Node = DT.getNode(*I))
PQ.push(std::make_pair(Node, DomLevels[Node]));
}
SmallVector<std::pair<unsigned, BasicBlock*>, 32> DFBlocks;
SmallPtrSet<DomTreeNode*, 32> Visited;
SmallVector<DomTreeNode*, 32> Worklist;
while (!PQ.empty()) {
DomTreeNodePair RootPair = PQ.top();
PQ.pop();
DomTreeNode *Root = RootPair.first;
unsigned RootLevel = RootPair.second;
// Walk all dominator tree children of Root, inspecting their CFG edges with
// targets elsewhere on the dominator tree. Only targets whose level is at
// most Root's level are added to the iterated dominance frontier of the
// definition set.
Worklist.clear();
Worklist.push_back(Root);
while (!Worklist.empty()) {
DomTreeNode *Node = Worklist.pop_back_val();
BasicBlock *BB = Node->getBlock();
for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE;
++SI) {
DomTreeNode *SuccNode = DT.getNode(*SI);
// Quickly skip all CFG edges that are also dominator tree edges instead
// of catching them below.
if (SuccNode->getIDom() == Node)
continue;
unsigned SuccLevel = DomLevels[SuccNode];
if (SuccLevel > RootLevel)
continue;
if (!Visited.insert(SuccNode))
continue;
BasicBlock *SuccBB = SuccNode->getBlock();
if (!LiveInBlocks.count(SuccBB))
continue;
DFBlocks.push_back(std::make_pair(BBNumbers[SuccBB], SuccBB));
if (!DefBlocks.count(SuccBB))
PQ.push(std::make_pair(SuccNode, SuccLevel));
}
for (DomTreeNode::iterator CI = Node->begin(), CE = Node->end(); CI != CE;
++CI) {
if (!Visited.count(*CI))
Worklist.push_back(*CI);
}
}
}
if (DFBlocks.size() > 1)
std::sort(DFBlocks.begin(), DFBlocks.end());
unsigned CurrentVersion = 0;
for (unsigned i = 0, e = DFBlocks.size(); i != e; ++i)
QueuePhiNode(DFBlocks[i].second, AllocaNum, CurrentVersion);
}
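Example 12 shows a less obvious use: a SmallVector can back a std::priority_queue, because it provides front(), push_back(), pop_back(), and the required typedefs. A sketch of the same queue shape used for the IDF computation above (the pair layout is illustrative):

#include "llvm/ADT/SmallVector.h"
#include <queue>
#include <utility>

using LevelPair = std::pair<unsigned, int>; // (dominator level, node id)

struct LevelLess {
  bool operator()(const LevelPair &L, const LevelPair &R) const {
    return L.first < R.first;
  }
};

// Deepest level pops first, mirroring the IDF priority queue above.
using IDFQueue =
    std::priority_queue<LevelPair, llvm::SmallVector<LevelPair, 32>, LevelLess>;

int demo() {
  IDFQueue PQ;
  PQ.push({2, 10});
  PQ.push({5, 20});
  int Id = PQ.top().second; // 20: the deepest node comes out first
  PQ.pop();
  return Id;
}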
Example 13: UnrollLoop
//......... the beginning of this code is omitted .........
/// runtime-unroll the loop if computing RuntimeTripCount will be expensive and
/// AllowExpensiveTripCount is false.
///
/// The LoopInfo Analysis that is passed will be kept consistent.
///
/// This utility preserves LoopInfo. It will also preserve ScalarEvolution and
/// DominatorTree if they are non-null.
bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount, bool Force,
bool AllowRuntime, bool AllowExpensiveTripCount,
unsigned TripMultiple, LoopInfo *LI, ScalarEvolution *SE,
DominatorTree *DT, AssumptionCache *AC,
bool PreserveLCSSA) {
BasicBlock *Preheader = L->getLoopPreheader();
if (!Preheader) {
DEBUG(dbgs() << " Can't unroll; loop preheader-insertion failed.\n");
return false;
}
BasicBlock *LatchBlock = L->getLoopLatch();
if (!LatchBlock) {
DEBUG(dbgs() << " Can't unroll; loop exit-block-insertion failed.\n");
return false;
}
// Loops with indirectbr cannot be cloned.
if (!L->isSafeToClone()) {
DEBUG(dbgs() << " Can't unroll; Loop body cannot be cloned.\n");
return false;
}
BasicBlock *Header = L->getHeader();
BranchInst *BI = dyn_cast<BranchInst>(LatchBlock->getTerminator());
if (!BI || BI->isUnconditional()) {
// The loop-rotate pass can be helpful to avoid this in many cases.
DEBUG(dbgs() <<
" Can't unroll; loop not terminated by a conditional branch.\n");
return false;
}
if (Header->hasAddressTaken()) {
// The loop-rotate pass can be helpful to avoid this in many cases.
DEBUG(dbgs() <<
" Won't unroll loop: address of header block is taken.\n");
return false;
}
if (TripCount != 0)
DEBUG(dbgs() << " Trip Count = " << TripCount << "\n");
if (TripMultiple != 1)
DEBUG(dbgs() << " Trip Multiple = " << TripMultiple << "\n");
// Effectively "DCE" unrolled iterations that are beyond the tripcount
// and will never be executed.
if (TripCount != 0 && Count > TripCount)
Count = TripCount;
// Don't enter the unroll code if there is nothing to do. This way we don't
// need to support "partial unrolling by 1".
if (TripCount == 0 && Count < 2)
return false;
assert(Count > 0);
assert(TripMultiple > 0);
assert(TripCount == 0 || TripCount % TripMultiple == 0);
// Are we eliminating the loop control altogether?
bool CompletelyUnroll = Count == TripCount;
SmallVector<BasicBlock *, 4> ExitBlocks;
L->getExitBlocks(ExitBlocks);
std::vector<BasicBlock*> OriginalLoopBlocks = L->getBlocks();
// Go through all exits of L and see if there are any phi-nodes there. We just
// conservatively assume that they're inserted to preserve LCSSA form, which
// means that complete unrolling might break this form. We need to either fix
// it in-place after the transformation, or entirely rebuild LCSSA. TODO: For
// now we just recompute LCSSA for the outer loop, but it should be possible
// to fix it in-place.
bool NeedToFixLCSSA = PreserveLCSSA && CompletelyUnroll &&
std::any_of(ExitBlocks.begin(), ExitBlocks.end(),
[&](BasicBlock *BB) { return isa<PHINode>(BB->begin()); });
// We assume a run-time trip count if the compiler cannot
// figure out the loop trip count and the unroll-runtime
// flag is specified.
bool RuntimeTripCount = (TripCount == 0 && Count > 0 && AllowRuntime);
// Loops containing convergent instructions must have a count that divides
// their TripMultiple.
DEBUG(
{
bool HasConvergent = false;
for (auto &BB : L->blocks())
for (auto &I : *BB)
if (auto CS = CallSite(&I))
HasConvergent |= CS.isConvergent();
assert((!HasConvergent || TripMultiple % Count == 0) &&
"Unroll count must divide trip multiple if loop contains a "
"convergent operation.");
});
Example 14: LowerStatepoint
//......... the beginning of this code is omitted .........
DAG.getNode(ISD::GC_TRANSITION_START, getCurSDLoc(), NodeTys, TSOps);
Chain = GCTransitionStart.getValue(0);
Glue = GCTransitionStart.getValue(1);
}
// TODO: Currently, all of these operands are being marked as read/write in
// PrologEpilogInserter.cpp; we should special-case the VMState arguments
// and flags to be read-only.
SmallVector<SDValue, 40> Ops;
// Add the <id> and <numBytes> constants.
Ops.push_back(DAG.getTargetConstant(ISP.getID(), getCurSDLoc(), MVT::i64));
Ops.push_back(
DAG.getTargetConstant(ISP.getNumPatchBytes(), getCurSDLoc(), MVT::i32));
// Calculate and push starting position of vmstate arguments
// Get number of arguments incoming directly into call node
unsigned NumCallRegArgs =
CallNode->getNumOperands() - (CallHasIncomingGlue ? 4 : 3);
Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, getCurSDLoc(), MVT::i32));
// Add call target
SDValue CallTarget = SDValue(CallNode->getOperand(1).getNode(), 0);
Ops.push_back(CallTarget);
// Add call arguments
// Get position of register mask in the call
SDNode::op_iterator RegMaskIt;
if (CallHasIncomingGlue)
RegMaskIt = CallNode->op_end() - 2;
else
RegMaskIt = CallNode->op_end() - 1;
Ops.insert(Ops.end(), CallNode->op_begin() + 2, RegMaskIt);
// Add a constant argument for the calling convention
pushStackMapConstant(Ops, *this, CS.getCallingConv());
// Add a constant argument for the flags
uint64_t Flags = ISP.getFlags();
assert(
((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0)
&& "unknown flag used");
pushStackMapConstant(Ops, *this, Flags);
// Insert all vmstate and gcstate arguments
Ops.insert(Ops.end(), LoweredMetaArgs.begin(), LoweredMetaArgs.end());
// Add register mask from call node
Ops.push_back(*RegMaskIt);
// Add chain
Ops.push_back(Chain);
// Same for the glue, but we add it only if original call had it
if (Glue.getNode())
Ops.push_back(Glue);
// Compute return values. Provide a glue output since we consume one as
// input. This allows someone else to chain off us as needed.
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
SDNode *StatepointMCNode =
DAG.getMachineNode(TargetOpcode::STATEPOINT, getCurSDLoc(), NodeTys, Ops);
SDNode *SinkNode = StatepointMCNode;
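Example 14 splices an operand range from another node into Ops with insert(Ops.end(), first, last), which accepts any iterator pair, including another container's begin()/end(). Sketch:

#include "llvm/ADT/SmallVector.h"

void appendRange(llvm::SmallVector<int, 40> &Ops,
                 const int *First, const int *Last) {
  // Bulk-append [First, Last) at the end; equivalent to Ops.append(First, Last).
  Ops.insert(Ops.end(), First, Last);
}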
Example 15: visitSoftInstr
// A soft instruction can be changed to work in other domains given by mask.
void ExeDepsFix::visitSoftInstr(MachineInstr *mi, unsigned mask) {
// Bitmask of available domains for this instruction after taking collapsed
// operands into account.
unsigned available = mask;
// Scan the explicit use operands for incoming domains.
SmallVector<int, 4> used;
if (LiveRegs)
for (unsigned i = mi->getDesc().getNumDefs(),
e = mi->getDesc().getNumOperands(); i != e; ++i) {
MachineOperand &mo = mi->getOperand(i);
if (!mo.isReg()) continue;
for (int rx : regIndices(mo.getReg())) {
DomainValue *dv = LiveRegs[rx].Value;
if (dv == nullptr)
continue;
// Bitmask of domains that dv and available have in common.
unsigned common = dv->getCommonDomains(available);
// Is it possible to use this collapsed register for free?
if (dv->isCollapsed()) {
// Restrict available domains to the ones in common with the operand.
// If there are no common domains, we must pay the cross-domain
// penalty for this operand.
if (common) available = common;
} else if (common)
// Open DomainValue is compatible, save it for merging.
used.push_back(rx);
else
// Open DomainValue is not compatible with instruction. It is useless
// now.
kill(rx);
}
}
// If the collapsed operands force a single domain, propagate the collapse.
if (isPowerOf2_32(available)) {
unsigned domain = countTrailingZeros(available);
TII->setExecutionDomain(*mi, domain);
visitHardInstr(mi, domain);
return;
}
// Kill off any remaining uses that don't match available, and build a list of
// incoming DomainValues that we want to merge.
SmallVector<LiveReg, 4> Regs;
for (SmallVectorImpl<int>::iterator i=used.begin(), e=used.end(); i!=e; ++i) {
int rx = *i;
assert(LiveRegs && "no space allocated for live registers");
const LiveReg &LR = LiveRegs[rx];
// This useless DomainValue could have been missed above.
if (!LR.Value->getCommonDomains(available)) {
kill(rx);
continue;
}
// Sorted insertion.
bool Inserted = false;
for (SmallVectorImpl<LiveReg>::iterator i = Regs.begin(), e = Regs.end();
i != e && !Inserted; ++i) {
if (LR.Def < i->Def) {
Inserted = true;
Regs.insert(i, LR);
}
}
if (!Inserted)
Regs.push_back(LR);
}
// doms are now sorted in order of appearance. Try to merge them all, giving
// priority to the latest ones.
DomainValue *dv = nullptr;
while (!Regs.empty()) {
if (!dv) {
dv = Regs.pop_back_val().Value;
// Force the first dv to match the current instruction.
dv->AvailableDomains = dv->getCommonDomains(available);
assert(dv->AvailableDomains && "Domain should have been filtered");
continue;
}
DomainValue *Latest = Regs.pop_back_val().Value;
// Skip already merged values.
if (Latest == dv || Latest->Next)
continue;
if (merge(dv, Latest))
continue;
// If latest didn't merge, it is useless now. Kill all registers using it.
for (int i : used) {
assert(LiveRegs && "no space allocated for live registers");
if (LiveRegs[i].Value == Latest)
kill(i);
}
}
// dv is the DomainValue we are going to use for this instruction.
if (!dv) {
dv = alloc();
dv->AvailableDomains = available;
}
//......... the rest of this code is omitted .........
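Example 15 keeps Regs sorted by a linear scan followed by insert() at the first larger element. std::lower_bound over begin()/end() expresses the same thing more directly; a sketch of that alternative (sorting plain ints rather than LiveRegs):

#include "llvm/ADT/SmallVector.h"
#include <algorithm>

void insertSorted(llvm::SmallVector<int, 4> &Regs, int Def) {
  // Binary-search the insertion point instead of the linear scan above;
  // SmallVector::insert shifts the tail and keeps the vector sorted.
  auto Pos = std::lower_bound(Regs.begin(), Regs.end(), Def);
  Regs.insert(Pos, Def);
}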