This article collects typical usage examples of the C++ method SmallVectorImpl::end. If you have been wondering what exactly SmallVectorImpl::end does and how to use it in practice, the hand-picked code examples below may help. You can also explore further usage examples of the enclosing class SmallVectorImpl.
The following 15 code examples of SmallVectorImpl::end are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
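Before the examples, a minimal orientation sketch (not taken from any project; the function name is hypothetical): SmallVectorImpl<T> is the size-erased base class of SmallVector<T, N>, and its begin()/end() iterators satisfy the usual random-access requirements, so they compose with STL algorithms exactly as the examples below demonstrate.

#include "llvm/ADT/SmallVector.h"
#include <algorithm>

// Illustrative only: take SmallVectorImpl<int>& so a SmallVector<int, N>
// of any inline size N can be passed in, then use begin()/end() like any
// other STL-compatible range.
static bool containsSorted(llvm::SmallVectorImpl<int> &Values, int Target) {
  std::sort(Values.begin(), Values.end());
  return std::binary_search(Values.begin(), Values.end(), Target);
}

// Usage:
//   llvm::SmallVector<int, 8> V{3, 1, 2};
//   bool Found = containsSorted(V, 2);  // true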
Example 1: getAllConformances
void ConformanceLookupTable::getAllConformances(
       NominalTypeDecl *nominal,
       LazyResolver *resolver,
       bool sorted,
       SmallVectorImpl<ProtocolConformance *> &scratch) {
  // We need to expand and resolve all conformances to enumerate them.
  updateLookupTable(nominal, ConformanceStage::Resolved, resolver);

  // Gather all of the protocols.
  for (const auto &conformance : AllConformances) {
    for (auto entry : conformance.second) {
      if (auto conformance = getConformance(nominal, resolver, entry))
        scratch.push_back(conformance);
    }
  }

  // If requested, sort the results.
  if (sorted) {
    llvm::array_pod_sort(scratch.begin(), scratch.end(),
                         &compareProtocolConformances);
  }
}
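Example 1 sorts with llvm::array_pod_sort (from llvm/ADT/STLExtras.h), a qsort-based sort for trivially copyable elements. Unlike std::sort, its comparator is a function pointer that receives pointers to the elements and returns negative/zero/positive, qsort-style. compareProtocolConformances is internal to Swift, so the sketch below only demonstrates the required comparator shape on a simple element type.

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"

// qsort-style comparator: receives pointers to elements and returns
// negative/zero/positive, unlike std::sort's bool predicate.
static int compareInts(const int *lhs, const int *rhs) {
  return *lhs < *rhs ? -1 : (*lhs > *rhs ? 1 : 0);
}

void sortValues(llvm::SmallVectorImpl<int> &Values) {
  llvm::array_pod_sort(Values.begin(), Values.end(), &compareInts);
}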
Example 2: EraseUnwantedCUDAMatches
void Sema::EraseUnwantedCUDAMatches(
    const FunctionDecl *Caller,
    SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches) {
  if (Matches.size() <= 1)
    return;

  using Pair = std::pair<DeclAccessPair, FunctionDecl*>;

  // Gets the CUDA function preference for a call from Caller to Match.
  auto GetCFP = [&](const Pair &Match) {
    return IdentifyCUDAPreference(Caller, Match.second);
  };

  // Find the best call preference among the functions in Matches.
  CUDAFunctionPreference BestCFP = GetCFP(*std::max_element(
      Matches.begin(), Matches.end(),
      [&](const Pair &M1, const Pair &M2) { return GetCFP(M1) < GetCFP(M2); }));

  // Erase all functions with lower priority.
  llvm::erase_if(Matches,
                 [&](const Pair &Match) { return GetCFP(Match) < BestCFP; });
}
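The shape of Example 2, finding the best score with std::max_element and then dropping everything below it with llvm::erase_if, carries over to any container. A minimal standalone sketch using a plain std::vector and the equivalent erase-remove idiom that llvm::erase_if wraps:

#include <algorithm>
#include <vector>

// Keep only the elements that achieve the maximum score.
void keepBest(std::vector<int> &Scores) {
  if (Scores.size() <= 1)
    return;
  int Best = *std::max_element(Scores.begin(), Scores.end());
  Scores.erase(std::remove_if(Scores.begin(), Scores.end(),
                              [&](int S) { return S < Best; }),
               Scores.end());
}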
Example 3: ComputeValueVTs
void AArch64CallLowering::splitToValueTypes(
    const ArgInfo &OrigArg, SmallVectorImpl<ArgInfo> &SplitArgs,
    const DataLayout &DL, MachineRegisterInfo &MRI,
    const SplitArgTy &PerformArgSplit) const {
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  LLVMContext &Ctx = OrigArg.Ty->getContext();

  SmallVector<EVT, 4> SplitVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs, &Offsets, 0);

  if (SplitVTs.size() == 1) {
    // No splitting to do, but we want to replace the original type (e.g. [1 x
    // double] -> double).
    SplitArgs.emplace_back(OrigArg.Reg, SplitVTs[0].getTypeForEVT(Ctx),
                           OrigArg.Flags, OrigArg.IsFixed);
    return;
  }

  unsigned FirstRegIdx = SplitArgs.size();
  for (auto SplitVT : SplitVTs) {
    // FIXME: set split flags if they're actually used (e.g. i128 on AAPCS).
    Type *SplitTy = SplitVT.getTypeForEVT(Ctx);
    SplitArgs.push_back(
        ArgInfo{MRI.createGenericVirtualRegister(LLT{*SplitTy, DL}), SplitTy,
                OrigArg.Flags, OrigArg.IsFixed});
  }

  SmallVector<uint64_t, 4> BitOffsets;
  for (auto Offset : Offsets)
    BitOffsets.push_back(Offset * 8);

  SmallVector<unsigned, 8> SplitRegs;
  for (auto I = &SplitArgs[FirstRegIdx]; I != SplitArgs.end(); ++I)
    SplitRegs.push_back(I->Reg);

  PerformArgSplit(SplitRegs, BitOffsets);
}
Example 4: getAllMetadataImpl
void Instruction::getAllMetadataImpl(SmallVectorImpl<std::pair<unsigned,
                                     MDNode*> > &Result) const {
  Result.clear();

  // Handle 'dbg' as a special case since it is not stored in the hash table.
  if (!DbgLoc.isUnknown()) {
    Result.push_back(std::make_pair((unsigned)LLVMContext::MD_dbg,
                                    DbgLoc.getAsMDNode(getContext())));
    if (!hasMetadataHashEntry()) return;
  }

  assert(hasMetadataHashEntry() &&
         getContext().pImpl->MetadataStore.count(this) &&
         "Shouldn't have called this");
  const LLVMContextImpl::MDMapTy &Info =
    getContext().pImpl->MetadataStore.find(this)->second;
  assert(!Info.empty() && "Shouldn't have called this");
  Result.append(Info.begin(), Info.end());

  // Sort the resulting array so it is stable.
  if (Result.size() > 1)
    array_pod_sort(Result.begin(), Result.end());
}
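The final array_pod_sort works because std::pair compares lexicographically: entries end up ordered by metadata kind ID, and since LLVMContext::MD_dbg is kind 0, the 'dbg' entry sorts to the front. A small self-contained illustration (the kind/name values here are made up):

#include <algorithm>
#include <utility>
#include <vector>

// std::pair sorts by .first, then .second, so metadata-like entries are
// ordered by their kind ID; kind 0 ("dbg") sorts to the front.
void sortKinds() {
  std::vector<std::pair<unsigned, const char *>> MD = {
      {5, "range"}, {0, "dbg"}, {2, "tbaa"}};
  std::sort(MD.begin(), MD.end());
  // MD is now {0, "dbg"}, {2, "tbaa"}, {5, "range"}.
}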
Example 5: OutputPossibleOverflows
// OutputPossibleOverflows - We've found a possible overflow earlier;
// now check whether Body might contain a comparison which might be
// preventing the overflow.
// This doesn't do flow analysis, range analysis, or points-to analysis; it's
// just a dumb "is there a comparison" scan. The aim here is to
// detect the most blatant cases of overflow and educate the
// programmer.
void MallocOverflowSecurityChecker::OutputPossibleOverflows(
    SmallVectorImpl<MallocOverflowCheck> &PossibleMallocOverflows,
    const Decl *D, BugReporter &BR, AnalysisManager &mgr) const {
  // By far the most common case: nothing to check.
  if (PossibleMallocOverflows.empty())
    return;

  // Delete any possible overflows which have a comparison.
  CheckOverflowOps c(PossibleMallocOverflows, BR.getContext());
  c.Visit(mgr.getAnalysisDeclContext(D)->getBody());

  // Output warnings for all overflows that are left.
  for (CheckOverflowOps::theVecType::iterator
         i = PossibleMallocOverflows.begin(),
         e = PossibleMallocOverflows.end();
       i != e;
       ++i) {
    SourceRange R = i->mulop->getSourceRange();
    BR.EmitBasicReport(D, "malloc() size overflow", categories::UnixAPI,
        "the computation of the size of the memory allocation may overflow",
        PathDiagnosticLocation::createOperatorLoc(i->mulop,
                                                  BR.getSourceManager()),
        &R, 1);
  }
}
Example 6: SharedTypeIds
/// ComputeActionsTable - Compute the actions table and gather the first action
/// index for each landing pad site.
unsigned DwarfException::
ComputeActionsTable(const SmallVectorImpl<const LandingPadInfo*> &LandingPads,
                    SmallVectorImpl<ActionEntry> &Actions,
                    SmallVectorImpl<unsigned> &FirstActions) {
  // The action table follows the call-site table in the LSDA. The individual
  // records are of two types:
  //
  //   * Catch clause
  //   * Exception specification
  //
  // The two record kinds have the same format, with only small differences.
  // They are distinguished by the "switch value" field: Catch clauses
  // (TypeInfos) have strictly positive switch values, and exception
  // specifications (FilterIds) have strictly negative switch values. Value 0
  // indicates a catch-all clause.
  //
  // Negative type IDs index into FilterIds. Positive type IDs index into
  // TypeInfos. The value written for a positive type ID is just the type ID
  // itself. For a negative type ID, however, the value written is the
  // (negative) byte offset of the corresponding FilterIds entry. The byte
  // offset is usually equal to the type ID (because the FilterIds entries are
  // written using a variable width encoding, which outputs one byte per entry
  // as long as the value written is not too large) but can differ. This kind
  // of complication does not occur for positive type IDs because type infos
  // are output using a fixed width encoding. FilterOffsets[i] holds the byte
  // offset corresponding to FilterIds[i].
  const std::vector<unsigned> &FilterIds = MMI->getFilterIds();
  SmallVector<int, 16> FilterOffsets;
  FilterOffsets.reserve(FilterIds.size());
  int Offset = -1;

  for (std::vector<unsigned>::const_iterator
         I = FilterIds.begin(), E = FilterIds.end(); I != E; ++I) {
    FilterOffsets.push_back(Offset);
    Offset -= MCAsmInfo::getULEB128Size(*I);
  }

  FirstActions.reserve(LandingPads.size());

  int FirstAction = 0;
  unsigned SizeActions = 0;
  const LandingPadInfo *PrevLPI = 0;

  for (SmallVectorImpl<const LandingPadInfo *>::const_iterator
         I = LandingPads.begin(), E = LandingPads.end(); I != E; ++I) {
    const LandingPadInfo *LPI = *I;
    const std::vector<int> &TypeIds = LPI->TypeIds;
    unsigned NumShared = PrevLPI ? SharedTypeIds(LPI, PrevLPI) : 0;
    unsigned SizeSiteActions = 0;

    if (NumShared < TypeIds.size()) {
      unsigned SizeAction = 0;
      unsigned PrevAction = (unsigned)-1;

      if (NumShared) {
        unsigned SizePrevIds = PrevLPI->TypeIds.size();
        assert(Actions.size());
        PrevAction = Actions.size() - 1;
        SizeAction =
          MCAsmInfo::getSLEB128Size(Actions[PrevAction].NextAction) +
          MCAsmInfo::getSLEB128Size(Actions[PrevAction].ValueForTypeID);

        for (unsigned j = NumShared; j != SizePrevIds; ++j) {
          assert(PrevAction != (unsigned)-1 && "PrevAction is invalid!");
          SizeAction -=
            MCAsmInfo::getSLEB128Size(Actions[PrevAction].ValueForTypeID);
          SizeAction += -Actions[PrevAction].NextAction;
          PrevAction = Actions[PrevAction].Previous;
        }
      }

      // Compute the actions.
      for (unsigned J = NumShared, M = TypeIds.size(); J != M; ++J) {
        int TypeID = TypeIds[J];
        assert(-1 - TypeID < (int)FilterOffsets.size() && "Unknown filter id!");
        int ValueForTypeID = TypeID < 0 ? FilterOffsets[-1 - TypeID] : TypeID;
        unsigned SizeTypeID = MCAsmInfo::getSLEB128Size(ValueForTypeID);

        int NextAction = SizeAction ? -(SizeAction + SizeTypeID) : 0;
        SizeAction = SizeTypeID + MCAsmInfo::getSLEB128Size(NextAction);
        SizeSiteActions += SizeAction;

        ActionEntry Action = { ValueForTypeID, NextAction, PrevAction };
        Actions.push_back(Action);
        PrevAction = Actions.size() - 1;
      }

      // Record the first action of the landing pad site.
      FirstAction = SizeActions + SizeSiteActions - SizeAction + 1;
    } // else identical - re-use previous FirstAction

    // Information used when creating the call-site table. The action record
    // field of the call site record is the offset of the first associated
    // action record, relative to the start of the actions table. This value
    // is biased by 1 (1 indicating the start of the actions table), and 0
    // indicates that there are no actions.
// ... (the rest of the code is omitted) ...
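The FilterOffsets loop in this example depends on how many bytes each FilterIds entry occupies once ULEB128-encoded. As a hedged, self-contained sketch of that size computation (LLVM's MCAsmInfo::getULEB128Size computes the same quantity; the helper name here is made up):

#include <cstdint>

// Bytes needed to ULEB128-encode a value: 7 payload bits per byte,
// with the high bit of each byte marking continuation.
static unsigned ulebSize(uint64_t Value) {
  unsigned Size = 0;
  do {
    Value >>= 7;
    ++Size;
  } while (Value != 0);
  return Size;
}

// E.g. FilterIds {1, 2, 3} yields FilterOffsets {-1, -2, -3}: starting at
// Offset = -1, each one-byte entry shrinks the offset by ulebSize(id) == 1.
// A large id such as 200 would take two bytes and make the following
// offset jump by two instead, which is the "can differ" case noted above.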
Example 7: populateExternalRelations
static void populateExternalRelations(
    SmallVectorImpl<ExternalRelation> &ExtRelations, const Function &Fn,
    const SmallVectorImpl<Value *> &RetVals, const ReachabilitySet &ReachSet) {
  // If a function only returns one of its argument X, then X will be both an
  // argument and a return value at the same time. This is an edge case that
  // needs special handling here.
  for (const auto &Arg : Fn.args()) {
    if (is_contained(RetVals, &Arg)) {
      auto ArgVal = InterfaceValue{Arg.getArgNo() + 1, 0};
      auto RetVal = InterfaceValue{0, 0};
      ExtRelations.push_back(ExternalRelation{ArgVal, RetVal, 0});
    }
  }

  // Below is the core summary construction logic.
  // A naive solution of adding only the value aliases that are parameters or
  // return values in ReachSet to the summary won't work: It is possible that
  // a parameter P is written into an intermediate value I, and the function
  // subsequently returns *I. In that case, *I does not value-alias anything
  // in ReachSet, and the naive solution will miss a summary edge from (P, 1)
  // to (I, 1).
  // To account for the aforementioned case, we need to check each
  // non-parameter and non-return value for the possibility of acting as an
  // intermediate. 'ValueMap' here records, for each value, which
  // InterfaceValues read from or write into it. If both the read list and
  // the write list of a given value are non-empty, we know that value is an
  // intermediate and we need to add summary edges from the writes to the
  // reads.
  DenseMap<Value *, ValueSummary> ValueMap;
  for (const auto &OuterMapping : ReachSet.value_mappings()) {
    if (auto Dst = getInterfaceValue(OuterMapping.first, RetVals)) {
      for (const auto &InnerMapping : OuterMapping.second) {
        // If Src is a param/return value, we get a same-level assignment.
        if (auto Src = getInterfaceValue(InnerMapping.first, RetVals)) {
          // This may happen if both Dst and Src are return values
          if (*Dst == *Src)
            continue;
          if (hasReadOnlyState(InnerMapping.second))
            ExtRelations.push_back(ExternalRelation{*Dst, *Src, UnknownOffset});
          // No need to check for WriteOnly state, since ReachSet is symmetric
        } else {
          // If Src is not a param/return, add it to ValueMap
          auto SrcIVal = InnerMapping.first;
          if (hasReadOnlyState(InnerMapping.second))
            ValueMap[SrcIVal.Val].FromRecords.push_back(
                ValueSummary::Record{*Dst, SrcIVal.DerefLevel});
          if (hasWriteOnlyState(InnerMapping.second))
            ValueMap[SrcIVal.Val].ToRecords.push_back(
                ValueSummary::Record{*Dst, SrcIVal.DerefLevel});
        }
      }
    }
  }

  for (const auto &Mapping : ValueMap) {
    for (const auto &FromRecord : Mapping.second.FromRecords) {
      for (const auto &ToRecord : Mapping.second.ToRecords) {
        auto ToLevel = ToRecord.DerefLevel;
        auto FromLevel = FromRecord.DerefLevel;
        // Same-level assignments should have already been processed by now
        if (ToLevel == FromLevel)
          continue;

        auto SrcIndex = FromRecord.IValue.Index;
        auto SrcLevel = FromRecord.IValue.DerefLevel;
        auto DstIndex = ToRecord.IValue.Index;
        auto DstLevel = ToRecord.IValue.DerefLevel;
        if (ToLevel > FromLevel)
          SrcLevel += ToLevel - FromLevel;
        else
          DstLevel += FromLevel - ToLevel;

        ExtRelations.push_back(ExternalRelation{
            InterfaceValue{SrcIndex, SrcLevel},
            InterfaceValue{DstIndex, DstLevel}, UnknownOffset});
      }
    }
  }

  // Remove duplicates in ExtRelations
  llvm::sort(ExtRelations.begin(), ExtRelations.end());
  ExtRelations.erase(std::unique(ExtRelations.begin(), ExtRelations.end()),
                     ExtRelations.end());
}
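The last three statements of Example 7 are the classic sort-then-unique deduplication idiom, which the begin()/end() iterator pair makes available to SmallVectorImpl just as it is for std::vector. A minimal sketch:

#include <algorithm>
#include <vector>

void dedup(std::vector<int> &V) {
  std::sort(V.begin(), V.end());            // group duplicates together
  V.erase(std::unique(V.begin(), V.end()),  // shift unique elements forward
          V.end());                         // drop the leftover tail
}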
Example 8: doMerge
bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
                          Module &M, bool isConst, unsigned AddrSpace) const {
  // FIXME: Find better heuristics
  std::stable_sort(Globals.begin(), Globals.end(),
                   [this](const GlobalVariable *GV1, const GlobalVariable *GV2) {
    Type *Ty1 = cast<PointerType>(GV1->getType())->getElementType();
    Type *Ty2 = cast<PointerType>(GV2->getType())->getElementType();
    return (DL->getTypeAllocSize(Ty1) < DL->getTypeAllocSize(Ty2));
  });

  Type *Int32Ty = Type::getInt32Ty(M.getContext());

  assert(Globals.size() > 1);

  // FIXME: This simple solution merges globals together as much as possible.
  // However, with this solution it would be hard to remove dead global
  // symbols at link time. An alternative would be to check global symbol
  // references function by function, merge only the symbols referenced in
  // the same function, and introduce a heuristic algorithm to resolve merge
  // conflicts across different functions.
  for (size_t i = 0, e = Globals.size(); i != e; ) {
    size_t j = 0;
    uint64_t MergedSize = 0;
    std::vector<Type*> Tys;
    std::vector<Constant*> Inits;

    bool HasExternal = false;
    GlobalVariable *TheFirstExternal = 0;
    for (j = i; j != e; ++j) {
      Type *Ty = Globals[j]->getType()->getElementType();
      MergedSize += DL->getTypeAllocSize(Ty);
      if (MergedSize > MaxOffset) {
        break;
      }
      Tys.push_back(Ty);
      Inits.push_back(Globals[j]->getInitializer());

      if (Globals[j]->hasExternalLinkage() && !HasExternal) {
        HasExternal = true;
        TheFirstExternal = Globals[j];
      }
    }

    // If the merged variables don't have external linkage, we don't need to
    // expose the symbol after merging.
    GlobalValue::LinkageTypes Linkage = HasExternal
                                            ? GlobalValue::ExternalLinkage
                                            : GlobalValue::InternalLinkage;

    StructType *MergedTy = StructType::get(M.getContext(), Tys);
    Constant *MergedInit = ConstantStruct::get(MergedTy, Inits);

    // If the merged variables have external linkage, we use the symbol name
    // of the first merged variable as the suffix of the merged global's
    // symbol name. This avoids link-time naming conflicts for global symbols.
    GlobalVariable *MergedGV = new GlobalVariable(
        M, MergedTy, isConst, Linkage, MergedInit,
        HasExternal ? "_MergedGlobals_" + TheFirstExternal->getName()
                    : "_MergedGlobals",
        nullptr, GlobalVariable::NotThreadLocal, AddrSpace);

    for (size_t k = i; k < j; ++k) {
      GlobalValue::LinkageTypes Linkage = Globals[k]->getLinkage();
      std::string Name = Globals[k]->getName();

      Constant *Idx[2] = {
        ConstantInt::get(Int32Ty, 0),
        ConstantInt::get(Int32Ty, k-i)
      };
      Constant *GEP = ConstantExpr::getInBoundsGetElementPtr(MergedGV, Idx);
      Globals[k]->replaceAllUsesWith(GEP);
      Globals[k]->eraseFromParent();

      if (Linkage != GlobalValue::InternalLinkage) {
        // Generate a new alias...
        auto *PTy = cast<PointerType>(GEP->getType());
        GlobalAlias::create(PTy->getElementType(), PTy->getAddressSpace(),
                            Linkage, Name, GEP, &M);
      }

      NumMerged++;
    }
    i = j;
  }

  return true;
}
Example 9: find
bool
LoadAndStorePromoter::isInstInList(Instruction *I,
                                   const SmallVectorImpl<Instruction*> &Insts)
    const {
  return std::find(Insts.begin(), Insts.end(), I) != Insts.end();
}
Example 10: isExitBlock
/// Return true if the specified block is in the list.
static bool isExitBlock(BasicBlock *BB,
                        const SmallVectorImpl<BasicBlock *> &ExitBlocks) {
  return find(ExitBlocks, BB) != ExitBlocks.end();
}
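Examples 9 and 10 are both membership tests; Example 10 already uses llvm::find, the range shorthand from llvm/ADT/STLExtras.h. The same header provides llvm::is_contained, which hides the end() comparison entirely. A hedged equivalent of Example 10:

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/BasicBlock.h"

// Equivalent membership test written with llvm::is_contained.
static bool isExitBlockAlt(
    llvm::BasicBlock *BB,
    const llvm::SmallVectorImpl<llvm::BasicBlock *> &ExitBlocks) {
  return llvm::is_contained(ExitBlocks, BB);
}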
Example 11: lookupInModule
static void lookupInModule(Module *module, Module::AccessPathTy accessPath,
                           SmallVectorImpl<ValueDecl *> &decls,
                           ResolutionKind resolutionKind, bool canReturnEarly,
                           LazyResolver *typeResolver,
                           ModuleLookupCache &cache,
                           const DeclContext *moduleScopeContext,
                           bool respectAccessControl,
                           ArrayRef<Module::ImportedModule> extraImports,
                           CallbackTy callback) {
  assert(module);
  assert(std::none_of(extraImports.begin(), extraImports.end(),
                      [](Module::ImportedModule import) -> bool {
                        return !import.second;
                      }));

  ModuleLookupCache::iterator iter;
  bool isNew;
  std::tie(iter, isNew) = cache.insert({{accessPath, module}, {}});
  if (!isNew) {
    decls.append(iter->second.begin(), iter->second.end());
    return;
  }

  size_t initialCount = decls.size();

  SmallVector<ValueDecl *, 4> localDecls;
  callback(module, accessPath, localDecls);
  if (respectAccessControl) {
    auto newEndIter = std::remove_if(localDecls.begin(), localDecls.end(),
                                     [=](ValueDecl *VD) {
      if (typeResolver) {
        typeResolver->resolveAccessibility(VD);
      }
      if (!VD->hasAccessibility())
        return false;
      return !VD->isAccessibleFrom(moduleScopeContext);
    });
    localDecls.erase(newEndIter, localDecls.end());

    // This only applies to immediate imports of the top-level module.
    if (moduleScopeContext && moduleScopeContext->getParentModule() != module)
      moduleScopeContext = nullptr;
  }

  OverloadSetTy overloads;
  resolutionKind = recordImportDecls(typeResolver, decls, localDecls,
                                     overloads, resolutionKind);

  bool foundDecls = decls.size() > initialCount;
  if (!foundDecls || !canReturnEarly ||
      resolutionKind == ResolutionKind::Overloadable) {
    SmallVector<Module::ImportedModule, 8> reexports;
    module->getImportedModulesForLookup(reexports);
    assert(std::none_of(reexports.begin(), reexports.end(),
                        [](Module::ImportedModule import) -> bool {
                          return !import.second;
                        }));
    reexports.append(extraImports.begin(), extraImports.end());

    // Prefer scoped imports (import func Swift.max) to whole-module imports.
    SmallVector<ValueDecl *, 8> unscopedValues;
    SmallVector<ValueDecl *, 8> scopedValues;
    for (auto next : reexports) {
      // Filter any whole-module imports, and skip specific-decl imports if
      // the import path doesn't match exactly.
      Module::AccessPathTy combinedAccessPath;
      if (accessPath.empty()) {
        combinedAccessPath = next.first;
      } else if (!next.first.empty() &&
                 !Module::isSameAccessPath(next.first, accessPath)) {
        // If we ever allow importing non-top-level decls, it's possible the
        // rule above isn't what we want.
        assert(next.first.size() == 1 && "import of non-top-level decl");
        continue;
      } else {
        combinedAccessPath = accessPath;
      }

      auto &resultSet = next.first.empty() ? unscopedValues : scopedValues;
      lookupInModule<OverloadSetTy>(next.second, combinedAccessPath,
                                    resultSet, resolutionKind, canReturnEarly,
                                    typeResolver, cache, moduleScopeContext,
                                    respectAccessControl, {}, callback);
    }

    // Add the results from scoped imports.
    resolutionKind = recordImportDecls(typeResolver, decls, scopedValues,
                                       overloads, resolutionKind);

    // Add the results from unscoped imports.
    foundDecls = decls.size() > initialCount;
    if (!foundDecls || !canReturnEarly ||
        resolutionKind == ResolutionKind::Overloadable) {
      resolutionKind = recordImportDecls(typeResolver, decls, unscopedValues,
                                         overloads, resolutionKind);
    }
  }

  // Remove duplicated declarations.
  llvm::SmallPtrSet<ValueDecl *, 4> knownDecls;
// ... (the rest of the code is omitted) ...
Example 12: createVectorVariantWrapper
static void createVectorVariantWrapper(llvm::Function *ScalarFunc,
                                       llvm::Function *VectorFunc,
                                       unsigned VLen,
                                       const SmallVectorImpl<ParamInfo> &Info) {
  assert(ScalarFunc->arg_size() == Info.size() &&
         "Wrong number of parameter infos");
  assert((VLen & (VLen - 1)) == 0 && "VLen must be a power-of-2");

  bool IsMasked = VectorFunc->arg_size() == ScalarFunc->arg_size() + 1;

  llvm::LLVMContext &Context = ScalarFunc->getContext();
  llvm::BasicBlock *Entry
    = llvm::BasicBlock::Create(Context, "entry", VectorFunc);
  llvm::BasicBlock *LoopCond
    = llvm::BasicBlock::Create(Context, "loop.cond", VectorFunc);
  llvm::BasicBlock *LoopBody
    = llvm::BasicBlock::Create(Context, "loop.body", VectorFunc);
  llvm::BasicBlock *MaskOn
    = IsMasked ? llvm::BasicBlock::Create(Context, "mask_on", VectorFunc) : 0;
  llvm::BasicBlock *MaskOff
    = IsMasked ? llvm::BasicBlock::Create(Context, "mask_off", VectorFunc) : 0;
  llvm::BasicBlock *LoopStep
    = llvm::BasicBlock::Create(Context, "loop.step", VectorFunc);
  llvm::BasicBlock *LoopEnd
    = llvm::BasicBlock::Create(Context, "loop.end", VectorFunc);

  llvm::Value *VectorRet = 0;
  SmallVector<llvm::Value*, 4> VectorArgs;

  // The loop counter.
  llvm::Type *IndexTy = llvm::Type::getInt32Ty(Context);
  llvm::Value *Index = 0;
  llvm::Value *Mask = 0;

  // Copy the names from the scalar args to the vector args.
  {
    llvm::Function::arg_iterator SI = ScalarFunc->arg_begin(),
                                 SE = ScalarFunc->arg_end(),
                                 VI = VectorFunc->arg_begin();
    for ( ; SI != SE; ++SI, ++VI)
      VI->setName(SI->getName());
    if (IsMasked)
      VI->setName("mask");
  }

  llvm::IRBuilder<> Builder(Entry);
  {
    if (!VectorFunc->getReturnType()->isVoidTy())
      VectorRet = Builder.CreateAlloca(VectorFunc->getReturnType());

    Index = Builder.CreateAlloca(IndexTy, 0, "index");
    Builder.CreateStore(llvm::ConstantInt::get(IndexTy, 0), Index);

    llvm::Function::arg_iterator VI = VectorFunc->arg_begin();
    for (SmallVectorImpl<ParamInfo>::const_iterator I = Info.begin(),
         IE = Info.end(); I != IE; ++I, ++VI) {
      llvm::Value *Arg = VI;
      switch (I->Kind) {
      case PK_Vector:
        assert(Arg->getType()->isVectorTy() && "Not a vector");
        assert(VLen == Arg->getType()->getVectorNumElements() &&
               "Wrong number of elements");
        break;
      case PK_LinearConst:
        Arg = buildLinearArg(Builder, VLen, Arg,
                             cast<llvm::ConstantAsMetadata>(I->Step)->getValue());
        Arg->setName(VI->getName() + ".linear");
        break;
      case PK_Linear: {
        unsigned Number =
          cast<llvm::ConstantInt>(
            cast<llvm::ConstantAsMetadata>(I->Step)->getValue())->getZExtValue();
        llvm::Function::arg_iterator ArgI = VectorFunc->arg_begin();
        std::advance(ArgI, Number);
        llvm::Value *Step = ArgI;
        Arg = buildLinearArg(Builder, VLen, Arg, Step);
        Arg->setName(VI->getName() + ".linear");
      } break;
      case PK_Uniform:
        Arg = Builder.CreateVectorSplat(VLen, Arg);
        Arg->setName(VI->getName() + ".uniform");
        break;
      }
      VectorArgs.push_back(Arg);
    }

    if (IsMasked)
      Mask = buildMask(Builder, VLen, VI);

    Builder.CreateBr(LoopCond);
  }

  Builder.SetInsertPoint(LoopCond);
  {
    llvm::Value *Cond = Builder.CreateICmpULT(
        Builder.CreateLoad(Index), llvm::ConstantInt::get(IndexTy, VLen));
    Builder.CreateCondBr(Cond, LoopBody, LoopEnd);
  }

  llvm::Value *VecIndex = 0;
// ... (the rest of the code is omitted) ...
Example 13: removeShadowedDecls
// ... (the beginning of the code is omitted) ...
        auto secondDecl = collidingDecls.second[secondIdx];
        auto secondModule = secondDecl->getModuleContext();

        // If one declaration is in a protocol or extension thereof and the
        // other is not, prefer the one that is not.
        if ((bool)firstDecl->getDeclContext()
              ->getAsProtocolOrProtocolExtensionContext()
            != (bool)secondDecl->getDeclContext()
                 ->getAsProtocolOrProtocolExtensionContext()) {
          if (firstDecl->getDeclContext()
                ->getAsProtocolOrProtocolExtensionContext()) {
            shadowed.insert(firstDecl);
            break;
          } else {
            shadowed.insert(secondDecl);
            continue;
          }
        }

        // If one declaration is available and the other is not, prefer the
        // available one.
        if (firstDecl->getAttrs().isUnavailable(ctx) !=
            secondDecl->getAttrs().isUnavailable(ctx)) {
          if (firstDecl->getAttrs().isUnavailable(ctx)) {
            shadowed.insert(firstDecl);
            break;
          } else {
            shadowed.insert(secondDecl);
            continue;
          }
        }

        // Don't apply module-shadowing rules to members of protocol types.
        if (isa<ProtocolDecl>(firstDecl->getDeclContext()) ||
            isa<ProtocolDecl>(secondDecl->getDeclContext()))
          continue;

        // Prefer declarations in the current module over those in another
        // module.
        // FIXME: This is a hack. We should query a (lazily-built, cached)
        // module graph to determine shadowing.
        if ((firstModule == curModule) == (secondModule == curModule))
          continue;

        // If the first module is the current module, the second declaration
        // is shadowed by the first.
        if (firstModule == curModule) {
          shadowed.insert(secondDecl);
          continue;
        }

        // Otherwise, the first declaration is shadowed by the second. There
        // is no point in continuing to compare the first declaration to
        // others.
        shadowed.insert(firstDecl);
        break;
      }
    }
  }

  // Check for collisions among Objective-C initializers. When such
  // collisions exist, we pick the best constructor, as determined by
  // compareConstructors below.
  for (const auto &colliding : ObjCCollidingConstructors) {
    if (colliding.second.size() == 1)
      continue;

    // Find the "best" constructor with this signature.
    ConstructorDecl *bestCtor = colliding.second[0];
    for (auto ctor : colliding.second) {
      auto comparison = compareConstructors(ctor, bestCtor, ctx);
      if (comparison == ConstructorComparison::Better)
        bestCtor = ctor;
    }

    // Shadow any initializers that are worse.
    for (auto ctor : colliding.second) {
      auto comparison = compareConstructors(ctor, bestCtor, ctx);
      if (comparison == ConstructorComparison::Worse)
        shadowed.insert(ctor);
    }
  }

  // If none of the declarations were shadowed, we're done.
  if (shadowed.empty())
    return false;

  // Remove shadowed declarations from the list of declarations.
  bool anyRemoved = false;
  decls.erase(std::remove_if(decls.begin(), decls.end(),
                             [&](ValueDecl *vd) {
                               if (shadowed.count(vd) > 0) {
                                 anyRemoved = true;
                                 return true;
                               }
                               return false;
                             }),
              decls.end());

  return anyRemoved;
}
Example 14: doInitialization
bool GlobalMerge::doInitialization(Module &M) {
  if (!EnableGlobalMerge)
    return false;

  auto &DL = M.getDataLayout();
  DenseMap<unsigned, SmallVector<GlobalVariable*, 16> > Globals, ConstGlobals,
                                                        BSSGlobals;
  bool Changed = false;
  setMustKeepGlobalVariables(M);

  // Grab all non-const globals.
  for (Module::global_iterator I = M.global_begin(),
       E = M.global_end(); I != E; ++I) {
    // Merge is safe for "normal" internal or external globals only
    if (I->isDeclaration() || I->isThreadLocal() || I->hasSection())
      continue;

    if (!(EnableGlobalMergeOnExternal && I->hasExternalLinkage()) &&
        !I->hasInternalLinkage())
      continue;

    PointerType *PT = dyn_cast<PointerType>(I->getType());
    assert(PT && "Global variable is not a pointer!");

    unsigned AddressSpace = PT->getAddressSpace();

    // Ignore fancy-aligned globals for now.
    unsigned Alignment = DL.getPreferredAlignment(I);
    Type *Ty = I->getType()->getElementType();
    if (Alignment > DL.getABITypeAlignment(Ty))
      continue;

    // Ignore all 'special' globals.
    if (I->getName().startswith("llvm.") ||
        I->getName().startswith(".llvm."))
      continue;

    // Ignore all "required" globals:
    if (isMustKeepGlobalVariable(I))
      continue;

    if (DL.getTypeAllocSize(Ty) < MaxOffset) {
      if (TargetLoweringObjectFile::getKindForGlobal(I, *TM).isBSSLocal())
        BSSGlobals[AddressSpace].push_back(I);
      else if (I->isConstant())
        ConstGlobals[AddressSpace].push_back(I);
      else
        Globals[AddressSpace].push_back(I);
    }
  }

  for (DenseMap<unsigned, SmallVector<GlobalVariable*, 16> >::iterator
       I = Globals.begin(), E = Globals.end(); I != E; ++I)
    if (I->second.size() > 1)
      Changed |= doMerge(I->second, M, false, I->first);

  for (DenseMap<unsigned, SmallVector<GlobalVariable*, 16> >::iterator
       I = BSSGlobals.begin(), E = BSSGlobals.end(); I != E; ++I)
    if (I->second.size() > 1)
      Changed |= doMerge(I->second, M, false, I->first);

  if (EnableGlobalMergeOnConst)
    for (DenseMap<unsigned, SmallVector<GlobalVariable*, 16> >::iterator
         I = ConstGlobals.begin(), E = ConstGlobals.end(); I != E; ++I)
      if (I->second.size() > 1)
        Changed |= doMerge(I->second, M, true, I->first);

  return Changed;
}
Example 15: Matcher
CheckPredicateMatcher::CheckPredicateMatcher(
    const TreePredicateFn &pred, const SmallVectorImpl<unsigned> &Ops)
  : Matcher(CheckPredicate), Pred(pred.getOrigPatFragRecord()),
    Operands(Ops.begin(), Ops.end()) {}
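In Example 15, Ops.begin() and Ops.end() feed SmallVector's iterator-range constructor, copying the operands into the Operands member directly in the initializer list. A minimal sketch of the same pattern (OperandHolder is a made-up type):

#include "llvm/ADT/SmallVector.h"

// Hypothetical holder: range-constructs its member from any iterator
// pair, e.g. another SmallVectorImpl's begin()/end().
struct OperandHolder {
  llvm::SmallVector<unsigned, 4> Operands;
  explicit OperandHolder(const llvm::SmallVectorImpl<unsigned> &Ops)
      : Operands(Ops.begin(), Ops.end()) {}
};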