This article collects typical usage examples of the C++ method SmallVector::insert. If you are unsure what SmallVector::insert does, how to call it, or what it looks like in real code, the curated examples below may help. You can also browse further usage examples of the containing class, SmallVector.
A total of 15 code examples of SmallVector::insert are shown below, sorted by popularity by default.
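Before the individual examples, here is a minimal sketch of the three insert overloads the snippets below rely on: inserting a single element at a position, inserting N copies of a value, and inserting an iterator range. It is an illustrative stand-alone program, not one of the collected examples, and it assumes the LLVM ADT and Support headers are available on the include path.

#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::SmallVector<int, 8> V;
  V.push_back(1);
  V.push_back(2);
  V.push_back(3);
  // Single-element overload: insert 0 at the front, shifting the rest right.
  V.insert(V.begin(), 0);                        // {0, 1, 2, 3}
  // Fill overload: insert two copies of 9 at the end.
  V.insert(V.end(), 2, 9);                       // {0, 1, 2, 3, 9, 9}
  // Range overload: splice in the contents of another container.
  llvm::SmallVector<int, 4> Extra;
  Extra.push_back(7);
  Extra.push_back(8);
  V.insert(V.end(), Extra.begin(), Extra.end()); // {0, 1, 2, 3, 9, 9, 7, 8}
  for (int X : V)
    llvm::outs() << X << " ";
  llvm::outs() << "\n";
  return 0;
}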
Example 1: canPaddingBeAccessed
/// \brief Checks if the padding bytes of an argument could be accessed.
bool ArgPromotion::canPaddingBeAccessed(Argument *arg) {
assert(arg->hasByValAttr());
// Track all the pointers to the argument to make sure they are not captured.
SmallPtrSet<Value *, 16> PtrValues;
PtrValues.insert(arg);
// Track all of the stores.
SmallVector<StoreInst *, 16> Stores;
// Scan through the uses recursively to make sure the pointer is always used
// sanely.
SmallVector<Value *, 16> WorkList;
WorkList.insert(WorkList.end(), arg->user_begin(), arg->user_end());
while (!WorkList.empty()) {
Value *V = WorkList.back();
WorkList.pop_back();
if (isa<GetElementPtrInst>(V) || isa<PHINode>(V)) {
if (PtrValues.insert(V))
WorkList.insert(WorkList.end(), V->user_begin(), V->user_end());
} else if (StoreInst *Store = dyn_cast<StoreInst>(V)) {
Stores.push_back(Store);
} else if (!isa<LoadInst>(V)) {
return true;
}
}
// Check to make sure the pointers aren't captured
for (StoreInst *Store : Stores)
if (PtrValues.count(Store->getValueOperand()))
return true;
return false;
}
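A small aside on the worklist pattern above: WorkList.insert(WorkList.end(), first, last) is simply a bulk append at the end, and LLVM's SmallVector also exposes this as append(first, last). The sketch below shows the two equivalent spellings; the int element type and function names are illustrative choices, not taken from the original pass.

#include "llvm/ADT/SmallVector.h"

// Bulk-append a range at the end, as canPaddingBeAccessed does above.
void enqueueWithInsert(llvm::SmallVectorImpl<int> &WorkList,
                       const llvm::SmallVectorImpl<int> &Pending) {
  WorkList.insert(WorkList.end(), Pending.begin(), Pending.end());
}

// Equivalent spelling using SmallVector's dedicated append helper.
void enqueueWithAppend(llvm::SmallVectorImpl<int> &WorkList,
                       const llvm::SmallVectorImpl<int> &Pending) {
  WorkList.append(Pending.begin(), Pending.end());
}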
Example 2: Builder
// Get the value we should change this callsite to call instead.
Value *CSDataRando::getCloneCalledValue(CallSite CS, FuncInfo &CalleeInfo) {
if (CalleeInfo.ArgNodes.size() == 0) {
return nullptr;
}
// Find the function type we want based on how many args need to be added. We
// do this in case the original function has been cast to a different type.
FunctionType *FT = CS.getFunctionType();
SmallVector<Type*, 8> Params;
Params.insert(Params.end(), FT->param_begin(), FT->param_end());
Params.insert(Params.end(), CalleeInfo.ArgNodes.size(), MaskTy);
FunctionType *TargetType = FunctionType::get(FT->getReturnType(), Params, FT->isVarArg());
IRBuilder<> Builder(CS.getInstruction());
// Direct call, find the clone and cast it to what we want.
if (Function *F = dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts())) {
Value *Clone = OldToNewFuncMap[F];
if (Clone) {
Clone = Builder.CreateBitCast(Clone, PointerType::getUnqual(TargetType));
}
return Clone;
}
// Indirect calls, cast the called value to the type we want.
Value *CalledValue = CS.getCalledValue();
return Builder.CreateBitCast(CalledValue, PointerType::getUnqual(TargetType));
}
Example 3: removeAttr
AttrListPtr AttrListPtr::removeAttr(unsigned Idx, Attributes Attrs) const {
#ifndef NDEBUG
// FIXME it is not obvious how this should work for alignment.
// For now, say we can't pass in alignment, which no current use does.
assert(!(Attrs & Attribute::Alignment) && "Attempt to exclude alignment!");
#endif
if (AttrList == 0) return AttrListPtr();
Attributes OldAttrs = getAttributes(Idx);
Attributes NewAttrs = OldAttrs & ~Attrs;
if (NewAttrs == OldAttrs)
return *this;
SmallVector<AttributeWithIndex, 8> NewAttrList;
const SmallVector<AttributeWithIndex, 4> &OldAttrList = AttrList->Attrs;
unsigned i = 0, e = OldAttrList.size();
// Copy attributes for arguments before this one.
for (; i != e && OldAttrList[i].Index < Idx; ++i)
NewAttrList.push_back(OldAttrList[i]);
// If there are attributes already at this index, merge them in.
assert(OldAttrList[i].Index == Idx && "Attribute isn't set?");
Attrs = OldAttrList[i].Attrs & ~Attrs;
++i;
if (Attrs) // If any attributes left for this parameter, add them.
NewAttrList.push_back(AttributeWithIndex::get(Idx, Attrs));
// Copy attributes for arguments after this one.
NewAttrList.insert(NewAttrList.end(),
OldAttrList.begin()+i, OldAttrList.end());
return get(NewAttrList.data(), NewAttrList.size());
}
Example 4: MovedFrom
TEST(SmallVectorTest, MidInsert) {
SmallVector<MovedFrom, 3> v;
v.push_back(MovedFrom());
v.insert(v.begin(), MovedFrom());
for (MovedFrom &m : v)
EXPECT_TRUE(m.hasValue);
}
Example 5: TheDriver
/// createInvocationFromCommandLine - Construct a compiler invocation object for
/// a command line argument vector.
///
/// \return A CompilerInvocation, or 0 if none was built for the given
/// argument vector.
CompilerInvocation *
clang::createInvocationFromCommandLine(ArrayRef<const char *> ArgList,
IntrusiveRefCntPtr<DiagnosticsEngine> Diags) {
if (!Diags.getPtr()) {
// No diagnostics engine was provided, so create our own diagnostics object
// with the default options.
Diags = CompilerInstance::createDiagnostics(new DiagnosticOptions,
ArgList.size(),
ArgList.begin());
}
SmallVector<const char *, 16> Args;
Args.push_back("<clang>"); // FIXME: Remove dummy argument.
Args.insert(Args.end(), ArgList.begin(), ArgList.end());
// FIXME: Find a cleaner way to force the driver into restricted modes.
Args.push_back("-fsyntax-only");
// FIXME: We shouldn't have to pass in the path info.
driver::Driver TheDriver("clang", llvm::sys::getDefaultTargetTriple(),
"a.out", *Diags);
// Don't check that inputs exist, they may have been remapped.
TheDriver.setCheckInputsExist(false);
OwningPtr<driver::Compilation> C(TheDriver.BuildCompilation(Args));
// Just print the cc1 options if -### was present.
if (C->getArgs().hasArg(driver::options::OPT__HASH_HASH_HASH)) {
C->PrintJob(llvm::errs(), C->getJobs(), "\n", true);
return 0;
}
// We expect to get back exactly one command job; if we didn't, something
// failed.
const driver::JobList &Jobs = C->getJobs();
if (Jobs.size() != 1 || !isa<driver::Command>(*Jobs.begin())) {
SmallString<256> Msg;
llvm::raw_svector_ostream OS(Msg);
C->PrintJob(OS, C->getJobs(), "; ", true);
Diags->Report(diag::err_fe_expected_compiler_job) << OS.str();
return 0;
}
const driver::Command *Cmd = cast<driver::Command>(*Jobs.begin());
if (StringRef(Cmd->getCreator().getName()) != "clang") {
Diags->Report(diag::err_fe_expected_clang_command);
return 0;
}
const driver::ArgStringList &CCArgs = Cmd->getArguments();
OwningPtr<CompilerInvocation> CI(new CompilerInvocation());
if (!CompilerInvocation::CreateFromArgs(*CI,
const_cast<const char **>(CCArgs.data()),
const_cast<const char **>(CCArgs.data()) +
CCArgs.size(),
*Diags))
return 0;
return CI.take();
}
Example 6: tryEvaluateFunctionsWithArgs
bool DevirtModule::tryEvaluateFunctionsWithArgs(
MutableArrayRef<VirtualCallTarget> TargetsForSlot,
ArrayRef<ConstantInt *> Args) {
// Evaluate each function and store the result in each target's RetVal
// field.
for (VirtualCallTarget &Target : TargetsForSlot) {
if (Target.Fn->arg_size() != Args.size() + 1)
return false;
for (unsigned I = 0; I != Args.size(); ++I)
if (Target.Fn->getFunctionType()->getParamType(I + 1) !=
Args[I]->getType())
return false;
Evaluator Eval(M.getDataLayout(), nullptr);
SmallVector<Constant *, 2> EvalArgs;
EvalArgs.push_back(
Constant::getNullValue(Target.Fn->getFunctionType()->getParamType(0)));
EvalArgs.insert(EvalArgs.end(), Args.begin(), Args.end());
Constant *RetVal;
if (!Eval.EvaluateFunction(Target.Fn, RetVal, EvalArgs) ||
!isa<ConstantInt>(RetVal))
return false;
Target.RetVal = cast<ConstantInt>(RetVal)->getZExtValue();
}
return true;
}
Example 7: TheDriver
std::unique_ptr<CompilerInvocation>
swift::driver::createCompilerInvocation(ArrayRef<const char *> ArgList,
DiagnosticEngine &Diags) {
SmallVector<const char *, 16> Args;
Args.push_back("<swiftc>"); // FIXME: Remove dummy argument.
Args.insert(Args.end(), ArgList.begin(), ArgList.end());
// When creating a CompilerInvocation, ensure that the driver creates a single
// frontend command.
Args.push_back("-force-single-frontend-invocation");
// Force the driver into batch mode by specifying "swiftc" as the name.
Driver TheDriver("swiftc", "swiftc", Args, Diags);
// Don't check for the existence of input files, since the user of the
// CompilerInvocation may wish to remap inputs to source buffers.
TheDriver.setCheckInputFilesExist(false);
std::unique_ptr<Compilation> C = TheDriver.buildCompilation(Args);
if (!C || C->getJobs().empty())
return nullptr; // Don't emit an error; one should already have been emitted
SmallPtrSet<const Job *, 4> CompileCommands;
for (const Job *Cmd : C->getJobs())
if (isa<CompileJobAction>(Cmd->getSource()))
CompileCommands.insert(Cmd);
if (CompileCommands.size() != 1) {
// TODO: include Jobs in the diagnostic.
Diags.diagnose(SourceLoc(), diag::error_expected_one_frontend_job);
return nullptr;
}
const Job *Cmd = *CompileCommands.begin();
if (StringRef("-frontend") != Cmd->getArguments().front()) {
Diags.diagnose(SourceLoc(), diag::error_expected_frontend_command);
return nullptr;
}
std::unique_ptr<CompilerInvocation> Invocation(new CompilerInvocation());
const llvm::opt::ArgStringList &BaseFrontendArgs = Cmd->getArguments();
ArrayRef<const char *> FrontendArgs =
llvm::makeArrayRef(BaseFrontendArgs.data() + 1,
BaseFrontendArgs.data() + BaseFrontendArgs.size());
if (Invocation->parseArgs(FrontendArgs, Diags))
return nullptr; // Don't emit an error; one should already have been emitted
return Invocation;
}
Example 8: main
int main(int argc, char **argv) {
cl::ParseCommandLineOptions(argc, argv, "Module concatenation");
ExitOnError ExitOnErr("llvm-cat: ");
LLVMContext Context;
SmallVector<char, 0> Buffer;
BitcodeWriter Writer(Buffer);
if (BinaryCat) {
for (const auto &InputFilename : InputFilenames) {
std::unique_ptr<MemoryBuffer> MB = ExitOnErr(
errorOrToExpected(MemoryBuffer::getFileOrSTDIN(InputFilename)));
std::vector<BitcodeModule> Mods = ExitOnErr(getBitcodeModuleList(*MB));
for (auto &BitcodeMod : Mods) {
Buffer.insert(Buffer.end(), BitcodeMod.getBuffer().begin(),
BitcodeMod.getBuffer().end());
Writer.copyStrtab(BitcodeMod.getStrtab());
}
}
} else {
// The string table does not own strings added to it, some of which are
// owned by the modules; keep them alive until we write the string table.
std::vector<std::unique_ptr<Module>> OwnedMods;
for (const auto &InputFilename : InputFilenames) {
SMDiagnostic Err;
std::unique_ptr<Module> M = parseIRFile(InputFilename, Err, Context);
if (!M) {
Err.print(argv[0], errs());
return 1;
}
Writer.writeModule(M.get());
OwnedMods.push_back(std::move(M));
}
Writer.writeStrtab();
}
std::error_code EC;
raw_fd_ostream OS(OutputFilename, EC, sys::fs::OpenFlags::F_None);
if (EC) {
errs() << argv[0] << ": cannot open " << OutputFilename << " for writing: "
<< EC.message();
return 1;
}
OS.write(Buffer.data(), Buffer.size());
return 0;
}
Example 9: run
int csabase::run(int argc_, const char **argv_)
{
sys::PrintStackTraceOnErrorSignal();
PrettyStackTraceProgram X(argc_, argv_);
SmallVector<const char *, 1024> argv;
SpecificBumpPtrAllocator<char> ArgAllocator;
StringSetSaver Saver;
sys::Process::GetArgumentVector(argv,
ArrayRef<const char *>(argv_, argc_),
ArgAllocator);
cl::ExpandResponseFiles(Saver, cl::TokenizeGNUCommandLine, argv);
argv.insert(argv.begin() == argv.end() ? argv.begin() : argv.begin() + 1,
"-xc++");
llvm::InitializeNativeTarget();
llvm::InitializeNativeTargetAsmParser();
CompilerInstance Clang;
TextDiagnosticBuffer DiagsBuffer;
IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts(new DiagnosticOptions());
DiagnosticsEngine Diags(DiagID, &*DiagOpts, &DiagsBuffer, false);
bool Success = CompilerInvocation::CreateFromArgs(
Clang.getInvocation(),
argv.data() + 1,
argv.data() + argv.size(),
Diags);
Clang.createDiagnostics();
install_fatal_error_handler(LLVMErrorHandler, &Clang.getDiagnostics());
DiagsBuffer.FlushDiagnostics(Clang.getDiagnostics());
if (Success) {
Success = ExecuteCompilerInvocation(&Clang);
}
remove_fatal_error_handler();
llvm::llvm_shutdown();
return !Success;
}
Example 10: addAttr
AttrListPtr AttrListPtr::addAttr(unsigned Idx, Attributes Attrs) const {
Attributes OldAttrs = getAttributes(Idx);
#ifndef NDEBUG
// FIXME it is not obvious how this should work for alignment.
// For now, say we can't change a known alignment.
Attributes OldAlign = OldAttrs & Attribute::Alignment;
Attributes NewAlign = Attrs & Attribute::Alignment;
assert((!OldAlign || !NewAlign || OldAlign == NewAlign) &&
"Attempt to change alignment!");
#endif
Attributes NewAttrs = OldAttrs | Attrs;
if (NewAttrs == OldAttrs)
return *this;
SmallVector<AttributeWithIndex, 8> NewAttrList;
if (AttrList == 0)
NewAttrList.push_back(AttributeWithIndex::get(Idx, Attrs));
else {
const SmallVector<AttributeWithIndex, 4> &OldAttrList = AttrList->Attrs;
unsigned i = 0, e = OldAttrList.size();
// Copy attributes for arguments before this one.
for (; i != e && OldAttrList[i].Index < Idx; ++i)
NewAttrList.push_back(OldAttrList[i]);
// If there are attributes already at this index, merge them in.
if (i != e && OldAttrList[i].Index == Idx) {
Attrs |= OldAttrList[i].Attrs;
++i;
}
NewAttrList.push_back(AttributeWithIndex::get(Idx, Attrs));
// Copy attributes for arguments after this one.
NewAttrList.insert(NewAttrList.end(),
OldAttrList.begin()+i, OldAttrList.end());
}
return get(NewAttrList.data(), NewAttrList.size());
}
Example 11: isRegion
AstBackEnd::RegionType AstBackEnd::isRegion(BasicBlock &entry, BasicBlock *exit)
{
// LLVM's algorithm for finding regions (as of this early LLVM 3.7 fork) seems over-eager. For instance, with the
// following graph:
//
// 0
// |\
// | 1
// | |
// | 2=<| (where =<| denotes an edge to itself)
// |/
// 3
//
// LLVM thinks that BBs 2 and 3 form a region. After asking for help on the mailing list, it appears that LLVM
// tags it as an "extended region"; that is, a set of nodes that would be a region if we only added one basic block.
// This is not helpful for our purposes.
//
// Since the classical definition of regions applies to edges and edges are second-class citizens in the LLVM graph
// world, we're going to roll with this inefficient-but-working, home-baked definition instead:
//
// A region is an ordered pair (A, B) of nodes, where A dominates, and B postdominates, every node
// traversed in any given iteration order from A to B. Additionally, no path starts after B such that a node of the
// region can be reached again without traversing A.
// This definition means that B is *excluded* from the region, because B could have predecessors that are not
// dominated by A. And I'm okay with it, I like [) ranges. To compensate, nullptr represents the end of a function.
bool cyclic = false;
unordered_set<BasicBlock*> toVisit { &entry };
unordered_set<BasicBlock*> visited { exit };
SmallVector<BasicBlock*, 2> nodeSuccessors;
// Step one: check domination
while (toVisit.size() > 0)
{
auto iter = toVisit.begin();
BasicBlock* bb = *iter;
// We use `exit = nullptr` to denote that the exit is the end of the function, which post-dominates
// every basic block. This is a deviation from the normal LLVM dominator tree behavior, where
// nullptr is considered unreachable (and thus does not dominate or post-dominate anything).
if (!domTree->dominates(&entry, bb) || (exit != nullptr && !postDomTree->dominates(exit, bb)))
{
return NotARegion;
}
toVisit.erase(iter);
visited.insert(bb);
// Only visit region successors. This saves time, and saves us from spuriously declaring that regions are
// cyclic by skipping cycles that have already been identified.
nodeSuccessors.clear();
AstGraphNode* graphNode = grapher->getGraphNodeFromEntry(bb);
if (graphNode->hasExit())
{
nodeSuccessors.push_back(graphNode->getExit());
}
else
{
nodeSuccessors.insert(nodeSuccessors.end(), succ_begin(bb), succ_end(bb));
}
for (BasicBlock* succ : nodeSuccessors)
{
if (visited.count(succ) == 0)
{
toVisit.insert(succ);
}
else if (succ == &entry)
{
cyclic = true;
}
}
}
// Step two: check that no path starting after the exit goes back into the region without first going through the
// entry.
unordered_set<BasicBlock*> regionMembers;
regionMembers.swap(visited);
regionMembers.erase(exit);
if (exit != nullptr)
{
toVisit.insert(succ_begin(exit), succ_end(exit));
}
visited.insert(&entry);
while (toVisit.size() > 0)
{
auto iter = toVisit.begin();
BasicBlock* bb = *iter;
if (regionMembers.count(bb) != 0)
{
return NotARegion;
}
toVisit.erase(iter);
visited.insert(bb);
for (BasicBlock* succ : successors(bb))
{
if (visited.count(succ) == 0)
//......... part of the code omitted here .........
Example 12: print
void print(raw_ostream &OS, FuncIdConversionHelper &FN,
RootVector RootValues) {
// Go through each of the roots, and traverse the call stack, producing the
// aggregates as you go along. Remember these aggregates and stacks, and
// show summary statistics about:
//
// - Total number of unique stacks
// - Top 10 stacks by count
// - Top 10 stacks by aggregate duration
SmallVector<std::pair<const StackTrieNode *, uint64_t>, 11>
TopStacksByCount;
SmallVector<std::pair<const StackTrieNode *, uint64_t>, 11> TopStacksBySum;
auto greater_second =
[](const std::pair<const StackTrieNode *, uint64_t> &A,
const std::pair<const StackTrieNode *, uint64_t> &B) {
return A.second > B.second;
};
uint64_t UniqueStacks = 0;
for (const auto *N : RootValues) {
SmallVector<const StackTrieNode *, 16> S;
S.emplace_back(N);
while (!S.empty()) {
auto *Top = S.pop_back_val();
// We only start printing the stack (by walking up the parent pointers)
// when we get to a leaf function.
if (!Top->ExtraData.TerminalDurations.empty()) {
++UniqueStacks;
auto TopSum =
std::accumulate(Top->ExtraData.TerminalDurations.begin(),
Top->ExtraData.TerminalDurations.end(), 0uLL);
{
auto E = std::make_pair(Top, TopSum);
TopStacksBySum.insert(std::lower_bound(TopStacksBySum.begin(),
TopStacksBySum.end(), E,
greater_second),
E);
if (TopStacksBySum.size() == 11)
TopStacksBySum.pop_back();
}
{
auto E =
std::make_pair(Top, Top->ExtraData.TerminalDurations.size());
TopStacksByCount.insert(std::lower_bound(TopStacksByCount.begin(),
TopStacksByCount.end(), E,
greater_second),
E);
if (TopStacksByCount.size() == 11)
TopStacksByCount.pop_back();
}
}
for (const auto *C : Top->Callees)
S.push_back(C);
}
}
// Now print the statistics in the end.
OS << "\n";
OS << "Unique Stacks: " << UniqueStacks << "\n";
OS << "Top 10 Stacks by leaf sum:\n\n";
for (const auto &P : TopStacksBySum) {
OS << "Sum: " << P.second << "\n";
printStack(OS, P.first, FN);
}
OS << "\n";
OS << "Top 10 Stacks by leaf count:\n\n";
for (const auto &P : TopStacksByCount) {
OS << "Count: " << P.second << "\n";
printStack(OS, P.first, FN);
}
OS << "\n";
}
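One detail of Example 12 worth isolating: TopStacksBySum and TopStacksByCount are kept sorted in descending order by inserting each new pair at the position returned by std::lower_bound with a greater-than comparator, then trimming back to ten entries. A stripped-down sketch of that "keep the top N" idiom follows; the uint64_t element type, the function name, and the default N are illustrative choices, not taken from the original code.

#include <algorithm>
#include <cstdint>
#include "llvm/ADT/SmallVector.h"

// Record Value, keeping at most N entries sorted from largest to smallest.
void recordTopN(llvm::SmallVectorImpl<uint64_t> &Top, uint64_t Value,
                size_t N = 10) {
  auto Greater = [](uint64_t A, uint64_t B) { return A > B; };
  // With a descending comparator, lower_bound yields the insertion point that
  // keeps the vector sorted from largest to smallest.
  Top.insert(std::lower_bound(Top.begin(), Top.end(), Value, Greater), Value);
  // Drop the smallest entry once the budget is exceeded.
  if (Top.size() > N)
    Top.pop_back();
}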
Example 13: LowerAsSTATEPOINT
//......... part of the code omitted here .........
DAG.getNode(ISD::GC_TRANSITION_START, getCurSDLoc(), NodeTys, TSOps);
Chain = GCTransitionStart.getValue(0);
Glue = GCTransitionStart.getValue(1);
}
// TODO: Currently, all of these operands are being marked as read/write in
// PrologEpilogInserter.cpp; we should special-case the VMState arguments
// and flags to be read-only.
SmallVector<SDValue, 40> Ops;
// Add the <id> and <numBytes> constants.
Ops.push_back(DAG.getTargetConstant(SI.ID, getCurSDLoc(), MVT::i64));
Ops.push_back(
DAG.getTargetConstant(SI.NumPatchBytes, getCurSDLoc(), MVT::i32));
// Calculate and push starting position of vmstate arguments
// Get number of arguments incoming directly into call node
unsigned NumCallRegArgs =
CallNode->getNumOperands() - (CallHasIncomingGlue ? 4 : 3);
Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, getCurSDLoc(), MVT::i32));
// Add call target
SDValue CallTarget = SDValue(CallNode->getOperand(1).getNode(), 0);
Ops.push_back(CallTarget);
// Add call arguments
// Get position of register mask in the call
SDNode::op_iterator RegMaskIt;
if (CallHasIncomingGlue)
RegMaskIt = CallNode->op_end() - 2;
else
RegMaskIt = CallNode->op_end() - 1;
Ops.insert(Ops.end(), CallNode->op_begin() + 2, RegMaskIt);
// Add a constant argument for the calling convention
pushStackMapConstant(Ops, *this, SI.CLI.CallConv);
// Add a constant argument for the flags
uint64_t Flags = SI.StatepointFlags;
assert(((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0) &&
"Unknown flag used");
pushStackMapConstant(Ops, *this, Flags);
// Insert all vmstate and gcstate arguments
Ops.insert(Ops.end(), LoweredMetaArgs.begin(), LoweredMetaArgs.end());
// Add register mask from call node
Ops.push_back(*RegMaskIt);
// Add chain
Ops.push_back(Chain);
// Same for the glue, but we add it only if original call had it
if (Glue.getNode())
Ops.push_back(Glue);
// Compute return values. Provide a glue output since we consume one as
// input. This allows someone else to chain off us as needed.
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
SDNode *StatepointMCNode =
DAG.getMachineNode(TargetOpcode::STATEPOINT, getCurSDLoc(), NodeTys, Ops);
SDNode *SinkNode = StatepointMCNode;
Example 14: runOnFunction
bool AllocaArraysMerging::runOnFunction(Function& F)
{
class ArraysToMerge
{
private:
std::map<AllocaInst*, uint32_t> arraysToMerge;
uint32_t currentOffset;
public:
ArraysToMerge():currentOffset(0)
{
}
bool empty() const
{
return arraysToMerge.empty();
}
std::map<AllocaInst*, uint32_t>::iterator begin()
{
return arraysToMerge.begin();
}
std::map<AllocaInst*, uint32_t>::iterator end()
{
return arraysToMerge.end();
}
void add(AllocaInst* a)
{
arraysToMerge.insert(std::make_pair(a, currentOffset));
currentOffset+=cast<ArrayType>(a->getAllocatedType())->getNumElements();
}
uint32_t getNewSize() const
{
return currentOffset;
}
};
cheerp::PointerAnalyzer & PA = getAnalysis<cheerp::PointerAnalyzer>();
cheerp::Registerize & registerize = getAnalysis<cheerp::Registerize>();
cheerp::GlobalDepsAnalyzer & GDA = getAnalysis<cheerp::GlobalDepsAnalyzer>();
std::list<std::pair<AllocaInst*, cheerp::Registerize::LiveRange>> allocaInfos;
// Gather all the allocas
for(BasicBlock& BB: F)
analyzeBlock(registerize, BB, allocaInfos);
if (allocaInfos.size() < 2)
return false;
bool Changed = false;
// We can also try to merge arrays of the same type, if only pointers to values are passed around
while(!allocaInfos.empty())
{
// Build a map of arrays to be merged and their offset into the new array
ArraysToMerge arraysToMerge;
auto targetCandidate = allocaInfos.begin();
AllocaInst* targetAlloca = targetCandidate->first;
if(!targetAlloca->getAllocatedType()->isArrayTy() ||
// Check target uses
!checkUsesForArrayMerging(targetAlloca))
{
allocaInfos.erase(targetCandidate);
continue;
}
Type* targetElementType = targetAlloca->getAllocatedType()->getSequentialElementType();
auto sourceCandidate=targetCandidate;
++sourceCandidate;
// Now that we have computed the sourceCandidate we can invalidate the targetCandidate
allocaInfos.erase(targetCandidate);
while(sourceCandidate!=allocaInfos.end())
{
AllocaInst* sourceAlloca = sourceCandidate->first;
// Check that allocas are arrays of the same type
if(!sourceAlloca->getAllocatedType()->isArrayTy())
{
++sourceCandidate;
continue;
}
// Both are arrays, check the types
if(targetElementType != sourceAlloca->getAllocatedType()->getSequentialElementType())
{
++sourceCandidate;
continue;
}
// Verify that the source candidate has supported uses
if(!checkUsesForArrayMerging(sourceAlloca))
{
++sourceCandidate;
continue;
}
// We can merge the source and the target
// If the set is empty add the target as well
if(arraysToMerge.empty())
arraysToMerge.add(targetAlloca);
arraysToMerge.add(sourceAlloca);
auto oldCandidate = sourceCandidate;
++sourceCandidate;
// Now that we have moved to the next candidate, we can invalidate the old one
allocaInfos.erase(oldCandidate);
}
// If we have a non-empty set of allocas, merge them
if (arraysToMerge.empty())
continue;
if(!Changed)
registerize.invalidateLiveRangeForAllocas(F);
//......... part of the code omitted here .........
Example 15: CS(
/// DoPromotion - This method actually performs the promotion of the specified
/// arguments, and returns the new function. At this point, we know that it's
/// safe to do so.
static Function *
doPromotion(Function *F, SmallPtrSetImpl<Argument *> &ArgsToPromote,
SmallPtrSetImpl<Argument *> &ByValArgsToTransform,
Optional<function_ref<void(CallSite OldCS, CallSite NewCS)>>
ReplaceCallSite) {
// Start by computing a new prototype for the function, which is the same as
// the old function, but has modified arguments.
FunctionType *FTy = F->getFunctionType();
std::vector<Type *> Params;
using ScalarizeTable = std::set<std::pair<Type *, IndicesVector>>;
// ScalarizedElements - If we are promoting a pointer that has elements
// accessed out of it, keep track of which elements are accessed so that we
// can add one argument for each.
//
// Arguments that are directly loaded will have a zero element value here, to
// handle cases where there are both a direct load and GEP accesses.
std::map<Argument *, ScalarizeTable> ScalarizedElements;
// OriginalLoads - Keep track of a representative load instruction from the
// original function so that we can tell the alias analysis implementation
// what the new GEP/Load instructions we are inserting look like.
// We need to keep the original loads for each argument and the elements
// of the argument that are accessed.
std::map<std::pair<Argument *, IndicesVector>, LoadInst *> OriginalLoads;
// Attribute - Keep track of the parameter attributes for the arguments
// that we are *not* promoting. For the ones that we do promote, the parameter
// attributes are lost
SmallVector<AttributeSet, 8> ArgAttrVec;
AttributeList PAL = F->getAttributes();
// First, determine the new argument list
unsigned ArgNo = 0;
for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E;
++I, ++ArgNo) {
if (ByValArgsToTransform.count(&*I)) {
// Simple byval argument? Just add all the struct element types.
Type *AgTy = cast<PointerType>(I->getType())->getElementType();
StructType *STy = cast<StructType>(AgTy);
Params.insert(Params.end(), STy->element_begin(), STy->element_end());
ArgAttrVec.insert(ArgAttrVec.end(), STy->getNumElements(),
AttributeSet());
++NumByValArgsPromoted;
} else if (!ArgsToPromote.count(&*I)) {
// Unchanged argument
Params.push_back(I->getType());
ArgAttrVec.push_back(PAL.getParamAttributes(ArgNo));
} else if (I->use_empty()) {
// Dead argument (which are always marked as promotable)
++NumArgumentsDead;
// There may be remaining metadata uses of the argument for things like
// llvm.dbg.value. Replace them with undef.
I->replaceAllUsesWith(UndefValue::get(I->getType()));
} else {
// Okay, this is being promoted. This means that the only uses are loads
// or GEPs which are only used by loads
// In this table, we will track which indices are loaded from the argument
// (where direct loads are tracked as no indices).
ScalarizeTable &ArgIndices = ScalarizedElements[&*I];
for (User *U : I->users()) {
Instruction *UI = cast<Instruction>(U);
Type *SrcTy;
if (LoadInst *L = dyn_cast<LoadInst>(UI))
SrcTy = L->getType();
else
SrcTy = cast<GetElementPtrInst>(UI)->getSourceElementType();
IndicesVector Indices;
Indices.reserve(UI->getNumOperands() - 1);
// Since loads will only have a single operand, and GEPs only a single
// non-index operand, this will record direct loads without any indices,
// and gep+loads with the GEP indices.
for (User::op_iterator II = UI->op_begin() + 1, IE = UI->op_end();
II != IE; ++II)
Indices.push_back(cast<ConstantInt>(*II)->getSExtValue());
// GEPs with a single 0 index can be merged with direct loads
if (Indices.size() == 1 && Indices.front() == 0)
Indices.clear();
ArgIndices.insert(std::make_pair(SrcTy, Indices));
LoadInst *OrigLoad;
if (LoadInst *L = dyn_cast<LoadInst>(UI))
OrigLoad = L;
else
// Take any load, we will use it only to update Alias Analysis
OrigLoad = cast<LoadInst>(UI->user_back());
OriginalLoads[std::make_pair(&*I, Indices)] = OrigLoad;
}
// Add a parameter to the function for each element passed in.
for (const auto &ArgIndex : ArgIndices) {
// not allowed to dereference ->begin() if size() is 0
Params.push_back(GetElementPtrInst::getIndexedType(
cast<PointerType>(I->getType()->getScalarType())->getElementType(),
ArgIndex.second));
//......... part of the code omitted here .........