This article collects typical usage examples of the C++ method LoadInst::eraseFromParent. If you are unsure what LoadInst::eraseFromParent does, how to call it, or want to see it used in context, the curated code samples below should help. You can also read further about the enclosing class, LoadInst.
The following 12 code examples of LoadInst::eraseFromParent are shown, sorted by popularity by default.
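Before the examples, here is a minimal sketch of the pattern almost all of them share: reroute every user of the load with replaceAllUsesWith() and only then call eraseFromParent(), which unlinks the instruction from its basic block and deletes it. The helper name replaceLoadsWithValue and its parameters are hypothetical illustrations; only the LLVM API calls themselves are real.
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;
// Hypothetical helper: replace every simple load of Ptr in F with the
// already-known value ReplVal, then erase the now-dead LoadInst.
static void replaceLoadsWithValue(Function &F, Value *Ptr, Value *ReplVal) {
  for (BasicBlock &BB : F) {
    for (auto It = BB.begin(); It != BB.end();) {
      Instruction *I = &*It++;                 // advance first; I may be erased
      auto *LI = dyn_cast<LoadInst>(I);
      if (!LI || LI->getPointerOperand() != Ptr || !LI->isSimple())
        continue;
      LI->replaceAllUsesWith(ReplVal);         // reroute all users to ReplVal
      LI->eraseFromParent();                   // unlink and delete the load
    }
  }
}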
Example 1: assert
/// RewriteSingleStoreAlloca - If there is only a single store to this value,
/// replace any loads of it that are directly dominated by the definition with
/// the value stored.
void PromoteMem2Reg::RewriteSingleStoreAlloca(AllocaInst *AI,
AllocaInfo &Info,
LargeBlockInfo &LBI) {
StoreInst *OnlyStore = Info.OnlyStore;
bool StoringGlobalVal = !isa<Instruction>(OnlyStore->getOperand(0));
BasicBlock *StoreBB = OnlyStore->getParent();
int StoreIndex = -1;
// Clear out UsingBlocks. We will reconstruct it here if needed.
Info.UsingBlocks.clear();
for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end(); UI != E; ) {
Instruction *UserInst = cast<Instruction>(*UI++);
if (!isa<LoadInst>(UserInst)) {
assert(UserInst == OnlyStore && "Should only have load/stores");
continue;
}
LoadInst *LI = cast<LoadInst>(UserInst);
// Okay, if we have a load from the alloca, we want to replace it with the
// only value stored to the alloca. We can do this if the value is
// dominated by the store. If not, we use the rest of the mem2reg machinery
// to insert the phi nodes as needed.
if (!StoringGlobalVal) { // Non-instructions are always dominated.
if (LI->getParent() == StoreBB) {
// If we have a use that is in the same block as the store, compare the
// indices of the two instructions to see which one came first. If the
// load came before the store, we can't handle it.
if (StoreIndex == -1)
StoreIndex = LBI.getInstructionIndex(OnlyStore);
if (unsigned(StoreIndex) > LBI.getInstructionIndex(LI)) {
// Can't handle this load, bail out.
Info.UsingBlocks.push_back(StoreBB);
continue;
}
} else if (LI->getParent() != StoreBB &&
!dominates(StoreBB, LI->getParent())) {
// If the load and store are in different blocks, use BB dominance to
// check their relationships. If the store doesn't dom the use, bail
// out.
Info.UsingBlocks.push_back(LI->getParent());
continue;
}
}
// Otherwise, we *can* safely rewrite this load.
Value *ReplVal = OnlyStore->getOperand(0);
// If the replacement value is the load, this must occur in unreachable
// code.
if (ReplVal == LI)
ReplVal = UndefValue::get(LI->getType());
LI->replaceAllUsesWith(ReplVal);
if (AST && LI->getType()->isPointerTy())
AST->deleteValue(LI);
LI->eraseFromParent();
LBI.deleteValue(LI);
}
}
Example 2: visitLoadInst
bool AMDGPUCodeGenPrepare::visitLoadInst(LoadInst &I) {
if (!WidenLoads)
return false;
if ((I.getPointerAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS ||
I.getPointerAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS_32BIT) &&
canWidenScalarExtLoad(I)) {
IRBuilder<> Builder(&I);
Builder.SetCurrentDebugLocation(I.getDebugLoc());
Type *I32Ty = Builder.getInt32Ty();
Type *PT = PointerType::get(I32Ty, I.getPointerAddressSpace());
Value *BitCast = Builder.CreateBitCast(I.getPointerOperand(), PT);
LoadInst *WidenLoad = Builder.CreateLoad(BitCast);
WidenLoad->copyMetadata(I);
// If we have range metadata, we need to convert the type, and not make
// assumptions about the high bits.
if (auto *Range = WidenLoad->getMetadata(LLVMContext::MD_range)) {
ConstantInt *Lower =
mdconst::extract<ConstantInt>(Range->getOperand(0));
if (Lower->getValue().isNullValue()) {
WidenLoad->setMetadata(LLVMContext::MD_range, nullptr);
} else {
Metadata *LowAndHigh[] = {
ConstantAsMetadata::get(ConstantInt::get(I32Ty, Lower->getValue().zext(32))),
// Don't make assumptions about the high bits.
ConstantAsMetadata::get(ConstantInt::get(I32Ty, 0))
};
WidenLoad->setMetadata(LLVMContext::MD_range,
MDNode::get(Mod->getContext(), LowAndHigh));
}
}
int TySize = Mod->getDataLayout().getTypeSizeInBits(I.getType());
Type *IntNTy = Builder.getIntNTy(TySize);
Value *ValTrunc = Builder.CreateTrunc(WidenLoad, IntNTy);
Value *ValOrig = Builder.CreateBitCast(ValTrunc, I.getType());
I.replaceAllUsesWith(ValOrig);
I.eraseFromParent();
return true;
}
return false;
}
Example 3:
/// GetExceptionObject - Return the exception object from the value passed into
/// the 'resume' instruction (typically an aggregate). Clean up any dead
/// instructions, including the 'resume' instruction.
Value *DwarfEHPrepare::GetExceptionObject(ResumeInst *RI) {
Value *V = RI->getOperand(0);
Value *ExnObj = 0;
InsertValueInst *SelIVI = dyn_cast<InsertValueInst>(V);
LoadInst *SelLoad = 0;
InsertValueInst *ExcIVI = 0;
bool EraseIVIs = false;
if (SelIVI) {
if (SelIVI->getNumIndices() == 1 && *SelIVI->idx_begin() == 1) {
ExcIVI = dyn_cast<InsertValueInst>(SelIVI->getOperand(0));
if (ExcIVI && isa<UndefValue>(ExcIVI->getOperand(0)) &&
ExcIVI->getNumIndices() == 1 && *ExcIVI->idx_begin() == 0) {
ExnObj = ExcIVI->getOperand(1);
SelLoad = dyn_cast<LoadInst>(SelIVI->getOperand(1));
EraseIVIs = true;
}
}
}
if (!ExnObj)
ExnObj = ExtractValueInst::Create(RI->getOperand(0), 0, "exn.obj", RI);
RI->eraseFromParent();
if (EraseIVIs) {
if (SelIVI->getNumUses() == 0)
SelIVI->eraseFromParent();
if (ExcIVI->getNumUses() == 0)
ExcIVI->eraseFromParent();
if (SelLoad && SelLoad->getNumUses() == 0)
SelLoad->eraseFromParent();
}
return ExnObj;
}
Example 4: StoreIndexSearchPredicate
/// PromoteSingleBlockAlloca - Many allocas are only used within a single basic
/// block. If this is the case, avoid traversing the CFG and inserting a lot of
/// potentially useless PHI nodes by just performing a single linear pass over
/// the basic block using the Alloca.
///
/// If we cannot promote this alloca (because it is read before it is written),
/// return true. This is necessary in cases where, due to control flow, the
/// alloca is potentially undefined on some control flow paths. e.g. code like
/// this is potentially correct:
///
/// for (...) { if (c) { A = undef; undef = B; } }
///
/// ... so long as A is not used before undef is set.
///
void PromoteMem2Reg::PromoteSingleBlockAlloca(AllocaInst *AI, AllocaInfo &Info,
LargeBlockInfo &LBI) {
// The trickiest case to handle is when we have large blocks. Because of this,
// this code is optimized assuming that large blocks happen. This does not
// significantly pessimize the small block case. This uses LargeBlockInfo to
// make it efficient to get the index of various operations in the block.
// Clear out UsingBlocks. We will reconstruct it here if needed.
Info.UsingBlocks.clear();
// Walk the use-def list of the alloca, getting the locations of all stores.
typedef SmallVector<std::pair<unsigned, StoreInst*>, 64> StoresByIndexTy;
StoresByIndexTy StoresByIndex;
for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
UI != E; ++UI)
if (StoreInst *SI = dyn_cast<StoreInst>(*UI))
StoresByIndex.push_back(std::make_pair(LBI.getInstructionIndex(SI), SI));
// If there are no stores to the alloca, just replace any loads with undef.
if (StoresByIndex.empty()) {
for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end(); UI != E;)
if (LoadInst *LI = dyn_cast<LoadInst>(*UI++)) {
LI->replaceAllUsesWith(UndefValue::get(LI->getType()));
if (AST && LI->getType()->isPointerTy())
AST->deleteValue(LI);
LBI.deleteValue(LI);
LI->eraseFromParent();
}
return;
}
// Sort the stores by their index, making it efficient to do a lookup with a
// binary search.
std::sort(StoresByIndex.begin(), StoresByIndex.end());
// Walk all of the loads from this alloca, replacing them with the nearest
// store above them, if any.
for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end(); UI != E;) {
LoadInst *LI = dyn_cast<LoadInst>(*UI++);
if (!LI) continue;
unsigned LoadIdx = LBI.getInstructionIndex(LI);
// Find the nearest store that has a lower index than this load.
StoresByIndexTy::iterator I =
std::lower_bound(StoresByIndex.begin(), StoresByIndex.end(),
std::pair<unsigned, StoreInst*>(LoadIdx, static_cast<StoreInst*>(0)),
StoreIndexSearchPredicate());
// If there is no store before this load, then we can't promote this load.
if (I == StoresByIndex.begin()) {
// Can't handle this load, bail out.
Info.UsingBlocks.push_back(LI->getParent());
continue;
}
// Otherwise, there was a store before this load, the load takes its value.
--I;
LI->replaceAllUsesWith(I->second->getOperand(0));
if (AST && LI->getType()->isPointerTy())
AST->deleteValue(LI);
LI->eraseFromParent();
LBI.deleteValue(LI);
}
}
Example 5: PromoteAliasSet
//......... some code omitted here .........
// different sizes.
for (AliasSet::iterator ASI = AS.begin(), E = AS.end(); ASI != E; ++ASI) {
Value *ASIV = ASI->getValue();
PointerMustAliases.insert(ASIV);
// Check that all of the pointers in the alias set have the same type. We
// cannot (yet) promote a memory location that is loaded and stored in
// different sizes.
if (SomePtr->getType() != ASIV->getType())
return;
for (Value::use_iterator UI = ASIV->use_begin(), UE = ASIV->use_end();
UI != UE; ++UI) {
// Ignore instructions that are outside the loop.
Instruction *Use = dyn_cast<Instruction>(*UI);
if (!Use || !CurLoop->contains(Use))
continue;
// If there is a non-load/store instruction in the loop, we can't promote
// it.
if (LoadInst *load = dyn_cast<LoadInst>(Use)) {
assert(!load->isVolatile() && "AST broken");
if (!load->isSimple())
return;
} else if (StoreInst *store = dyn_cast<StoreInst>(Use)) {
// Stores *of* the pointer are not interesting, only stores *to* the
// pointer.
if (Use->getOperand(1) != ASIV)
continue;
assert(!store->isVolatile() && "AST broken");
if (!store->isSimple())
return;
// Note that we only check GuaranteedToExecute inside the store case
// so that we do not introduce stores where they did not exist before
// (which would break the LLVM concurrency model).
// If the alignment of this instruction allows us to specify a more
// restrictive (and performant) alignment and if we are sure this
// instruction will be executed, update the alignment.
// Larger is better, with the exception of 0 being the best alignment.
unsigned InstAlignment = store->getAlignment();
if ((InstAlignment > Alignment || InstAlignment == 0)
&& (Alignment != 0))
if (isGuaranteedToExecute(*Use)) {
GuaranteedToExecute = true;
Alignment = InstAlignment;
}
if (!GuaranteedToExecute)
GuaranteedToExecute = isGuaranteedToExecute(*Use);
} else
return; // Not a load or store.
LoopUses.push_back(Use);
}
}
// If there isn't a guaranteed-to-execute instruction, we can't promote.
if (!GuaranteedToExecute)
return;
// Otherwise, this is safe to promote, lets do it!
DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " <<*SomePtr<<'\n');
Changed = true;
++NumPromoted;
// Grab a debug location for the inserted loads/stores; given that the
// inserted loads/stores have little relation to the original loads/stores,
// this code just arbitrarily picks a location from one, since any debug
// location is better than none.
DebugLoc DL = LoopUses[0]->getDebugLoc();
SmallVector<BasicBlock*, 8> ExitBlocks;
CurLoop->getUniqueExitBlocks(ExitBlocks);
// We use the SSAUpdater interface to insert phi nodes as required.
SmallVector<PHINode*, 16> NewPHIs;
SSAUpdater SSA(&NewPHIs);
LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases, ExitBlocks,
*CurAST, DL, Alignment);
// Set up the preheader to have a definition of the value. It is the live-out
// value from the preheader that uses in the loop will use.
LoadInst *PreheaderLoad =
new LoadInst(SomePtr, SomePtr->getName()+".promoted",
Preheader->getTerminator());
PreheaderLoad->setAlignment(Alignment);
PreheaderLoad->setDebugLoc(DL);
SSA.AddAvailableValue(Preheader, PreheaderLoad);
// Rewrite all the loads in the loop and remember all the definitions from
// stores in the loop.
Promoter.run(LoopUses);
// If the SSAUpdater didn't use the load in the preheader, just zap it now.
if (PreheaderLoad->use_empty())
PreheaderLoad->eraseFromParent();
}
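Example 5 rewrites the loop's loads and stores through LoopPromoter, which is built on SSAUpdater. As a minimal sketch of the underlying SSAUpdater pattern (the function rewriteOneUse and the DefBB/DefVal/UseBB parameters are hypothetical; the SSAUpdater calls are the real API), assuming a single definition and one use site:
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Type.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;
// Hypothetical helper: given a value DefVal available at the end of DefBB,
// return the value to use inside UseBB, letting SSAUpdater insert any PHI
// nodes that are required along the way.
static Value *rewriteOneUse(Type *Ty, StringRef Name, BasicBlock *DefBB,
                            Value *DefVal, BasicBlock *UseBB) {
  SSAUpdater SSA;
  SSA.Initialize(Ty, Name);              // type/name for any PHIs it creates
  SSA.AddAvailableValue(DefBB, DefVal);  // definition reaching the end of DefBB
  return SSA.GetValueInMiddleOfBlock(UseBB);
}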
Example 6: runOnFunction
bool NVPTXLowerAggrCopies::runOnFunction(Function &F) {
SmallVector<LoadInst *, 4> aggrLoads;
SmallVector<MemTransferInst *, 4> aggrMemcpys;
SmallVector<MemSetInst *, 4> aggrMemsets;
DataLayout *TD = &getAnalysis<DataLayout>();
LLVMContext &Context = F.getParent()->getContext();
//
// Collect all the aggrLoads, aggrMemcpys and aggrMemsets.
//
//const BasicBlock *firstBB = &F.front(); // first BB in F
for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
//BasicBlock *bb = BI;
for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE;
++II) {
if (LoadInst * load = dyn_cast<LoadInst>(II)) {
if (load->hasOneUse() == false) continue;
if (TD->getTypeStoreSize(load->getType()) < MaxAggrCopySize) continue;
User *use = *(load->use_begin());
if (StoreInst * store = dyn_cast<StoreInst>(use)) {
if (store->getOperand(0) != load) //getValueOperand
continue;
aggrLoads.push_back(load);
}
} else if (MemTransferInst * intr = dyn_cast<MemTransferInst>(II)) {
Value *len = intr->getLength();
// If the number of elements being copied is greater
// than MaxAggrCopySize, lower it to a loop
if (ConstantInt * len_int = dyn_cast < ConstantInt > (len)) {
if (len_int->getZExtValue() >= MaxAggrCopySize) {
aggrMemcpys.push_back(intr);
}
} else {
// turn variable-length memcpy/memmove into a loop
aggrMemcpys.push_back(intr);
}
} else if (MemSetInst * memsetintr = dyn_cast<MemSetInst>(II)) {
Value *len = memsetintr->getLength();
if (ConstantInt * len_int = dyn_cast<ConstantInt>(len)) {
if (len_int->getZExtValue() >= MaxAggrCopySize) {
aggrMemsets.push_back(memsetintr);
}
} else {
// turn variable length memset into loop
aggrMemsets.push_back(memsetintr);
}
}
}
}
if ((aggrLoads.size() == 0) && (aggrMemcpys.size() == 0)
&& (aggrMemsets.size() == 0)) return false;
//
// Do the transformation of an aggr load/copy/set to a loop
//
for (unsigned i = 0, e = aggrLoads.size(); i != e; ++i) {
LoadInst *load = aggrLoads[i];
StoreInst *store = dyn_cast<StoreInst>(*load->use_begin());
Value *srcAddr = load->getOperand(0);
Value *dstAddr = store->getOperand(1);
unsigned numLoads = TD->getTypeStoreSize(load->getType());
Value *len = ConstantInt::get(Type::getInt32Ty(Context), numLoads);
convertTransferToLoop(store, srcAddr, dstAddr, len, load->isVolatile(),
store->isVolatile(), Context, F);
store->eraseFromParent();
load->eraseFromParent();
}
for (unsigned i = 0, e = aggrMemcpys.size(); i != e; ++i) {
MemTransferInst *cpy = aggrMemcpys[i];
Value *len = cpy->getLength();
// The llvm 2.7 version of memcpy does not have a volatile operand yet,
// so optimistically make it non-volatile so that we don't emit
// unnecessary st.volatile in PTX.
convertTransferToLoop(cpy, cpy->getSource(), cpy->getDest(), len, false,
false, Context, F);
cpy->eraseFromParent();
}
for (unsigned i = 0, e = aggrMemsets.size(); i != e; ++i) {
MemSetInst *memsetinst = aggrMemsets[i];
Value *len = memsetinst->getLength();
Value *val = memsetinst->getValue();
convertMemSetToLoop(memsetinst, memsetinst->getDest(), len, val, Context,
F);
memsetinst->eraseFromParent();
}
return true;
}
Example 7: promoteSingleBlockAlloca
/// Many allocas are only used within a single basic block. If this is the
/// case, avoid traversing the CFG and inserting a lot of potentially useless
/// PHI nodes by just performing a single linear pass over the basic block
/// using the Alloca.
///
/// If we cannot promote this alloca (because it is read before it is written),
/// return true. This is necessary in cases where, due to control flow, the
/// alloca is potentially undefined on some control flow paths. e.g. code like
/// this is potentially correct:
///
/// for (...) { if (c) { A = undef; undef = B; } }
///
/// ... so long as A is not used before undef is set.
static void promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,
LargeBlockInfo &LBI,
AliasSetTracker *AST) {
// The trickiest case to handle is when we have large blocks. Because of this,
// this code is optimized assuming that large blocks happen. This does not
// significantly pessimize the small block case. This uses LargeBlockInfo to
// make it efficient to get the index of various operations in the block.
// Walk the use-def list of the alloca, getting the locations of all stores.
typedef SmallVector<std::pair<unsigned, StoreInst *>, 64> StoresByIndexTy;
StoresByIndexTy StoresByIndex;
for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end(); UI != E;
++UI)
if (StoreInst *SI = dyn_cast<StoreInst>(*UI))
StoresByIndex.push_back(std::make_pair(LBI.getInstructionIndex(SI), SI));
// Sort the stores by their index, making it efficient to do a lookup with a
// binary search.
std::sort(StoresByIndex.begin(), StoresByIndex.end(),
StoreIndexSearchPredicate());
// Walk all of the loads from this alloca, replacing them with the nearest
// store above them, if any.
for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end(); UI != E;) {
LoadInst *LI = dyn_cast<LoadInst>(*UI++);
if (!LI)
continue;
unsigned LoadIdx = LBI.getInstructionIndex(LI);
// Find the nearest store that has a lower index than this load.
StoresByIndexTy::iterator I =
std::lower_bound(StoresByIndex.begin(), StoresByIndex.end(),
std::make_pair(LoadIdx, static_cast<StoreInst *>(0)),
StoreIndexSearchPredicate());
if (I == StoresByIndex.begin())
// If there is no store before this load, the load takes the undef value.
LI->replaceAllUsesWith(UndefValue::get(LI->getType()));
else
// Otherwise, there was a store before this load, the load takes its value.
LI->replaceAllUsesWith(llvm::prior(I)->second->getOperand(0));
if (AST && LI->getType()->isPointerTy())
AST->deleteValue(LI);
LI->eraseFromParent();
LBI.deleteValue(LI);
}
// Remove the (now dead) stores and alloca.
while (!AI->use_empty()) {
StoreInst *SI = cast<StoreInst>(AI->use_back());
// Record debuginfo for the store before removing it.
if (DbgDeclareInst *DDI = Info.DbgDeclare) {
DIBuilder DIB(*AI->getParent()->getParent()->getParent());
ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
}
SI->eraseFromParent();
LBI.deleteValue(SI);
}
if (AST)
AST->deleteValue(AI);
AI->eraseFromParent();
LBI.deleteValue(AI);
// The alloca's debuginfo can be removed as well.
if (DbgDeclareInst *DDI = Info.DbgDeclare)
DDI->eraseFromParent();
++NumLocalPromoted;
}
Example 8: rewriteSingleStoreAlloca
/// \brief Rewrite as many loads as possible given a single store.
///
/// When there is only a single store, we can use the domtree to trivially
/// replace all of the dominated loads with the stored value. Do so, and return
/// true if this has successfully promoted the alloca entirely. If this returns
/// false there were some loads which were not dominated by the single store
/// and thus must be phi-ed with undef. We fall back to the standard alloca
/// promotion algorithm in that case.
static bool rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info,
LargeBlockInfo &LBI,
DominatorTree &DT,
AliasSetTracker *AST) {
StoreInst *OnlyStore = Info.OnlyStore;
bool StoringGlobalVal = !isa<Instruction>(OnlyStore->getOperand(0));
BasicBlock *StoreBB = OnlyStore->getParent();
int StoreIndex = -1;
// Clear out UsingBlocks. We will reconstruct it here if needed.
Info.UsingBlocks.clear();
for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end(); UI != E;) {
Instruction *UserInst = cast<Instruction>(*UI++);
if (!isa<LoadInst>(UserInst)) {
assert(UserInst == OnlyStore && "Should only have load/stores");
continue;
}
LoadInst *LI = cast<LoadInst>(UserInst);
// Okay, if we have a load from the alloca, we want to replace it with the
// only value stored to the alloca. We can do this if the value is
// dominated by the store. If not, we use the rest of the mem2reg machinery
// to insert the phi nodes as needed.
if (!StoringGlobalVal) { // Non-instructions are always dominated.
if (LI->getParent() == StoreBB) {
// If we have a use that is in the same block as the store, compare the
// indices of the two instructions to see which one came first. If the
// load came before the store, we can't handle it.
if (StoreIndex == -1)
StoreIndex = LBI.getInstructionIndex(OnlyStore);
if (unsigned(StoreIndex) > LBI.getInstructionIndex(LI)) {
// Can't handle this load, bail out.
Info.UsingBlocks.push_back(StoreBB);
continue;
}
} else if (LI->getParent() != StoreBB &&
!DT.dominates(StoreBB, LI->getParent())) {
// If the load and store are in different blocks, use BB dominance to
// check their relationships. If the store doesn't dom the use, bail
// out.
Info.UsingBlocks.push_back(LI->getParent());
continue;
}
}
// Otherwise, we *can* safely rewrite this load.
Value *ReplVal = OnlyStore->getOperand(0);
// If the replacement value is the load, this must occur in unreachable
// code.
if (ReplVal == LI)
ReplVal = UndefValue::get(LI->getType());
LI->replaceAllUsesWith(ReplVal);
if (AST && LI->getType()->isPointerTy())
AST->deleteValue(LI);
LI->eraseFromParent();
LBI.deleteValue(LI);
}
// Finally, after the scan, check to see if the store is all that is left.
if (!Info.UsingBlocks.empty())
return false; // If not, we'll have to fall back for the remainder.
// Record debuginfo for the store and remove the declaration's
// debuginfo.
if (DbgDeclareInst *DDI = Info.DbgDeclare) {
DIBuilder DIB(*AI->getParent()->getParent()->getParent());
ConvertDebugDeclareToDebugValue(DDI, Info.OnlyStore, DIB);
DDI->eraseFromParent();
}
// Remove the (now dead) store and alloca.
Info.OnlyStore->eraseFromParent();
LBI.deleteValue(Info.OnlyStore);
if (AST)
AST->deleteValue(AI);
AI->eraseFromParent();
LBI.deleteValue(AI);
return true;
}
Example 9: CS
/// updateCallSites - Update all sites that call F to use NF.
CallGraphNode *SRETPromotion::updateCallSites(Function *F, Function *NF) {
CallGraph &CG = getAnalysis<CallGraph>();
SmallVector<Value*, 16> Args;
// Attributes - Keep track of the parameter attributes for the arguments.
SmallVector<AttributeWithIndex, 8> ArgAttrsVec;
// Get a new callgraph node for NF.
CallGraphNode *NF_CGN = CG.getOrInsertFunction(NF);
while (!F->use_empty()) {
CallSite CS(*F->use_begin());
Instruction *Call = CS.getInstruction();
const AttrListPtr &PAL = F->getAttributes();
// Add any return attributes.
if (Attributes attrs = PAL.getRetAttributes())
ArgAttrsVec.push_back(AttributeWithIndex::get(0, attrs));
// Copy the arguments, but skip the first one.
CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
Value *FirstCArg = *AI;
++AI;
// 0th parameter attribute is reserved for return type.
// 1st parameter attribute is for the first (sret) argument.
unsigned ParamIndex = 2;
while (AI != AE) {
Args.push_back(*AI);
if (Attributes Attrs = PAL.getParamAttributes(ParamIndex))
ArgAttrsVec.push_back(AttributeWithIndex::get(ParamIndex - 1, Attrs));
++ParamIndex;
++AI;
}
// Add any function attributes.
if (Attributes attrs = PAL.getFnAttributes())
ArgAttrsVec.push_back(AttributeWithIndex::get(~0, attrs));
AttrListPtr NewPAL = AttrListPtr::get(ArgAttrsVec.begin(), ArgAttrsVec.end());
// Build new call instruction.
Instruction *New;
if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
New = InvokeInst::Create(NF, II->getNormalDest(), II->getUnwindDest(),
Args.begin(), Args.end(), "", Call);
cast<InvokeInst>(New)->setCallingConv(CS.getCallingConv());
cast<InvokeInst>(New)->setAttributes(NewPAL);
} else {
New = CallInst::Create(NF, Args.begin(), Args.end(), "", Call);
cast<CallInst>(New)->setCallingConv(CS.getCallingConv());
cast<CallInst>(New)->setAttributes(NewPAL);
if (cast<CallInst>(Call)->isTailCall())
cast<CallInst>(New)->setTailCall();
}
Args.clear();
ArgAttrsVec.clear();
New->takeName(Call);
// Update the callgraph to know that the callsite has been transformed.
CallGraphNode *CalleeNode = CG[Call->getParent()->getParent()];
CalleeNode->removeCallEdgeFor(Call);
CalleeNode->addCalledFunction(New, NF_CGN);
// Update all users of sret parameter to extract value using extractvalue.
for (Value::use_iterator UI = FirstCArg->use_begin(),
UE = FirstCArg->use_end(); UI != UE; ) {
User *U2 = *UI++;
CallInst *C2 = dyn_cast<CallInst>(U2);
if (C2 && (C2 == Call))
continue;
GetElementPtrInst *UGEP = cast<GetElementPtrInst>(U2);
ConstantInt *Idx = cast<ConstantInt>(UGEP->getOperand(2));
Value *GR = ExtractValueInst::Create(New, Idx->getZExtValue(),
"evi", UGEP);
while(!UGEP->use_empty()) {
// isSafeToUpdateAllCallers has checked that all GEP uses are
// LoadInsts
LoadInst *L = cast<LoadInst>(*UGEP->use_begin());
L->replaceAllUsesWith(GR);
L->eraseFromParent();
}
UGEP->eraseFromParent();
continue;
}
Call->eraseFromParent();
}
return NF_CGN;
}
Example 10: promoteSingleBlockAlloca
/// Many allocas are only used within a single basic block. If this is the
/// case, avoid traversing the CFG and inserting a lot of potentially useless
/// PHI nodes by just performing a single linear pass over the basic block
/// using the Alloca.
///
/// If we cannot promote this alloca (because it is read before it is written),
/// return false. This is necessary in cases where, due to control flow, the
/// alloca is undefined only on some control flow paths. e.g. code like
/// this is correct in LLVM IR:
/// // A is an alloca with no stores so far
/// for (...) {
/// int t = *A;
/// if (!first_iteration)
/// use(t);
/// *A = 42;
/// }
static bool promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,
LargeBlockInfo &LBI,
const DataLayout &DL,
DominatorTree &DT,
AssumptionCache *AC) {
// The trickiest case to handle is when we have large blocks. Because of this,
// this code is optimized assuming that large blocks happen. This does not
// significantly pessimize the small block case. This uses LargeBlockInfo to
// make it efficient to get the index of various operations in the block.
// Walk the use-def list of the alloca, getting the locations of all stores.
using StoresByIndexTy = SmallVector<std::pair<unsigned, StoreInst *>, 64>;
StoresByIndexTy StoresByIndex;
for (User *U : AI->users())
if (StoreInst *SI = dyn_cast<StoreInst>(U))
StoresByIndex.push_back(std::make_pair(LBI.getInstructionIndex(SI), SI));
// Sort the stores by their index, making it efficient to do a lookup with a
// binary search.
llvm::sort(StoresByIndex, less_first());
// Walk all of the loads from this alloca, replacing them with the nearest
// store above them, if any.
for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
LoadInst *LI = dyn_cast<LoadInst>(*UI++);
if (!LI)
continue;
unsigned LoadIdx = LBI.getInstructionIndex(LI);
// Find the nearest store that has a lower index than this load.
StoresByIndexTy::iterator I =
std::lower_bound(StoresByIndex.begin(), StoresByIndex.end(),
std::make_pair(LoadIdx,
static_cast<StoreInst *>(nullptr)),
less_first());
if (I == StoresByIndex.begin()) {
if (StoresByIndex.empty())
// If there are no stores, the load takes the undef value.
LI->replaceAllUsesWith(UndefValue::get(LI->getType()));
else
// There is no store before this load, bail out (load may be affected
// by the following stores - see main comment).
return false;
} else {
// Otherwise, there was a store before this load, the load takes its value.
// Note, if the load was marked as nonnull we don't want to lose that
// information when we erase it. So we preserve it with an assume.
Value *ReplVal = std::prev(I)->second->getOperand(0);
if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
!isKnownNonZero(ReplVal, DL, 0, AC, LI, &DT))
addAssumeNonNull(AC, LI);
// If the replacement value is the load, this must occur in unreachable
// code.
if (ReplVal == LI)
ReplVal = UndefValue::get(LI->getType());
LI->replaceAllUsesWith(ReplVal);
}
LI->eraseFromParent();
LBI.deleteValue(LI);
}
// Remove the (now dead) stores and alloca.
while (!AI->use_empty()) {
StoreInst *SI = cast<StoreInst>(AI->user_back());
// Record debuginfo for the store before removing it.
for (DbgVariableIntrinsic *DII : Info.DbgDeclares) {
DIBuilder DIB(*AI->getModule(), /*AllowUnresolved*/ false);
ConvertDebugDeclareToDebugValue(DII, SI, DIB);
}
SI->eraseFromParent();
LBI.deleteValue(SI);
}
AI->eraseFromParent();
LBI.deleteValue(AI);
// The alloca's debuginfo can be removed as well.
for (DbgVariableIntrinsic *DII : Info.DbgDeclares) {
DII->eraseFromParent();
//......... some code omitted here .........
Example 11: rewriteSingleStoreAlloca
/// Rewrite as many loads as possible given a single store.
///
/// When there is only a single store, we can use the domtree to trivially
/// replace all of the dominated loads with the stored value. Do so, and return
/// true if this has successfully promoted the alloca entirely. If this returns
/// false there were some loads which were not dominated by the single store
/// and thus must be phi-ed with undef. We fall back to the standard alloca
/// promotion algorithm in that case.
static bool rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info,
LargeBlockInfo &LBI, const DataLayout &DL,
DominatorTree &DT, AssumptionCache *AC) {
StoreInst *OnlyStore = Info.OnlyStore;
bool StoringGlobalVal = !isa<Instruction>(OnlyStore->getOperand(0));
BasicBlock *StoreBB = OnlyStore->getParent();
int StoreIndex = -1;
// Clear out UsingBlocks. We will reconstruct it here if needed.
Info.UsingBlocks.clear();
for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
Instruction *UserInst = cast<Instruction>(*UI++);
if (!isa<LoadInst>(UserInst)) {
assert(UserInst == OnlyStore && "Should only have load/stores");
continue;
}
LoadInst *LI = cast<LoadInst>(UserInst);
// Okay, if we have a load from the alloca, we want to replace it with the
// only value stored to the alloca. We can do this if the value is
// dominated by the store. If not, we use the rest of the mem2reg machinery
// to insert the phi nodes as needed.
if (!StoringGlobalVal) { // Non-instructions are always dominated.
if (LI->getParent() == StoreBB) {
// If we have a use that is in the same block as the store, compare the
// indices of the two instructions to see which one came first. If the
// load came before the store, we can't handle it.
if (StoreIndex == -1)
StoreIndex = LBI.getInstructionIndex(OnlyStore);
if (unsigned(StoreIndex) > LBI.getInstructionIndex(LI)) {
// Can't handle this load, bail out.
Info.UsingBlocks.push_back(StoreBB);
continue;
}
} else if (LI->getParent() != StoreBB &&
!DT.dominates(StoreBB, LI->getParent())) {
// If the load and store are in different blocks, use BB dominance to
// check their relationships. If the store doesn't dom the use, bail
// out.
Info.UsingBlocks.push_back(LI->getParent());
continue;
}
}
// Otherwise, we *can* safely rewrite this load.
Value *ReplVal = OnlyStore->getOperand(0);
// If the replacement value is the load, this must occur in unreachable
// code.
if (ReplVal == LI)
ReplVal = UndefValue::get(LI->getType());
// If the load was marked as nonnull we don't want to lose
// that information when we erase this Load. So we preserve
// it with an assume.
if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
!isKnownNonZero(ReplVal, DL, 0, AC, LI, &DT))
addAssumeNonNull(AC, LI);
LI->replaceAllUsesWith(ReplVal);
LI->eraseFromParent();
LBI.deleteValue(LI);
}
// Finally, after the scan, check to see if the store is all that is left.
if (!Info.UsingBlocks.empty())
return false; // If not, we'll have to fall back for the remainder.
// Record debuginfo for the store and remove the declaration's
// debuginfo.
for (DbgVariableIntrinsic *DII : Info.DbgDeclares) {
DIBuilder DIB(*AI->getModule(), /*AllowUnresolved*/ false);
ConvertDebugDeclareToDebugValue(DII, Info.OnlyStore, DIB);
DII->eraseFromParent();
LBI.deleteValue(DII);
}
// Remove the (now dead) store and alloca.
Info.OnlyStore->eraseFromParent();
LBI.deleteValue(Info.OnlyStore);
AI->eraseFromParent();
LBI.deleteValue(AI);
return true;
}
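Examples 10 and 11 call a helper addAssumeNonNull(AC, LI) that is defined elsewhere in the same file and not shown above. A sketch of what such a helper typically does — insert an icmp against null plus an llvm.assume right after the load and register the assumption with the AssumptionCache — might look like the following; this is an illustration of the pattern, not necessarily the exact upstream code.
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
using namespace llvm;
// Illustrative sketch of a nonnull-preserving helper.
static void addAssumeNonNull(AssumptionCache *AC, LoadInst *LI) {
  // Declare (or look up) the llvm.assume intrinsic in the load's module.
  Function *AssumeIntrinsic =
      Intrinsic::getDeclaration(LI->getModule(), Intrinsic::assume);
  // %notnull = icmp ne %LI, null, inserted right after the load.
  ICmpInst *LoadNotNull = new ICmpInst(ICmpInst::ICMP_NE, LI,
                                       Constant::getNullValue(LI->getType()));
  LoadNotNull->insertAfter(LI);
  // call void @llvm.assume(i1 %notnull)
  CallInst *CI = CallInst::Create(AssumeIntrinsic, {LoadNotNull});
  CI->insertAfter(LoadNotNull);
  // Tell the AssumptionCache about the new assumption so later queries see it.
  AC->registerAssumption(CI);
}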
Example 12: CS
//......... some code omitted here .........
CS.getOperandBundlesAsDefs(OpBundles);
CallSite NewCS;
if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
NewCS = InvokeInst::Create(NF, II->getNormalDest(), II->getUnwindDest(),
Args, OpBundles, "", Call);
} else {
auto *NewCall = CallInst::Create(NF, Args, OpBundles, "", Call);
NewCall->setTailCallKind(cast<CallInst>(Call)->getTailCallKind());
NewCS = NewCall;
}
NewCS.setCallingConv(CS.getCallingConv());
NewCS.setAttributes(
AttributeList::get(F->getContext(), CallPAL.getFnAttributes(),
CallPAL.getRetAttributes(), ArgAttrVec));
NewCS->setDebugLoc(Call->getDebugLoc());
uint64_t W;
if (Call->extractProfTotalWeight(W))
NewCS->setProfWeight(W);
Args.clear();
ArgAttrVec.clear();
// Update the callgraph to know that the callsite has been transformed.
if (ReplaceCallSite)
(*ReplaceCallSite)(CS, NewCS);
if (!Call->use_empty()) {
Call->replaceAllUsesWith(NewCS.getInstruction());
NewCS->takeName(Call);
}
// Finally, remove the old call from the program, reducing the use-count of
// F.
Call->eraseFromParent();
}
const DataLayout &DL = F->getParent()->getDataLayout();
// Since we have now created the new function, splice the body of the old
// function right into the new function, leaving the old rotting hulk of the
// function empty.
NF->getBasicBlockList().splice(NF->begin(), F->getBasicBlockList());
// Loop over the argument list, transferring uses of the old arguments over to
// the new arguments, also transferring over the names as well.
for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(),
I2 = NF->arg_begin();
I != E; ++I) {
if (!ArgsToPromote.count(&*I) && !ByValArgsToTransform.count(&*I)) {
// If this is an unmodified argument, move the name and users over to the
// new version.
I->replaceAllUsesWith(&*I2);
I2->takeName(&*I);
++I2;
continue;
}
if (ByValArgsToTransform.count(&*I)) {
// In the callee, we create an alloca, and store each of the new incoming
// arguments into the alloca.
Instruction *InsertPt = &NF->begin()->front();
// Just add all the struct element types.
Type *AgTy = cast<PointerType>(I->getType())->getElementType();
Value *TheAlloca = new AllocaInst(AgTy, DL.getAllocaAddrSpace(), nullptr,
I->getParamAlignment(), "", InsertPt);