This article collects typical usage examples of the C++ method LoadInst::replaceAllUsesWith. If you have been wondering exactly how LoadInst::replaceAllUsesWith is used, or where to find good examples of it, the hand-picked code examples below may help. You can also explore further usage examples of the containing class, LoadInst.
The following presents 12 code examples of LoadInst::replaceAllUsesWith, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
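Before the examples, it helps to see the bare pattern they all build on. The sketch below is illustrative only (the helper name is hypothetical); it shows the minimal, correct way to retire a load: replaceAllUsesWith requires the replacement to have exactly the load's type, and the load must be erased separately afterwards.

#include "llvm/IR/Instructions.h"
#include "llvm/IR/Value.h"

using namespace llvm;

// Rewrite every user of LI to use V instead, then delete the now-dead load.
// V must have exactly the same type as the load.
static void replaceLoadAndErase(LoadInst *LI, Value *V) {
  LI->replaceAllUsesWith(V);
  LI->eraseFromParent();
}

Every example below is a variation of this two-step pattern, differing only in how the replacement value is found and what bookkeeping (dominance, metadata, debug info) surrounds it.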
Example 1: assert
/// RewriteSingleStoreAlloca - If there is only a single store to this value,
/// replace any loads of it that are directly dominated by the definition with
/// the value stored.
void PromoteMem2Reg::RewriteSingleStoreAlloca(AllocaInst *AI,
AllocaInfo &Info,
LargeBlockInfo &LBI) {
StoreInst *OnlyStore = Info.OnlyStore;
bool StoringGlobalVal = !isa<Instruction>(OnlyStore->getOperand(0));
BasicBlock *StoreBB = OnlyStore->getParent();
int StoreIndex = -1;
// Clear out UsingBlocks. We will reconstruct it here if needed.
Info.UsingBlocks.clear();
for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end(); UI != E; ) {
Instruction *UserInst = cast<Instruction>(*UI++);
if (!isa<LoadInst>(UserInst)) {
assert(UserInst == OnlyStore && "Should only have load/stores");
continue;
}
LoadInst *LI = cast<LoadInst>(UserInst);
// Okay, if we have a load from the alloca, we want to replace it with the
// only value stored to the alloca. We can do this if the value is
// dominated by the store. If not, we use the rest of the mem2reg machinery
// to insert the phi nodes as needed.
if (!StoringGlobalVal) { // Non-instructions are always dominated.
if (LI->getParent() == StoreBB) {
// If we have a use that is in the same block as the store, compare the
// indices of the two instructions to see which one came first. If the
// load came before the store, we can't handle it.
if (StoreIndex == -1)
StoreIndex = LBI.getInstructionIndex(OnlyStore);
if (unsigned(StoreIndex) > LBI.getInstructionIndex(LI)) {
// Can't handle this load, bail out.
Info.UsingBlocks.push_back(StoreBB);
continue;
}
} else if (LI->getParent() != StoreBB &&
!dominates(StoreBB, LI->getParent())) {
// If the load and store are in different blocks, use BB dominance to
// check their relationships. If the store doesn't dom the use, bail
// out.
Info.UsingBlocks.push_back(LI->getParent());
continue;
}
}
// Otherwise, we *can* safely rewrite this load.
Value *ReplVal = OnlyStore->getOperand(0);
// If the replacement value is the load, this must occur in unreachable
// code.
if (ReplVal == LI)
ReplVal = UndefValue::get(LI->getType());
LI->replaceAllUsesWith(ReplVal);
if (AST && LI->getType()->isPointerTy())
AST->deleteValue(LI);
LI->eraseFromParent();
LBI.deleteValue(LI);
}
}
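The heart of this example is the dominance test that decides whether a load may be rewritten. Below is a condensed sketch of the same test, assuming a DominatorTree is at hand (hypothetical helper; Instruction::comesBefore requires LLVM 10 or newer, which is why the example above uses LargeBlockInfo indices for the same-block case instead):

#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// True if the single store provably executes before the load, i.e. the load
// may safely be rewritten to the stored value.
static bool storeDominatesLoad(StoreInst *SI, LoadInst *LI,
                               const DominatorTree &DT) {
  if (SI->getParent() == LI->getParent())
    return SI->comesBefore(LI); // same block: decide by instruction order
  return DT.dominates(SI->getParent(), LI->getParent());
}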
Example 2: visitLoadInst
bool AMDGPUCodeGenPrepare::visitLoadInst(LoadInst &I) {
if (!WidenLoads)
return false;
if ((I.getPointerAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS ||
I.getPointerAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS_32BIT) &&
canWidenScalarExtLoad(I)) {
IRBuilder<> Builder(&I);
Builder.SetCurrentDebugLocation(I.getDebugLoc());
Type *I32Ty = Builder.getInt32Ty();
Type *PT = PointerType::get(I32Ty, I.getPointerAddressSpace());
Value *BitCast = Builder.CreateBitCast(I.getPointerOperand(), PT);
LoadInst *WidenLoad = Builder.CreateLoad(BitCast);
WidenLoad->copyMetadata(I);
// If we have range metadata, we need to convert the type, and not make
// assumptions about the high bits.
if (auto *Range = WidenLoad->getMetadata(LLVMContext::MD_range)) {
ConstantInt *Lower =
mdconst::extract<ConstantInt>(Range->getOperand(0));
if (Lower->getValue().isNullValue()) {
WidenLoad->setMetadata(LLVMContext::MD_range, nullptr);
} else {
Metadata *LowAndHigh[] = {
ConstantAsMetadata::get(ConstantInt::get(I32Ty, Lower->getValue().zext(32))),
// Don't make assumptions about the high bits.
ConstantAsMetadata::get(ConstantInt::get(I32Ty, 0))
};
WidenLoad->setMetadata(LLVMContext::MD_range,
MDNode::get(Mod->getContext(), LowAndHigh));
}
}
int TySize = Mod->getDataLayout().getTypeSizeInBits(I.getType());
Type *IntNTy = Builder.getIntNTy(TySize);
Value *ValTrunc = Builder.CreateTrunc(WidenLoad, IntNTy);
Value *ValOrig = Builder.CreateBitCast(ValTrunc, I.getType());
I.replaceAllUsesWith(ValOrig);
I.eraseFromParent();
return true;
}
return false;
}
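The range-metadata branch above is easier to follow with a concrete construction. Here is a hedged sketch (hypothetical helper) of attaching an i32 !range [0, 256) node to a load, built from the same ConstantAsMetadata pair the example assembles:

#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Metadata.h"

using namespace llvm;

// Attach !range metadata stating the loaded i32 lies in [0, 256).
static void setRangeMetadata(LoadInst *LI) {
  LLVMContext &Ctx = LI->getContext();
  Type *I32Ty = Type::getInt32Ty(Ctx);
  Metadata *LowAndHigh[] = {
      ConstantAsMetadata::get(ConstantInt::get(I32Ty, 0)),
      ConstantAsMetadata::get(ConstantInt::get(I32Ty, 256))};
  LI->setMetadata(LLVMContext::MD_range, MDNode::get(Ctx, LowAndHigh));
}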
Example 3: StoreIndexSearchPredicate
/// PromoteSingleBlockAlloca - Many allocas are only used within a single basic
/// block. If this is the case, avoid traversing the CFG and inserting a lot of
/// potentially useless PHI nodes by just performing a single linear pass over
/// the basic block using the Alloca.
///
/// If we cannot promote this alloca (because it is read before it is written),
/// record its block in UsingBlocks and fall back. This is necessary in cases where, due to control flow, the
/// alloca is potentially undefined on some control flow paths. e.g. code like
/// this is potentially correct:
///
/// for (...) { if (c) { A = undef; undef = B; } }
///
/// ... so long as A is not used before undef is set.
///
void PromoteMem2Reg::PromoteSingleBlockAlloca(AllocaInst *AI, AllocaInfo &Info,
LargeBlockInfo &LBI) {
// The trickiest case to handle is when we have large blocks. Because of this,
// this code is optimized assuming that large blocks happen. This does not
// significantly pessimize the small block case. This uses LargeBlockInfo to
// make it efficient to get the index of various operations in the block.
// Clear out UsingBlocks. We will reconstruct it here if needed.
Info.UsingBlocks.clear();
// Walk the use-def list of the alloca, getting the locations of all stores.
typedef SmallVector<std::pair<unsigned, StoreInst*>, 64> StoresByIndexTy;
StoresByIndexTy StoresByIndex;
for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
UI != E; ++UI)
if (StoreInst *SI = dyn_cast<StoreInst>(*UI))
StoresByIndex.push_back(std::make_pair(LBI.getInstructionIndex(SI), SI));
// If there are no stores to the alloca, just replace any loads with undef.
if (StoresByIndex.empty()) {
for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end(); UI != E;)
if (LoadInst *LI = dyn_cast<LoadInst>(*UI++)) {
LI->replaceAllUsesWith(UndefValue::get(LI->getType()));
if (AST && LI->getType()->isPointerTy())
AST->deleteValue(LI);
LBI.deleteValue(LI);
LI->eraseFromParent();
}
return;
}
// Sort the stores by their index, making it efficient to do a lookup with a
// binary search.
std::sort(StoresByIndex.begin(), StoresByIndex.end());
// Walk all of the loads from this alloca, replacing them with the nearest
// store above them, if any.
for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end(); UI != E;) {
LoadInst *LI = dyn_cast<LoadInst>(*UI++);
if (!LI) continue;
unsigned LoadIdx = LBI.getInstructionIndex(LI);
// Find the nearest store that has a lower index than this load.
StoresByIndexTy::iterator I =
std::lower_bound(StoresByIndex.begin(), StoresByIndex.end(),
std::pair<unsigned, StoreInst*>(LoadIdx, static_cast<StoreInst*>(0)),
StoreIndexSearchPredicate());
// If there is no store before this load, then we can't promote this load.
if (I == StoresByIndex.begin()) {
// Can't handle this load, bail out.
Info.UsingBlocks.push_back(LI->getParent());
continue;
}
// Otherwise, there was a store before this load, the load takes its value.
--I;
LI->replaceAllUsesWith(I->second->getOperand(0));
if (AST && LI->getType()->isPointerTy())
AST->deleteValue(LI);
LI->eraseFromParent();
LBI.deleteValue(LI);
}
}
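The std::lower_bound lookup is the core trick here and is independent of LLVM. A self-contained plain-C++ sketch of the same idea (all names hypothetical):

#include <algorithm>
#include <iterator>
#include <utility>
#include <vector>

// Pairs of (instruction index, store id), kept sorted by index.
using StoresByIndex = std::vector<std::pair<unsigned, int>>;

// Return the id of the nearest store strictly before LoadIdx, or -1 if no
// store precedes the load (the "bail out" case in the example above).
static int nearestStoreBefore(const StoresByIndex &Stores, unsigned LoadIdx) {
  auto It = std::lower_bound(
      Stores.begin(), Stores.end(), std::make_pair(LoadIdx, -1),
      [](const std::pair<unsigned, int> &A,
         const std::pair<unsigned, int> &B) { return A.first < B.first; });
  if (It == Stores.begin())
    return -1;
  return std::prev(It)->second;
}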
示例4: Ranges
//.........这里部分代码省略.........
newStore->takeName(oldStore);
}
}
// cast the pointer that was load/stored to i8 if necessary.
if( StartPtr->getType()->getPointerElementType() == int8Ty ) {
globalPtr = StartPtr;
} else {
globalPtr = builder.CreatePointerCast(StartPtr, globalInt8PtrTy, "agg.cast");
}
// Get a Constant* for the length.
Constant* len = ConstantInt::get(sizeTy, Range.End-Range.Start, false);
// Now add the memcpy instruction
unsigned addrSpaceDst,addrSpaceSrc;
addrSpaceDst = addrSpaceSrc = 0;
if( isStore ) addrSpaceDst = globalSpace;
if( isLoad ) addrSpaceSrc = globalSpace;
Type *types[3];
types[0] = PointerType::get(int8Ty, addrSpaceDst);
types[1] = PointerType::get(int8Ty, addrSpaceSrc);
types[2] = sizeTy;
Function *func = Intrinsic::getDeclaration(M, Intrinsic::memcpy, types);
Value* args[5]; // dst src len alignment isvolatile
if( isStore ) {
// it's a store (ie put)
args[0] = globalPtr;
args[1] = alloc;
} else {
// it's a load (ie get)
args[0] = alloc;
args[1] = globalPtr;
}
args[2] = len;
// alignment
args[3] = ConstantInt::get(Type::getInt32Ty(Context), 0, false);
// isvolatile
args[4] = ConstantInt::get(Type::getInt1Ty(Context), 0, false);
Instruction* aMemCpy = builder.CreateCall(func, args);
/*
DEBUG(dbgs() << "Replace ops:\n";
for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
dbgs() << *Range.TheStores[i] << '\n';
dbgs() << "With: " << *AMemSet << '\n');
*/
if (!Range.TheStores.empty())
aMemCpy->setDebugLoc(Range.TheStores[0]->getDebugLoc());
lastAddedInsn = aMemCpy;
// If loading, load from the memcpy'd region
if( isLoad ) {
for (SmallVector<Instruction*, 16>::const_iterator
SI = Range.TheStores.begin(),
SE = Range.TheStores.end(); SI != SE; ++SI) {
LoadInst* oldLoad = cast<LoadInst>(*SI);
if( DebugThis ) {
errs() << "have load in range:";
oldLoad->dump();
}
Value* ptrToAlloc = rebasePointer(oldLoad->getPointerOperand(),
StartPtr, alloc, "agg.tmp",
&builder, *TD, oldBaseI, newBaseI);
// Old load must not be volatile or atomic... or we shouldn't have put
// it in ranges
assert(!(oldLoad->isVolatile() || oldLoad->isAtomic()));
LoadInst* newLoad = builder.CreateLoad(ptrToAlloc);
newLoad->setAlignment(oldLoad->getAlignment());
oldLoad->replaceAllUsesWith(newLoad);
newLoad->takeName(oldLoad);
lastAddedInsn = newLoad;
}
}
// Save old loads/stores for removal
for (SmallVector<Instruction*, 16>::const_iterator
SI = Range.TheStores.begin(),
SE = Range.TheStores.end(); SI != SE; ++SI) {
Instruction* insn = *SI;
toRemove.push_back(insn);
}
}
// Zap all the old loads/stores
for (SmallVector<Instruction*, 16>::const_iterator
SI = toRemove.begin(),
SE = toRemove.end(); SI != SE; ++SI) {
(*SI)->eraseFromParent();
}
return lastAddedInsn;
}
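For orientation: on current LLVM the hand-assembled memcpy above is normally emitted through IRBuilder rather than Intrinsic::getDeclaration. A hedged sketch of the modern spelling (MaybeAlign exists from roughly LLVM 10 onward; helper name hypothetical):

#include "llvm/IR/IRBuilder.h"
#include <cstdint>

using namespace llvm;

// Emit memcpy(Dst, Src, Len) with byte alignment, mirroring the explicit
// alignment-0 operand the example constructs by hand.
static CallInst *emitAggCopy(IRBuilder<> &Builder, Value *Dst, Value *Src,
                             uint64_t Len) {
  return Builder.CreateMemCpy(Dst, MaybeAlign(1), Src, MaybeAlign(1), Len);
}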
Example 5: promoteSingleBlockAlloca
/// Many allocas are only used within a single basic block. If this is the
/// case, avoid traversing the CFG and inserting a lot of potentially useless
/// PHI nodes by just performing a single linear pass over the basic block
/// using the Alloca.
///
/// If we cannot promote this alloca (because it is read before it is written),
/// any load that sees no prior store simply takes undef. This is necessary in cases where, due to control flow, the
/// alloca is potentially undefined on some control flow paths. e.g. code like
/// this is potentially correct:
///
/// for (...) { if (c) { A = undef; undef = B; } }
///
/// ... so long as A is not used before undef is set.
static void promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,
LargeBlockInfo &LBI,
AliasSetTracker *AST) {
// The trickiest case to handle is when we have large blocks. Because of this,
// this code is optimized assuming that large blocks happen. This does not
// significantly pessimize the small block case. This uses LargeBlockInfo to
// make it efficient to get the index of various operations in the block.
// Walk the use-def list of the alloca, getting the locations of all stores.
typedef SmallVector<std::pair<unsigned, StoreInst *>, 64> StoresByIndexTy;
StoresByIndexTy StoresByIndex;
for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end(); UI != E;
++UI)
if (StoreInst *SI = dyn_cast<StoreInst>(*UI))
StoresByIndex.push_back(std::make_pair(LBI.getInstructionIndex(SI), SI));
// Sort the stores by their index, making it efficient to do a lookup with a
// binary search.
std::sort(StoresByIndex.begin(), StoresByIndex.end(),
StoreIndexSearchPredicate());
// Walk all of the loads from this alloca, replacing them with the nearest
// store above them, if any.
for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end(); UI != E;) {
LoadInst *LI = dyn_cast<LoadInst>(*UI++);
if (!LI)
continue;
unsigned LoadIdx = LBI.getInstructionIndex(LI);
// Find the nearest store that has a lower index than this load.
StoresByIndexTy::iterator I =
std::lower_bound(StoresByIndex.begin(), StoresByIndex.end(),
std::make_pair(LoadIdx, static_cast<StoreInst *>(0)),
StoreIndexSearchPredicate());
if (I == StoresByIndex.begin())
// If there is no store before this load, the load takes the undef value.
LI->replaceAllUsesWith(UndefValue::get(LI->getType()));
else
// Otherwise, there was a store before this load, the load takes its value.
LI->replaceAllUsesWith(llvm::prior(I)->second->getOperand(0));
if (AST && LI->getType()->isPointerTy())
AST->deleteValue(LI);
LI->eraseFromParent();
LBI.deleteValue(LI);
}
// Remove the (now dead) stores and alloca.
while (!AI->use_empty()) {
StoreInst *SI = cast<StoreInst>(AI->use_back());
// Record debuginfo for the store before removing it.
if (DbgDeclareInst *DDI = Info.DbgDeclare) {
DIBuilder DIB(*AI->getParent()->getParent()->getParent());
ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
}
SI->eraseFromParent();
LBI.deleteValue(SI);
}
if (AST)
AST->deleteValue(AI);
AI->eraseFromParent();
LBI.deleteValue(AI);
// The alloca's debuginfo can be removed as well.
if (DbgDeclareInst *DDI = Info.DbgDeclare)
DDI->eraseFromParent();
++NumLocalPromoted;
}
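Examples 5, 6, 8 and 9 all share the ConvertDebugDeclareToDebugValue step shown above. A condensed sketch of just that step (hypothetical helper; the intrinsic parameter type is DbgDeclareInst in older releases and DbgVariableIntrinsic in newer ones):

#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;

// Before the store is erased, turn the alloca's dbg.declare into a dbg.value
// of the stored value, so the variable's location info survives promotion.
static void preserveStoreDebugInfo(AllocaInst *AI, StoreInst *SI,
                                   DbgVariableIntrinsic *DII) {
  DIBuilder DIB(*AI->getModule(), /*AllowUnresolved=*/false);
  ConvertDebugDeclareToDebugValue(DII, SI, DIB);
}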
Example 6: rewriteSingleStoreAlloca
/// \brief Rewrite as many loads as possible given a single store.
///
/// When there is only a single store, we can use the domtree to trivially
/// replace all of the dominated loads with the stored value. Do so, and return
/// true if this has successfully promoted the alloca entirely. If this returns
/// false there were some loads which were not dominated by the single store
/// and thus must be phi-ed with undef. We fall back to the standard alloca
/// promotion algorithm in that case.
static bool rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info,
LargeBlockInfo &LBI,
DominatorTree &DT,
AliasSetTracker *AST) {
StoreInst *OnlyStore = Info.OnlyStore;
bool StoringGlobalVal = !isa<Instruction>(OnlyStore->getOperand(0));
BasicBlock *StoreBB = OnlyStore->getParent();
int StoreIndex = -1;
// Clear out UsingBlocks. We will reconstruct it here if needed.
Info.UsingBlocks.clear();
for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end(); UI != E;) {
Instruction *UserInst = cast<Instruction>(*UI++);
if (!isa<LoadInst>(UserInst)) {
assert(UserInst == OnlyStore && "Should only have load/stores");
continue;
}
LoadInst *LI = cast<LoadInst>(UserInst);
// Okay, if we have a load from the alloca, we want to replace it with the
// only value stored to the alloca. We can do this if the value is
// dominated by the store. If not, we use the rest of the mem2reg machinery
// to insert the phi nodes as needed.
if (!StoringGlobalVal) { // Non-instructions are always dominated.
if (LI->getParent() == StoreBB) {
// If we have a use that is in the same block as the store, compare the
// indices of the two instructions to see which one came first. If the
// load came before the store, we can't handle it.
if (StoreIndex == -1)
StoreIndex = LBI.getInstructionIndex(OnlyStore);
if (unsigned(StoreIndex) > LBI.getInstructionIndex(LI)) {
// Can't handle this load, bail out.
Info.UsingBlocks.push_back(StoreBB);
continue;
}
} else if (LI->getParent() != StoreBB &&
!DT.dominates(StoreBB, LI->getParent())) {
// If the load and store are in different blocks, use BB dominance to
// check their relationships. If the store doesn't dom the use, bail
// out.
Info.UsingBlocks.push_back(LI->getParent());
continue;
}
}
// Otherwise, we *can* safely rewrite this load.
Value *ReplVal = OnlyStore->getOperand(0);
// If the replacement value is the load, this must occur in unreachable
// code.
if (ReplVal == LI)
ReplVal = UndefValue::get(LI->getType());
LI->replaceAllUsesWith(ReplVal);
if (AST && LI->getType()->isPointerTy())
AST->deleteValue(LI);
LI->eraseFromParent();
LBI.deleteValue(LI);
}
// Finally, after the scan, check to see if the store is all that is left.
if (!Info.UsingBlocks.empty())
return false; // If not, we'll have to fall back for the remainder.
// Record debuginfo for the store and remove the declaration's
// debuginfo.
if (DbgDeclareInst *DDI = Info.DbgDeclare) {
DIBuilder DIB(*AI->getParent()->getParent()->getParent());
ConvertDebugDeclareToDebugValue(DDI, Info.OnlyStore, DIB);
DDI->eraseFromParent();
}
// Remove the (now dead) store and alloca.
Info.OnlyStore->eraseFromParent();
LBI.deleteValue(Info.OnlyStore);
if (AST)
AST->deleteValue(AI);
AI->eraseFromParent();
LBI.deleteValue(AI);
return true;
}
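The ReplVal == LI guard deserves a callout: in unreachable code, the alloca's only store can store the load's own result, and rewriting the load in terms of itself would create a self-referential value. A minimal sketch of the guard (hypothetical helper):

#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Never rewrite a load in terms of itself; this only arises in unreachable
// code, where the sole store writes the load's own result back to the alloca.
static Value *safeReplacementValue(LoadInst *LI, Value *ReplVal) {
  if (ReplVal == LI)
    return UndefValue::get(LI->getType());
  return ReplVal;
}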
示例7: CS
/// updateCallSites - Update all sites that call F to use NF.
CallGraphNode *SRETPromotion::updateCallSites(Function *F, Function *NF) {
CallGraph &CG = getAnalysis<CallGraph>();
SmallVector<Value*, 16> Args;
// Attributes - Keep track of the parameter attributes for the arguments.
SmallVector<AttributeWithIndex, 8> ArgAttrsVec;
// Get a new callgraph node for NF.
CallGraphNode *NF_CGN = CG.getOrInsertFunction(NF);
while (!F->use_empty()) {
CallSite CS(*F->use_begin());
Instruction *Call = CS.getInstruction();
const AttrListPtr &PAL = F->getAttributes();
// Add any return attributes.
if (Attributes attrs = PAL.getRetAttributes())
ArgAttrsVec.push_back(AttributeWithIndex::get(0, attrs));
// Copy arguments, however skip first one.
CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
Value *FirstCArg = *AI;
++AI;
// 0th parameter attribute is reserved for return type.
// 1st parameter attribute is for the first (sret) argument.
unsigned ParamIndex = 2;
while (AI != AE) {
Args.push_back(*AI);
if (Attributes Attrs = PAL.getParamAttributes(ParamIndex))
ArgAttrsVec.push_back(AttributeWithIndex::get(ParamIndex - 1, Attrs));
++ParamIndex;
++AI;
}
// Add any function attributes.
if (Attributes attrs = PAL.getFnAttributes())
ArgAttrsVec.push_back(AttributeWithIndex::get(~0, attrs));
AttrListPtr NewPAL = AttrListPtr::get(ArgAttrsVec.begin(), ArgAttrsVec.end());
// Build new call instruction.
Instruction *New;
if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
New = InvokeInst::Create(NF, II->getNormalDest(), II->getUnwindDest(),
Args.begin(), Args.end(), "", Call);
cast<InvokeInst>(New)->setCallingConv(CS.getCallingConv());
cast<InvokeInst>(New)->setAttributes(NewPAL);
} else {
New = CallInst::Create(NF, Args.begin(), Args.end(), "", Call);
cast<CallInst>(New)->setCallingConv(CS.getCallingConv());
cast<CallInst>(New)->setAttributes(NewPAL);
if (cast<CallInst>(Call)->isTailCall())
cast<CallInst>(New)->setTailCall();
}
Args.clear();
ArgAttrsVec.clear();
New->takeName(Call);
// Update the callgraph to know that the callsite has been transformed.
CallGraphNode *CalleeNode = CG[Call->getParent()->getParent()];
CalleeNode->removeCallEdgeFor(Call);
CalleeNode->addCalledFunction(New, NF_CGN);
// Update all users of sret parameter to extract value using extractvalue.
for (Value::use_iterator UI = FirstCArg->use_begin(),
UE = FirstCArg->use_end(); UI != UE; ) {
User *U2 = *UI++;
CallInst *C2 = dyn_cast<CallInst>(U2);
if (C2 && (C2 == Call))
continue;
GetElementPtrInst *UGEP = cast<GetElementPtrInst>(U2);
ConstantInt *Idx = cast<ConstantInt>(UGEP->getOperand(2));
Value *GR = ExtractValueInst::Create(New, Idx->getZExtValue(),
"evi", UGEP);
while(!UGEP->use_empty()) {
// isSafeToUpdateAllCallers has checked that all GEP uses are
// LoadInsts
LoadInst *L = cast<LoadInst>(*UGEP->use_begin());
L->replaceAllUsesWith(GR);
L->eraseFromParent();
}
UGEP->eraseFromParent();
continue;
}
Call->eraseFromParent();
}
return NF_CGN;
}
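The pivotal move in this example is rewriting each GEP into the old sret argument as an extractvalue of the new call's aggregate return. A hedged sketch of that single rewrite (hypothetical helper):

#include "llvm/IR/Instructions.h"

using namespace llvm;

// NewCall returns the promoted aggregate by value; Idx is the struct field
// the old GEP addressed. Loads through that GEP then become direct uses of
// the extractvalue result via replaceAllUsesWith, as in the loop above.
static Value *extractSretField(Instruction *NewCall, unsigned Idx,
                               Instruction *InsertBefore) {
  return ExtractValueInst::Create(NewCall, Idx, "evi", InsertBefore);
}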
Example 8: promoteSingleBlockAlloca
/// Many allocas are only used within a single basic block. If this is the
/// case, avoid traversing the CFG and inserting a lot of potentially useless
/// PHI nodes by just performing a single linear pass over the basic block
/// using the Alloca.
///
/// If we cannot promote this alloca (because it is read before it is written),
/// return false. This is necessary in cases where, due to control flow, the
/// alloca is undefined only on some control flow paths. e.g. code like
/// this is correct in LLVM IR:
/// // A is an alloca with no stores so far
/// for (...) {
/// int t = *A;
/// if (!first_iteration)
/// use(t);
/// *A = 42;
/// }
static bool promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,
LargeBlockInfo &LBI,
const DataLayout &DL,
DominatorTree &DT,
AssumptionCache *AC) {
// The trickiest case to handle is when we have large blocks. Because of this,
// this code is optimized assuming that large blocks happen. This does not
// significantly pessimize the small block case. This uses LargeBlockInfo to
// make it efficient to get the index of various operations in the block.
// Walk the use-def list of the alloca, getting the locations of all stores.
using StoresByIndexTy = SmallVector<std::pair<unsigned, StoreInst *>, 64>;
StoresByIndexTy StoresByIndex;
for (User *U : AI->users())
if (StoreInst *SI = dyn_cast<StoreInst>(U))
StoresByIndex.push_back(std::make_pair(LBI.getInstructionIndex(SI), SI));
// Sort the stores by their index, making it efficient to do a lookup with a
// binary search.
llvm::sort(StoresByIndex, less_first());
// Walk all of the loads from this alloca, replacing them with the nearest
// store above them, if any.
for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
LoadInst *LI = dyn_cast<LoadInst>(*UI++);
if (!LI)
continue;
unsigned LoadIdx = LBI.getInstructionIndex(LI);
// Find the nearest store that has a lower index than this load.
StoresByIndexTy::iterator I =
std::lower_bound(StoresByIndex.begin(), StoresByIndex.end(),
std::make_pair(LoadIdx,
static_cast<StoreInst *>(nullptr)),
less_first());
if (I == StoresByIndex.begin()) {
if (StoresByIndex.empty())
// If there are no stores, the load takes the undef value.
LI->replaceAllUsesWith(UndefValue::get(LI->getType()));
else
// There is no store before this load, bail out (load may be affected
// by the following stores - see main comment).
return false;
} else {
// Otherwise, there was a store before this load, the load takes its value.
// Note, if the load was marked as nonnull we don't want to lose that
// information when we erase it. So we preserve it with an assume.
Value *ReplVal = std::prev(I)->second->getOperand(0);
if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
!isKnownNonZero(ReplVal, DL, 0, AC, LI, &DT))
addAssumeNonNull(AC, LI);
// If the replacement value is the load, this must occur in unreachable
// code.
if (ReplVal == LI)
ReplVal = UndefValue::get(LI->getType());
LI->replaceAllUsesWith(ReplVal);
}
LI->eraseFromParent();
LBI.deleteValue(LI);
}
// Remove the (now dead) stores and alloca.
while (!AI->use_empty()) {
StoreInst *SI = cast<StoreInst>(AI->user_back());
// Record debuginfo for the store before removing it.
for (DbgVariableIntrinsic *DII : Info.DbgDeclares) {
DIBuilder DIB(*AI->getModule(), /*AllowUnresolved*/ false);
ConvertDebugDeclareToDebugValue(DII, SI, DIB);
}
SI->eraseFromParent();
LBI.deleteValue(SI);
}
AI->eraseFromParent();
LBI.deleteValue(AI);
// The alloca's debuginfo can be removed as well.
for (DbgVariableIntrinsic *DII : Info.DbgDeclares) {
DII->eraseFromParent();
//......... portions of this code omitted .........
Example 9: rewriteSingleStoreAlloca
/// Rewrite as many loads as possible given a single store.
///
/// When there is only a single store, we can use the domtree to trivially
/// replace all of the dominated loads with the stored value. Do so, and return
/// true if this has successfully promoted the alloca entirely. If this returns
/// false there were some loads which were not dominated by the single store
/// and thus must be phi-ed with undef. We fall back to the standard alloca
/// promotion algorithm in that case.
static bool rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info,
LargeBlockInfo &LBI, const DataLayout &DL,
DominatorTree &DT, AssumptionCache *AC) {
StoreInst *OnlyStore = Info.OnlyStore;
bool StoringGlobalVal = !isa<Instruction>(OnlyStore->getOperand(0));
BasicBlock *StoreBB = OnlyStore->getParent();
int StoreIndex = -1;
// Clear out UsingBlocks. We will reconstruct it here if needed.
Info.UsingBlocks.clear();
for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
Instruction *UserInst = cast<Instruction>(*UI++);
if (!isa<LoadInst>(UserInst)) {
assert(UserInst == OnlyStore && "Should only have load/stores");
continue;
}
LoadInst *LI = cast<LoadInst>(UserInst);
// Okay, if we have a load from the alloca, we want to replace it with the
// only value stored to the alloca. We can do this if the value is
// dominated by the store. If not, we use the rest of the mem2reg machinery
// to insert the phi nodes as needed.
if (!StoringGlobalVal) { // Non-instructions are always dominated.
if (LI->getParent() == StoreBB) {
// If we have a use that is in the same block as the store, compare the
// indices of the two instructions to see which one came first. If the
// load came before the store, we can't handle it.
if (StoreIndex == -1)
StoreIndex = LBI.getInstructionIndex(OnlyStore);
if (unsigned(StoreIndex) > LBI.getInstructionIndex(LI)) {
// Can't handle this load, bail out.
Info.UsingBlocks.push_back(StoreBB);
continue;
}
} else if (LI->getParent() != StoreBB &&
!DT.dominates(StoreBB, LI->getParent())) {
// If the load and store are in different blocks, use BB dominance to
// check their relationships. If the store doesn't dom the use, bail
// out.
Info.UsingBlocks.push_back(LI->getParent());
continue;
}
}
// Otherwise, we *can* safely rewrite this load.
Value *ReplVal = OnlyStore->getOperand(0);
// If the replacement value is the load, this must occur in unreachable
// code.
if (ReplVal == LI)
ReplVal = UndefValue::get(LI->getType());
// If the load was marked as nonnull we don't want to lose
// that information when we erase this Load. So we preserve
// it with an assume.
if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
!isKnownNonZero(ReplVal, DL, 0, AC, LI, &DT))
addAssumeNonNull(AC, LI);
LI->replaceAllUsesWith(ReplVal);
LI->eraseFromParent();
LBI.deleteValue(LI);
}
// Finally, after the scan, check to see if the store is all that is left.
if (!Info.UsingBlocks.empty())
return false; // If not, we'll have to fall back for the remainder.
// Record debuginfo for the store and remove the declaration's
// debuginfo.
for (DbgVariableIntrinsic *DII : Info.DbgDeclares) {
DIBuilder DIB(*AI->getModule(), /*AllowUnresolved*/ false);
ConvertDebugDeclareToDebugValue(DII, Info.OnlyStore, DIB);
DII->eraseFromParent();
LBI.deleteValue(DII);
}
// Remove the (now dead) store and alloca.
Info.OnlyStore->eraseFromParent();
LBI.deleteValue(Info.OnlyStore);
AI->eraseFromParent();
LBI.deleteValue(AI);
return true;
}
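addAssumeNonNull, called above before the load is erased, is a file-local helper in PromoteMemoryToRegister.cpp. The following is a hedged approximation of what it does, not the verbatim implementation (the AssumptionCache registration step is omitted because its signature varies across LLVM versions):

#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Materialize the dying load's !nonnull fact as an assume. The icmp is built
// on LI itself, so after replaceAllUsesWith(ReplVal) the icmp refers to the
// replacement value and the nonnull fact survives the load's deletion.
static void emitNonNullAssume(LoadInst *LI) {
  IRBuilder<> Builder(LI->getNextNode()); // insert right after the load
  Value *NotNull =
      Builder.CreateICmpNE(LI, Constant::getNullValue(LI->getType()));
  Builder.CreateAssumption(NotNull);
}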
Example 10: PromoteAliasSet
//......... portions of this code omitted .........
// Otherwise, check to see if this block is all loads. If so, we can queue
// them all as live in loads.
bool HasStore = false;
for (unsigned i = 0, e = BlockUses.size(); i != e; ++i) {
if (isa<StoreInst>(BlockUses[i])) {
HasStore = true;
break;
}
}
if (!HasStore) {
for (unsigned i = 0, e = BlockUses.size(); i != e; ++i)
LiveInLoads.push_back(cast<LoadInst>(BlockUses[i]));
BlockUses.clear();
continue;
}
// Otherwise, we have mixed loads and stores (or just a bunch of stores).
// Since SSAUpdater is purely for cross-block values, we need to determine
// the order of these instructions in the block. If the first use in the
// block is a load, then it uses the live in value. The last store defines
// the live out value. We handle this by doing a linear scan of the block.
BasicBlock *BB = User->getParent();
Value *StoredValue = 0;
for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ++II) {
if (LoadInst *L = dyn_cast<LoadInst>(II)) {
// If this is a load from an unrelated pointer, ignore it.
if (!PointerMustAliases.count(L->getOperand(0))) continue;
// If we haven't seen a store yet, this is a live in use, otherwise
// use the stored value.
if (StoredValue) {
L->replaceAllUsesWith(StoredValue);
ReplacedLoads[L] = StoredValue;
} else {
LiveInLoads.push_back(L);
}
continue;
}
if (StoreInst *S = dyn_cast<StoreInst>(II)) {
// If this is a store to an unrelated pointer, ignore it.
if (!PointerMustAliases.count(S->getOperand(1))) continue;
// Remember that this is the active value in the block.
StoredValue = S->getOperand(0);
}
}
// The last stored value that happened is the live-out for the block.
assert(StoredValue && "Already checked that there is a store in block");
SSA.AddAvailableValue(BB, StoredValue);
BlockUses.clear();
}
// Now that all the intra-loop values are classified, set up the preheader.
// It gets a load of the pointer we're promoting, and it is the live-out value
// from the preheader.
LoadInst *PreheaderLoad = new LoadInst(SomePtr, SomePtr->getName() + ".promoted",
Preheader->getTerminator());
SSA.AddAvailableValue(Preheader, PreheaderLoad);
// Now that the preheader is good to go, set up the exit blocks. Each exit
// block gets a store of the live-out values that feed them. Since we've
// already told the SSA updater about the defs in the loop and the preheader
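The example is truncated here, but the remaining protocol is the standard SSAUpdater one: once every defining block has been registered with AddAvailableValue, each queued live-in load is rewritten with the value the updater computes for its block. A hedged sketch (hypothetical helper):

#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"

using namespace llvm;

// Ask the updater for the value live at LI's block (inserting PHIs as
// needed), then retire the load.
static void rewriteLiveInLoad(SSAUpdater &SSA, LoadInst *LI) {
  Value *V = SSA.GetValueInMiddleOfBlock(LI->getParent());
  LI->replaceAllUsesWith(V);
  LI->eraseFromParent();
}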
示例11: CS
/// DoPromotion - This method actually performs the promotion of the specified
/// arguments, and returns the new function. At this point, we know that it's
/// safe to do so.
static Function *
doPromotion(Function *F, SmallPtrSetImpl<Argument *> &ArgsToPromote,
SmallPtrSetImpl<Argument *> &ByValArgsToTransform,
Optional<function_ref<void(CallSite OldCS, CallSite NewCS)>>
ReplaceCallSite) {
// Start by computing a new prototype for the function, which is the same as
// the old function, but has modified arguments.
FunctionType *FTy = F->getFunctionType();
std::vector<Type *> Params;
using ScalarizeTable = std::set<std::pair<Type *, IndicesVector>>;
// ScalarizedElements - If we are promoting a pointer that has elements
// accessed out of it, keep track of which elements are accessed so that we
// can add one argument for each.
//
// Arguments that are directly loaded will have a zero element value here, to
// handle cases where there are both a direct load and GEP accesses.
std::map<Argument *, ScalarizeTable> ScalarizedElements;
// OriginalLoads - Keep track of a representative load instruction from the
// original function so that we can tell the alias analysis implementation
// what the new GEP/Load instructions we are inserting look like.
// We need to keep the original loads for each argument and the elements
// of the argument that are accessed.
std::map<std::pair<Argument *, IndicesVector>, LoadInst *> OriginalLoads;
// Attribute - Keep track of the parameter attributes for the arguments
// that we are *not* promoting. For the ones that we do promote, the parameter
// attributes are lost
SmallVector<AttributeSet, 8> ArgAttrVec;
AttributeList PAL = F->getAttributes();
// First, determine the new argument list
unsigned ArgNo = 0;
for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E;
++I, ++ArgNo) {
if (ByValArgsToTransform.count(&*I)) {
// Simple byval argument? Just add all the struct element types.
Type *AgTy = cast<PointerType>(I->getType())->getElementType();
StructType *STy = cast<StructType>(AgTy);
Params.insert(Params.end(), STy->element_begin(), STy->element_end());
ArgAttrVec.insert(ArgAttrVec.end(), STy->getNumElements(),
AttributeSet());
++NumByValArgsPromoted;
} else if (!ArgsToPromote.count(&*I)) {
// Unchanged argument
Params.push_back(I->getType());
ArgAttrVec.push_back(PAL.getParamAttributes(ArgNo));
} else if (I->use_empty()) {
// Dead argument (which are always marked as promotable)
++NumArgumentsDead;
// There may be remaining metadata uses of the argument for things like
// llvm.dbg.value. Replace them with undef.
I->replaceAllUsesWith(UndefValue::get(I->getType()));
} else {
// Okay, this is being promoted. This means that the only uses are loads
// or GEPs which are only used by loads
// In this table, we will track which indices are loaded from the argument
// (where direct loads are tracked as no indices).
ScalarizeTable &ArgIndices = ScalarizedElements[&*I];
for (User *U : I->users()) {
Instruction *UI = cast<Instruction>(U);
Type *SrcTy;
if (LoadInst *L = dyn_cast<LoadInst>(UI))
SrcTy = L->getType();
else
SrcTy = cast<GetElementPtrInst>(UI)->getSourceElementType();
IndicesVector Indices;
Indices.reserve(UI->getNumOperands() - 1);
// Since loads will only have a single operand, and GEPs only a single
// non-index operand, this will record direct loads without any indices,
// and gep+loads with the GEP indices.
for (User::op_iterator II = UI->op_begin() + 1, IE = UI->op_end();
II != IE; ++II)
Indices.push_back(cast<ConstantInt>(*II)->getSExtValue());
// GEPs with a single 0 index can be merged with direct loads
if (Indices.size() == 1 && Indices.front() == 0)
Indices.clear();
ArgIndices.insert(std::make_pair(SrcTy, Indices));
LoadInst *OrigLoad;
if (LoadInst *L = dyn_cast<LoadInst>(UI))
OrigLoad = L;
else
// Take any load, we will use it only to update Alias Analysis
OrigLoad = cast<LoadInst>(UI->user_back());
OriginalLoads[std::make_pair(&*I, Indices)] = OrigLoad;
}
// Add a parameter to the function for each element passed in.
for (const auto &ArgIndex : ArgIndices) {
// not allowed to dereference ->begin() if size() is 0
Params.push_back(GetElementPtrInst::getIndexedType(
cast<PointerType>(I->getType()->getScalarType())->getElementType(),
ArgIndex.second));
//......... portions of this code omitted .........
Example 12: PM
//......... portions of this code omitted .........
// get the address loaded by load instruction
Value *ptr_value = load->getPointerOperand();
// we're only interested in constant addresses
ConstantExpr * ptr_constant_expr = dyn_cast<ConstantExpr>( ptr_value );
if( !ptr_constant_expr ) continue;
ptr_constant_expr->dump();
// compute real address of constant pointer expression
Constant * ptr_constant = ConstantFoldConstantExpression( ptr_constant_expr, TD );
if( !ptr_constant ) continue;
ptr_constant->dump();
// convert to int constant
ConstantInt *int_constant = dyn_cast<ConstantInt>( ConstantExpr::getPtrToInt( ptr_constant, Type::getInt64Ty( context )));
if( !int_constant ) continue;
int_constant->dump();
// get data size
int data_length = TD->getTypeAllocSize( load->getType() );
ptr_value->getType()->dump();
// get real address (at last !)
const unsigned char * c_ptr = (const unsigned char *) int_constant->getLimitedValue();
printf( "%ld %d %d\n", c_ptr, constant_addresses_set.count( c_ptr ), data_length );
// check what's in this address
int isconst = 1;
for( int offset=0; offset<data_length; offset++ )
isconst &= constant_addresses_set.count( c_ptr + offset );
if( !isconst ) continue;
printf( "It is constant.\n" );
// make a LLVM const with the data
Constant *new_constant = NULL;
switch( data_length )
{
case 1: new_constant = ConstantInt::get( Type::getInt8Ty( context ), *(uint8_t*)c_ptr, false /* signed */ ); break;
case 2: new_constant = ConstantInt::get( Type::getInt16Ty( context ), *(uint16_t*)c_ptr, false /* signed */ ); break;
case 4: new_constant = ConstantInt::get( Type::getInt32Ty( context ), *(uint32_t*)c_ptr, false /* signed */ ); break;
case 8: new_constant = ConstantInt::get( Type::getInt64Ty( context ), *(uint64_t*)c_ptr, false /* signed */ ); break;
default:
{
StringRef const_data ( (const char *) c_ptr, data_length );
new_constant = ConstantArray::get( context, const_data, false /* dont add terminating null */ );
}
}
if( !new_constant ) continue;
new_constant->dump();
//~ // get the type that is loaded
const Type *Ty = load->getType();
// do we need a cast ?
if( load->getType() != new_constant->getType() )
{
new_constant = ConstantExpr::getBitCast( new_constant, Ty );
new_constant->dump();
}
// zap the load and replace with constant address
load->replaceAllUsesWith( new_constant );
printf( "\nREPLACED :...\n" );
load->dump();
new_constant->dump();
Changed = true;
}
if( Changed )
continue; // re-optimize and do another pass of constant load elimination
// if we can't do anything else, do an inlining pass
if( inline_iterations > 0 )
{
inline_iterations --;
PM_Inline.doInitialization();
Changed |= PM_Inline.run( *specialized_func );
PM_Inline.doFinalization();
//~ for( int i=0; i<3; i++ )
{
PM.doInitialization();
Changed |= PM.run( *specialized_func );
PM.doFinalization();
}
}
if( iterations>0 && !Changed )
iterations--;
} while( Changed || iterations>0 );
return specialized_func;
}
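To close, the pattern example 12 implements reduces to one reusable step once the bytes behind a pointer are known constant. A hedged recap (hypothetical helper; the example itself leaves the dead load in place for later cleanup passes):

#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Replace a load from memory known to contain Known's bytes with the
// constant itself, bitcasting when the loaded type differs.
static void foldLoadToConstant(LoadInst *Load, Constant *Known) {
  if (Known->getType() != Load->getType())
    Known = ConstantExpr::getBitCast(Known, Load->getType());
  Load->replaceAllUsesWith(Known);
}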