This page collects typical usage examples of the C++ method BasicBlock::iterator::mayWriteToMemory. If you have been asking yourself how iterator::mayWriteToMemory works, when to call it, or what real-world uses of it look like, the hand-picked examples below should help. You can also browse further usage examples of the enclosing class, BasicBlock::iterator.
The sections below show 10 code examples of iterator::mayWriteToMemory, sorted by popularity by default.
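Before the examples, here is a minimal sketch of how mayWriteToMemory is typically queried while walking a BasicBlock. This is illustrative only; the helper name blockMayWriteToMemory is hypothetical and does not appear in the examples below.

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"

// Returns true if any instruction in BB might modify memory.
// mayWriteToMemory() is conservative: stores and most calls answer true,
// while pure arithmetic answers false.
static bool blockMayWriteToMemory(llvm::BasicBlock &BB) {
  for (llvm::BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
    if (I->mayWriteToMemory())
      return true;
  return false;
}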
Example 1: isSafeToLoadUnconditionally
/// isSafeToLoadUnconditionally - Return true if we know that executing a load
/// from this value cannot trap. If it is not obviously safe to load from the
/// specified pointer, we do a quick local scan of the basic block containing
/// ScanFrom, to determine if the address is already accessed.
bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom) {
// If it is an alloca it is always safe to load from.
if (isa<AllocaInst>(V)) return true;
// If it is a global variable it is mostly safe to load from.
if (const GlobalValue *GV = dyn_cast<GlobalVariable>(V))
// Don't try to evaluate aliases. External weak GV can be null.
return !isa<GlobalAlias>(GV) && !GV->hasExternalWeakLinkage();
// Otherwise, be a little bit aggressive by scanning the local block where we
// want to check to see if the pointer is already being loaded or stored
// from/to. If so, the previous load or store would have already trapped,
// so there is no harm doing an extra load (also, CSE will later eliminate
// the load entirely).
BasicBlock::iterator BBI = ScanFrom, E = ScanFrom->getParent()->begin();
while (BBI != E) {
--BBI;
// If we see a free or a call which may write to memory (i.e. which might do
// a free) the pointer could be marked invalid.
if (isa<FreeInst>(BBI) ||
(isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
!isa<DbgInfoIntrinsic>(BBI)))
return false;
if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
if (LI->getOperand(0) == V) return true;
} else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
if (SI->getOperand(1) == V) return true;
}
}
return false;
}
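A transform might consult this helper before speculating a load, roughly as follows. This is a hedged sketch against the older two-argument signature shown above; maybeSpeculateLoad and its parameters are hypothetical, and the LoadInst constructor used is the pre-opaque-pointer form matching this era of the API.

#include "llvm/Analysis/Loads.h"   // header placement has varied across LLVM versions
#include "llvm/IR/Instructions.h"

static void maybeSpeculateLoad(llvm::Value *Ptr, llvm::Instruction *InsertPt) {
  // Hoist a load only when it provably cannot introduce a new trap.
  if (llvm::isSafeToLoadUnconditionally(Ptr, InsertPt)) {
    llvm::LoadInst *Speculated = new llvm::LoadInst(Ptr, "spec.load", InsertPt);
    (void)Speculated; // ...rewrite uses of the original load here...
  }
}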
Example 2: CompareIfRegionBlock
/// Compare blocks from two if-regions, where \param Head1 is the entry of the
/// 1st if-region. \param Head2 is the entry of the 2nd if-region. \param
/// Block1 is a block in the 1st if-region to compare. \param Block2 is a block
/// in the 2nd if-region to compare. \returns true if \param Block1 and \param
/// Block2 have identical instructions and do not have memory reference alias
/// with \param Head2.
///
bool FlattenCFGOpt::CompareIfRegionBlock(BasicBlock *Head1, BasicBlock *Head2,
BasicBlock *Block1,
BasicBlock *Block2) {
TerminatorInst *PTI2 = Head2->getTerminator();
Instruction *PBI2 = Head2->begin();
bool eq1 = (Block1 == Head1);
bool eq2 = (Block2 == Head2);
if (eq1 || eq2) {
// An empty then-path or else-path.
return (eq1 == eq2);
}
// Check whether instructions in Block1 and Block2 are identical
// and do not alias with instructions in Head2.
BasicBlock::iterator iter1 = Block1->begin();
BasicBlock::iterator end1 = Block1->getTerminator();
BasicBlock::iterator iter2 = Block2->begin();
BasicBlock::iterator end2 = Block2->getTerminator();
while (1) {
if (iter1 == end1) {
if (iter2 != end2)
return false;
break;
}
if (!iter1->isIdenticalTo(iter2))
return false;
// Illegal to remove instructions with side effects except
// non-volatile stores.
if (iter1->mayHaveSideEffects()) {
Instruction *CurI = &*iter1;
StoreInst *SI = dyn_cast<StoreInst>(CurI);
if (!SI || SI->isVolatile())
return false;
}
// For simplicity and speed, skip the data dependency check entirely
// by rejecting any instruction that reads from memory.
if (iter1->mayReadFromMemory())
return false;
if (iter1->mayWriteToMemory()) {
for (BasicBlock::iterator BI = PBI2, BE = PTI2; BI != BE; ++BI) {
if (BI->mayReadFromMemory() || BI->mayWriteToMemory()) {
// Check alias with Head2.
if (!AA || AA->alias(iter1, BI))
return false;
}
}
}
++iter1;
++iter2;
}
return true;
}
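For intuition, the source-level shape this comparison lets FlattenCFG merge looks roughly like the following (illustrative C++, not taken from the pass):

// Two if-regions with identical bodies that neither read memory nor alias
// the second region's entry block...
void before(int a, int b, int &x) {
  if (a == 1) x = 1;   // 1st if-region: Head1 / Block1
  if (b == 1) x = 1;   // 2nd if-region: Head2 / Block2
}
// ...can be flattened into a single region:
void after(int a, int b, int &x) {
  if (a == 1 || b == 1) x = 1;
}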
Example 3: assert
Value *BoUpSLP::isUnsafeToSink(Instruction *Src, Instruction *Dst) {
assert(Src->getParent() == Dst->getParent() && "Not the same BB");
BasicBlock::iterator I = Src, E = Dst;
/// Scan all of the instructions from SRC to DST and check if
/// the source may alias.
for (++I; I != E; ++I) {
// Ignore store instructions that are marked as 'ignore'.
if (MemBarrierIgnoreList.count(I)) continue;
if (Src->mayWriteToMemory()) /* Write */ {
if (!I->mayReadOrWriteMemory()) continue;
} else /* Read */ {
if (!I->mayWriteToMemory()) continue;
}
AliasAnalysis::Location A = getLocation(&*I);
AliasAnalysis::Location B = getLocation(Src);
if (!A.Ptr || !B.Ptr || AA->alias(A, B))
return I;
}
return 0;
}
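The hazard this scan detects can be pictured at the source level (illustrative C++; p and q are hypothetical pointers that may alias):

void use(int);
void hazard(int *p, int *q) {
  *p = 1;       // Src: writes memory
  int t = *q;   // may alias *p, so isUnsafeToSink returns this instruction
  use(t);       // sinking the store past the read would change what t sees
}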
Example 4: Ranges
/// tryMergingIntoMemset - When scanning forward over instructions, we look for
/// some other patterns to fold away. In particular, this looks for stores to
/// neighboring locations of memory. If it sees enough consecutive ones, it
/// attempts to merge them together into a memcpy/memset.
Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
Value *StartPtr, Value *ByteVal) {
if (TD == 0) return 0;
// Okay, so we now have a single store of a splattable value. Scan to find
// all subsequent stores of the same value at offsets from the same pointer.
// Join these together into ranges, so we can decide whether contiguous blocks
// are stored.
MemsetRanges Ranges(*TD);
BasicBlock::iterator BI = StartInst;
for (++BI; !isa<TerminatorInst>(BI); ++BI) {
if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
// If the instruction is readnone, ignore it, otherwise bail out. We
// don't even allow readonly here because we don't want something like:
// A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
break;
continue;
}
if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
// If this is a store, see if we can merge it in.
if (!NextStore->isSimple()) break;
// Check to see if this stored value is of the same byte-splattable value.
if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
break;
// Check to see if this store is to a constant offset from the start ptr.
int64_t Offset;
if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(),
Offset, *TD))
break;
Ranges.addStore(Offset, NextStore);
} else {
MemSetInst *MSI = cast<MemSetInst>(BI);
if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
!isa<ConstantInt>(MSI->getLength()))
break;
// Check to see if this store is to a constant offset from the start ptr.
int64_t Offset;
if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, *TD))
break;
Ranges.addMemSet(Offset, MSI);
}
}
// If we have no ranges, then we just had a single store with nothing that
// could be merged in. This is a very common case of course.
if (Ranges.empty())
return 0;
// If we had at least one store that could be merged in, add the starting
// store as well. We try to avoid this unless there is at least something
// interesting as a small compile-time optimization.
Ranges.addInst(0, StartInst);
// If we create any memsets, we put it right before the first instruction that
// isn't part of the memset block. This ensures that the memset is dominated
// by any addressing instruction needed by the start of the block.
IRBuilder<> Builder(BI);
// Now that we have full information about ranges, loop over the ranges and
// emit memset's for anything big enough to be worthwhile.
Instruction *AMemSet = 0;
for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
I != E; ++I) {
const MemsetRange &Range = *I;
if (Range.TheStores.size() == 1) continue;
// If it is profitable to lower this range to memset, do so now.
if (!Range.isProfitableToUseMemset(*TD))
continue;
// Otherwise, we do want to transform this! Create a new memset.
// Get the starting pointer of the block.
StartPtr = Range.StartPtr;
// Determine alignment
unsigned Alignment = Range.Alignment;
if (Alignment == 0) {
Type *EltType =
cast<PointerType>(StartPtr->getType())->getElementType();
Alignment = TD->getABITypeAlignment(EltType);
}
AMemSet =
Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment);
DEBUG(dbgs() << "Replace stores:\n";
//......... (rest of the code omitted) .........
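The source pattern this folds looks roughly like the following (illustrative C++; whether the merged form is emitted depends on isProfitableToUseMemset):

#include <cstring>
// Before: adjacent stores of one splattable byte value...
void before(unsigned char *p) { p[0] = 0; p[1] = 0; p[2] = 0; p[3] = 0; }
// ...after merging, conceptually:
void after(unsigned char *p) { std::memset(p, 0, 4); }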
Example 5: SimplifyStoreAtEndOfBlock
/// SimplifyStoreAtEndOfBlock - Turn things like:
/// if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
/// *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
BasicBlock *StoreBB = SI.getParent();
// Check to see if the successor block has exactly two incoming edges. If
// so, see if the other predecessor contains a store to the same location.
// If so, insert a PHI node (if needed) and move the stores down.
BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
// Determine whether Dest has exactly two predecessors and, if so, compute
// the other predecessor.
pred_iterator PI = pred_begin(DestBB);
BasicBlock *P = *PI;
BasicBlock *OtherBB = nullptr;
if (P != StoreBB)
OtherBB = P;
if (++PI == pred_end(DestBB))
return false;
P = *PI;
if (P != StoreBB) {
if (OtherBB)
return false;
OtherBB = P;
}
if (++PI != pred_end(DestBB))
return false;
// Bail out if all the relevant blocks aren't distinct (this can happen,
// for example, if SI is in an infinite loop)
if (StoreBB == DestBB || OtherBB == DestBB)
return false;
// Verify that the other block ends in a branch and is not otherwise empty.
BasicBlock::iterator BBI(OtherBB->getTerminator());
BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
if (!OtherBr || BBI == OtherBB->begin())
return false;
// If the other block ends in an unconditional branch, check for the 'if then
// else' case. There is an instruction before the branch.
StoreInst *OtherStore = nullptr;
if (OtherBr->isUnconditional()) {
--BBI;
// Skip over debugging info.
while (isa<DbgInfoIntrinsic>(BBI) ||
(isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
if (BBI==OtherBB->begin())
return false;
--BBI;
}
// If this isn't a store, isn't a store to the same location, or is not the
// right kind of store, bail out.
OtherStore = dyn_cast<StoreInst>(BBI);
if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
!SI.isSameOperationAs(OtherStore))
return false;
} else {
// Otherwise, the other block ended with a conditional branch. If one of the
// destinations is StoreBB, then we have the if/then case.
if (OtherBr->getSuccessor(0) != StoreBB &&
OtherBr->getSuccessor(1) != StoreBB)
return false;
// Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
// if/then triangle. See if there is a store to the same ptr as SI that
// lives in OtherBB.
for (;; --BBI) {
// Check to see if we find the matching store.
if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
if (OtherStore->getOperand(1) != SI.getOperand(1) ||
!SI.isSameOperationAs(OtherStore))
return false;
break;
}
// If we find something that may be using or overwriting the stored
// value, or if we run out of instructions, we can't do the xform.
if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
BBI == OtherBB->begin())
return false;
}
// In order to eliminate the store in OtherBr, we have to
// make sure nothing reads or overwrites the stored value in
// StoreBB.
for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
// FIXME: This should really be AA driven.
if (I->mayReadFromMemory() || I->mayWriteToMemory())
return false;
}
}
//......... (rest of the code omitted) .........
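In source terms, the two shapes handled above look roughly like this (illustrative C++; the ternaries stand in for the PHI node feeding the single sunk store):

void beforeIfThenElse(int c, int *P, int v1, int v2) {
  if (c) { *P = v1; } else { *P = v2; }
}
void afterIfThenElse(int c, int *P, int v1, int v2) {
  *P = c ? v1 : v2;   // one store in the successor
}
void beforeTriangle(int c, int *P, int v1, int v2) {
  *P = v1; if (c) { *P = v2; }
}
void afterTriangle(int c, int *P, int v1, int v2) {
  *P = c ? v2 : v1;   // one store in the successor
}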
Example 6: isSafeToLoadUnconditionally
/// isSafeToLoadUnconditionally - Return true if we know that executing a load
/// from this value cannot trap. If it is not obviously safe to load from the
/// specified pointer, we do a quick local scan of the basic block containing
/// ScanFrom, to determine if the address is already accessed.
bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
unsigned Align, const DataLayout *TD) {
int64_t ByteOffset = 0;
Value *Base = V;
Base = GetPointerBaseWithConstantOffset(V, ByteOffset, TD);
if (ByteOffset < 0) // out of bounds
return false;
Type *BaseType = 0;
unsigned BaseAlign = 0;
if (const AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
// An alloca is safe to load from as long as it is suitably aligned.
BaseType = AI->getAllocatedType();
BaseAlign = AI->getAlignment();
} else if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
// Global variables are safe to load from but their size cannot be
// guaranteed if they are overridden.
if (!GV->mayBeOverridden()) {
BaseType = GV->getType()->getElementType();
BaseAlign = GV->getAlignment();
}
}
if (BaseType && BaseType->isSized()) {
if (TD && BaseAlign == 0)
BaseAlign = TD->getPrefTypeAlignment(BaseType);
if (Align <= BaseAlign) {
if (!TD)
return true; // Loading directly from an alloca or global is OK.
// Check if the load is within the bounds of the underlying object.
PointerType *AddrTy = cast<PointerType>(V->getType());
uint64_t LoadSize = TD->getTypeStoreSize(AddrTy->getElementType());
if (ByteOffset + LoadSize <= TD->getTypeAllocSize(BaseType) &&
(Align == 0 || (ByteOffset % Align) == 0))
return true;
}
}
// Otherwise, be a little bit aggressive by scanning the local block where we
// want to check to see if the pointer is already being loaded or stored
// from/to. If so, the previous load or store would have already trapped,
// so there is no harm doing an extra load (also, CSE will later eliminate
// the load entirely).
BasicBlock::iterator BBI = ScanFrom, E = ScanFrom->getParent()->begin();
while (BBI != E) {
--BBI;
// If we see a free or a call which may write to memory (i.e. which might do
// a free) the pointer could be marked invalid.
if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
!isa<DbgInfoIntrinsic>(BBI))
return false;
if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
if (AreEquivalentAddressValues(LI->getOperand(0), V)) return true;
} else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
if (AreEquivalentAddressValues(SI->getOperand(1), V)) return true;
}
}
return false;
}
Example 7: Ranges
/// tryAggregating - When scanning forward over instructions, we look for
/// other loads or stores that could be aggregated with this one.
/// Returns the last instruction added (if one was added) since we might have
/// removed some loads or stores and that might invalidate an iterator.
Instruction *AggregateGlobalOpsOpt::tryAggregating(Instruction *StartInst, Value *StartPtr,
bool DebugThis) {
if (TD == 0) return 0;
Module* M = StartInst->getParent()->getParent()->getParent();
LLVMContext& Context = StartInst->getContext();
Type* int8Ty = Type::getInt8Ty(Context);
Type* sizeTy = Type::getInt64Ty(Context);
Type* globalInt8PtrTy = int8Ty->getPointerTo(globalSpace);
bool isLoad = isa<LoadInst>(StartInst);
bool isStore = isa<StoreInst>(StartInst);
Instruction *lastAddedInsn = NULL;
Instruction *LastLoadOrStore = NULL;
SmallVector<Instruction*, 8> toRemove;
// Okay, so we now have a single global load/store. Scan to find
// all subsequent loads or stores at offsets from the same pointer.
// Join these together into ranges, so we can decide whether contiguous blocks
// are stored.
MemOpRanges Ranges(*TD);
// Put the first store in since we want to preserve the order.
Ranges.addInst(0, StartInst);
BasicBlock::iterator BI = StartInst;
for (++BI; !isa<TerminatorInst>(BI); ++BI) {
if( isGlobalLoadOrStore(BI, globalSpace, isLoad, isStore) ) {
// OK!
} else {
// If the instruction is readnone, ignore it, otherwise bail out. We
// don't even allow readonly here because we don't want something like:
// A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
if (BI->mayWriteToMemory())
break;
if (isStore && BI->mayReadFromMemory())
break;
continue;
}
if ( isStore && isa<StoreInst>(BI) ) {
StoreInst *NextStore = cast<StoreInst>(BI);
// If this is a store, see if we can merge it in.
if (!NextStore->isSimple()) break;
// Check to see if this store is to a constant offset from the start ptr.
int64_t Offset;
if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset, *TD))
break;
Ranges.addStore(Offset, NextStore);
LastLoadOrStore = NextStore;
} else {
LoadInst *NextLoad = cast<LoadInst>(BI);
if (!NextLoad->isSimple()) break;
// Check to see if this load is to a constant offset from the start ptr.
int64_t Offset;
if (!IsPointerOffset(StartPtr, NextLoad->getPointerOperand(), Offset, *TD))
break;
Ranges.addLoad(Offset, NextLoad);
LastLoadOrStore = NextLoad;
}
}
// If we have no ranges, then we just had a single store with nothing that
// could be merged in. This is a very common case of course.
if (!Ranges.moreThanOneOp())
return 0;
// Divide the instructions between StartInst and LastLoadOrStore into
// addressing, memops, and uses of memops (uses of loads)
reorderAddressingMemopsUses(StartInst, LastLoadOrStore, DebugThis);
Instruction* insertBefore = StartInst;
IRBuilder<> builder(insertBefore);
// Now that we have full information about ranges, loop over the ranges and
// emit memcpy's for anything big enough to be worthwhile.
for (MemOpRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
I != E; ++I) {
const MemOpRange &Range = *I;
Value* oldBaseI = NULL;
Value* newBaseI = NULL;
if (Range.TheStores.size() == 1) continue; // Don't bother if there's only one thing...
builder.SetInsertPoint(insertBefore);
// Otherwise, we do want to transform this! Create a new memcpy.
// Get the starting pointer of the block.
StartPtr = Range.StartPtr;
//......... (rest of the code omitted) .........
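Schematically, the rewrite replaces several adjacent accesses through a global-space pointer with one bulk transfer (illustrative C++; bulk_get is a hypothetical stand-in for the runtime call constructed in the omitted tail):

void bulk_get(void *dst, const int *src, unsigned long n); // hypothetical bulk copy
int beforeAgg(const int *g) { return g[0] + g[1] + g[2]; } // three (remote) loads
int afterAgg(const int *g) {
  int buf[3];
  bulk_get(buf, g, sizeof buf);      // one aggregated transfer...
  return buf[0] + buf[1] + buf[2];   // ...then cheap local reads
}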
Example 8: isSafeToLoadUnconditionally
/// \brief Check if executing a load of this pointer value cannot trap.
///
/// If DT is specified this method performs context-sensitive analysis.
///
/// If it is not obviously safe to load from the specified pointer, we do
/// a quick local scan of the basic block containing \c ScanFrom, to determine
/// if the address is already accessed.
///
/// This uses the pointee type to determine how many bytes need to be safe to
/// load from the pointer.
bool llvm::isSafeToLoadUnconditionally(Value *V, unsigned Align,
Instruction *ScanFrom,
const DominatorTree *DT,
const TargetLibraryInfo *TLI) {
const DataLayout &DL = ScanFrom->getModule()->getDataLayout();
// Zero alignment means that the load has the ABI alignment for the target
if (Align == 0)
Align = DL.getABITypeAlignment(V->getType()->getPointerElementType());
assert(isPowerOf2_32(Align));
// If DT is not specified we can't make a context-sensitive query
const Instruction* CtxI = DT ? ScanFrom : nullptr;
if (isDereferenceableAndAlignedPointer(V, Align, DL, CtxI, DT, TLI))
return true;
int64_t ByteOffset = 0;
Value *Base = V;
Base = GetPointerBaseWithConstantOffset(V, ByteOffset, DL);
if (ByteOffset < 0) // out of bounds
return false;
Type *BaseType = nullptr;
unsigned BaseAlign = 0;
if (const AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
// An alloca is safe to load from as long as it is suitably aligned.
BaseType = AI->getAllocatedType();
BaseAlign = AI->getAlignment();
} else if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
// Global variables are not necessarily safe to load from if they are
// interposed arbitrarily. Their size may change or they may be weak and
// require a test to determine if they were in fact provided.
if (!GV->isInterposable()) {
BaseType = GV->getType()->getElementType();
BaseAlign = GV->getAlignment();
}
}
PointerType *AddrTy = cast<PointerType>(V->getType());
uint64_t LoadSize = DL.getTypeStoreSize(AddrTy->getElementType());
// If we found a base allocated type from either an alloca or global variable,
// try to see if we are definitively within the allocated region. We need to
// know the size of the base type and the loaded type to do anything in this
// case.
if (BaseType && BaseType->isSized()) {
if (BaseAlign == 0)
BaseAlign = DL.getPrefTypeAlignment(BaseType);
if (Align <= BaseAlign) {
// Check if the load is within the bounds of the underlying object.
if (ByteOffset + LoadSize <= DL.getTypeAllocSize(BaseType) &&
((ByteOffset % Align) == 0))
return true;
}
}
// Otherwise, be a little bit aggressive by scanning the local block where we
// want to check to see if the pointer is already being loaded or stored
// from/to. If so, the previous load or store would have already trapped,
// so there is no harm doing an extra load (also, CSE will later eliminate
// the load entirely).
BasicBlock::iterator BBI = ScanFrom->getIterator(),
E = ScanFrom->getParent()->begin();
// We can at least always strip pointer casts even though we can't use the
// base here.
V = V->stripPointerCasts();
while (BBI != E) {
--BBI;
// If we see a free or a call which may write to memory (i.e. which might do
// a free) the pointer could be marked invalid.
if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
!isa<DbgInfoIntrinsic>(BBI))
return false;
Value *AccessedPtr;
unsigned AccessedAlign;
if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
AccessedPtr = LI->getPointerOperand();
AccessedAlign = LI->getAlignment();
} else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
AccessedPtr = SI->getPointerOperand();
AccessedAlign = SI->getAlignment();
} else
continue;
//......... (rest of the code omitted) .........
Example 9: if
Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
Value *Val = SI.getOperand(0);
Value *Ptr = SI.getOperand(1);
// Attempt to improve the alignment.
if (DL) {
unsigned KnownAlign =
getOrEnforceKnownAlignment(Ptr, DL->getPrefTypeAlignment(Val->getType()),
DL);
unsigned StoreAlign = SI.getAlignment();
unsigned EffectiveStoreAlign = StoreAlign != 0 ? StoreAlign :
DL->getABITypeAlignment(Val->getType());
if (KnownAlign > EffectiveStoreAlign)
SI.setAlignment(KnownAlign);
else if (StoreAlign == 0)
SI.setAlignment(EffectiveStoreAlign);
}
// Don't hack volatile/atomic stores.
// FIXME: Some bits are legal for atomic stores; needs refactoring.
if (!SI.isSimple()) return nullptr;
// If the RHS is an alloca with a single use, zapify the store, making the
// alloca dead.
if (Ptr->hasOneUse()) {
if (isa<AllocaInst>(Ptr))
return EraseInstFromFunction(SI);
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
if (isa<AllocaInst>(GEP->getOperand(0))) {
if (GEP->getOperand(0)->hasOneUse())
return EraseInstFromFunction(SI);
}
}
}
// Do really simple DSE, to catch cases where there are several consecutive
// stores to the same location, separated by a few arithmetic operations. This
// situation often occurs with bitfield accesses.
BasicBlock::iterator BBI = &SI;
for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
--ScanInsts) {
--BBI;
// Don't count debug info directives, lest they affect codegen,
// and we skip pointer-to-pointer bitcasts, which are NOPs.
if (isa<DbgInfoIntrinsic>(BBI) ||
(isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
ScanInsts++;
continue;
}
if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
// Prev store isn't volatile, and stores to the same location?
if (PrevSI->isSimple() && equivalentAddressValues(PrevSI->getOperand(1),
SI.getOperand(1))) {
++NumDeadStore;
++BBI;
EraseInstFromFunction(*PrevSI);
continue;
}
break;
}
// If this is a load, we have to stop. However, if the loaded value is from
// the pointer we're loading and is producing the pointer we're storing,
// then *this* store is dead (X = load P; store X -> P).
if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
LI->isSimple())
return EraseInstFromFunction(SI);
// Otherwise, this is a load from some other location. Stores before it
// may not be dead.
break;
}
// Don't skip over loads or things that can modify memory.
if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
break;
}
// store X, null -> turns into 'unreachable' in SimplifyCFG
if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
if (!isa<UndefValue>(Val)) {
SI.setOperand(0, UndefValue::get(Val->getType()));
if (Instruction *U = dyn_cast<Instruction>(Val))
Worklist.Add(U); // Dropped a use.
}
return nullptr; // Do not modify these!
}
// store undef, Ptr -> noop
if (isa<UndefValue>(Val))
return EraseInstFromFunction(SI);
// If the pointer destination is a cast, see if we can fold the cast into the
// source instead.
if (isa<CastInst>(Ptr))
if (Instruction *Res = InstCombineStoreToCast(*this, SI))
return Res;
//......... (rest of the code omitted) .........
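The "really simple DSE" scan above removes patterns like this one (illustrative C++; only instructions that neither read nor write memory may sit between the two stores):

void simpleDSE(int *p, int c) {
  *p = c + 1;     // dead: overwritten below with no intervening read
  int b = c * 2;  // arithmetic only, so the backward scan skips it
  *p = b;         // visiting this store finds PrevSI above and erases it
}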
Example 10: isSafeToLoadUnconditionally
/// \brief Check if executing a load of this pointer value cannot trap.
///
/// If it is not obviously safe to load from the specified pointer, we do
/// a quick local scan of the basic block containing \c ScanFrom, to determine
/// if the address is already accessed.
///
/// This uses the pointee type to determine how many bytes need to be safe to
/// load from the pointer.
bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
unsigned Align, const DataLayout *DL) {
int64_t ByteOffset = 0;
Value *Base = V;
Base = GetPointerBaseWithConstantOffset(V, ByteOffset, DL);
if (ByteOffset < 0) // out of bounds
return false;
Type *BaseType = nullptr;
unsigned BaseAlign = 0;
if (const AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
// An alloca is safe to load from as long as it is suitably aligned.
BaseType = AI->getAllocatedType();
BaseAlign = AI->getAlignment();
} else if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
// Global variables are not necessarily safe to load from if they are
// overridden. Their size may change or they may be weak and require a test
// to determine if they were in fact provided.
if (!GV->mayBeOverridden()) {
BaseType = GV->getType()->getElementType();
BaseAlign = GV->getAlignment();
}
}
PointerType *AddrTy = cast<PointerType>(V->getType());
uint64_t LoadSize = DL ? DL->getTypeStoreSize(AddrTy->getElementType()) : 0;
// If we found a base allocated type from either an alloca or global variable,
// try to see if we are definitively within the allocated region. We need to
// know the size of the base type and the loaded type to do anything in this
// case, so only try this when we have the DataLayout available.
if (BaseType && BaseType->isSized() && DL) {
if (BaseAlign == 0)
BaseAlign = DL->getPrefTypeAlignment(BaseType);
if (Align <= BaseAlign) {
// Check if the load is within the bounds of the underlying object.
if (ByteOffset + LoadSize <= DL->getTypeAllocSize(BaseType) &&
(Align == 0 || (ByteOffset % Align) == 0))
return true;
}
}
// Otherwise, be a little bit aggressive by scanning the local block where we
// want to check to see if the pointer is already being loaded or stored
// from/to. If so, the previous load or store would have already trapped,
// so there is no harm doing an extra load (also, CSE will later eliminate
// the load entirely).
BasicBlock::iterator BBI = ScanFrom, E = ScanFrom->getParent()->begin();
// We can at least always strip pointer casts even though we can't use the
// base here.
V = V->stripPointerCasts();
while (BBI != E) {
--BBI;
// If we see a free or a call which may write to memory (i.e. which might do
// a free) the pointer could be marked invalid.
if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
!isa<DbgInfoIntrinsic>(BBI))
return false;
Value *AccessedPtr;
if (LoadInst *LI = dyn_cast<LoadInst>(BBI))
AccessedPtr = LI->getPointerOperand();
else if (StoreInst *SI = dyn_cast<StoreInst>(BBI))
AccessedPtr = SI->getPointerOperand();
else
continue;
// Handle trivial cases even w/o DataLayout or other work.
if (AccessedPtr == V)
return true;
if (!DL)
continue;
auto *AccessedTy = cast<PointerType>(AccessedPtr->getType());
if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
LoadSize <= DL->getTypeStoreSize(AccessedTy->getElementType()))
return true;
}
return false;
}