This article collects typical usage examples of the C++ method BasicBlock::iterator::mayReadFromMemory. If you are wondering what iterator::mayReadFromMemory does, or how to use it in practice, the curated code examples below may help. You can also browse further usage examples of its containing class, BasicBlock::iterator.
Six code examples of iterator::mayReadFromMemory are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
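Before the full pass sources, here is a minimal sketch of the pattern all six examples share: walking a BasicBlock with a BasicBlock::iterator and asking each instruction whether it may touch memory. The helper name blockMayReadFromMemory is hypothetical (it does not appear in the examples below), and the include paths assume LLVM 3.3 or later; older releases used e.g. "llvm/BasicBlock.h" instead.

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Return true if any instruction in BB may read from memory.
// Loads, volatile/atomic operations, and most calls report true;
// pure arithmetic reports false.
static bool blockMayReadFromMemory(BasicBlock *BB) {
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
    if (I->mayReadFromMemory()) {
      errs() << "may read from memory: " << *I << "\n";
      return true;
    }
  }
  return false;
}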
Example 1: CompareIfRegionBlock
/// Compare blocks from two if-regions, where \param Head1 is the entry of the
/// 1st if-region. \param Head2 is the entry of the 2nd if-region. \param
/// Block1 is a block in the 1st if-region to compare. \param Block2 is a block
/// in the 2nd if-region to compare. \returns true if \param Block1 and \param
/// Block2 have identical instructions and do not have memory reference alias
/// with \param Head2.
///
bool FlattenCFGOpt::CompareIfRegionBlock(BasicBlock *Head1, BasicBlock *Head2,
                                         BasicBlock *Block1,
                                         BasicBlock *Block2) {
  TerminatorInst *PTI2 = Head2->getTerminator();
  Instruction *PBI2 = Head2->begin();

  bool eq1 = (Block1 == Head1);
  bool eq2 = (Block2 == Head2);
  if (eq1 || eq2) {
    // An empty then-path or else-path.
    return (eq1 == eq2);
  }

  // Check whether instructions in Block1 and Block2 are identical
  // and do not alias with instructions in Head2.
  BasicBlock::iterator iter1 = Block1->begin();
  BasicBlock::iterator end1 = Block1->getTerminator();
  BasicBlock::iterator iter2 = Block2->begin();
  BasicBlock::iterator end2 = Block2->getTerminator();

  while (1) {
    if (iter1 == end1) {
      if (iter2 != end2)
        return false;
      break;
    }

    if (!iter1->isIdenticalTo(iter2))
      return false;

    // Illegal to remove instructions with side effects except
    // non-volatile stores.
    if (iter1->mayHaveSideEffects()) {
      Instruction *CurI = &*iter1;
      StoreInst *SI = dyn_cast<StoreInst>(CurI);
      if (!SI || SI->isVolatile())
        return false;
    }

    // For simplicity and speed, data dependency check can be
    // avoided if read from memory doesn't exist.
    if (iter1->mayReadFromMemory())
      return false;

    if (iter1->mayWriteToMemory()) {
      for (BasicBlock::iterator BI = PBI2, BE = PTI2; BI != BE; ++BI) {
        if (BI->mayReadFromMemory() || BI->mayWriteToMemory()) {
          // Check alias with Head2.
          if (!AA || AA->alias(iter1, BI))
            return false;
        }
      }
    }

    ++iter1;
    ++iter2;
  }

  return true;
}
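This FlattenCFG helper shows the conservative use of the API: any instruction that may read from memory makes the blocks non-mergeable outright, while an instruction that may write is tolerated only when alias analysis (AA) can prove it does not conflict with the memory operations in Head2.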
Example 2: SinkUnusedInvariants
/// If there's a single exit block, sink any loop-invariant values that
/// were defined in the preheader but not used inside the loop into the
/// exit block to reduce register pressure in the loop.
void IndVarSimplify::SinkUnusedInvariants(Loop *L) {
  BasicBlock *ExitBlock = L->getExitBlock();
  if (!ExitBlock) return;

  BasicBlock *Preheader = L->getLoopPreheader();
  if (!Preheader) return;

  Instruction *InsertPt = ExitBlock->getFirstNonPHI();
  BasicBlock::iterator I = Preheader->getTerminator();
  while (I != Preheader->begin()) {
    --I;
    // New instructions were inserted at the end of the preheader.
    if (isa<PHINode>(I))
      break;

    // Don't move instructions which might have side effects, since the side
    // effects need to complete before instructions inside the loop.  Also
    // don't move instructions which might read memory, since the loop may
    // modify memory.  Note that it's okay if the instruction might have
    // undefined behavior: LoopSimplify guarantees that the preheader
    // dominates the exit block.
    if (I->mayHaveSideEffects() || I->mayReadFromMemory())
      continue;

    // Don't sink static AllocaInsts out of the entry block, which would
    // turn them into dynamic allocas!
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      if (AI->isStaticAlloca())
        continue;

    // Determine if there is a use in or before the loop (direct or
    // otherwise).
    bool UsedInLoop = false;
    for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
         UI != UE; ++UI) {
      BasicBlock *UseBB = cast<Instruction>(UI)->getParent();
      if (PHINode *P = dyn_cast<PHINode>(UI)) {
        unsigned i =
          PHINode::getIncomingValueNumForOperand(UI.getOperandNo());
        UseBB = P->getIncomingBlock(i);
      }
      if (UseBB == Preheader || L->contains(UseBB)) {
        UsedInLoop = true;
        break;
      }
    }

    // If there is, the def must remain in the preheader.
    if (UsedInLoop)
      continue;

    // Otherwise, sink it to the exit block.
    Instruction *ToMove = I;
    bool Done = false;

    if (I != Preheader->begin())
      --I;
    else
      Done = true;

    ToMove->moveBefore(InsertPt);
    if (Done)
      break;
    InsertPt = ToMove;
  }
}
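Here mayReadFromMemory plays the opposite role: instead of rejecting a transformation outright, it merely skips a candidate. An instruction in the preheader that may read memory cannot be sunk past the loop body, because the loop may write to the same locations, so the pass leaves it in place and keeps scanning.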
Example 3: Ranges
/// tryMergingIntoMemset - When scanning forward over instructions, we look for
/// some other patterns to fold away. In particular, this looks for stores to
/// neighboring locations of memory. If it sees enough consecutive ones, it
/// attempts to merge them together into a memcpy/memset.
Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
                                             Value *StartPtr, Value *ByteVal) {
  if (TD == 0) return 0;

  // Okay, so we now have a single store that can be splatable.  Scan to find
  // all subsequent stores of the same value to offset from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous blocks
  // are stored.
  MemsetRanges Ranges(*TD);

  BasicBlock::iterator BI = StartInst;
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out.  We
      // don't even allow readonly here because we don't want something like:
      //   A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      // Check to see if this stored value is of the same byte-splattable value.
      if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(),
                           Offset, *TD))
        break;

      Ranges.addStore(Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, *TD))
        break;

      Ranges.addMemSet(Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in.  This is a very common case of course.
  if (Ranges.empty())
    return 0;

  // If we had at least one store that could be merged in, add the starting
  // store as well.  We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put it right before the first instruction
  // that isn't part of the memset block.  This ensures that the memset is
  // dominated by any addressing instruction needed by the start of the block.
  IRBuilder<> Builder(BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memsets for anything big enough to be worthwhile.
  Instruction *AMemSet = 0;
  for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
       I != E; ++I) {
    const MemsetRange &Range = *I;

    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(*TD))
      continue;

    // Otherwise, we do want to transform this!  Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Determine alignment.
    unsigned Alignment = Range.Alignment;
    if (Alignment == 0) {
      Type *EltType =
        cast<PointerType>(StartPtr->getType())->getElementType();
      Alignment = TD->getABITypeAlignment(EltType);
    }

    AMemSet =
      Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment);

    DEBUG(dbgs() << "Replace stores:\n";
//... (the rest of this code has been omitted) ...
Example 4: SimplifyStoreAtEndOfBlock
/// SimplifyStoreAtEndOfBlock - Turn things like:
/// if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
/// *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges.  If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *P = *PI;
  BasicBlock *OtherBB = nullptr;

  if (P != StoreBB)
    OtherBB = P;

  if (++PI == pred_end(DestBB))
    return false;

  P = *PI;
  if (P != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  if (++PI != pred_end(DestBB))
    return false;

  // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop).
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI(OtherBB->getTerminator());
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case.  There is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle.  See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
          BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBr, we have to
    // make sure nothing reads or overwrites the stored value in
    // StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayWriteToMemory())
        return false;
    }
  }
//... (the rest of this code has been omitted) ...
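Note that this example queries mayReadFromMemory in two separate scans: once while walking backwards through OtherBB to find a matching store (anything that might read or overwrite the stored value kills the transform), and once while walking forward through StoreBB up to SI for the same reason. The FIXME in the second scan concedes that this is cruder than a real alias-analysis query.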
Example 5: Ranges
/// tryAggregating - When scanning forward over instructions, we look for
/// other loads or stores that could be aggregated with this one.
/// Returns the last instruction added (if one was added) since we might have
/// removed some loads or stores and that might invalidate an iterator.
Instruction *AggregateGlobalOpsOpt::tryAggregating(Instruction *StartInst, Value *StartPtr,
                                                   bool DebugThis) {
  if (TD == 0) return 0;

  Module* M = StartInst->getParent()->getParent()->getParent();
  LLVMContext& Context = StartInst->getContext();

  Type* int8Ty = Type::getInt8Ty(Context);
  Type* sizeTy = Type::getInt64Ty(Context);
  Type* globalInt8PtrTy = int8Ty->getPointerTo(globalSpace);
  bool isLoad = isa<LoadInst>(StartInst);
  bool isStore = isa<StoreInst>(StartInst);
  Instruction *lastAddedInsn = NULL;
  Instruction *LastLoadOrStore = NULL;

  SmallVector<Instruction*, 8> toRemove;

  // Okay, so we now have a single global load/store.  Scan to find
  // all subsequent stores of the same value to offset from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous blocks
  // are stored.
  MemOpRanges Ranges(*TD);

  // Put the first store in since we want to preserve the order.
  Ranges.addInst(0, StartInst);

  BasicBlock::iterator BI = StartInst;
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (isGlobalLoadOrStore(BI, globalSpace, isLoad, isStore)) {
      // OK!
    } else {
      // If the instruction is readnone, ignore it, otherwise bail out.  We
      // don't even allow readonly here because we don't want something like:
      //   A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory())
        break;
      if (isStore && BI->mayReadFromMemory())
        break;
      continue;
    }

    if (isStore && isa<StoreInst>(BI)) {
      StoreInst *NextStore = cast<StoreInst>(BI);
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset, *TD))
        break;

      Ranges.addStore(Offset, NextStore);
      LastLoadOrStore = NextStore;
    } else {
      LoadInst *NextLoad = cast<LoadInst>(BI);
      if (!NextLoad->isSimple()) break;

      // Check to see if this load is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, NextLoad->getPointerOperand(), Offset, *TD))
        break;

      Ranges.addLoad(Offset, NextLoad);
      LastLoadOrStore = NextLoad;
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in.  This is a very common case of course.
  if (!Ranges.moreThanOneOp())
    return 0;

  // Divide the instructions between StartInst and LastLoadOrStore into
  // addressing, memops, and uses of memops (uses of loads).
  reorderAddressingMemopsUses(StartInst, LastLoadOrStore, DebugThis);

  Instruction* insertBefore = StartInst;
  IRBuilder<> builder(insertBefore);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memcpy's for anything big enough to be worthwhile.
  for (MemOpRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
       I != E; ++I) {
    const MemOpRange &Range = *I;
    Value* oldBaseI = NULL;
    Value* newBaseI = NULL;

    if (Range.TheStores.size() == 1) continue; // Don't bother if there's only one thing...

    builder.SetInsertPoint(insertBefore);

    // Otherwise, we do want to transform this!  Create a new memcpy.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;
//... (the rest of this code has been omitted) ...
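This variant (AggregateGlobalOpsOpt appears to come from the Chapel compiler's wide-pointer optimizations) generalizes Example 3 to both loads and stores, and its bail-out logic is correspondingly asymmetric: any intervening write breaks the scan, but an intervening read only matters when aggregating stores, which is exactly the isStore && BI->mayReadFromMemory() check.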
Example 6: if
Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Attempt to improve the alignment.
  if (DL) {
    unsigned KnownAlign =
      getOrEnforceKnownAlignment(Ptr, DL->getPrefTypeAlignment(Val->getType()),
                                 DL);
    unsigned StoreAlign = SI.getAlignment();
    unsigned EffectiveStoreAlign = StoreAlign != 0 ? StoreAlign :
      DL->getABITypeAlignment(Val->getType());

    if (KnownAlign > EffectiveStoreAlign)
      SI.setAlignment(KnownAlign);
    else if (StoreAlign == 0)
      SI.setAlignment(EffectiveStoreAlign);
  }

  // Don't hack volatile/atomic stores.
  // FIXME: Some bits are legal for atomic stores; needs refactoring.
  if (!SI.isSimple()) return nullptr;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return EraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return EraseInstFromFunction(SI);
      }
    }
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations.
  // This situation often occurs with bitfield accesses.
  BasicBlock::iterator BBI = &SI;
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and we skip pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isSimple() && equivalentAddressValues(PrevSI->getOperand(1),
                                                        SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        EraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop.  However, if the loaded value is from
    // the pointer we're loading and is producing the pointer we're storing,
    // then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
          LI->isSimple())
        return EraseInstFromFunction(SI);

      // Otherwise, this is a load from some other location.  Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
      break;
  }

  // store X, null -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return nullptr;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return EraseInstFromFunction(SI);

  // If the pointer destination is a cast, see if we can fold the cast into the
  // source instead.
  if (isa<CastInst>(Ptr))
    if (Instruction *Res = InstCombineStoreToCast(*this, SI))
      return Res;
//... (the rest of this code has been omitted) ...
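In this last example, the local dead-store-elimination scan gives up as soon as it meets an instruction for which mayReadFromMemory() or mayWriteToMemory() returns true: an intervening read could observe the earlier store, so the store cannot be proven dead within this six-instruction window.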