This article collects typical usage examples of the C++ method StoreInst::isSimple. If you have been wondering what StoreInst::isSimple does, how to use it, or what real-world code using it looks like, the curated examples below may help. You can also explore further usage examples of the containing class, StoreInst.
The following presents 14 code examples of StoreInst::isSimple, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code samples.
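For context, a "simple" store is one that is neither atomic nor volatile. In LLVM's include/llvm/IR/Instructions.h, StoreInst::isSimple boils down to the following (a paraphrased sketch of the accessor, not a verbatim quote):

// A store is simple when it carries no atomic ordering and is not volatile.
bool isSimple() const { return !isAtomic() && !isVolatile(); }

Most of the transforms below bail out early on non-simple stores, because reordering or retyping atomic and volatile accesses can change program semantics.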
Example 1: unpackStoreToAggregate
static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
// FIXME: We could probably, with some care, handle both volatile and atomic
// stores here, but it isn't clear that this is important.
if (!SI.isSimple())
return false;
Value *V = SI.getValueOperand();
Type *T = V->getType();
if (!T->isAggregateType())
return false;
if (auto *ST = dyn_cast<StructType>(T)) {
// If the struct only has one element, we unpack.
if (ST->getNumElements() == 1) {
V = IC.Builder->CreateExtractValue(V, 0);
combineStoreToNewValue(IC, SI, V);
return true;
}
}
if (auto *AT = dyn_cast<ArrayType>(T)) {
// If the array only has one element, we unpack.
if (AT->getNumElements() == 1) {
V = IC.Builder->CreateExtractValue(V, 0);
combineStoreToNewValue(IC, SI, V);
return true;
}
}
return false;
}
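As a rough IR-level illustration of the single-element case (a hypothetical snippet; the value and pointer names are invented):

; before: an aggregate store of a one-element struct
store { i32 } %agg, { i32 }* %p
; after: the element is extracted and stored with its own type
; (combineStoreToNewValue emits the store through a retyped pointer)
%p.cast = bitcast { i32 }* %p to i32*
%agg.elt = extractvalue { i32 } %agg, 0
store i32 %agg.elt, i32* %p.cast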
Example 2: visitStoreInst
bool Scalarizer::visitStoreInst(StoreInst &SI) {
if (!ScalarizeLoadStore)
return false;
if (!SI.isSimple())
return false;
VectorLayout Layout;
Value *FullValue = SI.getValueOperand();
if (!getVectorLayout(FullValue->getType(), SI.getAlignment(), Layout))
return false;
unsigned NumElems = Layout.VecTy->getNumElements();
IRBuilder<> Builder(SI.getParent(), &SI);
Scatterer Ptr = scatter(&SI, SI.getPointerOperand());
Scatterer Val = scatter(&SI, FullValue);
ValueVector Stores;
Stores.resize(NumElems);
for (unsigned I = 0; I < NumElems; ++I) {
unsigned Align = Layout.getElemAlign(I);
Stores[I] = Builder.CreateAlignedStore(Val[I], Ptr[I], Align);
}
transferMetadata(&SI, Stores);
return true;
}
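Schematically, the pass replaces one wide vector store with a store per element, each taking its alignment from Layout.getElemAlign. A hypothetical before/after sketch, assuming a 16-byte-aligned <4 x i32> store:

; before
store <4 x i32> %v, <4 x i32>* %p, align 16
; after: per-element stores through the scattered value and pointer
store i32 %v0, i32* %p0, align 16
store i32 %v1, i32* %p1, align 4
store i32 %v2, i32* %p2, align 8
store i32 %v3, i32* %p3, align 4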
Example 3: unpackStoreToAggregate
static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
// FIXME: We could probably, with some care, handle both volatile and atomic
// stores here, but it isn't clear that this is important.
if (!SI.isSimple())
return false;
Value *V = SI.getValueOperand();
Type *T = V->getType();
if (!T->isAggregateType())
return false;
if (auto *ST = dyn_cast<StructType>(T)) {
// If the struct only has one element, we unpack.
unsigned Count = ST->getNumElements();
if (Count == 1) {
V = IC.Builder->CreateExtractValue(V, 0);
combineStoreToNewValue(IC, SI, V);
return true;
}
// We don't want to break up stores with padding here, as we'd lose
// the knowledge that padding exists for the rest of the pipeline.
const DataLayout &DL = IC.getDataLayout();
auto *SL = DL.getStructLayout(ST);
if (SL->hasPadding())
return false;
SmallString<16> EltName = V->getName();
EltName += ".elt";
auto *Addr = SI.getPointerOperand();
SmallString<16> AddrName = Addr->getName();
AddrName += ".repack";
auto *IdxType = Type::getInt32Ty(ST->getContext());
auto *Zero = ConstantInt::get(IdxType, 0);
for (unsigned i = 0; i < Count; i++) {
Value *Indices[2] = {
Zero,
ConstantInt::get(IdxType, i),
};
auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices), AddrName);
auto *Val = IC.Builder->CreateExtractValue(V, i, EltName);
IC.Builder->CreateStore(Val, Ptr);
}
return true;
}
if (auto *AT = dyn_cast<ArrayType>(T)) {
// If the array only has one element, we unpack.
if (AT->getNumElements() == 1) {
V = IC.Builder->CreateExtractValue(V, 0);
combineStoreToNewValue(IC, SI, V);
return true;
}
}
return false;
}
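For a padding-free multi-element struct, the loop above emits one extractvalue plus one GEP-based store per field. Roughly (hypothetical sketch):

; before
store { i32, i32 } %agg, { i32, i32 }* %addr
; after
%agg.elt = extractvalue { i32, i32 } %agg, 0
%addr.repack = getelementptr inbounds { i32, i32 }, { i32, i32 }* %addr, i32 0, i32 0
store i32 %agg.elt, i32* %addr.repack
%agg.elt1 = extractvalue { i32, i32 } %agg, 1
%addr.repack1 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %addr, i32 0, i32 1
store i32 %agg.elt1, i32* %addr.repack1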
Example 4: combineStoreToValueType
/// \brief Combine stores to match the type of value being stored.
///
/// The core idea here is that memory does not have any intrinsic type, and
/// where we can, we should match the type of a store to the type of the value
/// being stored.
///
/// However, this routine must never change the width of a store or the number of
/// stores as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows stores to more closely model the types
/// of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller erase
/// the store instruction as otherwise there is no way to signal whether it was
/// combined or not: IC.EraseInstFromFunction returns a null pointer.
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
// FIXME: We could probably with some care handle both volatile and atomic
// stores here but it isn't clear that this is important.
if (!SI.isSimple())
return false;
Value *Ptr = SI.getPointerOperand();
Value *V = SI.getValueOperand();
unsigned AS = SI.getPointerAddressSpace();
SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
SI.getAllMetadata(MD);
// Fold away bit casts of the stored value by storing the original type.
if (auto *BC = dyn_cast<BitCastInst>(V)) {
V = BC->getOperand(0);
StoreInst *NewStore = IC.Builder->CreateAlignedStore(
V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
SI.getAlignment());
for (const auto &MDPair : MD) {
unsigned ID = MDPair.first;
MDNode *N = MDPair.second;
// Note, essentially every kind of metadata should be preserved here! This
// routine is supposed to clone a store instruction changing *only its
// type*. The only metadata it makes sense to drop is metadata which is
// invalidated when the pointer type changes. This should essentially
// never be the case in LLVM, but we explicitly switch over only known
// metadata to be conservatively correct. If you are adding metadata to
// LLVM which pertains to stores, you almost certainly want to add it
// here.
switch (ID) {
case LLVMContext::MD_dbg:
case LLVMContext::MD_tbaa:
case LLVMContext::MD_prof:
case LLVMContext::MD_fpmath:
case LLVMContext::MD_tbaa_struct:
case LLVMContext::MD_alias_scope:
case LLVMContext::MD_noalias:
case LLVMContext::MD_nontemporal:
case LLVMContext::MD_mem_parallel_loop_access:
case LLVMContext::MD_nonnull:
// All of these directly apply.
NewStore->setMetadata(ID, N);
break;
case LLVMContext::MD_invariant_load:
case LLVMContext::MD_range:
break;
}
}
return true;
}
// FIXME: We should also canonicalize stores of vectors when their elements
// are cast to other types.
return false;
}
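The bitcast fold is easiest to see in IR form (hypothetical sketch):

; before: the stored value is a bitcast
%f = bitcast i32 %v to float
store float %f, float* %p
; after: the original value is stored through a retyped pointer,
; with the metadata of the old store carried over
%p.cast = bitcast float* %p to i32*
store i32 %v, i32* %p.cast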
Example 5: checkArgumentUses
bool AMDGPURewriteOutArguments::checkArgumentUses(Value &Arg) const {
const int MaxUses = 10;
int UseCount = 0;
for (Use &U : Arg.uses()) {
StoreInst *SI = dyn_cast<StoreInst>(U.getUser());
if (UseCount > MaxUses)
return false;
if (!SI) {
auto *BCI = dyn_cast<BitCastInst>(U.getUser());
if (!BCI || !BCI->hasOneUse())
return false;
// We don't handle multiple stores currently, so stores to aggregate
// pointers aren't worth the trouble since they are canonically split up.
Type *DestEltTy = BCI->getType()->getPointerElementType();
if (DestEltTy->isAggregateType())
return false;
// We could handle these if we had a convenient way to bitcast between
// them.
Type *SrcEltTy = Arg.getType()->getPointerElementType();
if (SrcEltTy->isArrayTy())
return false;
// Special-case structs with a single member. It is useful to handle some
// casts between structs and non-structs, but we can't bitcast directly
// between them. Blender uses some casts that look like
// { <3 x float> }* to <4 x float>*.
if ((SrcEltTy->isStructTy() && (SrcEltTy->getStructNumElements() != 1)))
return false;
// Clang emits OpenCL 3-vector type accesses with a bitcast to the
// equivalent 4-element vector and accesses that, and we're looking for
// this pointer cast.
if (DL->getTypeAllocSize(SrcEltTy) != DL->getTypeAllocSize(DestEltTy))
return false;
return checkArgumentUses(*BCI);
}
if (!SI->isSimple() ||
U.getOperandNo() != StoreInst::getPointerOperandIndex())
return false;
++UseCount;
}
// Skip unused arguments.
return UseCount > 0;
}
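The final size check is what lets the Blender-style pattern through: under a typical DataLayout, <3 x float> is padded to an alloc size of 16 bytes, the same as <4 x float>. A hypothetical cast this function would accept:

; { <3 x float> } has a single member and the same alloc size as <4 x float>
%cast = bitcast { <3 x float> }* %arg to <4 x float>*
store <4 x float> %val, <4 x float>* %cast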
Example 6: visitStore
bool CallAnalyzer::visitStore(StoreInst &I) {
Value *SROAArg;
DenseMap<Value *, int>::iterator CostIt;
if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
if (I.isSimple()) {
accumulateSROACost(CostIt, InlineConstants::InstrCost);
return true;
}
disableSROA(CostIt);
}
return false;
}
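In other words, a simple store to an SROA candidate is credited as removable (it disappears if the alloca gets scalarized), while a non-simple store disables SROA for that candidate. For example (hypothetical IR):

store i32 %x, i32* %a            ; simple: counts toward the SROA savings
store volatile i32 %x, i32* %a   ; not simple: disables SROA for %a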
Example 7: translateStore
bool IRTranslator::translateStore(const StoreInst &SI) {
assert(SI.isSimple() && "only simple stores are supported at the moment");
MachineFunction &MF = MIRBuilder.getMF();
unsigned Val = getOrCreateVReg(*SI.getValueOperand());
unsigned Addr = getOrCreateVReg(*SI.getPointerOperand());
LLT VTy{*SI.getValueOperand()->getType()},
PTy{*SI.getPointerOperand()->getType()};
MIRBuilder.buildStore(
VTy, PTy, Val, Addr,
*MF.getMachineMemOperand(MachinePointerInfo(SI.getPointerOperand()),
MachineMemOperand::MOStore,
VTy.getSizeInBits() / 8, getMemOpAlignment(SI)));
return true;
}
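The buildStore call above produces a generic G_STORE with an attached machine memory operand; roughly (hypothetical MIR, virtual register names invented):

%val:_(s32) = COPY $w0
%addr:_(p0) = COPY $x0
G_STORE %val(s32), %addr(p0) :: (store 4 into %ir.ptr)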
Example 8: instructionSafeForVersioning
/// \brief Check whether a loop instruction is safe for loop versioning.
/// Returns true if it is safe, false otherwise.
/// The checks are:
/// 1) All loads and stores in the loop body must be non-atomic and non-volatile.
/// 2) Function calls must be safe, i.e. they must not access memory.
/// 3) The loop body must not contain any instruction that may throw.
bool LoopVersioningLICM::instructionSafeForVersioning(Instruction *I) {
assert(I != nullptr && "Null instruction found!");
// Check function call safety
if (isa<CallInst>(I) && !AA->doesNotAccessMemory(CallSite(I))) {
DEBUG(dbgs() << " Unsafe call site found.\n");
return false;
}
// Avoid loops with a possibility of throwing
if (I->mayThrow()) {
DEBUG(dbgs() << " May throw instruction found in loop body\n");
return false;
}
// If the current instruction is a load,
// make sure it's a simple load (non-atomic and non-volatile).
if (I->mayReadFromMemory()) {
LoadInst *Ld = dyn_cast<LoadInst>(I);
if (!Ld || !Ld->isSimple()) {
DEBUG(dbgs() << " Found a non-simple load.\n");
return false;
}
LoadAndStoreCounter++;
collectStridedAccess(Ld);
Value *Ptr = Ld->getPointerOperand();
// Check loop invariant.
if (SE->isLoopInvariant(SE->getSCEV(Ptr), CurLoop))
InvariantCounter++;
}
// If the current instruction is a store,
// make sure it's a simple store (non-atomic and non-volatile).
else if (I->mayWriteToMemory()) {
StoreInst *St = dyn_cast<StoreInst>(I);
if (!St || !St->isSimple()) {
DEBUG(dbgs() << " Found a non-simple store.\n");
return false;
}
LoadAndStoreCounter++;
collectStridedAccess(St);
Value *Ptr = St->getPointerOperand();
// Check loop invariant.
if (SE->isLoopInvariant(SE->getSCEV(Ptr), CurLoop))
InvariantCounter++;
IsReadOnlyLoop = false;
}
return true;
}
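For reference, the accesses these checks reject are exactly the non-simple ones (hypothetical IR):

%v = load atomic i32, i32* %p seq_cst, align 4   ; atomic load: rejected
store volatile i32 %v, i32* %q                   ; volatile store: rejected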
Example 9: combineStoreToValueType
/// \brief Combine stores to match the type of value being stored.
///
/// The core idea here is that memory does not have any intrinsic type, and
/// where we can, we should match the type of a store to the type of the value
/// being stored.
///
/// However, this routine must never change the width of a store or the number of
/// stores as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows stores to more closely model the types
/// of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller erase
/// the store instruction as otherwise there is no way to signal whether it was
/// combined or not: IC.EraseInstFromFunction returns a null pointer.
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
// FIXME: We could probably, with some care, handle both volatile and atomic
// stores here, but it isn't clear that this is important.
if (!SI.isSimple())
return false;
Value *V = SI.getValueOperand();
// Fold away bit casts of the stored value by storing the original type.
if (auto *BC = dyn_cast<BitCastInst>(V)) {
V = BC->getOperand(0);
combineStoreToNewValue(IC, SI, V);
return true;
}
// FIXME: We should also canonicalize stores of vectors when their elements
// are cast to other types.
return false;
}
Example 10: visitStoreInst
/// store {atomic|volatile} T %val, T* %ptr memory_order, align sizeof(T)
/// becomes:
/// call void @llvm.nacl.atomic.store.i<size>(%val, %ptr, memory_order)
void AtomicVisitor::visitStoreInst(StoreInst &I) {
return; // XXX EMSCRIPTEN
if (I.isSimple())
return;
PointerHelper<StoreInst> PH(*this, I);
const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic =
findAtomicIntrinsic(I, Intrinsic::nacl_atomic_store, PH.PET);
checkAlignment(I, I.getAlignment(), PH.BitSize / CHAR_BIT);
Value *V = I.getValueOperand();
if (!V->getType()->isIntegerTy()) {
// The store isn't of an integer type. We define atomics in terms of
// integers, so bitcast the value to store to an integer of the
// proper width.
CastInst *Cast = createCast(I, V, Type::getIntNTy(C, PH.BitSize),
V->getName() + ".cast");
Cast->setDebugLoc(I.getDebugLoc());
V = Cast;
}
checkSizeMatchesType(I, PH.BitSize, V->getType());
Value *Args[] = {V, PH.P, freezeMemoryOrder(I, I.getOrdering())};
replaceInstructionWithIntrinsicCall(I, Intrinsic, PH.OriginalPET, PH.PET,
Args);
}
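Instantiating the pattern from the header comment for a 32-bit store (hypothetical; the integer memory-order constant is produced by freezeMemoryOrder):

; before
store atomic i32 %val, i32* %ptr seq_cst, align 4
; after
call void @llvm.nacl.atomic.store.i32(i32 %val, i32* %ptr, i32 %order)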
Example 11: GetBasicARCInstKind
static StoreInst *findSafeStoreForStoreStrongContraction(LoadInst *Load,
Instruction *Release,
ProvenanceAnalysis &PA,
AliasAnalysis *AA) {
StoreInst *Store = nullptr;
bool SawRelease = false;
// Get the location associated with Load.
MemoryLocation Loc = MemoryLocation::get(Load);
// Walk down to find the store and the release, which may be in either order.
for (auto I = std::next(BasicBlock::iterator(Load)),
E = Load->getParent()->end();
I != E; ++I) {
// If we found the store we were looking for and saw the release,
// break. There is no more work to be done.
if (Store && SawRelease)
break;
// Now we know that we have not seen either the store or the release. If I
// is the release, mark that we saw the release and continue.
Instruction *Inst = &*I;
if (Inst == Release) {
SawRelease = true;
continue;
}
// Otherwise, we check if Inst is a "good" store. Grab the instruction class
// of Inst.
ARCInstKind Class = GetBasicARCInstKind(Inst);
// If Inst is an unrelated retain, we don't care about it.
//
// TODO: This is one area where the optimization could be made more
// aggressive.
if (IsRetain(Class))
continue;
// If we have seen the store, but not the release...
if (Store) {
// We need to make sure that it is safe to move the release from its
// current position to the store. This implies proving that any
// instruction in between Store and the Release conservatively cannot use
// the RCIdentityRoot of Release. If we can prove that Inst can be ignored,
// continue...
if (!CanUse(Inst, Load, PA, Class)) {
continue;
}
// Otherwise, be conservative and return nullptr.
return nullptr;
}
// OK, now we know we have not seen a store yet. See if Inst can write to
// our load location; if it cannot, just ignore the instruction.
if (!(AA->getModRefInfo(Inst, Loc) & MRI_Mod))
continue;
Store = dyn_cast<StoreInst>(Inst);
// If it can, check whether Inst is a simple store. If Inst is not a
// store, or is a store that is not simple, then something we do not
// understand is writing to this memory, implying we cannot move the load
// over the write to any subsequent store that we may find.
if (!Store || !Store->isSimple())
return nullptr;
// Then make sure that the pointer we are storing to is Ptr. If so, we
// found our Store!
if (Store->getPointerOperand() == Loc.Ptr)
continue;
// Otherwise, we have an unknown store to some other ptr that clobbers
// Loc.Ptr. Bail!
return nullptr;
}
// If we did not find the store or did not see the release, fail.
if (!Store || !SawRelease)
return nullptr;
// We succeeded!
return Store;
}
Example 12: Ranges
/// tryAggregating - When scanning forward over instructions, we look for
/// other loads or stores that could be aggregated with this one.
/// Returns the last instruction added (if one was added) since we might have
/// removed some loads or stores and that might invalidate an iterator.
Instruction *AggregateGlobalOpsOpt::tryAggregating(Instruction *StartInst, Value *StartPtr,
bool DebugThis) {
if (TD == 0) return 0;
Module* M = StartInst->getParent()->getParent()->getParent();
LLVMContext& Context = StartInst->getContext();
Type* int8Ty = Type::getInt8Ty(Context);
Type* sizeTy = Type::getInt64Ty(Context);
Type* globalInt8PtrTy = int8Ty->getPointerTo(globalSpace);
bool isLoad = isa<LoadInst>(StartInst);
bool isStore = isa<StoreInst>(StartInst);
Instruction *lastAddedInsn = NULL;
Instruction *LastLoadOrStore = NULL;
SmallVector<Instruction*, 8> toRemove;
// Okay, so we now have a single global load/store. Scan to find all
// subsequent loads or stores at constant offsets from the same pointer.
// Join these together into ranges, so we can decide whether contiguous
// blocks are accessed.
MemOpRanges Ranges(*TD);
// Put the first store in since we want to preserve the order.
Ranges.addInst(0, StartInst);
BasicBlock::iterator BI = StartInst;
for (++BI; !isa<TerminatorInst>(BI); ++BI) {
if( isGlobalLoadOrStore(BI, globalSpace, isLoad, isStore) ) {
// OK!
} else {
// If the instruction is readnone, ignore it, otherwise bail out. We
// don't even allow readonly here because we don't want something like:
// A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
if (BI->mayWriteToMemory())
break;
if (isStore && BI->mayReadFromMemory())
break;
continue;
}
if ( isStore && isa<StoreInst>(BI) ) {
StoreInst *NextStore = cast<StoreInst>(BI);
// If this is a store, see if we can merge it in.
if (!NextStore->isSimple()) break;
// Check to see if this store is to a constant offset from the start ptr.
int64_t Offset;
if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset, *TD))
break;
Ranges.addStore(Offset, NextStore);
LastLoadOrStore = NextStore;
} else {
LoadInst *NextLoad = cast<LoadInst>(BI);
if (!NextLoad->isSimple()) break;
// Check to see if this load is to a constant offset from the start ptr.
int64_t Offset;
if (!IsPointerOffset(StartPtr, NextLoad->getPointerOperand(), Offset, *TD))
break;
Ranges.addLoad(Offset, NextLoad);
LastLoadOrStore = NextLoad;
}
}
// If we have no ranges, then we just had a single store with nothing that
// could be merged in. This is a very common case of course.
if (!Ranges.moreThanOneOp())
return 0;
// Divide the instructions between StartInst and LastLoadOrStore into
// addressing, memops, and uses of memops (uses of loads)
reorderAddressingMemopsUses(StartInst, LastLoadOrStore, DebugThis);
Instruction* insertBefore = StartInst;
IRBuilder<> builder(insertBefore);
// Now that we have full information about ranges, loop over the ranges and
// emit memcpy's for anything big enough to be worthwhile.
for (MemOpRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
I != E; ++I) {
const MemOpRange &Range = *I;
Value* oldBaseI = NULL;
Value* newBaseI = NULL;
if (Range.TheStores.size() == 1) continue; // Don't bother if there's only one thing...
builder.SetInsertPoint(insertBefore);
// Otherwise, we do want to transform this! Create a new memcpy.
// Get the starting pointer of the block.
StartPtr = Range.StartPtr;
//......... (some code omitted here) .........
Example 13: ContractRelease
/// Attempt to merge an objc_release with a store, load, and objc_retain to form
/// an objc_storeStrong. This can be a little tricky because the instructions
/// don't always appear in order, and there may be unrelated intervening
/// instructions.
void ObjCARCContract::ContractRelease(Instruction *Release,
inst_iterator &Iter) {
LoadInst *Load = dyn_cast<LoadInst>(GetObjCArg(Release));
if (!Load || !Load->isSimple()) return;
// For now, require everything to be in one basic block.
BasicBlock *BB = Release->getParent();
if (Load->getParent() != BB) return;
// Walk down to find the store and the release, which may be in either order.
BasicBlock::iterator I = Load, End = BB->end();
++I;
AliasAnalysis::Location Loc = AA->getLocation(Load);
StoreInst *Store = 0;
bool SawRelease = false;
for (; !Store || !SawRelease; ++I) {
if (I == End)
return;
Instruction *Inst = I;
if (Inst == Release) {
SawRelease = true;
continue;
}
InstructionClass Class = GetBasicInstructionClass(Inst);
// Unrelated retains are harmless.
if (IsRetain(Class))
continue;
if (Store) {
// The store is the point where we're going to put the objc_storeStrong,
// so make sure there are no uses after it.
if (CanUse(Inst, Load, PA, Class))
return;
} else if (AA->getModRefInfo(Inst, Loc) & AliasAnalysis::Mod) {
// We are moving the load down to the store, so check for anything
// else which writes to the memory between the load and the store.
Store = dyn_cast<StoreInst>(Inst);
if (!Store || !Store->isSimple()) return;
if (Store->getPointerOperand() != Loc.Ptr) return;
}
}
Value *New = StripPointerCastsAndObjCCalls(Store->getValueOperand());
// Walk up to find the retain.
I = Store;
BasicBlock::iterator Begin = BB->begin();
while (I != Begin && GetBasicInstructionClass(I) != IC_Retain)
--I;
Instruction *Retain = I;
if (GetBasicInstructionClass(Retain) != IC_Retain) return;
if (GetObjCArg(Retain) != New) return;
Changed = true;
++NumStoreStrongs;
LLVMContext &C = Release->getContext();
Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
Type *I8XX = PointerType::getUnqual(I8X);
Value *Args[] = { Load->getPointerOperand(), New };
if (Args[0]->getType() != I8XX)
Args[0] = new BitCastInst(Args[0], I8XX, "", Store);
if (Args[1]->getType() != I8X)
Args[1] = new BitCastInst(Args[1], I8X, "", Store);
CallInst *StoreStrong =
CallInst::Create(getStoreStrongCallee(BB->getParent()->getParent()),
Args, "", Store);
StoreStrong->setDoesNotThrow();
StoreStrong->setDebugLoc(Store->getDebugLoc());
// We can't set the tail flag yet, because we haven't yet determined
// whether there are any escaping allocas. Remember this call, so that
// we can set the tail flag once we know it's safe.
StoreStrongCalls.insert(StoreStrong);
if (&*Iter == Store) ++Iter;
Store->eraseFromParent();
Release->eraseFromParent();
EraseInstruction(Retain);
if (Load->use_empty())
Load->eraseFromParent();
}
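End to end, the rewrite this function performs has the following shape (hypothetical sketch):

; before: load the old value, retain the new one, store it, release the old one
%old = load i8*, i8** %ptr
%new.retained = call i8* @objc_retain(i8* %new)
store i8* %new, i8** %ptr
call void @objc_release(i8* %old)
; after
call void @objc_storeStrong(i8** %ptr, i8* %new)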
Example 14: runOnFunction
bool runOnFunction(Function &F) override {
AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
DependenceAnalysis *DA = &(getAnalysis<DependenceAnalysis>());
// iterate over basic blocks
Function *func = &F;
unsigned bb_num = 0;
for (Function::iterator BB = func->begin(), BE = func->end();
BB != BE; ++BB) {
errs() << "BB-" << bb_num << "\n";
bb_num++;
// iterate over instructions
unsigned inst_num = 0;
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;++I) {
Instruction *Ins = dyn_cast<Instruction>(I);
if (!Ins)
return false;
LoadInst *Ld = dyn_cast<LoadInst>(I);
StoreInst *St = dyn_cast<StoreInst>(I);
if (!St && !Ld)
continue;
if (Ld && !Ld->isSimple())
return false;
if (St && !St->isSimple())
return false;
inst_num++;
MemInstr.push_back(&*I);
errs() << "MemInst-" << inst_num << ":" << *I << "\n";
}
ValueVector::iterator I, IE, J, JE;
for (I = MemInstr.begin(), IE = MemInstr.end(); I != IE; ++I) {
for (J = I, JE = MemInstr.end(); J != JE; ++J) {
std::vector<char> Dep;
Instruction *Src = dyn_cast<Instruction>(*I);
Instruction *Des = dyn_cast<Instruction>(*J);
if (Src == Des)
continue;
if (isa<LoadInst>(Src) && isa<LoadInst>(Des))
continue;
if (auto D = DA->depends(Src, Des, true)) {
errs() << "Found Dependency between:\nSrc:" << *Src << "\nDes:" << *Des
<< "\n";
if (D->isFlow()) {
errs() << "Flow dependence not handled";
return false;
}
if (D->isAnti()) {
errs() << "Found Anti dependence \n";
AliasAnalysis::AliasResult AA_dep = AA.alias(Src, Des);
AliasAnalysis::AliasResult AA_dep_1 = AA.alias(Des, Src);
errs() << "The Ld->St alias result is " << AA_dep << "\n";
errs() << "The St->Ld alias result is " << AA_dep_1 << "\n";
unsigned Levels = D->getLevels();
errs() << "levels = " << Levels << "\n";
char Direction;
for (unsigned II = 1; II <= Levels; ++II) {
const SCEV *Distance = D->getDistance(II);
const SCEVConstant *SCEVConst = dyn_cast_or_null<SCEVConstant>(Distance);
if (SCEVConst) {
const ConstantInt *CI = SCEVConst->getValue();
//int64_t it_dist = CI->getUniqueInteger().getSExtValue();
//int it_dist = CI->getUniqueInteger().getSExtValue();
unsigned it_dist = abs(CI->getUniqueInteger().getSExtValue());
errs() << "distance is not null\n";
//errs() << "distance = "<< *CI << "\n";
errs() << "distance = "<< it_dist << "\n";
if (CI->isNegative())
Direction = '<';
else if (CI->isZero())
Direction = '=';
else
Direction = '>';
Dep.push_back(Direction);
}
else if (D->isScalar(II)) {
Direction = 'S';
Dep.push_back(Direction);
}
else {
unsigned Dir = D->getDirection(II);
if (Dir == Dependence::DVEntry::LT || Dir == Dependence::DVEntry::LE)
Direction = '<';
else if (Dir == Dependence::DVEntry::GT || Dir == Dependence::DVEntry::GE)
Direction = '>';
else if (Dir == Dependence::DVEntry::EQ)
Direction = '=';
else
Direction = '*';
Dep.push_back(Direction);
}
}
}
}
}
}
}
errs() << "------Hello World!--------\n";
//......... (some code omitted here) .........