This article collects typical usage examples of the C++ method LoadInst::isSimple. If you have been wondering what exactly LoadInst::isSimple does and how to use it, the hand-picked examples below should help. You can also explore further usage examples of its containing class, LoadInst.
Nine code examples of LoadInst::isSimple are shown below, sorted by popularity.
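Before the examples, a quick note on the method itself: for a load, isSimple() is true exactly when the instruction is neither volatile nor atomic, and nearly every transformation below uses it to bail out on loads it cannot safely touch. The following minimal sketch is not taken from any of the examples; it assumes an LLVM release of the same vintage as these snippets, where IRBuilder::CreateLoad can be called on a typed pointer without an explicit result type:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("isSimpleDemo", Ctx);
  IRBuilder<> B(Ctx);

  // A function taking an i32* so we have a pointer to load from.
  Function *F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt32Ty()->getPointerTo()},
                        false),
      Function::ExternalLinkage, "f", &M);
  BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);
  B.SetInsertPoint(BB);
  Value *Ptr = &*F->arg_begin();

  LoadInst *Plain = B.CreateLoad(Ptr);                      // ordinary load
  LoadInst *Volat = B.CreateLoad(Ptr, /*isVolatile=*/true); // volatile load
  B.CreateRetVoid();

  // isSimple() is equivalent to !isVolatile() && !isAtomic().
  outs() << "plain:    " << (Plain->isSimple() ? "simple" : "not simple") << "\n";
  outs() << "volatile: " << (Volat->isSimple() ? "simple" : "not simple") << "\n";
  return 0;
}

Compiled like any other LLVM tool (e.g. with clang++ `llvm-config --cxxflags --ldflags --libs core support`), the plain load reports simple and the volatile one does not.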
Example 1: canWidenScalarExtLoad
bool AMDGPUCodeGenPrepare::canWidenScalarExtLoad(LoadInst &I) const {
  Type *Ty = I.getType();
  const DataLayout &DL = Mod->getDataLayout();
  int TySize = DL.getTypeSizeInBits(Ty);
  unsigned Align = I.getAlignment() ?
                   I.getAlignment() : DL.getABITypeAlignment(Ty);

  return I.isSimple() && TySize < 32 && Align >= 4 && DA->isUniform(&I);
}
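Here isSimple() is the first gate: a volatile or atomic load must keep its exact width and ordering, so only simple, uniform loads narrower than 32 bits with at least 4-byte alignment are considered for widening.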
Example 2: visitLoadInst
///   %res = load {atomic|volatile} T* %ptr memory_order, align sizeof(T)
/// becomes:
///   %res = call T @llvm.nacl.atomic.load.i<size>(%ptr, memory_order)
void AtomicVisitor::visitLoadInst(LoadInst &I) {
  return; // XXX EMSCRIPTEN
  if (I.isSimple())
    return;
  PointerHelper<LoadInst> PH(*this, I);
  const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic =
      findAtomicIntrinsic(I, Intrinsic::nacl_atomic_load, PH.PET);
  checkAlignment(I, I.getAlignment(), PH.BitSize / CHAR_BIT);
  Value *Args[] = {PH.P, freezeMemoryOrder(I, I.getOrdering())};
  replaceInstructionWithIntrinsicCall(I, Intrinsic, PH.OriginalPET, PH.PET,
                                      Args);
}
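Note the inverted use: a simple load needs no lowering, so the visitor returns early and only atomic or volatile loads are rewritten into the @llvm.nacl.atomic.load intrinsic call shown in the doc comment. (The unconditional return at the top disables the whole rewrite in this Emscripten fork.)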
Example 3: visitLoad
bool CallAnalyzer::visitLoad(LoadInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}
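In the inline cost analysis, a simple load from an SROA-able alloca is credited as a saving, since it is expected to disappear after SROA; a volatile or atomic load instead disables SROA for that argument.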
Example 4: translateLoad
bool IRTranslator::translateLoad(const LoadInst &LI) {
  assert(LI.isSimple() && "only simple loads are supported at the moment");

  MachineFunction &MF = MIRBuilder.getMF();
  unsigned Res = getOrCreateVReg(LI);
  unsigned Addr = getOrCreateVReg(*LI.getPointerOperand());
  LLT VTy{*LI.getType()}, PTy{*LI.getPointerOperand()->getType()};

  MIRBuilder.buildLoad(
      VTy, PTy, Res, Addr,
      *MF.getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
                               MachineMemOperand::MOLoad,
                               VTy.getSizeInBits() / 8, getMemOpAlignment(LI)));
  return true;
}
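At this early stage of GlobalISel, the IRTranslator simply asserts isSimple(): volatile and atomic loads were not handled yet, which is why the memory operand is built with a plain MOLoad flag and no ordering information.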
Example 5: instructionSafeForVersioning
/// \brief Check whether a loop instruction is safe for loop versioning.
/// Returns true if it is safe, false otherwise.
/// The checks are:
/// 1) All loads and stores in the loop body are non-atomic and non-volatile.
/// 2) Function calls are safe, i.e. they do not access memory.
/// 3) The loop body contains no instruction that may throw.
bool LoopVersioningLICM::instructionSafeForVersioning(Instruction *I) {
  assert(I != nullptr && "Null instruction found!");
  // Check function call safety.
  if (isa<CallInst>(I) && !AA->doesNotAccessMemory(CallSite(I))) {
    DEBUG(dbgs() << " Unsafe call site found.\n");
    return false;
  }
  // Avoid loops with a possibility of throw.
  if (I->mayThrow()) {
    DEBUG(dbgs() << " May throw instruction found in loop body\n");
    return false;
  }
  // If the current instruction reads memory, make sure it is
  // a simple load (non-atomic and non-volatile).
  if (I->mayReadFromMemory()) {
    LoadInst *Ld = dyn_cast<LoadInst>(I);
    if (!Ld || !Ld->isSimple()) {
      DEBUG(dbgs() << " Found a non-simple load.\n");
      return false;
    }
    LoadAndStoreCounter++;
    collectStridedAccess(Ld);
    Value *Ptr = Ld->getPointerOperand();
    // Check loop invariance.
    if (SE->isLoopInvariant(SE->getSCEV(Ptr), CurLoop))
      InvariantCounter++;
  }
  // If the current instruction writes memory, make sure it is
  // a simple store (non-atomic and non-volatile).
  else if (I->mayWriteToMemory()) {
    StoreInst *St = dyn_cast<StoreInst>(I);
    if (!St || !St->isSimple()) {
      DEBUG(dbgs() << " Found a non-simple store.\n");
      return false;
    }
    LoadAndStoreCounter++;
    collectStridedAccess(St);
    Value *Ptr = St->getPointerOperand();
    // Check loop invariance.
    if (SE->isLoopInvariant(SE->getSCEV(Ptr), CurLoop))
      InvariantCounter++;
    IsReadOnlyLoop = false;
  }
  return true;
}
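In the pass this predicate is applied to every instruction of the loop body. A minimal sketch of such a driver, assuming the same CurLoop member the function above already uses:

for (auto *Block : CurLoop->getBlocks())
  for (auto &Inst : *Block)
    if (!instructionSafeForVersioning(&Inst))
      return false;
return true;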
Example 6: visitLoadInst
bool Scalarizer::visitLoadInst(LoadInst &LI) {
  if (!ScalarizeLoadStore)
    return false;
  if (!LI.isSimple())
    return false;

  VectorLayout Layout;
  if (!getVectorLayout(LI.getType(), LI.getAlignment(), Layout))
    return false;

  unsigned NumElems = Layout.VecTy->getNumElements();
  IRBuilder<> Builder(LI.getParent(), &LI);
  Scatterer Ptr = scatter(&LI, LI.getPointerOperand());
  ValueVector Res;
  Res.resize(NumElems);

  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreateAlignedLoad(Ptr[I], Layout.getElemAlign(I),
                                       LI.getName() + ".i" + Twine(I));
  gather(&LI, Res);
  return true;
}
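The Scalarizer only splits simple vector loads: it emits one CreateAlignedLoad per element through the scattered pointer and reassembles the results with gather(). Splitting a volatile or atomic load would change how many memory accesses the program performs, so those are left alone.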
Example 7: Ranges
/// tryAggregating - When scanning forward over instructions, we look for
/// other loads or stores that could be aggregated with this one.
/// Returns the last instruction added (if one was added) since we might have
/// removed some loads or stores and that might invalidate an iterator.
Instruction *AggregateGlobalOpsOpt::tryAggregating(Instruction *StartInst,
                                                   Value *StartPtr,
                                                   bool DebugThis) {
  if (TD == 0)
    return 0;

  Module *M = StartInst->getParent()->getParent()->getParent();
  LLVMContext &Context = StartInst->getContext();

  Type *int8Ty = Type::getInt8Ty(Context);
  Type *sizeTy = Type::getInt64Ty(Context);
  Type *globalInt8PtrTy = int8Ty->getPointerTo(globalSpace);
  bool isLoad = isa<LoadInst>(StartInst);
  bool isStore = isa<StoreInst>(StartInst);
  Instruction *lastAddedInsn = NULL;
  Instruction *LastLoadOrStore = NULL;

  SmallVector<Instruction *, 8> toRemove;

  // Okay, so we now have a single global load/store. Scan to find
  // all subsequent stores of the same value to offset from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous blocks
  // are stored.
  MemOpRanges Ranges(*TD);

  // Put the first store in since we want to preserve the order.
  Ranges.addInst(0, StartInst);

  BasicBlock::iterator BI = StartInst;
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (isGlobalLoadOrStore(BI, globalSpace, isLoad, isStore)) {
      // OK!
    } else {
      // If the instruction is readnone, ignore it, otherwise bail out. We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory())
        break;
      if (isStore && BI->mayReadFromMemory())
        break;
      continue;
    }

    if (isStore && isa<StoreInst>(BI)) {
      StoreInst *NextStore = cast<StoreInst>(BI);
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple())
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset,
                           *TD))
        break;

      Ranges.addStore(Offset, NextStore);
      LastLoadOrStore = NextStore;
    } else {
      LoadInst *NextLoad = cast<LoadInst>(BI);
      if (!NextLoad->isSimple())
        break;

      // Check to see if this load is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, NextLoad->getPointerOperand(), Offset,
                           *TD))
        break;

      Ranges.addLoad(Offset, NextLoad);
      LastLoadOrStore = NextLoad;
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in. This is a very common case of course.
  if (!Ranges.moreThanOneOp())
    return 0;

  // Divide the instructions between StartInst and LastLoadOrStore into
  // addressing, memops, and uses of memops (uses of loads).
  reorderAddressingMemopsUses(StartInst, LastLoadOrStore, DebugThis);

  Instruction *insertBefore = StartInst;
  IRBuilder<> builder(insertBefore);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memcpy's for anything big enough to be worthwhile.
  for (MemOpRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
       I != E; ++I) {
    const MemOpRange &Range = *I;
    Value *oldBaseI = NULL;
    Value *newBaseI = NULL;

    if (Range.TheStores.size() == 1)
      continue; // Don't bother if there's only one thing...

    builder.SetInsertPoint(insertBefore);

    // Otherwise, we do want to transform this! Create a new memcpy.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;
//......... some code omitted here .........
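The two isSimple() checks act as scan terminators here: the moment the forward scan meets a volatile or atomic access, aggregation stops, and only the range gathered so far is considered for merging.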
Example 8: countCodeReductionForSROAInst
/// \brief Compute the reduction possible for a given instruction if we are able
/// to SROA an alloca.
///
/// The reduction for this instruction is added to the SROAReduction output
/// parameter. Returns false if this instruction is expected to defeat SROA in
/// general.
static bool countCodeReductionForSROAInst(Instruction *I,
                                          SmallVectorImpl<Value *> &Worklist,
                                          unsigned &SROAReduction) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!LI->isSimple())
      return false;
    SROAReduction += InlineConstants::InstrCost;
    return true;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!SI->isSimple())
      return false;
    SROAReduction += InlineConstants::InstrCost;
    return true;
  }

  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
    // If the GEP has variable indices, we won't be able to do much with it.
    if (!GEP->hasAllConstantIndices())
      return false;
    // A non-zero GEP will likely become a mask operation after SROA.
    if (GEP->hasAllZeroIndices())
      SROAReduction += InlineConstants::InstrCost;
    Worklist.push_back(GEP);
    return true;
  }

  if (BitCastInst *BCI = dyn_cast<BitCastInst>(I)) {
    // Track pointer through bitcasts.
    Worklist.push_back(BCI);
    SROAReduction += InlineConstants::InstrCost;
    return true;
  }

  // We just look for non-constant operands to ICmp instructions as those will
  // defeat SROA. The actual reduction for these happens even without SROA.
  if (ICmpInst *ICI = dyn_cast<ICmpInst>(I))
    return isa<Constant>(ICI->getOperand(1));

  if (SelectInst *SI = dyn_cast<SelectInst>(I)) {
    // SROA can handle a select of alloca iff all uses of the alloca are
    // loads, and dereferenceable. We assume it's dereferenceable since
    // we're told the input is an alloca.
    for (Value::use_iterator UI = SI->use_begin(), UE = SI->use_end();
         UI != UE; ++UI) {
      LoadInst *LI = dyn_cast<LoadInst>(*UI);
      if (LI == 0 || !LI->isSimple())
        return false;
    }
    // We don't know whether we'll be deleting the rest of the chain of
    // instructions from the SelectInst on, because we don't know whether
    // the other side of the select is also an alloca or not.
    return true;
  }

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // SROA can usually chew through these intrinsics.
      SROAReduction += InlineConstants::InstrCost;
      return true;
    }
  }

  // If there is some other strange instruction, we're not going to be
  // able to do much if we inline this.
  return false;
}
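As in Example 3, only simple loads and stores count toward the SROA saving; any non-simple access makes the function return false, signalling that the alloca is expected to defeat SROA.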
Example 9: runOnFunction
bool runOnFunction(Function &F) override {
  // Take the analysis by reference; copying it by value would slice the
  // analysis object.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  DependenceAnalysis *DA = &(getAnalysis<DependenceAnalysis>());

  // Iterate over basic blocks.
  Function *func = &F;
  unsigned bb_num = 0;
  for (Function::iterator BB = func->begin(), BE = func->end();
       BB != BE; ++BB) {
    errs() << "BB-" << bb_num << "\n";
    bb_num++;

    // Iterate over instructions, collecting the simple loads and stores.
    unsigned inst_num = 0;
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
      Instruction *Ins = dyn_cast<Instruction>(I);
      if (!Ins)
        return false;
      LoadInst *Ld = dyn_cast<LoadInst>(I);
      StoreInst *St = dyn_cast<StoreInst>(I);
      if (!St && !Ld)
        continue;
      if (Ld && !Ld->isSimple())
        return false;
      if (St && !St->isSimple())
        return false;
      inst_num++;
      MemInstr.push_back(&*I);
      errs() << "MemInst-" << inst_num << ":" << *I << "\n";
    }

    ValueVector::iterator I, IE, J, JE;
    for (I = MemInstr.begin(), IE = MemInstr.end(); I != IE; ++I) {
      for (J = I, JE = MemInstr.end(); J != JE; ++J) {
        std::vector<char> Dep;
        Instruction *Src = dyn_cast<Instruction>(*I);
        Instruction *Des = dyn_cast<Instruction>(*J);
        if (Src == Des)
          continue;
        if (isa<LoadInst>(Src) && isa<LoadInst>(Des))
          continue;
        if (auto D = DA->depends(Src, Des, true)) {
          errs() << "Found Dependency between:\nSrc:" << *Src
                 << "\nDes:" << *Des << "\n";
          if (D->isFlow()) {
            errs() << "Flow dependence not handled";
            return false;
          }
          if (D->isAnti()) {
            errs() << "Found Anti dependence \n";
            AliasAnalysis::AliasResult AA_dep = AA.alias(Src, Des);
            AliasAnalysis::AliasResult AA_dep_1 = AA.alias(Des, Src);
            errs() << "The Ld->St alias result is " << AA_dep << "\n";
            errs() << "The St->Ld alias result is " << AA_dep_1 << "\n";
            unsigned Levels = D->getLevels();
            errs() << "levels = " << Levels << "\n";
            char Direction;
            for (unsigned II = 1; II <= Levels; ++II) {
              const SCEV *Distance = D->getDistance(II);
              const SCEVConstant *SCEVConst =
                  dyn_cast_or_null<SCEVConstant>(Distance);
              if (SCEVConst) {
                const ConstantInt *CI = SCEVConst->getValue();
                unsigned it_dist = abs(CI->getUniqueInteger().getSExtValue());
                errs() << "distance is not null\n";
                errs() << "distance = " << it_dist << "\n";
                if (CI->isNegative())
                  Direction = '<';
                else if (CI->isZero())
                  Direction = '=';
                else
                  Direction = '>';
                Dep.push_back(Direction);
              } else if (D->isScalar(II)) {
                Direction = 'S';
                Dep.push_back(Direction);
              } else {
                unsigned Dir = D->getDirection(II);
                if (Dir == Dependence::DVEntry::LT ||
                    Dir == Dependence::DVEntry::LE)
                  Direction = '<';
                else if (Dir == Dependence::DVEntry::GT ||
                         Dir == Dependence::DVEntry::GE)
                  Direction = '>';
                else if (Dir == Dependence::DVEntry::EQ)
                  Direction = '=';
                else
                  Direction = '*';
                Dep.push_back(Direction);
              }
            }
          }
        }
      }
    }
  }
  errs() << "------Hello World!--------\n";
//......... some code omitted here .........
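This pass collects only simple loads and stores into MemInstr before querying DependenceAnalysis, presumably because volatile and atomic operations carry ordering constraints that the direction vectors computed here would not capture.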