This page collects typical usage examples of the C++ method LoadInst::getPointerOperand. If you are unsure what LoadInst::getPointerOperand does, how to call it, or what real-world code using it looks like, the curated examples below should help; they also illustrate how the enclosing LoadInst class is used.
A total of 15 code examples of LoadInst::getPointerOperand are shown below, ordered by popularity by default.
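Before the real-world examples, here is a minimal self-contained sketch of the basic pattern (printLoadPointers and its body are illustrative, not taken from any of the examples below): walk a function, find each LoadInst, and query the address it reads from.

#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Walk a function and report the address each load reads from.
// getPointerOperand() returns the Value used as the load's address
// (operand 0 of the LoadInst).
void printLoadPointers(Function &F) {
  for (BasicBlock &BB : F)
    for (Instruction &I : BB)
      if (auto *LI = dyn_cast<LoadInst>(&I)) {
        Value *Ptr = LI->getPointerOperand();
        errs() << "load:" << I << "\n  reads from: " << *Ptr << "\n";
      }
}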
Example 1: assert
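This example comes from the X86 interleaved-access lowering. getPointerOperand() supplies the base address of the original wide load; that pointer is bitcast to a sub-vector pointer type and then re-loaded piecewise as NumSubVectors (or twice that many in the 768-bit stride-3 case) smaller aligned loads.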
void X86InterleavedAccessGroup::decompose(
Instruction *VecInst, unsigned NumSubVectors, VectorType *SubVecTy,
SmallVectorImpl<Instruction *> &DecomposedVectors) {
assert((isa<LoadInst>(VecInst) || isa<ShuffleVectorInst>(VecInst)) &&
"Expected Load or Shuffle");
Type *VecTy = VecInst->getType();
(void)VecTy;
assert(VecTy->isVectorTy() &&
DL.getTypeSizeInBits(VecTy) >=
DL.getTypeSizeInBits(SubVecTy) * NumSubVectors &&
"Invalid Inst-size!!!");
if (auto *SVI = dyn_cast<ShuffleVectorInst>(VecInst)) {
Value *Op0 = SVI->getOperand(0);
Value *Op1 = SVI->getOperand(1);
// Generate N(= NumSubVectors) shuffles of T(= SubVecTy) type.
for (unsigned i = 0; i < NumSubVectors; ++i)
DecomposedVectors.push_back(
cast<ShuffleVectorInst>(Builder.CreateShuffleVector(
Op0, Op1,
createSequentialMask(Builder, Indices[i],
SubVecTy->getVectorNumElements(), 0))));
return;
}
// Decompose the load instruction.
LoadInst *LI = cast<LoadInst>(VecInst);
Type *VecBasePtrTy = SubVecTy->getPointerTo(LI->getPointerAddressSpace());
Value *VecBasePtr;
unsigned int NumLoads = NumSubVectors;
// In the case of stride 3 with a vector of 32 elements, the data is loaded
// in the following order:
// [0,1...,VF/2-1,VF/2+VF,VF/2+VF+1,...,2VF-1]
if (DL.getTypeSizeInBits(VecTy) == 768) {
Type *VecTran =
VectorType::get(Type::getInt8Ty(LI->getContext()), 16)->getPointerTo();
VecBasePtr = Builder.CreateBitCast(LI->getPointerOperand(), VecTran);
NumLoads = NumSubVectors * 2;
} else
VecBasePtr = Builder.CreateBitCast(LI->getPointerOperand(), VecBasePtrTy);
// Generate N loads of T type.
for (unsigned i = 0; i < NumLoads; i++) {
// TODO: Support inbounds GEP.
Value *NewBasePtr = Builder.CreateGEP(VecBasePtr, Builder.getInt32(i));
Instruction *NewLoad =
Builder.CreateAlignedLoad(NewBasePtr, LI->getAlignment());
DecomposedVectors.push_back(NewLoad);
}
}
Example 2: translateLoad
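In GlobalISel's IR translator, the pointer operand is mapped to a virtual register and is also wrapped in a MachinePointerInfo for the MachineMemOperand that describes the load.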
bool IRTranslator::translateLoad(const LoadInst &LI) {
assert(LI.isSimple() && "only simple loads are supported at the moment");
MachineFunction &MF = MIRBuilder.getMF();
unsigned Res = getOrCreateVReg(LI);
unsigned Addr = getOrCreateVReg(*LI.getPointerOperand());
LLT VTy{*LI.getType()}, PTy{*LI.getPointerOperand()->getType()};
MIRBuilder.buildLoad(
VTy, PTy, Res, Addr,
*MF.getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
MachineMemOperand::MOLoad,
VTy.getSizeInBits() / 8, getMemOpAlignment(LI)));
return true;
}
Example 3: visitLoadInst
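An instrumentation pass that passes the load's pointer operand, together with the loaded type's store size, to a runtime load-check function.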
void InstrumentMemoryAccesses::visitLoadInst(LoadInst &LI) {
// Instrument a load instruction with a load check.
Value *AccessSize = ConstantInt::get(SizeTy,
TD->getTypeStoreSize(LI.getType()));
instrument(LI.getPointerOperand(), AccessSize, LoadCheckFunction, LI);
++LoadsInstrumented;
}
Example 4: visitLoadInst
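The LLVM interpreter evaluates the pointer operand to a GenericValue, converts it to a host pointer, and reads the load's result from that memory.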
void Interpreter::visitLoadInst(LoadInst &I) {
ExecutionContext &SF = ECStack.back();
GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
GenericValue *Ptr = (GenericValue*)GVTOP(SRC);
GenericValue Result = LoadValueFromMemory(Ptr, I.getType());
SetValue(&I, Result, SF);
}
Example 5: chooseInstructionsToInstrument
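ThreadSanitizer uses the pointer operand as the key for spotting reads that follow a write to the same address within one basic block, so that instrumenting such reads can be skipped (details in the comment below).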
// Instrumenting some of the accesses may be proven redundant.
// Currently handled:
// - read-before-write (within same BB, no calls between)
//
// We do not handle some of the patterns that should not survive
// after the classic compiler optimizations.
// E.g. two reads from the same temp should be eliminated by CSE,
// two writes should be eliminated by DSE, etc.
//
// 'Local' is a vector of insns within the same BB (no calls between).
// 'All' is a vector of insns that will be instrumented.
void ThreadSanitizer::chooseInstructionsToInstrument(
SmallVectorImpl<Instruction*> &Local,
SmallVectorImpl<Instruction*> &All) {
SmallSet<Value*, 8> WriteTargets;
// Iterate from the end.
for (SmallVectorImpl<Instruction*>::reverse_iterator It = Local.rbegin(),
E = Local.rend(); It != E; ++It) {
Instruction *I = *It;
if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
WriteTargets.insert(Store->getPointerOperand());
} else {
LoadInst *Load = cast<LoadInst>(I);
Value *Addr = Load->getPointerOperand();
if (WriteTargets.count(Addr)) {
// We will write to this temp, so no reason to analyze the read.
NumOmittedReadsBeforeWrite++;
continue;
}
if (addrPointsToConstantData(Addr)) {
// Addr points to some constant data -- it cannot race with any writes.
continue;
}
}
All.push_back(I);
}
Local.clear();
}
Example 6: visitLoadInst
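A DSA-style graph builder: the pointer operand is resolved to a DSNode, which is marked as read and has the loaded type's information merged into it.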
void GraphBuilder::visitLoadInst(LoadInst &LI) {
//
// Create a DSNode for the pointer dereferenced by the load. If the DSNode
// is NULL, do nothing more (this can occur if the load is loading from a
// NULL pointer constant; bugpoint can generate such code).
//
DSNodeHandle Ptr = getValueDest(LI.getPointerOperand());
if (Ptr.isNull()) return; // Load from null
// Mark that the node is read from...
Ptr.getNode()->setReadMarker();
// Ensure a type record exists...
Ptr.getNode()->growSizeForType(LI.getType(), Ptr.getOffset());
if (isa<PointerType>(LI.getType()))
setDestTo(LI, getLink(Ptr));
// If the loaded value's only use is to store it right back, skip the type merge below.
if(TypeInferenceOptimize)
if(LI.hasOneUse())
if(StoreInst *SI = dyn_cast<StoreInst>(*(LI.use_begin())))
if(SI->getOperand(0) == &LI) {
++NumIgnoredInst;
return;
}
Ptr.getNode()->mergeTypeInfo(LI.getType(), Ptr.getOffset());
}
Example 7: LiftPointer
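Julia's address-space propagation pass rewrites the pointer operand in place via setOperand(LoadInst::getPointerOperandIndex(), ...) when the pointer can be lifted out of a special address space.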
void PropagateJuliaAddrspaces::visitLoadInst(LoadInst &LI) {
unsigned AS = LI.getPointerAddressSpace();
if (!isSpecialAS(AS))
return;
Value *Replacement = LiftPointer(LI.getPointerOperand(), LI.getType(), &LI);
if (!Replacement)
return;
LI.setOperand(LoadInst::getPointerOperandIndex(), Replacement);
}
Example 8: visitLoadInst
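AMDGPU uniformity annotation: if divergence analysis shows the pointer operand is uniform, the instruction that defines it is tagged with uniform metadata.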
void AMDGPUAnnotateUniformValues::visitLoadInst(LoadInst &I) {
Value *Ptr = I.getPointerOperand();
if (!DA->isUniform(Ptr))
return;
if (Instruction *PtrI = dyn_cast<Instruction>(Ptr))
setUniformMetadata(PtrI);
}
Example 9: getLoopViLoad
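A loop helper that checks whether the first non-PHI instruction of the loop header is already a load of the alloca returned by getLoopVi, by comparing that load's pointer operand, before creating a new LoadInst.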
LoadInst* getLoopViLoad(Loop *L)
{
AllocaInst* viAlloc = getLoopVi(L);
//Instruction* firstHeaderInstr = L->getHeader()->begin();
Instruction* firstHeaderInstr = L->getHeader()->getFirstNonPHI();
// If such a load already exists, return it instead of creating a new one.
LoadInst* firstHeaderInstrLoad = dyn_cast<LoadInst>(firstHeaderInstr);
if(firstHeaderInstrLoad && firstHeaderInstrLoad->getPointerOperand() == viAlloc)
return firstHeaderInstrLoad;
return new LoadInst(viAlloc, viAlloc->getName() + ".load", firstHeaderInstr);
}
Example 10: DEBUG
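A diagnostic walk over the module that prints every load together with the name of its pointer operand, and every GEP it encounters.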
void smtit::performTest1() {
for (Module::iterator FI = Mod->begin(), FE = Mod->end(); FI != FE; ++FI) {
Function *Func = &*FI;
// DEBUG(errs() << *Func << "\n");
for (Function::iterator BI = Func->begin(), BE = Func->end(); BI != BE;
++BI) {
BasicBlock *BB = &*BI;
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
Instruction *BBI = &*I;
//if (true == isa<StoreInst>(BBI)) {
if (true == isa<LoadInst>(BBI)) {
LoadInst *li = dyn_cast<LoadInst>(BBI);
Value *ptrOp = li->getPointerOperand();
DEBUG(errs() << *li << "\t Result Name: " << li->getName() << "\t Pointer Name: " << ptrOp->getName() << "\n");
// DEBUG(errs() << "\tStore Instruction: " << *BBI << " \n");
// DEBUG(errs() << "\t\tPointerType: " << isLLVMPAPtrTy(SI->getType())
// << " \n");
// Instruction* V = cast<Instruction>(SI->getOperand(1));
// DEBUG(errs() << "\tOperand : " << *V << " \n");
// DEBUG(errs() << "\t\tPointerType: " << isLLVMPAPtrTy(V->getType())
// << " \n");
} else if(true == isa<GetElementPtrInst>(BBI)) {
GetElementPtrInst *gep = dyn_cast<GetElementPtrInst>(BBI);
DEBUG(errs() << *gep << "\t Result Name: " << gep->getName() << "\n");
// DEBUG(errs() << "\tInstruction: " << *BBI << " \n");
// DEBUG(errs() << "\t\tPointerType: " <<
// isLLVMPAPtrTy(BBI->getType()) << " \n");
}
// For def-use chains: All the uses of the definition
//DEBUG(errs() << *BBI << "\n");
/*
for (User *U : BBI->users()) {
if (Instruction *Inst = dyn_cast<Instruction>(U)) {
DEBUG(errs()<< " " << *Inst << "\n");
}
}
for (Value::user_iterator i = BBI->user_begin(), e = BBI->user_end();
i != e; ++i) {
if (Instruction *user_inst = dyn_cast<Instruction>(*i)) {
DEBUG(errs()<< " " << *user_inst << "\n");
}
}
*/
}
}
}
}
Example 11: isLegalToShrinkwrapLifetimeMarkers
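CodeExtractor strips the pointer operand of each load or store outside the extracted region down to its base object and compares that base against the alloca whose lifetime markers it wants to shrink-wrap.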
bool CodeExtractor::isLegalToShrinkwrapLifetimeMarkers(
Instruction *Addr) const {
AllocaInst *AI = cast<AllocaInst>(Addr->stripInBoundsConstantOffsets());
Function *Func = (*Blocks.begin())->getParent();
for (BasicBlock &BB : *Func) {
if (Blocks.count(&BB))
continue;
for (Instruction &II : BB) {
if (isa<DbgInfoIntrinsic>(II))
continue;
unsigned Opcode = II.getOpcode();
Value *MemAddr = nullptr;
switch (Opcode) {
case Instruction::Store:
case Instruction::Load: {
if (Opcode == Instruction::Store) {
StoreInst *SI = cast<StoreInst>(&II);
MemAddr = SI->getPointerOperand();
} else {
LoadInst *LI = cast<LoadInst>(&II);
MemAddr = LI->getPointerOperand();
}
// A global variable cannot be aliased with locals.
if (dyn_cast<Constant>(MemAddr))
break;
Value *Base = MemAddr->stripInBoundsConstantOffsets();
if (!dyn_cast<AllocaInst>(Base) || Base == AI)
return false;
break;
}
default: {
IntrinsicInst *IntrInst = dyn_cast<IntrinsicInst>(&II);
if (IntrInst) {
if (IntrInst->getIntrinsicID() == Intrinsic::lifetime_start ||
IntrInst->getIntrinsicID() == Intrinsic::lifetime_end)
break;
return false;
}
// Treat all the other cases conservatively if it has side effects.
if (II.mayHaveSideEffects())
return false;
}
}
}
}
return true;
}
Example 12: visitLoadInst
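A GC invariant verifier that checks both the loaded type and the type of the pointer operand for address spaces that are illegal at this point (callee-rooted or derived pointers).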
void GCInvariantVerifier::visitLoadInst(LoadInst &LI) {
Type *Ty = LI.getType();
if (Ty->isPointerTy()) {
unsigned AS = cast<PointerType>(Ty)->getAddressSpace();
Check(AS != AddressSpace::CalleeRooted &&
AS != AddressSpace::Derived,
"Illegal load of gc relevant value", &LI);
}
Ty = LI.getPointerOperand()->getType();
if (Ty->isPointerTy()) {
unsigned AS = cast<PointerType>(Ty)->getAddressSpace();
Check(AS != AddressSpace::CalleeRooted,
"Illegal store of callee rooted value", &LI);
}
}
Example 13: visitLoadInst
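An array-index checker that first visits the pointer operand recursively and then, for pointer-typed loads, looks the instruction up in its tracked-value tables and reports ArrayIndexIsNotConstant if the recorded address version is non-zero.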
void ArrayIndexChecker::visitLoadInst(LoadInst& I) {
DEBUG(dbgs() << "ArrayIndexChecker: visiting load " << I << "\n");
visitValue(*I.getPointerOperand());
if (I.getType()->isPointerTy()) {
auto pos = std::find(ptr_value_vec_.begin(), ptr_value_vec_.end(), &I);
assert(pos != ptr_value_vec_.end());
index_t varIdx = pos - ptr_value_vec_.begin();
assert(idx2addr_.find(varIdx) != idx2addr_.end());
if (addr2version_[idx2addr_[varIdx]] != 0)
throw ArrayIndexIsNotConstant;
}
DEBUG(dbgs() << "ArrayIndexChecker: visited load\n");
}
Example 14: countStridedLoads
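The Falkor unrolling hook feeds each load's pointer operand to ScalarEvolution; loads whose address is a loop-variant affine add-recurrence count as strided, and the unroll factor is capped so that unrolling does not create too many of them.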
// For Falkor, we want to avoid having too many strided loads in a loop since
// that can exhaust the HW prefetcher resources. We adjust the unroller
// MaxCount preference below to attempt to ensure unrolling doesn't create too
// many strided loads.
static void
getFalkorUnrollingPreferences(Loop *L, ScalarEvolution &SE,
TargetTransformInfo::UnrollingPreferences &UP) {
enum { MaxStridedLoads = 7 };
auto countStridedLoads = [](Loop *L, ScalarEvolution &SE) {
int StridedLoads = 0;
// FIXME? We could make this more precise by looking at the CFG and
// e.g. not counting loads in each side of an if-then-else diamond.
for (const auto BB : L->blocks()) {
for (auto &I : *BB) {
LoadInst *LMemI = dyn_cast<LoadInst>(&I);
if (!LMemI)
continue;
Value *PtrValue = LMemI->getPointerOperand();
if (L->isLoopInvariant(PtrValue))
continue;
const SCEV *LSCEV = SE.getSCEV(PtrValue);
const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV);
if (!LSCEVAddRec || !LSCEVAddRec->isAffine())
continue;
// FIXME? We could take pairing of unrolled load copies into account
// by looking at the AddRec, but we would probably have to limit this
// to loops with no stores or other memory optimization barriers.
++StridedLoads;
// We've seen enough strided loads that seeing more won't make a
// difference.
if (StridedLoads > MaxStridedLoads / 2)
return StridedLoads;
}
}
return StridedLoads;
};
int StridedLoads = countStridedLoads(L, SE);
LLVM_DEBUG(dbgs() << "falkor-hwpf: detected " << StridedLoads
<< " strided loads\n");
// Pick the largest power of 2 unroll count that won't result in too many
// strided loads.
if (StridedLoads) {
UP.MaxCount = 1 << Log2_32(MaxStridedLoads / StridedLoads);
LLVM_DEBUG(dbgs() << "falkor-hwpf: setting unroll MaxCount to "
<< UP.MaxCount << '\n');
}
}
Example 15: visitLoadInst
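AMDGPU load widening bitcasts the pointer operand to an i32 pointer so that a sub-dword load from the constant address space can be re-issued as a full 32-bit load, with any range metadata adjusted and the result truncated back to the original type.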
bool AMDGPUCodeGenPrepare::visitLoadInst(LoadInst &I) {
if (!WidenLoads)
return false;
if ((I.getPointerAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS ||
I.getPointerAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS_32BIT) &&
canWidenScalarExtLoad(I)) {
IRBuilder<> Builder(&I);
Builder.SetCurrentDebugLocation(I.getDebugLoc());
Type *I32Ty = Builder.getInt32Ty();
Type *PT = PointerType::get(I32Ty, I.getPointerAddressSpace());
Value *BitCast = Builder.CreateBitCast(I.getPointerOperand(), PT);
LoadInst *WidenLoad = Builder.CreateLoad(BitCast);
WidenLoad->copyMetadata(I);
// If we have range metadata, we need to convert the type, and not make
// assumptions about the high bits.
if (auto *Range = WidenLoad->getMetadata(LLVMContext::MD_range)) {
ConstantInt *Lower =
mdconst::extract<ConstantInt>(Range->getOperand(0));
if (Lower->getValue().isNullValue()) {
WidenLoad->setMetadata(LLVMContext::MD_range, nullptr);
} else {
Metadata *LowAndHigh[] = {
ConstantAsMetadata::get(ConstantInt::get(I32Ty, Lower->getValue().zext(32))),
// Don't make assumptions about the high bits.
ConstantAsMetadata::get(ConstantInt::get(I32Ty, 0))
};
WidenLoad->setMetadata(LLVMContext::MD_range,
MDNode::get(Mod->getContext(), LowAndHigh));
}
}
int TySize = Mod->getDataLayout().getTypeSizeInBits(I.getType());
Type *IntNTy = Builder.getIntNTy(TySize);
Value *ValTrunc = Builder.CreateTrunc(WidenLoad, IntNTy);
Value *ValOrig = Builder.CreateBitCast(ValTrunc, I.getType());
I.replaceAllUsesWith(ValOrig);
I.eraseFromParent();
return true;
}
return false;
}