This article collects typical usage examples of the C++ method StoreInst::getPointerOperand. If you are unsure how StoreInst::getPointerOperand is used in practice, the curated examples below should help; they are also a good starting point for exploring the containing class, StoreInst.
The following 15 code examples of the StoreInst::getPointerOperand method are taken from real projects and are sorted by popularity by default.
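Before the examples, here is a minimal sketch (not taken from any of the projects below) of how StoreInst::getPointerOperand is typically used: it returns the address a store writes to, while getValueOperand returns the value being written. The helper name collectStoredPointers is hypothetical and used only for illustration.

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Hypothetical helper: collect the destination address of every simple
// (non-atomic, non-volatile) store in a function.
static SmallVector<Value *, 8> collectStoredPointers(Function &F) {
  SmallVector<Value *, 8> Ptrs;
  for (Instruction &I : instructions(F)) {
    if (auto *SI = dyn_cast<StoreInst>(&I)) {
      if (!SI->isSimple())
        continue;
      // getPointerOperand() is the address being stored to;
      // getValueOperand() would be the value being stored.
      Ptrs.push_back(SI->getPointerOperand());
    }
  }
  return Ptrs;
}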
Example 1: translateStore
bool IRTranslator::translateStore(const StoreInst &SI) {
  assert(SI.isSimple() && "only simple stores are supported at the moment");

  MachineFunction &MF = MIRBuilder.getMF();
  unsigned Val = getOrCreateVReg(*SI.getValueOperand());
  unsigned Addr = getOrCreateVReg(*SI.getPointerOperand());
  LLT VTy{*SI.getValueOperand()->getType()},
      PTy{*SI.getPointerOperand()->getType()};

  MIRBuilder.buildStore(
      VTy, PTy, Val, Addr,
      *MF.getMachineMemOperand(MachinePointerInfo(SI.getPointerOperand()),
                               MachineMemOperand::MOStore,
                               VTy.getSizeInBits() / 8, getMemOpAlignment(SI)));
  return true;
}
Example 2: visitStoreInst
bool Scalarizer::visitStoreInst(StoreInst &SI) {
  if (!ScalarizeLoadStore)
    return false;
  if (!SI.isSimple())
    return false;

  VectorLayout Layout;
  Value *FullValue = SI.getValueOperand();
  if (!getVectorLayout(FullValue->getType(), SI.getAlignment(), Layout))
    return false;

  unsigned NumElems = Layout.VecTy->getNumElements();
  IRBuilder<> Builder(SI.getParent(), &SI);
  Scatterer Ptr = scatter(&SI, SI.getPointerOperand());
  Scatterer Val = scatter(&SI, FullValue);

  ValueVector Stores;
  Stores.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I) {
    unsigned Align = Layout.getElemAlign(I);
    Stores[I] = Builder.CreateAlignedStore(Val[I], Ptr[I], Align);
  }
  transferMetadata(&SI, Stores);
  return true;
}
Example 3: visitStoreInst
void InstrumentMemoryAccesses::visitStoreInst(StoreInst &SI) {
  // Instrument a store instruction with a store check.
  uint64_t Bytes = TD->getTypeStoreSize(SI.getValueOperand()->getType());
  Value *AccessSize = ConstantInt::get(SizeTy, Bytes);
  instrument(SI.getPointerOperand(), AccessSize, StoreCheckFunction, SI);
  ++StoresInstrumented;
}
Example 4: visitStoreInst
void Interpreter::visitStoreInst(StoreInst &I) {
  ExecutionContext &SF = ECStack.back();
  GenericValue Val = getOperandValue(I.getOperand(0), SF);
  GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
  StoreValueToMemory(Val, (GenericValue *)GVTOP(SRC),
                     I.getOperand(0)->getType());
}
Example 5: getFnThatStoreOnArgs
/*
 * Build information about functions that store on pointer arguments.
 * For simplicity, we only consider a function to store on an argument
 * if it has exactly one StoreInst to that argument and the argument has
 * no other use.
 */
int DeadStoreEliminationPass::getFnThatStoreOnArgs(Module &M) {
  int numStores = 0;
  DEBUG(errs() << "Getting functions that store on arguments...\n");
  for (Module::iterator F = M.begin(); F != M.end(); ++F) {
    if (F->arg_empty() || F->isDeclaration()) continue;

    // Collect the pointer-typed formal arguments.
    std::set<Value*> args;
    for (Function::arg_iterator formalArgIter = F->arg_begin();
         formalArgIter != F->arg_end(); ++formalArgIter) {
      Value *formalArg = formalArgIter;
      if (formalArg->getType()->isPointerTy()) {
        args.insert(formalArg);
      }
    }

    // Find stores whose pointer operand is one of those arguments.
    for (Function::iterator BB = F->begin(); BB != F->end(); ++BB) {
      for (BasicBlock::iterator I = BB->begin(); I != BB->end(); ++I) {
        Instruction *inst = I;
        if (!isa<StoreInst>(inst)) continue;
        StoreInst *SI = dyn_cast<StoreInst>(inst);
        Value *ptrOp = SI->getPointerOperand();

        if (args.count(ptrOp) && ptrOp->hasNUses(1)) {
          fnThatStoreOnArgs[F].insert(ptrOp);
          numStores++;
          DEBUG(errs() << "  " << F->getName() << " stores on argument "
                       << ptrOp->getName() << "\n");
        }
      }
    }
  }
  DEBUG(errs() << "\n");
  return numStores;
}
Example 6: unpackStoreToAggregate
static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();
  Type *T = V->getType();

  if (!T->isAggregateType())
    return false;

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct has only one element, we unpack.
    unsigned Count = ST->getNumElements();
    if (Count == 1) {
      V = IC.Builder->CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // We don't want to break up stores with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return false;

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";
    auto *IdxType = Type::getInt32Ty(ST->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);
    for (unsigned i = 0; i < Count; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                                AddrName);
      auto *Val = IC.Builder->CreateExtractValue(V, i, EltName);
      IC.Builder->CreateStore(Val, Ptr);
    }

    return true;
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array has only one element, we unpack.
    if (AT->getNumElements() == 1) {
      V = IC.Builder->CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }
  }

  return false;
}
Example 7: LiftPointer
void PropagateJuliaAddrspaces::visitStoreInst(StoreInst &SI) {
  unsigned AS = SI.getPointerAddressSpace();
  if (!isSpecialAS(AS))
    return;
  Value *Replacement =
      LiftPointer(SI.getPointerOperand(), SI.getValueOperand()->getType(), &SI);
  if (!Replacement)
    return;
  SI.setOperand(StoreInst::getPointerOperandIndex(), Replacement);
}
Example 8: combineStoreToValueType
/// \brief Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and
/// where we can we should match the type of a store to the type of value being
/// stored.
///
/// However, this routine must never change the width of a store or the number
/// of stores as that would introduce a semantic change. This combine is
/// expected to be a semantic no-op which just allows stores to more closely
/// model the types of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller
/// erase the store instruction as otherwise there is no way to signal whether
/// it was combined or not: IC.EraseInstFromFunction returns a null pointer.
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *Ptr = SI.getPointerOperand();
  Value *V = SI.getValueOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    V = BC->getOperand(0);
    StoreInst *NewStore = IC.Builder->CreateAlignedStore(
        V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
        SI.getAlignment());
    for (const auto &MDPair : MD) {
      unsigned ID = MDPair.first;
      MDNode *N = MDPair.second;
      // Note, essentially every kind of metadata should be preserved here!
      // This routine is supposed to clone a store instruction changing *only
      // its type*. The only metadata it makes sense to drop is metadata which
      // is invalidated when the pointer type changes. This should essentially
      // never be the case in LLVM, but we explicitly switch over only known
      // metadata to be conservatively correct. If you are adding metadata to
      // LLVM which pertains to stores, you almost certainly want to add it
      // here.
      switch (ID) {
      case LLVMContext::MD_dbg:
      case LLVMContext::MD_tbaa:
      case LLVMContext::MD_prof:
      case LLVMContext::MD_fpmath:
      case LLVMContext::MD_tbaa_struct:
      case LLVMContext::MD_alias_scope:
      case LLVMContext::MD_noalias:
      case LLVMContext::MD_nontemporal:
      case LLVMContext::MD_mem_parallel_loop_access:
      case LLVMContext::MD_nonnull:
        // All of these directly apply.
        NewStore->setMetadata(ID, N);
        break;
      case LLVMContext::MD_invariant_load:
      case LLVMContext::MD_range:
        break;
      }
    }
    return true;
  }

  // FIXME: We should also canonicalize loads of vectors when their elements
  // are cast to other types.
  return false;
}
Example 9: handleStoreInstruction
// -- handle store instruction --
void UnsafeTypeCastingCheck::handleStoreInstruction (Instruction *inst) {
  StoreInst *sinst = dyn_cast<StoreInst>(inst);
  if (sinst == NULL)
    utccAbort("handleStoreInstruction cannot process with a non-store instruction");

  Value *pt = sinst->getPointerOperand();
  Value *vl = sinst->getValueOperand();
  UTCC_TYPE ptt = queryPointedType(pt);
  UTCC_TYPE vlt = queryExprType(vl);

  setPointedType(pt, vlt);
}
Example 10: isLegalToShrinkwrapLifetimeMarkers
bool CodeExtractor::isLegalToShrinkwrapLifetimeMarkers(
    Instruction *Addr) const {
  AllocaInst *AI = cast<AllocaInst>(Addr->stripInBoundsConstantOffsets());
  Function *Func = (*Blocks.begin())->getParent();
  for (BasicBlock &BB : *Func) {
    if (Blocks.count(&BB))
      continue;
    for (Instruction &II : BB) {
      if (isa<DbgInfoIntrinsic>(II))
        continue;

      unsigned Opcode = II.getOpcode();
      Value *MemAddr = nullptr;
      switch (Opcode) {
      case Instruction::Store:
      case Instruction::Load: {
        if (Opcode == Instruction::Store) {
          StoreInst *SI = cast<StoreInst>(&II);
          MemAddr = SI->getPointerOperand();
        } else {
          LoadInst *LI = cast<LoadInst>(&II);
          MemAddr = LI->getPointerOperand();
        }
        // A global variable cannot be aliased with locals.
        if (dyn_cast<Constant>(MemAddr))
          break;
        Value *Base = MemAddr->stripInBoundsConstantOffsets();
        if (!dyn_cast<AllocaInst>(Base) || Base == AI)
          return false;
        break;
      }
      default: {
        IntrinsicInst *IntrInst = dyn_cast<IntrinsicInst>(&II);
        if (IntrInst) {
          if (IntrInst->getIntrinsicID() == Intrinsic::lifetime_start ||
              IntrInst->getIntrinsicID() == Intrinsic::lifetime_end)
            break;
          return false;
        }
        // Treat all the other cases conservatively if it has side effects.
        if (II.mayHaveSideEffects())
          return false;
      }
      }
    }
  }
  return true;
}
Example 11: canVectorizeInst
// Not an instruction handled below to turn into a vector.
//
// TODO: Check isTriviallyVectorizable for calls and handle other
// instructions.
static bool canVectorizeInst(Instruction *Inst, User *User) {
  switch (Inst->getOpcode()) {
  case Instruction::Load:
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
    return true;
  case Instruction::Store: {
    // Must be the stored pointer operand, not a stored value.
    StoreInst *SI = cast<StoreInst>(Inst);
    return SI->getPointerOperand() == User;
  }
  default:
    return false;
  }
}
Example 12: instructionSafeForVersioning
/// \brief Check whether a loop instruction is safe for loop versioning.
/// Returns true if it is safe, false otherwise.
/// The checks are:
/// 1) All loads and stores in the loop body are non-atomic and non-volatile.
/// 2) Function calls are safe, i.e. they do not access memory.
/// 3) The loop body has no instruction that may throw.
bool LoopVersioningLICM::instructionSafeForVersioning(Instruction *I) {
  assert(I != nullptr && "Null instruction found!");

  // Check function call safety.
  if (isa<CallInst>(I) && !AA->doesNotAccessMemory(CallSite(I))) {
    DEBUG(dbgs() << "    Unsafe call site found.\n");
    return false;
  }

  // Avoid loops with a possibility of throw.
  if (I->mayThrow()) {
    DEBUG(dbgs() << "    May-throw instruction found in loop body\n");
    return false;
  }

  // If the current instruction is a load, make sure it is a simple load
  // (non-atomic and non-volatile).
  if (I->mayReadFromMemory()) {
    LoadInst *Ld = dyn_cast<LoadInst>(I);
    if (!Ld || !Ld->isSimple()) {
      DEBUG(dbgs() << "    Found a non-simple load.\n");
      return false;
    }
    LoadAndStoreCounter++;
    collectStridedAccess(Ld);
    Value *Ptr = Ld->getPointerOperand();
    // Check loop invariance.
    if (SE->isLoopInvariant(SE->getSCEV(Ptr), CurLoop))
      InvariantCounter++;
  }
  // If the current instruction is a store, make sure it is a simple store
  // (non-atomic and non-volatile).
  else if (I->mayWriteToMemory()) {
    StoreInst *St = dyn_cast<StoreInst>(I);
    if (!St || !St->isSimple()) {
      DEBUG(dbgs() << "    Found a non-simple store.\n");
      return false;
    }
    LoadAndStoreCounter++;
    collectStridedAccess(St);
    Value *Ptr = St->getPointerOperand();
    // Check loop invariance.
    if (SE->isLoopInvariant(SE->getSCEV(Ptr), CurLoop))
      InvariantCounter++;
    IsReadOnlyLoop = false;
  }
  return true;
}
Example 13: visitStoreInst
void TracingNoGiri::visitStoreInst(StoreInst &SI) {
  instrumentLock(&SI);

  // Cast the pointer into a void pointer type.
  Value *Pointer = SI.getPointerOperand();
  Pointer = castTo(Pointer, VoidPtrType, Pointer->getName(), &SI);
  // Get the size of the stored data.
  uint64_t size = TD->getTypeStoreSize(SI.getOperand(0)->getType());
  Value *StoreSize = ConstantInt::get(Int64Type, size);
  // Get the ID of the store instruction.
  Value *StoreID = ConstantInt::get(Int32Type, lsNumPass->getID(&SI));
  // Create the call to the run-time to record the store instruction.
  std::vector<Value *> args =
      make_vector<Value *>(StoreID, Pointer, StoreSize, 0);
  CallInst::Create(RecordStore, args, "", &SI);

  instrumentUnlock(&SI);
  ++NumStores; // Update statistics
}
Example 14: visitStoreInst
void GCInvariantVerifier::visitStoreInst(StoreInst &SI) {
  Type *VTy = SI.getValueOperand()->getType();
  if (VTy->isPointerTy()) {
    /* We currently don't obey this for arguments. That's ok - they're
       externally rooted. */
    if (!isa<Argument>(SI.getValueOperand())) {
      unsigned AS = cast<PointerType>(VTy)->getAddressSpace();
      Check(AS != AddressSpace::CalleeRooted &&
            AS != AddressSpace::Derived,
            "Illegal store of decayed value", &SI);
    }
  }
  VTy = SI.getPointerOperand()->getType();
  if (VTy->isPointerTy()) {
    unsigned AS = cast<PointerType>(VTy)->getAddressSpace();
    Check(AS != AddressSpace::CalleeRooted,
          "Illegal store to callee rooted value", &SI);
  }
}
Example 15: canVectorizeInst
// Not an instruction handled below to turn into a vector.
//
// TODO: Check isTriviallyVectorizable for calls and handle other
// instructions.
static bool canVectorizeInst(Instruction *Inst, User *User) {
  switch (Inst->getOpcode()) {
  case Instruction::Load: {
    // Currently we only handle the case where the pointer operand is a GEP,
    // so check for that case.
    LoadInst *LI = cast<LoadInst>(Inst);
    return isa<GetElementPtrInst>(LI->getPointerOperand()) && !LI->isVolatile();
  }
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
    return true;
  case Instruction::Store: {
    // Must be the stored pointer operand, not a stored value; in addition,
    // since the IR should be in canonical form, the User should be a GEP.
    StoreInst *SI = cast<StoreInst>(Inst);
    return (SI->getPointerOperand() == User) &&
           isa<GetElementPtrInst>(User) && !SI->isVolatile();
  }
  default:
    return false;
  }
}