本文整理汇总了C++中StoreInst类的典型用法代码示例。如果您正苦于以下问题:C++ StoreInst类的具体用法?C++ StoreInst怎么用?C++ StoreInst使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了StoreInst类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: IRAccess
// Translate a single load or store instruction into Polly's IRAccess
// description: access kind (READ / MUST_WRITE / MAY_WRITE), base pointer,
// offset expression relative to that base, and the accessed size in bytes.
// NOTE(review): assumes Inst is either a LoadInst or a StoreInst — the
// cast<StoreInst> below will assert on anything else.
IRAccess
TempScopInfo::buildIRAccess(Instruction *Inst, Loop *L, Region *R,
const ScopDetection::BoxedLoopsSetTy *BoxedLoops) {
unsigned Size;
Type *SizeType;
enum IRAccess::TypeKind Type;
if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) {
// Loads read a value of the load's result type.
SizeType = Load->getType();
Size = TD->getTypeStoreSize(SizeType);
Type = IRAccess::READ;
} else {
// Anything that is not a load must be a store; stores write a value of
// the stored operand's type.
StoreInst *Store = cast<StoreInst>(Inst);
SizeType = Store->getValueOperand()->getType();
Size = TD->getTypeStoreSize(SizeType);
Type = IRAccess::MUST_WRITE;
}
// Evaluate the accessed address as a SCEV within loop L, then split it
// into a base pointer plus an offset (the "access function").
const SCEV *AccessFunction = SE->getSCEVAtScope(getPointerOperand(*Inst), L);
const SCEVUnknown *BasePointer =
dyn_cast<SCEVUnknown>(SE->getPointerBase(AccessFunction));
assert(BasePointer && "Could not find base pointer");
AccessFunction = SE->getMinusSCEV(AccessFunction, BasePointer);
// If delinearization is enabled and we already recovered multi-dimensional
// subscripts for this instruction, emit them directly (marked affine).
auto AccItr = InsnToMemAcc.find(Inst);
if (PollyDelinearize && AccItr != InsnToMemAcc.end())
return IRAccess(Type, BasePointer->getValue(), AccessFunction, Size, true,
AccItr->second.DelinearizedSubscripts,
AccItr->second.Shape->DelinearizedSizes);
// Check if the access depends on a loop contained in a non-affine subregion.
bool isVariantInNonAffineLoop = false;
if (BoxedLoops) {
SetVector<const Loop *> Loops;
findLoops(AccessFunction, Loops);
for (const Loop *L : Loops)
if (BoxedLoops->count(L))
isVariantInNonAffineLoop = true;
}
// The access is affine only if it does not vary inside a boxed loop and
// the offset expression itself is affine with respect to the region.
bool IsAffine = !isVariantInNonAffineLoop &&
isAffineExpr(R, AccessFunction, *SE, BasePointer->getValue());
SmallVector<const SCEV *, 4> Subscripts, Sizes;
Subscripts.push_back(AccessFunction);
Sizes.push_back(SE->getConstant(ZeroOffset->getType(), Size));
// A non-affine write may or may not actually happen, so it must be
// downgraded from MUST_WRITE to MAY_WRITE for correctness.
if (!IsAffine && Type == IRAccess::MUST_WRITE)
Type = IRAccess::MAY_WRITE;
return IRAccess(Type, BasePointer->getValue(), AccessFunction, Size, IsAffine,
Subscripts, Sizes);
}
示例2: LLVMBuildAtomicStore
// C-ABI helper: build a volatile atomic store of `val` into `target` with
// the requested ordering and alignment, inserted at the builder's current
// position. Returns the wrapped store instruction.
extern "C" LLVMValueRef LLVMBuildAtomicStore(LLVMBuilderRef B,
LLVMValueRef val,
LLVMValueRef target,
AtomicOrdering order,
unsigned alignment) {
  StoreInst *Store = new StoreInst(unwrap(val), unwrap(target));
  // Configure the store before handing it to the builder for insertion.
  Store->setAlignment(alignment);
  Store->setAtomic(order);
  Store->setVolatile(true);
  return wrap(unwrap(B)->Insert(Store));
}
示例3: mono_llvm_build_aligned_store
/*
 * mono_llvm_build_aligned_store:
 *
 *   Emit a store of VAL to POINTERVAL with an explicit alignment, which the
 * stock LLVM C API does not let callers specify. Returns the wrapped store.
 */
LLVMValueRef
mono_llvm_build_aligned_store (LLVMBuilderRef builder, LLVMValueRef Val, LLVMValueRef PointerVal,
gboolean is_volatile, int alignment)
{
	StoreInst *store = unwrap (builder)->CreateStore (unwrap (Val), unwrap (PointerVal), is_volatile);
	store->setAlignment (alignment);
	return wrap (store);
}
示例4: genSTDStore
// Record an STD (store) mutation opportunity for `inst` in the mutation
// output stream, but only when the stored value's type is one the mutation
// generator supports.
void MutationGen::genSTDStore(Instruction * inst, StringRef fname, int index){
	StoreInst *Store = cast<StoreInst>(inst);
	Type *StoredTy = Store->getValueOperand()->getType();
	if (!isSupportedType(StoredTy))
		return;
	// Record format: "STD:<function>:<index>:<opcode>:0"
	std::stringstream Record;
	Record << "STD:" << std::string(fname) << ":" << index << ":"
	       << inst->getOpcode() << ":" << 0 << '\n';
	ofresult << Record.str();
	ofresult.flush();
	muts_num++;
}
示例5: checkArgumentUses
// Decide whether every use of the (pointer) argument `Arg` is a simple store
// through it — possibly via a single, compatible bitcast — so that the
// argument is a candidate for out-argument rewriting. Returns false as soon
// as any disqualifying use is found, or if there are no uses at all.
bool AMDGPURewriteOutArguments::checkArgumentUses(Value &Arg) const {
const int MaxUses = 10;
int UseCount = 0;
for (Use &U : Arg.uses()) {
StoreInst *SI = dyn_cast<StoreInst>(U.getUser());
// Bail out once we have seen more stores than we are willing to rewrite.
if (UseCount > MaxUses)
return false;
if (!SI) {
// The only non-store use we tolerate is a single-use bitcast whose
// uses are themselves all acceptable (checked recursively below).
auto *BCI = dyn_cast<BitCastInst>(U.getUser());
if (!BCI || !BCI->hasOneUse())
return false;
// We don't handle multiple stores currently, so stores to aggregate
// pointers aren't worth the trouble since they are canonically split up.
Type *DestEltTy = BCI->getType()->getPointerElementType();
if (DestEltTy->isAggregateType())
return false;
// We could handle these if we had a convenient way to bitcast between
// them.
Type *SrcEltTy = Arg.getType()->getPointerElementType();
if (SrcEltTy->isArrayTy())
return false;
// Special case handle structs with single members. It is useful to handle
// some casts between structs and non-structs, but we can't directly
// bitcast between them. Blender uses
// some casts that look like { <3 x float> }* to <4 x float>*
if ((SrcEltTy->isStructTy() && (SrcEltTy->getStructNumElements() != 1)))
return false;
// Clang emits OpenCL 3-vector type accesses with a bitcast to the
// equivalent 4-element vector and accesses that, and we're looking for
// this pointer cast.
if (DL->getTypeAllocSize(SrcEltTy) != DL->getTypeAllocSize(DestEltTy))
return false;
return checkArgumentUses(*BCI);
}
// A direct store qualifies only if it is simple (non-atomic, non-volatile)
// and the argument is the store's *address*, not the value being stored.
if (!SI->isSimple() ||
U.getOperandNo() != StoreInst::getPointerOperandIndex())
return false;
++UseCount;
}
// Skip unused arguments.
return UseCount > 0;
}
示例6: eraseInstFromFunction
// Try to eliminate an alloc_existential_box whose only (non-debug) users are
// exactly one store into the box and one strong_release of it. In that case
// the boxed value never escapes, so the box, the store, and the release can
// all be removed — after releasing the stored value itself, since the box
// would otherwise have owned it. Returns the result of erasing the box on
// success, or nullptr if the pattern does not match.
SILInstruction*
SILCombiner::visitAllocExistentialBoxInst(AllocExistentialBoxInst *AEBI) {
// Optimize away the pattern below that happens when exceptions are created
// and in some cases, due to inlining, are not needed.
//
// %6 = alloc_existential_box $ErrorType, $ColorError
// %7 = enum $VendingMachineError, #ColorError.Red
// store %7 to %6#1 : $*ColorError
// debug_value %6#0 : $ErrorType
// strong_release %6#0 : $ErrorType
StoreInst *SingleStore = nullptr;
StrongReleaseInst *SingleRelease = nullptr;
// For each user U of the alloc_existential_box...
for (auto U : getNonDebugUses(*AEBI)) {
// Record stores into the box.
if (auto *SI = dyn_cast<StoreInst>(U->getUser())) {
// If this is not the only store into the box then bail out.
if (SingleStore) return nullptr;
SingleStore = SI;
continue;
}
// Record releases of the box.
if (auto *RI = dyn_cast<StrongReleaseInst>(U->getUser())) {
// If this is not the only release of the box then bail out.
if (SingleRelease) return nullptr;
SingleRelease = RI;
continue;
}
// If there are other users to the box then bail out.
return nullptr;
}
if (SingleStore && SingleRelease) {
// Release the value that was stored into the existential box. The box
// is going away so we need to release the stored value now.
Builder.setInsertionPoint(SingleStore);
Builder.createReleaseValue(AEBI->getLoc(), SingleStore->getSrc());
// Erase the instruction that stores into the box and the release that
// releases the box, and finally, release the box.
eraseInstFromFunction(*SingleRelease);
eraseInstFromFunction(*SingleStore);
return eraseInstFromFunction(*AEBI);
}
return nullptr;
}
示例7: LANARTS_ASSERT
// Handle a "buy from store" action: look up the store instance named by the
// action, and if the player can afford the selected slot's item, transfer it
// into the player's inventory, deduct the cost, and empty the slot.
void PlayerInst::purchase_from_store(GameState* gs, const GameAction& action) {
	StoreInst* store = (StoreInst*)gs->get_instance(action.use_id);
	if (store == NULL) {
		return;
	}
	// The unchecked cast above is validated in debug builds only.
	LANARTS_ASSERT(dynamic_cast<StoreInst*>(gs->get_instance(action.use_id)));
	StoreInventory& stock = store->inventory();
	StoreItemSlot& entry = stock.get(action.use_id2);
	if (gold() < entry.cost) {
		return; // Can't afford it; the purchase silently fails.
	}
	inventory().add(entry.item);
	gold() -= entry.cost;
	entry.item.clear();
}
示例8: switch
// Check whether it is legal to shrink-wrap the lifetime markers of the
// alloca underlying `Addr` into the extracted region: scan every block
// *outside* the extraction set and prove that no instruction there can
// touch that alloca. Loads/stores of globals or of *other* allocas are
// fine; lifetime intrinsics are fine; anything else with side effects
// (or any access that might alias the alloca) makes it illegal.
// Returns true when shrink-wrapping is safe.
bool CodeExtractor::isLegalToShrinkwrapLifetimeMarkers(
Instruction *Addr) const {
AllocaInst *AI = cast<AllocaInst>(Addr->stripInBoundsConstantOffsets());
Function *Func = (*Blocks.begin())->getParent();
for (BasicBlock &BB : *Func) {
// Only code outside the extracted region can invalidate the markers.
if (Blocks.count(&BB))
continue;
for (Instruction &II : BB) {
if (isa<DbgInfoIntrinsic>(II))
continue;
unsigned Opcode = II.getOpcode();
Value *MemAddr = nullptr;
switch (Opcode) {
case Instruction::Store:
case Instruction::Load: {
if (Opcode == Instruction::Store) {
StoreInst *SI = cast<StoreInst>(&II);
MemAddr = SI->getPointerOperand();
} else {
LoadInst *LI = cast<LoadInst>(&II);
MemAddr = LI->getPointerOperand();
}
// Global variable can not be aliased with locals.
if (isa<Constant>(MemAddr))
break;
// A load/store of a different alloca cannot alias AI; anything that
// is not provably a distinct alloca is treated as a potential alias.
Value *Base = MemAddr->stripInBoundsConstantOffsets();
if (!isa<AllocaInst>(Base) || Base == AI)
return false;
break;
}
default: {
// Lifetime markers themselves are harmless outside the region.
IntrinsicInst *IntrInst = dyn_cast<IntrinsicInst>(&II);
if (IntrInst) {
if (IntrInst->getIntrinsicID() == Intrinsic::lifetime_start ||
IntrInst->getIntrinsicID() == Intrinsic::lifetime_end)
break;
return false;
}
// Treat all the other cases conservatively if it has side effects.
if (II.mayHaveSideEffects())
return false;
}
}
}
}
return true;
}
示例9: visitStore
// Inline-cost handling for a store: when the store's address traces back to
// an SROA-able argument, a simple store is free after SROA (its cost is
// credited to the SROA savings), while a non-simple one defeats SROA for
// that argument. Returns true if the instruction is considered free.
bool CallAnalyzer::visitStore(StoreInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (!lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    return false;
  if (!I.isSimple()) {
    // Atomic/volatile stores block SROA of this argument entirely.
    disableSROA(CostIt);
    return false;
  }
  accumulateSROACost(CostIt, InlineConstants::InstrCost);
  return true;
}
示例10: canVectorizeInst
// Returns true when `Inst`, as a user of the pointer `User`, is one of the
// instruction kinds this pass knows how to turn into a vector access.
//
// TODO: Check isTriviallyVectorizable for calls and handle other
// instructions.
static bool canVectorizeInst(Instruction *Inst, User *User) {
  const unsigned Opc = Inst->getOpcode();
  if (Opc == Instruction::Load || Opc == Instruction::BitCast ||
      Opc == Instruction::AddrSpaceCast)
    return true;
  if (Opc == Instruction::Store) {
    // Must be the stored pointer operand, not a stored value.
    return cast<StoreInst>(Inst)->getPointerOperand() == User;
  }
  // Not an instruction handled above.
  return false;
}
示例11: LLVM_General_BuildStore
// FFI helper: build an aligned store and apply the name, atomic ordering and
// synchronization scope that the plain LLVM C API does not expose together.
// The synch scope is only meaningful (and only set) for atomic stores.
LLVMValueRef LLVM_General_BuildStore(
LLVMBuilderRef b,
LLVMValueRef v,
LLVMValueRef p,
unsigned align,
LLVMBool isVolatile,
LLVMAtomicOrdering atomicOrdering,
LLVMSynchronizationScope synchScope,
const char *name
) {
  StoreInst *Store =
      unwrap(b)->CreateAlignedStore(unwrap(v), unwrap(p), align, isVolatile);
  Store->setOrdering(unwrap(atomicOrdering));
  if (atomicOrdering != LLVMAtomicOrderingNotAtomic)
    Store->setSynchScope(unwrap(synchScope));
  Store->setName(name);
  return wrap(Store);
}
示例12: switch
/// \brief Combine a store to a new type.
///
/// Builds a replacement for \p SI that stores \p V instead, bitcasting the
/// pointer operand to match V's type and copying over all metadata that
/// remains valid after the type change. The original store is NOT erased
/// here; the caller is responsible for that.
///
/// Returns the newly created store instruction.
static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI, Value *V) {
Value *Ptr = SI.getPointerOperand();
unsigned AS = SI.getPointerAddressSpace();
SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
SI.getAllMetadata(MD);
// Store V through a pointer of V's own type, preserving the original
// alignment (the address itself is unchanged, so alignment still holds).
StoreInst *NewStore = IC.Builder->CreateAlignedStore(
V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
SI.getAlignment());
for (const auto &MDPair : MD) {
unsigned ID = MDPair.first;
MDNode *N = MDPair.second;
// Note, essentially every kind of metadata should be preserved here! This
// routine is supposed to clone a store instruction changing *only its
// type*. The only metadata it makes sense to drop is metadata which is
// invalidated when the pointer type changes. This should essentially
// never be the case in LLVM, but we explicitly switch over only known
// metadata to be conservatively correct. If you are adding metadata to
// LLVM which pertains to stores, you almost certainly want to add it
// here.
switch (ID) {
case LLVMContext::MD_dbg:
case LLVMContext::MD_tbaa:
case LLVMContext::MD_prof:
case LLVMContext::MD_fpmath:
case LLVMContext::MD_tbaa_struct:
case LLVMContext::MD_alias_scope:
case LLVMContext::MD_noalias:
case LLVMContext::MD_nontemporal:
case LLVMContext::MD_mem_parallel_loop_access:
// All of these directly apply.
NewStore->setMetadata(ID, N);
break;
case LLVMContext::MD_invariant_load:
case LLVMContext::MD_nonnull:
case LLVMContext::MD_range:
case LLVMContext::MD_align:
case LLVMContext::MD_dereferenceable:
case LLVMContext::MD_dereferenceable_or_null:
// These don't apply for stores.
break;
}
}
return NewStore;
}
示例13: assert
/// \brief Check loop instructions safe for Loop versioning.
/// It returns true if it's safe else returns false.
/// Consider following:
/// 1) Check all load store in loop body are non atomic & non volatile.
/// 2) Check function call safety, by ensuring its not accessing memory.
/// 3) Loop body shouldn't have any may throw instruction.
///
/// As a side effect, this updates the pass's bookkeeping counters
/// (LoadAndStoreCounter, InvariantCounter) and clears IsReadOnlyLoop
/// when a store is seen.
bool LoopVersioningLICM::instructionSafeForVersioning(Instruction *I) {
assert(I != nullptr && "Null instruction found!");
// Check function call safety
if (isa<CallInst>(I) && !AA->doesNotAccessMemory(CallSite(I))) {
DEBUG(dbgs() << "    Unsafe call site found.\n");
return false;
}
// Avoid loops with possibility of throw
if (I->mayThrow()) {
DEBUG(dbgs() << "    May throw instruction found in loop body\n");
return false;
}
// If current instruction is load instructions
// make sure it's a simple load (non atomic & non volatile)
if (I->mayReadFromMemory()) {
// Anything that reads memory but is not a plain LoadInst is rejected.
LoadInst *Ld = dyn_cast<LoadInst>(I);
if (!Ld || !Ld->isSimple()) {
DEBUG(dbgs() << "    Found a non-simple load.\n");
return false;
}
LoadAndStoreCounter++;
collectStridedAccess(Ld);
Value *Ptr = Ld->getPointerOperand();
// Check loop invariant.
if (SE->isLoopInvariant(SE->getSCEV(Ptr), CurLoop))
InvariantCounter++;
}
// If current instruction is store instruction
// make sure it's a simple store (non atomic & non volatile)
else if (I->mayWriteToMemory()) {
StoreInst *St = dyn_cast<StoreInst>(I);
if (!St || !St->isSimple()) {
DEBUG(dbgs() << "    Found a non-simple store.\n");
return false;
}
LoadAndStoreCounter++;
collectStridedAccess(St);
Value *Ptr = St->getPointerOperand();
// Check loop invariant.
if (SE->isLoopInvariant(SE->getSCEV(Ptr), CurLoop))
InvariantCounter++;
// A store means the loop is no longer read-only.
IsReadOnlyLoop = false;
}
return true;
}
示例14: combineStoreToValueType
/// \brief Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and
/// where we can we should match the type of a store to the type of value being
/// stored.
///
/// However, this routine must never change the width of a store or the number of
/// stores as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows stores to more closely model the types
/// of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller erase
/// the store instruction as otherwise there is no way to signal whether it was
/// combined or not: IC.EraseInstFromFunction returns a null pointer.
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
// FIXME: We could probably with some care handle both volatile and atomic
// stores here but it isn't clear that this is important.
if (!SI.isSimple())
return false;
Value *Ptr = SI.getPointerOperand();
Value *V = SI.getValueOperand();
unsigned AS = SI.getPointerAddressSpace();
SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
SI.getAllMetadata(MD);
// Fold away bit casts of the stored value by storing the original type.
if (auto *BC = dyn_cast<BitCastInst>(V)) {
V = BC->getOperand(0);
// Store the pre-cast value through a pointer of its own type; the
// address and alignment are unchanged, only the value type differs.
StoreInst *NewStore = IC.Builder->CreateAlignedStore(
V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
SI.getAlignment());
for (const auto &MDPair : MD) {
unsigned ID = MDPair.first;
MDNode *N = MDPair.second;
// Note, essentially every kind of metadata should be preserved here! This
// routine is supposed to clone a store instruction changing *only its
// type*. The only metadata it makes sense to drop is metadata which is
// invalidated when the pointer type changes. This should essentially
// never be the case in LLVM, but we explicitly switch over only known
// metadata to be conservatively correct. If you are adding metadata to
// LLVM which pertains to stores, you almost certainly want to add it
// here.
switch (ID) {
case LLVMContext::MD_dbg:
case LLVMContext::MD_tbaa:
case LLVMContext::MD_prof:
case LLVMContext::MD_fpmath:
case LLVMContext::MD_tbaa_struct:
case LLVMContext::MD_alias_scope:
case LLVMContext::MD_noalias:
case LLVMContext::MD_nontemporal:
case LLVMContext::MD_mem_parallel_loop_access:
case LLVMContext::MD_nonnull:
// All of these directly apply.
NewStore->setMetadata(ID, N);
break;
case LLVMContext::MD_invariant_load:
case LLVMContext::MD_range:
// These don't apply for stores.
break;
}
}
return true;
}
// FIXME: We should also canonicalize loads of vectors when their elements are
// cast to other types.
return false;
}
示例15: visitStoreInst
// Forward a store to the lattice value tracking the stored-to field (the
// store's pointer operand). If the lattice value reports it has hit bottom,
// remove it. Returns true only when a lattice value was removed. A no-op
// when store callbacks were not requested.
bool SFVInstVisitor::visitStoreInst(StoreInst &SI) {
  if (!(Callbacks & Visit::Stores))
    return false;
  LatticeValue *LV = getLatticeValueForField(SI.getOperand(1));
  if (LV && LV->visitStore(SI))
    return RemoveLatticeValueAtBottom(LV);
  return false;
}