This article collects typical usage examples of the C++ method StoreInst::getValueOperand. If you are unsure what StoreInst::getValueOperand does or how to call it, the curated examples below should help; you can also look further into the containing class, StoreInst.
Fifteen code examples of StoreInst::getValueOperand are shown below, drawn from open-source projects and ordered by popularity by default.
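Before diving into the examples, it may help to recall the shape of the API. A store has exactly two operands: the value being written and the destination pointer. The minimal sketch below is a hand-written orientation example (not taken from any of the projects quoted later):

#include "llvm/IR/Instructions.h"

using namespace llvm;

// getValueOperand() returns operand 0 (the value being stored);
// getPointerOperand() returns operand 1 (the address written to).
static void inspectStore(StoreInst &SI) {
  Value *StoredValue = SI.getValueOperand();
  Value *Dest = SI.getPointerOperand();
  Type *StoredTy = StoredValue->getType(); // the type actually written
  (void)Dest; (void)StoredTy;
}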
Example 1: unpackStoreToAggregate
static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably handle volatile and atomic stores here with
  // some care, but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();
  Type *T = V->getType();

  if (!T->isAggregateType())
    return false;

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct has only one element, unpack it.
    if (ST->getNumElements() == 1) {
      V = IC.Builder->CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array has only one element, unpack it.
    if (AT->getNumElements() == 1) {
      V = IC.Builder->CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }
  }

  return false;
}
Example 2: visitStoreInst
void InstrumentMemoryAccesses::visitStoreInst(StoreInst &SI) {
  // Instrument a store instruction with a store check.
  uint64_t Bytes = TD->getTypeStoreSize(SI.getValueOperand()->getType());
  Value *AccessSize = ConstantInt::get(SizeTy, Bytes);
  instrument(SI.getPointerOperand(), AccessSize, StoreCheckFunction, SI);
  ++StoresInstrumented;
}
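A recurring pattern in Example 2 (and again in Examples 3, 10, and 14 below) is that the number of bytes a store writes is derived from the value operand's type, not from the pointer operand. A condensed sketch of that idiom, assuming a DataLayout is at hand (the helper name is made up for illustration):

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// The store size in bytes is a property of the stored value's type.
static uint64_t storeSizeInBytes(const DataLayout &DL, const StoreInst &SI) {
  return DL.getTypeStoreSize(SI.getValueOperand()->getType());
}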
Example 3: buildAccessFunctions
void TempScopInfo::buildAccessFunctions(Region &R, ParamSetType &Params,
                                        BasicBlock &BB) {
  AccFuncSetType Functions;
  for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I) {
    Instruction &Inst = *I;
    if (isa<LoadInst>(&Inst) || isa<StoreInst>(&Inst)) {
      // Create the SCEVAffFunc.
      if (LoadInst *ld = dyn_cast<LoadInst>(&Inst)) {
        unsigned size = TD->getTypeStoreSize(ld->getType());
        Functions.push_back(
            std::make_pair(SCEVAffFunc(SCEVAffFunc::ReadMem, size), &Inst));
      } else { // Otherwise it must be a StoreInst.
        StoreInst *st = cast<StoreInst>(&Inst);
        unsigned size = TD->getTypeStoreSize(st->getValueOperand()->getType());
        Functions.push_back(
            std::make_pair(SCEVAffFunc(SCEVAffFunc::WriteMem, size), &Inst));
      }

      Value *Ptr = getPointerOperand(Inst);
      buildAffineFunction(SE->getSCEV(Ptr), Functions.back().first, R, Params);
    }
  }

  if (Functions.empty())
    return;

  AccFuncSetType &Accs = AccFuncMap[&BB];
  Accs.insert(Accs.end(), Functions.begin(), Functions.end());
}
Example 4: translateStore
bool IRTranslator::translateStore(const StoreInst &SI) {
  assert(SI.isSimple() && "only simple stores are supported at the moment");

  MachineFunction &MF = MIRBuilder.getMF();
  unsigned Val = getOrCreateVReg(*SI.getValueOperand());
  unsigned Addr = getOrCreateVReg(*SI.getPointerOperand());
  LLT VTy{*SI.getValueOperand()->getType()},
      PTy{*SI.getPointerOperand()->getType()};

  MIRBuilder.buildStore(
      VTy, PTy, Val, Addr,
      *MF.getMachineMemOperand(MachinePointerInfo(SI.getPointerOperand()),
                               MachineMemOperand::MOStore,
                               VTy.getSizeInBits() / 8, getMemOpAlignment(SI)));
  return true;
}
Example 5: visitStoreInst
bool Scalarizer::visitStoreInst(StoreInst &SI) {
  if (!ScalarizeLoadStore)
    return false;
  if (!SI.isSimple())
    return false;

  VectorLayout Layout;
  Value *FullValue = SI.getValueOperand();
  if (!getVectorLayout(FullValue->getType(), SI.getAlignment(), Layout))
    return false;

  unsigned NumElems = Layout.VecTy->getNumElements();
  IRBuilder<> Builder(SI.getParent(), &SI);
  Scatterer Ptr = scatter(&SI, SI.getPointerOperand());
  Scatterer Val = scatter(&SI, FullValue);

  ValueVector Stores;
  Stores.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I) {
    unsigned Align = Layout.getElemAlign(I);
    Stores[I] = Builder.CreateAlignedStore(Val[I], Ptr[I], Align);
  }
  transferMetadata(&SI, Stores);
  return true;
}
Example 6: unpackStoreToAggregate
static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably handle volatile and atomic stores here with
  // some care, but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();
  Type *T = V->getType();

  if (!T->isAggregateType())
    return false;

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct has only one element, unpack it.
    unsigned Count = ST->getNumElements();
    if (Count == 1) {
      V = IC.Builder->CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // We don't want to break up stores with padding here, as we'd lose the
    // knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return false;

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt32Ty(ST->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);
    for (unsigned i = 0; i < Count; i++) {
      Value *Indices[2] = {
          Zero,
          ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                                AddrName);
      auto *Val = IC.Builder->CreateExtractValue(V, i, EltName);
      IC.Builder->CreateStore(Val, Ptr);
    }

    return true;
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array has only one element, unpack it.
    if (AT->getNumElements() == 1) {
      V = IC.Builder->CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }
  }

  return false;
}
Example 7: visitStoreInst
void PropagateJuliaAddrspaces::visitStoreInst(StoreInst &SI) {
  unsigned AS = SI.getPointerAddressSpace();
  if (!isSpecialAS(AS))
    return;
  Value *Replacement =
      LiftPointer(SI.getPointerOperand(), SI.getValueOperand()->getType(), &SI);
  if (!Replacement)
    return;
  SI.setOperand(StoreInst::getPointerOperandIndex(), Replacement);
}
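Example 7 swaps out the destination pointer in place rather than rebuilding the instruction. The static StoreInst::getPointerOperandIndex() it uses avoids hard-coding the operand number; a minimal hedged sketch of that pattern in isolation (the helper name is invented):

#include "llvm/IR/Instructions.h"

using namespace llvm;

// Replace a store's destination in place. The value is operand 0 and the
// pointer is operand 1, but the symbolic index keeps that detail out of
// caller code. NewPtr's type must match the old pointer operand's type.
static void replaceStoreDestination(StoreInst &SI, Value *NewPtr) {
  SI.setOperand(StoreInst::getPointerOperandIndex(), NewPtr);
}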
Example 8: combineStoreToValueType
/// \brief Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and
/// where we can we should match the type of a store to the type of value being
/// stored.
///
/// However, this routine must never change the width of a store or the number
/// of stores, as that would introduce a semantic change. This combine is
/// expected to be a semantic no-op which just allows stores to more closely
/// model the types of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller
/// erase the store instruction as otherwise there is no way to signal whether
/// it was combined or not: IC.EraseInstFromFunction returns a null pointer.
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably handle volatile and atomic stores here with
  // some care, but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *Ptr = SI.getPointerOperand();
  Value *V = SI.getValueOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    V = BC->getOperand(0);
    StoreInst *NewStore = IC.Builder->CreateAlignedStore(
        V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
        SI.getAlignment());
    for (const auto &MDPair : MD) {
      unsigned ID = MDPair.first;
      MDNode *N = MDPair.second;
      // Note, essentially every kind of metadata should be preserved here!
      // This routine is supposed to clone a store instruction changing *only
      // its type*. The only metadata it makes sense to drop is metadata which
      // is invalidated when the pointer type changes. This should essentially
      // never be the case in LLVM, but we explicitly switch over only known
      // metadata to be conservatively correct. If you are adding metadata to
      // LLVM which pertains to stores, you almost certainly want to add it
      // here.
      switch (ID) {
      case LLVMContext::MD_dbg:
      case LLVMContext::MD_tbaa:
      case LLVMContext::MD_prof:
      case LLVMContext::MD_fpmath:
      case LLVMContext::MD_tbaa_struct:
      case LLVMContext::MD_alias_scope:
      case LLVMContext::MD_noalias:
      case LLVMContext::MD_nontemporal:
      case LLVMContext::MD_mem_parallel_loop_access:
      case LLVMContext::MD_nonnull:
        // All of these directly apply.
        NewStore->setMetadata(ID, N);
        break;
      case LLVMContext::MD_invariant_load:
      case LLVMContext::MD_range:
        break;
      }
    }
    return true;
  }

  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
  return false;
}
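The metadata-copying loop in Example 8 is a reusable idiom whenever a memory instruction is cloned at a different type: enumerate all attached metadata, then whitelist the kinds known to remain valid. A condensed, hedged sketch of just that idiom (the helper is hypothetical and the kind list abbreviated; the full list is in the example above):

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"

using namespace llvm;

// Copy only metadata kinds that stay valid when the stored type changes;
// drop everything else to be conservatively correct.
static void copyKnownStoreMetadata(const Instruction &Old, Instruction &New) {
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  Old.getAllMetadata(MD);
  for (const auto &MDPair : MD) {
    switch (MDPair.first) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_nontemporal:
      New.setMetadata(MDPair.first, MDPair.second); // still applies
      break;
    default:
      break; // e.g. MD_range no longer applies to the new value type
    }
  }
}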
Example 9: visitStoreInst
void GCInvariantVerifier::visitStoreInst(StoreInst &SI) {
  Type *VTy = SI.getValueOperand()->getType();
  if (VTy->isPointerTy()) {
    /* We currently don't obey this for arguments. That's ok - they're
       externally rooted. */
    if (!isa<Argument>(SI.getValueOperand())) {
      unsigned AS = cast<PointerType>(VTy)->getAddressSpace();
      Check(AS != AddressSpace::CalleeRooted &&
                AS != AddressSpace::Derived,
            "Illegal store of decayed value", &SI);
    }
  }

  VTy = SI.getPointerOperand()->getType();
  if (VTy->isPointerTy()) {
    unsigned AS = cast<PointerType>(VTy)->getAddressSpace();
    Check(AS != AddressSpace::CalleeRooted,
          "Illegal store to callee rooted value", &SI);
  }
}
Example 10: buildIRAccess
IRAccess
TempScopInfo::buildIRAccess(Instruction *Inst, Loop *L, Region *R,
                            const ScopDetection::BoxedLoopsSetTy *BoxedLoops) {
  unsigned Size;
  Type *SizeType;
  enum IRAccess::TypeKind Type;

  if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) {
    SizeType = Load->getType();
    Size = TD->getTypeStoreSize(SizeType);
    Type = IRAccess::READ;
  } else {
    StoreInst *Store = cast<StoreInst>(Inst);
    SizeType = Store->getValueOperand()->getType();
    Size = TD->getTypeStoreSize(SizeType);
    Type = IRAccess::MUST_WRITE;
  }

  const SCEV *AccessFunction = SE->getSCEVAtScope(getPointerOperand(*Inst), L);
  const SCEVUnknown *BasePointer =
      dyn_cast<SCEVUnknown>(SE->getPointerBase(AccessFunction));

  assert(BasePointer && "Could not find base pointer");
  AccessFunction = SE->getMinusSCEV(AccessFunction, BasePointer);

  auto AccItr = InsnToMemAcc.find(Inst);
  if (PollyDelinearize && AccItr != InsnToMemAcc.end())
    return IRAccess(Type, BasePointer->getValue(), AccessFunction, Size, true,
                    AccItr->second.DelinearizedSubscripts,
                    AccItr->second.Shape->DelinearizedSizes);

  // Check if the access depends on a loop contained in a non-affine subregion.
  bool isVariantInNonAffineLoop = false;
  if (BoxedLoops) {
    SetVector<const Loop *> Loops;
    findLoops(AccessFunction, Loops);
    for (const Loop *L : Loops)
      if (BoxedLoops->count(L))
        isVariantInNonAffineLoop = true;
  }

  bool IsAffine = !isVariantInNonAffineLoop &&
                  isAffineExpr(R, AccessFunction, *SE, BasePointer->getValue());

  SmallVector<const SCEV *, 4> Subscripts, Sizes;
  Subscripts.push_back(AccessFunction);
  Sizes.push_back(SE->getConstant(ZeroOffset->getType(), Size));

  if (!IsAffine && Type == IRAccess::MUST_WRITE)
    Type = IRAccess::MAY_WRITE;

  return IRAccess(Type, BasePointer->getValue(), AccessFunction, Size, IsAffine,
                  Subscripts, Sizes);
}
Example 11: handleStoreInstruction
// -- handle store instruction --
void UnsafeTypeCastingCheck::handleStoreInstruction(Instruction *inst) {
  StoreInst *sinst = dyn_cast<StoreInst>(inst);
  if (sinst == NULL)
    utccAbort("handleStoreInstruction cannot process with a non-store instruction");

  Value *pt = sinst->getPointerOperand();
  Value *vl = sinst->getValueOperand();
  UTCC_TYPE ptt = queryPointedType(pt); // queried but currently unused
  UTCC_TYPE vlt = queryExprType(vl);

  setPointedType(pt, vlt);
}
Example 12: genSTDStore
void MutationGen::genSTDStore(Instruction *inst, StringRef fname, int index) {
  StoreInst *st = cast<StoreInst>(inst);
  Type *t = st->getValueOperand()->getType();
  if (isSupportedType(t)) {
    std::stringstream ss;
    ss << "STD:" << std::string(fname) << ":" << index << ":"
       << inst->getOpcode() << ":" << 0 << '\n';
    ofresult << ss.str();
    ofresult.flush();
    muts_num++;
  }
}
Example 13: combineStoreToValueType
/// \brief Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and
/// where we can we should match the type of a store to the type of value being
/// stored.
///
/// However, this routine must never change the width of a store or the number
/// of stores, as that would introduce a semantic change. This combine is
/// expected to be a semantic no-op which just allows stores to more closely
/// model the types of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller
/// erase the store instruction as otherwise there is no way to signal whether
/// it was combined or not: IC.EraseInstFromFunction returns a null pointer.
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably handle volatile and atomic stores here with
  // some care, but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    V = BC->getOperand(0);
    combineStoreToNewValue(IC, SI, V);
    return true;
  }

  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
  return false;
}
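Example 13 reaches the bitcast fold through the combineStoreToNewValue helper, which is not shown here. For readers without the surrounding InstCombine machinery, here is a self-contained sketch of the same transformation, written against the same API vintage as these examples (unsigned alignments); treat it as an illustration under those assumptions, not as the pass itself:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// If a simple store's value operand is a bitcast, store the original value
// and recast the pointer instead. Width and store count are unchanged, so
// this is a semantic no-op.
static bool foldStoredBitCast(StoreInst &SI) {
  if (!SI.isSimple())
    return false;
  auto *BC = dyn_cast<BitCastInst>(SI.getValueOperand());
  if (!BC)
    return false;

  Value *Orig = BC->getOperand(0);
  IRBuilder<> B(&SI); // insert the replacement right before the old store
  Value *NewPtr = B.CreateBitCast(
      SI.getPointerOperand(),
      Orig->getType()->getPointerTo(SI.getPointerAddressSpace()));
  B.CreateAlignedStore(Orig, NewPtr, SI.getAlignment());
  SI.eraseFromParent(); // the old store must not be used after this
  return true;
}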
Example 14: buildAccessFunctions
void TempScopInfo::buildAccessFunctions(Region &R, BasicBlock &BB) {
  AccFuncSetType Functions;
  for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I) {
    Instruction &Inst = *I;
    if (isa<LoadInst>(&Inst) || isa<StoreInst>(&Inst)) {
      unsigned Size;
      enum IRAccess::TypeKind Type;

      if (LoadInst *Load = dyn_cast<LoadInst>(&Inst)) {
        Size = TD->getTypeStoreSize(Load->getType());
        Type = IRAccess::READ;
      } else {
        StoreInst *Store = cast<StoreInst>(&Inst);
        Size = TD->getTypeStoreSize(Store->getValueOperand()->getType());
        Type = IRAccess::WRITE;
      }

      const SCEV *AccessFunction = SE->getSCEV(getPointerOperand(Inst));
      const SCEVUnknown *BasePointer =
          dyn_cast<SCEVUnknown>(SE->getPointerBase(AccessFunction));

      assert(BasePointer && "Could not find base pointer");
      AccessFunction = SE->getMinusSCEV(AccessFunction, BasePointer);

      bool IsAffine =
          isAffineExpr(&R, AccessFunction, *SE, BasePointer->getValue());

      Functions.push_back(
          std::make_pair(IRAccess(Type, BasePointer->getValue(), AccessFunction,
                                  Size, IsAffine),
                         &Inst));
    }
  }

  if (Functions.empty())
    return;

  AccFuncSetType &Accs = AccFuncMap[&BB];
  Accs.insert(Accs.end(), Functions.begin(), Functions.end());
}
Example 15: visitStoreInst
/// store {atomic|volatile} T %val, T* %ptr memory_order, align sizeof(T)
/// becomes:
///   call void @llvm.nacl.atomic.store.i<size>(%val, %ptr, memory_order)
void AtomicVisitor::visitStoreInst(StoreInst &I) {
  return; // XXX EMSCRIPTEN: the lowering below is disabled in this port.
  if (I.isSimple())
    return;
  PointerHelper<StoreInst> PH(*this, I);
  const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic =
      findAtomicIntrinsic(I, Intrinsic::nacl_atomic_store, PH.PET);
  checkAlignment(I, I.getAlignment(), PH.BitSize / CHAR_BIT);

  Value *V = I.getValueOperand();
  if (!V->getType()->isIntegerTy()) {
    // The store isn't of an integer type. We define atomics in terms of
    // integers, so bitcast the value to store to an integer of the
    // proper width.
    CastInst *Cast = createCast(I, V, Type::getIntNTy(C, PH.BitSize),
                                V->getName() + ".cast");
    Cast->setDebugLoc(I.getDebugLoc());
    V = Cast;
  }
  checkSizeMatchesType(I, PH.BitSize, V->getType());

  Value *Args[] = {V, PH.P, freezeMemoryOrder(I, I.getOrdering())};
  replaceInstructionWithIntrinsicCall(I, Intrinsic, PH.OriginalPET, PH.PET,
                                      Args);
}
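The bitcast-to-integer step near the end of Example 15 reflects a common constraint when lowering atomics: the intrinsics are defined over integer types only. A condensed, hedged sketch of that one step using only standard LLVM API (the helper name is invented; pointer-typed values would need ptrtoint rather than bitcast and are not handled here):

#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Return V as an integer of the given bit width, bitcasting if needed.
// Assumes V is a non-pointer first-class type of exactly BitSize bits.
static Value *asIntegerValue(IRBuilder<> &B, Value *V, unsigned BitSize) {
  if (V->getType()->isIntegerTy())
    return V;
  return B.CreateBitCast(V, B.getIntNTy(BitSize), V->getName() + ".cast");
}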