This article collects typical, real-world usage examples of the C++ method LoadInst::getAlignment. If you are unsure what LoadInst::getAlignment does or how to call it, the curated examples below should help; for more background, see the documentation of the enclosing class, LoadInst.
Eleven code examples of LoadInst::getAlignment are shown below, ordered by popularity.
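A note before the examples: in the LLVM releases these snippets come from, LoadInst::getAlignment() returns the load's explicit `align` attribute as an unsigned, with 0 meaning "no explicit alignment was specified". In current LLVM the method has been removed in favor of LoadInst::getAlign(), which returns an llvm::Align that is always a known, non-zero power of two. A minimal sketch of the two styles (the version boundaries given in the comments are approximate):

// Classic API (up to roughly LLVM 10): 0 means "no explicit alignment";
// callers typically fall back to DataLayout::getABITypeAlignment.
unsigned OldAlign = LI->getAlignment();

// Modern API (roughly LLVM 11 onward): the alignment is always known.
llvm::Align NewAlign = LI->getAlign();
uint64_t AlignBytes = NewAlign.value();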
Example 1: canWidenScalarExtLoad
bool AMDGPUCodeGenPrepare::canWidenScalarExtLoad(LoadInst &I) const {
  Type *Ty = I.getType();
  const DataLayout &DL = Mod->getDataLayout();
  int TySize = DL.getTypeSizeInBits(Ty);
  unsigned Align = I.getAlignment() ?
                   I.getAlignment() : DL.getABITypeAlignment(Ty);

  return I.isSimple() && TySize < 32 && Align >= 4 && DA->isUniform(&I);
}
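The `I.getAlignment() ? I.getAlignment() : DL.getABITypeAlignment(Ty)` idiom above is worth calling out: a load with no explicit `align` attribute reports alignment 0, so callers substitute the ABI alignment from the DataLayout. Below is a minimal, self-contained sketch of that idiom, written against the classic pre-LLVM-10 C++ API; the data-layout string is an illustrative x86-64-style layout, not something taken from the example above.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);
  M.setDataLayout("e-m:e-i64:64-f80:128-n8:16:32:64-S128");

  FunctionType *FTy = FunctionType::get(
      Type::getInt32Ty(Ctx), {Type::getInt32PtrTy(Ctx)}, /*isVarArg=*/false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", &M);
  BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);
  IRBuilder<> B(BB);

  // No explicit alignment is attached, so getAlignment() returns 0 here.
  LoadInst *LI = B.CreateLoad(&*F->arg_begin());
  B.CreateRet(LI);

  const DataLayout &DL = M.getDataLayout();
  unsigned EffAlign = LI->getAlignment()
                          ? LI->getAlignment()
                          : DL.getABITypeAlignment(LI->getType());
  outs() << "effective alignment: " << EffAlign << "\n"; // prints 4 for i32
  return 0;
}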
Example 2: visitLoadInst
/// %res = load {atomic|volatile} T* %ptr memory_order, align sizeof(T)
/// becomes:
///   %res = call T @llvm.nacl.atomic.load.i<size>(%ptr, memory_order)
void AtomicVisitor::visitLoadInst(LoadInst &I) {
  return; // XXX EMSCRIPTEN
  if (I.isSimple())
    return;
  PointerHelper<LoadInst> PH(*this, I);
  const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic =
      findAtomicIntrinsic(I, Intrinsic::nacl_atomic_load, PH.PET);
  checkAlignment(I, I.getAlignment(), PH.BitSize / CHAR_BIT);
  Value *Args[] = {PH.P, freezeMemoryOrder(I, I.getOrdering())};
  replaceInstructionWithIntrinsicCall(I, Intrinsic, PH.OriginalPET, PH.PET,
                                      Args);
}
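The checkAlignment call above enforces the PNaCl rule that atomic accesses be naturally aligned: PH.BitSize / CHAR_BIT converts the intrinsic's bit width to bytes, and the load's alignment must be at least that size. A small free function capturing the same check (a sketch only; isNaturallyAlignedAtomic is a hypothetical name, not the visitor's actual helper):

#include <climits>
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"

// Hypothetical helper mirroring the intent of AtomicVisitor::checkAlignment:
// an atomic load of T is acceptable only if its alignment covers sizeof(T).
static bool isNaturallyAlignedAtomic(const llvm::LoadInst &I,
                                     const llvm::DataLayout &DL) {
  uint64_t StoreSizeBytes = DL.getTypeStoreSize(I.getType());
  return I.getAlignment() >= StoreSizeBytes;
}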
Example 3: assert
void X86InterleavedAccessGroup::decompose(
    Instruction *VecInst, unsigned NumSubVectors, VectorType *SubVecTy,
    SmallVectorImpl<Instruction *> &DecomposedVectors) {
  assert((isa<LoadInst>(VecInst) || isa<ShuffleVectorInst>(VecInst)) &&
         "Expected Load or Shuffle");

  Type *VecTy = VecInst->getType();
  (void)VecTy;
  assert(VecTy->isVectorTy() &&
         DL.getTypeSizeInBits(VecTy) >=
             DL.getTypeSizeInBits(SubVecTy) * NumSubVectors &&
         "Invalid Inst-size!!!");

  if (auto *SVI = dyn_cast<ShuffleVectorInst>(VecInst)) {
    Value *Op0 = SVI->getOperand(0);
    Value *Op1 = SVI->getOperand(1);

    // Generate N(= NumSubVectors) shuffles of T(= SubVecTy) type.
    for (unsigned i = 0; i < NumSubVectors; ++i)
      DecomposedVectors.push_back(
          cast<ShuffleVectorInst>(Builder.CreateShuffleVector(
              Op0, Op1,
              createSequentialMask(Builder, Indices[i],
                                   SubVecTy->getVectorNumElements(), 0))));
    return;
  }

  // Decompose the load instruction.
  LoadInst *LI = cast<LoadInst>(VecInst);
  Type *VecBasePtrTy = SubVecTy->getPointerTo(LI->getPointerAddressSpace());
  Value *VecBasePtr;
  unsigned int NumLoads = NumSubVectors;
  // In the case of stride 3 with a vector of 32 elements load the information
  // in the following way:
  // [0,1...,VF/2-1,VF/2+VF,VF/2+VF+1,...,2VF-1]
  if (DL.getTypeSizeInBits(VecTy) == 768) {
    Type *VecTran =
        VectorType::get(Type::getInt8Ty(LI->getContext()), 16)->getPointerTo();
    VecBasePtr = Builder.CreateBitCast(LI->getPointerOperand(), VecTran);
    NumLoads = NumSubVectors * 2;
  } else
    VecBasePtr = Builder.CreateBitCast(LI->getPointerOperand(), VecBasePtrTy);

  // Generate N loads of T type.
  for (unsigned i = 0; i < NumLoads; i++) {
    // TODO: Support inbounds GEP.
    Value *NewBasePtr = Builder.CreateGEP(VecBasePtr, Builder.getInt32(i));
    Instruction *NewLoad =
        Builder.CreateAlignedLoad(NewBasePtr, LI->getAlignment());
    DecomposedVectors.push_back(NewLoad);
  }
}
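The load path above is a reusable pattern: bitcast the wide load's pointer to the subvector pointer type, then emit consecutive loads that inherit the wide load's alignment. A condensed sketch of just that pattern, against the pre-LLVM-10 unsigned-alignment API; splitWideLoad is a hypothetical free function, not part of X86InterleavedAccessGroup:

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/IRBuilder.h"

// Split one wide vector load into NumParts consecutive loads of SubVecTy,
// reusing the original load's alignment for each piece (as the example
// above does; a tighter pass could shrink the alignment of later pieces).
static void splitWideLoad(llvm::LoadInst *LI, unsigned NumParts,
                          llvm::VectorType *SubVecTy, llvm::IRBuilder<> &B,
                          llvm::SmallVectorImpl<llvm::Instruction *> &Parts) {
  llvm::Type *SubPtrTy =
      SubVecTy->getPointerTo(LI->getPointerAddressSpace());
  llvm::Value *Base = B.CreateBitCast(LI->getPointerOperand(), SubPtrTy);
  for (unsigned i = 0; i < NumParts; ++i) {
    llvm::Value *Ptr = B.CreateGEP(Base, B.getInt32(i));
    Parts.push_back(B.CreateAlignedLoad(Ptr, LI->getAlignment()));
  }
}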
Example 4: aggregateLoads
/// \brief Try to aggregate loads from a sorted list of loads to be combined.
///
/// It is guaranteed that no writes occur between any of the loads. All loads
/// have the same base pointer. There are at least two loads.
bool LoadCombine::aggregateLoads(SmallVectorImpl<LoadPOPPair> &Loads) {
  assert(Loads.size() >= 2 && "Insufficient loads!");
  LoadInst *BaseLoad = nullptr;
  SmallVector<LoadPOPPair, 8> AggregateLoads;
  bool Combined = false;
  bool ValidPrevOffset = false;
  APInt PrevOffset;
  uint64_t PrevSize = 0;
  for (auto &L : Loads) {
    if (ValidPrevOffset == false) {
      BaseLoad = L.Load;
      PrevOffset = L.POP.Offset;
      PrevSize = L.Load->getModule()->getDataLayout().getTypeStoreSize(
          L.Load->getType());
      AggregateLoads.push_back(L);
      ValidPrevOffset = true;
      continue;
    }
    if (L.Load->getAlignment() > BaseLoad->getAlignment())
      continue;
    APInt PrevEnd = PrevOffset + PrevSize;
    if (L.POP.Offset.sgt(PrevEnd)) {
      // No other load will be combinable.
      if (combineLoads(AggregateLoads))
        Combined = true;
      AggregateLoads.clear();
      ValidPrevOffset = false;
      continue;
    }
    if (L.POP.Offset != PrevEnd)
      // This load is offset less than the size of the last load.
      // FIXME: We may want to handle this case.
      continue;
    PrevOffset = L.POP.Offset;
    PrevSize = L.Load->getModule()->getDataLayout().getTypeStoreSize(
        L.Load->getType());
    AggregateLoads.push_back(L);
  }
  if (combineLoads(AggregateLoads))
    Combined = true;
  return Combined;
}
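The control flow above hinges on one arithmetic test: a candidate load extends the current group only when its offset is exactly PrevOffset + PrevSize; a signed-greater offset means a gap (flush the group), and a smaller one means overlap (skip the load, per the FIXME). A tiny sketch of that test in isolation, using hypothetical standalone values rather than the pass's own data structures:

#include "llvm/ADT/APInt.h"

using llvm::APInt;

enum class NextLoad { Adjacent, Gap, Overlap };

// Classify a candidate load's offset against the end of the previous load,
// mirroring the three branches in aggregateLoads above.
static NextLoad classify(const APInt &PrevOffset, uint64_t PrevSize,
                         const APInt &NextOffset) {
  APInt PrevEnd = PrevOffset + PrevSize;
  if (NextOffset.sgt(PrevEnd))
    return NextLoad::Gap;      // flush the current group
  if (NextOffset == PrevEnd)
    return NextLoad::Adjacent; // can be appended and combined
  return NextLoad::Overlap;    // skipped by the pass (see the FIXME)
}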
Example 5: visitLoadInst
bool Scalarizer::visitLoadInst(LoadInst &LI) {
  if (!ScalarizeLoadStore)
    return false;
  if (!LI.isSimple())
    return false;

  VectorLayout Layout;
  if (!getVectorLayout(LI.getType(), LI.getAlignment(), Layout))
    return false;

  unsigned NumElems = Layout.VecTy->getNumElements();
  IRBuilder<> Builder(LI.getParent(), &LI);
  Scatterer Ptr = scatter(&LI, LI.getPointerOperand());
  ValueVector Res;
  Res.resize(NumElems);

  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreateAlignedLoad(Ptr[I], Layout.getElemAlign(I),
                                       LI.getName() + ".i" + Twine(I));
  gather(&LI, Res);
  return true;
}
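Layout.getElemAlign(I) gives each scalar load its own alignment: element I sits I * ElemSize bytes past the vector's base, so the best guaranteed alignment is the minimum of the vector alignment and that offset. A sketch of a plausible implementation (the real VectorLayout helper may differ in details):

#include "llvm/Support/MathExtras.h"

// Alignment guaranteed for element I of a vector whose base pointer is
// aligned to VecAlign and whose elements are ElemSize bytes apart.
// llvm::MinAlign(A, B) returns the largest power of two dividing both.
static uint64_t elemAlign(uint64_t VecAlign, uint64_t ElemSize, unsigned I) {
  return llvm::MinAlign(VecAlign, I * ElemSize);
}

For example, a <4 x i32> load aligned to 16 bytes yields per-element alignments 16, 4, 8, 4 for elements 0 through 3.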
Example 6: visitLoadInst
void Lint::visitLoadInst(LoadInst &I) {
  visitMemoryReference(I, I.getPointerOperand(),
                       DL->getTypeStoreSize(I.getType()), I.getAlignment(),
                       I.getType(), MemRef::Read);
}
Example 7: getInstructionCost
unsigned CostModelAnalysis::getInstructionCost(Instruction *I) const {
  if (!VTTI)
    return -1;

  switch (I->getOpcode()) {
  case Instruction::Ret:
  case Instruction::PHI:
  case Instruction::Br: {
    return VTTI->getCFInstrCost(I->getOpcode());
  }
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    return VTTI->getArithmeticInstrCost(I->getOpcode(), I->getType());
  }
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    Type *CondTy = SI->getCondition()->getType();
    return VTTI->getCmpSelInstrCost(I->getOpcode(), I->getType(), CondTy);
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    Type *ValTy = I->getOperand(0)->getType();
    return VTTI->getCmpSelInstrCost(I->getOpcode(), ValTy);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(I);
    Type *ValTy = SI->getValueOperand()->getType();
    return VTTI->getMemoryOpCost(I->getOpcode(), ValTy,
                                 SI->getAlignment(),
                                 SI->getPointerAddressSpace());
  }
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(I);
    return VTTI->getMemoryOpCost(I->getOpcode(), I->getType(),
                                 LI->getAlignment(),
                                 LI->getPointerAddressSpace());
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    return VTTI->getCastInstrCost(I->getOpcode(), I->getType(), SrcTy);
  }
  case Instruction::ExtractElement: {
    ExtractElementInst *EEI = cast<ExtractElementInst>(I);
    ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
    unsigned Idx = -1;
    if (CI)
      Idx = CI->getZExtValue();
    return VTTI->getVectorInstrCost(I->getOpcode(),
                                    EEI->getOperand(0)->getType(), Idx);
  }
  case Instruction::InsertElement: {
    InsertElementInst *IE = cast<InsertElementInst>(I);
    ConstantInt *CI = dyn_cast<ConstantInt>(IE->getOperand(2));
    unsigned Idx = -1;
    if (CI)
      Idx = CI->getZExtValue();
    return VTTI->getVectorInstrCost(I->getOpcode(),
                                    IE->getType(), Idx);
  }
  default:
    // We don't have any information on this instruction.
    return -1;
  }
}
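One subtle idiom in the ExtractElement and InsertElement cases above: the lane index is folded to a ConstantInt when possible, and otherwise left as unsigned(-1), which the cost tables treat as "index unknown". Isolated into a helper, the idiom looks like this (a sketch; laneIndexOrUnknown is a hypothetical name, not part of CostModelAnalysis):

#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"

// Extract the constant lane index if there is one; otherwise return the
// "unknown index" sentinel (~0u) that getVectorInstrCost understands.
static unsigned laneIndexOrUnknown(const llvm::ExtractElementInst *EEI) {
  if (auto *CI = llvm::dyn_cast<llvm::ConstantInt>(EEI->getIndexOperand()))
    return static_cast<unsigned>(CI->getZExtValue());
  return ~0u;
}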
Example 8: Ranges
  //......... (part of the code omitted here) .........

  // create temporary alloca space to communicate to/from.
  alloc = makeAlloca(int8Ty, "agg.tmp", insertBefore,
                     Range.End-Range.Start, Alignment);

  // Generate the old and new base pointers before we output
  // anything else.
  {
    Type* iPtrTy = TD->getIntPtrType(alloc->getType());
    Type* iNewBaseTy = TD->getIntPtrType(alloc->getType());
    oldBaseI = builder.CreatePtrToInt(StartPtr, iPtrTy, "agg.tmp.oldb.i");
    newBaseI = builder.CreatePtrToInt(alloc, iNewBaseTy, "agg.tmp.newb.i");
  }

  // If storing, do the stores we had into our alloca'd region.
  if( isStore ) {
    for (SmallVector<Instruction*, 16>::const_iterator
             SI = Range.TheStores.begin(),
             SE = Range.TheStores.end(); SI != SE; ++SI) {
      StoreInst* oldStore = cast<StoreInst>(*SI);

      if( DebugThis ) {
        errs() << "have store in range:";
        oldStore->dump();
      }

      Value* ptrToAlloc = rebasePointer(oldStore->getPointerOperand(),
                                        StartPtr, alloc, "agg.tmp",
                                        &builder, *TD, oldBaseI, newBaseI);
      // Old store must not be volatile or atomic... or we shouldn't have
      // put it in ranges
      assert(!(oldStore->isVolatile() || oldStore->isAtomic()));
      StoreInst* newStore =
          builder.CreateStore(oldStore->getValueOperand(), ptrToAlloc);
      newStore->setAlignment(oldStore->getAlignment());
      newStore->takeName(oldStore);
    }
  }

  // cast the pointer that was load/stored to i8 if necessary.
  if( StartPtr->getType()->getPointerElementType() == int8Ty ) {
    globalPtr = StartPtr;
  } else {
    globalPtr = builder.CreatePointerCast(StartPtr, globalInt8PtrTy, "agg.cast");
  }

  // Get a Constant* for the length.
  Constant* len = ConstantInt::get(sizeTy, Range.End-Range.Start, false);

  // Now add the memcpy instruction
  unsigned addrSpaceDst,addrSpaceSrc;
  addrSpaceDst = addrSpaceSrc = 0;
  if( isStore ) addrSpaceDst = globalSpace;
  if( isLoad ) addrSpaceSrc = globalSpace;

  Type *types[3];
  types[0] = PointerType::get(int8Ty, addrSpaceDst);
  types[1] = PointerType::get(int8Ty, addrSpaceSrc);
  types[2] = sizeTy;
  Function *func = Intrinsic::getDeclaration(M, Intrinsic::memcpy, types);

  Value* args[5]; // dst src len alignment isvolatile
  if( isStore ) {
    // it's a store (ie put)
    args[0] = globalPtr;
    args[1] = alloc;
    //......... (rest of the code omitted here) .........
Example 9: Scalarize
Value *BoUpSLP::vectorizeTree(ValueList &VL, int VF) {
  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VF);

  // Check if all of the operands are constants or identical.
  bool AllConst = true;
  bool AllSameScalar = true;
  for (unsigned i = 0, e = VF; i < e; ++i) {
    AllConst &= !!dyn_cast<Constant>(VL[i]);
    AllSameScalar &= (VL[0] == VL[i]);
    // Must have a single use.
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (I && (I->getNumUses() > 1 || I->getParent() != BB))
      return Scalarize(VL, VecTy);
  }

  // Is this a simple vector constant?
  if (AllConst || AllSameScalar)
    return Scalarize(VL, VecTy);

  // Scalarize unknown structures.
  Instruction *VL0 = dyn_cast<Instruction>(VL[0]);
  if (!VL0)
    return Scalarize(VL, VecTy);

  unsigned Opcode = VL0->getOpcode();
  for (unsigned i = 0, e = VF; i < e; ++i) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    // If not all of the instructions are identical then we have to scalarize.
    if (!I || Opcode != I->getOpcode())
      return Scalarize(VL, VecTy);
  }

  switch (Opcode) {
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    ValueList LHSVL, RHSVL;
    for (int i = 0; i < VF; ++i) {
      RHSVL.push_back(cast<Instruction>(VL[i])->getOperand(0));
      LHSVL.push_back(cast<Instruction>(VL[i])->getOperand(1));
    }
    Value *RHS = vectorizeTree(RHSVL, VF);
    Value *LHS = vectorizeTree(LHSVL, VF);
    IRBuilder<> Builder(GetLastInstr(VL, VF));
    BinaryOperator *BinOp = dyn_cast<BinaryOperator>(VL0);
    return Builder.CreateBinOp(BinOp->getOpcode(), RHS, LHS);
  }
  case Instruction::Load: {
    LoadInst *LI = dyn_cast<LoadInst>(VL0);
    unsigned Alignment = LI->getAlignment();

    // Check if all of the loads are consecutive.
    for (unsigned i = 1, e = VF; i < e; ++i)
      if (!isConsecutiveAccess(VL[i-1], VL[i]))
        return Scalarize(VL, VecTy);

    IRBuilder<> Builder(GetLastInstr(VL, VF));
    Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(),
                                          VecTy->getPointerTo());
    LI = Builder.CreateLoad(VecPtr);
    LI->setAlignment(Alignment);
    return LI;
  }
  case Instruction::Store: {
    StoreInst *SI = dyn_cast<StoreInst>(VL0);
    unsigned Alignment = SI->getAlignment();
    ValueList ValueOp;
    for (int i = 0; i < VF; ++i)
      ValueOp.push_back(cast<StoreInst>(VL[i])->getValueOperand());
    Value *VecValue = vectorizeTree(ValueOp, VF);
    IRBuilder<> Builder(GetLastInstr(VL, VF));
    Value *VecPtr = Builder.CreateBitCast(SI->getPointerOperand(),
                                          VecTy->getPointerTo());
    Builder.CreateStore(VecValue, VecPtr)->setAlignment(Alignment);
    for (int i = 0; i < VF; ++i)
      cast<Instruction>(VL[i])->eraseFromParent();
    return 0;
  }
  default:
    return Scalarize(VL, VecTy);
//......... (rest of the code omitted here) .........
Example 10: visitLoadInst
void Lint::visitLoadInst(LoadInst &I) {
  visitMemoryReference(I, I.getPointerOperand(), I.getAlignment(), I.getType());
}
Example 11: CS
      //......... (part of the code omitted here) .........

      // Non-dead argument: insert GEPs and loads as appropriate.
      ScalarizeTable &ArgIndices = ScalarizedElements[&*I];

      // Store the Value* version of the indices in here, but declare it now
      // for reuse.
      std::vector<Value *> Ops;
      for (const auto &ArgIndex : ArgIndices) {
        Value *V = *AI;
        LoadInst *OrigLoad =
            OriginalLoads[std::make_pair(&*I, ArgIndex.second)];
        if (!ArgIndex.second.empty()) {
          Ops.reserve(ArgIndex.second.size());
          Type *ElTy = V->getType();
          for (auto II : ArgIndex.second) {
            // Use i32 to index structs, and i64 for others (pointers/arrays).
            // This satisfies GEP constraints.
            Type *IdxTy =
                (ElTy->isStructTy() ? Type::getInt32Ty(F->getContext())
                                    : Type::getInt64Ty(F->getContext()));
            Ops.push_back(ConstantInt::get(IdxTy, II));
            // Keep track of the type we're currently indexing.
            if (auto *ElPTy = dyn_cast<PointerType>(ElTy))
              ElTy = ElPTy->getElementType();
            else
              ElTy = cast<CompositeType>(ElTy)->getTypeAtIndex(II);
          }
          // And create a GEP to extract those indices.
          V = GetElementPtrInst::Create(ArgIndex.first, V, Ops,
                                        V->getName() + ".idx", Call);
          Ops.clear();
        }
        // Since we're replacing a load, make sure we take the alignment
        // of the previous load.
        LoadInst *newLoad = new LoadInst(V, V->getName() + ".val", Call);
        newLoad->setAlignment(OrigLoad->getAlignment());
        // Transfer the AA info too.
        AAMDNodes AAInfo;
        OrigLoad->getAAMetadata(AAInfo);
        newLoad->setAAMetadata(AAInfo);
        Args.push_back(newLoad);
        ArgAttrVec.push_back(AttributeSet());
      }
    }

    // Push any varargs arguments on the list.
    for (; AI != CS.arg_end(); ++AI, ++ArgNo) {
      Args.push_back(*AI);
      ArgAttrVec.push_back(CallPAL.getParamAttributes(ArgNo));
    }

    SmallVector<OperandBundleDef, 1> OpBundles;
    CS.getOperandBundlesAsDefs(OpBundles);

    CallSite NewCS;
    if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      NewCS = InvokeInst::Create(NF, II->getNormalDest(), II->getUnwindDest(),
                                 Args, OpBundles, "", Call);
    } else {
      auto *NewCall = CallInst::Create(NF, Args, OpBundles, "", Call);
      NewCall->setTailCallKind(cast<CallInst>(Call)->getTailCallKind());
      NewCS = NewCall;
    }
    NewCS.setCallingConv(CS.getCallingConv());
    NewCS.setAttributes(
        AttributeList::get(F->getContext(), CallPAL.getFnAttributes(),
                           CallPAL.getRetAttributes(), ArgAttrVec));
    //......... (rest of the code omitted here) .........