This article collects and summarizes typical usage examples of the C++ method LoadInst::setMetadata. If you have been wondering what exactly LoadInst::setMetadata does, how to use it, or where to find examples of it in use, the curated samples below should help. You can also explore further usage of the enclosing class, LoadInst.
The following shows 9 code examples of LoadInst::setMetadata, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ samples.
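Before diving into the examples, here is a minimal sketch of the method itself (my own illustration, not taken from the examples below; the helper name tagLoad and the kind string "my.note" are invented for demonstration). setMetadata attaches an MDNode to an instruction under a metadata kind, addressed either by a numeric kind ID or by name, and passing nullptr removes the node of that kind. This assumes a reasonably recent LLVM where CreateLoad takes an explicit type:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"

using namespace llvm;

// Hypothetical helper: load through Ptr and tag the load with metadata.
static LoadInst *tagLoad(IRBuilder<> &Builder, Type *Ty, Value *Ptr) {
  LLVMContext &Ctx = Builder.getContext();
  LoadInst *Load = Builder.CreateLoad(Ty, Ptr);

  // Well-known kinds use the LLVMContext::MD_* kind IDs; !invariant.load
  // conventionally carries an empty MDNode as its payload.
  Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(Ctx, {}));

  // Custom kinds can be addressed by name; the string overload resolves
  // the name to a kind ID internally.
  Load->setMetadata("my.note", MDNode::get(Ctx, MDString::get(Ctx, "tagged")));

  // Passing nullptr clears metadata of the given kind again.
  Load->setMetadata(LLVMContext::MD_invariant_load, nullptr);
  return Load;
}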
Example 1: unpack_struct
void Closure::unpack_struct(Scope<Value *> &dst,
                            llvm::Type *
#if LLVM_VERSION >= 37
                            type
#endif
                            ,
                            Value *src,
                            IRBuilder<> *builder) {
    // type, type of src should be a pointer to a struct of the type returned by build_type
    int idx = 0;
    LLVMContext &context = builder->getContext();
    vector<string> nm = names();
    for (size_t i = 0; i < nm.size(); i++) {
#if LLVM_VERSION >= 37
        Value *ptr = builder->CreateConstInBoundsGEP2_32(type, src, 0, idx++);
#else
        Value *ptr = builder->CreateConstInBoundsGEP2_32(src, 0, idx++);
#endif
        LoadInst *load = builder->CreateLoad(ptr);
        if (load->getType()->isPointerTy()) {
            // Give it a unique type so that tbaa tells llvm that this can't alias anything
            LLVMMDNodeArgumentType md_args[] = {MDString::get(context, nm[i])};
            load->setMetadata("tbaa", MDNode::get(context, md_args));
        }
        dst.push(nm[i], load);
        load->setName(nm[i]);
    }
}
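A note on the design choice here (this is Halide's closure codegen): giving each field's load a fresh, uniquely named !tbaa root makes every load its own alias set, so LLVM's type-based alias analysis can assume the closure fields alias neither each other nor anything else.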
Example 2: visitLoadInst
bool AMDGPUCodeGenPrepare::visitLoadInst(LoadInst &I) {
  if (!WidenLoads)
    return false;

  if ((I.getPointerAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS ||
       I.getPointerAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS_32BIT) &&
      canWidenScalarExtLoad(I)) {
    IRBuilder<> Builder(&I);
    Builder.SetCurrentDebugLocation(I.getDebugLoc());

    Type *I32Ty = Builder.getInt32Ty();
    Type *PT = PointerType::get(I32Ty, I.getPointerAddressSpace());
    Value *BitCast = Builder.CreateBitCast(I.getPointerOperand(), PT);
    LoadInst *WidenLoad = Builder.CreateLoad(BitCast);
    WidenLoad->copyMetadata(I);

    // If we have range metadata, we need to convert the type, and not make
    // assumptions about the high bits.
    if (auto *Range = WidenLoad->getMetadata(LLVMContext::MD_range)) {
      ConstantInt *Lower =
          mdconst::extract<ConstantInt>(Range->getOperand(0));

      if (Lower->getValue().isNullValue()) {
        WidenLoad->setMetadata(LLVMContext::MD_range, nullptr);
      } else {
        Metadata *LowAndHigh[] = {
            ConstantAsMetadata::get(ConstantInt::get(I32Ty, Lower->getValue().zext(32))),
            // Don't make assumptions about the high bits.
            ConstantAsMetadata::get(ConstantInt::get(I32Ty, 0))
        };

        WidenLoad->setMetadata(LLVMContext::MD_range,
                               MDNode::get(Mod->getContext(), LowAndHigh));
      }
    }

    int TySize = Mod->getDataLayout().getTypeSizeInBits(I.getType());
    Type *IntNTy = Builder.getIntNTy(TySize);
    Value *ValTrunc = Builder.CreateTrunc(WidenLoad, IntNTy);
    Value *ValOrig = Builder.CreateBitCast(ValTrunc, I.getType());
    I.replaceAllUsesWith(ValOrig);
    I.eraseFromParent();
    return true;
  }

  return false;
}
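Where Example 2 assembles the LowAndHigh pair by hand, MDBuilder provides a helper for the common case. A sketch under the assumption of a reasonably recent LLVM; the function and its bound are hypothetical:

#include "llvm/ADT/APInt.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/MDBuilder.h"

using namespace llvm;

// Hypothetical helper: assert the loaded value lies in the half-open
// interval [0, 1024), the same encoding as the hand-built pair above.
static void annotateRange(LoadInst *Load) {
  MDBuilder MDB(Load->getContext());
  Load->setMetadata(LLVMContext::MD_range,
                    MDB.createRange(APInt(32, 0), APInt(32, 1024)));
}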
Example 3: InjectCoverageAtBlock
void SanitizerCoverageModule::InjectCoverageAtBlock(Function &F,
                                                    BasicBlock &BB) {
  BasicBlock::iterator IP = BB.getFirstInsertionPt(), BE = BB.end();
  // Skip static allocas at the top of the entry block so they don't become
  // dynamic when we split the block. If we used our optimized stack layout,
  // then there will only be one alloca and it will come first.
  for (; IP != BE; ++IP) {
    AllocaInst *AI = dyn_cast<AllocaInst>(IP);
    if (!AI || !AI->isStaticAlloca())
      break;
  }

  bool IsEntryBB = &BB == &F.getEntryBlock();
  DebugLoc EntryLoc =
      IsEntryBB ? IP->getDebugLoc().getFnDebugLoc(*C) : IP->getDebugLoc();
  IRBuilder<> IRB(IP);
  IRB.SetCurrentDebugLocation(EntryLoc);
  SmallVector<Value *, 1> Indices;
  Value *GuardP = IRB.CreateAdd(
      IRB.CreatePointerCast(GuardArray, IntptrTy),
      ConstantInt::get(IntptrTy, (1 + SanCovFunction->getNumUses()) * 4));
  Type *Int32PtrTy = PointerType::getUnqual(IRB.getInt32Ty());
  GuardP = IRB.CreateIntToPtr(GuardP, Int32PtrTy);
  LoadInst *Load = IRB.CreateLoad(GuardP);
  Load->setAtomic(Monotonic);
  Load->setAlignment(4);
  Load->setMetadata(F.getParent()->getMDKindID("nosanitize"),
                    MDNode::get(*C, None));
  Value *Cmp = IRB.CreateICmpSGE(Constant::getNullValue(Load->getType()), Load);
  Instruction *Ins = SplitBlockAndInsertIfThen(
      Cmp, IP, false, MDBuilder(*C).createBranchWeights(1, 100000));
  IRB.SetInsertPoint(Ins);
  IRB.SetCurrentDebugLocation(EntryLoc);
  // __sanitizer_cov gets the PC of the instruction using GET_CALLER_PC.
  IRB.CreateCall(SanCovFunction, GuardP);
  IRB.CreateCall(EmptyAsm); // Avoids callback merge.

  if (ClExperimentalTracing) {
    // Experimental support for tracing.
    // Insert a callback with the same guard variable as used for coverage.
    IRB.SetInsertPoint(IP);
    IRB.CreateCall(IsEntryBB ? SanCovTraceEnter : SanCovTraceBB, GuardP);
  }
}
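Note how this example addresses a custom kind: "nosanitize" is not one of the enumerated LLVMContext::MD_* kinds, so it is resolved to a numeric ID via Module::getMDKindID. The StringRef overload of setMetadata performs the same lookup internally, so, as far as I can tell, the call above could equally be written as:

Load->setMetadata("nosanitize", MDNode::get(*C, None));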
Example 4: switch
/// \brief Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type. This will convert it to a pointer, cast the operand to
/// that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI,
                                      Type *NewTy) {
  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  LI.getAllMetadata(MD);

  LoadInst *NewLoad = IC.Builder->CreateAlignedLoad(
      IC.Builder->CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
      LI.getAlignment(), LI.getName());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a load instruction changing *only its type*.
    // The only metadata it makes sense to drop is metadata which is invalidated
    // when the pointer type changes. This should essentially never be the case
    // in LLVM, but we explicitly switch over only known metadata to be
    // conservatively correct. If you are adding metadata to LLVM which pertains
    // to loads, you almost certainly want to add it here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
    case LLVMContext::MD_nonnull:
      // All of these directly apply.
      NewLoad->setMetadata(ID, N);
      break;

    case LLVMContext::MD_range:
      // FIXME: It would be nice to propagate this in some way, but the type
      // conversions make it hard.
      break;
    }
  }
  return NewLoad;
}
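In newer LLVM releases much of this boilerplate can be collapsed: Instruction::copyMetadata copies all (or a whitelisted subset of) metadata kinds from another instruction; Example 2 above uses exactly that via WidenLoad->copyMetadata(I). A sketch, assuming an LLVM recent enough to have the API; kinds invalidated by the type change still have to be dropped by hand afterwards:

NewLoad->copyMetadata(LI);
// !range (and, for non-pointer results, !nonnull and friends) no longer
// applies after the type change, so clear it explicitly.
NewLoad->setMetadata(LLVMContext::MD_range, nullptr);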
Example 5: unpack_struct
void Closure::unpack_struct(Scope<Value *> &dst,
                            Value *src,
                            IRBuilder<> *builder) {
    // src should be a pointer to a struct of the type returned by build_type
    int idx = 0;
    LLVMContext &context = builder->getContext();
    vector<string> nm = names();
    for (size_t i = 0; i < nm.size(); i++) {
        Value *ptr = builder->CreateConstInBoundsGEP2_32(src, 0, idx++);
        LoadInst *load = builder->CreateLoad(ptr);
        if (load->getType()->isPointerTy()) {
            // Give it a unique type so that tbaa tells llvm that this can't alias anything
            load->setMetadata("tbaa", MDNode::get(context,
                              vec<Value *>(MDString::get(context, nm[i]))));
        }
        dst.push(nm[i], load);
        load->setName(nm[i]);
    }
}
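This is the same Halide helper as Example 1, but from an older LLVM: before the Metadata/Value split in LLVM 3.6, MDNode::get took Value* operands, hence the vec<Value *>(...) wrapper here instead of the LLVMMDNodeArgumentType array used above.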
Example 6: make_pair
std::pair<Value *, Value *>
AMDGPUPromoteAlloca::getLocalSizeYZ(IRBuilder<> &Builder) {
  if (!IsAMDHSA) {
    Function *LocalSizeYFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
    Function *LocalSizeZFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);

    CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
    CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});

    LocalSizeY->setMetadata(LLVMContext::MD_range, MaxWorkGroupSizeRange);
    LocalSizeZ->setMetadata(LLVMContext::MD_range, MaxWorkGroupSizeRange);

    return std::make_pair(LocalSizeY, LocalSizeZ);
  }

  // We must read the size out of the dispatch pointer.
  assert(IsAMDGCN);

  // We are indexing into this struct, and want to extract the workgroup_size_*
  // fields.
  //
  //   typedef struct hsa_kernel_dispatch_packet_s {
  //     uint16_t header;
  //     uint16_t setup;
  //     uint16_t workgroup_size_x;
  //     uint16_t workgroup_size_y;
  //     uint16_t workgroup_size_z;
  //     uint16_t reserved0;
  //     uint32_t grid_size_x;
  //     uint32_t grid_size_y;
  //     uint32_t grid_size_z;
  //
  //     uint32_t private_segment_size;
  //     uint32_t group_segment_size;
  //     uint64_t kernel_object;
  //
  // #ifdef HSA_LARGE_MODEL
  //     void *kernarg_address;
  // #elif defined HSA_LITTLE_ENDIAN
  //     void *kernarg_address;
  //     uint32_t reserved1;
  // #else
  //     uint32_t reserved1;
  //     void *kernarg_address;
  // #endif
  //     uint64_t reserved2;
  //     hsa_signal_t completion_signal; // uint64_t wrapper
  //   } hsa_kernel_dispatch_packet_t
  //
  Function *DispatchPtrFn
    = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);

  CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
  DispatchPtr->addAttribute(AttributeSet::ReturnIndex, Attribute::NoAlias);
  DispatchPtr->addAttribute(AttributeSet::ReturnIndex, Attribute::NonNull);

  // Size of the dispatch packet struct.
  DispatchPtr->addDereferenceableAttr(AttributeSet::ReturnIndex, 64);

  Type *I32Ty = Type::getInt32Ty(Mod->getContext());
  Value *CastDispatchPtr = Builder.CreateBitCast(
      DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));

  // We could do a single 64-bit load here, but it's likely that the basic
  // 32-bit and extract sequence is already present, and it is probably easier
  // to CSE this. The loads should be mergable later anyway.
  Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(CastDispatchPtr, 1);
  LoadInst *LoadXY = Builder.CreateAlignedLoad(GEPXY, 4);

  Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(CastDispatchPtr, 2);
  LoadInst *LoadZU = Builder.CreateAlignedLoad(GEPZU, 4);

  MDNode *MD = llvm::MDNode::get(Mod->getContext(), None);
  LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
  LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
  LoadZU->setMetadata(LLVMContext::MD_range, MaxWorkGroupSizeRange);

  // Extract y component. Upper half of LoadZU should be zero already.
  Value *Y = Builder.CreateLShr(LoadXY, 16);

  return std::make_pair(Y, LoadZU);
}
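Both loads are tagged !invariant.load, a promise that the dispatch-packet memory never changes while the kernel runs; this is what lets later passes CSE and merge them, as the comment about CSE above anticipates. The empty MDNode (MDNode::get(..., None)) is the conventional payload for this kind.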
Example 7: MDB
/// \brief Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type. This will convert it to a pointer, cast the operand to
/// that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI,
                                      Type *NewTy, const Twine &Suffix = "") {
  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  LI.getAllMetadata(MD);

  LoadInst *NewLoad = IC.Builder->CreateAlignedLoad(
      IC.Builder->CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
      LI.getAlignment(), LI.getName() + Suffix);
  MDBuilder MDB(NewLoad->getContext());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a load instruction changing *only its type*.
    // The only metadata it makes sense to drop is metadata which is invalidated
    // when the pointer type changes. This should essentially never be the case
    // in LLVM, but we explicitly switch over only known metadata to be
    // conservatively correct. If you are adding metadata to LLVM which pertains
    // to loads, you almost certainly want to add it here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewLoad->setMetadata(ID, N);
      break;

    case LLVMContext::MD_nonnull:
      // This only directly applies if the new type is also a pointer.
      if (NewTy->isPointerTy()) {
        NewLoad->setMetadata(ID, N);
        break;
      }
      // If it's integral now, translate it to !range metadata.
      if (NewTy->isIntegerTy()) {
        auto *ITy = cast<IntegerType>(NewTy);
        auto *NullInt = ConstantExpr::getPtrToInt(
            ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
        auto *NonNullInt =
            ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
        NewLoad->setMetadata(LLVMContext::MD_range,
                             MDB.createRange(NonNullInt, NullInt));
      }
      break;

    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These only directly apply if the new type is also a pointer.
      if (NewTy->isPointerTy())
        NewLoad->setMetadata(ID, N);
      break;

    case LLVMContext::MD_range:
      // FIXME: It would be nice to propagate this in some way, but the type
      // conversions make it hard. If the new type is a pointer, we could
      // translate it to !nonnull metadata.
      break;
    }
  }
  return NewLoad;
}
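The nonnull-to-range translation above leans on a subtlety of !range metadata: the pair (NonNullInt, NullInt) denotes the half-open interval [1, 0), which is permitted to wrap and therefore means "any value except 0", exactly the integer rendering of !nonnull.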
Example 8: CS
//......... portions of this code omitted .........
      ScalarizeTable &ArgIndices = ScalarizedElements[I];

      // Store the Value* version of the indices in here, but declare it now
      // for reuse.
      std::vector<Value*> Ops;
      for (ScalarizeTable::iterator SI = ArgIndices.begin(),
             E = ArgIndices.end(); SI != E; ++SI) {
        Value *V = *AI;
        LoadInst *OrigLoad = OriginalLoads[*SI];
        if (!SI->empty()) {
          Ops.reserve(SI->size());
          const Type *ElTy = V->getType();
          for (IndicesVector::const_iterator II = SI->begin(),
                 IE = SI->end(); II != IE; ++II) {
            // Use i32 to index structs, and i64 for others (pointers/arrays).
            // This satisfies GEP constraints.
            const Type *IdxTy = (ElTy->isStructTy() ?
                                 Type::getInt32Ty(F->getContext()) :
                                 Type::getInt64Ty(F->getContext()));
            Ops.push_back(ConstantInt::get(IdxTy, *II));
            // Keep track of the type we're currently indexing.
            ElTy = cast<CompositeType>(ElTy)->getTypeAtIndex(*II);
          }
          // And create a GEP to extract those indices.
          V = GetElementPtrInst::Create(V, Ops.begin(), Ops.end(),
                                        V->getName()+".idx", Call);
          Ops.clear();
          AA.copyValue(OrigLoad->getOperand(0), V);
        }
        // Since we're replacing a load make sure we take the alignment
        // of the previous load.
        LoadInst *newLoad = new LoadInst(V, V->getName()+".val", Call);
        newLoad->setAlignment(OrigLoad->getAlignment());
        // Transfer the TBAA info too.
        newLoad->setMetadata(LLVMContext::MD_tbaa,
                             OrigLoad->getMetadata(LLVMContext::MD_tbaa));
        Args.push_back(newLoad);
        AA.copyValue(OrigLoad, Args.back());
      }
    }

    if (ExtraArgHack)
      Args.push_back(Constant::getNullValue(Type::getInt32Ty(F->getContext())));

    // Push any varargs arguments on the list.
    for (; AI != CS.arg_end(); ++AI, ++ArgIndex) {
      Args.push_back(*AI);
      if (Attributes Attrs = CallPAL.getParamAttributes(ArgIndex))
        AttributesVec.push_back(AttributeWithIndex::get(Args.size(), Attrs));
    }

    // Add any function attributes.
    if (Attributes attrs = CallPAL.getFnAttributes())
      AttributesVec.push_back(AttributeWithIndex::get(~0, attrs));

    Instruction *New;
    if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      New = InvokeInst::Create(NF, II->getNormalDest(), II->getUnwindDest(),
                               Args.begin(), Args.end(), "", Call);
      cast<InvokeInst>(New)->setCallingConv(CS.getCallingConv());
      cast<InvokeInst>(New)->setAttributes(AttrListPtr::get(AttributesVec.begin(),
                                                            AttributesVec.end()));
    } else {
      New = CallInst::Create(NF, Args.begin(), Args.end(), "", Call);
      cast<CallInst>(New)->setCallingConv(CS.getCallingConv());
      cast<CallInst>(New)->setAttributes(AttrListPtr::get(AttributesVec.begin(),
                                                          AttributesVec.end()));
Example 9: runOnFunction
//......... portions of this code omitted .........
    int64_t AlignDownOffset = alignDown(EltOffset, 4);
    int64_t OffsetDiff = EltOffset - AlignDownOffset;
    unsigned AdjustedAlign = MinAlign(DoShiftOpt ? AlignDownOffset : EltOffset,
                                      KernArgBaseAlign);

    Value *ArgPtr;
    Type *AdjustedArgTy;
    if (DoShiftOpt) { // FIXME: Handle aggregate types
      // Since we don't have sub-dword scalar loads, avoid doing an extload by
      // loading earlier than the argument address, and extracting the relevant
      // bits.
      //
      // Additionally widen any sub-dword load to i32 even if suitably aligned,
      // so that CSE between different argument loads works easily.
      ArgPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, AlignDownOffset,
          Arg.getName() + ".kernarg.offset.align.down");
      AdjustedArgTy = Builder.getInt32Ty();
    } else {
      ArgPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, EltOffset,
          Arg.getName() + ".kernarg.offset");
      AdjustedArgTy = ArgTy;
    }

    if (IsV3 && Size >= 32) {
      V4Ty = VectorType::get(VT->getVectorElementType(), 4);
      // Use the hack that clang uses to avoid SelectionDAG ruining v3 loads
      AdjustedArgTy = V4Ty;
    }

    ArgPtr = Builder.CreateBitCast(ArgPtr, AdjustedArgTy->getPointerTo(AS),
                                   ArgPtr->getName() + ".cast");
    LoadInst *Load =
        Builder.CreateAlignedLoad(AdjustedArgTy, ArgPtr, AdjustedAlign);
    Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(Ctx, {}));

    MDBuilder MDB(Ctx);

    if (isa<PointerType>(ArgTy)) {
      if (Arg.hasNonNullAttr())
        Load->setMetadata(LLVMContext::MD_nonnull, MDNode::get(Ctx, {}));

      uint64_t DerefBytes = Arg.getDereferenceableBytes();
      if (DerefBytes != 0) {
        Load->setMetadata(
            LLVMContext::MD_dereferenceable,
            MDNode::get(Ctx,
                        MDB.createConstant(
                            ConstantInt::get(Builder.getInt64Ty(), DerefBytes))));
      }

      uint64_t DerefOrNullBytes = Arg.getDereferenceableOrNullBytes();
      if (DerefOrNullBytes != 0) {
        Load->setMetadata(
            LLVMContext::MD_dereferenceable_or_null,
            MDNode::get(Ctx,
                        MDB.createConstant(ConstantInt::get(Builder.getInt64Ty(),
                                                            DerefOrNullBytes))));
      }

      unsigned ParamAlign = Arg.getParamAlignment();
      if (ParamAlign != 0) {
        Load->setMetadata(
            LLVMContext::MD_align,
            MDNode::get(Ctx,
                        MDB.createConstant(ConstantInt::get(Builder.getInt64Ty(),
                                                            ParamAlign))));
      }
    }

    // TODO: Convert noalias arg to !noalias

    if (DoShiftOpt) {
      Value *ExtractBits = OffsetDiff == 0 ?
        Load : Builder.CreateLShr(Load, OffsetDiff * 8);

      IntegerType *ArgIntTy = Builder.getIntNTy(Size);
      Value *Trunc = Builder.CreateTrunc(ExtractBits, ArgIntTy);
      Value *NewVal = Builder.CreateBitCast(Trunc, ArgTy,
                                            Arg.getName() + ".load");
      Arg.replaceAllUsesWith(NewVal);
    } else if (IsV3) {
      Value *Shuf = Builder.CreateShuffleVector(Load, UndefValue::get(V4Ty),
                                                {0, 1, 2},
                                                Arg.getName() + ".load");
      Arg.replaceAllUsesWith(Shuf);
    } else {
      Load->setName(Arg.getName() + ".load");
      Arg.replaceAllUsesWith(Load);
    }
  }

  KernArgSegment->addAttribute(
      AttributeList::ReturnIndex,
      Attribute::getWithAlignment(Ctx, std::max(KernArgBaseAlign, MaxAlign)));

  return true;
}
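As the pointer-argument branch in this last example shows, !align, !dereferenceable, and !dereferenceable_or_null each carry a single i64 constant as payload, mirroring the parameter attributes they are derived from here.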