This article collects typical usage examples of the C++ method IntrinsicInst::getType. If you are wondering exactly what IntrinsicInst::getType does and how to use it, the hand-picked code examples below should help. You can also explore the containing class, IntrinsicInst, for further context.
Six code examples of IntrinsicInst::getType are shown below, sorted by popularity by default.
Example 1: assert
bool AMDGPUCodeGenPrepare::promoteUniformBitreverseToI32(
    IntrinsicInst &I) const {
  assert(I.getIntrinsicID() == Intrinsic::bitreverse &&
         "I must be bitreverse intrinsic");
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Function *I32 =
      Intrinsic::getDeclaration(Mod, Intrinsic::bitreverse, { I32Ty });
  Value *ExtOp = Builder.CreateZExt(I.getOperand(0), I32Ty);
  Value *ExtRes = Builder.CreateCall(I32, { ExtOp });
  Value *LShrOp =
      Builder.CreateLShr(ExtRes, 32 - getBaseElementBitWidth(I.getType()));
  Value *TruncRes =
      Builder.CreateTrunc(LShrOp, I.getType());
  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}
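Here I.getType() drives the whole rewrite: it gates the assertion, selects the i32 replacement type, supplies the element bit width for the shift, and is the type truncated back to at the end. The logical shift right by 32 minus the original bit width is what makes the widening sound. A standalone C++ sketch (my own illustration, not part of the pass) verifies this exhaustively for the i16 case:

#include <cassert>
#include <cstdint>

// Reference bit reversals at each width.
static uint16_t bitreverse16(uint16_t x) {
  uint16_t r = 0;
  for (int i = 0; i < 16; ++i)
    r = (uint16_t)((r << 1) | ((x >> i) & 1));
  return r;
}

static uint32_t bitreverse32(uint32_t x) {
  uint32_t r = 0;
  for (int i = 0; i < 32; ++i)
    r = (r << 1) | ((x >> i) & 1);
  return r;
}

int main() {
  for (uint32_t x = 0; x <= 0xFFFF; ++x) {
    uint32_t ext = x;                            // CreateZExt to i32
    uint32_t rev = bitreverse32(ext);            // call @llvm.bitreverse.i32
    uint16_t res = (uint16_t)(rev >> (32 - 16)); // CreateLShr by 32 - bit width
    assert(res == bitreverse16((uint16_t)x));    // CreateTrunc recovers the i16 result
  }
  return 0;
}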
Example 2: visitBitreverseIntrinsicInst
bool AMDGPUCodeGenPrepare::visitBitreverseIntrinsicInst(IntrinsicInst &I) {
  bool Changed = false;

  if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
      DA->isUniform(&I))
    Changed |= promoteUniformBitreverseToI32(I);

  return Changed;
}
Example 3: handleAlloca
void AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return;

  IRBuilder<> Builder(&I);

  // First try to replace the alloca with a vector
  Type *AllocaTy = I.getAllocatedType();

  DEBUG(dbgs() << "Trying to promote " << I << '\n');

  if (tryPromoteAllocaToVector(&I))
    return;

  DEBUG(dbgs() << " alloca is not a candidate for vectorization.\n");

  const Function &ContainingFunction = *I.getParent()->getParent();

  // FIXME: We should also try to get this value from the reqd_work_group_size
  // function attribute if it is available.
  unsigned WorkGroupSize = AMDGPU::getMaximumWorkGroupSize(ContainingFunction);
  int AllocaSize =
      WorkGroupSize * Mod->getDataLayout().getTypeAllocSize(AllocaTy);

  if (AllocaSize > LocalMemAvailable) {
    DEBUG(dbgs() << " Not enough local memory to promote alloca.\n");
    return;
  }

  std::vector<Value*> WorkList;

  if (!collectUsesWithPtrTypes(&I, WorkList)) {
    DEBUG(dbgs() << " Do not know how to convert all uses\n");
    return;
  }

  DEBUG(dbgs() << "Promoting alloca to local memory\n");
  LocalMemAvailable -= AllocaSize;

  Function *F = I.getParent()->getParent();

  Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
  GlobalVariable *GV = new GlobalVariable(
      *Mod, GVTy, false, GlobalValue::InternalLinkage,
      UndefValue::get(GVTy),
      Twine(F->getName()) + Twine('.') + I.getName(),
      nullptr,
      GlobalVariable::NotThreadLocal,
      AMDGPUAS::LOCAL_ADDRESS);
  GV->setUnnamedAddr(true);
  GV->setAlignment(I.getAlignment());

  Value *TCntY, *TCntZ;
  std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
  Value *TIdX = getWorkitemID(Builder, 0);
  Value *TIdY = getWorkitemID(Builder, 1);
  Value *TIdZ = getWorkitemID(Builder, 2);

  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
  TID = Builder.CreateAdd(TID, TIdZ);

  Value *Indices[] = {
    Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
    TID
  };

  Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
  I.mutateType(Offset->getType());
  I.replaceAllUsesWith(Offset);
  I.eraseFromParent();

  for (Value *V : WorkList) {
    CallInst *Call = dyn_cast<CallInst>(V);
    if (!Call) {
      Type *EltTy = V->getType()->getPointerElementType();
      PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);

      // The operand's value should be corrected on its own.
      if (isa<AddrSpaceCastInst>(V))
        continue;

      // FIXME: It doesn't really make sense to try to do this for all
      // instructions.
      V->mutateType(NewTy);
      continue;
    }

    IntrinsicInst *Intr = dyn_cast<IntrinsicInst>(Call);
    if (!Intr) {
      // FIXME: What is this for? It doesn't make sense to promote arbitrary
      // function calls. If the call is to a defined function that can also be
      // promoted, we should be able to do this once that function is also
      // rewritten.
//......... remainder of this code omitted .........
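The CreateMul/CreateAdd chain above linearizes the 3-D workitem ID so that every lane indexes its own slot of the per-workgroup LDS array: TID = TIdX * (TCntY * TCntZ) + TIdY * TCntZ + TIdZ. A plain C++ sketch (hypothetical workgroup sizes, not pass code) shows the mapping is collision-free:

#include <cassert>

// Mirrors the pass's Tmp0/Tmp1/TID arithmetic in ordinary integers.
unsigned flatWorkitemID(unsigned TIdX, unsigned TIdY, unsigned TIdZ,
                        unsigned TCntY, unsigned TCntZ) {
  return TIdX * (TCntY * TCntZ) + TIdY * TCntZ + TIdZ;
}

int main() {
  // Hypothetical 4x2x2 workgroup: all 16 workitems must get 16 distinct slots.
  bool seen[16] = {};
  for (unsigned x = 0; x < 4; ++x)
    for (unsigned y = 0; y < 2; ++y)
      for (unsigned z = 0; z < 2; ++z) {
        unsigned tid = flatWorkitemID(x, y, z, /*TCntY=*/2, /*TCntZ=*/2);
        assert(tid < 16 && !seen[tid]);
        seen[tid] = true;
      }
  return 0;
}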
Example 4: handleAlloca
// FIXME: Should try to pick the most likely to be profitable allocas first.
bool AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I, bool SufficientLDS) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return false;

  IRBuilder<> Builder(&I);

  // First try to replace the alloca with a vector
  Type *AllocaTy = I.getAllocatedType();

  DEBUG(dbgs() << "Trying to promote " << I << '\n');

  if (tryPromoteAllocaToVector(&I, AS))
    return true; // Promoted to vector.

  const Function &ContainingFunction = *I.getParent()->getParent();
  CallingConv::ID CC = ContainingFunction.getCallingConv();

  // Don't promote the alloca to LDS for shader calling conventions as the work
  // item ID intrinsics are not supported for these calling conventions.
  // Furthermore not all LDS is available for some of the stages.
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    break;
  default:
    DEBUG(dbgs() << " promote alloca to LDS not supported with calling convention.\n");
    return false;
  }

  // Not likely to have sufficient local memory for promotion.
  if (!SufficientLDS)
    return false;

  const AMDGPUSubtarget &ST =
      TM->getSubtarget<AMDGPUSubtarget>(ContainingFunction);
  unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;

  const DataLayout &DL = Mod->getDataLayout();

  unsigned Align = I.getAlignment();
  if (Align == 0)
    Align = DL.getABITypeAlignment(I.getAllocatedType());

  // FIXME: This computed padding is likely wrong since it depends on inverse
  // usage order.
  //
  // FIXME: It is also possible that if we're allowed to use all of the memory
  // we could end up using more than the maximum due to alignment padding.

  uint32_t NewSize = alignTo(CurrentLocalMemUsage, Align);
  uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
  NewSize += AllocSize;

  if (NewSize > LocalMemLimit) {
    DEBUG(dbgs() << " " << AllocSize
                 << " bytes of local memory not available to promote\n");
    return false;
  }

  CurrentLocalMemUsage = NewSize;

  std::vector<Value*> WorkList;

  if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
    DEBUG(dbgs() << " Do not know how to convert all uses\n");
    return false;
  }

  DEBUG(dbgs() << "Promoting alloca to local memory\n");

  Function *F = I.getParent()->getParent();

  Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
  GlobalVariable *GV = new GlobalVariable(
      *Mod, GVTy, false, GlobalValue::InternalLinkage,
      UndefValue::get(GVTy),
      Twine(F->getName()) + Twine('.') + I.getName(),
      nullptr,
      GlobalVariable::NotThreadLocal,
      AS.LOCAL_ADDRESS);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(I.getAlignment());

  Value *TCntY, *TCntZ;
  std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
  Value *TIdX = getWorkitemID(Builder, 0);
  Value *TIdY = getWorkitemID(Builder, 1);
  Value *TIdZ = getWorkitemID(Builder, 2);

  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
  TID = Builder.CreateAdd(TID, TIdZ);

  Value *Indices[] = {
//......... remainder of this code omitted .........
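Compared with Example 3, this version budgets LDS explicitly: the running usage is first aligned up to the alloca's alignment, then grows by one getTypeAllocSize(AllocaTy)-sized slot per workitem. A minimal sketch of that accounting (plain C++; this alignTo mimics llvm::alignTo, and the function name is mine, not the pass's):

#include <cstdint>

// Round Value up to the next multiple of Align (a power of two in practice).
static uint32_t alignTo(uint32_t Value, uint32_t Align) {
  return (Value + Align - 1) / Align * Align;
}

// Returns true if TypeSize bytes per workitem still fit under LocalMemLimit,
// updating the running usage the way the pass updates CurrentLocalMemUsage.
bool fitsInLDS(uint32_t &CurrentLocalMemUsage, uint32_t LocalMemLimit,
               uint32_t WorkGroupSize, uint32_t TypeSize, uint32_t Align) {
  uint32_t NewSize = alignTo(CurrentLocalMemUsage, Align); // alignment padding
  NewSize += WorkGroupSize * TypeSize;                     // one slot per workitem
  if (NewSize > LocalMemLimit)
    return false;
  CurrentLocalMemUsage = NewSize;
  return true;
}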
Example 5: decomposeIntrinsics
void DecomposeInsts::decomposeIntrinsics(BasicBlock* bb)
{
    IRBuilder<> builder(module->getContext());

    for (BasicBlock::iterator instI = bb->begin(), instE = bb->end(); instI != instE; /* empty */) {
        Instruction* inst = instI;

        // Note this increment of instI will skip decompositions of the code
        // inserted to decompose. E.g., if length -> dot, and dot is also to
        // be decomposed, then the decomposition of dot will be skipped
        // unless instI is reset.
        ++instI;

        IntrinsicInst* intrinsic = dyn_cast<IntrinsicInst>(inst);
        if (! intrinsic)
            continue;

        // Useful preamble for most cases
        llvm::Value* arg0 = 0;
        llvm::Value* arg1 = 0;
        llvm::Value* arg2 = 0;
        if (inst->getNumOperands() > 0)
            arg0 = inst->getOperand(0);
        if (inst->getNumOperands() > 1)
            arg1 = inst->getOperand(1);
        if (inst->getNumOperands() > 2)
            arg2 = inst->getOperand(2);
        llvm::Value* newInst = 0;
        Type* instTypes[] = { inst->getType(), inst->getType(), inst->getType(), inst->getType() };
        Type* argTypes[] = { arg0->getType(), arg0->getType(), arg0->getType(), arg0->getType() };
        builder.SetInsertPoint(instI);

        switch (intrinsic->getIntrinsicID()) {
        case Intrinsic::gla_fRadians:
        {
            // always decompose
            // arg0 -> arg0 * pi / 180
            const double pi_over_180 = 0.01745329251994329576923690768489;
            newInst = MultiplyByConstant(builder, arg0, pi_over_180);
            break;
        }
        case Intrinsic::gla_fDegrees:
        {
            // always decompose
            // arg0 -> arg0 * 180 / pi
            const double pi_into_180 = 57.295779513082320876798154814105;
            newInst = MultiplyByConstant(builder, arg0, pi_into_180);
            break;
        }
        case Intrinsic::gla_fMin:
            if (backEnd->decomposeIntrinsic(EDiMin)) {
                //
                // min(a,b) = select (a < b), a, b
                //
                llvm::Value* smeared = Smear(builder, module, arg1, arg0);
                newInst = builder.CreateFCmpOLT(arg0, smeared);
                newInst = builder.CreateSelect(newInst, arg0, smeared);
            }
            break;
        case Intrinsic::gla_fMax:
            if (backEnd->decomposeIntrinsic(EDiMax)) {
                //
                // max(a,b) = select (a > b), a, b
                //
                llvm::Value* smeared = Smear(builder, module, arg1, arg0);
                newInst = builder.CreateFCmpOGT(arg0, smeared);
                newInst = builder.CreateSelect(newInst, arg0, smeared);
            }
            break;
        case Intrinsic::gla_fClamp:
            if (backEnd->decomposeIntrinsic(EDiClamp))
            {
                //
                // Clamp(x, minVal, maxVal) is defined to be min(max(x, minVal), maxVal).
                //
                // The 2nd and 3rd arguments match each other, but not necessarily
                // the 1st argument. In the decomposition, this difference matches
                // min/max's difference in their 1st and 2nd arguments.
                //
                argTypes[2] = arg1->getType(); // argTypes[*] start at 0 for the return value, arg* start at 0 for operand 0
                Function* max = Intrinsic::getDeclaration(module, Intrinsic::gla_fMax, makeArrayRef(argTypes, 3));
                Function* min = Intrinsic::getDeclaration(module, Intrinsic::gla_fMin, makeArrayRef(argTypes, 3));
                newInst = builder.CreateCall2(max, arg0, arg1);
                newInst = builder.CreateCall2(min, newInst, arg2);

                // Make next iteration revisit this decomposition, in case min
                // or max are decomposed.
                instI = inst;
                ++instI;
            }
            break;
        case Intrinsic::gla_fAsin:
            if (backEnd->decomposeIntrinsic(EDiAsin)) {
                UnsupportedFunctionality("decomposition of gla_fAsin");
                //changed = true;
            }
            break;
        case Intrinsic::gla_fAcos:
            if (backEnd->decomposeIntrinsic(EDiAcos))
//......... remainder of this code omitted .........
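The clamp case is the interesting one: rather than emitting compares directly, it re-emits gla_fMax/gla_fMin calls and then rewinds instI so the new calls can themselves be decomposed into the select patterns above on the next iterations. In scalar C++ terms (my own illustration, not LunarGLASS code) the three decompositions are:

#include <cassert>

float fmin_sel(float a, float b) { return a < b ? a : b; } // select (a < b), a, b
float fmax_sel(float a, float b) { return a > b ? a : b; } // select (a > b), a, b

// Clamp(x, minVal, maxVal) = min(max(x, minVal), maxVal)
float fclamp(float x, float minVal, float maxVal) {
  return fmin_sel(fmax_sel(x, minVal), maxVal);
}

int main() {
  assert(fclamp(2.5f, 0.0f, 1.0f) == 1.0f);   // above the range -> maxVal
  assert(fclamp(-1.0f, 0.0f, 1.0f) == 0.0f);  // below the range -> minVal
  assert(fclamp(0.25f, 0.0f, 1.0f) == 0.25f); // inside the range -> unchanged
  return 0;
}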
Example 6: runOnBasicBlock
bool IntrinsicCleanerPass::runOnBasicBlock(BasicBlock &b, Module &M) {
  bool dirty = false;
  bool block_split = false;

#if LLVM_VERSION_CODE <= LLVM_VERSION(3, 1)
  unsigned WordSize = TargetData.getPointerSizeInBits() / 8;
#else
  unsigned WordSize = DataLayout.getPointerSizeInBits() / 8;
#endif

  for (BasicBlock::iterator i = b.begin(), ie = b.end();
       (i != ie) && (block_split == false);) {
    IntrinsicInst *ii = dyn_cast<IntrinsicInst>(&*i);
    // increment now since LowerIntrinsic deletion makes iterator invalid.
    ++i;
    if (ii) {
      switch (ii->getIntrinsicID()) {
      case Intrinsic::vastart:
      case Intrinsic::vaend:
        break;

      // Lower vacopy so that object resolution etc is handled by
      // normal instructions.
      //
      // FIXME: This is much more target dependent than just the word size,
      // however this works for x86-32 and x86-64.
      case Intrinsic::vacopy: { // (dst, src) -> *((i8**) dst) = *((i8**) src)
        Value *dst = ii->getArgOperand(0);
        Value *src = ii->getArgOperand(1);

        if (WordSize == 4) {
          Type *i8pp = PointerType::getUnqual(
              PointerType::getUnqual(Type::getInt8Ty(getGlobalContext())));
          Value *castedDst =
              CastInst::CreatePointerCast(dst, i8pp, "vacopy.cast.dst", ii);
          Value *castedSrc =
              CastInst::CreatePointerCast(src, i8pp, "vacopy.cast.src", ii);
          Value *load = new LoadInst(castedSrc, "vacopy.read", ii);
          new StoreInst(load, castedDst, false, ii);
        } else {
          assert(WordSize == 8 && "Invalid word size!");
          Type *i64p = PointerType::getUnqual(Type::getInt64Ty(getGlobalContext()));
          Value *pDst = CastInst::CreatePointerCast(dst, i64p, "vacopy.cast.dst", ii);
          Value *pSrc = CastInst::CreatePointerCast(src, i64p, "vacopy.cast.src", ii);

          Value *val = new LoadInst(pSrc, std::string(), ii);
          new StoreInst(val, pDst, ii);

          Value *off = ConstantInt::get(Type::getInt64Ty(getGlobalContext()), 1);
          pDst = GetElementPtrInst::Create(pDst, off, std::string(), ii);
          pSrc = GetElementPtrInst::Create(pSrc, off, std::string(), ii);
          val = new LoadInst(pSrc, std::string(), ii);
          new StoreInst(val, pDst, ii);

          pDst = GetElementPtrInst::Create(pDst, off, std::string(), ii);
          pSrc = GetElementPtrInst::Create(pSrc, off, std::string(), ii);
          val = new LoadInst(pSrc, std::string(), ii);
          new StoreInst(val, pDst, ii);
        }
        ii->removeFromParent();
        delete ii;
        break;
      }

      case Intrinsic::sadd_with_overflow:
      case Intrinsic::ssub_with_overflow:
      case Intrinsic::smul_with_overflow:
      case Intrinsic::uadd_with_overflow:
      case Intrinsic::usub_with_overflow:
      case Intrinsic::umul_with_overflow: {
        IRBuilder<> builder(ii->getParent(), ii);

        Value *op1 = ii->getArgOperand(0);
        Value *op2 = ii->getArgOperand(1);

        Value *result = 0;
        Value *result_ext = 0;
        Value *overflow = 0;

        unsigned int bw = op1->getType()->getPrimitiveSizeInBits();
        unsigned int bw2 = op1->getType()->getPrimitiveSizeInBits() * 2;

        if ((ii->getIntrinsicID() == Intrinsic::uadd_with_overflow) ||
            (ii->getIntrinsicID() == Intrinsic::usub_with_overflow) ||
            (ii->getIntrinsicID() == Intrinsic::umul_with_overflow)) {
          Value *op1ext =
              builder.CreateZExt(op1, IntegerType::get(M.getContext(), bw2));
          Value *op2ext =
              builder.CreateZExt(op2, IntegerType::get(M.getContext(), bw2));
          Value *int_max_s =
              ConstantInt::get(op1->getType(), APInt::getMaxValue(bw));
          Value *int_max =
              builder.CreateZExt(int_max_s, IntegerType::get(M.getContext(), bw2));

          if (ii->getIntrinsicID() == Intrinsic::uadd_with_overflow) {
            result_ext = builder.CreateAdd(op1ext, op2ext);
          } else if (ii->getIntrinsicID() == Intrinsic::usub_with_overflow) {
            result_ext = builder.CreateSub(op1ext, op2ext);
          } else if (ii->getIntrinsicID() == Intrinsic::umul_with_overflow) {
            result_ext = builder.CreateMul(op1ext, op2ext);
          }
          overflow = builder.CreateICmpUGT(result_ext, int_max);
        } else if ((ii->getIntrinsicID() == Intrinsic::sadd_with_overflow) ||
                   (ii->getIntrinsicID() == Intrinsic::ssub_with_overflow) ||
                   (ii->getIntrinsicID() == Intrinsic::smul_with_overflow)) {
          Value *op1ext =
              builder.CreateSExt(op1, IntegerType::get(M.getContext(), bw2));
//......... remainder of this code omitted .........
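For the unsigned *_with_overflow intrinsics, the lowering performs the operation at twice the bit width and flags overflow when the wide result exceeds the unsigned maximum of the original width (the result itself is presumably truncated back in the omitted tail of the example). A standalone C++ sketch of the same check for a 32-bit unsigned add (my own illustration, not KLEE code):

#include <cassert>
#include <cstdint>

// zext both operands to 2x width, add, then compare against UINT32_MAX
// (the zext'd APInt::getMaxValue(bw) the pass builds as int_max).
bool uadd_overflows(uint32_t a, uint32_t b, uint32_t &result) {
  uint64_t ext = (uint64_t)a + (uint64_t)b; // CreateZExt + CreateAdd at bw2
  result = (uint32_t)ext;                   // low half is the wrapped result
  return ext > (uint64_t)UINT32_MAX;        // CreateICmpUGT(result_ext, int_max)
}

int main() {
  uint32_t r;
  assert(!uadd_overflows(1u, 2u, r) && r == 3u);
  assert(uadd_overflows(0xFFFFFFFFu, 1u, r) && r == 0u); // wraps, overflow set
  return 0;
}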