This article collects typical usage examples of the C++ method IntrinsicInst::getIntrinsicID. If you are wondering what IntrinsicInst::getIntrinsicID does, how exactly to call it, or what real-world uses look like, the curated examples below should help. You may also want to read further about the enclosing class, IntrinsicInst.
The following 15 code examples of IntrinsicInst::getIntrinsicID are shown, sorted by popularity by default.
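Before the examples, here is a minimal sketch of the idiom they all share: dyn_cast an Instruction to IntrinsicInst and switch on getIntrinsicID(). The helper name countLifetimeMarkers is made up for illustration; the headers and calls are the stock LLVM C++ API.

#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

// Count the lifetime markers in a function. dyn_cast returns null for
// ordinary calls, so the switch only ever sees genuine intrinsic calls.
static unsigned countLifetimeMarkers(Function &F) {
  unsigned Count = 0;
  for (Instruction &I : instructions(F)) {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
    if (!II)
      continue;
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      ++Count;
      break;
    default:
      break;
    }
  }
  return Count;
}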
Example 1: allPredCameFromBeginCatch
static bool allPredCameFromBeginCatch(
    BasicBlock *BB, BasicBlock::reverse_iterator InstRbegin,
    IntrinsicInst **SecondEndCatch, SmallSet<BasicBlock *, 4> &VisitedBlocks) {
  VisitedBlocks.insert(BB);
  // Look for a begincatch in this block.
  for (BasicBlock::reverse_iterator RI = InstRbegin, RE = BB->rend(); RI != RE;
       ++RI) {
    IntrinsicInst *IC = dyn_cast<IntrinsicInst>(&*RI);
    if (IC && IC->getIntrinsicID() == Intrinsic::eh_begincatch)
      return true;
    // If we find another end catch before we find a begin catch, that's
    // an error.
    if (IC && IC->getIntrinsicID() == Intrinsic::eh_endcatch) {
      *SecondEndCatch = IC;
      return false;
    }
    // If we encounter a landingpad instruction, the search failed.
    if (isa<LandingPadInst>(*RI))
      return false;
  }
  // If while searching we find a block with no predecessors,
  // the search failed.
  if (pred_empty(BB))
    return false;
  // Search any predecessors we haven't seen before.
  for (BasicBlock *Pred : predecessors(BB)) {
    if (VisitedBlocks.count(Pred))
      continue;
    if (!allPredCameFromBeginCatch(Pred, Pred->rbegin(), SecondEndCatch,
                                   VisitedBlocks))
      return false;
  }
  return true;
}
Example 2: allSuccessorsReachEndCatch
static bool
allSuccessorsReachEndCatch(BasicBlock *BB, BasicBlock::iterator InstBegin,
                           IntrinsicInst **SecondBeginCatch,
                           SmallSet<BasicBlock *, 4> &VisitedBlocks) {
  VisitedBlocks.insert(BB);
  for (BasicBlock::iterator I = InstBegin, E = BB->end(); I != E; ++I) {
    IntrinsicInst *IC = dyn_cast<IntrinsicInst>(I);
    if (IC && IC->getIntrinsicID() == Intrinsic::eh_endcatch)
      return true;
    // If we find another begincatch while looking for an endcatch,
    // that's also an error.
    if (IC && IC->getIntrinsicID() == Intrinsic::eh_begincatch) {
      *SecondBeginCatch = IC;
      return false;
    }
  }
  // If we reach a block with no successors while searching, the
  // search has failed.
  if (succ_empty(BB))
    return false;
  // Otherwise, search all of the successors.
  for (BasicBlock *Succ : successors(BB)) {
    if (VisitedBlocks.count(Succ))
      continue;
    if (!allSuccessorsReachEndCatch(Succ, Succ->begin(), SecondBeginCatch,
                                    VisitedBlocks))
      return false;
  }
  return true;
}
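Examples 1 and 2 are two halves of one pairing check: the first walks backwards through predecessors to confirm that every llvm.eh.endcatch is preceded by a matching llvm.eh.begincatch, while the second walks forwards through successors to confirm the reverse. The SecondEndCatch/SecondBeginCatch out-parameters report the duplicate intrinsic that made the check fail.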
Example 3: OptimizeCallInst
bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
  // Lower all uses of llvm.objectsize.*
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (II && II->getIntrinsicID() == Intrinsic::objectsize) {
    bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
    const Type *ReturnTy = CI->getType();
    Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
    CI->replaceAllUsesWith(RetVal);
    CI->eraseFromParent();
    return true;
  }

  // From here on out we're working with named functions.
  if (CI->getCalledFunction() == 0) return false;

  // We'll need TargetData from here on out.
  const TargetData *TD = TLI ? TLI->getTargetData() : 0;
  if (!TD) return false;

  // Lower all default uses of _chk calls.  This is very similar
  // to what InstCombineCalls does, but here we are only lowering calls
  // that have the default "don't know" as the objectsize.  Anything else
  // should be left alone.
  CodeGenPrepareFortifiedLibCalls Simplifier;
  return Simplifier.fold(CI, TD);
}
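The substituted constant matches the documented fallback for llvm.objectsize: when the size cannot be determined, the intrinsic folds to 0 if its min argument is true, and to -1 (all ones) otherwise.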
Example 4: handleIntrinsicCall
bool LLVMReachingDefsAnalysis::handleIntrinsicCall(LLVMNode *callNode,
                                                   CallInst *CI,
                                                   DefMap *df)
{
    bool changed = false;
    IntrinsicInst *I = cast<IntrinsicInst>(CI);
    Value *dest;

    switch (I->getIntrinsicID())
    {
        case Intrinsic::memmove:
        case Intrinsic::memcpy:
        case Intrinsic::memset:
            dest = I->getOperand(0);
            break;
        default:
            return handleUndefinedCall(callNode, CI, df);
    }

    LLVMNode *destNode = getOperand(callNode, dest, 1);
    assert(destNode && "No operand for intrinsic call");

    for (const Pointer& ptr : destNode->getPointsTo()) {
        // we could compute all the concrete offsets, but
        // these functions usually set the whole memory,
        // so if we use UNKNOWN_OFFSET, the effect is the same
        changed |= df->add(Pointer(ptr.obj, UNKNOWN_OFFSET), callNode);
    }

    return changed;
}
Example 5: getLocForWrite
/// getLocForWrite - Return a Location stored to by the specified instruction.
/// If isRemovable returns true, this function and getLocForRead completely
/// describe the memory operations for this instruction.
static MemoryLocation getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return MemoryLocation::get(SI);

  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
    // memcpy/memmove/memset.
    MemoryLocation Loc = MemoryLocation::getForDest(MI);
    return Loc;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
  if (!II)
    return MemoryLocation();

  switch (II->getIntrinsicID()) {
  default:
    return MemoryLocation(); // Unhandled intrinsic.
  case Intrinsic::init_trampoline:
    // FIXME: We don't know the size of the trampoline, so we can't really
    // handle it here.
    return MemoryLocation(II->getArgOperand(0));
  case Intrinsic::lifetime_end: {
    uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
    return MemoryLocation(II->getArgOperand(1), Len);
  }
  }
}
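Note the operand order in the lifetime_end case: in the LLVM releases this snippet targets, llvm.lifetime.end takes the size as its first argument and the pointer as its second, which is why Len comes from operand 0 while the location is built from operand 1.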
Example 6: promoteUniformBitreverseToI32
bool AMDGPUCodeGenPrepare::promoteUniformBitreverseToI32(
    IntrinsicInst &I) const {
  assert(I.getIntrinsicID() == Intrinsic::bitreverse &&
         "I must be bitreverse intrinsic");
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Function *I32 =
      Intrinsic::getDeclaration(Mod, Intrinsic::bitreverse, { I32Ty });
  Value *ExtOp = Builder.CreateZExt(I.getOperand(0), I32Ty);
  Value *ExtRes = Builder.CreateCall(I32, { ExtOp });
  Value *LShrOp =
      Builder.CreateLShr(ExtRes, 32 - getBaseElementBitWidth(I.getType()));
  Value *TruncRes =
      Builder.CreateTrunc(LShrOp, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}
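The logical shift right is what makes the widening sound: reversing the bits of a zero-extended value leaves the interesting bits at the top of the i32, and shifting right by 32 minus the original bit width moves them back down before the truncate.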
Example 7: escapeRegNode
/// Escape RegNode so that we can access it from child handlers. Find the call
/// to frameescape, if any, in the entry block and append RegNode to the list
/// of arguments.
int WinEHStatePass::escapeRegNode(Function &F) {
  // Find the call to frameescape and extract its arguments.
  IntrinsicInst *EscapeCall = nullptr;
  for (Instruction &I : F.getEntryBlock()) {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
    if (II && II->getIntrinsicID() == Intrinsic::frameescape) {
      EscapeCall = II;
      break;
    }
  }
  SmallVector<Value *, 8> Args;
  if (EscapeCall) {
    auto Ops = EscapeCall->arg_operands();
    Args.append(Ops.begin(), Ops.end());
  }
  Args.push_back(RegNode);

  // Replace the call (if it exists) with new one. Otherwise, insert at the end
  // of the entry block.
  IRBuilder<> Builder(&F.getEntryBlock(),
                      EscapeCall ? EscapeCall : F.getEntryBlock().end());
  Builder.CreateCall(FrameEscape, Args);
  if (EscapeCall)
    EscapeCall->eraseFromParent();
  return Args.size() - 1;
}
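(llvm.frameescape was later renamed llvm.localescape; this snippet predates the rename.) The return value, Args.size() - 1, is the index at which RegNode was appended to the escape list, which is what the matching recover intrinsic needs to find it.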
Example 8: detectLog2OfHalf
static void detectLog2OfHalf(Value *&Op, Value *&Y, IntrinsicInst *&Log2) {
  if (!Op->hasOneUse())
    return;

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(Op);
  if (!II)
    return;
  if (II->getIntrinsicID() != Intrinsic::log2 || !II->hasUnsafeAlgebra())
    return;
  Log2 = II;

  Value *OpLog2Of = II->getArgOperand(0);
  if (!OpLog2Of->hasOneUse())
    return;
  Instruction *I = dyn_cast<Instruction>(OpLog2Of);
  if (!I)
    return;
  if (I->getOpcode() != Instruction::FMul || !I->hasUnsafeAlgebra())
    return;

  if (match(I->getOperand(0), m_SpecificFP(0.5)))
    Y = I->getOperand(1);
  else if (match(I->getOperand(1), m_SpecificFP(0.5)))
    Y = I->getOperand(0);
}
Example 9: isCallPromotable
static bool isCallPromotable(CallInst *CI) {
  // TODO: We might be able to handle some cases where the callee is a
  // constantexpr bitcast of a function.
  if (!CI->getCalledFunction())
    return false;

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (!II)
    return false;

  switch (II->getIntrinsicID()) {
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  case Intrinsic::invariant_start:
  case Intrinsic::invariant_end:
  case Intrinsic::invariant_group_barrier:
  case Intrinsic::objectsize:
    return true;
  default:
    return false;
  }
}
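A whitelist like this is typical of alloca-promotion passes: memory-transfer intrinsics, memory-use markers, and objectsize can all be rewritten mechanically when the underlying pointer is replaced, so only those calls are treated as promotable and any other call blocks promotion.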
Example 10: splitOverflowIntrinsic
/// \brief Split sadd.with.overflow into add + sadd.with.overflow to allow
/// analysis and optimization.
///
/// \return A new value representing the non-overflowing add if possible,
/// otherwise return the original value.
Instruction *SimplifyIndvar::splitOverflowIntrinsic(Instruction *IVUser,
                                                    const DominatorTree *DT) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(IVUser);
  if (!II || II->getIntrinsicID() != Intrinsic::sadd_with_overflow)
    return IVUser;

  // Find a branch guarded by the overflow check.
  BranchInst *Branch = 0;
  Instruction *AddVal = 0;
  for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
       UI != E; ++UI) {
    if (ExtractValueInst *ExtractInst = dyn_cast<ExtractValueInst>(*UI)) {
      if (ExtractInst->getNumIndices() != 1)
        continue;
      if (ExtractInst->getIndices()[0] == 0)
        AddVal = ExtractInst;
      else if (ExtractInst->getIndices()[0] == 1 && ExtractInst->hasOneUse())
        Branch = dyn_cast<BranchInst>(ExtractInst->use_back());
    }
  }
  if (!AddVal || !Branch)
    return IVUser;

  BasicBlock *ContinueBB = Branch->getSuccessor(1);
  if (llvm::next(pred_begin(ContinueBB)) != pred_end(ContinueBB))
    return IVUser;

  // Check if all users of the add are provably NSW.
  bool AllNSW = true;
  for (Value::use_iterator UI = AddVal->use_begin(), E = AddVal->use_end();
       UI != E; ++UI) {
    if (Instruction *UseInst = dyn_cast<Instruction>(*UI)) {
      BasicBlock *UseBB = UseInst->getParent();
      if (PHINode *PHI = dyn_cast<PHINode>(UseInst))
        UseBB = PHI->getIncomingBlock(UI);
      if (!DT->dominates(ContinueBB, UseBB)) {
        AllNSW = false;
        break;
      }
    }
  }
  if (!AllNSW)
    return IVUser;

  // Go for it...
  IRBuilder<> Builder(IVUser);
  Instruction *AddInst = dyn_cast<Instruction>(
      Builder.CreateNSWAdd(II->getOperand(0), II->getOperand(1)));

  // The caller expects the new add to have the same form as the intrinsic. The
  // IV operand position must be the same.
  assert((AddInst->getOpcode() == Instruction::Add &&
          AddInst->getOperand(0) == II->getOperand(0)) &&
         "Bad add instruction created from overflow intrinsic.");

  AddVal->replaceAllUsesWith(AddInst);
  DeadInsts.push_back(AddVal);
  return AddInst;
}
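Two details are easy to miss here: getSuccessor(1) is the false edge of the branch on the overflow flag, i.e. the path taken when no overflow occurred, and the NSW add is only justified for users dominated by that non-overflow block, which is exactly what the DT->dominates check enforces.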
Example 11: visitIntrinsicInst
bool AMDGPUCodeGenPrepare::visitIntrinsicInst(IntrinsicInst &I) {
  switch (I.getIntrinsicID()) {
  case Intrinsic::bitreverse:
    return visitBitreverseIntrinsicInst(I);
  default:
    return false;
  }
}
Example 12: OptimizeCallInst
bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
  BasicBlock *BB = CI->getParent();

  // Lower inline assembly if we can.
  // If we found an inline asm expression, and if the target knows how to
  // lower it to normal LLVM code, do so now.
  if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
    if (TLI->ExpandInlineAsm(CI)) {
      // Avoid invalidating the iterator.
      CurInstIterator = BB->begin();
      // Avoid processing instructions out of order, which could cause
      // reuse before a value is defined.
      SunkAddrs.clear();
      return true;
    }
    // Sink address computing for memory operands into the block.
    if (OptimizeInlineAsmInst(CI))
      return true;
  }

  // Lower all uses of llvm.objectsize.*
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (II && II->getIntrinsicID() == Intrinsic::objectsize) {
    bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
    Type *ReturnTy = CI->getType();
    Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);

    // Substituting this can cause recursive simplifications, which can
    // invalidate our iterator.  Use a WeakVH to hold onto it in case this
    // happens.
    WeakVH IterHandle(CurInstIterator);
    ReplaceAndSimplifyAllUses(CI, RetVal, TLI ? TLI->getTargetData() : 0,
                              TLInfo, ModifiedDT ? 0 : DT);

    // If the iterator instruction was recursively deleted, start over at the
    // start of the block.
    if (IterHandle != CurInstIterator) {
      CurInstIterator = BB->begin();
      SunkAddrs.clear();
    }
    return true;
  }

  // From here on out we're working with named functions.
  if (CI->getCalledFunction() == 0) return false;

  // We'll need TargetData from here on out.
  const TargetData *TD = TLI ? TLI->getTargetData() : 0;
  if (!TD) return false;

  // Lower all default uses of _chk calls.  This is very similar
  // to what InstCombineCalls does, but here we are only lowering calls
  // that have the default "don't know" as the objectsize.  Anything else
  // should be left alone.
  CodeGenPrepareFortifiedLibCalls Simplifier;
  return Simplifier.fold(CI, TD);
}
Example 13: handle_instrinsic
void AAAnalyzer::handle_instrinsic(Instruction *inst) {
    IntrinsicInst * call = (IntrinsicInst*) inst;
    switch (call->getIntrinsicID()) {
        // Variable Argument Handling Intrinsics
        case Intrinsic::vastart:
        {
            Value * va_list_ptr = call->getArgOperand(0);
            wrapValue(va_list_ptr);
        }
        break;
        case Intrinsic::vaend:
        {
        }
        break;
        case Intrinsic::vacopy: // the same with memmove/memcpy
        // Standard C Library Intrinsics
        case Intrinsic::memmove:
        case Intrinsic::memcpy:
        {
            Value * src_ptr = call->getArgOperand(0);
            Value * dst_ptr = call->getArgOperand(1);

            DyckVertex* src_ptr_ver = wrapValue(src_ptr);
            DyckVertex* dst_ptr_ver = wrapValue(dst_ptr);

            DyckVertex* src_ver = addPtrTo(src_ptr_ver, NULL);
            DyckVertex* dst_ver = addPtrTo(dst_ptr_ver, NULL);

            makeAlias(src_ver, dst_ver);
        }
        break;
        case Intrinsic::memset:
        {
            Value * ptr = call->getArgOperand(0);
            Value * val = call->getArgOperand(1);
            addPtrTo(wrapValue(ptr), wrapValue(val));
        }
        break;
        /// @todo other C lib intrinsics

        // Accurate Garbage Collection Intrinsics
        // Code Generator Intrinsics
        // Bit Manipulation Intrinsics
        // Exception Handling Intrinsics
        // Trampoline Intrinsics
        // Memory Use Markers
        // General Intrinsics
        // Arithmetic with Overflow Intrinsics
        // Specialised Arithmetic Intrinsics
        // Half Precision Floating Point Intrinsics
        // Debugger Intrinsics
        default: break;
    }
}
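One caveat when reading the memcpy/memmove case: llvm.memcpy's first argument is the destination and the second is the source, so src_ptr and dst_ptr here are named the opposite way round. The analysis result is presumably unaffected, since makeAlias records a symmetric alias relation between the two pointees.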
Example 14: getStoredPointerOperand
/// getStoredPointerOperand - Return the pointer that is being written to.
static Value *getStoredPointerOperand(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return MI->getDest();

  IntrinsicInst *II = cast<IntrinsicInst>(I);
  switch (II->getIntrinsicID()) {
  default: llvm_unreachable("Unexpected intrinsic!");
  case Intrinsic::init_trampoline:
    return II->getArgOperand(0);
  }
}
Example 15: isShortenable
/// isShortenable - Returns true if this instruction can be safely shortened in
/// length.
static bool isShortenable(Instruction *I) {
  // Don't shorten stores for now
  if (isa<StoreInst>(I))
    return false;

  IntrinsicInst *II = cast<IntrinsicInst>(I);
  switch (II->getIntrinsicID()) {
    default: return false;
    case Intrinsic::memset:
    case Intrinsic::memcpy:
      // Do shorten memory intrinsics.
      return true;
  }
}
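Both of these last two helpers call cast<IntrinsicInst> unconditionally, which asserts on anything that is not an intrinsic call, so callers are expected to pre-filter. Below is a minimal sketch of a combined driver, assuming the two helpers above are in scope; the wrapper name getShortenableDest is hypothetical.

// Hypothetical wrapper: returns the written-to pointer only for
// instructions the two helpers above can handle and that are safe
// to shorten. cast<> would assert on anything else, hence the
// isa<> pre-check.
static Value *getShortenableDest(Instruction *I) {
  if (!isa<StoreInst>(I) && !isa<IntrinsicInst>(I))
    return nullptr;
  if (!isShortenable(I))
    return nullptr; // stores and non-shortenable intrinsics
  return getStoredPointerOperand(I);
}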