This article collects typical usage examples of the C++ IntrinsicInst class: what IntrinsicInst is for, how it is used, and what real code that uses it looks like. The curated examples below may serve as a reference.
The following 15 IntrinsicInst code examples are presented, sorted by popularity by default.
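Nearly every example below follows the same core pattern: dyn_cast an Instruction down to IntrinsicInst, then dispatch on getIntrinsicID(). As a warm-up, here is a minimal self-contained sketch of that pattern; the helper name countMemcpys is made up for illustration and appears in none of the examples below.

#include "llvm/IR/Function.h"
#include "llvm/IR/IntrinsicInst.h"

using namespace llvm;

// Hypothetical helper: count the llvm.memcpy calls in a function by
// filtering each instruction down to IntrinsicInst and checking its ID.
static unsigned countMemcpys(Function &F) {
  unsigned N = 0;
  for (BasicBlock &BB : F)
    for (Instruction &I : BB)
      if (auto *II = dyn_cast<IntrinsicInst>(&I)) // null unless intrinsic call
        if (II->getIntrinsicID() == Intrinsic::memcpy)
          ++N;
  return N;
}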
Example 1: escapeRegNode
/// Escape RegNode so that we can access it from child handlers. Find the call
/// to localescape, if any, in the entry block and append RegNode to the list
/// of arguments.
int WinEHStatePass::escapeRegNode(Function &F) {
  // Find the call to localescape and extract its arguments.
  IntrinsicInst *EscapeCall = nullptr;
  for (Instruction &I : F.getEntryBlock()) {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
    if (II && II->getIntrinsicID() == Intrinsic::localescape) {
      EscapeCall = II;
      break;
    }
  }
  SmallVector<Value *, 8> Args;
  if (EscapeCall) {
    auto Ops = EscapeCall->arg_operands();
    Args.append(Ops.begin(), Ops.end());
  }
  Args.push_back(RegNode);

  // Replace the call (if it exists) with a new one. Otherwise, insert at the
  // end of the entry block.
  IRBuilder<> Builder(&F.getEntryBlock(),
                      EscapeCall ? EscapeCall : F.getEntryBlock().end());
  Builder.CreateCall(FrameEscape, Args);
  if (EscapeCall)
    EscapeCall->eraseFromParent();
  return Args.size() - 1;
}
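The returned index (RegNode's position in the localescape argument list) is what a child handler would pass to llvm.localrecover to find the slot again. A minimal sketch of that counterpart, assuming the same era of the C++ API; the helper name recoverRegNode and its parameters are illustrative, not part of WinEHStatePass:

static Value *recoverRegNode(IRBuilder<> &Builder, Module *M,
                             Function *ParentFn, Value *ParentFP, int Idx) {
  // llvm.localrecover(i8* func, i8* fp, i32 idx) returns the address of the
  // idx'th localescape'd value inside ParentFn's frame.
  Function *LocalRecover =
      Intrinsic::getDeclaration(M, Intrinsic::localrecover);
  Value *FnPtr = Builder.CreateBitCast(
      ParentFn, Type::getInt8PtrTy(M->getContext()));
  return Builder.CreateCall(LocalRecover,
                            {FnPtr, ParentFP, Builder.getInt32(Idx)});
}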
Example 2: allPredCameFromBeginCatch
static bool allPredCameFromBeginCatch(
    BasicBlock *BB, BasicBlock::reverse_iterator InstRbegin,
    IntrinsicInst **SecondEndCatch, SmallSet<BasicBlock *, 4> &VisitedBlocks) {
  VisitedBlocks.insert(BB);
  // Look for a begincatch in this block.
  for (BasicBlock::reverse_iterator RI = InstRbegin, RE = BB->rend(); RI != RE;
       ++RI) {
    IntrinsicInst *IC = dyn_cast<IntrinsicInst>(&*RI);
    if (IC && IC->getIntrinsicID() == Intrinsic::eh_begincatch)
      return true;
    // If we find another end catch before we find a begin catch, that's
    // an error.
    if (IC && IC->getIntrinsicID() == Intrinsic::eh_endcatch) {
      *SecondEndCatch = IC;
      return false;
    }
    // If we encounter a landingpad instruction, the search failed.
    if (isa<LandingPadInst>(*RI))
      return false;
  }
  // If while searching we find a block with no predecessors,
  // the search failed.
  if (pred_empty(BB))
    return false;
  // Search any predecessors we haven't seen before.
  for (BasicBlock *Pred : predecessors(BB)) {
    if (VisitedBlocks.count(Pred))
      continue;
    if (!allPredCameFromBeginCatch(Pred, Pred->rbegin(), SecondEndCatch,
                                   VisitedBlocks))
      return false;
  }
  return true;
}
Example 3: allSuccessorsReachEndCatch
static bool
allSuccessorsReachEndCatch(BasicBlock *BB, BasicBlock::iterator InstBegin,
                           IntrinsicInst **SecondBeginCatch,
                           SmallSet<BasicBlock *, 4> &VisitedBlocks) {
  VisitedBlocks.insert(BB);
  for (BasicBlock::iterator I = InstBegin, E = BB->end(); I != E; ++I) {
    IntrinsicInst *IC = dyn_cast<IntrinsicInst>(I);
    if (IC && IC->getIntrinsicID() == Intrinsic::eh_endcatch)
      return true;
    // If we find another begincatch while looking for an endcatch,
    // that's also an error.
    if (IC && IC->getIntrinsicID() == Intrinsic::eh_begincatch) {
      *SecondBeginCatch = IC;
      return false;
    }
  }
  // If we reach a block with no successors while searching, the
  // search has failed.
  if (succ_empty(BB))
    return false;
  // Otherwise, search all of the successors.
  for (BasicBlock *Succ : successors(BB)) {
    if (VisitedBlocks.count(Succ))
      continue;
    if (!allSuccessorsReachEndCatch(Succ, Succ->begin(), SecondBeginCatch,
                                    VisitedBlocks))
      return false;
  }
  return true;
}
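Examples 2 and 3 are mirror images: one walks backwards to prove every path into a block passed through eh.begincatch, the other walks forwards to prove every path out reaches eh.endcatch. A hypothetical driver tying them together might look like this; the name and setup are illustrative, not part of WinEHPrepare, and the pre-3.8 implicit Instruction*-to-iterator conversion is assumed, matching the snippets above:

static bool catchRegionIsWellFormed(IntrinsicInst *BeginCatch,
                                    IntrinsicInst *EndCatch) {
  IntrinsicInst *SecondBeginCatch = nullptr;
  IntrinsicInst *SecondEndCatch = nullptr;
  SmallSet<BasicBlock *, 4> VisitedFwd, VisitedBwd;
  // Forward: every path after the begincatch must reach an endcatch.
  bool FwdOK = allSuccessorsReachEndCatch(
      BeginCatch->getParent(), std::next(BasicBlock::iterator(BeginCatch)),
      &SecondBeginCatch, VisitedFwd);
  // Backward: every path into the endcatch must have passed a begincatch.
  bool BwdOK = allPredCameFromBeginCatch(
      EndCatch->getParent(), BasicBlock::reverse_iterator(EndCatch),
      &SecondEndCatch, VisitedBwd);
  return FwdOK && BwdOK;
}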
Example 4: OptimizeCallInst
bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
  BasicBlock *BB = CI->getParent();

  // Lower inline assembly if we can.
  // If we found an inline asm expression, and if the target knows how to
  // lower it to normal LLVM code, do so now.
  if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
    if (TLI->ExpandInlineAsm(CI)) {
      // Avoid invalidating the iterator.
      CurInstIterator = BB->begin();
      // Avoid processing instructions out of order, which could cause
      // reuse before a value is defined.
      SunkAddrs.clear();
      return true;
    }
    // Sink address computing for memory operands into the block.
    if (OptimizeInlineAsmInst(CI))
      return true;
  }

  // Lower all uses of llvm.objectsize.*
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (II && II->getIntrinsicID() == Intrinsic::objectsize) {
    bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
    Type *ReturnTy = CI->getType();
    Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);

    // Substituting this can cause recursive simplifications, which can
    // invalidate our iterator. Use a WeakVH to hold onto it in case this
    // happens.
    WeakVH IterHandle(CurInstIterator);
    ReplaceAndSimplifyAllUses(CI, RetVal, TLI ? TLI->getTargetData() : 0,
                              TLInfo, ModifiedDT ? 0 : DT);

    // If the iterator instruction was recursively deleted, start over at the
    // start of the block.
    if (IterHandle != CurInstIterator) {
      CurInstIterator = BB->begin();
      SunkAddrs.clear();
    }
    return true;
  }

  // From here on out we're working with named functions.
  if (CI->getCalledFunction() == 0) return false;

  // We'll need TargetData from here on out.
  const TargetData *TD = TLI ? TLI->getTargetData() : 0;
  if (!TD) return false;

  // Lower all default uses of _chk calls. This is very similar
  // to what InstCombineCalls does, but here we are only lowering calls
  // that have the default "don't know" as the objectsize. Anything else
  // should be left alone.
  CodeGenPrepareFortifiedLibCalls Simplifier;
  return Simplifier.fold(CI, TD);
}
Example 5: handle_instrinsic
void AAAnalyzer::handle_instrinsic(Instruction *inst) {
  IntrinsicInst *call = (IntrinsicInst *) inst;
  switch (call->getIntrinsicID()) {
  // Variable Argument Handling Intrinsics
  case Intrinsic::vastart: {
    Value *va_list_ptr = call->getArgOperand(0);
    wrapValue(va_list_ptr);
    break;
  }
  case Intrinsic::vaend:
    // Nothing to do.
    break;
  case Intrinsic::vacopy: // the same as memmove/memcpy
  // Standard C Library Intrinsics
  case Intrinsic::memmove:
  case Intrinsic::memcpy: {
    Value *src_ptr = call->getArgOperand(0);
    Value *dst_ptr = call->getArgOperand(1);
    DyckVertex *src_ptr_ver = wrapValue(src_ptr);
    DyckVertex *dst_ptr_ver = wrapValue(dst_ptr);
    DyckVertex *src_ver = addPtrTo(src_ptr_ver, NULL);
    DyckVertex *dst_ver = addPtrTo(dst_ptr_ver, NULL);
    makeAlias(src_ver, dst_ver);
    break;
  }
  case Intrinsic::memset: {
    Value *ptr = call->getArgOperand(0);
    Value *val = call->getArgOperand(1);
    addPtrTo(wrapValue(ptr), wrapValue(val));
    break;
  }
  /// @todo other C lib intrinsics
  // Accurate Garbage Collection Intrinsics
  // Code Generator Intrinsics
  // Bit Manipulation Intrinsics
  // Exception Handling Intrinsics
  // Trampoline Intrinsics
  // Memory Use Markers
  // General Intrinsics
  // Arithmetic with Overflow Intrinsics
  // Specialised Arithmetic Intrinsics
  // Half Precision Floating Point Intrinsics
  // Debugger Intrinsics
  default:
    break;
  }
}
Example 6: promoteUniformBitreverseToI32
bool AMDGPUCodeGenPrepare::promoteUniformBitreverseToI32(
    IntrinsicInst &I) const {
  assert(I.getIntrinsicID() == Intrinsic::bitreverse &&
         "I must be bitreverse intrinsic");
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Function *I32 =
      Intrinsic::getDeclaration(Mod, Intrinsic::bitreverse, { I32Ty });
  Value *ExtOp = Builder.CreateZExt(I.getOperand(0), I32Ty);
  Value *ExtRes = Builder.CreateCall(I32, { ExtOp });
  Value *LShrOp =
      Builder.CreateLShr(ExtRes, 32 - getBaseElementBitWidth(I.getType()));
  Value *TruncRes =
      Builder.CreateTrunc(LShrOp, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}
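The shift in this promotion is the whole trick: reversing the 32-bit zero-extension of an N-bit value leaves the reversed N bits in the top of the word, so shifting right by 32 - N and truncating yields the N-bit bitreverse. A standalone C++ sanity check of that arithmetic for N = 16 (brev16/brev32 are hypothetical reference implementations, not LLVM API):

#include <cassert>
#include <cstdint>

static uint32_t brev32(uint32_t X) {
  uint32_t R = 0;
  for (int I = 0; I < 32; ++I)
    R |= ((X >> I) & 1u) << (31 - I); // bit I moves to bit 31 - I
  return R;
}

static uint16_t brev16(uint16_t X) {
  uint16_t R = 0;
  for (int I = 0; I < 16; ++I)
    R |= static_cast<uint16_t>(((X >> I) & 1u) << (15 - I));
  return R;
}

int main() {
  for (uint32_t X = 0; X <= 0xFFFF; ++X) {
    // zext to i32, bitreverse, lshr by 32 - 16, trunc back to i16.
    uint16_t Promoted = static_cast<uint16_t>(brev32(X) >> (32 - 16));
    assert(Promoted == brev16(static_cast<uint16_t>(X)));
  }
  return 0;
}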
Example 7: getStoredPointerOperand
/// getStoredPointerOperand - Return the pointer that is being written to.
static Value *getStoredPointerOperand(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return MI->getDest();

  IntrinsicInst *II = cast<IntrinsicInst>(I);
  switch (II->getIntrinsicID()) {
  default: llvm_unreachable("Unexpected intrinsic!");
  case Intrinsic::init_trampoline:
    return II->getArgOperand(0);
  }
}
Example 8: isShortenable
/// isShortenable - Returns true if this instruction can be safely shortened in
/// length.
static bool isShortenable(Instruction *I) {
  // Don't shorten stores for now
  if (isa<StoreInst>(I))
    return false;

  IntrinsicInst *II = cast<IntrinsicInst>(I);
  switch (II->getIntrinsicID()) {
  default: return false;
  case Intrinsic::memset:
  case Intrinsic::memcpy:
    // Do shorten memory intrinsics.
    return true;
  }
}
Example 9: isLegalToShrinkwrapLifetimeMarkers
bool CodeExtractor::isLegalToShrinkwrapLifetimeMarkers(
    Instruction *Addr) const {
  AllocaInst *AI = cast<AllocaInst>(Addr->stripInBoundsConstantOffsets());
  Function *Func = (*Blocks.begin())->getParent();
  for (BasicBlock &BB : *Func) {
    if (Blocks.count(&BB))
      continue;
    for (Instruction &II : BB) {
      if (isa<DbgInfoIntrinsic>(II))
        continue;

      unsigned Opcode = II.getOpcode();
      Value *MemAddr = nullptr;
      switch (Opcode) {
      case Instruction::Store:
      case Instruction::Load: {
        if (Opcode == Instruction::Store) {
          StoreInst *SI = cast<StoreInst>(&II);
          MemAddr = SI->getPointerOperand();
        } else {
          LoadInst *LI = cast<LoadInst>(&II);
          MemAddr = LI->getPointerOperand();
        }
        // A global variable cannot be aliased with locals.
        if (dyn_cast<Constant>(MemAddr))
          break;
        Value *Base = MemAddr->stripInBoundsConstantOffsets();
        if (!dyn_cast<AllocaInst>(Base) || Base == AI)
          return false;
        break;
      }
      default: {
        IntrinsicInst *IntrInst = dyn_cast<IntrinsicInst>(&II);
        if (IntrInst) {
          if (IntrInst->getIntrinsicID() == Intrinsic::lifetime_start ||
              IntrInst->getIntrinsicID() == Intrinsic::lifetime_end)
            break;
          return false;
        }
        // Treat all the other cases conservatively if they may have side
        // effects.
        if (II.mayHaveSideEffects())
          return false;
      }
      }
    }
  }
  return true;
}
Example 10: visitIntrinsicInst
bool AMDGPUCodeGenPrepare::visitIntrinsicInst(IntrinsicInst &I) {
  switch (I.getIntrinsicID()) {
  case Intrinsic::bitreverse:
    return visitBitreverseIntrinsicInst(I);
  default:
    return false;
  }
}
Example 11: FindAllCleanupSelectors
/// FindAllCleanupSelectors - Find all eh.selector calls that are clean-ups.
void DwarfEHPrepare::
FindAllCleanupSelectors(SmallPtrSet<IntrinsicInst*, 32> &Sels,
                        SmallPtrSet<IntrinsicInst*, 32> &CatchAllSels) {
  for (Value::use_iterator
         I = SelectorIntrinsic->use_begin(),
         E = SelectorIntrinsic->use_end(); I != E; ++I) {
    IntrinsicInst *II = cast<IntrinsicInst>(*I);

    if (II->getParent()->getParent() != F)
      continue;

    if (!HasCatchAllInSelector(II))
      Sels.insert(II);
    else
      CatchAllSels.insert(II);
  }
}
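The loop above walks every use of the eh.selector declaration module-wide and keeps only the calls that live in the current function F. The same pattern in a hypothetical generic form, written against a newer API (users() and llvm::function_ref); a sketch for illustration, not DwarfEHPrepare code:

#include "llvm/ADT/STLExtras.h" // for llvm::function_ref

// Hypothetical helper: visit every call to a given intrinsic declaration
// that lives inside function F.
static void forEachIntrinsicCallIn(Function *IntrinsicDecl, Function *F,
                                   function_ref<void(IntrinsicInst *)> Visit) {
  for (User *U : IntrinsicDecl->users())
    if (auto *II = dyn_cast<IntrinsicInst>(U))
      if (II->getParent()->getParent() == F) // keep call sites in F only
        Visit(II);
}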
Example 12: computeBBInlineCost
// TODO: Ideally we should share Inliner's InlineCost Analysis code.
// For now use a simplified version. The returned 'InlineCost' will be used
// to estimate the size cost as well as runtime cost of the BB.
int PartialInlinerImpl::computeBBInlineCost(BasicBlock *BB) {
  int InlineCost = 0;
  const DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
    if (isa<DbgInfoIntrinsic>(I))
      continue;

    switch (I->getOpcode()) {
    case Instruction::BitCast:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::Alloca:
      continue;
    case Instruction::GetElementPtr:
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        continue;
    default:
      break;
    }

    IntrinsicInst *IntrInst = dyn_cast<IntrinsicInst>(I);
    if (IntrInst) {
      if (IntrInst->getIntrinsicID() == Intrinsic::lifetime_start ||
          IntrInst->getIntrinsicID() == Intrinsic::lifetime_end)
        continue;
    }

    if (CallInst *CI = dyn_cast<CallInst>(I)) {
      InlineCost += getCallsiteCost(CallSite(CI), DL);
      continue;
    }

    if (InvokeInst *II = dyn_cast<InvokeInst>(I)) {
      InlineCost += getCallsiteCost(CallSite(II), DL);
      continue;
    }

    if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) {
      InlineCost += (SI->getNumCases() + 1) * InlineConstants::InstrCost;
      continue;
    }
    InlineCost += InlineConstants::InstrCost;
  }
  return InlineCost;
}
Example 13: visitBitreverseIntrinsicInst
bool AMDGPUCodeGenPrepare::visitBitreverseIntrinsicInst(IntrinsicInst &I) {
  bool Changed = false;
  if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
      DA->isUniform(&I))
    Changed |= promoteUniformBitreverseToI32(I);

  return Changed;
}
Example 14: splitOverflowIntrinsic
/// \brief Split sadd.with.overflow into add + sadd.with.overflow to allow
/// analysis and optimization.
///
/// \return A new value representing the non-overflowing add if possible,
/// otherwise return the original value.
Instruction *SimplifyIndvar::splitOverflowIntrinsic(Instruction *IVUser,
                                                    const DominatorTree *DT) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(IVUser);
  if (!II || II->getIntrinsicID() != Intrinsic::sadd_with_overflow)
    return IVUser;

  // Find a branch guarded by the overflow check.
  BranchInst *Branch = 0;
  Instruction *AddVal = 0;
  for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
       UI != E; ++UI) {
    if (ExtractValueInst *ExtractInst = dyn_cast<ExtractValueInst>(*UI)) {
      if (ExtractInst->getNumIndices() != 1)
        continue;
      if (ExtractInst->getIndices()[0] == 0)
        AddVal = ExtractInst;
      else if (ExtractInst->getIndices()[0] == 1 && ExtractInst->hasOneUse())
        Branch = dyn_cast<BranchInst>(ExtractInst->use_back());
    }
  }
  if (!AddVal || !Branch)
    return IVUser;

  BasicBlock *ContinueBB = Branch->getSuccessor(1);
  if (llvm::next(pred_begin(ContinueBB)) != pred_end(ContinueBB))
    return IVUser;

  // Check if all users of the add are provably NSW.
  bool AllNSW = true;
  for (Value::use_iterator UI = AddVal->use_begin(), E = AddVal->use_end();
       UI != E; ++UI) {
    if (Instruction *UseInst = dyn_cast<Instruction>(*UI)) {
      BasicBlock *UseBB = UseInst->getParent();
      if (PHINode *PHI = dyn_cast<PHINode>(UseInst))
        UseBB = PHI->getIncomingBlock(UI);
      if (!DT->dominates(ContinueBB, UseBB)) {
        AllNSW = false;
        break;
      }
    }
  }
  if (!AllNSW)
    return IVUser;

  // Go for it...
  IRBuilder<> Builder(IVUser);
  Instruction *AddInst = dyn_cast<Instruction>(
      Builder.CreateNSWAdd(II->getOperand(0), II->getOperand(1)));

  // The caller expects the new add to have the same form as the intrinsic. The
  // IV operand position must be the same.
  assert((AddInst->getOpcode() == Instruction::Add &&
          AddInst->getOperand(0) == II->getOperand(0)) &&
         "Bad add instruction created from overflow intrinsic.");

  AddVal->replaceAllUsesWith(AddInst);
  DeadInsts.push_back(AddVal);
  return AddInst;
}
Example 15: isCallPromotable
static bool isCallPromotable(CallInst *CI) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (!II)
    return false;

  switch (II->getIntrinsicID()) {
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  case Intrinsic::invariant_start:
  case Intrinsic::invariant_end:
  case Intrinsic::invariant_group_barrier:
  case Intrinsic::objectsize:
    return true;
  default:
    return false;
  }
}
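A plausible call site for this predicate, checking that every call that touches an alloca is promotable before rewriting it; llvm::all_of and the surrounding names are assumptions for illustration, not taken from the pass this example comes from:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"

// Hypothetical wrapper: promotion is legal only if every call among the
// users is one of the whitelisted intrinsics accepted above.
static bool allUsesPromotable(llvm::ArrayRef<llvm::CallInst *> CallUsers) {
  return llvm::all_of(CallUsers, isCallPromotable);
}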