This article collects typical usage examples of the C++ method IntrinsicInst::getArgOperand, drawn from real LLVM-based projects. If you are wondering how IntrinsicInst::getArgOperand is used in practice, the curated examples below should help; you can also explore the enclosing IntrinsicInst class for related usage.
The 11 code examples of IntrinsicInst::getArgOperand shown below are sorted by popularity by default.
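Before the examples, here is a minimal sketch of the access pattern they all share: cast an instruction to IntrinsicInst, dispatch on getIntrinsicID(), and read the call's operands positionally with getArgOperand(i). The helper name inspectMemset is hypothetical; the API calls are the LLVM 2.x/3.x-era ones the examples below use (older trees have the header at llvm/IntrinsicInst.h, newer ones at llvm/IR/IntrinsicInst.h).

#include "llvm/IntrinsicInst.h"
using namespace llvm;

// Hypothetical helper: llvm.memset takes (dst, val, len, ...), so the
// arguments can be retrieved positionally with getArgOperand(i).
static bool inspectMemset(Instruction *Inst) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
  if (!II || II->getIntrinsicID() != Intrinsic::memset)
    return false;
  Value *Dst = II->getArgOperand(0); // destination pointer
  Value *Val = II->getArgOperand(1); // byte value being stored
  Value *Len = II->getArgOperand(2); // number of bytes written
  (void)Dst; (void)Val; (void)Len;   // a real caller would act on these
  return true;
}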
Example 1: getLocForWrite
/// getLocForWrite - Return a Location stored to by the specified instruction.
/// If isRemovable returns true, this function and getLocForRead completely
/// describe the memory operations for this instruction.
static MemoryLocation getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return MemoryLocation::get(SI);

  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
    // memcpy/memmove/memset.
    MemoryLocation Loc = MemoryLocation::getForDest(MI);
    return Loc;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
  if (!II)
    return MemoryLocation();

  switch (II->getIntrinsicID()) {
  default:
    return MemoryLocation(); // Unhandled intrinsic.
  case Intrinsic::init_trampoline:
    // FIXME: We don't know the size of the trampoline, so we can't really
    // handle it here.
    return MemoryLocation(II->getArgOperand(0));
  case Intrinsic::lifetime_end: {
    uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
    return MemoryLocation(II->getArgOperand(1), Len);
  }
  }
}
Example 2: handle_instrinsic
void AAAnalyzer::handle_instrinsic(Instruction *inst) {
  IntrinsicInst *call = (IntrinsicInst *) inst;
  switch (call->getIntrinsicID()) {
  // Variable Argument Handling Intrinsics
  case Intrinsic::vastart: {
    Value *va_list_ptr = call->getArgOperand(0);
    wrapValue(va_list_ptr);
    break;
  }
  case Intrinsic::vaend:
    break;
  case Intrinsic::vacopy: // treated the same as memmove/memcpy
  // Standard C Library Intrinsics
  case Intrinsic::memmove:
  case Intrinsic::memcpy: {
    // Note: llvm.memcpy/llvm.memmove take (dst, src, len, ...), so these two
    // names are swapped relative to the intrinsic signature; the pointees are
    // made to alias either way, since makeAlias() is symmetric.
    Value *src_ptr = call->getArgOperand(0);
    Value *dst_ptr = call->getArgOperand(1);

    DyckVertex *src_ptr_ver = wrapValue(src_ptr);
    DyckVertex *dst_ptr_ver = wrapValue(dst_ptr);

    DyckVertex *src_ver = addPtrTo(src_ptr_ver, NULL);
    DyckVertex *dst_ver = addPtrTo(dst_ptr_ver, NULL);

    makeAlias(src_ver, dst_ver);
    break;
  }
  case Intrinsic::memset: {
    Value *ptr = call->getArgOperand(0);
    Value *val = call->getArgOperand(1);
    addPtrTo(wrapValue(ptr), wrapValue(val));
    break;
  }
  /// @todo other C lib intrinsics
  // Accurate Garbage Collection Intrinsics
  // Code Generator Intrinsics
  // Bit Manipulation Intrinsics
  // Exception Handling Intrinsics
  // Trampoline Intrinsics
  // Memory Use Markers
  // General Intrinsics
  // Arithmetic with Overflow Intrinsics
  // Specialised Arithmetic Intrinsics
  // Half Precision Floating Point Intrinsics
  // Debugger Intrinsics
  default:
    break;
  }
}
Example 3: OptimizeCallInst
bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
  // Lower all uses of llvm.objectsize.*
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (II && II->getIntrinsicID() == Intrinsic::objectsize) {
    bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
    const Type *ReturnTy = CI->getType();
    Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
    CI->replaceAllUsesWith(RetVal);
    CI->eraseFromParent();
    return true;
  }

  // From here on out we're working with named functions.
  if (CI->getCalledFunction() == 0) return false;

  // We'll need TargetData from here on out.
  const TargetData *TD = TLI ? TLI->getTargetData() : 0;
  if (!TD) return false;

  // Lower all default uses of _chk calls.  This is very similar
  // to what InstCombineCalls does, but here we are only lowering calls
  // that have the default "don't know" as the objectsize.  Anything else
  // should be left alone.
  CodeGenPrepareFortifiedLibCalls Simplifier;
  return Simplifier.fold(CI, TD);
}
Example 4: detectLog2OfHalf
static void detectLog2OfHalf(Value *&Op, Value *&Y, IntrinsicInst *&Log2) {
  if (!Op->hasOneUse())
    return;

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(Op);
  if (!II)
    return;
  if (II->getIntrinsicID() != Intrinsic::log2 || !II->hasUnsafeAlgebra())
    return;
  Log2 = II;

  Value *OpLog2Of = II->getArgOperand(0);
  if (!OpLog2Of->hasOneUse())
    return;

  Instruction *I = dyn_cast<Instruction>(OpLog2Of);
  if (!I)
    return;
  if (I->getOpcode() != Instruction::FMul || !I->hasUnsafeAlgebra())
    return;

  if (match(I->getOperand(0), m_SpecificFP(0.5)))
    Y = I->getOperand(1);
  else if (match(I->getOperand(1), m_SpecificFP(0.5)))
    Y = I->getOperand(0);
}
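For context, detectLog2OfHalf recognizes the pattern log2(0.5 * Y) under fast-math ("unsafe algebra") flags; the transform that consumes its output is not part of this excerpt, but it rests on the identity log2(0.5 * y) = log2(y) - 1. A quick standalone check of that identity, independent of LLVM:

#include <cassert>
#include <cmath>

int main() {
  double y = 8.0;
  // log2(0.5 * y) equals log2(y) - 1; exact for powers of two,
  // and within rounding error otherwise.
  assert(std::fabs(std::log2(0.5 * y) - (std::log2(y) - 1.0)) < 1e-12);
  return 0;
}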
Example 5: OptimizeCallInst
bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
  BasicBlock *BB = CI->getParent();

  // Lower inline assembly if we can.
  // If we found an inline asm expression, and if the target knows how to
  // lower it to normal LLVM code, do so now.
  if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
    if (TLI->ExpandInlineAsm(CI)) {
      // Avoid invalidating the iterator.
      CurInstIterator = BB->begin();
      // Avoid processing instructions out of order, which could cause
      // reuse before a value is defined.
      SunkAddrs.clear();
      return true;
    }
    // Sink address computing for memory operands into the block.
    if (OptimizeInlineAsmInst(CI))
      return true;
  }

  // Lower all uses of llvm.objectsize.*
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (II && II->getIntrinsicID() == Intrinsic::objectsize) {
    bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
    Type *ReturnTy = CI->getType();
    Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);

    // Substituting this can cause recursive simplifications, which can
    // invalidate our iterator.  Use a WeakVH to hold onto it in case this
    // happens.
    WeakVH IterHandle(CurInstIterator);

    ReplaceAndSimplifyAllUses(CI, RetVal, TLI ? TLI->getTargetData() : 0,
                              TLInfo, ModifiedDT ? 0 : DT);

    // If the iterator instruction was recursively deleted, start over at the
    // start of the block.
    if (IterHandle != CurInstIterator) {
      CurInstIterator = BB->begin();
      SunkAddrs.clear();
    }
    return true;
  }

  // From here on out we're working with named functions.
  if (CI->getCalledFunction() == 0) return false;

  // We'll need TargetData from here on out.
  const TargetData *TD = TLI ? TLI->getTargetData() : 0;
  if (!TD) return false;

  // Lower all default uses of _chk calls.  This is very similar
  // to what InstCombineCalls does, but here we are only lowering calls
  // that have the default "don't know" as the objectsize.  Anything else
  // should be left alone.
  CodeGenPrepareFortifiedLibCalls Simplifier;
  return Simplifier.fold(CI, TD);
}
Example 6: getStoredPointerOperand
/// getStoredPointerOperand - Return the pointer that is being written to.
static Value *getStoredPointerOperand(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return MI->getDest();

  IntrinsicInst *II = cast<IntrinsicInst>(I);
  switch (II->getIntrinsicID()) {
  default: llvm_unreachable("Unexpected intrinsic!");
  case Intrinsic::init_trampoline:
    return II->getArgOperand(0);
  }
}
Example 7: getLocForWrite
/// getLocForWrite - Return a Location stored to by the specified instruction.
/// If isRemovable returns true, this function and getLocForRead completely
/// describe the memory operations for this instruction.
static AliasAnalysis::Location
getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
  const DataLayout *DL = AA.getDataLayout();
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return AA.getLocation(SI);

  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
    // memcpy/memmove/memset.
    AliasAnalysis::Location Loc = AA.getLocationForDest(MI);
    // If we don't have target data around, an unknown size in Location means
    // that we should use the size of the pointee type.  This isn't valid for
    // memset/memcpy, which writes more than an i8.
    if (Loc.Size == AliasAnalysis::UnknownSize && DL == nullptr)
      return AliasAnalysis::Location();
    return Loc;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
  if (!II) return AliasAnalysis::Location();

  switch (II->getIntrinsicID()) {
  default: return AliasAnalysis::Location(); // Unhandled intrinsic.
  case Intrinsic::init_trampoline:
    // If we don't have target data around, an unknown size in Location means
    // that we should use the size of the pointee type.  This isn't valid for
    // init.trampoline, which writes more than an i8.
    if (!DL) return AliasAnalysis::Location();

    // FIXME: We don't know the size of the trampoline, so we can't really
    // handle it here.
    return AliasAnalysis::Location(II->getArgOperand(0));
  case Intrinsic::lifetime_end: {
    uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
    return AliasAnalysis::Location(II->getArgOperand(1), Len);
  }
  }
}
Example 8: CleanupSelectors
/// CleanupSelectors - Any remaining eh.selector intrinsic calls which still use
/// the "llvm.eh.catch.all.value" call need to convert to using its
/// initializer instead.
bool DwarfEHPrepare::CleanupSelectors(SmallPtrSet<IntrinsicInst*, 32> &Sels) {
  if (!EHCatchAllValue) return false;

  if (!SelectorIntrinsic) {
    SelectorIntrinsic =
      Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_selector);
    if (!SelectorIntrinsic) return false;
  }

  bool Changed = false;
  for (SmallPtrSet<IntrinsicInst*, 32>::iterator
         I = Sels.begin(), E = Sels.end(); I != E; ++I) {
    IntrinsicInst *Sel = *I;

    // Index of the "llvm.eh.catch.all.value" variable.
    unsigned OpIdx = Sel->getNumArgOperands() - 1;
    GlobalVariable *GV = dyn_cast<GlobalVariable>(Sel->getArgOperand(OpIdx));
    if (GV != EHCatchAllValue) continue;
    Sel->setArgOperand(OpIdx, EHCatchAllValue->getInitializer());
    Changed = true;
  }

  return Changed;
}
Example 9: getModRefInfo
/// getModRefInfo - Check to see if the specified callsite can clobber the
/// specified memory object. Since we only look at local properties of this
/// function, we really can't say much about this query. We do, however, use
/// simple "address taken" analysis on local objects.
AliasAnalysis::ModRefResult
BasicAliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
  const Value *Object = P->getUnderlyingObject();

  // If this is a tail call and P points to a stack location, we know that
  // the tail call cannot access or modify the local stack.
  // We cannot exclude byval arguments here; these belong to the caller of
  // the current function not to the current function, and a tail callee
  // may reference them.
  if (isa<AllocaInst>(Object))
    if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
      if (CI->isTailCall())
        return NoModRef;

  // If we can identify an object and it's known to be within the
  // same function as the call, we can ignore interprocedural concerns.
  bool EffectivelyInterprocedural =
    Interprocedural && !sameParent(Object, CS.getInstruction());

  // If the pointer is to a locally allocated object that does not escape,
  // then the call can not mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
      !EffectivelyInterprocedural &&
      isNonEscapingLocalObject(Object)) {
    bool PassedAsArg = false;
    unsigned ArgNo = 0;
    for (CallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
         CI != CE; ++CI, ++ArgNo) {
      // Only look at the no-capture pointer arguments.
      if (!(*CI)->getType()->isPointerTy() ||
          !CS.paramHasAttr(ArgNo+1, Attribute::NoCapture))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.  If not, we have to
      // assume that the call could touch the pointer, even though it doesn't
      // escape.
      if (!isNoAlias(cast<Value>(CI), ~0U, P, ~0U)) {
        PassedAsArg = true;
        break;
      }
    }

    if (!PassedAsArg)
      return NoModRef;
  }

  // Finally, handle specific knowledge of intrinsics.
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
  if (II == 0)
    return AliasAnalysis::getModRefInfo(CS, P, Size);

  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
    unsigned Len = ~0U;
    if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
      Len = LenCI->getZExtValue();
    Value *Dest = II->getArgOperand(0);
    Value *Src = II->getArgOperand(1);
    if (isNoAlias(Dest, Len, P, Size)) {
      if (isNoAlias(Src, Len, P, Size))
        return NoModRef;
      return Ref;
    }
    break;
  }
  case Intrinsic::memset:
    // Since memset is 'accesses arguments' only, the AliasAnalysis base class
    // will handle it for the variable length case.
    if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
      unsigned Len = LenCI->getZExtValue();
      Value *Dest = II->getArgOperand(0);
      if (isNoAlias(Dest, Len, P, Size))
        return NoModRef;
    }
    break;
  case Intrinsic::atomic_cmp_swap:
  case Intrinsic::atomic_swap:
  case Intrinsic::atomic_load_add:
  case Intrinsic::atomic_load_sub:
  case Intrinsic::atomic_load_and:
  case Intrinsic::atomic_load_nand:
  case Intrinsic::atomic_load_or:
  case Intrinsic::atomic_load_xor:
  case Intrinsic::atomic_load_max:
  case Intrinsic::atomic_load_min:
  case Intrinsic::atomic_load_umax:
  case Intrinsic::atomic_load_umin:
    if (TD) {
      Value *Op1 = II->getArgOperand(0);
      unsigned Op1Size = TD->getTypeStoreSize(Op1->getType());
      if (isNoAlias(Op1, Op1Size, P, Size))
        return NoModRef;
//... (remainder of this example omitted) ...
Example 10: decomposeIntrinsics
//... (beginning of this example omitted) ...
            for (int c = 1; c < GetComponentCount(arg0); ++c) {
                llvm::Value* comp = builder.CreateExtractElement(arg0, MakeUnsignedConstant(module->getContext(), c));
                newInst = builder.CreateOr(newInst, comp);
            }
        }
        break;

    case Intrinsic::gla_all:
        if (backEnd->decomposeIntrinsic(EDiAll)) {
            if (GetComponentCount(arg0) == 1)
                UnsupportedFunctionality("all() on a scalar");

            newInst = builder.CreateExtractElement(arg0, MakeUnsignedConstant(module->getContext(), 0));
            for (int c = 1; c < GetComponentCount(arg0); ++c) {
                llvm::Value* comp = builder.CreateExtractElement(arg0, MakeUnsignedConstant(module->getContext(), c));
                newInst = builder.CreateAnd(newInst, comp);
            }
        }
        break;

    case Intrinsic::gla_not:
        if (backEnd->decomposeIntrinsic(EDiNot)) {
            if (GetComponentCount(arg0) == 1)
                UnsupportedFunctionality("not() on a scalar");

            newInst = builder.CreateNot(arg0);
        }
        break;

    case Intrinsic::gla_fTextureSample:
    case Intrinsic::gla_fTextureSampleLodRefZ:
    case Intrinsic::gla_fTextureSampleLodRefZOffset:
    case Intrinsic::gla_fTextureSampleLodRefZOffsetGrad:
        if (backEnd->decomposeIntrinsic(EDiTextureProjection)) {
            // if projection flag is set, divide all coordinates (and refZ) by projection
            int texFlags = GetConstantInt(intrinsic->getArgOperand(GetTextureOpIndex(ETOFlag)));
            if (texFlags & ETFProjected) {
                // insert before intrinsic since we are not replacing it
                builder.SetInsertPoint(inst);

                // turn off projected flag to reflect decomposition
                texFlags &= ~ETFProjected;

                llvm::Value* coords = intrinsic->getArgOperand(GetTextureOpIndex(ETOCoord));

                // determine how many channels are live after decomposition
                int newCoordWidth = 0;
                switch (GetConstantInt(intrinsic->getArgOperand(gla::ETOSamplerType))) {
                case gla::ESamplerBuffer:
                case gla::ESampler1D:      newCoordWidth = 1;  break;
                case gla::ESampler2D:
                case gla::ESampler2DRect:
                case gla::ESampler2DMS:    newCoordWidth = 2;  break;
                case gla::ESampler3D:      newCoordWidth = 3;  break;
                case gla::ESamplerCube:
                    gla::UnsupportedFunctionality("projection with cube sampler");
                    break;
                default:
                    assert(0 && "Unknown sampler type");
                    break;
                }

                if (texFlags & gla::ETFArrayed)
                    gla::UnsupportedFunctionality("projection with arrayed sampler");

                // projection resides in last component
                llvm::Value* projIdx = MakeUnsignedConstant(module->getContext(), GetComponentCount(coords) - 1);
//... (remainder of this example omitted) ...
Example 11: runOnBasicBlock
bool IntrinsicCleanerPass::runOnBasicBlock(BasicBlock &b, Module &M) {
  bool dirty = false;
  bool block_split = false;

#if LLVM_VERSION_CODE <= LLVM_VERSION(3, 1)
  unsigned WordSize = TargetData.getPointerSizeInBits() / 8;
#else
  unsigned WordSize = DataLayout.getPointerSizeInBits() / 8;
#endif

  for (BasicBlock::iterator i = b.begin(), ie = b.end();
       (i != ie) && (block_split == false);) {
    IntrinsicInst *ii = dyn_cast<IntrinsicInst>(&*i);
    // increment now since LowerIntrinsic deletion makes iterator invalid.
    ++i;
    if (ii) {
      switch (ii->getIntrinsicID()) {
      case Intrinsic::vastart:
      case Intrinsic::vaend:
        break;

      // Lower vacopy so that object resolution etc is handled by
      // normal instructions.
      //
      // FIXME: This is much more target dependent than just the word size,
      // however this works for x86-32 and x86-64.
      case Intrinsic::vacopy: { // (dst, src) -> *((i8**) dst) = *((i8**) src)
        Value *dst = ii->getArgOperand(0);
        Value *src = ii->getArgOperand(1);

        if (WordSize == 4) {
          Type *i8pp = PointerType::getUnqual(PointerType::getUnqual(Type::getInt8Ty(getGlobalContext())));
          Value *castedDst = CastInst::CreatePointerCast(dst, i8pp, "vacopy.cast.dst", ii);
          Value *castedSrc = CastInst::CreatePointerCast(src, i8pp, "vacopy.cast.src", ii);
          Value *load = new LoadInst(castedSrc, "vacopy.read", ii);
          new StoreInst(load, castedDst, false, ii);
        } else {
          assert(WordSize == 8 && "Invalid word size!");
          Type *i64p = PointerType::getUnqual(Type::getInt64Ty(getGlobalContext()));
          Value *pDst = CastInst::CreatePointerCast(dst, i64p, "vacopy.cast.dst", ii);
          Value *pSrc = CastInst::CreatePointerCast(src, i64p, "vacopy.cast.src", ii);

          // Copy three word-sized slots, one at a time.
          Value *val = new LoadInst(pSrc, std::string(), ii);
          new StoreInst(val, pDst, ii);

          Value *off = ConstantInt::get(Type::getInt64Ty(getGlobalContext()), 1);
          pDst = GetElementPtrInst::Create(pDst, off, std::string(), ii);
          pSrc = GetElementPtrInst::Create(pSrc, off, std::string(), ii);
          val = new LoadInst(pSrc, std::string(), ii);
          new StoreInst(val, pDst, ii);

          pDst = GetElementPtrInst::Create(pDst, off, std::string(), ii);
          pSrc = GetElementPtrInst::Create(pSrc, off, std::string(), ii);
          val = new LoadInst(pSrc, std::string(), ii);
          new StoreInst(val, pDst, ii);
        }
        ii->removeFromParent();
        delete ii;
        break;
      }

      case Intrinsic::sadd_with_overflow:
      case Intrinsic::ssub_with_overflow:
      case Intrinsic::smul_with_overflow:
      case Intrinsic::uadd_with_overflow:
      case Intrinsic::usub_with_overflow:
      case Intrinsic::umul_with_overflow: {
        IRBuilder<> builder(ii->getParent(), ii);

        Value *op1 = ii->getArgOperand(0);
        Value *op2 = ii->getArgOperand(1);

        Value *result = 0;
        Value *result_ext = 0;
        Value *overflow = 0;

        unsigned int bw = op1->getType()->getPrimitiveSizeInBits();
        unsigned int bw2 = op1->getType()->getPrimitiveSizeInBits() * 2;

        if ((ii->getIntrinsicID() == Intrinsic::uadd_with_overflow) ||
            (ii->getIntrinsicID() == Intrinsic::usub_with_overflow) ||
            (ii->getIntrinsicID() == Intrinsic::umul_with_overflow)) {
          Value *op1ext =
            builder.CreateZExt(op1, IntegerType::get(M.getContext(), bw2));
          Value *op2ext =
            builder.CreateZExt(op2, IntegerType::get(M.getContext(), bw2));
          Value *int_max_s =
            ConstantInt::get(op1->getType(), APInt::getMaxValue(bw));
          Value *int_max =
            builder.CreateZExt(int_max_s, IntegerType::get(M.getContext(), bw2));

          if (ii->getIntrinsicID() == Intrinsic::uadd_with_overflow) {
            result_ext = builder.CreateAdd(op1ext, op2ext);
          } else if (ii->getIntrinsicID() == Intrinsic::usub_with_overflow) {
            result_ext = builder.CreateSub(op1ext, op2ext);
          } else if (ii->getIntrinsicID() == Intrinsic::umul_with_overflow) {
            result_ext = builder.CreateMul(op1ext, op2ext);
          }
          overflow = builder.CreateICmpUGT(result_ext, int_max);
        } else if ((ii->getIntrinsicID() == Intrinsic::sadd_with_overflow) ||
                   (ii->getIntrinsicID() == Intrinsic::ssub_with_overflow) ||
                   (ii->getIntrinsicID() == Intrinsic::smul_with_overflow)) {
          Value *op1ext =
            builder.CreateSExt(op1, IntegerType::get(M.getContext(), bw2));
//... (remainder of this example omitted) ...
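The unsigned cases in Example 11 detect overflow by widening: both operands are zero-extended to twice the bit width, the arithmetic is done there, and the result is compared against the original type's maximum value. Below is a standalone sketch of the same trick for 8-bit unsigned addition, in plain C++ rather than the LLVM IR the pass actually emits:

#include <cstdint>

// Mirrors the zext / add / icmp-ugt sequence above for uadd_with_overflow.
static bool uadd8_overflows(uint8_t a, uint8_t b, uint8_t &result) {
  uint16_t wide = uint16_t(a) + uint16_t(b); // "zext" both operands, add in double width
  result = static_cast<uint8_t>(wide);       // truncate back to the original width
  return wide > 0xFF;                        // overflow iff the wide result exceeds UINT8_MAX
}

int main() {
  uint8_t r;
  bool of = uadd8_overflows(200, 100, r); // 300 > 255, so of == true and r == 44
  return of ? 0 : 1;
}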