本文整理汇总了C++中GetElementPtrInst类的典型用法代码示例。如果您正苦于以下问题：C++ GetElementPtrInst类的具体用法？C++ GetElementPtrInst怎么用？C++ GetElementPtrInst使用的例子？那么，这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了GetElementPtrInst类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: isGEPOffsetConstant
/// \brief Check whether a GEP's indices are all constant.
///
/// Respects any simplified values known during the analysis of this callsite.
bool CallAnalyzer::isGEPOffsetConstant(GetElementPtrInst &GEP) {
  for (User::op_iterator Idx = GEP.idx_begin(), IdxEnd = GEP.idx_end();
       Idx != IdxEnd; ++Idx) {
    // An index counts as constant if it is a literal Constant, or if this
    // callsite's analysis already simplified it to one.
    if (isa<Constant>(*Idx))
      continue;
    if (SimplifiedValues.lookup(*Idx))
      continue;
    return false;
  }
  return true;
}
示例2: visitGetElementPtrInst
// Record the DSNode backing a GEP's base pointer when it is an alloca node
// that has not been completely folded; such nodes are collected in
// unsafeAllocaNodes for later processing.
void visitGetElementPtrInst(GetElementPtrInst &GEP) {
  Value *Base = GEP.getPointerOperand();
  Function *F = GEP.getParent()->getParent();
  DSGraph *TDG = budsPass->getDSGraph(*F);
  DSNode *Node = TDG->getNodeForValue(Base).getNode();
  // FIXME: do we really need markReachableAllocas(Node) here?
  if (!Node)
    return;
  if (Node->isAllocaNode() && !Node->isNodeCompletelyFolded())
    unsafeAllocaNodes.push_back(Node);
}
示例3: assert
// Collect the load/store instructions in BB that access a RAM, and classify
// each access either as a constant offset from the loop's canonical induction
// variable or as an opaque address (the GEP itself). Results are recorded both
// per-instruction (memAccessMap) and grouped per-RAM (memoryAccesses) for
// later loop-carried dependence constraints.
//
// globalRAM      - RAM assumed for every access when LOCAL_RAMS is disabled.
// memAccessMap   - out: access descriptor keyed by instruction.
// memoryAccesses - out: access descriptors grouped by the RAM they touch.
void ModuloScheduler::findLoopCarriedMemoryAccesses(
    RAM *globalRAM, std::map<Instruction *, MEM_ACCESS> &memAccessMap,
    std::map<RAM *, std::vector<MEM_ACCESS>> &memoryAccesses) {
  assert(alloc);
  // add additional memory constraints for local memory read/writes
  for (BasicBlock::iterator I = BB->begin(), ie = BB->end(); I != ie; I++) {
    Value *addr = NULL;
    std::string memtype;
    if (LoadInst *L = dyn_cast<LoadInst>(I)) {
      addr = L->getPointerOperand();
      memtype = "load";
    } else if (StoreInst *S = dyn_cast<StoreInst>(I)) {
      addr = S->getPointerOperand();
      memtype = "store";
    } else {
      continue;
    }
    // Resolve which RAM this address refers to.
    RAM *ram;
    if (LEGUP_CONFIG->getParameterInt("LOCAL_RAMS")) {
      ram = alloc->getLocalRamFromValue(addr);
    } else {
      ram = globalRAM;
    }
    if (!ram)
      continue;
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(addr);
    if (!GEP)
      continue;
    // Operand 2 is the second index of the GEP; a GEP with fewer than two
    // indices (< 3 operands including the base pointer) has no such operand,
    // and getOperand(2) would be out of range. Skip those accesses.
    if (GEP->getNumOperands() < 3)
      continue;
    Value *offset = GEP->getOperand(2);
    MEM_ACCESS access;
    access.I = I;
    access.ram = ram;
    int indexOffset = 0;
    if (findInductionOffset(offset, ram,
                            loop->getCanonicalInductionVariable(), memtype,
                            &indexOffset)) {
      // found an offset to the induction variable
      access.type = MEM_ACCESS::InductionOffset;
      access.offset = indexOffset;
    } else {
      // Offset is not a simple induction-variable offset; keep the GEP as an
      // opaque address for conservative dependence handling.
      access.type = MEM_ACCESS::Address;
      access.ptr = GEP;
    }
    memoryAccesses[ram].push_back(access);
    memAccessMap[I] = access;
  }
}
示例4: TEST
// Verify that cloning a GEP preserves its inbounds flag: a plain GEP clones
// to a non-inbounds GEP, and after setIsInBounds() the clone is inbounds too.
TEST(CloneInstruction, Inbounds) {
  LLVMContext context;
  Value *V = new Argument(Type::getInt32PtrTy(context));
  Constant *Z = Constant::getNullValue(Type::getInt32Ty(context));
  std::vector<Value *> ops;
  ops.push_back(Z);
  GetElementPtrInst *GEP = GetElementPtrInst::Create(V, ops.begin(), ops.end());

  // Keep the clones in named locals so they can be freed; the original code
  // leaked both clones as well as GEP and V (none are inserted into a parent,
  // so nothing owns them).
  GetElementPtrInst *CloneBefore = cast<GetElementPtrInst>(GEP->clone());
  EXPECT_FALSE(CloneBefore->isInBounds());

  GEP->setIsInBounds();
  GetElementPtrInst *CloneAfter = cast<GetElementPtrInst>(GEP->clone());
  EXPECT_TRUE(CloneAfter->isInBounds());

  delete CloneAfter;
  delete CloneBefore;
  delete GEP;
  delete V;
}
示例5: DEBUG
// Diagnostic pass over the whole module: dumps every load (with its result
// and pointer-operand names) and every GEP (with its result name) to the
// debug stream. No IR is modified.
void smtit::performTest1() {
  for (Module::iterator FI = Mod->begin(), FE = Mod->end(); FI != FE; ++FI) {
    Function *Func = &*FI;
    for (Function::iterator BI = Func->begin(), BE = Func->end(); BI != BE;
         ++BI) {
      BasicBlock *BB = &*BI;
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
        Instruction *BBI = &*I;
        // Use dyn_cast directly in the condition instead of the redundant
        // isa<> check followed by dyn_cast<> (one type test instead of two).
        if (LoadInst *li = dyn_cast<LoadInst>(BBI)) {
          Value *ptrOp = li->getPointerOperand();
          DEBUG(errs() << *li << "\t Result Name: " << li->getName() << "\t Pointer Name: " << ptrOp->getName() << "\n");
        } else if (GetElementPtrInst *gep = dyn_cast<GetElementPtrInst>(BBI)) {
          DEBUG(errs() << *gep << "\t Result Name: " << gep->getName() << "\n");
        }
        // NOTE(review): earlier revisions also dumped stores and def-use
        // chains here (removed dead commented-out code); re-add via
        // BBI->users() if that output is needed again.
      }
    }
  }
}
示例6: assert
// Returns (building and caching on first use) the multimap from register name
// to the GEP instructions into `fn`'s first argument that address that
// register. Covers both explicit uses (existing GEPs on the first argument)
// and implicit uses (registers the function mod/refs without a GEP, for which
// a GEP is synthesized at the top of the entry block).
// NOTE(review): assumes the first parameter is the "register struct" pointer
// -- enforced only by the isStructType assert below.
unordered_multimap<const char*, Value*>& ArgumentRecovery::exposeAllRegisters(llvm::Function* fn)
{
  // Fast path: this function was already processed.
  auto iter = registerAddresses.find(fn);
  if (iter != registerAddresses.end())
  {
    return iter->second;
  }
  // operator[] inserts an empty map; all code paths below fill or return it.
  auto& addresses = registerAddresses[fn];
  if (fn->isDeclaration())
  {
    // If a function has no body, it doesn't need a register map.
    return addresses;
  }
  Argument* firstArg = fn->arg_begin();
  assert(isStructType(firstArg));
  // Get explicitly-used GEPs
  const auto& target = getAnalysis<TargetInfo>();
  for (User* user : firstArg->users())
  {
    if (auto gep = dyn_cast<GetElementPtrInst>(user))
    {
      // Key by the largest register overlapping the accessed one (e.g. an
      // access through a sub-register maps to its full-width register).
      const char* name = target.registerName(*gep);
      const char* largestRegister = target.largestOverlappingRegister(name);
      addresses.insert({largestRegister, gep});
    }
  }
  // Synthesize GEPs for implicitly-used registers.
  // Implicit uses are when a function callee uses a register without there being a reference in the caller.
  // This happens either because the parameter is passed through, or because the register is a scratch register that
  // the caller doesn't use itself.
  auto insertionPoint = fn->begin()->begin();
  auto& regUse = getAnalysis<RegisterUse>();
  const auto& modRefInfo = *regUse.getModRefInfo(fn);
  for (const auto& pair : modRefInfo)
  {
    // Only synthesize for registers that are mod/ref'd and not already mapped.
    if ((pair.second & RegisterUse::ModRef) != 0 && addresses.find(pair.first) == addresses.end())
    {
      // Need a GEP here, because the function ModRefs the register implicitly.
      GetElementPtrInst* synthesizedGep = target.getRegister(firstArg, pair.first);
      synthesizedGep->insertBefore(insertionPoint);
      addresses.insert({pair.first, synthesizedGep});
    }
  }
  return addresses;
}
示例7: getGEPInductionOperand
/// If the argument is a GEP, then returns the operand identified by
/// getGEPInductionOperand. However, if there is some other non-loop-invariant
/// operand, it returns that instead.
Value *llvm::stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP)
    return Ptr;

  unsigned IndVarOp = getGEPInductionOperand(GEP);

  // Every operand other than the induction operand must be loop-invariant;
  // if any is not, give the original pointer back unchanged.
  for (unsigned Idx = 0, NumOps = GEP->getNumOperands(); Idx != NumOps; ++Idx) {
    if (Idx == IndVarOp)
      continue;
    if (!SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(Idx)), Lp))
      return Ptr;
  }
  return GEP->getOperand(IndVarOp);
}
示例8: preprocess
//
// Method: preprocess()
//
// Description:
//  Rewrite the pattern
//    %p = bitcast %p1 to T1
//    gep(%p) ...
//  into
//    gep (bitcast %p1 to T1), ...
//  when the bitcast source is a Constant, by folding the instruction-level
//  bitcast into a ConstantExpr bitcast used directly as the GEP base.
//
// Inputs:
//  M - A reference to the LLVM module to process
//
// Outputs:
//  M - The transformed LLVM module.
//
static void preprocess(Module& M) {
  for (Module::iterator F = M.begin(); F != M.end(); ++F){
    for (Function::iterator B = F->begin(), FE = F->end(); B != FE; ++B) {
      for (BasicBlock::iterator I = B->begin(), BE = B->end(); I != BE; I++) {
        // One dyn_cast replaces the original isa<> check + unconditional
        // cast<> pair (single type test, same behavior).
        GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I);
        if (!GEP)
          continue;
        if (BitCastInst *BI = dyn_cast<BitCastInst>(GEP->getOperand(0)))
          if (Constant *C = dyn_cast<Constant>(BI->getOperand(0)))
            GEP->setOperand(0, ConstantExpr::getBitCast(C, BI->getType()));
      }
    }
  }
}
示例9: visitGetElementPtr
// Model a GEP's cost for inlining. Returns true ("free") when the GEP either
// folds to base + constant offset (tracked in ConstantOffsetPtrs) or has
// all-constant indices; returns false (costs math, disables SROA on the
// underlying argument) otherwise.
bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  // Is the pointer operand rooted in an SROA-candidate argument?
  bool SROACandidate = lookupSROAArgAndCost(I.getPointerOperand(),
                                            SROAArg, CostIt);
  // Try to fold GEPs of constant-offset call site argument pointers. This
  // requires target data and inbounds GEPs.
  if (TD && I.isInBounds()) {
    // Check if we have a base + offset for the pointer.
    Value *Ptr = I.getPointerOperand();
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Ptr);
    if (BaseAndOffset.first) {
      // Check if the offset of this GEP is constant, and if so accumulate it
      // into Offset.
      if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second)) {
        // Non-constant GEPs aren't folded, and disable SROA.
        if (SROACandidate)
          disableSROA(CostIt);
        return false;
      }
      // Add the result as a new mapping to Base + Offset.
      ConstantOffsetPtrs[&I] = BaseAndOffset;
      // Also handle SROA candidates here, we already know that the GEP is
      // all-constant indexed.
      if (SROACandidate)
        SROAArgValues[&I] = SROAArg;
      return true;
    }
  }
  // No known base+offset: fall back to checking the indices directly.
  if (isGEPOffsetConstant(I)) {
    if (SROACandidate)
      SROAArgValues[&I] = SROAArg;
    // Constant GEPs are modeled as free.
    return true;
  }
  // Variable GEPs will require math and will disable SROA.
  if (SROACandidate)
    disableSROA(CostIt);
  return false;
}
示例10: utccAbort
// -- handle GetElementPtr instruction --
// Propagates the pointed-to UTCC type through a GEP: the result's pointed
// type is the saturation of the element type with any types previously
// recorded for the GEP itself and for its base pointer. The expression type
// of the GEP is derived from its own (pointer) type.
void UnsafeTypeCastingCheck::handleGetElementPtrInstruction (Instruction *inst) {
  GetElementPtrInst * ginst = dyn_cast<GetElementPtrInst>(inst);
  if (ginst == nullptr) {
    utccAbort("handleGetElementPtrInstruction cannot process with a non-getelementptr instruction");
    // Defensive: the original fell through and dereferenced the null ginst
    // below; bail out in case utccAbort ever returns.
    return;
  }
  Value *pt = ginst->getPointerOperand();
  UTCC_TYPE pt_ut_self = UH_UT;
  UTCC_TYPE pt_ut_base = UH_UT;
  UTCC_TYPE pt_ut_element = llvmT2utccT(ginst->getType()->getPointerElementType(), ginst);
  if (isVisitedPointer(ginst)) pt_ut_self = queryPointedType(ginst);
  if (isVisitedPointer(pt)) pt_ut_base = queryPointedType(pt);
  setPointedType(ginst, utSaturate(pt_ut_element,
                                   utSaturate(pt_ut_self, pt_ut_base)));
  setExprType(ginst, llvmT2utccT(ginst->getType(), ginst));
}
示例11: Builder
// Scalarize a vector GEP: split the base pointer and every index into their
// per-lane components, emit one scalar GEP per vector lane (propagating the
// inbounds flag), and gather the scalar results back as the replacement.
// Returns true if the instruction was scalarized, false if it is not a
// vector GEP.
bool Scalarizer::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
  VectorType *VT = dyn_cast<VectorType>(GEPI.getType());
  if (!VT)
    return false;
  IRBuilder<> Builder(&GEPI);
  unsigned NumElems = VT->getNumElements();
  unsigned NumIndices = GEPI.getNumIndices();
  // Per-lane views of the base pointer (operand 0) and of each index.
  Scatterer Base = scatter(&GEPI, GEPI.getOperand(0));
  SmallVector<Scatterer, 8> Ops;
  Ops.resize(NumIndices);
  for (unsigned I = 0; I < NumIndices; ++I)
    Ops[I] = scatter(&GEPI, GEPI.getOperand(I + 1));
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I) {
    // Assemble the lane-I index list from the scattered operands.
    SmallVector<Value *, 8> Indices;
    Indices.resize(NumIndices);
    for (unsigned J = 0; J < NumIndices; ++J)
      Indices[J] = Ops[J][I];
    Res[I] = Builder.CreateGEP(GEPI.getSourceElementType(), Base[I], Indices,
                               GEPI.getName() + ".i" + Twine(I));
    // Preserve inbounds on each scalar GEP (CreateGEP may constant-fold, so
    // only set the flag when the result really is a GEP instruction).
    if (GEPI.isInBounds())
      if (GetElementPtrInst *NewGEPI = dyn_cast<GetElementPtrInst>(Res[I]))
        NewGEPI->setIsInBounds();
  }
  gather(&GEPI, Res);
  return true;
}
示例12: visitGetElementPtrInst
// Visit a GEP for array-index checking: first visit the base pointer and all
// indices, then verify that the address this GEP corresponds to still has
// version 0; a nonzero version means the index is not constant, which this
// checker rejects by throwing ArrayIndexIsNotConstant.
void ArrayIndexChecker::visitGetElementPtrInst(GetElementPtrInst& I) {
  DEBUG(dbgs() << "ArrayIndexChecker: visiting GEP " << I << "\n");
  visitValue(*I.getPointerOperand());
  for (auto Idx = I.idx_begin(), E = I.idx_end(); Idx != E; ++Idx) {
    visitValue(**Idx);
  }
  // The GEP must have been registered in ptr_value_vec_ by an earlier pass;
  // its position is the index used to look up its address record.
  auto pos = std::find(ptr_value_vec_.begin(), ptr_value_vec_.end(), &I);
  assert(pos != ptr_value_vec_.end());
  index_t varIdx = pos - ptr_value_vec_.begin();
  assert(idx2addr_.find(varIdx) != idx2addr_.end());
  if (addr2version_[idx2addr_[varIdx]] != 0)
    throw ArrayIndexIsNotConstant; // fixed stray ";;" (empty statement)
  DEBUG(dbgs() << "ArrayIndexChecker: visited GEP\n");
}
示例13: assert
// Returns a clone of `I` with its operands converted to those specified in
// ValueWithNewAddrSpace. Due to potential cycles in the data flow graph, an
// operand whose address space needs to be modified might not exist in
// ValueWithNewAddrSpace. In that case, uses undef as a placeholder operand and
// adds that operand use to UndefUsesToFix so that caller can fix them later.
//
// Note that we do not necessarily clone `I`, e.g., if it is an addrspacecast
// from a pointer whose type already matches. Therefore, this function returns a
// Value* instead of an Instruction*.
static Value *cloneInstructionWithNewAddressSpace(
    Instruction *I, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    SmallVectorImpl<const Use *> *UndefUsesToFix) {
  // Same pointee type as I's result, but in the new address space.
  Type *NewPtrType =
      I->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);
  if (I->getOpcode() == Instruction::AddrSpaceCast) {
    Value *Src = I->getOperand(0);
    // Because `I` is flat, the source address space must be specific.
    // Therefore, the inferred address space must be the source space, according
    // to our algorithm.
    assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
    if (Src->getType() != NewPtrType)
      return new BitCastInst(Src, NewPtrType);
    return Src;
  }
  // Computes the converted pointer operands. NewPointerOperands is positional:
  // entry k corresponds to operand k of I (nullptr for non-pointer operands).
  SmallVector<Value *, 4> NewPointerOperands;
  for (const Use &OperandUse : I->operands()) {
    if (!OperandUse.get()->getType()->isPointerTy())
      NewPointerOperands.push_back(nullptr);
    else
      NewPointerOperands.push_back(operandWithNewAddressSpaceOrCreateUndef(
          OperandUse, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix));
  }
  switch (I->getOpcode()) {
  case Instruction::BitCast:
    return new BitCastInst(NewPointerOperands[0], NewPtrType);
  case Instruction::PHI: {
    assert(I->getType()->isPointerTy());
    PHINode *PHI = cast<PHINode>(I);
    PHINode *NewPHI = PHINode::Create(NewPtrType, PHI->getNumIncomingValues());
    for (unsigned Index = 0; Index < PHI->getNumIncomingValues(); ++Index) {
      // Map incoming-value index to its operand number so the positional
      // NewPointerOperands lookup lines up with the right incoming value.
      unsigned OperandNo = PHINode::getOperandNumForIncomingValue(Index);
      NewPHI->addIncoming(NewPointerOperands[OperandNo],
                          PHI->getIncomingBlock(Index));
    }
    return NewPHI;
  }
  case Instruction::GetElementPtr: {
    GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
    GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
        GEP->getSourceElementType(), NewPointerOperands[0],
        SmallVector<Value *, 4>(GEP->idx_begin(), GEP->idx_end()));
    NewGEP->setIsInBounds(GEP->isInBounds());
    return NewGEP;
  }
  case Instruction::Select: {
    assert(I->getType()->isPointerTy());
    // Operand 0 is the (non-pointer) condition; 1 and 2 are the pointer arms.
    return SelectInst::Create(I->getOperand(0), NewPointerOperands[1],
                              NewPointerOperands[2], "", nullptr, I);
  }
  default:
    llvm_unreachable("Unexpected opcode");
  }
}
示例14:
/*
 * Very sloppy implementation for quick prototyping
 * // TODO Assumption is that the first field contains the number of iterations -- if not, then modify source for now
 *
 * Scans the users of `context` for a GEP whose second index is the constant 0
 * (i.e. the first field of the context struct) and returns the value stored
 * into that field, which is taken to be the parallel loop's iteration count.
 * Returns NULL if no such store is found.
 */
Value *HeteroOMPTransform::find_loop_upper_bound(Value *context) {
  // TODO Assumption is that the first field contains the number of iterations -- if not, then modify source for now
  for (Value::use_iterator i = context->use_begin(), e = context->use_end(); i != e; ++i) {
    Instruction *insn = dyn_cast<Instruction>(*i);
    // A use need not be an Instruction; the original passed a possibly-null
    // insn to dyn_cast<GetElementPtrInst>, which requires a non-null pointer.
    if (!insn)
      continue;
    GetElementPtrInst *GEP; StoreInst *SI;
    if ((GEP = dyn_cast<GetElementPtrInst>(insn)) &&
        isa<ConstantInt>(GEP->getOperand(2)) &&
        ((cast<ConstantInt>(GEP->getOperand(2)))->equalsInt(0))) { /// README:NOTE THE ASSUMPTION THAT THE FIRST ELEMENT IN THE CONTEXT IS MAX ITERATION OF PARALLEL LOOP
      for (Value::use_iterator I = insn->use_begin(), E = insn->use_end(); I != E; ++I) {
        if ((SI = dyn_cast<StoreInst>(*I))) {
          Value *op_0 = SI->getOperand(0);
          return op_0;
        }
      }
    }
  }
  return NULL;
}
示例15: if
/// Determines whether a phi corresponds to an inbounds recurrence where the
/// base is not a known nonnull-or-poison value. Returns the base value, or
/// null if the phi doesn't correspond to such a recurrence.
Value *NullCheckElimination::isNontrivialInBoundsRecurrence(PHINode *PN) {
  // A recurrence phi has exactly two incoming values.
  if (PN->getNumOperands() != 2)
    return nullptr;

  // Exactly one incoming value must be an inbounds GEP (the step on the
  // back edge); the other incoming value is the candidate base.
  Value *Base = nullptr;
  GetElementPtrInst *Step = nullptr;
  if (auto *GEP = castToInBoundsGEP(PN->getOperand(0))) {
    Step = GEP;
    Base = PN->getOperand(1);
  } else if (auto *GEP = castToInBoundsGEP(PN->getOperand(1))) {
    Step = GEP;
    Base = PN->getOperand(0);
  }
  if (!Step)
    return nullptr;

  // Reject bases already known nonnull-or-poison, and require the GEP to
  // step from the phi itself (a true recurrence).
  if (NonNullOrPoisonValues.count(Base) || Step->getOperand(0) != PN)
    return nullptr;
  return Base;
}