This article collects typical usage examples of the ConstantInt class in C++. If you are unsure what the C++ ConstantInt class does, how to use it, or what real usage looks like, the hand-picked class code examples here may help.
A total of 15 ConstantInt code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
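Before diving into the examples, here is a minimal sketch (not taken from any snippet below, and assuming a standard LLVM build) of the ConstantInt operations most of these examples rely on: creating a constant with ConstantInt::get, pattern-matching an operand with dyn_cast<ConstantInt>, and reading the value back with getZExtValue.
#include "llvm/IR/Constants.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
using namespace llvm;
// Hypothetical helper: read V as an unsigned 64-bit value if it is a
// ConstantInt, otherwise fall back to Default.
static uint64_t getConstOrDefault(const Value *V, uint64_t Default) {
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
    return CI->getZExtValue(); // zero-extended view of the underlying APInt
  return Default;
}
// Hypothetical helper: build an i32 constant; ConstantInt::get uniques
// constants per context, so repeated calls return the same object.
static Constant *makeInt32(LLVMContext &Ctx, uint64_t N) {
  return ConstantInt::get(Type::getInt32Ty(Ctx), N);
}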
Example 1: switch
int TargetTransformInfo::getInstructionThroughput(const Instruction *I) const {
switch (I->getOpcode()) {
case Instruction::GetElementPtr:
return getUserCost(I);
case Instruction::Ret:
case Instruction::PHI:
case Instruction::Br: {
return getCFInstrCost(I->getOpcode());
}
case Instruction::Add:
case Instruction::FAdd:
case Instruction::Sub:
case Instruction::FSub:
case Instruction::Mul:
case Instruction::FMul:
case Instruction::UDiv:
case Instruction::SDiv:
case Instruction::FDiv:
case Instruction::URem:
case Instruction::SRem:
case Instruction::FRem:
case Instruction::Shl:
case Instruction::LShr:
case Instruction::AShr:
case Instruction::And:
case Instruction::Or:
case Instruction::Xor: {
TargetTransformInfo::OperandValueKind Op1VK =
getOperandInfo(I->getOperand(0));
TargetTransformInfo::OperandValueKind Op2VK =
getOperandInfo(I->getOperand(1));
SmallVector<const Value*, 2> Operands(I->operand_values());
return getArithmeticInstrCost(I->getOpcode(), I->getType(), Op1VK,
Op2VK, TargetTransformInfo::OP_None,
TargetTransformInfo::OP_None,
Operands);
}
case Instruction::Select: {
const SelectInst *SI = cast<SelectInst>(I);
Type *CondTy = SI->getCondition()->getType();
return getCmpSelInstrCost(I->getOpcode(), I->getType(), CondTy, I);
}
case Instruction::ICmp:
case Instruction::FCmp: {
Type *ValTy = I->getOperand(0)->getType();
return getCmpSelInstrCost(I->getOpcode(), ValTy, I->getType(), I);
}
case Instruction::Store: {
const StoreInst *SI = cast<StoreInst>(I);
Type *ValTy = SI->getValueOperand()->getType();
return getMemoryOpCost(I->getOpcode(), ValTy,
SI->getAlignment(),
SI->getPointerAddressSpace(), I);
}
case Instruction::Load: {
const LoadInst *LI = cast<LoadInst>(I);
return getMemoryOpCost(I->getOpcode(), I->getType(),
LI->getAlignment(),
LI->getPointerAddressSpace(), I);
}
case Instruction::ZExt:
case Instruction::SExt:
case Instruction::FPToUI:
case Instruction::FPToSI:
case Instruction::FPExt:
case Instruction::PtrToInt:
case Instruction::IntToPtr:
case Instruction::SIToFP:
case Instruction::UIToFP:
case Instruction::Trunc:
case Instruction::FPTrunc:
case Instruction::BitCast:
case Instruction::AddrSpaceCast: {
Type *SrcTy = I->getOperand(0)->getType();
return getCastInstrCost(I->getOpcode(), I->getType(), SrcTy, I);
}
case Instruction::ExtractElement: {
const ExtractElementInst * EEI = cast<ExtractElementInst>(I);
ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
unsigned Idx = -1;
if (CI)
Idx = CI->getZExtValue();
// Try to match a reduction sequence (series of shufflevector and vector
// adds followed by an extractelement).
unsigned ReduxOpCode;
Type *ReduxType;
switch (matchVectorSplittingReduction(EEI, ReduxOpCode, ReduxType)) {
case RK_Arithmetic:
return getArithmeticReductionCost(ReduxOpCode, ReduxType,
/*IsPairwiseForm=*/false);
case RK_MinMax:
return getMinMaxReductionCost(
ReduxType, CmpInst::makeCmpResultType(ReduxType),
/*IsPairwiseForm=*/false, /*IsUnsigned=*/false);
case RK_UnsignedMinMax:
return getMinMaxReductionCost(
ReduxType, CmpInst::makeCmpResultType(ReduxType),
//......... the rest of this example is omitted .........
Example 2: switch
unsigned CostModelAnalysis::getInstructionCost(const Instruction *I) const {
if (!TTI)
return -1;
switch (I->getOpcode()) {
case Instruction::GetElementPtr:{
Type *ValTy = I->getOperand(0)->getType()->getPointerElementType();
return TTI->getAddressComputationCost(ValTy);
}
case Instruction::Ret:
case Instruction::PHI:
case Instruction::Br: {
return TTI->getCFInstrCost(I->getOpcode());
}
case Instruction::Add:
case Instruction::FAdd:
case Instruction::Sub:
case Instruction::FSub:
case Instruction::Mul:
case Instruction::FMul:
case Instruction::UDiv:
case Instruction::SDiv:
case Instruction::FDiv:
case Instruction::URem:
case Instruction::SRem:
case Instruction::FRem:
case Instruction::Shl:
case Instruction::LShr:
case Instruction::AShr:
case Instruction::And:
case Instruction::Or:
case Instruction::Xor: {
return TTI->getArithmeticInstrCost(I->getOpcode(), I->getType());
}
case Instruction::Select: {
const SelectInst *SI = cast<SelectInst>(I);
Type *CondTy = SI->getCondition()->getType();
return TTI->getCmpSelInstrCost(I->getOpcode(), I->getType(), CondTy);
}
case Instruction::ICmp:
case Instruction::FCmp: {
Type *ValTy = I->getOperand(0)->getType();
return TTI->getCmpSelInstrCost(I->getOpcode(), ValTy);
}
case Instruction::Store: {
const StoreInst *SI = cast<StoreInst>(I);
Type *ValTy = SI->getValueOperand()->getType();
return TTI->getMemoryOpCost(I->getOpcode(), ValTy,
SI->getAlignment(),
SI->getPointerAddressSpace());
}
case Instruction::Load: {
const LoadInst *LI = cast<LoadInst>(I);
return TTI->getMemoryOpCost(I->getOpcode(), I->getType(),
LI->getAlignment(),
LI->getPointerAddressSpace());
}
case Instruction::ZExt:
case Instruction::SExt:
case Instruction::FPToUI:
case Instruction::FPToSI:
case Instruction::FPExt:
case Instruction::PtrToInt:
case Instruction::IntToPtr:
case Instruction::SIToFP:
case Instruction::UIToFP:
case Instruction::Trunc:
case Instruction::FPTrunc:
case Instruction::BitCast: {
Type *SrcTy = I->getOperand(0)->getType();
return TTI->getCastInstrCost(I->getOpcode(), I->getType(), SrcTy);
}
case Instruction::ExtractElement: {
const ExtractElementInst * EEI = cast<ExtractElementInst>(I);
ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
unsigned Idx = -1;
if (CI)
Idx = CI->getZExtValue();
return TTI->getVectorInstrCost(I->getOpcode(),
EEI->getOperand(0)->getType(), Idx);
}
case Instruction::InsertElement: {
const InsertElementInst * IE = cast<InsertElementInst>(I);
ConstantInt *CI = dyn_cast<ConstantInt>(IE->getOperand(2));
unsigned Idx = -1;
if (CI)
Idx = CI->getZExtValue();
return TTI->getVectorInstrCost(I->getOpcode(),
IE->getType(), Idx);
}
case Instruction::ShuffleVector: {
const ShuffleVectorInst *Shuffle = cast<ShuffleVectorInst>(I);
Type *VecTypOp0 = Shuffle->getOperand(0)->getType();
unsigned NumVecElems = VecTypOp0->getVectorNumElements();
SmallVector<int, 16> Mask = Shuffle->getShuffleMask();
if (NumVecElems == Mask.size() && isReverseVectorMask(Mask))
return TTI->getShuffleCost(TargetTransformInfo::SK_Reverse, VecTypOp0, 0,
0);
//......... the rest of this example is omitted .........
Example 3: RemapInstruction
/// CloneBlock - The specified block is found to be reachable, clone it and
/// anything that it can reach.
void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
std::vector<const BasicBlock*> &ToClone){
WeakVH &BBEntry = VMap[BB];
// Have we already cloned this block?
if (BBEntry) return;
// Nope, clone it now.
BasicBlock *NewBB;
BBEntry = NewBB = BasicBlock::Create(BB->getContext());
if (BB->hasName()) NewBB->setName(BB->getName()+NameSuffix);
// It is only legal to clone a function if a block address within that
// function is never referenced outside of the function. Given that, we
// want to map block addresses from the old function to block addresses in
// the clone. (This is different from the generic ValueMapper
// implementation, which generates an invalid blockaddress when
// cloning a function.)
//
// Note that we don't need to fix the mapping for unreachable blocks;
// the default mapping there is safe.
if (BB->hasAddressTaken()) {
Constant *OldBBAddr = BlockAddress::get(const_cast<Function*>(OldFunc),
const_cast<BasicBlock*>(BB));
VMap[OldBBAddr] = BlockAddress::get(NewFunc, NewBB);
}
bool hasCalls = false, hasDynamicAllocas = false, hasStaticAllocas = false;
// Loop over all instructions, and copy them over, DCE'ing as we go. This
// loop doesn't include the terminator.
for (BasicBlock::const_iterator II = BB->begin(), IE = --BB->end();
II != IE; ++II) {
Instruction *NewInst = II->clone();
// Eagerly remap operands to the newly cloned instruction, except for PHI
// nodes for which we defer processing until we update the CFG.
if (!isa<PHINode>(NewInst)) {
RemapInstruction(NewInst, VMap,
ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges);
// If we can simplify this instruction to some other value, simply add
// a mapping to that value rather than inserting a new instruction into
// the basic block.
if (Value *V = SimplifyInstruction(NewInst, TD)) {
// On the off-chance that this simplifies to an instruction in the old
// function, map it back into the new function.
if (Value *MappedV = VMap.lookup(V))
V = MappedV;
VMap[II] = V;
delete NewInst;
continue;
}
}
if (II->hasName())
NewInst->setName(II->getName()+NameSuffix);
VMap[II] = NewInst; // Add instruction map to value.
NewBB->getInstList().push_back(NewInst);
hasCalls |= (isa<CallInst>(II) && !isa<DbgInfoIntrinsic>(II));
if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {
if (isa<ConstantInt>(AI->getArraySize()))
hasStaticAllocas = true;
else
hasDynamicAllocas = true;
}
}
// Finally, clone over the terminator.
const TerminatorInst *OldTI = BB->getTerminator();
bool TerminatorDone = false;
if (const BranchInst *BI = dyn_cast<BranchInst>(OldTI)) {
if (BI->isConditional()) {
// If the condition was a known constant in the callee...
ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
// Or is a known constant in the caller...
if (Cond == 0) {
Value *V = VMap[BI->getCondition()];
Cond = dyn_cast_or_null<ConstantInt>(V);
}
// Constant fold to uncond branch!
if (Cond) {
BasicBlock *Dest = BI->getSuccessor(!Cond->getZExtValue());
VMap[OldTI] = BranchInst::Create(Dest, NewBB);
ToClone.push_back(Dest);
TerminatorDone = true;
}
}
} else if (const SwitchInst *SI = dyn_cast<SwitchInst>(OldTI)) {
// If switching on a value that is a known constant in the caller.
ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition());
if (Cond == 0) { // Or known constant after constant prop in the callee...
Value *V = VMap[SI->getCondition()];
Cond = dyn_cast_or_null<ConstantInt>(V);
}
//......... the rest of this example is omitted .........
Example 4: isObjectSizeLessThanOrEq
// If we can determine that all possible objects pointed to by the provided
// pointer value are not only dereferenceable but also definitively less than
// or equal to the provided maximum size, then return true. Otherwise, return
// false (constant global values and allocas fall into this category).
//
// FIXME: This should probably live in ValueTracking (or similar).
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
const DataLayout &DL) {
SmallPtrSet<Value *, 4> Visited;
SmallVector<Value *, 4> Worklist(1, V);
do {
Value *P = Worklist.pop_back_val();
P = P->stripPointerCasts();
if (!Visited.insert(P).second)
continue;
if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
Worklist.push_back(SI->getTrueValue());
Worklist.push_back(SI->getFalseValue());
continue;
}
if (PHINode *PN = dyn_cast<PHINode>(P)) {
for (Value *IncValue : PN->incoming_values())
Worklist.push_back(IncValue);
continue;
}
if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
if (GA->mayBeOverridden())
return false;
Worklist.push_back(GA->getAliasee());
continue;
}
// If we know how big this object is, and it is less than MaxSize, continue
// searching. Otherwise, return false.
if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
if (!AI->getAllocatedType()->isSized())
return false;
ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
if (!CS)
return false;
uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
// Make sure that, even if the multiplication below would wrap as an
// uint64_t, we still do the right thing.
if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
return false;
continue;
}
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
return false;
uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
if (InitSize > MaxSize)
return false;
continue;
}
return false;
} while (!Worklist.empty());
return true;
}
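One detail in the example above worth isolating: the alloca size check widens both factors to 128 bits before multiplying, so the product of an array count and an element size cannot wrap in uint64_t. A self-contained sketch of that APInt idiom follows (the function and variable names are invented; zextOrSelf is the spelling used in the LLVM version this example comes from).
#include "llvm/ADT/APInt.h"
using namespace llvm;
// Returns true when NumElems * ElemSize, computed at 128-bit width so it
// cannot overflow, is strictly greater than MaxSize.
static bool allocationExceeds(const APInt &NumElems, uint64_t ElemSize,
                              uint64_t MaxSize) {
  APInt Total = NumElems.zextOrSelf(128) * APInt(128, ElemSize);
  return Total.ugt(MaxSize);
}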
Example 5: getPredicateResult
static LazyValueInfo::Tristate getPredicateResult(unsigned Pred, Constant *C,
LVILatticeVal &Result,
const DataLayout &DL,
TargetLibraryInfo *TLI) {
// If we know the value is a constant, evaluate the conditional.
Constant *Res = nullptr;
if (Result.isConstant()) {
Res = ConstantFoldCompareInstOperands(Pred, Result.getConstant(), C, DL,
TLI);
if (ConstantInt *ResCI = dyn_cast<ConstantInt>(Res))
return ResCI->isZero() ? LazyValueInfo::False : LazyValueInfo::True;
return LazyValueInfo::Unknown;
}
if (Result.isConstantRange()) {
ConstantInt *CI = dyn_cast<ConstantInt>(C);
if (!CI) return LazyValueInfo::Unknown;
ConstantRange CR = Result.getConstantRange();
if (Pred == ICmpInst::ICMP_EQ) {
if (!CR.contains(CI->getValue()))
return LazyValueInfo::False;
if (CR.isSingleElement() && CR.contains(CI->getValue()))
return LazyValueInfo::True;
} else if (Pred == ICmpInst::ICMP_NE) {
if (!CR.contains(CI->getValue()))
return LazyValueInfo::True;
if (CR.isSingleElement() && CR.contains(CI->getValue()))
return LazyValueInfo::False;
}
// Handle more complex predicates.
ConstantRange TrueValues =
ICmpInst::makeConstantRange((ICmpInst::Predicate)Pred, CI->getValue());
if (TrueValues.contains(CR))
return LazyValueInfo::True;
if (TrueValues.inverse().contains(CR))
return LazyValueInfo::False;
return LazyValueInfo::Unknown;
}
if (Result.isNotConstant()) {
// If this is an equality comparison, we can try to fold it knowing that
// "V != C1".
if (Pred == ICmpInst::ICMP_EQ) {
// !C1 == C -> false iff C1 == C.
Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
Result.getNotConstant(), C, DL,
TLI);
if (Res->isNullValue())
return LazyValueInfo::False;
} else if (Pred == ICmpInst::ICMP_NE) {
// !C1 != C -> true iff C1 == C.
Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
Result.getNotConstant(), C, DL,
TLI);
if (Res->isNullValue())
return LazyValueInfo::True;
}
return LazyValueInfo::Unknown;
}
return LazyValueInfo::Unknown;
}
Example 6: processMemCpyMemCpyDependence
/// processMemCpyMemCpyDependence - We've found that the (upward scanning)
/// memory dependence of memcpy 'M' is the memcpy 'MDep'. Try to simplify M to
/// copy from MDep's input if we can. MSize is the size of M's copy.
///
bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
uint64_t MSize) {
// We can only transform memcpys where the dest of one is the source of the
// other.
if (M->getSource() != MDep->getDest() || MDep->isVolatile())
return false;
// If dep instruction is reading from our current input, then it is a noop
// transfer and substituting the input won't change this instruction. Just
// ignore the input and let someone else zap MDep. This handles cases like:
// memcpy(a <- a)
// memcpy(b <- a)
if (M->getSource() == MDep->getSource())
return false;
// Second, the lengths of the memcpys must be the same, or the preceding one
// must be larger than the following one.
ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
return false;
AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
// Verify that the copied-from memory doesn't change in between the two
// transfers. For example, in:
// memcpy(a <- b)
// *b = 42;
// memcpy(c <- a)
// It would be invalid to transform the second memcpy into memcpy(c <- b).
//
// TODO: If the code between M and MDep is transparent to the destination "c",
// then we could still perform the xform by moving M up to the first memcpy.
//
// NOTE: This is conservative, it will stop on any read from the source loc,
// not just the defining memcpy.
MemDepResult SourceDep =
MD->getPointerDependencyFrom(AA.getLocationForSource(MDep),
false, M, M->getParent());
if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
return false;
// If the dest of the second might alias the source of the first, then the
// source and dest might overlap. We still want to eliminate the intermediate
// value, but we have to generate a memmove instead of memcpy.
bool UseMemMove = false;
if (!AA.isNoAlias(AA.getLocationForDest(M), AA.getLocationForSource(MDep)))
UseMemMove = true;
// If all checks passed, then we can transform M.
// Make sure to use the lesser of the alignment of the source and the dest
// since we're changing where we're reading from, but don't want to increase
// the alignment past what can be read from or written to.
// TODO: Is this worth it if we're creating a less aligned memcpy? For
// example we could be moving from movaps -> movq on x86.
unsigned Align = std::min(MDep->getAlignment(), M->getAlignment());
IRBuilder<> Builder(M);
if (UseMemMove)
Builder.CreateMemMove(M->getRawDest(), MDep->getRawSource(), M->getLength(),
Align, M->isVolatile());
else
Builder.CreateMemCpy(M->getRawDest(), MDep->getRawSource(), M->getLength(),
Align, M->isVolatile());
// Remove the instruction we're replacing.
MD->removeInstruction(M);
M->eraseFromParent();
++NumMemCpyInstr;
return true;
}
Example 7: LLVM_DEBUG
bool InductionDescriptor::isInductionPHI(
PHINode *Phi, const Loop *TheLoop, ScalarEvolution *SE,
InductionDescriptor &D, const SCEV *Expr,
SmallVectorImpl<Instruction *> *CastsToIgnore) {
Type *PhiTy = Phi->getType();
// We only handle integer and pointer induction variables.
if (!PhiTy->isIntegerTy() && !PhiTy->isPointerTy())
return false;
// Check that the PHI is consecutive.
const SCEV *PhiScev = Expr ? Expr : SE->getSCEV(Phi);
const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PhiScev);
if (!AR) {
LLVM_DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n");
return false;
}
if (AR->getLoop() != TheLoop) {
// FIXME: We should treat this as a uniform. Unfortunately, we
// don't currently know how to handled uniform PHIs.
LLVM_DEBUG(
dbgs() << "LV: PHI is a recurrence with respect to an outer loop.\n");
return false;
}
Value *StartValue =
Phi->getIncomingValueForBlock(AR->getLoop()->getLoopPreheader());
BasicBlock *Latch = AR->getLoop()->getLoopLatch();
if (!Latch)
return false;
BinaryOperator *BOp =
dyn_cast<BinaryOperator>(Phi->getIncomingValueForBlock(Latch));
const SCEV *Step = AR->getStepRecurrence(*SE);
// Calculate the pointer stride and check if it is consecutive.
// The stride may be a constant or a loop invariant integer value.
const SCEVConstant *ConstStep = dyn_cast<SCEVConstant>(Step);
if (!ConstStep && !SE->isLoopInvariant(Step, TheLoop))
return false;
if (PhiTy->isIntegerTy()) {
D = InductionDescriptor(StartValue, IK_IntInduction, Step, BOp,
CastsToIgnore);
return true;
}
assert(PhiTy->isPointerTy() && "The PHI must be a pointer");
// Pointer induction should be a constant.
if (!ConstStep)
return false;
ConstantInt *CV = ConstStep->getValue();
Type *PointerElementType = PhiTy->getPointerElementType();
// The pointer stride cannot be determined if the pointer element type is not
// sized.
if (!PointerElementType->isSized())
return false;
const DataLayout &DL = Phi->getModule()->getDataLayout();
int64_t Size = static_cast<int64_t>(DL.getTypeAllocSize(PointerElementType));
if (!Size)
return false;
int64_t CVSize = CV->getSExtValue();
if (CVSize % Size)
return false;
auto *StepValue =
SE->getConstant(CV->getType(), CVSize / Size, true /* signed */);
D = InductionDescriptor(StartValue, IK_PtrInduction, StepValue, BOp);
return true;
}
Example 8: getAllocationData
SizeOffsetType ObjectSizeOffsetVisitor::visitCallSite(CallSite CS) {
Optional<AllocFnsTy> FnData =
getAllocationData(CS.getInstruction(), AnyAlloc, TLI);
if (!FnData)
return unknown();
// handle strdup-like functions separately
if (FnData->AllocTy == StrDupLike) {
APInt Size(IntTyBits, GetStringLength(CS.getArgument(0)));
if (!Size)
return unknown();
// strndup limits strlen
if (FnData->FstParam > 0) {
ConstantInt *Arg =
dyn_cast<ConstantInt>(CS.getArgument(FnData->FstParam));
if (!Arg)
return unknown();
APInt MaxSize = Arg->getValue().zextOrSelf(IntTyBits);
if (Size.ugt(MaxSize))
Size = MaxSize + 1;
}
return std::make_pair(Size, Zero);
}
ConstantInt *Arg = dyn_cast<ConstantInt>(CS.getArgument(FnData->FstParam));
if (!Arg)
return unknown();
// When we're compiling N-bit code, and the user uses parameters that are
// greater than N bits (e.g. uint64_t on a 32-bit build), we can run into
// trouble with APInt size issues. This function handles resizing + overflow
// checks for us.
auto CheckedZextOrTrunc = [&](APInt &I) {
// More bits than we can handle. Checking the bit width isn't necessary, but
// it's faster than checking active bits, and should give `false` in the
// vast majority of cases.
if (I.getBitWidth() > IntTyBits && I.getActiveBits() > IntTyBits)
return false;
if (I.getBitWidth() != IntTyBits)
I = I.zextOrTrunc(IntTyBits);
return true;
};
APInt Size = Arg->getValue();
if (!CheckedZextOrTrunc(Size))
return unknown();
// size determined by just 1 parameter
if (FnData->SndParam < 0)
return std::make_pair(Size, Zero);
Arg = dyn_cast<ConstantInt>(CS.getArgument(FnData->SndParam));
if (!Arg)
return unknown();
APInt NumElems = Arg->getValue();
if (!CheckedZextOrTrunc(NumElems))
return unknown();
bool Overflow;
Size = Size.umul_ov(NumElems, Overflow);
return Overflow ? unknown() : std::make_pair(Size, Zero);
// TODO: handle more standard functions (+ wchar cousins):
// - strdup / strndup
// - strcpy / strncpy
// - strcat / strncat
// - memcpy / memmove
// - strcat / strncat
// - memset
}
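The CheckedZextOrTrunc lambda in the example above is a reusable pattern: normalize every APInt to one width, refuse values that would not fit, then multiply with umul_ov so overflow is reported instead of silently wrapping. A minimal standalone sketch under those assumptions (the helper name and the IntTyBits parameter are illustrative, not part of the original).
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Optional.h"
using namespace llvm;
// Hypothetical helper: compute Size * NumElems at IntTyBits width, returning
// None if either value does not fit or the multiplication overflows.
static Optional<APInt> checkedTotalSize(APInt Size, APInt NumElems,
                                        unsigned IntTyBits) {
  auto Fits = [&](const APInt &I) {
    return I.getBitWidth() <= IntTyBits || I.getActiveBits() <= IntTyBits;
  };
  if (!Fits(Size) || !Fits(NumElems))
    return None;
  Size = Size.zextOrTrunc(IntTyBits);
  NumElems = NumElems.zextOrTrunc(IntTyBits);
  bool Overflow = false;
  APInt Total = Size.umul_ov(NumElems, Overflow);
  if (Overflow)
    return None;
  return Total;
}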
Example 9: DEBUG
int qdp_jit_vec::vectorize_loads( std::vector<std::vector<Instruction*> >& load_instructions )
{
DEBUG(dbgs() << "Vectorize loads, total of " << load_instructions.size() << "\n");
//std::vector<std::pair<Value*,Value*> > scalar_vector_loads;
scalar_vector_pairs.clear();
if (load_instructions.empty())
return 0;
int load_vec_elem = 0;
for( std::vector<Instruction*>& VI : load_instructions ) {
DEBUG(dbgs() << "Processing vector of loads number " << load_vec_elem++ << "\n");
assert( VI.size() == vec_len && "length of vector of loads does not match vec_len" );
bool loads_consec = true;
uint64_t lo,hi;
bool first = true;
for( Instruction* I : VI ) {
GetElementPtrInst* GEP;
if ((GEP = dyn_cast<GetElementPtrInst>(I->getOperand(0)))) {
if (first) {
ConstantInt * CI;
if ((CI = dyn_cast<ConstantInt>(GEP->getOperand(1)))) {
lo = CI->getZExtValue();
hi = lo+1;
first=false;
} else {
DEBUG(dbgs() << "First load in the chain: Operand of GEP not a ConstantInt" << *GEP->getOperand(1) << "\n");
assert( 0 && "First load in the chain: Operand of GEP not a ConstantInt\n");
exit(0);
}
} else {
ConstantInt * CI;
if ((CI = dyn_cast<ConstantInt>(GEP->getOperand(1)))) {
if (hi != CI->getZExtValue()) {
DEBUG(dbgs() << "Loads not consecutive lo=" << lo << " hi=" << hi << " this=" << CI->getZExtValue() << "\n");
loads_consec = false;
} else {
hi++;
}
}
}
} else {
DEBUG(dbgs() << "Operand of load not a GEP " << *I->getOperand(0) << "\n");
assert( 0 && "Operand of load not a GEP" );
exit(0);
loads_consec = false;
}
}
if (loads_consec) {
DEBUG(dbgs() << "Loads consecuetive\n");
LoadInst* LI = cast<LoadInst>(VI.at(0));
GetElementPtrInst* GEP = cast<GetElementPtrInst>(LI->getOperand(0));
Instruction* GEPcl = clone_with_operands(GEP);
unsigned AS = LI->getPointerAddressSpace();
VectorType *VecTy = VectorType::get( LI->getType() , vec_len );
unsigned bitwidth = LI->getType()->getPrimitiveSizeInBits();
unsigned bytewidth = bitwidth == 1 ? 1 : bitwidth/8;
DEBUG(dbgs() << "bit/byte width of load instr trype: " << bitwidth << "/" << bytewidth << "\n");
//Builder->SetInsertPoint( GEP );
Value *VecPtr = Builder->CreateBitCast(GEPcl,VecTy->getPointerTo(AS));
//Value *VecLoad = Builder->CreateLoad( VecPtr );
unsigned align = lo % vec_len == 0 ? bytewidth * vec_len : bytewidth;
Value *VecLoad = Builder->CreateAlignedLoad( VecPtr , align );
//DEBUG(dbgs() << "created vector load: " << *VecLoad << "\n");
//function->dump();
// unsigned AS = LI->getPointerAddressSpace();
// VectorType *VecTy = VectorType::get( LI->getType() , vec_len );
// Builder->SetInsertPoint( LI );
// Value *VecPtr = Builder->CreateBitCast(LI->getPointerOperand(),VecTy->getPointerTo(AS));
// Value *VecLoad = Builder->CreateLoad( VecPtr );
scalar_vector_pairs.push_back( std::make_pair( VI.at(0) , VecLoad ) );
} else {
DEBUG(dbgs() << "Loads not consecutive:\n");
for (Value* V: VI) {
DEBUG(dbgs() << *V << "\n");
}
//Instruction* I = dyn_cast<Instruction>(VI.back()->getNextNode());
//DEBUG(dbgs() << *I << "\n");
//Builder->SetInsertPoint( VI.at(0) );
std::vector<Instruction*> VIcl;
for( Instruction* I : VI ) {
VIcl.push_back( clone_with_operands(I) );
}
VectorType *VecTy = VectorType::get( VI.at(0)->getType() , vec_len );
Value *Vec = UndefValue::get(VecTy);
int i=0;
//......... the rest of this example is omitted .........
Example 10: CanEvaluateShifted
/// CanEvaluateShifted - See if we can compute the specified value, but shifted
/// logically to the left or right by some number of bits. This should return
/// true if the expression can be computed for the same cost as the current
/// expression tree. This is used to eliminate extraneous shifting from things
/// like:
/// %C = shl i128 %A, 64
/// %D = shl i128 %B, 96
/// %E = or i128 %C, %D
/// %F = lshr i128 %E, 64
/// where the client will ask if E can be computed shifted right by 64-bits. If
/// this succeeds, the GetShiftedValue function will be called to produce the
/// value.
static bool CanEvaluateShifted(Value *V, unsigned NumBits, bool isLeftShift,
InstCombiner &IC) {
// We can always evaluate constants shifted.
if (isa<Constant>(V))
return true;
Instruction *I = dyn_cast<Instruction>(V);
if (!I) return false;
// If this is the opposite shift, we can directly reuse the input of the shift
// if the needed bits are already zero in the input. This allows us to reuse
// the value which means that we don't care if the shift has multiple uses.
// TODO: Handle opposite shift by exact value.
ConstantInt *CI = 0;
if ((isLeftShift && match(I, m_LShr(m_Value(), m_ConstantInt(CI)))) ||
(!isLeftShift && match(I, m_Shl(m_Value(), m_ConstantInt(CI))))) {
if (CI->getZExtValue() == NumBits) {
// TODO: Check that the input bits are already zero with MaskedValueIsZero
#if 0
// If this is a truncate of a logical shr, we can truncate it to a smaller
// lshr iff we know that the bits we would otherwise be shifting in are
// already zeros.
uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
uint32_t BitWidth = Ty->getScalarSizeInBits();
if (MaskedValueIsZero(I->getOperand(0),
APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth)) &&
CI->getLimitedValue(BitWidth) < BitWidth) {
return CanEvaluateTruncated(I->getOperand(0), Ty);
}
#endif
}
}
// We can't mutate something that has multiple uses: doing so would
// require duplicating the instruction in general, which isn't profitable.
if (!I->hasOneUse()) return false;
switch (I->getOpcode()) {
default: return false;
case Instruction::And:
case Instruction::Or:
case Instruction::Xor:
// Bitwise operators can all be evaluated shifted by an arbitrary amount.
return CanEvaluateShifted(I->getOperand(0), NumBits, isLeftShift, IC) &&
CanEvaluateShifted(I->getOperand(1), NumBits, isLeftShift, IC);
case Instruction::Shl: {
// We can often fold the shift into shifts-by-a-constant.
CI = dyn_cast<ConstantInt>(I->getOperand(1));
if (CI == 0) return false;
// We can always fold shl(c1)+shl(c2) -> shl(c1+c2).
if (isLeftShift) return true;
// We can always turn shl(c)+shr(c) -> and(c2).
if (CI->getValue() == NumBits) return true;
unsigned TypeWidth = I->getType()->getScalarSizeInBits();
// We can turn shl(c1)+shr(c2) -> shl(c3)+and(c4), but it isn't
// profitable unless we know the and'd out bits are already zero.
if (CI->getZExtValue() > NumBits) {
unsigned LowBits = TypeWidth - CI->getZExtValue();
if (MaskedValueIsZero(I->getOperand(0),
APInt::getLowBitsSet(TypeWidth, NumBits) << LowBits))
return true;
}
return false;
}
case Instruction::LShr: {
// We can often fold the shift into shifts-by-a-constant.
CI = dyn_cast<ConstantInt>(I->getOperand(1));
if (CI == 0) return false;
// We can always fold lshr(c1)+lshr(c2) -> lshr(c1+c2).
if (!isLeftShift) return true;
// We can always turn lshr(c)+shl(c) -> and(c2).
if (CI->getValue() == NumBits) return true;
unsigned TypeWidth = I->getType()->getScalarSizeInBits();
// We can always turn lshr(c1)+shl(c2) -> lshr(c3)+and(c4), but it isn't
// profitable unless we know the and'd out bits are already zero.
if (CI->getValue().ult(TypeWidth) && CI->getZExtValue() > NumBits) {
unsigned LowBits = CI->getZExtValue() - NumBits;
//......... the rest of this example is omitted .........
Example 11: BuildMI
bool FastISel::SelectCall(const User *I) {
const CallInst *Call = cast<CallInst>(I);
// Handle simple inline asms.
if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
// Don't attempt to handle constraints.
if (!IA->getConstraintString().empty())
return false;
unsigned ExtraInfo = 0;
if (IA->hasSideEffects())
ExtraInfo |= InlineAsm::Extra_HasSideEffects;
if (IA->isAlignStack())
ExtraInfo |= InlineAsm::Extra_IsAlignStack;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(TargetOpcode::INLINEASM))
.addExternalSymbol(IA->getAsmString().c_str())
.addImm(ExtraInfo);
return true;
}
MachineModuleInfo &MMI = FuncInfo.MF->getMMI();
ComputeUsesVAFloatArgument(*Call, &MMI);
const Function *F = Call->getCalledFunction();
if (!F) return false;
// Handle selected intrinsic function calls.
switch (F->getIntrinsicID()) {
default: break;
// At -O0 we don't care about the lifetime intrinsics.
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
return true;
case Intrinsic::dbg_declare: {
const DbgDeclareInst *DI = cast<DbgDeclareInst>(Call);
if (!DIVariable(DI->getVariable()).Verify() ||
!FuncInfo.MF->getMMI().hasDebugInfo()) {
DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
return true;
}
const Value *Address = DI->getAddress();
if (!Address || isa<UndefValue>(Address)) {
DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
return true;
}
unsigned Reg = 0;
unsigned Offset = 0;
if (const Argument *Arg = dyn_cast<Argument>(Address)) {
// Some arguments' frame index is recorded during argument lowering.
Offset = FuncInfo.getArgumentFrameIndex(Arg);
if (Offset)
Reg = TRI.getFrameRegister(*FuncInfo.MF);
}
if (!Reg)
Reg = lookUpRegForValue(Address);
if (!Reg && isa<Instruction>(Address) &&
(!isa<AllocaInst>(Address) ||
!FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
Reg = FuncInfo.InitializeRegForValue(Address);
if (Reg)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(TargetOpcode::DBG_VALUE))
.addReg(Reg, RegState::Debug).addImm(Offset)
.addMetadata(DI->getVariable());
else
// We can't yet handle anything else here because it would require
// generating code, thus altering codegen because of debug info.
DEBUG(dbgs() << "Dropping debug info for " << DI);
return true;
}
case Intrinsic::dbg_value: {
// This form of DBG_VALUE is target-independent.
const DbgValueInst *DI = cast<DbgValueInst>(Call);
const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
const Value *V = DI->getValue();
if (!V) {
// Currently the optimizer can produce this; insert an undef to
// help debugging. Probably the optimizer should not do this.
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
.addReg(0U).addImm(DI->getOffset())
.addMetadata(DI->getVariable());
} else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
if (CI->getBitWidth() > 64)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
.addCImm(CI).addImm(DI->getOffset())
.addMetadata(DI->getVariable());
else
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
.addImm(CI->getZExtValue()).addImm(DI->getOffset())
.addMetadata(DI->getVariable());
} else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
.addFPImm(CF).addImm(DI->getOffset())
.addMetadata(DI->getVariable());
//......... the rest of this example is omitted .........
Example 12: ConstantFoldConstantExpression
/// GetShiftedValue - When CanEvaluateShifted returned true for an expression,
/// this function inserts the new computation that produces the shifted value.
static Value *GetShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
InstCombiner &IC) {
// We can always evaluate constants shifted.
if (Constant *C = dyn_cast<Constant>(V)) {
if (isLeftShift)
V = IC.Builder->CreateShl(C, NumBits);
else
V = IC.Builder->CreateLShr(C, NumBits);
// If we got a constantexpr back, try to simplify it with TD info.
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
V = ConstantFoldConstantExpression(CE, IC.getDataLayout(),
IC.getTargetLibraryInfo());
return V;
}
Instruction *I = cast<Instruction>(V);
IC.Worklist.Add(I);
switch (I->getOpcode()) {
default: llvm_unreachable("Inconsistency with CanEvaluateShifted");
case Instruction::And:
case Instruction::Or:
case Instruction::Xor:
// Bitwise operators can all be evaluated shifted by an arbitrary amount.
I->setOperand(0, GetShiftedValue(I->getOperand(0), NumBits,isLeftShift,IC));
I->setOperand(1, GetShiftedValue(I->getOperand(1), NumBits,isLeftShift,IC));
return I;
case Instruction::Shl: {
BinaryOperator *BO = cast<BinaryOperator>(I);
unsigned TypeWidth = BO->getType()->getScalarSizeInBits();
// We only accept shifts-by-a-constant in CanEvaluateShifted.
ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1));
// We can always fold shl(c1)+shl(c2) -> shl(c1+c2).
if (isLeftShift) {
// If this is an oversized composite shift, then unsigned shifts get 0.
unsigned NewShAmt = NumBits+CI->getZExtValue();
if (NewShAmt >= TypeWidth)
return Constant::getNullValue(I->getType());
BO->setOperand(1, ConstantInt::get(BO->getType(), NewShAmt));
BO->setHasNoUnsignedWrap(false);
BO->setHasNoSignedWrap(false);
return I;
}
// We turn shl(c)+lshr(c) -> and(c2) if the input doesn't already have
// zeros.
if (CI->getValue() == NumBits) {
APInt Mask(APInt::getLowBitsSet(TypeWidth, TypeWidth - NumBits));
V = IC.Builder->CreateAnd(BO->getOperand(0),
ConstantInt::get(BO->getContext(), Mask));
if (Instruction *VI = dyn_cast<Instruction>(V)) {
VI->moveBefore(BO);
VI->takeName(BO);
}
return V;
}
// We turn shl(c1)+shr(c2) -> shl(c3)+and(c4), but only when we know that
// the and won't be needed.
assert(CI->getZExtValue() > NumBits);
BO->setOperand(1, ConstantInt::get(BO->getType(),
CI->getZExtValue() - NumBits));
BO->setHasNoUnsignedWrap(false);
BO->setHasNoSignedWrap(false);
return BO;
}
case Instruction::LShr: {
BinaryOperator *BO = cast<BinaryOperator>(I);
unsigned TypeWidth = BO->getType()->getScalarSizeInBits();
// We only accept shifts-by-a-constant in CanEvaluateShifted.
ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1));
// We can always fold lshr(c1)+lshr(c2) -> lshr(c1+c2).
if (!isLeftShift) {
// If this is an oversized composite shift, then unsigned shifts get 0.
unsigned NewShAmt = NumBits+CI->getZExtValue();
if (NewShAmt >= TypeWidth)
return Constant::getNullValue(BO->getType());
BO->setOperand(1, ConstantInt::get(BO->getType(), NewShAmt));
BO->setIsExact(false);
return I;
}
// We turn lshr(c)+shl(c) -> and(c2) if the input doesn't already have
// zeros.
if (CI->getValue() == NumBits) {
APInt Mask(APInt::getHighBitsSet(TypeWidth, TypeWidth - NumBits));
V = IC.Builder->CreateAnd(I->getOperand(0),
ConstantInt::get(BO->getContext(), Mask));
if (Instruction *VI = dyn_cast<Instruction>(V)) {
VI->moveBefore(I);
VI->takeName(I);
}
//......... the rest of this example is omitted .........
Example 13: CanEvaluateShuffled
/// Return true if we can evaluate the specified expression tree if the vector
/// elements were shuffled in a different order.
static bool CanEvaluateShuffled(Value *V, ArrayRef<int> Mask,
unsigned Depth = 5) {
// We can always reorder the elements of a constant.
if (isa<Constant>(V))
return true;
// We won't reorder vector arguments. No IPO here.
Instruction *I = dyn_cast<Instruction>(V);
if (!I) return false;
// Two users may expect different orders of the elements. Don't try it.
if (!I->hasOneUse())
return false;
if (Depth == 0) return false;
switch (I->getOpcode()) {
case Instruction::Add:
case Instruction::FAdd:
case Instruction::Sub:
case Instruction::FSub:
case Instruction::Mul:
case Instruction::FMul:
case Instruction::UDiv:
case Instruction::SDiv:
case Instruction::FDiv:
case Instruction::URem:
case Instruction::SRem:
case Instruction::FRem:
case Instruction::Shl:
case Instruction::LShr:
case Instruction::AShr:
case Instruction::And:
case Instruction::Or:
case Instruction::Xor:
case Instruction::ICmp:
case Instruction::FCmp:
case Instruction::Trunc:
case Instruction::ZExt:
case Instruction::SExt:
case Instruction::FPToUI:
case Instruction::FPToSI:
case Instruction::UIToFP:
case Instruction::SIToFP:
case Instruction::FPTrunc:
case Instruction::FPExt:
case Instruction::GetElementPtr: {
for (Value *Operand : I->operands()) {
if (!CanEvaluateShuffled(Operand, Mask, Depth-1))
return false;
}
return true;
}
case Instruction::InsertElement: {
ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(2));
if (!CI) return false;
int ElementNumber = CI->getLimitedValue();
// Verify that 'CI' does not occur twice in Mask. A single 'insertelement'
// can't put an element into multiple indices.
bool SeenOnce = false;
for (int i = 0, e = Mask.size(); i != e; ++i) {
if (Mask[i] == ElementNumber) {
if (SeenOnce)
return false;
SeenOnce = true;
}
}
return CanEvaluateShuffled(I->getOperand(0), Mask, Depth-1);
}
}
return false;
}
Example 14: isDereferenceablePointer
/// \brief Check if Value is always a dereferenceable pointer.
///
/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceablePointer(const Value *V, const DataLayout *DL,
SmallPtrSetImpl<const Value *> &Visited) {
// Note that it is not safe to speculate into a malloc'd region because
// malloc may return null.
// These are obviously ok.
if (isa<AllocaInst>(V)) return true;
// It's not always safe to follow a bitcast, for example:
// bitcast i8* (alloca i8) to i32*
// would result in a 4-byte load from a 1-byte alloca. However,
// if we're casting from a pointer from a type of larger size
// to a type of smaller size (or the same size), and the alignment
// is at least as large as for the resulting pointer type, then
// we can look through the bitcast.
if (DL)
if (const BitCastInst* BC = dyn_cast<BitCastInst>(V)) {
Type *STy = BC->getSrcTy()->getPointerElementType(),
*DTy = BC->getDestTy()->getPointerElementType();
if (STy->isSized() && DTy->isSized() &&
(DL->getTypeStoreSize(STy) >=
DL->getTypeStoreSize(DTy)) &&
(DL->getABITypeAlignment(STy) >=
DL->getABITypeAlignment(DTy)))
return isDereferenceablePointer(BC->getOperand(0), DL, Visited);
}
// Global variables which can't collapse to null are ok.
if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
return !GV->hasExternalWeakLinkage();
// byval arguments are okay. Arguments specifically marked as
// dereferenceable are okay too.
if (const Argument *A = dyn_cast<Argument>(V)) {
if (A->hasByValAttr())
return true;
else if (uint64_t Bytes = A->getDereferenceableBytes()) {
Type *Ty = V->getType()->getPointerElementType();
if (Ty->isSized() && DL && DL->getTypeStoreSize(Ty) <= Bytes)
return true;
}
return false;
}
// Return values from call sites specifically marked as dereferenceable are
// also okay.
if (ImmutableCallSite CS = V) {
if (uint64_t Bytes = CS.getDereferenceableBytes(0)) {
Type *Ty = V->getType()->getPointerElementType();
if (Ty->isSized() && DL && DL->getTypeStoreSize(Ty) <= Bytes)
return true;
}
}
// For GEPs, determine if the indexing lands within the allocated object.
if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
// Conservatively require that the base pointer be fully dereferenceable.
if (!Visited.insert(GEP->getOperand(0)).second)
return false;
if (!isDereferenceablePointer(GEP->getOperand(0), DL, Visited))
return false;
// Check the indices.
gep_type_iterator GTI = gep_type_begin(GEP);
for (User::const_op_iterator I = GEP->op_begin()+1,
E = GEP->op_end(); I != E; ++I) {
Value *Index = *I;
Type *Ty = *GTI++;
// Struct indices can't be out of bounds.
if (isa<StructType>(Ty))
continue;
ConstantInt *CI = dyn_cast<ConstantInt>(Index);
if (!CI)
return false;
// Zero is always ok.
if (CI->isZero())
continue;
// Check to see that it's within the bounds of an array.
ArrayType *ATy = dyn_cast<ArrayType>(Ty);
if (!ATy)
return false;
if (CI->getValue().getActiveBits() > 64)
return false;
if (CI->getZExtValue() >= ATy->getNumElements())
return false;
}
// Indices check out; this is dereferenceable.
return true;
}
if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
return isDereferenceablePointer(ASC->getOperand(0), DL, Visited);
// If we don't know, assume the worst.
return false;
}
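The GEP index check above also illustrates a defensive idiom: call getActiveBits() before getZExtValue(), because getZExtValue() asserts when the constant needs more than 64 bits. A small sketch of that guard (the helper name is invented):
#include "llvm/IR/Constants.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
using namespace llvm;
// Hypothetical helper: extract an index from V if it is a ConstantInt that
// fits in 64 bits; returns false otherwise.
static bool getSmallIndex(const Value *V, uint64_t &Idx) {
  const ConstantInt *CI = dyn_cast<ConstantInt>(V);
  if (!CI || CI->getValue().getActiveBits() > 64)
    return false;
  Idx = CI->getZExtValue();
  return true;
}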
Example 15: finishAssembly
void TartGCPrinter::finishAssembly(AsmPrinter &AP) {
unsigned nextLabel = 1;
SafePointList safePoints;
// Set up for emitting addresses.
int pointerSize = AP.TM.getTargetData()->getPointerSize();
int addressAlignLog;
if (pointerSize == sizeof(int32_t)) {
addressAlignLog = 2;
} else {
addressAlignLog = 3;
}
MCStreamer & outStream = AP.OutStreamer;
// Put this in the data section.
outStream.SwitchSection(AP.getObjFileLowering().getDataSection());
// For each function...
for (iterator FI = begin(), FE = end(); FI != FE; ++FI) {
GCFunctionInfo & gcFn = **FI;
// if (optShowGC) {
// errs() << "GCStrategy: Function: " << gcFn.getFunction().getName() << "\n";
// }
// And each safe point...
for (GCFunctionInfo::iterator sp = gcFn.begin(); sp != gcFn.end(); ++sp) {
StackTraceTable::FieldOffsetList fieldOffsets;
StackTraceTable::TraceMethodList traceMethods;
// And for each live root...
for (GCFunctionInfo::live_iterator rt = gcFn.live_begin(sp); rt != gcFn.live_end(sp); ++rt) {
int64_t offset = rt->StackOffset;
const Constant * meta = rt->Metadata;
if (meta != NULL && !meta->isNullValue()) {
// Meta is non-null, so it's a value type.
const ConstantArray * traceArray = cast<ConstantArray>(getGlobalValue(meta));
// For each trace descriptor in the meta array...
for (ConstantArray::const_op_iterator it = traceArray->op_begin();
it != traceArray->op_end(); ++it) {
ConstantStruct * descriptor = cast<ConstantStruct>(*it);
ConstantInt * fieldCount = cast<ConstantInt>(descriptor->getOperand(1));
int64_t dscOffset = toInt(descriptor->getOperand(2), AP.TM);
if (fieldCount->isZero()) {
// A zero field count means that this is a trace method descriptor.
const Constant * traceMethod = descriptor->getOperand(3);
assert(offset > -1000 && offset < 1000);
assert(dscOffset > -1000 && dscOffset < 1000);
traceMethods.push_back(TraceMethodEntry(offset + dscOffset, traceMethod));
} else {
// Otherwise it's a field offset descriptor.
const GlobalVariable * fieldOffsetsVar = cast<GlobalVariable>(
descriptor->getOperand(3)->getOperand(0));
// Handle case where the array value is just a ConstantAggregateZero, which
// can be generated by llvm::ConstantArray::get() if the array values
// are all zero.
if (const ConstantAggregateZero * zero =
dyn_cast<ConstantAggregateZero>(fieldOffsetsVar->getInitializer())) {
// Array should never contain duplicate offsets, so an all-zero array
// can only have one entry.
(void)zero;
assert(fieldCount->isOne());
fieldOffsets.push_back(offset + dscOffset);
} else {
// Get the field offset array and add to field offsets for this
// safe point.
const ConstantArray * fieldOffsetArray = cast<ConstantArray>(
fieldOffsetsVar->getInitializer());
for (ConstantArray::const_op_iterator el = fieldOffsetArray->op_begin();
el != fieldOffsetArray->op_end(); ++el) {
fieldOffsets.push_back(
offset + dscOffset + toInt(cast<llvm::Constant>(*el), AP.TM));
}
}
}
}
} else {
// No metadata, so it's an object reference - just add the field offset.
fieldOffsets.push_back(offset);
}
}
// Nothing to trace? Then we're done.
if (fieldOffsets.empty() && traceMethods.empty()) {
continue;
}
// Create a folding set node and merge with any identical trace tables.
std::sort(fieldOffsets.begin(), fieldOffsets.end());
llvm::FoldingSetNodeID id;
StackTraceTable::ProfileEntries(id, fieldOffsets, traceMethods);
void * insertPos;
StackTraceTable * sTable = traceTables.FindNodeOrInsertPos(id, insertPos);
if (sTable == NULL) {
//......... the rest of this example is omitted .........