

C++ ConstantInt::getValue Method Code Examples

This article collects typical usage examples of the C++ ConstantInt::getValue method, drawn from real open-source projects. If you are wondering what ConstantInt::getValue does, how to call it, or what idiomatic uses look like, the curated examples below should help. You can also explore further examples of the ConstantInt class.


The following presents 15 code examples of the ConstantInt::getValue method, sorted by popularity by default.
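Before looking at the project code below, here is a minimal, self-contained sketch of the most common pattern: obtain a ConstantInt via dyn_cast and then query the APInt returned by getValue(). The helper name isConstantPowerOfTwo is made up for illustration and does not come from any of the projects cited below.

#include "llvm/ADT/APInt.h"
#include "llvm/IR/Constants.h"
using namespace llvm;

// Hypothetical helper: returns true if V is a ConstantInt whose value is a
// power of two. getValue() hands back an arbitrary-precision APInt, so the
// check works for any integer width without overflow concerns.
static bool isConstantPowerOfTwo(Value *V) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isPowerOf2();
  return false;
}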

Example 1: getBlockValue

LVILatticeVal LVIQuery::getBlockValue(BasicBlock *BB) {
  // See if we already have a value for this block.
  LVILatticeVal BBLV = getCachedEntryForBlock(BB);
  
  // If we've already computed this block's value, return it.
  if (!BBLV.isUndefined()) {
    DEBUG(dbgs() << "  reuse BB '" << BB->getName() << "' val=" << BBLV <<'\n');
    return BBLV;
  }

  // Otherwise, this is the first time we're seeing this block.  Reset the
  // lattice value to overdefined, so that cycles will terminate and be
  // conservatively correct.
  BBLV.markOverdefined();
  Cache[BB] = BBLV;
  
  Instruction *BBI = dyn_cast<Instruction>(Val);
  if (BBI == 0 || BBI->getParent() != BB) {
    LVILatticeVal Result;  // Start Undefined.
    
    // If this is a pointer, and there's a load from that pointer in this BB,
    // then we know that the pointer can't be NULL.
    bool NotNull = false;
    if (Val->getType()->isPointerTy()) {
      for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();BI != BE;++BI){
        LoadInst *L = dyn_cast<LoadInst>(BI);
        if (L && L->getPointerAddressSpace() == 0 &&
            L->getPointerOperand()->getUnderlyingObject() ==
              Val->getUnderlyingObject()) {
          NotNull = true;
          break;
        }
      }
    }
    
    unsigned NumPreds = 0;    
    // Loop over all of our predecessors, merging what we know from them into
    // result.
    for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
      Result.mergeIn(getEdgeValue(*PI, BB));
      
      // If we hit overdefined, exit early.  The BlockVals entry is already set
      // to overdefined.
      if (Result.isOverdefined()) {
        DEBUG(dbgs() << " compute BB '" << BB->getName()
                     << "' - overdefined because of pred.\n");
        // If we previously determined that this is a pointer that can't be null
        // then return that rather than giving up entirely.
        if (NotNull) {
          const PointerType *PTy = cast<PointerType>(Val->getType());
          Result = LVILatticeVal::getNot(ConstantPointerNull::get(PTy));
        }
        
        return Result;
      }
      ++NumPreds;
    }
    
    
    // If this is the entry block, we must be asking about an argument.  The
    // value is overdefined.
    if (NumPreds == 0 && BB == &BB->getParent()->front()) {
      assert(isa<Argument>(Val) && "Unknown live-in to the entry block");
      Result.markOverdefined();
      return Result;
    }
    
    // Return the merged value, which is more precise than 'overdefined'.
    assert(!Result.isOverdefined());
    return Cache[BB] = Result;
  }
  
  // If this value is defined by an instruction in this block, we have to
  // process it here somehow or return overdefined.
  if (PHINode *PN = dyn_cast<PHINode>(BBI)) {
    LVILatticeVal Result;  // Start Undefined.
    
    // Loop over all of our predecessors, merging what we know from them into
    // result.
    for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
      Value* PhiVal = PN->getIncomingValueForBlock(*PI);
      Result.mergeIn(Parent.getValueOnEdge(PhiVal, *PI, BB));
      
      // If we hit overdefined, exit early.  The BlockVals entry is already set
      // to overdefined.
      if (Result.isOverdefined()) {
        DEBUG(dbgs() << " compute BB '" << BB->getName()
                     << "' - overdefined because of pred.\n");
        return Result;
      }
    }
    
    // Return the merged value, which is more precise than 'overdefined'.
    assert(!Result.isOverdefined());
    return Cache[BB] = Result;
  }

  assert(Cache[BB].isOverdefined() && "Recursive query changed our cache?");

  // We can only analyze the definitions of certain classes of instructions
//......... part of the code omitted here .........
Author: jyasskin, Project: llvm-mirror, Lines: 101, Source file: LazyValueInfo.cpp

Example 2: Range

MDNode *MDNode::getMostGenericRange(MDNode *A, MDNode *B) {
  // Given two ranges, we want to compute the union of the ranges. This
  // is slightly complicated by having to combine the intervals and merge
  // the ones that overlap.

  if (!A || !B)
    return nullptr;

  if (A == B)
    return A;

  // First, walk both lists in order of the lower boundary of each interval.
  // At each step, try to merge the new interval into the last one we added.
  SmallVector<Value*, 4> EndPoints;
  int AI = 0;
  int BI = 0;
  int AN = A->getNumOperands() / 2;
  int BN = B->getNumOperands() / 2;
  while (AI < AN && BI < BN) {
    ConstantInt *ALow = cast<ConstantInt>(A->getOperand(2 * AI));
    ConstantInt *BLow = cast<ConstantInt>(B->getOperand(2 * BI));

    if (ALow->getValue().slt(BLow->getValue())) {
      addRange(EndPoints, ALow, cast<ConstantInt>(A->getOperand(2 * AI + 1)));
      ++AI;
    } else {
      addRange(EndPoints, BLow, cast<ConstantInt>(B->getOperand(2 * BI + 1)));
      ++BI;
    }
  }
  while (AI < AN) {
    addRange(EndPoints, cast<ConstantInt>(A->getOperand(2 * AI)),
             cast<ConstantInt>(A->getOperand(2 * AI + 1)));
    ++AI;
  }
  while (BI < BN) {
    addRange(EndPoints, cast<ConstantInt>(B->getOperand(2 * BI)),
             cast<ConstantInt>(B->getOperand(2 * BI + 1)));
    ++BI;
  }

  // If we have more than 2 ranges (4 endpoints) we have to try to merge
  // the last and first ones.
  unsigned Size = EndPoints.size();
  if (Size > 4) {
    ConstantInt *FB = cast<ConstantInt>(EndPoints[0]);
    ConstantInt *FE = cast<ConstantInt>(EndPoints[1]);
    if (tryMergeRange(EndPoints, FB, FE)) {
      for (unsigned i = 0; i < Size - 2; ++i) {
        EndPoints[i] = EndPoints[i + 2];
      }
      EndPoints.resize(Size - 2);
    }
  }

  // If in the end we have a single range, it is possible that it is now the
  // full range. Just drop the metadata in that case.
  if (EndPoints.size() == 2) {
    ConstantRange Range(cast<ConstantInt>(EndPoints[0])->getValue(),
                        cast<ConstantInt>(EndPoints[1])->getValue());
    if (Range.isFullSet())
      return nullptr;
  }

  return MDNode::get(A->getContext(), EndPoints);
}
Author: Drup, Project: llvm, Lines: 66, Source file: Metadata.cpp
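As a hand-worked illustration of what getMostGenericRange computes (the metadata below is made up, not taken from the source):

!0 = !{i32 0, i32 5}     ; the half-open interval [0, 5)
!1 = !{i32 3, i32 10}    ; the half-open interval [3, 10)
; getMostGenericRange(!0, !1) yields !{i32 0, i32 10}: the two intervals
; overlap, so addRange/tryMergeRange collapse them into their union [0, 10).
; If the merged result ends up covering every possible value, isFullSet()
; is true and the function returns nullptr, dropping the !range metadata.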

Example 3: if

/// foldSelectICmpAnd - If one of the constants is zero (we know they can't
/// both be) and we have an icmp instruction with zero, and we have an 'and'
/// with the non-constant value and a power of two, we can turn the select
/// into a shift on the result of the 'and'.
static Value *foldSelectICmpAnd(const SelectInst &SI, ConstantInt *TrueVal,
                                ConstantInt *FalseVal,
                                InstCombiner::BuilderTy *Builder) {
  const ICmpInst *IC = dyn_cast<ICmpInst>(SI.getCondition());
  if (!IC || !IC->isEquality())
    return 0;

  if (!match(IC->getOperand(1), m_Zero()))
    return 0;

  ConstantInt *AndRHS;
  Value *LHS = IC->getOperand(0);
  if (LHS->getType() != SI.getType() ||
      !match(LHS, m_And(m_Value(), m_ConstantInt(AndRHS))))
    return 0;

  // If both select arms are non-zero see if we have a select of the form
  // 'x ? 2^n + C : C'. Then we can offset both arms by C, use the logic
  // for 'x ? 2^n : 0' and fix the thing up at the end.
  ConstantInt *Offset = 0;
  if (!TrueVal->isZero() && !FalseVal->isZero()) {
    if ((TrueVal->getValue() - FalseVal->getValue()).isPowerOf2())
      Offset = FalseVal;
    else if ((FalseVal->getValue() - TrueVal->getValue()).isPowerOf2())
      Offset = TrueVal;
    else
      return 0;

    // Adjust TrueVal and FalseVal to the offset.
    TrueVal = ConstantInt::get(Builder->getContext(),
                               TrueVal->getValue() - Offset->getValue());
    FalseVal = ConstantInt::get(Builder->getContext(),
                                FalseVal->getValue() - Offset->getValue());
  }

  // Make sure the mask in the 'and' and one of the select arms is a power of 2.
  if (!AndRHS->getValue().isPowerOf2() ||
      (!TrueVal->getValue().isPowerOf2() &&
       !FalseVal->getValue().isPowerOf2()))
    return 0;

  // Determine which shift is needed to transform result of the 'and' into the
  // desired result.
  ConstantInt *ValC = !TrueVal->isZero() ? TrueVal : FalseVal;
  unsigned ValZeros = ValC->getValue().logBase2();
  unsigned AndZeros = AndRHS->getValue().logBase2();

  Value *V = LHS;
  if (ValZeros > AndZeros)
    V = Builder->CreateShl(V, ValZeros - AndZeros);
  else if (ValZeros < AndZeros)
    V = Builder->CreateLShr(V, AndZeros - ValZeros);

  // Okay, now we know that everything is set up, we just don't know whether we
  // have a icmp_ne or icmp_eq and whether the true or false val is the zero.
  bool ShouldNotVal = !TrueVal->isZero();
  ShouldNotVal ^= IC->getPredicate() == ICmpInst::ICMP_NE;
  if (ShouldNotVal)
    V = Builder->CreateXor(V, ValC);

  // Apply an offset if needed.
  if (Offset)
    V = Builder->CreateAdd(V, Offset);
  return V;
}
Author: 32bitmicro, Project: llvm, Lines: 69, Source file: InstCombineSelect.cpp
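A hand-worked instance of the transform described in the comment above (the IR is illustrative, not from the source). With an 'and' mask of 4, TrueVal = 0 and FalseVal = 8:

  %a = and i32 %x, 4
  %c = icmp eq i32 %a, 0
  %s = select i1 %c, i32 0, i32 8

folds to a single shift of the 'and' result:

  %a = and i32 %x, 4
  %s = shl i32 %a, 1

Here ValZeros = 3 and AndZeros = 2, so the code emits a left shift by 1; no XOR or offset is needed because the zero constant sits on the true arm of the equality compare.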

Example 4: handle_gep

DyckVertex* AAAnalyzer::handle_gep(GEPOperator* gep) {
    Value * ptr = gep->getPointerOperand();
    DyckVertex* current = wrapValue(ptr);

    gep_type_iterator preGTI = gep_type_begin(gep); // preGTI is the PointerTy of ptr
    gep_type_iterator GTI = gep_type_begin(gep); // GTI is the PointerTy of ptr
    if (GTI != gep_type_end(gep))
        GTI++; // ptr's element type, e.g. struct

    int num_indices = gep->getNumIndices();
    int idxidx = 0;
    while (idxidx < num_indices) {
        Value * idx = gep->getOperand(++idxidx);
        if (/*!isa<ConstantInt>(idx) ||*/ !GTI->isSized()) {
            // current->addProperty("unknown-offset", (void*) 1);
            break;
        }

        if ((*preGTI)->isStructTy()) {
            // example: gep y 0 constIdx
            // s1: y--deref-->?1--(-2-constIdx)-->?2
            DyckVertex* theStruct = this->addPtrTo(current, NULL);

            ConstantInt * ci = dyn_cast<ConstantInt>(idx);
            if (ci == NULL) {
                errs() << ("ERROR: when dealing with gep: \n");
                errs() << *gep << "\n";
                exit(1);
            }
            // field index need not be the same as original value
            // make it be a negative integer
            long fieldIdx = (long) (*(ci->getValue().getRawData()));
            DyckVertex* field = this->addField(theStruct, -2 - fieldIdx, NULL);

            // s2: ?3--deref-->?2
            DyckVertex* fieldPtr = this->addPtrTo(NULL, field);

            /// the label representation and feature impl is temporal. @FIXME
            // s3: y--2-->?3
            current->getRepresentative()->addTarget(fieldPtr->getRepresentative(), (void*) (fieldIdx));

            // update current
            current = fieldPtr;
        } else if ((*preGTI)->isPointerTy() || (*preGTI)->isArrayTy()) {
#ifndef ARRAY_SIMPLIFIED
            current = addPtrOffset(current, getConstantIntRawData(cast<ConstantInt>(idx)) * dl.getTypeAllocSize(*GTI), dgraph);
#endif
        } else {
            errs() << "ERROR in handle_gep: unknown type:\n";
            errs() << "Type Id: " << (*preGTI)->getTypeID() << "\n";
            exit(1);
        }

        if (GTI != gep_type_end(gep))
            GTI++;
        if (preGTI != gep_type_end(gep))
            preGTI++;
    }

    return current;
}
Author: richardxx, Project: canary, Lines: 61, Source file: AAAnalyzer.cpp

Example 5: getPredicateResult

static LazyValueInfo::Tristate getPredicateResult(unsigned Pred, Constant *C,
                                                  LVILatticeVal &Result,
                                                  const DataLayout &DL,
                                                  TargetLibraryInfo *TLI) {

  // If we know the value is a constant, evaluate the conditional.
  Constant *Res = nullptr;
  if (Result.isConstant()) {
    Res = ConstantFoldCompareInstOperands(Pred, Result.getConstant(), C, DL,
                                          TLI);
    if (ConstantInt *ResCI = dyn_cast<ConstantInt>(Res))
      return ResCI->isZero() ? LazyValueInfo::False : LazyValueInfo::True;
    return LazyValueInfo::Unknown;
  }

  if (Result.isConstantRange()) {
    ConstantInt *CI = dyn_cast<ConstantInt>(C);
    if (!CI) return LazyValueInfo::Unknown;

    ConstantRange CR = Result.getConstantRange();
    if (Pred == ICmpInst::ICMP_EQ) {
      if (!CR.contains(CI->getValue()))
        return LazyValueInfo::False;

      if (CR.isSingleElement() && CR.contains(CI->getValue()))
        return LazyValueInfo::True;
    } else if (Pred == ICmpInst::ICMP_NE) {
      if (!CR.contains(CI->getValue()))
        return LazyValueInfo::True;

      if (CR.isSingleElement() && CR.contains(CI->getValue()))
        return LazyValueInfo::False;
    }

    // Handle more complex predicates.
    ConstantRange TrueValues =
        ICmpInst::makeConstantRange((ICmpInst::Predicate)Pred, CI->getValue());
    if (TrueValues.contains(CR))
      return LazyValueInfo::True;
    if (TrueValues.inverse().contains(CR))
      return LazyValueInfo::False;
    return LazyValueInfo::Unknown;
  }

  if (Result.isNotConstant()) {
    // If this is an equality comparison, we can try to fold it knowing that
    // "V != C1".
    if (Pred == ICmpInst::ICMP_EQ) {
      // !C1 == C -> false iff C1 == C.
      Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
                                            Result.getNotConstant(), C, DL,
                                            TLI);
      if (Res->isNullValue())
        return LazyValueInfo::False;
    } else if (Pred == ICmpInst::ICMP_NE) {
      // !C1 != C -> true iff C1 == C.
      Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
                                            Result.getNotConstant(), C, DL,
                                            TLI);
      if (Res->isNullValue())
        return LazyValueInfo::True;
    }
    return LazyValueInfo::Unknown;
  }

  return LazyValueInfo::Unknown;
}
Author: 2asoft, Project: freebsd, Lines: 67, Source file: LazyValueInfo.cpp
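A minimal sketch of the constant-range branch above, using ConstantRange directly (illustrative, not from the source file; header paths differ slightly between LLVM versions):

#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
using namespace llvm;

// Hypothetical example: LVI proved the value lies in the half-open range [1, 10).
static bool foldsEqZeroToFalse() {
  ConstantRange CR(APInt(32, 1), APInt(32, 10));
  APInt C(32, 0);
  // 0 is not in [1, 10), so the comparison "V == 0" folds to False.
  return !CR.contains(C);
}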

Example 6: isObjectSizeLessThanOrEq

// If we can determine that all possible objects pointed to by the provided
// pointer value are, not only dereferenceable, but also definitively less than
// or equal to the provided maximum size, then return true. Otherwise, return
// false (constant global values and allocas fall into this category).
//
// FIXME: This should probably live in ValueTracking (or similar).
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->mayBeOverridden())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    // If we know how big this object is, and it is less than MaxSize, continue
    // searching. Otherwise, return false.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as an
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
        return false;
      continue;
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}
Author: subler, Project: llvm, Lines: 70, Source file: InstCombineLoadStoreAlloca.cpp
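The 128-bit widening in the alloca case above exists to dodge 64-bit wraparound when multiplying the array size by the element size. A minimal sketch of the same idiom (illustrative values; zextOrSelf is the API of this LLVM vintage):

#include "llvm/ADT/APInt.h"
#include <cstdint>
using namespace llvm;

// Hypothetical check: does Count * TypeSize exceed MaxSize, even when the
// product would wrap an ordinary 64-bit multiply?
static bool productExceeds(uint64_t Count, uint64_t TypeSize, uint64_t MaxSize) {
  APInt C(64, Count);
  // e.g. (1 << 33) * (1 << 33) wraps to 0 in 64 bits, but widening both
  // factors to 128 bits preserves the true product for the comparison.
  return (C.zextOrSelf(128) * APInt(128, TypeSize)).ugt(APInt(128, MaxSize));
}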

Example 7: calcMetadataWeights

// Propagate existing explicit probabilities from either profile data or
// 'expect' intrinsic processing.
bool BranchProbabilityInfo::calcMetadataWeights(const BasicBlock *BB) {
  const TerminatorInst *TI = BB->getTerminator();
  if (TI->getNumSuccessors() == 1)
    return false;
  if (!isa<BranchInst>(TI) && !isa<SwitchInst>(TI))
    return false;

  MDNode *WeightsNode = TI->getMetadata(LLVMContext::MD_prof);
  if (!WeightsNode)
    return false;

  // Check that the number of successors is manageable.
  assert(TI->getNumSuccessors() < UINT32_MAX && "Too many successors");

  // Ensure there are weights for all of the successors. Note that the first
  // operand to the metadata node is a name, not a weight.
  if (WeightsNode->getNumOperands() != TI->getNumSuccessors() + 1)
    return false;

  // Build up the final weights that will be used in a temporary buffer.
  // Compute the sum of all weights to later decide whether they need to
  // be scaled to fit in 32 bits.
  uint64_t WeightSum = 0;
  SmallVector<uint32_t, 2> Weights;
  Weights.reserve(TI->getNumSuccessors());
  for (unsigned i = 1, e = WeightsNode->getNumOperands(); i != e; ++i) {
    ConstantInt *Weight =
        mdconst::dyn_extract<ConstantInt>(WeightsNode->getOperand(i));
    if (!Weight)
      return false;
    assert(Weight->getValue().getActiveBits() <= 32 &&
           "Too many bits for uint32_t");
    Weights.push_back(Weight->getZExtValue());
    WeightSum += Weights.back();
  }
  assert(Weights.size() == TI->getNumSuccessors() && "Checked above");

  // If the sum of weights does not fit in 32 bits, scale every weight down
  // accordingly.
  uint64_t ScalingFactor =
      (WeightSum > UINT32_MAX) ? WeightSum / UINT32_MAX + 1 : 1;

  WeightSum = 0;
  for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
    Weights[i] /= ScalingFactor;
    WeightSum += Weights[i];
  }

  if (WeightSum == 0) {
    for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
      setEdgeProbability(BB, i, {1, e});
  } else {
    for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
      setEdgeProbability(BB, i, {Weights[i], static_cast<uint32_t>(WeightSum)});
  }

  assert(WeightSum <= UINT32_MAX &&
         "Expected weights to scale down to 32 bits");

  return true;
}
Author: AlexDenisov, Project: llvm, Lines: 63, Source file: BranchProbabilityInfo.cpp
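For reference, the !prof metadata this function consumes looks like the following (illustrative IR, not from the source); the first operand is the string "branch_weights", which is why the loop above starts at operand index 1:

br i1 %cond, label %fast, label %slow, !prof !0

!0 = !{!"branch_weights", i32 2000, i32 1}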

Example 8: combineRangeChecks

bool GuardWideningImpl::combineRangeChecks(
    SmallVectorImpl<GuardWideningImpl::RangeCheck> &Checks,
    SmallVectorImpl<GuardWideningImpl::RangeCheck> &RangeChecksOut) {
  unsigned OldCount = Checks.size();
  while (!Checks.empty()) {
    // Pick all of the range checks with a specific base and length, and try to
    // merge them.
    Value *CurrentBase = Checks.front().getBase();
    Value *CurrentLength = Checks.front().getLength();

    SmallVector<GuardWideningImpl::RangeCheck, 3> CurrentChecks;

    auto IsCurrentCheck = [&](GuardWideningImpl::RangeCheck &RC) {
      return RC.getBase() == CurrentBase && RC.getLength() == CurrentLength;
    };

    std::copy_if(Checks.begin(), Checks.end(),
                 std::back_inserter(CurrentChecks), IsCurrentCheck);
    Checks.erase(remove_if(Checks, IsCurrentCheck), Checks.end());

    assert(CurrentChecks.size() != 0 && "We know we have at least one!");

    if (CurrentChecks.size() < 3) {
      RangeChecksOut.insert(RangeChecksOut.end(), CurrentChecks.begin(),
                            CurrentChecks.end());
      continue;
    }

    // CurrentChecks.size() will typically be 3 here, but so far there has been
    // no need to hard-code that fact.

    std::sort(CurrentChecks.begin(), CurrentChecks.end(),
              [&](const GuardWideningImpl::RangeCheck &LHS,
                  const GuardWideningImpl::RangeCheck &RHS) {
      return LHS.getOffsetValue().slt(RHS.getOffsetValue());
    });

    // Note: std::sort should not invalidate the ChecksStart iterator.

    ConstantInt *MinOffset = CurrentChecks.front().getOffset(),
                *MaxOffset = CurrentChecks.back().getOffset();

    unsigned BitWidth = MaxOffset->getValue().getBitWidth();
    if ((MaxOffset->getValue() - MinOffset->getValue())
            .ugt(APInt::getSignedMinValue(BitWidth)))
      return false;

    APInt MaxDiff = MaxOffset->getValue() - MinOffset->getValue();
    const APInt &HighOffset = MaxOffset->getValue();
    auto OffsetOK = [&](const GuardWideningImpl::RangeCheck &RC) {
      return (HighOffset - RC.getOffsetValue()).ult(MaxDiff);
    };

    if (MaxDiff.isMinValue() ||
        !std::all_of(std::next(CurrentChecks.begin()), CurrentChecks.end(),
                     OffsetOK))
      return false;

    // We have a series of f+1 checks as:
    //
    //   I+k_0 u< L   ... Chk_0
    //   I+k_1 u< L   ... Chk_1
    //   ...
    //   I+k_f u< L   ... Chk_(f+1)
    //
    //     with forall i in [0,f): k_f-k_i u< k_f-k_0  ... Precond_0
    //          k_f-k_0 u< INT_MIN+k_f                 ... Precond_1
    //          k_f != k_0                             ... Precond_2
    //
    // Claim:
    //   Chk_0 AND Chk_(f+1)  implies all the other checks
    //
    // Informal proof sketch:
    //
    // We will show that the integer range [I+k_0,I+k_f] does not unsigned-wrap
    // (i.e. going from I+k_0 to I+k_f does not cross the -1,0 boundary) and
    // thus I+k_f is the greatest unsigned value in that range.
    //
    // This combined with Chk_(f+1) shows that everything in that range is u< L.
    // Via Precond_0 we know that all of the indices in Chk_0 through Chk_(f+1)
    // lie in [I+k_0,I+k_f], this proving our claim.
    //
    // To see that [I+k_0,I+k_f] is not a wrapping range, note that there are
    // two possibilities: I+k_0 u< I+k_f or I+k_0 >u I+k_f (they can't be equal
    // since k_0 != k_f).  In the former case, [I+k_0,I+k_f] is not a wrapping
    // range by definition, and the latter case is impossible:
    //
    //   0-----I+k_f---I+k_0----L---INT_MAX,INT_MIN------------------(-1)
    //   xxxxxx             xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    //
    // For Chk_0 to succeed, we'd have to have k_f-k_0 (the range highlighted
    // with 'x' above) to be at least >u INT_MIN.

    RangeChecksOut.emplace_back(CurrentChecks.front());
    RangeChecksOut.emplace_back(CurrentChecks.back());
  }

  assert(RangeChecksOut.size() <= OldCount && "We pessimized!");
  return RangeChecksOut.size() != OldCount;
}
Author: CristinaCristescu, Project: llvm, Lines: 100, Source file: GuardWidening.cpp
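A concrete reading of the claim above (hand-worked, not from the source): given range checks I+0 u< L, I+4 u< L and I+8 u< L over the same base and length, only the checks with the minimum and maximum offsets (0 and 8) are kept. Provided the offset spread passes the (MaxOffset - MinOffset) wrap guard earlier in the function, I+0 u< L and I+8 u< L together imply I+4 u< L, so the middle check is redundant.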

Example 9: CanEvaluateShifted

/// CanEvaluateShifted - See if we can compute the specified value, but shifted
/// logically to the left or right by some number of bits.  This should return
/// true if the expression can be computed for the same cost as the current
/// expression tree.  This is used to eliminate extraneous shifting from things
/// like:
///      %C = shl i128 %A, 64
///      %D = shl i128 %B, 96
///      %E = or i128 %C, %D
///      %F = lshr i128 %E, 64
/// where the client will ask if E can be computed shifted right by 64-bits.  If
/// this succeeds, the GetShiftedValue function will be called to produce the
/// value.
static bool CanEvaluateShifted(Value *V, unsigned NumBits, bool isLeftShift,
                               InstCombiner &IC) {
  // We can always evaluate constants shifted.
  if (isa<Constant>(V))
    return true;

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // If this is the opposite shift, we can directly reuse the input of the shift
  // if the needed bits are already zero in the input.  This allows us to reuse
  // the value which means that we don't care if the shift has multiple uses.
  //  TODO:  Handle opposite shift by exact value.
  ConstantInt *CI = 0;
  if ((isLeftShift && match(I, m_LShr(m_Value(), m_ConstantInt(CI)))) ||
      (!isLeftShift && match(I, m_Shl(m_Value(), m_ConstantInt(CI))))) {
    if (CI->getZExtValue() == NumBits) {
      // TODO: Check that the input bits are already zero with MaskedValueIsZero
#if 0
      // If this is a truncate of a logical shr, we can truncate it to a smaller
      // lshr iff we know that the bits we would otherwise be shifting in are
      // already zeros.
      uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
      uint32_t BitWidth = Ty->getScalarSizeInBits();
      if (MaskedValueIsZero(I->getOperand(0),
            APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth)) &&
          CI->getLimitedValue(BitWidth) < BitWidth) {
        return CanEvaluateTruncated(I->getOperand(0), Ty);
      }
#endif

    }
  }

  // We can't mutate something that has multiple uses: doing so would
  // require duplicating the instruction in general, which isn't profitable.
  if (!I->hasOneUse()) return false;

  switch (I->getOpcode()) {
  default: return false;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // Bitwise operators can all be evaluated shifted by an arbitrary amount.
    return CanEvaluateShifted(I->getOperand(0), NumBits, isLeftShift, IC) &&
           CanEvaluateShifted(I->getOperand(1), NumBits, isLeftShift, IC);

  case Instruction::Shl: {
    // We can often fold the shift into shifts-by-a-constant.
    CI = dyn_cast<ConstantInt>(I->getOperand(1));
    if (CI == 0) return false;

    // We can always fold shl(c1)+shl(c2) -> shl(c1+c2).
    if (isLeftShift) return true;

    // We can always turn shl(c)+shr(c) -> and(c2).
    if (CI->getValue() == NumBits) return true;

    unsigned TypeWidth = I->getType()->getScalarSizeInBits();

    // We can turn shl(c1)+shr(c2) -> shl(c3)+and(c4), but it isn't
    // profitable unless we know the and'd out bits are already zero.
    if (CI->getZExtValue() > NumBits) {
      unsigned LowBits = TypeWidth - CI->getZExtValue();
      if (MaskedValueIsZero(I->getOperand(0),
                       APInt::getLowBitsSet(TypeWidth, NumBits) << LowBits))
        return true;
    }

    return false;
  }
  case Instruction::LShr: {
    // We can often fold the shift into shifts-by-a-constant.
    CI = dyn_cast<ConstantInt>(I->getOperand(1));
    if (CI == 0) return false;

    // We can always fold lshr(c1)+lshr(c2) -> lshr(c1+c2).
    if (!isLeftShift) return true;

    // We can always turn lshr(c)+shl(c) -> and(c2).
    if (CI->getValue() == NumBits) return true;

    unsigned TypeWidth = I->getType()->getScalarSizeInBits();

    // We can always turn lshr(c1)+shl(c2) -> lshr(c3)+and(c4), but it isn't
    // profitable unless we know the and'd out bits are already zero.
    if (CI->getValue().ult(TypeWidth) && CI->getZExtValue() > NumBits) {
      unsigned LowBits = CI->getZExtValue() - NumBits;
//......... part of the code omitted here .........
Author: 7heaven, Project: softart, Lines: 101, Source file: InstCombineShifts.cpp

Example 10: visitCallSite

SizeOffsetType ObjectSizeOffsetVisitor::visitCallSite(CallSite CS) {
  Optional<AllocFnsTy> FnData =
      getAllocationData(CS.getInstruction(), AnyAlloc, TLI);
  if (!FnData)
    return unknown();

  // handle strdup-like functions separately
  if (FnData->AllocTy == StrDupLike) {
    APInt Size(IntTyBits, GetStringLength(CS.getArgument(0)));
    if (!Size)
      return unknown();

    // strndup limits strlen
    if (FnData->FstParam > 0) {
      ConstantInt *Arg =
          dyn_cast<ConstantInt>(CS.getArgument(FnData->FstParam));
      if (!Arg)
        return unknown();

      APInt MaxSize = Arg->getValue().zextOrSelf(IntTyBits);
      if (Size.ugt(MaxSize))
        Size = MaxSize + 1;
    }
    return std::make_pair(Size, Zero);
  }

  ConstantInt *Arg = dyn_cast<ConstantInt>(CS.getArgument(FnData->FstParam));
  if (!Arg)
    return unknown();

  // When we're compiling N-bit code, and the user uses parameters that are
  // greater than N bits (e.g. uint64_t on a 32-bit build), we can run into
  // trouble with APInt size issues. This function handles resizing + overflow
  // checks for us.
  auto CheckedZextOrTrunc = [&](APInt &I) {
    // More bits than we can handle. Checking the bit width isn't necessary, but
    // it's faster than checking active bits, and should give `false` in the
    // vast majority of cases.
    if (I.getBitWidth() > IntTyBits && I.getActiveBits() > IntTyBits)
      return false;
    if (I.getBitWidth() != IntTyBits)
      I = I.zextOrTrunc(IntTyBits);
    return true;
  };

  APInt Size = Arg->getValue();
  if (!CheckedZextOrTrunc(Size))
    return unknown();

  // size determined by just 1 parameter
  if (FnData->SndParam < 0)
    return std::make_pair(Size, Zero);

  Arg = dyn_cast<ConstantInt>(CS.getArgument(FnData->SndParam));
  if (!Arg)
    return unknown();

  APInt NumElems = Arg->getValue();
  if (!CheckedZextOrTrunc(NumElems))
    return unknown();

  bool Overflow;
  Size = Size.umul_ov(NumElems, Overflow);
  return Overflow ? unknown() : std::make_pair(Size, Zero);

  // TODO: handle more standard functions (+ wchar cousins):
  // - strdup / strndup
  // - strcpy / strncpy
  // - strcat / strncat
  // - memcpy / memmove
  // - strcat / strncat
  // - memset
}
Author: campuslifeceo, Project: llvm, Lines: 73, Source file: MemoryBuiltins.cpp
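The two-parameter case (a calloc-style allocator) multiplies element size by element count with an overflow check. A minimal sketch of the APInt idiom used above (values and helper name made up):

#include "llvm/ADT/APInt.h"
#include <cstdint>
using namespace llvm;

// Hypothetical helper: compute NumElems * ElemSize as a 64-bit APInt,
// reporting failure instead of silently wrapping.
static bool checkedAllocSize(uint64_t NumElems, uint64_t ElemSize, APInt &Out) {
  bool Overflow;
  Out = APInt(64, ElemSize).umul_ov(APInt(64, NumElems), Overflow);
  return !Overflow;   // e.g. 16 * 32 -> 512 with Overflow == false
}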

Example 11: getNullValue

/// GetShiftedValue - When CanEvaluateShifted returned true for an expression,
/// this value inserts the new computation that produces the shifted value.
static Value *GetShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
                              InstCombiner &IC) {
  // We can always evaluate constants shifted.
  if (Constant *C = dyn_cast<Constant>(V)) {
    if (isLeftShift)
      V = IC.Builder->CreateShl(C, NumBits);
    else
      V = IC.Builder->CreateLShr(C, NumBits);
    // If we got a constantexpr back, try to simplify it with TD info.
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      V = ConstantFoldConstantExpression(CE, IC.getDataLayout(),
                                         IC.getTargetLibraryInfo());
    return V;
  }

  Instruction *I = cast<Instruction>(V);
  IC.Worklist.Add(I);

  switch (I->getOpcode()) {
  default: llvm_unreachable("Inconsistency with CanEvaluateShifted");
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // Bitwise operators can all be evaluated shifted by an arbitrary amount.
    I->setOperand(0, GetShiftedValue(I->getOperand(0), NumBits,isLeftShift,IC));
    I->setOperand(1, GetShiftedValue(I->getOperand(1), NumBits,isLeftShift,IC));
    return I;

  case Instruction::Shl: {
    BinaryOperator *BO = cast<BinaryOperator>(I);
    unsigned TypeWidth = BO->getType()->getScalarSizeInBits();

    // We only accept shifts-by-a-constant in CanEvaluateShifted.
    ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1));

    // We can always fold shl(c1)+shl(c2) -> shl(c1+c2).
    if (isLeftShift) {
      // If this is oversized composite shift, then unsigned shifts get 0.
      unsigned NewShAmt = NumBits+CI->getZExtValue();
      if (NewShAmt >= TypeWidth)
        return Constant::getNullValue(I->getType());

      BO->setOperand(1, ConstantInt::get(BO->getType(), NewShAmt));
      BO->setHasNoUnsignedWrap(false);
      BO->setHasNoSignedWrap(false);
      return I;
    }

    // We turn shl(c)+lshr(c) -> and(c2) if the input doesn't already have
    // zeros.
    if (CI->getValue() == NumBits) {
      APInt Mask(APInt::getLowBitsSet(TypeWidth, TypeWidth - NumBits));
      V = IC.Builder->CreateAnd(BO->getOperand(0),
                                ConstantInt::get(BO->getContext(), Mask));
      if (Instruction *VI = dyn_cast<Instruction>(V)) {
        VI->moveBefore(BO);
        VI->takeName(BO);
      }
      return V;
    }

    // We turn shl(c1)+shr(c2) -> shl(c3)+and(c4), but only when we know that
    // the and won't be needed.
    assert(CI->getZExtValue() > NumBits);
    BO->setOperand(1, ConstantInt::get(BO->getType(),
                                       CI->getZExtValue() - NumBits));
    BO->setHasNoUnsignedWrap(false);
    BO->setHasNoSignedWrap(false);
    return BO;
  }
  case Instruction::LShr: {
    BinaryOperator *BO = cast<BinaryOperator>(I);
    unsigned TypeWidth = BO->getType()->getScalarSizeInBits();
    // We only accept shifts-by-a-constant in CanEvaluateShifted.
    ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1));

    // We can always fold lshr(c1)+lshr(c2) -> lshr(c1+c2).
    if (!isLeftShift) {
      // If this is oversized composite shift, then unsigned shifts get 0.
      unsigned NewShAmt = NumBits+CI->getZExtValue();
      if (NewShAmt >= TypeWidth)
        return Constant::getNullValue(BO->getType());

      BO->setOperand(1, ConstantInt::get(BO->getType(), NewShAmt));
      BO->setIsExact(false);
      return I;
    }

    // We turn lshr(c)+shl(c) -> and(c2) if the input doesn't already have
    // zeros.
    if (CI->getValue() == NumBits) {
      APInt Mask(APInt::getHighBitsSet(TypeWidth, TypeWidth - NumBits));
      V = IC.Builder->CreateAnd(I->getOperand(0),
                                ConstantInt::get(BO->getContext(), Mask));
      if (Instruction *VI = dyn_cast<Instruction>(V)) {
        VI->moveBefore(I);
        VI->takeName(I);
      }
//......... part of the code omitted here .........
Author: 7heaven, Project: softart, Lines: 101, Source file: InstCombineShifts.cpp

Example 12: isDereferenceablePointer

/// \brief Check if Value is always a dereferenceable pointer.
///
/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceablePointer(const Value *V, const DataLayout *DL,
                                     SmallPtrSetImpl<const Value *> &Visited) {
  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

  // These are obviously ok.
  if (isa<AllocaInst>(V)) return true;

  // It's not always safe to follow a bitcast, for example:
  //   bitcast i8* (alloca i8) to i32*
  // would result in a 4-byte load from a 1-byte alloca. However,
  // if we're casting from a pointer from a type of larger size
  // to a type of smaller size (or the same size), and the alignment
  // is at least as large as for the resulting pointer type, then
  // we can look through the bitcast.
  if (DL)
    if (const BitCastInst* BC = dyn_cast<BitCastInst>(V)) {
      Type *STy = BC->getSrcTy()->getPointerElementType(),
           *DTy = BC->getDestTy()->getPointerElementType();
      if (STy->isSized() && DTy->isSized() &&
          (DL->getTypeStoreSize(STy) >=
           DL->getTypeStoreSize(DTy)) &&
          (DL->getABITypeAlignment(STy) >=
           DL->getABITypeAlignment(DTy)))
        return isDereferenceablePointer(BC->getOperand(0), DL, Visited);
    }

  // Global variables which can't collapse to null are ok.
  if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return !GV->hasExternalWeakLinkage();

  // byval arguments are okay. Arguments specifically marked as
  // dereferenceable are okay too.
  if (const Argument *A = dyn_cast<Argument>(V)) {
    if (A->hasByValAttr())
      return true;
    else if (uint64_t Bytes = A->getDereferenceableBytes()) {
      Type *Ty = V->getType()->getPointerElementType();
      if (Ty->isSized() && DL && DL->getTypeStoreSize(Ty) <= Bytes)
        return true;
    }

    return false;
  }

  // Return values from call sites specifically marked as dereferenceable are
  // also okay.
  if (ImmutableCallSite CS = V) {
    if (uint64_t Bytes = CS.getDereferenceableBytes(0)) {
      Type *Ty = V->getType()->getPointerElementType();
      if (Ty->isSized() && DL && DL->getTypeStoreSize(Ty) <= Bytes)
        return true;
    }
  }

  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    // Conservatively require that the base pointer be fully dereferenceable.
    if (!Visited.insert(GEP->getOperand(0)).second)
      return false;
    if (!isDereferenceablePointer(GEP->getOperand(0), DL, Visited))
      return false;
    // Check the indices.
    gep_type_iterator GTI = gep_type_begin(GEP);
    for (User::const_op_iterator I = GEP->op_begin()+1,
         E = GEP->op_end(); I != E; ++I) {
      Value *Index = *I;
      Type *Ty = *GTI++;
      // Struct indices can't be out of bounds.
      if (isa<StructType>(Ty))
        continue;
      ConstantInt *CI = dyn_cast<ConstantInt>(Index);
      if (!CI)
        return false;
      // Zero is always ok.
      if (CI->isZero())
        continue;
      // Check to see that it's within the bounds of an array.
      ArrayType *ATy = dyn_cast<ArrayType>(Ty);
      if (!ATy)
        return false;
      if (CI->getValue().getActiveBits() > 64)
        return false;
      if (CI->getZExtValue() >= ATy->getNumElements())
        return false;
    }
    // Indices check out; this is dereferenceable.
    return true;
  }

  if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
    return isDereferenceablePointer(ASC->getOperand(0), DL, Visited);

  // If we don't know, assume the worst.
  return false;
}
Author: MessiahAndrw, Project: Perception, Lines: 100, Source file: Value.cpp
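A small IR illustration of the GEP case (made-up IR, written in the pre-3.7 textual syntax this code targets):

%buf = alloca [4 x i32]
%elt = getelementptr inbounds [4 x i32]* %buf, i64 0, i64 2

The base is an alloca, so it is dereferenceable; both indices are ConstantInts; the leading 0 is trivially fine; and getValue()/getZExtValue() show that 2 is below the array length of 4, so the GEP result is treated as dereferenceable. An index of 4 or more would make the function return false.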

Example 13: calcMetadataWeights

// Propagate existing explicit probabilities from either profile data or
// 'expect' intrinsic processing. Examine metadata against unreachable
// heuristic. The probability of the edge coming to unreachable block is
// set to min of metadata and unreachable heuristic.
bool BranchProbabilityInfo::calcMetadataWeights(const BasicBlock *BB) {
  const TerminatorInst *TI = BB->getTerminator();
  assert(TI->getNumSuccessors() > 1 && "expected more than one successor!");
  if (!isa<BranchInst>(TI) && !isa<SwitchInst>(TI))
    return false;

  MDNode *WeightsNode = TI->getMetadata(LLVMContext::MD_prof);
  if (!WeightsNode)
    return false;

  // Check that the number of successors is manageable.
  assert(TI->getNumSuccessors() < UINT32_MAX && "Too many successors");

  // Ensure there are weights for all of the successors. Note that the first
  // operand to the metadata node is a name, not a weight.
  if (WeightsNode->getNumOperands() != TI->getNumSuccessors() + 1)
    return false;

  // Build up the final weights that will be used in a temporary buffer.
  // Compute the sum of all weights to later decide whether they need to
  // be scaled to fit in 32 bits.
  uint64_t WeightSum = 0;
  SmallVector<uint32_t, 2> Weights;
  SmallVector<unsigned, 2> UnreachableIdxs;
  SmallVector<unsigned, 2> ReachableIdxs;
  Weights.reserve(TI->getNumSuccessors());
  for (unsigned i = 1, e = WeightsNode->getNumOperands(); i != e; ++i) {
    ConstantInt *Weight =
        mdconst::dyn_extract<ConstantInt>(WeightsNode->getOperand(i));
    if (!Weight)
      return false;
    assert(Weight->getValue().getActiveBits() <= 32 &&
           "Too many bits for uint32_t");
    Weights.push_back(Weight->getZExtValue());
    WeightSum += Weights.back();
    if (PostDominatedByUnreachable.count(TI->getSuccessor(i - 1)))
      UnreachableIdxs.push_back(i - 1);
    else
      ReachableIdxs.push_back(i - 1);
  }
  assert(Weights.size() == TI->getNumSuccessors() && "Checked above");

  // If the sum of weights does not fit in 32 bits, scale every weight down
  // accordingly.
  uint64_t ScalingFactor =
      (WeightSum > UINT32_MAX) ? WeightSum / UINT32_MAX + 1 : 1;

  if (ScalingFactor > 1) {
    WeightSum = 0;
    for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
      Weights[i] /= ScalingFactor;
      WeightSum += Weights[i];
    }
  }
  assert(WeightSum <= UINT32_MAX &&
         "Expected weights to scale down to 32 bits");

  if (WeightSum == 0 || ReachableIdxs.size() == 0) {
    for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
      Weights[i] = 1;
    WeightSum = TI->getNumSuccessors();
  }

  // Set the probability.
  SmallVector<BranchProbability, 2> BP;
  for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
    BP.push_back({ Weights[i], static_cast<uint32_t>(WeightSum) });

  // Examine the metadata against unreachable heuristic.
  // If the unreachable heuristic is more strong then we use it for this edge.
  if (UnreachableIdxs.size() > 0 && ReachableIdxs.size() > 0) {
    auto ToDistribute = BranchProbability::getZero();
    auto UnreachableProb = UR_TAKEN_PROB;
    for (auto i : UnreachableIdxs)
      if (UnreachableProb < BP[i]) {
        ToDistribute += BP[i] - UnreachableProb;
        BP[i] = UnreachableProb;
      }

    // If we modified the probability of some edges then we must distribute
    // the difference between reachable blocks.
    if (ToDistribute > BranchProbability::getZero()) {
      BranchProbability PerEdge = ToDistribute / ReachableIdxs.size();
      for (auto i : ReachableIdxs)
        BP[i] += PerEdge;
    }
  }

  for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
    setEdgeProbability(BB, i, BP[i]);

  return true;
}
Author: mikaoP, Project: llvm, Lines: 97, Source file: BranchProbabilityInfo.cpp

Example 14: newLeafBlock

// switchConvert - Convert the switch statement into a binary lookup of
// the case values. The function recursively builds this tree.
// LowerBound and UpperBound are used to keep track of the bounds for Val
// that have already been checked by a block emitted by one of the previous
// calls to switchConvert in the call stack.
BasicBlock *
LowerSwitch::switchConvert(CaseItr Begin, CaseItr End, ConstantInt *LowerBound,
                           ConstantInt *UpperBound, Value *Val,
                           BasicBlock *Predecessor, BasicBlock *OrigBlock,
                           BasicBlock *Default,
                           const std::vector<IntRange> &UnreachableRanges) {
  unsigned Size = End - Begin;

  if (Size == 1) {
    // Check if the Case Range is perfectly squeezed in between
    // already checked Upper and Lower bounds. If it is then we can avoid
    // emitting the code that checks if the value actually falls in the range
    // because the bounds already tell us so.
    if (Begin->Low == LowerBound && Begin->High == UpperBound) {
      unsigned NumMergedCases = 0;
      if (LowerBound && UpperBound)
        NumMergedCases =
            UpperBound->getSExtValue() - LowerBound->getSExtValue();
      fixPhis(Begin->BB, OrigBlock, Predecessor, NumMergedCases);
      return Begin->BB;
    }
    return newLeafBlock(*Begin, Val, OrigBlock, Default);
  }

  unsigned Mid = Size / 2;
  std::vector<CaseRange> LHS(Begin, Begin + Mid);
  DEBUG(dbgs() << "LHS: " << LHS << "\n");
  std::vector<CaseRange> RHS(Begin + Mid, End);
  DEBUG(dbgs() << "RHS: " << RHS << "\n");

  CaseRange &Pivot = *(Begin + Mid);
  DEBUG(dbgs() << "Pivot ==> "
               << Pivot.Low->getValue()
               << " -" << Pivot.High->getValue() << "\n");

  // NewLowerBound here should never be the integer minimal value.
  // This is because it is computed from a case range that is never
  // the smallest, so there is always a case range that has at least
  // a smaller value.
  ConstantInt *NewLowerBound = Pivot.Low;

  // Because NewLowerBound is never the smallest representable integer
  // it is safe here to subtract one.
  ConstantInt *NewUpperBound = ConstantInt::get(NewLowerBound->getContext(),
                                                NewLowerBound->getValue() - 1);

  if (!UnreachableRanges.empty()) {
    // Check if the gap between LHS's highest and NewLowerBound is unreachable.
    int64_t GapLow = LHS.back().High->getSExtValue() + 1;
    int64_t GapHigh = NewLowerBound->getSExtValue() - 1;
    IntRange Gap = { GapLow, GapHigh };
    if (GapHigh >= GapLow && IsInRanges(Gap, UnreachableRanges))
      NewUpperBound = LHS.back().High;
  }

  DEBUG(dbgs() << "LHS Bounds ==> ";
        if (LowerBound) {
          dbgs() << LowerBound->getSExtValue();
        } else {
          dbgs() << "NONE";
        }
        dbgs() << " - " << NewUpperBound->getSExtValue() << "\n";
        dbgs() << "RHS Bounds ==> ";
        dbgs() << NewLowerBound->getSExtValue() << " - ";
        if (UpperBound) {
          dbgs() << UpperBound->getSExtValue() << "\n";
        } else {
          dbgs() << "NONE\n";
        });
Author: dongAxis, Project: clang-700.0.72, Lines: 74, Source file: LowerSwitch.cpp
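A hand-worked illustration of the split (case values made up): for cases {1, 2, 3, 10, 11, 12}, Size is 6 and Mid is 3, so LHS = {1, 2, 3} and RHS = {10, 11, 12}. The pivot is the case for 10, so NewLowerBound is 10 and NewUpperBound starts out as 9; if the gap 4..9 is known to be unreachable, NewUpperBound is tightened to 3 (LHS.back().High), which lets the recursion on the left half know its highest case already coincides with the checked upper bound and avoid a redundant comparison.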

Example 15: EvaluateBlock


//......... part of the code omitted here .........
        DEBUG(dbgs() << "Found an array alloca. Can not evaluate.\n");
        return false;  // Cannot handle array allocs.
      }
      Type *Ty = AI->getAllocatedType();
      AllocaTmps.push_back(
          make_unique<GlobalVariable>(Ty, false, GlobalValue::InternalLinkage,
                                      UndefValue::get(Ty), AI->getName()));
      InstResult = AllocaTmps.back().get();
      DEBUG(dbgs() << "Found an alloca. Result: " << *InstResult << "\n");
    } else if (isa<CallInst>(CurInst) || isa<InvokeInst>(CurInst)) {
      CallSite CS(&*CurInst);

      // Debug info can safely be ignored here.
      if (isa<DbgInfoIntrinsic>(CS.getInstruction())) {
        DEBUG(dbgs() << "Ignoring debug info.\n");
        ++CurInst;
        continue;
      }

      // Cannot handle inline asm.
      if (isa<InlineAsm>(CS.getCalledValue())) {
        DEBUG(dbgs() << "Found inline asm, can not evaluate.\n");
        return false;
      }

      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
        if (MemSetInst *MSI = dyn_cast<MemSetInst>(II)) {
          if (MSI->isVolatile()) {
            DEBUG(dbgs() << "Can not optimize a volatile memset " <<
                  "intrinsic.\n");
            return false;
          }
          Constant *Ptr = getVal(MSI->getDest());
          Constant *Val = getVal(MSI->getValue());
          Constant *DestVal = ComputeLoadResult(getVal(Ptr));
          if (Val->isNullValue() && DestVal && DestVal->isNullValue()) {
            // This memset is a no-op.
            DEBUG(dbgs() << "Ignoring no-op memset.\n");
            ++CurInst;
            continue;
          }
        }

        if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
            II->getIntrinsicID() == Intrinsic::lifetime_end) {
          DEBUG(dbgs() << "Ignoring lifetime intrinsic.\n");
          ++CurInst;
          continue;
        }

        if (II->getIntrinsicID() == Intrinsic::invariant_start) {
          // We don't insert an entry into Values, as it doesn't have a
          // meaningful return value.
          if (!II->use_empty()) {
            DEBUG(dbgs() << "Found unused invariant_start. Can't evaluate.\n");
            return false;
          }
          ConstantInt *Size = cast<ConstantInt>(II->getArgOperand(0));
          Value *PtrArg = getVal(II->getArgOperand(1));
          Value *Ptr = PtrArg->stripPointerCasts();
          if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) {
            Type *ElemTy = GV->getValueType();
            if (!Size->isAllOnesValue() &&
                Size->getValue().getLimitedValue() >=
                    DL.getTypeStoreSize(ElemTy)) {
              Invariants.insert(GV);
Author: LedgerHQ, Project: llvm, Lines: 67, Source file: Evaluator.cpp


Note: The ConstantInt::getValue method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs; the snippets were selected from open-source projects contributed by various developers. The copyright of the source code belongs to the original authors; for distribution and use, refer to each project's license. Do not reproduce without permission.