

C++ SmallVector::size Method Code Examples

This article collects typical usage examples of the C++ method SmallVector::size. If you are wondering what exactly SmallVector::size does, how to call it, or what it looks like in real code, the hand-picked examples below may help. You can also explore further usage examples of the SmallVector class that this method belongs to.


Fifteen code examples of the SmallVector::size method are shown below, sorted by popularity by default. Each example is taken from a real open-source project and is credited to its contributor at the end of the snippet.
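
Before the extracted examples, here is a minimal, self-contained sketch of what SmallVector::size does. It is illustrative only and not taken from any of the projects below; it assumes an LLVM build environment providing llvm/ADT/SmallVector.h and llvm/Support/raw_ostream.h.

#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  // SmallVector keeps the first N elements (here 8) in inline storage and
  // only allocates on the heap once it grows past that.
  llvm::SmallVector<int, 8> Values;
  for (int I = 0; I < 5; ++I)
    Values.push_back(I * I);

  // size() returns the current number of elements, independent of capacity.
  llvm::outs() << "Values.size() = " << Values.size() << '\n';

  // The loop idiom used throughout the examples below: cache size() in a
  // local when the loop body does not change the vector's length.
  for (unsigned I = 0, E = Values.size(); I != E; ++I)
    llvm::outs() << Values[I] << ' ';
  llvm::outs() << '\n';
  return 0;
}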

Example 1: if


//......... some code omitted here .........
        HandlePluralModifier(*this, (unsigned)Val, Argument, ArgumentLen,
                             OutStr);
      } else if (ModifierIs(Modifier, ModifierLen, "ordinal")) {
        HandleOrdinalModifier(Val, OutStr);
      } else {
        assert(ModifierLen == 0 && "Unknown integer modifier");
        llvm::raw_svector_ostream(OutStr) << Val;
      }
      break;
    }
    // ---- NAMES and TYPES ----
    case DiagnosticsEngine::ak_identifierinfo: {
      const IdentifierInfo *II = getArgIdentifier(ArgNo);
      assert(ModifierLen == 0 && "No modifiers for strings yet");

      // Don't crash if we get passed a null pointer by accident.
      if (!II) {
        const char *S = "(null)";
        OutStr.append(S, S + strlen(S));
        continue;
      }

      llvm::raw_svector_ostream(OutStr) << '\'' << II->getName() << '\'';
      break;
    }
    case DiagnosticsEngine::ak_qualtype:
    case DiagnosticsEngine::ak_declarationname:
    case DiagnosticsEngine::ak_nameddecl:
    case DiagnosticsEngine::ak_nestednamespec:
    case DiagnosticsEngine::ak_declcontext:
      getDiags()->ConvertArgToString(Kind, getRawArg(ArgNo),
                                     Modifier, ModifierLen,
                                     Argument, ArgumentLen,
                                     FormattedArgs.data(), FormattedArgs.size(),
                                     OutStr, QualTypeVals);
      break;
    case DiagnosticsEngine::ak_qualtype_pair:
      // Create a struct with all the info needed for printing.
      TemplateDiffTypes TDT;
      TDT.FromType = getRawArg(ArgNo);
      TDT.ToType = getRawArg(ArgNo2);
      TDT.ElideType = getDiags()->ElideType;
      TDT.ShowColors = getDiags()->ShowColors;
      TDT.TemplateDiffUsed = false;
      intptr_t val = reinterpret_cast<intptr_t>(&TDT);

      const char *ArgumentEnd = Argument + ArgumentLen;
      const char *Pipe = ScanFormat(Argument, ArgumentEnd, '|');

      // Print the tree.  If this diagnostic already has a tree, skip the
      // second tree.
      if (getDiags()->PrintTemplateTree && Tree.empty()) {
        TDT.PrintFromType = true;
        TDT.PrintTree = true;
        getDiags()->ConvertArgToString(Kind, val,
                                       Modifier, ModifierLen,
                                       Argument, ArgumentLen,
                                       FormattedArgs.data(),
                                       FormattedArgs.size(),
                                       Tree, QualTypeVals);
        // If there is no tree information, fall back to regular printing.
        if (!Tree.empty()) {
          FormatDiagnostic(Pipe + 1, ArgumentEnd, OutStr);
          break;
        }
      }
Contributor: gwelymernans, Project: lfort, Lines of code: 67, Source: Diagnostic.cpp
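
One pattern worth pulling out of Example 1 is the FormattedArgs.data() / FormattedArgs.size() pair passed to ConvertArgToString: because SmallVector stores its elements contiguously, data() plus size() is enough to hand the contents to any pointer-and-length interface. A minimal sketch of that idiom follows; printAll is a hypothetical consumer, not part of the original code.

#include "llvm/ADT/SmallVector.h"
#include <cstddef>
#include <cstdio>

// Hypothetical C-style API expecting a pointer/length pair.
static void printAll(const int *Data, std::size_t Count) {
  for (std::size_t I = 0; I != Count; ++I)
    std::printf("%d\n", Data[I]);
}

int main() {
  llvm::SmallVector<int, 4> Args;
  Args.push_back(10);
  Args.push_back(20);
  Args.push_back(30);
  // data()/size() expose the contiguous storage, so no copy is needed.
  printAll(Args.data(), Args.size());
  return 0;
}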

Example 2: assert

void
UserValue::addDefsFromCopies(LiveInterval *LI, unsigned LocNo,
                      const SmallVectorImpl<SlotIndex> &Kills,
                      SmallVectorImpl<std::pair<SlotIndex, unsigned> > &NewDefs,
                      MachineRegisterInfo &MRI, LiveIntervals &LIS) {
  if (Kills.empty())
    return;
  // Don't track copies from physregs, there are too many uses.
  if (!TargetRegisterInfo::isVirtualRegister(LI->reg))
    return;

  // Collect all the (vreg, valno) pairs that are copies of LI.
  SmallVector<std::pair<LiveInterval*, const VNInfo*>, 8> CopyValues;
  for (MachineOperand &MO : MRI.use_nodbg_operands(LI->reg)) {
    MachineInstr *MI = MO.getParent();
    // Copies of the full value.
    if (MO.getSubReg() || !MI->isCopy())
      continue;
    unsigned DstReg = MI->getOperand(0).getReg();

    // Don't follow copies to physregs. These are usually setting up call
    // arguments, and the argument registers are always call clobbered. We are
    // better off in the source register which could be a callee-saved register,
    // or it could be spilled.
    if (!TargetRegisterInfo::isVirtualRegister(DstReg))
      continue;

    // Is LocNo extended to reach this copy? If not, another def may be blocking
    // it, or we are looking at a wrong value of LI.
    SlotIndex Idx = LIS.getInstructionIndex(MI);
    LocMap::iterator I = locInts.find(Idx.getRegSlot(true));
    if (!I.valid() || I.value() != LocNo)
      continue;

    if (!LIS.hasInterval(DstReg))
      continue;
    LiveInterval *DstLI = &LIS.getInterval(DstReg);
    const VNInfo *DstVNI = DstLI->getVNInfoAt(Idx.getRegSlot());
    assert(DstVNI && DstVNI->def == Idx.getRegSlot() && "Bad copy value");
    CopyValues.push_back(std::make_pair(DstLI, DstVNI));
  }

  if (CopyValues.empty())
    return;

  DEBUG(dbgs() << "Got " << CopyValues.size() << " copies of " << *LI << '\n');

  // Try to add defs of the copied values for each kill point.
  for (unsigned i = 0, e = Kills.size(); i != e; ++i) {
    SlotIndex Idx = Kills[i];
    for (unsigned j = 0, e = CopyValues.size(); j != e; ++j) {
      LiveInterval *DstLI = CopyValues[j].first;
      const VNInfo *DstVNI = CopyValues[j].second;
      if (DstLI->getVNInfoAt(Idx) != DstVNI)
        continue;
      // Check that there isn't already a def at Idx
      LocMap::iterator I = locInts.find(Idx);
      if (I.valid() && I.start() <= Idx)
        continue;
      DEBUG(dbgs() << "Kill at " << Idx << " covered by valno #"
                   << DstVNI->id << " in " << *DstLI << '\n');
      MachineInstr *CopyMI = LIS.getInstructionFromIndex(DstVNI->def);
      assert(CopyMI && CopyMI->isCopy() && "Bad copy value");
      unsigned LocNo = getLocationNo(CopyMI->getOperand(0));
      I.insert(Idx, Idx.getNextSlot(), LocNo);
      NewDefs.push_back(std::make_pair(Idx, LocNo));
      break;
    }
  }
}
Contributor: DiamondLovesYou, Project: llvm, Lines of code: 70, Source: LiveDebugVariables.cpp

Example 3: sharedTypeIDs

/// Compute the actions table and gather the first action index for each landing
/// pad site.
unsigned EHStreamer::
computeActionsTable(const SmallVectorImpl<const LandingPadInfo*> &LandingPads,
                    SmallVectorImpl<ActionEntry> &Actions,
                    SmallVectorImpl<unsigned> &FirstActions) {

  // The action table follows the call-site table in the LSDA. The individual
  // records are of two types:
  //
  //   * Catch clause
  //   * Exception specification
  //
  // The two record kinds have the same format, with only small differences.
  // They are distinguished by the "switch value" field: Catch clauses
  // (TypeInfos) have strictly positive switch values, and exception
  // specifications (FilterIds) have strictly negative switch values. Value 0
  // indicates a catch-all clause.
  //
  // Negative type IDs index into FilterIds. Positive type IDs index into
  // TypeInfos.  The value written for a positive type ID is just the type ID
  // itself.  For a negative type ID, however, the value written is the
  // (negative) byte offset of the corresponding FilterIds entry.  The byte
  // offset is usually equal to the type ID (because the FilterIds entries are
  // written using a variable width encoding, which outputs one byte per entry
  // as long as the value written is not too large) but can differ.  This kind
  // of complication does not occur for positive type IDs because type infos are
  // output using a fixed width encoding.  FilterOffsets[i] holds the byte
  // offset corresponding to FilterIds[i].

  const std::vector<unsigned> &FilterIds = MMI->getFilterIds();
  SmallVector<int, 16> FilterOffsets;
  FilterOffsets.reserve(FilterIds.size());
  int Offset = -1;

  for (std::vector<unsigned>::const_iterator
         I = FilterIds.begin(), E = FilterIds.end(); I != E; ++I) {
    FilterOffsets.push_back(Offset);
    Offset -= getULEB128Size(*I);
  }

  FirstActions.reserve(LandingPads.size());

  int FirstAction = 0;
  unsigned SizeActions = 0;
  const LandingPadInfo *PrevLPI = nullptr;

  for (SmallVectorImpl<const LandingPadInfo *>::const_iterator
         I = LandingPads.begin(), E = LandingPads.end(); I != E; ++I) {
    const LandingPadInfo *LPI = *I;
    const std::vector<int> &TypeIds = LPI->TypeIds;
    unsigned NumShared = PrevLPI ? sharedTypeIDs(LPI, PrevLPI) : 0;
    unsigned SizeSiteActions = 0;

    if (NumShared < TypeIds.size()) {
      unsigned SizeAction = 0;
      unsigned PrevAction = (unsigned)-1;

      if (NumShared) {
        unsigned SizePrevIds = PrevLPI->TypeIds.size();
        assert(Actions.size());
        PrevAction = Actions.size() - 1;
        SizeAction = getSLEB128Size(Actions[PrevAction].NextAction) +
                     getSLEB128Size(Actions[PrevAction].ValueForTypeID);

        for (unsigned j = NumShared; j != SizePrevIds; ++j) {
          assert(PrevAction != (unsigned)-1 && "PrevAction is invalid!");
          SizeAction -= getSLEB128Size(Actions[PrevAction].ValueForTypeID);
          SizeAction += -Actions[PrevAction].NextAction;
          PrevAction = Actions[PrevAction].Previous;
        }
      }

      // Compute the actions.
      for (unsigned J = NumShared, M = TypeIds.size(); J != M; ++J) {
        int TypeID = TypeIds[J];
        assert(-1 - TypeID < (int)FilterOffsets.size() && "Unknown filter id!");
        int ValueForTypeID = TypeID < 0 ? FilterOffsets[-1 - TypeID] : TypeID;
        unsigned SizeTypeID = getSLEB128Size(ValueForTypeID);

        int NextAction = SizeAction ? -(SizeAction + SizeTypeID) : 0;
        SizeAction = SizeTypeID + getSLEB128Size(NextAction);
        SizeSiteActions += SizeAction;

        ActionEntry Action = { ValueForTypeID, NextAction, PrevAction };
        Actions.push_back(Action);
        PrevAction = Actions.size() - 1;
      }

      // Record the first action of the landing pad site.
      FirstAction = SizeActions + SizeSiteActions - SizeAction + 1;
    } // else identical - re-use previous FirstAction

    // Information used when creating the call-site table. The action record
    // field of the call site record is the offset of the first associated
    // action record, relative to the start of the actions table. This value is
    // biased by 1 (1 indicating the start of the actions table), and 0
    // indicates that there are no actions.
    FirstActions.push_back(FirstAction);

//......... some code omitted here .........
Contributor: JiadingGai, Project: llvm, Lines of code: 101, Source: EHStreamer.cpp
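
Example 3 also uses size() for capacity planning: FilterOffsets.reserve(FilterIds.size()) reserves exactly one slot per input entry before the push_back loop, so the vector grows at most once. A stripped-down sketch of the same pattern follows; the offset arithmetic is a placeholder, not the real LEB128 size computation.

#include "llvm/ADT/SmallVector.h"
#include <vector>

// Hypothetical: record one placeholder offset per input id.
llvm::SmallVector<int, 16> makeOffsets(const std::vector<unsigned> &Ids) {
  llvm::SmallVector<int, 16> Offsets;
  // One output element per input element, so reserving Ids.size() slots
  // guarantees at most a single allocation.
  Offsets.reserve(Ids.size());
  int Offset = -1;
  for (unsigned Id : Ids) {
    Offsets.push_back(Offset);
    Offset -= static_cast<int>(Id); // placeholder for getULEB128Size(Id)
  }
  return Offsets;
}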

Example 4: ClusterNeighboringLoads

/// ClusterNeighboringLoads - Force nearby loads together by "gluing" them.
/// This function finds loads of the same base and different offsets. If the
/// offsets are not far apart (target specific), it adds MVT::Glue inputs and
/// outputs to ensure they are scheduled together and in order. This
/// optimization may benefit some targets by improving cache locality.
void ScheduleDAGSDNodes::ClusterNeighboringLoads(SDNode *Node) {
  SDNode *Chain = nullptr;
  unsigned NumOps = Node->getNumOperands();
  if (Node->getOperand(NumOps-1).getValueType() == MVT::Other)
    Chain = Node->getOperand(NumOps-1).getNode();
  if (!Chain)
    return;

  // Look for other loads of the same chain. Find loads that are loading from
  // the same base pointer and different offsets.
  SmallPtrSet<SDNode*, 16> Visited;
  SmallVector<int64_t, 4> Offsets;
  DenseMap<long long, SDNode*> O2SMap;  // Map from offset to SDNode.
  bool Cluster = false;
  SDNode *Base = Node;
  // This algorithm requires a reasonably low use count before finding a match
  // to avoid uselessly blowing up compile time in large blocks.
  unsigned UseCount = 0;
  for (SDNode::use_iterator I = Chain->use_begin(), E = Chain->use_end();
       I != E && UseCount < 100; ++I, ++UseCount) {
    SDNode *User = *I;
    if (User == Node || !Visited.insert(User).second)
      continue;
    int64_t Offset1, Offset2;
    if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) ||
        Offset1 == Offset2)
      // FIXME: Should be ok if the addresses are identical. But earlier
      // optimizations really should have eliminated one of the loads.
      continue;
    if (O2SMap.insert(std::make_pair(Offset1, Base)).second)
      Offsets.push_back(Offset1);
    O2SMap.insert(std::make_pair(Offset2, User));
    Offsets.push_back(Offset2);
    if (Offset2 < Offset1)
      Base = User;
    Cluster = true;
    // Reset UseCount to allow more matches.
    UseCount = 0;
  }

  if (!Cluster)
    return;

  // Sort them in increasing order.
  std::sort(Offsets.begin(), Offsets.end());

  // Check if the loads are close enough.
  SmallVector<SDNode*, 4> Loads;
  unsigned NumLoads = 0;
  int64_t BaseOff = Offsets[0];
  SDNode *BaseLoad = O2SMap[BaseOff];
  Loads.push_back(BaseLoad);
  for (unsigned i = 1, e = Offsets.size(); i != e; ++i) {
    int64_t Offset = Offsets[i];
    SDNode *Load = O2SMap[Offset];
    if (!TII->shouldScheduleLoadsNear(BaseLoad, Load, BaseOff, Offset,NumLoads))
      break; // Stop right here. Ignore loads that are further away.
    Loads.push_back(Load);
    ++NumLoads;
  }

  if (NumLoads == 0)
    return;

  // Cluster loads by adding MVT::Glue outputs and inputs. This also
  // ensures they are scheduled in order of increasing addresses.
  SDNode *Lead = Loads[0];
  SDValue InGlue = SDValue(nullptr, 0);
  if (AddGlue(Lead, InGlue, true, DAG))
    InGlue = SDValue(Lead, Lead->getNumValues() - 1);
  for (unsigned I = 1, E = Loads.size(); I != E; ++I) {
    bool OutGlue = I < E - 1;
    SDNode *Load = Loads[I];

    // If AddGlue fails, we could leave an unused glue value. This should not
    // cause any problems.
    if (AddGlue(Load, InGlue, OutGlue, DAG)) {
      if (OutGlue)
        InGlue = SDValue(Load, Load->getNumValues() - 1);

      ++LoadsClustered;
    }
    else if (!OutGlue && InGlue.getNode())
      RemoveUnusedGlue(InGlue.getNode(), DAG);
  }
}
Contributor: 2asoft, Project: freebsd, Lines of code: 91, Source: ScheduleDAGSDNodes.cpp

Example 5: emitScalarExistentialDowncast


//......... some code omitted here .........
      }
      case MetatypeRepresentation::ObjC:
        // Metatype is already an ObjC object.
        objcObject = value;
        break;
      }
    } else {
      // Class instance is already an ObjC object.
      objcObject = value;
    }
    if (objcObject)
      objcObject = IGF.Builder.CreateBitCast(objcObject,
                                             IGF.IGM.UnknownRefCountedPtrTy);
    
    // Pick the cast function based on the cast mode and on whether we're
    // casting a Swift metatype or ObjC object.
    llvm::Value *castFn;
    switch (mode) {
    case CheckedCastMode::Unconditional:
      castFn = objcObject
        ? IGF.IGM.getDynamicCastObjCProtocolUnconditionalFn()
        : IGF.IGM.getDynamicCastTypeToObjCProtocolUnconditionalFn();
      break;
    case CheckedCastMode::Conditional:
      castFn = objcObject
        ? IGF.IGM.getDynamicCastObjCProtocolConditionalFn()
        : IGF.IGM.getDynamicCastTypeToObjCProtocolConditionalFn();
      break;
    }
    llvm::Value *objcCastObject = objcObject ? objcObject : value;
    
    Address protoRefsBuf = IGF.createAlloca(
                                        llvm::ArrayType::get(IGF.IGM.Int8PtrTy,
                                                             objcProtos.size()),
                                        IGF.IGM.getPointerAlignment(),
                                        "objc_protocols");
    protoRefsBuf = IGF.Builder.CreateBitCast(protoRefsBuf,
                                             IGF.IGM.Int8PtrPtrTy);

    for (unsigned index : indices(objcProtos)) {
      Address protoRefSlot = IGF.Builder.CreateConstArrayGEP(
                                                     protoRefsBuf, index,
                                                     IGF.IGM.getPointerSize());
      IGF.Builder.CreateStore(objcProtos[index], protoRefSlot);
      ++index;
    }

    
    auto cc = IGF.IGM.DefaultCC;
    if (auto fun = dyn_cast<llvm::Function>(castFn))
      cc = fun->getCallingConv();


    auto call = IGF.Builder.CreateCall(
        castFn,
        {objcCastObject, IGF.IGM.getSize(Size(objcProtos.size())),
         protoRefsBuf.getAddress()});
    call->setCallingConv(cc);
    objcCast = call;
    resultValue = IGF.Builder.CreateBitCast(objcCast, resultType);
  }

  // If we don't need to look up any witness tables, we're done.
  if (witnessTableProtos.empty() && !checkClassConstraint) {
    ex.add(resultValue);
    return;
Contributor: hiostreas, Project: swift, Lines of code: 67, Source: GenCast.cpp

Example 6: ParseBlock

/// ParseBlock - Read a block, updating statistics, etc.
static bool ParseBlock(BitstreamCursor &Stream, unsigned IndentLevel) {
  std::string Indent(IndentLevel*2, ' ');
  uint64_t BlockBitStart = Stream.GetCurrentBitNo();
  unsigned BlockID = Stream.ReadSubBlockID();

  // Get the statistics for this BlockID.
  PerBlockIDStats &BlockStats = BlockIDStats[BlockID];

  BlockStats.NumInstances++;

  // BLOCKINFO is a special part of the stream.
  if (BlockID == bitc::BLOCKINFO_BLOCK_ID) {
    if (Dump) errs() << Indent << "<BLOCKINFO_BLOCK/>\n";
    if (Stream.ReadBlockInfoBlock())
      return Error("Malformed BlockInfoBlock");
    uint64_t BlockBitEnd = Stream.GetCurrentBitNo();
    BlockStats.NumBits += BlockBitEnd-BlockBitStart;
    return false;
  }

  unsigned NumWords = 0;
  if (Stream.EnterSubBlock(BlockID, &NumWords))
    return Error("Malformed block record");

  const char *BlockName = 0;
  if (Dump) {
    errs() << Indent << "<";
    if ((BlockName = GetBlockName(BlockID, *Stream.getBitStreamReader())))
      errs() << BlockName;
    else
      errs() << "UnknownBlock" << BlockID;

    if (NonSymbolic && BlockName)
      errs() << " BlockID=" << BlockID;

    errs() << " NumWords=" << NumWords
           << " BlockCodeSize=" << Stream.GetAbbrevIDWidth() << ">\n";
  }

  SmallVector<uint64_t, 64> Record;

  // Read all the records for this block.
  while (1) {
    if (Stream.AtEndOfStream())
      return Error("Premature end of bitstream");

    uint64_t RecordStartBit = Stream.GetCurrentBitNo();

    // Read the code for this record.
    unsigned AbbrevID = Stream.ReadCode();
    switch (AbbrevID) {
    case bitc::END_BLOCK: {
      if (Stream.ReadBlockEnd())
        return Error("Error at end of block");
      uint64_t BlockBitEnd = Stream.GetCurrentBitNo();
      BlockStats.NumBits += BlockBitEnd-BlockBitStart;
      if (Dump) {
        errs() << Indent << "</";
        if (BlockName)
          errs() << BlockName << ">\n";
        else
          errs() << "UnknownBlock" << BlockID << ">\n";
      }
      return false;
    }
    case bitc::ENTER_SUBBLOCK: {
      uint64_t SubBlockBitStart = Stream.GetCurrentBitNo();
      if (ParseBlock(Stream, IndentLevel+1))
        return true;
      ++BlockStats.NumSubBlocks;
      uint64_t SubBlockBitEnd = Stream.GetCurrentBitNo();

      // Don't include subblock sizes in the size of this block.
      BlockBitStart += SubBlockBitEnd-SubBlockBitStart;
      break;
    }
    case bitc::DEFINE_ABBREV:
      Stream.ReadAbbrevRecord();
      ++BlockStats.NumAbbrevs;
      break;
    default:
      Record.clear();

      ++BlockStats.NumRecords;
      if (AbbrevID != bitc::UNABBREV_RECORD)
        ++BlockStats.NumAbbreviatedRecords;

      const char *BlobStart = 0;
      unsigned BlobLen = 0;
      unsigned Code = Stream.ReadRecord(AbbrevID, Record, BlobStart, BlobLen);



      // Increment the # occurrences of this code.
      if (BlockStats.CodeFreq.size() <= Code)
        BlockStats.CodeFreq.resize(Code+1);
      BlockStats.CodeFreq[Code].NumInstances++;
      BlockStats.CodeFreq[Code].TotalBits +=
        Stream.GetCurrentBitNo()-RecordStartBit;
//......... some code omitted here .........
Contributor: Persper, Project: PTMC, Lines of code: 101, Source: llvm-bcanalyzer.cpp
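
The size()-then-resize() sequence near the end of Example 6 (grow the CodeFreq table on demand before indexing into it) is a common counting idiom. Here is a self-contained sketch of the same idea, assuming a SmallVector-based histogram for illustration.

#include "llvm/ADT/SmallVector.h"

// Count occurrences of small non-negative codes, growing the table on demand.
void countCode(llvm::SmallVectorImpl<unsigned> &Freq, unsigned Code) {
  // If the table is too short to hold this code, grow it; the new slots are
  // value-initialized to zero.
  if (Freq.size() <= Code)
    Freq.resize(Code + 1);
  ++Freq[Code];
}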

Example 7: incorporateFunction

void ValueEnumerator::incorporateFunction(const Function &F) {
  InstructionCount = 0;
  NumModuleValues = Values.size();
  NumModuleMDValues = MDValues.size();

  // Adding function arguments to the value table.
  for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end();
       I != E; ++I)
    EnumerateValue(I);

  FirstFuncConstantID = Values.size();

  // Add all function-level constants to the value table.
  for (Function::const_iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
    for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I!=E; ++I)
      for (User::const_op_iterator OI = I->op_begin(), E = I->op_end();
           OI != E; ++OI) {
        if ((isa<Constant>(*OI) && !isa<GlobalValue>(*OI)) ||
            isa<InlineAsm>(*OI))
          EnumerateValue(*OI);
      }
    BasicBlocks.push_back(BB);
    ValueMap[BB] = BasicBlocks.size();
  }

  // Optimize the constant layout.
  OptimizeConstants(FirstFuncConstantID, Values.size());

  // Add the function's parameter attributes so they are available for use in
  // the function's instruction.
  EnumerateAttributes(F.getAttributes());

  FirstInstID = Values.size();

  SmallVector<MDNode *, 8> FnLocalMDVector;
  // Add all of the instructions.
  for (Function::const_iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
    for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I!=E; ++I) {
      for (User::const_op_iterator OI = I->op_begin(), E = I->op_end();
           OI != E; ++OI) {
        if (MDNode *MD = dyn_cast<MDNode>(*OI))
          if (MD->isFunctionLocal() && MD->getFunction())
            // Enumerate metadata after the instructions they might refer to.
            FnLocalMDVector.push_back(MD);
      }

      SmallVector<std::pair<unsigned, MDNode*>, 8> MDs;
      I->getAllMetadataOtherThanDebugLoc(MDs);
      for (unsigned i = 0, e = MDs.size(); i != e; ++i) {
        MDNode *N = MDs[i].second;
        if (N->isFunctionLocal() && N->getFunction())
          FnLocalMDVector.push_back(N);
      }
        
      if (!I->getType()->isVoidTy())
        EnumerateValue(I);
    }
  }

  // Add all of the function-local metadata.
  for (unsigned i = 0, e = FnLocalMDVector.size(); i != e; ++i)
    EnumerateFunctionLocalMetadata(FnLocalMDVector[i]);
}
Contributor: CartBlanche, Project: llvm, Lines of code: 63, Source: ValueEnumerator.cpp
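
Example 7 repeatedly uses size() as a watermark: the current element count doubles as the id of the most recently added entry (ValueMap[BB] = BasicBlocks.size()) or as the first id of the next batch (FirstFuncConstantID = Values.size()). A minimal sketch of that numbering trick follows; the function names and element type are hypothetical stand-ins.

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"

// Hypothetical: register a name and return its 1-based id, the way
// incorporateFunction numbers basic blocks via ValueMap[BB] = BasicBlocks.size().
unsigned registerName(llvm::SmallVectorImpl<llvm::StringRef> &Table,
                      llvm::StringRef Name) {
  Table.push_back(Name);
  // Immediately after push_back, size() is the 1-based index of the new entry.
  return static_cast<unsigned>(Table.size());
}

// Hypothetical: remember how many entries exist before a batch, so the batch
// occupies ids [First, Table.size()) afterwards -- the FirstFuncConstantID pattern.
unsigned beginBatch(const llvm::SmallVectorImpl<llvm::StringRef> &Table) {
  return static_cast<unsigned>(Table.size());
}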

Example 8: recurseBasicBlock

// recurseBasicBlock() - This calculates the ProfileInfo estimation for a
// single block and then recurses into the successors.
// The algorithm preserves the flow condition, meaning that the sum of the
// weight of the incoming edges must equal the block weight, which must in
// turn equal the sum of the weights of the outgoing edges.
// Since the flow of a block is determined from the current state of the
// flow, once an edge has a flow assigned this flow is never changed again,
// otherwise it would be possible to violate the flow condition in another
// block.
void ProfileEstimatorPass::recurseBasicBlock(BasicBlock *BB) {

  // Break the recursion if this BasicBlock was already visited.
  if (BBToVisit.find(BB) == BBToVisit.end()) return;

  // Read the LoopInfo for this block.
  bool  BBisHeader = LI->isLoopHeader(BB);
  Loop* BBLoop     = LI->getLoopFor(BB);

  // To get the block weight, read all incoming edges.
  double BBWeight = 0;
  std::set<BasicBlock*> ProcessedPreds;
  for ( pred_iterator bbi = pred_begin(BB), bbe = pred_end(BB);
        bbi != bbe; ++bbi ) {
    // If this block was not considered already, add weight.
    Edge edge = getEdge(*bbi,BB);
    double w = getEdgeWeight(edge);
    if (ProcessedPreds.insert(*bbi).second) {
      BBWeight += ignoreMissing(w);
    }
    // If this block is a loop header and the predecessor is contained in this
    // loop, thus the edge is a backedge, continue and do not check if the
    // value is valid.
    if (BBisHeader && BBLoop->contains(*bbi)) {
      printEdgeError(edge, "but is backedge, continuing");
      continue;
    }
    // If the edge's value is missing (and this is no loop header and no
    // backedge), return; this block is currently not estimatable.
    if (w == MissingValue) {
      printEdgeError(edge, "returning");
      return;
    }
  }
  if (getExecutionCount(BB) != MissingValue) {
    BBWeight = getExecutionCount(BB);
  }

  // Fetch all necessary information for current block.
  SmallVector<Edge, 8> ExitEdges;
  SmallVector<Edge, 8> Edges;
  if (BBLoop) {
    BBLoop->getExitEdges(ExitEdges);
  }

  // If this is a loop header, consider the following:
  // Exactly the flow that is entering this block, must exit this block too. So
  // do the following: 
  // *) get all the exit edges, read the flow that is already leaving this
  // loop, remember the edges that do not have any flow on them right now.
  // (The edges that already have flow on them are most likely exiting edges of
  // other loops; do not touch those flows because the previously calculated
  // loopheaders would not be exact anymore.)
  // *) In case there is not a single exiting edge left, create one at the loop
  // latch to prevent the flow from building up in the loop.
  // *) Take the flow that is not leaving the loop already and distribute it on
  // the remaining exiting edges.
  // (This ensures that all flow that enters the loop also leaves it.)
  // *) Increase the flow into the loop by increasing the weight of this block.
  // There is at least one incoming backedge that will bring us this flow later
  // on. (So that the flow condition in this node is valid again.)
  if (BBisHeader) {
    double incoming = BBWeight;
    // Subtract the flow leaving the loop.
    std::set<Edge> ProcessedExits;
    for (SmallVector<Edge, 8>::iterator ei = ExitEdges.begin(),
         ee = ExitEdges.end(); ei != ee; ++ei) {
      if (ProcessedExits.insert(*ei).second) {
        double w = getEdgeWeight(*ei);
        if (w == MissingValue) {
          Edges.push_back(*ei);
          // Check if there is a necessary minimal weight; if so, subtract it
          // from the weight.
          if (MinimalWeight.find(*ei) != MinimalWeight.end()) {
            incoming -= MinimalWeight[*ei];
            DEBUG(dbgs() << "Reserving " << format("%.20g",MinimalWeight[*ei]) << " at " << (*ei) << "\n");
          }
        } else {
          incoming -= w;
        }
      }
    }
    // If no exit edges, create one:
    if (Edges.size() == 0) {
      BasicBlock *Latch = BBLoop->getLoopLatch();
      if (Latch) {
        Edge edge = getEdge(Latch,0);
        EdgeInformation[BB->getParent()][edge] = BBWeight;
        printEdgeWeight(edge);
        edge = getEdge(Latch, BB);
        EdgeInformation[BB->getParent()][edge] = BBWeight * ExecCount;
//......... some code omitted here .........
Contributor: 2014-class, Project: freerouter, Lines of code: 101, Source: ProfileEstimatorPass.cpp
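
A small readability note on Example 8: the Edges.size() == 0 test for "no exit edges left" is equivalent to SmallVector's empty(), which several of the other examples use (Kills.empty() and CopyValues.empty() in Example 2, OutChains.empty() in Example 9). A hedged sketch, with the Edge element type replaced by int:

#include "llvm/ADT/SmallVector.h"

// Equivalent checks; empty() states the intent directly and does not rely
// on computing the element count.
bool noExitEdgesLeft(const llvm::SmallVectorImpl<int> &Edges) {
  return Edges.empty(); // same result as Edges.size() == 0
}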

Example 9: CCInfo

/// LowerFormalArguments - transform physical registers into virtual registers
/// and generate load operations for arguments places on the stack.
SDValue
Cpu0TargetLowering::LowerFormalArguments(SDValue Chain,
                                         CallingConv::ID CallConv,
                                         bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                         DebugLoc dl, SelectionDAG &DAG,
                                         SmallVectorImpl<SDValue> &InVals)
                                          const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  Cpu0FunctionInfo *Cpu0FI = MF.getInfo<Cpu0FunctionInfo>();

  Cpu0FI->setVarArgsFrameIndex(0);

  // Used with varargs to accumulate store chains.
  std::vector<SDValue> OutChains;

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());
                         
  CCInfo.AnalyzeFormalArguments(Ins, CC_Cpu0);

  Function::const_arg_iterator FuncArg =
    DAG.getMachineFunction().getFunction()->arg_begin();
  int LastFI = 0;// Cpu0FI->LastInArgFI is 0 at the entry of this function.

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++FuncArg) {
    CCValAssign &VA = ArgLocs[i];
    EVT ValVT = VA.getValVT();
    ISD::ArgFlagsTy Flags = Ins[i].Flags;
    bool IsRegLoc = VA.isRegLoc();

    if (Flags.isByVal()) {
      assert(Flags.getByValSize() &&
             "ByVal args of size 0 should have been ignored by front-end."); 
      continue;
    }
    // sanity check
    assert(VA.isMemLoc());

    // The stack pointer offset is relative to the caller stack frame.
    LastFI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
                                    VA.getLocMemOffset(), true);

    // Create load nodes to retrieve arguments from the stack
    SDValue FIN = DAG.getFrameIndex(LastFI, getPointerTy());
    InVals.push_back(DAG.getLoad(ValVT, dl, Chain, FIN,
                                 MachinePointerInfo::getFixedStack(LastFI),
                                 false, false, false, 0));
  }
  Cpu0FI->setLastInArgFI(LastFI);
  // All stores are grouped in one node to allow the matching between
  // the size of Ins and InVals. This only happens on vararg functions.
  if (!OutChains.empty()) {
    OutChains.push_back(Chain);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &OutChains[0], OutChains.size());
  }
  return Chain;
}
Contributor: ajamshidi, Project: lbd, Lines of code: 64, Source: Cpu0ISelLowering.cpp

Example 10: if

/// ValueEnumerator - Enumerate module-level information.
ValueEnumerator::ValueEnumerator(const Module *M) {
  // Enumerate the global variables.
  for (Module::const_global_iterator I = M->global_begin(),
         E = M->global_end(); I != E; ++I)
    EnumerateValue(I);

  // Enumerate the functions.
  for (Module::const_iterator I = M->begin(), E = M->end(); I != E; ++I) {
    EnumerateValue(I);
    EnumerateAttributes(cast<Function>(I)->getAttributes());
  }

  // Enumerate the aliases.
  for (Module::const_alias_iterator I = M->alias_begin(), E = M->alias_end();
       I != E; ++I)
    EnumerateValue(I);

  // Remember what is the cutoff between globalvalue's and other constants.
  unsigned FirstConstant = Values.size();

  // Enumerate the global variable initializers.
  for (Module::const_global_iterator I = M->global_begin(),
         E = M->global_end(); I != E; ++I)
    if (I->hasInitializer())
      EnumerateValue(I->getInitializer());

  // Enumerate the aliasees.
  for (Module::const_alias_iterator I = M->alias_begin(), E = M->alias_end();
       I != E; ++I)
    EnumerateValue(I->getAliasee());

  // Insert constants and metadata that are named at module level into the slot 
  // pool so that the module symbol table can refer to them...
  EnumerateValueSymbolTable(M->getValueSymbolTable());
  EnumerateNamedMetadata(M);

  SmallVector<std::pair<unsigned, MDNode*>, 8> MDs;

  // Enumerate types used by function bodies and argument lists.
  for (Module::const_iterator F = M->begin(), E = M->end(); F != E; ++F) {

    for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
         I != E; ++I)
      EnumerateType(I->getType());

    for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
      for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I!=E;++I){
        for (User::const_op_iterator OI = I->op_begin(), E = I->op_end();
             OI != E; ++OI) {
          if (MDNode *MD = dyn_cast<MDNode>(*OI))
            if (MD->isFunctionLocal() && MD->getFunction())
              // These will get enumerated during function-incorporation.
              continue;
          EnumerateOperandType(*OI);
        }
        EnumerateType(I->getType());
        if (const CallInst *CI = dyn_cast<CallInst>(I))
          EnumerateAttributes(CI->getAttributes());
        else if (const InvokeInst *II = dyn_cast<InvokeInst>(I))
          EnumerateAttributes(II->getAttributes());

        // Enumerate metadata attached with this instruction.
        MDs.clear();
        I->getAllMetadataOtherThanDebugLoc(MDs);
        for (unsigned i = 0, e = MDs.size(); i != e; ++i)
          EnumerateMetadata(MDs[i].second);
        
        if (!I->getDebugLoc().isUnknown()) {
          MDNode *Scope, *IA;
          I->getDebugLoc().getScopeAndInlinedAt(Scope, IA, I->getContext());
          if (Scope) EnumerateMetadata(Scope);
          if (IA) EnumerateMetadata(IA);
        }
      }
  }

  // Optimize constant ordering.
  OptimizeConstants(FirstConstant, Values.size());
}
Contributor: CartBlanche, Project: llvm, Lines of code: 80, Source: ValueEnumerator.cpp

Example 11: ParseAsmStatement


//......... some code omitted here .........
    SkipUntil(tok::r_paren, StopAtSemi);
    return StmtError();
  }

  if (Tok.isNot(tok::l_paren)) {
    Diag(Tok, diag::err_expected_lparen_after) << "asm";
    SkipUntil(tok::r_paren, StopAtSemi);
    return StmtError();
  }
  BalancedDelimiterTracker T(*this, tok::l_paren);
  T.consumeOpen();

  ExprResult AsmString(ParseAsmStringLiteral());

  // Check if GNU-style InlineAsm is disabled.
  // Error on anything other than empty string.
  if (!(getLangOpts().GNUAsm || AsmString.isInvalid())) {
    const auto *SL = cast<StringLiteral>(AsmString.get());
    if (!SL->getString().trim().empty())
      Diag(Loc, diag::err_gnu_inline_asm_disabled);
  }

  if (AsmString.isInvalid()) {
    // Consume up to and including the closing paren.
    T.skipToEnd();
    return StmtError();
  }

  SmallVector<IdentifierInfo *, 4> Names;
  ExprVector Constraints;
  ExprVector Exprs;
  ExprVector Clobbers;

  if (Tok.is(tok::r_paren)) {
    // We have a simple asm expression like 'asm("foo")'.
    T.consumeClose();
    return Actions.ActOnGCCAsmStmt(AsmLoc, /*isSimple*/ true, isVolatile,
                                   /*NumOutputs*/ 0, /*NumInputs*/ 0, nullptr,
                                   Constraints, Exprs, AsmString.get(),
                                   Clobbers, T.getCloseLocation());
  }

  // Parse Outputs, if present.
  bool AteExtraColon = false;
  if (Tok.is(tok::colon) || Tok.is(tok::coloncolon)) {
    // In C++ mode, parse "::" like ": :".
    AteExtraColon = Tok.is(tok::coloncolon);
    ConsumeToken();

    if (!AteExtraColon && ParseAsmOperandsOpt(Names, Constraints, Exprs))
      return StmtError();
  }

  unsigned NumOutputs = Names.size();

  // Parse Inputs, if present.
  if (AteExtraColon || Tok.is(tok::colon) || Tok.is(tok::coloncolon)) {
    // In C++ mode, parse "::" like ": :".
    if (AteExtraColon)
      AteExtraColon = false;
    else {
      AteExtraColon = Tok.is(tok::coloncolon);
      ConsumeToken();
    }

    if (!AteExtraColon && ParseAsmOperandsOpt(Names, Constraints, Exprs))
      return StmtError();
  }

  assert(Names.size() == Constraints.size() &&
         Constraints.size() == Exprs.size() && "Input operand size mismatch!");

  unsigned NumInputs = Names.size() - NumOutputs;

  // Parse the clobbers, if present.
  if (AteExtraColon || Tok.is(tok::colon)) {
    if (!AteExtraColon)
      ConsumeToken();

    // Parse the asm-string list for clobbers if present.
    if (Tok.isNot(tok::r_paren)) {
      while (1) {
        ExprResult Clobber(ParseAsmStringLiteral());

        if (Clobber.isInvalid())
          break;

        Clobbers.push_back(Clobber.get());

        if (!TryConsumeToken(tok::comma))
          break;
      }
    }
  }

  T.consumeClose();
  return Actions.ActOnGCCAsmStmt(
      AsmLoc, false, isVolatile, NumOutputs, NumInputs, Names.data(),
      Constraints, Exprs, AsmString.get(), Clobbers, T.getCloseLocation());
}
Contributor: FreeBSDFoundation, Project: freebsd, Lines of code: 101, Source: ParseStmtAsm.cpp
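
Example 11 leans on size() to keep a parallel-array invariant: Names, Constraints and Exprs grow in lockstep, so NumOutputs is Names.size() right after the output list and NumInputs is Names.size() - NumOutputs after the input list, which the later assert re-checks. A minimal sketch of that bookkeeping follows; the Operand struct and splitCounts function are hypothetical, not Clang API.

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include <cassert>

struct Operand { llvm::StringRef Name, Constraint; };

// Hypothetical: append outputs, note the watermark, append inputs, and derive
// the two counts from size() the way ParseAsmStatement does.
void splitCounts(const llvm::SmallVectorImpl<Operand> &Outputs,
                 const llvm::SmallVectorImpl<Operand> &Inputs,
                 llvm::SmallVectorImpl<Operand> &All,
                 unsigned &NumOutputs, unsigned &NumInputs) {
  All.append(Outputs.begin(), Outputs.end());
  NumOutputs = All.size();             // everything parsed so far is an output
  All.append(Inputs.begin(), Inputs.end());
  NumInputs = All.size() - NumOutputs; // the remainder are inputs
  assert(NumOutputs + NumInputs == All.size() && "operand counts out of sync");
}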

Example 12: spillAroundUses

/// spillAroundUses - insert spill code around each use of Reg.
void InlineSpiller::spillAroundUses(unsigned Reg) {
  DEBUG(dbgs() << "spillAroundUses " << PrintReg(Reg) << '\n');
  LiveInterval &OldLI = LIS.getInterval(Reg);

  // Iterate over instructions using Reg.
  for (MachineRegisterInfo::reg_iterator RegI = MRI.reg_begin(Reg);
       MachineInstr *MI = RegI.skipBundle();) {

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      uint64_t Offset = MI->getOperand(1).getImm();
      const MDNode *MDPtr = MI->getOperand(2).getMetadata();
      DebugLoc DL = MI->getDebugLoc();
      DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
      MachineBasicBlock *MBB = MI->getParent();
      BuildMI(*MBB, MBB->erase(MI), DL, TII.get(TargetOpcode::DBG_VALUE))
          .addFrameIndex(StackSlot).addImm(Offset).addMetadata(MDPtr);
      continue;
    }

    // Ignore copies to/from snippets. We'll delete them.
    if (SnippetCopies.count(MI))
      continue;

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(MI, Reg))
      continue;

    // Analyze instruction.
    SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
    MIBundleOperands::VirtRegInfo RI =
      MIBundleOperands(MI).analyzeVirtReg(Reg, &Ops);

    // Find the slot index where this instruction reads and writes OldLI.
    // This is usually the def slot, except for tied early clobbers.
    SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
    if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
      if (SlotIndex::isSameInstr(Idx, VNI->def))
        Idx = VNI->def;

    // Check for a sibling copy.
    unsigned SibReg = isFullCopyOf(MI, Reg);
    if (SibReg && isSibling(SibReg)) {
      // This may actually be a copy between snippets.
      if (isRegToSpill(SibReg)) {
        DEBUG(dbgs() << "Found new snippet copy: " << *MI);
        SnippetCopies.insert(MI);
        continue;
      }
      if (RI.Writes) {
        // Hoist the spill of a sib-reg copy.
        if (hoistSpill(OldLI, MI)) {
          // This COPY is now dead, the value is already in the stack slot.
          MI->getOperand(0).setIsDead();
          DeadDefs.push_back(MI);
          continue;
        }
      } else {
        // This is a reload for a sib-reg copy. Drop spills downstream.
        LiveInterval &SibLI = LIS.getInterval(SibReg);
        eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
        // The COPY will fold to a reload below.
      }
    }

    // Attempt to fold memory ops.
    if (foldMemoryOperand(Ops))
      continue;

    // Allocate interval around instruction.
    // FIXME: Infer regclass from instruction alone.
    LiveInterval &NewLI = Edit->createFrom(Reg);
    NewLI.markNotSpillable();

    if (RI.Reads)
      insertReload(NewLI, Idx, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = Ops[i].first->getOperand(Ops[i].second);
      MO.setReg(NewLI.reg);
      if (MO.isUse()) {
        if (!Ops[i].first->isRegTiedToDefOperand(Ops[i].second))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }
    DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI);

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (RI.Writes) {
      if (hasLiveDef)
        insertSpill(NewLI, OldLI, Idx, MI);
      else {
        // This instruction defines a dead value.  We don't need to spill it,
//......... some code omitted here .........
Contributor: blunted2night, Project: llvm, Lines of code: 101, Source: InlineSpiller.cpp

Example 13: ReplaceInstUsesWith


//......... some code omitted here .........
      // insert our getelementptr instruction...
      //
      Type *IdxTy = DL
                  ? DL->getIntPtrType(AI.getType())
                  : Type::getInt64Ty(AI.getContext());
      Value *NullIdx = Constant::getNullValue(IdxTy);
      Value *Idx[2] = { NullIdx, NullIdx };
      Instruction *GEP =
        GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
      InsertNewInstBefore(GEP, *It);

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return ReplaceInstUsesWith(AI, GEP);
    } else if (isa<UndefValue>(AI.getArraySize())) {
      return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
    }
  }

  if (DL && AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(DL->getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all alloca's of zero byte objects to the entry block and merge them
    // together.  Note that we only do this for alloca's, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (DL->getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block.  There is no problem with
        // dominance as the array size was forced to a constant earlier already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL->getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
            DL->getPrefTypeAlignment(EntryAI->getAllocatedType()));
        // Replace this zero-sized alloca with the one at the start of the entry
        // block after ensuring that the address will be aligned enough for both
        // types.
        unsigned MaxAlign = std::max(EntryAI->getAlignment(),
                                     AI.getAlignment());
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return ReplaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global whose alignment is equal to or exceeds that of the
    // allocation.  If this is the case, we can change all users to use
    // the constant global instead.  This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(Copy->getSource(),
                                                        AI.getAlignment(),
                                                        DL, AT, &AI, DT);
      if (AI.getAlignment() <= SourceAlign) {
        DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          EraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        Constant *Cast
          = ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, AI.getType());
        Instruction *NewI = ReplaceInstUsesWith(AI, Cast);
        EraseInstFromFunction(*Copy);
        ++NumGlobalCopies;
        return NewI;
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}
Contributor: Matthewxie, Project: llvm-1, Lines of code: 101, Source: InstCombineLoadStoreAlloca.cpp

Example 14: reMaterializeFor

/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg,
                                     MachineBasicBlock::iterator MI) {
  SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
  VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());

  if (!ParentVNI) {
    DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg)
        MO.setIsUndef();
    }
    DEBUG(dbgs() << UseIdx << '\t' << *MI);
    return true;
  }

  if (SnippetCopies.count(MI))
    return false;

  // Use an OrigVNI from traceSiblingValue when ParentVNI is a sibling copy.
  LiveRangeEdit::Remat RM(ParentVNI);
  SibValueMap::const_iterator SibI = SibValues.find(ParentVNI);
  if (SibI != SibValues.end())
    RM.OrigMI = SibI->second.DefMI;
  if (!Edit->canRematerializeAt(RM, UseIdx, false)) {
    markValueUsed(&VirtReg, ParentVNI);
    DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
    return false;
  }

  // If the instruction also writes VirtReg.reg, it had better not require the
  // same register for uses and defs.
  SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
  MIBundleOperands::VirtRegInfo RI =
    MIBundleOperands(MI).analyzeVirtReg(VirtReg.reg, &Ops);
  if (RI.Tied) {
    markValueUsed(&VirtReg, ParentVNI);
    DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
    return false;
  }

  // Before rematerializing into a register for a single instruction, try to
  // fold a load into the instruction. That avoids allocating a new register.
  if (RM.OrigMI->canFoldAsLoad() &&
      foldMemoryOperand(Ops, RM.OrigMI)) {
    Edit->markRematerialized(RM.ParentVNI);
    ++NumFoldedLoads;
    return true;
  }

  // Allocate a new register for the remat.
  LiveInterval &NewLI = Edit->createFrom(Original);
  NewLI.markNotSpillable();

  // Finally we can rematerialize OrigMI before MI.
  SlotIndex DefIdx = Edit->rematerializeAt(*MI->getParent(), MI, NewLI.reg, RM,
                                           TRI);
  DEBUG(dbgs() << "\tremat:  " << DefIdx << '\t'
               << *LIS.getInstructionFromIndex(DefIdx));

  // Replace operands
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(Ops[i].second);
    if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg) {
      MO.setReg(NewLI.reg);
      MO.setIsKill();
    }
  }
  DEBUG(dbgs() << "\t        " << UseIdx << '\t' << *MI);

  VNInfo *DefVNI = NewLI.getNextValue(DefIdx, LIS.getVNInfoAllocator());
  NewLI.addRange(LiveRange(DefIdx, UseIdx.getRegSlot(), DefVNI));
  DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  ++NumRemats;
  return true;
}
Contributor: blunted2night, Project: llvm, Lines of code: 77, Source: InlineSpiller.cpp

Example 15: ActOnGCCAsmStmt

StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
                                 bool IsVolatile, unsigned NumOutputs,
                                 unsigned NumInputs, IdentifierInfo **Names,
                                 MultiExprArg constraints, MultiExprArg Exprs,
                                 Expr *asmString, MultiExprArg clobbers,
                                 SourceLocation RParenLoc) {
  unsigned NumClobbers = clobbers.size();
  StringLiteral **Constraints =
    reinterpret_cast<StringLiteral**>(constraints.data());
  StringLiteral *AsmString = cast<StringLiteral>(asmString);
  StringLiteral **Clobbers = reinterpret_cast<StringLiteral**>(clobbers.data());

  SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;

  // The parser verifies that there is a string literal here.
  assert(AsmString->isAscii());

  for (unsigned i = 0; i != NumOutputs; i++) {
    StringLiteral *Literal = Constraints[i];
    assert(Literal->isAscii());

    StringRef OutputName;
    if (Names[i])
      OutputName = Names[i]->getName();

    TargetInfo::ConstraintInfo Info(Literal->getString(), OutputName);
    if (!Context.getTargetInfo().validateOutputConstraint(Info))
      return StmtError(Diag(Literal->getLocStart(),
                            diag::err_asm_invalid_output_constraint)
                       << Info.getConstraintStr());

    ExprResult ER = CheckPlaceholderExpr(Exprs[i]);
    if (ER.isInvalid())
      return StmtError();
    Exprs[i] = ER.get();

    // Check that the output exprs are valid lvalues.
    Expr *OutputExpr = Exprs[i];

    // Referring to parameters is not allowed in naked functions.
    if (CheckNakedParmReference(OutputExpr, *this))
      return StmtError();

    OutputConstraintInfos.push_back(Info);

    // If this is dependent, just continue.
    if (OutputExpr->isTypeDependent())
      continue;

    Expr::isModifiableLvalueResult IsLV =
        OutputExpr->isModifiableLvalue(Context, /*Loc=*/nullptr);
    switch (IsLV) {
    case Expr::MLV_Valid:
      // Cool, this is an lvalue.
      break;
    case Expr::MLV_ArrayType:
      // This is OK too.
      break;
    case Expr::MLV_LValueCast: {
      const Expr *LVal = OutputExpr->IgnoreParenNoopCasts(Context);
      if (!getLangOpts().HeinousExtensions) {
        Diag(LVal->getLocStart(), diag::err_invalid_asm_cast_lvalue)
            << OutputExpr->getSourceRange();
      } else {
        Diag(LVal->getLocStart(), diag::warn_invalid_asm_cast_lvalue)
            << OutputExpr->getSourceRange();
      }
      // Accept, even if we emitted an error diagnostic.
      break;
    }
    case Expr::MLV_IncompleteType:
    case Expr::MLV_IncompleteVoidType:
      if (RequireCompleteType(OutputExpr->getLocStart(), Exprs[i]->getType(),
                              diag::err_dereference_incomplete_type))
        return StmtError();
    default:
      return StmtError(Diag(OutputExpr->getLocStart(),
                            diag::err_asm_invalid_lvalue_in_output)
                       << OutputExpr->getSourceRange());
    }

    unsigned Size = Context.getTypeSize(OutputExpr->getType());
    if (!Context.getTargetInfo().validateOutputSize(Literal->getString(),
                                                    Size))
      return StmtError(Diag(OutputExpr->getLocStart(),
                            diag::err_asm_invalid_output_size)
                       << Info.getConstraintStr());
  }

  SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;

  for (unsigned i = NumOutputs, e = NumOutputs + NumInputs; i != e; i++) {
    StringLiteral *Literal = Constraints[i];
    assert(Literal->isAscii());

    StringRef InputName;
    if (Names[i])
      InputName = Names[i]->getName();

    TargetInfo::ConstraintInfo Info(Literal->getString(), InputName);
//......... some code omitted here .........
Contributor: RobertoMalatesta, Project: cheerp-clang, Lines of code: 101, Source: SemaStmtAsm.cpp


Note: the SmallVector::size examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and any redistribution or use should follow the corresponding project's license. Please do not republish without permission.