

C++ MapVector Class Code Examples

This article collects typical usage examples of the C++ MapVector class. If you are wondering what MapVector is for, how it is used, or what real-world code using it looks like, the curated class examples below may help.


A total of 15 MapVector code examples are shown below, sorted by popularity by default.
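
Before diving into the examples, here is a minimal usage sketch of llvm::MapVector (written for this article, not taken from any of the projects below): MapVector behaves like a map but iterates in insertion order, because it pairs a DenseMap for lookups with a std::vector for ordered storage. The demo function name is made up; the insert/operator[]/find/erase calls match the ones exercised by the unit tests in Examples 6 and 9.

#include <utility>

#include "llvm/ADT/MapVector.h"
#include "llvm/Support/raw_ostream.h"

// Iteration order follows insertion order, not key order.
void mapVectorDemo() {
  llvm::MapVector<int, const char *> MV;
  MV.insert(std::make_pair(42, "first"));
  MV.insert(std::make_pair(7, "second"));
  MV[3] = "third"; // operator[] default-constructs the value if the key is new

  // Prints 42, 7, 3 -- the insertion order, unlike std::map which would sort the keys.
  for (const auto &KV : MV)
    llvm::outs() << KV.first << " -> " << KV.second << "\n";

  // find() and erase() follow the usual associative-container API.
  auto It = MV.find(7);
  if (It != MV.end())
    MV.erase(It);
}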

Example 1: getPointer

// get or create value
Pointer getPointer(Module *M, const char *name, int64_t off)
{
    Value *va;

    MapVector<TestPointer, Pointer>::iterator E = valueMap.end();
    Pointer& p = valueMap[TestPointer(name, off)];

    // if operator[] above grew valueMap, the key was new and the
    // pointer does not exist yet, so create it here
    if (E != valueMap.end()) {
        // use always the same llvm value
        std::map<const char *, Value *>::iterator VI = llvmValues.find(name);
        if (VI == llvmValues.end()) {
            if (strcmp(name, "null") == 0)
                va = ConstantPointerNull::get(llvm::Type::getInt32PtrTy(M->getContext()));
            else
                va = new GlobalVariable(*M, IntegerType::get(M->getContext(), 32),
                                        false,
                                        GlobalValue::CommonLinkage, 0 , name);
            llvmValues.insert(std::make_pair(name, va));
        } else {
            va = VI->second;
        }

        // copy it into the map (p is a reference)
        p = Pointer(va, off);
    }

    return p;
}
Developer ID: chubbymaggie, Project: LLVMSlicer, Lines of code: 31, Source file: PTGTester.cpp
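
The example above detects whether the key was new by capturing end() before calling operator[]: operator[] default-constructs a value for a missing key, which grows the underlying vector and therefore moves the end iterator. The same "get or create" check can be written more directly with insert(), whose returned bool reports whether an insertion actually happened (see Example 6). The sketch below only illustrates that alternative: it reuses valueMap, TestPointer and Pointer from the example, while makeLLVMValue is a hypothetical helper standing in for the GlobalVariable/ConstantPointerNull branch.

// Alternative sketch: get-or-create via insert() instead of comparing end() iterators.
Pointer getPointerAlt(Module *M, const char *name, int64_t off)
{
    auto R = valueMap.insert(std::make_pair(TestPointer(name, off), Pointer()));
    if (R.second) {                            // true only if the key was not present yet
        Value *va = makeLLVMValue(M, name);    // hypothetical helper, see the original branch above
        R.first->second = Pointer(va, off);
    }
    return R.first->second;
}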

Example 2: ini

int IniFile::ReadAndFillTDC(){
    std::ifstream ini( sFileName.c_str() );
    std::stringstream parser;
    std::string token, value, line, group;
    iError = INI_OK;
    // Loading the file into the parser
    if( ini ){
        parser << ini.rdbuf();
        ini.close();
    } else {
        iError = INI_ERROR_CANNOT_OPEN_READ_FILE;
        return iError;
    }
    group = "";
    TdcMap mapTDC;
    MapVector mapVector;
    while( std::getline( parser, line ) && ( iError == INI_OK ) ){
            // Check if the line is comment
            if( !CheckIfComment( line ) ){
                // Check for group
                if( !CheckIfGroup( line, group ) ){
                    // Check for token
                    if( CheckIfToken( line, token, value ) ){
                        // Make the key in format group.key if the group is not empty
                        //if( group.size() > 1 ) token = group + "." + token;
                        mData[ token ] = value;
                    }   else {
                        iError = INI_ERROR_WRONG_FORMAT;
                        return iError;
                    }
                }
                else{
                  mapVector.push_back(mData);
                  mData.clear();
                }
            }
        }
    mapVector.push_back(mData);



    for(int i=3 ; i<mapVector.size() ; i++){
      TDC tempTDC;
      tempTDC.SetName(mapVector[i]["Name"]);
      //std::cout<<"TriggerWindowWidth : "<<mapVector[i]["TriggerWindowWidth"]<<std::endl;
      tempTDC.SetTriggerWindowWidth(std::stoi(mapVector[i]["TriggerWindowWidth"], nullptr,10 ));
      tempTDC.SetTriggerWindowOffset(std::stoi(mapVector[i]["TriggerWindowOffset"], nullptr,10 ));
      tempTDC.SetTriggerExtraSearchMargin(std::stoi(mapVector[i]["TriggerExtraSearchMargin"], nullptr,10 ));
      tempTDC.SetTriggerRejectMargin(std::stoi(mapVector[i]["TriggerRejectMargin"], nullptr,10 ));
      tempTDC.SetEnableTriggerTimeSubstraction(std::stoi(mapVector[i]["EnableTriggerTimeSubstraction"], nullptr,10 ));
      tempTDC.SetIndividualLSB(std::stoi(mapVector[i]["IndividualLSB"], nullptr,10 ));

      fTdcVector.push_back(tempTDC);
      //std::cout<<"Tdc-NName : "<<mapVector[i]["Name"]<<std::endl;
      //std::cout<<"TDC-NAME : "<<mapVector[i]["Name"]<<std::endl;
    }

    std::cout<<"Num of Groups in INI file : "<<mapVector.size()<<std::endl;

}
Developer ID: vatsal512, Project: MuonTomography, Lines of code: 60, Source file: IniFile.cpp

Example 3: myMap

MatrixEpetraStructured<DataType>::MatrixEpetraStructured ( const MapVector<MapEpetra>& vector, int numEntries, bool ignoreNonLocalValues ) :
    MatrixEpetra<DataType> ( typename MatrixEpetra<DataType>::matrix_ptrtype() ), M_blockStructure ( vector )
{
    ASSERT ( vector.nbMap() > 0 , "Map vector empty, impossible to construct a MatrixBlockMonolithicEpetra!" );

    MapEpetra myMap ( vector.totalMap() );

    this->mapPtr().reset ( new MapEpetra ( myMap ) );
    this->matrixPtr().reset ( new typename MatrixEpetra<DataType>::matrix_type ( Copy, *myMap.map ( Unique ), numEntries, ignoreNonLocalValues ) );
}
Developer ID: Danniel-UCAS, Project: lifev, Lines of code: 10, Source file: MatrixEpetraStructured.hpp

Example 4: totalSize

void
MatrixBlockMonolithicEpetra<DataType>::setBlockStructure (const MapVector<MapEpetra>& mapVector)
{
    ASSERT ( mapVector.nbMap() > 0 , "Map vector empty, impossible to set the block structure");

    M_blockNumRows.resize (mapVector.nbMap() );
    M_blockNumColumns.resize (mapVector.nbMap() );

    M_blockFirstRows.resize (mapVector.nbMap() );
    M_blockFirstColumns.resize (mapVector.nbMap() );

    UInt totalSize (0);

    for (UInt i (0); i < mapVector.nbMap(); ++i)
    {
        M_blockNumRows[i] = mapVector.mapSize (i);
        M_blockNumColumns[i] = mapVector.mapSize (i);

        M_blockFirstRows[i] = totalSize;
        M_blockFirstColumns[i] = totalSize;

        totalSize += mapVector.mapSize (i);
    }

    ASSERT ( this->matrixPtr()->NumGlobalCols() == totalSize, " Incompatible block structure (global size does not match) ");
    ASSERT ( this->matrixPtr()->NumGlobalRows() == totalSize, " Incompatible block structure (global size does not match) ");
}
Developer ID: lifev, Project: lifev, Lines of code: 27, Source file: MatrixBlockMonolithicEpetra.hpp

Example 5: writeIndex

static void
writeIndex(MCStreamer &Out, MCSection *Section,
           ArrayRef<unsigned> ContributionOffsets,
           const MapVector<uint64_t, UnitIndexEntry> &IndexEntries) {
  if (IndexEntries.empty())
    return;

  unsigned Columns = 0;
  for (auto &C : ContributionOffsets)
    if (C)
      ++Columns;

  std::vector<unsigned> Buckets(NextPowerOf2(3 * IndexEntries.size() / 2));
  uint64_t Mask = Buckets.size() - 1;
  size_t i = 0;
  for (const auto &P : IndexEntries) {
    auto S = P.first;
    auto H = S & Mask;
    auto HP = ((S >> 32) & Mask) | 1;
    while (Buckets[H]) {
      assert(S != IndexEntries.begin()[Buckets[H] - 1].first &&
             "Duplicate unit");
      H = (H + HP) & Mask;
    }
    Buckets[H] = i + 1;
    ++i;
  }

  Out.SwitchSection(Section);
  Out.EmitIntValue(2, 4);                   // Version
  Out.EmitIntValue(Columns, 4);             // Columns
  Out.EmitIntValue(IndexEntries.size(), 4); // Num Units
  Out.EmitIntValue(Buckets.size(), 4);      // Num Buckets

  // Write the signatures.
  for (const auto &I : Buckets)
    Out.EmitIntValue(I ? IndexEntries.begin()[I - 1].first : 0, 8);

  // Write the indexes.
  for (const auto &I : Buckets)
    Out.EmitIntValue(I, 4);

  // Write the column headers (which sections will appear in the table)
  for (size_t i = 0; i != ContributionOffsets.size(); ++i)
    if (ContributionOffsets[i])
      Out.EmitIntValue(i + DW_SECT_INFO, 4);

  // Write the offsets.
  writeIndexTable(Out, ContributionOffsets, IndexEntries,
                  &DWARFUnitIndex::Entry::SectionContribution::Offset);

  // Write the lengths.
  writeIndexTable(Out, ContributionOffsets, IndexEntries,
                  &DWARFUnitIndex::Entry::SectionContribution::Length);
}
Developer ID: AnachroNia, Project: llvm, Lines of code: 55, Source file: llvm-dwp.cpp

Example 6: TEST

TEST(MapVectorTest, insert) {
  MapVector<int, int> MV;
  std::pair<MapVector<int, int>::iterator, bool> R;

  R = MV.insert(std::make_pair(1, 2));
  ASSERT_EQ(R.first, MV.begin());
  EXPECT_EQ(R.first->first, 1);
  EXPECT_EQ(R.first->second, 2);
  EXPECT_TRUE(R.second);

  R = MV.insert(std::make_pair(1, 3));
  ASSERT_EQ(R.first, MV.begin());
  EXPECT_EQ(R.first->first, 1);
  EXPECT_EQ(R.first->second, 2);
  EXPECT_FALSE(R.second);

  R = MV.insert(std::make_pair(4, 5));
  ASSERT_NE(R.first, MV.end());
  EXPECT_EQ(R.first->first, 4);
  EXPECT_EQ(R.first->second, 5);
  EXPECT_TRUE(R.second);

  EXPECT_EQ(MV.size(), 2u);
  EXPECT_EQ(MV[1], 2);
  EXPECT_EQ(MV[4], 5);
}
Developer ID: Midas8181919, Project: llvm, Lines of code: 26, Source file: MapVectorTest.cpp

Example 7: addAllTypesFromDWP

static void addAllTypesFromDWP(
    MCStreamer &Out, MapVector<uint64_t, UnitIndexEntry> &TypeIndexEntries,
    const DWARFUnitIndex &TUIndex, MCSection *OutputTypes, StringRef Types,
    const UnitIndexEntry &TUEntry, uint32_t &TypesOffset) {
  Out.SwitchSection(OutputTypes);
  for (const DWARFUnitIndex::Entry &E : TUIndex.getRows()) {
    auto *I = E.getOffsets();
    if (!I)
      continue;
    auto P = TypeIndexEntries.insert(std::make_pair(E.getSignature(), TUEntry));
    if (!P.second)
      continue;
    auto &Entry = P.first->second;
    // Zero out the debug_info contribution
    Entry.Contributions[0] = {};
    for (auto Kind : TUIndex.getColumnKinds()) {
      auto &C = Entry.Contributions[Kind - DW_SECT_INFO];
      C.Offset += I->Offset;
      C.Length = I->Length;
      ++I;
    }
    auto &C = Entry.Contributions[DW_SECT_TYPES - DW_SECT_INFO];
    Out.EmitBytes(Types.substr(
        C.Offset - TUEntry.Contributions[DW_SECT_TYPES - DW_SECT_INFO].Offset,
        C.Length));
    C.Offset = TypesOffset;
    TypesOffset += C.Length;
  }
}
Developer ID: AnachroNia, Project: llvm, Lines of code: 29, Source file: llvm-dwp.cpp

Example 8: addAllTypes

static void addAllTypes(MCStreamer &Out,
                        MapVector<uint64_t, UnitIndexEntry> &TypeIndexEntries,
                        MCSection *OutputTypes,
                        const std::vector<StringRef> &TypesSections,
                        const UnitIndexEntry &CUEntry, uint32_t &TypesOffset) {
  for (StringRef Types : TypesSections) {
    Out.SwitchSection(OutputTypes);
    uint32_t Offset = 0;
    DataExtractor Data(Types, true, 0);
    while (Data.isValidOffset(Offset)) {
      UnitIndexEntry Entry = CUEntry;
      // Zero out the debug_info contribution
      Entry.Contributions[0] = {};
      auto &C = Entry.Contributions[DW_SECT_TYPES - DW_SECT_INFO];
      C.Offset = TypesOffset;
      auto PrevOffset = Offset;
      // Length of the unit, including the 4 byte length field.
      C.Length = Data.getU32(&Offset) + 4;

      Data.getU16(&Offset); // Version
      Data.getU32(&Offset); // Abbrev offset
      Data.getU8(&Offset);  // Address size
      auto Signature = Data.getU64(&Offset);
      Offset = PrevOffset + C.Length;

      auto P = TypeIndexEntries.insert(std::make_pair(Signature, Entry));
      if (!P.second)
        continue;

      Out.EmitBytes(Types.substr(PrevOffset, C.Length));
      TypesOffset += C.Length;
    }
  }
}
Developer ID: AnachroNia, Project: llvm, Lines of code: 34, Source file: llvm-dwp.cpp

Example 9: TEST

TEST(MapVectorTest, erase) {
  MapVector<int, int> MV;

  MV.insert(std::make_pair(1, 2));
  MV.insert(std::make_pair(3, 4));
  MV.insert(std::make_pair(5, 6));
  ASSERT_EQ(MV.size(), 3u);

  MV.erase(MV.find(1));
  ASSERT_EQ(MV.size(), 2u);
  ASSERT_EQ(MV.find(1), MV.end());
  ASSERT_EQ(MV[3], 4);
  ASSERT_EQ(MV[5], 6);
}
Developer ID: 0xDEC0DE8, Project: mcsema, Lines of code: 14, Source file: MapVectorTest.cpp

Example 10: ASSERT

void
MatrixEpetraStructured<DataType>::setBlockStructure ( const MapVector<MapEpetra>& mapVector )
{
    ASSERT ( mapVector.nbMap() > 0 , "Map vector empty, impossible to set the block structure" );

    M_blockStructure.setBlockStructure ( mapVector );

    ASSERT ( this->matrixPtr()->NumGlobalCols() == M_blockStructure.numRows(), " Incompatible block structure (global size does not match) " );
    ASSERT ( this->matrixPtr()->NumGlobalRows() == M_blockStructure.numColumns(), " Incompatible block structure (global size does not match) " );
}
Developer ID: Danniel-UCAS, Project: lifev, Lines of code: 10, Source file: MatrixEpetraStructured.hpp

Example 11: MapMemberName

int DialogReaderWriter::MapMemberName(MapVector& aVector, string& aName)
{
	int wId;

	CaseValues* wCase;
	for(size_t i = 0; i < aVector.size(); i++)
	{
		wCase = aVector[i];
		wId = wCase->isMatch(aName);
		if(wId > -1)
			return wId;
	}
	return -1;
}
Developer ID: hoshi89, Project: Forgotten, Lines of code: 14, Source file: DialogReaderWriter.cpp

Example 12: LLVM_DEBUG

// Analyze interleaved accesses and collect them into interleaved load and
// store groups.
//
// When generating code for an interleaved load group, we effectively hoist all
// loads in the group to the location of the first load in program order. When
// generating code for an interleaved store group, we sink all stores to the
// location of the last store. This code motion can change the order of load
// and store instructions and may break dependences.
//
// The code generation strategy mentioned above ensures that we won't violate
// any write-after-read (WAR) dependences.
//
// E.g., for the WAR dependence:  a = A[i];      // (1)
//                                A[i] = b;      // (2)
//
// The store group of (2) is always inserted at or below (2), and the load
// group of (1) is always inserted at or above (1). Thus, the instructions will
// never be reordered. All other dependences are checked to ensure the
// correctness of the instruction reordering.
//
// The algorithm visits all memory accesses in the loop in bottom-up program
// order. Program order is established by traversing the blocks in the loop in
// reverse postorder when collecting the accesses.
//
// We visit the memory accesses in bottom-up order because it can simplify the
// construction of store groups in the presence of write-after-write (WAW)
// dependences.
//
// E.g., for the WAW dependence:  A[i] = a;      // (1)
//                                A[i] = b;      // (2)
//                                A[i + 1] = c;  // (3)
//
// We will first create a store group with (3) and (2). (1) can't be added to
// this group because it and (2) are dependent. However, (1) can be grouped
// with other accesses that may precede it in program order. Note that a
// bottom-up order does not imply that WAW dependences should not be checked.
void InterleavedAccessInfo::analyzeInterleaving(
                                 bool EnablePredicatedInterleavedMemAccesses) {
  LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
  const ValueToValueMap &Strides = LAI->getSymbolicStrides();

  // Holds all accesses with a constant stride.
  MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
  collectConstStrideAccesses(AccessStrideInfo, Strides);

  if (AccessStrideInfo.empty())
    return;

  // Collect the dependences in the loop.
  collectDependences();

  // Holds all interleaved store groups temporarily.
  SmallSetVector<InterleaveGroup *, 4> StoreGroups;
  // Holds all interleaved load groups temporarily.
  SmallSetVector<InterleaveGroup *, 4> LoadGroups;

  // Search in bottom-up program order for pairs of accesses (A and B) that can
  // form interleaved load or store groups. In the algorithm below, access A
  // precedes access B in program order. We initialize a group for B in the
  // outer loop of the algorithm, and then in the inner loop, we attempt to
  // insert each A into B's group if:
  //
  //  1. A and B have the same stride,
  //  2. A and B have the same memory object size, and
  //  3. A belongs in B's group according to its distance from B.
  //
  // Special care is taken to ensure group formation will not break any
  // dependences.
  for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
       BI != E; ++BI) {
    Instruction *B = BI->first;
    StrideDescriptor DesB = BI->second;

    // Initialize a group for B if it has an allowable stride. Even if we don't
    // create a group for B, we continue with the bottom-up algorithm to ensure
    // we don't break any of B's dependences.
    InterleaveGroup *Group = nullptr;
    if (isStrided(DesB.Stride) && 
        (!isPredicated(B->getParent()) || EnablePredicatedInterleavedMemAccesses)) {
      Group = getInterleaveGroup(B);
      if (!Group) {
        LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B
                          << '\n');
        Group = createInterleaveGroup(B, DesB.Stride, DesB.Align);
      }
      if (B->mayWriteToMemory())
        StoreGroups.insert(Group);
      else
        LoadGroups.insert(Group);
    }

    for (auto AI = std::next(BI); AI != E; ++AI) {
      Instruction *A = AI->first;
      StrideDescriptor DesA = AI->second;

      // Our code motion strategy implies that we can't have dependences
      // between accesses in an interleaved group and other accesses located
      // between the first and last member of the group. Note that this also
      // means that a group can't have more than one member at a given offset.
      // The accesses in a group can have dependences with other accesses, but
//......... part of the code omitted here .........
Developer ID: MatzeB, Project: llvm, Lines of code: 101, Source file: VectorUtils.cpp
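
A detail worth noting about the MapVector usage here: collectConstStrideAccesses() fills AccessStrideInfo while traversing the loop blocks in reverse postorder, so the container's insertion order is program order, and the analysis walks it backwards with rbegin()/rend() to obtain the bottom-up order described in the comment block above. The short sketch below only restates that iteration pattern with made-up names; it is not part of the LLVM source.

#include "llvm/ADT/MapVector.h"
#include "llvm/IR/Instruction.h"

// Insertion order doubles as program order, so reverse iteration visits the
// most recently recorded access first (i.e. bottom-up through the loop body).
void visitBottomUp(llvm::MapVector<llvm::Instruction *, unsigned> &Accesses) {
  for (auto It = Accesses.rbegin(), E = Accesses.rend(); It != E; ++It) {
    llvm::Instruction *I = It->first; // later accesses come first
    (void)I;                          // the real analysis work would happen here
  }
}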

Example 13: computeFunctionSummary

static void
computeFunctionSummary(ModuleSummaryIndex &Index, const Module &M,
                       const Function &F, BlockFrequencyInfo *BFI,
                       ProfileSummaryInfo *PSI, bool HasLocalsInUsed,
                       DenseSet<GlobalValue::GUID> &CantBePromoted) {
  // Summary not currently supported for anonymous functions, they should
  // have been named.
  assert(F.hasName());

  unsigned NumInsts = 0;
  // Map from callee ValueId to profile count. Used to accumulate profile
  // counts for all static calls to a given callee.
  MapVector<ValueInfo, CalleeInfo> CallGraphEdges;
  SetVector<ValueInfo> RefEdges;
  SetVector<GlobalValue::GUID> TypeTests;
  ICallPromotionAnalysis ICallAnalysis;

  bool HasInlineAsmMaybeReferencingInternal = false;
  SmallPtrSet<const User *, 8> Visited;
  for (const BasicBlock &BB : F)
    for (const Instruction &I : BB) {
      if (isa<DbgInfoIntrinsic>(I))
        continue;
      ++NumInsts;
      findRefEdges(&I, RefEdges, Visited);
      auto CS = ImmutableCallSite(&I);
      if (!CS)
        continue;

      const auto *CI = dyn_cast<CallInst>(&I);
      // Since we don't know exactly which local values are referenced in inline
      // assembly, conservatively mark the function as possibly referencing
      // a local value from inline assembly to ensure we don't export a
      // reference (which would require renaming and promotion of the
      // referenced value).
      if (HasLocalsInUsed && CI && CI->isInlineAsm())
        HasInlineAsmMaybeReferencingInternal = true;

      auto *CalledValue = CS.getCalledValue();
      auto *CalledFunction = CS.getCalledFunction();
      // Check if this is an alias to a function. If so, get the
      // called aliasee for the checks below.
      if (auto *GA = dyn_cast<GlobalAlias>(CalledValue)) {
        assert(!CalledFunction && "Expected null called function in callsite for alias");
        CalledFunction = dyn_cast<Function>(GA->getBaseObject());
      }
      // Check if this is a direct call to a known function or a known
      // intrinsic, or an indirect call with profile data.
      if (CalledFunction) {
        if (CalledFunction->isIntrinsic()) {
          if (CalledFunction->getIntrinsicID() != Intrinsic::type_test)
            continue;
          // Produce a summary from type.test intrinsics. We only summarize
          // type.test intrinsics that are used other than by an llvm.assume
          // intrinsic. Intrinsics that are assumed are relevant only to the
          // devirtualization pass, not the type test lowering pass.
          bool HasNonAssumeUses = llvm::any_of(CI->uses(), [](const Use &CIU) {
            auto *AssumeCI = dyn_cast<CallInst>(CIU.getUser());
            if (!AssumeCI)
              return true;
            Function *F = AssumeCI->getCalledFunction();
            return !F || F->getIntrinsicID() != Intrinsic::assume;
          });
          if (HasNonAssumeUses) {
            auto *TypeMDVal = cast<MetadataAsValue>(CI->getArgOperand(1));
            if (auto *TypeId = dyn_cast<MDString>(TypeMDVal->getMetadata()))
              TypeTests.insert(GlobalValue::getGUID(TypeId->getString()));
          }
        }
        // We should have named any anonymous globals
        assert(CalledFunction->hasName());
        auto ScaledCount = BFI ? BFI->getBlockProfileCount(&BB) : None;
        auto Hotness = ScaledCount ? getHotness(ScaledCount.getValue(), PSI)
                                   : CalleeInfo::HotnessType::Unknown;

        // Use the original CalledValue, in case it was an alias. We want
        // to record the call edge to the alias in that case. Eventually
        // an alias summary will be created to associate the alias and
        // aliasee.
        CallGraphEdges[cast<GlobalValue>(CalledValue)].updateHotness(Hotness);
      } else {
        // Skip inline assembly calls.
        if (CI && CI->isInlineAsm())
          continue;
        // Skip direct calls.
        if (!CS.getCalledValue() || isa<Constant>(CS.getCalledValue()))
          continue;

        uint32_t NumVals, NumCandidates;
        uint64_t TotalCount;
        auto CandidateProfileData =
            ICallAnalysis.getPromotionCandidatesForInstruction(
                &I, NumVals, TotalCount, NumCandidates);
        for (auto &Candidate : CandidateProfileData)
          CallGraphEdges[Candidate.Value].updateHotness(
              getHotness(Candidate.Count, PSI));
      }
    }

  bool NonRenamableLocal = isNonRenamableLocal(F);
//......... part of the code omitted here .........
Developer ID: dberlin, Project: llvm-gvn-rewrite, Lines of code: 101, Source file: ModuleSummaryAnalysis.cpp

Example 14: runThinLTOBackend

static void runThinLTOBackend(ModuleSummaryIndex *CombinedIndex, Module *M,
                              std::unique_ptr<raw_pwrite_stream> OS,
                              std::string SampleProfile) {
  StringMap<std::map<GlobalValue::GUID, GlobalValueSummary *>>
      ModuleToDefinedGVSummaries;
  CombinedIndex->collectDefinedGVSummariesPerModule(ModuleToDefinedGVSummaries);

  // We can simply import the values mentioned in the combined index, since
  // we should only invoke this using the individual indexes written out
  // via a WriteIndexesThinBackend.
  FunctionImporter::ImportMapTy ImportList;
  for (auto &GlobalList : *CombinedIndex) {
    auto GUID = GlobalList.first;
    assert(GlobalList.second.size() == 1 &&
           "Expected individual combined index to have one summary per GUID");
    auto &Summary = GlobalList.second[0];
    // Skip the summaries for the importing module. These are included to
    // e.g. record required linkage changes.
    if (Summary->modulePath() == M->getModuleIdentifier())
      continue;
    // Doesn't matter what value we plug in to the map, just needs an entry
    // to provoke importing by thinBackend.
    ImportList[Summary->modulePath()][GUID] = 1;
  }

  std::vector<std::unique_ptr<llvm::MemoryBuffer>> OwnedImports;
  MapVector<llvm::StringRef, llvm::BitcodeModule> ModuleMap;

  for (auto &I : ImportList) {
    ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> MBOrErr =
        llvm::MemoryBuffer::getFile(I.first());
    if (!MBOrErr) {
      errs() << "Error loading imported file '" << I.first()
             << "': " << MBOrErr.getError().message() << "\n";
      return;
    }

    Expected<std::vector<BitcodeModule>> BMsOrErr =
        getBitcodeModuleList(**MBOrErr);
    if (!BMsOrErr) {
      handleAllErrors(BMsOrErr.takeError(), [&](ErrorInfoBase &EIB) {
        errs() << "Error loading imported file '" << I.first()
               << "': " << EIB.message() << '\n';
      });
      return;
    }

    // The bitcode file may contain multiple modules, we want the one with a
    // summary.
    bool FoundModule = false;
    for (BitcodeModule &BM : *BMsOrErr) {
      Expected<bool> HasSummary = BM.hasSummary();
      if (HasSummary && *HasSummary) {
        ModuleMap.insert({I.first(), BM});
        FoundModule = true;
        break;
      }
    }
    if (!FoundModule) {
      errs() << "Error loading imported file '" << I.first()
             << "': Could not find module summary\n";
      return;
    }

    OwnedImports.push_back(std::move(*MBOrErr));
  }
  auto AddStream = [&](size_t Task) {
    return llvm::make_unique<lto::NativeObjectStream>(std::move(OS));
  };
  lto::Config Conf;
  Conf.SampleProfile = SampleProfile;
  if (Error E = thinBackend(
          Conf, 0, AddStream, *M, *CombinedIndex, ImportList,
          ModuleToDefinedGVSummaries[M->getModuleIdentifier()], ModuleMap)) {
    handleAllErrors(std::move(E), [&](ErrorInfoBase &EIB) {
      errs() << "Error running ThinLTO backend: " << EIB.message() << '\n';
    });
  }
}
Developer ID: cms-externals, Project: clang, Lines of code: 79, Source file: BackendUtil.cpp

Example 15: runThinLTOBackend

static void runThinLTOBackend(ModuleSummaryIndex *CombinedIndex, Module *M,
                              const HeaderSearchOptions &HeaderOpts,
                              const CodeGenOptions &CGOpts,
                              const clang::TargetOptions &TOpts,
                              const LangOptions &LOpts,
                              std::unique_ptr<raw_pwrite_stream> OS,
                              std::string SampleProfile,
                              BackendAction Action) {
  StringMap<DenseMap<GlobalValue::GUID, GlobalValueSummary *>>
      ModuleToDefinedGVSummaries;
  CombinedIndex->collectDefinedGVSummariesPerModule(ModuleToDefinedGVSummaries);

  setCommandLineOpts(CGOpts);

  // We can simply import the values mentioned in the combined index, since
  // we should only invoke this using the individual indexes written out
  // via a WriteIndexesThinBackend.
  FunctionImporter::ImportMapTy ImportList;
  for (auto &GlobalList : *CombinedIndex) {
    // Ignore entries for undefined references.
    if (GlobalList.second.SummaryList.empty())
      continue;

    auto GUID = GlobalList.first;
    assert(GlobalList.second.SummaryList.size() == 1 &&
           "Expected individual combined index to have one summary per GUID");
    auto &Summary = GlobalList.second.SummaryList[0];
    // Skip the summaries for the importing module. These are included to
    // e.g. record required linkage changes.
    if (Summary->modulePath() == M->getModuleIdentifier())
      continue;
    // Doesn't matter what value we plug in to the map, just needs an entry
    // to provoke importing by thinBackend.
    ImportList[Summary->modulePath()][GUID] = 1;
  }

  std::vector<std::unique_ptr<llvm::MemoryBuffer>> OwnedImports;
  MapVector<llvm::StringRef, llvm::BitcodeModule> ModuleMap;

  for (auto &I : ImportList) {
    ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> MBOrErr =
        llvm::MemoryBuffer::getFile(I.first());
    if (!MBOrErr) {
      errs() << "Error loading imported file '" << I.first()
             << "': " << MBOrErr.getError().message() << "\n";
      return;
    }

    Expected<BitcodeModule> BMOrErr = FindThinLTOModule(**MBOrErr);
    if (!BMOrErr) {
      handleAllErrors(BMOrErr.takeError(), [&](ErrorInfoBase &EIB) {
        errs() << "Error loading imported file '" << I.first()
               << "': " << EIB.message() << '\n';
      });
      return;
    }
    ModuleMap.insert({I.first(), *BMOrErr});

    OwnedImports.push_back(std::move(*MBOrErr));
  }
  auto AddStream = [&](size_t Task) {
    return llvm::make_unique<lto::NativeObjectStream>(std::move(OS));
  };
  lto::Config Conf;
  Conf.CPU = TOpts.CPU;
  Conf.CodeModel = getCodeModel(CGOpts);
  Conf.MAttrs = TOpts.Features;
  Conf.RelocModel = getRelocModel(CGOpts);
  Conf.CGOptLevel = getCGOptLevel(CGOpts);
  initTargetOptions(Conf.Options, CGOpts, TOpts, LOpts, HeaderOpts);
  Conf.SampleProfile = std::move(SampleProfile);
  Conf.UseNewPM = CGOpts.ExperimentalNewPassManager;
  switch (Action) {
  case Backend_EmitNothing:
    Conf.PreCodeGenModuleHook = [](size_t Task, const Module &Mod) {
      return false;
    };
    break;
  case Backend_EmitLL:
    Conf.PreCodeGenModuleHook = [&](size_t Task, const Module &Mod) {
      M->print(*OS, nullptr, CGOpts.EmitLLVMUseLists);
      return false;
    };
    break;
  case Backend_EmitBC:
    Conf.PreCodeGenModuleHook = [&](size_t Task, const Module &Mod) {
      WriteBitcodeToFile(M, *OS, CGOpts.EmitLLVMUseLists);
      return false;
    };
    break;
  default:
    Conf.CGFileType = getCodeGenFileType(Action);
    break;
  }
  if (Error E = thinBackend(
          Conf, 0, AddStream, *M, *CombinedIndex, ImportList,
          ModuleToDefinedGVSummaries[M->getModuleIdentifier()], ModuleMap)) {
    handleAllErrors(std::move(E), [&](ErrorInfoBase &EIB) {
      errs() << "Error running ThinLTO backend: " << EIB.message() << '\n';
    });
//......... part of the code omitted here .........
Developer ID: lijiansong, Project: clang, Lines of code: 101, Source file: BackendUtil.cpp


Note: The MapVector class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors; copyright of the source code belongs to the original authors, and distribution and use should follow the License of the corresponding project. Do not reproduce without permission.