

C++ IndexWriterPtr Class Code Examples

This article collects typical usage examples of the C++ IndexWriterPtr class. If you are wondering what the IndexWriterPtr class does, how to use it, or what real-world IndexWriterPtr code looks like, the curated examples below should help.


The following presents 15 code examples of the IndexWriterPtr class, sorted by popularity by default.
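Before the individual examples, here is a minimal sketch of the typical IndexWriterPtr lifecycle as it appears throughout the examples below (create a directory, open a writer, add a document, close). It is an illustrative sketch assembled from the patterns in these examples, not taken verbatim from any one of them; the umbrella include and field names are assumptions.

// Minimal sketch: index one document into an in-memory directory, then close the writer.
#include "LuceneHeaders.h"   // assumed Lucene++ umbrella header; adjust to your include setup
using namespace Lucene;

int main() {
    DirectoryPtr dir = newLucene<RAMDirectory>();
    // 'true' creates/overwrites the index; MaxFieldLengthLIMITED caps indexed tokens per field
    IndexWriterPtr writer = newLucene<IndexWriter>(dir, newLucene<WhitespaceAnalyzer>(), true, IndexWriter::MaxFieldLengthLIMITED);

    DocumentPtr doc = newLucene<Document>();
    doc->add(newLucene<Field>(L"content", L"hello lucene", Field::STORE_YES, Field::INDEX_ANALYZED));
    writer->addDocument(doc);

    writer->optimize();   // optional: merge down to a single segment
    writer->close();      // commit changes and release the index lock
    return 0;
}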

Example 1: TEST_F

TEST_F(SegmentTermEnumTest, testPrevTermAtEnd) {
    DirectoryPtr dir = newLucene<MockRAMDirectory>();
    IndexWriterPtr writer  = newLucene<IndexWriter>(dir, newLucene<WhitespaceAnalyzer>(), true, IndexWriter::MaxFieldLengthLIMITED);
    addDoc(writer, L"aaa bbb");
    writer->close();
    SegmentReaderPtr reader = SegmentReader::getOnlySegmentReader(dir);
    SegmentTermEnumPtr termEnum = boost::dynamic_pointer_cast<SegmentTermEnum>(reader->terms());
    EXPECT_TRUE(termEnum->next());
    EXPECT_EQ(L"aaa", termEnum->term()->text());
    EXPECT_TRUE(termEnum->next());
    EXPECT_EQ(L"aaa", termEnum->prev()->text());
    EXPECT_EQ(L"bbb", termEnum->term()->text());
    EXPECT_TRUE(!termEnum->next());
    EXPECT_EQ(L"bbb", termEnum->prev()->text());
}
Developer ID: 304471720, Project: LucenePlusPlus, Lines: 15, Source: SegmentTermEnumTest.cpp

Example 2: MultiThreadTermVectorsFixture

 MultiThreadTermVectorsFixture()
 {
     directory = newLucene<RAMDirectory>();
     numDocs = 100;
     numThreads = 3;
     
     IndexWriterPtr writer = newLucene<IndexWriter>(directory, newLucene<SimpleAnalyzer>(), true, IndexWriter::MaxFieldLengthLIMITED);
     for (int32_t i = 0; i < numDocs; ++i)
     {
         DocumentPtr doc = newLucene<Document>();
         FieldablePtr fld = newLucene<Field>(L"field", intToEnglish(i), Field::STORE_YES, Field::INDEX_NOT_ANALYZED, Field::TERM_VECTOR_YES);
         doc->add(fld);
         writer->addDocument(doc);
     }
     writer->close();
 }
Developer ID: alesha1488, Project: LucenePlusPlus, Lines: 16, Source: MultiThreadTermVectorsTest.cpp

Example 3: setup

    /// One-time setup to initialise static members
    void setup() {
        // set the theoretical maximum term count for 8bit (see docs for the number)
        BooleanQuery::setMaxClauseCount(3 * 255 * 2 + 255);

        directory = newLucene<RAMDirectory>();
        IndexWriterPtr writer = newLucene<IndexWriter>(directory, newLucene<WhitespaceAnalyzer>(), true, IndexWriter::MaxFieldLengthUNLIMITED);

        NumericFieldPtr field8 = newLucene<NumericField>(L"field8", 8, Field::STORE_YES, true);
        NumericFieldPtr field4 = newLucene<NumericField>(L"field4", 4, Field::STORE_YES, true);
        NumericFieldPtr field2 = newLucene<NumericField>(L"field2", 2, Field::STORE_YES, true);
        NumericFieldPtr fieldNoTrie = newLucene<NumericField>(L"field" + StringUtils::toString(INT_MAX), INT_MAX, Field::STORE_YES, true);
        NumericFieldPtr ascfield8 = newLucene<NumericField>(L"ascfield8", 8, Field::STORE_NO, true);
        NumericFieldPtr ascfield4 = newLucene<NumericField>(L"ascfield4", 4, Field::STORE_NO, true);
        NumericFieldPtr ascfield2 = newLucene<NumericField>(L"ascfield2", 2, Field::STORE_NO, true);

        DocumentPtr doc = newLucene<Document>();

        // add fields, that have a distance to test general functionality
        doc->add(field8);
        doc->add(field4);
        doc->add(field2);
        doc->add(fieldNoTrie);

        // add ascending fields with a distance of 1, beginning at -noDocs/2 to test the correct splitting of range and inclusive/exclusive
        doc->add(ascfield8);
        doc->add(ascfield4);
        doc->add(ascfield2);

        // Add a series of noDocs docs with increasing int values
        for (int32_t l = 0; l < noDocs; ++l) {
            int32_t val = distance * l + startOffset;
            field8->setIntValue(val);
            field4->setIntValue(val);
            field2->setIntValue(val);
            fieldNoTrie->setIntValue(val);

            val = l - (noDocs / 2);
            ascfield8->setIntValue(val);
            ascfield4->setIntValue(val);
            ascfield2->setIntValue(val);
            writer->addDocument(doc);
        }

        writer->optimize();
        writer->close();
        searcher = newLucene<IndexSearcher>(directory, true);
    }
Developer ID: 304471720, Project: LucenePlusPlus, Lines: 48, Source: NumericRangeQuery32Test.cpp

Example 4: addDocs2

static void addDocs2(IndexWriterPtr writer, int32_t numDocs)
{
    for (int32_t i = 0; i < numDocs; ++i)
    {
        DocumentPtr doc = newLucene<Document>();
        doc->add(newLucene<Field>(L"content", L"bbb", Field::STORE_NO, Field::INDEX_ANALYZED));
        writer->addDocument(doc);
    }
}
Developer ID: alesha1488, Project: LucenePlusPlus, Lines: 9, Source: AddIndexesNoOptimizeTest.cpp

Example 5: checkPhraseQuery

    double checkPhraseQuery(DocumentPtr doc, PhraseQueryPtr query, int32_t slop, int32_t expectedNumResults)
    {
        query->setSlop(slop);

        RAMDirectoryPtr ramDir = newLucene<RAMDirectory>();
        WhitespaceAnalyzerPtr analyzer = newLucene<WhitespaceAnalyzer>();
        IndexWriterPtr writer = newLucene<IndexWriter>(ramDir, analyzer, IndexWriter::MaxFieldLengthUNLIMITED);
        writer->addDocument(doc);
        writer->close();

        IndexSearcherPtr searcher = newLucene<IndexSearcher>(ramDir, true);
        TopDocsPtr td = searcher->search(query, FilterPtr(), 10);
        BOOST_CHECK_EQUAL(expectedNumResults, td->totalHits);

        searcher->close();
        ramDir->close();

        return td->maxScore;
    }
Developer ID: alesha1488, Project: LucenePlusPlus, Lines: 19, Source: SloppyPhraseQueryTest.cpp

Example 6: createIndex

    void createIndex(const DirectoryPtr& dir, bool multiSegment) {
        IndexWriter::unlock(dir);
        IndexWriterPtr w = newLucene<IndexWriter>(dir, newLucene<WhitespaceAnalyzer>(), IndexWriter::MaxFieldLengthLIMITED);

        w->setMergePolicy(newLucene<LogDocMergePolicy>(w));

        for (int32_t i = 0; i < 100; ++i) {
            w->addDocument(createDocument(i, 4));
            if (multiSegment && (i % 10) == 0) {
                w->commit();
            }
        }

        if (!multiSegment) {
            w->optimize();
        }

        w->close();

        IndexReaderPtr r = IndexReader::open(dir, false);
        if (multiSegment) {
            EXPECT_TRUE(r->getSequentialSubReaders().size() > 1);
        } else {
            EXPECT_EQ(r->getSequentialSubReaders().size(), 1);
        }
        r->close();
    }
Developer ID: 304471720, Project: LucenePlusPlus, Lines: 27, Source: IndexReaderCloneNormsTest.cpp

Example 7: PrefixInBooleanQueryFixture

 PrefixInBooleanQueryFixture()
 {
     directory = newLucene<RAMDirectory>();
     IndexWriterPtr writer = newLucene<IndexWriter>(directory, newLucene<WhitespaceAnalyzer>(), true, IndexWriter::MaxFieldLengthLIMITED);
     
     for (int32_t i = 0; i < 5137; ++i)
     {
         DocumentPtr doc = newLucene<Document>();
         doc->add(newLucene<Field>(FIELD, L"meaninglessnames", Field::STORE_YES, Field::INDEX_NOT_ANALYZED));
         writer->addDocument(doc);
     }
     {
         DocumentPtr doc = newLucene<Document>();
         doc->add(newLucene<Field>(FIELD, L"tangfulin", Field::STORE_YES, Field::INDEX_NOT_ANALYZED));
         writer->addDocument(doc);
     }
     
     for (int32_t i = 5138; i < 11377; ++i)
     {
         DocumentPtr doc = newLucene<Document>();
         doc->add(newLucene<Field>(FIELD, L"meaninglessnames", Field::STORE_YES, Field::INDEX_NOT_ANALYZED));
         writer->addDocument(doc);
     }
     {
         DocumentPtr doc = newLucene<Document>();
         doc->add(newLucene<Field>(FIELD, L"tangfulin", Field::STORE_YES, Field::INDEX_NOT_ANALYZED));
         writer->addDocument(doc);
     }
     
     writer->close();
 }
Developer ID: alesha1488, Project: LucenePlusPlus, Lines: 31, Source: PrefixInBooleanQueryTest.cpp

Example 8: createIndex

    void createIndex(int32_t numHits) {
        int32_t numDocs = 500;

        DirectoryPtr directory = newLucene<SeekCountingDirectory>();
        IndexWriterPtr writer = newLucene<IndexWriter>(directory, newLucene<WhitespaceAnalyzer>(), true, IndexWriter::MaxFieldLengthLIMITED);
        writer->setUseCompoundFile(false);
        writer->setMaxBufferedDocs(10);
        for (int32_t i = 0; i < numDocs; ++i) {
            DocumentPtr doc = newLucene<Document>();
            String content;
            if (i % (numDocs / numHits) == 0) {
                // add a document that matches the query "term1 term2"
                content = term1 + L" " + term2;
            } else if (i % 15 == 0) {
                // add a document that only contains term1
                content = term1 + L" " + term1;
            } else {
                // add a document that contains term2 but not term 1
                content = term3 + L" " + term2;
            }

            doc->add(newLucene<Field>(field, content, Field::STORE_YES, Field::INDEX_ANALYZED));
            writer->addDocument(doc);
        }

        // make sure the index has only a single segment
        writer->optimize();
        writer->close();

        SegmentReaderPtr reader = SegmentReader::getOnlySegmentReader(directory);
        searcher = newLucene<IndexSearcher>(reader);
    }
Developer ID: 304471720, Project: LucenePlusPlus, Lines: 32, Source: LazyProxSkippingTest.cpp

Example 9: FieldCacheSanityCheckerTestFixture

    FieldCacheSanityCheckerTestFixture()
    {
        RAMDirectoryPtr dirA = newLucene<RAMDirectory>();
        RAMDirectoryPtr dirB = newLucene<RAMDirectory>();

        IndexWriterPtr wA = newLucene<IndexWriter>(dirA, newLucene<WhitespaceAnalyzer>(), true, IndexWriter::MaxFieldLengthLIMITED);
        IndexWriterPtr wB = newLucene<IndexWriter>(dirB, newLucene<WhitespaceAnalyzer>(), true, IndexWriter::MaxFieldLengthLIMITED);

        int64_t theLong = LLONG_MAX;
        double theDouble = DBL_MAX;
        uint8_t theByte = UCHAR_MAX;
        int32_t theInt = INT_MAX;
        for (int32_t i = 0; i < NUM_DOCS; ++i)
        {
            DocumentPtr doc = newLucene<Document>();
            doc->add(newLucene<Field>(L"theLong", StringUtils::toString(theLong--), Field::STORE_NO, Field::INDEX_NOT_ANALYZED));
            doc->add(newLucene<Field>(L"theDouble", StringUtils::toString(theDouble--), Field::STORE_NO, Field::INDEX_NOT_ANALYZED));
            doc->add(newLucene<Field>(L"theByte", StringUtils::toString(theByte--), Field::STORE_NO, Field::INDEX_NOT_ANALYZED));
            doc->add(newLucene<Field>(L"theInt", StringUtils::toString(theInt--), Field::STORE_NO, Field::INDEX_NOT_ANALYZED));
            if (i % 3 == 0)
                wA->addDocument(doc);
            else
                wB->addDocument(doc);
        }
        wA->close();
        wB->close();
        readerA = IndexReader::open(dirA, true);
        readerB = IndexReader::open(dirB, true);
        readerX = newLucene<MultiReader>(newCollection<IndexReaderPtr>(readerA, readerB));
    }
Developer ID: alesha1488, Project: LucenePlusPlus, Lines: 30, Source: FieldCacheSanityCheckerTest.cpp

Example 10: TEST_F

TEST_F(IndexWriterReaderTest, testAddIndexes2) {
    bool optimize = false;

    DirectoryPtr dir1 = newLucene<MockRAMDirectory>();
    IndexWriterPtr writer = newLucene<IndexWriter>(dir1, newLucene<WhitespaceAnalyzer>(), IndexWriter::MaxFieldLengthLIMITED);

    DirectoryPtr dir2 = newLucene<MockRAMDirectory>();
    IndexWriterPtr writer2 = newLucene<IndexWriter>(dir2, newLucene<WhitespaceAnalyzer>(), IndexWriter::MaxFieldLengthLIMITED);
    createIndexNoClose(!optimize, L"index2", writer2);
    writer2->close();

    Collection<DirectoryPtr> dirs = newCollection<DirectoryPtr>(dir2);

    writer->addIndexesNoOptimize(dirs);
    writer->addIndexesNoOptimize(dirs);
    writer->addIndexesNoOptimize(dirs);
    writer->addIndexesNoOptimize(dirs);
    writer->addIndexesNoOptimize(dirs);

    IndexReaderPtr r1 = writer->getReader();
    EXPECT_EQ(500, r1->maxDoc());

    r1->close();
    writer->close();
    dir1->close();
}
Developer ID: 304471720, Project: LucenePlusPlus, Lines: 26, Source: IndexWriterReaderTest.cpp

Example 11: runTest

// Run one indexer and 2 searchers against single index as stress test.
static void runTest(DirectoryPtr directory)
{
    Collection<TimedThreadPtr> threads(Collection<TimedThreadPtr>::newInstance(4));
    AnalyzerPtr analyzer = newLucene<SimpleAnalyzer>();
    
    IndexWriterPtr writer = newLucene<MockIndexWriter>(directory, analyzer, true, IndexWriter::MaxFieldLengthUNLIMITED);
    
    writer->setMaxBufferedDocs(7);
    writer->setMergeFactor(3);
    
    // Establish a base index of 100 docs
    for (int32_t i = 0; i < 100; ++i)
    {
        DocumentPtr d = newLucene<Document>();
        d->add(newLucene<Field>(L"id", StringUtils::toString(i), Field::STORE_YES, Field::INDEX_NOT_ANALYZED));
        d->add(newLucene<Field>(L"contents", intToEnglish(i), Field::STORE_NO, Field::INDEX_ANALYZED));
        if ((i - 1) % 7 == 0)
            writer->commit();
        writer->addDocument(d);
    }
    writer->commit();
    
    IndexReaderPtr r = IndexReader::open(directory, true);
    BOOST_CHECK_EQUAL(100, r->numDocs());
    r->close();

    IndexerThreadPtr indexerThread1 = newLucene<IndexerThread>(writer);
    threads[0] = indexerThread1;
    indexerThread1->start();

    IndexerThreadPtr indexerThread2 = newLucene<IndexerThread>(writer);
    threads[1] = indexerThread2;
    indexerThread2->start();

    SearcherThreadPtr searcherThread1 = newLucene<SearcherThread>(directory);
    threads[2] = searcherThread1;
    searcherThread1->start();

    SearcherThreadPtr searcherThread2 = newLucene<SearcherThread>(directory);
    threads[3] = searcherThread2;
    searcherThread2->start();
    
    indexerThread1->join();
    indexerThread2->join();
    searcherThread1->join();
    searcherThread2->join();
    
    writer->close();

    BOOST_CHECK(!indexerThread1->failed); // hit unexpected exception in indexer1
    BOOST_CHECK(!indexerThread2->failed); // hit unexpected exception in indexer2
    BOOST_CHECK(!searcherThread1->failed); // hit unexpected exception in search1
    BOOST_CHECK(!searcherThread2->failed); // hit unexpected exception in search2
}
Developer ID: alesha1488, Project: LucenePlusPlus, Lines: 55, Source: AtomicUpdateTest.cpp

Example 12: addDocs

 void addDocs(const DirectoryPtr& dir, int32_t ndocs, bool compound) {
     IndexWriterPtr iw = newLucene<IndexWriter>(dir, anlzr, false, IndexWriter::MaxFieldLengthLIMITED);
     iw->setMaxBufferedDocs(5);
     iw->setMergeFactor(3);
     iw->setSimilarity(similarityOne);
     iw->setUseCompoundFile(compound);
     for (int32_t i = 0; i < ndocs; ++i) {
         iw->addDocument(newDoc());
     }
     iw->close();
 }
Developer ID: 304471720, Project: LucenePlusPlus, Lines: 11, Source: IndexReaderCloneNormsTest.cpp

Example 13: TEST_F

/// Tests whether the DocumentWriter and SegmentMerger correctly enable the payload bit in the FieldInfo
TEST_F(PayloadsTest, testPayloadFieldBit) {
    DirectoryPtr ram = newLucene<RAMDirectory>();
    PayloadAnalyzerPtr analyzer = newLucene<PayloadAnalyzer>();
    IndexWriterPtr writer = newLucene<IndexWriter>(ram, analyzer, true, IndexWriter::MaxFieldLengthLIMITED);
    DocumentPtr d = newLucene<Document>();
    // this field won't have any payloads
    d->add(newLucene<Field>(L"f1", L"This field has no payloads", Field::STORE_NO, Field::INDEX_ANALYZED));
    // this field will have payloads in all docs, however not for all term positions,
    // so this field is used to check if the DocumentWriter correctly enables the payloads bit
    // even if only some term positions have payloads
    d->add(newLucene<Field>(L"f2", L"This field has payloads in all docs", Field::STORE_NO, Field::INDEX_ANALYZED));
    d->add(newLucene<Field>(L"f2", L"This field has payloads in all docs", Field::STORE_NO, Field::INDEX_ANALYZED));
    // this field is used to verify if the SegmentMerger enables payloads for a field if it has payloads
    // enabled in only some documents
    d->add(newLucene<Field>(L"f3", L"This field has payloads in some docs", Field::STORE_NO, Field::INDEX_ANALYZED));
    // only add payload data for field f2

    ByteArray someData(ByteArray::newInstance(8));
    uint8_t input[8] = { 's', 'o', 'm', 'e', 'd', 'a', 't', 'a' };
    std::memcpy(someData.get(), input, 8);

    analyzer->setPayloadData(L"f2", 1, someData, 0, 1);

    writer->addDocument(d);
    // flush
    writer->close();

    SegmentReaderPtr reader = SegmentReader::getOnlySegmentReader(ram);
    FieldInfosPtr fi = reader->fieldInfos();
    EXPECT_TRUE(!fi->fieldInfo(L"f1")->storePayloads);
    EXPECT_TRUE(fi->fieldInfo(L"f2")->storePayloads);
    EXPECT_TRUE(!fi->fieldInfo(L"f3")->storePayloads);
    reader->close();

    // now we add another document which has payloads for field f3 and verify if the SegmentMerger
    // enabled payloads for that field
    writer = newLucene<IndexWriter>(ram, analyzer, true, IndexWriter::MaxFieldLengthLIMITED);
    d = newLucene<Document>();
    d->add(newLucene<Field>(L"f1", L"This field has no payloads", Field::STORE_NO, Field::INDEX_ANALYZED));
    d->add(newLucene<Field>(L"f2", L"This field has payloads in all docs", Field::STORE_NO, Field::INDEX_ANALYZED));
    d->add(newLucene<Field>(L"f2", L"This field has payloads in all docs", Field::STORE_NO, Field::INDEX_ANALYZED));
    d->add(newLucene<Field>(L"f3", L"This field has payloads in some docs", Field::STORE_NO, Field::INDEX_ANALYZED));
    // add payload data for field f2 and f3
    analyzer->setPayloadData(L"f2", someData, 0, 1);
    analyzer->setPayloadData(L"f3", someData, 0, 3);
    writer->addDocument(d);
    // force merge
    writer->optimize();
    // flush
    writer->close();

    reader = SegmentReader::getOnlySegmentReader(ram);
    fi = reader->fieldInfos();
    EXPECT_TRUE(!fi->fieldInfo(L"f1")->storePayloads);
    EXPECT_TRUE(fi->fieldInfo(L"f2")->storePayloads);
    EXPECT_TRUE(fi->fieldInfo(L"f3")->storePayloads);
    reader->close();
}
Developer ID: 304471720, Project: LucenePlusPlus, Lines: 59, Source: PayloadsTest.cpp

Example 14: AddDirectoriesThreads

    AddDirectoriesThreads(int32_t numDirs, const IndexWriterPtr& mainWriter) {
        this->numDirs = numDirs;
        this->mainWriter = mainWriter;
        threads = Collection<LuceneThreadPtr>::newInstance(NUM_THREADS);
        failures = Collection<LuceneException>::newInstance();
        didClose = false;
        count = newLucene<HeavyAtomicInt>(0);
        numAddIndexesNoOptimize = newLucene<HeavyAtomicInt>(0);
        addDir = newLucene<MockRAMDirectory>();
        IndexWriterPtr writer = newLucene<IndexWriter>(addDir, newLucene<WhitespaceAnalyzer>(), IndexWriter::MaxFieldLengthLIMITED);
        writer->setMaxBufferedDocs(2);
        for (int32_t i = 0; i < NUM_INIT_DOCS; ++i) {
            DocumentPtr doc = createDocument(i, L"addindex", 4);
            writer->addDocument(doc);
        }

        writer->close();

        readers = Collection<IndexReaderPtr>::newInstance(numDirs);
        for (int32_t i = 0; i < numDirs; ++i) {
            readers[i] = IndexReader::open(addDir, false);
        }
    }
Developer ID: 304471720, Project: LucenePlusPlus, Lines: 23, Source: IndexWriterReaderTest.cpp

Example 15: MultiSearcherRankingFixture

    MultiSearcherRankingFixture()
    {
        // create MultiSearcher from two separate searchers
        DirectoryPtr d1 = newLucene<RAMDirectory>();
        IndexWriterPtr iw1 = newLucene<IndexWriter>(d1, newLucene<StandardAnalyzer>(LuceneVersion::LUCENE_CURRENT), true, IndexWriter::MaxFieldLengthLIMITED);
        addCollection1(iw1);
        iw1->close();
        DirectoryPtr d2 = newLucene<RAMDirectory>();
        IndexWriterPtr iw2 = newLucene<IndexWriter>(d2, newLucene<StandardAnalyzer>(LuceneVersion::LUCENE_CURRENT), true, IndexWriter::MaxFieldLengthLIMITED);
        addCollection2(iw2);
        iw2->close();

        Collection<SearchablePtr> s = newCollection<SearchablePtr>(newLucene<IndexSearcher>(d1, true), newLucene<IndexSearcher>(d2, true));
        multiSearcher = newLucene<MultiSearcher>(s);

        // create IndexSearcher which contains all documents
        DirectoryPtr d = newLucene<RAMDirectory>();
        IndexWriterPtr iw = newLucene<IndexWriter>(d, newLucene<StandardAnalyzer>(LuceneVersion::LUCENE_CURRENT), true, IndexWriter::MaxFieldLengthLIMITED);
        addCollection1(iw);
        addCollection2(iw);
        iw->close();
        singleSearcher = newLucene<IndexSearcher>(d, true);
    }
Developer ID: alesha1488, Project: LucenePlusPlus, Lines: 23, Source: MultiSearcherRankingTest.cpp


Note: The IndexWriterPtr class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors, and the source code copyright remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not reproduce without permission.