

C# IndexWriter.NumDocs Method Code Examples

This article collects typical usage examples of the Lucene.Net.Index.IndexWriter.NumDocs method in C#. If you are wondering what IndexWriter.NumDocs does, how to call it, or what working examples look like, the hand-picked code samples below should help. You can also explore further usage examples of the containing class, Lucene.Net.Index.IndexWriter.


The following presents 15 code examples of the IndexWriter.NumDocs method, sorted by popularity by default.
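
Before the examples, here is a minimal sketch of what NumDocs() reports, written against the Lucene.Net 3.0.x-style API (StandardAnalyzer, MaxFieldLength) that several of the samples below use; the class and field names are illustrative only, and property/method shapes differ slightly in Lucene.Net 4.x, so treat it as an orientation aid rather than a drop-in snippet.

using System;
using Lucene.Net.Analysis.Standard;
using Lucene.Net.Documents;
using Lucene.Net.Index;
using Lucene.Net.Store;
using Version = Lucene.Net.Util.Version;

class NumDocsSketch
{
    static void Main()
    {
        // In-memory directory, so the sketch leaves nothing on disk.
        var dir = new RAMDirectory();
        var writer = new IndexWriter(dir, new StandardAnalyzer(Version.LUCENE_30),
                                     true, IndexWriter.MaxFieldLength.UNLIMITED);

        for (int i = 0; i < 10; i++)
        {
            var doc = new Document();
            doc.Add(new Field("id", i.ToString(), Field.Store.YES, Field.Index.NOT_ANALYZED));
            writer.AddDocument(doc);
        }

        // NumDocs() counts live documents known to the writer, including
        // buffered additions that have not been flushed yet.
        Console.WriteLine("After adds: NumDocs = {0}", writer.NumDocs()); // 10

        writer.DeleteDocuments(new Term("id", "0"));
        // Buffered deletions are only reflected once they are applied,
        // so commit first to get a deterministic count.
        writer.Commit();
        Console.WriteLine("After delete + commit: NumDocs = {0}", writer.NumDocs()); // 9

        writer.Dispose();
        dir.Dispose();
    }
}

The contrast to keep in mind while reading the examples is with MaxDoc, which continues to count deleted documents until the deletions are merged away; tests such as TestForceMergeDeletes (Example 1) and TestDocCount (Example 14) assert on exactly that difference.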

Example 1: TestForceMergeDeletes

        public virtual void TestForceMergeDeletes()
        {
            Directory dir = NewDirectory();
            IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
            TieredMergePolicy tmp = NewTieredMergePolicy();
            conf.SetMergePolicy(tmp);
            conf.SetMaxBufferedDocs(4);
            tmp.MaxMergeAtOnce = 100;
            tmp.SegmentsPerTier = 100;
            tmp.ForceMergeDeletesPctAllowed = 30.0;
            IndexWriter w = new IndexWriter(dir, conf);
            for (int i = 0; i < 80; i++)
            {
                Document doc = new Document();
                doc.Add(NewTextField("content", "aaa " + (i % 4), Field.Store.NO));
                w.AddDocument(doc);
            }
            Assert.AreEqual(80, w.MaxDoc);
            Assert.AreEqual(80, w.NumDocs());

            if (VERBOSE)
            {
                Console.WriteLine("\nTEST: delete docs");
            }
            w.DeleteDocuments(new Term("content", "0"));
            w.ForceMergeDeletes();

            Assert.AreEqual(80, w.MaxDoc);
            Assert.AreEqual(60, w.NumDocs());

            if (VERBOSE)
            {
                Console.WriteLine("\nTEST: forceMergeDeletes2");
            }
            ((TieredMergePolicy)w.Config.MergePolicy).ForceMergeDeletesPctAllowed = 10.0;
            w.ForceMergeDeletes();
            Assert.AreEqual(60, w.NumDocs());
            Assert.AreEqual(60, w.MaxDoc);
            w.Dispose();
            dir.Dispose();
        }
Author: ChristopherHaws, Project: lucenenet, Lines: 41, Source: TestTieredMergePolicy.cs

Example 2: Main

        static void Main(string[] args)
        {
            
            // default AzureDirectory stores cache in local temp folder
            var azureDirectory = new AzureDirectory(CloudStorageAccount.Parse(ConfigurationManager.AppSettings["blobStorage"]), "TestCatalog6");
            var findexExists = IndexReader.IndexExists(azureDirectory);

            IndexWriter indexWriter = null;
            while (indexWriter == null)
            {
                try
                {
                    indexWriter = new IndexWriter(azureDirectory, new StandardAnalyzer(Lucene.Net.Util.Version.LUCENE_CURRENT), !IndexReader.IndexExists(azureDirectory), new Lucene.Net.Index.IndexWriter.MaxFieldLength(IndexWriter.DEFAULT_MAX_FIELD_LENGTH));
                }
                catch (LockObtainFailedException)
                {
                    Console.WriteLine("Lock is taken, Hit 'Y' to clear the lock, or anything else to try again");
                    if (Console.ReadLine().ToLower().Trim() == "y" )
                        azureDirectory.ClearLock("write.lock");
                }
            };
            Console.WriteLine("IndexWriter lock obtained, this process has exclusive write access to index");
            indexWriter.SetRAMBufferSizeMB(10.0);
            //indexWriter.SetUseCompoundFile(false);
            //indexWriter.SetMaxMergeDocs(10000);
            //indexWriter.SetMergeFactor(100);
            
            for (int iDoc = 0; iDoc < 10000; iDoc++)
            {
                if (iDoc % 10 == 0)
                    Console.WriteLine(iDoc);
                var doc = new Document();
                doc.Add(new Field("id", DateTime.Now.ToFileTimeUtc().ToString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
                doc.Add(new Field("Title", GeneratePhrase(10), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
                doc.Add(new Field("Body", GeneratePhrase(40), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
                indexWriter.AddDocument(doc);
            }
            Console.WriteLine("Total docs is {0}", indexWriter.NumDocs());
            indexWriter.Dispose();

            IndexSearcher searcher;
            using (new AutoStopWatch("Creating searcher"))
            {
                searcher = new IndexSearcher(azureDirectory); 
            }
            SearchForPhrase(searcher, "dog");
            SearchForPhrase(searcher, _random.Next(32768).ToString());
            SearchForPhrase(searcher, _random.Next(32768).ToString());
            Console.Read();
        }
Author: supriyakumari17, Project: AzureDirectory, Lines: 50, Source: Program.cs

Example 3: OnProcessBatch

        protected override Task<bool> OnProcessBatch(CollectorHttpClient client, IEnumerable<JToken> items, JToken context, DateTime commitTimeStamp, CancellationToken cancellationToken)
        {
            PerFieldAnalyzerWrapper analyzer = new PerFieldAnalyzerWrapper(new StandardAnalyzer(Lucene.Net.Util.Version.LUCENE_30));
            analyzer.AddAnalyzer("Id", new IdentifierKeywordAnalyzer());

            int i = 0;

            using (IndexWriter writer = new IndexWriter(_directory, analyzer, false, IndexWriter.MaxFieldLength.UNLIMITED))
            {
                foreach (JObject item in items)
                {
                    i++;

                    string id = item["nuget:id"].ToString();
                    string version = item["nuget:version"].ToString();

                    BooleanQuery query = new BooleanQuery();
                    query.Add(new BooleanClause(new TermQuery(new Term("Id", id.ToLowerInvariant())), Occur.MUST));
                    query.Add(new BooleanClause(new TermQuery(new Term("Version", version)), Occur.MUST));

                    writer.DeleteDocuments(query);

                    Document doc = new Document();

                    doc.Add(new Field("Id", item["nuget:id"].ToString(), Field.Store.YES, Field.Index.ANALYZED));
                    doc.Add(new Field("Version", item["nuget:version"].ToString(), Field.Store.YES, Field.Index.NOT_ANALYZED));

                    writer.AddDocument(doc);
                }

                string trace = Guid.NewGuid().ToString();

                writer.Commit(new Dictionary<string, string> 
                { 
                    { "commitTimeStamp", commitTimeStamp.ToString("O") },
                    { "trace", trace }
                });

                Trace.TraceInformation("COMMIT {0} documents, index contains {1} documents, commitTimeStamp {2}, trace: {3}",
                    i, writer.NumDocs(), commitTimeStamp.ToString("O"), trace);
            }

            return Task.FromResult(true);
        }
Author: jinujoseph, Project: NuGet.Services.Metadata, Lines: 44, Source: IndexingCatalogCollector.cs

Example 4: TestIndexWriter_LUCENE4656

        public virtual void TestIndexWriter_LUCENE4656()
        {
            Store.Directory directory = NewDirectory();
            IndexWriter writer = new IndexWriter(directory, NewIndexWriterConfig(TEST_VERSION_CURRENT, null));

            TokenStream ts = new EmptyTokenStream();
            assertFalse(ts.HasAttribute<ITermToBytesRefAttribute>());

            Document doc = new Document();
            doc.Add(new StringField("id", "0", Field.Store.YES));
            doc.Add(new TextField("description", ts));

            // this should not fail because we have no TermToBytesRefAttribute
            writer.AddDocument(doc);

            assertEquals(1, writer.NumDocs());

            writer.Dispose();
            directory.Dispose();
        }
Author: ChristopherHaws, Project: lucenenet, Lines: 20, Source: TestEmptyTokenStream.cs

Example 5: TestReadAndWrite

        public void TestReadAndWrite()
        {
            var connectionString = Environment.GetEnvironmentVariable("DataConnectionString") ?? "UseDevelopmentStorage=true";

            var cloudStorageAccount = CloudStorageAccount.Parse(connectionString);

            // default AzureDirectory stores cache in local temp folder
            var azureDirectory = new AzureDirectory(cloudStorageAccount, "testcatalog");

            using (var indexWriter = new IndexWriter(azureDirectory, new StandardAnalyzer(Lucene.Net.Util.Version.LUCENE_30), !IndexReader.IndexExists(azureDirectory), new Lucene.Net.Index.IndexWriter.MaxFieldLength(IndexWriter.DEFAULT_MAX_FIELD_LENGTH)))
            {
                indexWriter.SetRAMBufferSizeMB(10.0);

                for (int iDoc = 0; iDoc < 10000; iDoc++)
                {
                    var doc = new Document();
                    doc.Add(new Field("id", DateTime.Now.ToFileTimeUtc().ToString() + "-" + iDoc.ToString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
                    doc.Add(new Field("Title", GeneratePhrase(10), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
                    doc.Add(new Field("Body", GeneratePhrase(40), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
                    indexWriter.AddDocument(doc);
                }

                Console.WriteLine("Total docs is {0}", indexWriter.NumDocs());
            }

            using (var searcher = new IndexSearcher(azureDirectory))
            {
                Assert.AreNotEqual(0, SearchForPhrase(searcher, "dog"));
                Assert.AreNotEqual(0, SearchForPhrase(searcher, "cat"));
                Assert.AreNotEqual(0, SearchForPhrase(searcher, "car"));
            }

            // check the container exists, and delete it
            var blobClient = cloudStorageAccount.CreateCloudBlobClient();
            var container = blobClient.GetContainerReference("testcatalog");
            Assert.IsTrue(container.Exists());
            container.Delete();
        }
Author: azure-contrib, Project: AzureDirectory, Lines: 38, Source: IntegrationTests.cs

Example 6: TestRollingUpdates_Mem


//......... part of this code has been omitted .........

                bool doUpdate;
                if (s != null && updateCount < SIZE)
                {
                    TopDocs hits = s.Search(new TermQuery(idTerm), 1);
                    Assert.AreEqual(1, hits.TotalHits);
                    doUpdate = !w.TryDeleteDocument(r, hits.ScoreDocs[0].Doc);
                    if (VERBOSE)
                    {
                        if (doUpdate)
                        {
                            Console.WriteLine("  tryDeleteDocument failed");
                        }
                        else
                        {
                            Console.WriteLine("  tryDeleteDocument succeeded");
                        }
                    }
                }
                else
                {
                    doUpdate = true;
                    if (VERBOSE)
                    {
                        Console.WriteLine("  no searcher: doUpdate=true");
                    }
                }

                updateCount++;

                if (doUpdate)
                {
                    w.UpdateDocument(idTerm, doc);
                }
                else
                {
                    w.AddDocument(doc);
                }

                if (docIter >= SIZE && Random().Next(50) == 17)
                {
                    if (r != null)
                    {
                        r.Dispose();
                    }

                    bool applyDeletions = Random().NextBoolean();

                    if (VERBOSE)
                    {
                        Console.WriteLine("TEST: reopen applyDeletions=" + applyDeletions);
                    }

                    r = w.GetReader(applyDeletions);
                    if (applyDeletions)
                    {
                        s = NewSearcher(r);
                    }
                    else
                    {
                        s = null;
                    }
                    Assert.IsTrue(!applyDeletions || r.NumDocs == SIZE, "applyDeletions=" + applyDeletions + " r.NumDocs=" + r.NumDocs + " vs SIZE=" + SIZE);
                    updateCount = 0;
                }
            }

            if (r != null)
            {
                r.Dispose();
            }

            w.Commit();
            Assert.AreEqual(SIZE, w.NumDocs());

            w.Dispose();

            TestIndexWriter.AssertNoUnreferencedFiles(dir, "leftover files after rolling updates");

            docs.Dispose();

            // LUCENE-4455:
            SegmentInfos infos = new SegmentInfos();
            infos.Read(dir);
            long totalBytes = 0;
            foreach (SegmentCommitInfo sipc in infos.Segments)
            {
                totalBytes += sipc.SizeInBytes();
            }
            long totalBytes2 = 0;
            foreach (string fileName in dir.ListAll())
            {
                if (!fileName.StartsWith(IndexFileNames.SEGMENTS))
                {
                    totalBytes2 += dir.FileLength(fileName);
                }
            }
            Assert.AreEqual(totalBytes2, totalBytes);
            dir.Dispose();
        }
Author: ChristopherHaws, Project: lucenenet, Lines: 101, Source: TestRollingUpdates.cs

Example 7: TestFutureCommit

        public virtual void TestFutureCommit()
        {
            Directory dir = NewDirectory();

            IndexWriter w = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetIndexDeletionPolicy(NoDeletionPolicy.INSTANCE));
            Document doc = new Document();
            w.AddDocument(doc);

            // commit to "first"
            IDictionary<string, string> commitData = new Dictionary<string, string>();
            commitData["tag"] = "first";
            w.CommitData = commitData;
            w.Commit();

            // commit to "second"
            w.AddDocument(doc);
            commitData["tag"] = "second";
            w.CommitData = commitData;
            w.Dispose();

            // open "first" with IndexWriter
            IndexCommit commit = null;
            foreach (IndexCommit c in DirectoryReader.ListCommits(dir))
            {
                if (c.UserData["tag"].Equals("first"))
                {
                    commit = c;
                    break;
                }
            }

            Assert.IsNotNull(commit);

            w = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetIndexDeletionPolicy(NoDeletionPolicy.INSTANCE).SetIndexCommit(commit));

            Assert.AreEqual(1, w.NumDocs());

            // commit IndexWriter to "third"
            w.AddDocument(doc);
            commitData["tag"] = "third";
            w.CommitData = commitData;
            w.Dispose();

            // make sure "second" commit is still there
            commit = null;
            foreach (IndexCommit c in DirectoryReader.ListCommits(dir))
            {
                if (c.UserData["tag"].Equals("second"))
                {
                    commit = c;
                    break;
                }
            }

            Assert.IsNotNull(commit);

            dir.Dispose();
        }
Author: paulirwin, Project: lucene.net, Lines: 58, Source: TestIndexWriterCommit.cs

Example 8: RunTest


//......... part of this code has been omitted .........
                    // We sort by relevance but the scores should be identical so sort falls back to by docID:
                    if (hits.TotalHits != subDocs.SubIDs.Count)
                    {
                        Console.WriteLine("packID=" + subDocs.PackID + ": expected " + subDocs.SubIDs.Count + " hits but got " + hits.TotalHits);
                        doFail = true;
                    }
                    else
                    {
                        int lastDocID = -1;
                        int startDocID = -1;
                        foreach (ScoreDoc scoreDoc in hits.ScoreDocs)
                        {
                            int docID = scoreDoc.Doc;
                            if (lastDocID != -1)
                            {
                                Assert.AreEqual(1 + lastDocID, docID);
                            }
                            else
                            {
                                startDocID = docID;
                            }
                            lastDocID = docID;
                            Document doc = s.Doc(docID);
                            Assert.AreEqual(subDocs.PackID, doc.Get("packID"));
                        }

                        lastDocID = startDocID - 1;
                        foreach (string subID in subDocs.SubIDs)
                        {
                            hits = s.Search(new TermQuery(new Term("docid", subID)), 1);
                            Assert.AreEqual(1, hits.TotalHits);
                            int docID = hits.ScoreDocs[0].Doc;
                            if (lastDocID != -1)
                            {
                                Assert.AreEqual(1 + lastDocID, docID);
                            }
                            lastDocID = docID;
                        }
                    }
                }
                else
                {
                    // Pack was deleted -- make sure its docs are
                    // deleted.  We can't verify packID is deleted
                    // because we can re-use packID for update:
                    foreach (string subID in subDocs.SubIDs)
                    {
                        Assert.AreEqual(0, s.Search(new TermQuery(new Term("docid", subID)), 1).TotalHits);
                    }
                }
            }

            // Verify: make sure all not-deleted docs are in fact
            // not deleted:
            int endID = Convert.ToInt32(docs.NextDoc().Get("docid"));
            docs.Dispose();

            for (int id = 0; id < endID; id++)
            {
                string stringID = "" + id;
                if (!delIDs.Contains(stringID))
                {
                    TopDocs hits = s.Search(new TermQuery(new Term("docid", stringID)), 1);
                    if (hits.TotalHits != 1)
                    {
                        Console.WriteLine("doc id=" + stringID + " is not supposed to be deleted, but got hitCount=" + hits.TotalHits + "; delIDs=" + string.Join(",",  delIDs.ToArray()));
                        doFail = true;
                    }
                }
            }
            Assert.IsFalse(doFail);

            Assert.AreEqual(AddCount.Get() - DelCount.Get(), s.IndexReader.NumDocs, "index=" + Writer.SegString() + " addCount=" + AddCount + " delCount=" + DelCount);
            ReleaseSearcher(s);

            Writer.Commit();

            Assert.AreEqual(AddCount.Get() - DelCount.Get(), Writer.NumDocs(), "index=" + Writer.SegString() + " addCount=" + AddCount + " delCount=" + DelCount);

            DoClose();
            Writer.Dispose(false);

            // Cannot shutdown until after writer is closed because
            // writer has merged segment warmer that uses IS to run
            // searches, and that IS may be using this es!
            /*if (es != null)
            {
              es.shutdown();
              es.awaitTermination(1, TimeUnit.SECONDS);
            }*/

            TestUtil.CheckIndex(Dir);
            Dir.Dispose();
            System.IO.Directory.Delete(tempDir.FullName, true);

            if (VERBOSE)
            {
                Console.WriteLine("TEST: done [" + (DateTime.UtcNow - t0).TotalMilliseconds + " ms]");
            }
        }
Author: ChristopherHaws, Project: lucenenet, Lines: 101, Source: ThreadedIndexingAndSearchingTestCase.cs

Example 9: TestFutureCommit

        public void TestFutureCommit()
        {
            Directory dir = new MockRAMDirectory();

            IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), new NoDeletionPolicy(), IndexWriter.MaxFieldLength.UNLIMITED);
            Document doc = new Document();
            w.AddDocument(doc);

            // commit to "first"
            System.Collections.Generic.Dictionary<string, string> commitData = new System.Collections.Generic.Dictionary<string, string>();
            commitData["tag"]="first";
            w.Commit(commitData);

            // commit to "second"
            w.AddDocument(doc);
            commitData["tag"]="second";
            w.Commit(commitData);
            w.Close();

            // open "first" with IndexWriter
            IndexCommit commit = null;
            System.Collections.IEnumerator it = IndexReader.ListCommits(dir).GetEnumerator();
            while (it.MoveNext())
            {
                IndexCommit c = (IndexCommit)it.Current;
                string tag = (String)c.GetUserData()["tag"];
                if ("first".Equals(tag))
                {
                    commit = c;
                    break;
                }
            }

            Assert.NotNull(commit);

            w = new IndexWriter(dir, new WhitespaceAnalyzer(), new NoDeletionPolicy(), IndexWriter.MaxFieldLength.UNLIMITED, commit);

            Assert.AreEqual(1, w.NumDocs());

            // commit IndexWriter to "third"
            w.AddDocument(doc);
            commitData["tag"]="third";
            w.Commit(commitData);
            w.Close();

            // make sure "second" commit is still there
            commit = null;
            it = IndexReader.ListCommits(dir).GetEnumerator();
            while (it.MoveNext())
            {
                IndexCommit c = (IndexCommit)it.Current;
                string tag = (String)c.GetUserData()["tag"];
                if ("second".Equals(tag))
                {
                    commit = c;
                    break;
                }
            }

            Assert.NotNull(commit);

            IndexReader r = IndexReader.Open(commit, true);
            Assert.AreEqual(2, r.NumDocs());
            r.Close();

            // open "second", w/ writeable IndexReader & commit
            r = IndexReader.Open(commit, new NoDeletionPolicy(), false);
            Assert.AreEqual(2, r.NumDocs());
            r.DeleteDocument(0);
            r.DeleteDocument(1);
            commitData["tag"]="fourth";
            r.Commit(commitData);
            r.Close();

            // make sure "third" commit is still there
            commit = null;
            it = IndexReader.ListCommits(dir).GetEnumerator();
            while (it.MoveNext())
            {
                IndexCommit c = (IndexCommit)it.Current;
                string tag = (String)c.GetUserData()["tag"];
                if ("third".Equals(tag))
                {
                    commit = c;
                    break;
                }
            }
            Assert.NotNull(commit);

            dir.Close();
        }
Author: Mpdreamz, Project: lucene.net, Lines: 91, Source: TestIndexWriter.cs

Example 10: RunTest

        public virtual void RunTest(Random random, Directory directory)
        {
            IndexWriter writer = new IndexWriter(directory, ((IndexWriterConfig)NewIndexWriterConfig(TEST_VERSION_CURRENT, ANALYZER).SetOpenMode(OpenMode_e.CREATE).SetMaxBufferedDocs(2)).SetMergePolicy(NewLogMergePolicy()));

            for (int iter = 0; iter < NUM_ITER; iter++)
            {
                int iterFinal = iter;

                ((LogMergePolicy)writer.Config.MergePolicy).MergeFactor = 1000;

                FieldType customType = new FieldType(StringField.TYPE_STORED);
                customType.OmitNorms = true;

                for (int i = 0; i < 200; i++)
                {
                    Document d = new Document();
                    d.Add(NewField("id", Convert.ToString(i), customType));
                    d.Add(NewField("contents", English.IntToEnglish(i), customType));
                    writer.AddDocument(d);
                }

                ((LogMergePolicy)writer.Config.MergePolicy).MergeFactor = 4;

                ThreadClass[] threads = new ThreadClass[NUM_THREADS];

                for (int i = 0; i < NUM_THREADS; i++)
                {
                    int iFinal = i;
                    IndexWriter writerFinal = writer;
                    threads[i] = new ThreadAnonymousInnerClassHelper(this, iterFinal, customType, iFinal, writerFinal);
                }

                for (int i = 0; i < NUM_THREADS; i++)
                {
                    threads[i].Start();
                }

                for (int i = 0; i < NUM_THREADS; i++)
                {
                    threads[i].Join();
                }

                Assert.IsTrue(!Failed);

                int expectedDocCount = (int)((1 + iter) * (200 + 8 * NUM_ITER2 * (NUM_THREADS / 2.0) * (1 + NUM_THREADS)));

                Assert.AreEqual(expectedDocCount, writer.NumDocs(), "index=" + writer.SegString() + " numDocs=" + writer.NumDocs() + " maxDoc=" + writer.MaxDoc + " config=" + writer.Config);
                Assert.AreEqual(expectedDocCount, writer.MaxDoc, "index=" + writer.SegString() + " numDocs=" + writer.NumDocs() + " maxDoc=" + writer.MaxDoc + " config=" + writer.Config);

                writer.Dispose();
                writer = new IndexWriter(directory, (IndexWriterConfig)NewIndexWriterConfig(TEST_VERSION_CURRENT, ANALYZER).SetOpenMode(OpenMode_e.APPEND).SetMaxBufferedDocs(2));

                DirectoryReader reader = DirectoryReader.Open(directory);
                Assert.AreEqual(1, reader.Leaves.Count, "reader=" + reader);
                Assert.AreEqual(expectedDocCount, reader.NumDocs);
                reader.Dispose();
            }
            writer.Dispose();
        }
Author: ChristopherHaws, Project: lucenenet, Lines: 59, Source: TestThreadedForceMerge.cs

Example 11: TestForceMergeDeletes2

        public virtual void TestForceMergeDeletes2()
        {
            Directory dir = NewDirectory();
            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(2).SetRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).SetMergePolicy(NewLogMergePolicy(50)));

            Document document = new Document();

            FieldType customType = new FieldType();
            customType.Stored = true;

            FieldType customType1 = new FieldType(TextField.TYPE_NOT_STORED);
            customType1.Tokenized = false;
            customType1.StoreTermVectors = true;
            customType1.StoreTermVectorPositions = true;
            customType1.StoreTermVectorOffsets = true;

            Field storedField = NewField("stored", "stored", customType);
            document.Add(storedField);
            Field termVectorField = NewField("termVector", "termVector", customType1);
            document.Add(termVectorField);
            Field idField = NewStringField("id", "", Field.Store.NO);
            document.Add(idField);
            for (int i = 0; i < 98; i++)
            {
                idField.StringValue = "" + i;
                writer.AddDocument(document);
            }
            writer.Dispose();

            IndexReader ir = DirectoryReader.Open(dir);
            Assert.AreEqual(98, ir.MaxDoc());
            Assert.AreEqual(98, ir.NumDocs());
            ir.Dispose();

            IndexWriterConfig dontMergeConfig = (new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))).SetMergePolicy(NoMergePolicy.COMPOUND_FILES);
            writer = new IndexWriter(dir, dontMergeConfig);
            for (int i = 0; i < 98; i += 2)
            {
                writer.DeleteDocuments(new Term("id", "" + i));
            }
            writer.Dispose();

            ir = DirectoryReader.Open(dir);
            Assert.AreEqual(49, ir.NumDocs());
            ir.Dispose();

            writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(NewLogMergePolicy(3)));
            Assert.AreEqual(49, writer.NumDocs());
            writer.ForceMergeDeletes();
            writer.Dispose();
            ir = DirectoryReader.Open(dir);
            Assert.AreEqual(49, ir.MaxDoc());
            Assert.AreEqual(49, ir.NumDocs());
            ir.Dispose();
            dir.Dispose();
        }
Author: joyanta, Project: lucene.net, Lines: 56, Source: TestIndexWriterMerging.cs

Example 12: TestExistingDeletes

        public virtual void TestExistingDeletes()
        {
            Directory[] dirs = new Directory[2];
            for (int i = 0; i < dirs.Length; i++)
            {
                dirs[i] = NewDirectory();
                IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
                IndexWriter writer = new IndexWriter(dirs[i], conf);
                Document doc = new Document();
                doc.Add(new StringField("id", "myid", Field.Store.NO));
                writer.AddDocument(doc);
                writer.Dispose();
            }

            IndexWriterConfig conf_ = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
            IndexWriter writer_ = new IndexWriter(dirs[0], conf_);

            // Now delete the document
            writer_.DeleteDocuments(new Term("id", "myid"));
            IndexReader r = DirectoryReader.Open(dirs[1]);
            try
            {
                writer_.AddIndexes(r);
            }
            finally
            {
                r.Dispose();
            }
            writer_.Commit();
            Assert.AreEqual(1, writer_.NumDocs(), "Documents from the incoming index should not have been deleted");
            writer_.Dispose();

            foreach (Directory dir in dirs)
            {
                dir.Dispose();
            }
        }
Author: ChristopherHaws, Project: lucenenet, Lines: 37, Source: TestAddIndexes.cs

Example 13: TestIndexWriterDirtSimple

        public virtual void TestIndexWriterDirtSimple()
        {
            Directory dir = new RAMDirectory();
            IndexWriterConfig iwc = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
            TieredMergePolicy tmp = NewTieredMergePolicy();
            iwc.SetMergePolicy(tmp);
            iwc.SetMaxBufferedDocs(2);
            tmp.MaxMergeAtOnce = 100;
            tmp.SegmentsPerTier = 100;
            tmp.ForceMergeDeletesPctAllowed = 30.0;
            IndexWriter w = new IndexWriter(dir, iwc);

            int numDocs = 2;

            for (int i = 0; i < numDocs; i++)
            {
                Document doc = new Document();
                doc.Add(NewTextField("content", "aaa " + i, Field.Store.NO));
                w.AddDocument(doc);
            }

            Assert.AreEqual(numDocs, w.MaxDoc);
            Assert.AreEqual(numDocs, w.NumDocs());
        }
Author: ChristopherHaws, Project: lucenenet, Lines: 24, Source: TestTieredMergePolicy.cs

Example 14: TestDocCount

		public virtual void  TestDocCount()
		{
			Directory dir = new RAMDirectory();
			
			IndexWriter writer = null;
			IndexReader reader = null;
			int i;
			
			IndexWriter.SetDefaultWriteLockTimeout(2000);
			Assert.AreEqual(2000, IndexWriter.GetDefaultWriteLockTimeout());
			
			writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
			
			IndexWriter.SetDefaultWriteLockTimeout(1000);
			
			// add 100 documents
			for (i = 0; i < 100; i++)
			{
				AddDoc(writer);
			}
			Assert.AreEqual(100, writer.DocCount());
			writer.Close();
			
			// delete 40 documents
			reader = IndexReader.Open(dir);
			for (i = 0; i < 40; i++)
			{
				reader.DeleteDocument(i);
			}
			reader.Close();
			
			// test doc count before segments are merged/index is optimized
			writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
			Assert.AreEqual(100, writer.DocCount());
			writer.Close();
			
			reader = IndexReader.Open(dir);
			Assert.AreEqual(100, reader.MaxDoc());
			Assert.AreEqual(60, reader.NumDocs());
			reader.Close();
			
			// optimize the index and check that the new doc count is correct
			writer = new IndexWriter(dir, true, new WhitespaceAnalyzer());
			Assert.AreEqual(100, writer.MaxDoc());
			Assert.AreEqual(60, writer.NumDocs());
			writer.Optimize();
			Assert.AreEqual(60, writer.MaxDoc());
			Assert.AreEqual(60, writer.NumDocs());
			writer.Close();
			
			// check that the index reader gives the same numbers.
			reader = IndexReader.Open(dir);
			Assert.AreEqual(60, reader.MaxDoc());
			Assert.AreEqual(60, reader.NumDocs());
			reader.Close();
			
			// make sure opening a new index for create over
			// this existing one works correctly:
			writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
			Assert.AreEqual(0, writer.MaxDoc());
			Assert.AreEqual(0, writer.NumDocs());
			writer.Close();
		}
Author: Rationalle, Project: ravendb, Lines: 63, Source: TestIndexWriter.cs

Example 15: TestExpungeDeletes2

		public virtual void  TestExpungeDeletes2()
		{
			Directory dir = new MockRAMDirectory();
			IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
			writer.SetMaxBufferedDocs(2);
			writer.SetMergeFactor(50);
			writer.SetRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
			
			Document document = new Document();
			
			document = new Document();
			Field storedField = new Field("stored", "stored", Field.Store.YES, Field.Index.NO);
			document.Add(storedField);
			Field termVectorField = new Field("termVector", "termVector", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
			document.Add(termVectorField);
			for (int i = 0; i < 98; i++)
				writer.AddDocument(document);
			writer.Close();
			
			IndexReader ir = IndexReader.Open(dir);
			Assert.AreEqual(98, ir.MaxDoc());
			Assert.AreEqual(98, ir.NumDocs());
			for (int i = 0; i < 98; i += 2)
				ir.DeleteDocument(i);
			Assert.AreEqual(49, ir.NumDocs());
			ir.Close();
			
			writer = new IndexWriter(dir, new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
			writer.SetMergeFactor(3);
			Assert.AreEqual(49, writer.NumDocs());
			writer.ExpungeDeletes();
			writer.Close();
			ir = IndexReader.Open(dir);
			Assert.AreEqual(49, ir.MaxDoc());
			Assert.AreEqual(49, ir.NumDocs());
			ir.Close();
			dir.Close();
		}
Author: Rationalle, Project: ravendb, Lines: 38, Source: TestIndexWriter.cs


Note: The Lucene.Net.Index.IndexWriter.NumDocs examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective authors; copyright of the source code belongs to the original authors, and any redistribution or use should follow each project's license. Do not reproduce this article without permission.