

C# IndexWriter.MaxDoc Method Code Examples

This article collects and summarizes typical usage examples of the C# method Lucene.Net.Index.IndexWriter.MaxDoc. If you have been asking yourself how IndexWriter.MaxDoc works in C#, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples of its containing class, Lucene.Net.Index.IndexWriter.


Below are 15 code examples of the IndexWriter.MaxDoc method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C# code examples.
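Before turning to the examples, it helps to pin down what the method reports. In the classic Lucene.Net API, IndexWriter.MaxDoc() returns the total number of documents in the index, including documents still buffered in RAM and documents marked as deleted but not yet merged away; NumDocs() returns the same total with deletions subtracted. The minimal sketch below illustrates the difference (it is written against the 2.9-era API used by most of the examples that follow, and the "id" field and its values are purely illustrative):

        Directory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
        for (int i = 0; i < 10; i++)
        {
            Document doc = new Document();
            doc.Add(new Field("id", i.ToString(), Field.Store.YES, Field.Index.NOT_ANALYZED));
            writer.AddDocument(doc);
        }
        writer.Commit();
        writer.DeleteDocuments(new Term("id", "7")); // mark one document as deleted
        writer.Commit();                             // apply the buffered deletion
        int maxDoc = writer.MaxDoc();   // 10: deleted documents still count until a merge drops them
        int numDocs = writer.NumDocs(); // 9:  deletions are excluded
        writer.Close();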

Example 1: OpenWriterWithCommit

        public void OpenWriterWithCommit()
        {
            SegmentsGenCommit sgCommit = new SegmentsGenCommit(this.directory);
            IndexWriter writer = new IndexWriter(this.directory, new WhitespaceAnalyzer(), null, IndexWriter.MaxFieldLength.UNLIMITED, sgCommit);
            Assert.AreEqual(10, writer.MaxDoc());
            IndexReader reader = writer.GetReader();

            IndexCommit commit = reader.GetIndexCommit();
            Assert.AreEqual(commit.GetGeneration(), sgCommit.GetGeneration());
            Assert.AreEqual(commit.GetSegmentsFileName(), sgCommit.GetSegmentsFileName());
        }
Developer: kstenson, Project: NHibernate.Search, Lines: 11, Source: SegmentsGenCommitTest.cs

Example 2: TestExactFileNames

        public virtual void TestExactFileNames()
        {
            System.String outputDir = "lucene.backwardscompat0.index";
            RmDir(outputDir);

            try
            {
                Directory dir = FSDirectory.Open(new System.IO.DirectoryInfo(FullDir(outputDir)));

                IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true,
                                                     IndexWriter.MaxFieldLength.UNLIMITED);
                writer.SetRAMBufferSizeMB(16.0);
                for (int i = 0; i < 35; i++)
                {
                    AddDoc(writer, i);
                }
                Assert.AreEqual(35, writer.MaxDoc(), "wrong doc count");
                writer.Close();

                // Delete one doc so we get a .del file:
                IndexReader reader = IndexReader.Open(dir, false);
                Term searchTerm = new Term("id", "7");
                int delCount = reader.DeleteDocuments(searchTerm);
                Assert.AreEqual(1, delCount, "didn't delete the right number of documents");

                // Set one norm so we get a .s0 file:
                reader.SetNorm(21, "content", (float) 1.5);
                reader.Close();

                // The numbering of fields can vary depending on which
                // JRE is in use.  On some JREs we see content bound to
                // field 0; on others, field 1.  So, here we have to
                // figure out which field number corresponds to
                // "content", and then set our expected file names below
                // accordingly:
                CompoundFileReader cfsReader = new CompoundFileReader(dir, "_0.cfs");
                FieldInfos fieldInfos = new FieldInfos(cfsReader, "_0.fnm");
                int contentFieldIndex = -1;
                for (int i = 0; i < fieldInfos.Size(); i++)
                {
                    FieldInfo fi = fieldInfos.FieldInfo(i);
                    if (fi.name_ForNUnit.Equals("content"))
                    {
                        contentFieldIndex = i;
                        break;
                    }
                }
                cfsReader.Close();
                Assert.IsTrue(contentFieldIndex != -1,
                              "could not locate the 'content' field number in the _0.cfs segment");

                // Now verify file names:
                System.String[] expected;
                expected = new System.String[]
                               {"_0.cfs", "_0_1.del", "_0_1.s" + contentFieldIndex, "segments_3", "segments.gen"};

                System.String[] actual = dir.ListAll();
                System.Array.Sort(expected);
                System.Array.Sort(actual);
                if (!CollectionsHelper.Equals(expected, actual))
                {
                    Assert.Fail("incorrect filenames in index: expected:\n    " + AsString(expected) +
                                "\n  actual:\n    " + AsString(actual));
                }
                dir.Close();
            }
            finally
            {
                RmDir(outputDir);
            }
        }
Developer: Nangal, Project: lucene.net, Lines: 71, Source: TestBackwardsCompatibility.cs

Example 3: GenerateRandomDocs

        private void GenerateRandomDocs(int numCats, int numDocs)
        {
            using (var dir = new IsolatedStorageDirectory(IndexDir))
            {
                Random rgen = new Random();
                string[] categories = Enumerable.Range(0, numCats).Select(x => RandomString(4, rgen)).ToArray();
                IEnumerable<Document> docs = Enumerable.Range(0, numDocs).Select(x => RandomDocument(categories[rgen.Next(0, numCats)], rgen)); // Random.Next's upper bound is exclusive, so numCats covers every category

                using (IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true))
                {
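                    // IndexWriter is safe for concurrent AddDocument calls, which is what this test exercises.
                    // Note that 'docs' is a deferred LINQ query: docs.Count() below re-enumerates it, so the
                    // count still equals numDocs, but fresh random documents are generated during the recount.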
                    System.Threading.Tasks.Parallel.ForEach(docs, d =>
                    {
                        writer.AddDocument(d); //multi-access to writer
                    });

                    Assert.AreEqual(docs.Count(), writer.MaxDoc(), "Unexpected error in \"writer.AddDocument\"");
                }
            }
        }
Developer: kstenson, Project: NHibernate.Search, Lines: 19, Source: TestIsolatedStorageDirectory.cs

Example 4: SetUp

        public override void  SetUp()
        {
            base.SetUp();
            RAMDirectory directory = new RAMDirectory();
            IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
            for (int i = 0; i < docFields.Length; i++)
            {
                Document document = new Document();
                document.Add(new Field(field, docFields[i], Field.Store.NO, Field.Index.ANALYZED));
                writer.AddDocument(document);
            }
            writer.Close();
            searcher = new IndexSearcher(directory, true);

            // Make big index
            dir2 = new MockRAMDirectory(directory);

            // First multiply small test index:
            mulFactor = 1;
            int docCount = 0;
            do
            {
                Directory copy = new RAMDirectory(dir2);
                IndexWriter indexWriter = new IndexWriter(dir2, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
                indexWriter.AddIndexesNoOptimize(new[] {copy});
                docCount = indexWriter.MaxDoc();
                indexWriter.Close();
                mulFactor *= 2;
            } while (docCount < 3000);

            IndexWriter w = new IndexWriter(dir2, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
            Document doc = new Document();
            doc.Add(new Field("field2", "xxx", Field.Store.NO, Field.Index.ANALYZED));
            for (int i = 0; i < NUM_EXTRA_DOCS / 2; i++)
            {
                w.AddDocument(doc);
            }
            doc = new Document();
            doc.Add(new Field("field2", "big bad bug", Field.Store.NO, Field.Index.ANALYZED));
            for (int i = 0; i < NUM_EXTRA_DOCS / 2; i++)
            {
                w.AddDocument(doc);
            }
            // optimize to 1 segment
            w.Optimize();
            reader = w.GetReader();
            w.Close();
            bigSearcher = new IndexSearcher(reader);
        }
Developer: Nangal, Project: lucene.net, Lines: 49, Source: TestBoolean2.cs

Example 5: CreateIndex

 public virtual void  CreateIndex(System.String dirName, bool doCFS)
 {
     
     RmDir(dirName);
     
     dirName = FullDir(dirName);
     
     Directory dir = FSDirectory.Open(new System.IO.DirectoryInfo(dirName));
     IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
     writer.UseCompoundFile = doCFS;
     writer.SetMaxBufferedDocs(10);
     
     for (int i = 0; i < 35; i++)
     {
         AddDoc(writer, i);
     }
     Assert.AreEqual(35, writer.MaxDoc(), "wrong doc count");
     writer.Close();
     
     // open fresh writer so we get no prx file in the added segment
     writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
     writer.UseCompoundFile = doCFS;
     writer.SetMaxBufferedDocs(10);
     AddNoProxDoc(writer);
     writer.Close();
     
     // Delete one doc so we get a .del file:
     IndexReader reader = IndexReader.Open(dir, false);
     Term searchTerm = new Term("id", "7");
     int delCount = reader.DeleteDocuments(searchTerm);
     Assert.AreEqual(1, delCount, "didn't delete the right number of documents");
     
     // Set one norm so we get a .s0 file:
     reader.SetNorm(21, "content", (float) 1.5);
     reader.Close();
 }
Developer: Nangal, Project: lucene.net, Lines: 36, Source: TestBackwardsCompatibility.cs

Example 6: TestMoreMerges

        public virtual void TestMoreMerges()
        {
            // main directory
            Directory dir = NewDirectory();
            // auxiliary directory
            Directory aux = NewDirectory();
            Directory aux2 = NewDirectory();

            SetUpDirs(dir, aux, true);

            IndexWriter writer = NewWriter(aux2, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode_e.CREATE).SetMaxBufferedDocs(100).SetMergePolicy(NewLogMergePolicy(10)));
            writer.AddIndexes(aux);
            Assert.AreEqual(30, writer.MaxDoc());
            Assert.AreEqual(3, writer.SegmentCount);
            writer.Dispose();

            IndexWriterConfig dontMergeConfig = (new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))).SetMergePolicy(NoMergePolicy.COMPOUND_FILES);
            writer = new IndexWriter(aux, dontMergeConfig);
            for (int i = 0; i < 27; i++)
            {
                writer.DeleteDocuments(new Term("id", "" + i));
            }
            writer.Dispose();
            IndexReader reader = DirectoryReader.Open(aux);
            Assert.AreEqual(3, reader.NumDocs());
            reader.Dispose();

            dontMergeConfig = (new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))).SetMergePolicy(NoMergePolicy.COMPOUND_FILES);
            writer = new IndexWriter(aux2, dontMergeConfig);
            for (int i = 0; i < 8; i++)
            {
                writer.DeleteDocuments(new Term("id", "" + i));
            }
            writer.Dispose();
            reader = DirectoryReader.Open(aux2);
            Assert.AreEqual(22, reader.NumDocs());
            reader.Dispose();

            writer = NewWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode_e.APPEND).SetMaxBufferedDocs(6).SetMergePolicy(NewLogMergePolicy(4)));

            writer.AddIndexes(aux, aux2);
            Assert.AreEqual(1040, writer.MaxDoc());
            Assert.AreEqual(1000, writer.GetDocCount(0));
            writer.Dispose();
            dir.Dispose();
            aux.Dispose();
            aux2.Dispose();
        }
Developer: joyanta, Project: lucene.net, Lines: 48, Source: TestAddIndexes.cs

Example 7: DocumentsWriter

		internal DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain indexingChain)
		{
			InitBlock();
			this.directory = directory;
			this.writer = writer;
			this.similarity = writer.GetSimilarity();
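			// seed the flushed-document count with the writer's current MaxDoc (every doc in the index so far)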
			flushedDocCount = writer.MaxDoc();
			
			consumer = indexingChain.GetChain(this);
			if (consumer is DocFieldProcessor)
			{
				docFieldProcessor = (DocFieldProcessor) consumer;
			}
		}
Developer: Inzaghi2012, Project: teamlab.v7.5, Lines: 14, Source: DocumentsWriter.cs

Example 8: IndexPublication

        public int IndexPublication(Lucene.Net.Store.Directory indexDir, IssueDocumentDto bean)
        {
            Document doc = null;
            IndexWriter idxModif = null;

            int numIndexed = 0;

            try
            {
                if (IndexWriter.IsLocked(indexDir))
                    IndexWriter.Unlock(indexDir);
                idxModif = new IndexWriter(indexDir, this.Analizer, false, new IndexWriter.MaxFieldLength(2500000));

                doc = new Document();

                doc.Add(new Field("issue_id", bean.IssueId.ToString(), Lucene.Net.Documents.Field.Store.YES, Lucene.Net.Documents.Field.Index.NO));
                if (bean.CompanyId != -1) doc.Add(new Field("company_id", bean.CompanyId.ToString(), Lucene.Net.Documents.Field.Store.YES, Lucene.Net.Documents.Field.Index.NO));
                if (bean.IssueIdPerCompany != -1) doc.Add(new Field("issue_per_company", bean.IssueIdPerCompany.ToString(), Lucene.Net.Documents.Field.Store.YES, Lucene.Net.Documents.Field.Index.NO));
                if (bean.PriorityId != -1) doc.Add(new Field("priority_id", bean.PriorityId.ToString(), Lucene.Net.Documents.Field.Store.YES, Lucene.Net.Documents.Field.Index.NO));
                if (bean.StatusId != -1) doc.Add(new Field("status_id", bean.StatusId.ToString(), Lucene.Net.Documents.Field.Store.YES, Lucene.Net.Documents.Field.Index.NO));
                if (!string.IsNullOrEmpty(bean.CurrentOwner)) doc.Add(new Field("curr_owner", bean.CurrentOwner, Lucene.Net.Documents.Field.Store.YES, Lucene.Net.Documents.Field.Index.NO));
                if (!string.IsNullOrEmpty(bean.LastOwner)) doc.Add(new Field("last_owner", bean.LastOwner, Lucene.Net.Documents.Field.Store.YES, Lucene.Net.Documents.Field.Index.NO));
                if (!string.IsNullOrEmpty(bean.ProjectName)) doc.Add(new Field("project_name", bean.ProjectName, Lucene.Net.Documents.Field.Store.YES, Lucene.Net.Documents.Field.Index.NO));

                if (bean.AssignedCUDate != DateTime.MinValue) doc.Add(new Field("assigned_cu_date", DateTools.DateToString(bean.AssignedCUDate, DateTools.Resolution.DAY), Lucene.Net.Documents.Field.Store.NO, Lucene.Net.Documents.Field.Index.ANALYZED));
                if (bean.DestinationDate != DateTime.MinValue) doc.Add(new Field("destination_date", DateTools.DateToString(bean.DestinationDate, DateTools.Resolution.DAY), Lucene.Net.Documents.Field.Store.NO, Lucene.Net.Documents.Field.Index.ANALYZED));
                if (!string.IsNullOrEmpty(bean.DescLast)) doc.Add(new Field("desc_last", bean.DescLast, Lucene.Net.Documents.Field.Store.NO, Lucene.Net.Documents.Field.Index.ANALYZED));
                if (bean.ReadUsersBitIds1 != -1) doc.Add(new Field("read_usr_bit1", bean.ReadUsersBitIds1.ToString(), Lucene.Net.Documents.Field.Store.YES, Lucene.Net.Documents.Field.Index.NO));
                if (bean.ReadUsersBitIds2 != -1) doc.Add(new Field("read_usr_bit2", bean.ReadUsersBitIds2.ToString(), Lucene.Net.Documents.Field.Store.YES, Lucene.Net.Documents.Field.Index.NO));
                if (bean.LastUpdate != DateTime.MinValue) doc.Add(new Field("last_update", DateTools.DateToString(bean.LastUpdate, DateTools.Resolution.DAY), Lucene.Net.Documents.Field.Store.NO, Lucene.Net.Documents.Field.Index.ANALYZED));
                if (bean.StartDate != DateTime.MinValue) doc.Add(new Field("start_date", DateTools.DateToString(bean.StartDate, DateTools.Resolution.DAY), Lucene.Net.Documents.Field.Store.NO, Lucene.Net.Documents.Field.Index.ANALYZED));
                if (bean.IsAllDay) doc.Add(new Field("is_all_day", "1", Lucene.Net.Documents.Field.Store.YES, Lucene.Net.Documents.Field.Index.NO));
                else doc.Add(new Field("is_all_day", "0", Lucene.Net.Documents.Field.Store.YES, Lucene.Net.Documents.Field.Index.NO));
                if (bean.DestReminderDate != DateTime.MinValue) doc.Add(new Field("dest_rem_date", DateTools.DateToString(bean.DestReminderDate, DateTools.Resolution.DAY), Lucene.Net.Documents.Field.Store.NO, Lucene.Net.Documents.Field.Index.ANALYZED));
                if (bean.ReccurenceId != -1) doc.Add(new Field("reccurence_id", bean.ReccurenceId.ToString(), Lucene.Net.Documents.Field.Store.YES, Lucene.Net.Documents.Field.Index.NO));

                idxModif.AddDocument(doc);
                idxModif.Optimize();
                idxModif.Commit();

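                // MaxDoc counts every document in the index, including any still marked as deleted,
                // so the value returned below is the total index size, not just the document added here.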
                numIndexed = idxModif.MaxDoc();
                idxModif.Dispose();

            }
            catch
            {
                if (idxModif != null)
                {
                    idxModif.Optimize();
                    idxModif.Commit();
                    idxModif.Dispose();
                }
                throw;
            }
            return numIndexed;
        }
Developer: ulise28, Project: TeamDoWebService, Lines: 58, Source: LuceneDao.cs

Example 9: TestExpungeDeletes

		public virtual void  TestExpungeDeletes()
		{
			Directory dir = new MockRAMDirectory();
			IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
			writer.SetMaxBufferedDocs(2);
			writer.SetRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
			
			Document document = new Document();
			Field storedField = new Field("stored", "stored", Field.Store.YES, Field.Index.NO);
			document.Add(storedField);
			Field termVectorField = new Field("termVector", "termVector", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
			document.Add(termVectorField);
			for (int i = 0; i < 10; i++)
				writer.AddDocument(document);
			writer.Close();
			
			IndexReader ir = IndexReader.Open(dir);
			Assert.AreEqual(10, ir.MaxDoc());
			Assert.AreEqual(10, ir.NumDocs());
			ir.DeleteDocument(0);
			ir.DeleteDocument(7);
			Assert.AreEqual(8, ir.NumDocs());
			ir.Close();
			
			writer = new IndexWriter(dir, new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
			Assert.AreEqual(8, writer.NumDocs());
			Assert.AreEqual(10, writer.MaxDoc());
			writer.ExpungeDeletes();
			Assert.AreEqual(8, writer.NumDocs());
			writer.Close();
			ir = IndexReader.Open(dir);
			Assert.AreEqual(8, ir.MaxDoc());
			Assert.AreEqual(8, ir.NumDocs());
			ir.Close();
			dir.Close();
		}
Developer: Rationalle, Project: ravendb, Lines: 38, Source: TestIndexWriter.cs
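This example makes the semantics of the two counters explicit: after two deletions, NumDocs() drops to 8 while MaxDoc() stays at 10, because MaxDoc() keeps counting deleted documents until ExpungeDeletes (or a regular merge) physically removes them.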

Example 10: TestFlushDocCount

        public virtual void TestFlushDocCount()
        {
            int[] numThreads = new int[] { 2 + AtLeast(1), 1 };
            for (int i = 0; i < numThreads.Length; i++)
            {

                int numDocumentsToIndex = 50 + AtLeast(30);
                AtomicInteger numDocs = new AtomicInteger(numDocumentsToIndex);
                Directory dir = NewDirectory();
                MockDefaultFlushPolicy flushPolicy = new MockDefaultFlushPolicy();
                IndexWriterConfig iwc = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetFlushPolicy(flushPolicy);

                int numDWPT = 1 + AtLeast(2);
                DocumentsWriterPerThreadPool threadPool = new ThreadAffinityDocumentsWriterThreadPool(numDWPT);
                iwc.SetIndexerThreadPool(threadPool);
                iwc.SetMaxBufferedDocs(2 + AtLeast(10));
                iwc.SetRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
                iwc.SetMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);
                IndexWriter writer = new IndexWriter(dir, iwc);
                flushPolicy = (MockDefaultFlushPolicy)writer.Config.FlushPolicy;
                Assert.IsTrue(flushPolicy.FlushOnDocCount());
                Assert.IsFalse(flushPolicy.FlushOnDeleteTerms());
                Assert.IsFalse(flushPolicy.FlushOnRAM());
                DocumentsWriter docsWriter = writer.DocsWriter;
                Assert.IsNotNull(docsWriter);
                DocumentsWriterFlushControl flushControl = docsWriter.FlushControl;
                Assert.AreEqual(0, flushControl.FlushBytes(), " bytes must be 0 after init");

                IndexThread[] threads = new IndexThread[numThreads[i]];
                for (int x = 0; x < threads.Length; x++)
                {
                    threads[x] = new IndexThread(this, numDocs, numThreads[i], writer, LineDocFile, false);
                    threads[x].Start();
                }

                for (int x = 0; x < threads.Length; x++)
                {
                    threads[x].Join();
                }

                Assert.AreEqual(0, flushControl.FlushBytes(), " all flushes must be due numThreads=" + numThreads[i]);
                Assert.AreEqual(numDocumentsToIndex, writer.NumDocs());
                Assert.AreEqual(numDocumentsToIndex, writer.MaxDoc());
                Assert.IsTrue(flushPolicy.PeakDocCountWithoutFlush <= iwc.MaxBufferedDocs, "peak bytes without flush exceeded watermark");
                AssertActiveBytesAfter(flushControl);
                writer.Dispose();
                Assert.AreEqual(0, flushControl.ActiveBytes());
                dir.Dispose();
            }
        }
Developer: Cefa68000, Project: lucenenet, Lines: 50, Source: TestFlushByRamOrCountsPolicy.cs

Example 11: UpdateIndex

        /// <summary>
        /// Update Index by Language
        /// </summary>
        /// <param name="indexDir"></param>
        /// <param name="lng"></param>
        /// <returns></returns>
        public void UpdateIndex(Lucene.Net.Store.Directory indexDir, String lng)
        {
            #if DEBUG
            T.TraceMessage("Begin full indexing , language {0}, time: {1}", lng, DateTime.Now.ToLongTimeString());
            #endif
            IndexWriter writer = null;
            int step = 1000;
            int totalDoc = 0;

            try
            {
                DateTime beginTime = DateTime.Now;
                int num = 0, numPar = 0;

                num = new IssuesDao().GetTotalPublicationByLanguage(lng);

                if (IndexWriter.IsLocked(indexDir))
                    IndexWriter.Unlock(indexDir);
                writer = new IndexWriter(indexDir, this.Analizer, true, new IndexWriter.MaxFieldLength(2500000));

                // change number of documents to store in memory before writing them to the disk
                //writer.SetMergeFactor();
                if (num != 0)
                {
                    if (step >= num)
                    {
                        long ini = DateTime.Now.Millisecond;
                        totalDoc = Index(writer, 1, num, lng);
                        long fin = DateTime.Now.Millisecond;
                    }
                    else
                    {
                        int numStep = (num / step);
                        int cont = 1;
                        int start = 0;
                        int end = 0;

                        for (int i = 0; i < numStep; i++)
                        {
                            start = (cont + (i * step));
                            end = start + step;
                            DateTime ini = DateTime.Now;

                            numPar = Index(writer, start, end, lng);

                            DateTime fin = DateTime.Now;
                            TimeSpan ts = fin.Subtract(ini);

            #if DEBUG
                            T.TraceMessage("Indexing from {0} to {1} took {2} seconds", start, end, (ts.Minutes * 60 + ts.Seconds));
            #endif
                        }

                        if (num != end)
                        {
                            start = end;
                            end = num;

                            numPar = Index(writer, start, end + 1, lng);
                        }
                    }
                }
                DateTime endTime = DateTime.Now;
                TimeSpan tsFull = endTime.Subtract(beginTime);

                //Commit writer
                writer.Commit();
                int indexedDocs = writer.MaxDoc(); // capture before Dispose: the writer can no longer be queried once disposed
                writer.Dispose();

            #if DEBUG
                T.TraceMessage("Full indexing took {0} seconds, indexed {1} documents", ((tsFull.Hours * 3600) + (tsFull.Minutes * 60) + tsFull.Seconds), indexedDocs);
            #endif
            }
            catch (Exception ex)
            {
                if (writer != null)
                {
                    writer.Optimize();
                    writer.Commit();
                    writer.Dispose();
                }
                T.TraceError("Error Full Index , class {0}, language {1} ", CLASS_NAME, lng);
                T.TraceError(ex);
                throw; // rethrow, preserving the original stack trace
            }
        }
Developer: ulise28, Project: TeamDoWebService, Lines: 92, Source: LuceneDao.cs

Example 12: RunFlushByRam

        protected internal virtual void RunFlushByRam(int numThreads, double maxRamMB, bool ensureNotStalled)
        {
            int numDocumentsToIndex = 10 + AtLeast(30);
            AtomicInteger numDocs = new AtomicInteger(numDocumentsToIndex);
            Directory dir = NewDirectory();
            MockDefaultFlushPolicy flushPolicy = new MockDefaultFlushPolicy();
            MockAnalyzer analyzer = new MockAnalyzer(Random());
            analyzer.MaxTokenLength = TestUtil.NextInt(Random(), 1, IndexWriter.MAX_TERM_LENGTH);

            IndexWriterConfig iwc = NewIndexWriterConfig(TEST_VERSION_CURRENT, analyzer).SetFlushPolicy(flushPolicy);
            int numDWPT = 1 + AtLeast(2);
            DocumentsWriterPerThreadPool threadPool = new ThreadAffinityDocumentsWriterThreadPool(numDWPT);
            iwc.SetIndexerThreadPool(threadPool);
            iwc.SetRAMBufferSizeMB(maxRamMB);
            iwc.SetMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
            iwc.SetMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);
            IndexWriter writer = new IndexWriter(dir, iwc);
            flushPolicy = (MockDefaultFlushPolicy)writer.Config.FlushPolicy;
            Assert.IsFalse(flushPolicy.FlushOnDocCount());
            Assert.IsFalse(flushPolicy.FlushOnDeleteTerms());
            Assert.IsTrue(flushPolicy.FlushOnRAM());
            DocumentsWriter docsWriter = writer.DocsWriter;
            Assert.IsNotNull(docsWriter);
            DocumentsWriterFlushControl flushControl = docsWriter.FlushControl;
            Assert.AreEqual(0, flushControl.FlushBytes(), " bytes must be 0 after init");

            IndexThread[] threads = new IndexThread[numThreads];
            for (int x = 0; x < threads.Length; x++)
            {
                threads[x] = new IndexThread(this, numDocs, numThreads, writer, LineDocFile, false);
                threads[x].Start();
            }

            for (int x = 0; x < threads.Length; x++)
            {
                threads[x].Join();
            }
            long maxRAMBytes = (long)(iwc.RAMBufferSizeMB * 1024.0 * 1024.0);
            Assert.AreEqual(0, flushControl.FlushBytes(), " all flushes must be due numThreads=" + numThreads);
            Assert.AreEqual(numDocumentsToIndex, writer.NumDocs());
            Assert.AreEqual(numDocumentsToIndex, writer.MaxDoc());
            Assert.IsTrue(flushPolicy.PeakBytesWithoutFlush <= maxRAMBytes, "peak bytes without flush exceeded watermark");
            AssertActiveBytesAfter(flushControl);
            if (flushPolicy.HasMarkedPending)
            {
                Assert.IsTrue(maxRAMBytes < flushControl.PeakActiveBytes);
            }
            if (ensureNotStalled)
            {
                Assert.IsFalse(docsWriter.FlushControl.StallControl.WasStalled());
            }
            writer.Dispose();
            Assert.AreEqual(0, flushControl.ActiveBytes());
            dir.Dispose();
        }
Developer: Cefa68000, Project: lucenenet, Lines: 55, Source: TestFlushByRamOrCountsPolicy.cs

Example 13: TestStallControl

        public virtual void TestStallControl()
        {
            int[] numThreads = new int[] { 4 + Random().Next(8), 1 };
            int numDocumentsToIndex = 50 + Random().Next(50);
            for (int i = 0; i < numThreads.Length; i++)
            {
                AtomicInteger numDocs = new AtomicInteger(numDocumentsToIndex);
                MockDirectoryWrapper dir = NewMockDirectory();
                // mock a very slow harddisk sometimes here so that flushing is very slow
                dir.Throttling = MockDirectoryWrapper.Throttling_e.SOMETIMES;
                IndexWriterConfig iwc = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
                iwc.SetMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
                iwc.SetMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);
                FlushPolicy flushPolicy = new FlushByRamOrCountsPolicy();
                iwc.SetFlushPolicy(flushPolicy);

                DocumentsWriterPerThreadPool threadPool = new ThreadAffinityDocumentsWriterThreadPool(numThreads[i] == 1 ? 1 : 2);
                iwc.SetIndexerThreadPool(threadPool);
                // with such a small ram buffer we should be stalled quiet quickly
                iwc.SetRAMBufferSizeMB(0.25);
                IndexWriter writer = new IndexWriter(dir, iwc);
                IndexThread[] threads = new IndexThread[numThreads[i]];
                for (int x = 0; x < threads.Length; x++)
                {
                    threads[x] = new IndexThread(this, numDocs, numThreads[i], writer, LineDocFile, false);
                    threads[x].Start();
                }

                for (int x = 0; x < threads.Length; x++)
                {
                    threads[x].Join();
                }
                DocumentsWriter docsWriter = writer.DocsWriter;
                Assert.IsNotNull(docsWriter);
                DocumentsWriterFlushControl flushControl = docsWriter.FlushControl;
                Assert.AreEqual(0, flushControl.FlushBytes(), " all flushes must be due");
                Assert.AreEqual(numDocumentsToIndex, writer.NumDocs());
                Assert.AreEqual(numDocumentsToIndex, writer.MaxDoc());
                if (numThreads[i] == 1)
                {
                    Assert.IsFalse("single thread must not block numThreads: " + numThreads[i], docsWriter.FlushControl.StallControl.HasBlocked());
                }
                if (docsWriter.FlushControl.PeakNetBytes > (2d * iwc.RAMBufferSizeMB * 1024d * 1024d))
                {
                    Assert.IsTrue(docsWriter.FlushControl.StallControl.WasStalled());
                }
                AssertActiveBytesAfter(flushControl);
                writer.Dispose(true);
                dir.Dispose();
            }
        }
Developer: Cefa68000, Project: lucenenet, Lines: 51, Source: TestFlushByRamOrCountsPolicy.cs

Example 14: TestRandom

        public virtual void TestRandom()
        {
            int numThreads = 1 + Random().Next(8);
            int numDocumentsToIndex = 50 + AtLeast(70);
            AtomicInteger numDocs = new AtomicInteger(numDocumentsToIndex);
            Directory dir = NewDirectory();
            IndexWriterConfig iwc = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
            MockDefaultFlushPolicy flushPolicy = new MockDefaultFlushPolicy();
            iwc.SetFlushPolicy(flushPolicy);

            int numDWPT = 1 + Random().Next(8);
            DocumentsWriterPerThreadPool threadPool = new ThreadAffinityDocumentsWriterThreadPool(numDWPT);
            iwc.SetIndexerThreadPool(threadPool);

            IndexWriter writer = new IndexWriter(dir, iwc);
            flushPolicy = (MockDefaultFlushPolicy)writer.Config.FlushPolicy;
            DocumentsWriter docsWriter = writer.DocsWriter;
            Assert.IsNotNull(docsWriter);
            DocumentsWriterFlushControl flushControl = docsWriter.FlushControl;

            Assert.AreEqual(0, flushControl.FlushBytes(), " bytes must be 0 after init");

            IndexThread[] threads = new IndexThread[numThreads];
            for (int x = 0; x < threads.Length; x++)
            {
                threads[x] = new IndexThread(this, numDocs, numThreads, writer, LineDocFile, true);
                threads[x].Start();
            }

            for (int x = 0; x < threads.Length; x++)
            {
                threads[x].Join();
            }
            Assert.AreEqual(0, flushControl.FlushBytes(), " all flushes must be due");
            Assert.AreEqual(numDocumentsToIndex, writer.NumDocs());
            Assert.AreEqual(numDocumentsToIndex, writer.MaxDoc());
            if (flushPolicy.FlushOnRAM() && !flushPolicy.FlushOnDocCount() && !flushPolicy.FlushOnDeleteTerms())
            {
                long maxRAMBytes = (long)(iwc.RAMBufferSizeMB * 1024.0 * 1024.0);
                Assert.IsTrue(flushPolicy.PeakBytesWithoutFlush <= maxRAMBytes, "peak bytes without flush exceeded watermark");
                if (flushPolicy.HasMarkedPending)
                {
                    Assert.IsTrue("max: " + maxRAMBytes + " " + flushControl.PeakActiveBytes, maxRAMBytes <= flushControl.PeakActiveBytes);
                }
            }
            AssertActiveBytesAfter(flushControl);
            writer.Commit();
            Assert.AreEqual(0, flushControl.ActiveBytes());
            IndexReader r = DirectoryReader.Open(dir);
            Assert.AreEqual(numDocumentsToIndex, r.NumDocs());
            Assert.AreEqual(numDocumentsToIndex, r.MaxDoc());
            if (!flushPolicy.FlushOnRAM())
            {
                Assert.IsFalse("never stall if we don't flush on RAM", docsWriter.FlushControl.StallControl.WasStalled());
                Assert.IsFalse("never block if we don't flush on RAM", docsWriter.FlushControl.StallControl.HasBlocked());
            }
            r.Dispose();
            writer.Dispose();
            dir.Dispose();
        }
Developer: Cefa68000, Project: lucenenet, Lines: 60, Source: TestFlushByRamOrCountsPolicy.cs

Example 15: DocumentsWriter

        internal DocumentsWriter(Directory directory, IndexWriter writer)
        {
            this.directory = directory;
            this.writer = writer;
            this.similarity = writer.GetSimilarity();
            flushedDocCount = writer.MaxDoc();

            byteBlockAllocator = new ByteBlockAllocator(this);
            waitQueue = new WaitQueue(this);

            /*
              This is the current indexing chain:

              DocConsumer / DocConsumerPerThread
                --> code: DocFieldProcessor / DocFieldProcessorPerThread
                  --> DocFieldConsumer / DocFieldConsumerPerThread / DocFieldConsumerPerField
                    --> code: DocFieldConsumers / DocFieldConsumersPerThread / DocFieldConsumersPerField
                      --> code: DocInverter / DocInverterPerThread / DocInverterPerField
                        --> InvertedDocConsumer / InvertedDocConsumerPerThread / InvertedDocConsumerPerField
                          --> code: TermsHash / TermsHashPerThread / TermsHashPerField
                            --> TermsHashConsumer / TermsHashConsumerPerThread / TermsHashConsumerPerField
                              --> code: FreqProxTermsWriter / FreqProxTermsWriterPerThread / FreqProxTermsWriterPerField
                              --> code: TermVectorsTermsWriter / TermVectorsTermsWriterPerThread / TermVectorsTermsWriterPerField
                        --> InvertedDocEndConsumer / InvertedDocConsumerPerThread / InvertedDocConsumerPerField
                          --> code: NormsWriter / NormsWriterPerThread / NormsWriterPerField
                      --> code: StoredFieldsWriter / StoredFieldsWriterPerThread / StoredFieldsWriterPerField
            */

            // TODO FI: this should be something the user can pass in
            // Build up indexing chain:
            TermsHashConsumer termVectorsWriter = new TermVectorsTermsWriter(this);
            TermsHashConsumer freqProxWriter = new FreqProxTermsWriter();

            InvertedDocConsumer termsHash = new TermsHash(this, true, freqProxWriter,
                                                                 new TermsHash(this, false, termVectorsWriter, null));
            NormsWriter normsWriter = new NormsWriter();
            DocInverter docInverter = new DocInverter(termsHash, normsWriter);
            StoredFieldsWriter fieldsWriter = new StoredFieldsWriter(this);
            DocFieldConsumers docFieldConsumers = new DocFieldConsumers(docInverter, fieldsWriter);
            consumer = docFieldProcessor = new DocFieldProcessor(this, docFieldConsumers);
        }
Developer: cqm0609, Project: lucene-file-finder, Lines: 41, Source: DocumentsWriter.cs


Note: The Lucene.Net.Index.IndexWriter.MaxDoc method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers, and the copyright of the source code belongs to its original authors; when redistributing or using the code, please follow the license of the corresponding project. Please do not reproduce this article without permission.