本文整理汇总了C#中Lucene.Net.Index.IndexWriter.SetMaxBufferedDocs方法的典型用法代码示例。如果您正苦于以下问题:C# IndexWriter.SetMaxBufferedDocs方法的具体用法?C# IndexWriter.SetMaxBufferedDocs怎么用?C# IndexWriter.SetMaxBufferedDocs使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Lucene.Net.Index.IndexWriter的用法示例。
在下文中一共展示了IndexWriter.SetMaxBufferedDocs方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: TestRollbackIntegrityWithBufferFlush
public void TestRollbackIntegrityWithBufferFlush()
{
    // Build a 5-doc baseline index, then reopen the writer with a tiny doc
    // buffer so the subsequent updates force intermediate flushes before
    // the rollback.
    Directory dir = new MockRAMDirectory();
    IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
    for (int docId = 0; docId < 5; docId++)
    {
        Document baseline = new Document();
        baseline.Add(new Field("pk", docId.ToString(), Field.Store.YES, Field.Index.ANALYZED_NO_NORMS));
        w.AddDocument(baseline);
    }
    w.Close();

    // If buffer size is small enough to cause a flush, errors ensue...
    w = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
    w.SetMaxBufferedDocs(2);

    Term pkTerm = new Term("pk", "");
    for (int docId = 0; docId < 3; docId++)
    {
        String key = docId.ToString();
        Document updated = new Document();
        updated.Add(new Field("pk", key, Field.Store.YES, Field.Index.ANALYZED_NO_NORMS));
        updated.Add(new Field("text", "foo", Field.Store.YES, Field.Index.ANALYZED_NO_NORMS));
        w.UpdateDocument(pkTerm.CreateTerm(key), updated);
    }
    w.Rollback();

    // Rollback must leave the original five documents intact.
    IndexReader r = IndexReader.Open(dir, true);
    Assert.AreEqual(5, r.NumDocs(), "index should contain same number of docs post rollback");
    r.Close();
    dir.Close();
}
示例2: BuildAutoCompleteIndex
/// <summary>
/// Open the index in the given directory and create a new index of word frequency for the
/// given index.</summary>
/// <summary>
/// Open the index in the given directory and create a new index of word frequency for the
/// given index.</summary>
public void BuildAutoCompleteIndex()
{
    // Custom analyzer so EdgeNGram filtering is applied while indexing.
    var analyzer = new AutoCompleteAnalyzer();
    using (var writer = new IndexWriter(m_directory, analyzer, true, IndexWriter.MaxFieldLength.LIMITED))
    {
        writer.MergeFactor = 300;
        writer.SetMaxBufferedDocs(150);

        // Index every hotel name (incl. n-grams) together with the number
        // of times it occurs.
        foreach (var hotel in _hotels)
        {
            // Names shorter than 3 chars are skipped; "too long" is fine.
            if (hotel.Name.Length < 3)
            {
                continue;
            }
            // Use the number of documents this word appears in.
            var occurrences = hotel.SearchCount;
            writer.AddDocument(MakeDocument(hotel, occurrences));
        }
        writer.Optimize();
    }

    // re-open our reader
    ReplaceSearcher();
}
示例3: TestSorting
public virtual void TestSorting()
{
    Directory directory = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    writer.SetMaxBufferedDocs(2);
    writer.SetMergeFactor(1000);

    // Three "ipod" docs (a..c) and three "boosted" docs (x..z); the title
    // term is repeated 1, 2, or 3 times to drive relevance ordering.
    System.String[][] docSpecs = new System.String[][]
    {
        new System.String[] { "id", "a", "title", "ipod", "str_s", "a" },
        new System.String[] { "id", "b", "title", "ipod ipod", "str_s", "b" },
        new System.String[] { "id", "c", "title", "ipod ipod ipod", "str_s", "c" },
        new System.String[] { "id", "x", "title", "boosted", "str_s", "x" },
        new System.String[] { "id", "y", "title", "boosted boosted", "str_s", "y" },
        new System.String[] { "id", "z", "title", "boosted boosted boosted", "str_s", "z" }
    };
    foreach (System.String[] spec in docSpecs)
    {
        writer.AddDocument(Adoc(spec));
    }

    // Near-real-time reader taken before the writer is closed.
    IndexReader r = writer.GetReader();
    writer.Close();

    IndexSearcher searcher = new IndexSearcher(r);
    RunTest(searcher, true);
    RunTest(searcher, false);

    searcher.Close();
    r.Close();
    directory.Close();
}
示例4: ApplyToWriter
/// <summary>
/// Copies each configured (non-null) tuning knob from this settings object
/// onto the given <see cref="IndexWriter"/>. Unset (null) values leave the
/// writer's defaults untouched.
/// </summary>
/// <param name="writer">The writer to configure.</param>
public void ApplyToWriter(IndexWriter writer)
{
    try
    {
        if (MergeFactor != null)
        {
            writer.SetMergeFactor((int) MergeFactor);
        }
        if (MaxMergeDocs != null)
        {
            writer.SetMaxMergeDocs((int) MaxMergeDocs);
        }
        if (MaxBufferedDocs != null)
        {
            writer.SetMaxBufferedDocs((int) MaxBufferedDocs);
        }
        if (RamBufferSizeMb != null)
        {
            // SetRAMBufferSizeMB takes a double; the previous (int) cast
            // silently truncated fractional megabyte settings (e.g. 16.5 -> 16).
            writer.SetRAMBufferSizeMB((double) RamBufferSizeMb);
        }
        if (TermIndexInterval != null)
        {
            writer.SetTermIndexInterval((int) TermIndexInterval);
        }
    }
    catch (ArgumentOutOfRangeException)
    {
        // Deliberately best-effort: an out-of-range knob is ignored rather
        // than aborting writer configuration. TODO: Log it
    }
}
示例5: TestFlushExceptions
public virtual void TestFlushExceptions()
{
// Injects an IOException at flush time (via FailOnlyOnFlush) and verifies
// the index is never corrupted: the final reader must see the 200 base
// docs plus every extra doc whose flush succeeded before a failure hit.
MockRAMDirectory directory = new MockRAMDirectory();
FailOnlyOnFlush failure = new FailOnlyOnFlush();
directory.FailOn(failure);
IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);
ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
writer.SetMergeScheduler(cms);
// Tiny buffer so flushes (and thus failure opportunities) happen often.
writer.SetMaxBufferedDocs(2);
Document doc = new Document();
Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
doc.Add(idField);
// Counts docs added beyond the base 200 while cycling for the exception.
int extraCount = 0;
for (int i = 0; i < 10; i++)
{
// Add a batch of 20 uniquely-id'd documents.
for (int j = 0; j < 20; j++)
{
idField.SetValue(System.Convert.ToString(i*20 + j));
writer.AddDocument(doc);
}
while (true)
{
// must cycle here because sometimes the merge flushes
// the doc we just added and so there's nothing to
// flush, and we don't hit the exception
writer.AddDocument(doc);
failure.SetDoFail();
try
{
writer.Flush(true, false, true);
if (failure.hitExc)
Assert.Fail("failed to hit IOException");
// Flush completed before the armed failure tripped; the extra doc
// made it into the index, so account for it in the final count.
extraCount++;
}
catch (System.IO.IOException ioe)
{
// Got the injected failure; disarm it and move to the next batch.
failure.ClearDoFail();
break;
}
}
}
writer.Close();
IndexReader reader = IndexReader.Open(directory, true);
Assert.AreEqual(200 + extraCount, reader.NumDocs());
reader.Close();
directory.Close();
}
示例6: TestCommitUserData
public virtual void TestCommitUserData()
{
// Verifies that user data attached to a commit via IndexReader.Flush is
// visible on the matching IndexCommit, that later index changes produce a
// different commit, and that Optimize marks the commit as optimized.
RAMDirectory d = new MockRAMDirectory();
System.Collections.Generic.IDictionary<string, string> commitUserData = new System.Collections.Generic.Dictionary<string,string>();
commitUserData["foo"] = "fighters";
// set up writer
IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(Util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.SetMaxBufferedDocs(2);
for (int i = 0; i < 27; i++)
AddDocumentWithFields(writer);
writer.Close();
IndexReader r = IndexReader.Open(d, false);
r.DeleteDocument(5);
// Commit the deletion together with the custom user data.
r.Flush(commitUserData);
r.Close();
// Read the segments file directly to cross-check the commit's filename.
SegmentInfos sis = new SegmentInfos();
sis.Read(d);
IndexReader r2 = IndexReader.Open(d, false);
// NOTE(review): r.IndexCommit is accessed after r.Close(); presumably the
// commit metadata stays valid on a closed reader - confirm.
IndexCommit c = r.IndexCommit;
Assert.AreEqual(c.UserData, commitUserData);
Assert.AreEqual(sis.GetCurrentSegmentFileName(), c.SegmentsFileName);
Assert.IsTrue(c.Equals(r.IndexCommit));
// Change the index
writer = new IndexWriter(d, new StandardAnalyzer(Util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
writer.SetMaxBufferedDocs(2);
for (int i = 0; i < 7; i++)
AddDocumentWithFields(writer);
writer.Close();
// After new adds, a reopened reader must be on a different commit.
IndexReader r3 = r2.Reopen();
Assert.IsFalse(c.Equals(r3.IndexCommit));
Assert.IsFalse(r2.IndexCommit.IsOptimized);
r3.Close();
writer = new IndexWriter(d, new StandardAnalyzer(Util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
writer.Optimize();
writer.Close();
// Optimize creates a new commit flagged as optimized.
r3 = r2.Reopen();
Assert.IsTrue(r3.IndexCommit.IsOptimized);
r2.Close();
r3.Close();
d.Close();
}
示例7: CreateIndexWriter
/// <summary>
/// Creates an IndexWriter tuned for bulk-loading large volumes of data.
/// </summary>
public static IndexWriter CreateIndexWriter()
{
    var indexPath = GetIndexDir();

    // Create a fresh index only when none exists at the target path.
    var writer = new IndexWriter(
        indexPath,
        new RussianAnalyzer(Version.LUCENE_30),
        !IndexReader.IndexExists(indexPath),
        IndexWriter.MaxFieldLength.UNLIMITED);

    // Large in-memory doc buffer keeps flushes rare during bulk indexing.
    writer.SetMaxBufferedDocs(4 * 1024);
    //writer.SetMergeFactor(30);
    return writer;
}
示例8: TestNormalCase
public virtual void TestNormalCase()
{
    Directory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    writer.SetMaxBufferedDocs(10);
    writer.MergeFactor = 10;
    writer.SetMergePolicy(new LogDocMergePolicy(writer));

    // Add 100 docs one at a time, validating the merge-policy invariants
    // after every single addition.
    int added = 0;
    while (added < 100)
    {
        AddDoc(writer);
        CheckInvariants(writer);
        added++;
    }
    writer.Close();
}
示例9: InitIndex
private IndexWriter InitIndex(MockRAMDirectory dir)
{
    // Disable locking so the directory can be shared across writers.
    dir.SetLockFactory(NoLockFactory.Instance);

    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
    writer.SetMaxBufferedDocs(10);
    // Keep background merge exceptions from failing the test directly.
    ((ConcurrentMergeScheduler) writer.MergeScheduler).SetSuppressExceptions();

    // Seed the index with 157 copies of the same two-field document.
    Document seed = new Document();
    seed.Add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
    seed.Add(new Field("id", "0", Field.Store.YES, Field.Index.ANALYZED));
    for (int added = 0; added < 157; added++)
    {
        writer.AddDocument(seed);
    }
    return writer;
}
示例10: CreateIndex
private void CreateIndex(int numHits)
{
    int numDocs = 500;
    Directory directory = new RAMDirectory();
    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true);
    writer.SetMaxBufferedDocs(10);

    for (int docNo = 0; docNo < numDocs; docNo++)
    {
        // Choose the content so that exactly numHits docs match the query
        // "term1 term2", some docs contain only term1, and the remainder
        // contain term2 but not term1.
        System.String content;
        if (docNo % (numDocs / numHits) == 0)
        {
            content = this.term1 + " " + this.term2;   // matches "term1 term2"
        }
        else if (docNo % 15 == 0)
        {
            content = this.term1 + " " + this.term1;   // term1 only
        }
        else
        {
            content = this.term3 + " " + this.term2;   // term2 without term1
        }

        var doc = new Lucene.Net.Documents.Document();
        doc.Add(new Field(this.field, content, Field.Store.YES, Field.Index.TOKENIZED));
        writer.AddDocument(doc);
    }

    // make sure the index has only a single segment
    writer.Optimize();
    writer.Close();

    // A single-segment index makes IndexReader.Open return a SegmentReader.
    SegmentReader reader = (SegmentReader) IndexReader.Open(directory);
    // Decorate the prox stream with a wrapper that counts seek() calls.
    reader.ProxStream_ForNUnitTest = new SeeksCountingStream(this, reader.ProxStream_ForNUnitTest);
    this.searcher = new IndexSearcher(reader);
}
示例11: TestFlushExceptions
public virtual void TestFlushExceptions()
{
// Older variant of the flush-failure test: arms the injected failure once
// per outer iteration and expects Flush() to hit an IOException each time,
// leaving exactly the 200 successfully flushed base docs in the index.
MockRAMDirectory directory = new MockRAMDirectory();
FailOnlyOnFlush failure = new FailOnlyOnFlush();
directory.FailOn(failure);
IndexWriter writer = new IndexWriter(directory, true, ANALYZER, true);
ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
writer.SetMergeScheduler(cms);
// Tiny buffer so each batch of adds performs real flush work.
writer.SetMaxBufferedDocs(2);
Document doc = new Document();
Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
doc.Add(idField);
for (int i = 0; i < 10; i++)
{
// Add a batch of 20 uniquely-id'd documents.
for (int j = 0; j < 20; j++)
{
idField.SetValue(System.Convert.ToString(i * 20 + j));
writer.AddDocument(doc);
}
// One extra doc so there is something buffered for the next flush,
// which is then expected to fail.
// NOTE(review): unlike the newer variant of this test, there is no retry
// loop here; if a concurrent merge flushed the buffered doc first, the
// Flush() below might not fail and Assert.Fail would fire - confirm
// whether that scheduling can occur in this Lucene version.
writer.AddDocument(doc);
failure.SetDoFail();
try
{
writer.Flush();
Assert.Fail("failed to hit IOException");
}
catch (System.IO.IOException ioe)
{
// Injected failure observed; disarm it for the next batch of adds.
failure.ClearDoFail();
}
}
writer.Close();
IndexReader reader = IndexReader.Open(directory);
// Only the 200 docs from the inner loops were committed; the per-iteration
// extra doc's flush failed, so it must not appear in the index.
Assert.AreEqual(200, reader.NumDocs());
reader.Close();
directory.Close();
}
示例12: TestDeletedDocs
public virtual void TestDeletedDocs()
{
    // Index 19 identical docs in small (2-doc) flushes, delete one, then
    // verify CheckIndex still reports the index as healthy.
    MockRAMDirectory dir = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
    writer.SetMaxBufferedDocs(2);

    Document doc = new Document();
    doc.Add(new Field("field", "aaa", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
    int remaining = 19;
    while (remaining-- > 0)
    {
        writer.AddDocument(doc);
    }
    writer.Close();

    IndexReader reader = IndexReader.Open(dir);
    reader.DeleteDocument(5);
    reader.Close();

    // Capture CheckIndex's report so a failure message shows the details.
    CheckIndex.out_Renamed = new System.IO.StringWriter();
    bool healthy = CheckIndex.Check(dir, false);
    String report = CheckIndex.out_Renamed.ToString();
    Assert.IsTrue(healthy, report);
}
示例13: TestIndexing
public virtual void TestIndexing()
{
    // Hammers a single IndexWriter from four concurrent threads for ~5s
    // with reader pooling enabled, then verifies a clean shutdown with no
    // thread-side exceptions.
    Directory mainDir = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(mainDir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer.UseCompoundFile = false;
    IndexReader reader = writer.GetReader(); // start pooling readers
    reader.Close();
    writer.MergeFactor = 2;
    writer.SetMaxBufferedDocs(10);

    RunThread[] indexThreads = new RunThread[4];
    for (int x = 0; x < indexThreads.Length; x++)
    {
        indexThreads[x] = new RunThread(this, x % 2, writer);
        indexThreads[x].Name = "Thread " + x;
        indexThreads[x].Start();
    }

    // Let the worker threads run for roughly five seconds.
    long startTime = (DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond);
    long duration = 5 * 1000;
    while (((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) - startTime) < duration)
    {
        System.Threading.Thread.Sleep(new System.TimeSpan((System.Int64) 10000 * 100));
    }

    // Signal every thread to stop, then Join() them all BEFORE touching
    // their addCount/delCount/ex fields. The previous code summed the
    // counters (and checked ex) while the threads could still be running,
    // racing with the threads' own updates to those fields.
    for (int x = 0; x < indexThreads.Length; x++)
    {
        indexThreads[x].run_Renamed_Field = false;
    }
    for (int x = 0; x < indexThreads.Length; x++)
    {
        indexThreads[x].Join();
    }

    int delCount = 0;
    int addCount = 0;
    for (int x = 0; x < indexThreads.Length; x++)
    {
        Assert.IsTrue(indexThreads[x].ex == null);
        addCount += indexThreads[x].addCount;
        delCount += indexThreads[x].delCount;
    }

    //System.out.println("addCount:"+addCount);
    //System.out.println("delCount:"+delCount);
    writer.Close();
    mainDir.Close();
}
示例14: TestNoOverMerge
public virtual void TestNoOverMerge()
{
    Directory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    writer.SetMaxBufferedDocs(10);
    writer.MergeFactor = 10;
    writer.SetMergePolicy(new LogDocMergePolicy(writer));

    // Track whether the writer ever accumulates >= 18 buffered docs plus
    // segments, i.e. it is not merging more eagerly than required.
    bool sawHighWaterMark = false;
    for (int docNo = 0; docNo < 100; docNo++)
    {
        AddDoc(writer);
        CheckInvariants(writer);
        bool atHighWaterMark = writer.GetNumBufferedDocuments() + writer.GetSegmentCount() >= 18;
        sawHighWaterMark = sawHighWaterMark || atHighWaterMark;
    }
    Assert.IsTrue(sawHighWaterMark);
    writer.Close();
}
示例15: TestDeleteAllNRT
public virtual void TestDeleteAllNRT()
{
    // DeleteAll followed by Rollback must restore the last committed
    // state: the seven documents committed before the delete.
    Directory dir = new MockRAMDirectory();
    IndexWriter modifier = new IndexWriter(dir, false, new WhitespaceAnalyzer(), true);
    modifier.SetMaxBufferedDocs(2);
    modifier.SetMaxBufferedDeleteTerms(2);

    int id = 0;
    int docValue = 100;
    for (int n = 0; n < 7; n++)
    {
        AddDoc(modifier, ++id, docValue);
    }
    modifier.Commit();

    IndexReader r = modifier.GetReader();
    Assert.AreEqual(7, r.NumDocs());
    r.Close();

    // Two more (uncommitted) docs before wiping everything.
    AddDoc(modifier, ++id, docValue);
    AddDoc(modifier, ++id, docValue);

    // Delete all: an NRT reader should immediately see zero docs.
    modifier.DeleteAll();
    r = modifier.GetReader();
    Assert.AreEqual(0, r.NumDocs());
    r.Close();

    // Roll back to the commit point and close the writer.
    modifier.Rollback();
    modifier.Close();

    // The seven committed documents must still be present.
    r = IndexReader.Open(dir);
    Assert.AreEqual(7, r.NumDocs());
    r.Close();
    dir.Close();
}