本文整理汇总了C#中Lucene.Net.Index.IndexWriter.SetMaxBufferedDocs方法的典型用法代码示例。如果您正苦于以下问题:C# Lucene.Net.Index.IndexWriter.SetMaxBufferedDocs方法的具体用法?C# Lucene.Net.Index.IndexWriter.SetMaxBufferedDocs怎么用?C# Lucene.Net.Index.IndexWriter.SetMaxBufferedDocs使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Lucene.Net.Index.IndexWriter
的用法示例。
在下文中一共展示了Lucene.Net.Index.IndexWriter.SetMaxBufferedDocs方法的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: TestFilteredSearch_Renamed
/// <summary>
/// Runs the filtered-search scenario twice: first with everything in a single
/// segment, then spread across several segments (forced by a small
/// max-buffered-docs value).
/// </summary>
public virtual void TestFilteredSearch_Renamed()
{
    RAMDirectory directory = new RAMDirectory();
    int[] filterBits = { 1, 36 };
    SimpleDocIdSetFilter filter = new SimpleDocIdSetFilter(filterBits);

    // Pass 1: single segment.
    bool enforceSingleSegment = true;
    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    SearchFiltered(writer, directory, filter, enforceSingleSegment);

    // Pass 2: multiple segments. The filter is stateful and must be reset
    // before it is reused.
    enforceSingleSegment = false;
    filter.Reset();
    writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    // 60 docs with a flush threshold of 10 yields 6 segments.
    writer.SetMaxBufferedDocs(10);
    SearchFiltered(writer, directory, filter, enforceSingleSegment);
}
示例2: RunTest
/// <summary>
/// Stress-tests SnapshotDeletionPolicy: a background thread indexes documents
/// for roughly 7 seconds while this thread repeatedly backs up the index, then
/// a final document forces one more commit and the directory is checked for
/// leftover (unreferenced) files.
/// </summary>
private void RunTest(Directory dir)
{
// Run for ~7 seconds
long stopTime = (DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) + 7000;
SnapshotDeletionPolicy dp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
IndexWriter writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp);
// Force frequent commits
writer.SetMaxBufferedDocs(2);
SupportClass.ThreadClass t = new AnonymousClassThread(stopTime, writer, this);
t.Start();
// While the above indexing thread is running, take many
// backups:
while ((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) < stopTime)
{
BackupIndex(dir, dp);
// Sleep 20 ms between backups (TimeSpan ticks are 100 ns, so
// 10,000 ticks = 1 ms).
System.Threading.Thread.Sleep(new System.TimeSpan((System.Int64) 10000 * 20));
if (!t.IsAlive)
break;
}
// Wait for the indexing thread to finish before touching the writer again.
t.Join();
// Add one more document to force writer to commit a
// final segment, so deletion policy has a chance to
// delete again:
Document doc = new Document();
doc.Add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.AddDocument(doc);
// Make sure we don't have any leftover files in the
// directory:
writer.Close();
TestIndexWriter.AssertNoUnreferencedFiles(dir, "some files were not deleted but should have been");
}
示例3: TestReuseAcrossWriters
/// <summary>
/// Verifies that files referenced by a SnapshotDeletionPolicy snapshot remain
/// copyable across multiple writer sessions, and become unavailable (the copy
/// throws IOException) once the snapshot is released and a new writer has run.
/// </summary>
public virtual void TestReuseAcrossWriters()
{
    Directory dir = new MockRAMDirectory();
    SnapshotDeletionPolicy dp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
    IndexWriter writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp);
    // Force frequent commits
    writer.SetMaxBufferedDocs(2);
    Document doc = new Document();
    doc.Add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
    for (int i = 0; i < 7; i++)
        writer.AddDocument(doc);
    IndexCommit cp = (IndexCommit) dp.Snapshot();
    // The snapshot must survive: while the writer is open, after it closes,
    // across a new writer session, and after that writer closes too.
    CopyFiles(dir, cp);
    writer.Close();
    CopyFiles(dir, cp);
    writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp);
    CopyFiles(dir, cp);
    for (int i = 0; i < 7; i++)
        writer.AddDocument(doc);
    CopyFiles(dir, cp);
    writer.Close();
    CopyFiles(dir, cp);
    // After Release(), the next writer session may delete the snapshot files.
    dp.Release();
    writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp);
    writer.Close();
    try
    {
        CopyFiles(dir, cp);
        Assert.Fail("did not hit expected IOException");
    }
    catch (System.IO.IOException) // FIX: unused variable 'ioe' removed (compiler warning CS0168)
    {
        // expected: the released snapshot's files were deleted
    }
    dir.Close();
}
示例4: BuildDir
/// <summary>
/// Populates <paramref name="dir"/> with nDocs documents, each carrying a
/// random number of fields ("f0", "f1", ...) filled with random words framed
/// by "^ " and " $" sentinels.
/// </summary>
internal virtual void BuildDir(Directory dir, int nDocs, int maxFields, int maxFieldLen)
{
    IndexWriter iw = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
    iw.SetMaxBufferedDocs(10);
    for (int docNum = 0; docNum < nDocs; docNum++)
    {
        Document document = new Document();
        int fieldCount = r.Next(maxFields);
        for (int fieldNum = 0; fieldNum < fieldCount; fieldNum++)
        {
            int targetLen = r.Next(maxFieldLen);
            System.Text.StringBuilder text = new System.Text.StringBuilder("^ ");
            while (text.Length < targetLen)
            {
                text.Append(' ');
                text.Append(words[r.Next(words.Length)]);
            }
            text.Append(" $");
            // Store/index mode is fixed for now; could be randomized later.
            document.Add(new Field("f" + fieldNum, text.ToString(), Field.Store.YES, Field.Index.TOKENIZED));
        }
        iw.AddDocument(document);
    }
    iw.Close();
}
示例5: TestSubclassConcurrentMergeScheduler
/// <summary>
/// Installs a custom ConcurrentMergeScheduler subclass on a writer whose
/// directory fails on merge, indexes enough documents to trigger merging, and
/// asserts that the subclass's thread-creation, merge, and exception hooks all
/// fired and that the scheduler recorded an unhandled exception.
/// </summary>
public virtual void TestSubclassConcurrentMergeScheduler()
{
    MockRAMDirectory dir = new MockRAMDirectory();
    dir.FailOn(new FailOnlyOnMerge());

    Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
    Document doc = new Document();
    doc.Add(idField);

    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    MyMergeScheduler ms = new MyMergeScheduler(this);
    writer.SetMergeScheduler(ms);
    // Flush every two docs and disable the RAM-size flush trigger so that
    // segment creation (and therefore merging) happens deterministically.
    writer.SetMaxBufferedDocs(2);
    writer.SetRAMBufferSizeMB(Lucene.Net.Index.IndexWriter.DISABLE_AUTO_FLUSH);

    for (int docCount = 0; docCount < 20; docCount++)
    {
        writer.AddDocument(doc);
    }
    ms.Sync();
    writer.Close();

    Assert.IsTrue(mergeThreadCreated);
    Assert.IsTrue(mergeCalled);
    Assert.IsTrue(excCalled);
    dir.Close();
    Assert.IsTrue(ConcurrentMergeScheduler.AnyUnhandledExceptions());
}
示例6: GetFullStrings
/// <summary>
/// Builds a RAMDirectory of NUM_STRINGS documents carrying two random sort
/// keys ("string", "string2") plus stored-only "tracer"/"tracer2" copies, and
/// returns a searcher over it. The flush threshold is re-randomized per
/// document so the index ends up with irregular segments.
/// </summary>
private IndexSearcher GetFullStrings()
{
    RAMDirectory indexStore = new RAMDirectory();
    IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    writer.SetMaxBufferedDocs(4);
    writer.SetMergeFactor(97);
    for (int docNum = 0; docNum < NUM_STRINGS; docNum++)
    {
        System.String sortKey = GetRandomCharString(GetRandomNumber(2, 8), 48, 52);
        System.String sortKey2 = GetRandomCharString(GetRandomNumber(1, 4), 48, 50);

        Document doc = new Document();
        // "tracer*" are stored-only copies used afterwards to verify order.
        doc.Add(new Field("tracer", sortKey, Field.Store.YES, Field.Index.NO));
        doc.Add(new Field("string", sortKey, Field.Store.NO, Field.Index.NOT_ANALYZED));
        doc.Add(new Field("string2", sortKey2, Field.Store.NO, Field.Index.NOT_ANALYZED));
        doc.Add(new Field("tracer2", sortKey2, Field.Store.YES, Field.Index.NO));
        doc.SetBoost(2); // produce some scores above 1.0
        // Vary the flush threshold before each add.
        writer.SetMaxBufferedDocs(GetRandomNumber(2, 12));
        writer.AddDocument(doc);
    }
    writer.Close();
    return new IndexSearcher(indexStore);
}
示例7: TestReuseAcrossWriters
/// <summary>
/// Verifies that files referenced by a SnapshotDeletionPolicy snapshot remain
/// copyable across intermediate commits and writer re-opens, and become
/// unavailable (FileNotFoundException on copy) once the snapshot is released
/// and a new writer session has run.
/// </summary>
public virtual void TestReuseAcrossWriters()
{
Directory dir = new MockRAMDirectory();
SnapshotDeletionPolicy dp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(Util.Version.LUCENE_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
// Force frequent flushes
writer.SetMaxBufferedDocs(2);
Document doc = new Document();
doc.Add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
// Index 7 docs, committing after every even iteration to create several
// commit points.
for (int i = 0; i < 7; i++)
{
writer.AddDocument(doc);
if (i % 2 == 0)
{
writer.Commit();
}
}
IndexCommit cp = dp.Snapshot();
// The snapshot must stay copyable: while the writer is open, after it
// closes, across a second writer session, and after that session closes.
CopyFiles(dir, cp);
writer.Close();
CopyFiles(dir, cp);
writer = new IndexWriter(dir, new StandardAnalyzer(Util.Version.LUCENE_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
CopyFiles(dir, cp);
for (int i = 0; i < 7; i++)
{
writer.AddDocument(doc);
if (i % 2 == 0)
{
writer.Commit();
}
}
CopyFiles(dir, cp);
writer.Close();
CopyFiles(dir, cp);
// After Release(), the next writer session may delete the snapshot files.
dp.Release();
writer = new IndexWriter(dir, new StandardAnalyzer(Util.Version.LUCENE_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
writer.Close();
Assert.Throws<System.IO.FileNotFoundException>(() => CopyFiles(dir, cp), "did not hit expected IOException");
dir.Close();
}
示例8: TestQuery
/// <summary>
/// Exercises MatchAllDocsQuery over a small multi-segment index: plain
/// scoring, norms-based scoring (including after a SetNorm change), use inside
/// BooleanQuery conjunctions (to trigger skipTo), behavior after a deletion,
/// and round-tripping through QueryParser via ToString().
/// </summary>
public virtual void TestQuery()
{
RAMDirectory dir = new RAMDirectory();
IndexWriter iw = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
iw.SetMaxBufferedDocs(2); // force multi-segment
AddDoc("one", iw, 1f);
AddDoc("two", iw, 20f);
AddDoc("three four", iw, 300f);
iw.Close();
IndexReader ir = IndexReader.Open(dir);
IndexSearcher is_Renamed = new IndexSearcher(ir);
ScoreDoc[] hits;
// assert with norms scoring turned off
hits = is_Renamed.Search(new MatchAllDocsQuery(), null, 1000).ScoreDocs;
Assert.AreEqual(3, hits.Length);
// Without norms, docs come back in index order.
Assert.AreEqual(ir.Document(hits[0].Doc).Get("key"), "one");
Assert.AreEqual(ir.Document(hits[1].Doc).Get("key"), "two");
Assert.AreEqual(ir.Document(hits[2].Doc).Get("key"), "three four");
// assert with norms scoring turned on
MatchAllDocsQuery normsQuery = new MatchAllDocsQuery("key");
hits = is_Renamed.Search(normsQuery, null, 1000).ScoreDocs;
Assert.AreEqual(3, hits.Length);
// With norms, the boosts set in AddDoc reorder the hits.
Assert.AreEqual(ir.Document(hits[0].Doc).Get("key"), "three four");
Assert.AreEqual(ir.Document(hits[1].Doc).Get("key"), "two");
Assert.AreEqual(ir.Document(hits[2].Doc).Get("key"), "one");
// change norm & retest
ir.SetNorm(0, "key", 400f);
normsQuery = new MatchAllDocsQuery("key");
hits = is_Renamed.Search(normsQuery, null, 1000).ScoreDocs;
Assert.AreEqual(3, hits.Length);
// Doc 0 ("one") now has the largest norm and ranks first.
Assert.AreEqual(ir.Document(hits[0].Doc).Get("key"), "one");
Assert.AreEqual(ir.Document(hits[1].Doc).Get("key"), "three four");
Assert.AreEqual(ir.Document(hits[2].Doc).Get("key"), "two");
// some artificial queries to trigger the use of skipTo():
BooleanQuery bq = new BooleanQuery();
bq.Add(new MatchAllDocsQuery(), BooleanClause.Occur.MUST);
bq.Add(new MatchAllDocsQuery(), BooleanClause.Occur.MUST);
hits = is_Renamed.Search(bq, null, 1000).ScoreDocs;
Assert.AreEqual(3, hits.Length);
bq = new BooleanQuery();
bq.Add(new MatchAllDocsQuery(), BooleanClause.Occur.MUST);
bq.Add(new TermQuery(new Term("key", "three")), BooleanClause.Occur.MUST);
hits = is_Renamed.Search(bq, null, 1000).ScoreDocs;
Assert.AreEqual(1, hits.Length);
// delete a document:
is_Renamed.GetIndexReader().DeleteDocument(0);
hits = is_Renamed.Search(new MatchAllDocsQuery(), null, 1000).ScoreDocs;
Assert.AreEqual(2, hits.Length);
// test parsable toString()
QueryParser qp = new QueryParser("key", analyzer);
hits = is_Renamed.Search(qp.Parse(new MatchAllDocsQuery().ToString()), null, 1000).ScoreDocs;
Assert.AreEqual(2, hits.Length);
// test parsable toString() with non default boost
Query maq = new MatchAllDocsQuery();
maq.SetBoost(2.3f);
Query pq = qp.Parse(maq.ToString());
hits = is_Renamed.Search(pq, null, 1000).ScoreDocs;
Assert.AreEqual(2, hits.Length);
is_Renamed.Close();
ir.Close();
dir.Close();
}
示例9: GetIndex
// create an index of all the documents, or just the x, or just the y documents
/// <summary>
/// Builds a RAMDirectory index from the shared <c>data</c> table and returns a
/// searcher with field-sort scoring enabled. Small flush threshold plus a
/// large merge factor yields a multi-segment index.
/// </summary>
/// <param name="even">include rows at even indices</param>
/// <param name="odd">include rows at odd indices</param>
private Searcher GetIndex(bool even, bool odd)
{
    // Columns 2..11 of each data row map one-to-one onto these typed sort
    // fields; a null column means the row has no value for that field.
    // (Replaces ten copy-pasted null-check/add blocks with one loop.)
    System.String[] sortFieldNames = { "int", "float", "string", "custom", "i18n", "long", "double", "short", "byte", "parser" };

    RAMDirectory indexStore = new RAMDirectory();
    IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    writer.SetMaxBufferedDocs(2);
    writer.SetMergeFactor(1000);
    for (int i = 0; i < data.Length; ++i)
    {
        if (((i % 2) == 0 && even) || ((i % 2) == 1 && odd))
        {
            Document doc = new Document();
            doc.Add(new Field("tracer", data[i][0], Field.Store.YES, Field.Index.NO));
            doc.Add(new Field("contents", data[i][1], Field.Store.NO, Field.Index.ANALYZED));
            for (int col = 0; col < sortFieldNames.Length; col++)
            {
                if (data[i][col + 2] != null)
                    doc.Add(new Field(sortFieldNames[col], data[i][col + 2], Field.Store.NO, Field.Index.NOT_ANALYZED));
            }
            doc.SetBoost(2); // produce some scores above 1.0
            writer.AddDocument(doc);
        }
    }
    //writer.optimize ();
    writer.Close();
    IndexSearcher s = new IndexSearcher(indexStore);
    s.SetDefaultFieldSortScoring(true, true);
    return s;
}
示例10: Create
private void Create()
{
// NOTE: put seed in here to make failures
// deterministic, but do not commit with a seed (to
// better test):
dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
writer.SetMaxBufferedDocs(17);
Document doc = new Document();
Document doc2 = new Document();
Field id = new Field("id", "", Field.Store.YES, Field.Index.NO);
doc.Add(id);
doc2.Add(id);
Field contents = new Field("contents", "", Field.Store.NO, Field.Index.ANALYZED);
doc.Add(contents);
doc2.Add(contents);
Field byteField = new Field("byte", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.Add(byteField);
doc2.Add(byteField);
Field shortField = new Field("short", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.Add(shortField);
doc2.Add(shortField);
Field intField = new Field("int", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.Add(intField);
doc2.Add(intField);
Field longField = new Field("long", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.Add(longField);
doc2.Add(longField);
Field floatField = new Field("float", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.Add(floatField);
doc2.Add(floatField);
Field doubleField = new Field("double", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.Add(doubleField);
doc2.Add(doubleField);
// we use two diff string fields so our FieldCache usage
// is less suspicious to cache inspection
Field stringField = new Field("string", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.Add(stringField);
Field stringFieldIdx = new Field("stringIdx", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.Add(stringFieldIdx);
// doc2 doesn't have stringField or stringFieldIdx, so we get nulls
for (int i = 0; i < NUM_DOCS; i++)
{
id.SetValue("" + i);
if (i % 1000 == 0)
{
contents.SetValue("a b c z");
}
else if (i % 100 == 0)
{
contents.SetValue("a b c y");
}
else if (i % 10 == 0)
{
contents.SetValue("a b c x");
}
else
{
contents.SetValue("a b c");
}
byteField.SetValue("" + NextInt((sbyte) System.SByte.MinValue, (sbyte) System.SByte.MaxValue));
if (NextInt(10) == 3)
{
shortField.SetValue("" + System.Int16.MinValue);
}
else if (NextInt(10) == 7)
{
shortField.SetValue("" + System.Int16.MaxValue);
}
else
{
shortField.SetValue("" + NextInt(System.Int16.MinValue, System.Int16.MaxValue));
}
if (NextInt(10) == 3)
{
intField.SetValue("" + System.Int32.MinValue);
}
else if (NextInt(10) == 7)
{
intField.SetValue("" + System.Int32.MaxValue);
}
else
{
intField.SetValue("" + this.r.Next());
}
if (NextInt(10) == 3)
//.........这里部分代码省略.........
示例11: IndexWriteHandler
/// <summary>
/// Index maintenance loop (original comments were mojibake; intended to run on
/// a dedicated background thread). Continuously drains the delete, update and
/// add queues into the shared IndexWriter, flushes buffered documents, and
/// optimizes the index after every maxBufferLength operations. Never returns.
/// </summary>
public void IndexWriteHandler()
{
// Open the index writer; create a fresh index only when no segments file
// exists yet.
// NOTE(review): the path is built by plain string concatenation -- confirm
// Directorys.IndexDirectory ends with a directory separator.
writer = new Lucene.Net.Index.IndexWriter(Directorys.IndexDirectory, new ThesaurusAnalyzer(), !File.Exists(Directorys.IndexDirectory + "segments.gen"));
// Cap the number of documents buffered in RAM before a flush.
writer.SetMaxBufferedDocs(maxBufferLength);
// Optimize once at startup.
writer.Optimize();
int count = 0;
// Main service loop.
// NOTE(review): the queues are consumed here with no visible locking --
// confirm they are synchronized by the producers elsewhere.
while (true)
{
// Drain pending deletes (bounded by maxBufferLength per cycle).
while (deleteQueue.Count > 0 && count < maxBufferLength)
{
count++;
writer.DeleteDocuments(deleteQueue.Dequeue());
}
// Drain pending updates: replace the document whose "id" term matches.
while (updateQueue.Count > 0 && count < maxBufferLength)
{
count++;
Lucene.Net.Documents.Document doc = updateQueue.Dequeue();
writer.UpdateDocument(new Lucene.Net.Index.Term("id", doc.Get("id")), doc);
}
// Drain pending adds.
while (addQueue.Count > 0 && count < maxBufferLength)
{
count++;
writer.AddDocument(addQueue.Dequeue());
}
// Push any documents still buffered in RAM out to the index.
if (writer.NumRamDocs() > 0)
{
writer.Flush();
}
// Once enough operations have accumulated, optimize and reset the
// counter; otherwise idle for 100 ms before polling the queues again.
if (count >= maxBufferLength)
{
writer.Optimize();
count = 0;
}
else
{
Thread.Sleep(100);
}
}
}
示例12: IndexDictionary
/// <summary> Index a Dictionary</summary>
/// <param name="dict">the dictionary to index</param>
/// <param name="mergeFactor">mergeFactor to use when indexing</param>
/// <param name="ramMB">the max amount of memory in MB to use</param>
/// <throws> IOException </throws>
/// <throws>AlreadyClosedException if the Spellchecker is already closed</throws>
public virtual void IndexDictionary(IDictionary dict, int mergeFactor, int ramMB)
{
    lock (modifyCurrentIndexLock)
    {
        EnsureOpen();
        Directory dir = this.spellindex;
        IndexWriter writer = new IndexWriter(spellindex, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
        writer.MergeFactor = mergeFactor;
        // FIX: ramMB is a memory budget in megabytes (see the doc comment),
        // so it must drive the RAM-based flush trigger. The previous code
        // passed it to SetMaxBufferedDocs, which interprets its argument as
        // a document count, not a size in MB.
        writer.SetRAMBufferSizeMB(ramMB);
        System.Collections.IEnumerator iter = dict.GetWordsIterator();
        while (iter.MoveNext())
        {
            System.String word = (System.String)iter.Current;
            int len = word.Length;
            if (len < 3)
            {
                continue; // too short we bail but "too long" is fine...
            }
            if (this.Exist(word))
            {
                // if the word already exist in the gramindex
                continue;
            }
            // ok index the word
            Document doc = CreateDocument(word, GetMin(len), GetMax(len));
            writer.AddDocument(doc);
        }
        // close writer
        writer.Optimize();
        writer.Close();
        // also re-open the spell index to see our own changes when the next suggestion
        // is fetched:
        SwapSearcher(dir);
    }
}
示例13: IndexDictionary
/// <summary> Index a Dictionary: adds an n-gram document for every word of
/// length >= 3 that is not already present in the spell index, then optimizes
/// and closes both the writer and the cached reader.</summary>
/// <param name="dict">the dictionary to index
/// </param>
/// <throws> IOException </throws>
public virtual void IndexDictionary(Dictionary dict)
{
    // Clear any stale write lock, then open the writer; a brand-new index is
    // created only when none exists yet.
    IndexReader.Unlock(spellindex);
    IndexWriter writer = new IndexWriter(spellindex, new WhitespaceAnalyzer(), !IndexReader.IndexExists(spellindex));
    writer.SetMergeFactor(300);
    writer.SetMaxBufferedDocs(150);

    System.Collections.IEnumerator words = dict.GetWordsIterator();
    while (words.MoveNext())
    {
        System.String word = (System.String) words.Current;
        int len = word.Length;
        if (len < 3)
        {
            // too short we bail but "too long" is fine...
            continue;
        }
        if (this.Exist(word))
        {
            // the word is already present in the gram index
            continue;
        }
        // index the word
        writer.AddDocument(CreateDocument(word, GetMin(len), GetMax(len)));
    }

    writer.Optimize();
    writer.Close();

    // Drop the now-stale reader so it gets reopened on next use.
    reader.Close();
    reader = null;
}
示例14: Index
/// <summary> Forms a Lucene index based on the 2 maps.
/// </summary>
/// <param name="indexDir">the directory where the index should be created
/// </param>
/// <param name="word2Nums">map from a word to its numbers (presumably synset ids -- confirm against caller)
/// </param>
/// <param name="num2Words">map from a number back to its words (presumably synset members -- confirm against caller)
/// </param>
private static void Index(System.String indexDir, System.Collections.IDictionary word2Nums, System.Collections.IDictionary num2Words)
{
int row = 0;
int mod = 1;
// override the specific index if it already exists
IndexWriter writer = new IndexWriter(indexDir, ana, true);
writer.SetUseCompoundFile(true); // why?
// blindly up these parameters for speed
writer.SetMergeFactor(writer.GetMergeFactor() * 2);
writer.SetMaxBufferedDocs(writer.GetMaxBufferedDocs() * 2);
System.Collections.IEnumerator i1 = word2Nums.Keys.GetEnumerator();
while (i1.MoveNext())
// for each word
{
System.String g = (System.String) i1.Current;
Document doc = new Document();
// Delegate per-word field population to the 4-arg overload; n is the
// number of entries it added.
int n = Index(word2Nums, num2Words, g, doc);
if (n > 0)
{
doc.Add(new Field(F_WORD, g, Field.Store.YES, Field.Index.UN_TOKENIZED));
// Progress report at exponentially growing intervals (mod doubles each
// time), so output volume stays logarithmic in the row count.
if ((++row % mod) == 0)
{
o.WriteLine("\trow=" + row + "/" + word2Nums.Count + " doc= " + doc);
mod *= 2;
}
writer.AddDocument(doc);
} // else degenerate
}
o.WriteLine("Optimizing..");
writer.Optimize();
writer.Close();
}