This article collects typical usage examples of the C# method Lucene.Net.Index.IndexWriter.SetMergeScheduler. If you have been wondering how to use IndexWriter.SetMergeScheduler in C#, what it is for, or what calling it looks like in practice, the hand-picked examples below should help. You can also read more about the containing class, Lucene.Net.Index.IndexWriter.
The following 15 code examples of IndexWriter.SetMergeScheduler are shown, sorted by popularity by default. You can vote up the examples you like or find useful; your votes help the system recommend better C# code samples.
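Before the individual examples, here is a minimal sketch of the basic call pattern. It is an illustrative sketch, not code from any of the projects below, and it assumes the legacy Lucene.Net 2.9/3.0-style API that these samples use (the RAMDirectory and WhitespaceAnalyzer are chosen only to keep it self-contained):

using Lucene.Net.Analysis;
using Lucene.Net.Documents;
using Lucene.Net.Index;
using Lucene.Net.Store;

public static class MergeSchedulerSketch
{
    public static void Run()
    {
        // In-memory directory and a trivial analyzer keep the sketch self-contained.
        Directory dir = new RAMDirectory();
        Analyzer analyzer = new WhitespaceAnalyzer();
        IndexWriter writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED);

        // Swap the default ConcurrentMergeScheduler (background merge threads)
        // for a SerialMergeScheduler, which runs merges on the indexing thread.
        writer.SetMergeScheduler(new SerialMergeScheduler());

        for (int i = 0; i < 50; i++)
        {
            Document doc = new Document();
            doc.Add(new Field("id", i.ToString(), Field.Store.YES, Field.Index.NOT_ANALYZED));
            writer.AddDocument(doc);
        }
        writer.Close();
    }
}

A SerialMergeScheduler makes merges deterministic, which is why the tests below often use it; ConcurrentMergeScheduler, the default, performs merges in background threads.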
Example 1: Initialize
public void Initialize()
{
    directory = FSDirectory.Open(new DirectoryInfo(path));
    if (IndexWriter.IsLocked(directory))
        IndexWriter.Unlock(directory);
    analyzer = new LowerCaseKeywordAnalyzer();
    writer = new IndexWriter(directory, analyzer, IndexWriter.MaxFieldLength.UNLIMITED);
    writer.SetMergeScheduler(new ErrorLoggingConcurrentMergeScheduler());
    currentIndexSearcherHolder.SetIndexSearcher(new IndexSearcher(directory, true));
}
Example 2: TestFlushExceptions
public virtual void TestFlushExceptions()
{
    MockRAMDirectory directory = new MockRAMDirectory();
    FailOnlyOnFlush failure = new FailOnlyOnFlush();
    directory.FailOn(failure);
    IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);
    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
    writer.SetMergeScheduler(cms);
    writer.SetMaxBufferedDocs(2);
    Document doc = new Document();
    Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
    doc.Add(idField);
    int extraCount = 0;
    for (int i = 0; i < 10; i++)
    {
        for (int j = 0; j < 20; j++)
        {
            idField.SetValue(System.Convert.ToString(i * 20 + j));
            writer.AddDocument(doc);
        }
        while (true)
        {
            // must cycle here because sometimes the merge flushes
            // the doc we just added and so there's nothing to
            // flush, and we don't hit the exception
            writer.AddDocument(doc);
            failure.SetDoFail();
            try
            {
                writer.Flush(true, false, true);
                if (failure.hitExc)
                    Assert.Fail("failed to hit IOException");
                extraCount++;
            }
            catch (System.IO.IOException ioe)
            {
                failure.ClearDoFail();
                break;
            }
        }
    }
    writer.Close();
    IndexReader reader = IndexReader.Open(directory, true);
    Assert.AreEqual(200 + extraCount, reader.NumDocs());
    reader.Close();
    directory.Close();
}
Example 3: Initialize
public void Initialize()
{
    if (System.IO.Directory.Exists(path) == false)
        System.IO.Directory.CreateDirectory(path);
    directory = FSDirectory.Open(new DirectoryInfo(path));
    if (IndexWriter.IsLocked(directory))
        IndexWriter.Unlock(directory);
    analyzer = new LowerCaseKeywordAnalyzer();
    snapshotter = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
    writer = new IndexWriter(directory, analyzer, snapshotter, IndexWriter.MaxFieldLength.UNLIMITED);
    writer.SetMergeScheduler(new ErrorLoggingConcurrentMergeScheduler());
    currentIndexSearcherHolder.SetIndexSearcher(new IndexSearcher(directory, true));
}
Example 4: TestFlushExceptions
public virtual void TestFlushExceptions()
{
    MockRAMDirectory directory = new MockRAMDirectory();
    FailOnlyOnFlush failure = new FailOnlyOnFlush();
    directory.FailOn(failure);
    IndexWriter writer = new IndexWriter(directory, true, ANALYZER, true);
    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
    writer.SetMergeScheduler(cms);
    writer.SetMaxBufferedDocs(2);
    Document doc = new Document();
    Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
    doc.Add(idField);
    for (int i = 0; i < 10; i++)
    {
        for (int j = 0; j < 20; j++)
        {
            idField.SetValue(System.Convert.ToString(i * 20 + j));
            writer.AddDocument(doc);
        }
        writer.AddDocument(doc);
        failure.SetDoFail();
        try
        {
            writer.Flush();
            Assert.Fail("failed to hit IOException");
        }
        catch (System.IO.IOException ioe)
        {
            failure.ClearDoFail();
        }
    }
    writer.Close();
    IndexReader reader = IndexReader.Open(directory);
    Assert.AreEqual(200, reader.NumDocs());
    reader.Close();
    directory.Close();
}
Example 5: TestExceptionDuringSync
public virtual void TestExceptionDuringSync()
{
    MockRAMDirectory dir = new MockRAMDirectory();
    FailOnlyInSync failure = new FailOnlyInSync();
    dir.FailOn(failure);
    IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer());
    failure.SetDoFail();
    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
    // We expect sync exceptions in the merge threads
    cms.SetSuppressExceptions();
    writer.SetMergeScheduler(cms);
    writer.SetMaxBufferedDocs(2);
    writer.SetMergeFactor(5);
    for (int i = 0; i < 23; i++)
        AddDoc(writer);
    cms.Sync();
    Assert.IsTrue(failure.didFail);
    failure.ClearDoFail();
    writer.Close();
    IndexReader reader = IndexReader.Open(dir);
    Assert.AreEqual(23, reader.NumDocs());
    reader.Close();
    dir.Close();
}
Example 6: OpenIndexOnStartup
private void OpenIndexOnStartup()
{
    analyzer = new LowerCaseKeywordAnalyzer();
    snapshotter = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
    bool resetTried = false;
    bool recoveryTried = false;
    while (true)
    {
        LuceneDirectory luceneDirectory = null;
        try
        {
            luceneDirectory = OpenOrCreateLuceneDirectory(indexDirectory);
            // Skip sanity test if we are running in memory. Index will not exist anyways.
            if (!configuration.RunInMemory && !IsIndexStateValid(luceneDirectory))
                throw new InvalidOperationException("Sanity check on the index failed.");
            directory = luceneDirectory;
            writer = new IndexWriter(directory, analyzer, snapshotter, IndexWriter.MaxFieldLength.UNLIMITED);
            writer.SetMergeScheduler(new ErrorLoggingConcurrentMergeScheduler());
            currentIndexSearcherHolder.SetIndexSearcher(new IndexSearcher(directory, true));
            break;
        }
        catch (Exception e)
        {
            if (resetTried)
                throw new InvalidOperationException("Could not open / create index for file system '" + name + "', reset already tried", e);
            if (recoveryTried == false && luceneDirectory != null)
            {
                recoveryTried = true;
                StartupLog.WarnException("Could not open index for file system '" + name + "'. Trying to recover index", e);
                StartupLog.Info("Recover functionality is still not implemented. Skipping.");
            }
            else
            {
                resetTried = true;
                StartupLog.WarnException("Could not open index for file system '" + name + "'. Recovery operation failed, forcibly resetting index", e);
                TryResettingIndex();
            }
        }
    }
}
Example 7: TestNoExtraFiles
public virtual void TestNoExtraFiles()
{
    RAMDirectory directory = new MockRAMDirectory();
    for (int pass = 0; pass < 2; pass++)
    {
        bool autoCommit = pass == 0;
        IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, true);
        for (int iter = 0; iter < 7; iter++)
        {
            ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
            writer.SetMergeScheduler(cms);
            writer.SetMaxBufferedDocs(2);
            for (int j = 0; j < 21; j++)
            {
                Document doc = new Document();
                doc.Add(new Field("content", "a b c", Field.Store.NO, Field.Index.ANALYZED));
                writer.AddDocument(doc);
            }
            writer.Close();
            TestIndexWriter.AssertNoUnreferencedFiles(directory, "testNoExtraFiles autoCommit=" + autoCommit);
            // Reopen
            writer = new IndexWriter(directory, autoCommit, ANALYZER, false);
        }
        writer.Close();
    }
    directory.Close();
}
Example 8: CreateIndexWriter
private void CreateIndexWriter()
{
    snapshotter = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
    indexWriter = new IndexWriter(directory, stopAnalyzer, snapshotter, IndexWriter.MaxFieldLength.UNLIMITED);
    // Dispose the merge scheduler the writer created by default before swapping in our own
    using (indexWriter.MergeScheduler) { }
    indexWriter.SetMergeScheduler(new ErrorLoggingConcurrentMergeScheduler());
    // RavenDB already manages the memory for those, no need for Lucene to do this as well
    indexWriter.SetMaxBufferedDocs(IndexWriter.DISABLE_AUTO_FLUSH);
    indexWriter.SetRAMBufferSizeMB(1024);
}
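A note on this example, which appears to come from RavenDB: SetMaxBufferedDocs(IndexWriter.DISABLE_AUTO_FLUSH) turns off flushing by document count, and the 1024 MB RAM buffer raises the size-based flush threshold so high that, in practice, the host application decides when segments get written, which matches the inline comment about RavenDB managing that memory itself.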
Example 9: TestTermVectorCorruption2
public virtual void TestTermVectorCorruption2()
{
    Directory dir = new MockRAMDirectory();
    for (int iter = 0; iter < 4; iter++)
    {
        bool autoCommit = 1 == iter / 2;
        IndexWriter writer = new IndexWriter(dir, autoCommit, new StandardAnalyzer());
        writer.SetMaxBufferedDocs(2);
        writer.SetRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
        writer.SetMergeScheduler(new SerialMergeScheduler());
        writer.SetMergePolicy(new LogDocMergePolicy());
        Document document = new Document();
        Field storedField = new Field("stored", "stored", Field.Store.YES, Field.Index.NO);
        document.Add(storedField);
        writer.AddDocument(document);
        writer.AddDocument(document);
        document = new Document();
        document.Add(storedField);
        Field termVectorField = new Field("termVector", "termVector", Field.Store.NO, Field.Index.UN_TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
        document.Add(termVectorField);
        writer.AddDocument(document);
        writer.Optimize();
        writer.Close();
        IndexReader reader = IndexReader.Open(dir);
        Assert.IsTrue(reader.GetTermFreqVectors(0) == null);
        Assert.IsTrue(reader.GetTermFreqVectors(1) == null);
        Assert.IsTrue(reader.GetTermFreqVectors(2) != null);
        reader.Close();
    }
    dir.Close();
}
Example 10: runTest
public virtual void runTest(Directory directory, bool autoCommit, MergeScheduler merger)
{
    IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, true);
    writer.SetMaxBufferedDocs(2);
    if (merger != null)
        writer.SetMergeScheduler(merger);
    for (int iter = 0; iter < NUM_ITER; iter++)
    {
        int iterFinal = iter;
        writer.SetMergeFactor(1000);
        for (int i = 0; i < 200; i++)
        {
            Document d = new Document();
            d.Add(new Field("id", System.Convert.ToString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
            d.Add(new Field("contents", English.IntToEnglish(i), Field.Store.NO, Field.Index.ANALYZED));
            writer.AddDocument(d);
        }
        writer.SetMergeFactor(4);
        //writer.setInfoStream(System.out);
        int docCount = writer.DocCount();
        SupportClass.ThreadClass[] threads = new SupportClass.ThreadClass[NUM_THREADS];
        for (int i = 0; i < NUM_THREADS; i++)
        {
            int iFinal = i;
            IndexWriter writerFinal = writer;
            threads[i] = new AnonymousClassThread(writerFinal, iFinal, iterFinal, this);
        }
        for (int i = 0; i < NUM_THREADS; i++)
            threads[i].Start();
        for (int i = 0; i < NUM_THREADS; i++)
            threads[i].Join();
        Assert.IsTrue(!failed);
        int expectedDocCount = (int) ((1 + iter) * (200 + 8 * NUM_ITER2 * (NUM_THREADS / 2.0) * (1 + NUM_THREADS)));
        // System.out.println("TEST: now index=" + writer.segString());
        Assert.AreEqual(expectedDocCount, writer.DocCount());
        if (!autoCommit)
        {
            writer.Close();
            writer = new IndexWriter(directory, autoCommit, ANALYZER, false);
            writer.SetMaxBufferedDocs(2);
        }
        IndexReader reader = IndexReader.Open(directory);
        Assert.IsTrue(reader.IsOptimized());
        Assert.AreEqual(expectedDocCount, reader.NumDocs());
        reader.Close();
    }
    writer.Close();
}
Example 11: RunStressTest
/*
Run one indexer and 2 searchers against single index as
stress test.
*/
public virtual void RunStressTest(Directory directory, bool autoCommit, MergeScheduler mergeScheduler)
{
    IndexWriter modifier = new IndexWriter(directory, autoCommit, ANALYZER, true);
    modifier.SetMaxBufferedDocs(10);
    TimedThread[] threads = new TimedThread[4];
    if (mergeScheduler != null)
        modifier.SetMergeScheduler(mergeScheduler);
    // One modifier that writes 10 docs then removes 5, over
    // and over:
    IndexerThread indexerThread = new IndexerThread(modifier, threads);
    threads[0] = indexerThread;
    indexerThread.Start();
    IndexerThread indexerThread2 = new IndexerThread(modifier, threads);
    threads[1] = indexerThread2;
    indexerThread2.Start();
    // Two searchers that constantly just re-instantiate the
    // searcher:
    SearcherThread searcherThread1 = new SearcherThread(directory, threads);
    threads[2] = searcherThread1;
    searcherThread1.Start();
    SearcherThread searcherThread2 = new SearcherThread(directory, threads);
    threads[3] = searcherThread2;
    searcherThread2.Start();
    indexerThread.Join();
    indexerThread2.Join();
    searcherThread1.Join();
    searcherThread2.Join();
    modifier.Close();
    Assert.IsTrue(!indexerThread.failed, "hit unexpected exception in indexer");
    Assert.IsTrue(!indexerThread2.failed, "hit unexpected exception in indexer2");
    Assert.IsTrue(!searcherThread1.failed, "hit unexpected exception in search1");
    Assert.IsTrue(!searcherThread2.failed, "hit unexpected exception in search2");
    //System.out.println(" Writer: " + indexerThread.count + " iterations");
    //System.out.println("Searcher 1: " + searcherThread1.count + " searchers created");
    //System.out.println("Searcher 2: " + searcherThread2.count + " searchers created");
}
Example 12: RunStressTest
/*
Run one indexer and 2 searchers against single index as
stress test.
*/
public virtual void RunStressTest(Directory directory, bool autoCommit, MergeScheduler mergeScheduler)
{
    IndexWriter modifier = new IndexWriter(directory, autoCommit, ANALYZER, true);
    modifier.SetMaxBufferedDocs(10);
    TimedThread[] threads = new TimedThread[4];
    int numThread = 0;
    if (mergeScheduler != null)
        modifier.SetMergeScheduler(mergeScheduler);
    // One modifier that writes 10 docs then removes 5, over
    // and over:
    IndexerThread indexerThread = new IndexerThread(this, modifier, threads);
    threads[numThread++] = indexerThread;
    indexerThread.Start();
    IndexerThread indexerThread2 = new IndexerThread(this, modifier, threads);
    threads[numThread++] = indexerThread2;
    indexerThread2.Start();
    // Two searchers that constantly just re-instantiate the
    // searcher:
    SearcherThread searcherThread1 = new SearcherThread(directory, threads);
    threads[numThread++] = searcherThread1;
    searcherThread1.Start();
    SearcherThread searcherThread2 = new SearcherThread(directory, threads);
    threads[numThread++] = searcherThread2;
    searcherThread2.Start();
    for (int i = 0; i < numThread; i++)
        threads[i].Join();
    modifier.Close();
    for (int i = 0; i < numThread; i++)
        Assert.IsTrue(!((TimedThread) threads[i]).failed);
    //System.out.println(" Writer: " + indexerThread.count + " iterations");
    //System.out.println("Searcher 1: " + searcherThread1.count + " searchers created");
    //System.out.println("Searcher 2: " + searcherThread2.count + " searchers created");
}
Example 13: TestNoExtraFiles
public virtual void TestNoExtraFiles()
{
    RAMDirectory directory = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);
    for (int iter = 0; iter < 7; iter++)
    {
        ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
        writer.SetMergeScheduler(cms);
        writer.SetMaxBufferedDocs(2);
        for (int j = 0; j < 21; j++)
        {
            Document doc = new Document();
            doc.Add(new Field("content", "a b c", Field.Store.NO, Field.Index.ANALYZED));
            writer.AddDocument(doc);
        }
        writer.Close();
        TestIndexWriter.AssertNoUnreferencedFiles(directory, "testNoExtraFiles");
        // Reopen
        writer = new IndexWriter(directory, ANALYZER, false, IndexWriter.MaxFieldLength.UNLIMITED);
    }
    writer.Close();
    directory.Close();
}
Example 14: TestMergeCompressedFields
public virtual void TestMergeCompressedFields()
{
    System.IO.FileInfo indexDir = new System.IO.FileInfo(System.IO.Path.Combine(SupportClass.AppSettings.Get("tempDir", ""), "mergecompressedfields"));
    Directory dir = FSDirectory.Open(indexDir);
    try
    {
        for (int i = 0; i < 5; i++)
        {
            // Must make a new writer & doc each time, w/
            // different fields, so bulk merge of stored fields
            // cannot run:
            IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), i == 0, IndexWriter.MaxFieldLength.UNLIMITED);
            try
            {
                w.SetMergeFactor(5);
                w.SetMergeScheduler(new SerialMergeScheduler());
                Document doc = new Document();
                doc.Add(new Field("test1", "this is some data that will be compressed this this this", Field.Store.COMPRESS, Field.Index.NO));
                doc.Add(new Field("test2", new byte[20], Field.Store.COMPRESS));
                doc.Add(new Field("field" + i, "random field", Field.Store.NO, Field.Index.ANALYZED));
                w.AddDocument(doc);
            }
            finally
            {
                w.Close();
            }
        }
        byte[] cmp = new byte[20];
        IndexReader r = IndexReader.Open(dir);
        try
        {
            for (int i = 0; i < 5; i++)
            {
                Document doc = r.Document(i);
                Assert.AreEqual(doc.GetField("test1").StringValue(), "this is some data that will be compressed this this this");
                byte[] b = doc.GetField("test2").BinaryValue();
                Assert.AreEqual(b.Length, cmp.Length);
                for (int j = 0; j < b.Length; j++)
                    Assert.AreEqual(b[j], cmp[j]);
            }
        }
        finally
        {
            r.Close();
        }
    }
    finally
    {
        dir.Close();
        _TestUtil.RmDir(indexDir);
    }
}
Example 15: TestTermVectorCorruption
public virtual void TestTermVectorCorruption()
{
    Directory dir = new MockRAMDirectory();
    for (int iter = 0; iter < 4; iter++)
    {
        bool autoCommit = 1 == iter / 2;
        IndexWriter writer = new IndexWriter(dir, autoCommit, new StandardAnalyzer());
        writer.SetMaxBufferedDocs(2);
        writer.SetRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
        writer.SetMergeScheduler(new SerialMergeScheduler());
        writer.SetMergePolicy(new LogDocMergePolicy(writer));
        Document document = new Document();
        Field storedField = new Field("stored", "stored", Field.Store.YES, Field.Index.NO);
        document.Add(storedField);
        writer.AddDocument(document);
        writer.AddDocument(document);
        document = new Document();
        document.Add(storedField);
        Field termVectorField = new Field("termVector", "termVector", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
        document.Add(termVectorField);
        writer.AddDocument(document);
        writer.Optimize();
        writer.Close();
        IndexReader reader = IndexReader.Open(dir);
        for (int i = 0; i < reader.NumDocs(); i++)
        {
            reader.Document(i);
            reader.GetTermFreqVectors(i);
        }
        reader.Close();
        writer = new IndexWriter(dir, autoCommit, new StandardAnalyzer());
        writer.SetMaxBufferedDocs(2);
        writer.SetRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
        writer.SetMergeScheduler(new SerialMergeScheduler());
        writer.SetMergePolicy(new LogDocMergePolicy(writer));
        Directory[] indexDirs = new Directory[] { new MockRAMDirectory(dir) };
        writer.AddIndexes(indexDirs);
        writer.Close();
    }
    dir.Close();
}