This article collects typical usage examples of the C# method Lucene.Net.Store.MockRAMDirectory.SetMaxSizeInBytes. If you are wondering what this method is for or how to call it, the curated examples below should help. You can also explore further usage examples of the containing class, Lucene.Net.Store.MockRAMDirectory.
The following presents 8 code examples of the Lucene.Net.Store.MockRAMDirectory.SetMaxSizeInBytes method.
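Before turning to the examples, here is a minimal, self-contained sketch of the pattern they all share: cap the directory with SetMaxSizeInBytes so the next flush behaves as if the disk were full, catch the resulting IOException, then pass 0 to lift the cap again. This is only an illustrative sketch assuming the Lucene.Net 2.x-era API used in the examples below (MockRAMDirectory, IndexWriter, WhitespaceAnalyzer); the class name DiskFullSketch and the method Run are hypothetical.
using Lucene.Net.Analysis;
using Lucene.Net.Documents;
using Lucene.Net.Index;
using Lucene.Net.Store;

public static class DiskFullSketch
{
    public static void Run()
    {
        MockRAMDirectory dir = new MockRAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
        writer.SetMaxBufferedDocs(2); // flush early so the size cap is hit quickly

        // Freeze the simulated disk at whatever the directory already occupies;
        // any further flush now needs more bytes than the cap allows.
        dir.SetMaxSizeInBytes(dir.GetRecomputedActualSizeInBytes());

        Document doc = new Document();
        doc.Add(new Field("content", "aaa bbb ccc ddd eee", Field.Store.NO, Field.Index.ANALYZED));
        try
        {
            for (int i = 0; i < 10; i++)
            {
                writer.AddDocument(doc); // expected to throw once the cap is exceeded
            }
        }
        catch (System.IO.IOException)
        {
            // Expected: the simulated disk is full.
        }

        // Passing 0 removes the limit ("unlimited disk space"), so the writer
        // can flush its buffered state and close cleanly.
        dir.SetMaxSizeInBytes(0);
        writer.Close();
        dir.Close();
    }
}
The tests below build on this same idea: a fixed cap (TestImmediateDiskFull), a cap that grows on each retry (TestDiskFull, TestAddDocumentOnDiskFull), or a cap combined with SetRandomIOExceptionRate to inject random write failures as well.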
Example 1: TestOperationsOnDiskFull
/// <summary> Make sure that if the modifier tries to commit but hits disk full, it
/// remains consistent and usable. Similar to TestIndexReader.testDiskFull().
/// </summary>
private void TestOperationsOnDiskFull(bool updates)
{
bool debug = false;
Term searchTerm = new Term("content", "aaa");
int START_COUNT = 157;
int END_COUNT = 144;
for (int pass = 0; pass < 2; pass++)
{
bool autoCommit = (0 == pass);
// First build up a starting index:
MockRAMDirectory startDir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(startDir, autoCommit, new WhitespaceAnalyzer(), true);
for (int i = 0; i < 157; i++)
{
Document d = new Document();
d.Add(new Field("id", System.Convert.ToString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
d.Add(new Field("content", "aaa " + i, Field.Store.NO, Field.Index.ANALYZED));
writer.AddDocument(d);
}
writer.Close();
long diskUsage = startDir.SizeInBytes();
long diskFree = diskUsage + 10;
System.IO.IOException err = null;
bool done = false;
// Iterate w/ ever increasing free disk space:
while (!done)
{
MockRAMDirectory dir = new MockRAMDirectory(startDir);
dir.SetPreventDoubleWrite(false);
IndexWriter modifier = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer());
modifier.SetMaxBufferedDocs(1000); // use flush or close
modifier.SetMaxBufferedDeleteTerms(1000); // use flush or close
// For each disk size, first try to commit against
// dir that will hit random IOExceptions & disk
// full; after, give it infinite disk space & turn
// off random IOExceptions & retry w/ same reader:
bool success = false;
for (int x = 0; x < 2; x++)
{
double rate = 0.1;
double diskRatio = ((double) diskFree) / diskUsage;
long thisDiskFree;
System.String testName;
if (0 == x)
{
thisDiskFree = diskFree;
if (diskRatio >= 2.0)
{
rate /= 2;
}
if (diskRatio >= 4.0)
{
rate /= 2;
}
if (diskRatio >= 6.0)
{
rate = 0.0;
}
if (debug)
{
System.Console.Out.WriteLine("\ncycle: " + diskFree + " bytes");
}
testName = "disk full during reader.close() @ " + thisDiskFree + " bytes";
}
else
{
thisDiskFree = 0;
rate = 0.0;
if (debug)
{
System.Console.Out.WriteLine("\ncycle: same writer: unlimited disk space");
}
testName = "reader re-use after disk full";
}
dir.SetMaxSizeInBytes(thisDiskFree);
dir.SetRandomIOExceptionRate(rate, diskFree);
try
{
if (0 == x)
{
int docId = 12;
for (int i = 0; i < 13; i++)
{
//......... part of the code is omitted here .........
Example 2: TestAddIndexOnDiskFull
//......... part of the code is omitted here .........
long thisDiskFree;
System.String testName = null;
if (0 == x)
{
thisDiskFree = diskFree;
if (diskRatio >= 2.0)
{
rate /= 2;
}
if (diskRatio >= 4.0)
{
rate /= 2;
}
if (diskRatio >= 6.0)
{
rate = 0.0;
}
if (debug)
testName = "disk full test " + methodName + " with disk full at " + diskFree + " bytes autoCommit=" + autoCommit;
}
else
{
thisDiskFree = 0;
rate = 0.0;
if (debug)
testName = "disk full test " + methodName + " with unlimited disk space autoCommit=" + autoCommit;
}
if (debug)
System.Console.Out.WriteLine("\ncycle: " + testName);
dir.SetMaxSizeInBytes(thisDiskFree);
dir.SetRandomIOExceptionRate(rate, diskFree);
try
{
if (0 == method)
{
writer2.AddIndexes(dirs);
}
else if (1 == method)
{
IndexReader[] readers = new IndexReader[dirs.Length];
for (int i = 0; i < dirs.Length; i++)
{
readers[i] = IndexReader.Open(dirs[i]);
}
try
{
writer2.AddIndexes(readers);
}
finally
{
for (int i = 0; i < dirs.Length; i++)
{
readers[i].Close();
}
}
}
else
{
writer2.AddIndexesNoOptimize(dirs);
}
Example 3: TestAddDocumentOnDiskFull
public virtual void TestAddDocumentOnDiskFull()
{
bool debug = false;
for (int pass = 0; pass < 3; pass++)
{
if (debug)
System.Console.Out.WriteLine("TEST: pass=" + pass);
bool autoCommit = pass == 0;
bool doAbort = pass == 2;
long diskFree = 200;
while (true)
{
if (debug)
System.Console.Out.WriteLine("TEST: cycle: diskFree=" + diskFree);
MockRAMDirectory dir = new MockRAMDirectory();
dir.SetMaxSizeInBytes(diskFree);
IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true);
MergeScheduler ms = writer.GetMergeScheduler();
if (ms is ConcurrentMergeScheduler)
// This test intentionally produces exceptions
// in the threads that CMS launches; we don't
// want to pollute test output with these.
((ConcurrentMergeScheduler)ms).SetSuppressExceptions_ForNUnitTest();
bool hitError = false;
try
{
for (int i = 0; i < 200; i++)
{
AddDoc(writer);
}
}
catch (System.IO.IOException e)
{
if (debug)
{
System.Console.Out.WriteLine("TEST: exception on addDoc");
System.Console.Out.WriteLine(e.StackTrace);
}
hitError = true;
}
if (hitError)
{
if (doAbort)
{
writer.Abort();
}
else
{
try
{
writer.Close();
}
catch (System.IO.IOException e)
{
if (debug)
{
System.Console.Out.WriteLine("TEST: exception on close");
System.Console.Out.WriteLine(e.StackTrace);
}
dir.SetMaxSizeInBytes(0);
writer.Close();
}
}
_TestUtil.SyncConcurrentMerges(ms);
AssertNoUnreferencedFiles(dir, "after disk full during addDocument with autoCommit=" + autoCommit);
// Make sure reader can open the index:
IndexReader.Open(dir).Close();
dir.Close();
// Now try again w/ more space:
diskFree += 500;
}
else
{
_TestUtil.SyncConcurrentMerges(writer);
dir.Close();
break;
}
}
}
}
Example 4: TestImmediateDiskFullWithThreads
public virtual void TestImmediateDiskFullWithThreads()
{
int NUM_THREADS = 3;
for (int iter = 0; iter < 10; iter++)
{
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer());
ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
// We expect disk full exceptions in the merge threads
cms.SetSuppressExceptions_ForNUnitTest();
writer.SetMergeScheduler(cms);
writer.SetMaxBufferedDocs(2);
writer.SetMergeFactor(4);
dir.SetMaxSizeInBytes(4 * 1024 + 20 * iter);
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
//bool diskFull = false;
for (int i = 0; i < NUM_THREADS; i++)
threads[i] = new IndexerThread(this, writer, true);
for (int i = 0; i < NUM_THREADS; i++)
threads[i].Start();
for (int i = 0; i < NUM_THREADS; i++)
{
while (true)
{
try
{
// Without fix for LUCENE-1130: one of the
// threads will hang
threads[i].Join();
break;
}
catch (System.Threading.ThreadInterruptedException)
{
SupportClass.ThreadClass.Current().Interrupt();
}
}
if (threads[i].IsAlive)
Assert.Fail("thread seems to be hung");
else
Assert.IsTrue(threads[i].error == null, "hit unexpected Throwable");
}
try
{
writer.Close(false);
}
catch (System.IO.IOException)
{
}
dir.Close();
}
}
Example 5: TestImmediateDiskFull
public virtual void TestImmediateDiskFull()
{
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer());
dir.SetMaxSizeInBytes(dir.GetRecomputedActualSizeInBytes());
writer.SetMaxBufferedDocs(2);
Document doc = new Document();
doc.Add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
try
{
writer.AddDocument(doc);
Assert.Fail("did not hit disk full");
}
catch (System.IO.IOException)
{
}
// Without fix for LUCENE-1130: this call will hang:
try
{
writer.AddDocument(doc);
Assert.Fail("did not hit disk full");
}
catch (System.IO.IOException)
{
}
try
{
writer.Close(false);
Assert.Fail("did not hit disk full");
}
catch (System.IO.IOException)
{
}
}
Example 6: TestDiskFull
public virtual void TestDiskFull()
{
bool debug = false;
Term searchTerm = new Term("content", "aaa");
int START_COUNT = 157;
int END_COUNT = 144;
// First build up a starting index:
RAMDirectory startDir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(startDir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < 157; i++)
{
Document d = new Document();
d.Add(new Field("id", System.Convert.ToString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
d.Add(new Field("content", "aaa " + i, Field.Store.NO, Field.Index.ANALYZED));
writer.AddDocument(d);
}
writer.Close();
long diskUsage = startDir.SizeInBytes();
long diskFree = diskUsage + 100;
System.IO.IOException err = null;
bool done = false;
// Iterate w/ ever increasing free disk space:
while (!done)
{
MockRAMDirectory dir = new MockRAMDirectory(startDir);
// If IndexReader hits disk full, it can write to
// the same files again.
dir.SetPreventDoubleWrite(false);
IndexReader reader = IndexReader.Open(dir, false);
// For each disk size, first try to commit against
// dir that will hit random IOExceptions & disk
// full; after, give it infinite disk space & turn
// off random IOExceptions & retry w/ same reader:
bool success = false;
for (int x = 0; x < 2; x++)
{
double rate = 0.05;
double diskRatio = ((double) diskFree) / diskUsage;
long thisDiskFree;
System.String testName;
if (0 == x)
{
thisDiskFree = diskFree;
if (diskRatio >= 2.0)
{
rate /= 2;
}
if (diskRatio >= 4.0)
{
rate /= 2;
}
if (diskRatio >= 6.0)
{
rate = 0.0;
}
if (debug)
{
System.Console.Out.WriteLine("\ncycle: " + diskFree + " bytes");
}
testName = "disk full during reader.close() @ " + thisDiskFree + " bytes";
}
else
{
thisDiskFree = 0;
rate = 0.0;
if (debug)
{
System.Console.Out.WriteLine("\ncycle: same writer: unlimited disk space");
}
testName = "reader re-use after disk full";
}
dir.SetMaxSizeInBytes(thisDiskFree);
dir.SetRandomIOExceptionRate(rate, diskFree);
try
{
if (0 == x)
{
int docId = 12;
for (int i = 0; i < 13; i++)
{
reader.DeleteDocument(docId);
reader.SetNorm(docId, "contents", (float) 2.0);
docId += 12;
}
}
reader.Close();
//......... part of the code is omitted here .........
Example 7: TestImmediateDiskFullWithThreads
public virtual void TestImmediateDiskFullWithThreads()
{
int NUM_THREADS = 3;
for (int iter = 0; iter < 10; iter++)
{
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer());
ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
// We expect disk full exceptions in the merge threads
cms.SetSuppressExceptions();
writer.SetMergeScheduler(cms);
writer.SetMaxBufferedDocs(2);
writer.SetMergeFactor(4);
dir.SetMaxSizeInBytes(4 * 1024 + 20 * iter);
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
for (int i = 0; i < NUM_THREADS; i++)
threads[i] = new IndexerThread(this, writer, true);
for (int i = 0; i < NUM_THREADS; i++)
threads[i].Start();
for (int i = 0; i < NUM_THREADS; i++)
{
// Without fix for LUCENE-1130: one of the
// threads will hang
threads[i].Join();
Assert.IsTrue(threads[i].error == null, "hit unexpected Throwable");
}
try
{
writer.Close(false);
}
catch (System.IO.IOException)
{
}
dir.Close();
}
}
Example 8: TestOperationsOnDiskFull
/// <summary> Make sure that if the modifier tries to commit but hits disk full, it
/// remains consistent and usable. Similar to TestIndexReader.testDiskFull().
/// </summary>
private void TestOperationsOnDiskFull(bool updates)
{
bool debug = false;
Term searchTerm = new Term("content", "aaa");
int START_COUNT = 157;
int END_COUNT = 144;
for (int pass = 0; pass < 2; pass++)
{
bool autoCommit = (0 == pass);
// First build up a starting index:
RAMDirectory startDir = new RAMDirectory();
IndexWriter writer = new IndexWriter(startDir, autoCommit, new WhitespaceAnalyzer(), true);
for (int i = 0; i < 157; i++)
{
Document d = new Document();
d.Add(new Field("id", System.Convert.ToString(i), Field.Store.YES, Field.Index.UN_TOKENIZED));
d.Add(new Field("content", "aaa " + i, Field.Store.NO, Field.Index.TOKENIZED));
writer.AddDocument(d);
}
writer.Close();
long diskUsage = startDir.SizeInBytes();
long diskFree = diskUsage + 10;
System.IO.IOException err = null;
bool done = false;
// Iterate w/ ever increasing free disk space:
while (!done)
{
MockRAMDirectory dir = new MockRAMDirectory(startDir);
IndexWriter modifier = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer());
modifier.SetMaxBufferedDocs(1000); // use flush or close
modifier.SetMaxBufferedDeleteTerms(1000); // use flush or close
// For each disk size, first try to commit against
// dir that will hit random IOExceptions & disk
// full; after, give it infinite disk space & turn
// off random IOExceptions & retry w/ same reader:
bool success = false;
for (int x = 0; x < 2; x++)
{
double rate = 0.1;
double diskRatio = ((double) diskFree) / diskUsage;
long thisDiskFree;
System.String testName;
if (0 == x)
{
thisDiskFree = diskFree;
if (diskRatio >= 2.0)
{
rate /= 2;
}
if (diskRatio >= 4.0)
{
rate /= 2;
}
if (diskRatio >= 6.0)
{
rate = 0.0;
}
if (debug)
{
System.Console.Out.WriteLine("\ncycle: " + diskFree + " bytes");
}
testName = "disk full during reader.close() @ " + thisDiskFree + " bytes";
}
else
{
thisDiskFree = 0;
rate = 0.0;
if (debug)
{
System.Console.Out.WriteLine("\ncycle: same writer: unlimited disk space");
}
testName = "reader re-use after disk full";
}
dir.SetMaxSizeInBytes(thisDiskFree);
dir.SetRandomIOExceptionRate(rate, diskFree);
try
{
if (0 == x)
{
int docId = 12;
for (int i = 0; i < 13; i++)
{
//......... part of the code is omitted here .........