This article collects typical usage examples of the C# method Lucene.Net.Store.RAMDirectory.SizeInBytes. If you are wondering how to call Lucene.Net.Store.RAMDirectory.SizeInBytes in C#, or what it is typically used for, the hand-picked code examples below should help. You can also explore further usage examples of the containing class, Lucene.Net.Store.RAMDirectory.
Two code examples of Lucene.Net.Store.RAMDirectory.SizeInBytes are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C# code examples.
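Before the full test methods, here is a minimal sketch of the call itself: SizeInBytes() returns the total number of bytes currently occupied by all files held in the in-memory RAMDirectory. The snippet below is illustrative rather than taken from the examples that follow; it uses the same legacy Lucene.Net API seen in them, and dir, doc and bytesUsed are placeholder names.
// Minimal illustrative sketch (not from the examples below): index one document
// in a RAMDirectory and read back how many bytes the in-memory index occupies.
RAMDirectory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
Document doc = new Document();
doc.Add(new Field("content", "aaa", Field.Store.NO, Field.Index.TOKENIZED));
writer.AddDocument(doc);
writer.Close();
long bytesUsed = dir.SizeInBytes(); // total bytes held by all files in the directory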
Example 1: TestAddIndexOnDiskFull
public virtual void TestAddIndexOnDiskFull()
{
    int START_COUNT = 57;
    int NUM_DIR = 50;
    int END_COUNT = START_COUNT + NUM_DIR * 25;
    bool debug = false;
    // Build up a bunch of dirs that have indexes which we
    // will then merge together by calling addIndexes(*):
    Directory[] dirs = new Directory[NUM_DIR];
    long inputDiskUsage = 0;
    for (int i = 0; i < NUM_DIR; i++)
    {
        dirs[i] = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dirs[i], new WhitespaceAnalyzer(), true);
        for (int j = 0; j < 25; j++)
        {
            AddDocWithIndex(writer, 25 * i + j);
        }
        writer.Close();
        System.String[] files = dirs[i].List();
        for (int j = 0; j < files.Length; j++)
        {
            inputDiskUsage += dirs[i].FileLength(files[j]);
        }
    }
    // Now, build a starting index that has START_COUNT docs. We
    // will then try to addIndexes into a copy of this:
    RAMDirectory startDir = new RAMDirectory();
    IndexWriter writer2 = new IndexWriter(startDir, new WhitespaceAnalyzer(), true);
    for (int j = 0; j < START_COUNT; j++)
    {
        AddDocWithIndex(writer2, j);
    }
    writer2.Close();
    // Make sure starting index seems to be working properly:
    Term searchTerm = new Term("content", "aaa");
    IndexReader reader = IndexReader.Open(startDir);
    Assert.AreEqual(57, reader.DocFreq(searchTerm), "first docFreq");
    IndexSearcher searcher = new IndexSearcher(reader);
    Hits hits = searcher.Search(new TermQuery(searchTerm));
    Assert.AreEqual(57, hits.Length(), "first number of hits");
    searcher.Close();
    reader.Close();
    // Iterate with larger and larger amounts of free
    // disk space. With little free disk space,
    // addIndexes will certainly run out of space &
    // fail. Verify that when this happens, index is
    // not corrupt and index in fact has added no
    // documents. Then, we increase disk space by 2000
    // bytes each iteration. At some point there is
    // enough free disk space and addIndexes should
    // succeed and index should show all documents were
    // added.
    // String[] files = startDir.list();
    long diskUsage = startDir.SizeInBytes();
    long startDiskUsage = 0;
    System.String[] files2 = startDir.List();
    for (int i = 0; i < files2.Length; i++)
    {
        startDiskUsage += startDir.FileLength(files2[i]);
    }
    for (int iter = 0; iter < 6; iter++)
    {
        if (debug)
            System.Console.Out.WriteLine("TEST: iter=" + iter);
        // Start with 100 bytes more than we are currently using:
        long diskFree = diskUsage + 100;
        bool autoCommit = iter % 2 == 0;
        int method = iter / 2;
        bool success = false;
        bool done = false;
        System.String methodName;
        if (0 == method)
        {
            methodName = "addIndexes(Directory[])";
        }
        else if (1 == method)
        {
            methodName = "addIndexes(IndexReader[])";
        }
        else
        {
            methodName = "addIndexesNoOptimize(Directory[])";
        }
        while (!done)
            //......... the rest of this example is omitted here .........
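The omitted loop body builds on the setup above: it copies startDir into a MockRAMDirectory, caps the simulated disk with SetMaxSizeInBytes(diskFree), attempts the selected addIndexes variant and, per the comment in the method, grows the free space by 2000 bytes per iteration until the call succeeds. Below is a simplified, hedged sketch of that pattern, not the verbatim test body; writer3 is an illustrative name and the real control flow is more involved.
// Hedged sketch of the omitted disk-full loop (simplified; not the original code).
while (!done)
{
    MockRAMDirectory dir = new MockRAMDirectory(startDir); // fresh copy of the starting index
    dir.SetMaxSizeInBytes(diskFree);                        // simulate a nearly full disk
    IndexWriter writer3 = new IndexWriter(dir, new WhitespaceAnalyzer(), false);
    try
    {
        writer3.AddIndexes(dirs);   // expected to throw IOException while space is too small
        success = true;
        done = true;
    }
    catch (System.IO.IOException)
    {
        success = false;            // on failure the index must stay consistent and unchanged
    }
    writer3.Close();
    diskFree += 2000;               // grant more space and retry, as the comment above describes
}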
Example 2: TestOperationsOnDiskFull
/// <summary> Make sure if modifier tries to commit but hits disk full that modifier
/// remains consistent and usable. Similar to TestIndexReader.testDiskFull().
/// </summary>
private void TestOperationsOnDiskFull(bool updates)
{
    bool debug = false;
    Term searchTerm = new Term("content", "aaa");
    int START_COUNT = 157;
    int END_COUNT = 144;
    for (int pass = 0; pass < 2; pass++)
    {
        bool autoCommit = (0 == pass);
        // First build up a starting index:
        RAMDirectory startDir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(startDir, autoCommit, new WhitespaceAnalyzer(), true);
        for (int i = 0; i < 157; i++)
        {
            Document d = new Document();
            d.Add(new Field("id", System.Convert.ToString(i), Field.Store.YES, Field.Index.UN_TOKENIZED));
            d.Add(new Field("content", "aaa " + i, Field.Store.NO, Field.Index.TOKENIZED));
            writer.AddDocument(d);
        }
        writer.Close();
        long diskUsage = startDir.SizeInBytes();
        long diskFree = diskUsage + 10;
        System.IO.IOException err = null;
        bool done = false;
        // Iterate w/ ever increasing free disk space:
        while (!done)
        {
            MockRAMDirectory dir = new MockRAMDirectory(startDir);
            IndexWriter modifier = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer());
            modifier.SetMaxBufferedDocs(1000); // use flush or close
            modifier.SetMaxBufferedDeleteTerms(1000); // use flush or close
            // For each disk size, first try to commit against
            // dir that will hit random IOExceptions & disk
            // full; after, give it infinite disk space & turn
            // off random IOExceptions & retry w/ same reader:
            bool success = false;
            for (int x = 0; x < 2; x++)
            {
                double rate = 0.1;
                double diskRatio = ((double) diskFree) / diskUsage;
                long thisDiskFree;
                System.String testName;
                if (0 == x)
                {
                    thisDiskFree = diskFree;
                    if (diskRatio >= 2.0)
                    {
                        rate /= 2;
                    }
                    if (diskRatio >= 4.0)
                    {
                        rate /= 2;
                    }
                    if (diskRatio >= 6.0)
                    {
                        rate = 0.0;
                    }
                    if (debug)
                    {
                        System.Console.Out.WriteLine("\ncycle: " + diskFree + " bytes");
                    }
                    testName = "disk full during reader.close() @ " + thisDiskFree + " bytes";
                }
                else
                {
                    thisDiskFree = 0;
                    rate = 0.0;
                    if (debug)
                    {
                        System.Console.Out.WriteLine("\ncycle: same writer: unlimited disk space");
                    }
                    testName = "reader re-use after disk full";
                }
                dir.SetMaxSizeInBytes(thisDiskFree);
                dir.SetRandomIOExceptionRate(rate, diskFree);
                try
                {
                    if (0 == x)
                    {
                        int docId = 12;
                        for (int i = 0; i < 13; i++)
                        {
                            //......... the rest of this example is omitted here .........
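The omitted remainder of this example (judging from the method's comments and from Example 1) applies the deletes or updates through modifier, catches the injected System.IO.IOException when the simulated disk fills up, and then reopens the index to confirm it is still consistent. A minimal, hedged sketch of that verification step, reusing only reader calls already shown above; reader2 and hitCount are illustrative names.
// Hedged sketch (not the verbatim omitted code): verify the index is still readable
// after the simulated disk-full failure or the successful retry.
IndexReader reader2 = IndexReader.Open(dir);
int hitCount = reader2.DocFreq(searchTerm); // every document contains "aaa" in its content field
reader2.Close();
// The count is expected to be START_COUNT (157) if the changes were not committed,
// or END_COUNT (144) once the updates have been applied successfully.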