This article collects typical usage examples of the C# method Lucene.Net.Store.RAMDirectory.ListAll. If you have been wondering what RAMDirectory.ListAll does, how to call it, or where to find working examples, the hand-picked code samples below should help. You can also explore the other members of the Lucene.Net.Store.RAMDirectory class for further usage examples.
The following shows 5 code examples of the Lucene.Net.Store.RAMDirectory.ListAll method, sorted by popularity by default.
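Before the examples, a minimal sketch of the method itself may help. ListAll() returns the names of every file currently held by the directory, and FileLength(name) gives each file's size in bytes; together they let you inspect the files that make up an in-memory index. The sketch below assumes the same Lucene.Net 2.9-era API that the examples on this page use; the class name ListAllSketch and the field name/value are illustrative placeholders, not taken from the examples.

using Lucene.Net.Analysis;
using Lucene.Net.Documents;
using Lucene.Net.Index;
using Lucene.Net.Store;

public static class ListAllSketch
{
    public static void Main()
    {
        // Build a tiny in-memory index with a single document:
        RAMDirectory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
        Document doc = new Document();
        doc.Add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
        writer.AddDocument(doc);
        writer.Close();

        // ListAll() enumerates the files that make up the index:
        foreach (string file in dir.ListAll())
        {
            System.Console.WriteLine(file + " (" + dir.FileLength(file) + " bytes)");
        }
        dir.Close();
    }
}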
Example 1: TestAddIndexOnDiskFull
public virtual void TestAddIndexOnDiskFull()
{
    int START_COUNT = 57;
    int NUM_DIR = 50;
    int END_COUNT = START_COUNT + NUM_DIR * 25;

    bool debug = false;

    // Build up a bunch of dirs that have indexes which we
    // will then merge together by calling addIndexes(*):
    Directory[] dirs = new Directory[NUM_DIR];
    long inputDiskUsage = 0;
    for (int i = 0; i < NUM_DIR; i++)
    {
        dirs[i] = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dirs[i], new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
        for (int j = 0; j < 25; j++)
        {
            AddDocWithIndex(writer, 25 * i + j);
        }
        writer.Close();
        System.String[] files = dirs[i].ListAll();
        for (int j = 0; j < files.Length; j++)
        {
            inputDiskUsage += dirs[i].FileLength(files[j]);
        }
    }

    // Now, build a starting index that has START_COUNT docs. We
    // will then try to addIndexes into a copy of this:
    RAMDirectory startDir = new RAMDirectory();
    IndexWriter writer2 = new IndexWriter(startDir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    for (int j = 0; j < START_COUNT; j++)
    {
        AddDocWithIndex(writer2, j);
    }
    writer2.Close();

    // Make sure the starting index seems to be working properly:
    Term searchTerm = new Term("content", "aaa");
    IndexReader reader = IndexReader.Open(startDir);
    Assert.AreEqual(57, reader.DocFreq(searchTerm), "first docFreq");

    IndexSearcher searcher = new IndexSearcher(reader);
    ScoreDoc[] hits = searcher.Search(new TermQuery(searchTerm), null, 1000).scoreDocs;
    Assert.AreEqual(57, hits.Length, "first number of hits");
    searcher.Close();
    reader.Close();

    // Iterate with larger and larger amounts of free
    // disk space. With little free disk space,
    // addIndexes will certainly run out of space &
    // fail. Verify that when this happens, the index is
    // not corrupt and in fact has added no
    // documents. Then, we increase disk space by 2000
    // bytes each iteration. At some point there is
    // enough free disk space; addIndexes should
    // succeed and the index should show all documents were
    // added.

    // String[] files = startDir.listAll();
    long diskUsage = startDir.SizeInBytes();

    long startDiskUsage = 0;
    System.String[] files2 = startDir.ListAll();
    for (int i = 0; i < files2.Length; i++)
    {
        startDiskUsage += startDir.FileLength(files2[i]);
    }

    for (int iter = 0; iter < 6; iter++)
    {
        if (debug)
            System.Console.Out.WriteLine("TEST: iter=" + iter);

        // Start with 100 bytes more than we are currently using:
        long diskFree = diskUsage + 100;

        bool autoCommit = iter % 2 == 0;
        int method = iter / 2;

        bool success = false;
        bool done = false;

        System.String methodName;
        if (0 == method)
        {
            methodName = "addIndexes(Directory[])";
        }
        else if (1 == method)
        {
            methodName = "addIndexes(IndexReader[])";
        }
        else
        {
            methodName = "addIndexesNoOptimize(Directory[])";
        }

        while (!done)
//......... the rest of this example is omitted .........
Example 2: TestSmallRAMBuffer
public virtual void TestSmallRAMBuffer()
{
    RAMDirectory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    writer.SetRAMBufferSizeMB(0.000001);
    int lastNumFile = dir.ListAll().Length;
    for (int j = 0; j < 9; j++)
    {
        Document doc = new Document();
        doc.Add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
        writer.AddDocument(doc);
        int numFile = dir.ListAll().Length;
        // Verify that with a tiny RAM buffer we see a new
        // segment after every doc:
        Assert.IsTrue(numFile > lastNumFile);
        lastNumFile = numFile;
    }
    writer.Close();
    dir.Close();
}
Example 3: TestKeepAllDeletionPolicy
public virtual void TestKeepAllDeletionPolicy()
{
    for (int pass = 0; pass < 4; pass++)
    {
        bool autoCommit = pass < 2;
        bool useCompoundFile = (pass % 2) > 0;

        // Never deletes a commit
        KeepAllDeletionPolicy policy = new KeepAllDeletionPolicy(this);

        Directory dir = new RAMDirectory();
        policy.dir = dir;

        IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
        writer.SetMaxBufferedDocs(10);
        writer.SetUseCompoundFile(useCompoundFile);
        writer.SetMergeScheduler(new SerialMergeScheduler());
        for (int i = 0; i < 107; i++)
        {
            AddDoc(writer);
            if (autoCommit && i % 10 == 0)
                writer.Commit();
        }
        writer.Close();

        writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy);
        writer.SetUseCompoundFile(useCompoundFile);
        writer.Optimize();
        writer.Close();

        Assert.AreEqual(2, policy.numOnInit);
        if (!autoCommit)
            // If we are not auto committing then there should
            // be exactly 2 commits (one per close above):
            Assert.AreEqual(2, policy.numOnCommit);

        // Test listCommits
        System.Collections.ICollection commits = IndexReader.ListCommits(dir);
        if (!autoCommit)
            // 1 from opening writer + 2 from closing writer
            Assert.AreEqual(3, commits.Count);
        else
            // 1 from opening writer + 2 from closing writer +
            // 11 from calling writer.Commit() explicitly above
            Assert.AreEqual(14, commits.Count);

        System.Collections.IEnumerator it = commits.GetEnumerator();
        // Make sure we can open a reader on each commit:
        while (it.MoveNext())
        {
            IndexCommit commit = (IndexCommit) it.Current;
            IndexReader r = IndexReader.Open(commit, null);
            r.Close();
        }

        // Simplistic check: just verify all segments_N's still
        // exist, and that we can open a reader on each:
        dir.DeleteFile(IndexFileNames.SEGMENTS_GEN);
        long gen = SegmentInfos.GetCurrentSegmentGeneration(dir);
        while (gen > 0)
        {
            IndexReader reader = IndexReader.Open(dir);
            reader.Close();
            dir.DeleteFile(IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen));
            gen--;

            if (gen > 0)
            {
                // Removing a commit point should have orphaned at
                // least one index file. Open & close a writer and
                // assert that it actually removed something:
                int preCount = dir.ListAll().Length;
                writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.LIMITED);
                writer.Close();
                int postCount = dir.ListAll().Length;
                Assert.IsTrue(postCount < preCount);
            }
        }

        dir.Close();
    }
}
Example 4: TestSimulatedCorruptIndex2
public virtual void TestSimulatedCorruptIndex2()
{
    Directory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);

    // add 100 documents
    for (int i = 0; i < 100; i++)
    {
        AddDoc(writer);
    }

    // close
    writer.Close();

    long gen = SegmentInfos.GetCurrentSegmentGeneration(dir);
    Assert.IsTrue(gen > 1, "segment generation should be > 1 but got " + gen);

    // Delete one compound-file segment to simulate corruption:
    System.String[] files = dir.ListAll();
    for (int i = 0; i < files.Length; i++)
    {
        if (files[i].EndsWith(".cfs"))
        {
            dir.DeleteFile(files[i]);
            break;
        }
    }

    IndexReader reader = null;
    try
    {
        reader = IndexReader.Open(dir);
        Assert.Fail("reader did not hit IOException on opening a corrupt index");
    }
    catch (System.Exception)
    {
        // expected: the index is corrupt
    }

    if (reader != null)
    {
        reader.Close();
    }
}
Example 5: TestDeleteLeftoverFiles
public virtual void TestDeleteLeftoverFiles()
{
    Directory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    writer.SetMaxBufferedDocs(10);
    int i;
    for (i = 0; i < 35; i++)
    {
        AddDoc(writer, i);
    }
    writer.SetUseCompoundFile(false);
    for (; i < 45; i++)
    {
        AddDoc(writer, i);
    }
    writer.Close();

    // Delete one doc so we get a .del file:
    IndexReader reader = IndexReader.Open(dir);
    Term searchTerm = new Term("id", "7");
    int delCount = reader.DeleteDocuments(searchTerm);
    Assert.AreEqual(1, delCount, "didn't delete the right number of documents");

    // Set one norm so we get a .s0 file:
    reader.SetNorm(21, "content", (float) 1.5);
    reader.Close();

    // Now, artificially create an extra .del file & extra
    // .s0 file:
    System.String[] files = dir.ListAll();

    /*
    for(int j=0;j<files.length;j++) {
        System.out.println(j + ": " + files[j]);
    }
    */

    // The numbering of fields can vary depending on which
    // JRE is in use. On some JREs we see content bound to
    // field 0; on others, field 1. So, here we have to
    // figure out which field number corresponds to
    // "content", and then set our expected file names below
    // accordingly:
    CompoundFileReader cfsReader = new CompoundFileReader(dir, "_2.cfs");
    FieldInfos fieldInfos = new FieldInfos(cfsReader, "_2.fnm");
    int contentFieldIndex = -1;
    for (i = 0; i < fieldInfos.Size(); i++)
    {
        FieldInfo fi = fieldInfos.FieldInfo(i);
        if (fi.name_ForNUnit.Equals("content"))
        {
            contentFieldIndex = i;
            break;
        }
    }
    cfsReader.Close();
    Assert.IsTrue(contentFieldIndex != -1, "could not locate the 'content' field number in the _2.cfs segment");

    System.String normSuffix = "s" + contentFieldIndex;

    // Create a bogus separate norms file for a
    // segment/field that actually has a separate norms file
    // already:
    CopyFile(dir, "_2_1." + normSuffix, "_2_2." + normSuffix);

    // Create a bogus separate norms file for a
    // segment/field that actually has a separate norms file
    // already, using the "not compound file" extension:
    CopyFile(dir, "_2_1." + normSuffix, "_2_2.f" + contentFieldIndex);

    // Create a bogus separate norms file for a
    // segment/field that does not have a separate norms
    // file already:
    CopyFile(dir, "_2_1." + normSuffix, "_1_1." + normSuffix);

    // Create a bogus separate norms file for a
    // segment/field that does not have a separate norms
    // file already, using the "not compound file" extension:
    CopyFile(dir, "_2_1." + normSuffix, "_1_1.f" + contentFieldIndex);

    // Create a bogus separate del file for a
    // segment that already has a separate del file:
    CopyFile(dir, "_0_1.del", "_0_2.del");

    // Create a bogus separate del file for a
    // segment that does not yet have a separate del file:
    CopyFile(dir, "_0_1.del", "_1_1.del");

    // Create a bogus separate del file for a
    // non-existent segment:
    CopyFile(dir, "_0_1.del", "_188_1.del");

    // Create a bogus segment file:
    CopyFile(dir, "_0.cfs", "_188.cfs");

    // Create a bogus fnm file when the CFS already exists:
    CopyFile(dir, "_0.cfs", "_0.fnm");
//......... the rest of this example is omitted .........