本文整理汇总了C#中Lucene.Net.Index.SegmentInfos.Size方法的典型用法代码示例。如果您正苦于以下问题:C# SegmentInfos.Size方法的具体用法?C# SegmentInfos.Size怎么用?C# SegmentInfos.Size使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Lucene.Net.Index.SegmentInfos的用法示例。
在下文中一共展示了SegmentInfos.Size方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: DoBody
// Reads the SegmentInfos from the given segments file and opens one
// SegmentReader per segment, returning a StandardDirectoryReader over all of
// them. If any segment fails to open, the readers opened so far are closed
// before the exception propagates.
protected internal override object DoBody(string segmentFileName)
{
var sis = new SegmentInfos();
sis.Read(directory, segmentFileName);
var readers = new SegmentReader[sis.Size()];
// Open in reverse order so that on failure 'readers' contains exactly the
// readers opened so far (slots below i are still null and are skipped by
// the close helper).
for (int i = sis.Size() - 1; i >= 0; i--)
{
System.IO.IOException prior = null;
bool success = false;
try
{
readers[i] = new SegmentReader(sis.Info(i), termInfosIndexDivisor, IOContext.READ);
success = true;
}
catch (System.IO.IOException ex)
{
prior = ex;
}
finally
{
if (!success)
{
// Close whatever was opened; NOTE(review): this assumes
// CloseWhileHandlingException rethrows 'prior' (and hence the loop
// does not continue after a failure) — confirm against IOUtils.
IOUtils.CloseWhileHandlingException(prior, readers);
}
}
}
return new StandardDirectoryReader(directory, readers, null, sis, termInfosIndexDivisor, false);
}
示例2: TestAllSegmentsLarge
/// <summary>
/// Verifies that when every segment exceeds MaxMergeDocs, ForceMerge(1)
/// leaves all of them untouched: none is eligible to participate in a merge.
/// </summary>
public virtual void TestAllSegmentsLarge()
{
    Directory dir = new RAMDirectory();
    IndexWriterConfig conf = NewWriterConfig();
    IndexWriter writer = new IndexWriter(dir, conf);

    // Create three segments of 3 docs each.
    AddDocs(writer, 3);
    AddDocs(writer, 3);
    AddDocs(writer, 3);
    writer.Dispose();

    conf = NewWriterConfig();
    LogMergePolicy lmp = new LogDocMergePolicy();
    lmp.MaxMergeDocs = 2; // every 3-doc segment is "too large" to merge
    conf.SetMergePolicy(lmp);
    writer = new IndexWriter(dir, conf);
    writer.ForceMerge(1);
    writer.Dispose();

    SegmentInfos sis = new SegmentInfos();
    sis.Read(dir);
    Assert.AreEqual(3, sis.Size());

    dir.Dispose(); // FIX: release the directory (was leaked)
}
示例3: TestAddIndexes
/// <summary>
/// Checks that AddIndexes keeps the field numbering of the source index:
/// the copied-in segment keeps its own field ordering (f2, f1, f3, f4)
/// rather than adopting the target index's (f1, f2).
/// </summary>
public virtual void TestAddIndexes()
{
    Directory dir1 = NewDirectory();
    Directory dir2 = NewDirectory();

    // Index 1: one document declaring f1 then f2.
    var w = new IndexWriter(dir1, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(NoMergePolicy.COMPOUND_FILES));
    var docA = new Document();
    docA.Add(new TextField("f1", "first field", Field.Store.YES));
    docA.Add(new TextField("f2", "second field", Field.Store.YES));
    w.AddDocument(docA);
    w.Dispose();

    // Index 2: one document declaring f2, f1 (with term vectors), f3, f4.
    w = new IndexWriter(dir2, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(NoMergePolicy.COMPOUND_FILES));
    var docB = new Document();
    var vectorType = new FieldType(TextField.TYPE_STORED);
    vectorType.StoreTermVectors = true;
    docB.Add(new TextField("f2", "second field", Field.Store.YES));
    docB.Add(new Field("f1", "first field", vectorType));
    docB.Add(new TextField("f3", "third field", Field.Store.YES));
    docB.Add(new TextField("f4", "fourth field", Field.Store.YES));
    w.AddDocument(docB);
    w.Dispose();

    // Pull index 2 into index 1 as an "external" segment.
    w = new IndexWriter(dir1, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(NoMergePolicy.COMPOUND_FILES));
    w.AddIndexes(dir2);
    w.Dispose();

    var infos = new SegmentInfos();
    infos.Read(dir1);
    Assert.AreEqual(2, infos.Size());

    FieldInfos fis1 = SegmentReader.ReadFieldInfos(infos.Info(0));
    FieldInfos fis2 = SegmentReader.ReadFieldInfos(infos.Info(1));
    Assert.AreEqual("f1", fis1.FieldInfo(0).Name);
    Assert.AreEqual("f2", fis1.FieldInfo(1).Name);
    // make sure the ordering of the "external" segment is preserved
    Assert.AreEqual("f2", fis2.FieldInfo(0).Name);
    Assert.AreEqual("f1", fis2.FieldInfo(1).Name);
    Assert.AreEqual("f3", fis2.FieldInfo(2).Name);
    Assert.AreEqual("f4", fis2.FieldInfo(3).Name);

    dir1.Dispose();
    dir2.Dispose();
}
示例4: TestPartialMerge
/// <summary>
/// Exercises ForceMerge(3) across indexes of varying sizes: when the index
/// already has fewer than 3 segments the count must be unchanged; otherwise
/// it must end up at exactly 3.
/// </summary>
public virtual void TestPartialMerge()
{
    Directory dir = NewDirectory();

    Document doc = new Document();
    doc.Add(NewStringField("content", "aaa", Field.Store.NO));

    int minStep = TEST_NIGHTLY ? 15 : 40;
    for (int numDocs = 10; numDocs < 500; numDocs += TestUtil.NextInt(Random(), minStep, 5 * minStep))
    {
        // Build an index out of many tiny segments.
        var policy = new LogDocMergePolicy();
        policy.MinMergeDocs = 1;
        policy.MergeFactor = 5;
        var w = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode_e.CREATE).SetMaxBufferedDocs(2).SetMergePolicy(policy));
        for (int i = 0; i < numDocs; i++)
        {
            w.AddDocument(doc);
        }
        w.Dispose();

        var before = new SegmentInfos();
        before.Read(dir);
        int segCountBefore = before.Size();

        // Partially merge down to at most 3 segments.
        policy = new LogDocMergePolicy();
        policy.MergeFactor = 5;
        w = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(policy));
        w.ForceMerge(3);
        w.Dispose();

        var after = new SegmentInfos();
        after.Read(dir);
        int segCountAfter = after.Size();

        if (segCountBefore < 3)
        {
            Assert.AreEqual(segCountBefore, segCountAfter);
        }
        else
        {
            Assert.AreEqual(3, segCountAfter);
        }
    }
    dir.Dispose();
}
示例5: TestBackgroundForceMerge
/// <summary>
/// Runs ForceMerge(1, doWait: false) in the background. Pass 0 verifies the
/// merge completes to a single segment by the time the writer closes; pass 1
/// flushes two more documents after starting the merge and verifies they are
/// NOT swept into it (two segments remain).
/// </summary>
public virtual void TestBackgroundForceMerge()
{
    Directory dir = NewDirectory();
    for (int pass = 0; pass < 2; pass++)
    {
        var w = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode_e.CREATE).SetMaxBufferedDocs(2).SetMergePolicy(NewLogMergePolicy(51)));
        var doc = new Document();
        doc.Add(NewStringField("field", "aaa", Field.Store.NO));
        for (int i = 0; i < 100; i++)
        {
            w.AddDocument(doc);
        }

        // Kick off the merge without waiting for it.
        w.ForceMerge(1, false);

        if (pass == 0)
        {
            w.Dispose();
            DirectoryReader r = DirectoryReader.Open(dir);
            Assert.AreEqual(1, r.Leaves.Count);
            r.Dispose();
        }
        else
        {
            // Get another segment to flush so we can verify it is
            // NOT included in the merging
            w.AddDocument(doc);
            w.AddDocument(doc);
            w.Dispose();

            DirectoryReader r = DirectoryReader.Open(dir);
            Assert.IsTrue(r.Leaves.Count > 1);
            r.Dispose();

            var infos = new SegmentInfos();
            infos.Read(dir);
            Assert.AreEqual(2, infos.Size());
        }
    }
    dir.Dispose();
}
示例6: TestMaxNumSegments2
/// <summary>
/// Repeatedly adds documents and calls ForceMerge(7) with a concurrent merge
/// scheduler, asserting that the resulting segment count is exactly 7 (or
/// unchanged when there were already fewer than 7 segments).
/// </summary>
public virtual void TestMaxNumSegments2()
{
    Directory dir = NewDirectory();
    var doc = new Document();
    doc.Add(NewStringField("content", "aaa", Field.Store.NO));

    var policy = new LogDocMergePolicy();
    policy.MinMergeDocs = 1;
    policy.MergeFactor = 4;
    var w = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(2).SetMergePolicy(policy).SetMergeScheduler(new ConcurrentMergeScheduler()));

    for (int iter = 0; iter < 10; iter++)
    {
        for (int i = 0; i < 19; i++)
        {
            w.AddDocument(doc);
        }
        w.Commit();
        w.WaitForMerges();
        w.Commit();

        var infos = new SegmentInfos();
        infos.Read(dir);
        int segCount = infos.Size();

        w.ForceMerge(7);
        w.Commit();
        w.WaitForMerges();

        infos = new SegmentInfos();
        infos.Read(dir);
        int optSegCount = infos.Size();

        if (segCount < 7)
        {
            Assert.AreEqual(segCount, optSegCount);
        }
        else
        {
            Assert.AreEqual(7, optSegCount, "seg: " + segCount);
        }
    }
    w.Dispose();
    dir.Dispose();
}
示例7: TestMergeFactor
/// <summary>
/// Verifies that ForceMerge(1) honors both MergeFactor and MaxMergeDocs:
/// seven segments (3,3,3,3,5,3,3 docs) collapse to exactly 4.
/// </summary>
public virtual void TestMergeFactor()
{
    Directory dir = new RAMDirectory();
    IndexWriterConfig conf = NewWriterConfig();
    IndexWriter writer = new IndexWriter(dir, conf);
    AddDocs(writer, 3);
    AddDocs(writer, 3);
    AddDocs(writer, 3);
    AddDocs(writer, 3);
    AddDocs(writer, 5);
    AddDocs(writer, 3);
    AddDocs(writer, 3);
    writer.Dispose();

    conf = NewWriterConfig();
    LogMergePolicy lmp = new LogDocMergePolicy();
    lmp.MaxMergeDocs = 3; // the 5-doc segment can never be merged
    lmp.MergeFactor = 2;  // merge at most 2 segments at a time
    conf.SetMergePolicy(lmp);
    writer = new IndexWriter(dir, conf);
    writer.ForceMerge(1);
    writer.Dispose();

    // Should only be 4 segments in the index, because of the merge factor and
    // max merge docs settings.
    SegmentInfos sis = new SegmentInfos();
    sis.Read(dir);
    Assert.AreEqual(4, sis.Size());

    dir.Dispose(); // FIX: release the directory (was leaked)
}
示例8: FindMerges
/// <summary>
/// Randomized merge policy for tests: gathers the segments that are not
/// already merging and, with some probability (always once more than 30
/// candidates exist), proposes one merge over a random-sized prefix of a
/// shuffled candidate list. Returns null when no merge is proposed.
/// </summary>
public override MergeSpecification FindMerges(MergeTrigger? mergeTrigger, SegmentInfos segmentInfos)
{
    //System.out.println("MRMP: findMerges sis=" + segmentInfos);
    // Collect segments that are not already being merged.
    IList<SegmentCommitInfo> segments = new List<SegmentCommitInfo>();
    ICollection<SegmentCommitInfo> merging = Writer.Get().MergingSegments;
    foreach (SegmentCommitInfo sipc in segmentInfos.Segments)
    {
        if (!merging.Contains(sipc))
        {
            segments.Add(sipc);
        }
    }

    // FIX: previously numSegments was first initialized from
    // segmentInfos.Size() and immediately overwritten here — that initial
    // value was dead code (only referenced by the commented-out println).
    int numSegments = segments.Count;

    MergeSpecification mergeSpec = null;
    if (numSegments > 1 && (numSegments > 30 || Random.Next(5) == 3))
    {
        segments = CollectionsHelper.Shuffle(segments);
        // TODO: sometimes make more than 1 merge?
        mergeSpec = new MergeSpecification();
        int segsToMerge = TestUtil.NextInt(Random, 1, numSegments);
        mergeSpec.Add(new OneMerge(segments.SubList(0, segsToMerge)));
    }
    return mergeSpec;
}
示例9: DoCheckIndex
/// <summary>
/// Returns a <seealso cref="Status"/> instance detailing
/// the state of the index.
/// </summary>
/// <param name="onlySegments"> list of specific segment names to check
///
/// <p>As this method checks every byte in the specified
/// segments, on a large index it can take quite a long
/// time to run.
///
/// <p><b>WARNING</b>: make sure
/// you only call this when the index is not opened by any
/// writer. </param>
public virtual Status DoCheckIndex(IList<string> onlySegments)
{
NumberFormatInfo nf = CultureInfo.CurrentCulture.NumberFormat;
SegmentInfos sis = new SegmentInfos();
Status result = new Status();
result.Dir = Dir;
try
{
sis.Read(Dir);
}
catch (Exception t)
{
Msg(infoStream, "ERROR: could not read any segments file in directory");
result.MissingSegments = true;
if (infoStream != null)
{
// LUCENENET NOTE: Some tests rely on the error type being in
// the message. We can't get the error type with StackTrace, we
// need ToString() for that.
infoStream.WriteLine(t.ToString());
//infoStream.WriteLine(t.StackTrace);
}
return result;
}
// find the oldest and newest segment versions
string oldest = Convert.ToString(int.MaxValue), newest = Convert.ToString(int.MinValue);
string oldSegs = null;
bool foundNonNullVersion = false;
IComparer<string> versionComparator = StringHelper.VersionComparator;
foreach (SegmentCommitInfo si in sis.Segments)
{
string version = si.Info.Version;
if (version == null)
{
// pre-3.1 segment
oldSegs = "pre-3.1";
}
else
{
foundNonNullVersion = true;
if (versionComparator.Compare(version, oldest) < 0)
{
oldest = version;
}
if (versionComparator.Compare(version, newest) > 0)
{
newest = version;
}
}
}
int numSegments = sis.Size();
string segmentsFileName = sis.SegmentsFileName;
// note: we only read the format byte (required preamble) here!
IndexInput input = null;
try
{
input = Dir.OpenInput(segmentsFileName, IOContext.READONCE);
}
catch (Exception t)
{
Msg(infoStream, "ERROR: could not open segments file in directory");
if (infoStream != null)
{
// LUCENENET NOTE: Some tests rely on the error type being in
// the message. We can't get the error type with StackTrace, we
// need ToString() for that.
infoStream.WriteLine(t.ToString());
//infoStream.WriteLine(t.StackTrace);
}
result.CantOpenSegments = true;
return result;
}
int format = 0;
try
{
format = input.ReadInt();
}
catch (Exception t)
{
Msg(infoStream, "ERROR: could not read segment file version in directory");
if (infoStream != null)
{
// LUCENENET NOTE: Some tests rely on the error type being in
// the message. We can't get the error type with StackTrace, we
// need ToString() for that.
//.........这里部分代码省略.........
示例10: TestByteSizeLimit
/// <summary>
/// Verifies that the max merge size (in bytes) constraint is applied during
/// ForceMerge: the one oversized segment is excluded and 3 segments remain.
/// </summary>
public virtual void TestByteSizeLimit()
{
    // tests that the max merge size constraint is applied during forceMerge.
    Directory dir = new RAMDirectory();

    // Prepare an index w/ several small segments and a large one.
    IndexWriterConfig cfg = NewWriterConfig();
    var w = new IndexWriter(dir, cfg);
    const int totalSegments = 15;
    for (int i = 0; i < totalSegments; i++)
    {
        AddDocs(w, i == 7 ? 30 : 1);
    }
    w.Dispose();

    var infos = new SegmentInfos();
    infos.Read(dir);
    double smallestBytes = infos.Info(0).SizeInBytes();

    // Cap forced merges just above the smallest segment's size.
    cfg = NewWriterConfig();
    var policy = new LogByteSizeMergePolicy();
    policy.MaxMergeMBForForcedMerge = (smallestBytes + 1) / (1 << 20);
    cfg.SetMergePolicy(policy);
    w = new IndexWriter(dir, cfg);
    w.ForceMerge(1);
    w.Dispose();

    // Should only be 3 segments in the index, because one of them exceeds the size limit
    infos = new SegmentInfos();
    infos.Read(dir);
    Assert.AreEqual(3, infos.Size());
}
示例11: TestNumDocsLimit
/// <summary>
/// Verifies that the MaxMergeDocs constraint is applied during ForceMerge:
/// the one 5-doc segment exceeds the limit and cannot be merged, leaving 3
/// segments.
/// </summary>
public virtual void TestNumDocsLimit()
{
    // tests that the max merge docs constraint is applied during forceMerge.
    Directory dir = new RAMDirectory();
    // Prepare an index w/ several small segments and a large one.
    IndexWriterConfig conf = NewWriterConfig();
    IndexWriter writer = new IndexWriter(dir, conf);
    AddDocs(writer, 3);
    AddDocs(writer, 3);
    AddDocs(writer, 5);
    AddDocs(writer, 3);
    AddDocs(writer, 3);
    AddDocs(writer, 3);
    AddDocs(writer, 3);
    writer.Dispose();

    conf = NewWriterConfig();
    LogMergePolicy lmp = new LogDocMergePolicy();
    lmp.MaxMergeDocs = 3;
    conf.SetMergePolicy(lmp);
    writer = new IndexWriter(dir, conf);
    writer.ForceMerge(1);
    writer.Dispose();

    // Should only be 3 segments in the index, because one of them exceeds the
    // max merge *docs* limit. (FIX: comment previously said "size limit",
    // copied from the byte-size test — this test constrains MaxMergeDocs.)
    SegmentInfos sis = new SegmentInfos();
    sis.Read(dir);
    Assert.AreEqual(3, sis.Size());

    dir.Dispose(); // FIX: release the directory (was leaked)
}
示例12: TestSingleNonMergeableSegment
/// <summary>
/// Verifies that ForceMerge(1) on an index with a single segment that
/// exceeds MaxMergeDocs leaves the index unchanged (still one segment).
/// </summary>
public virtual void TestSingleNonMergeableSegment()
{
    Directory dir = new RAMDirectory();
    IndexWriterConfig conf = NewWriterConfig();
    IndexWriter writer = new IndexWriter(dir, conf);
    AddDocs(writer, 3, true);
    writer.Dispose();

    conf = NewWriterConfig();
    LogMergePolicy lmp = new LogDocMergePolicy();
    lmp.MaxMergeDocs = 3;
    conf.SetMergePolicy(lmp);
    writer = new IndexWriter(dir, conf);
    writer.ForceMerge(1);
    writer.Dispose();

    // Verify the index still contains exactly one segment.
    // (FIX: comment previously said "the last segment does not have
    // deletions", which does not match the assertion below.)
    SegmentInfos sis = new SegmentInfos();
    sis.Read(dir);
    Assert.AreEqual(1, sis.Size());

    dir.Dispose(); // FIX: release the directory (was leaked)
}
示例13: TestSingleMergeableTooLargeSegment
/// <summary>
/// Verifies that ForceMerge(1) skips a single segment that exceeds
/// MaxMergeDocs even though it has deletions: the segment is left as-is,
/// deletions included.
/// </summary>
public virtual void TestSingleMergeableTooLargeSegment()
{
    Directory dir = new RAMDirectory();
    IndexWriterConfig conf = NewWriterConfig();
    IndexWriter writer = new IndexWriter(dir, conf);
    AddDocs(writer, 5, true);
    // delete the last document
    writer.DeleteDocuments(new Term("id", "4"));
    writer.Dispose();

    conf = NewWriterConfig();
    LogMergePolicy lmp = new LogDocMergePolicy();
    lmp.MaxMergeDocs = 2;
    conf.SetMergePolicy(lmp);
    writer = new IndexWriter(dir, conf);
    writer.ForceMerge(1);
    writer.Dispose();

    // Verify the oversized segment was left untouched WITH its deletions.
    // (FIX: comment previously said the segment "does not have deletions",
    // contradicting the HasDeletions assertion below.)
    SegmentInfos sis = new SegmentInfos();
    sis.Read(dir);
    Assert.AreEqual(1, sis.Size());
    Assert.IsTrue(sis.Info(0).HasDeletions());

    dir.Dispose(); // FIX: release the directory (was leaked)
}
示例14: CheckAllSegmentsUpgraded
/// <summary>
/// Asserts that every segment in <paramref name="dir"/> carries the current
/// Lucene version string, and returns the number of segments found.
/// </summary>
private int CheckAllSegmentsUpgraded(Directory dir)
{
    var infos = new SegmentInfos();
    infos.Read(dir);

    if (VERBOSE)
    {
        Console.WriteLine("checkAllSegmentsUpgraded: " + infos);
    }

    foreach (SegmentCommitInfo commitInfo in infos.Segments)
    {
        Assert.AreEqual(Constants.LUCENE_MAIN_VERSION, commitInfo.Info.Version);
    }
    return infos.Size();
}
示例15: CommitPoint
/// <summary>
/// Snapshots the state of a commit — segments file name, generation, user
/// data, referenced files and segment count — from the given
/// <paramref name="segmentInfos"/>, so the commit can later be deleted along
/// with the others in <paramref name="commitsToDelete"/>.
/// </summary>
public CommitPoint(ICollection<CommitPoint> commitsToDelete, Directory directory, SegmentInfos segmentInfos)
{
    this.CommitsToDelete = commitsToDelete;
    this.Directory_Renamed = directory;

    // Capture everything we need from segmentInfos up front; the live
    // SegmentInfos may change after this commit point is created.
    SegmentsFileName_Renamed = segmentInfos.SegmentsFileName;
    Generation_Renamed = segmentInfos.Generation;
    UserData_Renamed = segmentInfos.UserData;
    Files = segmentInfos.Files(directory, true);
    SegmentCount_Renamed = segmentInfos.Size();
}