本文整理汇总了C#中Lucene.Net.Index.SegmentInfos类的典型用法代码示例。如果您正苦于以下问题:C# SegmentInfos类的具体用法?C# SegmentInfos怎么用?C# SegmentInfos使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
SegmentInfos类属于Lucene.Net.Index命名空间,在下文中一共展示了SegmentInfos类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: TestByteSizeLimit
/// <summary>
/// Checks that LogByteSizeMergePolicy.MaxMergeMBForForcedMerge is honored
/// during ForceMerge: a segment above the byte-size cap must be left out of
/// the merge, so the index cannot collapse to a single segment.
/// </summary>
public virtual void TestByteSizeLimit()
{
    // Build an index with several tiny segments plus one large one (segment #7).
    Directory dir = new RAMDirectory();
    IndexWriterConfig config = NewWriterConfig();
    IndexWriter indexWriter = new IndexWriter(dir, config);
    const int segmentCount = 15;
    for (int seg = 0; seg < segmentCount; seg++)
    {
        int docCount;
        if (seg == 7)
        {
            docCount = 30; // the single large segment
        }
        else
        {
            docCount = 1;
        }
        AddDocs(indexWriter, docCount);
    }
    indexWriter.Dispose();

    // Use the first (small) segment's byte size to derive the merge cap.
    SegmentInfos segmentInfos = new SegmentInfos();
    segmentInfos.Read(dir);
    double smallestSize = segmentInfos.Info(0).SizeInBytes();

    // Cap forced merges just above that size, converted to MB.
    config = NewWriterConfig();
    LogByteSizeMergePolicy mergePolicy = new LogByteSizeMergePolicy();
    mergePolicy.MaxMergeMBForForcedMerge = (smallestSize + 1) / (1 << 20);
    config.SetMergePolicy(mergePolicy);
    indexWriter = new IndexWriter(dir, config);
    indexWriter.ForceMerge(1);
    indexWriter.Dispose();

    // The oversized segment cannot participate, so 3 segments remain.
    segmentInfos = new SegmentInfos();
    segmentInfos.Read(dir);
    Assert.AreEqual(3, segmentInfos.Size());
}
示例2: OneMerge
/// <summary>
/// Creates a merge specification over the given segments.
/// </summary>
/// <param name="segments">Segments to merge; must contain at least one entry.</param>
/// <param name="useCompoundFile">Whether the merged result uses the compound file format.</param>
/// <exception cref="System.SystemException">Thrown when <paramref name="segments"/> is empty.</exception>
public OneMerge(SegmentInfos segments, bool useCompoundFile)
{
    // Guard clause: a merge over zero segments is meaningless.
    if (segments.Count == 0)
    {
        throw new System.SystemException("segments must include at least one segment");
    }
    this.segments = segments;
    this.useCompoundFile = useCompoundFile;
}
示例3: MultiSegmentReader
private int[] starts; // 1st docno for each segment
#endregion Fields
#region Constructors
/// <summary>
/// Construct reading the named set of readers.
/// Opens one SegmentReader per segment in <paramref name="sis"/>; if any open
/// fails, every reader already opened is closed before the exception escapes.
/// </summary>
internal MultiSegmentReader(Directory directory, SegmentInfos sis, bool closeDirectory, bool readOnly)
    : base(directory, sis, closeDirectory, readOnly)
{
    // To reduce the chance of hitting FileNotFound (and having to retry),
    // we open segments in reverse because IndexWriter merges & deletes
    // the newest segments first.
    SegmentReader[] readers = new SegmentReader[sis.Count];
    for (int i = sis.Count - 1; i >= 0; i--)
    {
        try
        {
            readers[i] = SegmentReader.Get(readOnly, sis.Info(i));
        }
        catch (System.IO.IOException)
        {
            // Close all readers we had opened (indexes > i):
            for (i++; i < sis.Count; i++)
            {
                try
                {
                    readers[i].Close();
                }
                catch (System.IO.IOException)
                {
                    // keep going - we want to clean up as much as possible
                }
            }
            // FIX: rethrow with `throw;` to preserve the original stack
            // trace; the original `throw e;` reset it at this frame.
            throw;
        }
    }
    Initialize(readers);
}
示例4: DoBody
// Opens a StandardDirectoryReader over the segments listed in the given
// segments_N file. One SegmentReader is opened per segment; on an open
// failure the already-opened readers are passed to
// IOUtils.CloseWhileHandlingException together with the prior exception.
protected internal override object DoBody(string segmentFileName)
{
    var sis = new SegmentInfos();
    sis.Read(directory, segmentFileName);
    var readers = new SegmentReader[sis.Size()];
    // Open in reverse order to lower the chance of hitting a file that an
    // IndexWriter just merged away (newest segments are removed first).
    for (int i = sis.Size() - 1; i >= 0; i--)
    {
        System.IO.IOException prior = null;
        bool success = false;
        try
        {
            readers[i] = new SegmentReader(sis.Info(i), termInfosIndexDivisor, IOContext.READ);
            success = true;
        }
        catch (System.IO.IOException ex)
        {
            // Remember the failure so cleanup can surface it.
            prior = ex;
        }
        finally
        {
            if (!success)
            {
                // NOTE(review): this assumes CloseWhileHandlingException
                // rethrows `prior` after closing the non-null readers;
                // otherwise the loop would continue with a null slot —
                // confirm against the IOUtils implementation.
                IOUtils.CloseWhileHandlingException(prior, readers);
            }
        }
    }
    return new StandardDirectoryReader(directory, readers, null, sis, termInfosIndexDivisor, false);
}
示例5: TestAllSegmentsLarge
/// <summary>
/// ForceMerge must leave the index untouched when every segment already
/// exceeds the MaxMergeDocs limit: all 3 segments are expected to survive.
/// </summary>
public virtual void TestAllSegmentsLarge()
{
    Directory dir = new RAMDirectory();
    IndexWriterConfig config = NewWriterConfig();
    IndexWriter w = new IndexWriter(dir, config);
    // Three batches of 3 docs each - all above the 2-doc cap set below.
    for (int batch = 0; batch < 3; batch++)
    {
        AddDocs(w, 3);
    }
    w.Dispose();

    config = NewWriterConfig();
    LogMergePolicy policy = new LogDocMergePolicy();
    policy.MaxMergeDocs = 2;
    config.SetMergePolicy(policy);
    w = new IndexWriter(dir, config);
    w.ForceMerge(1);
    w.Dispose();

    SegmentInfos infos = new SegmentInfos();
    infos.Read(dir);
    Assert.AreEqual(3, infos.Size());
}
示例6: ReadCurrentVersion
/// <summary>
/// Current version number from the segments file.
/// For format-prefixed files the version follows the header; for older
/// files the whole segments file must be parsed to obtain it.
/// </summary>
public static long ReadCurrentVersion(Directory directory)
{
    IndexInput segmentsInput = directory.OpenInput(IndexFileNames.SEGMENTS);
    int formatCode = 0;
    long segmentsVersion = 0;
    try
    {
        formatCode = segmentsInput.ReadInt();
        if (formatCode < 0)
        {
            // Negative values encode the format revision; reject unknown ones.
            if (formatCode < FORMAT)
            {
                throw new System.IO.IOException("Unknown format version: " + formatCode);
            }
            segmentsVersion = segmentsInput.ReadLong(); // version follows the header
        }
    }
    finally
    {
        segmentsInput.Close();
    }

    if (formatCode >= 0)
    {
        // Pre-header layout: there is no fixed offset to seek to, so read
        // the entire file and take the version from the parsed infos.
        SegmentInfos allInfos = new SegmentInfos();
        allInfos.Read(directory);
        return allInfos.GetVersion();
    }
    return segmentsVersion;
}
示例7: StandardDirectoryReader
/// <summary>
/// Called only from the static open() methods. Captures the writer
/// (null at the call site in DoBody above for a non-writer reader),
/// the segment metadata, and the reader settings.
/// </summary>
internal StandardDirectoryReader(Directory directory, AtomicReader[] readers, IndexWriter writer, SegmentInfos sis, int termInfosIndexDivisor, bool applyAllDeletes)
    : base(directory, readers)
{
    this.SegmentInfos = sis;
    this.TermInfosIndexDivisor = termInfosIndexDivisor;
    this.ApplyAllDeletes = applyAllDeletes;
    this.Writer = writer;
}
示例8: DoBody
/// <summary>
/// Opens a reader (read-only or writable, per the enclosing flag) over the
/// segments named by the given segments_N file.
/// </summary>
public /*protected internal*/ override System.Object DoBody(System.String segmentFileName)
{
    var segmentInfos = new SegmentInfos();
    segmentInfos.Read(directory, segmentFileName);
    return readOnly
        ? (System.Object)new ReadOnlyDirectoryReader(directory, segmentInfos, deletionPolicy, termInfosIndexDivisor)
        : new DirectoryReader(directory, segmentInfos, deletionPolicy, false, termInfosIndexDivisor);
}
示例9: SetUp
/// <summary>
/// Creates a RAM directory holding two helper documents and reads its
/// SegmentInfos into the fixture fields used by the tests.
/// </summary>
public override void SetUp()
{
    base.SetUp();
    dir = new RAMDirectory();

    // Build and persist the two helper documents.
    doc1 = new Document();
    DocHelper.SetupDoc(doc1);
    doc2 = new Document();
    DocHelper.SetupDoc(doc2);
    DocHelper.WriteDoc(dir, doc1);
    DocHelper.WriteDoc(dir, doc2);

    // Snapshot the segment metadata the tests operate on.
    sis = new SegmentInfos();
    sis.Read(dir);
}
示例10: DirectoryReader
/// <summary>
/// Construct reading the named set of readers.
/// Opens one SegmentReader per segment; if any open fails, the readers
/// already opened are closed before the exception propagates.
/// </summary>
internal DirectoryReader(Directory directory, SegmentInfos sis, IndexDeletionPolicy deletionPolicy, bool readOnly, int termInfosIndexDivisor)
{
    this.directory = directory;
    this.readOnly = readOnly;
    this.segmentInfos = sis;
    this.deletionPolicy = deletionPolicy;
    this.termInfosIndexDivisor = termInfosIndexDivisor;
    if (!readOnly)
    {
        // We assume that this segments_N was previously
        // properly sync'd:
        SupportClass.CollectionsHelper.AddAllIfNotContains(synced, sis.Files(directory, true));
    }
    // To reduce the chance of hitting FileNotFound
    // (and having to retry), we open segments in
    // reverse because IndexWriter merges & deletes
    // the newest segments first.
    SegmentReader[] readers = new SegmentReader[sis.Count];
    for (int i = sis.Count - 1; i >= 0; i--)
    {
        bool success = false;
        try
        {
            readers[i] = SegmentReader.Get(readOnly, sis.Info(i), termInfosIndexDivisor);
            success = true;
        }
        finally
        {
            if (!success)
            {
                // Close all readers we had opened (indexes > i):
                for (i++; i < sis.Count; i++)
                {
                    try
                    {
                        readers[i].Close();
                    }
                    // FIX: drop the unused `ignore` variable that
                    // triggered compiler warning CS0168.
                    catch (System.Exception)
                    {
                        // keep going - we want to clean up as much as possible
                    }
                }
            }
        }
    }
    Initialize(readers);
}
示例11: DoBody
/// <summary>
/// Opens a reader over the current segments: a single SegmentReader when
/// the index holds exactly one segment, otherwise a MultiReader over all
/// of them.
/// </summary>
public override System.Object DoBody()
{
    SegmentInfos infos = new SegmentInfos();
    infos.Read(directory);
    if (infos.Count != 1)
    {
        IndexReader[] subReaders = new IndexReader[infos.Count];
        for (int seg = 0; seg < infos.Count; seg++)
        {
            subReaders[seg] = SegmentReader.Get(infos.Info(seg));
        }
        return new MultiReader(directory, infos, closeDirectory, subReaders);
    }
    // index is optimized - no MultiReader wrapper needed
    return SegmentReader.Get(infos, infos.Info(0), closeDirectory);
}
示例12: TestAddIndexes
/// <summary>
/// AddIndexes must preserve each segment's own field numbering: the
/// imported segment keeps its original field order instead of adopting
/// the target index's ordering.
/// </summary>
public virtual void TestAddIndexes()
{
    Directory dir1 = NewDirectory();
    Directory dir2 = NewDirectory();

    // dir1: one doc with fields f1, f2 (in that order).
    IndexWriter writer = new IndexWriter(dir1, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(NoMergePolicy.COMPOUND_FILES));
    Document docOne = new Document();
    docOne.Add(new TextField("f1", "first field", Field.Store.YES));
    docOne.Add(new TextField("f2", "second field", Field.Store.YES));
    writer.AddDocument(docOne);
    writer.Dispose();

    // dir2: one doc whose field order starts with f2, then f1/f3/f4.
    writer = new IndexWriter(dir2, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(NoMergePolicy.COMPOUND_FILES));
    Document docTwo = new Document();
    FieldType vectorType = new FieldType(TextField.TYPE_STORED);
    vectorType.StoreTermVectors = true;
    docTwo.Add(new TextField("f2", "second field", Field.Store.YES));
    docTwo.Add(new Field("f1", "first field", vectorType));
    docTwo.Add(new TextField("f3", "third field", Field.Store.YES));
    docTwo.Add(new TextField("f4", "fourth field", Field.Store.YES));
    writer.AddDocument(docTwo);
    writer.Dispose();

    // Pull dir2's segment into dir1.
    writer = new IndexWriter(dir1, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(NoMergePolicy.COMPOUND_FILES));
    writer.AddIndexes(dir2);
    writer.Dispose();

    SegmentInfos sis = new SegmentInfos();
    sis.Read(dir1);
    Assert.AreEqual(2, sis.Size());
    FieldInfos fieldsA = SegmentReader.ReadFieldInfos(sis.Info(0));
    FieldInfos fieldsB = SegmentReader.ReadFieldInfos(sis.Info(1));
    Assert.AreEqual("f1", fieldsA.FieldInfo(0).Name);
    Assert.AreEqual("f2", fieldsA.FieldInfo(1).Name);
    // make sure the ordering of the "external" segment is preserved
    Assert.AreEqual("f2", fieldsB.FieldInfo(0).Name);
    Assert.AreEqual("f1", fieldsB.FieldInfo(1).Name);
    Assert.AreEqual("f3", fieldsB.FieldInfo(2).Name);
    Assert.AreEqual("f4", fieldsB.FieldInfo(3).Name);

    dir1.Dispose();
    dir2.Dispose();
}
示例13: MergeDocIDRemapper
internal int docShift; // total # deleted docs that were compacted by this merge

/// <summary>
/// Builds a docID remap for the contiguous run of segments covered by
/// <paramref name="merge"/>: records where the run starts (minDocID),
/// where it ends (maxDocID), the per-segment pre-merge (starts) and
/// post-merge (newStarts) first docIDs, and the total shift (docShift).
/// </summary>
/// <param name="infos">All segments in the index, in order.</param>
/// <param name="docMaps">One entry per merged segment (only its length is used here; the arrays are kept for later lookups).</param>
/// <param name="delCounts">Per merged segment, the number of deleted docs.</param>
/// <param name="merge">The merge whose segments are being remapped.</param>
/// <param name="mergedDocCount">Doc count of the resulting merged segment.</param>
public MergeDocIDRemapper(SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergedDocCount)
{
    this.docMaps = docMaps;
    // Walk the full segment list until the merge's first segment is found;
    // minDocID accumulates the doc counts of every segment before it.
    SegmentInfo firstSegment = merge.segments.Info(0);
    int i = 0;
    while (true)
    {
        SegmentInfo info = infos.Info(i);
        if (info.Equals(firstSegment))
            break;
        minDocID += info.docCount;
        i++;
    }
    // Sum the docs across the merged run; the Debug.Assert checks that the
    // merge's segments appear contiguously in `infos`. NOTE: `i` continues
    // from the position found above - this reuse is intentional.
    int numDocs = 0;
    for (int j = 0; j < docMaps.Length; i++, j++)
    {
        numDocs += infos.Info(i).docCount;
        System.Diagnostics.Debug.Assert(infos.Info(i).Equals(merge.segments.Info(j)));
    }
    maxDocID = minDocID + numDocs;
    // starts[k]    = first pre-merge docID of merged segment k
    // newStarts[k] = first post-merge docID of merged segment k
    //                (each segment shrinks by its deletion count)
    starts = new int[docMaps.Length];
    newStarts = new int[docMaps.Length];
    starts[0] = minDocID;
    newStarts[0] = minDocID;
    for (i = 1; i < docMaps.Length; i++)
    {
        int lastDocCount = merge.segments.Info(i - 1).docCount;
        starts[i] = starts[i - 1] + lastDocCount;
        newStarts[i] = newStarts[i - 1] + lastDocCount - delCounts[i - 1];
    }
    docShift = numDocs - mergedDocCount;
    // There are rare cases when docShift is 0. It happens
    // if you try to delete a docID that's out of bounds,
    // because the SegmentReader still allocates deletedDocs
    // and pretends it has deletions ... so we can't make
    // this assert here
    // assert docShift > 0;
    // Make sure it all adds up:
    System.Diagnostics.Debug.Assert(docShift == maxDocID -(newStarts [docMaps.Length - 1] + merge.segments.Info(docMaps.Length - 1).docCount - delCounts [docMaps.Length - 1]));
}
示例14: TestPartialMerge
/// <summary>
/// ForceMerge(3) should reduce the index to exactly 3 segments whenever
/// more than 3 exist, and leave smaller indexes untouched.
/// </summary>
public virtual void TestPartialMerge()
{
    Directory dir = NewDirectory();
    Document doc = new Document();
    doc.Add(NewStringField("content", "aaa", Field.Store.NO));
    // Nightly runs use a smaller increment, i.e. more iterations.
    int incrMin = TEST_NIGHTLY ? 15 : 40;
    for (int numDocs = 10; numDocs < 500; numDocs += TestUtil.NextInt(Random(), incrMin, 5 * incrMin))
    {
        // Recreate a multi-segment index holding numDocs identical docs.
        LogDocMergePolicy buildPolicy = new LogDocMergePolicy();
        buildPolicy.MinMergeDocs = 1;
        buildPolicy.MergeFactor = 5;
        IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode_e.CREATE).SetMaxBufferedDocs(2).SetMergePolicy(buildPolicy));
        for (int j = 0; j < numDocs; j++)
        {
            writer.AddDocument(doc);
        }
        writer.Dispose();

        SegmentInfos beforeInfos = new SegmentInfos();
        beforeInfos.Read(dir);
        int segCount = beforeInfos.Size();

        // Partially merge down to at most 3 segments.
        LogDocMergePolicy mergePolicy = new LogDocMergePolicy();
        mergePolicy.MergeFactor = 5;
        writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(mergePolicy));
        writer.ForceMerge(3);
        writer.Dispose();

        SegmentInfos afterInfos = new SegmentInfos();
        afterInfos.Read(dir);
        int optSegCount = afterInfos.Size();
        // Fewer than 3 segments to begin with: nothing should change.
        Assert.AreEqual(segCount < 3 ? segCount : 3, optSegCount);
    }
    dir.Dispose();
}
示例15: DoBody
/// <summary>
/// Opens a DirectoryIndexReader for the given segments file: a bare
/// SegmentReader for a single-segment (optimized) index, otherwise a
/// MultiSegmentReader; the deletion policy is attached either way.
/// </summary>
protected internal override System.Object DoBody(System.String segmentFileName)
{
    SegmentInfos segmentInfos = new SegmentInfos();
    segmentInfos.Read(directory, segmentFileName);
    DirectoryIndexReader reader = segmentInfos.Count == 1
        ? (DirectoryIndexReader)SegmentReader.Get(segmentInfos, segmentInfos.Info(0), closeDirectory)
        : new MultiSegmentReader(directory, segmentInfos, closeDirectory);
    reader.SetDeletionPolicy(deletionPolicy);
    return reader;
}