This article collects typical usage examples of the C# method Lucene.Net.Index.SegmentInfos.Info. If you are unsure how SegmentInfos.Info is used in C#, or are looking for concrete examples of it, the curated code samples below may help. You can also read more about the containing class, Lucene.Net.Index.SegmentInfos.
The following shows 15 code examples of the SegmentInfos.Info method, sorted by popularity by default.
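Before the examples, here is a minimal sketch of the pattern they all share: read the current segments_N commit into a SegmentInfos and index into it with Info(i). It assumes the older Lucene.Net API used throughout this page (SegmentInfos.Read(Directory), public name/docCount fields on SegmentInfo); the class and method names of the sketch itself are illustrative only, not taken from the examples.

using Lucene.Net.Index;
using Lucene.Net.Store;

// Illustrative helper, not part of the examples below.
public static class SegmentInfosInfoSketch
{
    public static void ListSegments(Directory dir)
    {
        SegmentInfos infos = new SegmentInfos();
        infos.Read(dir); // load the latest commit point (segments_N)

        for (int i = 0; i < infos.Count; i++)
        {
            SegmentInfo info = infos.Info(i); // the method this page documents
            System.Console.WriteLine(info.name + ": " + info.docCount + " docs");
        }
    }
}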
Example 1: DoBody
public override System.Object DoBody()
{
    SegmentInfos infos = new SegmentInfos();
    infos.Read(directory);

    if (infos.Count == 1)
    {
        // index is optimized
        return SegmentReader.Get(infos, infos.Info(0), closeDirectory);
    }

    IndexReader[] readers = new IndexReader[infos.Count];
    for (int i = 0; i < infos.Count; i++)
        readers[i] = SegmentReader.Get(infos.Info(i));

    return new MultiReader(directory, infos, closeDirectory, readers);
}
Example 2: MultiSegmentReader
private int[] starts; // 1st docno for each segment

#endregion Fields

#region Constructors

/// <summary>Construct reading the named set of readers. </summary>
internal MultiSegmentReader(Directory directory, SegmentInfos sis, bool closeDirectory, bool readOnly)
    : base(directory, sis, closeDirectory, readOnly)
{
    // To reduce the chance of hitting FileNotFound
    // (and having to retry), we open segments in
    // reverse because IndexWriter merges & deletes
    // the newest segments first.
    SegmentReader[] readers = new SegmentReader[sis.Count];
    for (int i = sis.Count - 1; i >= 0; i--)
    {
        try
        {
            readers[i] = SegmentReader.Get(readOnly, sis.Info(i));
        }
        catch (System.IO.IOException e)
        {
            // Close all readers we had opened:
            for (i++; i < sis.Count; i++)
            {
                try
                {
                    readers[i].Close();
                }
                catch (System.IO.IOException)
                {
                    // keep going - we want to clean up as much as possible
                }
            }
            throw e;
        }
    }
    Initialize(readers);
}
Example 3: TestByteSizeLimit
public virtual void TestByteSizeLimit()
{
    // tests that the max merge size constraint is applied during forceMerge.
    Directory dir = new RAMDirectory();

    // Prepare an index w/ several small segments and a large one.
    IndexWriterConfig conf = NewWriterConfig();
    IndexWriter writer = new IndexWriter(dir, conf);
    const int numSegments = 15;
    for (int i = 0; i < numSegments; i++)
    {
        int numDocs = i == 7 ? 30 : 1;
        AddDocs(writer, numDocs);
    }
    writer.Dispose();

    SegmentInfos sis = new SegmentInfos();
    sis.Read(dir);
    double min = sis.Info(0).SizeInBytes();

    conf = NewWriterConfig();
    LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy();
    lmp.MaxMergeMBForForcedMerge = (min + 1) / (1 << 20);
    conf.SetMergePolicy(lmp);

    writer = new IndexWriter(dir, conf);
    writer.ForceMerge(1);
    writer.Dispose();

    // Should only be 3 segments in the index, because one of them exceeds the size limit
    sis = new SegmentInfos();
    sis.Read(dir);
    Assert.AreEqual(3, sis.Size());
}
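A note on the arithmetic in this test (my reading, not part of the original page): SizeInBytes() returns bytes and 1 << 20 is 1 048 576, so (min + 1) / (1 << 20) sets MaxMergeMBForForcedMerge just above the smallest segment's size expressed in megabytes. The 30-doc segment created at i == 7 exceeds that cap, so the forced merge cannot include it; the small segments on either side of it are merged among themselves, which is why exactly 3 segments are expected to remain.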
Example 4: DoBody
protected internal override object DoBody(string segmentFileName)
{
    var sis = new SegmentInfos();
    sis.Read(directory, segmentFileName);
    var readers = new SegmentReader[sis.Size()];
    for (int i = sis.Size() - 1; i >= 0; i--)
    {
        System.IO.IOException prior = null;
        bool success = false;
        try
        {
            readers[i] = new SegmentReader(sis.Info(i), termInfosIndexDivisor, IOContext.READ);
            success = true;
        }
        catch (System.IO.IOException ex)
        {
            prior = ex;
        }
        finally
        {
            if (!success)
            {
                IOUtils.CloseWhileHandlingException(prior, readers);
            }
        }
    }
    return new StandardDirectoryReader(directory, readers, null, sis, termInfosIndexDivisor, false);
}
Example 5: TestAddIndexes
public virtual void TestAddIndexes()
{
    Directory dir1 = NewDirectory();
    Directory dir2 = NewDirectory();

    IndexWriter writer = new IndexWriter(dir1, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(NoMergePolicy.COMPOUND_FILES));
    Document d1 = new Document();
    d1.Add(new TextField("f1", "first field", Field.Store.YES));
    d1.Add(new TextField("f2", "second field", Field.Store.YES));
    writer.AddDocument(d1);
    writer.Dispose();

    writer = new IndexWriter(dir2, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(NoMergePolicy.COMPOUND_FILES));
    Document d2 = new Document();
    FieldType customType2 = new FieldType(TextField.TYPE_STORED);
    customType2.StoreTermVectors = true;
    d2.Add(new TextField("f2", "second field", Field.Store.YES));
    d2.Add(new Field("f1", "first field", customType2));
    d2.Add(new TextField("f3", "third field", Field.Store.YES));
    d2.Add(new TextField("f4", "fourth field", Field.Store.YES));
    writer.AddDocument(d2);
    writer.Dispose();

    writer = new IndexWriter(dir1, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(NoMergePolicy.COMPOUND_FILES));
    writer.AddIndexes(dir2);
    writer.Dispose();

    SegmentInfos sis = new SegmentInfos();
    sis.Read(dir1);
    Assert.AreEqual(2, sis.Size());

    FieldInfos fis1 = SegmentReader.ReadFieldInfos(sis.Info(0));
    FieldInfos fis2 = SegmentReader.ReadFieldInfos(sis.Info(1));
    Assert.AreEqual("f1", fis1.FieldInfo(0).Name);
    Assert.AreEqual("f2", fis1.FieldInfo(1).Name);
    // make sure the ordering of the "external" segment is preserved
    Assert.AreEqual("f2", fis2.FieldInfo(0).Name);
    Assert.AreEqual("f1", fis2.FieldInfo(1).Name);
    Assert.AreEqual("f3", fis2.FieldInfo(2).Name);
    Assert.AreEqual("f4", fis2.FieldInfo(3).Name);

    dir1.Dispose();
    dir2.Dispose();
}
Example 6: MergeDocIDRemapper
internal int docShift; // total # deleted docs that were compacted by this merge

public MergeDocIDRemapper(SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergedDocCount)
{
    this.docMaps = docMaps;
    SegmentInfo firstSegment = merge.segments.Info(0);
    int i = 0;
    while (true)
    {
        SegmentInfo info = infos.Info(i);
        if (info.Equals(firstSegment))
            break;
        minDocID += info.docCount;
        i++;
    }

    int numDocs = 0;
    for (int j = 0; j < docMaps.Length; i++, j++)
    {
        numDocs += infos.Info(i).docCount;
        System.Diagnostics.Debug.Assert(infos.Info(i).Equals(merge.segments.Info(j)));
    }
    maxDocID = minDocID + numDocs;

    starts = new int[docMaps.Length];
    newStarts = new int[docMaps.Length];
    starts[0] = minDocID;
    newStarts[0] = minDocID;
    for (i = 1; i < docMaps.Length; i++)
    {
        int lastDocCount = merge.segments.Info(i - 1).docCount;
        starts[i] = starts[i - 1] + lastDocCount;
        newStarts[i] = newStarts[i - 1] + lastDocCount - delCounts[i - 1];
    }
    docShift = numDocs - mergedDocCount;

    // There are rare cases when docShift is 0. It happens
    // if you try to delete a docID that's out of bounds,
    // because the SegmentReader still allocates deletedDocs
    // and pretends it has deletions ... so we can't make
    // this assert here
    // assert docShift > 0;

    // Make sure it all adds up:
    System.Diagnostics.Debug.Assert(docShift == maxDocID - (newStarts[docMaps.Length - 1] + merge.segments.Info(docMaps.Length - 1).docCount - delCounts[docMaps.Length - 1]));
}
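A worked example of the bookkeeping above, with illustrative numbers that are not from the original page: suppose the merge covers two segments with docCounts 10 and 20, delCounts 2 and 5, and 100 documents sit in segments before the first merged one, so minDocID = 100. Then starts = [100, 110] and newStarts = [100, 108]; numDocs = 30, the merged segment holds mergedDocCount = 23 documents, and docShift = 30 - 23 = 7, exactly the 2 + 5 deletions that were compacted. The final assert checks the same total: maxDocID = 130, and 130 - (108 + 20 - 5) = 7.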
Example 7: DoBody
public override System.Object DoBody(System.String segmentFileName)
{
    SegmentInfos infos = new SegmentInfos();
    infos.Read(directory, segmentFileName);
    if (infos.Count == 1)
    {
        // index is optimized
        return SegmentReader.Get(infos, infos.Info(0), closeDirectory);
    }
    else
    {
        // To reduce the chance of hitting FileNotFound
        // (and having to retry), we open segments in
        // reverse because IndexWriter merges & deletes
        // the newest segments first.
        IndexReader[] readers = new IndexReader[infos.Count];
        for (int i = infos.Count - 1; i >= 0; i--)
        {
            try
            {
                readers[i] = SegmentReader.Get(infos.Info(i));
            }
            catch (System.IO.IOException e)
            {
                // Close all readers we had opened:
                for (i++; i < infos.Count; i++)
                {
                    readers[i].Close();
                }
                throw e;
            }
        }
        return new MultiReader(directory, infos, closeDirectory, readers);
    }
}
Example 8: DoBody
protected internal override System.Object DoBody(System.String segmentFileName)
{
    SegmentInfos infos = new SegmentInfos();
    infos.Read(directory, segmentFileName);

    DirectoryIndexReader reader;

    if (infos.Count == 1)
    {
        // index is optimized
        reader = SegmentReader.Get(infos, infos.Info(0), closeDirectory);
    }
    else
    {
        reader = new MultiSegmentReader(directory, infos, closeDirectory);
    }
    reader.SetDeletionPolicy(deletionPolicy);
    return reader;
}
Example 9: CommitPoint
public CommitPoint(IndexFileDeleter enclosingInstance, System.Collections.ICollection commitsToDelete, Directory directory, SegmentInfos segmentInfos)
{
    InitBlock(enclosingInstance);
    this.directory = directory;
    this.commitsToDelete = commitsToDelete;
    userData = segmentInfos.GetUserData();
    segmentsFileName = segmentInfos.GetCurrentSegmentFileName();
    version = segmentInfos.GetVersion();
    generation = segmentInfos.GetGeneration();
    files = segmentInfos.Files(directory, true);
    gen = segmentInfos.GetGeneration();
    isOptimized = segmentInfos.Count == 1 && !segmentInfos.Info(0).HasDeletions();

    System.Diagnostics.Debug.Assert(!segmentInfos.HasExternalSegments(directory));
}
Example 10: ReaderCommit
internal ReaderCommit(SegmentInfos infos, Directory dir)
{
    segmentsFileName = infos.GetCurrentSegmentFileName();
    this.dir = dir;
    int size = infos.Count;
    files = new List<string>(size);
    files.Add(segmentsFileName);
    for (int i = 0; i < size; i++)
    {
        SegmentInfo info = infos.Info(i);
        if (info.dir == dir)
            SupportClass.CollectionsSupport.AddAll(info.Files(), files);
    }
    version = infos.GetVersion();
    generation = infos.GetGeneration();
    isOptimized = infos.Count == 1 && !infos.Info(0).HasDeletions();
}
Example 11: SegString
private System.String SegString(SegmentInfos infos)
{
    lock (this)
    {
        System.Text.StringBuilder buffer = new System.Text.StringBuilder();
        int count = infos.Count;
        for (int i = 0; i < count; i++)
        {
            if (i > 0)
            {
                buffer.Append(' ');
            }
            SegmentInfo info = infos.Info(i);
            buffer.Append(info.SegString(directory));
            if (info.dir != directory)
                buffer.Append("**");
        }
        return buffer.ToString();
    }
}
Example 12: AddIndexesNoOptimize
/// <summary> Merges all segments from an array of indexes into this
/// index.
///
/// <p/>This may be used to parallelize batch indexing. A large document
/// collection can be broken into sub-collections. Each sub-collection can be
/// indexed in parallel, on a different thread, process or machine. The
/// complete index can then be created by merging sub-collection indexes
/// with this method.
///
/// <p/><b>NOTE:</b> the index in each Directory must not be
/// changed (opened by a writer) while this method is
/// running. This method does not acquire a write lock in
/// each input Directory, so it is up to the caller to
/// enforce this.
///
/// <p/><b>NOTE:</b> while this is running, any attempts to
/// add or delete documents (with another thread) will be
/// paused until this method completes.
///
/// <p/>This method is transactional in how Exceptions are
/// handled: it does not commit a new segments_N file until
/// all indexes are added. This means if an Exception
/// occurs (for example disk full), then either no indexes
/// will have been added or they all will have been.<p/>
///
/// <p/>Note that this requires temporary free space in the
/// Directory up to 2X the sum of all input indexes
/// (including the starting index). If readers/searchers
/// are open against the starting index, then temporary
/// free space required will be higher by the size of the
/// starting index (see {@link #Optimize()} for details).
/// <p/>
///
/// <p/>Once this completes, the final size of the index
/// will be less than the sum of all input index sizes
/// (including the starting index). It could be quite a
/// bit smaller (if there were many pending deletes) or
/// just slightly smaller.<p/>
///
/// <p/>
/// This requires this index not be among those to be added.
///
/// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See <a
/// href="#OOME">above</a> for details.<p/>
///
/// </summary>
/// <throws> CorruptIndexException if the index is corrupt </throws>
/// <throws> IOException if there is a low-level IO error </throws>
public virtual void AddIndexesNoOptimize(Directory[] dirs)
{
    EnsureOpen();

    NoDupDirs(dirs);

    // Do not allow add docs or deletes while we are running:
    docWriter.PauseAllThreads();

    try
    {
        if (infoStream != null)
            Message("flush at addIndexesNoOptimize");
        Flush(true, false, true);

        bool success = false;

        StartTransaction(false);

        try
        {
            int docCount = 0;
            lock (this)
            {
                EnsureOpen();
                for (int i = 0; i < dirs.Length; i++)
                {
                    if (directory == dirs[i])
                    {
                        // cannot add this index: segments may be deleted in merge before added
                        throw new System.ArgumentException("Cannot add this index to itself");
                    }

                    SegmentInfos sis = new SegmentInfos(); // read infos from dir
                    sis.Read(dirs[i]);
                    for (int j = 0; j < sis.Count; j++)
                    {
                        SegmentInfo info = sis.Info(j);
                        System.Diagnostics.Debug.Assert(!segmentInfos.Contains(info), "dup info dir=" + info.dir + " name=" + info.name);
                        docCount += info.docCount;
                        segmentInfos.Add(info); // add each info
                    }
                }
            }

            // Notify DocumentsWriter that the flushed count just increased
            docWriter.UpdateFlushedDocCount(docCount);
            //......... the rest of the code is omitted here .........
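The XML doc above describes the intended use: merge sub-indexes built in parallel into one index. Below is a minimal caller-side sketch of that scenario, assuming a 2.x-era Lucene.Net IndexWriter API; the directory paths, the SimpleAnalyzer choice and the MergeSubIndexes name are illustrative assumptions, not part of the original example.

using Lucene.Net.Analysis;
using Lucene.Net.Index;
using Lucene.Net.Store;

public static class AddIndexesSketch
{
    // Illustrative only: merges two independently built sub-indexes into a main index.
    public static void MergeSubIndexes()
    {
        Directory mainDir = FSDirectory.GetDirectory("/path/to/main-index"); // illustrative path
        Directory part1 = FSDirectory.GetDirectory("/path/to/sub-index-1");  // illustrative path
        Directory part2 = FSDirectory.GetDirectory("/path/to/sub-index-2");  // illustrative path

        IndexWriter writer = new IndexWriter(mainDir, new SimpleAnalyzer(), false, IndexWriter.MaxFieldLength.UNLIMITED);
        try
        {
            // The sub-index directories must not be open by another writer while this runs.
            writer.AddIndexesNoOptimize(new Directory[] { part1, part2 });
        }
        finally
        {
            writer.Close();
        }
    }
}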
Example 13: SetRollbackSegmentInfos
private void SetRollbackSegmentInfos(SegmentInfos infos)
{
    lock (this)
    {
        rollbackSegmentInfos = (SegmentInfos) infos.Clone();
        System.Diagnostics.Debug.Assert(!rollbackSegmentInfos.HasExternalSegments(directory));
        rollbackSegments = new System.Collections.Hashtable();
        int size = rollbackSegmentInfos.Count;
        for (int i = 0; i < size; i++)
            rollbackSegments[rollbackSegmentInfos.Info(i)] = (System.Int32) i;
    }
}
Example 14: FindMerges
/// <summary>Checks if any merges are now necessary and returns a
/// {@link MergePolicy.MergeSpecification} if so. A merge
/// is necessary when there are more than {@link
/// #setMergeFactor} segments at a given level. When
/// multiple levels have too many segments, this method
/// will return multiple merges, allowing the {@link
/// MergeScheduler} to use concurrency.
/// </summary>
public override MergeSpecification FindMerges(SegmentInfos infos)
{
    int numSegments = infos.Count;
    if (Verbose())
        Message("findMerges: " + numSegments + " segments");

    // Compute levels, which is just log (base mergeFactor)
    // of the size of each segment
    float[] levels = new float[numSegments];
    float norm = (float) System.Math.Log(mergeFactor);

    for (int i = 0; i < numSegments; i++)
    {
        SegmentInfo info = infos.Info(i);
        long size = Size(info);

        // Floor tiny segments
        if (size < 1)
            size = 1;
        levels[i] = (float) System.Math.Log(size) / norm;
    }

    float levelFloor;
    if (minMergeSize <= 0)
        levelFloor = (float) 0.0;
    else
    {
        levelFloor = (float) (System.Math.Log(minMergeSize) / norm);
    }

    // Now, we quantize the log values into levels. The
    // first level is any segment whose log size is within
    // LEVEL_LOG_SPAN of the max size, or, who has such as
    // segment "to the right". Then, we find the max of all
    // other segments and use that to define the next level
    // segment, etc.

    MergeSpecification spec = null;

    int start = 0;
    while (start < numSegments)
    {
        // Find max level of all segments not already
        // quantized.
        float maxLevel = levels[start];
        for (int i = 1 + start; i < numSegments; i++)
        {
            float level = levels[i];
            if (level > maxLevel)
                maxLevel = level;
        }

        // Now search backwards for the rightmost segment that
        // falls into this level:
        float levelBottom;
        if (maxLevel < levelFloor)
            // All remaining segments fall into the min level
            levelBottom = - 1.0F;
        else
        {
            levelBottom = (float) (maxLevel - LEVEL_LOG_SPAN);

            // Force a boundary at the level floor
            if (levelBottom < levelFloor && maxLevel >= levelFloor)
                levelBottom = levelFloor;
        }

        int upto = numSegments - 1;
        while (upto >= start)
        {
            if (levels[upto] >= levelBottom)
            {
                break;
            }
            upto--;
        }

        if (Verbose())
            Message(" level " + levelBottom + " to " + maxLevel + ": " + (1 + upto - start) + " segments");

        // Finally, record all merges that are viable at this level:
        int end = start + mergeFactor;
        while (end <= 1 + upto)
        {
            bool anyTooLarge = false;
            for (int i = start; i < end; i++)
            {
                SegmentInfo info = infos.Info(i);
                anyTooLarge |= (Size(info) >= maxMergeSize || SizeDocs(info) >= maxMergeDocs);
            }
            //......... the rest of the code is omitted here .........
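To make the level computation above concrete, a worked example with illustrative numbers that are not from the original page: with mergeFactor = 10, norm = ln 10, so levels[i] is simply log10 of the segment size. Segments of 1 000 000, 9 500 and 1 000 bytes (largest first, as in a typical index) get levels 6.0, roughly 3.98, and 3.0. LEVEL_LOG_SPAN is 0.75 in LogMergePolicy, so the first pass sees maxLevel = 6.0 and levelBottom = 5.25; searching backwards, only the first segment reaches that level (upto = 0), and since one segment is fewer than mergeFactor no merge is recorded there. The omitted remainder of the method then advances start past upto and repeats the process for the smaller segments.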
Example 15: FindMergesForOptimize
/// <summary>Returns the merges necessary to optimize the index.
/// This merge policy defines "optimized" to mean only one
/// segment in the index, where that segment has no
/// deletions pending nor separate norms, and it is in
/// compound file format if the current useCompoundFile
/// setting is true. This method returns multiple merges
/// (mergeFactor at a time) so the {@link MergeScheduler}
/// in use may make use of concurrency.
/// </summary>
public override MergeSpecification FindMergesForOptimize(SegmentInfos infos, int maxNumSegments, System.Collections.Hashtable segmentsToOptimize)
{
    MergeSpecification spec;

    System.Diagnostics.Debug.Assert(maxNumSegments > 0);

    if (!IsOptimized(infos, maxNumSegments, segmentsToOptimize))
    {
        // Find the newest (rightmost) segment that needs to
        // be optimized (other segments may have been flushed
        // since optimize started):
        int last = infos.Count;
        while (last > 0)
        {
            SegmentInfo info = infos.Info(--last);
            if (segmentsToOptimize.Contains(info))
            {
                last++;
                break;
            }
        }

        if (last > 0)
        {
            spec = new MergeSpecification();

            // First, enroll all "full" merges (size
            // mergeFactor) to potentially be run concurrently:
            while (last - maxNumSegments + 1 >= mergeFactor)
            {
                spec.Add(MakeOneMerge(infos, infos.Range(last - mergeFactor, last)));
                last -= mergeFactor;
            }

            // Only if there are no full merges pending do we
            // add a final partial (< mergeFactor segments) merge:
            if (0 == spec.merges.Count)
            {
                if (maxNumSegments == 1)
                {
                    // Since we must optimize down to 1 segment, the
                    // choice is simple:
                    if (last > 1 || !IsOptimized(infos.Info(0)))
                        spec.Add(MakeOneMerge(infos, infos.Range(0, last)));
                }
                else if (last > maxNumSegments)
                {
                    // Take care to pick a partial merge that is
                    // least cost, but does not make the index too
                    // lopsided. If we always just picked the
                    // partial tail then we could produce a highly
                    // lopsided index over time:

                    // We must merge this many segments to leave
                    // maxNumSegments in the index (from when
                    // optimize was first kicked off):
                    int finalMergeSize = last - maxNumSegments + 1;

                    // Consider all possible starting points:
                    long bestSize = 0;
                    int bestStart = 0;

                    for (int i = 0; i < last - finalMergeSize + 1; i++)
                    {
                        long sumSize = 0;
                        for (int j = 0; j < finalMergeSize; j++)
                            sumSize += Size(infos.Info(j + i));
                        if (i == 0 || (sumSize < 2 * Size(infos.Info(i - 1)) && sumSize < bestSize))
                        {
                            bestStart = i;
                            bestSize = sumSize;
                        }
                    }

                    spec.Add(MakeOneMerge(infos, infos.Range(bestStart, bestStart + finalMergeSize)));
                }
            }
        }
        else
            spec = null;
    }
    else
        spec = null;

    return spec;
}
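A worked example of the enrollment loop above, with illustrative numbers that are not from the original page: suppose mergeFactor = 10, maxNumSegments = 1 and last = 23 segments are marked for optimization. The while condition (last - 1 + 1 >= 10) enrolls Range(13, 23) and then Range(3, 13), leaving last = 3; the condition then fails, and because full merges are already pending no partial merge is added in this call. Once those merges complete, the optimize loop asks the policy again, so the remaining segments are merged down toward a single segment in a later round.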