This article collects typical usage examples of the C# Lucene.Net.Index.MergePolicy class. If you have been wondering what the MergePolicy class does, how it is used, and what real code that uses it looks like, the curated class examples below should help.
The MergePolicy class belongs to the Lucene.Net.Index namespace. Fifteen code examples involving MergePolicy are shown below, ordered roughly by popularity.
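Before the examples, note how a MergePolicy usually enters the picture: application code rarely calls it directly; instead it assigns one to the configuration used to open an IndexWriter. The sketch below assumes the Lucene.NET 4.8 API (LuceneVersion, StandardAnalyzer, TieredMergePolicy, and a property-style MergePolicy setter on IndexWriterConfig); if your version exposes a SetMergePolicy method instead of a property, use that.

// Minimal sketch (assumed Lucene.NET 4.8 API): pick a MergePolicy up front
// and hand it to the IndexWriter through its configuration.
using Lucene.Net.Analysis.Standard;
using Lucene.Net.Index;
using Lucene.Net.Store;
using Lucene.Net.Util;

public static class MergePolicySetupSketch
{
    public static IndexWriter OpenWriter(Directory dir)
    {
        var analyzer = new StandardAnalyzer(LuceneVersion.LUCENE_48);
        var config = new IndexWriterConfig(LuceneVersion.LUCENE_48, analyzer)
        {
            // TieredMergePolicy is the usual default; setting it explicitly
            // documents the choice and gives you a place to tune it.
            MergePolicy = new TieredMergePolicy()
        };
        return new IndexWriter(dir, config);
    }
}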
Example 1: MergeDocIDRemapper
internal int docShift; // total # deleted docs that were compacted by this merge
internal int[][] docMaps;
internal int minDocID;
internal int maxDocID;
internal int[] starts;    // starting docID of each merged segment, before the merge
internal int[] newStarts; // starts, minus the deletes

public MergeDocIDRemapper(SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergedDocCount)
{
    this.docMaps = docMaps;
    SegmentInfo firstSegment = merge.segments.Info(0);
    int i = 0;
    while (true)
    {
        SegmentInfo info = infos.Info(i);
        if (info.Equals(firstSegment))
            break;
        minDocID += info.docCount;
        i++;
    }

    int numDocs = 0;
    for (int j = 0; j < docMaps.Length; i++, j++)
    {
        numDocs += infos.Info(i).docCount;
        System.Diagnostics.Debug.Assert(infos.Info(i).Equals(merge.segments.Info(j)));
    }
    maxDocID = minDocID + numDocs;

    starts = new int[docMaps.Length];
    newStarts = new int[docMaps.Length];
    starts[0] = minDocID;
    newStarts[0] = minDocID;
    for (i = 1; i < docMaps.Length; i++)
    {
        int lastDocCount = merge.segments.Info(i - 1).docCount;
        starts[i] = starts[i - 1] + lastDocCount;
        newStarts[i] = newStarts[i - 1] + lastDocCount - delCounts[i - 1];
    }
    docShift = numDocs - mergedDocCount;

    // There are rare cases when docShift is 0. It happens
    // if you try to delete a docID that's out of bounds,
    // because the SegmentReader still allocates deletedDocs
    // and pretends it has deletions ... so we can't make
    // this assert here:
    // assert docShift > 0;

    // Make sure it all adds up:
    System.Diagnostics.Debug.Assert(docShift == maxDocID - (newStarts[docMaps.Length - 1] + merge.segments.Info(docMaps.Length - 1).docCount - delCounts[docMaps.Length - 1]));
}
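To make the bookkeeping above concrete, here is a small self-contained illustration with hypothetical numbers (not Lucene.NET code): two segments of 100 docs each, carrying 10 and 5 deletes, merged starting at docID 1000.

using System;

public static class DocShiftIllustration
{
    public static void Main()
    {
        // Hypothetical inputs: two segments being merged.
        int minDocID = 1000;             // docID where the first merged segment starts
        int[] docCounts = { 100, 100 };  // docs per segment, including deleted ones
        int[] delCounts = { 10, 5 };     // deleted docs per segment
        int mergedDocCount = 185;        // live docs in the resulting merged segment

        // starts: old docID boundaries; newStarts: boundaries after deletes are compacted away.
        int[] starts = { minDocID, minDocID + docCounts[0] };                    // { 1000, 1100 }
        int[] newStarts = { minDocID, minDocID + docCounts[0] - delCounts[0] };  // { 1000, 1090 }

        int numDocs = docCounts[0] + docCounts[1];  // 200 docs went into the merge
        int docShift = numDocs - mergedDocCount;    // 15 deleted docs were compacted by this merge

        Console.WriteLine($"starts={string.Join(",", starts)} newStarts={string.Join(",", newStarts)} docShift={docShift}");
    }
}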
Example 2: AddMergeException
internal virtual void AddMergeException(MergePolicy.OneMerge merge)
{
    lock (this)
    {
        System.Diagnostics.Debug.Assert(merge.GetException() != null);
        if (!mergeExceptions.Contains(merge) && mergeGen == merge.mergeGen)
            mergeExceptions.Add(merge);
    }
}
Example 3: DoMerge
protected override void DoMerge(MergePolicy.OneMerge merge)
{
    TotMergedBytes += merge.TotalBytesSize();
    base.DoMerge(merge);
}
Example 4: CheckAbort
public CheckAbort(MergePolicy.OneMerge merge, Directory dir)
{
    this.merge = merge;
    this.dir = dir;
}
Example 5: UpgradeIndexMergePolicy
/// <summary>
/// Wrap the given <seealso cref="MergePolicy"/> and intercept forceMerge requests to
/// only upgrade segments written with previous Lucene versions.
/// </summary>
public UpgradeIndexMergePolicy(MergePolicy @base)
{
    this.@base = @base;
}
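A hedged usage sketch for the wrapper above, assuming the Lucene.NET 4.8 API and pre-existing analyzer and oldIndexDir variables: wrap the regular policy so that a ForceMerge request only rewrites segments written by earlier Lucene versions.

// Sketch only: upgrade old-format segments in place by wrapping the regular policy.
var config = new IndexWriterConfig(LuceneVersion.LUCENE_48, analyzer)
{
    MergePolicy = new UpgradeIndexMergePolicy(new TieredMergePolicy())
};
using (var writer = new IndexWriter(oldIndexDir, config))
{
    // ForceMerge requests are intercepted; only segments written by
    // previous Lucene versions are selected for rewriting.
    writer.ForceMerge(1);
}

Lucene 4.x also ships an IndexUpgrader tool built on this same policy, which is often more convenient than wiring it up by hand.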
Example 6: RemapDeletes
/// <summary>Called whenever a merge has completed and the merged segments had deletions </summary>
internal void RemapDeletes(SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergeDocCount)
{
    lock (this)
    {
        if (docMaps == null)
        {
            // The merged segments had no deletes so docIDs did not change and we have nothing to do
            return;
        }
        MergeDocIDRemapper mapper = new MergeDocIDRemapper(infos, docMaps, delCounts, merge, mergeDocCount);
        deletesInRAM.Remap(mapper, infos, docMaps, delCounts, merge, mergeDocCount);
        deletesFlushed.Remap(mapper, infos, docMaps, delCounts, merge, mergeDocCount);
        flushedDocCount -= mapper.docShift;
    }
}
Example 7: MergeThread
public MergeThread(ConcurrentMergeScheduler enclosingInstance, IndexWriter writer, MergePolicy.OneMerge startMerge)
{
    InitBlock(enclosingInstance);
    this.writer = writer;
    this.startMerge = startMerge;
}
Example 8: DoMerge
/// <summary>Does the actual merge, by calling <see cref="IndexWriter.Merge(MergePolicy.OneMerge)"/>.</summary>
protected internal virtual void DoMerge(MergePolicy.OneMerge merge)
{
    writer.Merge(merge);
}
Example 9: Merge
/// <summary>
/// Merges the indicated segments, replacing them in the stack with a
/// single segment.
///
/// @lucene.experimental
/// </summary>
public virtual void Merge(MergePolicy.OneMerge merge)
{
    bool success = false;
    long t0 = Environment.TickCount; // elapsed-time baseline (DateTime.Now.Millisecond would only return the 0-999 ms component)
    try
    {
        try
        {
            try
            {
                MergeInit(merge);
                //if (merge.info != null) {
                //System.out.println("MERGE: " + merge.info.info.name);
                //}
                if (infoStream.IsEnabled("IW"))
                {
                    infoStream.Message("IW", "now merge\n merge=" + SegString(merge.Segments) + "\n index=" + SegString());
                }
                MergeMiddle(merge);
                MergeSuccess(merge);
                success = true;
            }
            catch (Exception t)
            {
                HandleMergeException(t, merge);
            }
        }
        finally
        {
            lock (this)
            {
                MergeFinish(merge);
                if (!success)
                {
                    if (infoStream.IsEnabled("IW"))
                    {
                        infoStream.Message("IW", "hit exception during merge");
                    }
                    if (merge.Info_Renamed != null && !segmentInfos.Contains(merge.Info_Renamed))
                    {
                        Deleter.Refresh(merge.Info_Renamed.Info.Name);
                    }
                }
                // this merge (and, generally, any change to the
                // segments) may now enable new merges, so we call
                // merge policy & update pending merges.
                if (success && !merge.Aborted && (merge.MaxNumSegments != -1 || (!closed && !Closing)))
                {
                    UpdatePendingMerges(MergeTrigger.MERGE_FINISHED, merge.MaxNumSegments);
                }
            }
        }
    }
    catch (System.OutOfMemoryException oom)
    {
        HandleOOM(oom, "merge");
    }
    if (merge.Info_Renamed != null && !merge.Aborted)
    {
        if (infoStream.IsEnabled("IW"))
        {
            infoStream.Message("IW", "merge time " + (Environment.TickCount - t0) + " msec for " + merge.Info_Renamed.Info.DocCount + " docs");
        }
    }
}
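Application code does not normally call Merge(OneMerge) directly; the merge scheduler invokes it. What callers typically do is nudge the writer through its public surface, roughly as sketched below (assuming an existing IndexWriter named writer and the Lucene.NET 4.8 API).

// Sketch: the usual ways application code causes merges to run.
writer.MaybeMerge();        // ask the MergePolicy whether any merges are now warranted
writer.ForceMerge(1);       // force-merge down to one segment (expensive; use sparingly)
writer.ForceMergeDeletes(); // merge away segments carrying deletes, per the policy's threshold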
Example 10: HandleMergeException
private void HandleMergeException(Exception t, MergePolicy.OneMerge merge)
{
    if (infoStream.IsEnabled("IW"))
    {
        infoStream.Message("IW", "handleMergeException: merge=" + SegString(merge.Segments) + " exc=" + t);
    }
    // Set the exception on the merge, so if
    // forceMerge is waiting on us it sees the root
    // cause exception:
    merge.Exception = t;
    AddMergeException(merge);
    if ((t as MergePolicy.MergeAbortedException) != null)
    {
        // We can ignore this exception (it happens when
        // close(false) or rollback is called), unless the
        // merge involves segments from external directories,
        // in which case we must throw it so, for example, the
        // rollbackTransaction code in addIndexes* is
        // executed.
        if (merge.IsExternal)
        {
            throw t;
        }
    }
    else
    {
        IOUtils.ReThrow(t);
    }
}
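For context, the MergeAbortedException branch above is what fires when the writer is shut down or rolled back while merges are still running; a sketch of the calling side, assuming an existing IndexWriter named writer and the Lucene.NET 4.8 API:

// Sketch: either of these aborts in-flight merges, which each merge thread
// observes as MergePolicy.MergeAbortedException and HandleMergeException swallows
// (unless the merge pulls in segments from external directories).
writer.Dispose(false);  // false = do not wait for running merges; ask them to abort
// or:
// writer.Rollback();   // discard all changes since the last commit and abort merges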
Example 11: CommitMerge
private bool CommitMerge(MergePolicy.OneMerge merge, MergeState mergeState)
{
    lock (this)
    {
        Debug.Assert(TestPoint("startCommitMerge"));
        if (HitOOM)
        {
            throw new InvalidOperationException("this writer hit an OutOfMemoryError; cannot complete merge");
        }
        if (infoStream.IsEnabled("IW"))
        {
            infoStream.Message("IW", "commitMerge: " + SegString(merge.Segments) + " index=" + SegString());
        }
        Debug.Assert(merge.RegisterDone);
        // If merge was explicitly aborted, or, if rollback() or
        // rollbackTransaction() had been called since our merge
        // started (which results in an unqualified
        // deleter.refresh() call that will remove any index
        // file that current segments does not reference), we
        // abort this merge
        if (merge.Aborted)
        {
            if (infoStream.IsEnabled("IW"))
            {
                infoStream.Message("IW", "commitMerge: skip: it was aborted");
            }
            // In case we opened and pooled a reader for this
            // segment, drop it now. this ensures that we close
            // the reader before trying to delete any of its
            // files. this is not a very big deal, since this
            // reader will never be used by any NRT reader, and
            // another thread is currently running close(false)
            // so it will be dropped shortly anyway, but not
            // doing this makes MockDirWrapper angry in
            // TestNRTThreads (LUCENE-5434):
            readerPool.Drop(merge.Info_Renamed);
            Deleter.DeleteNewFiles(merge.Info_Renamed.Files());
            return false;
        }
        ReadersAndUpdates mergedUpdates = merge.Info_Renamed.Info.DocCount == 0 ? null : CommitMergedDeletesAndUpdates(merge, mergeState);
        // System.out.println("[" + Thread.currentThread().getName() + "] IW.commitMerge: mergedDeletes=" + mergedDeletes);
        // If the doc store we are using has been closed and
        // is in now compound format (but wasn't when we
        // started), then we will switch to the compound
        // format as well:
        Debug.Assert(!segmentInfos.Contains(merge.Info_Renamed));
        bool allDeleted = merge.Segments.Count == 0 || merge.Info_Renamed.Info.DocCount == 0 || (mergedUpdates != null && mergedUpdates.PendingDeleteCount == merge.Info_Renamed.Info.DocCount);
        if (infoStream.IsEnabled("IW"))
        {
            if (allDeleted)
            {
                infoStream.Message("IW", "merged segment " + merge.Info_Renamed + " is 100% deleted" + (KeepFullyDeletedSegments_Renamed ? "" : "; skipping insert"));
            }
        }
        bool dropSegment = allDeleted && !KeepFullyDeletedSegments_Renamed;
        // If we merged no segments then we better be dropping
        // the new segment:
        Debug.Assert(merge.Segments.Count > 0 || dropSegment);
        Debug.Assert(merge.Info_Renamed.Info.DocCount != 0 || KeepFullyDeletedSegments_Renamed || dropSegment);
        if (mergedUpdates != null)
        {
            bool success = false;
            try
            {
                if (dropSegment)
                {
                    mergedUpdates.DropChanges();
                }
                // Pass false for assertInfoLive because the merged
                // segment is not yet live (only below do we commit it
                // to the segmentInfos):
                readerPool.Release(mergedUpdates, false);
                success = true;
            }
            finally
            {
                if (!success)
                {
                    mergedUpdates.DropChanges();
                    readerPool.Drop(merge.Info_Renamed);
                }
            }
        }
        // Must do this after readerPool.release, in case an
        // exception is hit e.g. writing the live docs for the
        // merge segment, in which case we need to abort the
//......... part of this method omitted here .........
Example 12: CommitMergedDeletesAndUpdates
/// <summary>
/// Carefully merges deletes and updates for the segments we just merged. This
/// is tricky because, although merging will clear all deletes (compacts the
/// documents) and compact all the updates, new deletes and updates may have
/// been flushed to the segments since the merge was started. This method
/// "carries over" such new deletes and updates onto the newly merged segment,
/// and saves the resulting deletes and updates files (incrementing the delete
/// and DV generations for merge.info). If no deletes were flushed, no new
/// deletes file is saved.
/// </summary>
private ReadersAndUpdates CommitMergedDeletesAndUpdates(MergePolicy.OneMerge merge, MergeState mergeState)
{
    lock (this)
    {
        Debug.Assert(TestPoint("startCommitMergeDeletes"));
        IList<SegmentCommitInfo> sourceSegments = merge.Segments;
        if (infoStream.IsEnabled("IW"))
        {
            infoStream.Message("IW", "commitMergeDeletes " + SegString(merge.Segments));
        }
        // Carefully merge deletes that occurred after we
        // started merging:
        int docUpto = 0;
        long minGen = long.MaxValue;
        // Lazy init (only when we find a delete to carry over):
        MergedDeletesAndUpdates holder = new MergedDeletesAndUpdates();
        DocValuesFieldUpdates.Container mergedDVUpdates = new DocValuesFieldUpdates.Container();
        for (int i = 0; i < sourceSegments.Count; i++)
        {
            SegmentCommitInfo info = sourceSegments[i];
            minGen = Math.Min(info.BufferedDeletesGen, minGen);
            int docCount = info.Info.DocCount;
            Bits prevLiveDocs = merge.Readers[i].LiveDocs;
            ReadersAndUpdates rld = readerPool.Get(info, false);
            // We hold a ref so it should still be in the pool:
            Debug.Assert(rld != null, "seg=" + info.Info.Name);
            Bits currentLiveDocs = rld.LiveDocs;
            IDictionary<string, DocValuesFieldUpdates> mergingFieldUpdates = rld.MergingFieldUpdates;
            string[] mergingFields;
            DocValuesFieldUpdates[] dvFieldUpdates;
            DocValuesFieldUpdates.Iterator[] updatesIters;
            if (mergingFieldUpdates.Count == 0)
            {
                mergingFields = null;
                updatesIters = null;
                dvFieldUpdates = null;
            }
            else
            {
                mergingFields = new string[mergingFieldUpdates.Count];
                dvFieldUpdates = new DocValuesFieldUpdates[mergingFieldUpdates.Count];
                updatesIters = new DocValuesFieldUpdates.Iterator[mergingFieldUpdates.Count];
                int idx = 0;
                foreach (KeyValuePair<string, DocValuesFieldUpdates> e in mergingFieldUpdates)
                {
                    string field = e.Key;
                    DocValuesFieldUpdates updates = e.Value;
                    mergingFields[idx] = field;
                    dvFieldUpdates[idx] = mergedDVUpdates.GetUpdates(field, updates.Type);
                    if (dvFieldUpdates[idx] == null)
                    {
                        dvFieldUpdates[idx] = mergedDVUpdates.NewUpdates(field, updates.Type, mergeState.SegmentInfo.DocCount);
                    }
                    updatesIters[idx] = updates.GetIterator();
                    updatesIters[idx].NextDoc(); // advance to first update doc
                    ++idx;
                }
            }
            // System.out.println("[" + Thread.currentThread().getName() + "] IW.commitMergedDeletes: info=" + info + ", mergingUpdates=" + mergingUpdates);
            if (prevLiveDocs != null)
            {
                // If we had deletions on starting the merge we must
                // still have deletions now:
                Debug.Assert(currentLiveDocs != null);
                Debug.Assert(prevLiveDocs.Length() == docCount);
                Debug.Assert(currentLiveDocs.Length() == docCount);
                // There were deletes on this segment when the merge
                // started. The merge has collapsed away those
                // deletes, but, if new deletes were flushed since
                // the merge started, we must now carefully keep any
                // newly flushed deletes but mapping them to the new
                // docIDs.
                // Since we copy-on-write, if any new deletes were
                // applied after merging has started, we can just
                // check if the before/after liveDocs have changed.
                // If so, we must carefully merge the liveDocs one
                // doc at a time:
                if (currentLiveDocs != prevLiveDocs)
                {
                    // this means this segment received new deletes
                    // since we started the merge, so we
                    // must merge them:
//......... part of this method omitted here .........
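The "carry over" described in the summary above can be pictured with a tiny, purely hypothetical example (not Lucene.NET code): compare the liveDocs snapshot taken when the merge started with the current liveDocs, and re-express every newly flushed delete against the compacted docIDs of the merged segment.

using System;

public static class CarryOverSketch
{
    public static void Main()
    {
        // Hypothetical liveDocs for one source segment (true = live).
        bool[] prevLiveDocs    = { true, false, true, true };  // snapshot when the merge started
        bool[] currentLiveDocs = { true, false, false, true }; // doc 2 was deleted while merging

        int newDocID = 0; // docID inside the merged segment
        for (int doc = 0; doc < prevLiveDocs.Length; doc++)
        {
            if (!prevLiveDocs[doc])
            {
                continue; // deleted before the merge started: compacted away, nothing to carry over
            }
            if (!currentLiveDocs[doc])
            {
                // Newly flushed delete: record it against the merged segment's docID space.
                Console.WriteLine($"carry over delete: old doc {doc} -> merged doc {newDocID}");
            }
            newDocID++; // every doc that was live at merge start gets a slot in the merged segment
        }
    }
}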
Example 13: MaybeApplyMergedDVUpdates
private void MaybeApplyMergedDVUpdates(MergePolicy.OneMerge merge, MergeState mergeState, int docUpto, MergedDeletesAndUpdates holder, string[] mergingFields, DocValuesFieldUpdates[] dvFieldUpdates, DocValuesFieldUpdates.Iterator[] updatesIters, int curDoc)
{
    int newDoc = -1;
    for (int idx = 0; idx < mergingFields.Length; idx++)
    {
        DocValuesFieldUpdates.Iterator updatesIter = updatesIters[idx];
        if (updatesIter.Doc() == curDoc) // document has an update
        {
            if (holder.MergedDeletesAndUpdates_Renamed == null)
            {
                holder.Init(readerPool, merge, mergeState, false);
            }
            if (newDoc == -1) // map once per all field updates, but only if there are any updates
            {
                newDoc = holder.DocMap.Map(docUpto);
            }
            DocValuesFieldUpdates dvUpdates = dvFieldUpdates[idx];
            dvUpdates.Add(newDoc, updatesIter.Value());
            updatesIter.NextDoc(); // advance to next document
        }
        else
        {
            Debug.Assert(updatesIter.Doc() > curDoc, "field=" + mergingFields[idx] + " updateDoc=" + updatesIter.Doc() + " curDoc=" + curDoc);
        }
    }
}
Example 14: Init
internal void Init(ReaderPool readerPool, MergePolicy.OneMerge merge, MergeState mergeState, bool initWritableLiveDocs)
{
    if (MergedDeletesAndUpdates_Renamed == null)
    {
        MergedDeletesAndUpdates_Renamed = readerPool.Get(merge.Info_Renamed, true);
        DocMap = merge.GetDocMap(mergeState);
        Debug.Assert(DocMap.IsConsistent(merge.Info_Renamed.Info.DocCount));
    }
    if (initWritableLiveDocs && !InitializedWritableLiveDocs)
    {
        MergedDeletesAndUpdates_Renamed.InitWritableLiveDocs();
        this.InitializedWritableLiveDocs = true;
    }
}
Example 15: EnsureValidMerge
private void EnsureValidMerge(MergePolicy.OneMerge merge)
{
    lock (this)
    {
        foreach (SegmentCommitInfo info in merge.Segments)
        {
            if (!segmentInfos.Contains(info))
            {
                throw new MergePolicy.MergeException("MergePolicy selected a segment (" + info.Info.Name + ") that is not in the current index " + SegString(), directory);
            }
        }
    }
}