本文整理汇总了C#中Lucene.Net.Index.MergePolicy.CheckAborted方法的典型用法代码示例。如果您正苦于以下问题：C# MergePolicy.CheckAborted方法的具体用法？C# MergePolicy.CheckAborted怎么用？C# MergePolicy.CheckAborted使用的例子？那么，恭喜您，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 Lucene.Net.Index.MergePolicy 的用法示例。
在下文中一共展示了MergePolicy.CheckAborted方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: MergeMiddle
/// <summary>
/// Does the actual (time-consuming) work of the merge,
/// but without holding synchronized lock on IndexWriter
/// instance
/// </summary>
// NOTE(review): snippet from a Lucene.NET 4.x IndexWriter — the tail of the
// method (merge execution, commit, cleanup) is omitted by the source page.
private int MergeMiddle(MergePolicy.OneMerge merge)
{
// Throws if this merge was aborted (e.g. by rollback/close) before doing any work.
merge.CheckAborted(directory);
string mergedName = merge.Info_Renamed.Info.Name;
IList<SegmentCommitInfo> sourceSegments = merge.Segments;
IOContext context = new IOContext(merge.MergeInfo);
// checkAbort lets the SegmentMerger periodically poll for abort during the merge.
MergeState.CheckAbort checkAbort = new MergeState.CheckAbort(merge, directory);
// Tracks which files the merge creates so they can be managed afterwards.
TrackingDirectoryWrapper dirWrapper = new TrackingDirectoryWrapper(directory);
if (infoStream.IsEnabled("IW"))
{
infoStream.Message("IW", "merging " + SegString(merge.Segments));
}
merge.Readers = new List<SegmentReader>();
// this is try/finally to make sure merger's readers are
// closed:
bool success = false;
try
{
int segUpto = 0;
while (segUpto < sourceSegments.Count)
{
SegmentCommitInfo info = sourceSegments[segUpto];
// Hold onto the "live" reader; we will use this to
// commit merged deletes
ReadersAndUpdates rld = readerPool.Get(info, true);
// Carefully pull the most recent live docs and reader
SegmentReader reader;
Bits liveDocs;
int delCount;
lock (this)
{
// Must sync to ensure BufferedDeletesStream cannot change liveDocs,
// pendingDeleteCount and field updates while we pull a copy:
reader = rld.GetReaderForMerge(context);
liveDocs = rld.ReadOnlyLiveDocs;
// Total deletions = deletes already committed on the segment + pending ones.
delCount = rld.PendingDeleteCount + info.DelCount;
Debug.Assert(reader != null);
Debug.Assert(rld.VerifyDocCounts());
if (infoStream.IsEnabled("IW"))
{
if (rld.PendingDeleteCount != 0)
{
infoStream.Message("IW", "seg=" + SegString(info) + " delCount=" + info.DelCount + " pendingDelCount=" + rld.PendingDeleteCount);
}
else if (info.DelCount != 0)
{
infoStream.Message("IW", "seg=" + SegString(info) + " delCount=" + info.DelCount);
}
else
{
infoStream.Message("IW", "seg=" + SegString(info) + " no deletes");
}
}
}
// Deletes might have happened after we pulled the merge reader and
// before we got a read-only copy of the segment's actual live docs
// (taking pending deletes into account). In that case we need to
// make a new reader with updated live docs and del count.
if (reader.NumDeletedDocs() != delCount)
{
// fix the reader's live docs and del count
Debug.Assert(delCount > reader.NumDeletedDocs()); // beware of zombies
SegmentReader newReader = new SegmentReader(info, reader, liveDocs, info.Info.DocCount - delCount);
bool released = false;
try
{
rld.Release(reader);
released = true;
}
finally
{
// If releasing the old reader threw, undo the ref we took on the new one
// so it is not leaked.
if (!released)
{
newReader.DecRef();
}
}
reader = newReader;
}
merge.Readers.Add(reader);
//......... the remainder of the code is omitted here .........
示例2: MergeMiddle
/// <summary>Does the actual (time-consuming) work of the merge,
/// but without holding synchronized lock on IndexWriter
/// instance
/// </summary>
// NOTE(review): snippet from an older (doc-store era) Lucene.NET IndexWriter;
// the method body is cut off below by the source page.
private int MergeMiddle(MergePolicy.OneMerge merge)
{
// Bail out immediately if the merge was already aborted.
merge.CheckAborted(directory);
System.String mergedName = merge.info.name;
SegmentMerger merger = null;
int mergedDocCount = 0;
SegmentInfos sourceSegments = merge.segments;
int numSegments = sourceSegments.Count;
if (infoStream != null)
Message("merging " + merge.SegString(directory));
merger = new SegmentMerger(this, mergedName, merge);
merge.readers = new SegmentReader[numSegments];
merge.readersClone = new SegmentReader[numSegments];
bool mergeDocStores = false;
System.Collections.Hashtable dss = new System.Collections.Hashtable();
String currentDocStoreSegment;
// Sync to get a consistent snapshot of the doc writer's current doc store segment.
lock(this) {
currentDocStoreSegment = docWriter.GetDocStoreSegment();
}
bool currentDSSMerged = false;
// This is try/finally to make sure merger's readers are
// closed:
bool success = false;
try
{
int totDocCount = 0;
for (int i = 0; i < numSegments; i++)
{
SegmentInfo info = sourceSegments.Info(i);
// Hold onto the "live" reader; we will use this to
// commit merged deletes
SegmentReader reader = merge.readers[i] = readerPool.Get(info, merge.mergeDocStores, MERGE_READ_BUFFER_SIZE, -1);
// We clone the segment readers because other
// deletes may come in while we're merging so we
// need readers that will not change
SegmentReader clone = merge.readersClone[i] = (SegmentReader)reader.Clone(true);
merger.Add(clone);
if (clone.HasDeletions())
{
mergeDocStores = true;
}
if (info.GetDocStoreOffset() != -1 && currentDocStoreSegment != null)
{
// Record whether any source segment shares the doc store currently being written.
currentDSSMerged |= currentDocStoreSegment.Equals(info.GetDocStoreSegment());
}
totDocCount += clone.NumDocs();
}
if (infoStream != null)
{
Message("merge: total " + totDocCount + " docs");
}
// Re-check for abort after opening all readers, before the heavy work.
merge.CheckAborted(directory);
// If deletions have arrived and it has now become
// necessary to merge doc stores, go and open them:
if (mergeDocStores && !merge.mergeDocStores)
{
merge.mergeDocStores = true;
lock (this)
{
if (currentDSSMerged)
{
if (infoStream != null)
{
Message("now flush at mergeMiddle");
}
DoFlush(true, false);
}
}
for (int i = 0; i < numSegments; i++)
{
merge.readersClone[i].OpenDocStores();
}
//......... the remainder of the code is omitted here .........
示例3: MergeMiddle
/** Does the actual (time-consuming) work of the merge,
 * but without holding synchronized lock on IndexWriter
 * instance */
// NOTE(review): snippet from an early Lucene.NET IndexWriter (autoCommit era);
// the catch/cleanup tail of the method is omitted by the source page.
private int MergeMiddle(MergePolicy.OneMerge merge)
{
// Abort check before doing any work.
merge.CheckAborted(directory);
string mergedName = merge.info.name;
SegmentMerger merger = null;
int mergedDocCount = 0;
SegmentInfos sourceSegments = merge.segments;
SegmentInfos sourceSegmentsClone = merge.segmentsClone;
int numSegments = sourceSegments.Count;
if (infoStream != null)
Message("merging " + merge.SegString(directory));
merger = new SegmentMerger(this, mergedName, merge);
bool success = false;
// This is try/finally to make sure merger's readers are
// closed:
try
{
int totDocCount = 0;
for (int i = 0; i < numSegments; i++)
{
// Readers are opened from the cloned SegmentInfos so concurrent
// changes to the live infos do not affect this merge.
SegmentInfo si = sourceSegmentsClone.Info(i);
IndexReader reader = SegmentReader.Get(true, si, MERGE_READ_BUFFER_SIZE, merge.mergeDocStores); // no need to set deleter (yet)
merger.Add(reader);
totDocCount += reader.NumDocs();
}
if (infoStream != null)
{
Message("merge: total " + totDocCount + " docs");
}
// Re-check for abort just before the expensive merge step.
merge.CheckAborted(directory);
// This is where all the work happens:
mergedDocCount = merge.info.docCount = merger.Merge(merge.mergeDocStores);
System.Diagnostics.Debug.Assert(mergedDocCount == totDocCount);
success = true;
}
finally
{
// close readers before we attempt to delete
// now-obsolete segments
if (merger != null)
{
merger.CloseReaders();
}
}
if (!CommitMerge(merge, merger, mergedDocCount))
// commitMerge will return false if this merge was aborted
return 0;
if (merge.useCompoundFile)
{
// Maybe force a sync here to allow reclaiming of the
// disk space used by the segments we just merged:
if (autoCommit && DoCommitBeforeMergeCFS(merge))
{
long size;
lock (this)
{
size = merge.info.SizeInBytes();
}
Commit(size);
}
success = false;
string compoundFileName = mergedName + "." + IndexFileNames.COMPOUND_FILE_EXTENSION;
try
{
merger.CreateCompoundFile(compoundFileName);
success = true;
}
catch (System.IO.IOException ioe)
{
lock (this)
{
if (merge.IsAborted())
{
// This can happen if rollback or close(false)
// is called -- fall through to logic below to
// remove the partially created CFS:
success = true;
}
//......... the remainder of the code is omitted here .........
示例4: MergeMiddle
/// <summary>Does the actual (time-consuming) work of the merge,
/// but without holding synchronized lock on IndexWriter
/// instance
/// </summary>
// NOTE(review): snippet from a doc-store-era Lucene.NET IndexWriter; the part
// after "This is where all the work happens" is omitted by the source page.
private int MergeMiddle(MergePolicy.OneMerge merge)
{
// Abort check before doing any work.
merge.CheckAborted(directory);
System.String mergedName = merge.info.name;
SegmentMerger merger = null;
int mergedDocCount = 0;
SegmentInfos sourceSegments = merge.segments;
int numSegments = sourceSegments.Count;
if (infoStream != null)
Message("merging " + merge.SegString(directory));
merger = new SegmentMerger(this, mergedName, merge);
merge.readers = new SegmentReader[numSegments];
merge.readersClone = new SegmentReader[numSegments];
bool mergeDocStores = false;
// dss collects the doc-store segment names used by the source segments.
System.Collections.Hashtable dss = new System.Collections.Hashtable();
// This is try/finally to make sure merger's readers are
// closed:
bool success = false;
try
{
int totDocCount = 0;
for (int i = 0; i < numSegments; i++)
{
SegmentInfo info = sourceSegments.Info(i);
// Hold onto the "live" reader; we will use this to
// commit merged deletes
SegmentReader reader = merge.readers[i] = readerPool.Get(info, merge.mergeDocStores, MERGE_READ_BUFFER_SIZE, - 1);
// We clone the segment readers because other
// deletes may come in while we're merging so we
// need readers that will not change
SegmentReader clone = merge.readersClone[i] = (SegmentReader) reader.Clone(true);
merger.Add(clone);
if (clone.HasDeletions())
{
mergeDocStores = true;
}
if (info.GetDocStoreOffset() != - 1)
{
dss[info.GetDocStoreSegment()] = info.GetDocStoreSegment();
}
totDocCount += clone.NumDocs();
}
if (infoStream != null)
{
Message("merge: total " + totDocCount + " docs");
}
// Re-check for abort after opening all readers.
merge.CheckAborted(directory);
// If deletions have arrived and it has now become
// necessary to merge doc stores, go and open them:
if (mergeDocStores && !merge.mergeDocStores)
{
merge.mergeDocStores = true;
lock (this)
{
// Flush only if the doc store currently being written is one of
// the stores this merge reads from.
String key = docWriter.GetDocStoreSegment();
if (key!=null && dss.Contains(key))
{
if (infoStream != null)
Message("now flush at mergeMiddle");
DoFlush(true, false);
}
}
for (int i = 0; i < numSegments; i++)
{
merge.readersClone[i].OpenDocStores();
}
// Clear DSS
lock (this)
{
merge.info.SetDocStore(- 1, null, false);
}
}
// This is where all the work happens:
//......... the remainder of the code is omitted here .........
示例5: MergeMiddle
/// <summary>Does the actual (time-consuming) work of the merge,
/// but without holding synchronized lock on IndexWriter
/// instance
/// </summary>
// NOTE(review): snippet from an early Lucene.NET IndexWriter; the compound-file
// handling tail of the method is omitted by the source page.
private int MergeMiddle(MergePolicy.OneMerge merge)
{
// Abort check before doing any work.
merge.CheckAborted(directory);
System.String mergedName = merge.info.name;
SegmentMerger merger = null;
int mergedDocCount = 0;
SegmentInfos sourceSegments = merge.segments;
SegmentInfos sourceSegmentsClone = merge.segmentsClone;
int numSegments = sourceSegments.Count;
if (infoStream != null)
Message("merging " + merge.SegString(directory));
merger = new SegmentMerger(this, mergedName, merge);
// This is try/finally to make sure merger's readers are
// closed:
bool success = false;
try
{
int totDocCount = 0;
for (int i = 0; i < numSegments; i++)
{
// Readers come from the cloned SegmentInfos so concurrent changes
// to the live infos do not affect this merge.
SegmentInfo si = sourceSegmentsClone.Info(i);
IndexReader reader = SegmentReader.Get(si, MERGE_READ_BUFFER_SIZE, merge.mergeDocStores); // no need to set deleter (yet)
merger.Add(reader);
totDocCount += reader.NumDocs();
}
if (infoStream != null)
{
Message("merge: total " + totDocCount + " docs");
}
// Re-check for abort just before the expensive merge step.
merge.CheckAborted(directory);
mergedDocCount = merge.info.docCount = merger.Merge(merge.mergeDocStores);
System.Diagnostics.Debug.Assert(mergedDocCount == totDocCount);
success = true;
}
finally
{
// close readers before we attempt to delete
// now-obsolete segments
if (merger != null)
{
merger.CloseReaders();
}
if (!success)
{
// On failure, record the exception and re-sync the deleter so any
// partially written files for this segment can be removed.
if (infoStream != null)
Message("hit exception during merge; now refresh deleter on segment " + mergedName);
lock (this)
{
AddMergeException(merge);
deleter.Refresh(mergedName);
}
}
}
if (!CommitMerge(merge))
// commitMerge will return false if this merge was aborted
return 0;
if (merge.useCompoundFile)
{
success = false;
bool skip = false;
System.String compoundFileName = mergedName + "." + IndexFileNames.COMPOUND_FILE_EXTENSION;
try
{
try
{
merger.CreateCompoundFile(compoundFileName);
success = true;
}
catch (System.IO.IOException ioe)
{
lock (this)
{
if (segmentInfos.IndexOf(merge.info) == - 1)
{
// If another merge kicked in and merged our
// new segment away while we were trying to
// build the compound file, we can hit a
//......... the remainder of the code is omitted here .........
示例6: CommitMerge
//......... the beginning of the code is omitted here .........
// NOTE(review): fragment from the middle of CommitMerge — it remaps per-segment
// delete bits onto the merged segment's doc numbering; both the start and the
// end of the method are cut off by the source page.
{
if (previousDeletes.Get(j))
// A doc deleted before the merge started must still be deleted now.
System.Diagnostics.Debug.Assert(currentDeletes.Get(j));
else
{
// Doc survived the merge; carry any newly arrived delete forward
// into the merged segment's delete vector.
if (currentDeletes.Get(j))
deletes.Set(docUpto);
docUpto++;
}
}
}
else
// No new deletes: advance past this segment's surviving docs.
docUpto += docCount - previousDeletes.Count();
}
else if (currentInfo.HasDeletions())
{
// This segment had no deletes before but now it
// does:
if (deletes == null)
deletes = new BitVector(merge.info.docCount);
BitVector currentDeletes = new BitVector(directory, currentInfo.GetDelFileName());
for (int j = 0; j < docCount; j++)
{
if (currentDeletes.Get(j))
deletes.Set(docUpto);
docUpto++;
}
}
// No deletes before or after
else
docUpto += currentInfo.docCount;
// Abort check between segments while remapping deletes.
merge.CheckAborted(directory);
}
if (deletes != null)
{
// Bump the delete generation and persist the merged delete vector.
merge.info.AdvanceDelGen();
deletes.Write(directory, merge.info.GetDelFileName());
}
success = true;
}
finally
{
if (!success)
{
if (infoStream != null)
Message("hit exception creating merged deletes file");
deleter.Refresh(merge.info.name);
}
}
// Simple optimization: if the doc store we are using
// has been closed and is in now compound format (but
// wasn't when we started), then we will switch to the
// compound format as well:
System.String mergeDocStoreSegment = merge.info.GetDocStoreSegment();
if (mergeDocStoreSegment != null && !merge.info.GetDocStoreIsCompoundFile())
{
int size = segmentInfos.Count;
for (int i = 0; i < size; i++)
{
SegmentInfo info = segmentInfos.Info(i);
System.String docStoreSegment = info.GetDocStoreSegment();
if (docStoreSegment != null && docStoreSegment.Equals(mergeDocStoreSegment) && info.GetDocStoreIsCompoundFile())
//......... the remainder of the code is omitted here .........