This page collects typical usage examples of the C# method Lucene.Net.Index.SegmentReader.DecRef. If you have been asking yourself how SegmentReader.DecRef is used in practice, what it is called with, or what real code that uses it looks like, the hand-picked examples below should help. You can also explore further usage examples for the containing class, Lucene.Net.Index.SegmentReader.

Below are 6 code examples of the SegmentReader.DecRef method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C# code samples.
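As background for the examples below: Lucene.Net readers are reference counted, and DecRef is the counterpart of IncRef. Every reader starts with a single reference, each IncRef must be balanced by exactly one DecRef, and the final DecRef is what actually closes the reader. Here is a minimal sketch of that contract using the public IndexReader surface (this assumes the Lucene.Net 4.8 API; the throwaway RAMDirectory index exists only so a reader can be opened):

using Lucene.Net.Analysis.Standard;
using Lucene.Net.Documents;
using Lucene.Net.Index;
using Lucene.Net.Store;
using Lucene.Net.Util;

// Build a tiny in-memory index so that a reader can be opened at all.
var dir = new RAMDirectory();
using (var writer = new IndexWriter(dir,
    new IndexWriterConfig(LuceneVersion.LUCENE_48, new StandardAnalyzer(LuceneVersion.LUCENE_48))))
{
    var doc = new Document();
    doc.Add(new StringField("id", "1", Field.Store.YES));
    writer.AddDocument(doc);
}

// The reader starts out holding one reference.
DirectoryReader reader = DirectoryReader.Open(dir);

reader.IncRef();        // a second owner now shares the reader
try
{
    // ... use the reader on this code path ...
}
finally
{
    reader.DecRef();    // the second owner hands its reference back
}

reader.DecRef();        // last reference released: the reader is now closed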
Example 1: MergeMiddle
/// <summary>
/// Does the actual (time-consuming) work of the merge,
/// but without holding synchronized lock on IndexWriter
/// instance
/// </summary>
private int MergeMiddle(MergePolicy.OneMerge merge)
{
    merge.CheckAborted(directory);

    string mergedName = merge.Info_Renamed.Info.Name;

    IList<SegmentCommitInfo> sourceSegments = merge.Segments;

    IOContext context = new IOContext(merge.MergeInfo);

    MergeState.CheckAbort checkAbort = new MergeState.CheckAbort(merge, directory);
    TrackingDirectoryWrapper dirWrapper = new TrackingDirectoryWrapper(directory);

    if (infoStream.IsEnabled("IW"))
    {
        infoStream.Message("IW", "merging " + SegString(merge.Segments));
    }

    merge.Readers = new List<SegmentReader>();

    // this is try/finally to make sure merger's readers are
    // closed:
    bool success = false;
    try
    {
        int segUpto = 0;
        while (segUpto < sourceSegments.Count)
        {
            SegmentCommitInfo info = sourceSegments[segUpto];

            // Hold onto the "live" reader; we will use this to
            // commit merged deletes
            ReadersAndUpdates rld = readerPool.Get(info, true);

            // Carefully pull the most recent live docs and reader
            SegmentReader reader;
            Bits liveDocs;
            int delCount;

            lock (this)
            {
                // Must sync to ensure BufferedDeletesStream cannot change liveDocs,
                // pendingDeleteCount and field updates while we pull a copy:
                reader = rld.GetReaderForMerge(context);
                liveDocs = rld.ReadOnlyLiveDocs;
                delCount = rld.PendingDeleteCount + info.DelCount;

                Debug.Assert(reader != null);
                Debug.Assert(rld.VerifyDocCounts());

                if (infoStream.IsEnabled("IW"))
                {
                    if (rld.PendingDeleteCount != 0)
                    {
                        infoStream.Message("IW", "seg=" + SegString(info) + " delCount=" + info.DelCount + " pendingDelCount=" + rld.PendingDeleteCount);
                    }
                    else if (info.DelCount != 0)
                    {
                        infoStream.Message("IW", "seg=" + SegString(info) + " delCount=" + info.DelCount);
                    }
                    else
                    {
                        infoStream.Message("IW", "seg=" + SegString(info) + " no deletes");
                    }
                }
            }

            // Deletes might have happened after we pulled the merge reader and
            // before we got a read-only copy of the segment's actual live docs
            // (taking pending deletes into account). In that case we need to
            // make a new reader with updated live docs and del count.
            if (reader.NumDeletedDocs() != delCount)
            {
                // fix the reader's live docs and del count
                Debug.Assert(delCount > reader.NumDeletedDocs()); // beware of zombies

                SegmentReader newReader = new SegmentReader(info, reader, liveDocs, info.Info.DocCount - delCount);
                bool released = false;
                try
                {
                    rld.Release(reader);
                    released = true;
                }
                finally
                {
                    if (!released)
                    {
                        newReader.DecRef();
                    }
                }

                reader = newReader;
            }

            merge.Readers.Add(reader);
//......... part of the code omitted here .........
Example 2: Release
/// <summary> Release the segment reader (i.e. decRef it and close it if there
/// are no more references).
/// </summary>
/// <param name="sr">
/// </param>
/// <throws> IOException </throws>
public virtual void Release(SegmentReader sr, bool drop)
{
    lock (this)
    {
        bool pooled = readerMap.Contains(sr.GetSegmentInfo());

        System.Diagnostics.Debug.Assert(!pooled || readerMap[sr.GetSegmentInfo()] == sr);

        // Drop caller's ref; for an external reader (not
        // pooled), this decRef will close it
        sr.DecRef();

        if (pooled && (drop || (!Enclosing_Instance.poolReaders && sr.GetRefCount() == 1)))
        {
            // We invoke deleter.checkpoint below, so we must be
            // sync'd on IW if there are changes:
            // TODO: java 5
            // assert !sr.hasChanges || Thread.holdsLock(IndexWriter.this);

            // Discard (don't save) changes when we are dropping
            // the reader; this is used only on the sub-readers
            // after a successful merge.
            sr.hasChanges &= !drop;
            bool hasChanges = sr.hasChanges;

            // Drop our ref -- this will commit any pending
            // changes to the dir
            sr.Close();

            // We are the last ref to this reader; since we're
            // not pooling readers, we release it:
            readerMap.Remove(sr.GetSegmentInfo());

            if (hasChanges)
            {
                // Must checkpoint w/ deleter, because this
                // segment reader will have created new _X_N.del
                // file.
                enclosingInstance.deleter.Checkpoint(enclosingInstance.segmentInfos, false);
            }
        }
    }
}
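A note on the design of this Release overload: the caller's reference is always handed back first via sr.DecRef(), and only afterwards does the pool decide whether the reader should also be closed and evicted from readerMap. When drop is true, any buffered changes (such as newly written _X_N.del files) are deliberately discarded, because this path is only taken for sub-readers that have just been merged away.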
Example 3: WriteFieldUpdates
//......... part of the code omitted here .........
        }
        finally
        {
            if (!success)
            {
                // Advance only the nextWriteDocValuesGen so that a 2nd
                // attempt to write will write to a new file
                Info.AdvanceNextWriteFieldInfosGen();

                // Delete any partially created file(s):
                foreach (string fileName in trackingDir.CreatedFiles)
                {
                    try
                    {
                        dir.DeleteFile(fileName);
                    }
                    catch (Exception)
                    {
                        // Ignore so we throw only the first exc
                    }
                }
            }
        }

        Info.AdvanceFieldInfosGen();

        // copy all the updates to mergingUpdates, so they can later be applied to the merged segment
        if (IsMerging)
        {
            foreach (KeyValuePair<string, NumericDocValuesFieldUpdates> e in dvUpdates.NumericDVUpdates)
            {
                DocValuesFieldUpdates updates;
                if (!MergingDVUpdates.TryGetValue(e.Key, out updates))
                {
                    MergingDVUpdates[e.Key] = e.Value;
                }
                else
                {
                    updates.Merge(e.Value);
                }
            }

            foreach (KeyValuePair<string, BinaryDocValuesFieldUpdates> e in dvUpdates.BinaryDVUpdates)
            {
                DocValuesFieldUpdates updates;
                if (!MergingDVUpdates.TryGetValue(e.Key, out updates))
                {
                    MergingDVUpdates[e.Key] = e.Value;
                }
                else
                {
                    updates.Merge(e.Value);
                }
            }
        }

        // create a new map, keeping only the gens that are in use
        IDictionary<long, ISet<string>> genUpdatesFiles = Info.UpdatesFiles;
        IDictionary<long, ISet<string>> newGenUpdatesFiles = new Dictionary<long, ISet<string>>();
        long fieldInfosGen = Info.FieldInfosGen;
        foreach (FieldInfo fi in fieldInfos)
        {
            long dvGen = fi.DocValuesGen;
            if (dvGen != -1 && !newGenUpdatesFiles.ContainsKey(dvGen))
            {
                if (dvGen == fieldInfosGen)
                {
                    newGenUpdatesFiles[fieldInfosGen] = trackingDir.CreatedFiles;
                }
                else
                {
                    newGenUpdatesFiles[dvGen] = genUpdatesFiles[dvGen];
                }
            }
        }
        Info.GenUpdatesFiles = newGenUpdatesFiles;

        // wrote new files, should checkpoint()
        Writer.Checkpoint();

        // if there is a reader open, reopen it to reflect the updates
        if (Reader != null)
        {
            SegmentReader newReader = new SegmentReader(Info, Reader, LiveDocs_Renamed, Info.Info.DocCount - Info.DelCount - PendingDeleteCount_Renamed);
            bool reopened = false;
            try
            {
                Reader.DecRef();
                Reader = newReader;
                reopened = true;
            }
            finally
            {
                if (!reopened)
                {
                    newReader.DecRef();
                }
            }
        }
    }
}
Example 4: Release
public virtual void Release(SegmentReader sr)
{
    lock (this)
    {
        Debug.Assert(Info == sr.SegmentInfo);
        sr.DecRef();
    }
}
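Example 4 is the simplest case: the caller's reference is dropped unconditionally, and whether the underlying reader actually gets closed is decided purely by its reference count. Below is a hedged sketch of the acquire/use/release pairing that examples 1 and 4 imply; all method names are taken from those examples, and info and context are assumed to be a SegmentCommitInfo and an IOContext already in scope:

// Acquire the pooled ReadersAndUpdates and pull a merge reader from it.
ReadersAndUpdates rld = readerPool.Get(info, true);
SegmentReader reader = rld.GetReaderForMerge(context);
try
{
    // ... consume the reader, e.g. hand it to the segment merger ...
}
finally
{
    // As in example 4: Release simply DecRefs the caller's reference; the
    // reader is only closed once its last reference is gone. Returning the
    // ReadersAndUpdates itself to the pool is elided here.
    rld.Release(reader);
}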
Example 5: Release
/// <summary> Release the segment reader (i.e. decRef it and close it if there
/// are no more references).
/// </summary>
/// <param name="sr">
/// </param>
/// <throws> IOException </throws>
public virtual void Release(SegmentReader sr, bool drop)
{
    lock (this)
    {
        bool pooled = readerMap.Contains(sr.GetSegmentInfo());

        System.Diagnostics.Debug.Assert(!pooled || readerMap[sr.GetSegmentInfo()] == sr);

        // Drop caller's ref
        sr.DecRef();

        if (pooled && (drop || (!Enclosing_Instance.poolReaders && sr.GetRefCount() == 1)))
        {
            // We are the last ref to this reader; since we're
            // not pooling readers, we release it:
            readerMap.Remove(sr.GetSegmentInfo());

            // TODO: java 5
            // assert !sr.hasChanges || Thread.holdsLock(IndexWriter.this);

            // Drop our ref -- this will commit any pending
            // changes to the dir
            bool success = false;
            try
            {
                sr.Close();
                success = true;
            }
            finally
            {
                if (!success && sr.hasChanges)
                {
                    // Abandon the changes & retry closing:
                    sr.hasChanges = false;
                    try
                    {
                        sr.Close();
                    }
                    catch (System.Exception ignore)
                    {
                        // Keep throwing original exception
                    }
                }
            }
        }
    }
}
Example 6: ReopenSegment
//......... part of the code omitted here .........
                clone.fieldsReader = new FieldsReader(storeDir, fieldsSegment, fieldInfos, readBufferSize, si.GetDocStoreOffset(), si.docCount);
            }

            if (!deletionsUpToDate)
            {
                // load deleted docs
                clone.deletedDocs = null;
                clone.LoadDeletedDocs();
            }
            else
            {
                clone.deletedDocs = this.deletedDocs;
            }

            clone.norms = new System.Collections.Hashtable();
            if (!normsUpToDate)
            {
                // load norms
                for (int i = 0; i < fieldNormsChanged.Length; i++)
                {
                    // copy unchanged norms to the cloned reader and incRef those norms
                    if (!fieldNormsChanged[i])
                    {
                        System.String curField = fieldInfos.FieldInfo(i).name;
                        Norm norm = (Norm) this.norms[curField];
                        norm.IncRef();
                        clone.norms[curField] = norm;
                    }
                }

                clone.OpenNorms(si.GetUseCompoundFile() ? cfsReader : Directory(), readBufferSize);
            }
            else
            {
                System.Collections.IEnumerator it = norms.Keys.GetEnumerator();
                while (it.MoveNext())
                {
                    System.String field = (System.String) it.Current;
                    Norm norm = (Norm) norms[field];
                    norm.IncRef();
                    clone.norms[field] = norm;
                }
            }

            if (clone.singleNormStream == null)
            {
                for (int i = 0; i < fieldInfos.Size(); i++)
                {
                    FieldInfo fi = fieldInfos.FieldInfo(i);
                    if (fi.isIndexed && !fi.omitNorms)
                    {
                        Directory d = si.GetUseCompoundFile() ? cfsReader : Directory();
                        System.String fileName = si.GetNormFileName(fi.number);
                        if (si.HasSeparateNorms(fi.number))
                        {
                            continue;
                        }
                        if (fileName.EndsWith("." + IndexFileNames.NORMS_EXTENSION))
                        {
                            clone.singleNormStream = d.OpenInput(fileName, readBufferSize);
                            break;
                        }
                    }
                }
            }

            success = true;
        }
        finally
        {
            if (this.referencedSegmentReader != null)
            {
                // this reader shares resources with another SegmentReader,
                // so we increment the other readers refCount. We don't
                // increment the refCount of the norms because we did
                // that already for the shared norms
                clone.referencedSegmentReader = this.referencedSegmentReader;
                referencedSegmentReader.IncRefReaderNotNorms();
            }
            else
            {
                // this reader wasn't reopened, so we increment this
                // readers refCount
                clone.referencedSegmentReader = this;
                IncRefReaderNotNorms();
            }

            if (!success)
            {
                // An exception occurred during reopen, we have to decRef the norms
                // that we incRef'ed already and close singleNormStream and FieldsReader
                clone.DecRef();
            }
        }

        return clone;
    }
}