本文整理汇总了C#中Lucene.Net.Index.SegmentInfo.HasSeparateNorms方法的典型用法代码示例。如果您正苦于以下问题:C# SegmentInfo.HasSeparateNorms方法的具体用法?C# SegmentInfo.HasSeparateNorms怎么用?C# SegmentInfo.HasSeparateNorms使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Lucene.Net.Index.SegmentInfo的用法示例。
在下文中一共展示了SegmentInfo.HasSeparateNorms方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: HasSeparateNorms
/// <summary>
/// Thin static helper that reports whether the given segment has
/// separate (updated) norm files, by delegating to
/// <see cref="SegmentInfo.HasSeparateNorms()"/>.
/// </summary>
internal static bool HasSeparateNorms(SegmentInfo si)
{
    bool hasSeparate = si.HasSeparateNorms();
    return hasSeparate;
}
示例2: IsOptimized
/// <summary>Returns true if this single nfo is optimized (has no
/// pending norms or deletes, is in the same dir as the
/// writer, and matches the current compound file setting
/// </summary>
/// <summary>Returns true if this single info is optimized (has no
/// pending norms or deletes, is in the same dir as the
/// writer, and matches the current compound file setting).
/// </summary>
private bool IsOptimized(IndexWriter writer, SegmentInfo info)
{
    // Guard clauses preserve the original short-circuit order.
    if (info.HasDeletions())
        return false;
    if (info.HasSeparateNorms())
        return false;
    if (info.dir != writer.GetDirectory())
        return false;
    return info.GetUseCompoundFile() == useCompoundFile;
}
示例3: IsOptimized
/// <summary>Returns true if this single info is optimized (has no
/// pending norms or deletes, is in the same dir as the
/// writer, and matches the current compound file setting
/// </summary>
/// <summary>Returns true if this single info is optimized (has no
/// pending norms or deletes, is in the same dir as the
/// writer, and matches the current compound file setting).
/// </summary>
private bool IsOptimized(SegmentInfo info)
{
    // Deletion count is always queried first, exactly as in the original.
    bool pendingDeletes = writer.NumDeletedDocs(info) > 0;
    if (pendingDeletes || info.HasSeparateNorms())
        return false;
    if (info.dir != writer.GetDirectory())
        return false;
    // A noCFSRatio below 1.0 relaxes the compound-file match requirement.
    return info.GetUseCompoundFile() == useCompoundFile || noCFSRatio < 1.0;
}
示例4: ReopenSegment
//.........part of the code is omitted here (only the tail of ReopenSegment is shown).........
{
// NOTE(review): storeDir/fieldsSegment come from the omitted portion above — presumably the
// doc-store directory and segment name for the shared stored-fields files; verify against
// the full method.
clone.fieldsReader = new FieldsReader(storeDir, fieldsSegment, fieldInfos, readBufferSize, si.GetDocStoreOffset(), si.docCount);
}
if (!deletionsUpToDate)
{
// load deleted docs
clone.deletedDocs = null;
clone.LoadDeletedDocs();
}
else
{
// Deletions unchanged: the clone shares this reader's deleted-docs object.
clone.deletedDocs = this.deletedDocs;
}
clone.norms = new System.Collections.Hashtable();
if (!normsUpToDate)
{
// load norms
for (int i = 0; i < fieldNormsChanged.Length; i++)
{
// copy unchanged norms to the cloned reader and incRef those norms
if (!fieldNormsChanged[i])
{
System.String curField = fieldInfos.FieldInfo(i).name;
Norm norm = (Norm) this.norms[curField];
norm.IncRef();
clone.norms[curField] = norm;
}
}
// Changed norms are re-read from disk (from the compound file if the segment uses one).
clone.OpenNorms(si.GetUseCompoundFile() ? cfsReader : Directory(), readBufferSize);
}
else
{
// All norms are up to date: share every Norm with the clone, bumping each ref count.
System.Collections.IEnumerator it = norms.Keys.GetEnumerator();
while (it.MoveNext())
{
System.String field = (System.String) it.Current;
Norm norm = (Norm) norms[field];
norm.IncRef();
clone.norms[field] = norm;
}
}
if (clone.singleNormStream == null)
{
// The clone has no shared norm stream yet: scan indexed fields (that keep norms) for
// the first one whose norms live in the single norms file, and open that stream once.
for (int i = 0; i < fieldInfos.Size(); i++)
{
FieldInfo fi = fieldInfos.FieldInfo(i);
if (fi.isIndexed && !fi.omitNorms)
{
Directory d = si.GetUseCompoundFile() ? cfsReader : Directory();
System.String fileName = si.GetNormFileName(fi.number);
// Fields with separate norm files do not use the shared stream; skip them.
if (si.HasSeparateNorms(fi.number))
{
continue;
}
if (fileName.EndsWith("." + IndexFileNames.NORMS_EXTENSION))
{
clone.singleNormStream = d.OpenInput(fileName, readBufferSize);
break;
}
}
}
}
success = true;
}
finally
{
if (this.referencedSegmentReader != null)
{
// this reader shares resources with another SegmentReader,
// so we increment the other readers refCount. We don't
// increment the refCount of the norms because we did
// that already for the shared norms
clone.referencedSegmentReader = this.referencedSegmentReader;
referencedSegmentReader.IncRefReaderNotNorms();
}
else
{
// this reader wasn't reopened, so we increment this
// readers refCount
clone.referencedSegmentReader = this;
IncRefReaderNotNorms();
}
if (!success)
{
// An exception occurred during reopen, we have to decRef the norms
// that we incRef'ed already and close singleNormsStream and FieldsReader
clone.DecRef();
}
}
return clone;
}
}