This article collects typical usage examples of the C# method Lucene.Net.Index.AtomicReader.GetTermVectors. If you are wondering how to call AtomicReader.GetTermVectors, or are looking for concrete examples of its use, the selected code samples here may help. You can also explore further usage examples of the containing class, Lucene.Net.Index.AtomicReader.
Two code examples of the AtomicReader.GetTermVectors method are shown below, ordered by popularity.
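Before the full examples, here is a minimal sketch of the basic call pattern, assuming you already hold an AtomicReader (typically obtained from a leaf of a DirectoryReader or via SlowCompositeReaderWrapper). GetTermVectors(docId) returns a Fields instance holding the stored term vectors for that one document, or null if the document has none. The class and method names below (TermVectorDump, DumpTermVectors) are purely illustrative and not part of Lucene.NET:

using System;
using Lucene.Net.Index;
using Lucene.Net.Util;

public static class TermVectorDump
{
    // Illustrative helper: prints every vectorized term of one document.
    public static void DumpTermVectors(AtomicReader reader, int docId)
    {
        Fields tfv = reader.GetTermVectors(docId);
        if (tfv == null)
        {
            Console.WriteLine("doc " + docId + " has no term vectors");
            return;
        }
        foreach (string field in tfv)
        {
            Terms terms = tfv.Terms(field);
            TermsEnum termsEnum = terms.Iterator(null);
            BytesRef term;
            while ((term = termsEnum.Next()) != null)
            {
                Console.WriteLine(field + ": " + term.Utf8ToString());
            }
        }
    }
}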
Example 1: TestTermVectors
/// <summary>
/// Test term vectors.
/// @lucene.experimental
/// </summary>
public static Status.TermVectorStatus TestTermVectors(AtomicReader reader, TextWriter infoStream, bool verbose, bool crossCheckTermVectors)
{
    Status.TermVectorStatus status = new Status.TermVectorStatus();
    FieldInfos fieldInfos = reader.FieldInfos;
    Bits onlyDocIsDeleted = new FixedBitSet(1);
    try
    {
        if (infoStream != null)
        {
            infoStream.Write(" test: term vectors........");
        }

        DocsEnum docs = null;
        DocsAndPositionsEnum postings = null;

        // Only used if crossCheckTermVectors is true:
        DocsEnum postingsDocs = null;
        DocsAndPositionsEnum postingsPostings = null;

        Bits liveDocs = reader.LiveDocs;

        Fields postingsFields;
        // TODO: testTermsIndex
        if (crossCheckTermVectors)
        {
            postingsFields = reader.Fields;
        }
        else
        {
            postingsFields = null;
        }

        TermsEnum termsEnum = null;
        TermsEnum postingsTermsEnum = null;

        for (int j = 0; j < reader.MaxDoc; ++j)
        {
            // Intentionally pull/visit (but don't count in
            // stats) deleted documents to make sure they too
            // are not corrupt:
            Fields tfv = reader.GetTermVectors(j);

            // TODO: can we make a IS(FIR) that searches just
            // this term vector... to pass for searcher?
            if (tfv != null)
            {
                // First run with no deletions:
                CheckFields(tfv, null, 1, fieldInfos, false, true, infoStream, verbose);

                // Again, with the one doc deleted:
                CheckFields(tfv, onlyDocIsDeleted, 1, fieldInfos, false, true, infoStream, verbose);

                // Only agg stats if the doc is live:
                bool doStats = liveDocs == null || liveDocs.Get(j);
                if (doStats)
                {
                    status.DocCount++;
                }

                foreach (string field in tfv)
                {
                    if (doStats)
                    {
                        status.TotVectors++;
                    }

                    // Make sure FieldInfo thinks this field is vector'd:
                    FieldInfo fieldInfo = fieldInfos.FieldInfo(field);
                    if (!fieldInfo.HasVectors())
                    {
                        throw new Exception("docID=" + j + " has term vectors for field=" + field + " but FieldInfo has storeTermVector=false");
                    }

                    if (crossCheckTermVectors)
                    {
                        Terms terms = tfv.Terms(field);
                        termsEnum = terms.Iterator(termsEnum);
                        bool postingsHasFreq = fieldInfo.FieldIndexOptions >= FieldInfo.IndexOptions.DOCS_AND_FREQS;
                        bool postingsHasPayload = fieldInfo.HasPayloads();
                        bool vectorsHasPayload = terms.HasPayloads();

                        Terms postingsTerms = postingsFields.Terms(field);
                        if (postingsTerms == null)
                        {
                            throw new Exception("vector field=" + field + " does not exist in postings; doc=" + j);
                        }
                        postingsTermsEnum = postingsTerms.Iterator(postingsTermsEnum);

                        bool hasProx = terms.HasOffsets() || terms.HasPositions();
                        BytesRef term = null;
                        while ((term = termsEnum.Next()) != null)
                        {
                            if (hasProx)
                            {
//......... part of the code is omitted here .........
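The crossCheckTermVectors path above verifies each term stored in a document's vector against the inverted postings for the same field. The following stripped-down sketch shows that idea in isolation; it assumes the 4.8-style TermsEnum.SeekExact(BytesRef) and TermsEnum.Docs(liveDocs, reuse) signatures, and the class and method names (TermVectorCrossCheck, VectorTermsMatchPostings) are illustrative, not part of Lucene.NET:

using Lucene.Net.Index;
using Lucene.Net.Util;

public static class TermVectorCrossCheck
{
    // Illustrative helper: returns true when every term stored in the
    // document's term vector for `field` is also found in the postings
    // for that same document.
    public static bool VectorTermsMatchPostings(AtomicReader reader, int docId, string field)
    {
        Fields tfv = reader.GetTermVectors(docId);
        Terms vectorTerms = tfv == null ? null : tfv.Terms(field);
        if (vectorTerms == null)
        {
            return true; // nothing to cross-check
        }
        Terms postingsTerms = reader.Fields.Terms(field);
        if (postingsTerms == null)
        {
            return false; // vector exists but the field has no postings
        }

        TermsEnum vectorEnum = vectorTerms.Iterator(null);
        TermsEnum postingsEnum = postingsTerms.Iterator(null);
        BytesRef term;
        while ((term = vectorEnum.Next()) != null)
        {
            // The vectorized term must exist in the inverted index...
            if (!postingsEnum.SeekExact(term))
            {
                return false;
            }
            // ...and its postings must contain this document.
            DocsEnum docs = postingsEnum.Docs(null, null);
            if (docs.Advance(docId) != docId)
            {
                return false;
            }
        }
        return true;
    }
}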
Example 2: Warm
public override void Warm(AtomicReader reader)
{
    // Measure elapsed time with a monotonic Stopwatch rather than
    // DateTime.Now.Millisecond, which only yields the 0-999 millisecond
    // component of the current time and cannot measure durations.
    var stopwatch = System.Diagnostics.Stopwatch.StartNew();
    int indexedCount = 0;
    int docValuesCount = 0;
    int normsCount = 0;

    foreach (FieldInfo info in reader.FieldInfos)
    {
        if (info.Indexed)
        {
            reader.Terms(info.Name);
            indexedCount++;

            if (info.HasNorms())
            {
                reader.GetNormValues(info.Name);
                normsCount++;
            }
        }

        if (info.HasDocValues())
        {
            switch (info.DocValuesType)
            {
                case DocValuesType_e.NUMERIC:
                    reader.GetNumericDocValues(info.Name);
                    break;

                case DocValuesType_e.BINARY:
                    reader.GetBinaryDocValues(info.Name);
                    break;

                case DocValuesType_e.SORTED:
                    reader.GetSortedDocValues(info.Name);
                    break;

                case DocValuesType_e.SORTED_SET:
                    reader.GetSortedSetDocValues(info.Name);
                    break;

                default:
                    Debug.Assert(false); // unknown doc values type
                    break;
            }
            docValuesCount++;
        }
    }

    reader.Document(0);
    reader.GetTermVectors(0);

    if (InfoStream.IsEnabled("SMSW"))
    {
        InfoStream.Message("SMSW", "Finished warming segment: " + reader + ", indexed=" + indexedCount + ", docValues=" + docValuesCount + ", norms=" + normsCount + ", time=" + stopwatch.ElapsedMilliseconds);
    }
}
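A warmer like the one above only runs if it is registered on the IndexWriterConfig used to open the IndexWriter. The wiring sketch below is an assumption-laden illustration: names such as LuceneVersion.LUCENE_48, InfoStream.Default, and the MergedSegmentWarmer property follow the Lucene.NET 4.8 style but may differ slightly between pre-release versions, and WarmerSetup/OpenWriterWithWarmer are hypothetical names.

using System.IO;
using Lucene.Net.Analysis.Standard;
using Lucene.Net.Index;
using Lucene.Net.Store;
using Lucene.Net.Util;

public static class WarmerSetup
{
    public static IndexWriter OpenWriterWithWarmer(string indexPath)
    {
        var dir = FSDirectory.Open(new DirectoryInfo(indexPath));
        var analyzer = new StandardAnalyzer(LuceneVersion.LUCENE_48);
        var config = new IndexWriterConfig(LuceneVersion.LUCENE_48, analyzer)
        {
            // SimpleMergedSegmentWarmer is the stock warmer; a custom
            // IndexWriter.IndexReaderWarmer subclass (like the Warm
            // override shown above) can be assigned here instead.
            MergedSegmentWarmer = new SimpleMergedSegmentWarmer(InfoStream.Default)
        };
        // Newly merged segments are now warmed before being published
        // to searchers.
        return new IndexWriter(dir, config);
    }
}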