本文整理汇总了C#中Lucene.Net.Index.AtomicReader.MaxDoc方法的典型用法代码示例。如果您正苦于以下问题:C# AtomicReader.MaxDoc方法的具体用法?C# AtomicReader.MaxDoc怎么用?C# AtomicReader.MaxDoc使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Lucene.Net.Index.AtomicReader
的用法示例。
在下文中一共展示了AtomicReader.MaxDoc方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: AssertingAtomicReader
/// <summary>
/// Wraps <paramref name="@in"/> and, on construction, asserts the basic
/// document-count invariants every <c>AtomicReader</c> must satisfy.
/// </summary>
public AssertingAtomicReader(AtomicReader @in)
    : base(@in)
{
    // check some basic reader sanity
    Debug.Assert(@in.MaxDoc() >= 0);
    Debug.Assert(@in.NumDocs() <= @in.MaxDoc());
    Debug.Assert(@in.NumDeletedDocs() + @in.NumDocs() == @in.MaxDoc());
    // Either the reader reports no deletions, or its counters must agree:
    // at least one deleted doc AND strictly fewer live docs than maxDoc.
    // (The original listing showed "[email protected]()" here — an email-obfuscation
    // artifact; reconstructed from the upstream Lucene assertion.)
    Debug.Assert(!@in.HasDeletions() || @in.NumDeletedDocs() > 0 && @in.NumDocs() < @in.MaxDoc());
}
示例2: Build
/// <summary>
/// Creates a <seealso cref="DocMap"/> instance appropriate for this reader:
/// a trivial no-deletions map when the reader has no deletions, otherwise a
/// map derived from the reader's live-docs bits.
/// </summary>
public static DocMap Build(AtomicReader reader)
{
    int maxDoc = reader.MaxDoc();
    if (reader.HasDeletions())
    {
        // Some docs are deleted: compact the doc-id space using the live-docs bits.
        return Build(maxDoc, reader.LiveDocs);
    }
    // No deletions: every doc id maps to itself.
    return new NoDelDocMap(maxDoc);
}
示例3: SlowMinShouldMatchScorer
/// <summary>
/// Builds a slow reference scorer for a boolean query over the "dv"
/// sorted-set doc values: for each non-required, non-prohibited term clause
/// found in the doc-values dictionary, records its ordinal and prepares a
/// normalized per-term similarity scorer.
/// </summary>
internal SlowMinShouldMatchScorer(BooleanWeight weight, AtomicReader reader, IndexSearcher searcher)
    : base(weight)
{
    Dv = reader.GetSortedSetDocValues("dv");
    MaxDoc = reader.MaxDoc();
    var booleanQuery = (BooleanQuery)weight.Query;
    MinNrShouldMatch = booleanQuery.MinimumNumberShouldMatch;
    Sims = new SimScorer[(int)Dv.ValueCount];
    foreach (BooleanClause clause in booleanQuery.Clauses)
    {
        // Only plain SHOULD clauses are expected here.
        Debug.Assert(!clause.Prohibited);
        Debug.Assert(!clause.Required);
        Term term = ((TermQuery)clause.Query).Term;
        long ord = Dv.LookupTerm(term.Bytes());
        if (ord < 0)
        {
            // Term is absent from the doc-values dictionary; nothing to score.
            continue;
        }
        bool added = Ords.Add(ord);
        Debug.Assert(added); // no dups
        TermContext context = TermContext.Build(reader.Context, term);
        SimWeight simWeight = weight.Similarity.ComputeWeight(1f, searcher.CollectionStatistics("field"), searcher.TermStatistics(term, context));
        var dummy = simWeight.ValueForNormalization; // ignored
        simWeight.Normalize(1F, 1F);
        Sims[(int)ord] = weight.Similarity.DoSimScorer(simWeight, (AtomicReaderContext)reader.Context);
    }
}
示例4: AllDeletedFilterReader
/// <summary>
/// Wraps <paramref name="@in"/> so that every document appears deleted,
/// by exposing a live-docs view that matches no bits at all.
/// </summary>
public AllDeletedFilterReader(AtomicReader @in)
    : base(@in)
{
    int maxDoc = @in.MaxDoc();
    LiveDocs_Renamed = new Bits_MatchNoBits(maxDoc);
    // Sanity: a non-empty reader wrapped this way must report deletions.
    Debug.Assert(MaxDoc() == 0 || HasDeletions());
}
示例5: Uninvert
/// <summary>
/// Call this only once (if you subclass!)
/// Builds the un-inverted ("document -> term numbers") view of this
/// instance's Field from <paramref name="reader"/>, optionally restricted to
/// terms starting with <paramref name="termPrefix"/>. Throws if the field
/// already has doc values. Returns early when the field has no terms, or no
/// term matches the prefix.
/// NOTE(review): this listing is truncated by the source site
/// ("部分代码省略" — code omitted) — the enumeration loop body continues
/// beyond what is shown here.
/// </summary>
protected internal virtual void Uninvert(AtomicReader reader, Bits liveDocs, BytesRef termPrefix)
{
FieldInfo info = reader.FieldInfos.FieldInfo(Field);
if (info != null && info.HasDocValues())
{
// Un-inverting a field that already has doc values is a caller error.
throw new InvalidOperationException("Type mismatch: " + Field + " was indexed as " + info.DocValuesType);
}
//System.out.println("DTO uninvert field=" + field + " prefix=" + termPrefix);
// NOTE(review): DateTime.Now.Millisecond is only the 0-999 millisecond
// component of the current time, NOT a timestamp — any elapsed-time math on
// startTime would be wrong. A Stopwatch (or DateTime.UtcNow.Ticks) is what
// this presumably intended; confirm against the untruncated method.
long startTime = DateTime.Now.Millisecond;
Prefix = termPrefix == null ? null : BytesRef.DeepCopyOf(termPrefix);
int maxDoc = reader.MaxDoc();
int[] index = new int[maxDoc]; // immediate term numbers, or the index into the byte[] representing the last number
int[] lastTerm = new int[maxDoc]; // last term we saw for this document
sbyte[][] bytes = new sbyte[maxDoc][]; // list of term numbers for the doc (delta encoded vInts)
Fields fields = reader.Fields();
if (fields == null)
{
// No terms
return;
}
Terms terms = fields.Terms(Field);
if (terms == null)
{
// No terms
return;
}
TermsEnum te = terms.Iterator(null);
// With no prefix, an empty BytesRef seeks to the very first term.
BytesRef seekStart = termPrefix != null ? termPrefix : new BytesRef();
//System.out.println("seekStart=" + seekStart.utf8ToString());
if (te.SeekCeil(seekStart) == TermsEnum.SeekStatus.END)
{
// No terms match
return;
}
// If we need our "term index wrapper", these will be
// init'd below:
IList<BytesRef> indexedTerms = null;
PagedBytes indexedTermsBytes = null;
bool testedOrd = false;
// we need a minimum of 9 bytes, but round up to 12 since the space would
// be wasted with most allocators anyway.
sbyte[] tempArr = new sbyte[12];
//
// enumerate all terms, and build an intermediate form of the un-inverted field.
//
// During this intermediate form, every document has a (potential) byte[]
// and the int[maxDoc()] array either contains the termNumber list directly
// or the *end* offset of the termNumber list in it's byte array (for faster
// appending and faster creation of the final form).
//
// idea... if things are too large while building, we could do a range of docs
// at a time (but it would be a fair amount slower to build)
// could also do ranges in parallel to take advantage of multiple CPUs
// OPTIONAL: remap the largest df terms to the lowest 128 (single byte)
// values. this requires going over the field first to find the most
// frequent terms ahead of time.
int termNum = 0;
DocsEnum = null;
// Loop begins with te positioned to first term (we call
// seek above):
for (; ; )
{
BytesRef t = te.Term();
// Stop once the enum is exhausted or the current term leaves the prefix range.
if (t == null || (termPrefix != null && !StringHelper.StartsWith(t, termPrefix)))
{
break;
}
//System.out.println("visit term=" + t.utf8ToString() + " " + t + " termNum=" + termNum);
// Probe ord support exactly once, on the first visited term.
if (!testedOrd)
{
try
{
OrdBase = (int)te.Ord();
//System.out.println("got ordBase=" + ordBase);
}
// NOTE(review): 'uoe' is unused; idiomatic C# would write "catch (NotSupportedException)".
catch (System.NotSupportedException uoe)
{
// Reader cannot provide ord support, so we wrap
// our own support by creating our own terms index:
indexedTerms = new List<BytesRef>();
indexedTermsBytes = new PagedBytes(15);
//System.out.println("NO ORDS");
}
testedOrd = true;
}
VisitTerm(te, termNum);
//......... remainder of this method omitted by the source listing .........
示例6: Verify
/// <summary>
/// Verifies that a freshly built DocTermOrds over the "field" field of
/// <paramref name="r"/> yields, for every document, exactly the term
/// ordinals expected by <paramref name="idToOrds"/> (looked up via the
/// document's "id" field), optionally restricted to terms starting with
/// <paramref name="prefixRef"/>.
/// </summary>
private void Verify(AtomicReader r, int[][] idToOrds, BytesRef[] termsArray, BytesRef prefixRef)
{
// Structure under test; the per-doc term cutoff is int.MaxValue and the index
// interval is randomized in [2, 10] to vary coverage across runs.
DocTermOrds dto = new DocTermOrds(r, r.LiveDocs, "field", prefixRef, int.MaxValue, TestUtil.NextInt(Random(), 2, 10));
// Maps a Lucene docID back to the test's own "id" value, which indexes idToOrds.
FieldCache_Fields.Ints docIDToID = FieldCache_Fields.DEFAULT.GetInts(r, "id", false);
/*
for(int docID=0;docID<subR.MaxDoc();docID++) {
System.out.println(" docID=" + docID + " id=" + docIDToID[docID]);
}
*/
if (VERBOSE)
{
Console.WriteLine("TEST: verify prefix=" + (prefixRef == null ? "null" : prefixRef.Utf8ToString()));
Console.WriteLine("TEST: all TERMS:");
TermsEnum allTE = MultiFields.GetTerms(r, "field").Iterator(null);
int ord = 0;
while (allTE.Next() != null)
{
Console.WriteLine(" ord=" + (ord++) + " term=" + allTE.Term().Utf8ToString());
}
}
//final TermsEnum te = subR.Fields().Terms("field").iterator();
TermsEnum te = dto.GetOrdTermsEnum(r);
if (dto.NumTerms() == 0)
{
// Nothing survived un-inversion: either the field has no terms at all
// (no-prefix case), or no indexed term starts with the requested prefix.
if (prefixRef == null)
{
Assert.IsNull(MultiFields.GetTerms(r, "field"));
}
else
{
Terms terms = MultiFields.GetTerms(r, "field");
if (terms != null)
{
// The field does have terms, so confirm none actually match the prefix:
// a ceil-seek to the prefix must land past it (or hit END).
TermsEnum termsEnum = terms.Iterator(null);
TermsEnum.SeekStatus result = termsEnum.SeekCeil(prefixRef);
if (result != TermsEnum.SeekStatus.END)
{
Assert.IsFalse(StringHelper.StartsWith(termsEnum.Term(), prefixRef), "term=" + termsEnum.Term().Utf8ToString() + " matches prefix=" + prefixRef.Utf8ToString());
}
else
{
// ok
}
}
else
{
// ok
}
}
return;
}
if (VERBOSE)
{
Console.WriteLine("TEST: TERMS:");
te.SeekExact(0);
while (true)
{
Console.WriteLine(" ord=" + te.Ord() + " term=" + te.Term().Utf8ToString());
if (te.Next() == null)
{
break;
}
}
}
// Walk every document and compare its ordinal stream against the expected list.
SortedSetDocValues iter = dto.GetIterator(r);
for (int docID = 0; docID < r.MaxDoc(); docID++)
{
if (VERBOSE)
{
Console.WriteLine("TEST: docID=" + docID + " of " + r.MaxDoc() + " (id=" + docIDToID.Get(docID) + ")");
}
// Position the iterator on this document; NextOrd() then streams its ords.
iter.Document = docID;
int[] answers = idToOrds[docIDToID.Get(docID)];
int upto = 0;
long ord;
while ((ord = iter.NextOrd()) != SortedSetDocValues.NO_MORE_ORDS)
{
// Resolve the ordinal back to its term text and check it matches the
// expected term for this position. (Assumes answers are listed in the
// same order the iterator emits ords — holds for the callers here.)
te.SeekExact(ord);
BytesRef expected = termsArray[answers[upto++]];
if (VERBOSE)
{
Console.WriteLine(" exp=" + expected.Utf8ToString() + " actual=" + te.Term().Utf8ToString());
}
Assert.AreEqual(expected, te.Term(), "expected=" + expected.Utf8ToString() + " actual=" + te.Term().Utf8ToString() + " ord=" + ord);
}
// The document must have produced every expected ordinal — no fewer, no more.
Assert.AreEqual(answers.Length, upto);
}
}