本文整理汇总了C#中Lucene.Net.Index.IndexReader.MaxDoc方法的典型用法代码示例。如果您正苦于以下问题:C# IndexReader.MaxDoc方法的具体用法?C# IndexReader.MaxDoc怎么用?C# IndexReader.MaxDoc使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Lucene.Net.Index.IndexReader的用法示例。
在下文中一共展示了IndexReader.MaxDoc方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: Add
/// <summary>
/// Add an IndexReader whose stored fields will not be returned. This can
/// accelerate search when stored fields are only needed from a subset of
/// the IndexReaders.
/// </summary>
/// <param name="reader">Reader to add; must match the maxDoc/numDocs of readers already added.</param>
/// <param name="ignoreStoredFields">If true, this reader is excluded from stored-field retrieval.</param>
/// <throws> IllegalArgumentException if not all indexes contain the same number of documents </throws>
/// <throws> IllegalArgumentException if not all indexes have the same value of {@link IndexReader#MaxDoc()} </throws>
public virtual void Add(IndexReader reader, bool ignoreStoredFields)
{
    // The first reader added establishes the expected maxDoc/numDocs/hasDeletions.
    if (readers.Count == 0)
    {
        this.maxDoc = reader.MaxDoc();
        this.numDocs = reader.NumDocs();
        this.hasDeletions = reader.HasDeletions();
    }
    if (reader.MaxDoc() != maxDoc)
        // check compatibility with the readers added before this one
        throw new System.ArgumentException("All readers must have same maxDoc: " + maxDoc + "!=" + reader.MaxDoc());
    if (reader.NumDocs() != numDocs)
        throw new System.ArgumentException("All readers must have same numDocs: " + numDocs + "!=" + reader.NumDocs());
    System.Collections.IEnumerator i = reader.GetFieldNames(IndexReader.FieldOption.ALL).GetEnumerator();
    while (i.MoveNext())
    {
        System.Collections.DictionaryEntry fi = (System.Collections.DictionaryEntry) i.Current;
        // update fieldToReader map: the first reader to declare a field serves it
        System.String field = fi.Key.ToString();
        if (fieldToReader[field] == null)
            fieldToReader[field] = reader;
    }
    if (!ignoreStoredFields)
        storedFieldReaders.Add(reader); // add to storedFieldReaders
    readers.Add(reader);
}
示例2: TermToBitSet
public OpenBitSetDISI TermToBitSet(string term, IndexReader indexReader)
{
    // Wrap the single-term query in a caching filter so repeated calls can
    // reuse the underlying doc id set.
    var query = new TermQuery(new Term(this.Field, term));
    var cachedFilter = new CachingWrapperFilter(new QueryWrapperFilter(query));
    // Materialize the matching doc ids into a bit set sized to the reader.
    return new OpenBitSetDISI(cachedFilter.GetDocIdSet(indexReader).Iterator(), indexReader.MaxDoc());
}
示例3: SetNextReader
public override void SetNextReader(IndexReader reader, int docBase)
{
    // Rebuild the per-reader value cache: one random int per document slot.
    var values = new int[reader.MaxDoc()];
    currentReaderValues = values;
    for (int slot = 0; slot < values.Length; slot++)
    {
        values[slot] = random.Next();
    }
}
示例4: GetDocIdSet
public override DocIdSet GetDocIdSet(IndexReader reader)
{
    // Fetch the cached per-document coordinate arrays for this segment.
    var lats = FieldCache_Fields.DEFAULT.GetDoubles(reader, _latField);
    var lngs = FieldCache_Fields.DEFAULT.GetDoubles(reader, _lngField);
    // Remember this segment's doc base, then advance the running base so the
    // next segment's local doc ids can be globalized.
    int segmentBase = NextDocBase;
    NextDocBase += reader.MaxDoc();
    return new LatLongFilteredDocIdSet(
        StartingFilter.GetDocIdSet(reader),
        lats, lngs, DistanceLookupCache,
        _lat, _lng, Distance, segmentBase, Distances);
}
示例5: Bits
/// <summary>
/// Returns a bit array with one bit per document, set for every document
/// whose "score" field contains the term "5".
/// </summary>
public override BitArray Bits(IndexReader reader)
{
    BitArray bitArray = new BitArray(reader.MaxDoc());
    TermDocs termDocs = reader.TermDocs(new Term("score", "5"));
    try
    {
        while (termDocs.Next())
        {
            bitArray.Set(termDocs.Doc(), true);
        }
    }
    finally
    {
        // Release the postings enumerator — the original leaked it.
        termDocs.Close();
    }
    return bitArray;
}
示例6: Bits
public override BitArray Bits(IndexReader reader)
{
    // This filter may only be evaluated once; a second call is a usage error.
    if (done)
    {
        throw new NotSupportedException("Called twice");
    }
    // All-clear bit set sized to the reader: matches no documents.
    var emptyBits = new BitArray(reader.MaxDoc());
    done = true;
    return emptyBits;
}
示例7: FindMatchesInQuery
/// <summary>
/// For each value of <paramref name="facet"/>, intersects the query's doc id
/// set with the value's precomputed bit set (value.Item2), optionally ANDs in
/// an extra filter, and returns the surviving document count per value.
/// </summary>
/// <param name="facet">Facet whose values (Item1 = label, Item2 = bit set) are counted.</param>
/// <param name="query">Filter producing the candidate documents.</param>
/// <param name="filter">Optional additional filter; may be null.</param>
/// <param name="indexReader">Reader the doc id sets are resolved against.</param>
/// <returns>One FacetMatch (count, value, facet id) per facet value.</returns>
private List<FacetMatch> FindMatchesInQuery(Facet facet, Filter query, Filter filter, IndexReader indexReader)
{
    var matches = facet.Values.Select(value =>
    {
        var bitsQuery = new OpenBitSetDISI(query.GetDocIdSet(indexReader).Iterator(), indexReader.MaxDoc());
        bitsQuery.And(value.Item2);
        if (filter != null)
        {
            // Size the filter's bit set to the reader's maxDoc. The previous
            // hard-coded 1000 (flagged by its own TODO) silently truncated the
            // filter on indexes with more than 1000 documents.
            var bitsFilter = new OpenBitSetDISI(filter.GetDocIdSet(indexReader).Iterator(), indexReader.MaxDoc());
            bitsQuery.And(bitsFilter);
        }
        var count = bitsQuery.Cardinality();
        return new FacetMatch() { Count = count, Value = value.Item1, Id = facet.Id };
    }).ToList();
    return matches;
}
示例8: GetDocIdSet
/// <summary>
/// Builds the set of documents matching any of the configured terms.
/// </summary>
/// <param name="reader">Applicable index reader.</param>
/// <returns>An OpenBitSet with a bit set for every matching document.</returns>
public override DocIdSet GetDocIdSet(IndexReader reader)
{
    var matching = new OpenBitSet(reader.MaxDoc());
    TermDocs postings = reader.TermDocs();
    try
    {
        // Walk each term's postings and mark every document it occurs in.
        foreach (Term term in this.terms)
        {
            postings.Seek(term);
            while (postings.Next())
            {
                matching.Set(postings.Doc());
            }
        }
    }
    finally
    {
        postings.Close();
    }
    return matching;
}
示例9: CorrectBits
/// <summary>
/// Builds a bit set that marks, for each distinct value of <c>fieldName</c>,
/// the single occurrence to keep (first or last posting depending on
/// <c>keepMode</c>). Unset bits are treated as duplicates/invalid.
/// </summary>
private OpenBitSet CorrectBits(IndexReader reader)
{
    OpenBitSet bits = new OpenBitSet(reader.MaxDoc()); //assume all are INvalid
    Term startTerm = new Term(fieldName);
    TermEnum te = reader.Terms(startTerm);
    if (te != null)
    {
        try
        {
            Term currTerm = te.Term();
            while ((currTerm != null) && (currTerm.Field() == startTerm.Field())) //term fieldnames are interned
            {
                int lastDoc = -1;
                //set non duplicates
                TermDocs td = reader.TermDocs(currTerm);
                try
                {
                    if (td.Next())
                    {
                        if (keepMode == KM_USE_FIRST_OCCURRENCE)
                        {
                            bits.Set(td.Doc());
                        }
                        else
                        {
                            // Advance to the last posting for this term and keep only it.
                            do
                            {
                                lastDoc = td.Doc();
                            } while (td.Next());
                            bits.Set(lastDoc);
                        }
                    }
                }
                finally
                {
                    // The original leaked one TermDocs per distinct term.
                    td.Close();
                }
                if (!te.Next())
                {
                    break;
                }
                currTerm = te.Term();
            }
        }
        finally
        {
            // Always release the term enumerator (previously never closed).
            te.Close();
        }
    }
    return bits;
}
示例10: GetDocIdSet
/// <summary>
/// Returns the set of documents whose spatial field contains any of the
/// box ids covering this filter's shape area.
/// </summary>
public override DocIdSet GetDocIdSet(IndexReader reader)
{
    var bits = new OpenBitSet(reader.MaxDoc());
    TermDocs termDocs = reader.TermDocs();
    try
    {
        List<double> area = _shape.Area;
        int sz = area.Count;
        // iterate through each boxid
        for (int i = 0; i < sz; i++)
        {
            double boxId = area[i];
            termDocs.Seek(new Term(_fieldName, NumericUtils.DoubleToPrefixCoded(boxId)));
            // iterate through all documents which have this boxId
            while (termDocs.Next())
            {
                bits.FastSet(termDocs.Doc());
            }
        }
    }
    finally
    {
        // Release the postings enumerator — the original never closed it.
        termDocs.Close();
    }
    return bits;
}
示例11: CopyFieldsWithDeletions
/// <summary>
/// Copies stored fields from <paramref name="reader"/> into
/// <paramref name="fieldsWriter"/>, skipping deleted documents. When a
/// matching FieldsReader is available (field infos are congruent), contiguous
/// runs of live documents are bulk-copied as raw bytes; otherwise each live
/// document is re-read via <paramref name="fieldSelectorMerge"/> and re-added
/// individually.
/// </summary>
/// <returns>The number of documents actually copied.</returns>
private int CopyFieldsWithDeletions(FieldSelector fieldSelectorMerge, FieldsWriter fieldsWriter, IndexReader reader, FieldsReader matchingFieldsReader)
{
    int docCount = 0;
    int maxDoc = reader.MaxDoc();
    if (matchingFieldsReader != null)
    {
        // We can bulk-copy because the fieldInfos are "congruent"
        for (int j = 0; j < maxDoc; )
        {
            if (reader.IsDeleted(j))
            {
                // skip deleted docs
                ++j;
                continue;
            }
            // We can optimize this case (doing a bulk byte copy) since the field
            // numbers are identical
            int start = j, numDocs = 0;
            // Accumulate a contiguous run of live docs starting at `start`,
            // capped at MAX_RAW_MERGE_DOCS; stop at maxDoc or at the next
            // deleted doc (which is stepped over for the next outer iteration).
            do
            {
                j++;
                numDocs++;
                if (j >= maxDoc)
                    break;
                if (reader.IsDeleted(j))
                {
                    j++;
                    break;
                }
            }
            while (numDocs < MAX_RAW_MERGE_DOCS);
            IndexInput stream = matchingFieldsReader.RawDocs(rawDocLengths, start, numDocs);
            fieldsWriter.AddRawDocuments(stream, rawDocLengths, numDocs);
            docCount += numDocs;
            checkAbort.Work(300 * numDocs);
        }
    }
    else
    {
        // Slow path: field numbering differs, so each document must be
        // deserialized and re-serialized one at a time.
        for (int j = 0; j < maxDoc; j++)
        {
            if (reader.IsDeleted(j))
            {
                // skip deleted docs
                continue;
            }
            // NOTE: it's very important to first assign to doc then pass it to
            // termVectorsWriter.addAllDocVectors; see LUCENE-1282
            Document doc = reader.Document(j, fieldSelectorMerge);
            fieldsWriter.AddDocument(doc);
            docCount++;
            checkAbort.Work(300);
        }
    }
    return docCount;
}
示例12: VerifyEquals
public static void VerifyEquals(IndexReader r1, IndexReader r2, System.String idField)
{
Assert.AreEqual(r1.NumDocs(), r2.NumDocs());
bool hasDeletes = !(r1.MaxDoc() == r2.MaxDoc() && r1.NumDocs() == r1.MaxDoc());
int[] r2r1 = new int[r2.MaxDoc()]; // r2 id to r1 id mapping
TermDocs termDocs1 = r1.TermDocs();
TermDocs termDocs2 = r2.TermDocs();
// create mapping from id2 space to id2 based on idField
idField = StringHelper.Intern(idField);
TermEnum termEnum = r1.Terms(new Term(idField, ""));
do
{
Term term = termEnum.Term();
if (term == null || (System.Object) term.Field() != (System.Object) idField)
break;
termDocs1.Seek(termEnum);
if (!termDocs1.Next())
{
// This doc is deleted and wasn't replaced
termDocs2.Seek(termEnum);
Assert.IsFalse(termDocs2.Next());
continue;
}
int id1 = termDocs1.Doc();
Assert.IsFalse(termDocs1.Next());
termDocs2.Seek(termEnum);
Assert.IsTrue(termDocs2.Next());
int id2 = termDocs2.Doc();
Assert.IsFalse(termDocs2.Next());
r2r1[id2] = id1;
// verify stored fields are equivalent
try
{
VerifyEquals(r1.Document(id1), r2.Document(id2));
}
catch (System.Exception t)
{
System.Console.Out.WriteLine("FAILED id=" + term + " id1=" + id1 + " id2=" + id2 + " term=" + term);
System.Console.Out.WriteLine(" d1=" + r1.Document(id1));
System.Console.Out.WriteLine(" d2=" + r2.Document(id2));
throw t;
}
try
{
// verify term vectors are equivalent
VerifyEquals(r1.GetTermFreqVectors(id1), r2.GetTermFreqVectors(id2));
}
catch (System.Exception e)
{
System.Console.Out.WriteLine("FAILED id=" + term + " id1=" + id1 + " id2=" + id2);
TermFreqVector[] tv1 = r1.GetTermFreqVectors(id1);
System.Console.Out.WriteLine(" d1=" + tv1);
if (tv1 != null)
for (int i = 0; i < tv1.Length; i++)
{
System.Console.Out.WriteLine(" " + i + ": " + tv1[i]);
}
TermFreqVector[] tv2 = r2.GetTermFreqVectors(id2);
System.Console.Out.WriteLine(" d2=" + tv2);
if (tv2 != null)
for (int i = 0; i < tv2.Length; i++)
{
System.Console.Out.WriteLine(" " + i + ": " + tv2[i]);
}
throw e;
}
}
while (termEnum.Next());
termEnum.Close();
// Verify postings
TermEnum termEnum1 = r1.Terms(new Term("", ""));
TermEnum termEnum2 = r2.Terms(new Term("", ""));
// pack both doc and freq into single element for easy sorting
long[] info1 = new long[r1.NumDocs()];
long[] info2 = new long[r2.NumDocs()];
for (; ; )
{
Term term1, term2;
// iterate until we get some docs
int len1;
for (; ; )
{
len1 = 0;
term1 = termEnum1.Term();
//.........这里部分代码省略.........
示例13: ApplyDeletes
// Apply buffered delete terms, queries and docIDs to the
// provided reader
/// <summary>
/// Applies all flushed deletes (by term, by global doc id, and by query) to
/// <paramref name="reader"/>, whose documents occupy the global id range
/// [docIDStart, docIDStart + reader.MaxDoc()).
/// </summary>
/// <returns>true if at least one document was deleted.</returns>
private bool ApplyDeletes(IndexReader reader, int docIDStart)
{
    lock (this)
    {
        int docEnd = docIDStart + reader.MaxDoc();
        bool any = false;
        System.Diagnostics.Debug.Assert(CheckDeleteTerm(null));
        // Delete by term
        //System.Collections.IEnumerator iter = new System.Collections.Hashtable(deletesFlushed.terms).GetEnumerator();
        System.Collections.IEnumerator iter = deletesFlushed.terms.GetEnumerator();
        TermDocs docs = reader.TermDocs();
        try
        {
            while (iter.MoveNext())
            {
                System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) iter.Current;
                Term term = (Term) entry.Key;
                // LUCENE-2086: we should be iterating a TreeMap,
                // here, so terms better be in order:
                System.Diagnostics.Debug.Assert(CheckDeleteTerm(term));
                docs.Seek(term);
                // Only docs buffered before this delete was recorded may be deleted.
                int limit = ((BufferedDeletes.Num) entry.Value).GetNum();
                while (docs.Next())
                {
                    int docID = docs.Doc();
                    if (docIDStart + docID >= limit)
                        break;
                    reader.DeleteDocument(docID);
                    any = true;
                }
            }
        }
        finally
        {
            docs.Close();
        }
        // Delete by docID
        iter = deletesFlushed.docIDs.GetEnumerator();
        while (iter.MoveNext())
        {
            int docID = ((System.Int32) iter.Current);
            // Translate the global doc id into this reader's local id space.
            if (docID >= docIDStart && docID < docEnd)
            {
                reader.DeleteDocument(docID - docIDStart);
                any = true;
            }
        }
        // Delete by query
        IndexSearcher searcher = new IndexSearcher(reader);
        iter = new System.Collections.Hashtable(deletesFlushed.queries).GetEnumerator();
        while (iter.MoveNext())
        {
            System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) iter.Current;
            Query query = (Query) entry.Key;
            int limit = ((System.Int32) entry.Value);
            Weight weight = query.Weight(searcher);
            Scorer scorer = weight.Scorer(reader, true, false);
            if (scorer != null)
            {
                while (true)
                {
                    int doc = scorer.NextDoc();
                    // long arithmetic — presumably to avoid int overflow when the
                    // scorer returns its exhausted sentinel; TODO confirm.
                    if (((long) docIDStart) + doc >= limit)
                        break;
                    reader.DeleteDocument(doc);
                    any = true;
                }
            }
        }
        searcher.Close();
        return any;
    }
}
示例14: ComputeDistances
/// <summary>
/// Computes, for every document whose spatial field holds a point shape, the
/// distance from that point to <c>originPt</c>. The result is indexed by doc
/// id; documents without a readable point shape keep the default 0.0. Returns
/// an all-zero array of length MaxDoc when no point shapes exist at all.
/// </summary>
protected internal double[] ComputeDistances(IndexReader reader)
{
    double[] retArray = null;
    var termDocs = reader.TermDocs();
    var termEnum = reader.Terms(new Term(Constants.SpatialShapeFieldName));
    try
    {
        do
        {
            Term term = termEnum.Term();
            if (term == null)
                break;
            Debug.Assert(Constants.SpatialShapeFieldName.Equals(term.Field()));
            Shape termval;
            try
            {
                termval = SpatialIndex.RavenSpatialContext.ReadShape(term.Text()); // read shape
            }
            catch (InvalidShapeException)
            {
                // Unparsable shape values are skipped rather than failing the scan.
                continue;
            }
            // Only point shapes contribute a distance; other shapes are ignored.
            var pt = termval as Point;
            if (pt == null)
                continue;
            var distance = SpatialIndex.RavenSpatialContext.GetDistCalc().Distance(pt, originPt);
            if (retArray == null)
                // late init
                retArray = new double[reader.MaxDoc()];
            // Assign this term's distance to every document containing the term.
            termDocs.Seek(termEnum);
            while (termDocs.Next())
            {
                retArray[termDocs.Doc()] = distance;
            }
        } while (termEnum.Next());
    }
    finally
    {
        termDocs.Close();
        termEnum.Close();
    }
    return retArray ?? new double[reader.MaxDoc()];
}
示例15: GetLiveDocs
/// <summary>
/// Returns a single <seealso cref="Bits"/> instance for this
/// reader, merging live documents on the
/// fly. This method will return null if the reader
/// has no deletions.
///
/// <para><b>NOTE</b>: this is a very slow way to access live docs.
/// For example, each Bits access will require a binary search.
/// It's better to get the sub-readers and iterate through them
/// yourself.</para>
/// </summary>
public static Bits GetLiveDocs(IndexReader reader)
{
    if (reader.HasDeletions())
    {
        IList<AtomicReaderContext> leaves = reader.Leaves();
        int size = leaves.Count;
        Debug.Assert(size > 0, "A reader with deletions must have at least one leave");
        // Single leaf: its live docs are exactly the reader's live docs.
        if (size == 1)
        {
            return leaves[0].AtomicReader.LiveDocs;
        }
        Bits[] liveDocs = new Bits[size];
        int[] starts = new int[size + 1];
        for (int i = 0; i < size; i++)
        {
            // record all liveDocs, even if they are null
            AtomicReaderContext ctx = leaves[i];
            liveDocs[i] = ctx.AtomicReader.LiveDocs;
            starts[i] = ctx.DocBase;
        }
        // Sentinel entry so doc ids in the last leaf can be located by search.
        starts[size] = reader.MaxDoc();
        return new MultiBits(liveDocs, starts, true);
    }
    else
    {
        return null;
    }
}