This article collects typical usage examples of the C# method Lucene.Net.Index.IndexReader.NumDocs. If you are wondering how exactly IndexReader.NumDocs is used in C#, how to call it, or what real-world usage looks like, the curated method examples below may help. You can also explore further usage examples of the containing class, Lucene.Net.Index.IndexReader.
Below are 12 code examples of the IndexReader.NumDocs method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C# code examples.
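Before the examples, here is a minimal, self-contained sketch of the method itself. It is an illustration only, not taken from any of the examples below; it assumes the Lucene.Net 3.0.3 API (where NumDocs() is a method while MaxDoc and HasDeletions are properties) and a placeholder index path.

using System;
using System.IO;
using Lucene.Net.Index;
using Lucene.Net.Store;

class NumDocsDemo
{
    static void Main()
    {
        // The path is a placeholder; point it at an existing index.
        using (var dir = FSDirectory.Open(new DirectoryInfo("path/to/index")))
        using (var reader = IndexReader.Open(dir, true)) // true = read-only
        {
            // NumDocs() counts live (non-deleted) documents.
            // MaxDoc is one greater than the largest document number, so it
            // still includes the slots of deleted documents.
            Console.WriteLine("NumDocs:      {0}", reader.NumDocs());
            Console.WriteLine("MaxDoc:       {0}", reader.MaxDoc);
            Console.WriteLine("HasDeletions: {0}", reader.HasDeletions);
        }
    }
}

Several of the examples below rely on exactly this distinction, for instance to check that two readers cover the same documents or to size per-document arrays.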
Example 1: Add
/// <summary>Add an IndexReader whose stored fields will not be returned. This can
/// accelerate search when stored fields are only needed from a subset of
/// the IndexReaders.
/// </summary>
/// <throws>IllegalArgumentException if not all indexes contain the same number
/// of documents</throws>
/// <throws>IllegalArgumentException if not all indexes have the same value
/// of <see cref="IndexReader.MaxDoc()" /></throws>
public virtual void Add(IndexReader reader, bool ignoreStoredFields)
{
if (readers.Count == 0)
{
this.maxDoc = reader.MaxDoc();
this.numDocs = reader.NumDocs();
this.hasDeletions = reader.HasDeletions();
}
if (reader.MaxDoc() != maxDoc)
// check compatibility
throw new System.ArgumentException("All readers must have same maxDoc: " + maxDoc + "!=" + reader.MaxDoc());
if (reader.NumDocs() != numDocs)
throw new System.ArgumentException("All readers must have same numDocs: " + numDocs + "!=" + reader.NumDocs());
System.Collections.IEnumerator i = reader.GetFieldNames(IndexReader.FieldOption.ALL).GetEnumerator();
while (i.MoveNext())
{
System.Collections.DictionaryEntry fi = (System.Collections.DictionaryEntry) i.Current;
// update fieldToReader map
System.String field = fi.Key.ToString();
if (fieldToReader[field] == null)
fieldToReader[field] = reader;
}
if (!ignoreStoredFields)
storedFieldReaders.Add(reader); // add to storedFieldReaders
readers.Add(reader);
}
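The Add method above comes from a reader that merges several parallel indexes; the containing class is assumed here to be ParallelReader. The sketch below shows a hypothetical call site; the directory variables are placeholders, and both indexes must contain the same documents in the same order (same maxDoc/numDocs).

// Hypothetical usage; 'mainDirectory' and 'auxDirectory' are opened elsewhere.
var mainReader = IndexReader.Open(mainDirectory, true);
var auxReader = IndexReader.Open(auxDirectory, true);

var parallel = new ParallelReader();
parallel.Add(mainReader);        // stored fields may be returned from this reader
parallel.Add(auxReader, true);   // ignoreStoredFields: searchable, but its stored fields are skipped
Console.WriteLine("Documents: {0}", parallel.NumDocs());
parallel.Dispose();              // with the default constructor this also closes the sub-readers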
Example 2: GetIdfWeightedTerms
/// <summary>Extracts all term texts of a given Query into an array of WeightedTerms.
/// </summary>
/// <param name="query">Query to extract term texts from</param>
/// <param name="reader">used to compute IDF, which can be used to a) score selected fragments better or
/// b) apply graded highlights, e.g. by changing the intensity of the font color</param>
/// <param name="fieldName">the field on which Inverse Document Frequency (IDF) calculations are based</param>
/// <returns> an array of the terms used in a query, plus their weights.</returns>
public static WeightedTerm[] GetIdfWeightedTerms(Query query, IndexReader reader, string fieldName)
{
WeightedTerm[] terms = GetTerms(query, false, fieldName);
int totalNumDocs = reader.NumDocs();
foreach (WeightedTerm t in terms)
{
try
{
int docFreq = reader.DocFreq(new Term(fieldName, t.Term));
// docFreq counts deletes
if (totalNumDocs < docFreq)
{
docFreq = totalNumDocs;
}
//IDF algorithm taken from DefaultSimilarity class
var idf = (float)(Math.Log((float)totalNumDocs / (double)(docFreq + 1)) + 1.0);
t.Weight *= idf;
}
catch (IOException)
{
// ignore terms that cannot be looked up; their original weight is kept
}
}
return terms;
}
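The method above is only the body shown on this page; the containing class is assumed to be QueryTermExtractor from the Highlighter contrib package. A hedged sketch of a call site, with a made-up field name "body":

// Hypothetical call site; 'directory' is any Lucene.Net.Store.Directory opened elsewhere.
Query query = new TermQuery(new Term("body", "lucene"));
using (IndexReader reader = IndexReader.Open(directory, true))
{
    WeightedTerm[] weighted = QueryTermExtractor.GetIdfWeightedTerms(query, reader, "body");
    foreach (WeightedTerm t in weighted)
        Console.WriteLine("{0} -> {1:F3}", t.Term, t.Weight);
}

To make the weighting concrete: with totalNumDocs = 1000 and docFreq = 9, the term's weight is multiplied by log(1000 / 10) + 1 ≈ 5.6 (natural logarithm, as in DefaultSimilarity).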
Example 3: GetDocIdSet
public override DocIdSet GetDocIdSet(IndexReader reader)
{
OpenBitSet bitSet = new OpenBitSet(reader.NumDocs());
TermDocs termDocs = reader.TermDocs(new Term("TenantId", _tenantId));
while (termDocs.Next())
{
if (termDocs.Freq > 0)
{
bitSet.Set(termDocs.Doc);
}
}
return bitSet;
}
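This override only builds the DocIdSet; the sketch below shows how such a tenant filter might be applied in a search. The class name TenantFilter, its constructor, and the match-all query are assumptions for illustration. Note that the bit set above is sized with reader.NumDocs(), while document numbers can reach reader.MaxDoc - 1 when the index contains deletions; OpenBitSet.Set is documented to expand the set as needed, so this still works, but sizing with MaxDoc avoids the resizing.

// Hypothetical usage; "TenantFilter" is an assumed name for the class that contains
// the GetDocIdSet override above, and its constructor is assumed to take the tenant id.
static int CountTenantDocs(Lucene.Net.Store.Directory dir, string tenantId)
{
    using (var reader = IndexReader.Open(dir, true))
    using (var searcher = new IndexSearcher(reader))
    {
        // A match-all query restricted by the filter counts the tenant's documents.
        TopDocs hits = searcher.Search(new MatchAllDocsQuery(), new TenantFilter(tenantId), 1);
        return hits.TotalHits;
    }
}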
Example 4: Execute
public override IEnumerable<Row> Execute(IEnumerable<Row> rows)
{
if (_indexDirectory == null)
yield break;
try {
_reader = IndexReader.Open(_indexDirectory, true);
}
catch (Exception) {
Warn("Failed to open lucene index in {0}.", _indexDirectory.Directory.FullName);
yield break;
}
var docCount = _reader.NumDocs();
Info("Found {0} documents in lucene index.", docCount);
for (var i = 0; i < docCount; i++) {
if (_reader.IsDeleted(i))
continue;
var doc = _reader.Document(i);
var row = new Row();
foreach (var field in doc.GetFields().Where(field => field.IsStored)) {
switch (field.Name) {
case "dropped":
row[field.Name] = Convert.ToBoolean(field.StringValue);
break;
default:
row[field.Name] = field.StringValue;
break;
}
}
yield return row;
}
}
Example 5: AddStopWords
/// <summary>
/// Automatically adds stop words for the given field with terms exceeding the maxPercentDocs.
/// </summary>
/// <param name="reader">The IndexReader which will be consulted to identify potential stop words that
/// exceed the required document frequency</param>
/// <param name="fieldName">The field for which stop words will be added</param>
/// <param name="maxPercentDocs">The maximum percentage (between 0.0 and 1.0) of index documents which
/// contain a term, after which the word is considered to be a stop word</param>
/// <returns>The number of stop words identified.</returns>
/// <throws>IOException</throws>
public int AddStopWords(IndexReader reader, String fieldName, float maxPercentDocs)
{
return AddStopWords(reader, fieldName, (int) (reader.NumDocs() * maxPercentDocs));
}
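The overload above simply converts the percentage into an absolute document-frequency cut-off via reader.NumDocs(). It appears to belong to QueryAutoStopWordAnalyzer from the contrib analyzers package; the sketch below assumes that class and constructor, the LUCENE_30 version constant, a made-up field name "description", and a 0.4 threshold.

// Hedged sketch: QueryAutoStopWordAnalyzer and its constructor are assumptions about the
// surrounding class; "description", 0.4 and 'directory' are placeholders.
var analyzer = new QueryAutoStopWordAnalyzer(Version.LUCENE_30,
                                             new StandardAnalyzer(Version.LUCENE_30));
using (var reader = IndexReader.Open(directory, true))
{
    // Terms appearing in more than 40% of the documents of the field become
    // query-time stop words; the return value is how many were added.
    int added = analyzer.AddStopWords(reader, "description", 0.4f);
    Console.WriteLine("Added {0} stop words for 'description'.", added);
}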
Example 6: Add
/// <summary>Add an IndexReader whose stored fields will not be returned. This can
/// accelerate search when stored fields are only needed from a subset of
/// the IndexReaders.
/// </summary>
/// <throws>IllegalArgumentException if not all indexes contain the same number
/// of documents</throws>
/// <throws>IllegalArgumentException if not all indexes have the same value
/// of <see cref="IndexReader.MaxDoc()" /></throws>
/// <throws>IOException if there is a low-level IO error</throws>
public virtual void Add(IndexReader reader, bool ignoreStoredFields)
{
EnsureOpen();
if (readers.Count == 0)
{
this.maxDoc = reader.MaxDoc();
this.numDocs = reader.NumDocs();
this.hasDeletions = reader.HasDeletions();
}
if (reader.MaxDoc() != maxDoc)
// check compatibility
throw new System.ArgumentException("All readers must have same maxDoc: " + maxDoc + "!=" + reader.MaxDoc());
if (reader.NumDocs() != numDocs)
throw new System.ArgumentException("All readers must have same numDocs: " + numDocs + "!=" + reader.NumDocs());
System.Collections.Generic.ICollection<string> fields = reader.GetFieldNames(IndexReader.FieldOption.ALL);
readerToFields[reader] = fields;
System.Collections.IEnumerator i = fields.GetEnumerator();
while (i.MoveNext())
{
// update fieldToReader map
System.String field = (System.String) i.Current;
if (fieldToReader[field] == null)
fieldToReader[field] = reader;
}
if (!ignoreStoredFields)
storedFieldReaders.Add(reader); // add to storedFieldReaders
readers.Add(reader);
if (incRefReaders)
{
reader.IncRef();
}
decrefOnClose.Add(incRefReaders);
}
Example 7: SetItemCount
protected void SetItemCount (IndexReader reader)
{
last_item_count = reader.NumDocs ();
}
Example 8: VerifyEquals
public static void VerifyEquals(IndexReader r1, IndexReader r2, System.String idField)
{
Assert.AreEqual(r1.NumDocs(), r2.NumDocs());
bool hasDeletes = !(r1.MaxDoc() == r2.MaxDoc() && r1.NumDocs() == r1.MaxDoc());
int[] r2r1 = new int[r2.MaxDoc()]; // r2 id to r1 id mapping
TermDocs termDocs1 = r1.TermDocs();
TermDocs termDocs2 = r2.TermDocs();
// create mapping from id2 space to id1 based on idField
idField = StringHelper.Intern(idField);
TermEnum termEnum = r1.Terms(new Term(idField, ""));
do
{
Term term = termEnum.Term();
if (term == null || (System.Object) term.Field() != (System.Object) idField)
break;
termDocs1.Seek(termEnum);
if (!termDocs1.Next())
{
// This doc is deleted and wasn't replaced
termDocs2.Seek(termEnum);
Assert.IsFalse(termDocs2.Next());
continue;
}
int id1 = termDocs1.Doc();
Assert.IsFalse(termDocs1.Next());
termDocs2.Seek(termEnum);
Assert.IsTrue(termDocs2.Next());
int id2 = termDocs2.Doc();
Assert.IsFalse(termDocs2.Next());
r2r1[id2] = id1;
// verify stored fields are equivalent
try
{
VerifyEquals(r1.Document(id1), r2.Document(id2));
}
catch (System.Exception t)
{
System.Console.Out.WriteLine("FAILED id=" + term + " id1=" + id1 + " id2=" + id2 + " term=" + term);
System.Console.Out.WriteLine(" d1=" + r1.Document(id1));
System.Console.Out.WriteLine(" d2=" + r2.Document(id2));
throw t;
}
try
{
// verify term vectors are equivalent
VerifyEquals(r1.GetTermFreqVectors(id1), r2.GetTermFreqVectors(id2));
}
catch (System.Exception e)
{
System.Console.Out.WriteLine("FAILED id=" + term + " id1=" + id1 + " id2=" + id2);
TermFreqVector[] tv1 = r1.GetTermFreqVectors(id1);
System.Console.Out.WriteLine(" d1=" + tv1);
if (tv1 != null)
for (int i = 0; i < tv1.Length; i++)
{
System.Console.Out.WriteLine(" " + i + ": " + tv1[i]);
}
TermFreqVector[] tv2 = r2.GetTermFreqVectors(id2);
System.Console.Out.WriteLine(" d2=" + tv2);
if (tv2 != null)
for (int i = 0; i < tv2.Length; i++)
{
System.Console.Out.WriteLine(" " + i + ": " + tv2[i]);
}
throw e;
}
}
while (termEnum.Next());
termEnum.Close();
// Verify postings
TermEnum termEnum1 = r1.Terms(new Term("", ""));
TermEnum termEnum2 = r2.Terms(new Term("", ""));
// pack both doc and freq into single element for easy sorting
long[] info1 = new long[r1.NumDocs()];
long[] info2 = new long[r2.NumDocs()];
for (; ; )
{
Term term1, term2;
// iterate until we get some docs
int len1;
for (; ; )
{
len1 = 0;
term1 = termEnum1.Term();
//......... part of the code is omitted here .........
Example 9: Add
/// <summary>Add an IndexReader whose stored fields will not be returned. This can
/// accelerate search when stored fields are only needed from a subset of
/// the IndexReaders.
/// </summary>
/// <throws>IllegalArgumentException if not all indexes contain the same number
/// of documents</throws>
/// <throws>IllegalArgumentException if not all indexes have the same value
/// of <see cref="IndexReader.MaxDoc" /></throws>
/// <throws>IOException if there is a low-level IO error</throws>
public virtual void Add(IndexReader reader, bool ignoreStoredFields)
{
EnsureOpen();
if (readers.Count == 0)
{
this.maxDoc = reader.MaxDoc;
this.numDocs = reader.NumDocs();
this.hasDeletions = reader.HasDeletions;
}
if (reader.MaxDoc != maxDoc)
// check compatibility
throw new System.ArgumentException("All readers must have same maxDoc: " + maxDoc + "!=" + reader.MaxDoc);
if (reader.NumDocs() != numDocs)
throw new System.ArgumentException("All readers must have same numDocs: " + numDocs + "!=" + reader.NumDocs());
ICollection<string> fields = reader.GetFieldNames(IndexReader.FieldOption.ALL);
readerToFields[reader] = fields;
foreach(var field in fields)
{
// update fieldToReader map
// Do a ContainsKey check first to mimic Java behavior
if (!fieldToReader.ContainsKey(field) || fieldToReader[field] == null)
fieldToReader[field] = reader;
}
if (!ignoreStoredFields)
storedFieldReaders.Add(reader); // add to storedFieldReaders
readers.Add(reader);
if (incRefReaders)
{
reader.IncRef();
}
decrefOnClose.Add(incRefReaders);
}
Example 10: AssertIndexEquals
public static void AssertIndexEquals(IndexReader index1, IndexReader index2)
{
Assert.AreEqual(index1.NumDocs(), index2.NumDocs(), "IndexReaders have different values for numDocs.");
Assert.AreEqual(index1.MaxDoc, index2.MaxDoc, "IndexReaders have different values for maxDoc.");
Assert.AreEqual(index1.HasDeletions, index2.HasDeletions, "Only one IndexReader has deletions.");
Assert.AreEqual(index1.IsOptimized(), index2.IsOptimized(), "Only one index is optimized.");
// check field names
System.Collections.Generic.ICollection<string> fieldsNames1 = index1.GetFieldNames(FieldOption.ALL);
System.Collections.Generic.ICollection<string> fieldsNames2 = index2.GetFieldNames(FieldOption.ALL);
System.Collections.Generic.ICollection<IFieldable> fields1 = null;
System.Collections.Generic.ICollection<IFieldable> fields2 = null;
Assert.AreEqual(fieldsNames1.Count, fieldsNames2.Count, "IndexReaders have different numbers of fields.");
System.Collections.IEnumerator it1 = fieldsNames1.GetEnumerator();
System.Collections.IEnumerator it2 = fieldsNames2.GetEnumerator();
while (it1.MoveNext() && it2.MoveNext())
{
Assert.AreEqual((System.String) it1.Current, (System.String) it2.Current, "Different field names.");
}
// check norms
it1 = fieldsNames1.GetEnumerator();
while (it1.MoveNext())
{
System.String curField = (System.String) it1.Current;
byte[] norms1 = index1.Norms(curField);
byte[] norms2 = index2.Norms(curField);
if (norms1 != null && norms2 != null)
{
Assert.AreEqual(norms1.Length, norms2.Length);
for (int i = 0; i < norms1.Length; i++)
{
Assert.AreEqual(norms1[i], norms2[i], "Norm different for doc " + i + " and field '" + curField + "'.");
}
}
else
{
Assert.AreSame(norms1, norms2);
}
}
// check deletions
for (int i = 0; i < index1.MaxDoc; i++)
{
Assert.AreEqual(index1.IsDeleted(i), index2.IsDeleted(i), "Doc " + i + " only deleted in one index.");
}
// check stored fields
for (int i = 0; i < index1.MaxDoc; i++)
{
if (!index1.IsDeleted(i))
{
Document doc1 = index1.Document(i);
Document doc2 = index2.Document(i);
fields1 = doc1.GetFields();
fields2 = doc2.GetFields();
Assert.AreEqual(fields1.Count, fields2.Count, "Different numbers of fields for doc " + i + ".");
it1 = fields1.GetEnumerator();
it2 = fields2.GetEnumerator();
while (it1.MoveNext() && it2.MoveNext())
{
Field curField1 = (Field) it1.Current;
Field curField2 = (Field) it2.Current;
Assert.AreEqual(curField1.Name, curField2.Name, "Different fields names for doc " + i + ".");
Assert.AreEqual(curField1.StringValue, curField2.StringValue, "Different field values for doc " + i + ".");
}
}
}
// check dictionary and posting lists
TermEnum enum1 = index1.Terms();
TermEnum enum2 = index2.Terms();
TermPositions tp1 = index1.TermPositions();
TermPositions tp2 = index2.TermPositions();
while (enum1.Next())
{
Assert.IsTrue(enum2.Next());
Assert.AreEqual(enum1.Term, enum2.Term, "Different term in dictionary.");
tp1.Seek(enum1.Term);
tp2.Seek(enum1.Term);
while (tp1.Next())
{
Assert.IsTrue(tp2.Next());
Assert.AreEqual(tp1.Doc, tp2.Doc, "Different doc id in posting list of term " + enum1.Term + ".");
Assert.AreEqual(tp1.Freq, tp2.Freq, "Different term frequency in posting list of term " + enum1.Term + ".");
for (int i = 0; i < tp1.Freq; i++)
{
Assert.AreEqual(tp1.NextPosition(), tp2.NextPosition(), "Different positions in posting list of term " + enum1.Term + ".");
}
}
}
}
Example 11: DoExecute
protected override SearchResult DoExecute(Query query, bool allVersions, IndexReader idxReader, Stopwatch timer)
{
var numDocs = idxReader.NumDocs();
var start = this.LucQuery.Skip;
var maxtop = numDocs - start;
if (maxtop < 1)
return SearchResult.Empty;
var user = this.LucQuery.User;
var currentUser = AccessProvider.Current.GetCurrentUser();
if (user == null)
user = currentUser;
var isCurrentUser = user.Id == currentUser.Id;
int top = this.LucQuery.Top != 0 ? this.LucQuery.Top : this.LucQuery.PageSize;
if (top == 0)
top = int.MaxValue;
var searcher = new IndexSearcher(idxReader);
var p = new SearchParams
{
query = query,
allVersions = allVersions,
searcher = searcher,
user = user,
isCurrentUser = isCurrentUser,
skip = start,
timer = timer,
top = top
};
SearchResult r = null;
SearchResult r1 = null;
try
{
var defaultTops = SenseNet.ContentRepository.Storage.StorageContext.Search.DefaultTopAndGrowth;
var howManyList = new List<int>(defaultTops);
if (howManyList[howManyList.Count - 1] == 0)
howManyList[howManyList.Count - 1] = int.MaxValue;
if (top < int.MaxValue)
{
var howMany = (top < int.MaxValue / 2) ? top * 2 : int.MaxValue; // numDocs; // * 4; // * 2;
if ((long)howMany > maxtop)
howMany = maxtop - start;
while (howManyList.Count > 0)
{
if (howMany < howManyList[0])
break;
howManyList.RemoveAt(0);
}
howManyList.Insert(0, howMany);
}
for (var i = 0; i < howManyList.Count; i++)
{
var defaultTop = howManyList[i];
if (defaultTop == 0)
defaultTop = numDocs;
p.howMany = defaultTop;
p.useHowMany = i < howManyList.Count - 1;
var maxSize = i == 0 ? numDocs : r.totalCount;
p.collectorSize = Math.Min(defaultTop, maxSize - p.skip) + p.skip;
r1 = Search(p);
if (i == 0)
r = r1;
else
r.Add(r1);
p.skip += r.nextIndex;
p.top = top - r.result.Count;
if (r.result.Count >= top || r.result.Count >= r.totalCount)
break;
}
p.timer.Stop();
return r;
}
finally
{
if (searcher != null)
{
searcher.Close();
searcher = null;
}
}
}
Example 12: BuildSearchers
// Returns true if there are docs to search and creates the readers and searchers
// in that case. Otherwise, returns false.
private bool BuildSearchers (out IndexReader primary_reader,
out LNS.IndexSearcher primary_searcher,
out IndexReader secondary_reader,
out LNS.IndexSearcher secondary_searcher)
{
primary_searcher = null;
secondary_reader = null;
secondary_searcher = null;
primary_reader = LuceneCommon.GetReader (PrimaryStore);
if (primary_reader.NumDocs() == 0) {
ReleaseReader (primary_reader);
primary_reader = null;
return false;
}
primary_searcher = new LNS.IndexSearcher (primary_reader);
if (SecondaryStore != null) {
secondary_reader = LuceneCommon.GetReader (SecondaryStore);
if (secondary_reader.NumDocs () == 0) {
ReleaseReader (secondary_reader);
secondary_reader = null;
}
}
if (secondary_reader != null)
secondary_searcher = new LNS.IndexSearcher (secondary_reader);
return true;
}