This article collects representative usage examples of the C# method Lucene.Net.Index.IndexReader.TermDocs. If you have been wondering what IndexReader.TermDocs does, how to call it, or what real-world calls look like, the curated examples below should help. You can also browse further usage examples of the containing class, Lucene.Net.Index.IndexReader.
The following 15 code examples of IndexReader.TermDocs are sorted by popularity by default. You can vote up the examples you like or find useful; your feedback helps the system recommend better C# examples.
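Before diving in, here is a minimal, self-contained sketch of the pattern almost every example below follows: obtain a TermDocs from the reader, step through it with Next(), and read the current document number (and optionally the in-document frequency) for each match. This is a hedged sketch assuming the Lucene.Net 2.9-style method API (in 3.0.x, Doc and Freq become properties and TermDocs is IDisposable); the index path, field name, and term value are placeholders.

using Lucene.Net.Index;
using Lucene.Net.Store;

public static class TermDocsSketch
{
    // Counts the non-deleted documents containing field:value.
    // "indexPath", "field" and "value" are placeholder inputs.
    public static int CountDocsWithTerm(string indexPath, string field, string value)
    {
        IndexReader reader = IndexReader.Open(FSDirectory.Open(new System.IO.DirectoryInfo(indexPath)), true);
        try
        {
            // TermDocs(Term) positions the enumerator on the postings list for that exact term.
            TermDocs termDocs = reader.TermDocs(new Term(field, value));
            try
            {
                int count = 0;
                while (termDocs.Next()) // advances to the next matching, non-deleted doc
                {
                    // termDocs.Doc() is the current document number;
                    // termDocs.Freq() is how often the term occurs in it.
                    count++;
                }
                return count;
            }
            finally
            {
                termDocs.Close();
            }
        }
        finally
        {
            reader.Close();
        }
    }
}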
Example 1: MatchAllScorer
internal MatchAllScorer(MatchAllDocsQuery enclosingInstance, IndexReader reader, Similarity similarity, Weight w, byte[] norms) : base(similarity)
{
    InitBlock(enclosingInstance);
    // Passing null makes TermDocs enumerate every non-deleted document in the index.
    this.termDocs = reader.TermDocs(null);
    score = w.Value;
    this.norms = norms;
}
Example 2: Bits
public override BitArray Bits(IndexReader reader)
{
    BitArray bitArray = new BitArray(reader.MaxDoc());
    // Turn on a bit for every document whose "score" field contains the exact term "5".
    TermDocs termDocs = reader.TermDocs(new Term("score", "5"));
    while (termDocs.Next())
    {
        bitArray.Set(termDocs.Doc(), true);
    }
    return bitArray;
}
Example 3: Count
public static int Count(Term t, IndexReader r)
{
    int count = 0;
    // TermDocs skips deleted documents, so this counts live matches only.
    TermDocs td = r.TermDocs(t);
    while (td.Next())
    {
        td.Doc();
        count++;
    }
    td.Close();
    return count;
}
Example 4: GetDocIdSet
public override DocIdSet GetDocIdSet(IndexReader reader)
{
    // Size by MaxDoc, not NumDocs: document numbers can reach MaxDoc - 1
    // when the index contains deletions.
    OpenBitSet bitSet = new OpenBitSet(reader.MaxDoc);
    TermDocs termDocs = reader.TermDocs(new Term("TenantId", _tenantId));
    while (termDocs.Next())
    {
        if (termDocs.Freq > 0)
        {
            bitSet.Set(termDocs.Doc);
        }
    }
    return bitSet;
}
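As a hedged usage sketch (TenantFilter is an assumed name for a class wrapping the filter above, and query/reader come from elsewhere), a filter like this is normally handed to the searcher next to the main query:

// Hypothetical usage; TenantFilter and "tenant-42" are placeholders.
var searcher = new IndexSearcher(reader);
TopDocs hits = searcher.Search(query, new TenantFilter("tenant-42"), 10);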
Example 5: GetDocIdSet
/// <summary>
/// Get the DocIdSet.
/// </summary>
/// <param name="reader">Applicable reader.</param>
/// <returns>The set.</returns>
public override DocIdSet GetDocIdSet(IndexReader reader)
{
    OpenBitSet result = new OpenBitSet(reader.MaxDoc());
    // One TermDocs is reused for all terms: Seek repositions it on each postings list.
    TermDocs td = reader.TermDocs();
    try
    {
        foreach (Term t in this.terms)
        {
            td.Seek(t);
            while (td.Next())
            {
                result.Set(td.Doc());
            }
        }
    }
    finally
    {
        td.Close();
    }
    return result;
}
Example 6: FillCache
private static Dictionary<string, int[]> FillCache(IndexReader reader, int docBase, string field)
{
    using (var termDocs = reader.TermDocs())
    {
        var items = new Dictionary<string, int[]>();
        var docsForTerm = new List<int>();

        using (var termEnum = reader.Terms(new Term(field)))
        {
            do
            {
                if (termEnum.Term == null || field != termEnum.Term.Field)
                    break;

                Term term = termEnum.Term;
                if (LowPrecisionNumber(term.Field, term.Text))
                    continue;

                var totalDocCountIncludedDeletes = termEnum.DocFreq();
                termDocs.Seek(termEnum.Term);
                while (termDocs.Next() && totalDocCountIncludedDeletes > 0)
                {
                    var curDoc = termDocs.Doc;
                    totalDocCountIncludedDeletes -= 1;
                    if (reader.IsDeleted(curDoc))
                        continue;

                    docsForTerm.Add(curDoc + docBase);
                }

                docsForTerm.Sort();
                items[term.Text] = docsForTerm.ToArray();
                docsForTerm.Clear();
            } while (termEnum.Next());
        }
        return items;
    }
}
Example 7: GetDocIdSet
public override DocIdSet GetDocIdSet(IndexReader reader)
{
    var bits = new OpenBitSet(reader.MaxDoc());
    TermDocs termDocs = reader.TermDocs();
    List<double> area = _shape.Area;
    int sz = area.Count;

    // iterate through each boxId
    for (int i = 0; i < sz; i++)
    {
        double boxId = area[i];
        termDocs.Seek(new Term(_fieldName, NumericUtils.DoubleToPrefixCoded(boxId)));

        // iterate through all documents
        // which have this boxId
        while (termDocs.Next())
        {
            bits.FastSet(termDocs.Doc());
        }
    }
    return bits;
}
Example 8: ApplyDeletes
// Apply buffered delete terms, queries and docIDs to the
// provided reader
private bool ApplyDeletes(IndexReader reader, int docIDStart)
{
    lock (this)
    {
        int docEnd = docIDStart + reader.MaxDoc();
        bool any = false;

        // Delete by term
        IEnumerator<KeyValuePair<object, object>> iter = deletesFlushed.terms.GetEnumerator();
        while (iter.MoveNext())
        {
            KeyValuePair<object, object> entry = (KeyValuePair<object, object>)iter.Current;
            Term term = (Term)entry.Key;

            TermDocs docs = reader.TermDocs(term);
            if (docs != null)
            {
                int limit = ((BufferedDeletes.Num)entry.Value).GetNum();
                try
                {
                    while (docs.Next())
                    {
                        int docID = docs.Doc();
                        if (docIDStart + docID >= limit)
                            break;
                        reader.DeleteDocument(docID);
                        any = true;
                    }
                }
                finally
                {
                    docs.Close();
                }
            }
        }

        // Delete by docID
        IEnumerator<object> iter2 = deletesFlushed.docIDs.GetEnumerator();
        while (iter2.MoveNext())
        {
            int docID = (int)iter2.Current;
            if (docID >= docIDStart && docID < docEnd)
            {
                reader.DeleteDocument(docID - docIDStart);
                any = true;
            }
        }

        // Delete by query
        IndexSearcher searcher = new IndexSearcher(reader);
        iter = deletesFlushed.queries.GetEnumerator();
        while (iter.MoveNext())
        {
            KeyValuePair<object, object> entry = (KeyValuePair<object, object>)iter.Current;
            Query query = (Query)entry.Key;
            int limit = (int)entry.Value;
            Weight weight = query.Weight(searcher);
            Scorer scorer = weight.Scorer(reader);
            while (scorer.Next())
            {
                int docID = scorer.Doc();
                if (docIDStart + docID >= limit)
                    break;
                reader.DeleteDocument(docID);
                any = true;
            }
        }
        searcher.Close();
        return any;
    }
}
Example 9: TermDocs
protected internal virtual TermDocs TermDocs(IndexReader reader)
{
    return reader.TermDocs();
}
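Example 9 is just a virtual factory hook; presumably a subclass overrides it to hand back a differently positioned or filtered enumerator. A hypothetical override (the "type"/"article" term is a placeholder, not part of the original code) might look like:

// Hypothetical override that pre-positions the enumerator on a specific term.
protected internal override TermDocs TermDocs(IndexReader reader)
{
    TermDocs td = reader.TermDocs();
    td.Seek(new Term("type", "article"));
    return td;
}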
Example 10: ReadAllEntriesFromIndex
public static RavenJObject[] ReadAllEntriesFromIndex(IndexReader reader)
{
    if (reader.MaxDoc > 512 * 1024)
    {
        throw new InvalidOperationException("Refusing to extract all index entries from an index with " + reader.MaxDoc +
                                            " entries, because of the probable time / memory costs associated with that." +
                                            Environment.NewLine +
                                            "Viewing index entries is a debug tool, and should not be used on indexes of this size. You might want to try Luke, instead.");
    }
    var results = new RavenJObject[reader.MaxDoc];
    using (var termDocs = reader.TermDocs())
    using (var termEnum = reader.Terms())
    {
        while (termEnum.Next())
        {
            var term = termEnum.Term;
            if (term == null)
                break;

            var text = term.Text;

            termDocs.Seek(termEnum);
            for (int i = 0; i < termEnum.DocFreq() && termDocs.Next(); i++)
            {
                RavenJObject result = results[termDocs.Doc];
                if (result == null)
                    results[termDocs.Doc] = result = new RavenJObject();

                var propertyName = term.Field;
                if (propertyName.EndsWith("_ConvertToJson") ||
                    propertyName.EndsWith("_IsArray"))
                    continue;

                if (result.ContainsKey(propertyName))
                {
                    switch (result[propertyName].Type)
                    {
                        case JTokenType.Array:
                            ((RavenJArray)result[propertyName]).Add(text);
                            break;
                        case JTokenType.String:
                            result[propertyName] = new RavenJArray
                            {
                                result[propertyName],
                                text
                            };
                            break;
                        default:
                            throw new ArgumentException("No idea how to handle " + result[propertyName].Type);
                    }
                }
                else
                {
                    result[propertyName] = text;
                }
            }
        }
    }
    return results;
}
Example 11: FindRecentResults
private static ArrayList FindRecentResults (IndexReader primary_reader,
                                            IndexReader secondary_reader,
                                            BetterBitArray primary_matches,
                                            Dictionary<int, Hit> hits_by_id,
                                            int max_results,
                                            ref int total_number_of_matches,
                                            HitFilter hit_filter,
                                            string index_name)
{
    Stopwatch b = new Stopwatch ();
    b.Start ();

    int count = 0;
    Document doc;

    ArrayList all_docs = null;
    TopScores top_docs = null;
    TermDocs term_docs = null;

    if (primary_matches.TrueCount > max_results)
        top_docs = new TopScores (max_results);
    else
        all_docs = new ArrayList (primary_matches.TrueCount);

    if (secondary_reader != null)
        term_docs = secondary_reader.TermDocs ();

    for (int match_index = primary_matches.Count; ; match_index --) {
        // Walk across the matches backwards, since newer
        // documents are more likely to be at the end of
        // the index.
        match_index = primary_matches.GetPreviousTrueIndex (match_index);
        if (match_index < 0)
            break;

        count++;

        doc = primary_reader.Document (match_index, fields_timestamp_uri);

        // Check the timestamp --- if we have already reached our
        // limit, we might be able to reject it immediately.
        string timestamp_str;
        long timestamp_num = 0;

        timestamp_str = doc.Get ("Timestamp");
        if (timestamp_str == null) {
            Logger.Log.Warn ("No timestamp on {0}!", GetUriFromDocument (doc));
        } else {
            timestamp_num = Int64.Parse (doc.Get ("Timestamp"));
            if (top_docs != null && ! top_docs.WillAccept (timestamp_num))
                continue;
        }

        // Get the actual hit now.
        // doc was created with only 2 fields, so first get the complete Lucene document for the primary document.
        // Also run our hit_filter now, if we have one. Since we insist on returning the max_results
        // most recent hits, any filtering should happen now and not later.
        Hit hit = CreateHit (primary_reader.Document (match_index), secondary_reader, term_docs);
        if (hit_filter != null && ! hit_filter (hit)) {
            if (Debug)
                Log.Debug ("Filtered out {0}", hit.Uri);
            total_number_of_matches --;
            continue;
        }

        hits_by_id [match_index] = hit;

        // Add the document to the appropriate data structure.
        // We use the timestamp_num as the score, so high
        // scores correspond to more-recent timestamps.
        if (all_docs != null)
            all_docs.Add (hit);
        else
            top_docs.Add (timestamp_num, hit);
    }

    if (term_docs != null)
        term_docs.Close ();

    b.Stop ();

    if (Debug)
        Log.Debug (">>> {0}: Instantiated and scanned {1} documents in {2}", index_name, count, b);

    if (all_docs != null) {
        // Sort results before sending
        all_docs.Sort ();
        return all_docs;
    } else {
        return top_docs.TopScoringObjects;
    }
}
Example 12: ApplyDeletes
// Apply buffered delete terms, queries and docIDs to the
// provided reader
private bool ApplyDeletes(IndexReader reader, int docIDStart)
{
    lock (this)
    {
        int docEnd = docIDStart + reader.MaxDoc();
        bool any = false;

        System.Diagnostics.Debug.Assert(CheckDeleteTerm(null));

        // Delete by term
        //System.Collections.IEnumerator iter = new System.Collections.Hashtable(deletesFlushed.terms).GetEnumerator();
        System.Collections.IEnumerator iter = deletesFlushed.terms.GetEnumerator();
        TermDocs docs = reader.TermDocs();
        try
        {
            while (iter.MoveNext())
            {
                System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) iter.Current;
                Term term = (Term) entry.Key;

                // LUCENE-2086: we should be iterating a TreeMap,
                // here, so terms better be in order:
                System.Diagnostics.Debug.Assert(CheckDeleteTerm(term));
                docs.Seek(term);
                int limit = ((BufferedDeletes.Num) entry.Value).GetNum();
                while (docs.Next())
                {
                    int docID = docs.Doc();
                    if (docIDStart + docID >= limit)
                        break;
                    reader.DeleteDocument(docID);
                    any = true;
                }
            }
        }
        finally
        {
            docs.Close();
        }

        // Delete by docID
        iter = deletesFlushed.docIDs.GetEnumerator();
        while (iter.MoveNext())
        {
            int docID = ((System.Int32) iter.Current);
            if (docID >= docIDStart && docID < docEnd)
            {
                reader.DeleteDocument(docID - docIDStart);
                any = true;
            }
        }

        // Delete by query
        IndexSearcher searcher = new IndexSearcher(reader);
        iter = new System.Collections.Hashtable(deletesFlushed.queries).GetEnumerator();
        while (iter.MoveNext())
        {
            System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) iter.Current;
            Query query = (Query) entry.Key;
            int limit = ((System.Int32) entry.Value);
            Weight weight = query.Weight(searcher);
            Scorer scorer = weight.Scorer(reader, true, false);
            if (scorer != null)
            {
                while (true)
                {
                    int doc = scorer.NextDoc();
                    if (((long) docIDStart) + doc >= limit)
                        break;
                    reader.DeleteDocument(doc);
                    any = true;
                }
            }
        }
        searcher.Close();
        return any;
    }
}
Example 13: ApplyDeletesSelectively
// Apply buffered delete terms to the segment just flushed from ram
// apply appropriately so that a delete term is only applied to
// the documents buffered before it, not those buffered after it.
private void ApplyDeletesSelectively(System.Collections.Hashtable deleteTerms, System.Collections.IList deleteIds, IndexReader reader)
{
    System.Collections.IEnumerator iter = new System.Collections.Hashtable(deleteTerms).GetEnumerator();
    while (iter.MoveNext())
    {
        System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) iter.Current;
        Term term = (Term) entry.Key;

        TermDocs docs = reader.TermDocs(term);
        if (docs != null)
        {
            int num = ((DocumentsWriter.Num) entry.Value).GetNum();
            try
            {
                while (docs.Next())
                {
                    int doc = docs.Doc();
                    if (doc >= num)
                    {
                        break;
                    }
                    reader.DeleteDocument(doc);
                }
            }
            finally
            {
                docs.Close();
            }
        }
    }

    if (deleteIds.Count > 0)
    {
        iter = deleteIds.GetEnumerator();
        while (iter.MoveNext())
        {
            reader.DeleteDocument(((System.Int32) iter.Current));
        }
    }
}
Example 14: Load
public virtual void Load(string fieldName, IndexReader reader, TermListFactory listFactory)
{
    string field = string.Intern(fieldName);
    int maxDoc = reader.MaxDoc;

    if (orderArray == null) // we want to reuse the memory
    {
        orderArray = NewInstance(termCountSize, maxDoc);
    }
    else
    {
        orderArray.EnsureCapacity(maxDoc); // no need to fill with 0, we are resetting the data anyway
    }

    List<int> minIDList = new List<int>();
    List<int> maxIDList = new List<int>();
    List<int> freqList = new List<int>();

    int length = maxDoc + 1;
    ITermValueList list = listFactory == null ? new TermStringList() : listFactory.CreateTermList();
    TermDocs termDocs = reader.TermDocs();
    TermEnum termEnum = reader.Terms(new Term(field));
    int t = 0; // current term number

    list.Add(null);
    minIDList.Add(-1);
    maxIDList.Add(-1);
    freqList.Add(0);
    //int df = 0;
    t++;

    try
    {
        do
        {
            Term term = termEnum.Term;
            if (term == null || string.CompareOrdinal(term.Field, field) != 0)
                break;

            if (t >= orderArray.MaxValue())
            {
                throw new System.IO.IOException("maximum number of values cannot exceed: " + orderArray.MaxValue());
            }

            // Alexey: well, we could now get more than one term per document. Effectively, we could build facets against a tokenized field.
            /*// we expect that there is at most one term per document
            if (t >= length)
            {
                throw new RuntimeException("there are more terms than " + "documents in field \"" + field + "\", but it's impossible to sort on " + "tokenized fields");
            }*/

            // store term text
            list.Add(term.Text);
            termDocs.Seek(termEnum);
            // freqList.add(termEnum.docFreq()); // doesn't take deleted docs into account
            int minID = -1;
            int maxID = -1;
            int df = 0;
            if (termDocs.Next())
            {
                df++;
                int docid = termDocs.Doc;
                orderArray.Add(docid, t);
                minID = docid;
                while (termDocs.Next())
                {
                    df++;
                    docid = termDocs.Doc;
                    orderArray.Add(docid, t);
                }
                maxID = docid;
            }
            freqList.Add(df);
            minIDList.Add(minID);
            maxIDList.Add(maxID);
            t++;
        } while (termEnum.Next());
    }
    finally
    {
        termDocs.Dispose();
        termEnum.Dispose();
    }

    list.Seal();

    this.valArray = list;
    this.freqs = freqList.ToArray();
    this.minIDs = minIDList.ToArray();
    this.maxIDs = maxIDList.ToArray();
}
Example 15: ApplyDeletes
// Apply buffered delete terms, queries and docIDs to the
// provided reader
private bool ApplyDeletes(IndexReader reader, int docIDStart)
{
    lock (this)
    {
        int docEnd = docIDStart + reader.MaxDoc();
        bool any = false;

        System.Diagnostics.Debug.Assert(CheckDeleteTerm(null));

        // Delete by term
        TermDocs docs = reader.TermDocs();
        try
        {
            foreach (KeyValuePair<Term, BufferedDeletes.Num> entry in deletesFlushed.terms)
            {
                Term term = entry.Key;
                // LUCENE-2086: we should be iterating a TreeMap,
                // here, so terms better be in order:
                System.Diagnostics.Debug.Assert(CheckDeleteTerm(term));
                docs.Seek(term);
                int limit = entry.Value.GetNum();
                while (docs.Next())
                {
                    int docID = docs.Doc();
                    if (docIDStart + docID >= limit)
                        break;
                    reader.DeleteDocument(docID);
                    any = true;
                }
            }
        }
        finally
        {
            docs.Close();
        }

        // Delete by docID
        foreach (int docID in deletesFlushed.docIDs)
        {
            if (docID >= docIDStart && docID < docEnd)
            {
                reader.DeleteDocument(docID - docIDStart);
                any = true;
            }
        }

        // Delete by query
        IndexSearcher searcher = new IndexSearcher(reader);
        foreach (KeyValuePair<Query, int> entry in new Support.Dictionary<Query, int>(deletesFlushed.queries))
        {
            Query query = entry.Key;
            int limit = entry.Value;
            Weight weight = query.Weight(searcher);
            Scorer scorer = weight.Scorer(reader, true, false);
            if (scorer != null)
            {
                while (true)
                {
                    int doc = scorer.NextDoc();
                    if (((long) docIDStart) + doc >= limit)
                        break;
                    reader.DeleteDocument(doc);
                    any = true;
                }
            }
        }
        searcher.Close();
        return any;
    }
}