本文整理汇总了C#中Lucene.Net.Index.IndexReader.Document方法的典型用法代码示例。如果您正苦于以下问题:C# IndexReader.Document方法的具体用法?C# IndexReader.Document怎么用?C# IndexReader.Document使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Lucene.Net.Index.IndexReader
的用法示例。
在下文中一共展示了IndexReader.Document方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: Generate
public override IComparable Generate(IndexReader reader, int doc)
{
    // Loads the stored document and classifies it into an ordering category
    // based on whether it matches the queried tag and/or is flagged as paying.
    var document = reader.Document(doc);

    var payingField = document.GetField("PayingTag_" + CustomTagId);
    var isPaying = payingField != null && Boolean.Parse(payingField.StringValue);
    var tagId = Int32.Parse(document.GetField("TagId").StringValue);
    var points = Int32.Parse(document.GetField("Points").StringValue);
    var hasTag = tagId == CustomTagId;

    CustomerDocumentOrderWithRandomEffect.OrderCategory category;
    if (hasTag && isPaying)
        category = CustomerDocumentOrderWithRandomEffect.OrderCategory.TagAndPaying;
    else if (isPaying)
        category = CustomerDocumentOrderWithRandomEffect.OrderCategory.OnlyPaying;
    else if (hasTag)
        category = CustomerDocumentOrderWithRandomEffect.OrderCategory.OnlyTag;
    else
        category = CustomerDocumentOrderWithRandomEffect.OrderCategory.NoneOfTheAbove;

    return new CustomerDocumentOrderWithRandomEffect
    {
        Category = category,
        Points = points
    };
}
示例2: MakeLatestVersionLookupPerReader
static void MakeLatestVersionLookupPerReader(IDictionary<string, Tuple<NuGetVersion, string, int>> lookup, IndexReader reader, string readerName, bool includePrerelease, bool includeUnlisted)
{
    // Scans every live document in the reader and records, per package id,
    // the highest version seen together with the reader name and doc number.
    for (int docNumber = 0; docNumber < reader.MaxDoc; docNumber++)
    {
        if (reader.IsDeleted(docNumber))
        {
            continue;
        }

        Document document = reader.Document(docNumber);

        NuGetVersion version = GetVersion(document);
        if (version == null)
        {
            continue;
        }

        // Apply the listing and prerelease filters before looking at the id.
        if (!GetListed(document) && !includeUnlisted)
        {
            continue;
        }
        if (version.IsPrerelease && !includePrerelease)
        {
            continue;
        }

        string id = GetId(document);
        if (id == null)
        {
            continue;
        }

        // Keep the entry only when it is new or strictly newer than what we have.
        Tuple<NuGetVersion, string, int> existing;
        if (!lookup.TryGetValue(id, out existing) || version > existing.Item1)
        {
            lookup[id] = Tuple.Create(version, readerName, docNumber);
        }
    }
}
示例3: ProcessReader
void ProcessReader(IndexReader indexReader, string readerName, ref int perIndexDocumentNumber)
{
    // Visits every slot in the reader: deleted slots are forwarded with a null
    // document and isDelete = true, live slots carry their stored document.
    // perIndexDocumentNumber advances once per slot, across reader boundaries.
    for (int readerDocNumber = 0; readerDocNumber < indexReader.MaxDoc; readerDocNumber++, perIndexDocumentNumber++)
    {
        bool deleted = indexReader.IsDeleted(readerDocNumber);
        Document document = deleted ? null : indexReader.Document(readerDocNumber);
        ProcessDocument(indexReader, readerName, readerDocNumber, perIndexDocumentNumber, document, isDelete: deleted);
    }
}
示例4: Execute
public override IEnumerable<Row> Execute(IEnumerable<Row> rows)
{
    // Streams every live (non-deleted) document of the lucene index as a Row.
    // Yields nothing when the index directory is unset or cannot be opened.
    if (_indexDirectory == null)
        yield break;
    try {
        _reader = IndexReader.Open(_indexDirectory, true);
    }
    catch (Exception) {
        Warn("Failed to open lucene index in {0}.", _indexDirectory.Directory.FullName);
        yield break;
    }
    var docCount = _reader.NumDocs();
    Info("Found {0} documents in lucene index.", docCount);
    // FIX: document ids range over [0, MaxDoc); NumDocs() counts only live
    // documents, so iterating up to NumDocs() silently dropped the tail of
    // the index whenever deletions were present. Iterate to MaxDoc instead
    // and skip the deleted slots.
    var maxDoc = _reader.MaxDoc;
    for (var i = 0; i < maxDoc; i++) {
        if (_reader.IsDeleted(i))
            continue;
        var doc = _reader.Document(i);
        var row = new Row();
        foreach (var field in doc.GetFields().Where(field => field.IsStored)) {
            switch (field.Name) {
                case "dropped":
                    // "dropped" is persisted as text; surface it as a bool.
                    row[field.Name] = Convert.ToBoolean(field.StringValue);
                    break;
                default:
                    row[field.Name] = field.StringValue;
                    break;
            }
        }
        yield return row;
    }
    // NOTE(review): _reader is never closed here — presumably disposed by the
    // owner of this step; confirm against the enclosing class.
}
示例5: GetFieldValues
protected virtual String[] GetFieldValues(IndexReader reader, int docId, String fieldName)
{
    // Load only the requested field (MapFieldSelector avoids materializing
    // the whole stored document) and return all of its values.
    var selector = new MapFieldSelector(new[] { fieldName });
    Document doc = reader.Document(docId, selector);
    return doc.GetValues(fieldName); // per the Document javadoc, never null
}
示例6: VerifyEquals
public static void VerifyEquals(IndexReader r1, IndexReader r2, System.String idField)
{
Assert.AreEqual(r1.NumDocs(), r2.NumDocs());
bool hasDeletes = !(r1.MaxDoc() == r2.MaxDoc() && r1.NumDocs() == r1.MaxDoc());
int[] r2r1 = new int[r2.MaxDoc()]; // r2 id to r1 id mapping
TermDocs termDocs1 = r1.TermDocs();
TermDocs termDocs2 = r2.TermDocs();
// create mapping from id2 space to id2 based on idField
idField = StringHelper.Intern(idField);
TermEnum termEnum = r1.Terms(new Term(idField, ""));
do
{
Term term = termEnum.Term();
if (term == null || (System.Object) term.Field() != (System.Object) idField)
break;
termDocs1.Seek(termEnum);
if (!termDocs1.Next())
{
// This doc is deleted and wasn't replaced
termDocs2.Seek(termEnum);
Assert.IsFalse(termDocs2.Next());
continue;
}
int id1 = termDocs1.Doc();
Assert.IsFalse(termDocs1.Next());
termDocs2.Seek(termEnum);
Assert.IsTrue(termDocs2.Next());
int id2 = termDocs2.Doc();
Assert.IsFalse(termDocs2.Next());
r2r1[id2] = id1;
// verify stored fields are equivalent
try
{
VerifyEquals(r1.Document(id1), r2.Document(id2));
}
catch (System.Exception t)
{
System.Console.Out.WriteLine("FAILED id=" + term + " id1=" + id1 + " id2=" + id2 + " term=" + term);
System.Console.Out.WriteLine(" d1=" + r1.Document(id1));
System.Console.Out.WriteLine(" d2=" + r2.Document(id2));
throw t;
}
try
{
// verify term vectors are equivalent
VerifyEquals(r1.GetTermFreqVectors(id1), r2.GetTermFreqVectors(id2));
}
catch (System.Exception e)
{
System.Console.Out.WriteLine("FAILED id=" + term + " id1=" + id1 + " id2=" + id2);
TermFreqVector[] tv1 = r1.GetTermFreqVectors(id1);
System.Console.Out.WriteLine(" d1=" + tv1);
if (tv1 != null)
for (int i = 0; i < tv1.Length; i++)
{
System.Console.Out.WriteLine(" " + i + ": " + tv1[i]);
}
TermFreqVector[] tv2 = r2.GetTermFreqVectors(id2);
System.Console.Out.WriteLine(" d2=" + tv2);
if (tv2 != null)
for (int i = 0; i < tv2.Length; i++)
{
System.Console.Out.WriteLine(" " + i + ": " + tv2[i]);
}
throw e;
}
}
while (termEnum.Next());
termEnum.Close();
// Verify postings
TermEnum termEnum1 = r1.Terms(new Term("", ""));
TermEnum termEnum2 = r2.Terms(new Term("", ""));
// pack both doc and freq into single element for easy sorting
long[] info1 = new long[r1.NumDocs()];
long[] info2 = new long[r2.NumDocs()];
for (; ; )
{
Term term1, term2;
// iterate until we get some docs
int len1;
for (; ; )
{
len1 = 0;
term1 = termEnum1.Term();
//.........这里部分代码省略.........
示例7: GetFields
protected virtual Field[] GetFields(IndexReader reader, int docId, String fieldName)
{
    // Fetch just the one field through a selector; per the original note,
    // doc.GetFields(fieldName) may not be usable with lazily loaded fields.
    var selector = new MapFieldSelector(new[] { fieldName });
    Document doc = reader.Document(docId, selector);
    return doc.GetFields(fieldName); // per the Document javadoc, never null
}
示例8: CompareChildHits
private void CompareChildHits(IndexReader r, IndexReader joinR, TopDocs results, TopDocs joinResults)
{
    // Both result sets must be the same size and pair up hit-for-hit.
    assertEquals(results.TotalHits, joinResults.TotalHits);
    assertEquals(results.ScoreDocs.Length, joinResults.ScoreDocs.Length);

    for (int hitCount = 0; hitCount < results.ScoreDocs.Length; hitCount++)
    {
        ScoreDoc plainHit = results.ScoreDocs[hitCount];
        ScoreDoc joinedHit = joinResults.ScoreDocs[hitCount];

        // The same child document must appear at the same rank on both sides.
        Document plainDoc = r.Document(plainHit.Doc);
        Document joinedDoc = joinR.Document(joinedHit.Doc);
        assertEquals("hit " + hitCount + " differs", plainDoc.Get("childID"), joinedDoc.Get("childID"));

        // Scores are intentionally not compared -- they are expected to differ.
        assertTrue(plainHit is FieldDoc);
        assertTrue(joinedHit is FieldDoc);
        assertArrayEquals(((FieldDoc)plainHit).Fields, ((FieldDoc)joinedHit).Fields);
    }
}
示例9: CompareHits
// Verifies that grouped join results agree with a flat (non-join) result set:
// for every group, each child hit must line up with the corresponding flat hit
// on both parentID and childID. The flat cursor (resultUpto) walks forward in
// lock-step with the grouped hits and is advanced past the remaining hits of a
// parent before the next group is compared.
private void CompareHits(IndexReader r, IndexReader joinR, TopDocs results, TopGroups<int> joinResults)
{
// results is 'complete'; joinResults is a subset
int resultUpto = 0;
int joinGroupUpto = 0;
ScoreDoc[] hits = results.ScoreDocs;
IGroupDocs<int>[] groupDocs = joinResults.Groups;
while (joinGroupUpto < groupDocs.Length)
{
IGroupDocs<int> group = groupDocs[joinGroupUpto++];
ScoreDoc[] groupHits = group.ScoreDocs;
assertNotNull(group.GroupValue);
// GroupValue is the parent doc id in the join reader; its stored parentID
// is the key the flat hits are matched against below.
Document parentDoc = joinR.Document(group.GroupValue);
string parentID = parentDoc.Get("parentID");
//System.out.println("GROUP groupDoc=" + group.groupDoc + " parent=" + parentDoc);
assertNotNull(parentID);
assertTrue(groupHits.Length > 0);
// Each grouped child hit must match the next flat hit on both ids.
// Note resultUpto advances once per grouped hit here.
for (int hitIDX = 0; hitIDX < groupHits.Length; hitIDX++)
{
Document nonJoinHit = r.Document(hits[resultUpto++].Doc);
Document joinHit = joinR.Document(groupHits[hitIDX].Doc);
assertEquals(parentID, nonJoinHit.Get("parentID"));
assertEquals(joinHit.Get("childID"), nonJoinHit.Get("childID"));
}
// joinResults may contain fewer hits per group than the flat results,
// so skip any remaining flat hits that still belong to this parentID --
// but only when another group follows (the final group's tail is ignored).
if (joinGroupUpto < groupDocs.Length)
{
// Advance non-join hit to the next parentID:
//System.out.println(" next joingroupUpto=" + joinGroupUpto + " gd.Length=" + groupDocs.Length + " parentID=" + parentID);
while (true)
{
assertTrue(resultUpto < hits.Length);
if (!parentID.Equals(r.Document(hits[resultUpto].Doc).Get("parentID")))
{
break;
}
resultUpto++;
}
}
}
}
示例10: ApplyFacetValueHit
private void ApplyFacetValueHit(FacetValue facetValue, Facet value, int docId, ParsedRange parsedRange, IndexReader indexReader)
{
    // Every call counts as a hit up front; a distinct query may retract it
    // below if this document's field combination was already seen.
    facetValue.Hits++;

    // Plain (non-distinct) Count/None aggregations need nothing beyond the
    // counter, so skip the per-document bookkeeping entirely.
    var needsDocTracking = IndexQuery.IsDistinct ||
                           (value.Aggregation != FacetAggregation.Count && value.Aggregation != FacetAggregation.None);
    if (!needsDocTracking)
    {
        return;
    }

    FacetValueState state;
    if (!matches.TryGetValue(facetValue, out state))
    {
        state = new FacetValueState
        {
            Docs = new HashSet<int>(),
            Facet = value,
            Range = parsedRange
        };
        matches[facetValue] = state;
    }

    if (IndexQuery.IsDistinct)
    {
        if (IndexQuery.FieldsToFetch.Length == 0)
            throw new InvalidOperationException("Cannot process distinct facet query without specifying which fields to distinct upon.");

        if (state.AlreadySeen == null)
            state.AlreadySeen = new HashSet<StringCollectionValue>();

        // Collect the stored values of all distinct fields for this document.
        var document = indexReader.Document(docId);
        var fieldValues = new List<string>();
        foreach (var fieldName in IndexQuery.FieldsToFetch)
        {
            foreach (var field in document.GetFields(fieldName))
            {
                if (field.StringValue != null)
                    fieldValues.Add(field.StringValue);
            }
        }

        if (fieldValues.Count == 0)
            throw new InvalidOperationException("Cannot apply distinct facet on [" + string.Join(", ", IndexQuery.FieldsToFetch) +
                                                "], did you forget to store them in the index? ");

        if (!state.AlreadySeen.Add(new StringCollectionValue(fieldValues)))
        {
            facetValue.Hits--;// already seen, cancel this
            return;
        }
    }

    state.Docs.Add(docId);
}
示例11: CheckDbAndIndex
// Compares one version row from the database against its lucene document(s)
// and appends any discrepancies to <paramref name="result"/>. Returns the
// lucene doc id that matched the version id, or -1 when the version is
// missing from the index.
private int CheckDbAndIndex(DbDataReader dbreader, IndexReader ixreader, List<Difference> result)
{
    var versionId = dbreader.GetInt32(0);
    var dbNodeTimestamp = dbreader.GetInt64(1);
    var dbVersionTimestamp = dbreader.GetInt64(2);
    var termDocs = ixreader.TermDocs(new Lucene.Net.Index.Term(LucObject.FieldName.VersionId, Lucene.Net.Util.NumericUtils.IntToPrefixCoded(versionId)));
    Lucene.Net.Documents.Document doc = null;
    int docid = -1;
    if (termDocs.Next())
    {
        docid = termDocs.Doc();
        doc = ixreader.Document(docid);
        var indexNodeTimestamp = ParseLong(doc.Get(LucObject.FieldName.NodeTimestamp));
        var indexVersionTimestamp = ParseLong(doc.Get(LucObject.FieldName.VersionTimestamp));
        var nodeId = ParseInt(doc.Get(LucObject.FieldName.NodeId));
        var version = doc.Get(LucObject.FieldName.Version);
        var p = doc.Get(LucObject.FieldName.Path);
        // A second document for the same version id means a duplicate entry.
        if (termDocs.Next())
        {
            result.Add(new Difference(IndexDifferenceKind.MoreDocument)
            {
                DocId = docid,
                NodeId = nodeId,
                VersionId = versionId,
                Version = version,
                Path = p,
                DbNodeTimestamp = dbNodeTimestamp,
                DbVersionTimestamp = dbVersionTimestamp,
                IxNodeTimestamp = indexNodeTimestamp,
                IxVersionTimestamp = indexVersionTimestamp,
            });
        }
        if (dbVersionTimestamp != indexVersionTimestamp)
        {
            result.Add(new Difference(IndexDifferenceKind.DifferentVersionTimestamp)
            {
                DocId = docid,
                VersionId = versionId,
                DbNodeTimestamp = dbNodeTimestamp,
                DbVersionTimestamp = dbVersionTimestamp,
                IxNodeTimestamp = indexNodeTimestamp,
                IxVersionTimestamp = indexVersionTimestamp,
                NodeId = nodeId,
                Version = version,
                Path = p
            });
        }
        if (dbNodeTimestamp != indexNodeTimestamp)
        {
            // A node-timestamp mismatch is tolerated when this document is not
            // the node's last draft but the actual last-draft document lives at
            // the same path.
            var ok = false;
            var isLastDraft = doc.Get(LucObject.FieldName.IsLastDraft);
            if (isLastDraft != BooleanIndexHandler.YES)
            {
                var latestDocs = ixreader.TermDocs(new Lucene.Net.Index.Term(LucObject.FieldName.NodeId, Lucene.Net.Util.NumericUtils.IntToPrefixCoded(nodeId)));
                Lucene.Net.Documents.Document latestDoc = null;
                while (latestDocs.Next())
                {
                    var latestdocid = latestDocs.Doc();
                    var d = ixreader.Document(latestdocid);
                    if (d.Get(LucObject.FieldName.IsLastDraft) != BooleanIndexHandler.YES)
                        continue;
                    latestDoc = d;
                    break;
                }
                // FIX: latestDoc can legitimately remain null when no last-draft
                // document exists for the node; the original code dereferenced it
                // unconditionally and threw a NullReferenceException here.
                if (latestDoc != null && latestDoc.Get(LucObject.FieldName.Path) == p)
                    ok = true;
            }
            if (!ok)
            {
                result.Add(new Difference(IndexDifferenceKind.DifferentNodeTimestamp)
                {
                    DocId = docid,
                    VersionId = versionId,
                    DbNodeTimestamp = dbNodeTimestamp,
                    DbVersionTimestamp = dbVersionTimestamp,
                    IxNodeTimestamp = indexNodeTimestamp,
                    IxVersionTimestamp = indexVersionTimestamp,
                    NodeId = nodeId,
                    Version = version,
                    Path = p
                });
            }
        }
    }
    else
    {
        // No lucene document at all for this version id.
        result.Add(new Difference(IndexDifferenceKind.NotInIndex)
        {
            DocId = docid,
            VersionId = versionId,
            DbNodeTimestamp = dbNodeTimestamp,
            DbVersionTimestamp = dbVersionTimestamp,
        });
    }
    return docid;
}
示例12: AssertStoredFieldsEquals
/// <summary>
/// Asserts that both readers expose identical stored fields for every document.
/// </summary>
public void AssertStoredFieldsEquals(string info, IndexReader leftReader, IndexReader rightReader)
{
    Debug.Assert(leftReader.MaxDoc == rightReader.MaxDoc);

    for (int docId = 0; docId < leftReader.MaxDoc; docId++)
    {
        Document left = leftReader.Document(docId);
        Document right = rightReader.Document(docId);

        // Readers are free to surface stored fields in any order, so normalize
        // both documents by sorting on field name; multiple fields sharing a
        // name are still assumed to keep their relative order.
        Comparison<IndexableField> byName = (x, y) => String.Compare(x.Name(), y.Name(), StringComparison.Ordinal);
        left.Fields.Sort(byName);
        right.Fields.Sort(byName);

        // Walk both field lists in lock-step; they must be the same length
        // and agree element-by-element.
        var leftFields = left.GetEnumerator();
        var rightFields = right.GetEnumerator();
        while (leftFields.MoveNext())
        {
            Assert.IsTrue(rightFields.MoveNext(), info);
            AssertStoredFieldEquals(info, leftFields.Current, rightFields.Current);
        }
        Assert.IsFalse(rightFields.MoveNext(), info);
    }
}
示例13: PerformSearchQuery
/// <summary>
/// Executes the search query built by the parser or another source.
/// </summary>
/// <param name="query">Query to execute on the index</param>
/// <param name="indexReader">The reader used to read from the index</param>
/// <returns>Returns the found search results</returns>
private IEnumerable<SearchHit> PerformSearchQuery(Query query, IndexReader indexReader)
{
    var foundItems = new List<SearchHit>();
    // FIX: the IndexSearcher was previously never disposed. Disposing a
    // searcher built over an externally supplied reader does not close that
    // reader, so the caller's reader remains usable.
    using (var searcher = new IndexSearcher(indexReader))
    {
        var results = searcher.Search(query, Int16.MaxValue);
        foreach (var scoreDoc in results.ScoreDocs)
        {
            var document = indexReader.Document(scoreDoc.Doc);
            // Collect the search results from the index.
            foundItems.Add(new SearchHit()
            {
                Id = Int32.Parse(document.Get("id")),
                Name = document.Get("recipe-name"),
                Description = document.Get("recipe-description")
            });
        }
    }
    return foundItems;
}
示例14: GetTokenStream
// Convenience overload: loads the stored document for docId and delegates to
// the Document-based GetTokenStream.
public static TokenStream GetTokenStream(IndexReader reader, int docId, String field, Analyzer analyzer)
{
    return GetTokenStream(reader.Document(docId), field, analyzer);
}
示例15: CopyFieldsWithDeletions
// Copies the stored fields of all live (non-deleted) documents from reader
// into fieldsWriter and returns the number of documents copied. When
// matchingFieldsReader is non-null the field numbering is identical, so runs
// of consecutive live documents are bulk-copied as raw bytes; otherwise each
// document is re-added field by field.
private int CopyFieldsWithDeletions(FieldSelector fieldSelectorMerge, FieldsWriter fieldsWriter, IndexReader reader, FieldsReader matchingFieldsReader)
{
int docCount = 0;
int maxDoc = reader.MaxDoc();
if (matchingFieldsReader != null)
{
// We can bulk-copy because the fieldInfos are "congruent"
// Note: j is advanced inside the body, not by the for-statement.
for (int j = 0; j < maxDoc; )
{
if (reader.IsDeleted(j))
{
// skip deleted docs
++j;
continue;
}
// We can optimize this case (doing a bulk byte copy) since the field
// numbers are identical
// Grow the run [start, start+numDocs) until we hit a deleted doc, the
// end of the index, or the per-batch cap MAX_RAW_MERGE_DOCS.
int start = j, numDocs = 0;
do
{
j++;
numDocs++;
if (j >= maxDoc)
break;
if (reader.IsDeleted(j))
{
// j is bumped past the deleted doc so the outer loop resumes after it.
j++;
break;
}
}
while (numDocs < MAX_RAW_MERGE_DOCS);
// Stream the raw stored-field bytes of the whole run in one call.
IndexInput stream = matchingFieldsReader.RawDocs(rawDocLengths, start, numDocs);
fieldsWriter.AddRawDocuments(stream, rawDocLengths, numDocs);
docCount += numDocs;
// 300 units of "work" per document reported to the abort checker.
checkAbort.Work(300 * numDocs);
}
}
else
{
// Slow path: field numbers differ, so each document must be decoded and
// re-encoded individually.
for (int j = 0; j < maxDoc; j++)
{
if (reader.IsDeleted(j))
{
// skip deleted docs
continue;
}
// NOTE: it's very important to first assign to doc then pass it to
// termVectorsWriter.addAllDocVectors; see LUCENE-1282
Document doc = reader.Document(j, fieldSelectorMerge);
fieldsWriter.AddDocument(doc);
docCount++;
checkAbort.Work(300);
}
}
return docCount;
}