本文整理汇总了C#中Lucene.Net.Index.AtomicReader.Document方法的典型用法代码示例。如果您正苦于以下问题:C# AtomicReader.Document方法的具体用法?C# AtomicReader.Document怎么用?C# AtomicReader.Document使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Lucene.Net.Index.AtomicReader
的用法示例。
在下文中一共展示了AtomicReader.Document方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: TestStoredFields
/// <summary>
/// Test stored fields.
/// @lucene.experimental
/// </summary>
/// <param name="reader">reader whose stored fields are scanned; deleted documents are read too, to verify they are not corrupt</param>
/// <param name="infoStream">optional diagnostic output stream; may be <c>null</c></param>
/// <returns>
/// a <see cref="Status.StoredFieldStatus"/> with the live-doc count and total field count;
/// any failure is captured in <c>status.Error</c> rather than propagated.
/// </returns>
public static Status.StoredFieldStatus TestStoredFields(AtomicReader reader, TextWriter infoStream)
{
    Status.StoredFieldStatus status = new Status.StoredFieldStatus();
    try
    {
        if (infoStream != null)
        {
            infoStream.Write(" test: stored fields.......");
        }
        // Scan stored fields for all documents
        Bits liveDocs = reader.LiveDocs;
        for (int j = 0; j < reader.MaxDoc; ++j)
        {
            // Intentionally pull even deleted documents to
            // make sure they too are not corrupt:
            Document doc = reader.Document(j);
            if (liveDocs == null || liveDocs.Get(j))
            {
                // Only live documents count toward the reported totals.
                status.DocCount++;
                status.TotFields += doc.Fields.Count;
            }
        }
        // Validate docCount against the reader's authoritative live-doc count.
        // BUGFIX: the message previously printed status.DocCount twice, which
        // made the two compared values always look identical.
        if (status.DocCount != reader.NumDocs)
        {
            throw new Exception("docCount=" + status.DocCount + " but saw " + reader.NumDocs + " undeleted docs");
        }
        Msg(infoStream, "OK [" + status.TotFields + " total field count; avg " + ((((float)status.TotFields) / status.DocCount)).ToString(CultureInfo.InvariantCulture.NumberFormat) + " fields per doc]");
    }
    catch (Exception e)
    {
        Msg(infoStream, "ERROR [" + Convert.ToString(e.Message) + "]");
        status.Error = e;
        if (infoStream != null)
        {
            // LUCENENET NOTE: Some tests rely on the error type being in
            // the message. We can't get the error type with StackTrace, we
            // need ToString() for that.
            infoStream.WriteLine(e.ToString());
            //infoStream.WriteLine(e.StackTrace);
        }
    }
    return status;
}
示例2: Warm
/// <summary>
/// Warms the given segment reader by touching every indexed field's terms,
/// norms, and doc values, plus one stored document and its term vectors,
/// so that subsequent searches hit warm caches.
/// </summary>
/// <param name="reader">the newly merged segment reader to warm</param>
public override void Warm(AtomicReader reader)
{
    // BUGFIX: DateTime.Now.Millisecond is the wall clock's millisecond
    // component (0-999), not a timestamp; subtracting two such values does
    // not measure elapsed time. Environment.TickCount is a monotonic
    // millisecond counter suited for short elapsed-time measurements.
    int startTime = Environment.TickCount;
    int indexedCount = 0;
    int docValuesCount = 0;
    int normsCount = 0;
    foreach (FieldInfo info in reader.FieldInfos)
    {
        if (info.Indexed)
        {
            // Touch the terms dictionary (and norms, if present) for this field.
            reader.Terms(info.Name);
            indexedCount++;
            if (info.HasNorms())
            {
                reader.GetNormValues(info.Name);
                normsCount++;
            }
        }
        if (info.HasDocValues())
        {
            // Load whichever doc-values representation this field uses.
            switch (info.DocValuesType)
            {
                case DocValuesType_e.NUMERIC:
                    reader.GetNumericDocValues(info.Name);
                    break;
                case DocValuesType_e.BINARY:
                    reader.GetBinaryDocValues(info.Name);
                    break;
                case DocValuesType_e.SORTED:
                    reader.GetSortedDocValues(info.Name);
                    break;
                case DocValuesType_e.SORTED_SET:
                    reader.GetSortedSetDocValues(info.Name);
                    break;
                default:
                    Debug.Assert(false); // unknown dv type
                    break;
            }
            docValuesCount++;
        }
    }
    // Touch stored fields and term vectors for one document.
    reader.Document(0);
    reader.GetTermVectors(0);
    if (InfoStream.IsEnabled("SMSW"))
    {
        InfoStream.Message("SMSW", "Finished warming segment: " + reader + ", indexed=" + indexedCount + ", docValues=" + docValuesCount + ", norms=" + normsCount + ", time=" + (Environment.TickCount - startTime));
    }
}
示例3: Warm
/// <summary>
/// Test warmer: marks the merged reader as warmed, samples roughly 50 evenly
/// spaced live documents' stored fields, and runs one term query against it.
/// </summary>
/// <param name="reader">the newly merged segment reader</param>
public override void Warm(AtomicReader reader)
{
    if (VERBOSE)
    {
        Console.WriteLine("TEST: now warm merged reader=" + reader);
    }

    // Record that this segment's core has been warmed.
    OuterInstance.Warmed[(SegmentCoreReaders)reader.CoreCacheKey] = true;

    Bits liveDocs = reader.LiveDocs;
    int maxDoc = reader.MaxDoc;
    int step = Math.Max(1, maxDoc / 50);
    int visited = 0;

    // Sample documents at a fixed stride, skipping deletions.
    for (int doc = 0; doc < maxDoc; doc += step)
    {
        if (liveDocs != null && !liveDocs.Get(doc))
        {
            continue;
        }
        visited += reader.Document(doc).Fields.Count;
    }

    // Also warm the reader through a searcher with a simple term query.
    IndexSearcher searcher = OuterInstance.NewSearcher(reader);
    visited += searcher.Search(new TermQuery(new Term("body", "united")), 10).TotalHits;

    if (VERBOSE)
    {
        Console.WriteLine("TEST: warm visited " + visited + " fields");
    }
}
示例4: Split
/// <summary>
/// Split a given index into 3 indexes for training, test and cross validation tasks respectively.
/// </summary>
/// <param name="originalIndex">an <see cref="AtomicReader"/> on the source index</param>
/// <param name="trainingIndex">a <see cref="Directory"/> used to write the training index</param>
/// <param name="testIndex">a <see cref="Directory"/> used to write the test index</param>
/// <param name="crossValidationIndex">a <see cref="Directory"/> used to write the cross validation index</param>
/// <param name="analyzer"><see cref="Analyzer"/> used to create the new docs</param>
/// <param name="fieldNames">names of fields that need to be put in the new indexes or <c>null</c> if all should be used</param>
/// <exception cref="IOException">if any writing operation fails on any of the indexes</exception>
public void Split(AtomicReader originalIndex, Directory trainingIndex, Directory testIndex, Directory crossValidationIndex, Analyzer analyzer, params string[] fieldNames)
{
    // create IWs for train / test / cv IDXs
    IndexWriter testWriter = new IndexWriter(testIndex, new IndexWriterConfig(Util.Version.LUCENE_CURRENT, analyzer));
    IndexWriter cvWriter = new IndexWriter(crossValidationIndex, new IndexWriterConfig(Util.Version.LUCENE_CURRENT, analyzer));
    IndexWriter trainingWriter = new IndexWriter(trainingIndex, new IndexWriterConfig(Util.Version.LUCENE_CURRENT, analyzer));
    try
    {
        int size = originalIndex.MaxDoc;
        IndexSearcher indexSearcher = new IndexSearcher(originalIndex);
        TopDocs topDocs = indexSearcher.Search(new MatchAllDocsQuery(), int.MaxValue);
        // set the type to be indexed, stored, with term vectors
        FieldType ft = new FieldType(TextField.TYPE_STORED);
        ft.StoreTermVectors = true;
        ft.StoreTermVectorOffsets = true;
        ft.StoreTermVectorPositions = true;
        int b = 0;
        // iterate over existing documents
        foreach (ScoreDoc scoreDoc in topDocs.ScoreDocs)
        {
            // create a new document for indexing
            Document doc = new Document();
            if (fieldNames != null && fieldNames.Length > 0)
            {
                // copy only the explicitly requested fields
                foreach (string fieldName in fieldNames)
                {
                    doc.Add(new Field(fieldName, originalIndex.Document(scoreDoc.Doc).GetField(fieldName).ToString(), ft));
                }
            }
            else
            {
                // copy every stored field, using the first non-null value representation
                foreach (IndexableField storableField in originalIndex.Document(scoreDoc.Doc).Fields)
                {
                    if (storableField.ReaderValue != null)
                    {
                        doc.Add(new Field(storableField.Name(), storableField.ReaderValue, ft));
                    }
                    else if (storableField.BinaryValue() != null)
                    {
                        doc.Add(new Field(storableField.Name(), storableField.BinaryValue(), ft));
                    }
                    else if (storableField.StringValue != null)
                    {
                        doc.Add(new Field(storableField.Name(), storableField.StringValue, ft));
                    }
                    else if (storableField.NumericValue != null)
                    {
                        doc.Add(new Field(storableField.Name(), storableField.NumericValue.ToString(), ft));
                    }
                }
            }
            // add it to one of the IDXs: alternate docs into the test index until
            // it reaches its ratio, then fill cross-validation, then training
            if (b % 2 == 0 && testWriter.MaxDoc < size * _testRatio)
            {
                testWriter.AddDocument(doc);
            }
            else if (cvWriter.MaxDoc < size * _crossValidationRatio)
            {
                cvWriter.AddDocument(doc);
            }
            else
            {
                trainingWriter.AddDocument(doc);
            }
            b++;
        }
    }
    catch (Exception e)
    {
        // wrap any failure per the documented contract (message typo "Exceptio" fixed)
        throw new IOException("Exception in DatasetSplitter", e);
    }
    finally
    {
        testWriter.Commit();
        cvWriter.Commit();
        trainingWriter.Commit();
        // close IWs
        testWriter.Dispose();
        cvWriter.Dispose();
        trainingWriter.Dispose();
    }
}