本文整理汇总了C#中Lucene.Net.Documents.Document.GetFields方法的典型用法代码示例。如果您正苦于以下问题:C# Document.GetFields方法的具体用法?C# Document.GetFields怎么用?C# Document.GetFields使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 Lucene.Net.Documents.Document 的用法示例。
在下文中一共展示了Document.GetFields方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: SearchResult
/// <summary>
/// Builds a search result from a Lucene hit: resolves the node id, records the
/// score, and copies every stored field into a name/value map.
/// </summary>
/// <param name="doc">The Lucene document the hit came from.</param>
/// <param name="score">The relevance score Lucene assigned to the hit.</param>
public SearchResult(Document doc, float score)
{
    Fields = new Dictionary<string, string>();
    string id = doc.Get("id");
    if (string.IsNullOrEmpty(id))
    {
        // fall back to the indexer's internal node id field
        id = doc.Get("__NodeId");
    }
    Id = int.Parse(id);
    Score = score;
    //we can use lucene to find out the fields which have been stored for this particular document
    //I'm not sure if it'll return fields that have null values though
    var fields = doc.GetFields();
    //ignore our internal fields though
    foreach (Field field in fields.Cast<Field>())
    {
        string fieldName = field.Name();
        // BUG FIX: a Lucene document may contain several fields with the same
        // name (GetFields returns one entry per field instance). Dictionary.Add
        // threw ArgumentException on the second occurrence; use the indexer so
        // the last value wins instead of crashing.
        Fields[fieldName] = doc.Get(fieldName);
        //Examine returns some fields as e.g. __FieldName rather than fieldName
        if (fieldName.StartsWith(LuceneIndexer.SpecialFieldPrefix))
        {
            int offset = LuceneIndexer.SpecialFieldPrefix.Length;
            // strip the prefix and camel-case the first remaining character
            string tidiedFieldName = Char.ToLower(fieldName[offset]) + fieldName.Substring(offset + 1);
            if (!Fields.ContainsKey(tidiedFieldName))
            {
                Fields.Add(tidiedFieldName, doc.Get(fieldName));
            }
        }
    }
}
示例2: FromDocument
/// <summary>
/// Rebuilds an IndexDocumentData from the stored fields of a Lucene document:
/// the serialized package JSON, its checksum, and the curated feeds it belongs to.
/// </summary>
public static IndexDocumentData FromDocument(Document doc)
{
    // The full package metadata is stored as a JSON string in the "Data" field.
    var packageJson = JObject.Parse(doc.GetField("Data").StringValue);

    // One "CuratedFeed" field is stored per feed that contains the package.
    var feeds = doc.GetFields("CuratedFeed")
        .Select(feedField => feedField.StringValue)
        .ToList();

    return new IndexDocumentData()
    {
        Package = PackageJson.FromJson(packageJson),
        Checksum = Int32.Parse(doc.GetFieldable("Checksum").StringValue),
        Feeds = feeds
    };
}
示例3: GetIndexesFromDocument
/// <summary>
/// Reads every value of the tweet-indexes field stored on the document.
/// </summary>
/// <param name="tweetDoc">The Lucene document representing a tweet.</param>
/// <returns>The list of index names stored on the document, possibly empty.</returns>
private IList<string> GetIndexesFromDocument(Document tweetDoc)
{
    Field[] indexFields = tweetDoc.GetFields(Settings.FIELD_TWEET_INDEXES);
    // Presize the list since the field count is already known.
    List<string> result = new List<string>(indexFields.Length);
    for (int i = 0; i < indexFields.Length; i++)
    {
        result.Add(indexFields[i].StringValue());
    }
    return result;
}
示例4: DocToDict
/// <summary>
/// Flattens a Lucene document into a name/value dictionary of its stored
/// fields, plus a "rank" entry carrying the hit score.
/// </summary>
/// <param name="doc">The document to convert.</param>
/// <param name="score">The hit score; defaults to 0 when not supplied.</param>
public static IDictionary<string, string> DocToDict(Document doc, float score = 0)
{
    var result = new Dictionary<string, string>();
    foreach (var docField in doc.GetFields())
    {
        // Only stored fields have retrievable values.
        if (!docField.IsStored)
            continue;
        result[docField.Name] = doc.Get(docField.Name);
    }
    // Invariant culture keeps the score format machine-readable.
    result["rank"] = score.ToString(CultureInfo.InvariantCulture);
    return result;
}
示例5: IndexDocuments
/// <summary>
/// Indexes a batch of documents: lazily queues a delete of any existing index
/// entry per document id, runs the map definitions, and writes the resulting
/// Lucene documents, firing index-update triggers on delete/create.
/// </summary>
public override void IndexDocuments(AbstractViewGenerator viewGenerator, IEnumerable<object> documents, WorkContext context, IStorageActionsAccessor actions, DateTime minimumTimestamp)
{
var count = 0;
Write(context, (indexWriter, analyzer, stats) =>
{
var processedKeys = new HashSet<string>();
// One batcher per registered index-update trigger (nulls filtered out).
var batchers = context.IndexUpdateTriggers.Select(x => x.CreateBatcher(name))
.Where(x => x != null)
.ToList();
// Wrap the source lazily: as documents are enumerated below, each unseen
// document id first gets its old index entry deleted and triggers fired.
var documentsWrapped = documents.Select((dynamic doc) =>
{
if(doc.__document_id == null)
throw new ArgumentException(string.Format("Cannot index something which doesn't have a document id, but got: '{0}'", doc));
count++;
string documentId = doc.__document_id.ToString();
// Only issue the delete + OnIndexEntryDeleted once per distinct id.
if (processedKeys.Add(documentId) == false)
return doc;
batchers.ApplyAndIgnoreAllErrors(
exception =>
{
logIndexing.WarnException(
string.Format("Error when executed OnIndexEntryDeleted trigger for index '{0}', key: '{1}'",
name, documentId),
exception);
context.AddError(name,
documentId,
exception.Message
);
},
trigger => trigger.OnIndexEntryDeleted(documentId));
indexWriter.DeleteDocuments(new Term(Constants.DocumentIdFieldName, documentId.ToLowerInvariant()));
return doc;
});
var anonymousObjectToLuceneDocumentConverter = new AnonymousObjectToLuceneDocumentConverter(indexDefinition);
// The same Document and id Field instances are reused for every output
// document to cut per-document allocations.
var luceneDoc = new Document();
var documentIdField = new Field(Constants.DocumentIdFieldName, "dummy", Field.Store.YES, Field.Index.ANALYZED_NO_NORMS);
foreach (var doc in RobustEnumerationIndex(documentsWrapped, viewGenerator.MapDefinitions, actions, context, stats))
{
// NOTE(review): count is bumped in the wrapper, here, and again below on a
// successful add — confirm the intended counting semantics.
count++;
float boost;
var indexingResult = GetIndexingResult(doc, anonymousObjectToLuceneDocumentConverter, out boost);
if (indexingResult.NewDocId != null && indexingResult.ShouldSkip == false)
{
count += 1;
// Reset the reused document before refilling it for this result.
luceneDoc.GetFields().Clear();
luceneDoc.SetBoost(boost);
documentIdField.SetValue(indexingResult.NewDocId.ToLowerInvariant());
luceneDoc.Add(documentIdField);
foreach (var field in indexingResult.Fields)
{
luceneDoc.Add(field);
}
batchers.ApplyAndIgnoreAllErrors(
exception =>
{
logIndexing.WarnException(
string.Format( "Error when executed OnIndexEntryCreated trigger for index '{0}', key: '{1}'",
name, indexingResult.NewDocId),
exception);
context.AddError(name,
indexingResult.NewDocId,
exception.Message
);
},
trigger => trigger.OnIndexEntryCreated(indexingResult.NewDocId, luceneDoc));
LogIndexedDocument(indexingResult.NewDocId, luceneDoc);
AddDocumentToIndex(indexWriter, luceneDoc, analyzer);
}
stats.IndexingSuccesses++;
}
// Dispose every batcher, logging (not rethrowing) failures.
batchers.ApplyAndIgnoreAllErrors(
e =>
{
logIndexing.WarnException("Failed to dispose on index update trigger", e);
context.AddError(name, null, e.Message);
},
x => x.Dispose());
return count;
});
logIndexing.Debug("Indexed {0} documents for {1}", count, name);
}
示例6: TestBinaryFields
/// <summary>
/// Verifies that a stored binary field survives a write/read round trip, a
/// lazy load through a field selector, and an index optimize (segment merge).
/// </summary>
public virtual void TestBinaryFields()
{
Directory dir = new RAMDirectory();
byte[] bin = new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
for (int i = 0; i < 10; i++)
{
AddDoc(writer, "document number " + (i + 1));
AddDocumentWithFields(writer);
AddDocumentWithDifferentFields(writer);
AddDocumentWithTermVectorFields(writer);
}
writer.Close();
// Reopen in append mode and add one document carrying the binary field.
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.Add(new Field("bin1", bin, Field.Store.YES));
doc.Add(new Field("junk", "junk text", Field.Store.NO, Field.Index.ANALYZED));
writer.AddDocument(doc);
writer.Close();
IndexReader reader = IndexReader.Open(dir, false);
// The binary document was added last, so it sits at MaxDoc - 1.
doc = reader.Document(reader.MaxDoc - 1);
Field[] fields = doc.GetFields("bin1");
Assert.IsNotNull(fields);
Assert.AreEqual(1, fields.Length);
Field b1 = fields[0];
Assert.IsTrue(b1.IsBinary);
byte[] data1 = b1.GetBinaryValue();
Assert.AreEqual(bin.Length, b1.BinaryLength);
// Compare byte-for-byte, honoring the field's binary offset.
for (int i = 0; i < bin.Length; i++)
{
Assert.AreEqual(bin[i], data1[i + b1.BinaryOffset]);
}
// Load the same document again with "bin1" marked lazy and re-check the bytes.
var lazyFields = Support.Compatibility.SetFactory.CreateHashSet<string>();
lazyFields.Add("bin1");
FieldSelector sel = new SetBasedFieldSelector(Support.Compatibility.SetFactory.CreateHashSet<string>(), lazyFields);
doc = reader.Document(reader.MaxDoc - 1, sel);
IFieldable[] fieldables = doc.GetFieldables("bin1");
Assert.IsNotNull(fieldables);
Assert.AreEqual(1, fieldables.Length);
IFieldable fb1 = fieldables[0];
Assert.IsTrue(fb1.IsBinary);
Assert.AreEqual(bin.Length, fb1.BinaryLength);
data1 = fb1.GetBinaryValue();
Assert.AreEqual(bin.Length, fb1.BinaryLength);
for (int i = 0; i < bin.Length; i++)
{
Assert.AreEqual(bin[i], data1[i + fb1.BinaryOffset]);
}
reader.Close();
// force optimize
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
writer.Optimize();
writer.Close();
// The binary value must still be intact after the segments were merged.
reader = IndexReader.Open(dir, false);
doc = reader.Document(reader.MaxDoc - 1);
fields = doc.GetFields("bin1");
Assert.IsNotNull(fields);
Assert.AreEqual(1, fields.Length);
b1 = fields[0];
Assert.IsTrue(b1.IsBinary);
data1 = b1.GetBinaryValue();
Assert.AreEqual(bin.Length, b1.BinaryLength);
for (int i = 0; i < bin.Length; i++)
{
Assert.AreEqual(bin[i], data1[i + b1.BinaryOffset]);
}
reader.Close();
}
示例7: VerifyEquals
/// <summary>
/// Asserts that two documents contain the same fields with equal string values.
/// Fields are sorted by name before comparison; on mismatch both field lists
/// are dumped to the console before the assertion fails. Binary field values
/// are not compared (only their binary-ness is checked — see TODO).
/// </summary>
public static void VerifyEquals(Document d1, Document d2)
{
System.Collections.IList ff1 = d1.GetFields();
System.Collections.IList ff2 = d2.GetFields();
// Field order within a document is not significant, so normalize by name.
SupportClass.CollectionsHelper.Sort(ff1, fieldNameComparator);
SupportClass.CollectionsHelper.Sort(ff2, fieldNameComparator);
if (ff1.Count != ff2.Count)
{
System.Console.Out.WriteLine(SupportClass.CollectionsHelper.CollectionToString(ff1));
System.Console.Out.WriteLine(SupportClass.CollectionsHelper.CollectionToString(ff2));
Assert.AreEqual(ff1.Count, ff2.Count);
}
for (int i = 0; i < ff1.Count; i++)
{
Fieldable f1 = (Fieldable) ff1[i];
Fieldable f2 = (Fieldable) ff2[i];
if (f1.IsBinary())
{
// Both sides must agree on binary-ness; byte comparison is unimplemented.
System.Diagnostics.Debug.Assert(f2.IsBinary());
//TODO
}
else
{
System.String s1 = f1.StringValue();
System.String s2 = f2.StringValue();
if (!s1.Equals(s2))
{
// print out whole doc on error
System.Console.Out.WriteLine(SupportClass.CollectionsHelper.CollectionToString(ff1));
System.Console.Out.WriteLine(SupportClass.CollectionsHelper.CollectionToString(ff2));
Assert.AreEqual(s1, s2);
}
}
}
}
示例8: AddDocumentToIndex
/// <summary>
/// Adds a Lucene document to the writer using an analyzer produced by folding
/// all registered analyzer generators over the index's base analyzer, then
/// disposes field reader values and any generated analyzer.
/// </summary>
protected void AddDocumentToIndex(RavenIndexWriter currentIndexWriter, Document luceneDoc, Analyzer analyzer)
{
// Let each generator wrap or replace the analyzer; close intermediate
// analyzers as soon as they are superseded — but never the caller-owned base.
Analyzer newAnalyzer = AnalyzerGenerators.Aggregate(analyzer,
(currentAnalyzer, generator) =>
{
Analyzer generateAnalyzer =
generator.Value.GenerateAnalyzerForIndexing(name, luceneDoc,
currentAnalyzer);
if (generateAnalyzer != currentAnalyzer &&
currentAnalyzer != analyzer)
currentAnalyzer.Close();
return generateAnalyzer;
});
try
{
// Index extensions get a detached copy because luceneDoc is reused by the caller.
if (indexExtensions.Count > 0)
currentlyIndexDocuments.Add(CloneDocument(luceneDoc));
currentIndexWriter.AddDocument(luceneDoc, newAnalyzer);
foreach (var fieldable in luceneDoc.GetFields())
{
using (fieldable.ReaderValue) // dispose all the readers
{
}
}
}
finally
{
// Only close the analyzer if a generator actually produced a new one.
if (newAnalyzer != analyzer)
newAnalyzer.Close();
}
}
示例9: LogIndexedDocument
/// <summary>
/// When debug logging is enabled, logs every field of the document about to be
/// indexed: name, indexed/stored flags, and value ("&lt;binary&gt;" for binary fields).
/// </summary>
protected void LogIndexedDocument(string key, Document luceneDoc)
{
    // Skip all the formatting work when debug logging is off.
    if (!logIndexing.IsDebugEnabled)
        return;

    var sb = new StringBuilder();
    foreach (var docField in luceneDoc.GetFields().Cast<IFieldable>())
    {
        var displayValue = docField.IsBinary ? "<binary>" : docField.StringValue;
        sb.Append("\t").Append(docField.Name)
            .Append(" ")
            .Append(docField.IsIndexed ? "I" : "-")
            .Append(docField.IsStored ? "S" : "-")
            .Append(": ")
            .Append(displayValue)
            .AppendLine();
    }
    logIndexing.Debug("Indexing on {0} result in index {1} gave document: {2}", key, name,
        sb.ToString());
}
示例10: RetrieveDocument
/// <summary>
/// Builds the query result for a map/reduce index document: when a stored
/// reduce value exists it becomes the projection (optionally merged with the
/// other stored fields); otherwise falls back to fetching all field names.
/// </summary>
protected override IndexQueryResult RetrieveDocument(Document document, FieldsToFetch fieldsToFetch, ScoreDoc score)
{
fieldsToFetch.EnsureHasField(Constants.ReduceKeyFieldName);
if (fieldsToFetch.HasExplicitFieldsToFetch)
{
return base.RetrieveDocument(document, fieldsToFetch, score);
}
var field = document.GetField(Constants.ReduceValueFieldName);
if (field == null)
{
// No pre-computed reduce value stored: project every field on the document.
fieldsToFetch = fieldsToFetch.CloneWith(document.GetFields().Select(x => x.Name).ToArray());
return base.RetrieveDocument(document, fieldsToFetch, score);
}
var projection = RavenJObject.Parse(field.StringValue);
if (fieldsToFetch.FetchAllStoredFields)
{
// Merge the other stored fields beneath the projection; projection values
// win on key collisions. The internal reduce key field is excluded.
var fields = new HashSet<string>(document.GetFields().Select(x => x.Name));
fields.Remove(Constants.ReduceKeyFieldName);
var documentFromFields = new RavenJObject();
AddFieldsToDocument(document, fields, documentFromFields);
foreach (var kvp in projection)
{
documentFromFields[kvp.Key] = kvp.Value;
}
projection = documentFromFields;
}
return new IndexQueryResult
{
Projection = projection,
Score = score.Score,
ReduceVal = field.StringValue
};
}
示例11: Add
/// <summary>
/// Adds a document to the index writer, skipping documents with no fields.
/// If the writer throws OutOfMemoryException it is disposed and the add is
/// retried once with a fresh writer, as Lucene's documentation suggests.
/// </summary>
public void Add(Document doc)
{
    Requires.NotNull("searchDocument", doc);

    // Nothing to index when the document carries no fields at all.
    if (doc.GetFields().Count == 0)
        return;

    try
    {
        Writer.AddDocument(doc);
    }
    catch (OutOfMemoryException)
    {
        // as suggested by Lucene's doc: discard the writer and retry the add
        lock (_writerLock)
        {
            DisposeWriter();
            Writer.AddDocument(doc);
        }
    }
}
示例12: FillTagsValues
/// <summary>
/// Copies the stored field values of a search-index document onto the
/// SearchResult, dispatching on the well-known tag names. Numeric tags are
/// parsed with TryParse so malformed values are silently skipped; fields whose
/// names start with the numeric-key or keywords prefixes populate the
/// NumericKeys and Keywords maps (first occurrence wins).
/// </summary>
private static void FillTagsValues(Document doc, SearchResult result)
{
foreach (var field in doc.GetFields())
{
// Only stored fields carry a retrievable string value.
if (field.StringValue == null) continue;
int intField;
switch (field.Name)
{
case Constants.UniqueKeyTag:
result.UniqueKey = field.StringValue;
break;
case Constants.TitleTag:
var title = field.StringValue;
//TODO - Need better highlighting logic for Title
//result.Title = string.IsNullOrEmpty(titleSnippet) ? title : string.Format("...{0}...", titleSnippet);
result.Title = title;
break;
case Constants.BodyTag:
result.Body = field.StringValue;
break;
case Constants.DescriptionTag:
result.Description = field.StringValue;
break;
case Constants.Tag:
// A document may carry several tag fields; accumulate them all.
result.Tags = result.Tags.Concat(new string[] { field.StringValue });
break;
case Constants.PermissionsTag:
result.Permissions = field.StringValue;
break;
case Constants.QueryStringTag:
result.QueryString = field.StringValue;
break;
case Constants.UrlTag:
result.Url = field.StringValue;
break;
case Constants.SearchTypeTag:
if(int.TryParse(field.StringValue, out intField)) result.SearchTypeId = intField;
break;
case Constants.ModuleIdTag:
if (int.TryParse(field.StringValue, out intField)) result.ModuleId = intField;
break;
case Constants.ModuleDefIdTag:
if (int.TryParse(field.StringValue, out intField)) result.ModuleDefId = intField;
break;
case Constants.PortalIdTag:
if (int.TryParse(field.StringValue, out intField)) result.PortalId = intField;
break;
case Constants.AuthorIdTag:
if (int.TryParse(field.StringValue, out intField)) result.AuthorUserId = intField;
break;
case Constants.RoleIdTag:
if (int.TryParse(field.StringValue, out intField)) result.RoleId = intField;
break;
case Constants.AuthorNameTag:
result.AuthorName = field.StringValue;
break;
case Constants.TabIdTag:
if (int.TryParse(field.StringValue, out intField)) result.TabId = intField;
break;
case Constants.ModifiedTimeTag:
// On parse failure modifiedTimeUtc stays default(DateTime) and is
// still assigned — matches the existing best-effort behavior.
DateTime modifiedTimeUtc;
DateTime.TryParseExact(field.StringValue, Constants.DateTimeFormat, null, DateTimeStyles.None, out modifiedTimeUtc);
result.ModifiedTimeUtc = modifiedTimeUtc;
break;
default:
// Prefixed fields map into the NumericKeys / Keywords dictionaries.
if (field.Name.StartsWith(Constants.NumericKeyPrefixTag))
{
var key = field.Name.Substring(Constants.NumericKeyPrefixTag.Length);
if (int.TryParse(field.StringValue, out intField))
{
if (!result.NumericKeys.ContainsKey(key))
result.NumericKeys.Add(key, intField);
}
}
else if (field.Name.StartsWith(Constants.KeywordsPrefixTag))
{
var key = field.Name.Substring(Constants.KeywordsPrefixTag.Length);
if (!result.Keywords.ContainsKey(key))
result.Keywords.Add(key, field.StringValue);
}
break;
}
}
}
示例13: CloneDocument
/// <summary>
/// Creates a detached copy of a Lucene document so it can be handed to index
/// extensions while the original (reused) document is mutated. Numeric fields
/// keep their typed values; other fields are copied as binary or string data.
/// </summary>
private static Document CloneDocument(Document luceneDoc)
{
    var clonedDocument = new Document();
    foreach (AbstractField field in luceneDoc.GetFields())
    {
        var numericField = field as NumericField;
        if (numericField != null)
        {
            var clonedNumericField = new NumericField(numericField.Name(),
                numericField.IsStored() ? Field.Store.YES : Field.Store.NO,
                numericField.IsIndexed());
            var numericValue = numericField.GetNumericValue();
            if (numericValue is int)
            {
                clonedNumericField.SetIntValue((int)numericValue);
            }
            else if (numericValue is long)
            {
                clonedNumericField.SetLongValue((long)numericValue);
            }
            else if (numericValue is double)
            {
                clonedNumericField.SetDoubleValue((double)numericValue);
            }
            else if (numericValue is float)
            {
                clonedNumericField.SetFloatValue((float)numericValue);
            }
            clonedDocument.Add(clonedNumericField);
        }
        else if (field.IsBinary())
        {
            var clonedField = new Field(field.Name(), field.BinaryValue(),
                field.IsStored() ? Field.Store.YES : Field.Store.NO);
            clonedDocument.Add(clonedField);
        }
        else if (field.StringValue() != null)
        {
            // BUG FIX: the previous version called BinaryValue() for every
            // non-numeric field; that returns null for plain string fields and
            // produced a broken clone. Copy the string value instead.
            var clonedField = new Field(field.Name(), field.StringValue(),
                field.IsStored() ? Field.Store.YES : Field.Store.NO,
                // NOTE(review): the original index options are not fully
                // recoverable from the field instance; ANALYZED vs NOT_ANALYZED
                // mirrors IsTokenized() — confirm against callers.
                field.IsIndexed()
                    ? (field.IsTokenized() ? Field.Index.ANALYZED : Field.Index.NOT_ANALYZED)
                    : Field.Index.NO);
            clonedDocument.Add(clonedField);
        }
    }
    return clonedDocument;
}
示例14: CreateSearchResult
/// <summary>
/// Builds a SearchResult from a Lucene hit: resolves the node id (preferring
/// the user-defined "id" field over the indexer's internal node id field),
/// records the score, and copies every field name/value onto the result.
/// </summary>
protected SearchResult CreateSearchResult(Document doc, float score)
{
    var nodeId = doc.Get("id");
    if (string.IsNullOrEmpty(nodeId))
    {
        // fall back to the indexer's internal node id field
        nodeId = doc.Get(LuceneIndexer.IndexNodeIdFieldName);
    }
    var result = new SearchResult()
    {
        Id = int.Parse(nodeId),
        Score = score
    };
    //we can use lucene to find out the fields which have been stored for this particular document
    //I'm not sure if it'll return fields that have null values though
    foreach (Field storedField in doc.GetFields().Cast<Field>())
    {
        var fieldName = storedField.Name();
        result.Fields.Add(fieldName, doc.Get(fieldName));
    }
    return result;
}
示例15: Add
/// <summary>Adds field info for a Document: registers every field on the
/// document, recording its indexing, term-vector, and norms options.</summary>
public void Add(Document doc)
{
    lock (this)
    {
        // Delegate each field to the overload that records its options;
        // storePayloads is always false at this call site.
        foreach (IFieldable docField in doc.GetFields())
        {
            Add(docField.Name,
                docField.IsIndexed,
                docField.IsTermVectorStored,
                docField.IsStorePositionWithTermVector,
                docField.IsStoreOffsetWithTermVector,
                docField.OmitNorms,
                false,
                docField.OmitTermFreqAndPositions);
        }
    }
}