本文整理汇总了C#中Raven.Database.Indexing.Index.CreateAnalyzer方法的典型用法代码示例。如果您正苦于以下问题:C# Index.CreateAnalyzer方法的具体用法?C# Index.CreateAnalyzer怎么用?C# Index.CreateAnalyzer使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 Raven.Database.Indexing.Index 的用法示例。
在下文中一共展示了Index.CreateAnalyzer方法的1个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: PerformSearch
private void PerformSearch(IHttpContext context, string indexName, Index index, MoreLikeThisQueryParameters parameters)
{
IndexSearcher searcher;
using (Database.IndexStorage.GetCurrentIndexSearcher(indexName, out searcher))
{
var documentQuery = new BooleanQuery();
if (!string.IsNullOrEmpty(parameters.DocumentId))
{
documentQuery.Add(new TermQuery(new Term(Constants.DocumentIdFieldName, parameters.DocumentId)),
Lucene.Net.Search.BooleanClause.Occur.MUST);
}
foreach (string key in parameters.MapGroupFields.Keys)
{
documentQuery.Add(new TermQuery(new Term(key, parameters.MapGroupFields[key])),
Lucene.Net.Search.BooleanClause.Occur.MUST);
}
var td = searcher.Search(documentQuery, 1);
// get the current Lucene docid for the given RavenDB doc ID
if (td.ScoreDocs.Length == 0)
{
context.SetStatusToNotFound();
context.WriteJson(new {Error = "Document " + parameters.DocumentId + " could not be found"});
return;
}
var ir = searcher.GetIndexReader();
var mlt = new RavenMoreLikeThis(ir);
AssignParameters(mlt, parameters);
if (!string.IsNullOrWhiteSpace(parameters.StopWordsDocumentId))
{
var stopWordsDoc = Database.Get(parameters.StopWordsDocumentId, null);
if (stopWordsDoc == null)
{
context.SetStatusToNotFound();
context.WriteJson(
new
{
Error = "Stop words document " + parameters.StopWordsDocumentId + " could not be found"
});
return;
}
var stopWords = stopWordsDoc.DataAsJson.JsonDeserialization<StopWordsSetup>().StopWords;
mlt.SetStopWords(new Hashtable(stopWords.ToDictionary(x => x.ToLower())));
}
var fieldNames = parameters.Fields ?? GetFieldNames(ir);
mlt.SetFieldNames(fieldNames);
var toDispose = new List<Action>();
PerFieldAnalyzerWrapper perFieldAnalyzerWrapper = null;
try
{
perFieldAnalyzerWrapper = index.CreateAnalyzer(new LowerCaseKeywordAnalyzer(), toDispose, true);
mlt.SetAnalyzer(perFieldAnalyzerWrapper);
var mltQuery = mlt.Like(td.ScoreDocs[0].doc);
var tsdc = TopScoreDocCollector.create(context.GetPageSize(Database.Configuration.MaxPageSize), true);
searcher.Search(mltQuery, tsdc);
var hits = tsdc.TopDocs().ScoreDocs;
var jsonDocuments = GetJsonDocuments(parameters, searcher, indexName, hits, td.ScoreDocs[0].doc);
var result = new MultiLoadResult();
var includedEtags = new List<byte>(jsonDocuments.SelectMany(x => x.Etag.Value.ToByteArray()));
includedEtags.AddRange(Database.GetIndexEtag(indexName, null).ToByteArray());
var loadedIds = new HashSet<string>(jsonDocuments.Select(x => x.Key));
var addIncludesCommand = new AddIncludesCommand(Database, GetRequestTransaction(context), (etag, includedDoc) =>
{
includedEtags.AddRange(etag.ToByteArray());
result.Includes.Add(includedDoc);
}, context.Request.QueryString.GetValues("include") ?? new string[0], loadedIds);
foreach (var jsonDocumet in jsonDocuments)
{
result.Results.Add(jsonDocumet.ToJson());
addIncludesCommand.Execute(jsonDocumet.DataAsJson);
}
Guid computedEtag;
using (var md5 = MD5.Create())
{
var computeHash = md5.ComputeHash(includedEtags.ToArray());
computedEtag = new Guid(computeHash);
}
if (context.MatchEtag(computedEtag))
{
context.SetStatusToNotModified();
return;
}
context.Response.AddHeader("ETag", computedEtag.ToString());
context.WriteJson(result);
}
//.........这里部分代码省略.........