本文整理汇总了C#中Lucene.Net.Search.IndexSearcher.SetDefaultFieldSortScoring方法的典型用法代码示例。如果您正苦于以下问题:C# IndexSearcher.SetDefaultFieldSortScoring方法的具体用法?C# IndexSearcher.SetDefaultFieldSortScoring怎么用?C# IndexSearcher.SetDefaultFieldSortScoring使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Lucene.Net.Search.IndexSearcher
的用法示例。
在下文中一共展示了IndexSearcher.SetDefaultFieldSortScoring方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: DisplayResults
/// <summary>
/// Runs <paramref name="query"/> against the index sorted by <paramref name="sort"/>
/// and prints the top 20 hits to the console.
/// </summary>
/// <param name="query">The Lucene query to execute.</param>
/// <param name="sort">The sort order applied to the results.</param>
public void DisplayResults(Query query, Sort sort)
{
    using (var indexSearcher = new IndexSearcher(directory, true))
    {
        // Track per-hit scores even when sorting by a field (scores are normally
        // skipped for field sorts); max-score tracking is not needed here.
        indexSearcher.SetDefaultFieldSortScoring(true, false);
        var results = indexSearcher.Search(query, null, 20, sort);
        Console.WriteLine("\nResults for: {0} sorted by {1}", query, sort);
        Console.WriteLine();
        // FIX: the original computed `results` but never displayed them, despite
        // the method's name. Print each hit with its score.
        foreach (var scoreDoc in results.ScoreDocs)
        {
            var doc = indexSearcher.Doc(scoreDoc.Doc);
            Console.WriteLine("  {0}  (score: {1})", doc, scoreDoc.Score);
        }
    }
}
示例2: GetAllIndexRecords
// search methods
/// <summary>
/// Enumerates every document currently stored in the Lucene index and maps
/// them to <see cref="LuceneSearchModel"/> instances. Returns an empty list
/// when the index directory contains no files.
/// </summary>
public static IEnumerable<LuceneSearchModel> GetAllIndexRecords()
{
    // validate search index: no files means no index has been built yet
    if (!System.IO.Directory.EnumerateFiles(_luceneDir).Any()) return new List<LuceneSearchModel>();

    // FIX: the original leaked the searcher and reader if enumeration threw
    // (Dispose was only reached on success) and never released the TermDocs.
    // Wrap everything so cleanup runs on all paths. The no-op
    // SetDefaultFieldSortScoring call was dropped: no search is performed here,
    // so it had no observable effect.
    using (var searcher = new IndexSearcher(_directory, false))
    using (var reader = IndexReader.Open(_directory, false))
    {
        var docs = new List<Document>();
        var termDocs = reader.TermDocs();
        try
        {
            // An unpositioned TermDocs walks every (non-deleted) document id.
            while (termDocs.Next()) docs.Add(searcher.Doc(termDocs.Doc));
        }
        finally
        {
            termDocs.Close();
        }
        return _mapLuceneToDataList(docs);
    }
}
示例3: _search
// main search method
/// <summary>
/// Core search entry point. Searches a single field when
/// <paramref name="searchField"/> is supplied, otherwise searches the standard
/// set of fields in index order, and maps up to
/// <paramref name="amountToTake"/> hits to <see cref="LuceneSearchModel"/>.
/// </summary>
/// <param name="searchQuery">Raw user query; wildcard-only or empty queries return nothing.</param>
/// <param name="amountToTake">Maximum number of results to map.</param>
/// <param name="searchField">Optional single field to restrict the search to.</param>
private static IEnumerable<LuceneSearchModel> _search(string searchQuery, int amountToTake, string searchField = "")
{
    // FIX: guard against null before calling Replace — the original threw a
    // NullReferenceException for a null query. A query that is empty after
    // stripping wildcards would match nothing useful, so bail out early.
    if (string.IsNullOrEmpty(searchQuery) ||
        string.IsNullOrEmpty(searchQuery.Replace("*", "").Replace("?", "")))
        return new List<LuceneSearchModel>();

    const int hitsLimit = 1000;

    // FIX: the searcher's Dispose was called both explicitly and by the using
    // block, and the analyzer leaked when parsing/searching threw. Let the
    // using block own the searcher and a finally own the analyzer.
    using (var searcher = new IndexSearcher(_directory, false))
    {
        var analyzer = new StandardAnalyzer(Version.LUCENE_30);
        try
        {
            // Include document scores when the engine sorts by field.
            searcher.SetDefaultFieldSortScoring(true, true);

            ScoreDoc[] hits;
            if (!string.IsNullOrEmpty(searchField))
            {
                // search by single field
                var parser = new QueryParser(Version.LUCENE_30, searchField, analyzer);
                var query = parseQuery(searchQuery, parser);
                hits = searcher.Search(query, hitsLimit).ScoreDocs;
            }
            else
            {
                // search by multiple fields (ordered by RELEVANCE)
                var parser = new MultiFieldQueryParser(Version.LUCENE_30, new[] { AppConstants.LucId, AppConstants.LucTopicName, AppConstants.LucTopicTags, AppConstants.LucPostContent }, analyzer);
                var query = parseQuery(searchQuery, parser);
                hits = searcher.Search(query, null, hitsLimit, Sort.INDEXORDER).ScoreDocs;
            }
            return _mapLuceneToDataList(hits, searcher, amountToTake);
        }
        finally
        {
            analyzer.Close();
        }
    }
}
示例4: ExecuteQuery
/// <summary>
/// Executes <paramref name="luceneQuery"/>, returning the top
/// <c>start + pageSize</c> documents (callers page within that window).
/// When all documents are requested and no sort applies, gathers everything
/// with a collector instead of a sized priority queue.
/// </summary>
private TopDocs ExecuteQuery(IndexSearcher indexSearcher, Query luceneQuery, int start, int pageSize,
                             IndexQuery indexQuery)
{
    var sort = indexQuery.GetSort(parent.indexDefinition, parent.viewGenerator);

    if (pageSize == Int32.MaxValue && sort == null) // we want all docs, no sorting required
    {
        var gatherAllCollector = new GatherAllCollector();
        indexSearcher.Search(luceneQuery, gatherAllCollector);
        return gatherAllCollector.ToTopDocs();
    }

    // FIX: the original used Math.Abs(pageSize + start), but an int sum that
    // overflows to Int32.MinValue makes Math.Abs throw OverflowException, and
    // any other overflow yields an arbitrary wrong page size. Clamp in 64-bit
    // arithmetic instead; the result is always in [1, Int32.MaxValue].
    var minPageSize = (int)Math.Max(Math.Min((long)pageSize + start, Int32.MaxValue), 1L);

    // NOTE: We get Start + Pagesize results back so we have something to page on
    if (sort != null)
    {
        try
        {
            // Enable score tracking for the duration of the field-sorted search
            // so hits still carry scores.
            indexSearcher.SetDefaultFieldSortScoring(true, false);
            return indexSearcher.Search(luceneQuery, null, minPageSize, sort);
        }
        finally
        {
            // The searcher is shared; restore the default so later unsorted
            // queries skip the extra scoring work.
            indexSearcher.SetDefaultFieldSortScoring(false, false);
        }
    }
    return indexSearcher.Search(luceneQuery, null, minPageSize);
}
示例5: GetIndex
// create an index of all the documents, or just the x, or just the y documents
/// <summary>
/// Builds an in-memory index over the rows of <c>data</c> — keeping the
/// even-positioned rows, the odd-positioned rows, or both — and returns a
/// searcher over it with field-sort scoring and max-score tracking enabled.
/// </summary>
private Searcher GetIndex(bool even, bool odd)
{
    // Columns 2..11 of each data row map onto these field names; a column is
    // only indexed when its value is non-null. Column 0 ("tracer") is stored
    // but not indexed, column 1 ("contents") is analyzed.
    string[] optionalFields = { "int", "float", "string", "custom", "i18n", "long", "double", "short", "byte", "parser" };

    RAMDirectory indexStore = new RAMDirectory();
    IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    // Flush often and merge rarely so the index ends up with several segments.
    writer.SetMaxBufferedDocs(2);
    writer.SetMergeFactor(1000);

    for (int row = 0; row < data.Length; ++row)
    {
        bool isEven = (row % 2) == 0;
        if (!((isEven && even) || (!isEven && odd)))
            continue;

        Document doc = new Document();
        doc.Add(new Field("tracer", data[row][0], Field.Store.YES, Field.Index.NO));
        doc.Add(new Field("contents", data[row][1], Field.Store.NO, Field.Index.ANALYZED));
        for (int col = 0; col < optionalFields.Length; ++col)
        {
            string value = data[row][col + 2];
            if (value != null)
                doc.Add(new Field(optionalFields[col], value, Field.Store.NO, Field.Index.NOT_ANALYZED));
        }
        doc.SetBoost(2); // produce some scores above 1.0
        writer.AddDocument(doc);
    }
    //writer.optimize ();
    writer.Close();

    IndexSearcher s = new IndexSearcher(indexStore);
    s.SetDefaultFieldSortScoring(true, true);
    return s;
}
示例6: Create
//.........这里部分代码省略.........
// NOTE(review): this is the continuation of a setup method whose beginning is
// omitted above — doc/doc2, writer, id, contents, and the numeric Field
// instances are created in the elided portion. Documented as-is; confirm
// against the full source.
doc.Add(doubleField);
doc2.Add(doubleField);
// we use two diff string fields so our FieldCache usage
// is less suspicious to cache inspection
Field stringField = new Field("string", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.Add(stringField);
Field stringFieldIdx = new Field("stringIdx", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.Add(stringFieldIdx);
// doc2 doesn't have stringField or stringFieldIdx, so we get nulls
for (int i = 0; i < NUM_DOCS; i++)
{
id.SetValue("" + i);
// Token "z" is rarer than "y", which is rarer than "x", giving varied term
// frequencies across the corpus.
if (i % 1000 == 0)
{
contents.SetValue("a b c z");
}
else if (i % 100 == 0)
{
contents.SetValue("a b c y");
}
else if (i % 10 == 0)
{
contents.SetValue("a b c x");
}
else
{
contents.SetValue("a b c");
}
byteField.SetValue("" + NextInt((sbyte) System.SByte.MinValue, (sbyte) System.SByte.MaxValue));
// Roughly 1-in-10 docs pin each numeric field at its extreme min/max value so
// the sort comparators see boundary values; otherwise the value is random.
if (NextInt(10) == 3)
{
shortField.SetValue("" + System.Int16.MinValue);
}
else if (NextInt(10) == 7)
{
shortField.SetValue("" + System.Int16.MaxValue);
}
else
{
shortField.SetValue("" + NextInt(System.Int16.MinValue, System.Int16.MaxValue));
}
if (NextInt(10) == 3)
{
intField.SetValue("" + System.Int32.MinValue);
}
else if (NextInt(10) == 7)
{
intField.SetValue("" + System.Int32.MaxValue);
}
else
{
intField.SetValue("" + this.r.Next());
}
if (NextInt(10) == 3)
{
longField.SetValue("" + System.Int64.MinValue);
}
else if (NextInt(10) == 7)
{
longField.SetValue("" + System.Int64.MaxValue);
}
else
{
longField.SetValue("" + this.r.Next(System.Int32.MaxValue));
}
floatField.SetValue("" + (float) this.r.NextDouble());
doubleField.SetValue("" + this.r.NextDouble());
// Every 197th document is the sparse doc2 (no string/stringIdx fields), so
// string sorts encounter null values.
if (i % 197 == 0)
{
writer.AddDocument(doc2);
}
else
{
System.String r = RandomString(NextInt(20));
stringField.SetValue(r);
stringFieldIdx.SetValue(r);
writer.AddDocument(doc);
}
}
writer.Close();
// Build three searchers over the same logical content: the original
// multi-segment index, a fully optimized single-segment copy, and a copy
// optimized down to at most 3 segments — each with field-sort scoring enabled
// so hits carry scores.
searcherMultiSegment = new IndexSearcher(dir);
searcherMultiSegment.SetDefaultFieldSortScoring(true, true);
dir2 = new MockRAMDirectory(dir);
writer = new IndexWriter(dir2, new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
writer.Optimize();
writer.Close();
searcherSingleSegment = new IndexSearcher(dir2);
searcherSingleSegment.SetDefaultFieldSortScoring(true, true);
dir3 = new MockRAMDirectory(dir);
writer = new IndexWriter(dir3, new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
writer.Optimize(3);
writer.Close();
searcherFewSegment = new IndexSearcher(dir3);
searcherFewSegment.SetDefaultFieldSortScoring(true, true);
}
示例7: ExecuteQuery
/// <summary>
/// Executes <paramref name="luceneQuery"/>, returning the top
/// <c>start + pageSize</c> documents (callers page within that window).
/// </summary>
private TopDocs ExecuteQuery(IndexSearcher indexSearcher, Query luceneQuery, int start, int pageSize,
                             IndexQuery indexQuery)
{
    var sort = indexQuery.GetSort(parent.indexDefinition);

    // FIX: only take the gather-everything shortcut when no sort was requested.
    // The original ignored a requested sort whenever all docs were asked for;
    // the sibling overload already guards with `sort == null`.
    if (pageSize == Int32.MaxValue && sort == null) // we want all docs
    {
        var gatherAllCollector = new GatherAllCollector();
        indexSearcher.Search(luceneQuery, gatherAllCollector);
        return gatherAllCollector.ToTopDocs();
    }

    // FIX: pageSize + start can overflow int, producing a negative sum that the
    // original clamped to 1 (silently returning one result). Clamp in 64-bit
    // arithmetic to [1, Int32.MaxValue] instead.
    var minPageSize = (int)Math.Max(Math.Min((long)pageSize + start, Int32.MaxValue), 1L);

    // NOTE: We get Start + Pagesize results back so we have something to page on
    if (sort != null)
    {
        try
        {
            // Enable score tracking while the field-sorted search runs.
            indexSearcher.SetDefaultFieldSortScoring(true, false);
            return indexSearcher.Search(luceneQuery, null, minPageSize, sort);
        }
        finally
        {
            // The searcher is shared; restore the default afterwards.
            indexSearcher.SetDefaultFieldSortScoring(false, false);
        }
    }
    return indexSearcher.Search(luceneQuery, null, minPageSize);
}
示例8: TestNormalization
// Verifies that searching through a MultiSearcher yields the same scores the
// same documents get from a single searcher over one combined index — i.e.
// score normalization across sub-searchers must not change results, with or
// without an explicit Sort.RELEVANCE.
private void TestNormalization(int nDocs, System.String message)
{
Query query = new TermQuery(new Term("contents", "doc0"));
RAMDirectory ramDirectory1;
IndexSearcher indexSearcher1;
ScoreDoc[] hits;
ramDirectory1 = new MockRAMDirectory();
// First put the documents in the same index
InitIndex(ramDirectory1, nDocs, true, null); // documents with a single token "doc0", "doc1", etc...
InitIndex(ramDirectory1, nDocs, false, "x"); // documents with two tokens "doc0" and "x", "doc1" and x, etc...
indexSearcher1 = new IndexSearcher(ramDirectory1);
// Enable score tracking during field sorts so hit scores are populated.
indexSearcher1.SetDefaultFieldSortScoring(true, true);
hits = indexSearcher1.Search(query, null, 1000).scoreDocs;
Assert.AreEqual(2, hits.Length, message);
// Store the scores for use later
float[] scores = new float[]{hits[0].score, hits[1].score};
// The single-token doc scores higher than the two-token doc (shorter field).
Assert.IsTrue(scores[0] > scores[1], message);
indexSearcher1.Close();
ramDirectory1.Close();
hits = null;
RAMDirectory ramDirectory2;
IndexSearcher indexSearcher2;
ramDirectory1 = new MockRAMDirectory();
ramDirectory2 = new MockRAMDirectory();
// Now put the documents in a different index
InitIndex(ramDirectory1, nDocs, true, null); // documents with a single token "doc0", "doc1", etc...
InitIndex(ramDirectory2, nDocs, true, "x"); // documents with two tokens "doc0" and "x", "doc1" and x, etc...
indexSearcher1 = new IndexSearcher(ramDirectory1);
indexSearcher1.SetDefaultFieldSortScoring(true, true);
indexSearcher2 = new IndexSearcher(ramDirectory2);
indexSearcher2.SetDefaultFieldSortScoring(true, true);
Searcher searcher = GetMultiSearcherInstance(new Searcher[]{indexSearcher1, indexSearcher2});
hits = searcher.Search(query, null, 1000).scoreDocs;
Assert.AreEqual(2, hits.Length, message);
// The scores should be the same (within reason)
Assert.AreEqual(scores[0], hits[0].score, 1e-6, message); // This will a document from ramDirectory1
Assert.AreEqual(scores[1], hits[1].score, 1e-6, message); // This will a document from ramDirectory2
// Adding a Sort.RELEVANCE object should not change anything
hits = searcher.Search(query, null, 1000, Sort.RELEVANCE).scoreDocs;
Assert.AreEqual(2, hits.Length, message);
Assert.AreEqual(scores[0], hits[0].score, 1e-6, message); // This will a document from ramDirectory1
Assert.AreEqual(scores[1], hits[1].score, 1e-6, message); // This will a document from ramDirectory2
searcher.Close();
ramDirectory1.Close();
ramDirectory2.Close();
}
示例9: ExecuteQuery
/// <summary>
/// Executes <paramref name="luceneQuery"/>, returning the top
/// <c>start + pageSize</c> documents and honoring the query's sort-field
/// aggregation mode (in-order sort, or min/max score aggregation).
/// </summary>
private TopDocs ExecuteQuery(IndexSearcher indexSearcher, Query luceneQuery, int start, int pageSize,
                             IndexQuery indexQuery)
{
    if (pageSize == Int32.MaxValue) // we want all docs
    {
        var gatherAllCollector = new GatherAllCollector();
        indexSearcher.Search(luceneQuery, gatherAllCollector);
        return gatherAllCollector.ToTopDocs();
    }

    // FIX: pageSize + start can overflow int; clamp in 64-bit arithmetic to
    // [1, Int32.MaxValue] instead of letting a wrapped negative sum reach Max.
    var minPageSize = (int)Math.Max(Math.Min((long)pageSize + start, Int32.MaxValue), 1L);

    // FIX: the original guard was `SortedFields != null || SortedFields.Count() > 0`,
    // which dereferences SortedFields when it is null (NullReferenceException).
    // `&&` is the intended condition; Any() avoids counting the whole sequence.
    if (indexQuery.SortedFields != null && indexQuery.SortedFields.Any())
    {
        if (indexQuery.SortByAggregation == SortFieldAggregation.UseInOrder)
        {
            Sort sort = indexQuery.GetSort(parent.indexDefinition);
            // NOTE: We get Start + Pagesize results back so we have something to page on
            if (sort != null)
            {
                try
                {
                    // Enable score tracking while the field-sorted search runs.
                    indexSearcher.SetDefaultFieldSortScoring(true, false);
                    return indexSearcher.Search(luceneQuery, null, minPageSize, sort);
                }
                finally
                {
                    // The searcher is shared; restore the default afterwards.
                    indexSearcher.SetDefaultFieldSortScoring(false, false);
                }
            }
        }
        else if (indexQuery.SortByAggregation == SortFieldAggregation.UseMaximum
                 || indexQuery.SortByAggregation == SortFieldAggregation.UseMinimum)
        {
            // Min/max aggregation folds the sort fields into the score instead
            // of sorting on them directly.
            var minMaxSortAggregationQuery = new AggregateFieldsScoreQuery(
                luceneQuery,
                indexQuery.SortedFields.Select(f => f.Field).ToArray(),
                indexQuery.SortByAggregation);
            return indexSearcher.Search(minMaxSortAggregationQuery, null, minPageSize);
        }
    }
    return indexSearcher.Search(luceneQuery, null, minPageSize);
}