This article collects typical usage examples of the C# method Lucene.Net.Search.IndexSearcher.Search. If you are wondering how to use IndexSearcher.Search in C#, how it works, or what real-world examples look like, the curated code samples below may help. You can also explore further usage examples of its declaring class, Lucene.Net.Search.IndexSearcher.
Below are 15 code examples of the IndexSearcher.Search method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C# code examples.
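The examples below come from different projects and target different Lucene.Net releases, so member casing and result types vary (Hits vs TopDocs, scoreDocs vs ScoreDocs, Close vs Dispose). As a quick orientation, here is a minimal sketch of the basic call shape, assuming the Lucene.Net 3.0.3 API; the index path and the "title" field are illustrative placeholders, not taken from any example below.

// Minimal sketch (assumes the Lucene.Net 3.0.3 API; the path and field name are hypothetical)
using System;
using System.IO;
using Lucene.Net.Analysis.Standard;
using Lucene.Net.QueryParsers;
using Lucene.Net.Search;
using Lucene.Net.Store;
using Version = Lucene.Net.Util.Version;

static class MinimalSearchExample
{
    static void Main()
    {
        // Open the index read-only; @"C:\MyIndex" is a placeholder path.
        using (var directory = FSDirectory.Open(new DirectoryInfo(@"C:\MyIndex")))
        using (var searcher = new IndexSearcher(directory, readOnly: true))
        {
            // Parse the user input against a single field ("title" is illustrative).
            var analyzer = new StandardAnalyzer(Version.LUCENE_30);
            var parser = new QueryParser(Version.LUCENE_30, "title", analyzer);
            var query = parser.Parse("lucene");

            // Ask for the top 10 hits; TopDocs carries the total hit count and the scored doc ids.
            TopDocs topDocs = searcher.Search(query, 10);
            foreach (var scoreDoc in topDocs.ScoreDocs)
            {
                var doc = searcher.Doc(scoreDoc.Doc);
                Console.WriteLine("{0} (score {1})", doc.Get("title"), scoreDoc.Score);
            }
        }
    }
}

The same shape recurs throughout the examples: build a Query (usually via a QueryParser or MultiFieldQueryParser), call Search with a hit limit, a Filter, or a Collector, then resolve each ScoreDoc back to a stored Document with searcher.Doc.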
Example 1: Search
public ActionResult Search(string query)
{
    ViewData["Message"] = "query : " + query;

    var searcher = new IndexSearcher(
        new Lucene.Net.Store.SimpleFSDirectory(new DirectoryInfo(Configuration.IndexDirectory)),
        readOnly: true);

    var fieldsToSearchIn = new[] { Configuration.Fields.Name, Configuration.Fields.Description };
    var queryParser = new MultiFieldQueryParser(Version.LUCENE_CURRENT,
                                                fieldsToSearchIn,
                                                new BrazilianAnalyzer());

    var numberOfResults = 10;
    var top10Results = searcher.Search(queryParser.Parse(query), numberOfResults);

    var docs = new List<DocumentViewModel>();
    foreach (var scoreDoc in top10Results.scoreDocs)
    {
        var document = searcher.Doc(scoreDoc.doc);
        var name = document.GetField(Configuration.Fields.Name).StringValue();
        var description = document.GetField(Configuration.Fields.Description).StringValue();
        var link = document.GetField(Configuration.Fields.Link).StringValue();
        docs.Add(new DocumentViewModel(name, description, link));
    }

    return View(new SearchViewModel(docs));
}
Example 2: TestBasic
public virtual void TestBasic()
{
    Directory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    Document doc = new Document();
    doc.Add(new Field("field", "value", Field.Store.NO, Field.Index.ANALYZED));
    writer.AddDocument(doc);
    writer.Close();

    TermQuery termQuery = new TermQuery(new Term("field", "value"));

    // should not throw exception with primitive query
    QueryWrapperFilter qwf = new QueryWrapperFilter(termQuery);
    IndexSearcher searcher = new IndexSearcher(dir, true);
    TopDocs hits = searcher.Search(new MatchAllDocsQuery(), qwf, 10);
    Assert.AreEqual(1, hits.totalHits);

    // should not throw exception with complex primitive query
    BooleanQuery booleanQuery = new BooleanQuery();
    booleanQuery.Add(termQuery, Occur.MUST);
    booleanQuery.Add(new TermQuery(new Term("field", "missing")), Occur.MUST_NOT);
    qwf = new QueryWrapperFilter(termQuery);
    hits = searcher.Search(new MatchAllDocsQuery(), qwf, 10);
    Assert.AreEqual(1, hits.totalHits);

    // should not throw exception with non primitive Query (doesn't implement Query#createWeight)
    qwf = new QueryWrapperFilter(new FuzzyQuery(new Term("field", "valu")));
    hits = searcher.Search(new MatchAllDocsQuery(), qwf, 10);
    Assert.AreEqual(1, hits.totalHits);
}
Example 3: TestQuery
public virtual void TestQuery()
{
    RAMDirectory dir = new RAMDirectory();
    IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(), true);
    AddDoc("one", iw);
    AddDoc("two", iw);
    AddDoc("three four", iw);
    iw.Close();

    IndexSearcher is_Renamed = new IndexSearcher(dir);
    Hits hits = is_Renamed.Search(new MatchAllDocsQuery());
    Assert.AreEqual(3, hits.Length());

    // some artificial queries to trigger the use of skipTo():
    BooleanQuery bq = new BooleanQuery();
    bq.Add(new MatchAllDocsQuery(), BooleanClause.Occur.MUST);
    bq.Add(new MatchAllDocsQuery(), BooleanClause.Occur.MUST);
    hits = is_Renamed.Search(bq);
    Assert.AreEqual(3, hits.Length());

    bq = new BooleanQuery();
    bq.Add(new MatchAllDocsQuery(), BooleanClause.Occur.MUST);
    bq.Add(new TermQuery(new Term("key", "three")), BooleanClause.Occur.MUST);
    hits = is_Renamed.Search(bq);
    Assert.AreEqual(1, hits.Length());

    // delete a document:
    is_Renamed.GetIndexReader().DeleteDocument(0);
    hits = is_Renamed.Search(new MatchAllDocsQuery());
    Assert.AreEqual(2, hits.Length());

    is_Renamed.Close();
}
Example 4: TestTermSearch
public void TestTermSearch()
{
    using (var dir = FSDirectory.Open(TestEnvironment.TestIndexDirectory))
    using (var indexSearcher = new IndexSearcher(dir))
    {
        var termSubjectAnt = new Term("subject", "ant");
        var termQuerySubjectAnt = new TermQuery(termSubjectAnt);
        var topDocsSubjectAnt = indexSearcher.Search(termQuerySubjectAnt, 10);
        // title=Ant in Action
        // subject=apache ant build tool junit java development
        Assert.Equal(1, topDocsSubjectAnt.TotalHits);

        var termSubjectJUnit = new Term("subject", "junit");
        var termQuerySubjectJUnit = new TermQuery(termSubjectJUnit);
        var topDocsSubjectJUnit = indexSearcher.Search(termQuerySubjectJUnit, 10);
        // ExplainResults(indexSearcher, termQuerySubjectJUnit, topDocsSubjectJUnit);
        // title=JUnit in Action, Second Edition
        // subject=junit unit testing mock objects
        // title=Ant in Action
        // subject=apache ant build tool junit java development
        Assert.Equal(2, topDocsSubjectJUnit.TotalHits); // "Ant in Action", "JUnit in Action, Second Edition"
    }
}
Example 5: _search
// main search method
private static IEnumerable<SampleData> _search(string searchQuery, string searchField = "")
{
    // validation
    if (string.IsNullOrEmpty(searchQuery.Replace("*", "").Replace("?", ""))) return new List<SampleData>();

    // set up lucene searcher
    using (var searcher = new IndexSearcher(_directory, false))
    {
        var hits_limit = 1000;
        var analyzer = new StandardAnalyzer(Version.LUCENE_30);

        // search by single field
        if (!string.IsNullOrEmpty(searchField))
        {
            var parser = new QueryParser(Version.LUCENE_30, searchField, analyzer);
            var query = parseQuery(searchQuery, parser);
            var hits = searcher.Search(query, hits_limit).ScoreDocs;
            var results = _mapLuceneToDataList(hits, searcher);
            analyzer.Close();
            searcher.Dispose();
            return results;
        }
        // search by multiple fields (ordered by RELEVANCE)
        else
        {
            var parser = new MultiFieldQueryParser(Version.LUCENE_30, new[] { "Id", "Name", "Description" }, analyzer);
            var query = parseQuery(searchQuery, parser);
            var hits = searcher.Search(query, null, hits_limit, Sort.INDEXORDER).ScoreDocs;
            var results = _mapLuceneToDataList(hits, searcher);
            analyzer.Close();
            searcher.Dispose();
            return results;
        }
    }
}
Example 6: TestBefore
public virtual void TestBefore()
{
    // create an index
    RAMDirectory indexStore = new RAMDirectory();
    IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);

    long now = (DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond);

    Document doc = new Document();
    // add time that is in the past
    doc.Add(new Field("datefield", DateTools.TimeToString(now - 1000, DateTools.Resolution.MILLISECOND), Field.Store.YES, Field.Index.NOT_ANALYZED));
    doc.Add(new Field("body", "Today is a very sunny day in New York City", Field.Store.YES, Field.Index.ANALYZED));
    writer.AddDocument(doc);
    writer.Optimize();
    writer.Close();

    IndexSearcher searcher = new IndexSearcher(indexStore, true);

    // filter that should preserve matches
    //DateFilter df1 = DateFilter.Before("datefield", now);
    TermRangeFilter df1 = new TermRangeFilter("datefield", DateTools.TimeToString(now - 2000, DateTools.Resolution.MILLISECOND), DateTools.TimeToString(now, DateTools.Resolution.MILLISECOND), false, true);
    // filter that should discard matches
    //DateFilter df2 = DateFilter.Before("datefield", now - 999999);
    TermRangeFilter df2 = new TermRangeFilter("datefield", DateTools.TimeToString(0, DateTools.Resolution.MILLISECOND), DateTools.TimeToString(now - 2000, DateTools.Resolution.MILLISECOND), true, false);

    // search something that doesn't exist with DateFilter
    Query query1 = new TermQuery(new Term("body", "NoMatchForThis"));
    // search for something that does exist
    Query query2 = new TermQuery(new Term("body", "sunny"));

    ScoreDoc[] result;

    // ensure that queries return expected results without DateFilter first
    result = searcher.Search(query1, null, 1000).ScoreDocs;
    Assert.AreEqual(0, result.Length);

    result = searcher.Search(query2, null, 1000).ScoreDocs;
    Assert.AreEqual(1, result.Length);

    // run queries with DateFilter
    result = searcher.Search(query1, df1, 1000).ScoreDocs;
    Assert.AreEqual(0, result.Length);

    result = searcher.Search(query1, df2, 1000).ScoreDocs;
    Assert.AreEqual(0, result.Length);

    result = searcher.Search(query2, df1, 1000).ScoreDocs;
    Assert.AreEqual(1, result.Length);

    result = searcher.Search(query2, df2, 1000).ScoreDocs;
    Assert.AreEqual(0, result.Length);
}
Example 7: Find
/// <summary>
/// Searches the index.
/// </summary>
/// <param name="queryText"></param>
/// <param name="keywordFilter">A Hashtable where the key is the fieldname of the keyword and
/// the value the keyword itself.</param>
/// <param name="pageIndex"></param>
/// <param name="pageSize"></param>
/// <returns></returns>
public SearchResultCollection Find(string queryText, Hashtable keywordFilter, int pageIndex, int pageSize)
{
    long startTicks = DateTime.Now.Ticks;
    Hits hits;

    try
    {
        Query query = MultiFieldQueryParser.Parse(queryText, new string[] { "title", "contents" }, new StandardAnalyzer());
        IndexSearcher searcher = new IndexSearcher(this._indexDirectory);

        if (keywordFilter != null && keywordFilter.Count > 0)
        {
            QueryFilter qf = BuildQueryFilterFromKeywordFilter(keywordFilter);
            hits = searcher.Search(query, qf);
        }
        else
        {
            hits = searcher.Search(query);
        }

        int start = pageIndex * pageSize;
        int end = (pageIndex + 1) * pageSize;
        if (hits.Length() <= end)
        {
            end = hits.Length();
        }

        SearchResultCollection results = new SearchResultCollection();
        results.TotalCount = hits.Length();
        results.PageIndex = pageIndex;

        for (int i = start; i < end; i++)
        {
            SearchResult result = new SearchResult();
            result.Title = hits.Doc(i).Get("title");
            result.Summary = hits.Doc(i).Get("summary");
            result.Author = hits.Doc(i).Get("author");
            result.ModuleType = hits.Doc(i).Get("moduletype");
            result.Path = hits.Doc(i).Get("path");
            result.Category = hits.Doc(i).Get("category");
            result.DateCreated = DateTime.Parse(hits.Doc(i).Get("datecreated"));
            result.Score = hits.Score(i);
            result.Boost = hits.Doc(i).GetBoost();
            result.SectionId = Int32.Parse(hits.Doc(i).Get("sectionid"));
            results.Add(result);
        }

        searcher.Close();
        results.ExecutionTime = DateTime.Now.Ticks - startTicks;
        return results;
    }
    catch (Exception ex)
    {
        throw new SearchException("Error while performing full-text search.", ex);
    }
}
Example 8: TestBefore
public virtual void TestBefore()
{
    // create an index
    RAMDirectory indexStore = new RAMDirectory();
    IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(), true);

    // current time in milliseconds since the Unix epoch
    long now = (long) (DateTime.UtcNow - new DateTime(1970, 1, 1)).TotalMilliseconds;

    Lucene.Net.Documents.Document doc = new Lucene.Net.Documents.Document();
    // add time that is in the past
    doc.Add(new Field("datefield", Lucene.Net.Documents.DateTools.TimeToString(now - 1000 * 100000, Lucene.Net.Documents.DateTools.Resolution.MILLISECOND), Field.Store.YES, Field.Index.UN_TOKENIZED));
    doc.Add(new Field("body", "Today is a very sunny day in New York City", Field.Store.YES, Field.Index.TOKENIZED));
    writer.AddDocument(doc);
    writer.Optimize();
    writer.Close();

    IndexSearcher searcher = new IndexSearcher(indexStore);

    // filter that should preserve matches
    //DateFilter df1 = DateFilter.Before("datefield", now);
    RangeFilter df1 = new RangeFilter("datefield", Lucene.Net.Documents.DateTools.TimeToString(now - 2000 * 100000, Lucene.Net.Documents.DateTools.Resolution.MILLISECOND), Lucene.Net.Documents.DateTools.TimeToString(now, Lucene.Net.Documents.DateTools.Resolution.MILLISECOND), false, true);
    // filter that should discard matches
    //DateFilter df2 = DateFilter.Before("datefield", now - 999999);
    RangeFilter df2 = new RangeFilter("datefield", Lucene.Net.Documents.DateTools.TimeToString(0, Lucene.Net.Documents.DateTools.Resolution.MILLISECOND), Lucene.Net.Documents.DateTools.TimeToString(now - 2000 * 100000, Lucene.Net.Documents.DateTools.Resolution.MILLISECOND), true, false);

    // search something that doesn't exist with DateFilter
    Query query1 = new TermQuery(new Term("body", "NoMatchForThis"));
    // search for something that does exist
    Query query2 = new TermQuery(new Term("body", "sunny"));

    Hits result;

    // ensure that queries return expected results without DateFilter first
    result = searcher.Search(query1);
    Assert.AreEqual(0, result.Length());

    result = searcher.Search(query2);
    Assert.AreEqual(1, result.Length());

    // run queries with DateFilter
    result = searcher.Search(query1, df1);
    Assert.AreEqual(0, result.Length());

    result = searcher.Search(query1, df2);
    Assert.AreEqual(0, result.Length());

    result = searcher.Search(query2, df1);
    Assert.AreEqual(1, result.Length());

    result = searcher.Search(query2, df2);
    Assert.AreEqual(0, result.Length());
}
Example 9: TestBefore
public virtual void TestBefore()
{
    // create an index
    RAMDirectory indexStore = new RAMDirectory();
    IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(), true);

    // current time in milliseconds since the Unix epoch (621355968000000000 is the tick count at 1970-01-01)
    long now = (System.DateTime.Now.Ticks - 621355968000000000) / 10000;

    Document doc = new Document();
    // add time that is in the past
    doc.Add(Field.Keyword("datefield", DateField.TimeToString(now - 1000)));
    doc.Add(Field.Text("body", "Today is a very sunny day in New York City"));
    writer.AddDocument(doc);
    writer.Optimize();
    writer.Close();

    IndexSearcher searcher = new IndexSearcher(indexStore);

    // filter that should preserve matches
    DateFilter df1 = DateFilter.Before("datefield", now);
    // filter that should discard matches
    DateFilter df2 = DateFilter.Before("datefield", now - 999999);

    // search something that doesn't exist with DateFilter
    Query query1 = new TermQuery(new Term("body", "NoMatchForThis"));
    // search for something that does exist
    Query query2 = new TermQuery(new Term("body", "sunny"));

    Hits result;

    // ensure that queries return expected results without DateFilter first
    result = searcher.Search(query1);
    Assert.AreEqual(0, result.Length());

    result = searcher.Search(query2);
    Assert.AreEqual(1, result.Length());

    // run queries with DateFilter
    result = searcher.Search(query1, df1);
    Assert.AreEqual(0, result.Length());

    result = searcher.Search(query1, df2);
    Assert.AreEqual(0, result.Length());

    result = searcher.Search(query2, df1);
    Assert.AreEqual(1, result.Length());

    result = searcher.Search(query2, df2);
    Assert.AreEqual(0, result.Length());
}
Example 10: Initialize_Indexes_All_Nodes
public void Initialize_Indexes_All_Nodes()
{
    string elementIdForTestingSearch = _deepNodeFinder.GetNodesForIndexing()[0].Id;
    int expectedNumNodes = _deepNodeFinder.GetNodesForIndexing().Length;

    Assert.AreEqual("usfr-pte_NetCashFlowsProvidedUsedOperatingActivitiesDirectAbstract", elementIdForTestingSearch,
                    "TEST SANITY: element id for test search");
    Assert.AreEqual(1595, expectedNumNodes, "TEST SANITY: Number of nodes found in the test taxonomy");

    IndexReader indexReader = IndexReader.Open(_indexMgr.LuceneDirectory_ForTesting);
    Assert.AreEqual(expectedNumNodes, indexReader.NumDocs(),
                    "An incorrect number of documents were found in the Lucene directory after initialization");

    IndexSearcher searcher = new IndexSearcher(_indexMgr.LuceneDirectory_ForTesting);
    try
    {
        Hits results =
            searcher.Search(new TermQuery(new Term(LuceneNodeIndexer.ELEMENTID_FOR_DELETING_FIELD, elementIdForTestingSearch)));
        Assert.AreEqual(1, results.Length(), "Search results should only have 1 hit");
        Assert.AreEqual(elementIdForTestingSearch, results.Doc(0).Get(LuceneNodeIndexer.ELEMENTID_FIELD),
                        "Search results yielded the wrong element!");
    }
    finally
    {
        searcher.Close();
    }
}
Example 11: SearchCore
private static IList<int> SearchCore(SearchFilter searchFilter, out int totalHits)
{
    if (!Directory.Exists(LuceneCommon.IndexDirectory))
    {
        totalHits = 0;
        return new int[0];
    }

    SortField sortField = GetSortField(searchFilter);
    int numRecords = searchFilter.Skip + searchFilter.Take;

    using (var directory = new LuceneFileSystem(LuceneCommon.IndexDirectory))
    {
        var searcher = new IndexSearcher(directory, readOnly: true);
        var query = ParseQuery(searchFilter);

        var filterTerm = searchFilter.IncludePrerelease ? "IsLatest" : "IsLatestStable";
        var termQuery = new TermQuery(new Term(filterTerm, Boolean.TrueString));
        Filter filter = new QueryWrapperFilter(termQuery);

        var results = searcher.Search(query, filter: filter, n: numRecords, sort: new Sort(sortField));
        var keys = results.scoreDocs.Skip(searchFilter.Skip)
                          .Select(c => ParseKey(searcher.Doc(c.doc).Get("Key")))
                          .ToList();

        totalHits = results.totalHits;
        searcher.Close();
        return keys;
    }
}
Example 12: searchLucene
public Data searchLucene(Data data)
{
    Search_gl search = new Search_gl();
    List<string> item = new List<string>();

    Lucene.Net.Store.Directory directory = FSDirectory.Open(new DirectoryInfo(Environment.CurrentDirectory + "\\LuceneIndex"));
    var analyzer = new StandardAnalyzer(Version.LUCENE_29);
    IndexReader reader = IndexReader.Open(directory, true);
    IndexSearcher searcher = new IndexSearcher(reader);

    //QueryParser queryParser = new QueryParser(Version.LUCENE_29, "summary", analyzer); // search a single field
    MultiFieldQueryParser parser = new MultiFieldQueryParser(new string[] { "name", "summary" }, analyzer); // search across multiple fields
    Query query = parser.Parse((data.getString("search")) + "*"); // can't search blank text with a wildcard as the first character

    TopScoreDocCollector collector = TopScoreDocCollector.create(1000, true);
    searcher.Search(query, collector);
    ScoreDoc[] hits = collector.TopDocs().ScoreDocs;

    int count = hits.Length;
    for (int i = 0; i < count; i++)
    {
        int docId = hits[i].doc;
        float score = hits[i].score;
        Document doc = searcher.Doc(docId);
        string id = doc.Get("id");
        item.Add(id);
    }

    Data list = search.search(data, item.ToArray());
    reader.Close();
    searcher.Close();
    return list;
}
Example 13: searchLucene
public Data searchLucene(Data data)
{
    Account_lg account = new Account_lg();
    List<string> item = new List<string>();

    Lucene.Net.Store.Directory directory = FSDirectory.Open(new DirectoryInfo("C:\\Visual Studio 2010\\Transaction" + "\\LuceneIndex"));
    var analyzer = new StandardAnalyzer(Version.LUCENE_29);
    IndexReader reader = IndexReader.Open(directory, true);
    IndexSearcher searcher = new IndexSearcher(reader);

    MultiFieldQueryParser parser = new MultiFieldQueryParser(Lucene.Net.Util.Version.LUCENE_29, new string[] { "name", "username" }, analyzer); // search across multiple fields
    Query query = parser.Parse((data.getString("search")) + "*"); // can't search blank text with a wildcard as the first character

    TopScoreDocCollector collector = TopScoreDocCollector.Create(1000, true);
    searcher.Search(query, collector);
    ScoreDoc[] hits = collector.TopDocs().ScoreDocs;

    int count = hits.Length;
    for (int i = 0; i < count; i++)
    {
        int docId = hits[i].Doc;
        float score = hits[i].Score;
        Document doc = searcher.Doc(docId);
        string id = doc.Get("id");
        item.Add(id);
    }

    Data list = account.selectUser(data, item.ToArray());
    reader.Dispose();
    searcher.Dispose();
    return list;
}
Example 14: HandleRangeFacet
private void HandleRangeFacet(string index, Facet facet, IndexQuery indexQuery, IndexSearcher currentIndexSearcher, Dictionary<string, IEnumerable<FacetValue>> results)
{
    var rangeResults = new List<FacetValue>();
    foreach (var range in facet.Ranges)
    {
        var baseQuery = database.IndexStorage.GetLuceneQuery(index, indexQuery, database.IndexQueryTriggers);

        //TODO the built-in parser can't handle [NULL TO 100.0}, i.e. a mix of [ and }
        //so we need to handle this ourselves (greater and less-than-or-equal)
        var rangeQuery = database.IndexStorage.GetLuceneQuery(index, new IndexQuery
        {
            Query = facet.Name + ":" + range
        }, database.IndexQueryTriggers);

        var joinedQuery = new BooleanQuery();
        joinedQuery.Add(baseQuery, BooleanClause.Occur.MUST);
        joinedQuery.Add(rangeQuery, BooleanClause.Occur.MUST);

        var topDocs = currentIndexSearcher.Search(joinedQuery, null, 1);
        if (topDocs.TotalHits > 0)
        {
            rangeResults.Add(new FacetValue
            {
                Count = topDocs.TotalHits,
                Range = range
            });
        }
    }

    results[facet.Name] = rangeResults;
}
Example 15: Search
public ProjectData[] Search(string searchTerm)
{
    IndexSearcher searcher = new IndexSearcher(luceneIndexDirectory);
    IntegralCollector searcherCollector = new IntegralCollector();

    // Setup the fields to search through
    string[] searchfields = new string[] { "name", "vessel" };

    // Build our booleanquery that will be a combination of all the queries for each individual search term
    var finalQuery = new BooleanQuery();
    var parser = new MultiFieldQueryParser(Lucene.Net.Util.Version.LUCENE_30, searchfields, analyzer);

    // Split the search string into separate search terms by word
    string[] terms = searchTerm.Split(new[] { " " }, StringSplitOptions.RemoveEmptyEntries);
    foreach (string term in terms)
        finalQuery.Add(parser.Parse(term.Replace("~", "") + "~"), Occur.SHOULD);

    searcher.Search(finalQuery, searcherCollector);

    var results = new ProjectData[searcherCollector.Docs.Count];
    for (int i = 0; i < searcherCollector.Docs.Count; i++)
    {
        var doc = searcher.Doc(searcherCollector.Docs[i]);
        results[i] = new ProjectData(doc.Get("name"), doc.Get("vessel"));
    }
    return results;
}