This page collects typical usage examples of the C# method Lucene.Net.Search.Query.Weight. If you have been wondering what Query.Weight does in C#, how to use it, or where to find it used in real code, the hand-picked samples below may help. You can also browse further usage examples for the enclosing class, Lucene.Net.Search.Query.
Listed below are 13 code examples of Query.Weight, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C# code samples.
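Before the individual examples, here is a minimal sketch of the pattern most of them share: ask the Query for a Weight bound to a particular Searcher, pull a Scorer from that Weight, and walk the matching documents. It follows the older Lucene.Net 2.x calls used in Examples 5-7 (Query.Weight(Searcher), Weight.Scorer(IndexReader), Scorer.Next()/Doc()/Score()); the index path, field name, term, and class name are placeholders of ours, and the 2.9-era examples further down use NextDoc()/DocID()/Advance() instead.
using Lucene.Net.Index;
using Lucene.Net.Search;

public static class WeightUsageSketch
{
    public static void Main()
    {
        // Placeholder index location and query term.
        IndexSearcher searcher = new IndexSearcher("index-dir");
        Query query = new TermQuery(new Term("content", "lucene"));

        // Bind the query to this searcher's term statistics, then obtain a scorer.
        Weight weight = query.Weight(searcher);
        Scorer scorer = weight.Scorer(searcher.GetIndexReader());

        if (scorer != null)
        {
            // 2.4-style iteration over the matching documents.
            while (scorer.Next())
            {
                System.Console.WriteLine(scorer.Doc() + " : " + scorer.Score());
            }
        }
        searcher.Close();
    }
}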
Example 1: Hits
internal Hits(Searcher s, Query q, Filter f)
{
weight = q.Weight(s);
searcher = s;
filter = f;
GetMoreDocs(50); // retrieve 100 initially (GetMoreDocs requests twice the given minimum)
}
Example 2: Hits
public /*internal*/ bool debugCheckedForDeletions = false; // for test purposes.
internal Hits(Searcher s, Query q, Filter f)
{
weight = q.Weight(s);
searcher = s;
filter = f;
nDeletions = CountDeletions(s);
GetMoreDocs(50); // retrieve 100 initially (GetMoreDocs requests twice the given minimum)
lengthAtStart = length;
}
Example 3: CreateWeight
/// <summary> creates a weight for <code>query</code></summary>
/// <returns> new weight
/// </returns>
protected internal virtual Weight CreateWeight(Query query)
{
return query.Weight(this);
}
Example 4: CreateWeight
/// <summary> creates a weight for <c>query</c></summary>
/// <returns> new weight
/// </returns>
/*protected internal*/
public virtual Weight CreateWeight(Query query)
{
return query.Weight(this);
}
Example 5: CheckFirstSkipTo
// check that first skip on just created scorers always goes to the right doc
private static void CheckFirstSkipTo(Query q, IndexSearcher s)
{
//System.out.println("checkFirstSkipTo: "+q);
float maxDiff = 1e-5f;
int[] lastDoc = new int[]{-1};
s.Search(q, new AnonymousClassHitCollector1(lastDoc, q, s, maxDiff));
Weight w = q.Weight(s);
Scorer scorer = w.Scorer(s.GetIndexReader());
bool more = scorer.SkipTo(lastDoc[0] + 1);
if (more)
Assert.IsFalse(more, "query's last doc was " + lastDoc[0] + " but skipTo(" + (lastDoc[0] + 1) + ") got to " + scorer.Doc());
}
Example 6: CheckSkipTo
/// <summary>alternate scorer skipTo(),skipTo(),next(),next(),skipTo(),skipTo(), etc
/// and ensure a hitcollector receives same docs and scores
/// </summary>
public static void CheckSkipTo(Query q, IndexSearcher s)
{
//System.out.println("Checking "+q);
if (BooleanQuery.GetAllowDocsOutOfOrder())
return; // in this case order of skipTo() might differ from that of next().
int skip_op = 0;
int next_op = 1;
int[][] orders = new int[][]{new int[]{next_op}, new int[]{skip_op}, new int[]{skip_op, next_op}, new int[]{next_op, skip_op}, new int[]{skip_op, skip_op, next_op, next_op}, new int[]{next_op, next_op, skip_op, skip_op}, new int[]{skip_op, skip_op, skip_op, next_op, next_op}};
for (int k = 0; k < orders.Length; k++)
{
int[] order = orders[k];
//System.out.print("Order:");for (int i = 0; i < order.length; i++) System.out.print(order[i]==skip_op ? " skip()":" next()"); System.out.println();
int[] opidx = new int[]{0};
Weight w = q.Weight(s);
Scorer scorer = w.Scorer(s.GetIndexReader());
// FUTURE: ensure scorer.doc()==-1
int[] sdoc = new int[]{-1};
float maxDiff = 1e-5f;
s.Search(q, new AnonymousClassHitCollector(order, opidx, skip_op, scorer, sdoc, maxDiff, q, s));
// make sure next call to scorer is false.
int op = order[(opidx[0]++) % order.Length];
//System.out.println(op==skip_op ? "last: skip()":"last: next()");
bool more = op == skip_op ? scorer.SkipTo(sdoc[0] + 1) : scorer.Next();
Assert.IsFalse(more);
}
}
Example 7: CheckSerialization
/// <summary>check that the query weight is serializable. </summary>
/// <throws> IOException if the serialization check fails. </throws>
private static void CheckSerialization(Query q, Searcher s)
{
Weight w = q.Weight(s);
try
{
System.IO.MemoryStream bos = new System.IO.MemoryStream();
System.IO.BinaryWriter oos = new System.IO.BinaryWriter(bos);
System.Runtime.Serialization.Formatters.Binary.BinaryFormatter formatter = new System.Runtime.Serialization.Formatters.Binary.BinaryFormatter();
formatter.Serialize(oos.BaseStream, w);
oos.Close();
System.IO.BinaryReader ois = new System.IO.BinaryReader(new System.IO.MemoryStream(bos.ToArray()));
formatter.Deserialize(ois.BaseStream);
ois.Close();
//skip equals() test for now - most weights don't override equals() and we won't add this just for the tests.
//TestCase.assertEquals("writeObject(w) != w. ("+w+")",w2,w);
}
catch (System.Exception e)
{
System.IO.IOException e2 = new System.IO.IOException("Serialization failed for " + w, e);
throw e2;
}
}
Example 8: CheckFirstSkipTo
// check that first skip on just created scorers always goes to the right doc
private static void CheckFirstSkipTo(Query q, IndexSearcher s)
{
//System.out.println("checkFirstSkipTo: "+q);
float maxDiff = 1e-4f; //{{Lucene.Net-2.9.1}}Intentional diversion from Java Lucene
int[] lastDoc = new int[]{-1};
IndexReader[] lastReader = {null};
s.Search(q, new AnonymousClassCollector1(lastDoc, q, s, maxDiff, lastReader));
if(lastReader[0] != null)
{
// confirm that skipping beyond the last doc, on the
// previous reader, hits NO_MORE_DOCS
IndexReader previousReader = lastReader[0];
Weight w = q.Weight(new IndexSearcher(previousReader));
Scorer scorer = w.Scorer(previousReader, true, false);
if (scorer != null)
{
bool more = scorer.Advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
Assert.IsFalse(more, "query's last doc was " + lastDoc[0] + " but skipTo(" + (lastDoc[0] + 1) + ") got to " + scorer.DocID());
}
}
}
Example 9: CheckSkipTo
/// <summary>alternate scorer skipTo(),skipTo(),next(),next(),skipTo(),skipTo(), etc
/// and ensure a hitcollector receives same docs and scores
/// </summary>
public static void CheckSkipTo(Query q, IndexSearcher s)
{
//System.out.println("Checking "+q);
if (q.Weight(s).GetScoresDocsOutOfOrder())
return; // in this case order of skipTo() might differ from that of next().
int skip_op = 0;
int next_op = 1;
int[][] orders = new int[][]{new int[]{next_op}, new int[]{skip_op}, new int[]{skip_op, next_op}, new int[]{next_op, skip_op}, new int[]{skip_op, skip_op, next_op, next_op}, new int[]{next_op, next_op, skip_op, skip_op}, new int[]{skip_op, skip_op, skip_op, next_op, next_op}};
for (int k = 0; k < orders.Length; k++)
{
int[] order = orders[k];
// System.out.print("Order:");for (int i = 0; i < order.length; i++)
// System.out.print(order[i]==skip_op ? " skip()":" next()");
// System.out.println();
int[] opidx = new int[]{0};
int[] lastDoc = new[] {-1};
// FUTURE: ensure scorer.doc()==-1
float maxDiff = 1e-5f;
IndexReader[] lastReader = new IndexReader[] {null};
s.Search(q, new AnonymousClassCollector(order, opidx, skip_op, lastReader, maxDiff, q, s, lastDoc));
if (lastReader[0] != null)
{
// Confirm that skipping beyond the last doc, on the
// previous reader, hits NO_MORE_DOCS
IndexReader previousReader = lastReader[0];
Weight w = q.Weight(new IndexSearcher(previousReader));
Scorer scorer = w.Scorer(previousReader, true, false);
if (scorer != null)
{
bool more = scorer.Advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
Assert.IsFalse(more, "query's last doc was " + lastDoc[0] + " but skipTo(" + (lastDoc[0] + 1) + ") got to " + scorer.DocID());
}
}
}
}
Example 10: SearchPage
public static List<SearchRecord> SearchPage(out Query query, out Dictionary<string, int> statistics, List<string> filterList, int pageSize, int pageNum, bool fileInclude, bool highLight)
{
List<SearchRecord> recordList = new List<SearchRecord>();
query = GetQuery(fileInclude);
statistics = new Dictionary<string, int>();
try
{
#region Add Index Dir
//SupportClass.FileUtil.WriteToLog(@"D:\Indexer\log\search.log", "begin to init searcher.");
List<IndexSearcher> searcherList = new List<IndexSearcher>();
if (searchIndexList.Count > 0)
{
foreach (IndexSet indexSet in searchIndexList)
{
if (indexSet.Type == IndexTypeEnum.Increment)
continue;
searcherList.Add(new IndexSearcher(indexSet.Path));
}
}
else
{
foreach (IndexSet indexSet in indexFieldsDict.Keys)
{
if (indexSet.Type == IndexTypeEnum.Increment)
continue;
searcherList.Add(new IndexSearcher(indexSet.Path));
}
}
if (fileInclude)
{
searcherList.Add(new IndexSearcher(fileSet.Path));
}
#endregion
//SupportClass.FileUtil.WriteToLog(@"D:\Indexer\log\search.log", "begin to Search.");
ParallelMultiSearcher searcher = new ParallelMultiSearcher(searcherList.ToArray());
TopDocs topDocs = searcher.Search(query.Weight(searcher), null, searchSet.MaxMatches);
ScoreDoc[] scoreDocs = topDocs.scoreDocs;
Highlighter highlighter = new Highlighter(new QueryScorer(query));
highlighter.SetTextFragmenter(new SimpleFragmenter(SupportClass.FRAGMENT_SIZE));
#region Order by Score
//SupportClass.FileUtil.WriteToLog(@"D:\Indexer\log\search.log", "Add to list.");
List<ScoreDoc> scoreDocList = new List<ScoreDoc>();
for (int i = 0; i < scoreDocs.Length; i++)
{
float score = scoreDocs[i].score;
if (score < searchSet.MinScore)
continue;
scoreDocList.Add(scoreDocs[i]);
}
//SupportClass.FileUtil.WriteToLog(@"D:\Indexer\log\search.log", "Begin to sort.");
scoreDocList.Sort(delegate(ScoreDoc x, ScoreDoc y)
{
if (x.score > y.score)
return -1;
else if (x.score == y.score)
return 0;
else
return 1;
});
//SupportClass.FileUtil.WriteToLog(@"D:\Indexer\log\search.log", "End sort.");
#endregion
#region Doc Statistic
int start = 0, end = scoreDocList.Count;
if (pageSize > 0 && pageNum >= 1)
{
start = pageSize * (pageNum - 1) + 1;
end = pageNum * pageSize;
}
int current = 1;
SpecialFieldSelector sfSelector = new SpecialFieldSelector(SupportClass.TableFileNameField);
for (int recNum = 0; recNum < scoreDocList.Count; recNum++)
{
float score = scoreDocList[recNum].score;
if (score < searchSet.MinScore)
continue;
Document fDoc = searcher.Doc(scoreDocList[recNum].doc, sfSelector);
string caption = fDoc.Get(SupportClass.TableFileNameField);
if (!caption.Equals(SupportClass.TFNFieldValue))
{
if (sfpDict.ContainsKey(caption) == false || nameIndexDict.ContainsKey(caption) == false)
{
continue;
}
}
if (statistics.ContainsKey(caption))
{
statistics[caption] = statistics[caption] + 1;
}
else
{
statistics.Add(caption, 1);
}
if (filterList != null && filterList.Count > 0)
{
if (!filterList.Contains(caption))
continue;
}
#region Add Page
if (current >= start && current <= end)
{
//......... remainder of this example omitted .........
Example 11: HighLightSearch
public static List<SearchRecord> HighLightSearch(out Query query)
{
List<SearchRecord> recordList = new List<SearchRecord>();
query = GetQuery();
try
{
if (searchIndexList.Count > 0)
{
foreach (IndexSet indexSet in searchIndexList)
{
if (indexSet.Type == IndexTypeEnum.Increment)
continue;
Source source = indexDict[indexSet];
Dictionary<string, IndexField> fpDict = source.FieldDict;
//IndexSearcher searcher = new IndexSearcher(indexSet.Path);
IndexSearcher presearcher = new IndexSearcher(indexSet.Path);
ParallelMultiSearcher searcher = new ParallelMultiSearcher(new IndexSearcher[] { presearcher });
#if DEBUG
System.Console.WriteLine(query.ToString());
#endif
Highlighter highlighter = new Highlighter(new QueryScorer(query));
highlighter.SetTextFragmenter(new SimpleFragmenter(SupportClass.FRAGMENT_SIZE));
TopDocs topDocs = searcher.Search(query.Weight(searcher), null, searchSet.MaxMatches);
ScoreDoc[] scoreDocs = topDocs.scoreDocs;
for (int i = 0; i < scoreDocs.Length; i++)
{
float score = scoreDocs[i].score;
if (score < searchSet.MinScore)
continue;
Document doc = searcher.Doc(scoreDocs[i].doc);
Field[] fields = new Field[doc.GetFields().Count];
doc.GetFields().CopyTo(fields, 0);
List<SearchField> sfList = new List<SearchField>();
foreach (Field field in fields)
{
string key = field.Name();
string value = field.StringValue();
string output = SupportClass.String.DropHTML(value);
TokenStream tokenStream = analyzer.TokenStream(key, new System.IO.StringReader(output));
string result = "";
result = highlighter.GetBestFragment(tokenStream, output);
if (result != null && string.IsNullOrEmpty(result.Trim()) == false)
{
if (fpDict.ContainsKey(key))
sfList.Add(new SearchField(key, fpDict[key].Caption, value, result, field.GetBoost(), fpDict[key].IsTitle, true, fpDict[key].Order));
else
sfList.Add(new SearchField(key, key, value, result, field.GetBoost(), false, false, 0));
}
else
{
if (fpDict.ContainsKey(key))
sfList.Add(new SearchField(key, fpDict[key].Caption, value, value, field.GetBoost(), fpDict[key].IsTitle, true, fpDict[key].Order));
else
sfList.Add(new SearchField(key, key, value, result, field.GetBoost(), false, false, 0));
}
}
recordList.Add(new SearchRecord(indexSet, sfList, indexDict[indexSet].PrimaryKey, score));
}
}
}
else
{
foreach (IndexSet indexSet in indexFieldsDict.Keys)
{
if (indexSet.Type == IndexTypeEnum.Increment)
continue;
Source source = indexDict[indexSet];
Dictionary<string, IndexField> fpDict = source.FieldDict;
//IndexSearcher searcher = new IndexSearcher(indexSet.Path);
IndexSearcher presearcher = new IndexSearcher(indexSet.Path);
ParallelMultiSearcher searcher = new ParallelMultiSearcher(new IndexSearcher[] { presearcher });
#if DEBUG
System.Console.WriteLine(query.ToString());
#endif
Highlighter highlighter = new Highlighter(new QueryScorer(query));
highlighter.SetTextFragmenter(new SimpleFragmenter(SupportClass.FRAGMENT_SIZE));
TopDocs topDocs = searcher.Search(query.Weight(searcher), null, searchSet.MaxMatches);
ScoreDoc[] scoreDocs = topDocs.scoreDocs;
for (int i = 0; i < scoreDocs.Length; i++)
{
float score = scoreDocs[i].score;
if (score < searchSet.MinScore)
continue;
Document doc = searcher.Doc(scoreDocs[i].doc);
Field[] fields = new Field[doc.GetFields().Count];
doc.GetFields().CopyTo(fields, 0);
List<SearchField> sfList = new List<SearchField>();
foreach (Field field in fields)
{
string key = field.Name();
string value = field.StringValue();
string output = SupportClass.String.DropHTML(value);
TokenStream tokenStream = analyzer.TokenStream(key, new System.IO.StringReader(output));
string result = "";
result = highlighter.GetBestFragment(tokenStream, output);
if (result != null && string.IsNullOrEmpty(result.Trim()) == false)
{
if (fpDict.ContainsKey(key))
sfList.Add(new SearchField(key, fpDict[key].Caption, value, result, field.GetBoost(), fpDict[key].IsTitle, true, fpDict[key].Order));
else
//......... remainder of this example omitted .........
Example 12: SearchEx
public static List<SearchRecord> SearchEx(out Query query)
{
List<SearchRecord> recordList = new List<SearchRecord>();
query = GetQuery();
try
{
if (searchIndexList.Count > 0)
{
foreach (IndexSet indexSet in searchIndexList)
{
if (indexSet.Type == IndexTypeEnum.Increment)
continue;
Source source = indexDict[indexSet];
Dictionary<string, IndexField> fpDict = source.FieldDict;
//IndexSearcher searcher = new IndexSearcher(indexSet.Path);
IndexSearcher presearcher = new IndexSearcher(indexSet.Path);
ParallelMultiSearcher searcher = new ParallelMultiSearcher(new IndexSearcher[] { presearcher });
#if DEBUG
System.Console.WriteLine(query.ToString());
#endif
TopDocs topDocs = searcher.Search(query.Weight(searcher), null, searchSet.MaxMatches);
ScoreDoc[] scoreDocs = topDocs.scoreDocs;
for (int i = 0; i < scoreDocs.Length; i++)
{
Document doc = searcher.Doc(scoreDocs[i].doc);
float score = scoreDocs[i].score;
if (score < searchSet.MinScore)
continue;
Field[] fields = new Field[doc.GetFields().Count];
doc.GetFields().CopyTo(fields, 0);
List<SearchField> sfList = new List<SearchField>();
foreach (Field field in fields)
{
if (fpDict.ContainsKey(field.Name()))
sfList.Add(new SearchField(field, fpDict[field.Name()]));
else
sfList.Add(new SearchField(field));
}
recordList.Add(new SearchRecord(indexSet, sfList, indexDict[indexSet].PrimaryKey, score));
}
}
}
else
{
foreach (IndexSet indexSet in indexFieldsDict.Keys)
{
if (indexSet.Type == IndexTypeEnum.Increment)
continue;
Source source = indexDict[indexSet];
Dictionary<string, IndexField> fpDict = source.FieldDict;
//IndexSearcher searcher = new IndexSearcher(indexSet.Path);
IndexSearcher presearcher = new IndexSearcher(indexSet.Path);
ParallelMultiSearcher searcher = new ParallelMultiSearcher(new IndexSearcher[] { presearcher });
#if DEBUG
System.Console.WriteLine(query.ToString());
#endif
TopDocs topDocs = searcher.Search(query.Weight(searcher), null, searchSet.MaxMatches);
ScoreDoc[] scoreDocs = topDocs.scoreDocs;
for (int i = 0; i < scoreDocs.Length; i++)
{
Document doc = searcher.Doc(scoreDocs[i].doc);
float score = scoreDocs[i].score;
if (score < searchSet.MinScore)
continue;
Field[] fields = new Field[doc.GetFields().Count];
doc.GetFields().CopyTo(fields, 0);
List<SearchField> sfList = new List<SearchField>();
foreach (Field field in fields)
{
if (fpDict.ContainsKey(field.Name()))
sfList.Add(new SearchField(field, fpDict[field.Name()]));
else
sfList.Add(new SearchField(field));
}
recordList.Add(new SearchRecord(indexSet, sfList, indexDict[indexSet].PrimaryKey, score));
}
}
}
}
catch (Exception e)
{
SupportClass.FileUtil.WriteToLog(SupportClass.LogPath, e.StackTrace.ToString());
}
return recordList;
}
Example 13: ExactFastSearch
public static List<SearchRecord> ExactFastSearch(out Query query)
{
List<SearchRecord> docList = new List<SearchRecord>();
query = null;
try
{
List<IndexReader> readerList = new List<IndexReader>();
foreach (IndexSet indexSet in searchIndexList)
{
if (indexSet.Type == IndexTypeEnum.Increment)
continue;
readerList.Add(IndexReader.Open(indexSet.Path));
}
MultiReader multiReader = new MultiReader(readerList.ToArray());
IndexSearcher searcher = new IndexSearcher(multiReader);
query = GetQuery();
#if DEBUG
System.Console.WriteLine(query.ToString());
#endif
TopDocs topDocs = searcher.Search(query.Weight(searcher), null, searchSet.MaxMatches);
ScoreDoc[] scoreDocs = topDocs.scoreDocs;
for (int i = 0; i < scoreDocs.Length; i++)
{
Document doc = searcher.Doc(scoreDocs[i].doc);
float score = scoreDocs[i].score;
if (score < searchSet.MinScore)
continue;
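// NOTE: as excerpted, this adds a Lucene Document to a List<SearchRecord>, which will not compile;
// presumably the original code wraps doc in a SearchRecord (as the other examples do) before adding it.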
docList.Add(doc);
}
}
catch (Exception e)
{
SupportClass.FileUtil.WriteToLog(SupportClass.LogPath, e.StackTrace.ToString());
}
return docList;
}