This article collects typical usage examples of the C# method Lucene.Net.Search.IndexSearcher.GetSimilarity. If you are wondering what IndexSearcher.GetSimilarity does, how to call it in C#, or what it looks like in real code, the curated examples here should help. You can also explore further usage examples of its containing class, Lucene.Net.Search.IndexSearcher.
Three code examples of the IndexSearcher.GetSimilarity method are shown below, ordered by popularity by default.
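Before the collected examples, here is a minimal, self-contained sketch (not taken from any of them) of the usual GetSimilarity/SetSimilarity round trip. It assumes the Lucene.Net 2.9-era API these examples target (method-style accessors such as GetSimilarity and SetSimilarity); the field name, sample text, and the class name GetSimilaritySketch are illustrative only.

using Lucene.Net.Analysis;
using Lucene.Net.Documents;
using Lucene.Net.Index;
using Lucene.Net.Search;
using Lucene.Net.Store;

public static class GetSimilaritySketch
{
    public static void Run()
    {
        // Build a tiny in-memory index so the searcher has something to open.
        Directory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
        Document doc = new Document();
        doc.Add(new Field("field", "eating chocolate in a computer lab", Field.Store.YES, Field.Index.ANALYZED));
        writer.AddDocument(doc);
        writer.Close();

        IndexSearcher searcher = new IndexSearcher(dir);

        // GetSimilarity returns the Similarity the searcher uses for scoring;
        // unless one has been set explicitly, this is the default implementation.
        Similarity sim = searcher.GetSimilarity();
        float tf = sim.Tf(3);                    // term-frequency factor for freq = 3
        float idf = sim.Idf(1, 1);               // idf for docFreq = 1 out of 1 documents
        float norm = sim.LengthNorm("field", 6); // length norm for a 6-term field

        // SetSimilarity swaps in another implementation; a later GetSimilarity
        // call returns the instance that was set (Example 3 below relies on this).
        searcher.SetSimilarity(new DefaultSimilarity());

        searcher.Close();
    }
}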
Example 1: TestKnownSetOfDocuments
public virtual void TestKnownSetOfDocuments()
{
    System.String test1 = "eating chocolate in a computer lab"; //6 terms
    System.String test2 = "computer in a computer lab"; //5 terms
    System.String test3 = "a chocolate lab grows old"; //5 terms
    System.String test4 = "eating chocolate with a chocolate lab in an old chocolate colored computer lab"; //13 terms
    System.Collections.IDictionary test4Map = new System.Collections.Hashtable();
    test4Map["chocolate"] = 3;
    test4Map["lab"] = 2;
    test4Map["eating"] = 1;
    test4Map["computer"] = 1;
    test4Map["with"] = 1;
    test4Map["a"] = 1;
    test4Map["colored"] = 1;
    test4Map["in"] = 1;
    test4Map["an"] = 1;
    test4Map["old"] = 1;
    Document testDoc1 = new Document();
    SetupDoc(testDoc1, test1);
    Document testDoc2 = new Document();
    SetupDoc(testDoc2, test2);
    Document testDoc3 = new Document();
    SetupDoc(testDoc3, test3);
    Document testDoc4 = new Document();
    SetupDoc(testDoc4, test4);
    Directory dir = new MockRAMDirectory();
    try
    {
        IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
        Assert.IsTrue(writer != null);
        writer.AddDocument(testDoc1);
        writer.AddDocument(testDoc2);
        writer.AddDocument(testDoc3);
        writer.AddDocument(testDoc4);
        writer.Close();
        IndexSearcher knownSearcher = new IndexSearcher(dir);
        TermEnum termEnum = knownSearcher.reader_ForNUnit.Terms();
        TermDocs termDocs = knownSearcher.reader_ForNUnit.TermDocs();
        //System.out.println("Terms: " + termEnum.size() + " Orig Len: " + termArray.length);
        Similarity sim = knownSearcher.GetSimilarity();
        while (termEnum.Next())
        {
            Term term = termEnum.Term();
            //System.out.println("Term: " + term);
            termDocs.Seek(term);
            while (termDocs.Next())
            {
                int docId = termDocs.Doc();
                int freq = termDocs.Freq();
                //System.out.println("Doc Id: " + docId + " freq " + freq);
                TermFreqVector vector = knownSearcher.reader_ForNUnit.GetTermFreqVector(docId, "field");
                float tf = sim.Tf(freq);
                float idf = sim.Idf(term, knownSearcher);
                //float qNorm = sim.queryNorm()
                //This is fine since we don't have stop words
                float lNorm = sim.LengthNorm("field", vector.GetTerms().Length);
                //float coord = sim.coord()
                //System.out.println("TF: " + tf + " IDF: " + idf + " LenNorm: " + lNorm);
                Assert.IsTrue(vector != null);
                System.String[] vTerms = vector.GetTerms();
                int[] freqs = vector.GetTermFrequencies();
                for (int i = 0; i < vTerms.Length; i++)
                {
                    if (term.Text().Equals(vTerms[i]))
                    {
                        Assert.IsTrue(freqs[i] == freq);
                    }
                }
            }
            //System.out.println("--------");
        }
        Query query = new TermQuery(new Term("field", "chocolate"));
        ScoreDoc[] hits = knownSearcher.Search(query, null, 1000).ScoreDocs;
        //doc 3 should be the first hit b/c it is the shortest match
        Assert.IsTrue(hits.Length == 3);
        float score = hits[0].score;
        /*System.out.println("Hit 0: " + hits.id(0) + " Score: " + hits.score(0) + " String: " + hits.doc(0).toString());
        System.out.println("Explain: " + knownSearcher.explain(query, hits.id(0)));
        System.out.println("Hit 1: " + hits.id(1) + " Score: " + hits.score(1) + " String: " + hits.doc(1).toString());
        System.out.println("Explain: " + knownSearcher.explain(query, hits.id(1)));
        System.out.println("Hit 2: " + hits.id(2) + " Score: " + hits.score(2) + " String: " + hits.doc(2).toString());
        System.out.println("Explain: " + knownSearcher.explain(query, hits.id(2)));*/
        Assert.IsTrue(hits[0].doc == 2);
        Assert.IsTrue(hits[1].doc == 3);
        Assert.IsTrue(hits[2].doc == 0);
        TermFreqVector vector2 = knownSearcher.reader_ForNUnit.GetTermFreqVector(hits[1].doc, "field");
        Assert.IsTrue(vector2 != null);
        //System.out.println("Vector: " + vector);
        System.String[] terms = vector2.GetTerms();
        int[] freqs2 = vector2.GetTermFrequencies();
        Assert.IsTrue(terms != null && terms.Length == 10);
        for (int i = 0; i < terms.Length; i++)
        {
            System.String term = terms[i];
            //System.out.println("Term: " + term);
            //......... part of the code is omitted here .........
Example 2: TestKnownSetOfDocuments
public virtual void TestKnownSetOfDocuments()
{
    System.String[] termArray = new System.String[]{"eating", "chocolate", "in", "a", "computer", "lab", "grows", "old", "colored", "with", "an"};
    System.String test1 = "eating chocolate in a computer lab"; //6 terms
    System.String test2 = "computer in a computer lab"; //5 terms
    System.String test3 = "a chocolate lab grows old"; //5 terms
    System.String test4 = "eating chocolate with a chocolate lab in an old chocolate colored computer lab"; //13 terms
    System.Collections.IDictionary test4Map = new System.Collections.Hashtable();
    test4Map["chocolate"] = 3;
    test4Map["lab"] = 2;
    test4Map["eating"] = 1;
    test4Map["computer"] = 1;
    test4Map["with"] = 1;
    test4Map["a"] = 1;
    test4Map["colored"] = 1;
    test4Map["in"] = 1;
    test4Map["an"] = 1;
    test4Map["old"] = 1;
    Document testDoc1 = new Document();
    SetupDoc(testDoc1, test1);
    Document testDoc2 = new Document();
    SetupDoc(testDoc2, test2);
    Document testDoc3 = new Document();
    SetupDoc(testDoc3, test3);
    Document testDoc4 = new Document();
    SetupDoc(testDoc4, test4);
    Directory dir = new RAMDirectory();
    try
    {
        IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(), true);
        Assert.IsTrue(writer != null);
        writer.AddDocument(testDoc1);
        writer.AddDocument(testDoc2);
        writer.AddDocument(testDoc3);
        writer.AddDocument(testDoc4);
        writer.Close();
        IndexSearcher knownSearcher = new IndexSearcher(dir);
        TermEnum termEnum = knownSearcher.reader.Terms();
        TermDocs termDocs = knownSearcher.reader.TermDocs();
        //System.out.println("Terms: " + termEnum.size() + " Orig Len: " + termArray.length);
        Similarity sim = knownSearcher.GetSimilarity();
        while (termEnum.Next())
        {
            Term term = termEnum.Term();
            //System.out.println("Term: " + term);
            termDocs.Seek(term);
            while (termDocs.Next())
            {
                int docId = termDocs.Doc();
                int freq = termDocs.Freq();
                //System.out.println("Doc Id: " + docId + " freq " + freq);
                TermFreqVector vector = knownSearcher.reader.GetTermFreqVector(docId, "Field");
                float tf = sim.Tf(freq);
                float idf = sim.Idf(term, knownSearcher);
                //float qNorm = sim.queryNorm()
                //This is fine since we don't have stop words
                float lNorm = sim.LengthNorm("Field", vector.GetTerms().Length);
                //float coord = sim.coord()
                //System.out.println("TF: " + tf + " IDF: " + idf + " LenNorm: " + lNorm);
                Assert.IsTrue(vector != null);
                System.String[] vTerms = vector.GetTerms();
                int[] freqs = vector.GetTermFrequencies();
                for (int i = 0; i < vTerms.Length; i++)
                {
                    if (term.Text().Equals(vTerms[i]))
                    {
                        Assert.IsTrue(freqs[i] == freq);
                    }
                }
            }
            //System.out.println("--------");
        }
        Query query = new TermQuery(new Term("Field", "chocolate"));
        Hits hits = knownSearcher.Search(query);
        //doc 3 should be the first hit b/c it is the shortest match
        Assert.IsTrue(hits.Length() == 3);
        float score = hits.Score(0);
        /*System.out.println("Hit 0: " + hits.id(0) + " Score: " + hits.score(0) + " String: " + hits.doc(0).toString());
        System.out.println("Explain: " + knownSearcher.explain(query, hits.id(0)));
        System.out.println("Hit 1: " + hits.id(1) + " Score: " + hits.score(1) + " String: " + hits.doc(1).toString());
        System.out.println("Explain: " + knownSearcher.explain(query, hits.id(1)));
        System.out.println("Hit 2: " + hits.id(2) + " Score: " + hits.score(2) + " String: " + hits.doc(2).toString());
        System.out.println("Explain: " + knownSearcher.explain(query, hits.id(2)));*/
        Assert.IsTrue(testDoc3.ToString().Equals(hits.Doc(0).ToString()));
        Assert.IsTrue(testDoc4.ToString().Equals(hits.Doc(1).ToString()));
        Assert.IsTrue(testDoc1.ToString().Equals(hits.Doc(2).ToString()));
        TermFreqVector vector2 = knownSearcher.reader.GetTermFreqVector(hits.Id(1), "Field");
        Assert.IsTrue(vector2 != null);
        //System.out.println("Vector: " + vector);
        System.String[] terms = vector2.GetTerms();
        int[] freqs2 = vector2.GetTermFrequencies();
        Assert.IsTrue(terms != null && terms.Length == 10);
        for (int i = 0; i < terms.Length; i++)
        {
            System.String term = terms[i];
            //......... part of the code is omitted here .........
Example 3: WrapUnderlyingReader
/// <summary> Given an IndexSearcher, returns a new IndexSearcher whose IndexReader
/// is a MultiReader containing the Reader of the original IndexSearcher,
/// as well as several "empty" IndexReaders -- some of which will have
/// deleted documents in them. This new IndexSearcher should
/// behave exactly the same as the original IndexSearcher.
/// </summary>
/// <param name="s">the searcher to wrap
/// </param>
/// <param name="edge">if negative, s will be the first sub; if 0, s will be in the middle, if positive s will be the last sub
/// </param>
public static IndexSearcher WrapUnderlyingReader(IndexSearcher s, int edge)
{
    IndexReader r = s.GetIndexReader();
    // we can't put deleted docs before the nested reader, because
    // it will throw off the docIds
    IndexReader[] readers = new IndexReader[] {
        edge < 0 ? r : IndexReader.Open(MakeEmptyIndex(0), true),
        IndexReader.Open(MakeEmptyIndex(0), true),
        new MultiReader(new IndexReader[] {
            IndexReader.Open(MakeEmptyIndex(edge < 0 ? 4 : 0), true),
            IndexReader.Open(MakeEmptyIndex(0), true),
            0 == edge ? r : IndexReader.Open(MakeEmptyIndex(0), true)
        }),
        IndexReader.Open(MakeEmptyIndex(0 < edge ? 0 : 7), true),
        IndexReader.Open(MakeEmptyIndex(0), true),
        new MultiReader(new IndexReader[] {
            IndexReader.Open(MakeEmptyIndex(0 < edge ? 0 : 5), true),
            IndexReader.Open(MakeEmptyIndex(0), true),
            0 < edge ? r : IndexReader.Open(MakeEmptyIndex(0), true)
        })
    };
    IndexSearcher out_Renamed = new IndexSearcher(new MultiReader(readers));
    out_Renamed.SetSimilarity(s.GetSimilarity());
    return out_Renamed;
}
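A hedged usage sketch of the helper above: it is typically called once per edge position so the same searcher is exercised as the first, middle, and last sub-reader of the composite. The searcher variable below and the MakeEmptyIndex helper referenced inside the method are assumed to exist in the surrounding test class.

// Hypothetical usage; `searcher` is an already-open IndexSearcher.
IndexSearcher first = WrapUnderlyingReader(searcher, -1); // searcher is the first sub-reader
IndexSearcher middle = WrapUnderlyingReader(searcher, 0); // searcher sits in the middle
IndexSearcher last = WrapUnderlyingReader(searcher, 1);   // searcher is the last sub-reader
// Each wrapped searcher should return the same hits and scores as `searcher` itself,
// and it inherits the original searcher's Similarity via GetSimilarity/SetSimilarity.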