This article collects typical usage examples of the C# class Lucene.Net.Analysis.Analyzer. If you have been wondering how to use Lucene.Net.Analysis.Analyzer in C#, or what concrete usage looks like, the curated class code examples here may help.
The Lucene.Net.Analysis.Analyzer class belongs to the Lucene.Net.Analysis namespace. 15 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C# code examples.
Example 1: QueryTermVector

public QueryTermVector(System.String queryString, Analyzer analyzer)
{
    if (analyzer != null)
    {
        TokenStream stream = analyzer.TokenStream("", new System.IO.StringReader(queryString));
        if (stream != null)
        {
            IList<string> terms = new List<string>();
            try
            {
                bool hasMoreTokens = false;
                stream.Reset();
                ITermAttribute termAtt = stream.AddAttribute<ITermAttribute>();
                hasMoreTokens = stream.IncrementToken();
                while (hasMoreTokens)
                {
                    terms.Add(termAtt.Term);
                    hasMoreTokens = stream.IncrementToken();
                }
                ProcessTerms(terms.ToArray());
            }
            catch (System.IO.IOException)
            {
                // Analysis errors are swallowed; the term vector is simply left empty.
            }
        }
    }
}
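For orientation, here is a minimal sketch of how a constructor like this might be exercised; the StandardAnalyzer, version constant, and query string below are illustrative assumptions, not part of the example above:

// Hypothetical usage: analyze a raw query string into a term vector.
Analyzer analyzer = new StandardAnalyzer(Lucene.Net.Util.Version.LUCENE_30);
QueryTermVector vector = new QueryTermVector("quick brown fox", analyzer);
// The vector now exposes the analyzed terms and their frequencies.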
Example 2: QueryTermVector

public QueryTermVector(System.String queryString, Analyzer analyzer)
{
    if (analyzer != null)
    {
        TokenStream stream = analyzer.TokenStream("", new System.IO.StringReader(queryString));
        if (stream != null)
        {
            System.Collections.ArrayList terms = new System.Collections.ArrayList();
            try
            {
                bool hasMoreTokens = false;
                stream.Reset();
                TermAttribute termAtt = (TermAttribute) stream.AddAttribute(typeof(TermAttribute));
                hasMoreTokens = stream.IncrementToken();
                while (hasMoreTokens)
                {
                    terms.Add(termAtt.Term());
                    hasMoreTokens = stream.IncrementToken();
                }
                ProcessTerms((System.String[]) terms.ToArray(typeof(System.String)));
            }
            catch (System.IO.IOException)
            {
                // Analysis errors are swallowed; the term vector is simply left empty.
            }
        }
    }
}
Example 3: DocumentWriter

/// <summary>This ctor is used by test code only.</summary>
/// <param name="directory">The directory to write the document information to</param>
/// <param name="analyzer">The analyzer to use for the document</param>
/// <param name="similarity">The Similarity function</param>
/// <param name="maxFieldLength">The maximum number of tokens a field may have</param>
public DocumentWriter(Directory directory, Analyzer analyzer, Similarity similarity, int maxFieldLength)
{
    InitBlock();
    this.directory = directory;
    this.analyzer = analyzer;
    this.similarity = similarity;
    this.maxFieldLength = maxFieldLength;
}
Example 4: CountHits

private void CountHits(Analyzer analyzer, string[] docs, Query q, int expected)
{
    Directory d = GetDirectory(analyzer, docs); // see Example 6 below
    IndexReader r = DirectoryReader.Open(d);
    IndexSearcher s = new IndexSearcher(r);
    TotalHitCountCollector c = new TotalHitCountCollector();
    s.Search(q, c);
    Assert.AreEqual(expected, c.TotalHits, q.ToString());
    r.Dispose();
    d.Dispose();
}
Example 5: Init

/// <summary>
/// Initializes the analyzer and the searcher.
/// </summary>
public static void Init()
{
    // Analyzer
    OneAnalyzer = new Lucene.Net.Analysis.XunLongX.XunLongAnalyzer();
    // Searcher
    mSearch = new ClassSearch();
}
Example 6: GetDirectory

public static Directory GetDirectory(Analyzer analyzer, string[] vals)
{
    Directory directory = NewDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(
        Random(), directory,
        NewIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)
            .SetMaxBufferedDocs(TestUtil.NextInt(Random(), 100, 1000))
            .SetMergePolicy(NewLogMergePolicy()));
    foreach (string s in vals)
    {
        Document d = new Document();
        d.Add(NewTextField(FIELD, s, Field.Store.YES));
        writer.AddDocument(d);
    }
    writer.Dispose();
    return directory;
}
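Examples 4 and 6 are companion test helpers: GetDirectory builds a small index over vals, and CountHits asserts the hit count of a query against it. A hedged sketch of how they might be combined inside a LuceneTestCase-derived test (the documents and query below are illustrative assumptions):

// Hypothetical call site inside a test method.
string[] docs = { "lucene net", "analyzer test" };
Query q = new TermQuery(new Term(FIELD, "lucene"));
CountHits(new MockAnalyzer(Random()), docs, q, expected: 1);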
Example 7: SetUp

public override void SetUp()
{
    base.SetUp();
    Analyzer = new MockAnalyzer(Random());
    Dir = NewDirectory();
    IndexWriterConfig config = NewIndexWriterConfig(TEST_VERSION_CURRENT, Analyzer);
    config.SetMergePolicy(NewLogMergePolicy()); // we will use docids to validate
    RandomIndexWriter writer = new RandomIndexWriter(Random(), Dir, config);
    writer.AddDocument(Doc("lucene", "lucene is a very popular search engine library"));
    writer.AddDocument(Doc("solr", "solr is a very popular search server and is using lucene"));
    writer.AddDocument(Doc("nutch", "nutch is an internet search engine with web crawler and is using lucene and hadoop"));
    Reader = writer.Reader;
    writer.Dispose();
    Searcher = NewSearcher(Reader);
}
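The Doc helper used above is not shown on this page; a plausible shape, given how it is called, might look like the following (the field names "id" and "body" are assumptions):

// Hypothetical helper: builds a document with an id field and a text body.
private Document Doc(string id, string body)
{
    Document doc = new Document();
    doc.Add(NewStringField("id", id, Field.Store.YES));
    doc.Add(NewTextField("body", body, Field.Store.YES));
    return doc;
}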
Example 8: QueryFactory

/// <summary>
/// Ctor
/// </summary>
/// <param name="queryType">The type of query the factory should return</param>
public QueryFactory(QueryType queryType)
{
    this.queryType = queryType;
    // create the base fields to search against
    baseFieldName = new List<string>();
    baseFieldName.Add("title");
    baseFieldName.Add("description");
    baseFieldName.Add("tags");
    // create the base boost values
    baseFieldBoost = new List<float>();
    baseFieldBoost.Add(8f);
    baseFieldBoost.Add(4f);
    baseFieldBoost.Add(1f);
    analyzer = new DnkAnalyzer();
}
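One plausible way the factory's parallel field/boost lists could be consumed is the MultiFieldQueryParser overload that accepts a boosts dictionary (present in Lucene.Net 3.x); wiring it to QueryFactory's fields this way is an assumption, not code from the factory itself:

// Hypothetical: build a boost map from the parallel lists and parse across all base fields.
IDictionary<string, float> boosts = new Dictionary<string, float>();
for (int i = 0; i < baseFieldName.Count; i++)
    boosts[baseFieldName[i]] = baseFieldBoost[i];
var parser = new MultiFieldQueryParser(Lucene.Net.Util.Version.LUCENE_30,
    baseFieldName.ToArray(), analyzer, boosts);
Query query = parser.Parse("lucene search");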
Example 9: GetAnyTokenStream

/// <summary> A convenience method that tries a number of approaches to getting a token stream.
/// The cost of finding there are no term vectors in the index is minimal (1000 invocations still
/// register 0 ms), so this "lazy" (flexible?) approach to coding is probably acceptable.
/// </summary>
/// <param name="reader"></param>
/// <param name="docId"></param>
/// <param name="field"></param>
/// <param name="analyzer"></param>
/// <returns> null if the field is not stored correctly</returns>
/// <throws> IOException </throws>
public static TokenStream GetAnyTokenStream(IndexReader reader, int docId, string field, Analyzer analyzer)
{
    TokenStream ts = null;
    TermFreqVector tfv = (TermFreqVector) reader.GetTermFreqVector(docId, field);
    if (tfv != null)
    {
        if (tfv is TermPositionVector)
        {
            ts = GetTokenStream((TermPositionVector) tfv);
        }
    }
    // No token info stored, so fall back to analyzing raw content
    if (ts == null)
    {
        ts = GetTokenStream(reader, docId, field, analyzer);
    }
    return ts;
}
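A token stream obtained this way is typically handed to the contrib highlighter. A hedged sketch (Highlighter and QueryScorer come from the highlighter package; the field name and surrounding variables are illustrative):

// Hypothetical: highlight the best fragment of a stored field.
TokenStream ts = GetAnyTokenStream(reader, docId, "contents", analyzer);
Highlighter highlighter = new Highlighter(new QueryScorer(query));
string fragment = highlighter.GetBestFragment(ts, document.Get("contents"));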
Example 10: QueryTermVector

public QueryTermVector(System.String queryString, Analyzer analyzer)
{
    if (analyzer != null)
    {
        TokenStream stream = analyzer.TokenStream("", new System.IO.StringReader(queryString));
        if (stream != null)
        {
            Token next = null;
            System.Collections.ArrayList terms = new System.Collections.ArrayList();
            try
            {
                while ((next = stream.Next()) != null)
                {
                    terms.Add(next.TermText());
                }
                ProcessTerms((System.String[]) terms.ToArray(typeof(System.String)));
            }
            catch (System.IO.IOException)
            {
                // Analysis errors are swallowed; the term vector is simply left empty.
            }
        }
    }
}
Example 11: QueryTermVector

public QueryTermVector(System.String queryString, Analyzer analyzer)
{
    if (analyzer != null)
    {
        TokenStream stream = analyzer.TokenStream("", new System.IO.StringReader(queryString));
        if (stream != null)
        {
            System.Collections.ArrayList terms = new System.Collections.ArrayList();
            try
            {
                Token reusableToken = new Token();
                for (Token nextToken = stream.Next(reusableToken); nextToken != null; nextToken = stream.Next(reusableToken))
                {
                    terms.Add(nextToken.Term());
                }
                ProcessTerms((System.String[]) terms.ToArray(typeof(System.String)));
            }
            catch (System.IO.IOException)
            {
                // Analysis errors are swallowed; the term vector is simply left empty.
            }
        }
    }
}
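Taken together, Examples 1, 2, 10, and 11 show the same QueryTermVector constructor across successive Lucene.Net API generations: the deprecated Token/Next() loop (Example 10), the reusable-Token variant (Example 11), the typeof-based TermAttribute lookup (Example 2), and the current generic AddAttribute<ITermAttribute>() form (Example 1).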
Example 12: SetUp

public override void SetUp()
{
    base.SetUp();
    // prepare a small index with just a few documents.
    dir = new RAMDirectory();
    anlzr = new StandardAnalyzer();
    IndexWriter iw = new IndexWriter(dir, anlzr);
    // Add docs not exactly in natural ID order, to verify we do check the order of docs by scores.
    // Stepping by 4 (mod N_DOCS) visits every slot exactly once only if N_DOCS is coprime with 4,
    // hence the requirement that N_DOCS be prime and greater than 2.
    int remaining = N_DOCS;
    bool[] done = new bool[N_DOCS];
    int i = 0;
    while (remaining > 0)
    {
        if (done[i])
        {
            throw new System.Exception("to set up this test correctly N_DOCS=" + N_DOCS + " must be prime and greater than 2!");
        }
        AddDoc(iw, i);
        done[i] = true;
        i = (i + 4) % N_DOCS;
        remaining--;
    }
    iw.Close();
}
Example 13: SetAnalyzer

/// <summary> Sets the analyzer to use. An analyzer is not required for generating a query with the
/// <see cref="Like(int)"/> method; all other 'like' methods require an analyzer.
/// </summary>
/// <param name="analyzer">the analyzer to use to tokenize text</param>
public void SetAnalyzer(Analyzer analyzer)
{
    this.analyzer = analyzer;
}
Example 14: Parse

/// <summary> Parses a query, searching on the fields specified.
/// Use this if you need to specify certain fields as required,
/// and others as prohibited.
/// <p><pre>
/// Usage:
/// <code>
/// String[] query = {"query1", "query2", "query3"};
/// String[] fields = {"filename", "contents", "description"};
/// BooleanClause.Occur[] flags = {BooleanClause.Occur.SHOULD,
///     BooleanClause.Occur.MUST,
///     BooleanClause.Occur.MUST_NOT};
/// MultiFieldQueryParser.parse(query, fields, flags, analyzer);
/// </code>
/// </pre>
/// <p>
/// The code above would construct the query:
/// <pre>
/// <code>
/// (filename:query1) +(contents:query2) -(description:query3)
/// </code>
/// </pre>
/// </summary>
/// <param name="queries">Query strings to parse</param>
/// <param name="fields">Fields to search on</param>
/// <param name="flags">Flags describing the fields</param>
/// <param name="analyzer">Analyzer to use</param>
/// <throws> ParseException if query parsing fails </throws>
/// <throws> TokenMgrError if query parsing fails </throws>
/// <throws> IllegalArgumentException if the lengths of the queries, fields, and flags arrays differ </throws>
public static Query Parse(System.String[] queries, System.String[] fields, BooleanClause.Occur[] flags, Analyzer analyzer)
{
    if (!(queries.Length == fields.Length && queries.Length == flags.Length))
        throw new System.ArgumentException("queries, fields, and flags arrays must have the same length");
    BooleanQuery bQuery = new BooleanQuery();
    for (int i = 0; i < fields.Length; i++)
    {
        QueryParser qp = new QueryParser(fields[i], analyzer);
        Query q = qp.Parse(queries[i]);
        bQuery.Add(q, flags[i]);
    }
    return bQuery;
}
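The Java-flavored usage shown in the doc comment translates to C# roughly as follows; the field names and analyzer come from the comment above, and the BooleanClause.Occur casing follows older Lucene.Net versions matching this method's signature:

// Hypothetical: multi-field parse with required/optional/prohibited clauses.
string[] queries = { "query1", "query2", "query3" };
string[] fields = { "filename", "contents", "description" };
BooleanClause.Occur[] flags = { BooleanClause.Occur.SHOULD,
                                BooleanClause.Occur.MUST,
                                BooleanClause.Occur.MUST_NOT };
Query q = MultiFieldQueryParser.Parse(queries, fields, flags, analyzer);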
Example 15: MultiFieldQueryParser

/// <deprecated> use <see cref="MultiFieldQueryParser(String[], Analyzer)"/> instead
/// </deprecated>
public MultiFieldQueryParser(System.String f, Analyzer a) : base(f, a)
{
}