当前位置: 首页>>代码示例>>C#>>正文


C# Analysis.Analyzer类代码示例

本文整理汇总了C#中Lucene.Net.Analysis.Analyzer的典型用法代码示例。如果您正苦于以下问题:C# Lucene.Net.Analysis.Analyzer类的具体用法?C# Lucene.Net.Analysis.Analyzer怎么用?C# Lucene.Net.Analysis.Analyzer使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


Lucene.Net.Analysis.Analyzer类属于Lucene.Net.Analysis命名空间,在下文中一共展示了Lucene.Net.Analysis.Analyzer类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。

示例1: QueryTermVector

 /// <summary>
 /// Builds a term vector for <paramref name="queryString"/> by tokenizing it with
 /// <paramref name="analyzer"/>. A null analyzer or a null token stream yields an
 /// empty vector (ProcessTerms is simply not called).
 /// </summary>
 /// <param name="queryString">The raw query text to tokenize.</param>
 /// <param name="analyzer">The analyzer used to produce tokens; may be null.</param>
 public QueryTermVector(System.String queryString, Analyzer analyzer)
 {
     if (analyzer != null)
     {
         TokenStream stream = analyzer.TokenStream("", new System.IO.StringReader(queryString));
         if (stream != null)
         {
             IList<string> terms = new List<string>();
             try
             {
                 stream.Reset();
                 ITermAttribute termAtt = stream.AddAttribute<ITermAttribute>();

                 while (stream.IncrementToken())
                 {
                     terms.Add(termAtt.Term);
                 }
                 ProcessTerms(terms.ToArray());
             }
             catch (System.IO.IOException)
             {
                 // Deliberate best-effort: an unreadable stream produces an empty vector.
             }
             finally
             {
                 stream.Dispose(); // FIX: the token stream was never released (resource leak)
             }
         }
     }
 }
开发者ID:Cefa68000,项目名称:lucenenet,代码行数:29,代码来源:QueryTermVector.cs

示例2: QueryTermVector

		/// <summary>
		/// Builds a term vector for <paramref name="queryString"/> by tokenizing it with
		/// <paramref name="analyzer"/>. A null analyzer or a null token stream yields an
		/// empty vector (ProcessTerms is simply not called).
		/// </summary>
		/// <param name="queryString">The raw query text to tokenize.</param>
		/// <param name="analyzer">The analyzer used to produce tokens; may be null.</param>
		public QueryTermVector(System.String queryString, Analyzer analyzer)
		{
			if (analyzer != null)
			{
				TokenStream stream = analyzer.TokenStream("", new System.IO.StringReader(queryString));
				if (stream != null)
				{
					System.Collections.ArrayList terms = new System.Collections.ArrayList();
					try
					{
						stream.Reset();
						TermAttribute termAtt = (TermAttribute) stream.AddAttribute(typeof(TermAttribute));

						while (stream.IncrementToken())
						{
							terms.Add(termAtt.Term());
						}
						ProcessTerms((System.String[]) terms.ToArray(typeof(System.String)));
					}
					catch (System.IO.IOException) // FIX: dropped unused variable 'e' (compiler warning CS0168)
					{
						// Deliberate best-effort: an unreadable stream produces an empty vector.
					}
					finally
					{
						stream.Close(); // FIX: the token stream was never closed (resource leak)
					}
				}
			}
		}
开发者ID:Inzaghi2012,项目名称:teamlab.v7.5,代码行数:29,代码来源:QueryTermVector.cs

示例3: DocumentWriter

		/// <summary>
		/// Test-only constructor: wires up the writer with an explicit directory,
		/// analyzer, similarity function and field-length cap.
		/// </summary>
		/// <param name="directory">Target directory the document information is written to.</param>
		/// <param name="analyzer">Analyzer applied to the document's fields.</param>
		/// <param name="similarity">Similarity implementation used for scoring.</param>
		/// <param name="maxFieldLength">Upper bound on the number of tokens per field.</param>
		public DocumentWriter(Directory directory, Analyzer analyzer, Similarity similarity, int maxFieldLength)
		{
			InitBlock();
			this.maxFieldLength = maxFieldLength;
			this.similarity = similarity;
			this.analyzer = analyzer;
			this.directory = directory;
		}
开发者ID:zweib730,项目名称:beagrep,代码行数:19,代码来源:DocumentWriter.cs

示例4: CountHits

 /// <summary>
 /// Indexes <paramref name="docs"/> with <paramref name="analyzer"/>, runs
 /// <paramref name="q"/> against the index and asserts that exactly
 /// <paramref name="expected"/> documents match.
 /// </summary>
 private void CountHits(Analyzer analyzer, string[] docs, Query q, int expected)
 {
     Directory d = GetDirectory(analyzer, docs);
     try
     {
         IndexReader r = DirectoryReader.Open(d);
         try
         {
             IndexSearcher s = new IndexSearcher(r);
             TotalHitCountCollector c = new TotalHitCountCollector();
             s.Search(q, c);
             Assert.AreEqual(expected, c.TotalHits, q.ToString());
         }
         finally
         {
             r.Dispose(); // FIX: reader/directory were leaked when the assertion above failed
         }
     }
     finally
     {
         d.Dispose();
     }
 }
开发者ID:joyanta,项目名称:lucene.net,代码行数:11,代码来源:FuzzyTermOnShortTermsTest.cs

示例5: Init

        /// <summary>
        /// Initializes the static analyzer and search helper. (Original comment was
        /// mojibake; presumably "initialize the tokenizer" — TODO confirm intent.)
        /// </summary>
        public static void Init()
        {
            // Tokenizer/analyzer instance. (Was a garbled XML doc comment misplaced
            // inside the method body; XML docs are only valid on declarations.)
             OneAnalyzer = new Lucene.Net.Analysis.XunLongX.XunLongAnalyzer();

            // Search helper instance. (Same garbled-comment repair as above.)
            mSearch = new ClassSearch();
        }
开发者ID:kiichi7,项目名称:Search-Engine,代码行数:15,代码来源:ClassST.cs

示例6: GetDirectory

        /// <summary>
        /// Builds a fresh random directory containing one text document per entry of
        /// <paramref name="vals"/>, indexed with <paramref name="analyzer"/>.
        /// </summary>
        /// <param name="analyzer">Analyzer used by the index writer.</param>
        /// <param name="vals">One indexed document is created per string.</param>
        /// <returns>The populated directory; caller is responsible for disposing it.</returns>
        public static Directory GetDirectory(Analyzer analyzer, string[] vals)
        {
            Directory directory = NewDirectory();
            // FIX: 'using' guarantees the writer is disposed even if AddDocument throws
            // (the original leaked the writer on failure).
            using (RandomIndexWriter writer = new RandomIndexWriter(Random(), directory, NewIndexWriterConfig(TEST_VERSION_CURRENT, analyzer).SetMaxBufferedDocs(TestUtil.NextInt(Random(), 100, 1000)).SetMergePolicy(NewLogMergePolicy())))
            {
                foreach (string s in vals)
                {
                    Document d = new Document();
                    d.Add(NewTextField(FIELD, s, Field.Store.YES));
                    writer.AddDocument(d);
                }
            }
            return directory;
        }
开发者ID:Cefa68000,项目名称:lucenenet,代码行数:14,代码来源:FuzzyTermOnShortTermsTest.cs

示例7: SetUp

 /// <summary>
 /// Builds a small three-document index (lucene / solr / nutch) shared by the
 /// tests, then opens a reader and searcher over it.
 /// </summary>
 public override void SetUp()
 {
     base.SetUp();

     Analyzer = new MockAnalyzer(Random());
     Dir = NewDirectory();

     IndexWriterConfig iwConfig = NewIndexWriterConfig(TEST_VERSION_CURRENT, Analyzer);
     iwConfig.SetMergePolicy(NewLogMergePolicy()); // we will use docids to validate

     RandomIndexWriter iw = new RandomIndexWriter(Random(), Dir, iwConfig);
     iw.AddDocument(Doc("lucene", "lucene is a very popular search engine library"));
     iw.AddDocument(Doc("solr", "solr is a very popular search server and is using lucene"));
     iw.AddDocument(Doc("nutch", "nutch is an internet search engine with web crawler and is using lucene and hadoop"));
     Reader = iw.Reader;
     iw.Dispose();

     Searcher = NewSearcher(Reader);
 }
开发者ID:WakeflyCBass,项目名称:lucenenet,代码行数:15,代码来源:TestBooleanQueryVisitSubscorers.cs

示例8: QueryFactory

        /// <summary>
        /// Constructor. Configures the base search fields ("title", "description",
        /// "tags") with their corresponding boost values and creates the analyzer.
        /// </summary>
        /// <param name="queryType">The type of query the factory should return</param>
        public QueryFactory(QueryType queryType)
        {
            this.queryType = queryType;

            // Base fields to search against; boosts are positional — title is
            // weighted highest, then description, then tags.
            baseFieldName = new List<string> { "title", "description", "tags" };
            baseFieldBoost = new List<float> { 8f, 4f, 1f };

            analyzer = new DnkAnalyzer();
        }
开发者ID:Letractively,项目名称:dotnetkicks,代码行数:22,代码来源:QueryFactory.cs

示例9: GetAnyTokenStream

        /// <summary> A convenience method that tries a number of approaches to getting a token stream.
        /// The cost of finding there are no termVectors in the index is minimal (1000 invocations still
        /// registers 0 ms). So this "lazy" (flexible?) approach to coding is probably acceptable.
        /// Prefers a stored term-position vector; falls back to re-analyzing the raw content.
        /// </summary>
        /// <param name="reader">Index reader to pull the term vector (or raw content) from.</param>
        /// <param name="docId">Document id within <paramref name="reader"/>.</param>
        /// <param name="field">Name of the field to stream tokens for.</param>
        /// <param name="analyzer">Analyzer used only when no positional term vector is stored.</param>
        /// <returns> null if field not stored correctly </returns>
        /// <throws>  IOException </throws>
        public static TokenStream GetAnyTokenStream(IndexReader reader, int docId, string field, Analyzer analyzer)
        {
            TokenStream ts = null;

            // Fast path: a stored term vector with positions can be replayed directly.
            TermFreqVector vector = (TermFreqVector) reader.GetTermFreqVector(docId, field);
            if (vector is TermPositionVector)
            {
                ts = GetTokenStream((TermPositionVector) vector);
            }

            // No (positional) token info stored, so fall back to analyzing raw content.
            return ts ?? GetTokenStream(reader, docId, field, analyzer);
        }
开发者ID:usmanghani,项目名称:Misc,代码行数:30,代码来源:TokenSources.cs

示例10: QueryTermVector

		/// <summary>
		/// Builds a term vector for <paramref name="queryString"/> by tokenizing it with
		/// <paramref name="analyzer"/>. A null analyzer or a null token stream yields an
		/// empty vector (ProcessTerms is simply not called).
		/// </summary>
		/// <param name="queryString">The raw query text to tokenize.</param>
		/// <param name="analyzer">The analyzer used to produce tokens; may be null.</param>
		public QueryTermVector(System.String queryString, Analyzer analyzer)
		{
			if (analyzer != null)
			{
				TokenStream stream = analyzer.TokenStream("", new System.IO.StringReader(queryString));
				if (stream != null)
				{
					System.Collections.ArrayList terms = new System.Collections.ArrayList();
					try
					{
						Token next;
						while ((next = stream.Next()) != null)
						{
							terms.Add(next.TermText());
						}
						ProcessTerms((System.String[]) terms.ToArray(typeof(System.String)));
					}
					catch (System.IO.IOException)
					{
						// Deliberate best-effort: an unreadable stream produces an empty vector.
					}
					finally
					{
						stream.Close(); // FIX: the token stream was never closed (resource leak)
					}
				}
			}
		}
开发者ID:zweib730,项目名称:beagrep,代码行数:23,代码来源:QueryTermVector.cs

示例11: QueryTermVector

 /// <summary>
 /// Builds a term vector for <paramref name="queryString"/> by tokenizing it with
 /// <paramref name="analyzer"/>, reusing a single Token instance per increment.
 /// A null analyzer or a null token stream yields an empty vector.
 /// </summary>
 /// <param name="queryString">The raw query text to tokenize.</param>
 /// <param name="analyzer">The analyzer used to produce tokens; may be null.</param>
 public QueryTermVector(System.String queryString, Analyzer analyzer)
 {
     if (analyzer != null)
     {
         TokenStream stream = analyzer.TokenStream("", new System.IO.StringReader(queryString));
         if (stream != null)
         {
             System.Collections.ArrayList terms = new System.Collections.ArrayList();
             try
             {
                 Token reusableToken = new Token();
                 for (Token nextToken = stream.Next(reusableToken); nextToken != null; nextToken = stream.Next(reusableToken))
                 {
                     terms.Add(nextToken.Term());
                 }
                 ProcessTerms((System.String[]) terms.ToArray(typeof(System.String)));
             }
             catch (System.IO.IOException)
             {
                 // Deliberate best-effort: an unreadable stream produces an empty vector.
             }
             finally
             {
                 stream.Close(); // FIX: the token stream was never closed (resource leak)
             }
         }
     }
 }
开发者ID:cqm0609,项目名称:lucene-file-finder,代码行数:23,代码来源:QueryTermVector.cs

示例12: SetUp

		/// <summary>
		/// Prepares a small RAM index of N_DOCS documents, added deliberately out of
		/// natural ID order (stride-4 walk) so the tests verify ordering by score,
		/// not by insertion order.
		/// </summary>
		public override void  SetUp()
		{
			base.SetUp();
			// prepare a small index with just a few documents.
			dir = new RAMDirectory();
			anlzr = new StandardAnalyzer();
			IndexWriter iw = new IndexWriter(dir, anlzr);
			try
			{
				// add docs not exactly in natural ID order, to verify we do check the order of docs by scores
				int remaining = N_DOCS;
				bool[] done = new bool[N_DOCS];
				int i = 0;
				while (remaining > 0)
				{
					if (done[i])
					{
						// FIX: message said "primary" where "prime" is meant — the stride-4
						// walk only visits every slot exactly once when N_DOCS is prime (> 2).
						throw new System.Exception("to set this test correctly N_DOCS=" + N_DOCS + " must be prime and greater than 2!");
					}
					AddDoc(iw, i);
					done[i] = true;
					i = (i + 4) % N_DOCS;
					remaining--;
				}
			}
			finally
			{
				iw.Close(); // FIX: writer was leaked when setup threw mid-loop
			}
		}
开发者ID:vikasraz,项目名称:indexsearchutils,代码行数:24,代码来源:FunctionTestSetup.cs

示例13: SetAnalyzer

 /// <summary> Sets the analyzer to use. An analyzer is not required for generating a query with
 /// the <see cref="Like(int)"/> method; all other 'like' methods require an analyzer.
 /// (Repaired leftover Javadoc <c>{@link}</c> syntax into C# XML-doc form.)
 /// </summary>
 /// <param name="analyzer">the analyzer to use to tokenize text.</param>
 public void  SetAnalyzer(Analyzer analyzer)
 {
     this.analyzer = analyzer;
 }
开发者ID:vikasraz,项目名称:indexsearchutils,代码行数:10,代码来源:MoreLikeThis.cs

示例14: Parse

		/// <summary> Parses a query, searching on the fields specified.
		/// Use this if you need to specify certain fields as required,
		/// and others as prohibited.
		/// <example>
		/// Usage:
		/// <code>
		/// String[] query = {"query1", "query2", "query3"};
		/// String[] fields = {"filename", "contents", "description"};
		/// BooleanClause.Occur[] flags = {BooleanClause.Occur.SHOULD,
		/// BooleanClause.Occur.MUST,
		/// BooleanClause.Occur.MUST_NOT};
		/// MultiFieldQueryParser.parse(query, fields, flags, analyzer);
		/// </code>
		/// The code above would construct a query:
		/// <code>
		/// (filename:query1) +(contents:query2) -(description:query3)
		/// </code>
		/// </example>
		/// </summary>
		/// <param name="queries">Queries string to parse</param>
		/// <param name="fields">Fields to search on</param>
		/// <param name="flags">Flags describing the fields</param>
		/// <param name="analyzer">Analyzer to use</param>
		/// <throws>  ParseException if query parsing fails </throws>
		/// <throws>  TokenMgrError if query parsing fails </throws>
		/// <throws>  IllegalArgumentException if the length of the queries, fields, and flags arrays differ </throws>
		public static Query Parse(System.String[] queries, System.String[] fields, BooleanClause.Occur[] flags, Analyzer analyzer)
		{
			// All three arrays must be parallel: one query and one flag per field.
			if (!(queries.Length == fields.Length && queries.Length == flags.Length))
				// FIX: removed duplicated word in the message ("have have")
				throw new System.ArgumentException("queries, fields, and flags array have different length");
			BooleanQuery bQuery = new BooleanQuery();
			for (int i = 0; i < fields.Length; i++)
			{
				QueryParser qp = new QueryParser(fields[i], analyzer);
				Query q = qp.Parse(queries[i]);
				bQuery.Add(q, flags[i]);
			}
			return bQuery;
		}
开发者ID:ArsenShnurkov,项目名称:beagle-1,代码行数:49,代码来源:MultiFieldQueryParser.cs

示例15: MultiFieldQueryParser

		/// <summary>
		/// Single-field convenience constructor; simply delegates to the base parser.
		/// </summary>
		/// <deprecated> use the MultiFieldQueryParser(String[], Analyzer) overload instead
		/// (repaired leftover Javadoc {@link} syntax)</deprecated>
		public MultiFieldQueryParser(System.String f, Analyzer a):base(f, a)
		{
		}


注:本文中的Lucene.Net.Analysis.Analyzer类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。