

C# Analysis.Analyzer Class Code Examples

This article collects typical usage examples of the Lucene.Net.Analysis.Analyzer class in C#. If you are wondering what the Analyzer class is for, how to use it, or what real-world usage looks like, the hand-picked examples below should help.


The Analyzer class belongs to the Lucene.Net.Analysis namespace. The sections below show 15 code examples of the Analyzer class, sorted by popularity by default.
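
Before diving into the examples, it helps to recall what an Analyzer actually does: it turns a piece of text into a TokenStream of terms. The following minimal sketch is not taken from any of the projects below; it assumes the Lucene.Net 3.0.3 API that most of the examples use, and the field name and sample text are made up purely for illustration:

    using System;
    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Standard;
    using Lucene.Net.Analysis.Tokenattributes;
    using Version = Lucene.Net.Util.Version;

    class AnalyzerDemo
    {
        static void Main()
        {
            Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_30);

            // Ask the analyzer for a token stream over some sample text.
            TokenStream stream = analyzer.TokenStream("content", new StringReader("The Quick Brown Fox"));
            ITermAttribute term = stream.AddAttribute<ITermAttribute>();

            stream.Reset();
            while (stream.IncrementToken())
            {
                // Prints: quick, brown, fox (lower-cased, stop word "The" removed)
                Console.WriteLine(term.Term);
            }
        }
    }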

Example 1: LuceneTesterBase

 public LuceneTesterBase(LuceneDirectory directory, LuceneAnalyzer analyzer, LuceneVersion version)
 {
     Analyzer = analyzer;
     CurrentLuceneVersion = version;
     IndexDirectory = directory;
     Debug = false;
 }
Developer: joshball, Project: Lucene.In.Action.NET, Lines: 7, Source: LuceneTesterBase.cs

Example 2: SetAnalyzerType

        void SetAnalyzerType(Type defaultType, IEnumerable<FieldDetails> fields)
        {
            if (defaultType == null) {
                defaultType = typeof(StandardAnalyzer);
            }

            // create default analyzer
            _defaultAnalyzer = Activator.CreateInstance(defaultType) as Analyzer;
            if (_defaultAnalyzer == null) {
                throw new ArgumentException("defaultType is not an Analyzer type");
            }

            var wrapper = new PerFieldAnalyzerWrapper(_defaultAnalyzer);
            if (fields != null) {
                foreach (var fd in fields) {
                    if (fd.Field.Analyzer != null) {
                        var fieldAnalyzer = CreateAnalyzerFromType(fd.Field.Analyzer);
                        if (fieldAnalyzer != null) {
                            wrapper.AddAnalyzer(fd.Name, fieldAnalyzer);
                        }
                    }
                }
            }
            Analyzer = wrapper;
        }
Developer: Toolate, Project: dotSearch, Lines: 26, Source: DocumentDetails.cs

Example 3: CreateSearchIndex

        public void CreateSearchIndex()
        {
            directory = new RAMDirectory();
            analyzer = new StandardAnalyzer(Version.LUCENE_30);
            var ixw = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED);
            LookupTable = new Dictionary<string, BaseContent>();
            foreach (BaseContent p in Service.PoIs.ToList())
            {
                var document = new Document();
                document.Add(new Field("id", p.Id.ToString(), Field.Store.YES, Field.Index.NO, Field.TermVector.NO));
                string all = p.Name + " ";
                foreach (MetaInfo mi in p.EffectiveMetaInfo)
                {
                    string value;
                    if (mi.Type != MetaTypes.text || !p.Labels.TryGetValue(mi.Label, out value)) continue;
                    document.Add(new Field(mi.Label, value, Field.Store.YES, Field.Index.ANALYZED));
                    all += value + " ";
                }
                document.Add(new Field("All", all, Field.Store.YES, Field.Index.ANALYZED));

                LookupTable[p.Id.ToString()] = p;
                ixw.AddDocument(document);
            }
            ixw.Commit();
        }
Developer: TNOCS, Project: csTouch, Lines: 25, Source: LuceneSearch.cs
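
The index built by CreateSearchIndex is typically queried through the same analyzer, for example with a QueryParser over the catch-all "All" field. The method below is my own sketch rather than part of the csTouch project; it assumes the Lucene.Net 3.0.3 API and reuses the directory, analyzer and LookupTable members created above (it needs using Lucene.Net.QueryParsers and Lucene.Net.Search, with Version aliased to Lucene.Net.Util.Version):

        public List<BaseContent> Search(string searchText)
        {
            var matches = new List<BaseContent>();
            using (var searcher = new IndexSearcher(directory, true)) // read-only searcher
            {
                var parser = new QueryParser(Version.LUCENE_30, "All", analyzer);
                Query query = parser.Parse(QueryParser.Escape(searchText));
                foreach (ScoreDoc hit in searcher.Search(query, 25).ScoreDocs)
                {
                    // Map the stored "id" field back to the original content object.
                    string id = searcher.Doc(hit.Doc).Get("id");
                    BaseContent content;
                    if (LookupTable.TryGetValue(id, out content)) matches.Add(content);
                }
            }
            return matches;
        }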

Example 4: PreProcessUntokenizedTerms

		/// <summary>
		/// Detects untokenized fields and sets them as NotAnalyzed in the analyzer
		/// </summary>
		private static string PreProcessUntokenizedTerms(PerFieldAnalyzerWrapper analyzer, string query, Analyzer keywordAnalyzer)
		{
			var untokenizedMatches = untokenizedQuery.Matches(query);
			if (untokenizedMatches.Count < 1)
			{
				return query;
			}

			var sb = new StringBuilder(query);

			// KeywordAnalyzer will not tokenize the values

			// process in reverse order to leverage match string indexes
			for (int i=untokenizedMatches.Count; i>0; i--)
			{
				Match match = untokenizedMatches[i-1];

				// specify that term for this field should not be tokenized
				analyzer.AddAnalyzer(match.Groups[1].Value, keywordAnalyzer);

				Group term = match.Groups[2];

				// remove enclosing "[[" "]]" from term value (again in reverse order)
				sb.Remove(term.Index+term.Length-2, 2);
				sb.Remove(term.Index, 2);
			}

			return sb.ToString();
		}
Developer: jlundstocholm, Project: ravendb, Lines: 32, Source: QueryBuilder.cs

Example 5: LuceneSearcher

 public LuceneSearcher(DirectoryInfo workingFolder, Analyzer analyzer)
     : base(analyzer)
 {
     _disposer = new DisposableSearcher(this);
     LuceneIndexFolder = new DirectoryInfo(Path.Combine(workingFolder.FullName, "Index"));
     InitializeDirectory();
 }
Developer: jclementson, Project: Examine, Lines: 7, Source: LuceneSearcher.cs

Example 6: LuceneSearcher

 public LuceneSearcher(IndexWriter writer, Analyzer analyzer)
     : base(analyzer)
 {
     if (writer == null) throw new ArgumentNullException("writer");
     _disposer = new DisposableSearcher(this);
     _nrtWriter = writer;
 }
Developer: snowattitudes, Project: Examine, Lines: 7, Source: LuceneSearcher.cs

Example 7: TestFarsiRangeFilterCollating

        public virtual void TestFarsiRangeFilterCollating(Analyzer analyzer, BytesRef firstBeg, BytesRef firstEnd, BytesRef secondBeg, BytesRef secondEnd)
        {
            Directory dir = NewDirectory();
            IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
            Document doc = new Document();
            doc.Add(new TextField("content", "\u0633\u0627\u0628", Field.Store.YES));
            doc.Add(new StringField("body", "body", Field.Store.YES));
            writer.AddDocument(doc);
            writer.Dispose();
            IndexReader reader = DirectoryReader.Open(dir);
            IndexSearcher searcher = new IndexSearcher(reader);
            Query query = new TermQuery(new Term("body", "body"));

            // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
            // orders the U+0698 character before the U+0633 character, so the single
            // index Term below should NOT be returned by a TermRangeFilter with a Farsi
            // Collator (or an Arabic one for the case when a Farsi searcher is not
            // supported).
            ScoreDoc[] result = searcher.Search(query, new TermRangeFilter("content", firstBeg, firstEnd, true, true), 1).ScoreDocs;
            Assert.AreEqual(0, result.Length, "The index Term should not be included.");

            result = searcher.Search(query, new TermRangeFilter("content", secondBeg, secondEnd, true, true), 1).ScoreDocs;
            Assert.AreEqual(1, result.Length, "The index Term should be included.");

            reader.Dispose();
            dir.Dispose();
        }
Developer: joyanta, Project: lucene.net, Lines: 27, Source: CollationTestBase.cs

Example 8: ObjectQueryTranslator

        public ObjectQueryTranslator(string[] defaultFieldNames, Analyzer defaultAnalyzer)
        {
            this._defaultAnalyzer = defaultAnalyzer;
            this._defaultFieldNames = defaultFieldNames;

            _root = new BooleanQuery();
        }
Developer: Toolate, Project: dotSearch, Lines: 7, Source: ObjectQueryTranslator.cs

Example 9: CreateIndex

        public void CreateIndex(Analyzer analyzer)
        {
            FSDirectory fsDir = new SimpleFSDirectory(new DirectoryInfo(_indexerFolder));
            IndexWriter indexWriter = new IndexWriter(fsDir, analyzer, true, Lucene.Net.Index.IndexWriter.MaxFieldLength.UNLIMITED);

            string[] files = System.IO.Directory.GetFiles(_textFilesFolder, Config.FileSearchPattern, SearchOption.AllDirectories);
            foreach (string file in files)
            {
                string name = new FileInfo(file).Name;
                string content = File.ReadAllText(file);

                Document doc = new Document();
                doc.Add(new Field(Config.Field_Path, file, Field.Store.YES, Field.Index.NOT_ANALYZED));
                doc.Add(new Field(Config.Field_Name, name, Field.Store.YES, Field.Index.ANALYZED));
                doc.Add(new Field(Config.Field_Content, content, Field.Store.NO, Field.Index.ANALYZED));

                indexWriter.AddDocument(doc);

                Console.WriteLine("{0} - {1}", file, name);
            }

            indexWriter.Optimize();
            indexWriter.Dispose();

            Console.WriteLine("File count: {0}", files.Length);
        }
Developer: NDChen, Project: MyDemoCode, Lines: 26, Source: IndexHelper.cs

Example 10: InstancePerFieldAnalyzerWrapper

 public InstancePerFieldAnalyzerWrapper()
 {
     var analyzer = new Lucene.Net.Analysis.PerFieldAnalyzerWrapper(new Synonyms.SynonymAnalyzer(new Synonyms.XmlSynonymEngine()));
     analyzer.AddAnalyzer("cota", new Lucene.Net.Analysis.KeywordAnalyzer());
     analyzer.AddAnalyzer("codigo", new Lucene.Net.Analysis.KeywordAnalyzer());
     instancePerFieldAnalyzerWrapper = analyzer;
 }
Developer: aureliopires, Project: gisa, Lines: 7, Source: NivelDocumentalSearcher.cs
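
One plausible way to put this wrapper to work, sketched here as an assumption rather than taken from the gisa project: pass it to an IndexWriter so that "cota" and "codigo" are indexed as single keyword terms while every other field goes through the SynonymAnalyzer. The Lucene.Net 3.0.x API is assumed, and the directory and field values are purely illustrative (needs using Lucene.Net.Documents, Lucene.Net.Index and Lucene.Net.Store):

 var directory = new RAMDirectory();
 using (var writer = new IndexWriter(directory, instancePerFieldAnalyzerWrapper,
                                     true, IndexWriter.MaxFieldLength.UNLIMITED))
 {
     var doc = new Document();
     // Routed to the KeywordAnalyzer: the whole value is indexed as one term.
     doc.Add(new Field("cota", "PT/ABC/001", Field.Store.YES, Field.Index.ANALYZED));
     // Any other field falls back to the SynonymAnalyzer passed to the wrapper.
     doc.Add(new Field("titulo", "registo de nivel documental", Field.Store.YES, Field.Index.ANALYZED));
     writer.AddDocument(doc);
     writer.Commit();
 }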

Example 11: GetParser

 public virtual QueryParser GetParser(Analyzer a)
 {
     if (a == null) a = new MockAnalyzer(Random(), MockTokenizer.SIMPLE, true);
     QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, DefaultField, a);
     qp.DefaultOperator = QueryParserBase.OR_OPERATOR;
     return qp;
 }
Developer: ChristopherHaws, Project: lucenenet, Lines: 7, Source: TestQueryParser.cs
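
For reference, a parser configured like this (OR as the default operator) treats every whitespace-separated term as an optional clause. The standalone snippet below is my own sketch using the Lucene.Net 3.0.3 API and a StandardAnalyzer in place of the test fixture's MockAnalyzer and TEST_VERSION_CURRENT:

 using System;
 using Lucene.Net.Analysis.Standard;
 using Lucene.Net.QueryParsers;
 using Lucene.Net.Search;
 using Version = Lucene.Net.Util.Version;

 var parser = new QueryParser(Version.LUCENE_30, "content", new StandardAnalyzer(Version.LUCENE_30));

 // With OR as the default operator, both terms become optional BooleanQuery clauses.
 Query query = parser.Parse("apache lucene");
 Console.WriteLine(query); // content:apache content:lucene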

Example 12: WriteIndex

 public static TimeSpan WriteIndex(Analyzer analyzer, IndexerSet indexer, Source source, bool create)
 {
     try
     {
         //ChineseSegAnalysis csa = new ChineseSegAnalysis(index.BasePath, index.NamePath, index.NumberPath, index.CustomPaths);
         //csa.FilterFilePath = index.FilterPath;
         //Analyzer analyzer = csa.GetAnalyzer();
         string connect = source.GetConnString();
         DateTime start;
         if (create)
         {
             DBCreateIndexer dbcIndexer = new DBCreateIndexer(analyzer, source.DBType, connect, index.Path,index.Caption);
             dbcIndexer.PrimaryKey = source.PrimaryKey;
             start = DateTime.Now;
             dbcIndexer.WriteResults(source.Query,indexer.MaxFieldLength,indexer.RamBufferSize, indexer.MergeFactor, indexer.MaxBufferedDocs);
             return DateTime.Now - start;
         }
         else
         {
             DBIncremIndexer dbiIndexer = new DBIncremIndexer(analyzer, source.DBType, connect, index.Path,index.Caption);
             dbiIndexer.PrimaryKey = source.PrimaryKey;
             start = DateTime.Now;
             dbiIndexer.WriteResults(source.Query, indexer.MaxFieldLength, indexer.RamBufferSize, indexer.MergeFactor, indexer.MaxBufferedDocs);                 
             return DateTime.Now - start;
         }
     }
      catch (Exception)
      {
          // Rethrow without resetting the original stack trace.
          throw;
      }
 }
Developer: vikasraz, Project: indexsearchutils, Lines: 31, Source: IWriter.cs

Example 13: WriteIndexWithEvent

 public static TimeSpan WriteIndexWithEvent(Analyzer analyzer, IndexerSet indexer, Source source, bool create,
                                         IndexCompletedEventHandler OnIndexCompleted,
                                         IndexProgressChangedEventHandler OnProgressChanged)
 {
     try
     {
         //ChineseSegAnalysis csa = new ChineseSegAnalysis(index.BasePath, index.NamePath, index.NumberPath, index.CustomPaths);
         //csa.FilterFilePath = index.FilterPath;
         //Analyzer analyzer = csa.GetAnalyzer();
         string connect = source.GetConnString();
         DateTime start;
         if (create)
         {
             DBRamCreateIndexer dbcIndexer = new DBRamCreateIndexer(analyzer, source.DBType, connect, index.Path,index.Caption);
             dbcIndexer.OnIndexCompleted += OnIndexCompleted;
             dbcIndexer.OnProgressChanged += OnProgressChanged;
             start = DateTime.Now;
             dbcIndexer.WriteResultsWithEvent(source.Query, indexer.MaxFieldLength, indexer.RamBufferSize, indexer.MergeFactor, indexer.MaxBufferedDocs);
             return DateTime.Now - start;
         }
         else
         {
             DBRamIncremIndexer dbiIndexer = new DBRamIncremIndexer(analyzer, source.DBType, connect, index.Path,index.Caption);
             dbiIndexer.OnIndexCompleted += OnIndexCompleted;
             dbiIndexer.OnProgressChanged += OnProgressChanged;
             start = DateTime.Now;
             dbiIndexer.WriteResultsWithEvent(source.Query, indexer.MaxFieldLength, indexer.RamBufferSize, indexer.MergeFactor, indexer.MaxBufferedDocs);
             return DateTime.Now - start;
         }
     }
      catch (Exception)
      {
          // Rethrow without resetting the original stack trace.
          throw;
      }
 }
Developer: vikasraz, Project: indexsearchutils, Lines: 35, Source: RamIWriter.cs

Example 14: LuceneSearcher

 public LuceneSearcher(Lucene.Net.Store.Directory luceneDirectory, Analyzer analyzer)
     : base(analyzer)
 {
     _disposer = new DisposableSearcher(this);
     LuceneIndexFolder = null;
     _luceneDirectory = luceneDirectory;
 }
Developer: bowserm, Project: Examine, Lines: 7, Source: LuceneSearcher.cs

Example 15: InitSearchServiceAnalyzer

        private void InitSearchServiceAnalyzer(Type indexingServiceSettingsType, Analyzer defaultAnalyzer, Analyzer textAnalyzer)
        {
            var perFieldAnalyzerWrapper = new PerFieldAnalyzerWrapper(defaultAnalyzer);
            perFieldAnalyzerWrapper.AddAnalyzer("EPISERVER_SEARCH_ID", new KeywordAnalyzer());
            perFieldAnalyzerWrapper.AddAnalyzer("EPISERVER_SEARCH_CULTURE", new KeywordAnalyzer());
            perFieldAnalyzerWrapper.AddAnalyzer("EPISERVER_SEARCH_REFERENCEID", new KeywordAnalyzer());
            perFieldAnalyzerWrapper.AddAnalyzer("EPISERVER_SEARCH_AUTHORSTORAGE", new KeywordAnalyzer());
            perFieldAnalyzerWrapper.AddAnalyzer("EPISERVER_SEARCH_CATEGORIES", new WhitespaceAnalyzer());
            perFieldAnalyzerWrapper.AddAnalyzer("EPISERVER_SEARCH_ACL", new WhitespaceAnalyzer());
            perFieldAnalyzerWrapper.AddAnalyzer("EPISERVER_SEARCH_VIRTUALPATH", new WhitespaceAnalyzer());
            perFieldAnalyzerWrapper.AddAnalyzer("EPISERVER_SEARCH_TYPE", new WhitespaceAnalyzer());
            perFieldAnalyzerWrapper.AddAnalyzer("EPISERVER_SEARCH_CREATED", new WhitespaceAnalyzer());
            perFieldAnalyzerWrapper.AddAnalyzer("EPISERVER_SEARCH_MODIFIED", new WhitespaceAnalyzer());
            perFieldAnalyzerWrapper.AddAnalyzer("EPISERVER_SEARCH_PUBLICATIONEND", new WhitespaceAnalyzer());
            perFieldAnalyzerWrapper.AddAnalyzer("EPISERVER_SEARCH_PUBLICATIONSTART", new WhitespaceAnalyzer());
            perFieldAnalyzerWrapper.AddAnalyzer("EPISERVER_SEARCH_ITEMSTATUS", new WhitespaceAnalyzer());

            perFieldAnalyzerWrapper.AddAnalyzer("EPISERVER_SEARCH_TITLE", textAnalyzer);
            perFieldAnalyzerWrapper.AddAnalyzer("EPISERVER_SEARCH_DISPLAYTEXT", textAnalyzer);
            perFieldAnalyzerWrapper.AddAnalyzer("EPISERVER_SEARCH_AUTHORS", textAnalyzer);
            perFieldAnalyzerWrapper.AddAnalyzer("EPISERVER_SEARCH_DEFAULT", textAnalyzer);

            indexingServiceSettingsType
                .GetField("_analyzer", BindingFlags.Static | BindingFlags.NonPublic)
                .SetValue(null, perFieldAnalyzerWrapper);
        }
Developer: javafun, Project: EPiSearch, Lines: 26, Source: SearchCustomizer.cs


Note: The Lucene.Net.Analysis.Analyzer class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Please consult the License of the corresponding project before redistributing or reusing the code. Do not reproduce this article without permission.