

Java LowerCaseFilterFactory Class Code Examples

This article collects typical usage examples of the Java class org.apache.lucene.analysis.core.LowerCaseFilterFactory. If you are wondering what LowerCaseFilterFactory is for, how to use it, or what real-world code that uses it looks like, the curated class examples below should help.


The LowerCaseFilterFactory class belongs to the org.apache.lucene.analysis.core package. A total of 13 code examples of the class are shown below, ordered by popularity by default.
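Before the per-project examples, here is a minimal, self-contained sketch of how the factory is typically driven by hand. This is a sketch only, assuming a Lucene 5.x-or-later analysis-common dependency on the classpath; the LowerCaseFilterFactoryDemo class name and the sample text are made up for this article.

import java.io.StringReader;
import java.util.HashMap;
import java.util.Map;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.LowerCaseFilterFactory;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class LowerCaseFilterFactoryDemo {
    public static void main(String[] args) throws Exception {
        // The factory is configured through a String-to-String parameter map.
        // LowerCaseFilterFactory takes no parameters of its own; on older
        // Lucene 4.x releases you also need a "luceneMatchVersion" entry in
        // the map, as Example 1 below does.
        Map<String, String> params = new HashMap<>();
        LowerCaseFilterFactory factory = new LowerCaseFilterFactory(params);

        WhitespaceTokenizer tokenizer = new WhitespaceTokenizer();
        tokenizer.setReader(new StringReader("Hello Lucene WORLD"));

        // create(...) wraps the incoming TokenStream in a LowerCaseFilter.
        try (TokenStream stream = factory.create(tokenizer)) {
            CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
            stream.reset();
            while (stream.incrementToken()) {
                System.out.println(term); // prints: hello, lucene, world
            }
            stream.end();
        }
    }
}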

Example 1: SimpleAnalyzer

import org.apache.lucene.analysis.core.LowerCaseFilterFactory; // import the required package/class
public SimpleAnalyzer(boolean lowerCase) {
    Map<String, String> parameters = new HashMap<String, String>();
    parameters.put(PatternTokenizerFactory.PATTERN, PATTERN);
    parameters.put(PatternTokenizerFactory.GROUP, "0");
    parameters.put(AbstractAnalysisFactory.LUCENE_MATCH_VERSION_PARAM,
            version.name());
    tokenizerFactory = new PatternTokenizerFactory(parameters);
    if (lowerCase) {
        parameters = new HashMap<String, String>();
        parameters.put(AbstractAnalysisFactory.LUCENE_MATCH_VERSION_PARAM,
                version.name());
        lowerCaseFilterFactory = new LowerCaseFilterFactory(parameters);
    } else {
        lowerCaseFilterFactory = null;
    }
}
 
Developer: dice-group, Project: Palmetto, Lines: 17, Source: SimpleAnalyzer.java

Example 2: getSearchMapping

import org.apache.lucene.analysis.core.LowerCaseFilterFactory; // import the required package/class
@Factory
public SearchMapping getSearchMapping() {
    final SearchMapping mapping = new SearchMapping();
    mapping
            .analyzerDef("english", StandardTokenizerFactory.class)
                .filter(LowerCaseFilterFactory.class)
                .filter(SnowballPorterFilterFactory.class)
            .analyzerDef("german", StandardTokenizerFactory.class)
                .filter(LowerCaseFilterFactory.class)
                .filter(GermanStemFilterFactory.class);
    return mapping;
}
 
Developer: EuregJUG-Maas-Rhine, Project: site, Lines: 13, Source: DefaultSearchMapping.java

Example 3: testLookupTokenFilter

import org.apache.lucene.analysis.core.LowerCaseFilterFactory; // import the required package/class
public void testLookupTokenFilter() {
  assertSame(LowerCaseFilterFactory.class, TokenFilterFactory.forName("Lowercase", versionArgOnly()).getClass());
  assertSame(LowerCaseFilterFactory.class, TokenFilterFactory.forName("LOWERCASE", versionArgOnly()).getClass());
  assertSame(LowerCaseFilterFactory.class, TokenFilterFactory.forName("lowercase", versionArgOnly()).getClass());
  
  assertSame(RemoveDuplicatesTokenFilterFactory.class, TokenFilterFactory.forName("RemoveDuplicates", versionArgOnly()).getClass());
  assertSame(RemoveDuplicatesTokenFilterFactory.class, TokenFilterFactory.forName("REMOVEDUPLICATES", versionArgOnly()).getClass());
  assertSame(RemoveDuplicatesTokenFilterFactory.class, TokenFilterFactory.forName("removeduplicates", versionArgOnly()).getClass());
}
 
Developer: europeana, Project: search, Lines: 10, Source: TestAnalysisSPILoader.java

Example 4: testLookupTokenFilterClass

import org.apache.lucene.analysis.core.LowerCaseFilterFactory; // import the required package/class
public void testLookupTokenFilterClass() {
  assertSame(LowerCaseFilterFactory.class, TokenFilterFactory.lookupClass("Lowercase"));
  assertSame(LowerCaseFilterFactory.class, TokenFilterFactory.lookupClass("LOWERCASE"));
  assertSame(LowerCaseFilterFactory.class, TokenFilterFactory.lookupClass("lowercase"));
  
  assertSame(RemoveDuplicatesTokenFilterFactory.class, TokenFilterFactory.lookupClass("RemoveDuplicates"));
  assertSame(RemoveDuplicatesTokenFilterFactory.class, TokenFilterFactory.lookupClass("REMOVEDUPLICATES"));
  assertSame(RemoveDuplicatesTokenFilterFactory.class, TokenFilterFactory.lookupClass("removeduplicates"));
}
 
Developer: europeana, Project: search, Lines: 10, Source: TestAnalysisSPILoader.java

Example 5: testQueryCopiedToMulti

import org.apache.lucene.analysis.core.LowerCaseFilterFactory; // import the required package/class
@Test
public void testQueryCopiedToMulti() {
  SchemaField field = h.getCore().getLatestSchema().getField("content_charfilter");
  Analyzer analyzer = ((TextField)field.getType()).getMultiTermAnalyzer();
  assertTrue(analyzer instanceof TokenizerChain);
  assertTrue(((TokenizerChain) analyzer).getTokenizerFactory() instanceof KeywordTokenizerFactory);
  TokenizerChain tc = (TokenizerChain) analyzer;
  for (TokenFilterFactory factory : tc.getTokenFilterFactories()) {
    assertTrue(factory instanceof LowerCaseFilterFactory);
  }

  assertTrue(tc.getCharFilterFactories().length == 1);
  assertTrue(tc.getCharFilterFactories()[0] instanceof MappingCharFilterFactory);
}
 
Developer: europeana, Project: search, Lines: 15, Source: MultiTermTest.java

Example 6: testDefaultCopiedToMulti

import org.apache.lucene.analysis.core.LowerCaseFilterFactory; // import the required package/class
@Test
public void testDefaultCopiedToMulti() {
  SchemaField field = h.getCore().getLatestSchema().getField("content_ws");
  Analyzer analyzer = ((TextField)field.getType()).getMultiTermAnalyzer();
  assertTrue(analyzer instanceof TokenizerChain);
  assertTrue(((TokenizerChain) analyzer).getTokenizerFactory() instanceof KeywordTokenizerFactory);
  TokenizerChain tc = (TokenizerChain) analyzer;
  for (TokenFilterFactory factory : tc.getTokenFilterFactories()) {
    assertTrue((factory instanceof ASCIIFoldingFilterFactory) || (factory instanceof LowerCaseFilterFactory));
  }

  assertTrue(tc.getCharFilterFactories() == null);

}
 
Developer: europeana, Project: search, Lines: 15, Source: MultiTermTest.java

Example 7: getSearchMapping

import org.apache.lucene.analysis.core.LowerCaseFilterFactory; // import the required package/class
/**
 * Gets the search mapping.
 *
 * @return the search mapping
 */
@Factory
public SearchMapping getSearchMapping() {
	final SearchMapping mapping = new SearchMapping();
	mapping.analyzerDef("ngram", StandardTokenizerFactory.class).filter(LowerCaseFilterFactory.class)
			.filter(NGramFilterFactory.class).param("minGramSize", "3").param("maxGramSize", "3")
			.analyzerDef("se", StandardTokenizerFactory.class).filter(LowerCaseFilterFactory.class)
			.filter(SwedishLightStemFilterFactory.class).analyzerDef("en", StandardTokenizerFactory.class)
			.filter(LowerCaseFilterFactory.class).filter(PorterStemFilterFactory.class)
			.entity(DocumentContentData.class).indexed()
				.property("hjid", ElementType.FIELD).documentId()
				.property("content", ElementType.METHOD).field().analyzer("se").store(Store.NO).analyze(Analyze.YES)
				.property("id", ElementType.METHOD).field()
			.entity(DocumentElement.class).indexed()
				.property("id", ElementType.FIELD).documentId()
				.property("title", ElementType.METHOD).field().analyzer("se").store(Store.NO).analyze(Analyze.YES)
				.property("subTitle", ElementType.METHOD).field().analyzer("se").store(Store.NO).analyze(Analyze.YES)
			.entity(DocumentStatusContainer.class).indexed()
				.property("hjid", ElementType.FIELD).documentId()
				.property("documentCategory", ElementType.METHOD).field().analyzer("se").store(Store.NO).analyze(Analyze.YES);

	return mapping;
}
 
Developer: Hack23, Project: cia, Lines: 20, Source: DataSearchMappingFactory.java

Example 8: getSearchMapping

import org.apache.lucene.analysis.core.LowerCaseFilterFactory; // import the required package/class
@Factory
public SearchMapping getSearchMapping() {
	SearchMapping mapping = new SearchMapping();

	mapping.analyzerDef("autocompleteEdgeAnalyzer", PatternTokenizerFactory.class)
			.tokenizerParam("pattern", "(.*)")
			.tokenizerParam("group", "1")
			.filter(LowerCaseFilterFactory.class)
			.filter(StopFilterFactory.class)
			.filter(EdgeNGramFilterFactory.class)
			.param("minGramSize", "3")
			.param("maxGramSize", "50")
		.analyzerDef("autocompletePhoneticAnalyzer", StandardTokenizerFactory.class)
			.filter(StandardFilterFactory.class)
			.filter(StopFilterFactory.class)
			.filter(PhoneticFilterFactory.class)
			.param("encoder", "DoubleMetaphone")
			.filter(SnowballPorterFilterFactory.class)
			.param("language", "English")
		.analyzerDef("autocompleteNGramAnalyzer", StandardTokenizerFactory.class)
			.filter(WordDelimiterFilterFactory.class)
			.filter(LowerCaseFilterFactory.class)
			.filter(NGramFilterFactory.class)
			.param("minGramSize", "3")
			.param("maxGramSize", "20")
		.analyzerDef("standardAnalyzer", StandardTokenizerFactory.class)
			.filter(LowerCaseFilterFactory.class)
		.analyzerDef("exactAnalyzer", StandardTokenizerFactory.class)
		.analyzerDef("conceptParentPidsAnalyzer", WhitespaceTokenizerFactory.class);

	return mapping;
}
 
Developer: jamesagnew, Project: hapi-fhir, Lines: 33, Source: LuceneSearchMappingFactory.java

Example 9: testLookupTokenFilter

import org.apache.lucene.analysis.core.LowerCaseFilterFactory; // import the required package/class
public void testLookupTokenFilter() {
  assertSame(LowerCaseFilterFactory.class, TokenFilterFactory.forName("Lowercase").getClass());
  assertSame(LowerCaseFilterFactory.class, TokenFilterFactory.forName("LOWERCASE").getClass());
  assertSame(LowerCaseFilterFactory.class, TokenFilterFactory.forName("lowercase").getClass());
  
  assertSame(RemoveDuplicatesTokenFilterFactory.class, TokenFilterFactory.forName("RemoveDuplicates").getClass());
  assertSame(RemoveDuplicatesTokenFilterFactory.class, TokenFilterFactory.forName("REMOVEDUPLICATES").getClass());
  assertSame(RemoveDuplicatesTokenFilterFactory.class, TokenFilterFactory.forName("removeduplicates").getClass());
}
 
Developer: pkarmstr, Project: NYBC, Lines: 10, Source: TestAnalysisSPILoader.java

Example 10: testQueryCopiedToMulti

import org.apache.lucene.analysis.core.LowerCaseFilterFactory; // import the required package/class
@Test
public void testQueryCopiedToMulti() {
  SchemaField field = h.getCore().getSchema().getField("content_charfilter");
  Analyzer analyzer = ((TextField)field.getType()).getMultiTermAnalyzer();
  assertTrue(analyzer instanceof TokenizerChain);
  assertTrue(((TokenizerChain) analyzer).getTokenizerFactory() instanceof KeywordTokenizerFactory);
  TokenizerChain tc = (TokenizerChain) analyzer;
  for (TokenFilterFactory factory : tc.getTokenFilterFactories()) {
    assertTrue(factory instanceof LowerCaseFilterFactory);
  }

  assertTrue(tc.getCharFilterFactories().length == 1);
  assertTrue(tc.getCharFilterFactories()[0] instanceof MappingCharFilterFactory);
}
 
Developer: pkarmstr, Project: NYBC, Lines: 15, Source: MultiTermTest.java

Example 11: testDefaultCopiedToMulti

import org.apache.lucene.analysis.core.LowerCaseFilterFactory; // import the required package/class
@Test
public void testDefaultCopiedToMulti() {
  SchemaField field = h.getCore().getSchema().getField("content_ws");
  Analyzer analyzer = ((TextField)field.getType()).getMultiTermAnalyzer();
  assertTrue(analyzer instanceof TokenizerChain);
  assertTrue(((TokenizerChain) analyzer).getTokenizerFactory() instanceof KeywordTokenizerFactory);
  TokenizerChain tc = (TokenizerChain) analyzer;
  for (TokenFilterFactory factory : tc.getTokenFilterFactories()) {
    assertTrue((factory instanceof ASCIIFoldingFilterFactory) || (factory instanceof LowerCaseFilterFactory));
  }

  assertTrue(tc.getCharFilterFactories() == null);

}
 
Developer: pkarmstr, Project: NYBC, Lines: 15, Source: MultiTermTest.java

Example 12: registerWithPrefix

import org.apache.lucene.analysis.core.LowerCaseFilterFactory; // import the required package/class
protected void registerWithPrefix(String prefix, LuceneAnalyzerDefinitionRegistryBuilder builder) {
	builder.analyzer(prefix + HibernateSearchAnalyzer.KEYWORD).tokenizer(KeywordTokenizerFactory.class);
	
	builder.analyzer(prefix + HibernateSearchAnalyzer.KEYWORD_CLEAN).tokenizer(KeywordTokenizerFactory.class)
		.tokenFilter(ASCIIFoldingFilterFactory.class)
		.tokenFilter(LowerCaseFilterFactory.class);
	
	builder.analyzer(prefix + HibernateSearchAnalyzer.TEXT).tokenizer(WhitespaceTokenizerFactory.class)
			.tokenFilter(ASCIIFoldingFilterFactory.class)
			.tokenFilter(WordDelimiterFilterFactory.class)
					.param("generateWordParts", "1")
					.param("generateNumberParts", "1")
					.param("catenateWords", "0")
					.param("catenateNumbers", "0")
					.param("catenateAll", "0")
					.param("splitOnCaseChange", "0")
					.param("splitOnNumerics", "0")
					.param("preserveOriginal", "1")
			.tokenFilter(LowerCaseFilterFactory.class);
	
	builder.analyzer(prefix + HibernateSearchAnalyzer.TEXT_STEMMING).tokenizer(WhitespaceTokenizerFactory.class)
			.tokenFilter(ASCIIFoldingFilterFactory.class)
			.tokenFilter(WordDelimiterFilterFactory.class)
					.param("generateWordParts", "1")
					.param("generateNumberParts", "1")
					.param("catenateWords", "0")
					.param("catenateNumbers", "0")
					.param("catenateAll", "0")
					.param("splitOnCaseChange", "0")
					.param("splitOnNumerics", "0")
					.param("preserveOriginal", "1")
			.tokenFilter(LowerCaseFilterFactory.class)
			.tokenFilter(CoreFrenchMinimalStemFilterFactory.class);
	
	builder.analyzer(prefix + HibernateSearchAnalyzer.TEXT_SORT).tokenizer(KeywordTokenizerFactory.class)
			.tokenFilter(ASCIIFoldingFilterFactory.class)
			.tokenFilter(LowerCaseFilterFactory.class)
			.tokenFilter(PatternReplaceFilterFactory.class)
					.param("pattern", "('-&\\.,\\(\\))")
					.param("replacement", " ")
					.param("replace", "all")
			.tokenFilter(PatternReplaceFilterFactory.class)
					.param("pattern", "([^0-9\\p{L} ])")
					.param("replacement", "")
					.param("replace", "all")
			.tokenFilter(TrimFilterFactory.class);
	
}
 
Developer: openwide-java, Project: owsi-core-parent, Lines: 49, Source: CoreLuceneAnalyzersDefinitionProvider.java

Example 13: makeAnalyzer

import org.apache.lucene.analysis.core.LowerCaseFilterFactory; // import the required package/class
private Analyzer makeAnalyzer() throws IOException {
  return CustomAnalyzer.builder()
      .withTokenizer(WhitespaceTokenizerFactory.class)
      .addTokenFilter(LowerCaseFilterFactory.class)
      .build();
}
 
Developer: eclipse, Project: che, Lines: 7, Source: LuceneSearcher.java
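As a follow-up to Example 13, here is a hedged sketch of how an analyzer built that way might be exercised. The CustomAnalyzerUsageSketch class name, the field name "text", and the sample input are illustrative only and do not come from the che project.

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.LowerCaseFilterFactory;
import org.apache.lucene.analysis.core.WhitespaceTokenizerFactory;
import org.apache.lucene.analysis.custom.CustomAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class CustomAnalyzerUsageSketch {
    public static void main(String[] args) throws Exception {
        // Same chain as Example 13: whitespace tokenizer + lowercase filter.
        Analyzer analyzer = CustomAnalyzer.builder()
                .withTokenizer(WhitespaceTokenizerFactory.class)
                .addTokenFilter(LowerCaseFilterFactory.class)
                .build();

        try (TokenStream ts = analyzer.tokenStream("text", "Quick BROWN Fox")) {
            CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
            ts.reset();
            while (ts.incrementToken()) {
                System.out.println(term); // prints: quick, brown, fox
            }
            ts.end();
        }
    }
}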


Note: the org.apache.lucene.analysis.core.LowerCaseFilterFactory class examples in this article were collected by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are taken from open-source projects contributed by their respective authors, who retain copyright of the source code; consult each project's license before using or redistributing it. Do not reproduce this article without permission.