This article compiles typical usage examples of the Java class org.apache.lucene.analysis.core.WhitespaceTokenizerFactory. If you are wondering what WhitespaceTokenizerFactory is for or how to use it, the curated code examples below should help.
The WhitespaceTokenizerFactory class belongs to the org.apache.lucene.analysis.core package. Nine code examples of the class are presented below, sorted by popularity by default.
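Before the examples, a minimal sketch of the class in isolation may be useful. It assumes the Lucene 5+/6.x factory API, where the args map is passed to the constructor and the Reader is set on the created Tokenizer; the sample text is arbitrary:

// Tokenize a string on whitespace using the factory directly.
Map<String, String> factoryArgs = new HashMap<String, String>(); // no parameters required here
Tokenizer tokenizer = new WhitespaceTokenizerFactory(factoryArgs).create();
tokenizer.setReader(new StringReader("hello   lucene world"));
CharTermAttribute term = tokenizer.addAttribute(CharTermAttribute.class);
tokenizer.reset();
while (tokenizer.incrementToken()) {
    System.out.println(term); // prints: hello, lucene, world
}
tokenizer.end();
tokenizer.close();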
Example 1: setUp
import org.apache.lucene.analysis.core.WhitespaceTokenizerFactory; // import the required package/class
@Before
public void setUp() {
    fieldType = new TextField();
    Map<String, FieldType> fieldTypes = Maps.newHashMap();
    fieldTypes.put("test", fieldType);
    when(searcher.getSchema()).thenReturn(schema);
    when(schema.getFieldTypes()).thenReturn(fieldTypes);
    indexAnalyzer = new TokenizerChain(
            new WhitespaceTokenizerFactory(Maps.<String, String> newHashMap()),
            new TokenFilterFactory[] { indexTokenFilterFactory });
    queryAnalyzer = new TokenizerChain(
            new WhitespaceTokenizerFactory(Maps.<String, String> newHashMap()),
            new TokenFilterFactory[] { queryTokenFilterFactory });
    reloader = new SearcherAwareReloader(null);
}
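TokenizerChain here is Solr's Analyzer implementation that wires a TokenizerFactory to an array of TokenFilterFactory instances; once built, it behaves like any Lucene Analyzer. A hedged sketch of exercising such a chain, with a real LowerCaseFilterFactory standing in for the mocked filter factories (an assumption):

TokenizerChain analyzer = new TokenizerChain(
        new WhitespaceTokenizerFactory(new HashMap<String, String>()),
        new TokenFilterFactory[] { new LowerCaseFilterFactory(new HashMap<String, String>()) });
try (TokenStream stream = analyzer.tokenStream("field", "Hello Lucene")) {
    CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
    stream.reset();
    while (stream.incrementToken()) {
        System.out.println(term); // prints: hello, then lucene
    }
    stream.end();
}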
Example 2: getSearchMapping
import org.apache.lucene.analysis.core.WhitespaceTokenizerFactory; // import the required package/class
@Factory
public SearchMapping getSearchMapping() {
    SearchMapping mapping = new SearchMapping();
    mapping.analyzerDef("autocompleteEdgeAnalyzer", PatternTokenizerFactory.class)
            .tokenizerParam("pattern", "(.*)")
            .tokenizerParam("group", "1")
            .filter(LowerCaseFilterFactory.class)
            .filter(StopFilterFactory.class)
            .filter(EdgeNGramFilterFactory.class)
                .param("minGramSize", "3")
                .param("maxGramSize", "50")
            .analyzerDef("autocompletePhoneticAnalyzer", StandardTokenizerFactory.class)
            .filter(StandardFilterFactory.class)
            .filter(StopFilterFactory.class)
            .filter(PhoneticFilterFactory.class)
                .param("encoder", "DoubleMetaphone")
            .filter(SnowballPorterFilterFactory.class)
                .param("language", "English")
            .analyzerDef("autocompleteNGramAnalyzer", StandardTokenizerFactory.class)
            .filter(WordDelimiterFilterFactory.class)
            .filter(LowerCaseFilterFactory.class)
            .filter(NGramFilterFactory.class)
                .param("minGramSize", "3")
                .param("maxGramSize", "20")
            .analyzerDef("standardAnalyzer", StandardTokenizerFactory.class)
            .filter(LowerCaseFilterFactory.class)
            .analyzerDef("exactAnalyzer", StandardTokenizerFactory.class)
            .analyzerDef("conceptParentPidsAnalyzer", WhitespaceTokenizerFactory.class);
    return mapping;
}
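For a @Factory method like this to take effect, Hibernate Search must be pointed at the declaring class via the programmatic mapping property. A hedged sketch of the bootstrap (the persistence-unit name and factory class are placeholders; the property key "hibernate.search.model_mapping" is Environment.MODEL_MAPPING in Hibernate Search 5.x):

// Hypothetical bootstrap: register the class whose @Factory method builds the SearchMapping.
Map<String, Object> props = new HashMap<String, Object>();
props.put("hibernate.search.model_mapping", SearchMappingFactory.class.getName());
EntityManagerFactory emf = Persistence.createEntityManagerFactory("app-unit", props);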
Example 3: testWhitespaceTokenizer
import org.apache.lucene.analysis.core.WhitespaceTokenizerFactory; // import the required package/class
/**
 * Test WhitespaceTokenizerFactory
 */
public void testWhitespaceTokenizer() throws Exception {
    Reader reader = new StringReader("What's this thing do?");
    WhitespaceTokenizerFactory factory = new WhitespaceTokenizerFactory();
    factory.setLuceneMatchVersion(TEST_VERSION_CURRENT);
    Map<String, String> args = Collections.emptyMap();
    factory.init(args);
    Tokenizer stream = factory.create(reader);
    assertTokenStreamContents(stream,
            new String[] {"What's", "this", "thing", "do?"});
}
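The init(Map)/create(Reader) calls above are the older Lucene 4.x factory API. On Lucene 5+ the same test passes the args map to the constructor and sets the reader on the tokenizer; a hedged sketch of the equivalent:

Reader reader = new StringReader("What's this thing do?");
WhitespaceTokenizerFactory factory = new WhitespaceTokenizerFactory(new HashMap<String, String>());
Tokenizer stream = factory.create(); // the no-arg create() uses the default attribute factory
stream.setReader(reader);
assertTokenStreamContents(stream, new String[] {"What's", "this", "thing", "do?"});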
Example 4: tokenizeQueryString
import org.apache.lucene.analysis.core.WhitespaceTokenizerFactory; // import the required package/class
/**
 * Separates tokens from the query. Treats each quote as a separate token, since that makes it easier to examine the query.
 *
 * @param queryString the raw query string to tokenize
 * @param tokens the list that receives the extracted tokens
 * @return the number of quotes in the query
 */
public static int tokenizeQueryString(String queryString, List<String> tokens) {
    int countOfQuotes = 0;
    try {
        // First tokenize on whitespace, then treat each quote as a separate token.
        Map<String, String> args = new HashMap<String, String>();
        args.put(WhitespaceTokenizerFactory.LUCENE_MATCH_VERSION_PARAM, Version.LUCENE_6_3_0.toString());
        WhitespaceTokenizerFactory f = new WhitespaceTokenizerFactory(args);
        WhitespaceTokenizer s = (WhitespaceTokenizer) f.create(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY);
        s.setReader(new StringReader(queryString));
        CharTermAttribute t = s.addAttribute(CharTermAttribute.class);
        s.reset();
        while (s.incrementToken()) {
            String tokenText = t.toString();
            if (tokenText.equals("\"")) {
                tokens.add("\"");
                countOfQuotes++;
            } else if (tokenText.startsWith("\"")) {
                tokens.add("\"");
                countOfQuotes++;
                if (tokenText.endsWith("\"")) {
                    tokens.add(tokenText.substring(1, tokenText.length() - 1));
                    tokens.add("\"");
                    countOfQuotes++;
                } else {
                    tokens.add(tokenText.substring(1));
                }
            } else if (tokenText.endsWith("\"")) {
                tokens.add(tokenText.substring(0, tokenText.length() - 1));
                tokens.add("\"");
                countOfQuotes++;
            } else if (!tokenText.trim().isEmpty()) {
                // Only keep tokens that are non-empty after trimming.
                tokens.add(tokenText);
            }
        }
        s.end();
        s.close();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    return countOfQuotes;
}
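To make the quote handling concrete, here is what the method produces for a small query (the expected values are derived by tracing the branches above):

List<String> tokens = new ArrayList<String>();
int quotes = tokenizeQueryString("say \"hello world\" now", tokens);
// tokens -> [say, ", hello, world, ", now]
// quotes -> 2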
Example 5: registerWithPrefix
import org.apache.lucene.analysis.core.WhitespaceTokenizerFactory; // import the required package/class
protected void registerWithPrefix(String prefix, LuceneAnalyzerDefinitionRegistryBuilder builder) {
    builder.analyzer(prefix + HibernateSearchAnalyzer.KEYWORD).tokenizer(KeywordTokenizerFactory.class);
    builder.analyzer(prefix + HibernateSearchAnalyzer.KEYWORD_CLEAN).tokenizer(KeywordTokenizerFactory.class)
            .tokenFilter(ASCIIFoldingFilterFactory.class)
            .tokenFilter(LowerCaseFilterFactory.class);
    builder.analyzer(prefix + HibernateSearchAnalyzer.TEXT).tokenizer(WhitespaceTokenizerFactory.class)
            .tokenFilter(ASCIIFoldingFilterFactory.class)
            .tokenFilter(WordDelimiterFilterFactory.class)
                .param("generateWordParts", "1")
                .param("generateNumberParts", "1")
                .param("catenateWords", "0")
                .param("catenateNumbers", "0")
                .param("catenateAll", "0")
                .param("splitOnCaseChange", "0")
                .param("splitOnNumerics", "0")
                .param("preserveOriginal", "1")
            .tokenFilter(LowerCaseFilterFactory.class);
    builder.analyzer(prefix + HibernateSearchAnalyzer.TEXT_STEMMING).tokenizer(WhitespaceTokenizerFactory.class)
            .tokenFilter(ASCIIFoldingFilterFactory.class)
            .tokenFilter(WordDelimiterFilterFactory.class)
                .param("generateWordParts", "1")
                .param("generateNumberParts", "1")
                .param("catenateWords", "0")
                .param("catenateNumbers", "0")
                .param("catenateAll", "0")
                .param("splitOnCaseChange", "0")
                .param("splitOnNumerics", "0")
                .param("preserveOriginal", "1")
            .tokenFilter(LowerCaseFilterFactory.class)
            .tokenFilter(CoreFrenchMinimalStemFilterFactory.class);
    builder.analyzer(prefix + HibernateSearchAnalyzer.TEXT_SORT).tokenizer(KeywordTokenizerFactory.class)
            .tokenFilter(ASCIIFoldingFilterFactory.class)
            .tokenFilter(LowerCaseFilterFactory.class)
            .tokenFilter(PatternReplaceFilterFactory.class)
                .param("pattern", "('-&\\.,\\(\\))")
                .param("replacement", " ")
                .param("replace", "all")
            .tokenFilter(PatternReplaceFilterFactory.class)
                .param("pattern", "([^0-9\\p{L} ])")
                .param("replacement", "")
                .param("replace", "all")
            .tokenFilter(TrimFilterFactory.class);
}
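Once registered, these named analyzer definitions are referenced by name from indexed entities. A hedged sketch using Hibernate Search 5.x annotations (the Book entity and the literal definition name are placeholders; in practice the name is prefix plus the HibernateSearchAnalyzer constant):

@Entity
@Indexed
public class Book {
    @Id
    private Long id;

    // Hypothetical field using the TEXT analyzer definition registered above.
    @Field(analyzer = @Analyzer(definition = "text"))
    private String title;
}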
Example 6: makeAnalyzer
import org.apache.lucene.analysis.core.WhitespaceTokenizerFactory; // import the required package/class
private Analyzer makeAnalyzer() throws IOException {
    return CustomAnalyzer.builder()
            .withTokenizer(WhitespaceTokenizerFactory.class)
            .addTokenFilter(LowerCaseFilterFactory.class)
            .build();
}
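The returned Analyzer behaves like any other Lucene Analyzer: whitespace-split tokens, lowercased. A minimal sketch of consuming it (the field name and sample text are arbitrary):

Analyzer analyzer = makeAnalyzer();
try (TokenStream stream = analyzer.tokenStream("body", "Quick BROWN fox")) {
    CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
    stream.reset();
    while (stream.incrementToken()) {
        System.out.println(term); // prints: quick, brown, fox
    }
    stream.end();
}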
Example 7: testLookupTokenizer
import org.apache.lucene.analysis.core.WhitespaceTokenizerFactory; // import the required package/class
public void testLookupTokenizer() {
    assertSame(WhitespaceTokenizerFactory.class, TokenizerFactory.forName("Whitespace", versionArgOnly()).getClass());
    assertSame(WhitespaceTokenizerFactory.class, TokenizerFactory.forName("WHITESPACE", versionArgOnly()).getClass());
    assertSame(WhitespaceTokenizerFactory.class, TokenizerFactory.forName("whitespace", versionArgOnly()).getClass());
}
Example 8: testLookupTokenizerClass
import org.apache.lucene.analysis.core.WhitespaceTokenizerFactory; // import the required package/class
public void testLookupTokenizerClass() {
    assertSame(WhitespaceTokenizerFactory.class, TokenizerFactory.lookupClass("Whitespace"));
    assertSame(WhitespaceTokenizerFactory.class, TokenizerFactory.lookupClass("WHITESPACE"));
    assertSame(WhitespaceTokenizerFactory.class, TokenizerFactory.lookupClass("whitespace"));
}
Example 9: testLookupTokenizer
import org.apache.lucene.analysis.core.WhitespaceTokenizerFactory; // import the required package/class
public void testLookupTokenizer() {
    assertSame(WhitespaceTokenizerFactory.class, TokenizerFactory.forName("Whitespace").getClass());
    assertSame(WhitespaceTokenizerFactory.class, TokenizerFactory.forName("WHITESPACE").getClass());
    assertSame(WhitespaceTokenizerFactory.class, TokenizerFactory.forName("whitespace").getClass());
}