This article collects typical usage examples of the Java method org.elasticsearch.index.analysis.TokenFilterFactory.create. If you are unsure what TokenFilterFactory.create does or how to use it, the curated code examples below may help; you can also explore the declaring class, org.elasticsearch.index.analysis.TokenFilterFactory, for further usage examples.
Below are 14 code examples of TokenFilterFactory.create, sorted by popularity by default.
Example 1: testOne
import org.elasticsearch.index.analysis.TokenFilterFactory; // import the package/class required by this method

@Test
public void testOne() throws IOException {
    String source = "Das ist ein Bindestrich-Wort.";
    String[] expected = {
        "Das",
        "ist",
        "ein",
        "Bindestrich-Wort",
        "BindestrichWort",
        "Wort",
        "Bindestrich"
    };
    String resource = "org/xbib/elasticsearch/index/analysis/hyphen/hyphen_tokenizer.json";
    Tokenizer tokenizer = tokenizerFactory(resource, "my_hyphen_tokenizer").create();
    tokenizer.setReader(new StringReader(source));
    TokenFilterFactory tokenFilter = tokenFilterFactory(resource, "hyphen");
    TokenStream tokenStream = tokenFilter.create(tokenizer);
    assertSimpleTSOutput(tokenStream, expected);
}
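These tests rely on helper methods such as tokenizerFactory, tokenFilterFactory, and assertSimpleTSOutput that the listing does not show; they live in the plugin's test base class. As a point of reference, here is a minimal sketch of what assertSimpleTSOutput plausibly does, assuming it simply walks the stream and compares each emitted term against the expected array; the plugin's actual helper may differ.

// assumes: import org.apache.lucene.analysis.TokenStream;
//          import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
//          and static JUnit asserts
private static void assertSimpleTSOutput(TokenStream stream, String[] expected) throws IOException {
    CharTermAttribute termAttr = stream.addAttribute(CharTermAttribute.class);
    stream.reset();
    int i = 0;
    while (stream.incrementToken()) {
        assertTrue("more tokens than expected", i < expected.length);
        assertEquals("term " + i, expected[i], termAttr.toString());
        i++;
    }
    stream.end();
    stream.close();
    assertEquals("token count", expected.length, i);
}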
Example 2: testGermanNumberFormat
import org.elasticsearch.index.analysis.TokenFilterFactory; // import the package/class required by this method

@Test
public void testGermanNumberFormat() throws IOException {
    String source = "Muss Rudi Völler fünftausend oder 10000 EUR Strafe zahlen?";
    String[] expected = {
        "Muss",
        "Rudi",
        "Völler",
        "fünftausend",
        "oder",
        "zehntausend",
        "EUR",
        "Strafe",
        "zahlen"
    };
    String resource = "org/xbib/elasticsearch/index/analysis/icu/icu_numberformat.json";
    Tokenizer tokenizer = tokenizerFactory(resource, "my_tokenizer").create();
    tokenizer.setReader(new StringReader(source));
    TokenFilterFactory tokenFilter = tokenFilterFactory(resource, "spellout_de");
    TokenStream tokenStream = tokenFilter.create(tokenizer);
    assertSimpleTSOutput(tokenStream, expected);
}
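The spellout_de filter converts the digit token "10000" into its spelled-out German form. It is built on ICU's rule-based number formatting; a minimal sketch of the underlying ICU call, independent of the Elasticsearch filter (the exact spelled-out form depends on the ICU version):

import com.ibm.icu.text.RuleBasedNumberFormat;
import java.util.Locale;

public class SpelloutDemo {
    public static void main(String[] args) {
        // German "spellout" rule set, the same mechanism the filter wraps
        RuleBasedNumberFormat rbnf = new RuleBasedNumberFormat(Locale.GERMAN, RuleBasedNumberFormat.SPELLOUT);
        // prints the German spelling of 10000, e.g. "zehntausend"
        System.out.println(rbnf.format(10000));
    }
}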
Example 3: testAmericanEnglish
import org.elasticsearch.index.analysis.TokenFilterFactory; // import the package/class required by this method

@Test
public void testAmericanEnglish() throws IOException {
    String source = "You will never get 100,000 US dollars of salary per year.";
    String[] expected = {
        "You",
        "will",
        "never",
        "get",
        "onehundredthousand",
        "US",
        "dollars",
        "of",
        "salary",
        "per",
        "year"
    };
    String resource = "org/xbib/elasticsearch/index/analysis/icu/icu_numberformat.json";
    Tokenizer tokenizer = tokenizerFactory(resource, "my_tokenizer").create();
    tokenizer.setReader(new StringReader(source));
    TokenFilterFactory tokenFilter = tokenFilterFactory(resource, "spellout_en");
    TokenStream tokenStream = tokenFilter.create(tokenizer);
    assertSimpleTSOutput(tokenStream, expected);
}
Example 4: testOne
import org.elasticsearch.index.analysis.TokenFilterFactory; // import the package/class required by this method

@Test
public void testOne() throws IOException {
    String source = "Das ist ein Bindestrich-Wort.";
    String[] expected = {
        "Das",
        "ist",
        "ein",
        "Bindestrich-Wort",
        "BindestrichWort",
        "Wort",
        "Bindestrich"
    };
    AnalysisService analysisService = createAnalysisService();
    Tokenizer tokenizer = analysisService.tokenizer("my_hyphen_tokenizer").create(new StringReader(source));
    TokenFilterFactory tokenFilter = analysisService.tokenFilter("hyphen");
    TokenStream tokenStream = tokenFilter.create(tokenizer);
    assertSimpleTSOutput(tokenStream, expected);
}
Example 5: testMetaphoneWords
import org.elasticsearch.index.analysis.TokenFilterFactory; // import the package/class required by this method

@Test
public void testMetaphoneWords() throws Exception {
    Index index = new Index("test", "_na_");
    Settings settings = Settings.builder()
            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .put("index.analysis.filter.myStemmer.type", "br_metaphone")
            .build();
    AnalysisService analysisService = createAnalysisService(index, settings, new AnalysisMetaphonePlugin());
    TokenFilterFactory filterFactory = analysisService.tokenFilter("br_metaphone");
    Tokenizer tokenizer = new KeywordTokenizer();
    Map<String, String> words = buildWordList();
    Set<String> inputWords = words.keySet();
    for (String word : inputWords) {
        tokenizer.setReader(new StringReader(word));
        TokenStream ts = filterFactory.create(tokenizer);
        CharTermAttribute term1 = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        assertThat(ts.incrementToken(), equalTo(true));
        assertThat(term1.toString(), equalTo(words.get(word)));
        ts.close();
    }
}
Author: anaelcarvalho | Project: elasticsearch-analysis-metaphone_ptBR | Lines: 29 | Source: MetaphoneTokenFilterTests.java
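The buildWordList() helper is not shown in the listing. A hypothetical shape, assuming it maps each input word to its expected pt-BR metaphone encoding; the actual word/encoding pairs ship with the plugin's test resources and are not reproduced here:

// Hypothetical helper shape; the real pairs come from the plugin's test data.
private Map<String, String> buildWordList() {
    Map<String, String> words = new LinkedHashMap<>();
    // words.put(inputWord, expectedMetaphoneEncoding); // filled from test data
    return words;
}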
Example 6: assertTokenFilter
import org.elasticsearch.index.analysis.TokenFilterFactory; // import the package/class required by this method

private void assertTokenFilter(String name, Class<?> clazz) throws IOException {
    Settings settings = Settings.builder()
            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
            .build();
    TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
    TokenFilterFactory tokenFilter = analysis.tokenFilter.get(name);
    Tokenizer tokenizer = new WhitespaceTokenizer();
    tokenizer.setReader(new StringReader("foo bar"));
    TokenStream stream = tokenFilter.create(tokenizer);
    assertThat(stream, instanceOf(clazz));
}
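A typical invocation of this helper binds a filter name to the Lucene filter class it is expected to produce. For instance (a hypothetical pairing for illustration, using a stock Elasticsearch filter name):

// "asciifolding" is a built-in Elasticsearch filter backed by Lucene's ASCIIFoldingFilter
assertTokenFilter("asciifolding", ASCIIFoldingFilter.class);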
Example 7: createComponents
import org.elasticsearch.index.analysis.TokenFilterFactory; // import the package/class required by this method

@Override
protected TokenStreamComponents createComponents(String fieldName) {
    Tokenizer tokenizer = tokenizerFactory.create();
    TokenStream tokenStream = tokenizer;
    for (TokenFilterFactory tokenFilter : Arrays.asList(stdnumTokenFilterFactory, uniqueTokenFilterFactory)) {
        tokenStream = tokenFilter.create(tokenStream);
    }
    return new TokenStreamComponents(tokenizer, tokenStream);
}
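For context, this override only compiles inside an Analyzer subclass that holds the three factories as fields. A minimal sketch of that wiring, where the class name and constructor shape are assumptions (the field names are taken from the snippet above):

public class StandardnumberAnalyzer extends Analyzer { // class name is an assumption
    private final TokenizerFactory tokenizerFactory;
    private final TokenFilterFactory stdnumTokenFilterFactory;
    private final TokenFilterFactory uniqueTokenFilterFactory;

    public StandardnumberAnalyzer(TokenizerFactory tokenizerFactory,
                                  TokenFilterFactory stdnumTokenFilterFactory,
                                  TokenFilterFactory uniqueTokenFilterFactory) {
        this.tokenizerFactory = tokenizerFactory;
        this.stdnumTokenFilterFactory = stdnumTokenFilterFactory;
        this.uniqueTokenFilterFactory = uniqueTokenFilterFactory;
    }

    // the createComponents(String fieldName) override from the example goes here
}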
Example 8: testTransformTraditionalSimplified
import org.elasticsearch.index.analysis.TokenFilterFactory; // import the package/class required by this method

@Test
public void testTransformTraditionalSimplified() throws IOException {
    String source = "簡化字";
    String[] expected = new String[] { "简化", "字" };
    String resource = "org/xbib/elasticsearch/index/analysis/icu/icu_transform.json";
    Tokenizer tokenizer = tokenizerFactory(resource, "my_icu_tokenizer_ch").create();
    tokenizer.setReader(new StringReader(source));
    TokenFilterFactory tokenFilter = tokenFilterFactory(resource, "my_icu_transformer_ch");
    TokenStream tokenStream = tokenFilter.create(tokenizer);
    assertSimpleTSOutput(tokenStream, expected);
}
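The my_icu_transformer_ch filter wraps an ICU transliterator. A direct illustration of the same conversion, independent of the Elasticsearch filter ("Traditional-Simplified" is a standard ICU transform id; whether the JSON config uses exactly this id is an assumption):

import com.ibm.icu.text.Transliterator;

public class TransformDemo {
    public static void main(String[] args) {
        Transliterator t = Transliterator.getInstance("Traditional-Simplified");
        System.out.println(t.transliterate("簡化字")); // prints 简化字
    }
}

The remaining transform examples below differ only in the transliterator id and tokenizer settings.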
Example 9: testTransformHanLatin
import org.elasticsearch.index.analysis.TokenFilterFactory; // import the package/class required by this method

@Test
public void testTransformHanLatin() throws IOException {
    String source = "中国";
    String[] expected = new String[] { "zhōng guó" };
    String resource = "org/xbib/elasticsearch/index/analysis/icu/icu_transform.json";
    Tokenizer tokenizer = tokenizerFactory(resource, "my_icu_tokenizer_han").create();
    tokenizer.setReader(new StringReader(source));
    TokenFilterFactory tokenFilter = tokenFilterFactory(resource, "my_icu_transformer_han");
    TokenStream tokenStream = tokenFilter.create(tokenizer);
    assertSimpleTSOutput(tokenStream, expected);
}
Example 10: testTransformKatakanaHiragana
import org.elasticsearch.index.analysis.TokenFilterFactory; // import the package/class required by this method

@Test
public void testTransformKatakanaHiragana() throws IOException {
    String source = "ヒラガナ";
    String[] expected = new String[] { "ひらがな" };
    String resource = "org/xbib/elasticsearch/index/analysis/icu/icu_transform.json";
    Tokenizer tokenizer = tokenizerFactory(resource, "my_icu_tokenizer_katakana").create();
    tokenizer.setReader(new StringReader(source));
    TokenFilterFactory tokenFilter = tokenFilterFactory(resource, "my_icu_transformer_katakana");
    TokenStream tokenStream = tokenFilter.create(tokenizer);
    assertSimpleTSOutput(tokenStream, expected);
}
Example 11: testTransformCyrillicLatin
import org.elasticsearch.index.analysis.TokenFilterFactory; // import the package/class required by this method

@Test
public void testTransformCyrillicLatin() throws IOException {
    String source = "Российская Федерация";
    String[] expected = new String[] { "Rossijskaâ", "Federaciâ" };
    String resource = "org/xbib/elasticsearch/index/analysis/icu/icu_transform.json";
    Tokenizer tokenizer = tokenizerFactory(resource, "my_icu_tokenizer_cyr").create();
    tokenizer.setReader(new StringReader(source));
    TokenFilterFactory tokenFilter = tokenFilterFactory(resource, "my_icu_transformer_cyr");
    TokenStream tokenStream = tokenFilter.create(tokenizer);
    assertSimpleTSOutput(tokenStream, expected);
}
Example 12: testTransformCyrillicLatinReverse
import org.elasticsearch.index.analysis.TokenFilterFactory; // import the package/class required by this method

@Test
public void testTransformCyrillicLatinReverse() throws IOException {
    String source = "Rossijskaâ Federaciâ";
    String[] expected = new String[] { "Российская", "Федерация" };
    String resource = "org/xbib/elasticsearch/index/analysis/icu/icu_transform.json";
    Tokenizer tokenizer = tokenizerFactory(resource, "my_icu_tokenizer_cyr").create();
    tokenizer.setReader(new StringReader(source));
    TokenFilterFactory tokenFilter = tokenFilterFactory(resource, "my_icu_transformer_cyr_reverse");
    TokenStream tokenStream = tokenFilter.create(tokenizer);
    assertSimpleTSOutput(tokenStream, expected);
}
Example 13: testTransformAnyLatin
import org.elasticsearch.index.analysis.TokenFilterFactory; // import the package/class required by this method

@Test
public void testTransformAnyLatin() throws IOException {
    String source = "Αλφαβητικός Κατάλογος";
    String[] expected = new String[] { "Alphabētikós", "Katálogos" };
    String resource = "org/xbib/elasticsearch/index/analysis/icu/icu_transform.json";
    Tokenizer tokenizer = tokenizerFactory(resource, "my_icu_tokenizer_any_latin").create();
    tokenizer.setReader(new StringReader(source));
    TokenFilterFactory tokenFilter = tokenFilterFactory(resource, "my_icu_transformer_any_latin");
    TokenStream tokenStream = tokenFilter.create(tokenizer);
    assertSimpleTSOutput(tokenStream, expected);
}
Example 14: testTransformNFD
import org.elasticsearch.index.analysis.TokenFilterFactory; // import the package/class required by this method

@Test
public void testTransformNFD() throws IOException {
    String source = "Alphabētikós Katálogos";
    String[] expected = new String[] { "Alphabetikos", "Katalogos" };
    String resource = "org/xbib/elasticsearch/index/analysis/icu/icu_transform.json";
    Tokenizer tokenizer = tokenizerFactory(resource, "my_icu_tokenizer_nfd").create();
    tokenizer.setReader(new StringReader(source));
    TokenFilterFactory tokenFilter = tokenFilterFactory(resource, "my_icu_transformer_nfd");
    TokenStream tokenStream = tokenFilter.create(tokenizer);
    assertSimpleTSOutput(tokenStream, expected);
}
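The diacritic stripping shown here matches the classic ICU compound transform idiom: decompose, drop combining marks, recompose. A sketch of that transform in isolation (assuming the filter's JSON config uses an equivalent rule):

import com.ibm.icu.text.Transliterator;

public class StripDiacriticsDemo {
    public static void main(String[] args) {
        // NFD decomposition, removal of nonspacing marks, NFC recomposition
        Transliterator strip = Transliterator.getInstance("NFD; [:Nonspacing Mark:] Remove; NFC");
        System.out.println(strip.transliterate("Alphabētikós Katálogos")); // Alphabetikos Katalogos
    }
}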