本文整理汇总了Java中org.elasticsearch.index.analysis.AnalysisService类的典型用法代码示例。如果您正苦于以下问题:Java AnalysisService类的具体用法?Java AnalysisService怎么用?Java AnalysisService使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
AnalysisService类属于org.elasticsearch.index.analysis包,在下文中一共展示了AnalysisService类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: IndexQueryParserService
import org.elasticsearch.index.analysis.AnalysisService; //导入依赖的package包/类
@Inject
public IndexQueryParserService(Index index, IndexSettingsService indexSettingsService,
                               IndicesQueriesRegistry indicesQueriesRegistry,
                               ScriptService scriptService, AnalysisService analysisService,
                               MapperService mapperService, IndexCache indexCache, IndexFieldDataService fieldDataService,
                               BitsetFilterCache bitsetFilterCache,
                               @Nullable SimilarityService similarityService) {
    super(index, indexSettingsService.getSettings());
    // Derive the query-parsing defaults from the index settings first.
    Settings indexSettings = indexSettingsService.getSettings();
    this.defaultField = indexSettings.get(DEFAULT_FIELD, AllFieldMapper.NAME);
    this.queryStringLenient = indexSettings.getAsBoolean(QUERY_STRING_LENIENT, false);
    this.defaultAllowUnmappedFields = indexSettings.getAsBoolean(ALLOW_UNMAPPED, true);
    this.parseFieldMatcher = new ParseFieldMatcher(indexSettings);
    // Wire the collaborating services.
    this.indexSettingsService = indexSettingsService;
    this.indicesQueriesRegistry = indicesQueriesRegistry;
    this.scriptService = scriptService;
    this.analysisService = analysisService;
    this.mapperService = mapperService;
    this.similarityService = similarityService;
    this.indexCache = indexCache;
    this.fieldDataService = fieldDataService;
    this.bitsetFilterCache = bitsetFilterCache;
}
示例2: checkMappingsCompatibility
import org.elasticsearch.index.analysis.AnalysisService; //导入依赖的package包/类
/**
 * Checks the mappings for compatibility with the current version.
 */
private void checkMappingsCompatibility(IndexMetaData indexMetaData) {
    Index index = new Index(indexMetaData.getIndex());
    Settings settings = indexMetaData.getSettings();
    try {
        SimilarityLookupService similarityLookupService = new SimilarityLookupService(index, settings);
        // We cannot instantiate a real analysis service at this point because the node might
        // not have been started yet; fake analyzers are enough for a compatibility check.
        // A single try-with-resources closes mapperService before analysisService
        // (reverse declaration order), same as the original nested form.
        try (AnalysisService analysisService = new FakeAnalysisService(index, settings);
             MapperService mapperService = new MapperService(index, settings, analysisService, similarityLookupService,
                     scriptService, mapperRegistry, dynamicArrayFieldMapperBuilderFactoryProvider)) {
            for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
                MappingMetaData mapping = cursor.value;
                mapperService.merge(mapping.type(), mapping.source(), MapperService.MergeReason.MAPPING_RECOVERY, false);
            }
        }
    } catch (Exception ex) {
        // Wrap the inner exception so we have the index name in the exception message
        throw new IllegalStateException("unable to upgrade the mappings for the index [" + indexMetaData.getIndex() + "], reason: [" + ex.getMessage() + "]", ex);
    }
}
示例3: testTokenizerFilter
import org.elasticsearch.index.analysis.AnalysisService; //导入依赖的package包/类
@Test
public void testTokenizerFilter() throws IOException {
    // Expected Eudex phonetic hashes for the tokens "eins", "zwei", "drei".
    String[] expected = {
            "d800000000001214",
            "4a00000000000000",
            "c0000000000a100"
    };
    AnalysisService analysisService = MapperTestUtils.analysisService("eudex_analysis.json");
    Tokenizer tokenizer = analysisService.tokenizer("my_phonetic").create();
    tokenizer.setReader(new StringReader("eins zwei drei"));
    TokenFilterFactory phoneticFilter = analysisService.tokenFilter("my_phonetic");
    assertSimpleTSOutput(phoneticFilter.create(tokenizer), expected);
}
示例4: testOpenNLPAnalysis
import org.elasticsearch.index.analysis.AnalysisService; //导入依赖的package包/类
@Test
public void testOpenNLPAnalysis() throws Exception {
    AnalysisService analysisService = MapperTestUtils.analysisService(getResource());
    // The "opennlp" tokenizer and token filter must be bound to the plugin factories.
    TokenizerFactory tokenizer = analysisService.tokenizer("opennlp");
    MatcherAssert.assertThat(tokenizer, instanceOf(OpenNLPTokenizerFactory.class));
    this.tokenizerFactory = (OpenNLPTokenizerFactory) tokenizer;
    TokenFilterFactory filter = analysisService.tokenFilter("opennlp");
    MatcherAssert.assertThat(filter, instanceOf(OpenNLPTokenFilterFactory.class));
    this.filterFactory = (OpenNLPTokenFilterFactory) filter;
    // The payload helper filters must be registered as well.
    MatcherAssert.assertThat(analysisService.tokenFilter("filterpayloads"), instanceOf(FilterPayloadsTokenFilterFactory.class));
    MatcherAssert.assertThat(analysisService.tokenFilter("strippayloads"), instanceOf(StripPayloadsTokenFilterFactory.class));
    analyze();
}
示例5: testBasicUsage
import org.elasticsearch.index.analysis.AnalysisService; //导入依赖的package包/类
@Test
public void testBasicUsage() throws Exception {
    // Turkish primary-strength collation: dotted/dotless I case variants collate equally.
    Settings settings = Settings.settingsBuilder()
            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .put("path.home", System.getProperty("path.home"))
            .put("index.analysis.analyzer.myAnalyzer.type", "icu_collation")
            .put("index.analysis.analyzer.myAnalyzer.language", "tr")
            .put("index.analysis.analyzer.myAnalyzer.strength", "primary")
            .put("index.analysis.analyzer.myAnalyzer.decomposition", "canonical")
            .build();
    AnalysisService analysisService = createAnalysisService(new Index("test"), settings);
    Analyzer analyzer = analysisService.analyzer("myAnalyzer").analyzer();
    BytesRef upper = bytesFromTokenStream(analyzer.tokenStream(null, "I WİLL USE TURKİSH CASING"));
    BytesRef lower = bytesFromTokenStream(analyzer.tokenStream(null, "ı will use turkish casıng"));
    assertTrue(compare(upper.bytes, lower.bytes) == 0);
}
示例6: testNormalization
import org.elasticsearch.index.analysis.AnalysisService; //导入依赖的package包/类
@Test
public void testNormalization() throws IOException {
    // Canonical decomposition: "I" + combining dot above must normalize to "İ"
    // so both spellings produce identical primary-strength collation keys.
    Settings settings = Settings.settingsBuilder()
            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .put("path.home", System.getProperty("path.home"))
            .put("index.analysis.analyzer.myAnalyzer.type", "icu_collation")
            .put("index.analysis.analyzer.myAnalyzer.language", "tr")
            .put("index.analysis.analyzer.myAnalyzer.strength", "primary")
            .put("index.analysis.analyzer.myAnalyzer.decomposition", "canonical")
            .build();
    AnalysisService analysisService = createAnalysisService(new Index("test"), settings);
    Analyzer analyzer = analysisService.analyzer("myAnalyzer").analyzer();
    BytesRef upper = bytesFromTokenStream(analyzer.tokenStream(null, "I W\u0049\u0307LL USE TURKİSH CASING"));
    BytesRef lower = bytesFromTokenStream(analyzer.tokenStream(null, "ı will use turkish casıng"));
    assertTrue(compare(upper.bytes, lower.bytes) == 0);
}
示例7: testSecondaryStrength
import org.elasticsearch.index.analysis.AnalysisService; //导入依赖的package包/类
@Test
public void testSecondaryStrength() throws IOException {
    // Secondary strength ignores case, so TESTING and testing collate equally.
    Settings settings = Settings.settingsBuilder()
            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .put("path.home", System.getProperty("path.home"))
            .put("index.analysis.analyzer.myAnalyzer.type", "icu_collation")
            .put("index.analysis.analyzer.myAnalyzer.language", "en")
            .put("index.analysis.analyzer.myAnalyzer.strength", "secondary")
            .put("index.analysis.analyzer.myAnalyzer.decomposition", "no")
            .build();
    AnalysisService analysisService = createAnalysisService(new Index("test"), settings);
    Analyzer analyzer = analysisService.analyzer("myAnalyzer").analyzer();
    BytesRef upper = bytesFromTokenStream(analyzer.tokenStream("content", "TESTING"));
    BytesRef lower = bytesFromTokenStream(analyzer.tokenStream("content", "testing"));
    assertTrue(compare(upper.bytes, lower.bytes) == 0);
}
示例8: testIgnorePunctuation
import org.elasticsearch.index.analysis.AnalysisService; //导入依赖的package包/类
@Test
public void testIgnorePunctuation() throws IOException {
    // With alternate=shifted at primary strength, punctuation is ignorable:
    // "foo-bar" and "foo bar" collate equally.
    Settings settings = Settings.settingsBuilder()
            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .put("path.home", System.getProperty("path.home"))
            .put("index.analysis.analyzer.myAnalyzer.type", "icu_collation")
            .put("index.analysis.analyzer.myAnalyzer.language", "en")
            .put("index.analysis.analyzer.myAnalyzer.strength", "primary")
            .put("index.analysis.analyzer.myAnalyzer.alternate", "shifted")
            .build();
    AnalysisService analysisService = createAnalysisService(new Index("test"), settings);
    Analyzer analyzer = analysisService.analyzer("myAnalyzer").analyzer();
    BytesRef withPunctuation = bytesFromTokenStream(analyzer.tokenStream("content", "foo-bar"));
    BytesRef withoutPunctuation = bytesFromTokenStream(analyzer.tokenStream("content", "foo bar"));
    assertTrue(compare(withPunctuation.bytes, withoutPunctuation.bytes) == 0);
}
示例9: testIgnoreWhitespace
import org.elasticsearch.index.analysis.AnalysisService; //导入依赖的package包/类
@Test
public void testIgnoreWhitespace() throws IOException {
    // variableTop=4096 (SPACE) makes whitespace ignorable while punctuation still counts.
    Settings settings = Settings.settingsBuilder()
            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .put("path.home", System.getProperty("path.home"))
            .put("index.analysis.analyzer.myAnalyzer.type", "icu_collation")
            .put("index.analysis.analyzer.myAnalyzer.language", "en")
            .put("index.analysis.analyzer.myAnalyzer.strength", "primary")
            .put("index.analysis.analyzer.myAnalyzer.alternate", "shifted")
            .put("index.analysis.analyzer.myAnalyzer.variableTop", 4096) // SPACE
            .build();
    AnalysisService analysisService = createAnalysisService(new Index("test"), settings);
    Analyzer analyzer = analysisService.analyzer("myAnalyzer").analyzer();
    BytesRef withoutSpace = bytesFromTokenStream(analyzer.tokenStream(null, "foobar"));
    BytesRef withSpace = bytesFromTokenStream(analyzer.tokenStream(null, "foo bar"));
    assertTrue(compare(withoutSpace.bytes, withSpace.bytes) == 0);
    // now assert that punctuation still matters: foo-bar < foo bar
    BytesRef withPunctuation = bytesFromTokenStream(analyzer.tokenStream(null, "foo-bar"));
    assertTrue(compare(withPunctuation.bytes, withoutSpace.bytes) < 0);
}
示例10: testNumerics
import org.elasticsearch.index.analysis.AnalysisService; //导入依赖的package包/类
@Test
public void testNumerics() throws IOException {
    // Numeric collation: "foobar-9" must sort before "foobar-10" (9 < 10 numerically,
    // even though "10" < "9" lexicographically).
    Index index = new Index("test");
    Settings settings = Settings.settingsBuilder()
            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .put("path.home", System.getProperty("path.home"))
            .put("index.analysis.analyzer.myAnalyzer.type", "icu_collation")
            .put("index.analysis.analyzer.myAnalyzer.language", "en")
            .put("index.analysis.analyzer.myAnalyzer.numeric", true)
            .build();
    AnalysisService analysisService = createAnalysisService(index, settings);
    Analyzer analyzer = analysisService.analyzer("myAnalyzer").analyzer();
    TokenStream tsNine = analyzer.tokenStream(null, "foobar-9");
    BytesRef b1 = bytesFromTokenStream(tsNine);
    TokenStream tsTen = analyzer.tokenStream(null, "foobar-10");
    BytesRef b2 = bytesFromTokenStream(tsTen);
    // Comparator contract only guarantees a negative value, not exactly -1; assert the
    // sign, consistent with the other ordering assertions in this file.
    assertTrue(compare(b1.bytes, b2.bytes) < 0);
}
示例11: testUpperCaseFirst
import org.elasticsearch.index.analysis.AnalysisService; //导入依赖的package包/类
@Test
public void testUpperCaseFirst() throws IOException {
    // caseFirst=upper at tertiary strength: "Resume" must sort before "resume".
    Settings settings = Settings.settingsBuilder()
            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .put("path.home", System.getProperty("path.home"))
            .put("index.analysis.analyzer.myAnalyzer.type", "icu_collation")
            .put("index.analysis.analyzer.myAnalyzer.language", "en")
            .put("index.analysis.analyzer.myAnalyzer.strength", "tertiary")
            .put("index.analysis.analyzer.myAnalyzer.caseFirst", "upper")
            .build();
    AnalysisService analysisService = createAnalysisService(new Index("test"), settings);
    Analyzer analyzer = analysisService.analyzer("myAnalyzer").analyzer();
    BytesRef lowerKey = bytesFromTokenStream(analyzer.tokenStream(null, "resume"));
    BytesRef upperKey = bytesFromTokenStream(analyzer.tokenStream(null, "Resume"));
    assertTrue(compare(upperKey.bytes, lowerKey.bytes) < 0);
}
示例12: createAnalysisService
import org.elasticsearch.index.analysis.AnalysisService; //导入依赖的package包/类
private AnalysisService createAnalysisService() {
    // Build index settings from the bundled icu_folding.json test configuration.
    Settings settings = Settings.settingsBuilder()
            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .put("path.home", System.getProperty("path.home"))
            .loadFromStream("icu_folding.json", getClass().getResourceAsStream("/org/xbib/elasticsearch/index/analysis/icu/icu_folding.json"))
            .build();
    Injector parent = new ModulesBuilder()
            .add(new SettingsModule(settings), new EnvironmentModule(new Environment(settings)))
            .createInjector();
    // Register the ICU plugin's analysis components before building the child injector.
    AnalysisModule analysisModule = new AnalysisModule(settings, parent.getInstance(IndicesAnalysisService.class));
    new IcuPlugin(settings).onModule(analysisModule);
    Index index = new Index("test");
    return new ModulesBuilder()
            .add(new IndexSettingsModule(index, settings), new IndexNameModule(index), analysisModule)
            .createChildInjector(parent)
            .getInstance(AnalysisService.class);
}
示例13: testLetterNonBreak
import org.elasticsearch.index.analysis.AnalysisService; //导入依赖的package包/类
@Test
public void testLetterNonBreak() throws IOException {
    // The hyphen-aware ICU tokenizer must keep "Bindestrich-Wort" as a single token
    // while stripping surrounding punctuation.
    String[] expected = {
            "Das", "ist", "ein", "Bindestrich-Wort", "oder", "etwa",
            "nicht", "Jetzt", "kommen", "wir", "zum", "Ende"
    };
    Tokenizer tokenizer = createAnalysisService().tokenizer("my_hyphen_icu_tokenizer").create();
    tokenizer.setReader(new StringReader("Das ist ein Bindestrich-Wort, oder etwa nicht? Jetzt kommen wir zum Ende."));
    assertSimpleTSOutput(tokenizer, expected);
}
示例14: createAnalysisService
import org.elasticsearch.index.analysis.AnalysisService; //导入依赖的package包/类
private AnalysisService createAnalysisService() {
    // Build index settings from the bundled icu_tokenizer.json test configuration.
    Settings settings = Settings.settingsBuilder()
            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .put("path.home", System.getProperty("path.home"))
            .loadFromStream("icu_tokenizer.json", getClass().getResourceAsStream("/org/xbib/elasticsearch/index/analysis/icu/icu_tokenizer.json"))
            .build();
    Injector parent = new ModulesBuilder()
            .add(new SettingsModule(settings), new EnvironmentModule(new Environment(settings)))
            .createInjector();
    // Register the ICU plugin's analysis components before building the child injector.
    AnalysisModule analysisModule = new AnalysisModule(settings, parent.getInstance(IndicesAnalysisService.class));
    new IcuPlugin(settings).onModule(analysisModule);
    Index index = new Index("test");
    return new ModulesBuilder()
            .add(new IndexSettingsModule(index, settings), new IndexNameModule(index), analysisModule)
            .createChildInjector(parent)
            .getInstance(AnalysisService.class);
}
示例15: getAnalysisService
import org.elasticsearch.index.analysis.AnalysisService; //导入依赖的package包/类
private AnalysisService getAnalysisService() {
    Index index = new Index("test");
    Settings settings = ImmutableSettings.settingsBuilder()
            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .build();
    // Parent injector supplies node-level services; the child injector adds the
    // index-scoped analysis module with the Rosette binder processor installed.
    Injector parent = new ModulesBuilder()
            .add(new SettingsModule(settings),
                 new EnvironmentModule(new Environment(settings)),
                 new IndicesAnalysisModule())
            .createInjector();
    AnalysisModule analysisModule = new AnalysisModule(settings, parent.getInstance(IndicesAnalysisService.class))
            .addProcessor(new RosetteAnalysisBinderProcessor());
    return new ModulesBuilder()
            .add(new IndexSettingsModule(index, settings), new IndexNameModule(index), analysisModule)
            .createChildInjector(parent)
            .getInstance(AnalysisService.class);
}
开发者ID:basis-technology-corp,项目名称:elasticsearch-analysis-rosette,代码行数:17,代码来源:SimpleRosetteAnalysisTests.java