

Java AnalysisPlugin Class Code Examples

This article collects typical usage examples of the Java class org.elasticsearch.plugins.AnalysisPlugin. If you are wondering what the AnalysisPlugin class is for, how it is used, or what real code built on it looks like, the selected examples below should help.


The AnalysisPlugin class belongs to the org.elasticsearch.plugins package. Eleven code examples of the class are shown below, sorted by popularity by default.
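
For orientation, here is a minimal sketch (not taken from any of the examples below) of what a plugin class implementing AnalysisPlugin looks like. MyAnalysisPlugin is a made-up name, and MyFilterTokenFilterFactory merely mirrors the test helper that several of the examples register; a real plugin would supply its own factory classes.

import java.util.Map;
import static java.util.Collections.singletonMap;

import org.elasticsearch.index.analysis.TokenFilterFactory;
import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider;
import org.elasticsearch.plugins.AnalysisPlugin;
import org.elasticsearch.plugins.Plugin;

public class MyAnalysisPlugin extends Plugin implements AnalysisPlugin {
    // AnalysisPlugin declares one getter per component type (char filters, token
    // filters, tokenizers, analyzers, hunspell dictionaries); each has a default
    // implementation returning an empty map, so a plugin overrides only what it adds.
    @Override
    public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() {
        // "myfilter" and MyFilterTokenFilterFactory are placeholders for a real
        // filter name and factory class.
        return singletonMap("myfilter", MyFilterTokenFilterFactory::new);
    }
}

The examples that follow show the other side of the contract: AnalysisModule collecting these maps from every installed plugin and merging them with the built-in components.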

Example 1: setupTokenizers

import org.elasticsearch.plugins.AnalysisPlugin; // import the required package/class
private NamedRegistry<AnalysisProvider<TokenizerFactory>> setupTokenizers(List<AnalysisPlugin> plugins) {
    NamedRegistry<AnalysisProvider<TokenizerFactory>> tokenizers = new NamedRegistry<>("tokenizer");
    tokenizers.register("standard", StandardTokenizerFactory::new);
    tokenizers.register("uax_url_email", UAX29URLEmailTokenizerFactory::new);
    tokenizers.register("path_hierarchy", PathHierarchyTokenizerFactory::new);
    tokenizers.register("PathHierarchy", PathHierarchyTokenizerFactory::new);
    tokenizers.register("keyword", KeywordTokenizerFactory::new);
    tokenizers.register("letter", LetterTokenizerFactory::new);
    tokenizers.register("lowercase", LowerCaseTokenizerFactory::new);
    tokenizers.register("whitespace", WhitespaceTokenizerFactory::new);
    tokenizers.register("nGram", NGramTokenizerFactory::new);
    tokenizers.register("ngram", NGramTokenizerFactory::new);
    tokenizers.register("edgeNGram", EdgeNGramTokenizerFactory::new);
    tokenizers.register("edge_ngram", EdgeNGramTokenizerFactory::new);
    tokenizers.register("pattern", PatternTokenizerFactory::new);
    tokenizers.register("classic", ClassicTokenizerFactory::new);
    tokenizers.register("thai", ThaiTokenizerFactory::new);
    tokenizers.extractAndRegister(plugins, AnalysisPlugin::getTokenizers);
    return tokenizers;
}
 
Developer ID: justor; Project: elasticsearch_my; Code lines: 21; Source file: AnalysisModule.java
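
The extractAndRegister(plugins, AnalysisPlugin::getTokenizers) call at the end is what merges plugin-supplied tokenizers into the same registry as the built-ins. A hedged sketch of the plugin side follows; MyTokenizerPlugin and the "my_keyword" name are made up, and KeywordTokenizerFactory is reused only because, as the registrations above show, its constructor already matches the AnalysisProvider signature.

import java.util.Map;
import static java.util.Collections.singletonMap;

import org.elasticsearch.index.analysis.KeywordTokenizerFactory;
import org.elasticsearch.index.analysis.TokenizerFactory;
import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider;
import org.elasticsearch.plugins.AnalysisPlugin;
import org.elasticsearch.plugins.Plugin;

public class MyTokenizerPlugin extends Plugin implements AnalysisPlugin {
    @Override
    public Map<String, AnalysisProvider<TokenizerFactory>> getTokenizers() {
        // setupTokenizers() above picks this entry up via extractAndRegister and
        // registers it next to "standard", "keyword", and the other built-ins.
        return singletonMap("my_keyword", KeywordTokenizerFactory::new);
    }
}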

Example 2: analyze

import org.elasticsearch.plugins.AnalysisPlugin; // import the required package/class
private List<String> analyze(Settings settings, String analyzerName, String text) throws IOException {
    IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings);
    AnalysisModule analysisModule = new AnalysisModule(new Environment(settings), singletonList(new AnalysisPlugin() {
        @Override
        public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() {
            return singletonMap("myfilter", MyFilterTokenFilterFactory::new);
        }
    }));
    IndexAnalyzers indexAnalyzers = analysisModule.getAnalysisRegistry().build(idxSettings);
    Analyzer analyzer = indexAnalyzers.get(analyzerName).analyzer();

    AllEntries allEntries = new AllEntries();
    allEntries.addText("field1", text, 1.0f);

    TokenStream stream = AllTokenStream.allTokenStream("_all", text, 1.0f, analyzer);
    stream.reset();
    CharTermAttribute termAtt = stream.addAttribute(CharTermAttribute.class);

    List<String> terms = new ArrayList<>();
    while (stream.incrementToken()) {
        String tokText = termAtt.toString();
        terms.add(tokText);
    }
    return terms;
}
 
Developer ID: justor; Project: elasticsearch_my; Code lines: 26; Source file: CompoundAnalysisTests.java

Example 3: createTestAnalysis

import org.elasticsearch.plugins.AnalysisPlugin; // import the required package/class
/**
 * Creates a TestAnalysis with all the default analyzers configured.
 */
public static TestAnalysis createTestAnalysis(Index index, Settings nodeSettings, Settings settings,
                                              AnalysisPlugin... analysisPlugins) throws IOException {
    Settings indexSettings = Settings.builder().put(settings)
            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .build();
    return createTestAnalysis(IndexSettingsModule.newIndexSettings(index, indexSettings), nodeSettings, analysisPlugins);
}
 
Developer ID: justor; Project: elasticsearch_my; Code lines: 11; Source file: ESTestCase.java
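
A sketch of how a test might call this helper. The Index constructor arguments and the TestAnalysis field accessed here (tokenFilter) are assumptions about the surrounding test framework, and MyAnalysisPlugin is the hypothetical plugin from the earlier sketch.

// From inside or outside an ESTestCase subclass (the method is public static):
ESTestCase.TestAnalysis analysis = ESTestCase.createTestAnalysis(
        new Index("test", "_na_"),   // index name and (unused) UUID
        Settings.EMPTY,              // nodeSettings
        Settings.EMPTY,              // index-level settings
        new MyAnalysisPlugin());     // plugins contributing extra components
// Assumed field: the registry of token filter factories, keyed by name.
TokenFilterFactory lowercase = analysis.tokenFilter.get("lowercase");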

Example 4: AnalysisModule

import org.elasticsearch.plugins.AnalysisPlugin; // import the required package/class
public AnalysisModule(Environment environment, List<AnalysisPlugin> plugins) throws IOException {
    NamedRegistry<AnalysisProvider<CharFilterFactory>> charFilters = setupCharFilters(plugins);
    NamedRegistry<org.apache.lucene.analysis.hunspell.Dictionary> hunspellDictionaries = setupHunspellDictionaries(plugins);
    hunspellService = new HunspellService(environment.settings(), environment, hunspellDictionaries.getRegistry());
    NamedRegistry<AnalysisProvider<TokenFilterFactory>> tokenFilters = setupTokenFilters(plugins, hunspellService);
    NamedRegistry<AnalysisProvider<TokenizerFactory>> tokenizers = setupTokenizers(plugins);
    NamedRegistry<AnalysisProvider<AnalyzerProvider<?>>> analyzers = setupAnalyzers(plugins);
    NamedRegistry<AnalysisProvider<AnalyzerProvider<?>>> normalizers = setupNormalizers(plugins);
    analysisRegistry = new AnalysisRegistry(environment, charFilters.getRegistry(), tokenFilters.getRegistry(), tokenizers
        .getRegistry(), analyzers.getRegistry(), normalizers.getRegistry());
}
 
Developer ID: justor; Project: elasticsearch_my; Code lines: 12; Source file: AnalysisModule.java

Example 5: setupCharFilters

import org.elasticsearch.plugins.AnalysisPlugin; // import the required package/class
private NamedRegistry<AnalysisProvider<CharFilterFactory>> setupCharFilters(List<AnalysisPlugin> plugins) {
    NamedRegistry<AnalysisProvider<CharFilterFactory>> charFilters = new NamedRegistry<>("char_filter");
    charFilters.register("html_strip", HtmlStripCharFilterFactory::new);
    charFilters.register("pattern_replace", requriesAnalysisSettings(PatternReplaceCharFilterFactory::new));
    charFilters.register("mapping", requriesAnalysisSettings(MappingCharFilterFactory::new));
    charFilters.extractAndRegister(plugins, AnalysisPlugin::getCharFilters);
    return charFilters;
}
 
Developer ID: justor; Project: elasticsearch_my; Code lines: 9; Source file: AnalysisModule.java
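
setupCharFilters() uses the same extractAndRegister mechanism, so plugins can add char filters through getCharFilters(). The sketch below spells the provider out as a lambda to make the AnalysisProvider signature visible; the plugin class name and the "my_html_strip" key are made up, and HtmlStripCharFilterFactory is borrowed from the built-ins above purely for illustration.

import java.util.Map;
import static java.util.Collections.singletonMap;

import org.elasticsearch.index.analysis.CharFilterFactory;
import org.elasticsearch.index.analysis.HtmlStripCharFilterFactory;
import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider;
import org.elasticsearch.plugins.AnalysisPlugin;
import org.elasticsearch.plugins.Plugin;

public class MyCharFilterPlugin extends Plugin implements AnalysisPlugin {
    @Override
    public Map<String, AnalysisProvider<CharFilterFactory>> getCharFilters() {
        // The lambda parameters are exactly what AnalysisProvider.get(...) receives
        // when the char filter is instantiated for a concrete index.
        AnalysisProvider<CharFilterFactory> provider =
                (indexSettings, env, name, settings) ->
                        new HtmlStripCharFilterFactory(indexSettings, env, name, settings);
        return singletonMap("my_html_strip", provider);
    }
}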

Example 6: getNewRegistry

import org.elasticsearch.plugins.AnalysisPlugin; // import the required package/class
public AnalysisRegistry getNewRegistry(Settings settings) {
    try {
        return new AnalysisModule(new Environment(settings), singletonList(new AnalysisPlugin() {
            @Override
            public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() {
                return singletonMap("myfilter", MyFilterTokenFilterFactory::new);
            }
        })).getAnalysisRegistry();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
 
Developer ID: justor; Project: elasticsearch_my; Code lines: 13; Source file: AnalysisModuleTests.java

Example 7: testDefaultsCompoundAnalysis

import org.elasticsearch.plugins.AnalysisPlugin; // import the required package/class
public void testDefaultsCompoundAnalysis() throws Exception {
    Settings settings = getJsonSettings();
    IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings);
    AnalysisModule analysisModule = new AnalysisModule(new Environment(settings), singletonList(new AnalysisPlugin() {
        @Override
        public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() {
            return singletonMap("myfilter", MyFilterTokenFilterFactory::new);
        }
    }));
    TokenFilterFactory filterFactory = analysisModule.getAnalysisRegistry().buildTokenFilterFactories(idxSettings).get("dict_dec");
    MatcherAssert.assertThat(filterFactory, instanceOf(DictionaryCompoundWordTokenFilterFactory.class));
}
 
Developer ID: justor; Project: elasticsearch_my; Code lines: 13; Source file: CompoundAnalysisTests.java
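
getJsonSettings() is not shown in this snippet; for the "dict_dec" lookup to succeed, the settings it loads must define a token filter of type dictionary_decompounder under that name. A rough programmatic equivalent is sketched below; the word list is invented and the putArray call assumes the Settings.Builder API of this Elasticsearch version.

Settings settings = Settings.builder()
        .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
        .put("index.analysis.filter.dict_dec.type", "dictionary_decompounder")
        .putArray("index.analysis.filter.dict_dec.word_list", "donau", "dampf", "schiff")
        .build();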

Example 8: setupHunspellDictionaries

import org.elasticsearch.plugins.AnalysisPlugin; // import the required package/class
public NamedRegistry<org.apache.lucene.analysis.hunspell.Dictionary> setupHunspellDictionaries(List<AnalysisPlugin> plugins) {
    NamedRegistry<org.apache.lucene.analysis.hunspell.Dictionary> hunspellDictionaries = new NamedRegistry<>("dictionary");
    hunspellDictionaries.extractAndRegister(plugins, AnalysisPlugin::getHunspellDictionaries);
    return hunspellDictionaries;
}
 
Developer ID: justor; Project: elasticsearch_my; Code lines: 6; Source file: AnalysisModule.java
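
Hunspell dictionaries are collected the same way, except the map values are prebuilt Lucene Dictionary objects rather than providers. In the sketch below the plugin class, the "fr_FR" key and the loadFrenchDictionary() helper are all hypothetical; constructing the Dictionary itself (from .aff/.dic resources) is deliberately left out.

import java.util.Map;
import static java.util.Collections.singletonMap;

import org.apache.lucene.analysis.hunspell.Dictionary;
import org.elasticsearch.plugins.AnalysisPlugin;
import org.elasticsearch.plugins.Plugin;

public class MyHunspellPlugin extends Plugin implements AnalysisPlugin {
    // Hypothetical: a dictionary loaded once when the plugin is created.
    private final Dictionary frenchDictionary = loadFrenchDictionary();

    @Override
    public Map<String, Dictionary> getHunspellDictionaries() {
        // Picked up by setupHunspellDictionaries() and handed to HunspellService
        // (see Example 4), keyed by name.
        return singletonMap("fr_FR", frenchDictionary);
    }

    private static Dictionary loadFrenchDictionary() {
        // Placeholder: a real plugin would build the Lucene Hunspell Dictionary
        // from its affix and dictionary files here.
        throw new UnsupportedOperationException("illustrative placeholder");
    }
}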

Example 9: setupTokenFilters

import org.elasticsearch.plugins.AnalysisPlugin; // import the required package/class
private NamedRegistry<AnalysisProvider<TokenFilterFactory>> setupTokenFilters(List<AnalysisPlugin> plugins, HunspellService
    hunspellService) {
    NamedRegistry<AnalysisProvider<TokenFilterFactory>> tokenFilters = new NamedRegistry<>("token_filter");
    tokenFilters.register("stop", StopTokenFilterFactory::new);
    tokenFilters.register("reverse", ReverseTokenFilterFactory::new);
    tokenFilters.register("asciifolding", ASCIIFoldingTokenFilterFactory::new);
    tokenFilters.register("length", LengthTokenFilterFactory::new);
    tokenFilters.register("lowercase", LowerCaseTokenFilterFactory::new);
    tokenFilters.register("uppercase", UpperCaseTokenFilterFactory::new);
    tokenFilters.register("porter_stem", PorterStemTokenFilterFactory::new);
    tokenFilters.register("kstem", KStemTokenFilterFactory::new);
    tokenFilters.register("standard", StandardTokenFilterFactory::new);
    tokenFilters.register("nGram", NGramTokenFilterFactory::new);
    tokenFilters.register("ngram", NGramTokenFilterFactory::new);
    tokenFilters.register("edgeNGram", EdgeNGramTokenFilterFactory::new);
    tokenFilters.register("edge_ngram", EdgeNGramTokenFilterFactory::new);
    tokenFilters.register("shingle", ShingleTokenFilterFactory::new);
    tokenFilters.register("min_hash", MinHashTokenFilterFactory::new);
    tokenFilters.register("unique", UniqueTokenFilterFactory::new);
    tokenFilters.register("truncate", requriesAnalysisSettings(TruncateTokenFilterFactory::new));
    tokenFilters.register("trim", TrimTokenFilterFactory::new);
    tokenFilters.register("limit", LimitTokenCountFilterFactory::new);
    tokenFilters.register("common_grams", requriesAnalysisSettings(CommonGramsTokenFilterFactory::new));
    tokenFilters.register("snowball", SnowballTokenFilterFactory::new);
    tokenFilters.register("stemmer", StemmerTokenFilterFactory::new);
    tokenFilters.register("word_delimiter", WordDelimiterTokenFilterFactory::new);
    tokenFilters.register("word_delimiter_graph", WordDelimiterGraphTokenFilterFactory::new);
    tokenFilters.register("delimited_payload_filter", DelimitedPayloadTokenFilterFactory::new);
    tokenFilters.register("elision", ElisionTokenFilterFactory::new);
    tokenFilters.register("flatten_graph", FlattenGraphTokenFilterFactory::new);
    tokenFilters.register("keep", requriesAnalysisSettings(KeepWordFilterFactory::new));
    tokenFilters.register("keep_types", requriesAnalysisSettings(KeepTypesFilterFactory::new));
    tokenFilters.register("pattern_capture", requriesAnalysisSettings(PatternCaptureGroupTokenFilterFactory::new));
    tokenFilters.register("pattern_replace", requriesAnalysisSettings(PatternReplaceTokenFilterFactory::new));
    tokenFilters.register("dictionary_decompounder", requriesAnalysisSettings(DictionaryCompoundWordTokenFilterFactory::new));
    tokenFilters.register("hyphenation_decompounder", requriesAnalysisSettings(HyphenationCompoundWordTokenFilterFactory::new));
    tokenFilters.register("arabic_stem", ArabicStemTokenFilterFactory::new);
    tokenFilters.register("brazilian_stem", BrazilianStemTokenFilterFactory::new);
    tokenFilters.register("czech_stem", CzechStemTokenFilterFactory::new);
    tokenFilters.register("dutch_stem", DutchStemTokenFilterFactory::new);
    tokenFilters.register("french_stem", FrenchStemTokenFilterFactory::new);
    tokenFilters.register("german_stem", GermanStemTokenFilterFactory::new);
    tokenFilters.register("russian_stem", RussianStemTokenFilterFactory::new);
    tokenFilters.register("keyword_marker", requriesAnalysisSettings(KeywordMarkerTokenFilterFactory::new));
    tokenFilters.register("stemmer_override", requriesAnalysisSettings(StemmerOverrideTokenFilterFactory::new));
    tokenFilters.register("arabic_normalization", ArabicNormalizationFilterFactory::new);
    tokenFilters.register("german_normalization", GermanNormalizationFilterFactory::new);
    tokenFilters.register("hindi_normalization", HindiNormalizationFilterFactory::new);
    tokenFilters.register("indic_normalization", IndicNormalizationFilterFactory::new);
    tokenFilters.register("sorani_normalization", SoraniNormalizationFilterFactory::new);
    tokenFilters.register("persian_normalization", PersianNormalizationFilterFactory::new);
    tokenFilters.register("scandinavian_normalization", ScandinavianNormalizationFilterFactory::new);
    tokenFilters.register("scandinavian_folding", ScandinavianFoldingFilterFactory::new);
    tokenFilters.register("serbian_normalization", SerbianNormalizationFilterFactory::new);

    tokenFilters.register("hunspell", requriesAnalysisSettings((indexSettings, env, name, settings) -> new HunspellTokenFilterFactory
        (indexSettings, name, settings, hunspellService)));
    tokenFilters.register("cjk_bigram", CJKBigramFilterFactory::new);
    tokenFilters.register("cjk_width", CJKWidthFilterFactory::new);

    tokenFilters.register("apostrophe", ApostropheFilterFactory::new);
    tokenFilters.register("classic", ClassicFilterFactory::new);
    tokenFilters.register("decimal_digit", DecimalDigitFilterFactory::new);
    tokenFilters.register("fingerprint", FingerprintTokenFilterFactory::new);
    tokenFilters.extractAndRegister(plugins, AnalysisPlugin::getTokenFilters);
    return tokenFilters;
}
 
Developer ID: justor; Project: elasticsearch_my; Code lines: 68; Source file: AnalysisModule.java
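
Two details are worth calling out. First, requriesAnalysisSettings(...) (the misspelling is in the original source) wraps a provider so that it refuses to be built without explicit per-filter settings. Second, a plugin-supplied provider can express the same requirement itself by overriding requiresAnalysisSettings() on AnalysisProvider. A sketch, with MyTokenFilterPlugin, the "my_synonyms" key and MySynonymTokenFilterFactory all hypothetical:

import java.io.IOException;
import java.util.Map;
import static java.util.Collections.singletonMap;

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.TokenFilterFactory;
import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider;
import org.elasticsearch.plugins.AnalysisPlugin;
import org.elasticsearch.plugins.Plugin;

public class MyTokenFilterPlugin extends Plugin implements AnalysisPlugin {
    @Override
    public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() {
        return singletonMap("my_synonyms", new AnalysisProvider<TokenFilterFactory>() {
            @Override
            public TokenFilterFactory get(IndexSettings indexSettings, Environment env,
                                          String name, Settings settings) throws IOException {
                // MySynonymTokenFilterFactory is hypothetical; it would read its
                // synonym list from the per-filter settings passed in here.
                return new MySynonymTokenFilterFactory(indexSettings, env, name, settings);
            }

            @Override
            public boolean requiresAnalysisSettings() {
                return true; // fail fast if used without explicit settings
            }
        });
    }
}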

Example 10: setupAnalyzers

import org.elasticsearch.plugins.AnalysisPlugin; // import the required package/class
private NamedRegistry<AnalysisProvider<AnalyzerProvider<?>>> setupAnalyzers(List<AnalysisPlugin> plugins) {
    NamedRegistry<AnalysisProvider<AnalyzerProvider<?>>> analyzers = new NamedRegistry<>("analyzer");
    analyzers.register("default", StandardAnalyzerProvider::new);
    analyzers.register("standard", StandardAnalyzerProvider::new);
    analyzers.register("standard_html_strip", StandardHtmlStripAnalyzerProvider::new);
    analyzers.register("simple", SimpleAnalyzerProvider::new);
    analyzers.register("stop", StopAnalyzerProvider::new);
    analyzers.register("whitespace", WhitespaceAnalyzerProvider::new);
    analyzers.register("keyword", KeywordAnalyzerProvider::new);
    analyzers.register("pattern", PatternAnalyzerProvider::new);
    analyzers.register("snowball", SnowballAnalyzerProvider::new);
    analyzers.register("arabic", ArabicAnalyzerProvider::new);
    analyzers.register("armenian", ArmenianAnalyzerProvider::new);
    analyzers.register("basque", BasqueAnalyzerProvider::new);
    analyzers.register("brazilian", BrazilianAnalyzerProvider::new);
    analyzers.register("bulgarian", BulgarianAnalyzerProvider::new);
    analyzers.register("catalan", CatalanAnalyzerProvider::new);
    analyzers.register("chinese", ChineseAnalyzerProvider::new);
    analyzers.register("cjk", CjkAnalyzerProvider::new);
    analyzers.register("czech", CzechAnalyzerProvider::new);
    analyzers.register("danish", DanishAnalyzerProvider::new);
    analyzers.register("dutch", DutchAnalyzerProvider::new);
    analyzers.register("english", EnglishAnalyzerProvider::new);
    analyzers.register("finnish", FinnishAnalyzerProvider::new);
    analyzers.register("french", FrenchAnalyzerProvider::new);
    analyzers.register("galician", GalicianAnalyzerProvider::new);
    analyzers.register("german", GermanAnalyzerProvider::new);
    analyzers.register("greek", GreekAnalyzerProvider::new);
    analyzers.register("hindi", HindiAnalyzerProvider::new);
    analyzers.register("hungarian", HungarianAnalyzerProvider::new);
    analyzers.register("indonesian", IndonesianAnalyzerProvider::new);
    analyzers.register("irish", IrishAnalyzerProvider::new);
    analyzers.register("italian", ItalianAnalyzerProvider::new);
    analyzers.register("latvian", LatvianAnalyzerProvider::new);
    analyzers.register("lithuanian", LithuanianAnalyzerProvider::new);
    analyzers.register("norwegian", NorwegianAnalyzerProvider::new);
    analyzers.register("persian", PersianAnalyzerProvider::new);
    analyzers.register("portuguese", PortugueseAnalyzerProvider::new);
    analyzers.register("romanian", RomanianAnalyzerProvider::new);
    analyzers.register("russian", RussianAnalyzerProvider::new);
    analyzers.register("sorani", SoraniAnalyzerProvider::new);
    analyzers.register("spanish", SpanishAnalyzerProvider::new);
    analyzers.register("swedish", SwedishAnalyzerProvider::new);
    analyzers.register("turkish", TurkishAnalyzerProvider::new);
    analyzers.register("thai", ThaiAnalyzerProvider::new);
    analyzers.register("fingerprint", FingerprintAnalyzerProvider::new);
    analyzers.extractAndRegister(plugins, AnalysisPlugin::getAnalyzers);
    return analyzers;
}
 
Developer ID: justor; Project: elasticsearch_my; Code lines: 50; Source file: AnalysisModule.java
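
Plugins can contribute complete analyzers the same way through getAnalyzers(); note that the return type carries AnalyzerProvider<?> rather than a concrete factory type. In the sketch below the plugin class and the "my_standard" key are made up, and StandardAnalyzerProvider is reused from the built-ins above only so that the constructor reference lines up.

import java.util.Map;
import static java.util.Collections.singletonMap;

import org.elasticsearch.index.analysis.AnalyzerProvider;
import org.elasticsearch.index.analysis.StandardAnalyzerProvider;
import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider;
import org.elasticsearch.plugins.AnalysisPlugin;
import org.elasticsearch.plugins.Plugin;

public class MyAnalyzerPlugin extends Plugin implements AnalysisPlugin {
    @Override
    public Map<String, AnalysisProvider<AnalyzerProvider<?>>> getAnalyzers() {
        // Merged by setupAnalyzers() via extractAndRegister, next to "standard",
        // "simple", the language analyzers, and so on.
        return singletonMap("my_standard", StandardAnalyzerProvider::new);
    }
}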

Example 11: setupNormalizers

import org.elasticsearch.plugins.AnalysisPlugin; // import the required package/class
private NamedRegistry<AnalysisProvider<AnalyzerProvider<?>>> setupNormalizers(List<AnalysisPlugin> plugins) {
    NamedRegistry<AnalysisProvider<AnalyzerProvider<?>>> normalizers = new NamedRegistry<>("normalizer");
    // TODO: provide built-in normalizer providers?
    // TODO: pluggability?
    return normalizers;
}
 
Developer ID: justor; Project: elasticsearch_my; Code lines: 7; Source file: AnalysisModule.java


Note: The org.elasticsearch.plugins.AnalysisPlugin class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors, and copyright remains with the original authors; consult each project's license before redistributing or using the code. Do not republish without permission.