本文整理汇总了Java中org.apache.lucene.analysis.tr.TurkishAnalyzer类的典型用法代码示例。如果您正苦于以下问题:Java TurkishAnalyzer类的具体用法?Java TurkishAnalyzer怎么用?Java TurkishAnalyzer使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
TurkishAnalyzer类属于org.apache.lucene.analysis.tr包,在下文中一共展示了TurkishAnalyzer类的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testStandardTokenizerVersions
import org.apache.lucene.analysis.tr.TurkishAnalyzer; //导入依赖的package包/类
// Verifies that the luceneMatchVersion configured in solrconfig.xml propagates into
// both TokenizerChain components and whole-analyzer field types.
// Relies on the enclosing SolrTestCase harness: `h` (test harness), `solrConfig`,
// and DEFAULT_VERSION are declared outside this view — TODO confirm their origin.
public void testStandardTokenizerVersions() throws Exception {
// Sanity check: the loaded config reports the expected default match version.
assertEquals(DEFAULT_VERSION, solrConfig.luceneMatchVersion);
final IndexSchema schema = h.getCore().getLatestSchema();
// Field type with no explicit version: tokenizer and filter inherit the default.
FieldType type = schema.getFieldType("textDefault");
TokenizerChain ana = (TokenizerChain) type.getIndexAnalyzer();
assertEquals(DEFAULT_VERSION, (ana.getTokenizerFactory()).getLuceneMatchVersion());
// Index 2 picks a specific filter in the chain — presumably the one whose version
// matters for this schema; verify against the schema file.
assertEquals(DEFAULT_VERSION, (ana.getTokenFilterFactories()[2]).getLuceneMatchVersion());
// Field type pinned to 4.x versions: tokenizer and filter carry their own versions.
type = schema.getFieldType("text40");
ana = (TokenizerChain) type.getIndexAnalyzer();
assertEquals(Version.LUCENE_4_0_0_ALPHA, (ana.getTokenizerFactory()).getLuceneMatchVersion());
assertEquals(Version.LUCENE_4_3_0, (ana.getTokenFilterFactories()[2]).getLuceneMatchVersion());
// Whole-analyzer field type (TurkishAnalyzer) with no explicit version: default applies.
type = schema.getFieldType("textTurkishAnalyzerDefault");
Analyzer ana1 = type.getIndexAnalyzer();
assertTrue(ana1 instanceof TurkishAnalyzer);
assertEquals(DEFAULT_VERSION, ana1.getVersion());
// Whole-analyzer field type pinned to 4.0.0-ALPHA.
type = schema.getFieldType("textTurkishAnalyzer40");
ana1 = type.getIndexAnalyzer();
assertTrue(ana1 instanceof TurkishAnalyzer);
assertEquals(Version.LUCENE_4_0_0_ALPHA, ana1.getVersion());
}
示例2: TurkishAnalyzerProvider
import org.apache.lucene.analysis.tr.TurkishAnalyzer; //导入依赖的package包/类
public TurkishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new TurkishAnalyzer(
Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, TurkishAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
);
analyzer.setVersion(version);
}
示例3: TurkishAnalyzerProvider
import org.apache.lucene.analysis.tr.TurkishAnalyzer; //导入依赖的package包/类
@Inject
public TurkishAnalyzerProvider(Index index, IndexSettingsService indexSettingsService, Environment env, @Assisted String name, @Assisted Settings settings) {
super(index, indexSettingsService.getSettings(), name, settings);
analyzer = new TurkishAnalyzer(Analysis.parseStopWords(env, settings, TurkishAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
analyzer.setVersion(version);
}
示例4: getDefaultStopSet
import org.apache.lucene.analysis.tr.TurkishAnalyzer; //导入依赖的package包/类
/**
 * Returns the default stop-word set for a language.
 *
 * <p>Language codes are matched case-insensitively. Stop lists are loaded from the
 * resources bundled with the corresponding Lucene analyzer/snowball modules.
 * Unknown languages fall back to the English {@link StandardAnalyzer#STOP_WORDS_SET}.
 *
 * @param language ISO 639-1 language code (e.g. {@code "en"}, {@code "tr"});
 *                 {@code "br"} is treated as Brazilian Portuguese and
 *                 {@code "cz"}/{@code "sk"} share the Czech list
 * @return the stop-word set for the language
 * @throws RuntimeException if the bundled stop-word resource cannot be loaded
 */
public static CharArraySet getDefaultStopSet(String language) {
    try {
        if ("en".equalsIgnoreCase(language)) {
            return StandardAnalyzer.STOP_WORDS_SET;
        } else if ("es".equalsIgnoreCase(language)) {
            return WordlistLoader.getSnowballWordSet(IOUtils.getDecodingReader(SnowballFilter.class, "spanish_stop.txt", StandardCharsets.UTF_8));
        } else if ("fr".equalsIgnoreCase(language)) {
            return WordlistLoader.getSnowballWordSet(IOUtils.getDecodingReader(SnowballFilter.class, "french_stop.txt", StandardCharsets.UTF_8));
        } else if ("de".equalsIgnoreCase(language)) {
            return WordlistLoader.getSnowballWordSet(IOUtils.getDecodingReader(SnowballFilter.class, "german_stop.txt", StandardCharsets.UTF_8));
        } else if ("pl".equalsIgnoreCase(language)) {
            // Polish list ships with the Morfologik analyzer module, plain word-per-line format.
            return WordlistLoader.getWordSet(IOUtils.getDecodingReader(PolishAnalyzer.class, "stopwords.txt", StandardCharsets.UTF_8), "#");
        } else if ("pt".equalsIgnoreCase(language) || "br".equalsIgnoreCase(language)) {
            return WordlistLoader.getSnowballWordSet(IOUtils.getDecodingReader(SnowballFilter.class, "portuguese_stop.txt", StandardCharsets.UTF_8));
        } else if ("it".equalsIgnoreCase(language)) {
            return WordlistLoader.getSnowballWordSet(IOUtils.getDecodingReader(SnowballFilter.class, "italian_stop.txt", StandardCharsets.UTF_8));
        } else if ("cz".equalsIgnoreCase(language) || "sk".equalsIgnoreCase(language)) {
            // No dedicated Slovak list bundled; reuse the Czech one.
            return WordlistLoader.getWordSet(IOUtils.getDecodingReader(CzechAnalyzer.class, "stopwords.txt", StandardCharsets.UTF_8), "#");
        } else if ("tr".equalsIgnoreCase(language)) {
            return TurkishAnalyzer.loadStopwordSet(false, TurkishAnalyzer.class, "stopwords.txt", "#");
        } else if ("ru".equalsIgnoreCase(language)) {
            return WordlistLoader.getSnowballWordSet(IOUtils.getDecodingReader(SnowballFilter.class, "russian_stop.txt", StandardCharsets.UTF_8));
        } else if ("ro".equalsIgnoreCase(language)) {
            return RomanianAnalyzer.loadStopwordSet(false, RomanianAnalyzer.class, "stopwords.txt", "#");
        } else if ("bg".equalsIgnoreCase(language)) {
            return BulgarianAnalyzer.loadStopwordSet(false, BulgarianAnalyzer.class, "stopwords.txt", "#");
        } else if ("nl".equalsIgnoreCase(language)) {
            return WordlistLoader.getSnowballWordSet(IOUtils.getDecodingReader(SnowballFilter.class, "dutch_stop.txt", StandardCharsets.UTF_8));
        }
    } catch (Exception e) {
        // Preserve the original exception as the cause (the old code dropped it,
        // destroying the stack trace) and say which language failed.
        throw new RuntimeException("Unable to load default stopword set for language: " + language, e);
    }
    // Unrecognized language: fall back to the English default.
    return StandardAnalyzer.STOP_WORDS_SET;
}
示例5: get
import org.apache.lucene.analysis.tr.TurkishAnalyzer; //导入依赖的package包/类
/**
 * Returns the lazily-held {@link TurkishAnalyzer} instance built by this provider.
 *
 * @return the configured analyzer
 */
@Override
public TurkishAnalyzer get() {
    return analyzer;
}
示例6: Tokenizer
import org.apache.lucene.analysis.tr.TurkishAnalyzer; //导入依赖的package包/类
/**
 * Registers per-language Lucene analyzers (all pinned to Lucene 4.4), keyed by
 * ISO 639-1 language code, into the {@code analyzers} map.
 *
 * <p>Populates the same 21 languages as before, in the same insertion order.
 * Note: Polish uses the Morfologik dictionary-based analyzer rather than a
 * stemming analyzer.
 */
public Tokenizer() {
    analyzers.put("en", new EnglishAnalyzer(Version.LUCENE_44));
    analyzers.put("es", new SpanishAnalyzer(Version.LUCENE_44));
    analyzers.put("de", new GermanAnalyzer(Version.LUCENE_44));
    analyzers.put("da", new DanishAnalyzer(Version.LUCENE_44));
    analyzers.put("el", new GreekAnalyzer(Version.LUCENE_44));
    analyzers.put("fr", new FrenchAnalyzer(Version.LUCENE_44));
    analyzers.put("it", new ItalianAnalyzer(Version.LUCENE_44));
    analyzers.put("pt", new PortugueseAnalyzer(Version.LUCENE_44));
    analyzers.put("ru", new RussianAnalyzer(Version.LUCENE_44));
    analyzers.put("fa", new PersianAnalyzer(Version.LUCENE_44));
    analyzers.put("ar", new ArabicAnalyzer(Version.LUCENE_44));
    analyzers.put("id", new IndonesianAnalyzer(Version.LUCENE_44));
    analyzers.put("pl", new MorfologikAnalyzer(Version.LUCENE_44));
    analyzers.put("nl", new DutchAnalyzer(Version.LUCENE_44));
    analyzers.put("no", new NorwegianAnalyzer(Version.LUCENE_44));
    analyzers.put("ro", new RomanianAnalyzer(Version.LUCENE_44));
    analyzers.put("sv", new SwedishAnalyzer(Version.LUCENE_44));
    analyzers.put("fi", new FinnishAnalyzer(Version.LUCENE_44));
    analyzers.put("tr", new TurkishAnalyzer(Version.LUCENE_44));
    analyzers.put("hu", new HungarianAnalyzer(Version.LUCENE_44));
    analyzers.put("bg", new BulgarianAnalyzer(Version.LUCENE_44));
}