This article collects typical usage examples of the Java class org.apache.lucene.analysis.util.ResourceLoader. If you are wondering what ResourceLoader is for and how it is used in practice, the curated code examples below may help.
The ResourceLoader class belongs to the org.apache.lucene.analysis.util package. The 15 examples below show it in a variety of real-world contexts drawn from open-source projects.
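Before the examples, here is a minimal sketch of how ResourceLoader is typically used: a factory implements ResourceLoaderAware, and Lucene/Solr calls its inform(ResourceLoader) method after construction so configuration files can be opened by name. The class and file names below (MyWordListFactory, my-words.txt) are illustrative assumptions, not taken from any example on this page.
import java.io.IOException;
import java.io.InputStream;

import org.apache.lucene.analysis.util.ClasspathResourceLoader;
import org.apache.lucene.analysis.util.ResourceLoader;
import org.apache.lucene.analysis.util.ResourceLoaderAware;

// Hypothetical factory: loads a word list through the ResourceLoader it is informed with.
public class MyWordListFactory implements ResourceLoaderAware {
  private final String wordsFile = "my-words.txt"; // illustrative resource name

  @Override
  public void inform(ResourceLoader loader) throws IOException {
    InputStream stream = loader.openResource(wordsFile); // resolved by the loader (classpath, conf dir, ...)
    try {
      // parse the stream here
    } finally {
      stream.close();
    }
  }

  public static void main(String[] args) throws IOException {
    // ClasspathResourceLoader is a simple concrete implementation, convenient for tests.
    new MyWordListFactory().inform(new ClasspathResourceLoader(MyWordListFactory.class));
  }
}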
Example 1: inform
import org.apache.lucene.analysis.util.ResourceLoader; // import the required package/class
@Override
public void inform(ResourceLoader loader) throws IOException {
  InputStream stream = null;
  try {
    if (dictFile != null) // the dictionary can be empty.
      dictionary = getWordSet(loader, dictFile, false);
    // TODO: Broken, because we cannot resolve real system id
    // ResourceLoader should also supply method like ClassLoader to get resource URL
    stream = loader.openResource(hypFile);
    final InputSource is = new InputSource(stream);
    is.setEncoding(encoding); // if it's null let xml parser decide
    is.setSystemId(hypFile);
    if (luceneMatchVersion.onOrAfter(Version.LUCENE_4_4_0)) {
      hyphenator = HyphenationCompoundWordTokenFilter.getHyphenationTree(is);
    } else {
      hyphenator = Lucene43HyphenationCompoundWordTokenFilter.getHyphenationTree(is);
    }
  } finally {
    IOUtils.closeWhileHandlingException(stream);
  }
}
Example 2: inform
import org.apache.lucene.analysis.util.ResourceLoader; // import the required package/class
@Override
public void inform(ResourceLoader loader) throws IOException {
  if (mapping != null) {
    List<String> wlist = null;
    File mappingFile = new File(mapping);
    if (mappingFile.exists()) {
      wlist = getLines(loader, mapping);
    } else {
      List<String> files = splitFileNames(mapping);
      wlist = new ArrayList<>();
      for (String file : files) {
        List<String> lines = getLines(loader, file.trim());
        wlist.addAll(lines);
      }
    }
    final NormalizeCharMap.Builder builder = new NormalizeCharMap.Builder();
    parseRules(wlist, builder);
    normMap = builder.build();
    if (normMap.map == null) {
      // if the inner FST is null, it means it accepts nothing (e.g. the file is empty)
      // so just set the whole map to null
      normMap = null;
    }
  }
}
Example 3: inform
import org.apache.lucene.analysis.util.ResourceLoader; // import the required package/class
@Override
public void inform(ResourceLoader loader) throws IOException {
  String dicts[] = dictionaryFiles.split(",");
  InputStream affix = null;
  List<InputStream> dictionaries = new ArrayList<>();
  try {
    dictionaries = new ArrayList<>();
    for (String file : dicts) {
      dictionaries.add(loader.openResource(file));
    }
    affix = loader.openResource(affixFile);
    this.dictionary = new Dictionary(affix, dictionaries, ignoreCase);
  } catch (ParseException e) {
    throw new IOException("Unable to load hunspell data! [dictionary=" + dictionaries + ",affix=" + affixFile + "]", e);
  } finally {
    IOUtils.closeWhileHandlingException(affix);
    IOUtils.closeWhileHandlingException(dictionaries);
  }
}
Example 4: inform
import org.apache.lucene.analysis.util.ResourceLoader; // import the required package/class
@Override
public void inform(ResourceLoader loader) throws IOException {
  if (dictionaryFiles != null) {
    assureMatchVersion();
    List<String> files = splitFileNames(dictionaryFiles);
    if (files.size() > 0) {
      StemmerOverrideFilter.Builder builder = new StemmerOverrideFilter.Builder(ignoreCase);
      for (String file : files) {
        List<String> list = getLines(loader, file.trim());
        for (String line : list) {
          String[] mapping = line.split("\t", 2);
          builder.add(mapping[0], mapping[1]);
        }
      }
      dictionary = builder.build();
    }
  }
}
Example 5: getDict
import org.apache.lucene.analysis.util.ResourceLoader; // import the required package/class
public static Dictionary getDict(String dicPath, ResourceLoader loader) {
  Dictionary dic = null;
  if (dicPath != null) {
    File f = new File(dicPath);
    if (!f.isAbsolute() && loader instanceof SolrResourceLoader) { // relative path
      SolrResourceLoader srl = (SolrResourceLoader) loader;
      dicPath = srl.getInstanceDir() + dicPath;
      f = new File(dicPath);
    }
    dic = Dictionary.getInstance(f);
  } else {
    dic = Dictionary.getInstance();
  }
  return dic;
}
Example 6: createComponents
import org.apache.lucene.analysis.util.ResourceLoader; // import the required package/class
@Override
protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
  Tokenizer token = new IKTokenizer(reader, useSmart);
  Map<String, String> paramsMap = new HashMap<String, String>();
  Configuration cfg = DefaultConfig.getInstance();
  paramsMap.put("luceneMatchVersion", luceneMatchVersion.toString());
  paramsMap.put("synonyms", cfg.getExtSynonymDictionarys().get(0));
  paramsMap.put("ignoreCase", "true");
  SynonymFilterFactory factory = new SynonymFilterFactory(paramsMap);
  ResourceLoader loader = new ClasspathResourceLoader();
  try {
    factory.inform(loader);
  } catch (IOException e) {
    e.printStackTrace();
  }
  return new TokenStreamComponents(token, factory.create(token));
}
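For context, an analyzer built this way is consumed through the standard Lucene 4.x TokenStream API. The sketch below assumes the enclosing analyzer class from Example 6 is called IKSynonymAnalyzer (the real class name is not shown on this page):
import java.io.IOException;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class AnalyzerUsageSketch {
  public static void main(String[] args) throws IOException {
    Analyzer analyzer = new IKSynonymAnalyzer(); // assumed name of the analyzer defining createComponents above
    TokenStream ts = analyzer.tokenStream("content", "text to analyze");
    CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
    ts.reset();
    while (ts.incrementToken()) {
      System.out.println(term.toString()); // each token, with synonyms injected by the SynonymFilter
    }
    ts.end();
    ts.close();
    analyzer.close();
  }
}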
Example 7: inform
import org.apache.lucene.analysis.util.ResourceLoader; // import the required package/class
@Override
public void inform(ResourceLoader loader) throws IOException {
  if (userDictionaryPath != null) {
    InputStream stream = loader.openResource(userDictionaryPath);
    String encoding = userDictionaryEncoding;
    if (encoding == null) {
      encoding = IOUtils.UTF_8;
    }
    CharsetDecoder decoder = Charset.forName(encoding).newDecoder()
        .onMalformedInput(CodingErrorAction.REPORT)
        .onUnmappableCharacter(CodingErrorAction.REPORT);
    Reader reader = new InputStreamReader(stream, decoder);
    userDictionary = new UserDictionary(reader);
  } else {
    userDictionary = null;
  }
}
Example 8: inform
import org.apache.lucene.analysis.util.ResourceLoader; // import the required package/class
@Override
public void inform(ResourceLoader loader) throws IOException {
  assert tailored != null : "init must be called first!";
  if (tailored.isEmpty()) {
    config = new DefaultICUTokenizerConfig(cjkAsWords);
  } else {
    final BreakIterator breakers[] = new BreakIterator[UScript.CODE_LIMIT];
    for (Map.Entry<Integer,String> entry : tailored.entrySet()) {
      int code = entry.getKey();
      String resourcePath = entry.getValue();
      breakers[code] = parseRules(resourcePath, loader);
    }
    config = new DefaultICUTokenizerConfig(cjkAsWords) {
      @Override
      public BreakIterator getBreakIterator(int script) {
        if (breakers[script] != null) {
          return (BreakIterator) breakers[script].clone();
        } else {
          return super.getBreakIterator(script);
        }
      }
      // TODO: we could also allow codes->types mapping
    };
  }
}
Example 9: inform
import org.apache.lucene.analysis.util.ResourceLoader; // import the required package/class
@Override
public void inform(ResourceLoader loader) throws IOException {
  clazz = registry.get(name.toUpperCase(Locale.ROOT));
  if (clazz == null) {
    clazz = resolveEncoder(name, loader);
  }
  if (maxCodeLength != null) {
    try {
      setMaxCodeLenMethod = clazz.getMethod("setMaxCodeLen", int.class);
    } catch (Exception e) {
      throw new IllegalArgumentException("Encoder " + name + " / " + clazz + " does not support " + MAX_CODE_LENGTH, e);
    }
  }
  getEncoder(); // trigger initialization so that potential problems are thrown now
}
Example 10: inform
import org.apache.lucene.analysis.util.ResourceLoader; // import the required package/class
@Override
public void inform(ResourceLoader loader) throws IOException {
  if (stopWordFiles != null) {
    if (FORMAT_WORDSET.equalsIgnoreCase(format)) {
      stopWords = getWordSet(loader, stopWordFiles, ignoreCase);
    } else if (FORMAT_SNOWBALL.equalsIgnoreCase(format)) {
      stopWords = getSnowballWordSet(loader, stopWordFiles, ignoreCase);
    } else {
      throw new IllegalArgumentException("Unknown 'format' specified for 'words' file: " + format);
    }
  } else {
    if (null != format) {
      throw new IllegalArgumentException("'format' can not be specified w/o an explicit 'words' file: " + format);
    }
    if (luceneMatchVersion == null) {
      stopWords = new CharArraySet(StopAnalyzer.ENGLISH_STOP_WORDS_SET, ignoreCase);
    } else {
      stopWords = new CharArraySet(luceneMatchVersion, StopAnalyzer.ENGLISH_STOP_WORDS_SET, ignoreCase);
    }
  }
}
Example 11: testInform
import org.apache.lucene.analysis.util.ResourceLoader; // import the required package/class
public void testInform() throws Exception {
  ResourceLoader loader = new ClasspathResourceLoader(getClass());
  assertTrue("loader is null and it shouldn't be", loader != null);
  KeepWordFilterFactory factory = (KeepWordFilterFactory) tokenFilterFactory("KeepWord",
      "words", "keep-1.txt",
      "ignoreCase", "true");
  CharArraySet words = factory.getWords();
  assertTrue("words is null and it shouldn't be", words != null);
  assertTrue("words Size: " + words.size() + " is not: " + 2, words.size() == 2);

  factory = (KeepWordFilterFactory) tokenFilterFactory("KeepWord",
      "words", "keep-1.txt, keep-2.txt",
      "ignoreCase", "true");
  words = factory.getWords();
  assertTrue("words is null and it shouldn't be", words != null);
  assertTrue("words Size: " + words.size() + " is not: " + 4, words.size() == 4);
}
Example 12: createRewriterFactory
import org.apache.lucene.analysis.util.ResourceLoader; // import the required package/class
@Override
public RewriterFactory createRewriterFactory(NamedList<?> args,
    ResourceLoader resourceLoader) throws IOException {
  String rulesResourceName = (String) args.get("rules");
  if (rulesResourceName == null) {
    throw new IllegalArgumentException("Property 'rules' not configured");
  }
  Boolean ignoreCase = args.getBooleanArg("ignoreCase");
  // querqy parser for queries that are part of the instructions in the rules
  String rulesQuerqyParser = (String) args.get("querqyParser");
  QuerqyParserFactory querqyParser = null;
  if (rulesQuerqyParser != null) {
    rulesQuerqyParser = rulesQuerqyParser.trim();
    if (rulesQuerqyParser.length() > 0) {
      querqyParser = resourceLoader.newInstance(rulesQuerqyParser, QuerqyParserFactory.class);
    }
  }
  return new querqy.rewrite.commonrules.SimpleCommonRulesRewriterFactory(
      new InputStreamReader(resourceLoader.openResource(rulesResourceName), "UTF-8"),
      querqyParser, ignoreCase != null && ignoreCase);
}
Example 13: getWordSet
import org.apache.lucene.analysis.util.ResourceLoader; // import the required package/class
private CharArraySet getWordSet(ResourceLoader loader,
    String wordFiles, boolean ignoreCase)
    throws IOException {
  List<String> files = splitFileNames(wordFiles);
  CharArraySet words = null;
  if (files.size() > 0) {
    // default stopwords list has 35 or so words, but maybe don't make it that
    // big to start
    words = new CharArraySet(files.size() * 10, ignoreCase);
    for (String file : files) {
      List<String> wlist = getLines(loader, file.trim());
      words.addAll(StopFilter.makeStopSet(wlist, ignoreCase));
    }
  }
  return words;
}
Example 14: getDict
import org.apache.lucene.analysis.util.ResourceLoader; // import the required package/class
public static Dictionary getDict(String dicPath, ResourceLoader loader) {
  Dictionary dic = null;
  if (dicPath != null) {
    File f = new File(dicPath);
    if (!f.isAbsolute() && loader instanceof SolrResourceLoader) { // relative path
      SolrResourceLoader srl = (SolrResourceLoader) loader;
      dicPath = srl.getInstancePath().resolve(dicPath).toString();
      f = new File(dicPath);
    }
    dic = Dictionary.getInstance(f);
  } else {
    dic = Dictionary.getInstance();
  }
  return dic;
}
Example 15: inform
import org.apache.lucene.analysis.util.ResourceLoader; // import the required package/class
@Override
public void inform(ResourceLoader loader) throws IOException {
  String stopTypesFiles = args.get("types");
  enablePositionIncrements = getBoolean("enablePositionIncrements", false);
  useWhitelist = getBoolean("useWhitelist", false);
  if (stopTypesFiles != null) {
    List<String> files = splitFileNames(stopTypesFiles);
    if (files.size() > 0) {
      stopTypes = new HashSet<String>();
      for (String file : files) {
        List<String> typesLines = getLines(loader, file.trim());
        stopTypes.addAll(typesLines);
      }
    }
  } else {
    throw new IllegalArgumentException("Missing required parameter: types.");
  }
}