

Java ResourceLoader.openResource Method Code Examples

This article collects typical usage examples of the Java method org.apache.lucene.analysis.util.ResourceLoader.openResource. If you are wondering what ResourceLoader.openResource does, how to call it, or want to see it used in real code, the curated examples below should help. You can also explore further usage examples of its containing class, org.apache.lucene.analysis.util.ResourceLoader.


The sections below show 15 code examples of the ResourceLoader.openResource method, sorted by popularity by default.
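Before diving into the examples, it helps to know the pattern they all share: openResource(String) returns an InputStream for a named resource (typically a file alongside the Solr/Lucene configuration), and components usually obtain the ResourceLoader through the ResourceLoaderAware.inform callback. The following minimal sketch only illustrates that call shape; the factory class name, the stopwords.txt resource name, and the field holding the result are hypothetical and not taken from any of the projects quoted below.

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.HashSet;
import java.util.Set;

import org.apache.lucene.analysis.util.ResourceLoader;
import org.apache.lucene.analysis.util.ResourceLoaderAware;

// Hypothetical factory that loads a word list through ResourceLoader.openResource.
public class WordListLoadingFactory implements ResourceLoaderAware {

  private final String wordFile = "stopwords.txt"; // assumed resource name
  private Set<String> words;

  @Override
  public void inform(ResourceLoader loader) throws IOException {
    words = new HashSet<>();
    // openResource resolves the name against the loader's configuration
    // location and returns an InputStream; the caller is responsible for closing it.
    try (InputStream stream = loader.openResource(wordFile);
         BufferedReader reader = new BufferedReader(
             new InputStreamReader(stream, StandardCharsets.UTF_8))) {
      String line;
      while ((line = reader.readLine()) != null) {
        line = line.trim();
        if (!line.isEmpty() && !line.startsWith("#")) {
          words.add(line);
        }
      }
    }
  }
}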

Example 1: inform

import org.apache.lucene.analysis.util.ResourceLoader; // import the package/class the method depends on
@Override
public void inform(ResourceLoader loader) throws IOException {
  InputStream stream = null;
  try {
    if (dictFile != null) // the dictionary can be empty.
      dictionary = getWordSet(loader, dictFile, false);
    // TODO: Broken, because we cannot resolve real system id
    // ResourceLoader should also supply method like ClassLoader to get resource URL
    stream = loader.openResource(hypFile);
    final InputSource is = new InputSource(stream);
    is.setEncoding(encoding); // if it's null let xml parser decide
    is.setSystemId(hypFile);
    if (luceneMatchVersion.onOrAfter(Version.LUCENE_4_4_0)) {
      hyphenator = HyphenationCompoundWordTokenFilter.getHyphenationTree(is);
    } else {
      hyphenator = Lucene43HyphenationCompoundWordTokenFilter.getHyphenationTree(is);
    }
  } finally {
    IOUtils.closeWhileHandlingException(stream);
  }
}
 
Developer: lamsfoundation, Project: lams, Lines: 22, Source: HyphenationCompoundWordTokenFilterFactory.java

Example 2: inform

import org.apache.lucene.analysis.util.ResourceLoader; // import the package/class the method depends on
@Override
public void inform(ResourceLoader loader) throws IOException {
  String dicts[] = dictionaryFiles.split(",");

  InputStream affix = null;
  List<InputStream> dictionaries = new ArrayList<>();

  try {
    dictionaries = new ArrayList<>();
    for (String file : dicts) {
      dictionaries.add(loader.openResource(file));
    }
    affix = loader.openResource(affixFile);

    this.dictionary = new Dictionary(affix, dictionaries, ignoreCase);
  } catch (ParseException e) {
    throw new IOException("Unable to load hunspell data! [dictionary=" + dictionaries + ",affix=" + affixFile + "]", e);
  } finally {
    IOUtils.closeWhileHandlingException(affix);
    IOUtils.closeWhileHandlingException(dictionaries);
  }
}
 
Developer: lamsfoundation, Project: lams, Lines: 23, Source: HunspellStemFilterFactory.java

Example 3: inform

import org.apache.lucene.analysis.util.ResourceLoader; // import the package/class the method depends on
@Override
public void inform(ResourceLoader loader) throws IOException {
  if (userDictionaryPath != null) {
    InputStream stream = loader.openResource(userDictionaryPath);
    String encoding = userDictionaryEncoding;
    if (encoding == null) {
      encoding = IOUtils.UTF_8;
    }
    CharsetDecoder decoder = Charset.forName(encoding).newDecoder()
        .onMalformedInput(CodingErrorAction.REPORT)
        .onUnmappableCharacter(CodingErrorAction.REPORT);
    Reader reader = new InputStreamReader(stream, decoder);
    userDictionary = new UserDictionary(reader);
  } else {
    userDictionary = null;
  }
}
 
Developer: europeana, Project: search, Lines: 18, Source: JapaneseTokenizerFactory.java

Example 4: createRewriterFactory

import org.apache.lucene.analysis.util.ResourceLoader; // import the package/class the method depends on
@Override
public RewriterFactory createRewriterFactory(NamedList<?> args,
      ResourceLoader resourceLoader) throws IOException {
   String rulesResourceName = (String) args.get("rules");
   if (rulesResourceName == null) {
      throw new IllegalArgumentException("Property 'rules' not configured");
   }
   
   Boolean ignoreCase = args.getBooleanArg("ignoreCase");

   // querqy parser for queries that are part of the instructions in the
   // rules
   String rulesQuerqyParser = (String) args.get("querqyParser");
   QuerqyParserFactory querqyParser = null;
   if (rulesQuerqyParser != null) {
      rulesQuerqyParser = rulesQuerqyParser.trim();
      if (rulesQuerqyParser.length() > 0) {
         querqyParser = resourceLoader.newInstance(rulesQuerqyParser, QuerqyParserFactory.class);
      }
   }
   
   return new querqy.rewrite.commonrules.SimpleCommonRulesRewriterFactory(
         new InputStreamReader(resourceLoader.openResource(rulesResourceName), "UTF-8"), querqyParser, ignoreCase != null && ignoreCase);
}
 
Developer: renekrie, Project: querqy, Lines: 25, Source: SimpleCommonRulesRewriterFactory.java

Example 5: inform

import org.apache.lucene.analysis.util.ResourceLoader; // import the package/class the method depends on
@Override
public void inform(ResourceLoader loader) throws IOException {
  mode = getMode(args);
  String userDictionaryPath = args.get(USER_DICT_PATH);
  if (userDictionaryPath != null) {
    InputStream stream = loader.openResource(userDictionaryPath);
    String encoding = args.get(USER_DICT_ENCODING);
    if (encoding == null) {
      encoding = IOUtils.UTF_8;
    }
    CharsetDecoder decoder = Charset.forName(encoding).newDecoder()
        .onMalformedInput(CodingErrorAction.REPORT)
        .onUnmappableCharacter(CodingErrorAction.REPORT);
    Reader reader = new InputStreamReader(stream, decoder);
    userDictionary = new UserDictionary(reader);
  } else {
    userDictionary = null;
  }
  discardPunctuation = getBoolean(DISCARD_PUNCTUATION, true);
}
 
Developer: pkarmstr, Project: NYBC, Lines: 21, Source: JapaneseTokenizerFactory.java

Example 6: inform

import org.apache.lucene.analysis.util.ResourceLoader; // import the package/class the method depends on
@Override
public void inform(ResourceLoader loader) throws IOException {
    if(sentenceModelFile!=null) {
        sentenceOp = new SentenceDetectorME(new SentenceModel(
                loader.openResource(sentenceModelFile)));
    }

    if(tokenizerModelFile==null)
        throw new IOException("Parameter 'tokenizerModel' is required, but is invalid: "+tokenizerModelFile);
    tokenizerOp = new TokenizerME(new TokenizerModel(
            loader.openResource(tokenizerModelFile)
    ));

    if(parChunkingClass!=null) {
        try {
            Class c = Class.forName(parChunkingClass);
            Object o = c.newInstance();
            paragraphChunker = (ParagraphChunker) o;
        }catch (Exception e){
            throw new IOException(e);
        }
    }

}
 
Developer: ziqizhang, Project: jate, Lines: 25, Source: OpenNLPTokenizerFactory.java

Example 7: inform

import org.apache.lucene.analysis.util.ResourceLoader; // import the package/class the method depends on
/**
 * Loads the hunspell dictionary and affix files defined in the configuration
 *  
 * @param loader ResourceLoader used to load the files
 */
@Override
public void inform(ResourceLoader loader) throws IOException {
  String dictionaryFiles[] = dictionaryArg.split(",");

  InputStream affix = null;
  List<InputStream> dictionaries = new ArrayList<InputStream>();

  try {
    dictionaries = new ArrayList<InputStream>();
    for (String file : dictionaryFiles) {
      dictionaries.add(loader.openResource(file));
    }
    affix = loader.openResource(affixFile);

    this.dictionary = new HunspellDictionary(affix, dictionaries, luceneMatchVersion, ignoreCase, strictAffixParsing);
  } catch (ParseException e) {
    throw new IOException("Unable to load hunspell data! [dictionary=" + dictionaryArg + ",affix=" + affixFile + "]", e);
  } finally {
    IOUtils.closeWhileHandlingException(affix);
    IOUtils.closeWhileHandlingException(dictionaries);
  }
}
 
Developer: yintaoxue, Project: read-open-source-code, Lines: 28, Source: HunspellStemFilterFactory.java

Example 8: inform

import org.apache.lucene.analysis.util.ResourceLoader; // import the package/class the method depends on
@Override
public void inform(ResourceLoader loader) throws IOException {
  InputStream stream = null;
  try {
    if (dictFile != null) // the dictionary can be empty.
      dictionary = getWordSet(loader, dictFile, false);
    // TODO: Broken, because we cannot resolve real system id
    // ResourceLoader should also supply method like ClassLoader to get resource URL
    stream = loader.openResource(hypFile);
    final InputSource is = new InputSource(stream);
    is.setEncoding(encoding); // if it's null let xml parser decide
    is.setSystemId(hypFile);
    hyphenator = HyphenationCompoundWordTokenFilter.getHyphenationTree(is);
  } finally {
    IOUtils.closeWhileHandlingException(stream);
  }
}
 
Developer: yintaoxue, Project: read-open-source-code, Lines: 18, Source: HyphenationCompoundWordTokenFilterFactory.java

Example 9: parseRules

import org.apache.lucene.analysis.util.ResourceLoader; // import the package/class the method depends on
private BreakIterator parseRules(String filename, ResourceLoader loader) throws IOException {
  StringBuilder rules = new StringBuilder();
  InputStream rulesStream = loader.openResource(filename);
  BufferedReader reader = new BufferedReader
      (IOUtils.getDecodingReader(rulesStream, StandardCharsets.UTF_8));
  String line = null;
  while ((line = reader.readLine()) != null) {
    if ( ! line.startsWith("#"))
      rules.append(line);
    rules.append('\n');
  }
  reader.close();
  return new RuleBasedBreakIterator(rules.toString());
}
 
Developer: europeana, Project: search, Lines: 15, Source: ICUTokenizerFactory.java

Example 10: createFromRules

import org.apache.lucene.analysis.util.ResourceLoader; // import the package/class the method depends on
/**
 * Read custom rules from a file, and create a RuleBasedCollator.
 * The file cannot support comments, as # might be in the rules!
 */
private Collator createFromRules(String fileName, ResourceLoader loader) {
  InputStream input = null;
  try {
   input = loader.openResource(fileName);
   String rules = IOUtils.toString(input, "UTF-8");
   return new RuleBasedCollator(rules);
  } catch (Exception e) {
    // io error or invalid rules
    throw new RuntimeException(e);
  } finally {
    IOUtils.closeQuietly(input);
  }
}
 
Developer: europeana, Project: search, Lines: 18, Source: ICUCollationField.java

Example 11: getTemplates

import org.apache.lucene.analysis.util.ResourceLoader; // import the package/class the method depends on
/** Return a Templates object for the given filename */
private Templates getTemplates(ResourceLoader loader, String filename,int cacheLifetimeSeconds) throws IOException {
  
  Templates result = null;
  lastFilename = null;
  try {
    if(log.isDebugEnabled()) {
      log.debug("compiling XSLT templates:" + filename);
    }
    final String fn = "xslt/" + filename;
    final TransformerFactory tFactory = TransformerFactory.newInstance();
    tFactory.setURIResolver(new SystemIdResolver(loader).asURIResolver());
    tFactory.setErrorListener(xmllog);
    final StreamSource src = new StreamSource(loader.openResource(fn),
      SystemIdResolver.createSystemIdFromResourceName(fn));
    try {
      result = tFactory.newTemplates(src);
    } finally {
      // some XML parsers are broken and don't close the byte stream (but they should according to spec)
      IOUtils.closeQuietly(src.getInputStream());
    }
  } catch (Exception e) {
    log.error(getClass().getName(), "newTemplates", e);
    throw new IOException("Unable to initialize Templates '" + filename + "'", e);
  }
  
  lastFilename = filename;
  lastTemplates = result;
  cacheExpires = System.currentTimeMillis() + (cacheLifetimeSeconds * 1000);
  
  return result;
}
 
Developer: europeana, Project: search, Lines: 33, Source: TransformerProvider.java

Example 12: parseRules

import org.apache.lucene.analysis.util.ResourceLoader; // import the package/class the method depends on
private BreakIterator parseRules(String filename, ResourceLoader loader) throws IOException {
  StringBuilder rules = new StringBuilder();
  InputStream rulesStream = loader.openResource(filename);
  BufferedReader reader = new BufferedReader
      (IOUtils.getDecodingReader(rulesStream, IOUtils.CHARSET_UTF_8));
  String line = null;
  while ((line = reader.readLine()) != null) {
    if ( ! line.startsWith("#"))
      rules.append(line);
    rules.append('\n');
  }
  reader.close();
  return new RuleBasedBreakIterator(rules.toString());
}
 
Developer: pkarmstr, Project: NYBC, Lines: 15, Source: ICUTokenizerFactory.java

Example 13: getTemplates

import org.apache.lucene.analysis.util.ResourceLoader; // import the package/class the method depends on
/** Return a Templates object for the given filename */
private Templates getTemplates(ResourceLoader loader, String filename,int cacheLifetimeSeconds) throws IOException {
  
  Templates result = null;
  lastFilename = null;
  try {
    if(log.isDebugEnabled()) {
      log.debug("compiling XSLT templates:" + filename);
    }
    final String fn = "xslt/" + filename;
    final TransformerFactory tFactory = TransformerFactory.newInstance();
    tFactory.setURIResolver(new SystemIdResolver(loader).asURIResolver());
    tFactory.setErrorListener(xmllog);
    final StreamSource src = new StreamSource(loader.openResource(fn),
      SystemIdResolver.createSystemIdFromResourceName(fn));
    try {
      result = tFactory.newTemplates(src);
    } finally {
      // some XML parsers are broken and don't close the byte stream (but they should according to spec)
      IOUtils.closeQuietly(src.getInputStream());
    }
  } catch (Exception e) {
    log.error(getClass().getName(), "newTemplates", e);
    final IOException ioe = new IOException("Unable to initialize Templates '" + filename + "'");
    ioe.initCause(e);
    throw ioe;
  }
  
  lastFilename = filename;
  lastTemplates = result;
  cacheExpires = System.currentTimeMillis() + (cacheLifetimeSeconds * 1000);
  
  return result;
}
 
Developer: pkarmstr, Project: NYBC, Lines: 35, Source: TransformerProvider.java

Example 14: addBoostInstructions

import org.apache.lucene.analysis.util.ResourceLoader; // import the package/class the method depends on
void addBoostInstructions(RulesCollectionBuilder builder, BoostDirection direction, float boost,
      ResourceLoader resourceLoader, String resourceName) throws IOException {

   try (
         BufferedReader reader = new BufferedReader(new InputStreamReader(resourceLoader.openResource(resourceName)))) {

      String line;

      while ((line = reader.readLine()) != null) {

         line = line.trim();
         if (line.length() > 0) {

            int pos = line.indexOf("#");

            if (pos > -1) {
               if (line.length() == 1) {
                  continue;
               }
               line = line.substring(0, pos).trim();
            }

            pos = line.indexOf("=>");
            if (pos > 0) {
               String inputsStr = line.substring(0, pos).trim();
               if (pos < line.length() - 2) {

                  String instructionStr = line.substring(pos + 2).trim();
                  if (instructionStr.length() > 0) {

                     List<Input> inputs = makeInputs(inputsStr);
                     if (inputs.size() > 0) {

                        for (String t : instructionStr.split(",")) {
                           t = t.trim();
                           if (t.length() > 0) {
                              Query query = termsToQuery(t);
                              if (!query.getClauses().isEmpty()) {
                                 for (Input input : inputs) {
                                    BoostInstruction bi = new BoostInstruction(query, direction, boost);
                                    builder.addRule(input, new Instructions(Collections.singletonList((Instruction) bi)));
                                 }
                              }
                           }
                        }

                     }

                  }
               }
            }

         }

      }
   }
}
 
Developer: renekrie, Project: querqy, Lines: 58, Source: SynonymFormatCommonRulesRewriterFactory.java

Example 15: inform

import org.apache.lucene.analysis.util.ResourceLoader; // import the package/class the method depends on
/**
 * Loads the hunspell dictionary and affix files defined in the configuration
 *  
 * @param loader ResourceLoader used to load the files
 */
@Override
public void inform(ResourceLoader loader) throws IOException {
  assureMatchVersion();
  String dictionaryArg = args.get(PARAM_DICTIONARY);
  if (dictionaryArg == null) {
    throw new IllegalArgumentException("Parameter " + PARAM_DICTIONARY + " is mandatory.");
  }
  String dictionaryFiles[] = args.get(PARAM_DICTIONARY).split(",");
  String affixFile = args.get(PARAM_AFFIX);
  String pic = args.get(PARAM_IGNORE_CASE);
  if(pic != null) {
    if(pic.equalsIgnoreCase(TRUE)) ignoreCase = true;
    else if(pic.equalsIgnoreCase(FALSE)) ignoreCase = false;
    else throw new IllegalArgumentException("Unknown value for " + PARAM_IGNORE_CASE + ": " + pic + ". Must be true or false");
  }

  String strictAffixParsingParam = args.get(PARAM_STRICT_AFFIX_PARSING);
  boolean strictAffixParsing = true;
  if(strictAffixParsingParam != null) {
    if(strictAffixParsingParam.equalsIgnoreCase(FALSE)) strictAffixParsing = false;
    else if(strictAffixParsingParam.equalsIgnoreCase(TRUE)) strictAffixParsing = true;
    else throw new IllegalArgumentException("Unknown value for " + PARAM_STRICT_AFFIX_PARSING + ": " + strictAffixParsingParam + ". Must be true or false");
  }

  InputStream affix = null;
  List<InputStream> dictionaries = new ArrayList<InputStream>();

  try {
    dictionaries = new ArrayList<InputStream>();
    for (String file : dictionaryFiles) {
      dictionaries.add(loader.openResource(file));
    }
    affix = loader.openResource(affixFile);

    this.dictionary = new HunspellDictionary(affix, dictionaries, luceneMatchVersion, ignoreCase, strictAffixParsing);
  } catch (ParseException e) {
    throw new IOException("Unable to load hunspell data! [dictionary=" + args.get("dictionary") + ",affix=" + affixFile + "]", e);
  } finally {
    IOUtils.closeWhileHandlingException(affix);
    IOUtils.closeWhileHandlingException(dictionaries);
  }
}
 
Developer: pkarmstr, Project: NYBC, Lines: 48, Source: HunspellStemFilterFactory.java


Note: The org.apache.lucene.analysis.util.ResourceLoader.openResource method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult each project's License before distributing or using the code; do not reproduce without permission.