This article collects typical usage examples of the Java class edu.stanford.nlp.io.RuntimeIOException. If you are wondering what RuntimeIOException is for, or how it is used in practice, the curated examples below should help.
RuntimeIOException belongs to the edu.stanford.nlp.io package. Fifteen code examples of the class are shown below, ordered by popularity by default.
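All fifteen examples share the same idiom: a checked java.io.IOException is caught and rethrown as the unchecked RuntimeIOException, so that callers are not forced to declare or handle I/O failures. A minimal sketch of that idiom follows, assuming a hypothetical helper class and file name, and using only the two constructors that actually appear in the examples below, RuntimeIOException(Throwable) and RuntimeIOException(String, Throwable):

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import edu.stanford.nlp.io.RuntimeIOException;

public class FirstLineReader {  // hypothetical helper class, for illustration only
  /** Returns the first line of a file, converting any IOException into an unchecked RuntimeIOException. */
  public static String firstLine(String filename) {
    try (BufferedReader reader = new BufferedReader(new FileReader(filename))) {
      return reader.readLine();
    } catch (IOException e) {
      // Wrap the checked exception, optionally adding context as in Example 4 below.
      throw new RuntimeIOException("Could not read file: " + filename, e);
    }
  }
}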
Example 1: readMacros
import edu.stanford.nlp.io.RuntimeIOException; // import the package/class used in this example
public static List<Pair<String, String>> readMacros(BufferedReader bin) {
  try {
    List<Pair<String, String>> macros = new ArrayList<Pair<String, String>>();
    String line;
    int lineNumber = 0;
    while ((line = bin.readLine()) != null) {
      ++lineNumber;
      String trimmed = line.trim();
      if (trimmed.equals("") || trimmed.charAt(0) == '#') {
        continue;
      }
      String[] pieces = line.split("\t", 2);
      if (pieces.length < 2) {
        throw new IllegalArgumentException("Expected lines of the format " +
                                           "original (tab) replacement. " +
                                           "Line number " + lineNumber +
                                           " does not match.");
      }
      macros.add(new Pair<String, String>(pieces[0], pieces[1]));
    }
    return macros;
  } catch (IOException e) {
    throw new RuntimeIOException(e);
  }
}
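A hypothetical call site for readMacros, assuming the tab-separated "original (tab) replacement" format the method checks for; the macro strings below are made up, and java.io.StringReader supplies the in-memory input:

BufferedReader bin = new BufferedReader(new StringReader(
    "colour\tcolor\n# comment lines and blank lines are skipped\nfavour\tfavor\n"));
List<Pair<String, String>> macros = readMacros(bin);
// macros now holds the pairs (colour, color) and (favour, favor).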
Example 2: getNext
import edu.stanford.nlp.io.RuntimeIOException; // import the package/class used in this example
/**
 * Internally fetches the next token.
 *
 * @return the next token in the token stream, or null if none exists.
 */
@Override
@SuppressWarnings("unchecked")
protected T getNext() {
  // if (lexer == null) {
  //   return null;
  // }
  try {
    return (T) lexer.next();
  } catch (IOException e) {
    throw new RuntimeIOException(e);
  }
  // cdm 2007: this shouldn't be necessary: PTBLexer decides for itself whether to return CRs based on the same flag!
  // get rid of CRs if necessary
  // while (!tokenizeNLs && PTBLexer.cr.equals(((HasWord) token).word())) {
  //   token = (T)lexer.next();
  // }
  // horatio: we used to catch exceptions here, which led to broken
  // behavior and made it very difficult to debug whatever the
  // problem was.
}
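getNext() is the protected fetch hook on which a tokenizer's public Iterator methods are built, so it is never called directly. A sketch of how such a tokenizer is typically consumed, assuming the concrete PTBTokenizer implementation (imports: edu.stanford.nlp.process.PTBTokenizer, edu.stanford.nlp.process.Tokenizer, edu.stanford.nlp.ling.Word, java.io.StringReader); the input text is made up:

Tokenizer<Word> tok = PTBTokenizer.newPTBTokenizer(new StringReader("RuntimeIOException wraps checked IOExceptions."));
while (tok.hasNext()) {
  System.out.println(tok.next().word());  // hasNext()/next() delegate to getNext() internally
}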
Example 3: readSVMLightFormat
import edu.stanford.nlp.io.RuntimeIOException; // import the package/class used in this example
private static RVFDataset<String, String> readSVMLightFormat(String filename, Index<String> featureIndex, Index<String> labelIndex, List<String> lines) {
  BufferedReader in = null;
  RVFDataset<String, String> dataset;
  try {
    dataset = new RVFDataset<String, String>(10, featureIndex, labelIndex);
    in = new BufferedReader(new FileReader(filename));
    while (in.ready()) {
      String line = in.readLine();
      if (lines != null)
        lines.add(line);
      dataset.add(svmLightLineToRVFDatum(line));
    }
  } catch (IOException e) {
    throw new RuntimeIOException(e);
  } finally {
    IOUtils.closeIgnoringExceptions(in);
  }
  return dataset;
}
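Example 3 closes the reader in a finally block via IOUtils.closeIgnoringExceptions. On Java 7 and later, the same read loop could also be written with try-with-resources so the reader is closed automatically; a sketch of that variant, reusing the names from the example above (this is not how the library itself is written):

try (BufferedReader in = new BufferedReader(new FileReader(filename))) {
  String line;
  while ((line = in.readLine()) != null) {  // readLine() == null is a more robust end-of-stream test than ready()
    if (lines != null) lines.add(line);
    dataset.add(svmLightLineToRVFDatum(line));
  }
} catch (IOException e) {
  throw new RuntimeIOException(e);
}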
Example 4: propFileToProperties
import edu.stanford.nlp.io.RuntimeIOException; // import the package/class used in this example
/**
 * This method reads in properties listed in a file in the format prop=value, one property per line.
 * Although <code>Properties.load(InputStream)</code> exists, I implemented this method to trim the lines,
 * something not implemented in the <code>load()</code> method.
 *
 * @param filename A properties file to read
 * @return The corresponding Properties object
 */
public static Properties propFileToProperties(String filename) {
  Properties result = new Properties();
  try {
    InputStream is = new BufferedInputStream(new FileInputStream(filename));
    result.load(is);
    // trim all values
    for (Object propKey : result.keySet()) {
      String newVal = result.getProperty((String) propKey);
      result.setProperty((String) propKey, newVal.trim());
    }
    is.close();
    return result;
  } catch (IOException e) {
    throw new RuntimeIOException("propFileToProperties could not read properties file: " + filename, e);
  }
}
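A hypothetical call, assuming a properties file whose values may carry stray whitespace; the path and key below are made up:

Properties props = propFileToProperties("/path/to/tagger.properties");  // hypothetical path
String model = props.getProperty("model");  // values are already trim()-ed by propFileToProperties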
Example 5: getNext
import edu.stanford.nlp.io.RuntimeIOException; // import the package/class used in this example
@Override
@SuppressWarnings("unchecked")
protected T getNext() {
  try {
    T nextToken = null;
    // Depending on the orthographic normalization options,
    // some tokens can be obliterated. In this case, keep iterating
    // until we see a non-zero length token.
    do {
      nextToken = (T) lexer.next();
    } while (nextToken != null && nextToken.word().length() == 0);
    return nextToken;
  } catch (IOException e) {
    throw new RuntimeIOException(e);
  }
}
Example 6: save
import edu.stanford.nlp.io.RuntimeIOException; // import the package/class used in this example
protected void save(DataOutputStream file,
                    Map<String, Set<String>> tagTokens) {
  try {
    file.writeInt(index.size());
    for (String item : index) {
      file.writeUTF(item);
      if (learnClosedTags) {
        if (tagTokens.get(item).size() < closedTagThreshold) {
          markClosed(item);
        }
      }
      file.writeBoolean(isClosed(item));
    }
  } catch (IOException e) {
    throw new RuntimeIOException(e);
  }
}
Example 7: outputTaggedSentence
import edu.stanford.nlp.io.RuntimeIOException; // import the package/class used in this example
public void outputTaggedSentence(List<? extends HasWord> sentence,
                                 boolean outputLemmas, OutputStyle outputStyle,
                                 boolean outputVerbosity, int numSentences,
                                 String separator, Writer writer) {
  try {
    switch (outputStyle) {
    case TSV:
      writer.write(getTsvWords(outputVerbosity, outputLemmas, sentence));
      break;
    case XML:
    case INLINE_XML:
      writeXMLSentence(writer, sentence, numSentences, outputLemmas);
      break;
    case SLASH_TAGS:
      writer.write(Sentence.listToString(sentence, false, config.getTagSeparator()));
      writer.write(separator);
      break;
    default:
      throw new IllegalArgumentException("Unsupported output style " + outputStyle);
    }
  } catch (IOException e) {
    throw new RuntimeIOException(e);
  }
}
Example 8: loadDemonymLists
import edu.stanford.nlp.io.RuntimeIOException; // import the package/class used in this example
private void loadDemonymLists(String demonymFile) {
  BufferedReader reader = null;
  try {
    reader = IOUtils.readerFromString(demonymFile);
    while (reader.ready()) {
      String[] line = reader.readLine().split("\t");
      if (line[0].startsWith("#")) continue;
      Set<String> set = Generics.newHashSet();
      for (String s : line) {
        set.add(s.toLowerCase());
        demonymSet.add(s.toLowerCase());
      }
      demonyms.put(line[0].toLowerCase(), set);
    }
    adjectiveNation.addAll(demonymSet);
    adjectiveNation.removeAll(demonyms.keySet());
  } catch (IOException e) {
    throw new RuntimeIOException(e);
  } finally {
    IOUtils.closeIgnoringExceptions(reader);
  }
}
Example 9: loadGenderNumber
import edu.stanford.nlp.io.RuntimeIOException; // import the package/class used in this example
private void loadGenderNumber(String file) {
  try {
    BufferedReader reader = IOUtils.readerFromString(file);
    String line;
    while ((line = reader.readLine()) != null) {
      String[] split = line.split("\t");
      List<String> tokens = new ArrayList<String>(Arrays.asList(split[0].split(" ")));
      String[] countStr = split[1].split(" ");
      int[] counts = new int[4];
      counts[0] = Integer.parseInt(countStr[0]);
      counts[1] = Integer.parseInt(countStr[1]);
      counts[2] = Integer.parseInt(countStr[2]);
      counts[3] = Integer.parseInt(countStr[3]);
      genderNumber.put(tokens, counts);
    }
    reader.close();
  } catch (IOException e) {
    throw new RuntimeIOException(e);
  }
}
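The parsing code above fixes the expected file layout: each line is a phrase, a tab, and then four space-separated integer counts. A made-up line and how it is split (the phrase and numbers are purely illustrative):

String line = "barack obama\t0 1000 0 20";                 // hypothetical data line
String[] split = line.split("\t");                         // split[0] = phrase, split[1] = counts
List<String> tokens = Arrays.asList(split[0].split(" "));  // ["barack", "obama"]
String[] countStr = split[1].split(" ");                   // {"0", "1000", "0", "20"} -> counts[0..3]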
Example 10: getNextDocument
import edu.stanford.nlp.io.RuntimeIOException; // import the package/class used in this example
public Document getNextDocument()
{
  try {
    if (curFileIndex >= fileList.size()) return null; // DONE!
    File curFile = fileList.get(curFileIndex);
    if (docIterator == null) {
      docIterator = new DocumentIterator(curFile.getAbsolutePath(), options);
    }
    while ( ! docIterator.hasNext()) {
      logger.info("Processed " + docIterator.docCnt + " documents in " + curFile.getAbsolutePath());
      docIterator.close();
      curFileIndex++;
      if (curFileIndex >= fileList.size()) {
        return null; // DONE!
      }
      curFile = fileList.get(curFileIndex);
      docIterator = new DocumentIterator(curFile.getAbsolutePath(), options);
    }
    Document next = docIterator.next();
    SieveCoreferenceSystem.logger.fine("Reading document: " + next.getDocumentID());
    return next;
  } catch (IOException ex) {
    throw new RuntimeIOException(ex);
  }
}
Example 11: readMacros
import edu.stanford.nlp.io.RuntimeIOException; // import the package/class used in this example
public static List<Pair<String, String>> readMacros(BufferedReader bin) {
  try {
    List<Pair<String, String>> macros = new ArrayList<Pair<String, String>>();
    String line;
    int lineNumber = 0;
    while ((line = bin.readLine()) != null) {
      ++lineNumber;
      String trimmed = line.trim();
      if (trimmed.equals("") || trimmed.charAt(0) == '#') {
        continue;
      }
      String[] pieces = line.split("\t", 2);
      if (pieces.length < 2) {
        throw new IllegalArgumentException("Expected lines of the format " +
                                           "original (tab) replacement. " +
                                           "Line number " + lineNumber +
                                           " does not match.");
      }
      macros.add(new Pair<String, String>(pieces[0], pieces[1]));
    }
    return macros;
  } catch (IOException e) {
    throw new RuntimeIOException(e);
  }
}
Example 12: readTagCount
import edu.stanford.nlp.io.RuntimeIOException; // import the package/class used in this example
/** A TagCount object's fields are read from the file. They are read from
 *  the current position and the file is not closed afterwards.
 */
public static TagCount readTagCount(DataInputStream rf) {
  try {
    TagCount tc = new TagCount();
    int numTags = rf.readInt();
    tc.map = Generics.newHashMap(numTags);
    for (int i = 0; i < numTags; i++) {
      String tag = rf.readUTF();
      int count = rf.readInt();
      if (tag.equals(NULL_SYMBOL)) tag = null;
      tc.map.put(tag, count);
    }
    tc.getTagsCache = tc.map.keySet().toArray(new String[tc.map.keySet().size()]);
    tc.sumCache = tc.calculateSumCache();
    return tc;
  } catch (IOException e) {
    throw new RuntimeIOException(e);
  }
}
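The binary layout that readTagCount consumes is fully determined by its read calls: an int giving the number of tags, then for each tag a UTF string (with NULL_SYMBOL standing in for a null tag) and an int count. A sketch of a matching writer, derived only from that read order and not from the library's actual save code (the method name writeTagCount is made up):

private static void writeTagCount(DataOutputStream out, Map<String, Integer> map) throws IOException {
  out.writeInt(map.size());
  for (Map.Entry<String, Integer> entry : map.entrySet()) {
    // Mirror the null handling in readTagCount: null tags are stored as NULL_SYMBOL.
    out.writeUTF(entry.getKey() == null ? NULL_SYMBOL : entry.getKey());
    out.writeInt(entry.getValue());
  }
}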
Example 13: IntelKBPAnnotator
import edu.stanford.nlp.io.RuntimeIOException; // import the package/class used in this example
/**
 * Create a new KBP annotator from the given properties.
 *
 * @param props The properties to use when creating this extractor.
 */
public IntelKBPAnnotator(String name, Properties props) {
  // Parse standard properties
  ArgumentParser.fillOptions(this, name, props);

  // Load the extractor
  try {
    this.extractor = new IntelKBPEnsembleExtractor(
        new IntelKBPTokensregexExtractor(tokensregexdir),
        new IntelKBPSemgrexExtractor(semgrexdir),
        IntelKBPStatisticalExtractor.loadStatisticalExtractor(),
        DefaultKBPStatisticalExtractor.loadStatisticalExtractor()
    ).setEnsembleStrategy(IntelConfig.ENSEMBLE_STRATEGY);
  } catch (IOException | ClassNotFoundException e) {
    throw new RuntimeIOException(e);
  }

  // Load TokensRegexNER
  /*this.casedNER = new TokensRegexNERAnnotator(
      regexnerCasedPath,
      false);
  this.caselessNER = new TokensRegexNERAnnotator(
      regexnerCaselessPath,
      true,
      "^(NN|JJ).*");*/

  // Create entity mention annotator
  this.entityMentionAnnotator = new EntityMentionsAnnotator("kbp.entitymention", new Properties() {{
    setProperty("kbp.entitymention.acronyms", "true");
    setProperty("acronyms", "true");
  }});
}
Example 14: main
import edu.stanford.nlp.io.RuntimeIOException; // import the package/class used in this example
public static void main(String[] args) throws IOException {
  RedwoodConfiguration.standard().apply(); // Disable SLF4J crap.
  ArgumentParser.fillOptions(IntelKBPSemgrexExtractor.class, args);
  IntelKBPSemgrexExtractor extractor = new IntelKBPSemgrexExtractor(DIR);
  List<Pair<KBPInput, String>> testExamples = DatasetUtils.readDataset(TEST_FILE);
  extractor.computeAccuracy(testExamples.stream(), PREDICTIONS.map(x -> {
    try {
      return "stdout".equalsIgnoreCase(x) ? System.out : new PrintStream(new FileOutputStream(x));
    } catch (IOException e) {
      throw new RuntimeIOException(e);
    }
  }));
}
Example 15: main
import edu.stanford.nlp.io.RuntimeIOException; // import the package/class used in this example
public static void main(String[] args) throws IOException {
  RedwoodConfiguration.standard().apply(); // Disable SLF4J crap.
  ArgumentParser.fillOptions(IntelKBPTokensregexExtractor.class, args);
  IntelKBPTokensregexExtractor extractor = new IntelKBPTokensregexExtractor(DIR);
  List<Pair<KBPInput, String>> testExamples = DatasetUtils.readDataset(TEST_FILE);
  extractor.computeAccuracy(testExamples.stream(), PREDICTIONS.map(x -> {
    try {
      return "stdout".equalsIgnoreCase(x) ? System.out : new PrintStream(new FileOutputStream(x));
    } catch (IOException e) {
      throw new RuntimeIOException(e);
    }
  }));
}