本文整理汇总了Java中opennlp.tools.cmdline.parser.ParserTool类的典型用法代码示例。如果您正苦于以下问题:Java ParserTool类的具体用法?Java ParserTool怎么用?Java ParserTool使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
ParserTool类属于opennlp.tools.cmdline.parser包,在下文中一共展示了ParserTool类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: scoreStructure
import opennlp.tools.cmdline.parser.ParserTool; //导入依赖的package包/类
/**
 * Scores the structural similarity between the parse of the question {@code q}
 * and the parse of {@code passage}, but only when the candidate answer
 * {@code ca} literally occurs in the passage; otherwise returns 0.
 *
 * @param ca      candidate answer string
 * @param q       question text
 * @param passage passage text to compare against the question
 * @param verbose unused here; kept for interface compatibility
 * @return accumulated matchChildren score, or 0 when ca is not in the passage
 * @throws InvalidFormatException if the parser model file is malformed
 * @throws IOException            if the model file cannot be read
 */
public double scoreStructure(String ca, String q, String passage, boolean verbose) throws InvalidFormatException, IOException {
    // FIX: the original also built a POSTaggerME and a Tokenizer that were
    // never used, leaking two FileInputStreams. Only the parser is needed.
    // try-with-resources guarantees the model stream is closed.
    Parser parser;
    try (FileInputStream parserStream = new FileInputStream(new File("en-parser.bin"))) {
        parser = ParserFactory.create(new ParserModel(parserStream));
    }
    double score = 0;
    Parse[] questionParse = ParserTool.parseLine(q, parser, 1);
    // FIX: the original parsed the question twice; the second parse must be
    // of the passage, otherwise matchChildren always compares q with itself.
    Parse[] passageParse = ParserTool.parseLine(passage, parser, 1);
    if (passage.contains(ca)) {
        for (int i = 0; i < questionParse.length; i++) {
            score += matchChildren(questionParse[i], passageParse[i]);
        }
    }
    return score;
}
示例2: parserTest1
import opennlp.tools.cmdline.parser.ParserTool; //导入依赖的package包/类
/**
 * Smoke test: parses a fixed sentence concatenated with this.q, then this.q
 * alone, and prints the resulting parse trees plus the children of the first
 * parse's first child.
 */
public void parserTest1() throws IOException {
    if (!this.modelsAreInitialized) init();
    // beam size 20, advance percentage 0.95
    Parser parser = ParserFactory.create(this.parserModel, 20, 0.95);
    String combined = "Jane Austen was very modest about her own genius ." + this.q;
    Parse[] combinedParses = ParserTool.parseLine(combined, parser, 1);
    Parse[] questionParses = ParserTool.parseLine(this.q, parser, 1);
    // children of the first child of the top-ranked combined parse
    Parse[] firstChildren = (combinedParses[0].getChildren())[0].getChildren();
    combinedParses[0].expandTopNode(combinedParses[0]);
    for (Parse parse : combinedParses) {
        parse.show();
    }
    for (Parse parse : questionParses) {
        parse.show();
    }
    System.out.print("\n\n");
    for (Parse child : firstChildren) {
        child.show();
        System.out.print("\n");
    }
}
示例3: parsePassageText
import opennlp.tools.cmdline.parser.ParserTool; //导入依赖的package包/类
/**
 * Splits a passage into sentences, tokenizes each one, and returns the
 * top-ranked parse for every sentence.
 *
 * @param p passage text
 * @return one top-ranked {@link Parse} per detected sentence
 * @throws InvalidFormatException if a model is malformed
 */
public static Parse[] parsePassageText(String p) throws InvalidFormatException {
    SentenceDetectorME sentenceDetector = new SentenceDetectorME(sentenceModel);
    // beam size 20, advance percentage 0.95
    Parser parser = ParserFactory.create(parserModel, 20, 0.95);
    String[] sentences = sentenceDetector.sentDetect(p);
    Parse[] topParses = new Parse[sentences.length];
    int idx = 0;
    for (String sentence : sentences) {
        String[] tokens = SimpleTokenizer.INSTANCE.tokenize(sentence);
        String joined = StringUtils.join(tokens, " ");
        System.out.println("Found sentence " + joined);
        topParses[idx++] = ParserTool.parseLine(joined, parser, 1)[0];
    }
    return topParses;
}
示例4: parsePassageText
import opennlp.tools.cmdline.parser.ParserTool; //导入依赖的package包/类
/**
 * Instance variant: lazily initializes the models, then detects sentences,
 * tokenizes each with SimpleTokenizer, and parses each sentence, returning
 * the single best parse per sentence.
 *
 * @param p passage text
 * @return one top-ranked {@link Parse} per detected sentence
 * @throws InvalidFormatException if a model is malformed
 */
public Parse[] parsePassageText(String p) throws InvalidFormatException {
    if (!modelsAreInitialized) init();
    SentenceDetectorME sentenceDetector = new SentenceDetectorME(this.sentenceModel);
    // beam size 20, advance percentage 0.95
    Parser parser = ParserFactory.create(this.parserModel, 20, 0.95);
    String[] sentences = sentenceDetector.sentDetect(p);
    Parse[] bestParses = new Parse[sentences.length];
    for (int idx = 0; idx < sentences.length; idx++) {
        // SimpleTokenizer chosen over the other available tokenizers;
        // re-join with single spaces so the parser sees normalized input.
        String normalized = StringUtils.join(SimpleTokenizer.INSTANCE.tokenize(sentences[idx]), " ");
        System.out.println("Found sentence " + normalized);
        bestParses[idx] = ParserTool.parseLine(normalized, parser, 1)[0];
    }
    return bestParses;
}
示例5: main
import opennlp.tools.cmdline.parser.ParserTool; //导入依赖的package包/类
/**
 * Demo entry point: parses a hard-coded question with a chunk-based parser
 * and prints the head-word (hw=) and modifier-word (mw=) context features
 * produced by AnswerTypeContextGenerator.
 *
 * Requires -Dwordnet.dir=... and -Dmodel.dir=... pointing at the WordNet
 * dictionary and the OpenNLP model directory.
 *
 * @throws IOException if a model file cannot be read
 */
public static void main(String args[]) throws IOException {
    String wordnetDir = System.getProperty("wordnet.dir");
    String question = "Who is Abraham Lincoln?";
    AnswerTypeContextGenerator atcg = new AnswerTypeContextGenerator(new File(wordnetDir));
    String modelsDirProp = System.getProperty("model.dir");
    File modelsDir = new File(modelsDirProp);

    // FIX: the original never closed either model stream (resource leak);
    // try-with-resources releases each file as soon as the model is loaded.
    ChunkerModel chunkerModel;
    try (FileInputStream chunkerStream = new FileInputStream(new File(modelsDir, "en-chunker.bin"))) {
        chunkerModel = new ChunkerModel(chunkerStream);
    }
    ChunkerME chunker = new ChunkerME(chunkerModel);

    POSModel posModel;
    try (FileInputStream posStream = new FileInputStream(new File(modelsDir, "en-pos-maxent.bin"))) {
        posModel = new POSModel(posStream);
    }
    POSTaggerME tagger = new POSTaggerME(posModel);

    Parser parser = new ChunkParser(chunker, tagger);
    Parse query = ParserTool.parseLine(question, parser, 1)[0];
    String[] context = atcg.getContext(query);
    for (int i = 0; i < context.length; i++) {
        // print only head-word / modifier-word features, stripping the prefix
        if (context[i].startsWith("hw=") || context[i].startsWith("mw=")) {
            System.out.println(context[i].substring(3));
        }
    }
}
示例6: getFocusNoun
import opennlp.tools.cmdline.parser.ParserTool; //导入依赖的package包/类
/**
 * Extracts up to two focus nouns (head-word and modifier-word features)
 * from the question's parse context.
 *
 * @param question the question sentence to analyze
 * @return array of length 2; unfilled slots remain null
 * @throws IOException if a model file cannot be read
 */
public String[] getFocusNoun(String question) throws IOException {
    // FIX: the original unconditionally overwrote both system properties,
    // making -Dwordnet.dir / -Dmodel.dir dead flags. Use them when set and
    // fall back to the bundled defaults otherwise.
    String wordnetDir = System.getProperty("wordnet.dir");
    if (wordnetDir == null) wordnetDir = "WordNet-3.0/dict/";
    AnswerTypeContextGenerator atcg = new AnswerTypeContextGenerator(new File(wordnetDir));
    String modelsDirProp = System.getProperty("model.dir");
    if (modelsDirProp == null) modelsDirProp = "opennlp-models/";
    File modelsDir = new File(modelsDirProp);

    // FIX: close model streams (the original leaked both FileInputStreams).
    ChunkerModel chunkerModel;
    try (FileInputStream chunkerStream = new FileInputStream(new File(modelsDir, "en-chunker.bin"))) {
        chunkerModel = new ChunkerModel(chunkerStream);
    }
    ChunkerME chunker = new ChunkerME(chunkerModel);

    POSModel posModel;
    try (FileInputStream posStream = new FileInputStream(new File(modelsDir, "en-pos-maxent.bin"))) {
        posModel = new POSModel(posStream);
    }
    POSTaggerME tagger = new POSTaggerME(posModel);

    Parser parser = new ChunkParser(chunker, tagger);
    Parse query = ParserTool.parseLine(question, parser, 1)[0];
    String[] context = atcg.getContext(query);
    String[] focus = new String[2];
    int p = 0;
    for (int i = 0; i < context.length && p < focus.length; i++) {
        // FIX: bound p by focus.length — the original threw
        // ArrayIndexOutOfBoundsException on a third hw=/mw= match.
        if (context[i].startsWith("hw=") || context[i].startsWith("mw=")) {
            focus[p++] = context[i].substring(3);
        }
    }
    return focus;
}
示例7: next
import opennlp.tools.cmdline.parser.ParserTool; //导入依赖的package包/类
/**
 * Builds the next training event from the current line, which is expected
 * to be formatted as "&lt;outcome&gt; &lt;question text...&gt;".
 */
public Event next() {
    // split at the first space: outcome label, then the question itself
    int firstSpace = line.indexOf(' ');
    String outcome = line.substring(0, firstSpace);
    String question = line.substring(firstSpace + 1);
    Parse topParse = ParserTool.parseLine(question, parser, 1)[0];
    return new Event(outcome, atcg.getContext(topParse));
}
示例8: parseText
import opennlp.tools.cmdline.parser.ParserTool; //导入依赖的package包/类
/**
 * Parses the text and collects term/tag pairs from the immediate children
 * of each top-ranked parse.
 *
 * @param text input text to parse
 * @return multimap of terms to their tags
 * @throws Exception if resource loading or parsing fails
 */
public Multimap<String, String> parseText(String text) throws Exception {
    loadResource();
    Multimap<String, String> termTags = ArrayListMultimap.create();
    // only the single best parse is requested (numParses = 1)
    for (Parse topParse : ParserTool.parseLine(text, parser, 1)) {
        for (Parse child : topParse.getChildren()) {
            getTermTags(child, termTags);
        }
    }
    return termTags;
}
示例9: parse
import opennlp.tools.cmdline.parser.ParserTool; //导入依赖的package包/类
/**
 * Parses the input with the cached ParserModel and returns the single
 * top-ranked parse.
 */
public static Parse parse(String input) {
    // look up the shared model from the model registry
    ParserModel model = (ParserModel) models.get(ParserModel.class);
    Parse[] best = ParserTool.parseLine(input, ParserFactory.create(model), tokenizer(), 1);
    return best[0];
}
示例10: parseSentence
import opennlp.tools.cmdline.parser.ParserTool; //导入依赖的package包/类
/**
 * Returns the top-ranked parse of the sentence, or null when the parser
 * produces no parses at all.
 */
public Parse parseSentence(String sentence) {
    Parse[] best = ParserTool.parseLine(sentence, parser, 1);
    return best.length > 0 ? best[0] : null;
}
示例11: parseSentence
import opennlp.tools.cmdline.parser.ParserTool; //导入依赖的package包/类
/**
 * Turn one tokenized sentence into one top-ranked parse tree.
 *
 * @param tokens the sentence, already split into tokens
 * @return the single best parse of the re-joined sentence
 */
public Parse parseSentence(List<String> tokens) {
    System.out.print(";"); // progress marker on the console
    // re-join tokens with single spaces so the parser sees normalized input
    String sentence = StringUtils.join(tokens, " ");
    Parse[] best = ParserTool.parseLine(sentence, parser, 1);
    return best[0];
}
示例12: parsePassageText
import opennlp.tools.cmdline.parser.ParserTool; //导入依赖的package包/类
/**
 * Detects sentences in the passage, prints any named entities found in each
 * sentence, and returns the top-ranked parse for every sentence.
 *
 * @param p passage text
 * @return one top-ranked {@link Parse} per detected sentence
 * @throws InvalidFormatException if a model is malformed
 */
public Parse[] parsePassageText(String p) throws InvalidFormatException {
    if (!modelsAreInitialized) init();
    SentenceDetectorME sentenceDetector = new SentenceDetectorME(this.sentenceModel);
    NameFinderME nameFinder = new NameFinderME(this.nerModel);
    // beam size 20, advance percentage 0.95
    Parser parser = ParserFactory.create(this.parserModel, 20, 0.95);
    Tokenizer tokenizer = SimpleTokenizer.INSTANCE;
    String[] sentences = sentenceDetector.sentDetect(p);
    Parse[] results = new Parse[sentences.length];
    for (int i = 0; i < sentences.length; i++) {
        // FIX: the original ran an inner loop over ALL sentences for every
        // outer iteration, re-finding and re-printing every name
        // sentences.length times. Process only sentence i here.
        Span[] tokenSpans = tokenizer.tokenizePos(sentences[i]);
        String[] tokens = Span.spansToStrings(tokenSpans, sentences[i]);
        Span[] names = nameFinder.find(tokens);
        for (Span name : names) {
            // map token-index span back to character offsets in the sentence
            int nameStart = tokenSpans[name.getStart()].getStart();
            int nameEnd = tokenSpans[name.getEnd() - 1].getEnd();
            System.out.println(sentences[i].substring(nameStart, nameEnd));
        }
        // FIX: the original joined the Tokenizer OBJECT ("StringUtils.join(
        // tokenizer, ...)"), feeding its toString() to the parser. Join the
        // actual tokens of this sentence instead.
        String sent = StringUtils.join(tokens, " ");
        System.out.println("Found sentence " + sent);
        results[i] = ParserTool.parseLine(sent, parser, 1)[0];
    }
    return results;
}
示例13: parse
import opennlp.tools.cmdline.parser.ParserTool; //导入依赖的package包/类
/**
 * Parses the text and returns the five top-ranked parses.
 */
public Parse[] parse(String text) {
    Parser lineParser = new Parser(parserModel);
    return ParserTool.parseLine(text, lineParser, 5);
}
示例14: parse
import opennlp.tools.cmdline.parser.ParserTool; //导入依赖的package包/类
/**
 * Returns the single top-ranked parse of the sentence.
 */
public Parse parse(String sent) {
    Parse[] topParses = ParserTool.parseLine(sent, parser, 1);
    return topParses[0];
}
示例15: scorePassage
import opennlp.tools.cmdline.parser.ParserTool; //导入依赖的package包/类
/**
 * Scores a passage as A / B, where A is the number of (question NP,
 * passage NP) string matches and B is the total number of passage NPs
 * collected. Returns 0 when the passage yields no NPs.
 *
 * NOTE(review): questionNPs / passageNPs are instance collections filled by
 * navigateTree; they are not cleared here — presumably handled elsewhere.
 */
@Override
public double scorePassage(Phrase q, Answer a, Passage p) {
    int matchingQuestionNPs = 0;
    try {
        // prep NLP tools
        if (!this.modelsAreInitialized) init();
        Parser parser = ParserFactory.create(this.parserModel, 20, 0.95);
        // parse the question
        Parse[] questionParse = ParserTool.parseLine(q.text, parser, 1);
        // parse each sentence of the passage, keeping the top parse of each
        String[] sentences = this.DivideIntoSentences(p);
        Parse[] sentenceParses = new Parse[sentences.length];
        for (int s = 0; s < sentences.length; s++) {
            sentenceParses[s] = ParserTool.parseLine(sentences[s], parser, 1)[0];
        }
        // collect NPs from the question parse
        navigateTree(questionParse, 0, questionNPs);
        // collect NPs from every passage sentence parse
        for (int s = 0; s < sentenceParses.length; s++) {
            navigateTree(sentenceParses, s, passageNPs);
        }
        // count question NPs that also occur among the passage NPs (A);
        // each matching pair contributes one, as in the original
        for (String qNP : questionNPs) {
            for (String pNP : passageNPs) {
                if (qNP.equals(pNP)) {
                    matchingQuestionNPs++;
                }
            }
        }
    } catch (InvalidFormatException e) {
        e.printStackTrace();
    }
    // score = A / B; guard against an empty passage NP set
    if (passageNPs.size() == 0)
        return 0;
    else
        return (double) matchingQuestionNPs / passageNPs.size();
}