本文整理汇总了Java中edu.stanford.nlp.parser.lexparser.LexicalizedParser类的典型用法代码示例。如果您正苦于以下问题:Java LexicalizedParser类的具体用法?Java LexicalizedParser怎么用?Java LexicalizedParser使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
LexicalizedParser类属于edu.stanford.nlp.parser.lexparser包,在下文中一共展示了LexicalizedParser类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: SentenceExtractThread
import edu.stanford.nlp.parser.lexparser.LexicalizedParser; //导入依赖的package包/类
/**
 * Builds a sentence-extraction worker for one cluster-result file.
 *
 * @param resultDir directory holding the clustering results
 * @param filename_cluster_read name of the cluster file to read
 * @param extractedSentencesSaveDir directory where extracted sentences are saved
 * @param textDir directory containing the source texts
 * @param lp shared Stanford lexicalized parser instance
 * @param dictPath filesystem path to the WordNet dictionary
 */
public SentenceExtractThread(String resultDir,
String filename_cluster_read, String extractedSentencesSaveDir,
String textDir, LexicalizedParser lp, String dictPath) {
    this.lp = lp;
    this.textDir = textDir;
    this.clusterResultDir = resultDir;
    this.filename_cluster_read = filename_cluster_read;
    this.extractedSentencesSaveDir = extractedSentencesSaveDir;
    try {
        this.dict = WordNetUtil.openDictionary(dictPath);
    } catch (final IOException e) {
        // Best effort: log the failure ("failed to open WordNet") and
        // continue with a null dictionary.
        this.log.error("打开WordNet失败!", e);
    }
}
示例2: demoDP
import edu.stanford.nlp.parser.lexparser.LexicalizedParser; //导入依赖的package包/类
/**
 * demoDP demonstrates turning a file into tokens and then parse trees.
 * Each tree is printed via pennPrint (pass a PrintWriter to pennPrint
 * instead if you need to capture the output), followed by its
 * CC-processed typed dependencies.
 *
 * file => tokens => parse trees
 */
public static void demoDP(LexicalizedParser lp, String filename) {
    // DocumentPreprocessor takes care of loading, sentence-segmenting and
    // tokenizing the file; a custom tokenizer could also be created and
    // handed to it instead of the default.
    TreebankLanguagePack langPack = new PennTreebankLanguagePack();
    GrammaticalStructureFactory structureFactory = langPack.grammaticalStructureFactory();
    for (List<HasWord> tokens : new DocumentPreprocessor(filename)) {
        Tree tree = lp.apply(tokens);
        tree.pennPrint();
        System.out.println();
        GrammaticalStructure structure = structureFactory.newGrammaticalStructure(tree);
        Collection dependencies = structure.typedDependenciesCCprocessed();
        System.out.println(dependencies);
        System.out.println();
    }
}
示例3: run
import edu.stanford.nlp.parser.lexparser.LexicalizedParser; //导入依赖的package包/类
/**
 * Loads the lexicalized parser from {@code filename} in the background,
 * reporting progress and errors through the enclosing ParserPanel UI.
 */
@Override
public void run() {
    try {
        parser = new edu.stanford.nlp.parser.lexparser.LexicalizedParser(filename);
    } catch (OutOfMemoryError e) {
        // Caught separately (it is an Error, not an Exception) so the user
        // gets a specific message when the heap is too small for the model.
        JOptionPane.showMessageDialog(ParserPanel.this, "Could not load parser. Out of memory.", null, JOptionPane.ERROR_MESSAGE);
        setStatus("Error loading parser");
        parser = null;
    } catch (Exception ex) {
        JOptionPane.showMessageDialog(ParserPanel.this, "Error loading parser: " + filename, null, JOptionPane.ERROR_MESSAGE);
        setStatus("Error loading parser");
        parser = null;
    }
    stopProgressMonitor();
    if (parser == null) {
        return; // load failed; leave the parse buttons disabled
    }
    setStatus("Loaded parser.");
    parserFileLabel.setText("Parser: " + filename);
    parseButton.setEnabled(true);
    parseNextButton.setEnabled(true);
}
示例4: main
import edu.stanford.nlp.parser.lexparser.LexicalizedParser; //导入依赖的package包/类
/**
 * Parses a fixed example sentence with a serialized English PCFG grammar
 * and prints the phrase-structure tree, its collapsed typed dependencies,
 * and a combined TreePrint rendering.
 */
public static void main(String[] args) {
    LexicalizedParser parser = new LexicalizedParser("englishPCFG.ser.gz");
    parser.setOptionFlags(new String[]{"-maxLength", "80", "-retainTmpSubcategories"});

    Tree tree = (Tree) parser.apply(Arrays.asList("This", "is", "an", "easy", "sentence", "."));
    tree.pennPrint();
    System.out.println();

    GrammaticalStructureFactory factory =
        new PennTreebankLanguagePack().grammaticalStructureFactory();
    GrammaticalStructure structure = factory.newGrammaticalStructure(tree);
    Collection dependencies = structure.typedDependenciesCollapsed();
    System.out.println(dependencies);
    System.out.println();

    new TreePrint("penn,typedDependenciesCollapsed").printTree(tree);
}
示例5: T2PStanfordWrapper
import edu.stanford.nlp.parser.lexparser.LexicalizedParser; //导入依赖的package包/类
/**
 * Loads the serialized English grammar — from the classpath when running
 * out of a jar, otherwise from the resources directory — and sets up the
 * treebank language pack and grammatical-structure factory.
 */
public T2PStanfordWrapper() {
    try {
        ObjectInputStream in;
        InputStream is;
        URL u = T2PStanfordWrapper.class.getResource("/englishFactored.ser.gz");
        if(u == null){
            //opening from IDE
            is = new FileInputStream(new File("resources/englishFactored.ser.gz"));
        }else{
            //opening from jar
            URLConnection uc = u.openConnection();
            is = uc.getInputStream();
        }
        in = new ObjectInputStream(new GZIPInputStream(new BufferedInputStream(is)));
        f_parser = new LexicalizedParser(in);
        in.close(); // BUG FIX: the model stream was previously never closed
        f_tlp = new PennTreebankLanguagePack(); //new ChineseTreebankLanguagePack();
        f_gsf = f_tlp.grammaticalStructureFactory();
        //option flags as in the Parser example, but without maxlength
        // BUG FIX: this call used to sit after the catch block, so a failed
        // model load left f_parser null and this line threw a
        // NullPointerException that masked the original error.
        f_parser.setOptionFlags(new String[]{"-retainTmpSubcategories"});
        //f_parser.setOptionFlags(new String[]{"-segmentMarkov"});
        Test.MAX_ITEMS = 4000000; //enables parsing of long sentences
    }catch(Exception ex) {
        ex.printStackTrace();
    }
}
示例6: main
import edu.stanford.nlp.parser.lexparser.LexicalizedParser; //导入依赖的package包/类
/**
 * Parses one sentence with the factored English grammar, then walks from
 * the first collapsed typed dependency to its dependent node's parent and
 * deep-copies that node (exercising the TreeGraphNode API).
 */
public static void main(String[] args) {
    LexicalizedParser parser = new LexicalizedParser("parsers/englishFactored.ser.gz");
    parser.setOptionFlags(new String[]{"-maxLength", "80", "-retainTmpSubcategories"});
    Tree tree = (Tree) parser.apply("Try this sentence, which is slightly longer.");

    GrammaticalStructureFactory factory =
        new PennTreebankLanguagePack().grammaticalStructureFactory();
    Collection<TypedDependency> dependencies =
        factory.newGrammaticalStructure(tree).typedDependenciesCollapsed();

    TypedDependency first = dependencies.iterator().next();
    TreeGraphNode parentOfDependent = (TreeGraphNode) first.dep().parent();
    parentOfDependent.deepCopy();
}
示例7: signature
import edu.stanford.nlp.parser.lexparser.LexicalizedParser; //导入依赖的package包/类
/**
 * Builds a signature string for the parser annotator from its
 * configuration properties, so cached annotator instances can be compared
 * by configuration. Each relevant key is appended together with its
 * effective (possibly defaulted) value.
 *
 * @param annotatorName property-name prefix for this annotator
 * @param props configuration to read; missing keys fall back to defaults
 * @return the concatenated key/value signature
 */
public static String signature(String annotatorName, Properties props) {
    // Chain append() calls instead of concatenating inside append(), which
    // previously built a throwaway intermediate String for every property.
    StringBuilder os = new StringBuilder();
    os.append(annotatorName).append(".model:")
      .append(props.getProperty(annotatorName + ".model",
              LexicalizedParser.DEFAULT_PARSER_LOC));
    os.append(annotatorName).append(".debug:")
      .append(props.getProperty(annotatorName + ".debug", "false"));
    os.append(annotatorName).append(".flags:")
      .append(props.getProperty(annotatorName + ".flags", ""));
    os.append(annotatorName).append(".maxlen:")
      .append(props.getProperty(annotatorName + ".maxlen", "-1"));
    os.append(annotatorName).append(".treemap:")
      .append(props.getProperty(annotatorName + ".treemap", ""));
    os.append(annotatorName).append(".maxtime:")
      .append(props.getProperty(annotatorName + ".maxtime", "0"));
    os.append(annotatorName).append(".buildgraphs:")
      .append(props.getProperty(annotatorName + ".buildgraphs", "true"));
    os.append(annotatorName).append(".nthreads:")
      .append(props.getProperty(annotatorName + ".nthreads", props.getProperty("nthreads", "")));
    return os.toString();
}
示例8: writeImage
import edu.stanford.nlp.parser.lexparser.LexicalizedParser; //导入依赖的package包/类
/**
 * Parses {@code sentence} with the English PCFG model and renders the
 * resulting parse tree as an image file.
 *
 * @param sentence raw text to parse
 * @param outFile path of the image file to write
 * @param scale rendering scale factor
 * @throws Exception if tokenizing, parsing or writing the image fails
 */
public static void writeImage(String sentence, String outFile, int scale) throws Exception {
    LexicalizedParser parser;
    try {
        parser = LexicalizedParser.loadModel("edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz");
    } catch (Exception e) {
        // Best effort: report the missing model and bail out.
        System.err.println("Could not load file englishPCFG.ser.gz. Try placing this file in the same directory as Dependencee.jar");
        return;
    }
    parser.setOptionFlags(new String[]{"-maxLength", "500", "-retainTmpSubcategories"});
    TokenizerFactory<CoreLabel> tokenizerFactory =
        PTBTokenizer.factory(new CoreLabelTokenFactory(), "");
    List<CoreLabel> tokens =
        tokenizerFactory.getTokenizer(new StringReader(sentence)).tokenize();
    writeImage(parser.apply(tokens), outFile, scale);
}
示例9: testWriteImage
import edu.stanford.nlp.parser.lexparser.LexicalizedParser; //导入依赖的package包/类
/**
 * Test of writeImage method, of class Main.
 */
@Test
public void testWriteImage() throws Exception {
    String sentence = "A quick brown fox jumped over the lazy dog.";

    LexicalizedParser parser = LexicalizedParser.loadModel();
    parser.setOptionFlags(new String[]{"-maxLength", "500", "-retainTmpSubcategories"});
    TokenizerFactory<CoreLabel> tokenizerFactory =
        PTBTokenizer.factory(new CoreLabelTokenFactory(), "");
    List<CoreLabel> tokens =
        tokenizerFactory.getTokenizer(new StringReader(sentence)).tokenize();
    Tree tree = parser.apply(tokens);

    GrammaticalStructureFactory factory =
        new PennTreebankLanguagePack().grammaticalStructureFactory();
    Collection<TypedDependency> dependencies =
        factory.newGrammaticalStructure(tree).typedDependenciesCollapsed();

    Main.writeImage(dependencies, "image.png", 3);
    assert (new File("image.png").exists());
}
示例10: demoDP
import edu.stanford.nlp.parser.lexparser.LexicalizedParser; //导入依赖的package包/类
/**
 * Reads a file with DocumentPreprocessor (which loads, sentence-segments
 * and tokenizes it), parses each sentence, and prints the tree followed
 * by its CC-processed typed dependencies.
 */
public static void demoDP(LexicalizedParser lp, String filename) {
    // A custom tokenizer could also be created and handed to
    // DocumentPreprocessor instead of the default one.
    GrammaticalStructureFactory structureFactory =
        new PennTreebankLanguagePack().grammaticalStructureFactory();
    for (List<HasWord> tokens : new DocumentPreprocessor(filename)) {
        Tree tree = lp.apply(tokens);
        tree.pennPrint();
        System.out.println();
        Collection dependencies =
            structureFactory.newGrammaticalStructure(tree).typedDependenciesCCprocessed(true);
        System.out.println(dependencies);
        System.out.println();
    }
}
示例11: instantiateStanfordParser
import edu.stanford.nlp.parser.lexparser.LexicalizedParser; //导入依赖的package包/类
/**
 * Lazily loads the Stanford parser from the configured serialized model
 * file; does nothing when a parser has already been created.
 *
 * @throws ResourceInstantiationException if the model cannot be loaded
 */
private void instantiateStanfordParser()
throws ResourceInstantiationException {
    if(stanfordParser != null) {
        return; // already instantiated
    }
    try {
        String modelLocation = parserFile.toExternalForm();
        stanfordParser = LexicalizedParser.getParserFromSerializedFile(modelLocation);
    } catch(Exception e) {
        throw new ResourceInstantiationException(e);
    }
}
示例12: initLexResources
import edu.stanford.nlp.parser.lexparser.LexicalizedParser; //导入依赖的package包/类
/**
 * Initializes the lexicalized parser from the serialized grammar and sets
 * up the dependency-tree machinery (grammatical-structure factory).
 */
private void initLexResources() {
    try {
        options = new Options();
        options.testOptions.verbose = true;
        // Parser
        parser = LexicalizedParser.loadModel(_serializedGrammar);
    } catch( Exception ex ) {
        // Best effort: log the failure and continue with a null parser.
        ex.printStackTrace();
    }
    // Dependency tree info
    gsf = new PennTreebankLanguagePack().grammaticalStructureFactory();
}
示例13: main
import edu.stanford.nlp.parser.lexparser.LexicalizedParser; //导入依赖的package包/类
/**
 * Scores every regular file in {@code fileDir} with the default
 * lexicalized parser: each line is split on '\' and '.' characters and
 * the parser score of every fragment is accumulated. Prints a running
 * total per file, the overall total/count, and finally the score of the
 * sentence {@code s}.
 *
 * @throws IOException if a file cannot be read
 */
public static void main(String[] args) throws IOException {
    LexicalizedParser parser = LexicalizedParser.loadModel();
    File[] files = new File(fileDir).listFiles();
    if (files == null) {
        // Robustness: listFiles() returns null for a missing/non-directory
        // path; previously this fell through to a NullPointerException.
        System.err.println("No files found in " + fileDir);
        return;
    }
    int num = 0;
    double score = 0.0;
    for (File f : files) {
        if (f.isDirectory())
            continue;
        BufferedReader br = new BufferedReader(new FileReader(f.getAbsolutePath()));
        try {
            String line;
            while ((line = br.readLine()) != null) {
                // NOTE: StringTokenizer treats the delimiter argument as a
                // set of characters, so "\\." splits on '\' and '.' alone.
                StringTokenizer st = new StringTokenizer(line, "\\.");
                while (st.hasMoreTokens()) {
                    score = score + parser.parse(st.nextToken()).score();
                    num++;
                }
            }
        } finally {
            // BUG FIX: the reader previously leaked when readLine()/parse()
            // threw before the explicit close() was reached.
            br.close();
        }
        System.out.println(score + " for " + f.getName());
    }
    System.out.println(score + "/" + num);
    System.out.println(parser.parse(s).score());
}
示例14: setParse
import edu.stanford.nlp.parser.lexparser.LexicalizedParser; //导入依赖的package包/类
/**
 * Lazily joins the segmented words into a space-separated string, parses
 * it with the shared parser, and stores the collapsed Chinese typed
 * dependencies in {@code parseResult}.
 */
private void setParse(){
    if (this.segtext == null || this.segtext.length() == 0) {
        // StringBuilder instead of StringBuffer: no synchronization needed
        // here, and chained appends avoid a temporary String per word.
        StringBuilder sb = new StringBuilder();
        for (String w : seggedText) {
            sb.append(w).append(' ');
        }
        segtext = sb.toString();
    }
    LexicalizedParser lp = DicModel.loadParser();
    Tree t = lp.parse(segtext);
    ChineseGrammaticalStructure gs = new ChineseGrammaticalStructure(t);
    parseResult = gs.typedDependenciesCollapsed();
}
示例15: main
import edu.stanford.nlp.parser.lexparser.LexicalizedParser; //导入依赖的package包/类
/**
 * The main method demonstrates the easiest way to load a parser. Simply
 * call loadModel and specify the path of a serialized grammar model, which
 * can be a file, a resource on the classpath, or even a URL. For example,
 * this demonstrates loading from the models jar file, which you therefore
 * need to include in the classpath for ParserDemo to work.
 */
public static void main(String[] args) {
    LexicalizedParser parser =
        LexicalizedParser.loadModel("edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz");
    if (args.length == 0) {
        demoAPI(parser);
    } else {
        demoDP(parser, args[0]);
    }
}