本文整理汇总了Java中edu.stanford.nlp.trees.TreePrint.printTree方法的典型用法代码示例。如果您正苦于以下问题:Java TreePrint.printTree方法的具体用法?Java TreePrint.printTree怎么用?Java TreePrint.printTree使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类edu.stanford.nlp.trees.TreePrint
的用法示例。
在下文中一共展示了TreePrint.printTree方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: demoAPI
import edu.stanford.nlp.trees.TreePrint; //导入方法依赖的package包/类
/**
* demoAPI demonstrates other ways of calling the parser with already
* tokenized text, or in some cases, raw text that needs to be tokenized as
* a single sentence. Output is handled with a TreePrint object. Note that
* the options used when creating the TreePrint can determine what results
* to print out. Once again, one can capture the output by passing a
* PrintWriter to TreePrint.printTree.
*
* difference: already tokenized text
*
*
*/
/**
 * Demonstrates alternative ways of invoking the parser: once on an
 * already-tokenized word list, and once on raw text pushed through an
 * explicit PTB tokenizer. Output is rendered with pennPrint and with a
 * TreePrint object; the options string passed to the TreePrint
 * constructor controls which representations are printed. Output can be
 * captured by handing a PrintWriter to TreePrint.printTree.
 *
 * @param lp the loaded parser to exercise
 */
public static void demoAPI(LexicalizedParser lp) {
    // Case 1: parse a list of correctly tokenized words.
    String[] tokens = { "This", "is", "an", "easy", "sentence", "." };
    List<CoreLabel> labeledTokens = Sentence.toCoreLabelList(tokens);
    Tree tree = lp.apply(labeledTokens);
    tree.pennPrint();
    System.out.println();

    // Case 2: load an explicit tokenizer and run it over raw text.
    String rawText = "Hey @Apple, pretty much all your products are amazing. You blow minds every time you launch a new gizmo."
            + " that said, your hold music is crap";
    TokenizerFactory<CoreLabel> factory = PTBTokenizer.factory(new CoreLabelTokenFactory(), "");
    Tokenizer<CoreLabel> tokenizer = factory.getTokenizer(new StringReader(rawText));
    List<CoreLabel> rawTokens = tokenizer.tokenize();
    tree = lp.apply(rawTokens);

    // Derive CC-processed typed dependencies from the parse and print them.
    TreebankLanguagePack languagePack = new PennTreebankLanguagePack();
    GrammaticalStructureFactory structureFactory = languagePack.grammaticalStructureFactory();
    GrammaticalStructure structure = structureFactory.newGrammaticalStructure(tree);
    List<TypedDependency> dependencies = structure.typedDependenciesCCprocessed();
    System.out.println(dependencies);
    System.out.println();

    // A TreePrint can emit both the tree and its collapsed dependencies.
    TreePrint printer = new TreePrint("penn,typedDependenciesCollapsed");
    printer.printTree(tree);
}
示例2: getPenn
import edu.stanford.nlp.trees.TreePrint; //导入方法依赖的package包/类
/**
 * Renders the buffered parse tree in Penn Treebank bracket notation.
 *
 * @return the Penn-format string, or "" when no tree has been buffered
 */
public String getPenn()
{
    if (bufferTree == null) {
        return "";
    }
    // Print into an in-memory writer so the result can be returned as text.
    StringWriter rendered = new StringWriter();
    PrintWriter out = new PrintWriter(rendered);
    new TreePrint("penn").printTree(bufferTree, out);
    return rendered.toString();
}
示例3: getDependencies
import edu.stanford.nlp.trees.TreePrint; //导入方法依赖的package包/类
/**
 * Renders the buffered parse tree as collapsed typed dependencies.
 *
 * @return the dependency listing, or "" when no tree has been buffered
 */
public String getDependencies()
{
    // Guard against a missing tree, mirroring getPenn(); without this,
    // printTree would be handed a null tree (NOTE(review): likely NPE —
    // confirm TreePrint's behavior on null input).
    if(bufferTree==null) return "";
    TreePrint tp = new TreePrint("typedDependenciesCollapsed");
    Writer parse_string = new StringWriter();
    PrintWriter printWriter = new PrintWriter(parse_string);
    tp.printTree(bufferTree, printWriter); // print tree
    return parse_string.toString();
}
示例4: parse
import edu.stanford.nlp.trees.TreePrint; //导入方法依赖的package包/类
/**
 * Parse a (speech) lattice with the PCFG parser.
 *
 * Resets all parse-outcome flags up front, rejects lattices longer than
 * the configured maximum, then (when PCFG parsing is enabled) runs the
 * PCFG parser, printing its best parse in verbose mode.
 *
 * @param lr a lattice to parse
 * @return Whether the lattice could be parsed by the grammar
 * @throws UnsupportedOperationException if the lattice has more states
 *         than testOptions.maxLength + 1 (the extra 1 accounts for the
 *         boundary symbol)
 */
boolean parse(HTKLatticeReader lr) {
TreePrint treePrint = getTreePrint();
PrintWriter pwOut = op.tlpParams.pw();
// Reset every outcome flag so this attempt's result is unambiguous
// regardless of what a previous parse left behind.
parseSucceeded = false;
parseNoMemory = false;
parseUnparsable = false;
parseSkipped = false;
parseFallback = false;
whatFailed = null;
originalSentence = null;
if (lr.getNumStates() > op.testOptions.maxLength + 1) { // + 1 for boundary symbol
parseSkipped = true;
throw new UnsupportedOperationException("Lattice too big: " + lr.getNumStates());
}
if (op.doPCFG) {
// PCFG parser failed: return the still-false success flag.
if (!pparser.parse(lr)) {
return parseSucceeded;
}
if (op.testOptions.verbose) {
pwOut.println("PParser output");
treePrint.printTree(getBestPCFGParse(false), pwOut);
}
}
parseSucceeded = true;
return true;
}
示例5: addConstituentTreeInfo
import edu.stanford.nlp.trees.TreePrint; //导入方法依赖的package包/类
/**
* Generates the XML content for a constituent tree
*/
/**
 * Generates the XML content for a constituent tree: renders the tree with
 * the supplied TreePrint and appends the resulting text to treeInfo.
 */
private static void addConstituentTreeInfo(Element treeInfo, Tree tree, TreePrint constituentTreePrinter) {
    StringWriter rendered = new StringWriter();
    // Auto-flushing PrintWriter so the buffer is complete before it is read.
    PrintWriter sink = new PrintWriter(rendered, true);
    constituentTreePrinter.printTree(tree, sink);
    treeInfo.appendChild(rendered.toString());
}
示例6: TreeObjectToString
import edu.stanford.nlp.trees.TreePrint; //导入方法依赖的package包/类
/**
 * Renders a parse tree to a whitespace-trimmed string using the given
 * TreePrint configuration.
 */
public static String TreeObjectToString(Tree tree, TreePrint tp)
{
    StringWriter rendered = new StringWriter();
    tp.printTree(tree, new PrintWriter(rendered));
    return rendered.getBuffer().toString().trim();
}
示例7: analyzeSentences
import edu.stanford.nlp.trees.TreePrint; //导入方法依赖的package包/类
/**
* @desc Parses input sentences and prints the parses to the given doc.
* @param paragraphs Vector of strings of sentences
* @param doc The current document we're printing to.
* @param depdoc The document of dependencies that we're printing to.
*/
/**
 * @desc Parses input sentences and prints the parses to the given doc.
 * @param paragraphs Vector of strings of sentences
 * @param doc The current document we're printing to.
 * @param depdoc The document of dependencies that we're printing to.
 */
private void analyzeSentences( Vector<String> paragraphs, GigaDoc doc,
    GigaDoc depdoc) {
  int sid = 0;
  // Hoisted out of the loops: the TreePrint is configuration-only, so build
  // it once instead of reallocating it for every parsed sentence.
  TreePrint tp = new TreePrint("penn");
  // Paragraphs may be multiple sentences
  for( String fragment : paragraphs ) {
    // Replace underscores (gigaword has underscores in many places commas should be)
    if( fragment.contains(" _ ") ) fragment = fragment.replaceAll(" _ ", " , ");
    // Split sentences
    List<List<HasWord>> list = Ling.getSentencesFromText(fragment);
    // Loop over each sentence
    for( List<HasWord> sentence : list ) {
      Tree ansTree;
      if( sentence.size() > MAX_SENTENCE_LENGTH )
        System.out.println("Sentence far too long: " + sentence.size());
      else if( (ansTree = parser.parseTree(sentence)) == null )
        System.out.println("Failed to parse: " + sentence);
      else {
        // Build a StringWriter, print the tree to it, then save the string
        StringWriter treeStrWriter = new StringWriter();
        tp.printTree(ansTree, new PrintWriter(treeStrWriter,true));
        doc.addParse(treeStrWriter.toString());
        // Create the dependency tree - CAUTION: DESTRUCTIVE to parse tree
        try {
          GrammaticalStructure gs = gsf.newGrammaticalStructure(ansTree);
          Collection<TypedDependency> deps = gs.typedDependenciesCCprocessed(true);
          depdoc.addDependencies(deps, sid);
        } catch( Exception ex ) {
          ex.printStackTrace();
          System.out.println("WARNING: dependency tree creation failed...adding null deps");
          depdoc.addDependencies(null, sid);
        }
        sid++;
      }
    }
  }
}
示例8: analyzeSentences
import edu.stanford.nlp.trees.TreePrint; //导入方法依赖的package包/类
/**
* @desc Parses input sentences and prints the parses to the given doc.
* @param paragraphs Vector of strings of sentences
* @param doc The current document we're printing to.
* @param depdoc The document of dependencies that we're printing to.
*/
/**
 * @desc Parses input sentences and prints the parses to the given doc.
 * @param paragraphs Vector of strings of sentences
 * @param doc The current document we're printing to.
 * @param depdoc The document of dependencies that we're printing to.
 */
private void analyzeSentences( Vector<String> paragraphs, GigaDoc doc, GigaDoc depdoc) {
  int sid = 0;
  // Hoisted out of the loops: the TreePrint is configuration-only, so build
  // it once instead of reallocating it for every parsed sentence.
  TreePrint tp = new TreePrint("penn");
  // Paragraphs may be multiple sentences
  for( String fragment : paragraphs ) {
    // Replace underscores (gigaword has underscores in many places commas should be)
    if( fragment.contains(" _ ") ) fragment = fragment.replaceAll(" _ ", " , ");
    // Loop over each sentence
    for( List<HasWord> sentence : Ling.getSentencesFromText(fragment) ) {
      Tree ansTree;
      if( sentence.size() > MAX_SENTENCE_LENGTH )
        System.out.println("Sentence far too long: " + sentence.size());
      else if( (ansTree = parser.parseTree(sentence)) == null )
        System.out.println("Failed to parse: " + sentence);
      else {
        // Build a StringWriter, print the tree to it, then save the string
        StringWriter treeStrWriter = new StringWriter();
        tp.printTree(ansTree, new PrintWriter(treeStrWriter,true));
        doc.addParse(treeStrWriter.toString());
        // Create the dependency tree - CAUTION: DESTRUCTIVE to parse tree
        try {
          GrammaticalStructure gs = gsf.newGrammaticalStructure(ansTree);
          Collection<TypedDependency> deps = gs.typedDependenciesCCprocessed(true);
          depdoc.addDependencies(deps, sid);
        } catch( Exception ex ) {
          ex.printStackTrace();
          System.out.println("WARNING: dependency tree creation failed...adding null deps");
          depdoc.addDependencies(null, sid);
        }
        sid++;
      }
    }
  }
}
示例9: analyzeSentences
import edu.stanford.nlp.trees.TreePrint; //导入方法依赖的package包/类
/**
* @desc Parses input sentences and prints the parses to the given doc.
* @param paragraphs Vector of strings of sentences
* @param doc The current document we're printing to.
* @param depdoc The document of dependencies that we're printing to.
*/
/**
 * @desc Parses input sentences and prints the parses to the given doc.
 * @param sentences pre-split sentences (lists of words) to parse
 * @param doc The current document we're printing to.
 * @param depdoc The document of dependencies that we're printing to.
 */
private void analyzeSentences( List<List<HasWord>> sentences, GigaDoc doc,
    GigaDoc depdoc) {
  int sid = 0;
  // Hoisted out of the loop: the TreePrint is configuration-only, so build
  // it once instead of reallocating it for every parsed sentence.
  TreePrint tp = new TreePrint("penn");
  // Loop over each sentence
  for( List<HasWord> sentence : sentences ) {
    Tree ansTree;
    // Progress markers: a dot every 5 sentences, the sentence every 50.
    if( sid % 5 == 0 ) { System.out.print("."); }
    if( sid % 50 == 0 ) { System.out.println("\n" + sentence); }
    if( sentence.size() > MAX_SENTENCE_LENGTH )
      System.out.println("Sentence far too long: " + sentence.size());
    else if( (ansTree = parser.parseTree(sentence)) == null )
      System.out.println("Failed to parse: " + sentence);
    else {
      // Save to InfoFile
      // Build a StringWriter, print the tree to it, then save the string
      StringWriter treeStrWriter = new StringWriter();
      tp.printTree(ansTree, new PrintWriter(treeStrWriter,true));
      doc.addParse(treeStrWriter.toString());
      // Create the dependency tree - CAUTION: DESTRUCTIVE to parse tree
      try {
        GrammaticalStructure gs = gsf.newGrammaticalStructure(ansTree);
        Collection<TypedDependency> deps = gs.typedDependenciesCCprocessed(true);
        depdoc.addDependencies(deps, sid);
      } catch( Exception ex ) {
        ex.printStackTrace();
        System.out.println("WARNING: dependency tree creation failed...adding null deps");
        depdoc.addDependencies(null, sid);
      }
      sid++;
    }
  }
}