本文整理汇总了Java中edu.stanford.nlp.trees.TreePrint类的典型用法代码示例。如果您正苦于以下问题:Java TreePrint类的具体用法?Java TreePrint怎么用?Java TreePrint使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
TreePrint类属于edu.stanford.nlp.trees包,在下文中一共展示了TreePrint类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: demoAPI
import edu.stanford.nlp.trees.TreePrint; //导入依赖的package包/类
/**
 * Demonstrates alternative ways of invoking the parser: once on a list of
 * already-tokenized words, and once on raw text run through an explicit
 * PTB tokenizer. Output is rendered both directly and through a TreePrint
 * instance; the options passed when constructing the TreePrint determine
 * what gets printed, and a PrintWriter can be handed to
 * TreePrint.printTree to capture the output.
 *
 * @param lp the loaded parser model used for both parses
 */
public static void demoAPI(LexicalizedParser lp) {
    // First: parse words that are already correctly tokenized.
    String[] tokens = { "This", "is", "an", "easy", "sentence", "." };
    Tree parse = lp.apply(Sentence.toCoreLabelList(tokens));
    parse.pennPrint();
    System.out.println();

    // Second: tokenize raw text explicitly, then parse the token list.
    String rawText = "Hey @Apple, pretty much all your products are amazing. You blow minds every time you launch a new gizmo."
        + " that said, your hold music is crap";
    TokenizerFactory<CoreLabel> factory =
        PTBTokenizer.factory(new CoreLabelTokenFactory(), "");
    Tokenizer<CoreLabel> tokenizer = factory.getTokenizer(new StringReader(rawText));
    parse = lp.apply(tokenizer.tokenize());

    // Derive CC-processed typed dependencies from the second parse.
    TreebankLanguagePack tlp = new PennTreebankLanguagePack();
    GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
    GrammaticalStructure gs = gsf.newGrammaticalStructure(parse);
    List<TypedDependency> tdl = gs.typedDependenciesCCprocessed();
    System.out.println(tdl);
    System.out.println();

    // A TreePrint can also render the tree plus collapsed dependencies.
    TreePrint tp = new TreePrint("penn,typedDependenciesCollapsed");
    tp.printTree(parse);
}
示例2: getPenn
import edu.stanford.nlp.trees.TreePrint; //导入依赖的package包/类
/**
 * Renders the buffered parse tree in Penn Treebank bracketed format.
 *
 * @return the Penn-style tree string, or "" when no tree is buffered
 */
public String getPenn()
{
    if (bufferTree == null) {
        return "";
    }
    StringWriter output = new StringWriter();
    PrintWriter sink = new PrintWriter(output);
    new TreePrint("penn").printTree(bufferTree, sink); // print tree
    return output.toString();
}
示例3: getDependencies
import edu.stanford.nlp.trees.TreePrint; //导入依赖的package包/类
/**
 * Renders the buffered parse tree as collapsed typed dependencies.
 *
 * @return the collapsed-dependency representation of the buffered tree
 */
public String getDependencies()
{
    StringWriter output = new StringWriter();
    PrintWriter sink = new PrintWriter(output);
    new TreePrint("typedDependenciesCollapsed").printTree(bufferTree, sink); // print tree
    return output.toString();
}
示例4: parse
import edu.stanford.nlp.trees.TreePrint; //导入依赖的package包/类
/**
 * Parse a (speech) lattice with the PCFG parser.
 *
 * Resets all per-parse status flags up front so stale results from a
 * previous call cannot leak into this one. Only the PCFG parser is
 * consulted here (guarded by op.doPCFG); no fallback strategy is
 * attempted in this method.
 *
 * @param lr a lattice to parse
 * @return Whether the lattice could be parsed by the grammar
 * @throws UnsupportedOperationException if the lattice has more states
 *         than the configured maximum length allows (parseSkipped is set
 *         before throwing)
 */
boolean parse(HTKLatticeReader lr) {
TreePrint treePrint = getTreePrint();
PrintWriter pwOut = op.tlpParams.pw();
// Clear every status flag from any earlier parse attempt.
parseSucceeded = false;
parseNoMemory = false;
parseUnparsable = false;
parseSkipped = false;
parseFallback = false;
whatFailed = null;
originalSentence = null;
if (lr.getNumStates() > op.testOptions.maxLength + 1) { // + 1 for boundary symbol
parseSkipped = true;
throw new UnsupportedOperationException("Lattice too big: " + lr.getNumStates());
}
if (op.doPCFG) {
if (!pparser.parse(lr)) {
// PCFG parse failed; parseSucceeded is still false here.
return parseSucceeded;
}
if (op.testOptions.verbose) {
// In verbose mode, echo the best PCFG parse to the output writer.
pwOut.println("PParser output");
treePrint.printTree(getBestPCFGParse(false), pwOut);
}
}
parseSucceeded = true;
return true;
}
示例5: addConstituentTreeInfo
import edu.stanford.nlp.trees.TreePrint; //导入依赖的package包/类
/**
 * Generates the XML content for a constituent tree: renders the tree with
 * the supplied printer and appends the resulting text to the element.
 */
private static void addConstituentTreeInfo(Element treeInfo, Tree tree, TreePrint constituentTreePrinter) {
    StringWriter buffer = new StringWriter();
    PrintWriter sink = new PrintWriter(buffer, true); // auto-flush so the buffer is complete
    constituentTreePrinter.printTree(tree, sink);
    treeInfo.appendChild(buffer.toString());
}
示例6: parseTaggedWords
import edu.stanford.nlp.trees.TreePrint; //导入依赖的package包/类
/**
 * Parses one sentence of already POS-tagged words.
 *
 * @param taggedWords the tagged tokens of a single sentence
 * @param outputFormat TreePrint formatting options for the result string
 * @return a ParseTree pairing the formatted tree with its score
 * @throws Exception if the output-format options are invalid
 */
public ParseTree parseTaggedWords(List<TaggedWord> taggedWords, List<String> outputFormat) throws Exception
{
    TreePrint printer = ParserUtil.setOptions(outputFormat, tlp);
    Tree tree = model.apply(taggedWords);
    // TODO: Do these parse trees have scores, like the lexicalized ones do?
    String rendered = ParserUtil.TreeObjectToString(tree, printer);
    return new ParseTree(rendered, tree.score());
}
示例7: TreeObjectToString
/**
 * Renders a parse tree to a trimmed string using the given TreePrint.
 *
 * @param tree the tree to render
 * @param tp the printer whose options control the output format
 * @return the rendered tree with surrounding whitespace stripped
 */
public static String TreeObjectToString(Tree tree, TreePrint tp)
{
    StringWriter buffer = new StringWriter();
    tp.printTree(tree, new PrintWriter(buffer));
    return buffer.toString().trim();
}
示例8: setOptions
/**
 * Builds a TreePrint from client-supplied output options.
 *
 * Recognized options are "-outputFormat &lt;fmt&gt;" and
 * "-outputFormatOptions &lt;opts&gt;"; the format defaults to "oneline"
 * and the format options default to the empty string.
 *
 * @param outputOptions flag/value pairs, or null/empty for defaults
 * @param tlp the language pack handed to the TreePrint constructor
 * @return a TreePrint configured with the requested format
 * @throws Exception if options are supplied but contain neither
 *         recognized flag, or a recognized flag has no following value
 */
public static TreePrint setOptions(List<String> outputOptions, TreebankLanguagePack tlp) throws Exception
{
    String outputFormatStr = "oneline"; // default
    String outputFormatOptionsStr = "";
    // for output formatting
    if (outputOptions != null && outputOptions.size() > 0)
    {
        int ofIndex = outputOptions.indexOf("-outputFormat");
        int ofoIndex = outputOptions.indexOf("-outputFormatOptions");
        if (ofIndex < 0 && ofoIndex < 0)
        {
            throw new Exception("Invalid option(s): " + outputOptions.toString());
        }
        if (ofIndex >= 0)
        {
            // Guard against a flag given as the last element with no value
            // (previously surfaced as an IndexOutOfBoundsException).
            if (ofIndex + 1 >= outputOptions.size())
            {
                throw new Exception("Missing value for -outputFormat: " + outputOptions.toString());
            }
            outputFormatStr = outputOptions.get(ofIndex + 1);
        }
        if (ofoIndex >= 0)
        {
            if (ofoIndex + 1 >= outputOptions.size())
            {
                throw new Exception("Missing value for -outputFormatOptions: " + outputOptions.toString());
            }
            outputFormatOptionsStr = outputOptions.get(ofoIndex + 1);
        }
    }
    return new TreePrint(outputFormatStr, outputFormatOptionsStr, tlp);
}
示例9: analyzeSentences
/**
 * @desc Parses input sentences and prints the parses to the given doc.
 * @param paragraphs Vector of strings of sentences
 * @param doc The current document we're printing to.
 * @param depdoc The document of dependencies that we're printing to.
 */
private void analyzeSentences( Vector<String> paragraphs, GigaDoc doc,
GigaDoc depdoc) {
    int sid = 0;
    // Each paragraph fragment may hold several sentences.
    for( String fragment : paragraphs ) {
        // Gigaword often has underscores where commas belong.
        if( fragment.contains(" _ ") ) {
            fragment = fragment.replaceAll(" _ ", " , ");
        }
        for( List<HasWord> sentence : Ling.getSentencesFromText(fragment) ) {
            // Skip sentences the parser cannot realistically handle.
            if( sentence.size() > MAX_SENTENCE_LENGTH ) {
                System.out.println("Sentence far too long: " + sentence.size());
                continue;
            }
            Tree ansTree = parser.parseTree(sentence);
            if( ansTree == null ) {
                System.out.println("Failed to parse: " + sentence);
                continue;
            }
            // Render the tree to a string and record the parse.
            StringWriter treeString = new StringWriter();
            new TreePrint("penn").printTree(ansTree, new PrintWriter(treeString, true));
            doc.addParse(treeString.toString());
            // Dependency extraction is DESTRUCTIVE to the parse tree.
            try {
                GrammaticalStructure gs = gsf.newGrammaticalStructure(ansTree);
                Collection<TypedDependency> deps = gs.typedDependenciesCCprocessed(true);
                depdoc.addDependencies(deps, sid);
            } catch( Exception ex ) {
                ex.printStackTrace();
                System.out.println("WARNING: dependency tree creation failed...adding null deps");
                depdoc.addDependencies(null, sid);
            }
            // Only successfully parsed sentences advance the sentence id.
            sid++;
        }
    }
}
示例10: analyzeSentences
/**
 * @desc Parses input sentences and prints the parses to the given doc.
 * @param paragraphs Vector of strings of sentences
 * @param doc The current document we're printing to.
 * @param depdoc The document of dependencies that we're printing to.
 */
private void analyzeSentences( Vector<String> paragraphs, GigaDoc doc, GigaDoc depdoc) {
    int sid = 0;
    for( String fragment : paragraphs ) {
        // Gigaword puts underscores in many places where commas belong.
        if( fragment.contains(" _ ") ) {
            fragment = fragment.replaceAll(" _ ", " , ");
        }
        // A fragment may split into multiple sentences; handle each.
        for( List<HasWord> sentence : Ling.getSentencesFromText(fragment) ) {
            if( sentence.size() > MAX_SENTENCE_LENGTH ) {
                System.out.println("Sentence far too long: " + sentence.size());
                continue;
            }
            Tree ansTree = parser.parseTree(sentence);
            if( ansTree == null ) {
                System.out.println("Failed to parse: " + sentence);
                continue;
            }
            // Serialize the parse tree and store it on the document.
            StringWriter rendered = new StringWriter();
            TreePrint pennPrinter = new TreePrint("penn");
            pennPrinter.printTree(ansTree, new PrintWriter(rendered, true));
            doc.addParse(rendered.toString());
            // CAUTION: dependency creation is DESTRUCTIVE to the parse tree.
            try {
                GrammaticalStructure gs = gsf.newGrammaticalStructure(ansTree);
                Collection<TypedDependency> deps = gs.typedDependenciesCCprocessed(true);
                depdoc.addDependencies(deps, sid);
            } catch( Exception ex ) {
                ex.printStackTrace();
                System.out.println("WARNING: dependency tree creation failed...adding null deps");
                depdoc.addDependencies(null, sid);
            }
            // The id advances only for sentences that parsed successfully.
            sid++;
        }
    }
}
示例11: analyzeSentences
/**
 * @desc Parses pre-split sentences and prints the parses to the given doc.
 * @param sentences the already sentence-split, tokenized input
 * @param doc The current document we're printing to.
 * @param depdoc The document of dependencies that we're printing to.
 */
private void analyzeSentences( List<List<HasWord>> sentences, GigaDoc doc,
GigaDoc depdoc) {
    int sid = 0;
    for( List<HasWord> sentence : sentences ) {
        // Lightweight progress reporting on stdout.
        if( sid % 5 == 0 ) { System.out.print("."); }
        if( sid % 50 == 0 ) { System.out.println("\n" + sentence); }
        if( sentence.size() > MAX_SENTENCE_LENGTH ) {
            System.out.println("Sentence far too long: " + sentence.size());
            continue;
        }
        Tree ansTree = parser.parseTree(sentence);
        if( ansTree == null ) {
            System.out.println("Failed to parse: " + sentence);
            continue;
        }
        // Serialize the tree and save it to the InfoFile document.
        StringWriter rendered = new StringWriter();
        new TreePrint("penn").printTree(ansTree, new PrintWriter(rendered, true));
        doc.addParse(rendered.toString());
        // CAUTION: dependency creation is DESTRUCTIVE to the parse tree.
        try {
            GrammaticalStructure gs = gsf.newGrammaticalStructure(ansTree);
            Collection<TypedDependency> deps = gs.typedDependenciesCCprocessed(true);
            depdoc.addDependencies(deps, sid);
        } catch( Exception ex ) {
            ex.printStackTrace();
            System.out.println("WARNING: dependency tree creation failed...adding null deps");
            depdoc.addDependencies(null, sid);
        }
        // Skipped/unparsable sentences do not advance the sentence id.
        sid++;
    }
}
示例12: getTreePrint
/** Return a TreePrint for formatting parsed output trees.
 *
 * The printer is derived from the parser's test options combined with the
 * treebank-specific parser parameters held in {@code op}.
 *
 * @return A TreePrint for formatting parsed output trees.
 */
public TreePrint getTreePrint() {
return op.testOptions.treePrint(op.tlpParams);
}
示例13: construct
import edu.stanford.nlp.trees.TreePrint; //导入依赖的package包/类
private void construct(Properties props, boolean enforceRequirements) {
this.numWords = 0;
this.constituentTreePrinter = new TreePrint("penn");
this.dependencyTreePrinter = new TreePrint("typedDependenciesCollapsed");
this.gsf = new PennTreebankLanguagePack().grammaticalStructureFactory();
if (props == null) {
// if undefined, find the properties file in the classpath
props = loadPropertiesFromClasspath();
} else if (props.getProperty("annotators") == null) {
// this happens when some command line options are specified (e.g just "-filelist") but no properties file is.
// we use the options that are given and let them override the default properties from the class path properties.
Properties fromClassPath = loadPropertiesFromClasspath();
fromClassPath.putAll(props);
props = fromClassPath;
}
this.properties = props;
AnnotatorPool pool = getDefaultAnnotatorPool(props);
// now construct the annotators from the given properties in the given order
List<String> annoNames = Arrays.asList(getRequiredProperty(props, "annotators").split("[, \t]+"));
Set<String> alreadyAddedAnnoNames = Generics.newHashSet();
Set<Requirement> requirementsSatisfied = Generics.newHashSet();
for (String name : annoNames) {
name = name.trim();
if (name.isEmpty()) { continue; }
System.err.println("Adding annotator " + name);
Annotator an = pool.get(name);
this.addAnnotator(an);
if (enforceRequirements) {
Set<Requirement> allRequirements = an.requires();
for (Requirement requirement : allRequirements) {
if (!requirementsSatisfied.contains(requirement)) {
String fmt = "annotator \"%s\" requires annotator \"%s\"";
throw new IllegalArgumentException(String.format(fmt, name, requirement));
}
}
requirementsSatisfied.addAll(an.requirementsSatisfied());
}
// the NFL domain requires several post-processing rules after
// tokenization. add these transparently if the NFL annotator
// is required
if (name.equals(STANFORD_TOKENIZE) &&
annoNames.contains(STANFORD_NFL) &&
!annoNames.contains(STANFORD_NFL_TOKENIZE)) {
Annotator pp = pool.get(STANFORD_NFL_TOKENIZE);
this.addAnnotator(pp);
}
alreadyAddedAnnoNames.add(name);
}
// Sanity check
if (! alreadyAddedAnnoNames.contains(STANFORD_SSPLIT)) {
System.setProperty(NEWLINE_SPLITTER_PROPERTY, "false");
}
}
示例14: toTreeString
import edu.stanford.nlp.trees.TreePrint; //导入依赖的package包/类
/**
* Transform a parse tree into a format
*
* @param tree
* @param format
* @return
*/
public static String toTreeString(Tree tree, String format) {
StringWriter sw = new StringWriter();
new TreePrint(format).printTree(tree, new PrintWriter(sw));
return sw.toString().trim();
}
示例15: treePrint
import edu.stanford.nlp.trees.TreePrint; //导入依赖的package包/类
/**
* Determines method for print trees on output.
*
* @param tlpParams The treebank parser params
* @return A suitable tree printing object
*/
public static TreePrint treePrint(TreebankLangParserParams tlpParams) {
TreebankLanguagePack tlp = tlpParams.treebankLanguagePack();
return new TreePrint(outputFormat, outputFormatOptions, tlp, tlpParams.headFinder());
}