This article collects typical usage examples of the Java class edu.stanford.nlp.trees.PennTreeReader. If you have been wondering what PennTreeReader is for and how to use it, the curated class examples here may help.
The PennTreeReader class belongs to the edu.stanford.nlp.trees package. Thirteen code examples are shown below, sorted by popularity by default.
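Before the examples, here is a minimal, self-contained sketch of the pattern they all share: wrap a bracketed Penn Treebank string in a Reader, hand it to PennTreeReader together with a TreeFactory, and call readTree(). It assumes only that Stanford CoreNLP is on the classpath; the class name PennTreeReaderDemo is invented for illustration.

import java.io.StringReader;
import edu.stanford.nlp.trees.LabeledScoredTreeFactory;
import edu.stanford.nlp.trees.PennTreeReader;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.TreeReader;

public class PennTreeReaderDemo {
public static void main(String[] args) throws Exception {
String ptb = "(ROOT (S (NP (DT This)) (VP (VBZ is) (NP (DT a) (NN test))) (. .)))";
// PennTreeReader reads one bracketed tree at a time from the Reader
TreeReader reader = new PennTreeReader(new StringReader(ptb), new LabeledScoredTreeFactory());
Tree tree = reader.readTree(); // returns null when the input is exhausted
reader.close();
tree.pennPrint(); // pretty-prints the parse tree to stdout
}
}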
Example 1: main
import edu.stanford.nlp.trees.PennTreeReader; // import the required package/class
public static void main(String[] args) throws IOException {
TreeJPanel tjp = new TreeJPanel();
// String ptbTreeString1 = "(ROOT (S (NP (DT This)) (VP (VBZ is) (NP (DT a) (NN test))) (. .)))";
String ptbTreeString = "(ROOT (S (NP (NNP Interactive_Tregex)) (VP (VBZ works)) (PP (IN for) (PRP me)) (. !)))";
if (args.length > 0) {
ptbTreeString = args[0];
}
Tree tree = (new PennTreeReader(new StringReader(ptbTreeString), new LabeledScoredTreeFactory(new StringLabelFactory()))).readTree();
tjp.setTree(tree);
tjp.setBackground(Color.white);
JFrame frame = new JFrame();
frame.getContentPane().add(tjp, BorderLayout.CENTER);
frame.addWindowListener(new WindowAdapter() {
@Override
public void windowClosing(WindowEvent e) {
System.exit(0);
}
});
frame.pack();
frame.setVisible(true);
}
Example 2: stringToTree
import edu.stanford.nlp.trees.PennTreeReader; // import the required package/class
/**
* Build the Tree object from its string form.
*/
public static Tree stringToTree(String str, TreeFactory tf) {
try {
PennTreeReader ptr = new PennTreeReader(new BufferedReader(new StringReader(str)), tf);
Tree parseTree = ptr.readTree();
return parseTree;
} catch( Exception ex ) { ex.printStackTrace(); }
return null;
}
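A brief usage sketch for the helper above (the input string and printout are illustrative):

Tree t = stringToTree("(ROOT (S (NP (PRP It)) (VP (VBZ works)) (. .)))", new LabeledScoredTreeFactory());
if( t != null ) { // stringToTree returns null if parsing failed
System.out.println(t.yield()); // prints the leaf tokens: [It, works, .]
}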
Example 3: analyzeParsesNoCoref
import edu.stanford.nlp.trees.PennTreeReader; // import the required package/class
/**
* Process parses, extracting and counting the verbs in each one.
* @param parses The list of parses in String form
*/
private void analyzeParsesNoCoref( Vector<String> parses ) {
TreeFactory tf = new LabeledScoredTreeFactory();
Vector<String> verbs = new Vector<String>();
// Save the verbs in each parse
int sentence = 0;
for( String parse : parses ) {
try {
// Read the parse
PennTreeReader ptr = new PennTreeReader(new BufferedReader(new StringReader(parse)), tf);
Tree ansTree = ptr.readTree();
// Look for the verbs in the tree
if( ansTree != null ) {
Vector<Tree> parseVerbs = TreeOperator.verbTreesFromTree(ansTree);
for( Tree verb : parseVerbs )
// verbs.add(verb.firstChild().firstChild().value());
verbs.add(verb.firstChild().value());
}
} catch( Exception ex ) { ex.printStackTrace(); }
sentence++;
}
// do the pair counts
countPairs(verbs);
}
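Note that TreeOperator.verbTreesFromTree is project-specific, not part of CoreNLP; from its use here it evidently returns the verb preterminals of a tree. A hedged stand-in under that assumption (the name verbPreterminals is invented):

import java.util.ArrayList;
import java.util.List;
import edu.stanford.nlp.trees.Tree;

// Hypothetical equivalent of TreeOperator.verbTreesFromTree: collect
// preterminal subtrees whose POS tag starts with "VB" (VB, VBD, VBZ, ...).
static List<Tree> verbPreterminals(Tree root) {
List<Tree> verbs = new ArrayList<Tree>();
for( Tree t : root ) { // Tree iterates over its subtrees in pre-order
if( t.isPreTerminal() && t.value().startsWith("VB") )
verbs.add(t);
}
return verbs;
}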
Example 4: analyzeParses
import edu.stanford.nlp.trees.PennTreeReader; // import the required package/class
private void analyzeParses( GigaDoc doc, Collection<String> parses, Collection<EntityMention> entities ) {
if( entities != null ) {
TreeFactory tf = new LabeledScoredTreeFactory();
// Read in all the parse trees
Tree[] trees = new Tree[parses.size()];
int i = 0;
for( String parse : parses ) {
try {
PennTreeReader ptr = new PennTreeReader(new BufferedReader(new StringReader(parse)), tf);
trees[i] = ptr.readTree();
} catch( Exception ex ) { ex.printStackTrace(); }
i++;
}
// Convert all entity spans from character spans to word-based
for( EntityMention mention : entities ) {
mention.convertCharSpanToIndex(TreeOperator.toRaw(trees[mention.sentenceID()-1]));
doc.addEntity(mention);
// mentions[mention.sentenceID()-1].add(mention);
}
// Save the verbs in each parse
int sid = 0, eid = 0;
for( Tree tree : trees ) {
if( tree != null ) {
// Look for the verbs in the tree
Vector<Tree> parseVerbs = TreeOperator.verbTreesFromTree(tree);
for( Tree verb : parseVerbs ) {
// System.out.println(" verb: " + verb + " index: " + (TreeOperator.wordIndex(tree,verb)+1));
doc.addEvent(new WordEvent(eid, verb.firstChild().value(), TreeOperator.wordIndex(tree,verb)+1, sid+1));
eid++;
}
sid++;
}
}
}
}
Example 5: readTreeFromString
import edu.stanford.nlp.trees.PennTreeReader; // import the required package/class
/**
* Read tree from a string
*
* @param parseStr
* input tree in the form of a string
* @return tree
*/
public Tree readTreeFromString(String parseStr) {
// read in the input into a Tree data structure
TreeReader treeReader = new PennTreeReader(new StringReader(parseStr),
tree_factory);
Tree inputTree = null;
try {
inputTree = treeReader.readTree();
treeReader.close();
} catch (IOException e) {
e.printStackTrace();
}
return inputTree;
}
Example 6: getStanfordContituencyTree
import edu.stanford.nlp.trees.PennTreeReader; // import the required package/class
public Tree getStanfordContituencyTree() {
TreeFactory tf = new LabeledScoredTreeFactory();
StringReader r = new StringReader(getParseText());
TreeReader tr = new PennTreeReader(r, tf);
try {
return tr.readTree();
} catch (IOException e) {
throw new RuntimeException("Error: IOException should not be thrown by StringReader");
}
}
Example 7: load
import edu.stanford.nlp.trees.PennTreeReader; // import the required package/class
public void load(String penn) throws IOException
{
Reader in = new StringReader(penn);
PennTreeReader tr = new PennTreeReader(in, new LabeledScoredTreeFactory(),
new NPTmpRetainingTreeNormalizer());
bufferTree = tr.readTree();
}
Example 8: newTreeReader
import edu.stanford.nlp.trees.PennTreeReader; // import the required package/class
public TreeReader newTreeReader(Reader in) {
final NegraPennTreeNormalizer tn = new NegraPennTreeNormalizer(tlp, nodeCleanup);
if (treeNormalizerInsertNPinPP) {
tn.setInsertNPinPP(true);
}
return new PennTreeReader(in, new LabeledScoredTreeFactory(new StringLabelFactory()), tn, new NegraPennTokenizer(in));
}
Example 9: newTreeReader
import edu.stanford.nlp.trees.PennTreeReader; // import the required package/class
public TreeReader newTreeReader(Reader in) {
final NegraPennTreeNormalizer tn = new NegraPennTreeNormalizer(tlp, nodeCleanup);
if (treeNormalizerInsertNPinPP)
tn.setInsertNPinPP(true);
return new PennTreeReader(in, new LabeledScoredTreeFactory(), tn, new NegraPennTokenizer(in));
}
Example 10: newTreeReader
import edu.stanford.nlp.trees.PennTreeReader; // import the required package/class
public TreeReader newTreeReader(Reader in) {
return new PennTreeReader(in, new LabeledScoredTreeFactory());
}
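Factories like those in Examples 8-10 are typically handed to a Treebank, which uses them to read every file with the same reader configuration. A minimal sketch, assuming Java 8+ for the lambda; the treebank path is a placeholder:

import edu.stanford.nlp.trees.DiskTreebank;
import edu.stanford.nlp.trees.TreeReaderFactory;

TreeReaderFactory trf = in -> new PennTreeReader(in, new LabeledScoredTreeFactory());
DiskTreebank treebank = new DiskTreebank(trf);
treebank.loadPath("/path/to/treebank"); // placeholder path
for( Tree tree : treebank ) {
System.out.println(tree.yield().size()); // sentence length in tokens
}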
Example 11: calculateIDFofVerbs
import edu.stanford.nlp.trees.PennTreeReader; // import the required package/class
/**
* Counts all *verbs* in the parse trees and tracks IDF counts.
*/
private void calculateIDFofVerbs( Vector<String> parses ) {
TreeFactory tf = new LabeledScoredTreeFactory();
HashMap<String,Boolean> seen = new HashMap<String,Boolean>(100);
HashMap<String,Boolean> seenLemmas = new HashMap<String,Boolean>(100);
// Loop over each parse tree
for( String parse : parses ) {
try {
PennTreeReader ptr = new PennTreeReader(new BufferedReader(new StringReader(parse)), tf);
Tree ansTree = ptr.readTree();
if( ansTree != null ) {
// Look for the verbs in the tree
Vector<Tree> parseVerbs = TreeOperator.verbTreesFromTree(ansTree);
for( Tree verb : parseVerbs ) {
String wordString = verb.firstChild().value().toLowerCase();
// Look for a particle
String particle = findParticleInTree(verb.parent(ansTree));
if( particle != null ) wordString += "_" + particle.toLowerCase();
// Don't count numerals or weird starting punctuation tokens
if( wordString.matches("[a-zA-Z].*") && !wordString.matches(".*\\d.*") ) {
// Count the documents the word appears in, for IDF score
if( !seen.containsKey(wordString) ) {
seen.put(wordString, true);
// add the count of documents
idf.increaseDocCount(wordString);
}
// Count the documents the lemma appears in, for IDF score
String lemmaString = verbToLemma(wordString);
if( lemmaString == null ) lemmaString = wordString;
if( !seenLemmas.containsKey(lemmaString) ) {
seenLemmas.put(lemmaString, true);
// add the count of documents
idfLemmas.increaseDocCount(lemmaString);
}
// Increment word frequency
idf.increaseTermFrequency(wordString);
idfLemmas.increaseTermFrequency(lemmaString);
}
}
}
} catch( Exception ex ) { ex.printStackTrace(); }
}
}
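The idf and idfLemmas objects are project-specific counters; the document and term frequencies gathered above are exactly what a standard IDF score needs. A one-line sketch of the usual formula, with hypothetical variable names:

// idf(w) = log(N / df(w)): N = total documents, df(w) = documents containing w
double idfScore = Math.log((double) totalDocs / docFreq); // totalDocs, docFreq are hypothetical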
Example 12: load
import edu.stanford.nlp.trees.PennTreeReader; // import the required package/class
/** This method does its own buffering of the passed-in InputStream. */
public Annotation load(InputStream is) throws IOException, ClassNotFoundException, ClassCastException {
is = new BufferedInputStream(is);
if(compress) is = new GZIPInputStream(is);
BufferedReader reader = new BufferedReader(new InputStreamReader(is));
Annotation doc = new Annotation("");
String line;
// read the coref graph (new format)
Map<Integer, CorefChain> chains = loadCorefChains(reader);
if(chains != null) doc.set(CorefCoreAnnotations.CorefChainAnnotation.class, chains);
// read the coref graph (old format)
line = reader.readLine().trim();
if(line.length() > 0){
String [] bits = line.split(" ");
if(bits.length % 4 != 0){
throw new RuntimeIOException("ERROR: Incorrect format for the serialized coref graph: " + line);
}
List<Pair<IntTuple, IntTuple>> corefGraph = new ArrayList<Pair<IntTuple,IntTuple>>();
for(int i = 0; i < bits.length; i += 4){
IntTuple src = new IntTuple(2);
IntTuple dst = new IntTuple(2);
src.set(0, Integer.parseInt(bits[i]));
src.set(1, Integer.parseInt(bits[i + 1]));
dst.set(0, Integer.parseInt(bits[i + 2]));
dst.set(1, Integer.parseInt(bits[i + 3]));
corefGraph.add(new Pair<IntTuple, IntTuple>(src, dst));
}
doc.set(CorefCoreAnnotations.CorefGraphAnnotation.class, corefGraph);
}
// read individual sentences
List<CoreMap> sentences = new ArrayList<CoreMap>();
while((line = reader.readLine()) != null){
CoreMap sentence = new Annotation("");
// first line is the parse tree. construct it with CoreLabels in Tree nodes
Tree tree = new PennTreeReader(new StringReader(line), new LabeledScoredTreeFactory(CoreLabel.factory())).readTree();
sentence.set(TreeCoreAnnotations.TreeAnnotation.class, tree);
// read the dependency graphs
IntermediateSemanticGraph intermCollapsedDeps = loadDependencyGraph(reader);
IntermediateSemanticGraph intermUncollapsedDeps = loadDependencyGraph(reader);
IntermediateSemanticGraph intermCcDeps = loadDependencyGraph(reader);
// the remaining lines until empty line are tokens
List<CoreLabel> tokens = new ArrayList<CoreLabel>();
while((line = reader.readLine()) != null){
if(line.length() == 0) break;
CoreLabel token = loadToken(line, haveExplicitAntecedent);
tokens.add(token);
}
sentence.set(CoreAnnotations.TokensAnnotation.class, tokens);
// convert the intermediate graph to an actual SemanticGraph
SemanticGraph collapsedDeps = convertIntermediateGraph(intermCollapsedDeps, tokens);
sentence.set(SemanticGraphCoreAnnotations.CollapsedDependenciesAnnotation.class, collapsedDeps);
SemanticGraph uncollapsedDeps = convertIntermediateGraph(intermUncollapsedDeps, tokens);
sentence.set(SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class, uncollapsedDeps);
SemanticGraph ccDeps = convertIntermediateGraph(intermCcDeps, tokens);
sentence.set(SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation.class, ccDeps);
sentences.add(sentence);
}
doc.set(CoreAnnotations.SentencesAnnotation.class, sentences);
reader.close();
return doc;
}
Example 13: newTreeReader
import edu.stanford.nlp.trees.PennTreeReader; // import the required package/class
public TreeReader newTreeReader(Reader in) {
return new PennTreeReader(in, new LabeledScoredTreeFactory(), new HebrewTreeNormalizer(), new PennTreebankTokenizer(in));
}