本文整理匯總了Java中edu.stanford.nlp.trees.Tree.isPreTerminal方法的典型用法代碼示例。如果您正苦於以下問題:Java Tree.isPreTerminal方法的具體用法?Java Tree.isPreTerminal怎麽用?Java Tree.isPreTerminal使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類edu.stanford.nlp.trees.Tree
的用法示例。
在下文中一共展示了Tree.isPreTerminal方法的6個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: verbTreesFromTree
import edu.stanford.nlp.trees.Tree; //導入方法依賴的package包/類
/**
* @return A list of verb subtrees e.g. (VBG running)
*/
/**
 * Collects every verb preterminal subtree, e.g. (VBG running), from a parse tree.
 *
 * @param tree The parse tree to search.
 * @return A list of verb subtrees e.g. (VBG running)
 */
public static Vector<Tree> verbTreesFromTree(Tree tree) {
  Vector<Tree> collected = new Vector<Tree>();
  if( tree.isPreTerminal() && tree.label().value().startsWith("VB") ) {
    // Preterminal whose POS tag is a verb tag (VB, VBD, VBG, ...): keep the subtree itself.
    collected.add(tree);
  }
  else {
    // Otherwise descend into each child and gather its verb subtrees.
    for( Tree kid : tree.getChildrenAsList() )
      collected.addAll(verbTreesFromTree(kid));
  }
  return collected;
}
示例2: verbsFromTree
import edu.stanford.nlp.trees.Tree; //導入方法依賴的package包/類
/**
* @return A list of verbs in the tree
*/
/**
 * Collects the verb words in a parse tree (the tokens under VB* preterminals).
 *
 * @param tree The subtree currently being examined.
 * @param parent The parent of tree — threaded through the recursion (only used
 *               by the disabled argument-extraction code).
 * @return A list of verbs in the tree
 */
public static Vector<String> verbsFromTree(Tree tree, Tree parent) {
  Vector<String> found = new Vector<String>();
  // NOTE(review): this variant tests label().toString() where verbTreesFromTree
  // tests label().value() — presumably equivalent here, but confirm.
  if( tree.isPreTerminal() && tree.label().toString().startsWith("VB") ) {
    // The preterminal's single child holds the word itself.
    found.add(tree.firstChild().value());
  }
  else {
    // Recurse into each child, passing this node along as the parent.
    for( Tree kid : tree.getChildrenAsList() )
      found.addAll(verbsFromTree(kid, tree));
  }
  return found;
}
示例3: inorderTraverse
import edu.stanford.nlp.trees.Tree; //導入方法依賴的package包/類
/**
* @return The CHARACTER OFFSET where the stop tree begins
*/
/**
 * Computes the character offset at which the stop subtree begins, by summing
 * the lengths (each plus one trailing space) of all leaf words that precede
 * it in an inorder walk of the tree.
 *
 * @param full The tree (or subtree) being walked.
 * @param stop The subtree whose starting offset is wanted.
 * @return The CHARACTER OFFSET where the stop tree begins
 */
public static int inorderTraverse(Tree full, Tree stop) {
  if( full == null || full == stop ) return 0;
  if( full.isPreTerminal() ) {
    // A leaf word contributes its length plus one for the space character.
    return full.firstChild().value().length() + 1;
  }
  int offset = 0;
  for( Tree kid : full.getChildrenAsList() ) {
    // Reached the target child: its offset is everything counted so far.
    if( kid == stop ) return offset;
    offset += inorderTraverse(kid, stop);
    // Target was somewhere inside this child: the recursion already stopped
    // at it, so the accumulated offset is final.
    if( kid.contains(stop) ) return offset;
  }
  return offset;
}
示例4: toRaw
import edu.stanford.nlp.trees.Tree; //導入方法依賴的package包/類
/**
* @return The raw text of a parse tree's nodes
*/
/**
 * Produces the raw text of a parse tree: the leaf words joined by single spaces.
 *
 * @param full The tree to flatten; null yields the empty string.
 * @return The raw text of a parse tree's nodes
 */
public static String toRaw(Tree full) {
  if( full == null ) return "";
  if( full.isPreTerminal() ) return full.firstChild().value();
  // Join the children's raw text with spaces. A StringBuilder replaces the
  // original repeated String concatenation, which was O(n^2) in sentence length.
  StringBuilder sb = new StringBuilder();
  for( Tree child : full.getChildrenAsList() ) {
    // Matches the original's behavior: a separator is added whenever the
    // accumulated text is non-empty, even if this child's text is empty.
    if( sb.length() > 0 ) sb.append(' ');
    sb.append(toRaw(child));
  }
  return sb.toString();
}
示例5: calculateTreeIDF
import edu.stanford.nlp.trees.Tree; //導入方法依賴的package包/類
/**
* Recurses over a tree and adds an IDF counts leaves
* Leaves must be Nouns, Verbs or Adjectives to be counted
*/
/**
 * Recurses over a tree and adds an IDF counts leaves
 * Leaves must be Nouns, Verbs or Adjectives to be counted
 *
 * Each qualifying word increments a term-frequency count, and — at most once
 * per document, guarded by the _seen/_seenLemmas maps — a document count.
 *
 * @param tree     The subtree currently being visited.
 * @param fullTree The root of the whole tree; needed so tree.parent(fullTree)
 *                 can find a verb's parent when looking for its particle.
 */
public void calculateTreeIDF(Tree tree, Tree fullTree) {
  // if tree is a leaf
  if( tree.isPreTerminal() ) {
    String wordString = tree.firstChild().value().toLowerCase();
    // If we are counting every single token
    if( _ignorepostags ) {
      // Don't count numerals or weird starting punctuation tokens
      if( wordString.matches("[a-zA-Z].*") && !wordString.matches(".*\\d.*") ) {
        // Count the documents the word appears in, for IDF score
        if( !_seen.containsKey(wordString) ) {
          _seen.put(wordString, true);
          // add the count of documents
          _idf.increaseDocCount(wordString);
        }
        _idf.increaseTermFrequency(wordString);
      }
    }
    // Else we are lemmatizing and only counting Verbs, Nouns, and Adjectives.
    else {
      // Collapse the Penn POS tag to a single class character (e.g. 'v','n','j','r').
      char normPostag = normalizePOS(tree.label().value().toString());
      // if a verb, look for a particle
      if( normPostag == 'v' ) {
        // System.out.println(wordString + " tree " + tree.parent(fullTree));
        String particle = TreeOperator.findParticleInTree(tree.parent(fullTree));
        // System.out.println(wordString + " particle " + particle);
        // Phrasal verbs are counted as verb_particle, e.g. "give_up".
        if( particle != null ) wordString += "_" + particle.toLowerCase();
      }
      // Verbs, Nouns, Adjectives
      // NOTE(review): 'r' (adverbs) is also accepted here despite the javadoc
      // naming only nouns/verbs/adjectives — confirm that is intended.
      if( normPostag == 'v' || normPostag == 'n' || normPostag == 'j' || normPostag == 'r' ) {
        String key = createKey(wordString, normPostag);
        // Don't count numerals or weird starting punctuation tokens
        if( wordString.matches("[a-zA-Z].*") && !wordString.matches(".*\\d.*") ) {
          // Count the documents the word appears in, for IDF score
          if( !_seen.containsKey(key) ) {
            _seen.put(key, true);
            // add the count of documents
            _idf.increaseDocCount(key);
          }
          _idf.increaseTermFrequency(key);
          // LEMMA counts
          String lemmaString = null;
          if( normPostag == 'v' ) lemmaString = _wordnet.verbToLemma(wordString);
          else if( normPostag == 'n' ) lemmaString = _wordnet.nounToLemma(wordString);
          else if( normPostag == 'j' ) lemmaString = _wordnet.adjectiveToLemma(wordString);
          // NOTE(review): adverbs fall back to nounToLemma — looks like a
          // deliberate approximation; verify against the _wordnet API.
          else if( normPostag == 'r' ) lemmaString = _wordnet.nounToLemma(wordString);
          // No lemma found: count the surface form itself.
          if( lemmaString == null ) lemmaString = wordString;
          key = createKey(lemmaString, normPostag);
          if( !_seenLemmas.containsKey(key) ) {
            _seenLemmas.put(key, true);
            _idfLemmas.increaseDocCount(key);
          }
          _idfLemmas.increaseTermFrequency(key);
        }
      }
    }
  }
  // else recurse on the tree
  else {
    List<Tree> children = tree.getChildrenAsList();
    for( Tree child : children )
      calculateTreeIDF(child, fullTree);
  }
}
示例6: calculateTreeIDF
import edu.stanford.nlp.trees.Tree; //導入方法依賴的package包/類
/**
* Recurses over a tree and adds an IDF counts leaves
* Leaves must be Nouns, Verbs or Adjectives to be counted
*/
/**
 * Recurses over a tree and adds an IDF counts leaves
 * Leaves must be Nouns, Verbs or Adjectives to be counted
 *
 * NOTE(review): near-duplicate of another calculateTreeIDF in this file; the
 * only difference is that the particle lookup here goes through
 * BasicEventAnalyzer rather than TreeOperator.
 *
 * @param tree     The subtree currently being visited.
 * @param fullTree The root of the whole tree; needed so tree.parent(fullTree)
 *                 can find a verb's parent when looking for its particle.
 */
public void calculateTreeIDF(Tree tree, Tree fullTree) {
  // if tree is a leaf
  if( tree.isPreTerminal() ) {
    String wordString = tree.firstChild().value().toLowerCase();
    // If we are counting every single token
    if( _ignorepostags ) {
      // Don't count numerals or weird starting punctuation tokens
      if( wordString.matches("[a-zA-Z].*") && !wordString.matches(".*\\d.*") ) {
        // Count the documents the word appears in, for IDF score
        if( !_seen.containsKey(wordString) ) {
          _seen.put(wordString, true);
          // add the count of documents
          _idf.increaseDocCount(wordString);
        }
        _idf.increaseTermFrequency(wordString);
      }
    }
    // Else we are lemmatizing and only counting Verbs, Nouns, and Adjectives.
    else {
      // Collapse the Penn POS tag to a single class character (e.g. 'v','n','j','r').
      char normPostag = normalizePOS(tree.label().value().toString());
      // if a verb, look for a particle
      if( normPostag == 'v' ) {
        // System.out.println(wordString + " tree " + tree.parent(fullTree));
        String particle = BasicEventAnalyzer.findParticleInTree(tree.parent(fullTree));
        // System.out.println(wordString + " particle " + particle);
        // Phrasal verbs are counted as verb_particle, e.g. "give_up".
        if( particle != null ) wordString += "_" + particle.toLowerCase();
      }
      // Verbs, Nouns, Adjectives
      // NOTE(review): 'r' (adverbs) is also accepted despite the javadoc
      // naming only nouns/verbs/adjectives — confirm that is intended.
      if( normPostag == 'v' || normPostag == 'n' || normPostag == 'j' || normPostag == 'r' ) {
        String key = createKey(wordString, normPostag);
        // Don't count numerals or weird starting punctuation tokens
        if( wordString.matches("[a-zA-Z].*") && !wordString.matches(".*\\d.*") ) {
          // Count the documents the word appears in, for IDF score
          if( !_seen.containsKey(key) ) {
            _seen.put(key, true);
            // add the count of documents
            _idf.increaseDocCount(key);
          }
          _idf.increaseTermFrequency(key);
          // LEMMA counts
          String lemmaString = null;
          if( normPostag == 'v' ) lemmaString = _wordnet.verbToLemma(wordString);
          else if( normPostag == 'n' ) lemmaString = _wordnet.nounToLemma(wordString);
          else if( normPostag == 'j' ) lemmaString = _wordnet.adjectiveToLemma(wordString);
          // NOTE(review): adverbs fall back to nounToLemma — looks like a
          // deliberate approximation; verify against the _wordnet API.
          else if( normPostag == 'r' ) lemmaString = _wordnet.nounToLemma(wordString);
          // No lemma found: count the surface form itself.
          if( lemmaString == null ) lemmaString = wordString;
          key = createKey(lemmaString, normPostag);
          if( !_seenLemmas.containsKey(key) ) {
            _seenLemmas.put(key, true);
            _idfLemmas.increaseDocCount(key);
          }
          _idfLemmas.increaseTermFrequency(key);
        }
      }
    }
  }
  // else recurse on the tree
  else {
    List<Tree> children = tree.getChildrenAsList();
    for( Tree child : children )
      calculateTreeIDF(child, fullTree);
  }
}