This page collects representative usage examples of the Java method edu.stanford.nlp.trees.Tree.getChildrenAsList. If you are wondering what Tree.getChildrenAsList does, or how and when to use it, the curated examples below should help. You can also explore the wider usage of its enclosing class, edu.stanford.nlp.trees.Tree.
Below are 14 code examples of Tree.getChildrenAsList, ordered by popularity.
Example 1: leavesFromTree
import edu.stanford.nlp.trees.Tree; // import the class the method depends on
/**
* @return A List of all the preterminal (leaf-level) subtrees ... we basically flatten the tree
*/
public static List<Tree> leavesFromTree(Tree tree) {
List<Tree> leaves = new ArrayList<Tree>();
// System.out.println("myt=" + tree);
// if tree is a preterminal (its first child is a leaf)
// if( tree.isPreTerminal() || tree.firstChild().isLeaf() ) {
if( tree.firstChild().isLeaf() ) {
// System.out.println(" * preterm");
// add the verb subtree
leaves.add(tree);
}
// else recurse into the children
else {
// System.out.println(" * scaling");
List<Tree> children = tree.getChildrenAsList();
for( Tree child : children ) {
List<Tree> temp = leavesFromTree(child);
leaves.addAll(temp);
}
}
return leaves;
}
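A minimal usage sketch, assuming leavesFromTree is in scope (the demo class name is illustrative). Tree.valueOf builds a tree directly from its bracketed Penn Treebank form, so no parser model is needed for a quick test:

import edu.stanford.nlp.trees.Tree;
import java.util.List;

public class LeavesFromTreeDemo {
    public static void main(String[] args) {
        // Build a small parse tree from its bracketed string form.
        Tree tree = Tree.valueOf("(ROOT (S (NP (DT The) (NN dog)) (VP (VBD ran))))");
        // Flattens to the preterminal subtrees: [(DT The), (NN dog), (VBD ran)]
        List<Tree> leaves = leavesFromTree(tree);
        System.out.println(leaves);
    }
}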
Example 2: verbTreesFromTree
import edu.stanford.nlp.trees.Tree; // import the class the method depends on
/**
* @return A list of verb subtrees e.g. (VBG running)
*/
public static Vector<Tree> verbTreesFromTree(Tree tree) {
// System.out.println("verbTree: " + tree);
Vector<Tree> verbs = new Vector<Tree>();
// System.out.println(" tree label: " + tree.label().value().toString());
// if tree is a verb preterminal
if( tree.isPreTerminal() && tree.label().value().startsWith("VB") ) {
// System.out.println(" if!!");
// add the verb subtree
verbs.add(tree);
}
// else recurse into the children
else {
// System.out.println(" else!!");
List<Tree> children = tree.getChildrenAsList();
for( Tree child : children ) {
Vector<Tree> temp = verbTreesFromTree(child);
verbs.addAll(temp);
}
}
return verbs;
}
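A hedged snippet (to be placed in a main method of the class that defines verbTreesFromTree); with a nested VP, both the auxiliary and the participle come back as preterminal subtrees:

Tree tree = Tree.valueOf("(ROOT (S (NP (NNP Mary)) (VP (VBZ is) (VP (VBG running)))))");
Vector<Tree> verbs = verbTreesFromTree(tree);
System.out.println(verbs);  // [(VBZ is), (VBG running)]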
Example 3: verbsFromTree
import edu.stanford.nlp.trees.Tree; // import the class the method depends on
/**
* @return A list of verbs in the tree
*/
public static Vector<String> verbsFromTree(Tree tree, Tree parent) {
Vector<String> verbs = new Vector<String>();
// if tree is a verb preterminal
if( tree.isPreTerminal() && tree.label().toString().startsWith("VB") ) {
String verb = tree.firstChild().value();
// get arguments
// System.out.println("parent of " + verb + " is " + parent);
// verb += argsOfPhrase(tree, parent);
// System.out.println("now: " + verb);
// add the verb
verbs.add(verb);
}
// else recurse into the children
else {
List<Tree> children = tree.getChildrenAsList();
for( Tree child : children ) {
Vector<String> temp = verbsFromTree(child, tree);
verbs.addAll(temp);
}
}
return verbs;
}
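verbsFromTree returns the verb strings rather than subtrees. A hedged snippet, same placement assumption as above; passing null as the parent is safe for the root call because parent is only threaded through the recursion:

Tree tree = Tree.valueOf("(ROOT (S (NP (NNP Mary)) (VP (VBZ is) (VP (VBG running)))))");
Vector<String> verbs = verbsFromTree(tree, null);
System.out.println(verbs);  // [is, running]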
Example 4: wordIndex
import edu.stanford.nlp.trees.Tree; // import the class the method depends on
/**
* @return The WORD INDEX (starting at 0) where the stop tree begins
*/
public static int wordIndex(Tree full, Tree stop) {
if( full == null || full == stop ) return 0;
int sum = 0;
// if( full.isPreTerminal() ) {
if( full.firstChild().isLeaf() ) {
return 1;
}
else {
for( Tree child : full.getChildrenAsList() ) {
if( child == stop ) {
// System.out.println("Stopping at " + child);
return sum;
}
sum += wordIndex(child, stop);
if( child.contains(stop) ) return sum;
}
}
return sum;
}
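A hedged snippet: locate a subtree by child index, then ask for the 0-based word index where it begins:

Tree tree = Tree.valueOf("(ROOT (S (NP (DT The) (NN dog)) (VP (VBD ran))))");
Tree vp = tree.firstChild().getChild(1);   // the (VP (VBD ran)) subtree
System.out.println(wordIndex(tree, vp));   // 2 -- "ran" is the third word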
Example 5: indexToSubtreeHelp
import edu.stanford.nlp.trees.Tree; // import the class the method depends on
public static Tree indexToSubtreeHelp(Tree full, int current, int goal) {
// System.out.println("--" + current + "-" + full + "-preterm" + full.isPreTerminal() + "-goal" + goal);
if( full == null ) return null;
if( (current+1) == goal &&
// (full.isPreTerminal() || full.label().value().equals("CD")) )
full.firstChild().isLeaf() )
return full;
else {
for( Tree child : full.getChildrenAsList() ) {
int length = countLeaves(child);
// System.out.println("--Child length " + length);
if( goal <= current+length )
return indexToSubtreeHelp(child, current, goal);
else
current += length;
}
}
return null;
}
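indexToSubtreeHelp calls a countLeaves helper that is not shown on this page; for a quick test, Tree.getLeaves().size() is an equivalent stand-in for well-formed parse trees (labeled as an assumption below). Note that goal is 1-based:

// Hypothetical stand-in for the countLeaves helper referenced above:
static int countLeaves(Tree t) { return t.getLeaves().size(); }

// Then, e.g. in a main method of the same class:
Tree tree = Tree.valueOf("(ROOT (S (NP (DT The) (NN dog)) (VP (VBD ran))))");
System.out.println(indexToSubtreeHelp(tree, 0, 2));  // (NN dog)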
Example 6: subSentences
import edu.stanford.nlp.trees.Tree; // import the class the method depends on
/**
* Collect all the clauses (sub-sentences) contained in one sentence.
*
* @param tree the parse tree of the sentence
* @param subSentList receives the extracted clauses
*/
private void subSentences(Tree tree, List<String> subSentList) {
if ("S".equals(tree.label().toString()) || "SINV".equals(tree.label().toString())) {
StringBuilder sb = new StringBuilder();
this.subSentence(tree, sb);
String strTmp = sb.toString().trim();
if (StringUtils.isNotBlank(strTmp) && strTmp.split("\\s+").length >= 8) {
// clauses with at least 8 tokens become candidates
subSentList.add(strTmp);
}
}
List<Tree> childTrees = tree.getChildrenAsList();
for (Tree childTree : childTrees) {
this.subSentences(childTree, subSentList);
}
}
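The methods in Examples 6 and 10 are private instance methods and rely on Apache Commons' StringUtils, so they are awkward to run in isolation. A simplified standalone adaptation of the same traversal (names illustrative, blank check and clause rendering inlined) might look like this:

import edu.stanford.nlp.trees.Tree;
import java.util.ArrayList;
import java.util.List;

public class ClauseDemo {
    // Simplified adaptation: collect the yield of every S/SINV node with >= 8 tokens.
    static void collectClauses(Tree tree, List<String> out) {
        String label = tree.value();
        if ("S".equals(label) || "SINV".equals(label)) {
            List<Tree> leaves = tree.getLeaves();
            if (leaves.size() >= 8) {
                StringBuilder sb = new StringBuilder();
                for (Tree leaf : leaves) sb.append(leaf.nodeString()).append(' ');
                out.add(sb.toString().trim());
            }
        }
        for (Tree child : tree.getChildrenAsList()) collectClauses(child, out);
    }

    public static void main(String[] args) {
        Tree t = Tree.valueOf(
            "(ROOT (S (NP (PRP He)) (VP (VBD said) (SBAR (IN that) (S (NP (DT the) (NN deal)) (VP (MD would) (VP (VB close) (PP (IN by) (NP (NN Friday))))))))))");
        List<String> clauses = new ArrayList<String>();
        collectClauses(t, clauses);
        // Only the outer S has >= 8 tokens:
        System.out.println(clauses);  // [He said that the deal would close by Friday]
    }
}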
Example 7: annotatePhraseStructureRecursively
import edu.stanford.nlp.trees.Tree; // import the class the method depends on
/**
* Generate a SyntaxTreeNode Annotation corresponding to this Tree. Works
* recursively so that the annotations are actually generated from the bottom
* up, in order to build the consists list of annotation IDs.
*
* @param annotationSet
*          the AnnotationSet to which the new annotation is added
* @param stanfordSentence
*          the sentence being processed, used to map token positions to
*          document offsets
* @param tree
*          the current subtree
* @param rootTree
*          the whole sentence, used to find the span of the current subtree
* @return a GATE Annotation of type "SyntaxTreeNode"
*/
protected Annotation annotatePhraseStructureRecursively(
AnnotationSet annotationSet, StanfordSentence stanfordSentence,
Tree tree, Tree rootTree) {
Annotation annotation = null;
Annotation child;
String label = tree.value();
List<Tree> children = tree.getChildrenAsList();
if(children.size() == 0) { return null; }
/* implied else */
/*
* The following line generates a ClassCastException:
*   IntPair span = tree.getSpan();
*   -> edu.stanford.nlp.ling.CategoryWordTag
*      at edu.stanford.nlp.trees.Tree.getSpan(Tree.java:393)
* but I think it's a bug in the parser, so I'm hacking around it as follows.
*/
int startPos = Trees.leftEdge(tree, rootTree);
int endPos = Trees.rightEdge(tree, rootTree);
Long startNode = stanfordSentence.startPos2offset(startPos);
Long endNode = stanfordSentence.endPos2offset(endPos);
List<Integer> consists = new ArrayList<Integer>();
Iterator<Tree> childIter = children.iterator();
while(childIter.hasNext()) {
child =
annotatePhraseStructureRecursively(annotationSet, stanfordSentence,
childIter.next(), rootTree);
if((child != null) && (!child.getType().equals(inputTokenType))) {
consists.add(child.getId());
}
}
annotation =
annotatePhraseStructureConstituent(annotationSet, startNode, endNode,
label, consists, tree.depth());
return annotation;
}
Example 8: argsOfPhrase
import edu.stanford.nlp.trees.Tree; // import the class the method depends on
public static String argsOfPhrase(Tree tree, Tree parent) {
String args = "";
List<Tree> children = parent.getChildrenAsList();
for( Tree child : children ) {
if( child != tree ) {
if( args.length() == 0 ) args = child.label().toString(); // first sibling: no ":" separator
else args += ":" + child.label().toString();
}
}
return args;
}
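A hedged snippet: pass a child and its parent, and get back the colon-separated labels of the child's siblings (with the empty-length check fixed above, the first label carries no leading colon):

Tree s = Tree.valueOf("(S (NP (DT The) (NN dog)) (VP (VBD ran)) (. .))");
Tree vp = s.getChild(1);
System.out.println(argsOfPhrase(vp, s));  // NP:.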
Example 9: getTreeLength
import edu.stanford.nlp.trees.Tree; // import the class the method depends on
public static int getTreeLength(Tree tree) {
int sum = 0;
if( tree.firstChild().isLeaf() )
return 1;
else
for( Tree child : tree.getChildrenAsList() )
sum += getTreeLength(child);
return sum;
}
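getTreeLength counts the preterminals, i.e. the sentence length in words. A quick hedged check:

Tree tree = Tree.valueOf("(ROOT (S (NP (DT The) (NN dog)) (VP (VBD ran))))");
System.out.println(getTreeLength(tree));  // 3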
Example 10: subSentence
import edu.stanford.nlp.trees.Tree; // import the class the method depends on
/**
* Append the complete clause below an S node, i.e. the subtree's surface string.
*
* @param tree the subtree to render
* @param sb the buffer that accumulates the words
*/
private void subSentence(Tree tree, StringBuilder sb) {
if (tree.isLeaf()) {
sb.append(tree.nodeString() + " ");
return;
} else {
List<Tree> childTrees = tree.getChildrenAsList();
for (Tree child : childTrees) {
this.subSentence(child, sb);
}
}
}
Example 11: sameProduction
import edu.stanford.nlp.trees.Tree; // import the class the method depends on
private boolean sameProduction(Tree t1, Tree t2) {
if (t1.value().equals(t2.value())) {
List<Tree> childList1 = t1.getChildrenAsList();
List<Tree> childList2 = t2.getChildrenAsList();
if (childList1.size() == childList2.size()) {
for (int i = 0; i < childList1.size(); i++) {
if (!childList1.get(i).value().equals(childList2.get(i).value())) {
return false;
}
}
return true;
}
}
return false;
}
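sameProduction compares only the labels of the two roots and their immediate children, not the words below them. A hedged snippet (assuming the private method is accessible, e.g. from within the same class):

Tree t1 = Tree.valueOf("(NP (DT the) (NN dog))");
Tree t2 = Tree.valueOf("(NP (DT a) (NN cat))");
System.out.println(sameProduction(t1, t2));  // true: both are NP -> DT NN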
Example 12: constructConstituent
import edu.stanford.nlp.trees.Tree; // import the class the method depends on
/**
* @param root the current subtree
* @param left index of the first token covered by this subtree
* @param right index one past the last token covered by this subtree
* @param n the length of the sentence in tokens
* @param p the Parse to which the new Constituent is added
* @param tokenizationUUID
* @param hf the HeadFinder used to pick each constituent's head child
* @return The constituent ID
* @throws AnalyticException
*/
private static int constructConstituent(Tree root, int left,
int right, int n, Parse p, UUID tokenizationUUID, HeadFinder hf)
throws AnalyticException {
Constituent constituent = new Constituent();
constituent.setId(p.getConstituentListSize());
constituent.setTag(root.value());
constituent.setStart(left);
constituent.setEnding(right);
p.addToConstituentList(constituent);
Tree headTree = null;
if (!root.isLeaf()) {
try {
headTree = hf.determineHead(root);
} catch (java.lang.IllegalArgumentException iae) {
LOGGER.warn("Failed to find head, falling back on rightmost constituent.", iae);
headTree = root.children()[root.numChildren() - 1];
}
}
int i = 0, headTreeIdx = -1;
int leftPtr = left;
for (Tree child : root.getChildrenAsList()) {
int width = child.getLeaves().size();
int childId = constructConstituent(child, leftPtr, leftPtr
+ width, n, p, tokenizationUUID, hf);
constituent.addToChildList(childId);
leftPtr += width;
if (headTree != null && child == headTree) {
assert (headTreeIdx < 0);
headTreeIdx = i;
}
i++;
}
if (headTreeIdx >= 0)
constituent.setHeadChildIndex(headTreeIdx);
if (!constituent.isSetChildList())
constituent.setChildList(new ArrayList<Integer>());
return constituent.getId();
}
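The Concrete-specific types (Constituent, Parse, UUID) make this example hard to run in isolation, but the head-finding step at its core is plain Stanford API. A minimal sketch with CollinsHeadFinder (one HeadFinder implementation that could be passed in as hf):

import edu.stanford.nlp.trees.CollinsHeadFinder;
import edu.stanford.nlp.trees.HeadFinder;
import edu.stanford.nlp.trees.Tree;

public class HeadFinderDemo {
    public static void main(String[] args) {
        Tree s = Tree.valueOf("(S (NP (DT The) (NN dog)) (VP (VBD ran)))");
        HeadFinder hf = new CollinsHeadFinder();
        // determineHead returns the head child of this node, here the VP.
        System.out.println(hf.determineHead(s));  // (VP (VBD ran))
    }
}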
Example 13: getDependencyByLine
import edu.stanford.nlp.trees.Tree; // import the class the method depends on
public ArrayList<ArrayList<String>> getDependencyByLine(
LexicalizedParser lp, String filename, String authorfilename) {
ArrayList<ArrayList<String>> retArrayList = new ArrayList<ArrayList<String>>();
TreebankLanguagePack tlp = new PennTreebankLanguagePack();
GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
try {
BufferedReader br = new BufferedReader(new FileReader(filename));
BufferedReader authorReader = new BufferedReader(new FileReader(
authorfilename));
String line = "";
String author = "";
while ((line = br.readLine()) != null) {
author = authorReader.readLine();
Tokenizer<? extends HasWord> toke = tlp.getTokenizerFactory()
.getTokenizer(new StringReader(line));
List<? extends HasWord> sentence = toke.tokenize();
Tree parse = lp.apply(sentence);
List<Tree> childTrees = parse.getChildrenAsList();
Stack<Tree> treeStack = new Stack<Tree>();
treeStack.addAll(childTrees);
Label prevLabel = null;
Label curLabel = parse.label();
HashMap<Integer, Pair<Label, Label>> wordTagMap = new HashMap<Integer, Pair<Label, Label>>();
int depth = 1;
while (!treeStack.isEmpty()) {
Tree curTree = treeStack.pop();
prevLabel = curLabel;
curLabel = curTree.label();
childTrees = curTree.getChildrenAsList();
if (0 == childTrees.size()) {
// word node
wordTagMap.put(depth, new Pair<Label, Label>(curLabel,
prevLabel));
depth++;
} else {
treeStack.addAll(childTrees);
}
}
final int numWord = wordTagMap.size();
GrammaticalStructure gs = gsf.newGrammaticalStructure(parse);
List<TypedDependency> tdl = gs.typedDependenciesCCprocessed();
for (TypedDependency typedDep : tdl) {
int govIndex = typedDep.gov().index();
int depIndex = typedDep.dep().index();
if (wordTagMap.containsKey(govIndex)
&& wordTagMap.containsKey(depIndex)) {
ArrayList<String> arrList = new ArrayList<String>();
arrList.add(typedDep.dep().nodeString());
arrList.add(wordTagMap.get(numWord
- typedDep.dep().index() + 1).snd.toString());
arrList.add(typedDep.reln().toString());
arrList.add(typedDep.gov().nodeString());
arrList.add(wordTagMap.get(numWord
- typedDep.gov().index() + 1).snd.toString());
arrList.add(author);
arrList.add(line);
retArrayList.add(arrList);
}
}
}
br.close();
authorReader.close();
} catch (Exception e) {
e.printStackTrace();
}
return retArrayList;
}
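Stripped of the file handling and the depth-based word map, the dependency-extraction core of this example can be exercised on a single sentence. A hedged sketch, assuming the englishPCFG model jar is on the classpath (the model path below is the standard one shipped with Stanford CoreNLP):

import edu.stanford.nlp.ling.HasWord;
import edu.stanford.nlp.parser.lexparser.LexicalizedParser;
import edu.stanford.nlp.process.Tokenizer;
import edu.stanford.nlp.trees.*;

import java.io.StringReader;
import java.util.List;

public class TypedDepDemo {
    public static void main(String[] args) {
        LexicalizedParser lp = LexicalizedParser.loadModel(
                "edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz");
        TreebankLanguagePack tlp = new PennTreebankLanguagePack();
        Tokenizer<? extends HasWord> toke =
                tlp.getTokenizerFactory().getTokenizer(new StringReader("The dog ran home."));
        List<? extends HasWord> sentence = toke.tokenize();
        Tree parse = lp.apply(sentence);
        GrammaticalStructure gs =
                tlp.grammaticalStructureFactory().newGrammaticalStructure(parse);
        // Prints relations such as nsubj(ran-3, dog-2) and det(dog-2, The-1).
        for (TypedDependency td : gs.typedDependenciesCCprocessed()) {
            System.out.println(td);
        }
    }
}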
Example 14: calculateTreeIDF
import edu.stanford.nlp.trees.Tree; // import the class the method depends on
/**
* Recurses over a tree and adds IDF counts for its leaves.
* Leaves must be nouns, verbs, adjectives or adverbs to be counted.
*/
public void calculateTreeIDF(Tree tree, Tree fullTree) {
// if tree is a preterminal
if( tree.isPreTerminal() ) {
String wordString = tree.firstChild().value().toLowerCase();
// If we are counting every single token
if( _ignorepostags ) {
// Don't count numerals or weird starting punctuation tokens
if( wordString.matches("[a-zA-Z].*") && !wordString.matches(".*\\d.*") ) {
// Count the documents the word appears in, for IDF score
if( !_seen.containsKey(wordString) ) {
_seen.put(wordString, true);
// add the count of documents
_idf.increaseDocCount(wordString);
}
_idf.increaseTermFrequency(wordString);
}
}
// Else we are lemmatizing and only counting Verbs, Nouns, Adjectives, and Adverbs.
else {
char normPostag = normalizePOS(tree.label().value().toString());
// if a verb, look for a particle
if( normPostag == 'v' ) {
// System.out.println(wordString + " tree " + tree.parent(fullTree));
String particle = BasicEventAnalyzer.findParticleInTree(tree.parent(fullTree));
// System.out.println(wordString + " particle " + particle);
if( particle != null ) wordString += "_" + particle.toLowerCase();
}
// Verbs, Nouns, Adjectives, Adverbs
if( normPostag == 'v' || normPostag == 'n' || normPostag == 'j' || normPostag == 'r' ) {
String key = createKey(wordString, normPostag);
// Don't count numerals or weird starting punctuation tokens
if( wordString.matches("[a-zA-Z].*") && !wordString.matches(".*\\d.*") ) {
// Count the documents the word appears in, for IDF score
if( !_seen.containsKey(key) ) {
_seen.put(key, true);
// add the count of documents
_idf.increaseDocCount(key);
}
_idf.increaseTermFrequency(key);
// LEMMA counts
String lemmaString = null;
if( normPostag == 'v' ) lemmaString = _wordnet.verbToLemma(wordString);
else if( normPostag == 'n' ) lemmaString = _wordnet.nounToLemma(wordString);
else if( normPostag == 'j' ) lemmaString = _wordnet.adjectiveToLemma(wordString);
else if( normPostag == 'r' ) lemmaString = _wordnet.nounToLemma(wordString);
if( lemmaString == null ) lemmaString = wordString;
key = createKey(lemmaString, normPostag);
if( !_seenLemmas.containsKey(key) ) {
_seenLemmas.put(key, true);
_idfLemmas.increaseDocCount(key);
}
_idfLemmas.increaseTermFrequency(key);
}
}
}
}
// else recurse on the tree
else {
List<Tree> children = tree.getChildrenAsList();
for( Tree child : children )
calculateTreeIDF(child, fullTree);
}
}
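This method leans on instance state (_idf, _idfLemmas, _seen, _wordnet, and so on) that this page does not show. A minimal self-contained sketch of the same traversal pattern, counting document frequency per lowercase token over the preterminals (all names here are illustrative stand-ins, not the original class's fields):

import edu.stanford.nlp.trees.Tree;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class IdfCountDemo {
    // Document-frequency counts; illustrative stand-in for the original _idf store.
    static Map<String, Integer> docCounts = new HashMap<String, Integer>();

    // seen holds the tokens already counted for the current document.
    static void countTokens(Tree tree, Set<String> seen) {
        if (tree.isPreTerminal()) {
            String word = tree.firstChild().value().toLowerCase();
            // Skip numerals and tokens not starting with a letter, as in the original.
            if (word.matches("[a-zA-Z].*") && !word.matches(".*\\d.*") && seen.add(word)) {
                Integer c = docCounts.get(word);
                docCounts.put(word, c == null ? 1 : c + 1);
            }
        } else {
            for (Tree child : tree.getChildrenAsList()) countTokens(child, seen);
        }
    }

    public static void main(String[] args) {
        Tree doc = Tree.valueOf("(ROOT (S (NP (DT The) (NN dog)) (VP (VBD ran))))");
        countTokens(doc, new HashSet<String>());
        System.out.println(docCounts);  // e.g. {the=1, dog=1, ran=1} (order may vary)
    }
}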