This article collects typical usage examples of the Java method edu.stanford.nlp.trees.Tree.label. If you are unsure what Tree.label does, how to call it, or want to see it used in context, the curated code samples below may help. You can also read more about the enclosing class, edu.stanford.nlp.trees.Tree.
Seven code examples of Tree.label are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
Example 1: toStringBuilder
import edu.stanford.nlp.trees.Tree; // import the class this method depends on
static StringBuilder toStringBuilder(Tree tree, StringBuilder sb,
                                     boolean printOnlyLabelValue, String offset) {
  // Leaf: print its label (or just the label's value) and stop recursing.
  if (tree.isLeaf()) {
    if (tree.label() != null) {
      sb.append(printOnlyLabelValue ? tree.label().value() : tree.label());
    }
    return sb;
  }
  sb.append('(');
  if (tree.label() != null) {
    if (printOnlyLabelValue) {
      if (tree.value() != null) sb.append(tree.label().value());
      // don't print a null, just nothing!
    } else {
      sb.append(tree.label());
    }
  }
  // Recurse into the children: leaves continue on the same line, internal
  // nodes start a new line at one deeper level of indentation.
  Tree[] kids = tree.children();
  if (kids != null) {
    for (Tree kid : kids) {
      if (kid.isLeaf()) sb.append(' ');
      else sb.append('\n').append(offset).append(' ');
      toStringBuilder(kid, sb, printOnlyLabelValue, offset + " ");
    }
  }
  return sb.append(')');
}
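A minimal usage sketch (not from the original article): it assumes toStringBuilder from Example 1 is in scope, and builds a small tree with Tree.valueOf rather than running a parser.

import edu.stanford.nlp.trees.Tree;

// Sketch only: prints an indented bracketing of a hand-built tree.
Tree tree = Tree.valueOf("(S (NP (DT The) (NN cat)) (VP (VBZ sleeps)))");
System.out.println(toStringBuilder(tree, new StringBuilder(), true, ""));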
Example 2: treeToDot
import edu.stanford.nlp.trees.Tree; // import the class this method depends on
public String treeToDot()
{
  // Emit this tree in Graphviz DOT format. Node IDs are derived from hash
  // codes; negative products are mapped back into the positive range.
  String result = "graph {\n";
  Queue<Tree> q = new LinkedList<>();
  q.add(this);
  int a, b;
  a = this.hashCode() * this.children().hashCode();
  result += " N_" + (a < 0 ? -a % Integer.MAX_VALUE : a)
      + " [label=\"" + this.label() + "\"];\n";
  while (!q.isEmpty())
  {
    Tree t = q.remove();
    for (Tree child : t.children())
    {
      a = t.hashCode() * t.children().hashCode();
      if (child.children().length > 0)
        b = child.hashCode() * child.children().hashCode();
      else
        b = child.hashCode() * this.hashCode();
      // Declare the child node, then connect it to its parent.
      result += " N_" + (b < 0 ? -b % Integer.MAX_VALUE : b)
          + " [label=\"" + child.label() + "\"];\n";
      result += " N_" + (a < 0 ? -a % Integer.MAX_VALUE : a)
          + " -- " + "N_" + (b < 0 ? -b % Integer.MAX_VALUE : b) + ";\n";
      q.add(child);
    }
  }
  result += "}";
  return result;
}
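A hedged usage sketch: assuming tree is an instance of the (unnamed) Tree subclass that declares treeToDot(), the DOT string can be written out and rendered with Graphviz. Files.writeString requires Java 11+.

// Sketch only: `tree` stands in for an instance of the subclass above.
java.nio.file.Files.writeString(java.nio.file.Path.of("tree.dot"), tree.treeToDot());
// Then render from a shell:  dot -Tpng tree.dot -o tree.png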
Example 3: addTreebankNodeToIndexes
import edu.stanford.nlp.trees.Tree; // import the class this method depends on
private void addTreebankNodeToIndexes(
    TreebankNode node,
    JCas jCas,
    Tree tree,
    List<CoreLabel> tokenAnns) {
  // figure out begin and end character offsets
  CoreMap label = (CoreMap) tree.label();
  CoreMap beginToken = tokenAnns.get(label.get(BeginIndexAnnotation.class));
  CoreMap endToken = tokenAnns.get(label.get(EndIndexAnnotation.class) - 1);
  int nodeBegin = beginToken.get(CharacterOffsetBeginAnnotation.class);
  int nodeEnd = endToken.get(CharacterOffsetEndAnnotation.class);
  // set span, node type, children (mutual recursion), and add it to the JCas
  node.setBegin(nodeBegin);
  node.setEnd(nodeEnd);
  node.setNodeType(tree.value());
  node.setChildren(this.addTreebankNodeChildrenToIndexes(node, jCas, tokenAnns, tree));
  node.setLeaf(node.getChildren().size() == 0);
  node.addToIndexes();
}
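For context, a hedged sketch of how those label annotations are read. It assumes the tree's labels are CoreLabels carrying token-index annotations (e.g., produced by a CoreNLP parse); the annotation classes live in edu.stanford.nlp.ling.CoreAnnotations. Note that EndIndexAnnotation is exclusive, which is why the method above subtracts 1 when looking up the last covered token.

import edu.stanford.nlp.ling.CoreAnnotations.BeginIndexAnnotation;
import edu.stanford.nlp.ling.CoreAnnotations.EndIndexAnnotation;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.util.CoreMap;

// Sketch only: the 0-based token span [begin, end) covered by a constituent.
static int[] tokenSpan(Tree tree) {
  CoreMap label = (CoreMap) tree.label();
  return new int[] { label.get(BeginIndexAnnotation.class),
                     label.get(EndIndexAnnotation.class) };
}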
Example 4: preorder
import edu.stanford.nlp.trees.Tree; // import the class this method depends on
private static String preorder(Tree tree) {
  List<Tree> queue = new LinkedList<>();
  queue.add(tree);
  while (!queue.isEmpty()) {
    Tree currentNode = queue.remove(0);
    if (currentNode.isLeaf())
      continue;
    Tree[] children = currentNode.children();
    int childCount = children.length;
    // The head word is stored on the node's label as an IndexedWord.
    IndexedWord hw = (IndexedWord) currentNode.label();
    List<FeatureNode> featureNodes = new ArrayList<>(childCount);
    for (int i = 0; i < childCount; i++) {
      featureNodes.add(new FeatureNode(children[i], hw));
      queue.add(children[i]);
    }
    // Skip nodes with 8 or more children, presumably because the
    // permutation search below grows too quickly with the child count.
    if (childCount < 8) {
      // search() is defined elsewhere in this class; it returns the
      // best-scoring child permutation found, or null if none.
      Pair<Double, List<Integer>> result =
          search(featureNodes, new LinkedList<Integer>(), Double.NEGATIVE_INFINITY);
      if (result != null) {
        // Apply the permutation: children[i] moves to position permutation.get(i).
        List<Integer> permutation = result.second;
        List<Tree> newChildren = new ArrayList<>(Arrays.asList(children));
        for (int i = 0; i < childCount; i++) {
          int idx = permutation.get(i);
          newChildren.set(idx, children[i]);
        }
        currentNode.setChildren(newChildren);
      } else {
        System.err.println("Warning: No path found.");
      }
    }
  }
  return StringUtils.join(tree.yieldWords());
}
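One subtlety worth spelling out: result.second maps each original child position i to its new position, so children[i] lands at index permutation.get(i). A tiny self-contained illustration with plain strings instead of Trees:

// Sketch only: applying a position map the way the loop above does.
String[] children = { "A", "B", "C" };
List<Integer> permutation = Arrays.asList(2, 0, 1);  // A->2, B->0, C->1
List<String> reordered = new ArrayList<>(Arrays.asList(children));
for (int i = 0; i < children.length; i++) {
  reordered.set(permutation.get(i), children[i]);
}
// reordered is now [B, C, A]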
Example 5: FeatureNode
import edu.stanford.nlp.trees.Tree; // import the class this method depends on
FeatureNode(Tree node, IndexedWord hw) {
  List<Label> yield = node.yield();
  this.word = (IndexedWord) node.label();               // this node's own word
  this.hw = hw;                                         // head word of the parent node
  this.lm = (IndexedWord) yield.get(0);                 // leftmost word in the yield
  this.rm = (IndexedWord) yield.get(yield.size() - 1);  // rightmost word in the yield
  this.dst = hw.index() - this.word.index();            // signed distance to the head
}
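For reference, Tree.yield() returns the tree's leaf labels from left to right, which is why get(0) and get(yield.size() - 1) pick out the leftmost and rightmost words. A quick check with a hand-built tree:

// Sketch only: yield() lists leaf labels in sentence order.
Tree t = Tree.valueOf("(ROOT (NP (DT the) (NN cat)))");
System.out.println(t.yield());  // prints [the, cat]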
Example 6: getDependencyByLine
import edu.stanford.nlp.trees.Tree; // import the class this method depends on
public ArrayList<ArrayList<String>> getDependencyByLine(
    LexicalizedParser lp, String filename, String authorfilename) {
  ArrayList<ArrayList<String>> retArrayList = new ArrayList<ArrayList<String>>();
  TreebankLanguagePack tlp = new PennTreebankLanguagePack();
  GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
  try {
    BufferedReader br = new BufferedReader(new FileReader(filename));
    BufferedReader authorReader = new BufferedReader(new FileReader(authorfilename));
    String line = "";
    String author = "";
    while ((line = br.readLine()) != null) {
      author = authorReader.readLine();
      // Tokenize and parse one line of input.
      Tokenizer<? extends HasWord> toke =
          tlp.getTokenizerFactory().getTokenizer(new StringReader(line));
      List<? extends HasWord> sentence = toke.tokenize();
      Tree parse = lp.apply(sentence);
      // Walk the tree with an explicit stack, pairing each word with the
      // label of the node popped just before it: its POS tag, since a
      // preterminal has exactly one child.
      List<Tree> childTrees = parse.getChildrenAsList();
      Stack<Tree> treeStack = new Stack<Tree>();
      treeStack.addAll(childTrees);
      Label prevLabel = null;
      Label curLabel = parse.label();
      HashMap<Integer, Pair<Label, Label>> wordTagMap =
          new HashMap<Integer, Pair<Label, Label>>();
      int depth = 1;
      while (!treeStack.isEmpty()) {
        Tree curTree = treeStack.pop();
        prevLabel = curLabel;
        curLabel = curTree.label();
        childTrees = curTree.getChildrenAsList();
        if (0 == childTrees.size()) {
          // Word (leaf) node. The stack visits children right-to-left, so
          // words are numbered in reverse sentence order; the lookups
          // below undo this with (numWord - index + 1).
          wordTagMap.put(depth, new Pair<Label, Label>(curLabel, prevLabel));
          depth++;
        } else {
          treeStack.addAll(childTrees);
        }
      }
      final int numWord = wordTagMap.size();
      // Extract CC-processed typed dependencies; one tuple per relation.
      GrammaticalStructure gs = gsf.newGrammaticalStructure(parse);
      List<TypedDependency> tdl = gs.typedDependenciesCCprocessed();
      for (TypedDependency typedDep : tdl) {
        int govIndex = typedDep.gov().index();
        int depIndex = typedDep.dep().index();
        // Skips relations involving the root, whose index is 0.
        if (wordTagMap.containsKey(govIndex) && wordTagMap.containsKey(depIndex)) {
          ArrayList<String> arrList = new ArrayList<String>();
          arrList.add(typedDep.dep().nodeString());
          arrList.add(wordTagMap.get(numWord - typedDep.dep().index() + 1).snd.toString());
          arrList.add(typedDep.reln().toString());
          arrList.add(typedDep.gov().nodeString());
          arrList.add(wordTagMap.get(numWord - typedDep.gov().index() + 1).snd.toString());
          arrList.add(author);
          arrList.add(line);
          retArrayList.add(arrList);
        }
      }
    }
    br.close();
    authorReader.close();
  } catch (Exception e) {
    // Report the error and return whatever was collected so far.
    e.printStackTrace();
  }
  return retArrayList;
}
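A hedged usage sketch; the model path, file names, and the wrapper class MyExtractor are assumptions, not from the original:

// Sketch only: extract dependency tuples line by line.
LexicalizedParser lp =
    LexicalizedParser.loadModel("edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz");
ArrayList<ArrayList<String>> tuples =
    new MyExtractor().getDependencyByLine(lp, "sentences.txt", "authors.txt");
for (ArrayList<String> t : tuples) {
  System.out.println(t);  // [dep, depTag, relation, gov, govTag, author, sentence]
}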
Example 7: getDependencyBySentence
import edu.stanford.nlp.trees.Tree; // import the class this method depends on
/**
 * file => tokens => parse trees => typed-dependency tuples
 *
 * @param lp the loaded parser
 * @param filename the input file
 * @return one tuple per dependency relation
 */
public ArrayList<ArrayList<String>> getDependencyBySentence(
    LexicalizedParser lp, String filename) {
  ArrayList<ArrayList<String>> retArrayList = new ArrayList<ArrayList<String>>();
  TreebankLanguagePack tlp = new PennTreebankLanguagePack();
  GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
  // DocumentPreprocessor splits the file into sentences and tokenizes them.
  for (List<HasWord> sentence : new DocumentPreprocessor(filename)) {
    Tree parse = lp.apply(sentence);
    // Same stack walk as in Example 6: pair each word with the label of
    // the node popped just before it (its POS tag).
    List<Tree> childTrees = parse.getChildrenAsList();
    Stack<Tree> treeStack = new Stack<Tree>();
    treeStack.addAll(childTrees);
    Label prevLabel = null;
    Label curLabel = parse.label();
    HashMap<Integer, Pair<Label, Label>> wordTagMap =
        new HashMap<Integer, Pair<Label, Label>>();
    int depth = 1;
    while (!treeStack.isEmpty()) {
      Tree curTree = treeStack.pop();
      prevLabel = curLabel;
      curLabel = curTree.label();
      childTrees = curTree.getChildrenAsList();
      if (0 == childTrees.size()) {
        // Word (leaf) node; words are numbered in reverse sentence order.
        wordTagMap.put(depth, new Pair<Label, Label>(curLabel, prevLabel));
        depth++;
      } else {
        treeStack.addAll(childTrees);
      }
    }
    final int numWord = wordTagMap.size();
    GrammaticalStructure gs = gsf.newGrammaticalStructure(parse);
    List<TypedDependency> tdl = gs.typedDependenciesCCprocessed();
    for (TypedDependency typedDep : tdl) {
      int govIndex = typedDep.gov().index();
      int depIndex = typedDep.dep().index();
      if (wordTagMap.containsKey(govIndex) && wordTagMap.containsKey(depIndex)) {
        ArrayList<String> arrList = new ArrayList<String>();
        arrList.add(typedDep.dep().nodeString());
        arrList.add(wordTagMap.get(numWord - typedDep.dep().index() + 1).snd.toString());
        arrList.add(typedDep.reln().toString());
        arrList.add(typedDep.gov().nodeString());
        arrList.add(wordTagMap.get(numWord - typedDep.gov().index() + 1).snd.toString());
        retArrayList.add(arrList);
      }
    }
  }
  return retArrayList;
}
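For a sense of what typedDependenciesCCprocessed() yields per sentence, a hedged inspection sketch (the relation shown in the comment is illustrative):

// Sketch only: print each typed dependency as reln(gov, dep).
GrammaticalStructure gs = gsf.newGrammaticalStructure(parse);
for (TypedDependency td : gs.typedDependenciesCCprocessed()) {
  System.out.println(td.reln() + "(" + td.gov() + ", " + td.dep() + ")");
  // e.g. nsubj(jumps-5, fox-4)
}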