This article compiles typical usage examples of the Java class edu.stanford.nlp.trees.Tree. If you are wondering what the Tree class does and how to use it, the curated code examples below may help.
The Tree class belongs to the edu.stanford.nlp.trees package. Fifteen code examples of the Tree class are shown below, sorted by popularity by default.
Example 1: getStanfordSentimentRate
import edu.stanford.nlp.trees.Tree; // import the required package/class
public int getStanfordSentimentRate(String sentimentText) {
    Properties props = new Properties();
    props.setProperty("annotators", "tokenize, ssplit, parse, sentiment");
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
    int totalRate = 0;
    String[] linesArr = sentimentText.split("\\.");
    for (int i = 0; i < linesArr.length; i++) {
        if (linesArr[i] != null) {
            Annotation annotation = pipeline.process(linesArr[i]);
            for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
                Tree tree = sentence.get(SentimentCoreAnnotations.SentimentAnnotatedTree.class);
                int score = RNNCoreAnnotations.getPredictedClass(tree);
                totalRate = totalRate + (score - 2);
            }
        }
    }
    return totalRate;
}
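A minimal usage sketch for the method above; the enclosing class name (SentimentRater) is hypothetical, since the snippet does not show it. Each sentence contributes its predicted class shifted by -2, so the sum is negative for overall negative text and positive for overall positive text.

SentimentRater rater = new SentimentRater(); // hypothetical enclosing class of getStanfordSentimentRate
int rate = rater.getStanfordSentimentRate("I love this phone. The battery is terrible.");
System.out.println("aggregate sentiment: " + rate); // < 0 leans negative, > 0 leans positive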
Example 2: demoDP
import edu.stanford.nlp.trees.Tree; // import the required package/class
/**
 * demoDP demonstrates turning a file into tokens and then parse trees. Note
 * that the trees are printed by calling pennPrint on the Tree object. It is
 * also possible to pass a PrintWriter to pennPrint if you want to capture
 * the output.
 *
 * file => tokens => parse trees
 */
public static void demoDP(LexicalizedParser lp, String filename) {
    // This option shows loading, sentence-segmenting and tokenizing
    // a file using DocumentPreprocessor.
    TreebankLanguagePack tlp = new PennTreebankLanguagePack();
    GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
    // You could also create a tokenizer here (as below) and pass it
    // to DocumentPreprocessor
    for (List<HasWord> sentence : new DocumentPreprocessor(filename)) {
        Tree parse = lp.apply(sentence);
        parse.pennPrint();
        System.out.println();

        GrammaticalStructure gs = gsf.newGrammaticalStructure(parse);
        Collection tdl = gs.typedDependenciesCCprocessed();
        System.out.println(tdl);
        System.out.println();
    }
}
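A sketch of a main method that could drive demoDP from the same class, assuming the stock English PCFG model from the CoreNLP models jar is on the classpath:

public static void main(String[] args) {
    // Load the standard English PCFG grammar shipped in the CoreNLP models jar.
    LexicalizedParser lp =
        LexicalizedParser.loadModel("edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz");
    demoDP(lp, args[0]); // args[0]: path to a plain-text file to parse
}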
Example 3: findSentiment
import edu.stanford.nlp.trees.Tree; // import the required package/class
public static int findSentiment(String tweet) {
    int mainSentiment = 0;
    if (tweet != null && tweet.length() > 0) {
        int longest = 0;
        Annotation annotation = pipeline.process(tweet);
        for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
            Tree tree = sentence.get(SentimentCoreAnnotations.SentimentAnnotatedTree.class);
            int sentiment = RNNCoreAnnotations.getPredictedClass(tree);
            String partText = sentence.toString();
            if (partText.length() > longest) {
                mainSentiment = sentiment;
                longest = partText.length();
            }
        }
    }
    return mainSentiment;
}
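The static pipeline field used above is not part of the snippet; it is typically initialized once for the class, roughly as in this sketch (everything except the field name is an assumption):

private static final StanfordCoreNLP pipeline;
static {
    Properties props = new Properties();
    // The sentiment annotator needs tokenize, ssplit and parse upstream of it.
    props.setProperty("annotators", "tokenize, ssplit, parse, sentiment");
    pipeline = new StanfordCoreNLP(props);
}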
Example 4: toStringBuilder
import edu.stanford.nlp.trees.Tree; // import the required package/class
static StringBuilder toStringBuilder(Tree tree, StringBuilder sb,
        boolean printOnlyLabelValue, String offset) {
    if (tree.isLeaf()) {
        if (tree.label() != null) sb.append(printOnlyLabelValue ? tree.label().value() : tree.label());
        return sb;
    }
    sb.append('(');
    if (tree.label() != null) {
        if (printOnlyLabelValue) {
            if (tree.value() != null) sb.append(tree.label().value());
            // don't print a null, just nothing!
        } else {
            sb.append(tree.label());
        }
    }
    Tree[] kids = tree.children();
    if (kids != null) {
        for (Tree kid : kids) {
            if (kid.isLeaf()) sb.append(' ');
            else sb.append('\n').append(offset).append(' ');
            toStringBuilder(kid, sb, printOnlyLabelValue, offset + "  ");
        }
    }
    return sb.append(')');
}
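A quick way to exercise this helper, assuming it is called from the class that defines it: build a Tree from a Penn-style bracketing with Tree.valueOf and pass in a fresh StringBuilder. The sentence is only an illustration.

Tree tree = Tree.valueOf("(ROOT (S (NP (DT The) (NN dog)) (VP (VBZ barks))))");
StringBuilder sb = toStringBuilder(tree, new StringBuilder(), true, "");
System.out.println(sb); // indented bracketing, roughly one constituent per line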
Example 5: treeToDot
import edu.stanford.nlp.trees.Tree; // import the required package/class
public String treeToDot()
{
    String result = "graph {\n";
    Queue<Tree> q = new LinkedList<>();
    q.add(this);
    int a, b;
    a = this.hashCode() * this.children().hashCode();
    result += " N_" + (a < 0 ? -a % Integer.MAX_VALUE : a) + " [label=\"" + this.label() + "\"];\n";
    while (!q.isEmpty())
    {
        Tree t = q.remove();
        for (Tree child : t.children())
        {
            a = t.hashCode() * t.children().hashCode();
            if (child.children().length > 0)
                b = child.hashCode() * child.children().hashCode();
            else
                b = child.hashCode() * this.hashCode();
            result += " N_" + (b < 0 ? -b % Integer.MAX_VALUE : b) + " [label=\"" + child.label() + "\"];\n";
            result += " N_" + (a < 0 ? -a % Integer.MAX_VALUE : a) + " -- " + "N_" + (b < 0 ? -b % Integer.MAX_VALUE : b) + ";\n";
            q.add(child);
        }
    }
    result += "}";
    return result;
}
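Since the method refers to this as a Tree, it presumably lives in a subclass of Tree; the returned string is Graphviz DOT source. A sketch of writing it out and rendering it (the file names and the dot command are assumptions):

String dot = parseTree.treeToDot(); // parseTree: an instance of the Tree subclass defining treeToDot()
try {
    java.nio.file.Files.write(java.nio.file.Paths.get("tree.dot"), dot.getBytes());
} catch (java.io.IOException e) {
    e.printStackTrace();
}
// then, from a shell with Graphviz installed:  dot -Tpng tree.dot -o tree.png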
Example 6: makeConcreteCParse
import edu.stanford.nlp.trees.Tree; // import the required package/class
/**
 * Whenever there is an empty parse, this method sets the required
 * constituent list to an empty list. It is up to the caller to decide what
 * to do with the returned Parse.
 *
 * @param n
 *          the number of tokens in the sentence
 *
 * @throws AnalyticException
 */
private Parse makeConcreteCParse(Tree root, int n, UUID tokenizationUUID, HeadFinder hf) throws AnalyticException {
    int left = 0;
    int right = root.getLeaves().size();
    if (right != n)
        throw new AnalyticException("number of leaves in the parse (" + right
                + ") is not equal to the number of tokens in the sentence (" + n + ")");
    Parse p = new ParseFactory(this.gen).create();
    TheoryDependencies deps = new TheoryDependencies();
    deps.addToTokenizationTheoryList(tokenizationUUID);
    AnnotationMetadata md = new AnnotationMetadata("Stanford CoreNLP", Timing.currentLocalTime(), 1);
    p.setMetadata(md);
    constructConstituent(root, left, right, n, p, tokenizationUUID, hf);
    if (!p.isSetConstituentList()) {
        LOGGER.warn("Setting constituent list to compensate for the empty parse for tokenization id {} and tree {}",
                tokenizationUUID, root);
        p.setConstituentList(new ArrayList<Constituent>());
    }
    return p;
}
Example 7: findSentiment
import edu.stanford.nlp.trees.Tree; // import the required package/class
public static int findSentiment(String text) {
    int mainSentiment = 0;
    if (text != null && text.length() > 0) {
        int longest = 0;
        Annotation annotation = pipeline.process(text);
        for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
            Tree tree = sentence.get(SentimentCoreAnnotations.AnnotatedTree.class);
            int sentiment = RNNCoreAnnotations.getPredictedClass(tree);
            String partText = sentence.toString();
            if (partText.length() > longest) {
                mainSentiment = sentiment;
                longest = partText.length();
            }
        }
    }
    return mainSentiment;
}
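This variant reads SentimentCoreAnnotations.AnnotatedTree, the key name used by older CoreNLP releases; in more recent releases the equivalent (non-deprecated) key is SentimentAnnotatedTree, as in Example 3, so with a newer jar the lookup would be:

Tree tree = sentence.get(SentimentCoreAnnotations.SentimentAnnotatedTree.class);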
Example 8: countTokenPairsWithCoref
import edu.stanford.nlp.trees.Tree; // import the required package/class
/**
 * Count all pairs of tokens with coref arguments in the document.
 * This counts token pairs with their relations, not just tokens.
 */
private void countTokenPairsWithCoref(List<Tree> trees, List<List<TypedDependency>> deps, List<EntityMention> mentions, List<NERSpan> ners) {
    // Now get the "token:arg" events whose arg may corefer elsewhere.
    List<WordEvent> events = extractEvents(trees, deps, mentions, _wordnet, _tokenType, _fullPrep);
    // for( WordEvent event : events ) System.out.println("event: " + event.toStringFull());

    // Count arguments of tokens with their objects. (collocations)
    if( _countObjectCollocations ) {
        List<WordEvent> allNewEvents = new ArrayList<WordEvent>();
        for( WordEvent event : events ) {
            List<WordEvent> newEvents = getCollocations(event, deps.get(event.sentenceID()-1), ners, _wordnet);
            if( newEvents != null ) {
                // for( WordEvent newEvent : newEvents ) System.out.println("NEW event: " + newEvent.toStringFull());
                allNewEvents.addAll(newEvents);
            }
        }
        events.addAll(allNewEvents);
    }

    // Count the pairs.
    countEventPairs(events, 10000);
}
Example 9: verbTreesFromTree
import edu.stanford.nlp.trees.Tree; // import the required package/class
/**
 * @return A list of verb subtrees e.g. (VBG running)
 */
public static Vector<Tree> verbTreesFromTree(Tree tree) {
    // System.out.println("verbTree: " + tree);
    Vector<Tree> verbs = new Vector<Tree>();
    // System.out.println("  tree label: " + tree.label().value().toString());

    // if the tree is a preterminal verb node
    if( tree.isPreTerminal() && tree.label().value().startsWith("VB") ) {
        // System.out.println("  if!!");
        // add the verb subtree
        verbs.add(tree);
    }
    // otherwise recurse into the children
    else {
        // System.out.println("  else!!");
        List<Tree> children = tree.getChildrenAsList();
        for( Tree child : children ) {
            Vector<Tree> temp = verbTreesFromTree(child);
            verbs.addAll(temp);
        }
    }

    return verbs;
}
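A small sketch of calling the helper on a hand-built tree; Tree.valueOf parses a Penn-style bracketing, and the expected output is indicated in the comment:

Tree tree = Tree.valueOf("(ROOT (S (NP (DT The) (NN dog)) (VP (VBZ barks) (CC and) (VBZ runs))))");
Vector<Tree> verbs = verbTreesFromTree(tree);
System.out.println(verbs); // e.g. [(VBZ barks), (VBZ runs)]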
Example 10: compute
import edu.stanford.nlp.trees.Tree; // import the required package/class
private double compute(int i, int j, List<Tree> nodes1, List<Tree> nodes2, double[][] mem) {
    if (mem[i][j] >= 0) {
        return mem[i][j];
    }
    //if (sameProduction(nodes1.get(i), nodes2.get(j))) {
    if (nodes1.get(i).value().equals(nodes2.get(j).value()) &&
            nodes1.get(i).hashCode() == nodes2.get(j).hashCode()) { // same hashCode -> same production
        mem[i][j] = lambda * lambda;
        if (!nodes1.get(i).isLeaf() && !nodes2.get(j).isLeaf()) {
            List<Tree> childList1 = nodes1.get(i).getChildrenAsList();
            List<Tree> childList2 = nodes2.get(j).getChildrenAsList();
            for (int k = 0; k < childList1.size(); k++) {
                //mem[i][j] *= 1 + compute(nodes1.indexOf(childList1.get(k)), nodes2.indexOf(childList2.get(k)), nodes1, nodes2, mem);
                mem[i][j] *= 1 + compute(indexOf(nodes1, childList1.get(k)), indexOf(nodes2, childList2.get(k)), nodes1, nodes2, mem);
            }
        }
    } else {
        mem[i][j] = 0.0;
    }
    return mem[i][j];
}
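This is the memoized recursion of a tree-kernel computation. It calls a private indexOf helper that is not shown in the snippet; a plausible version, using reference identity so that repeated identical subtrees are not conflated, might look like the following (an assumption, not the original author's code):

// Hypothetical helper: position of this exact node object in the node list.
private static int indexOf(List<Tree> nodes, Tree node) {
    for (int i = 0; i < nodes.size(); i++) {
        if (nodes.get(i) == node) { // identity, not equals(), to find this particular subtree
            return i;
        }
    }
    return -1;
}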
Example 11: getEventTimeBigram
import edu.stanford.nlp.trees.Tree; // import the required package/class
/**
 * Create one feature string, the bigram of the event word and the rightmost token in the timex phrase.
 * The bigram is ordered by text order.
 */
private Counter<String> getEventTimeBigram(TextEvent event, Timex timex, List<Tree> trees) {
    Counter<String> feats = new ClassicCounter<String>();
    List<String> tokens = TreeOperator.stringLeavesFromTree(trees.get(timex.sid()));
    String timeToken = tokens.get(timex.offset()-1);
    if( TimebankUtil.isDayOfWeek(timeToken) )
        timeToken = "DAYOFWEEK";

    if( event.sid() == timex.sid() && event.index() < timex.offset() )
        feats.incrementCount("bi-" + tokens.get(event.index()-1) + "_" + timeToken);
    else if( event.sid() == timex.sid() )
        feats.incrementCount("bi-" + timeToken + "_" + tokens.get(event.index()-1));
    // In different sentences.
    else {
        List<String> eventTokens = TreeOperator.stringLeavesFromTree(trees.get(event.sid()));
        if( event.sid() < timex.sid() )
            feats.incrementCount("bi-" + eventTokens.get(event.index()-1) + "_" + timeToken);
        else
            feats.incrementCount("bi-" + timeToken + "_" + eventTokens.get(event.index()-1));
    }
    return feats;
}
Example 12: wordIndex
import edu.stanford.nlp.trees.Tree; // import the required package/class
/**
 * @return The WORD INDEX (starting at 0) where the stop tree begins
 */
public static int wordIndex(Tree full, Tree stop) {
    if( full == null || full == stop ) return 0;

    int sum = 0;
    // if( full.isPreTerminal() ) {
    if( full.firstChild().isLeaf() ) {
        return 1;
    }
    else {
        for( Tree child : full.getChildrenAsList() ) {
            if( child == stop ) {
                // System.out.println("Stopping at " + child);
                return sum;
            }
            sum += wordIndex(child, stop);
            if( child.contains(stop) ) return sum;
        }
    }
    return sum;
}
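A usage sketch, assuming the call is made from the class defining wordIndex: pick a subtree of a parsed sentence and ask for the 0-based word index at which it starts. The bracketed sentence is only an illustration.

Tree full = Tree.valueOf("(ROOT (S (NP (DT The) (NN dog)) (VP (VBZ barks))))");
Tree stop = full.firstChild().getChild(1); // the VP subtree, (VP (VBZ barks))
System.out.println(wordIndex(full, stop)); // expected: 2, since "barks" is the third word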
Example 13: inorderTraverse
import edu.stanford.nlp.trees.Tree; // import the required package/class
/**
 * @return The CHARACTER OFFSET where the stop tree begins
 */
public static int inorderTraverse(Tree full, Tree stop) {
    if( full == null || full == stop ) return 0;

    int sum = 0;
    if( full.isPreTerminal() ) {
        String value = full.firstChild().value();
        // System.out.println(value + " is " + value.length());
        return value.length() + 1; // +1 for space character
        // return full.firstChild().value().length() + 1;
    }
    else {
        for( Tree child : full.getChildrenAsList() ) {
            if( child == stop ) {
                // System.out.println("Stopping at " + child);
                return sum;
            }
            sum += inorderTraverse(child, stop);
            if( child.contains(stop) ) return sum;
        }
    }
    return sum;
}
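The character-offset counterpart of wordIndex above; on the same illustrative tree it returns the character position at which the stop subtree's text begins, assuming single spaces between tokens:

Tree full = Tree.valueOf("(ROOT (S (NP (DT The) (NN dog)) (VP (VBZ barks))))");
Tree stop = full.firstChild().getChild(1); // the VP subtree
System.out.println(inorderTraverse(full, stop)); // expected: 8, the offset of "barks" in "The dog barks"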
Example 14: buildTokenLemma
import edu.stanford.nlp.trees.Tree; // import the required package/class
/**
 * Finds the token in the tree and uses its POS tag to look up the lemma in WordNet.
 * Also attaches a particle if there is one for the token.
 */
public static String buildTokenLemma(String token, int index, Tree tree, Map<Integer, String> particles, WordNet wordnet) {
    if( index == 0 )
        return null;

    Tree subtree = TreeOperator.indexToSubtree(tree, index);
    if( subtree == null ) {
        System.out.println("null subtree " + token + " index " + index + " tree=" + tree);
        // System.exit(-1);
        return null;
    }
    String posTag = subtree.label().value();
    String govLemma = wordnet.lemmatizeTaggedWord(token, posTag);
    if( CountVerbDeps.isNumber(token) )
        govLemma = CountVerbDeps.NUMBER_STRING;

    // Attach particle.
    if( particles != null && particles.size() > 0 ) {
        String particle = particles.get(index);
        if( particle != null )
            govLemma = govLemma + "_" + particle;
    }

    char normalPOS = CalculateIDF.normalizePOS(posTag);
    return CalculateIDF.createKey(govLemma, normalPOS);
}
Example 15: getSingleEventTokenFeatures
import edu.stanford.nlp.trees.Tree; // import the required package/class
/**
 * Create token/lemma/synset features for an event.
 * @param eventIndex Either 1 or 2, the first or second event in your link. This differentiates the feature names.
 */
private Counter<String> getSingleEventTokenFeatures(int eventIndex, TextEvent event1, List<Tree> trees) {
    Counter<String> feats = new ClassicCounter<String>();
    String token = event1.string();
    String postag = TreeOperator.indexToPOSTag(trees.get(event1.sid()), event1.index());
    String lemma = _wordnet.lemmatizeTaggedWord(token, postag);

    // Token and Lemma
    feats.incrementCount("token" + eventIndex + "-" + token);
    feats.incrementCount("lemma" + eventIndex + "-" + lemma);

    // WordNet synset
    Synset[] synsets = null;
    if( postag.startsWith("VB") )
        synsets = _wordnet.synsetsOf(token, POS.VERB);
    else if( postag.startsWith("NN") )
        synsets = _wordnet.synsetsOf(token, POS.NOUN);
    if( synsets != null && synsets.length > 0 )
        feats.incrementCount("synset" + eventIndex + "-" + synsets[0].getOffset());

    return feats;
}