

Java Tree Class Code Examples

This article collects typical usage examples of the Java class edu.stanford.nlp.trees.Tree from Stanford CoreNLP. If you are wondering what the Tree class is for, or how to use it in practice, the curated code examples below should help.


The Tree class belongs to the edu.stanford.nlp.trees package. Fifteen code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
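Most of the examples below assume you already have a parsed Tree in hand. As a reference point, here is a minimal sketch of producing one with LexicalizedParser. The model path is the standard one shipped in the CoreNLP models jar; the Sentence helper class is the one used by older CoreNLP releases (newer releases renamed it SentenceUtils), so adjust for your version.

import java.util.List;
import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.ling.Sentence;
import edu.stanford.nlp.parser.lexparser.LexicalizedParser;
import edu.stanford.nlp.trees.Tree;

public class TreeQuickStart {
    public static void main(String[] args) {
        // Load the bundled English PCFG model (requires the CoreNLP models jar).
        LexicalizedParser lp = LexicalizedParser.loadModel(
                "edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz");
        String[] sent = { "This", "is", "an", "easy", "sentence", "." };
        List<CoreLabel> words = Sentence.toCoreLabelList(sent);
        Tree parse = lp.apply(words);
        parse.pennPrint(); // bracketed Penn Treebank output
    }
}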

Example 1: getStanfordSentimentRate

import edu.stanford.nlp.trees.Tree; // import the required package/class
public int getStanfordSentimentRate(String sentimentText) {
    Properties props = new Properties();
    props.setProperty("annotators", "tokenize, ssplit, parse, sentiment");
    // Note: building a pipeline is expensive; production code should create it
    // once and reuse it rather than constructing one per call.
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
    int totalRate = 0;
    String[] linesArr = sentimentText.split("\\.");
    for (int i = 0; i < linesArr.length; i++) {
        if (!linesArr[i].trim().isEmpty()) { // split() never yields null entries
            Annotation annotation = pipeline.process(linesArr[i]);
            for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
                Tree tree = sentence.get(SentimentCoreAnnotations.SentimentAnnotatedTree.class);
                int score = RNNCoreAnnotations.getPredictedClass(tree);
                // Classes run from 0 (very negative) to 4 (very positive); shift to [-2, 2].
                totalRate = totalRate + (score - 2);
            }
        }
    }
    return totalRate;
}
 
Developer: wso2-incubator, Project: twitter-sentiment-analysis, Lines: 20, Source: StanfordNLP.java
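For context, a hypothetical call might look like the following, assuming the method lives in the StanfordNLP class named in the source field above. Since each sentence contributes (class - 2), every sentence adds a value in [-2, 2], so mixed reviews tend toward 0.

StanfordNLP nlp = new StanfordNLP();
// "I love it" should score positive; "It broke immediately" negative.
int rate = nlp.getStanfordSentimentRate("I love it. It broke immediately.");
System.out.println("aggregate sentiment: " + rate); // near 0 for mixed text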

Example 2: demoDP

import edu.stanford.nlp.trees.Tree; // import the required package/class
/**
 * demoDP demonstrates turning a file into tokens and then parse trees. Note
 * that the trees are printed by calling pennPrint on the Tree object. It is
 * also possible to pass a PrintWriter to pennPrint if you want to capture
 * the output.
 * 
 * file => tokens => parse trees
 */
public static void demoDP(LexicalizedParser lp, String filename) {
	// This option shows loading, sentence-segmenting and tokenizing
	// a file using DocumentPreprocessor.
	TreebankLanguagePack tlp = new PennTreebankLanguagePack();
	GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
	// You could also create a tokenizer here (as below) and pass it
	// to DocumentPreprocessor
	for (List<HasWord> sentence : new DocumentPreprocessor(filename)) {
		Tree parse = lp.apply(sentence);
		parse.pennPrint();
		System.out.println();

		GrammaticalStructure gs = gsf.newGrammaticalStructure(parse);
		Collection<TypedDependency> tdl = gs.typedDependenciesCCprocessed();
		System.out.println(tdl);
		System.out.println();
	}
}
 
Developer: opinion-extraction-propagation, Project: TASC-Tuples, Lines: 27, Source: ParserDemo.java
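The Javadoc above mentions that pennPrint can also take a PrintWriter. A small sketch of capturing the bracketing as a String inside demoDP's loop, instead of printing to stdout (the StringWriter plumbing is plain java.io, not part of the original demo):

StringWriter sw = new StringWriter();
PrintWriter pw = new PrintWriter(sw);
parse.pennPrint(pw);  // Tree.pennPrint(PrintWriter) writes the bracketing to pw
pw.flush();
String penn = sw.toString(); // bracketed Penn Treebank representation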

Example 3: findSentiment

import edu.stanford.nlp.trees.Tree; // import the required package/class
public static int findSentiment(String tweet) {
    int mainSentiment = 0;
    if (tweet != null && tweet.length() > 0) {
        int longest = 0;
        Annotation annotation = pipeline.process(tweet);
        for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
            Tree tree = sentence.get(SentimentCoreAnnotations.SentimentAnnotatedTree.class);
            int sentiment = RNNCoreAnnotations.getPredictedClass(tree);
            String partText = sentence.toString();
            // Keep the sentiment of the longest sentence as the overall result.
            if (partText.length() > longest) {
                mainSentiment = sentiment;
                longest = partText.length();
            }
        }
    }
    return mainSentiment;
}
 
Developer: Activiti, Project: activiti-cloud-examples, Lines: 22, Source: NLP.java
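Note that this method references a pipeline field that is not shown in the snippet. One plausible initialization, mirroring Example 1 (this is an assumption about the surrounding NLP class, not code from the project):

static StanfordCoreNLP pipeline;

static {
    Properties props = new Properties();
    props.setProperty("annotators", "tokenize, ssplit, parse, sentiment");
    pipeline = new StanfordCoreNLP(props); // built once, reused across calls
}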

Example 4: toStringBuilder

import edu.stanford.nlp.trees.Tree; // import the required package/class
static StringBuilder toStringBuilder(Tree tree, StringBuilder sb,
    boolean printOnlyLabelValue, String offset) {
  if (tree.isLeaf()) {
    if (tree.label() != null) {
      sb.append(printOnlyLabelValue ? tree.label().value() : tree.label());
    }
    return sb;
  }
  sb.append('(');
  if (tree.label() != null) {
    if (printOnlyLabelValue) {
      if (tree.value() != null) sb.append(tree.label().value());
      // don't print a null, just nothing!
    } else {
      sb.append(tree.label());
    }
  }
  Tree[] kids = tree.children();
  if (kids != null) {
    for (Tree kid : kids) {
      if (kid.isLeaf()) sb.append(' ');
      else sb.append('\n').append(offset).append(' ');
      toStringBuilder(kid, sb, printOnlyLabelValue, offset + "  ");
    }
  }
  return sb.append(')');
}
 
Developer: sivareddyg, Project: UDepLambda, Lines: 26, Source: TreePrinter.java
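A hypothetical call, assuming parse is a Tree obtained as in the earlier examples; with printOnlyLabelValue set to true this yields an indented bracketing similar to pennPrint:

StringBuilder sb = new StringBuilder();
toStringBuilder(parse, sb, true, ""); // start with an empty indentation offset
System.out.println(sb.toString());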

Example 5: treeToDot

import edu.stanford.nlp.trees.Tree; // import the required package/class
public String treeToDot()
{
    String result = "graph {\n";
    Queue<Tree> q = new LinkedList<>();
    q.add(this);
    int a, b;
    // Node IDs are derived from hash codes; negative values are folded positive.
    // (Hash collisions are possible, so this is best-effort for visualization.)
    a = this.hashCode() * this.children().hashCode();
    result += " N_" + (a < 0 ? -a % Integer.MAX_VALUE : a) + " [label=\"" + this.label() + "\"];\n";
    while (!q.isEmpty())
    {
        Tree t = q.remove();
        for (Tree child : t.children())
        {
            a = t.hashCode() * t.children().hashCode();
            if (child.children().length > 0)
                b = child.hashCode() * child.children().hashCode();
            else
                b = child.hashCode() * this.hashCode();
            result += " N_" + (b < 0 ? -b % Integer.MAX_VALUE : b) + " [label=\"" + child.label() + "\"];\n";
            result += " N_" + (a < 0 ? -a % Integer.MAX_VALUE : a) + " -- N_" + (b < 0 ? -b % Integer.MAX_VALUE : b) + ";\n";
            q.add(child);
        }
    }
    result += "}";
    return result;
}
 
Developer: skrtbhtngr, Project: corenlp-helper, Lines: 27, Source: TreeExtended.java
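The returned string is Graphviz DOT. A sketch of writing it out and rendering it, using java.nio.file (extendedTree stands for a TreeExtended instance and the file name is arbitrary):

String dot = extendedTree.treeToDot();
Files.write(Paths.get("parse.dot"), dot.getBytes(StandardCharsets.UTF_8));
// Render from a shell with Graphviz installed:
//   dot -Tpng parse.dot -o parse.png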

Example 6: makeConcreteCParse

import edu.stanford.nlp.trees.Tree; // import the required package/class
/**
 * Whenever there's an empty parse, this method sets the required
 * constituent list to an empty list. It is up to the caller to decide
 * what to do with the returned Parse.
 *
 * @param n
 *          the number of tokens in the sentence
 *
 * @throws AnalyticException
 */
private Parse makeConcreteCParse(Tree root, int n, UUID tokenizationUUID, HeadFinder hf) throws AnalyticException {
  int left = 0;
  int right = root.getLeaves().size();
  if (right != n)
    throw new AnalyticException("number of leaves in the parse (" + right + ") is not equal to the number of tokens in the sentence (" + n + ")");

  Parse p = new ParseFactory(this.gen).create();
  TheoryDependencies deps = new TheoryDependencies();
  deps.addToTokenizationTheoryList(tokenizationUUID);
  AnnotationMetadata md = new AnnotationMetadata("Stanford CoreNLP", Timing.currentLocalTime(), 1);
  p.setMetadata(md);
  constructConstituent(root, left, right, n, p, tokenizationUUID, hf);
  if (!p.isSetConstituentList()) {
    LOGGER.warn("Setting constituent list to compensate for the empty parse for tokenization id {} and tree {}", tokenizationUUID, root);
    p.setConstituentList(new ArrayList<Constituent>());
  }
  return p;
}
 
Developer: hltcoe, Project: concrete-stanford-deprecated2, Lines: 29, Source: PreNERCoreMapWrapper.java

Example 7: findSentiment

import edu.stanford.nlp.trees.Tree; // import the required package/class
public static int findSentiment(String text) {
    int mainSentiment = 0;
    if (text != null && text.length() > 0) {
        int longest = 0;
        Annotation annotation = pipeline.process(text);
        for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
            // AnnotatedTree is the name used by older CoreNLP releases;
            // newer ones renamed it SentimentAnnotatedTree.
            Tree tree = sentence.get(SentimentCoreAnnotations.AnnotatedTree.class);
            int sentiment = RNNCoreAnnotations.getPredictedClass(tree);
            String partText = sentence.toString();
            if (partText.length() > longest) {
                mainSentiment = sentiment;
                longest = partText.length();
            }
        }
    }
    return mainSentiment;
}
 
Developer: dflick-pivotal, Project: sentimentr-release, Lines: 22, Source: NLP.java

Example 8: countTokenPairsWithCoref

import edu.stanford.nlp.trees.Tree; // import the required package/class
/**
 * Count all pairs of tokens with coref arguments in the document.
 * This counts token pairs with their relations, not just tokens.
 */
private void countTokenPairsWithCoref(List<Tree> trees, List<List<TypedDependency>> deps, List<EntityMention> mentions, List<NERSpan> ners) {
    // Now get the "token:arg" events whose arg may corefer elsewhere.
    List<WordEvent> events = extractEvents(trees, deps, mentions, _wordnet, _tokenType, _fullPrep);

    // for( WordEvent event : events ) System.out.println("event: " + event.toStringFull());

    // Count arguments of tokens with their objects. (collocations)
    if (_countObjectCollocations) {
        List<WordEvent> allNewEvents = new ArrayList<WordEvent>();
        for (WordEvent event : events) {
            List<WordEvent> newEvents = getCollocations(event, deps.get(event.sentenceID() - 1), ners, _wordnet);
            if (newEvents != null) {
                // for( WordEvent newEvent : newEvents ) System.out.println("NEW event: " + newEvent.toStringFull());
                allNewEvents.addAll(newEvents);
            }
        }
        events.addAll(allNewEvents);
    }

    // Count the pairs.
    countEventPairs(events, 10000);
}
 
Developer: nchambers, Project: schemas, Lines: 27, Source: CountTokenPairs.java

Example 9: verbTreesFromTree

import edu.stanford.nlp.trees.Tree; // import the required package/class
/**
 * @return A list of verb subtrees e.g. (VBG running)
 */
public static Vector<Tree> verbTreesFromTree(Tree tree) {
    Vector<Tree> verbs = new Vector<Tree>();

    // If the tree is a preterminal whose tag starts with "VB",
    // add the verb subtree itself.
    if (tree.isPreTerminal() && tree.label().value().startsWith("VB")) {
        verbs.add(tree);
    }
    // Otherwise recurse into the children.
    else {
        List<Tree> children = tree.getChildrenAsList();
        for (Tree child : children) {
            verbs.addAll(verbTreesFromTree(child));
        }
    }

    return verbs;
}
 
Developer: nchambers, Project: probschemas, Lines: 27, Source: TreeOperator.java
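For example, on a parse of "The dog was running home", the method returns the preterminal subtrees (VBD was) and (VBG running). A hypothetical call, assuming TreeOperator is the class named in the source field above and parse is a Tree from the earlier examples:

Vector<Tree> verbs = TreeOperator.verbTreesFromTree(parse);
for (Tree verb : verbs) {
    System.out.println(verb); // e.g. (VBD was), (VBG running)
}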

Example 10: compute

import edu.stanford.nlp.trees.Tree; // import the required package/class
private double compute(int i, int j, List<Tree> nodes1, List<Tree> nodes2, double[][] mem) {
    if (mem[i][j] >= 0) {
        return mem[i][j];
    }
    //if (sameProduction(nodes1.get(i), nodes2.get(j))) {
    // Matching value plus matching hashCode is used as a proxy for "same production".
    if (nodes1.get(i).value().equals(nodes2.get(j).value()) &&
            nodes1.get(i).hashCode() == nodes2.get(j).hashCode()) {
        
        mem[i][j] = lambda * lambda;
        if (!nodes1.get(i).isLeaf() && !nodes2.get(j).isLeaf()) {
            List<Tree> childList1 = nodes1.get(i).getChildrenAsList();
            List<Tree> childList2 = nodes2.get(j).getChildrenAsList();
            for (int k = 0; k < childList1.size(); k++) {
                //mem[i][j] *= 1 + compute(nodes1.indexOf(childList1.get(k)), nodes2.indexOf(childList2.get(k)), nodes1, nodes2, mem);
                mem[i][j] *= 1 + compute(indexOf(nodes1, childList1.get(k)), indexOf(nodes2, childList2.get(k)), nodes1, nodes2, mem);
            }
        }
    } else {
        mem[i][j] = 0.0;            
    }
            
    return mem[i][j];
}
 
Developer: iamxiatian, Project: wikit, Lines: 24, Source: TreeKernel.java
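The method relies on a project-internal indexOf helper (note the commented-out List.indexOf variant). A plausible reconstruction of why it exists: List.indexOf compares with equals(), which can conflate structurally equal but distinct subtrees, so an identity-based lookup is safer. This sketch is an assumption, not the project's actual code:

// Hypothetical identity-based lookup: returns the position of this exact
// node object, not of the first structurally equal subtree.
private static int indexOf(List<Tree> nodes, Tree target) {
    for (int k = 0; k < nodes.size(); k++) {
        if (nodes.get(k) == target) return k;
    }
    return -1;
}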

Example 11: getEventTimeBigram

import edu.stanford.nlp.trees.Tree; // import the required package/class
/**
 * Create one feature string, the bigram of the event word and the rightmost token in the timex phrase.
 * The bigram is ordered by text order.
 */
private Counter<String> getEventTimeBigram(TextEvent event, Timex timex, List<Tree> trees) {
  Counter<String> feats = new ClassicCounter<String>();
  List<String> tokens = TreeOperator.stringLeavesFromTree(trees.get(timex.sid()));
  String timeToken = tokens.get(timex.offset()-1);
  if( TimebankUtil.isDayOfWeek(timeToken) )
    timeToken = "DAYOFWEEK";
  
  if( event.sid() == timex.sid() && event.index() < timex.offset() )
    feats.incrementCount("bi-" + tokens.get(event.index()-1) + "_" + timeToken);
  else if( event.sid() == timex.sid() )
    feats.incrementCount("bi-" + timeToken + "_" + tokens.get(event.index()-1));

  // In different sentences.
  else {
    List<String> eventTokens = TreeOperator.stringLeavesFromTree(trees.get(event.sid()));
    if( event.sid() < timex.sid() )
      feats.incrementCount("bi-" + eventTokens.get(event.index()-1) + "_" + timeToken);
    else
      feats.incrementCount("bi-" + timeToken + "_" + eventTokens.get(event.index()-1));
  }
  
  return feats;
}
 
Developer: nchambers, Project: schemas, Lines: 28, Source: TLinkFeaturizer.java

Example 12: wordIndex

import edu.stanford.nlp.trees.Tree; // import the required package/class
/**
 * @return The WORD INDEX (starting at 0) where the stop tree begins
 */
public static int wordIndex(Tree full, Tree stop) {
    if (full == null || full == stop) return 0;

    int sum = 0;
    // A preterminal covers exactly one word.
    if (full.firstChild().isLeaf()) {
        return 1;
    }
    else {
        for (Tree child : full.getChildrenAsList()) {
            if (child == stop) {
                return sum;
            }
            sum += wordIndex(child, stop);
            if (child.contains(stop)) return sum;
        }
    }
    return sum;
}
 
Developer: nchambers, Project: probschemas, Lines: 24, Source: TreeOperator.java
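A hypothetical use: locating the word offset of the first NP in a parse. Tree implements Iterable<Tree> (a preorder traversal), so iterating over parse visits every subtree:

for (Tree subtree : parse) {
    if (!subtree.isLeaf() && subtree.label().value().equals("NP")) {
        System.out.println("first NP starts at word " + TreeOperator.wordIndex(parse, subtree));
        break;
    }
}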

Example 13: inorderTraverse

import edu.stanford.nlp.trees.Tree; // import the required package/class
/**
 * @return The CHARACTER OFFSET where the stop tree begins
 */
public static int inorderTraverse(Tree full, Tree stop) {
    if (full == null || full == stop) return 0;

    int sum = 0;
    if (full.isPreTerminal()) {
        String value = full.firstChild().value();
        // Offsets assume tokens are joined by single spaces; +1 accounts for the space.
        return value.length() + 1;
    }
    else {
        for (Tree child : full.getChildrenAsList()) {
            if (child == stop) {
                return sum;
            }
            sum += inorderTraverse(child, stop);
            if (child.contains(stop)) return sum;
        }
    }
    return sum;
}
 
Developer: nchambers, Project: probschemas, Lines: 26, Source: TreeOperator.java

Example 14: buildTokenLemma

import edu.stanford.nlp.trees.Tree; // import the required package/class
/**
 * Finds the token in the tree, and uses its POS tag to look up the lemma in WordNet.
 * Also attaches a particle if there is one for the token.
 */
  public static String buildTokenLemma(String token, int index, Tree tree, Map<Integer, String> particles, WordNet wordnet) {
    if( index == 0 )
      return null;
    
    Tree subtree = TreeOperator.indexToSubtree(tree, index);
    if( subtree == null ) {
      System.out.println("null subtree " + token + " index " + index + " tree=" + tree);
//      System.exit(-1);
      return null;
    }
    String posTag = subtree.label().value();
    String govLemma = wordnet.lemmatizeTaggedWord(token, posTag);
    if( CountVerbDeps.isNumber(token) )
      govLemma = CountVerbDeps.NUMBER_STRING;
    // Attach particle.
    if( particles != null && particles.size() > 0 ) {
      String particle = particles.get(index);
      if( particle != null )
        govLemma = govLemma + "_" + particle;
    }
    char normalPOS = CalculateIDF.normalizePOS(posTag);

    return CalculateIDF.createKey(govLemma, normalPOS);
  }
 
Developer: nchambers, Project: schemas, Lines: 29, Source: CountTokenPairs.java

Example 15: getSingleEventTokenFeatures

import edu.stanford.nlp.trees.Tree; // import the required package/class
/**
 * Create token/lemma/synset features for an event.
 * @param eventIndex Either 1 or 2, the first or second event in your link. This differentiates the feature names.
 */
private Counter<String> getSingleEventTokenFeatures(int eventIndex, TextEvent event1, List<Tree> trees) {
  Counter<String> feats = new ClassicCounter<String>();
  
  String token = event1.string();
  String postag = TreeOperator.indexToPOSTag(trees.get(event1.sid()), event1.index());
  String lemma = _wordnet.lemmatizeTaggedWord(token, postag);

  // Token and Lemma
  feats.incrementCount("token" + eventIndex + "-" + token);
  feats.incrementCount("lemma" + eventIndex + "-" + lemma);
  
  // WordNet synset
  Synset[] synsets = null;
  if( postag.startsWith("VB") )
    synsets = _wordnet.synsetsOf(token, POS.VERB);
  else if( postag.startsWith("NN") )
    synsets = _wordnet.synsetsOf(token, POS.NOUN);
  if( synsets != null && synsets.length > 0 )
    feats.incrementCount("synset" + eventIndex + "-" + synsets[0].getOffset());
  
  return feats;
}
 
Developer: nchambers, Project: schemas, Lines: 27, Source: TLinkFeaturizer.java


Note: The edu.stanford.nlp.trees.Tree class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by various developers; copyright in the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.