

Java TreeCoreAnnotations Class Code Examples

This page collects typical usage examples of the Java class edu.stanford.nlp.trees.TreeCoreAnnotations. If you are wondering what exactly TreeCoreAnnotations does, how to use it, or what real code that uses it looks like, the hand-picked examples below should help.


TreeCoreAnnotations belongs to the edu.stanford.nlp.trees package. Fifteen code examples using the class are shown below, sorted by popularity by default.
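
Before the individual examples, here is a minimal quick-start sketch (not taken from any of the projects below) showing the most common pattern: run a pipeline that includes the parse annotator, then read the constituency tree of each sentence back out via TreeCoreAnnotations.TreeAnnotation. The annotator list and example sentence are illustrative assumptions.

import java.util.Properties;

import edu.stanford.nlp.ling.CoreAnnotations;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.TreeCoreAnnotations;
import edu.stanford.nlp.util.CoreMap;

public class TreeAnnotationQuickStart {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("annotators", "tokenize, ssplit, pos, parse"); // "parse" attaches TreeAnnotation
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

    Annotation doc = new Annotation("Stanford CoreNLP builds a parse tree for every sentence.");
    pipeline.annotate(doc);

    for (CoreMap sentence : doc.get(CoreAnnotations.SentencesAnnotation.class)) {
      // the constituency parse produced by the parser annotator
      Tree tree = sentence.get(TreeCoreAnnotations.TreeAnnotation.class);
      tree.pennPrint(); // print in Penn Treebank bracket notation
    }
  }
}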

Example 1: extractNPorPRP

import edu.stanford.nlp.trees.TreeCoreAnnotations; // import the required package/class
protected void extractNPorPRP(CoreMap s, List<Mention> mentions, Set<IntPair> mentionSpanSet, Set<IntPair> namedEntitySpanSet) {
  List<CoreLabel> sent = s.get(CoreAnnotations.TokensAnnotation.class);
  Tree tree = s.get(TreeCoreAnnotations.TreeAnnotation.class);
  tree.indexLeaves();
  SemanticGraph dependency = s.get(SemanticGraphCoreAnnotations.CollapsedDependenciesAnnotation.class);

  final String mentionPattern = "/^(?:NP|PRP)/";
  TregexPattern tgrepPattern = TregexPattern.compile(mentionPattern);
  TregexMatcher matcher = tgrepPattern.matcher(tree);
  while (matcher.find()) {
    Tree t = matcher.getMatch();
    List<Tree> mLeaves = t.getLeaves();
    int beginIdx = ((CoreLabel)mLeaves.get(0).label()).get(CoreAnnotations.IndexAnnotation.class)-1;
    int endIdx = ((CoreLabel)mLeaves.get(mLeaves.size()-1).label()).get(CoreAnnotations.IndexAnnotation.class);
    IntPair mSpan = new IntPair(beginIdx, endIdx);
    if(!mentionSpanSet.contains(mSpan) && !insideNE(mSpan, namedEntitySpanSet)) {
      int mentionID = assignIds? ++maxID:-1;
      Mention m = new Mention(mentionID, beginIdx, endIdx, dependency, new ArrayList<CoreLabel>(sent.subList(beginIdx, endIdx)), t);
      mentions.add(m);
      mentionSpanSet.add(mSpan);
    }
  }
}
 
Developer: benblamey, Project: stanford-nlp, Lines: 24, Source: RuleBasedCorefMentionFinder.java
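
The Tregex pattern used in Example 1 ("/^(?:NP|PRP)/") can also be tried on its own. Below is a small, self-contained sketch with a hand-written toy tree; the tree and the printed output are illustrative only and not part of the original project.

import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.tregex.TregexMatcher;
import edu.stanford.nlp.trees.tregex.TregexPattern;

public class NpOrPrpTregexDemo {
  public static void main(String[] args) {
    // toy parse of "He saw the dog."
    Tree tree = Tree.valueOf("(ROOT (S (NP (PRP He)) (VP (VBD saw) (NP (DT the) (NN dog))) (. .)))");
    TregexPattern pattern = TregexPattern.compile("/^(?:NP|PRP)/");
    TregexMatcher matcher = pattern.matcher(tree);
    while (matcher.find()) {
      Tree match = matcher.getMatch();
      // prints the matched node label and its leaves, e.g. "NP -> [He]"
      System.out.println(match.value() + " -> " + match.getLeaves());
    }
  }
}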

Example 2: findHead

import edu.stanford.nlp.trees.TreeCoreAnnotations; // import the required package/class
protected void findHead(CoreMap s, List<Mention> mentions) {
  Tree tree = s.get(TreeCoreAnnotations.TreeAnnotation.class);
  List<CoreLabel> sent = s.get(CoreAnnotations.TokensAnnotation.class);
  tree.indexSpans(0);
  for (Mention m : mentions){
    Tree head = findSyntacticHead(m, tree, sent);
    m.headIndex = ((CoreLabel) head.label()).get(CoreAnnotations.IndexAnnotation.class)-1;
    m.headWord = sent.get(m.headIndex);
    m.headString = m.headWord.get(CoreAnnotations.TextAnnotation.class).toLowerCase();
    int start = m.headIndex - m.startIndex;
    if (start < 0 || start >= m.originalSpan.size()) {
      SieveCoreferenceSystem.logger.warning("Invalid index for head " + start + "=" + m.headIndex + "-" + m.startIndex
              + ": originalSpan=[" + StringUtils.joinWords(m.originalSpan, " ") + "], head=" + m.headWord);
      SieveCoreferenceSystem.logger.warning("Setting head string to entire mention");
      m.headIndex = m.startIndex;
      m.headWord = m.originalSpan.get(0);
      m.headString = m.originalSpan.toString();
    }
  }
}
 
Developer: benblamey, Project: stanford-nlp, Lines: 21, Source: RuleBasedCorefMentionFinder.java

Example 3: recallErrors

import edu.stanford.nlp.trees.TreeCoreAnnotations; // import the required package/class
private static void recallErrors(List<List<Mention>> goldMentions, List<List<Mention>> predictedMentions, Annotation doc) throws IOException {
  List<CoreMap> coreMaps = doc.get(CoreAnnotations.SentencesAnnotation.class);
  int numSentences = goldMentions.size();
  for (int i=0;i<numSentences;i++){
    CoreMap coreMap = coreMaps.get(i);
    List<CoreLabel> words = coreMap.get(CoreAnnotations.TokensAnnotation.class);
    Tree tree = coreMap.get(TreeCoreAnnotations.TreeAnnotation.class);
    List<Mention> goldMentionsSent = goldMentions.get(i);
    List<Pair<Integer,Integer>> goldMentionsSpans = extractSpans(goldMentionsSent);

    for (Pair<Integer,Integer> mentionSpan: goldMentionsSpans){
      logger.finer("RECALL ERROR\n");
      logger.finer(coreMap + "\n");
      for (int x=mentionSpan.first;x<mentionSpan.second;x++){
        logger.finer(words.get(x).value() + " ");
      }
      logger.finer("\n"+tree + "\n");
    }
  }
}
 
Developer: benblamey, Project: stanford-nlp, Lines: 21, Source: CoNLLMentionExtractor.java

Example 4: sentenceDeepMentionCopy

import edu.stanford.nlp.trees.TreeCoreAnnotations; // import the required package/class
/**
 * Deep copy of the sentence: we create new entity/relation/event lists here.
 * However, we do not deep copy the ExtractionObjects themselves!
 * @param sentence the sentence annotation to copy
 * @return a new Annotation that shares tokens, trees, and dependency graphs with the original, but has its own mention lists
 */
public static Annotation sentenceDeepMentionCopy(Annotation sentence) {
  Annotation newSent = new Annotation(sentence.get(CoreAnnotations.TextAnnotation.class));

  newSent.set(CoreAnnotations.TokensAnnotation.class, sentence.get(CoreAnnotations.TokensAnnotation.class));
  newSent.set(TreeCoreAnnotations.TreeAnnotation.class, sentence.get(TreeCoreAnnotations.TreeAnnotation.class));
  newSent.set(SemanticGraphCoreAnnotations.CollapsedDependenciesAnnotation.class, sentence.get(SemanticGraphCoreAnnotations.CollapsedDependenciesAnnotation.class));
  newSent.set(SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class, sentence.get(SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class));
  newSent.set(SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation.class, sentence.get(SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation.class));
  newSent.set(CoreAnnotations.DocIDAnnotation.class, sentence.get(CoreAnnotations.DocIDAnnotation.class));

  // deep copy of all mentions lists
  List<EntityMention> ents = sentence.get(MachineReadingAnnotations.EntityMentionsAnnotation.class);
  if(ents != null) newSent.set(MachineReadingAnnotations.EntityMentionsAnnotation.class, new ArrayList<EntityMention>(ents));
  List<RelationMention> rels = sentence.get(MachineReadingAnnotations.RelationMentionsAnnotation.class);
  if(rels != null) newSent.set(MachineReadingAnnotations.RelationMentionsAnnotation.class, new ArrayList<RelationMention>(rels));
  List<EventMention> evs = sentence.get(MachineReadingAnnotations.EventMentionsAnnotation.class);
  if(evs != null) newSent.set(MachineReadingAnnotations.EventMentionsAnnotation.class, new ArrayList<EventMention>(evs));

  return newSent;
}
 
Developer: benblamey, Project: stanford-nlp, Lines: 26, Source: AnnotationUtils.java
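
A note on the copy semantics in Example 4: the new Annotation gets fresh mention lists, but the mention objects inside those lists are shared with the original sentence. The hedged sketch below (variable names and the presence of entity mentions are my own assumptions) illustrates the consequence.

// assumes `sentence` was produced by the MachineReading pipeline and carries entity mentions
Annotation copy = AnnotationUtils.sentenceDeepMentionCopy(sentence);
List<EntityMention> copiedEnts = copy.get(MachineReadingAnnotations.EntityMentionsAnnotation.class);
List<EntityMention> originalEnts = sentence.get(MachineReadingAnnotations.EntityMentionsAnnotation.class);

// safe: structural changes to the copied list do not touch the original list
copiedEnts.remove(0);

// not isolated: both lists still point at the same EntityMention objects, so mutating
// one of them is visible through both annotations -- exactly the
// "we do not deep copy the ExtractionObjects" caveat in the javadoc above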

Example 5: main

import edu.stanford.nlp.trees.TreeCoreAnnotations; // import the required package/class
public static void main (String[] args) {
    String string = "I went into my bedroom and flipped the light switch. Oh, I see that the ceiling lamp is not turning on." +
            " It must be that the light bulb needs replacement. I go through my closet and find a new light bulb that will fit" +
            " this lamp and place it in my pocket. I also get my stepladder and place it under the lamp. I make sure the light" +
            " switch is in the off position. I climb up the ladder and unscrew the old light bulb. I place the old bulb in my " +
            "pocket and take out the new one. I then screw in the new bulb. I climb down the stepladder and place it back into " +
            "the closet. I then throw out the old bulb into the recycling bin. I go back to my bedroom and turn on the light switch." +
            " I am happy to see that there is again light in my room.";
    Properties prop = new Properties();
    prop.setProperty("annotators", "tokenize, ssplit, pos, lemma, ner, parse, dcoref");
    //prop.setProperty("parse.model", "edu/stanford/nlp/models/parser/nndep/english_SD.gz");
    StanfordCoreNLP pipeline = new StanfordCoreNLP(prop);
    Annotation annotation = new Annotation(string);
    pipeline.annotate(annotation); // run all annotators over the text
    // a CoreMap is essentially a Map that uses class objects as keys and has values with custom types
    List<CoreMap> sentences = annotation.get(CoreAnnotations.SentencesAnnotation.class);
    for(CoreMap sentence: sentences) {
        for (CoreLabel token: sentence.get(CoreAnnotations.TokensAnnotation.class)) {
            // this is the text of the token
            String word = token.get(CoreAnnotations.TextAnnotation.class);
            // this is the POS tag of the token
            String pos = token.get(CoreAnnotations.PartOfSpeechAnnotation.class);
            // this is the NER label of the token
            //String ne = token.get(CoreAnnotations.NamedEntityTagAnnotation.class);
            System.out.print(word + "/" + pos);
        }
        System.out.println("\n");
        // this is the parse tree of the current sentence
        Tree tree = sentence.get(TreeCoreAnnotations.TreeAnnotation.class);
        System.out.println("parse tree:\n" + tree);

        SemanticGraph dependencies = sentence.get(SemanticGraphCoreAnnotations.EnhancedDependenciesAnnotation.class);
        System.out.println("dependency graph:\n" + dependencies);
    }

}
 
Developer: IsaacChanghau, Project: Word2VecfJava, Lines: 37, Source: StanfordCoreNLPExample.java

Example 6: getSentences

import edu.stanford.nlp.trees.TreeCoreAnnotations; // import the required package/class
public ArrayList<Sentence> getSentences(ArrayList<String> textInput) {
	ArrayList<Sentence> sents = new ArrayList<Sentence>();
	Properties props = new Properties();
	props.put("annotators", "tokenize, ssplit, pos, parse");
	StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
	for (String s : textInput) {
		Annotation annotation = new Annotation(s);
		pipeline.annotate(annotation);
		List<CoreMap> sentences = annotation
				.get(CoreAnnotations.SentencesAnnotation.class);
		// check the sentence list before iterating, not inside the loop
		if (sentences == null || sentences.isEmpty()) {
			continue;
		}
		for (int i = 0; i < sentences.size(); i++) {
			ArrayCoreMap sentence = (ArrayCoreMap) sentences.get(i);
			Tree tree = sentence.get(TreeCoreAnnotations.TreeAnnotation.class);
			ArrayList<Clause> clauses = getClauses(tree);
			ArrayList<Word> words = getWordList(tree);
			SemanticGraph sg = sentence.get(SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation.class);
			ArrayList<GrammarRelation> relations = getRelationList(sg);
			sents.add(new Sentence(words, relations, clauses));
		}
	}
	return sents;
}
 
Developer: nus-mmsys, Project: talk-to-code, Lines: 29, Source: StanfordParser.java

Example 7: toIndexedWord

import edu.stanford.nlp.trees.TreeCoreAnnotations; // import the required package/class
/**
 * Transform a ClearNLP DEPNode instance into a Stanford IndexedWord instance.
 * 
 * @param unit the dependency node to convert
 * @return the corresponding IndexedWord, carrying the node's index, form, POS tag, and lemma
 */
public static IndexedWord toIndexedWord(DEPNode unit) {
	IndexedWord new_unit = new IndexedWord();
	new_unit.setIndex(unit.id);
	new_unit.setValue(unit.form);
	new_unit.setWord(unit.form);
	new_unit.setTag(unit.pos);
	new_unit.setLemma(unit.lemma);
	new_unit.set(TreeCoreAnnotations.HeadTagAnnotation.class, new TreeGraphNode(new StringLabel(unit.pos)));
	return new_unit;
}
 
Developer: hakchul77, Project: irnlp_toolkit, Lines: 17, Source: ClearNlpWrapper.java

Example 8: extractEnumerations

import edu.stanford.nlp.trees.TreeCoreAnnotations; // import the required package/class
/** Extract enumerations (A, B, and C) */
protected void extractEnumerations(CoreMap s, List<Mention> mentions, Set<IntPair> mentionSpanSet, Set<IntPair> namedEntitySpanSet){
  List<CoreLabel> sent = s.get(CoreAnnotations.TokensAnnotation.class);
  Tree tree = s.get(TreeCoreAnnotations.TreeAnnotation.class);
  SemanticGraph dependency = s.get(SemanticGraphCoreAnnotations.CollapsedDependenciesAnnotation.class);

  final String mentionPattern = "NP < (/^(?:NP|NNP|NML)/=m1 $.. (/^CC|,/ $.. /^(?:NP|NNP|NML)/=m2))";
  TregexPattern tgrepPattern = TregexPattern.compile(mentionPattern);
  TregexMatcher matcher = tgrepPattern.matcher(tree);
  Map<IntPair, Tree> spanToMentionSubTree = Generics.newHashMap();
  while (matcher.find()) {
    matcher.getMatch();
    Tree m1 = matcher.getNode("m1");
    Tree m2 = matcher.getNode("m2");

    List<Tree> mLeaves = m1.getLeaves();
    int beginIdx = ((CoreLabel)mLeaves.get(0).label()).get(CoreAnnotations.IndexAnnotation.class)-1;
    int endIdx = ((CoreLabel)mLeaves.get(mLeaves.size()-1).label()).get(CoreAnnotations.IndexAnnotation.class);
    spanToMentionSubTree.put(new IntPair(beginIdx, endIdx), m1);

    mLeaves = m2.getLeaves();
    beginIdx = ((CoreLabel)mLeaves.get(0).label()).get(CoreAnnotations.IndexAnnotation.class)-1;
    endIdx = ((CoreLabel)mLeaves.get(mLeaves.size()-1).label()).get(CoreAnnotations.IndexAnnotation.class);
    spanToMentionSubTree.put(new IntPair(beginIdx, endIdx), m2);
  }

  for(IntPair mSpan : spanToMentionSubTree.keySet()){
    if(!mentionSpanSet.contains(mSpan) && !insideNE(mSpan, namedEntitySpanSet)) {
      int mentionID = assignIds? ++maxID:-1;
      Mention m = new Mention(mentionID, mSpan.get(0), mSpan.get(1), dependency,
                              new ArrayList<CoreLabel>(sent.subList(mSpan.get(0), mSpan.get(1))), spanToMentionSubTree.get(mSpan));
      mentions.add(m);
      mentionSpanSet.add(mSpan);
    }
  }
}
 
Developer: benblamey, Project: stanford-nlp, Lines: 37, Source: RuleBasedCorefMentionFinder.java

Example 9: parse

import edu.stanford.nlp.trees.TreeCoreAnnotations; // import the required package/class
private Tree parse(List<CoreLabel> tokens,
                   List<ParserConstraint> constraints) {
  CoreMap sent = new Annotation("");
  sent.set(CoreAnnotations.TokensAnnotation.class, tokens);
  sent.set(ParserAnnotations.ConstraintAnnotation.class, constraints);
  Annotation doc = new Annotation("");
  List<CoreMap> sents = new ArrayList<CoreMap>();
  sents.add(sent);
  doc.set(CoreAnnotations.SentencesAnnotation.class, sents);
  getParser().annotate(doc);
  sents = doc.get(CoreAnnotations.SentencesAnnotation.class);
  return sents.get(0).get(TreeCoreAnnotations.TreeAnnotation.class);
}
 
Developer: benblamey, Project: stanford-nlp, Lines: 14, Source: RuleBasedCorefMentionFinder.java

Example 10: fillInParseAnnotations

import edu.stanford.nlp.trees.TreeCoreAnnotations; // import the required package/class
/**
 * Thread safety note: nothing special is done to ensure the thread
 * safety of the GrammaticalStructureFactory.  However, both the
 * EnglishGrammaticalStructureFactory and the
 * ChineseGrammaticalStructureFactory are thread safe.
 */
public static void fillInParseAnnotations(boolean verbose, boolean buildGraphs, GrammaticalStructureFactory gsf, CoreMap sentence, Tree tree) {
  // make sure all tree nodes are CoreLabels
  // TODO: why isn't this always true? something fishy is going on
  ParserAnnotatorUtils.convertToCoreLabels(tree);

  // index nodes, i.e., add start and end token positions to all nodes
  // this is needed by other annotators down stream, e.g., the NFLAnnotator
  tree.indexSpans(0);

  sentence.set(TreeCoreAnnotations.TreeAnnotation.class, tree);
  if (verbose) {
    System.err.println("Tree is:");
    tree.pennPrint(System.err);
  }

  if (buildGraphs) {
    String docID = sentence.get(CoreAnnotations.DocIDAnnotation.class);
    if (docID == null) {
      docID = "";
    }

    Integer sentenceIndex = sentence.get(CoreAnnotations.SentenceIndexAnnotation.class);
    int index = (sentenceIndex == null) ? 0 : sentenceIndex;

    // generate the dependency graph
    SemanticGraph deps = SemanticGraphFactory.generateCollapsedDependencies(gsf.newGrammaticalStructure(tree), docID, index);
    SemanticGraph uncollapsedDeps = SemanticGraphFactory.generateUncollapsedDependencies(gsf.newGrammaticalStructure(tree), docID, index);
    SemanticGraph ccDeps = SemanticGraphFactory.generateCCProcessedDependencies(gsf.newGrammaticalStructure(tree), docID, index);
    if (verbose) {
      System.err.println("SDs:");
      System.err.println(deps.toString("plain"));
    }
    sentence.set(SemanticGraphCoreAnnotations.CollapsedDependenciesAnnotation.class, deps);
    sentence.set(SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class, uncollapsedDeps);
    sentence.set(SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation.class, ccDeps);
  }

  setMissingTags(sentence, tree);
}
 
Developer: benblamey, Project: stanford-nlp, Lines: 46, Source: ParserAnnotatorUtils.java
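
Example 10 is normally driven by the parser annotator, but it can also be invoked directly on an already-parsed tree. The fragment below is a hypothetical invocation sketch: the tree string, the sentence text, and the use of taggedLabeledYield() to stand in for tokenizer output are my own assumptions, not code from the project.

TreebankLanguagePack tlp = new PennTreebankLanguagePack();
GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();

Tree tree = Tree.valueOf("(ROOT (S (NP (PRP I)) (VP (VBP like) (NP (NN coffee))) (. .)))");
CoreMap sentence = new Annotation("I like coffee.");
// tokens normally come from the tokenizer; here we derive CoreLabels from the tree's tagged leaves
sentence.set(CoreAnnotations.TokensAnnotation.class, tree.taggedLabeledYield());

// verbose = false, buildGraphs = true: also attach the three dependency graphs
ParserAnnotatorUtils.fillInParseAnnotations(false, true, gsf, sentence, tree);

Tree stored = sentence.get(TreeCoreAnnotations.TreeAnnotation.class);
SemanticGraph collapsed = sentence.get(SemanticGraphCoreAnnotations.CollapsedDependenciesAnnotation.class);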

Example 11: main

import edu.stanford.nlp.trees.TreeCoreAnnotations; // import the required package/class
public static void main(String[] args) throws IOException, ClassNotFoundException {
    Timing tim = new Timing();
    AnnotationPipeline ap = new AnnotationPipeline();
    boolean verbose = false;
    ap.addAnnotator(new PTBTokenizerAnnotator(verbose));
    ap.addAnnotator(new WordsToSentencesAnnotator(verbose));
    // ap.addAnnotator(new NERCombinerAnnotator(verbose));
    // ap.addAnnotator(new OldNERAnnotator(verbose));
    // ap.addAnnotator(new NERMergingAnnotator(verbose));
    ap.addAnnotator(new ParserAnnotator(verbose, -1));
/**
    ap.addAnnotator(new UpdateSentenceFromParseAnnotator(verbose));
    ap.addAnnotator(new NumberAnnotator(verbose));
    ap.addAnnotator(new QuantifiableEntityNormalizingAnnotator(verbose));
    ap.addAnnotator(new StemmerAnnotator(verbose));
    ap.addAnnotator(new MorphaAnnotator(verbose));
**/
//    ap.addAnnotator(new SRLAnnotator());

    String text = ("USAir said in the filings that Mr. Icahn first contacted Mr. Colodny last September to discuss the benefits of combining TWA and USAir -- either by TWA's acquisition of USAir, or USAir's acquisition of TWA.");
    Annotation a = new Annotation(text);
    ap.annotate(a);
    System.out.println(a.get(CoreAnnotations.TokensAnnotation.class));
    for (CoreMap sentence : a.get(CoreAnnotations.SentencesAnnotation.class)) {
      System.out.println(sentence.get(TreeCoreAnnotations.TreeAnnotation.class));
    }

    if (TIME) {
      System.out.println(ap.timingInformation());
      System.err.println("Total time for AnnotationPipeline: " +
                         tim.toSecondsString() + " sec.");
    }
  }
 
Developer: benblamey, Project: stanford-nlp, Lines: 34, Source: AnnotationPipeline.java

Example 12: parse

import edu.stanford.nlp.trees.TreeCoreAnnotations; // import the required package/class
protected Tree parse(List<CoreLabel> tokens,
                     List<ParserConstraint> constraints) {
  CoreMap sent = new Annotation("");
  sent.set(CoreAnnotations.TokensAnnotation.class, tokens);
  sent.set(ParserAnnotations.ConstraintAnnotation.class, constraints);
  Annotation doc = new Annotation("");
  List<CoreMap> sents = new ArrayList<CoreMap>();
  sents.add(sent);
  doc.set(CoreAnnotations.SentencesAnnotation.class, sents);
  getParser().annotate(doc);
  sents = doc.get(CoreAnnotations.SentencesAnnotation.class);
  return sents.get(0).get(TreeCoreAnnotations.TreeAnnotation.class);
}
 
Developer: benblamey, Project: stanford-nlp, Lines: 14, Source: GenericDataSetReader.java

Example 13: getTree

import edu.stanford.nlp.trees.TreeCoreAnnotations; // import the required package/class
public static Tree getTree(String sentence) {
    return getOneSentence(sentence).get(TreeCoreAnnotations.TreeAnnotation.class);
}
 
Developer: xsank, Project: Shour, Lines: 4, Source: CoreNLP.java
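
Example 13 relies on a getOneSentence(...) helper that is not shown on this page. Below is a purely hypothetical sketch of what such a helper might look like; the shared pipeline and its annotator list are assumptions, and the real implementation in the CoreNLP wrapper class may differ.

// hypothetical helper: a shared pipeline plus a method that returns the first sentence of the input
private static final StanfordCoreNLP PIPELINE = createPipeline();

private static StanfordCoreNLP createPipeline() {
    Properties props = new Properties();
    props.setProperty("annotators", "tokenize, ssplit, pos, parse"); // "parse" is required for getTree
    return new StanfordCoreNLP(props);
}

public static CoreMap getOneSentence(String sentence) {
    Annotation annotation = new Annotation(sentence);
    PIPELINE.annotate(annotation);
    // return only the first sentence of the (possibly multi-sentence) input
    return annotation.get(CoreAnnotations.SentencesAnnotation.class).get(0);
}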

Example 14: addParseTree

import edu.stanford.nlp.trees.TreeCoreAnnotations; // import the required package/class
static void addParseTree(Map<String,Object> sent_info, CoreMap sentence) {
	sent_info.put("parse", sentence.get(TreeCoreAnnotations.TreeAnnotation.class).toString());
}
 
Developer: UKPLab, Project: tac2015-event-detection, Lines: 4, Source: JsonPipeline.java

Example 15: main

import edu.stanford.nlp.trees.TreeCoreAnnotations; // import the required package/class
public static void main(String[] args) {
    Properties props = new Properties();
    //props.put("annotators", "tokenize, ssplit, pos, lemma, ner, parse, dcoref");
    props.put("annotators", "tokenize, ssplit, pos, lemma, ner, parse, dcoref, sentiment");
    
    /*
    boolean caseless = true;
    if (caseless) {
            props.put("","");
            props.put("pos.model","edu/stanford/nlp/models/pos-tagger/english-caseless-left3words-distsim.tagger");
            props.put("parse.model","edu/stanford/nlp/models/lexparser/englishPCFG.caseless.ser.gz ");
            props.put("ner.model","edu/stanford/nlp/models/ner/english.all.3class.caseless.distsim.crf.ser.gz edu/stanford/nlp/models/ner/english.muc.7class.caseless.distsim.crf.ser.gz edu/stanford/nlp/models/ner/english.conll.4class.caseless.distsim.crf.ser.gz ");
    }
            */

   
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);


    Annotation annotation;
    if (args.length > 0) {
        annotation = new Annotation(IOUtils.slurpFileNoExceptions(args[0]));
    } else {
        annotation = new Annotation("This is good.  I am parsing natural language now and can help people.");
    }

    pipeline.annotate(annotation);
    
    /*pipeline.prettyPrint(annotation, out);
    if (xmlOut != null) {
        pipeline.xmlPrint(annotation, xmlOut);
    }

    out.println(annotation.toShorterString());*/
    
    
    List<CoreMap> sentences = annotation.get(CoreAnnotations.SentencesAnnotation.class);
    if (sentences == null) return;
    
    for (CoreMap sentence : sentences) {
  // traversing the words in the current sentence
        // a CoreLabel is a CoreMap with additional token-specific methods
        for (CoreLabel token : sentence.get(TokensAnnotation.class)) {
            // this is the text of the token
            String word = token.get(TextAnnotation.class);
            // this is the POS tag of the token
            String pos = token.get(PartOfSpeechAnnotation.class);
            // this is the NER label of the token
            String ne = token.get(NamedEntityTagAnnotation.class);
            System.out.println(word + " " + pos + " " + ne + " " + token);
        }

        System.out.println("sentiment: " + sentence.get(SentimentCoreAnnotations.AnnotatedTree.class));
        System.out.println("sentiment: " + sentence.get(SentimentCoreAnnotations.ClassName.class));
        
        Tree tree = sentence.get(TreeCoreAnnotations.TreeAnnotation.class);
        tree.pennPrint(out);
        System.out.println(sentence.get(SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class).toString("plain"));

        SemanticGraph graph = sentence.get(SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation.class);
        System.out.println(graph.toString("plain"));

    }

    
}
 
Developer: automenta, Project: netentionj-desktop, Lines: 67, Source: CoreNLPDemo.java


Note: The edu.stanford.nlp.trees.TreeCoreAnnotations class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult each project's license before redistributing or reusing the code; do not reproduce this article without permission.