当前位置: 首页>>代码示例>>Java>>正文


Java StanfordCoreNLP类代码示例

本文整理汇总了Java中edu.stanford.nlp.pipeline.StanfordCoreNLP的典型用法代码示例。如果您正苦于以下问题:Java StanfordCoreNLP类的具体用法?Java StanfordCoreNLP怎么用?Java StanfordCoreNLP使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


StanfordCoreNLP类属于edu.stanford.nlp.pipeline包,在下文中一共展示了StanfordCoreNLP类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: extractSentences

import edu.stanford.nlp.pipeline.StanfordCoreNLP; //导入依赖的package包/类
/**
 * Splits a document into an array of sentences using the CoreNLP
 * tokenize + ssplit annotators.
 *
 * @param text raw document text to segment
 * @return one entry per detected sentence, in document order
 * @throws Exception propagated from the annotation pipeline
 */
public static String[] extractSentences(String text) throws Exception {
	Properties props = new Properties();
	props.put("annotators", "tokenize, ssplit");
	// Bug fix: props was previously ignored because the no-arg constructor
	// was used, which loads the (much heavier) default annotator set.
	StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

	Annotation document = new Annotation(text);
	pipeline.annotate(document);

	List<CoreMap> sentences = document.get(SentencesAnnotation.class);
	String[] sentenceList = new String[sentences.size()];

	for (int i = 0; i < sentenceList.length; i++) {
		sentenceList[i] = sentences.get(i).toString();
	}

	return sentenceList;
}
 
开发者ID:NLPReViz,项目名称:emr-nlp-server,代码行数:26,代码来源:TextUtil.java

示例2: getStanfordSentimentRate

import edu.stanford.nlp.pipeline.StanfordCoreNLP; //导入依赖的package包/类
/**
 * Computes an aggregate sentiment score for the given text.
 * The text is split on periods; each fragment is scored by the Stanford
 * sentiment model (class 0..4), re-centered around neutral (score - 2),
 * and the centered scores are summed.
 *
 * @param sentimentText text to score
 * @return sum of centered per-sentence sentiment scores
 */
public int getStanfordSentimentRate(String sentimentText) {
    Properties props = new Properties();
    props.setProperty("annotators", "tokenize, ssplit, parse, sentiment");
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

    int totalRate = 0;
    for (String fragment : sentimentText.split("\\.")) {
        if (fragment == null) {
            continue;
        }
        Annotation annotation = pipeline.process(fragment);
        for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
            Tree sentimentTree = sentence.get(SentimentCoreAnnotations.SentimentAnnotatedTree.class);
            // Predicted class is 0 (very negative) .. 4 (very positive); shift so 2 = neutral.
            totalRate += RNNCoreAnnotations.getPredictedClass(sentimentTree) - 2;
        }
    }
    return totalRate;
}
 
开发者ID:wso2-incubator,项目名称:twitter-sentiment-analysis,代码行数:20,代码来源:StanfordNLP.java

示例3: annotate

import edu.stanford.nlp.pipeline.StanfordCoreNLP; //导入依赖的package包/类
/**
 * Annotates {@code ann} with the given pipeline. If the document already
 * carries sentence annotations and contains exactly one sentence, stale
 * natural-logic / OpenIE / dependency annotations are removed from that
 * sentence first so the pipeline can recompute them. Documents that are
 * already split into more than one sentence are left untouched.
 */
protected void annotate(StanfordCoreNLP pipeline, Annotation ann) {
	List<CoreMap> existing = ann.get(CoreAnnotations.SentencesAnnotation.class);

	if (existing == null) {
		// Never annotated before: just run the pipeline.
		pipeline.annotate(ann);
	}
	else if (existing.size() == 1) {
		CoreMap sole = existing.get(0);

		// Clear per-token natural-logic markings.
		for (CoreLabel token : sole.get(CoreAnnotations.TokensAnnotation.class)) {
			token.remove(NaturalLogicAnnotations.OperatorAnnotation.class);
			token.remove(NaturalLogicAnnotations.PolarityAnnotation.class);
		}

		// Clear sentence-level OpenIE results and dependency graphs.
		sole.remove(NaturalLogicAnnotations.RelationTriplesAnnotation.class);
		sole.remove(NaturalLogicAnnotations.EntailedSentencesAnnotation.class);
		sole.remove(SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class);
		sole.remove(SemanticGraphCoreAnnotations.EnhancedDependenciesAnnotation.class);
		sole.remove(SemanticGraphCoreAnnotations.EnhancedPlusPlusDependenciesAnnotation.class);

		pipeline.annotate(ann);
	}
}
 
开发者ID:igr,项目名称:parlo,代码行数:24,代码来源:CoreNLPEngine.java

示例4: minimize

import edu.stanford.nlp.pipeline.StanfordCoreNLP; //导入依赖的package包/类
/** 
 * Given an input sentence, parser, mode and a dictionary, make extractions and then minimize them accordingly.
 * The parsing occurs INSIDE this function.
 * 
 * @param sentence - input sentence
 * @param parser - dependency parse pipeline for the sentence
 * @param mode - minimization mode
 * @param d - dictionary (for MinIE-D)
 */
public void minimize(String sentence, StanfordCoreNLP parser, Mode mode, Dictionary d) {
    // Extract propositions with ClausIE first (the sentence is parsed here).
    ClausIE clausie = new ClausIE();
    clausie.setSemanticGraph(CoreNLPUtils.parse(parser, sentence));
    clausie.detectClauses();
    clausie.generatePropositions(clausie.getSemanticGraph());

    // Carry the ClausIE results over and annotate polarity/modality.
    this.setSemanticGraph(clausie.getSemanticGraph());
    this.setPropositions(clausie);
    this.setPolarity();
    this.setModality();

    // Apply the requested minimization (COMPLETE mode performs none).
    switch (mode) {
        case SAFE:
            this.minimizeSafeMode();
            break;
        case DICTIONARY:
            this.minimizeDictionaryMode(d.words());
            break;
        case AGGRESSIVE:
            this.minimizeAggressiveMode();
            break;
        default:
            break;
    }

    this.removeDuplicates();
}
 
开发者ID:gkiril,项目名称:minie,代码行数:33,代码来源:MinIE.java

示例5: main

import edu.stanford.nlp.pipeline.StanfordCoreNLP; //导入依赖的package包/类
/**
 * Demo entry point: runs MinIE (SAFE mode) over a fixed sentence and
 * prints each extracted triple with its factuality and attribution.
 */
public static void main(String args[]) {
    // Initialize the dependency-parsing pipeline once.
    StanfordCoreNLP parser = CoreNLPUtils.StanfordDepNNParser();

    String sentence = "The Joker believes that the hero Batman was not actually born in foggy Gotham City.";

    // Run extraction in SAFE mode.
    MinIE minie = new MinIE(sentence, parser, MinIE.Mode.SAFE);

    System.out.println("\nInput sentence: " + sentence);
    System.out.println("=============================");
    System.out.println("Extractions:");
    for (AnnotatedProposition ap: minie.getPropositions()) {
        System.out.println("\tTriple: " + ap.getTripleAsString());
        System.out.print("\tFactuality: " + ap.getFactualityAsString());
        // Attribution phrase is optional; print a placeholder when absent.
        if (ap.getAttribution().getAttributionPhrase() == null)
            System.out.print("\tAttribution: NONE");
        else
            System.out.print("\tAttribution: " + ap.getAttribution().toStringCompact());
        System.out.println("\n\t----------");
    }

    System.out.println("\n\nDONE!");
}
 
开发者ID:gkiril,项目名称:minie,代码行数:27,代码来源:Demo.java

示例6: main

import edu.stanford.nlp.pipeline.StanfordCoreNLP; //导入依赖的package包/类
/**
 * Demo entry point: segments a Chinese sentence with a custom CoreNLP
 * configuration and prints one token per line.
 */
public static void main(String[] args) {

        // Build the pipeline from a custom properties file on the classpath.
        StanfordCoreNLP pipeline = new StanfordCoreNLP("CoreNLP-chinese.properties");

        // Annotate the sample text.
        Annotation annotation = pipeline.process("我爱北京天安门");

        // Take the first (and only) sentence from the result.
        List<CoreMap> sentences = annotation.get(CoreAnnotations.SentencesAnnotation.class);
        CoreMap sentence = sentences.get(0);

        // Print each token of the sentence.
        List<CoreLabel> tokens = sentence.get(CoreAnnotations.TokensAnnotation.class);
        System.out.println("字/词");
        System.out.println("-----------------------------");
        for (CoreLabel token : tokens) {
            String word = token.getString(TextAnnotation.class);
            // POS / NER lookups are available via PartOfSpeechAnnotation /
            // NamedEntityTagAnnotation if needed.
            System.out.println(word);
        }

    }
 
开发者ID:huyang1,项目名称:LDA,代码行数:27,代码来源:CoreNLPSegment.java

示例7: ExtractPosTagsFile

import edu.stanford.nlp.pipeline.StanfordCoreNLP; //导入依赖的package包/类
/**
 * Reads the given file, POS-tags every extracted line, and returns one
 * {@code ExtractPosTag} (original token text + POS tag) per token.
 *
 * @param filePath file to read and tag
 * @return tagged tokens for all lines, in document order
 * @throws Exception propagated from file extraction or annotation
 */
@Override
public List<ExtractPosTag> ExtractPosTagsFile(File filePath) throws Exception {
    List<String> extractedLines = ExtractData(filePath);
    List<ExtractPosTag> taggedTokens = new ArrayList<>();

    Properties props = new Properties();
    props.setProperty("annotators", "tokenize,ssplit,pos");
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

    for (String line : extractedLines) {
        Annotation annotation = new Annotation(line);
        pipeline.annotate(annotation);
        for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
            sentence.get(TokensAnnotation.class).forEach((tok) ->
                taggedTokens.add(new ExtractPosTag(tok.originalText(), tok.get(PartOfSpeechAnnotation.class))));
        }
    }
    return taggedTokens;
}
 
开发者ID:unsw-cse-soc,项目名称:Data-curation-API,代码行数:23,代码来源:ExtractPosTagData.java

示例8: ExtractPosTags

import edu.stanford.nlp.pipeline.StanfordCoreNLP; //导入依赖的package包/类
/**
 * POS-tags each input string and returns one {@code ExtractPosTag}
 * (original token text + POS tag) per token.
 *
 * @param inputData strings to tag
 * @return tagged tokens for all inputs, in order
 */
@Override
public List<ExtractPosTag> ExtractPosTags(List<String> inputData) 
{
    List<ExtractPosTag> taggedTokens = new ArrayList<>();

    Properties props = new Properties();
    props.setProperty("annotators", "tokenize,ssplit,pos");
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

    for (String input : inputData) {
        Annotation annotation = new Annotation(input);
        pipeline.annotate(annotation);
        for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
            sentence.get(TokensAnnotation.class).forEach((tok) ->
                taggedTokens.add(new ExtractPosTag(tok.originalText(), tok.get(PartOfSpeechAnnotation.class))));
        }
    }
    return taggedTokens;
}
 
开发者ID:unsw-cse-soc,项目名称:Data-curation-API,代码行数:23,代码来源:ExtractPosTagData.java

示例9: ExtractPosTagsSentence

import edu.stanford.nlp.pipeline.StanfordCoreNLP; //导入依赖的package包/类
/**
 * POS-tags a single sentence and returns one {@code ExtractPosTag}
 * (original token text + POS tag) per token.
 *
 * @param sentence text to tag
 * @return tagged tokens in sentence order
 */
@Override
public List<ExtractPosTag> ExtractPosTagsSentence(String sentence) 
{
    List<ExtractPosTag> taggedTokens = new ArrayList<>();

    Properties props = new Properties();
    props.setProperty("annotators", "tokenize,ssplit,pos");
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

    Annotation annotation = new Annotation(sentence);
    pipeline.annotate(annotation);

    for (CoreMap sent : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
        sent.get(TokensAnnotation.class).forEach((tok) ->
            taggedTokens.add(new ExtractPosTag(tok.originalText(), tok.get(PartOfSpeechAnnotation.class))));
    }
    return taggedTokens;
}
 
开发者ID:unsw-cse-soc,项目名称:Data-curation-API,代码行数:21,代码来源:ExtractPosTagData.java

示例10: PropertyVerbalizer

import edu.stanford.nlp.pipeline.StanfordCoreNLP; //导入依赖的package包/类
/**
 * Creates a verbalizer backed by WordNet and a single-sentence CoreNLP
 * parsing pipeline. Falls back to the bundled WordNet resource when no
 * dictionary is supplied.
 */
public PropertyVerbalizer(IRIConverter uriConverter, Dictionary wordnetDictionary) {
	this.uriConverter = uriConverter;

	try {
		// Use the caller-supplied dictionary when present, else the default resource.
		this.database = wordnetDictionary != null ? wordnetDictionary : Dictionary.getDefaultResourceInstance();
	} catch (JWNLException e) {
		throw new RuntimeException("Failed to create WordNet instance.", e);
	}

	preposition = new Preposition();

	// Local pipeline configured to treat each input as one sentence.
	Properties props = new Properties();
	props.put("annotators", "tokenize, ssplit, pos, lemma, parse");
	props.put("ssplit.isOneSentence", "true");
	pipeline = new StanfordCoreNLPWrapper(new StanfordCoreNLP(props));
	// Remote alternative, kept for reference:
	// pipeline = new StanfordCoreNLPWrapper(new
	// StanfordCoreNLPClient(props, "titan.informatik.uni-leipzig.de",
	// 9000));
}
 
开发者ID:dice-group,项目名称:BENGAL,代码行数:19,代码来源:PropertyVerbalizer.java

示例11: main

import edu.stanford.nlp.pipeline.StanfordCoreNLP; //导入依赖的package包/类
/**
 * A debugging method to try relation extraction from the console: reads
 * sentences interactively, runs the KBP pipeline, and prints each
 * sentence's KBP triples to stderr.
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
  // Command-line properties plus the full KBP annotator stack.
  Properties props = StringUtils.argsToProperties(args);
  props.setProperty("annotators", "tokenize,ssplit,pos,lemma,ner,regexner,parse,mention,coref,kbp");
  props.setProperty("regexner.mapping", "ignorecase=true,validpospattern=^(NN|JJ).*,edu/stanford/nlp/models/kbp/regexner_caseless.tab;edu/stanford/nlp/models/kbp/regexner_cased.tab");

  StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

  // Read-eval-print loop over console input.
  IOUtils.console("sentence> ", line -> {
    Annotation ann = new Annotation(line);
    pipeline.annotate(ann);
    for (CoreMap sentence : ann.get(CoreAnnotations.SentencesAnnotation.class)) {
      // Triples to stderr, the sentence itself to stdout.
      sentence.get(CoreAnnotations.KBPTriplesAnnotation.class).forEach(System.err::println);
      System.out.println(sentence);
    }
  });
}
 
开发者ID:intel-analytics,项目名称:InformationExtraction,代码行数:20,代码来源:KBPTest.java

示例12: main

import edu.stanford.nlp.pipeline.StanfordCoreNLP; //导入依赖的package包/类
/**
 * Demo: annotates a fixed sentence with the default CoreNLP pipeline,
 * runs relation extraction on top, and prints every relation mention.
 */
public static void main(String[] args){
    try {
      Properties props = StringUtils.argsToProperties(args);
      // Default pipeline configuration (annotator list not overridden here).
      StanfordCoreNLP pipeline = new StanfordCoreNLP();

      String sentence = "John Gerspach was named Chief Financial Officer of Citi in July 2009.";
      Annotation doc = new Annotation(sentence);
      pipeline.annotate(doc);

      // Relation extraction as an additional annotation pass.
      RelationExtractorAnnotator relationAnnotator = new RelationExtractorAnnotator(props);
      relationAnnotator.annotate(doc);

      for (CoreMap s : doc.get(CoreAnnotations.SentencesAnnotation.class)) {
        System.out.println("For sentence " + s.get(CoreAnnotations.TextAnnotation.class));
        for (RelationMention mention : s.get(RelationMentionsAnnotation.class)) {
          System.out.println(mention.toString());
        }
      }
    } catch (Exception e) {
      // Demo code: just dump the failure.
      e.printStackTrace();
    }
  }
 
开发者ID:intel-analytics,项目名称:InformationExtraction,代码行数:23,代码来源:JavaReExTest.java

示例13: main

import edu.stanford.nlp.pipeline.StanfordCoreNLP; //导入依赖的package包/类
/**
 * Interactive KBP driver: reads sentences from the console and prints
 * (to stderr) only the KBP triples whose relation is one of a small set
 * of interesting relations.
 */
public static void main(String[] args) throws IOException {
    // Command-line properties plus the full KBP annotator stack.
    Properties props = StringUtils.argsToProperties(args);
    props.setProperty("annotators", "tokenize,ssplit,pos,lemma,ner,regexner,parse,mention,coref,kbp");
    props.setProperty("regexner.mapping", "ignorecase=true,validpospattern=^(NN|JJ).*,edu/stanford/nlp/models/kbp/regexner_caseless.tab;edu/stanford/nlp/models/kbp/regexner_cased.tab");

    // Relations worth reporting.
    Set<String> interested = Stream.of("per:title", "per:employee_of", "org:top_members/employees").collect(Collectors.toSet());

    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
    IOUtils.console("sentence> ", line -> {
        Annotation ann = new Annotation(line);
        pipeline.annotate(ann);
        for (CoreMap sentence : ann.get(CoreAnnotations.SentencesAnnotation.class)) {
            sentence.get(CoreAnnotations.KBPTriplesAnnotation.class).forEach(r -> {
                // Filter triples down to the interesting relation types.
                if (interested.contains(r.relationGloss())) {
                    System.err.println(r);
                }
            });
        }
    });
}
 
开发者ID:intel-analytics,项目名称:InformationExtraction,代码行数:20,代码来源:InteractiveDriver.java

示例14: StanfordLemmetizer

import edu.stanford.nlp.pipeline.StanfordCoreNLP; //导入依赖的package包/类
/**
 * Builds the lemmatization pipeline: tokenize + ssplit + POS tagging
 * (a prerequisite for lemmatization) + lemma.
 *
 * StanfordCoreNLP loads a lot of models, so construct this once per
 * execution and reuse the instance.
 */
public StanfordLemmetizer() {
    Properties props = new Properties();
    props.put("annotators", "tokenize, ssplit, pos, lemma");

    // The pipeline tokenizes the input, then applies the configured
    // annotators in sequence; results come back as annotated CoreLabels.
    this.pipeline = new StanfordCoreNLP(props);
}
 
开发者ID:tudarmstadt-lt,项目名称:sentiment,代码行数:25,代码来源:StanfordLemmetizer.java

示例15: main

import edu.stanford.nlp.pipeline.StanfordCoreNLP; //导入依赖的package包/类
/**
 * Demo: annotates a short passage and prints each sentence preceded by
 * its sentiment class.
 */
public static void main(String[] s) {
    Properties props = new Properties();
    // Bug fix: "sentiment" was missing from the annotator list, so
    // SentimentCoreAnnotations.SentimentClass was never populated and every
    // sentence printed a null sentiment. The sentiment annotator requires
    // the parser, which was already enabled.
    props.setProperty("annotators", "tokenize, ssplit, pos, lemma, ner, parse, sentiment, dcoref");
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

    // read some text in the text variable
    String text = "\"But I do not want to go among mad people,\" Alice remarked.\n" +
            "\"Oh, you can not help that,\" said the Cat: \"we are all mad here. I am mad. You are mad.\"\n" +
            "\"How do you know I am mad?\" said Alice.\n" +
            "\"You must be,\" said the Cat, \"or you would not have come here.\" This is awful, bad, disgusting";

    // create an empty Annotation just with the given text
    Annotation document = new Annotation(text);

    // run all Annotators on this text
    pipeline.annotate(document);

    List<CoreMap> sentences = document.get(CoreAnnotations.SentencesAnnotation.class);
    for (CoreMap sentence : sentences) {
        String sentiment = sentence.get(SentimentCoreAnnotations.SentimentClass.class);
        System.out.println(sentiment + "\t" + sentence);
    }
}
 
开发者ID:Vedenin,项目名称:java_in_examples,代码行数:24,代码来源:StanfordCoreNLPTest.java


注:本文中的edu.stanford.nlp.pipeline.StanfordCoreNLP类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。