当前位置: 首页>>代码示例>>Java>>正文


Java SentencesAnnotation类代码示例

本文整理汇总了Java中edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation的典型用法代码示例。如果您正苦于以下问题:Java SentencesAnnotation类的具体用法?Java SentencesAnnotation怎么用?Java SentencesAnnotation使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。


SentencesAnnotation类属于edu.stanford.nlp.ling.CoreAnnotations包,在下文中一共展示了SentencesAnnotation类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: extractSentences

import edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation; //导入依赖的package包/类
/**
 * Splits a document into an array of sentence strings using the CoreNLP
 * tokenize + ssplit annotators.
 *
 * @param text the raw document text to segment
 * @return one entry per detected sentence, in document order
 * @throws Exception if the pipeline fails to annotate the text
 */
public static String[] extractSentences(String text) throws Exception {
	Properties props = new Properties();
	props.put("annotators", "tokenize, ssplit");
	// BUG FIX: the configured properties were previously discarded because
	// the pipeline was built with the no-arg constructor, which loads the
	// full default annotator chain. Pass the props so only tokenize/ssplit run.
	StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

	Annotation document = new Annotation(text);
	pipeline.annotate(document);

	List<CoreMap> sentences = document.get(SentencesAnnotation.class);
	String[] sentenceList = new String[sentences.size()];

	for (int i = 0; i < sentenceList.length; i++) {
		sentenceList[i] = sentences.get(i).toString();
	}

	return sentenceList;
}
 
开发者ID:NLPReViz,项目名称:emr-nlp-server,代码行数:26,代码来源:TextUtil.java

示例2: lemmatize

import edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation; //导入依赖的package包/类
/**
 * Runs the configured parser over {@code documentText} and collects the
 * lemma of every token, grouped by sentence.
 *
 * @param documentText raw text to analyse
 * @return one inner list of lemmas per sentence, in document order
 */
public List<List<String>> lemmatize(String documentText)
{
	List<List<String>> result = new ArrayList<List<String>>();

	// Annotate the whole document in a single pass.
	Annotation annotated = new Annotation(documentText);
	this.parser.annotate(annotated);

	// Gather the lemma of each token, one sub-list per sentence.
	for (CoreMap sent : annotated.get(SentencesAnnotation.class)) {
		List<String> perSentence = new ArrayList<String>();
		for (CoreLabel tok : sent.get(TokensAnnotation.class)) {
			perSentence.add(tok.get(LemmaAnnotation.class));
		}
		result.add(perSentence);
	}

	return result;
}
 
开发者ID:uwnlp,项目名称:recipe-interpretation,代码行数:26,代码来源:Lemmatizer.java

示例3: tagAndTokenize

import edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation; //导入依赖的package包/类
/**
 * Annotates {@code documentText} and returns the part-of-speech tag and
 * surface form of every token across all sentences.
 *
 * @param documentText raw text to analyse
 * @return a pair of (POS tags, token words), index-aligned with each other
 */
public Pair<List<String>, List<String>> tagAndTokenize(String documentText)
{
	List<String> posTags = new ArrayList<String>();
	List<String> words = new ArrayList<String>();

	// Annotate the document once with the instance's parser.
	Annotation annotated = new Annotation(documentText);
	this.parser.annotate(annotated);

	for (CoreMap sent : annotated.get(SentencesAnnotation.class)) {
		for (CoreLabel tok : sent.get(TokensAnnotation.class)) {
			// Record the POS tag and the token text side by side.
			posTags.add(tok.get(PartOfSpeechAnnotation.class));
			words.add(tok.word());
		}
	}

	return new Pair<List<String>, List<String>>(posTags, words);
}
 
开发者ID:uwnlp,项目名称:recipe-interpretation,代码行数:26,代码来源:Lemmatizer.java

示例4: tag

import edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation; //导入依赖的package包/类
/**
 * Annotates {@code documentText} and returns the part-of-speech tag of
 * every token, across all sentences, in document order.
 *
 * @param documentText raw text to analyse
 * @return the POS tag of each token
 */
public List<String> tag(String documentText)
{
	List<String> posTags = new ArrayList<String>();

	// Annotate the document once with the instance's parser.
	Annotation annotated = new Annotation(documentText);
	this.parser.annotate(annotated);

	for (CoreMap sent : annotated.get(SentencesAnnotation.class)) {
		for (CoreLabel tok : sent.get(TokensAnnotation.class)) {
			posTags.add(tok.get(PartOfSpeechAnnotation.class));
		}
	}

	return posTags;
}
 
开发者ID:uwnlp,项目名称:recipe-interpretation,代码行数:24,代码来源:Lemmatizer.java

示例5: traffer

import edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation; //导入依赖的package包/类
/**
 * Lemmatizes a single word via the shared CoreNLP pipeline.
 *
 * @param word a single word to lemmatize
 * @return the lemma of the word, or the word itself if the pipeline
 *         produced no tokens (e.g. empty or whitespace-only input)
 */
public static String traffer(String word) {
    List<String> lemmas = new LinkedList<String>();
    // create an empty Annotation just with the given text
    Annotation document = new Annotation(word);
    // run all Annotators on this text
    stanfordCoreNLP.annotate(document);
    // Iterate over all of the sentences found
    List<CoreMap> sentences = document.get(SentencesAnnotation.class);
    for (CoreMap sentence : sentences) {
        // Iterate over all tokens in a sentence
        for (CoreLabel token : sentence.get(TokensAnnotation.class)) {
            // Retrieve and add the lemma for each word into the list of lemmas
            lemmas.add(token.get(LemmaAnnotation.class));
        }
    }
    // A single input word should yield exactly one lemma; warn if not.
    if (lemmas.size() != 1) {
        System.out.println("Expected exactly one lemma for input but got " + lemmas.size());
    }
    // BUG FIX: previously threw IndexOutOfBoundsException when the tokenizer
    // produced no tokens; fall back to returning the input word unchanged.
    return lemmas.isEmpty() ? word : lemmas.get(0);
}
 
开发者ID:guozhaotong,项目名称:FacetExtract,代码行数:21,代码来源:StanfordLemmatizer.java

示例6: lemmatize

import edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation; //导入依赖的package包/类
/**
 * Takes a string and returns the lemma of every token in it, across all
 * sentences, in document order.
 *
 * @param documentText raw text to lemmatize
 * @return a flat list of lemmas
 */
public List<String> lemmatize(String documentText)
{
    List<String> collected = new LinkedList<String>();

    // Annotate the document once with this instance's pipeline.
    Annotation annotated = new Annotation(documentText);
    this.pipeline.annotate(annotated);

    // Walk every token of every sentence and record its lemma.
    for (CoreMap sent : annotated.get(SentencesAnnotation.class)) {
        for (CoreLabel tok : sent.get(TokensAnnotation.class)) {
            collected.add(tok.get(LemmaAnnotation.class));
        }
    }

    return collected;
}
 
开发者ID:JULIELab,项目名称:JEmAS,代码行数:28,代码来源:StanfordLemmatizer.java

示例7: annotationToSentenceList

import edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation; //导入依赖的package包/类
/**
 * Converts every CoreNLP sentence in {@code anno} to a {@link Sentence},
 * preserving document order.
 *
 * @param anno the annotated document
 * @param cOffset character offset applied to each converted sentence
 * @param gen UUID generator passed through to the wrapper
 * @return the converted sentences
 * @throws RuntimeException wrapping any {@link AnalyticException} from conversion
 */
private static List<Sentence> annotationToSentenceList(Annotation anno, int cOffset, final AnalyticUUIDGenerator gen) {
  List<Sentence> converted = new ArrayList<>();
  for (CoreMap cm : anno.get(SentencesAnnotation.class)) {
    try {
      converted.add(new CoreMapWrapper(cm, gen).toSentence(cOffset));
    } catch (AnalyticException e) {
      // Surface conversion failures as unchecked, matching the original contract.
      throw new RuntimeException(e);
    }
  }
  return converted;
}
 
开发者ID:hltcoe,项目名称:concrete-stanford-deprecated2,代码行数:17,代码来源:ConcreteStanfordTokensSentenceAnalytic.java

示例8: getObservedMWEs

import edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation; //导入依赖的package包/类
/**
 * Create the observed MWEs in the text.
 *
 * @param props the pipeline properties
 * @param text the text to annotate
 * @return the surface form of every MWE observed in the text
 */
protected List<String> getObservedMWEs(Properties props, String text) {
    // Build a pipeline from the supplied properties and annotate the text.
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
    Annotation document = new Annotation(text);
    pipeline.annotate(document);

    // Collect the form of each multi-word expression, sentence by sentence.
    List<String> observed = new ArrayList<>();
    for (CoreMap sentence : document.get(SentencesAnnotation.class)) {
        for (IMWE<IToken> mwe : sentence.get(JMWEAnnotation.class)) {
            observed.add(mwe.getForm());
        }
    }
    return observed;
}
 
开发者ID:toliwa,项目名称:CoreNLP-jMWE,代码行数:25,代码来源:AbstractJMWEAnnotatorTst.java

示例9: parsingTest

import edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation; //导入依赖的package包/类
/**
 * Manual smoke test: labels cyber-security entities in a hard-coded sample
 * sentence and prints, per sentence, every token with its label, the entity
 * mentions, and the parse tree.
 */
private static void parsingTest()
{
	String exampleText = "Microsoft Windows 7 before SP1 has Sun Java cross-site scripting vulnerability Java SE in file.php (refer to CVE-2014-1234).";
	EntityLabeler labeler = new EntityLabeler();
	Annotation doc = labeler.getAnnotatedDoc("My Doc", exampleText);

	for (CoreMap sentence : doc.get(SentencesAnnotation.class))
	{
		// Token text alongside its cyber-entity annotation.
		for (CoreLabel token : sentence.get(TokensAnnotation.class))
		{
			System.out.println(token.get(TextAnnotation.class) + "\t" + token.get(CyberAnnotation.class));
		}

		System.out.println("Entities:\n" + sentence.get(CyberEntityMentionsAnnotation.class));

		System.out.println("Parse Tree:\n" + sentence.get(TreeAnnotation.class));
	}
}
 
开发者ID:stucco,项目名称:relation-bootstrap,代码行数:23,代码来源:Test.java

示例10: getExampleTextFromSerGz

import edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation; //导入依赖的package包/类
/**
 * Reconstructs the text of a serialized annotated document: one line per
 * sentence, tokens separated by single spaces.
 *
 * @param f a serialized (.ser.gz) document previously written by EntityLabeler
 * @return the detokenized text, one sentence per line
 */
private static String getExampleTextFromSerGz(File f)
{
	// Use a StringBuilder instead of repeated String concatenation to avoid
	// O(n^2) copying on large documents.
	StringBuilder result = new StringBuilder();

	Annotation deserDoc = EntityLabeler.deserializeAnnotatedDoc(f.getAbsolutePath());
	List<CoreMap> sentences = deserDoc.get(SentencesAnnotation.class);
	for (CoreMap sentence : sentences)
	{
		List<CoreLabel> labels = sentence.get(TokensAnnotation.class);
		for (int i = 0; i < labels.size(); i++)
		{
			// BUG FIX: the old code appended " " before every token and then
			// trimmed the whole accumulated string once per sentence, leaving
			// a stray leading space on every line after the first. Join the
			// tokens of each sentence with single interior spaces instead.
			if (i > 0)
			{
				result.append(' ');
			}
			result.append(labels.get(i).get(TextAnnotation.class));
		}
		result.append('\n');
	}

	return result.toString();
}
 
开发者ID:stucco,项目名称:relation-bootstrap,代码行数:24,代码来源:Test.java

示例11: lemmatize

import edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation; //导入依赖的package包/类
/**
 * Lemmatizes {@code documentText} with the shared pipeline and returns the
 * lemma of every token, across all sentences, in document order.
 *
 * @param documentText raw text to lemmatize
 * @return a flat list of lemmas
 */
public static List<String> lemmatize(String documentText){
	List<String> collected = new LinkedList<String>();

	// Annotate the whole document in one pass with the shared pipeline.
	Annotation annotated = new Annotation(documentText);
	pipeline.annotate(annotated);

	// Record each token's lemma, sentence by sentence.
	for (CoreMap sent : annotated.get(SentencesAnnotation.class)) {
		for (CoreLabel tok : sent.get(TokensAnnotation.class)) {
			collected.add(tok.get(LemmaAnnotation.class));
		}
	}
	return collected;
}
 
开发者ID:NEO-IE,项目名称:numbertron,代码行数:21,代码来源:LemmaUtils.java

示例12: lemmatize

import edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation; //导入依赖的package包/类
/**
 * Lemmatizes {@code documentText} using this instance's pipeline.
 *
 * @param documentText raw text to lemmatize
 * @return the lemma of every token, in document order
 */
public List<String> lemmatize(String documentText) {
    List<String> collected = new LinkedList<String>();

    // Annotate once, then walk every token of every sentence.
    Annotation annotated = new Annotation(documentText);
    this.pipeline.annotate(annotated);

    for (CoreMap sent : annotated.get(SentencesAnnotation.class)) {
        for (CoreLabel tok : sent.get(TokensAnnotation.class)) {
            collected.add(tok.get(LemmaAnnotation.class));
        }
    }
    return collected;
}
 
开发者ID:tudarmstadt-lt,项目名称:sentiment,代码行数:19,代码来源:StanfordLemmetizer.java

示例13: main

import edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation; //导入依赖的package包/类
/**
 * Manual smoke test: configures the pipeline for pre-tokenized,
 * one-sentence-per-line input, annotates a two-line sample, and logs every
 * token followed by a per-sentence marker.
 */
public static void main(String[] args) {
	X.prop.put("tokenized", "true");
	X.prop.put("singleSentences", "true");
	initPipeline(true, true);

	Annotation ann = new Annotation(
			"BOSTON 69 65 .515 5 1/2\n1. Michelle Freeman ( Jamaica ) 12.71 seconds");
	pipeline.annotate(ann);

	for (CoreMap sentence : ann.get(SentencesAnnotation.class)) {
		for (CoreLabel tok : sentence.get(TokensAnnotation.class)) {
			logger.info(tok.get(TextAnnotation.class));
		}
		logger.info("sentence done");
	}

}
 
开发者ID:zhangcongle,项目名称:NewsSpikeRe,代码行数:20,代码来源:Preprocessing.java

示例14: parse

import edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation; //导入依赖的package包/类
/**
 * Annotates {@code text}, then predicts and prints a type label for every
 * named-entity mention found in each sentence.
 *
 * NOTE(review): the printed line contains the mention text twice (after
 * "]mention" and after ") = ") — output preserved exactly as the original;
 * confirm whether the duplication is intentional.
 *
 * @param sys the FIGER system used for prediction
 * @param lineId identifier of the input line, echoed in the output
 * @param text the text to annotate
 */
public static void parse(FigerSystem sys, int lineId, String text) {
	Annotation annotation = new Annotation(text);
	Preprocessing.pipeline.annotate(annotation);

	// Predict a label for each named-entity mention, sentence by sentence.
	int sentId = 0;
	for (CoreMap sentence : annotation.get(SentencesAnnotation.class)) {
		for (Pair<Integer, Integer> offset : getNamedEntityMentions(sentence)) {
			String label = sys.predict(annotation, sentId,
					offset.first, offset.second);
			String mention = StringUtils.joinWithOriginalWhiteSpace(sentence.get(
					TokensAnnotation.class).subList(offset.first, offset.second));
			System.out.println("[l" + lineId + "][s" + sentId + "]mention"
					+ mention + "(" + offset.first + ","
					+ offset.second + ") = " + mention + ", pred = "
					+ label);
		}
		sentId++;
	}
}
 
开发者ID:zhangcongle,项目名称:NewsSpikeRe,代码行数:25,代码来源:FigerSystem.java

示例15: parse

import edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation; //导入依赖的package包/类
/**
 * Annotates {@code text}, then predicts and prints a type label for every
 * named-entity mention found in each sentence.
 *
 * NOTE(review): the printed line contains the mention text twice (after
 * "]mention" and after ") = ") — output preserved exactly as the original;
 * confirm whether the duplication is intentional.
 *
 * @param sys the system used for prediction
 * @param lineId identifier of the input line, echoed in the output
 * @param text the text to annotate
 */
public static void parse(ParseStanfordFigerReverb sys, int lineId, String text) {
	Annotation annotation = new Annotation(text);
	Preprocessing.pipeline.annotate(annotation);

	// Predict a label for each named-entity mention, sentence by sentence.
	int sentId = 0;
	for (CoreMap sentence : annotation.get(SentencesAnnotation.class)) {
		for (Pair<Integer, Integer> offset : getNamedEntityMentions(sentence)) {
			String label = sys.predict(annotation, sentId, offset.first, offset.second);
			String mention = StringUtils.joinWithOriginalWhiteSpace(
					sentence.get(TokensAnnotation.class).subList(offset.first, offset.second));
			System.out.println("[l" + lineId + "][s" + sentId + "]mention" + mention + "(" + offset.first + ","
					+ offset.second + ") = " + mention + ", pred = " + label);
		}
		sentId++;
	}
}
 
开发者ID:zhangcongle,项目名称:NewsSpikeRe,代码行数:22,代码来源:ParseStanfordFigerReverb.java


注:本文中的edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。