This article collects typical usage examples of the Java method edu.stanford.nlp.pipeline.Annotation.get, gathered from open-source projects. If you are unsure what Annotation.get does or how to use it, the curated examples below should help; they also illustrate the enclosing class edu.stanford.nlp.pipeline.Annotation.
The following 15 code examples of Annotation.get are shown, ordered by popularity by default.
Example 1: extractSentences
import edu.stanford.nlp.pipeline.Annotation; // import the package/class this method depends on
/**
* Splits a document into an array of sentences.
*
* @param text the document text to split
* @return one string per sentence, in document order
* @throws Exception if annotation fails
*/
public static String[] extractSentences(String text) throws Exception {
Properties props = new Properties();
props.put("annotators", "tokenize, ssplit");
StanfordCoreNLP pipeline = new StanfordCoreNLP(props); // pass props, otherwise the full default annotator set is loaded
Annotation document = new Annotation(text);
pipeline.annotate(document);
List<CoreMap> sentences = document.get(SentencesAnnotation.class);
String[] sentenceList = new String[sentences.size()];
for (int i = 0; i < sentenceList.length; i++) {
CoreMap sentence = sentences.get(i);
sentenceList[i] = sentence.toString();
}
return sentenceList;
}
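A minimal usage sketch (the input string is illustrative). Note that constructing a StanfordCoreNLP pipeline is expensive, so production code would typically build it once and reuse it rather than rebuilding it on every call:

public static void main(String[] args) throws Exception {
String[] sentences = extractSentences("Dr. Smith went to Washington. He arrived on Monday.");
for (String sentence : sentences) {
System.out.println(sentence);
}
}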
Example 2: getStanfordSentimentRate
import edu.stanford.nlp.pipeline.Annotation; // import the package/class this method depends on
public int getStanfordSentimentRate(String sentimentText) {
Properties props = new Properties();
props.setProperty("annotators", "tokenize, ssplit, parse, sentiment");
StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
int totalRate = 0;
// crude sentence split on periods; the ssplit annotator re-splits each chunk anyway
String[] linesArr = sentimentText.split("\\.");
for (int i = 0; i < linesArr.length; i++) {
if (!linesArr[i].trim().isEmpty()) { // split() never yields null elements, but it can yield empty ones
Annotation annotation = pipeline.process(linesArr[i]);
for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
Tree tree = sentence.get(SentimentCoreAnnotations.SentimentAnnotatedTree.class);
int score = RNNCoreAnnotations.getPredictedClass(tree);
totalRate = totalRate + (score - 2);
}
}
}
return totalRate;
}
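The RNN sentiment model assigns each sentence a class from 0 (very negative) to 4 (very positive), so score - 2 recenters each sentence at 0 and totalRate sums those offsets. A hedged usage sketch, assuming rater is an instance of the enclosing class (whose name this excerpt does not show):

int rate = rater.getStanfordSentimentRate("I love this phone. The battery is terrible.");
if (rate > 0) {
System.out.println("leans positive");
} else if (rate < 0) {
System.out.println("leans negative");
} else {
System.out.println("roughly neutral");
}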
Example 3: traffer
import edu.stanford.nlp.pipeline.Annotation; // import the package/class this method depends on
public static String traffer(String word) {
List<String> lemmas = new LinkedList<String>();
// create an empty Annotation just with the given text
Annotation document = new Annotation(word);
// run all Annotators on this text
stanfordCoreNLP.annotate(document);
// Iterate over all of the sentences found
List<CoreMap> sentences = document.get(SentencesAnnotation.class);
for (CoreMap sentence : sentences) {
// Iterate over all tokens in a sentence
for (CoreLabel token : sentence.get(TokensAnnotation.class)) {
// Retrieve and add the lemma for each word into the list of lemmas
lemmas.add(token.get(LemmaAnnotation.class));
}
}
if (lemmas.size() != 1) {
System.err.println("Warning: expected exactly one lemma for \"" + word + "\" but got " + lemmas.size());
}
return lemmas.get(0);
}
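The static stanfordCoreNLP field is not shown in this excerpt; a plausible initialization, assuming the minimal annotator chain that lemmatization requires:

private static final StanfordCoreNLP stanfordCoreNLP;
static {
Properties props = new Properties();
props.setProperty("annotators", "tokenize, ssplit, pos, lemma"); // lemma depends on pos
stanfordCoreNLP = new StanfordCoreNLP(props);
}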
Example 4: lemmatize
import edu.stanford.nlp.pipeline.Annotation; // import the package/class this method depends on
public List<List<String>> lemmatize(String documentText)
{
List<List<String>> lemmas = new ArrayList<List<String>>();
// create an empty Annotation just with the given text
Annotation document = new Annotation(documentText);
// run all Annotators on this text
this.parser.annotate(document);
// Iterate over all of the sentences found
List<CoreMap> sentences = document.get(SentencesAnnotation.class);
for(CoreMap sentence: sentences) {
// Iterate over all tokens in a sentence
List<String> sentence_lemmas = new ArrayList<String>();
for (CoreLabel token: sentence.get(TokensAnnotation.class)) {
// Retrieve and add the lemma for each word into the
// list of lemmas
sentence_lemmas.add(token.get(LemmaAnnotation.class));
}
lemmas.add(sentence_lemmas);
}
return lemmas;
}
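The parser field is likewise not shown; a hedged sketch of a constructor that could back it (the class name StanfordLemmatizer is an assumption, not taken from the source):

public StanfordLemmatizer() { // hypothetical enclosing class
Properties props = new Properties();
props.setProperty("annotators", "tokenize, ssplit, pos, lemma");
this.parser = new StanfordCoreNLP(props);
}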
Example 5: tagAndTokenize
import edu.stanford.nlp.pipeline.Annotation; // import the package/class this method depends on
public Pair<List<String>, List<String>> tagAndTokenize(String documentText)
{
List<String> tags = new ArrayList<String>();
List<String> tokens = new ArrayList<String>();
// create an empty Annotation just with the given text
Annotation document = new Annotation(documentText);
// run all Annotators on this text
this.parser.annotate(document);
// Iterate over all of the sentences found
List<CoreMap> sentences = document.get(SentencesAnnotation.class);
for(CoreMap sentence: sentences) {
// Iterate over all tokens in a sentence
for (CoreLabel token: sentence.get(TokensAnnotation.class)) {
// Retrieve the POS tag and the surface form of each token
tags.add(token.get(PartOfSpeechAnnotation.class));
tokens.add(token.word());
}
}
return new Pair<List<String>, List<String>>(tags, tokens);
}
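A short usage sketch; nlp stands in for an instance of the enclosing class, whose name this excerpt does not show:

Pair<List<String>, List<String>> tagged = nlp.tagAndTokenize("Dogs bark loudly.");
List<String> tags = tagged.first();    // POS tags, e.g. NNS, VBP, RB, .
List<String> tokens = tagged.second(); // surface forms, e.g. Dogs, bark, loudly, .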
Example 6: tag
import edu.stanford.nlp.pipeline.Annotation; // import the package/class this method depends on
public List<String> tag(String documentText)
{
List<String> tags = new ArrayList<String>();
// create an empty Annotation just with the given text
Annotation document = new Annotation(documentText);
// run all Annotators on this text
this.parser.annotate(document);
// Iterate over all of the sentences found
List<CoreMap> sentences = document.get(SentencesAnnotation.class);
for(CoreMap sentence: sentences) {
// Iterate over all tokens in a sentence
for (CoreLabel token: sentence.get(TokensAnnotation.class)) {
// Retrieve and add the POS tag for each word into the list of tags
tags.add(token.get(PartOfSpeechAnnotation.class));
}
}
return tags;
}
Example 7: lemmatizer
import edu.stanford.nlp.pipeline.Annotation; // import the package/class this method depends on
/** @return Lemmatized string, input string can be a word, sentence or paragraph */
public static String lemmatizer(String string) {
List<String> lemmas = new ArrayList<>();
// create an empty Annotation just with the given text
Annotation annotation = new Annotation(string);
// run all Annotators on this string
pipeline.annotate(annotation);
// Iterate over all of the sentences found
List<CoreMap> sentences = annotation.get(CoreAnnotations.SentencesAnnotation.class);
for (CoreMap sentence : sentences) {
// Iterate over all tokens in a sentence
for (CoreLabel token : sentence.get(CoreAnnotations.TokensAnnotation.class)) {
// Retrieve and add the lemma for each word into the list of lemmas
lemmas.add(token.get(CoreAnnotations.LemmaAnnotation.class));
}
}
return String.join(" ", lemmas);
}
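As in Example 3, the static pipeline field is assumed elsewhere in the class; a minimal sketch of what it might look like:

private static final StanfordCoreNLP pipeline;
static {
Properties props = new Properties();
props.setProperty("annotators", "tokenize, ssplit, pos, lemma");
pipeline = new StanfordCoreNLP(props);
}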
Example 8: prepareSUTParser
import edu.stanford.nlp.pipeline.Annotation; // import the package/class this method depends on
/**
* Prepares the check for a temporal expression.
*
* @param cell
* Holds the column's cell
* @param pipeline
* Used for temporal expressions.
* @param result
* Holds the intermediate result before executing this operation.
* @return the intermediate result after executing this operation;
* incremented by one if the cell contains a temporal expression.
*/
private int prepareSUTParser(String cell, AnnotationPipeline pipeline,
int result) {
String value = cell.trim();
if (!value.isEmpty() && !value.equals("-") && !value.equals("--")
&& !value.equals("---") && !value.equals("n/a")
&& !value.equals("N/A") && !value.equals("(n/a)")
&& !value.equals("Unknown") && !value.equals("unknown")
&& !value.equals("?") && !value.equals("??")
&& !value.equals(".") && !value.equals("null")
&& !value.equals("NULL") && !value.equals("Null")) {
Annotation annotation = new Annotation(cell);
annotation.set(CoreAnnotations.DocDateAnnotation.class,
"2013-07-14");
pipeline.annotate(annotation);
List<CoreMap> timexAnnsAll = annotation
.get(TimeAnnotations.TimexAnnotations.class);
if (timexAnnsAll != null && !timexAnnsAll.isEmpty()) {
result++;
}
}
return result;
}
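The pipeline argument must have SUTime wired in. A hedged construction sketch following the pattern of Stanford's SUTime demo (imports from edu.stanford.nlp.pipeline and edu.stanford.nlp.time are assumed; since prepareSUTParser is private, the call would sit inside the same class):

Properties props = new Properties();
AnnotationPipeline sutimePipeline = new AnnotationPipeline();
sutimePipeline.addAnnotator(new TokenizerAnnotator(false));
sutimePipeline.addAnnotator(new WordsToSentencesAnnotator(false));
sutimePipeline.addAnnotator(new POSTaggerAnnotator(false));
sutimePipeline.addAnnotator(new TimeAnnotator("sutime", props));
int hits = prepareSUTParser("July 14, 2013", sutimePipeline, 0); // 1 if a TIMEX is found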
Example 9: extractNER
import edu.stanford.nlp.pipeline.Annotation; // import the package/class this method depends on
public static List<String> extractNER(String doc){
Annotation document = new Annotation(doc);
pipeline.annotate(document);
List<CoreMap> sentences = document.get(CoreAnnotations.SentencesAnnotation.class);
List<String> result = new ArrayList<String>();
for(CoreMap sentence: sentences) {
// traversing the words in the current sentence
// a CoreLabel is a CoreMap with additional token-specific methods
for (CoreLabel token: sentence.get(CoreAnnotations.TokensAnnotation.class)) {
// the token text and POS tag are read here only for illustration; they are unused below
String word = token.get(CoreAnnotations.TextAnnotation.class);
String pos = token.get(CoreAnnotations.PartOfSpeechAnnotation.class);
// the NER label is what this method actually collects
String ne = token.get(CoreAnnotations.NamedEntityTagAnnotation.class);
result.add(ne);
}
}
return result;
}
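The static pipeline is again not shown; NER labels require the chain up through the ner annotator. A hedged sketch, plus the kind of output to expect:

private static final StanfordCoreNLP pipeline;
static {
Properties props = new Properties();
props.setProperty("annotators", "tokenize, ssplit, pos, lemma, ner");
pipeline = new StanfordCoreNLP(props);
}
// extractNER("Barack Obama was born in Hawaii") would typically yield
// [PERSON, PERSON, O, O, O, LOCATION], though exact labels depend on the models used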
Example 10: getGender
import edu.stanford.nlp.pipeline.Annotation; // import the package/class this method depends on
@Override
public Gender getGender(String name) {
Annotation document = new Annotation(name);
pipeline.annotate(document);
for (CoreMap sentence : document.get(CoreAnnotations.SentencesAnnotation.class)) {
for (CoreLabel token : sentence.get(CoreAnnotations.TokensAnnotation.class)) {
String gender = token.get(MachineReadingAnnotations.GenderAnnotation.class);
// System.out.println(token + ":" + gender);
if (gender != null) {
if (gender.equals("MALE")) {
return Gender.MALE;
} else if (gender.equals("FEMALE")) {
return Gender.FEMALE;
}
}
}
}
return Gender.UNKNOWN;
}
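GenderAnnotation is populated by the gender annotator in the CoreNLP releases that ship it; a hedged sketch of a pipeline setup that this method could rely on:

Properties props = new Properties();
props.setProperty("annotators", "tokenize, ssplit, pos, gender"); // "gender" runs after POS tagging
StanfordCoreNLP pipeline = new StanfordCoreNLP(props);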
Example 11: getExampleTextFromSerGz
import edu.stanford.nlp.pipeline.Annotation; // import the package/class this method depends on
private static String getExampleTextFromSerGz(File f)
{
String result = "";
Annotation deserDoc = EntityLabeler.deserializeAnnotatedDoc(f.getAbsolutePath());
List<CoreMap> sentences = deserDoc.get(SentencesAnnotation.class);
for (int sentencenum = 0; sentencenum < sentences.size(); sentencenum++)
{
CoreMap sentence = sentences.get(sentencenum);
List<CoreLabel> labels = sentence.get(TokensAnnotation.class);
for (int i = 0; i < labels.size(); i++)
{
CoreLabel token = labels.get(i);
String tokenstring = token.get(TextAnnotation.class);
result += " " + tokenstring;
}
result = result.trim() + "\n";
}
return result;
}
Example 12: simpleTokenization
import edu.stanford.nlp.pipeline.Annotation; // import the package/class this method depends on
public LinkedList<String> simpleTokenization(String text) {
LinkedList<String> res = new LinkedList<>();
if (text != null) {
Annotation qaTokens = new Annotation(text);
pipelineTokens.annotate(qaTokens);
List<CoreMap> qssTokens = qaTokens.get(CoreAnnotations.SentencesAnnotation.class);
for (CoreMap sentenceTokens : qssTokens) {
ArrayList<CoreLabel> tokens = (ArrayList<CoreLabel>) sentenceTokens.get(CoreAnnotations.TokensAnnotation.class);
for (CoreLabel t : tokens) {
String lemma = t.lemma();
String pos = t.tag();
if ((pos.startsWith("N") || pos.startsWith("V")) && !stopwords.contains(lemma)) {
res.add(lemma);
}
}
}
}
return res;
}
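Both pipelineTokens and stopwords are fields assumed by this method; a hedged sketch of how they might be set up (the stop-word entries are purely illustrative):

Properties props = new Properties();
props.setProperty("annotators", "tokenize, ssplit, pos, lemma"); // t.lemma() and t.tag() need pos + lemma
StanfordCoreNLP pipelineTokens = new StanfordCoreNLP(props);
Set<String> stopwords = new HashSet<>(Arrays.asList("be", "have", "do"));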
Example 13: parsingTest
import edu.stanford.nlp.pipeline.Annotation; // import the package/class this method depends on
private static void parsingTest()
{
// String exampleText = "The software developer who inserted a major security flaw into OpenSSL 1.2.4.8, using the file foo/bar/blah.php has said the error was \"quite trivial\" despite the severity of its impact, according to a new report. The Sydney Morning Herald published an interview today with Robin Seggelmann, who added the flawed code to OpenSSL, the world's most popular library for implementing HTTPS encryption in websites, e-mail servers, and applications. The flaw can expose user passwords and potentially the private key used in a website's cryptographic certificate (whether private keys are at risk is still being determined). This is a new paragraph about Apache Tomcat's latest update 7.0.1.";
String exampleText = "Microsoft Windows 7 before SP1 has Sun Java cross-site scripting vulnerability Java SE in file.php (refer to CVE-2014-1234).";
// String exampleText = "Oracle DBRM has vulnerability in ABCD plug-in via abcd.1234 (found on abcd.com).";
EntityLabeler labeler = new EntityLabeler();
Annotation doc = labeler.getAnnotatedDoc("My Doc", exampleText);
List<CoreMap> sentences = doc.get(SentencesAnnotation.class);
for ( CoreMap sentence : sentences)
{
for ( CoreLabel token : sentence.get(TokensAnnotation.class))
{
System.out.println(token.get(TextAnnotation.class) + "\t" + token.get(CyberAnnotation.class));
}
System.out.println("Entities:\n" + sentence.get(CyberEntityMentionsAnnotation.class));
System.out.println("Parse Tree:\n" + sentence.get(TreeAnnotation.class));
}
}
Example 14: annotate
import edu.stanford.nlp.pipeline.Annotation; // import the package/class this method depends on
@Override
public void annotate(Annotation annotation) {
if (annotation.has(CoreAnnotations.SentencesAnnotation.class)) {
for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
List<CoreLabel> tokens = sentence.get(CoreAnnotations.TokensAnnotation.class);
for (CoreLabel token : tokens) {
String pos = token.get(CoreAnnotations.PartOfSpeechAnnotation.class);
if (pos != null) {
token.set(PikesAnnotations.SimplePosAnnotation.class, AnnotatorUtils.getSimplePos(pos));
}
}
}
} else {
throw new RuntimeException("unable to find words/tokens in: " + annotation);
}
}
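A custom annotator like this one is typically registered through CoreNLP's customAnnotatorClass mechanism; the annotator name and class path below are illustrative, not taken from the source:

Properties props = new Properties();
props.setProperty("customAnnotatorClass.simplepos", "com.example.SimplePosAnnotator"); // hypothetical class
props.setProperty("annotators", "tokenize, ssplit, pos, simplepos");
StanfordCoreNLP pipeline = new StanfordCoreNLP(props);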
Example 15: main
import edu.stanford.nlp.pipeline.Annotation; // import the package/class this method depends on
public static void main(String[] s) {
Properties props = new Properties();
props.setProperty("annotators", "tokenize, ssplit, pos, lemma, ner, parse, dcoref");
StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
// read some text in the text variable
String text = "\"But I do not want to go among mad people,\" Alice remarked.\n" +
"\"Oh, you can not help that,\" said the Cat: \"we are all mad here. I am mad. You are mad.\"\n" +
"\"How do you know I am mad?\" said Alice.\n" +
"\"You must be,\" said the Cat, \"or you would not have come here.\" This is awful, bad, disgusting";
// create an empty Annotation just with the given text
Annotation document = new Annotation(text);
// run all Annotators on this text
pipeline.annotate(document);
List<CoreMap> sentences = document.get(CoreAnnotations.SentencesAnnotation.class);
for (CoreMap sentence : sentences) {
String sentiment = sentence.get(SentimentCoreAnnotations.SentimentClass.class);
System.out.println(sentiment + "\t" + sentence);
}
}