This page collects typical usage examples of the Java method edu.stanford.nlp.util.CoreMap.get. If you are unsure what CoreMap.get does, how to call it, or want to see it used in context, the curated examples below should help; you can also read more about the enclosing class, edu.stanford.nlp.util.CoreMap.
The following shows 15 code examples of the CoreMap.get method, ordered by popularity.
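Before the individual examples, here is a minimal, self-contained sketch of the pattern nearly all of them share: build a pipeline, annotate the text, then call CoreMap.get with annotation-key classes to walk sentences and tokens. The annotator list and sample text are our own illustration, not taken from any of the examples below.

import java.util.Properties;
import edu.stanford.nlp.ling.CoreAnnotations;
import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.util.CoreMap;

public class CoreMapGetDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("annotators", "tokenize, ssplit, pos");
        StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
        Annotation document = new Annotation("Stanford is in California. CoreNLP is a library.");
        pipeline.annotate(document);
        // CoreMap.get is keyed by annotation classes, not strings:
        for (CoreMap sentence : document.get(CoreAnnotations.SentencesAnnotation.class)) {
            for (CoreLabel token : sentence.get(CoreAnnotations.TokensAnnotation.class)) {
                System.out.println(token.word() + "/" + token.tag());
            }
        }
    }
}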
Example 1: getStanfordSentimentRate

import edu.stanford.nlp.util.CoreMap; // import the package/class this method depends on

public int getStanfordSentimentRate(String sentimentText) {
    Properties props = new Properties();
    props.setProperty("annotators", "tokenize, ssplit, parse, sentiment");
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
    int totalRate = 0;
    String[] linesArr = sentimentText.split("\\.");
    for (int i = 0; i < linesArr.length; i++) {
        // split() never yields null elements; skip empty fragments instead
        if (!linesArr[i].trim().isEmpty()) {
            Annotation annotation = pipeline.process(linesArr[i]);
            for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
                Tree tree = sentence.get(SentimentCoreAnnotations.SentimentAnnotatedTree.class);
                // the predicted class runs from 0 (very negative) to 4 (very positive); shift to -2..+2
                int score = RNNCoreAnnotations.getPredictedClass(tree);
                totalRate = totalRate + (score - 2);
            }
        }
    }
    return totalRate;
}
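Constructing a StanfordCoreNLP pipeline loads large models, so building one inside the method makes every call pay the startup cost, and splitting on "." by hand duplicates work the ssplit annotator already does. A minimal alternative sketch, assuming the same annotator list (the shared-field name is ours):

// Sketch: build the pipeline once and let ssplit handle sentence boundaries.
private static final StanfordCoreNLP SENTIMENT_PIPELINE;
static {
    Properties props = new Properties();
    props.setProperty("annotators", "tokenize, ssplit, parse, sentiment");
    SENTIMENT_PIPELINE = new StanfordCoreNLP(props);
}

public int getStanfordSentimentRate(String sentimentText) {
    int totalRate = 0;
    Annotation annotation = SENTIMENT_PIPELINE.process(sentimentText);
    for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
        Tree tree = sentence.get(SentimentCoreAnnotations.SentimentAnnotatedTree.class);
        // same -2..+2 shift as above
        totalRate += RNNCoreAnnotations.getPredictedClass(tree) - 2;
    }
    return totalRate;
}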
Example 2: annotate

import edu.stanford.nlp.util.CoreMap; // import the package/class this method depends on

protected void annotate(StanfordCoreNLP pipeline, Annotation ann) {
    if (ann.get(CoreAnnotations.SentencesAnnotation.class) == null) {
        // not annotated yet: run the full pipeline
        pipeline.annotate(ann);
    } else if (ann.get(CoreAnnotations.SentencesAnnotation.class).size() == 1) {
        // already annotated, single sentence: clear stale natlog/OpenIE and
        // dependency annotations before re-annotating
        CoreMap sentence = ann.get(CoreAnnotations.SentencesAnnotation.class).get(0);
        for (CoreLabel token : sentence.get(CoreAnnotations.TokensAnnotation.class)) {
            token.remove(NaturalLogicAnnotations.OperatorAnnotation.class);
            token.remove(NaturalLogicAnnotations.PolarityAnnotation.class);
        }
        sentence.remove(NaturalLogicAnnotations.RelationTriplesAnnotation.class);
        sentence.remove(NaturalLogicAnnotations.EntailedSentencesAnnotation.class);
        sentence.remove(SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class);
        sentence.remove(SemanticGraphCoreAnnotations.EnhancedDependenciesAnnotation.class);
        sentence.remove(SemanticGraphCoreAnnotations.EnhancedPlusPlusDependenciesAnnotation.class);
        pipeline.annotate(ann);
    }
}
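The annotation keys cleared above are written by the natlog and openie annotators and the dependency parser, so this method is aimed at re-running OpenIE over a single-sentence Annotation without stale results surviving. A hedged sketch of a pipeline that produces those keys (this is the usual OpenIE annotator chain, not taken from this repository):

Properties props = new Properties();
props.setProperty("annotators", "tokenize, ssplit, pos, lemma, depparse, natlog, openie");
StanfordCoreNLP pipeline = new StanfordCoreNLP(props);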
Example 3: traffer

import edu.stanford.nlp.util.CoreMap; // import the package/class this method depends on

public static String traffer(String word) {
    List<String> lemmas = new LinkedList<String>();
    // create an empty Annotation just with the given text
    Annotation document = new Annotation(word);
    // run all Annotators on this text (the shared pipeline must include pos and lemma)
    stanfordCoreNLP.annotate(document);
    // Iterate over all of the sentences found
    List<CoreMap> sentences = document.get(SentencesAnnotation.class);
    for (CoreMap sentence : sentences) {
        // Iterate over all tokens in a sentence
        for (CoreLabel token : sentence.get(TokensAnnotation.class)) {
            // Retrieve and add the lemma for each word into the list of lemmas
            lemmas.add(token.get(LemmaAnnotation.class));
        }
    }
    // the caller passes a single word, so exactly one lemma is expected
    if (lemmas.size() != 1) {
        System.err.println("Expected exactly one lemma for input: " + word);
    }
    return lemmas.get(0);
}
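The LemmaAnnotation read above is only present if the pipeline behind the stanfordCoreNLP field runs the lemma annotator, which in turn needs tokenize, ssplit, and pos upstream. A hypothetical initialization of that field (the field itself is not shown in the example):

// Hypothetical setup for the shared pipeline field used above.
private static final StanfordCoreNLP stanfordCoreNLP;
static {
    Properties props = new Properties();
    props.setProperty("annotators", "tokenize, ssplit, pos, lemma");
    stanfordCoreNLP = new StanfordCoreNLP(props);
}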
Example 4: lemmatizer

import edu.stanford.nlp.util.CoreMap; // import the package/class this method depends on

/** @return Lemmatized string; the input string can be a word, sentence, or paragraph */
public static String lemmatizer(String string) {
    List<String> lemmas = new ArrayList<>();
    // create an empty Annotation just with the given text
    Annotation annotation = new Annotation(string);
    // run all Annotators on this string
    pipeline.annotate(annotation);
    // Iterate over all of the sentences found
    List<CoreMap> sentences = annotation.get(CoreAnnotations.SentencesAnnotation.class);
    for (CoreMap sentence : sentences) {
        // Iterate over all tokens in a sentence
        for (CoreLabel token : sentence.get(CoreAnnotations.TokensAnnotation.class)) {
            // Retrieve and add the lemma for each word into the list of lemmas
            lemmas.add(token.get(CoreAnnotations.LemmaAnnotation.class));
        }
    }
    return String.join(" ", lemmas);
}
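A usage sketch; the sample text is ours, and the exact output can vary with the English models and CoreNLP version:

String lemmatized = lemmatizer("The quick brown foxes were jumping over fences.");
System.out.println(lemmatized);
// typically prints: the quick brown fox be jump over fence .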
Example 5: tagAndTokenize

import edu.stanford.nlp.util.CoreMap; // import the package/class this method depends on

public Pair<List<String>, List<String>> tagAndTokenize(String documentText) {
    List<String> tags = new ArrayList<String>();
    List<String> tokens = new ArrayList<String>();
    // create an empty Annotation just with the given text
    Annotation document = new Annotation(documentText);
    // run all Annotators on this text
    this.parser.annotate(document);
    // Iterate over all of the sentences found
    List<CoreMap> sentences = document.get(SentencesAnnotation.class);
    for (CoreMap sentence : sentences) {
        // Iterate over all tokens in a sentence
        for (CoreLabel token : sentence.get(TokensAnnotation.class)) {
            // Retrieve the POS tag and the surface form of each token
            tags.add(token.get(PartOfSpeechAnnotation.class));
            tokens.add(token.word());
        }
    }
    return new Pair<List<String>, List<String>>(tags, tokens);
}
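A usage sketch: the two lists are parallel, so index i of one describes index i of the other (sample text is ours):

Pair<List<String>, List<String>> tagged = tagAndTokenize("Dogs bark loudly.");
List<String> tags = tagged.first();
List<String> tokens = tagged.second();
for (int i = 0; i < tokens.size(); i++) {
    System.out.println(tokens.get(i) + " -> " + tags.get(i));
}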
Example 6: tag

import edu.stanford.nlp.util.CoreMap; // import the package/class this method depends on

public List<String> tag(String documentText) {
    List<String> tags = new ArrayList<String>();
    // create an empty Annotation just with the given text
    Annotation document = new Annotation(documentText);
    // run all Annotators on this text
    this.parser.annotate(document);
    // Iterate over all of the sentences found
    List<CoreMap> sentences = document.get(SentencesAnnotation.class);
    for (CoreMap sentence : sentences) {
        // Iterate over all tokens in a sentence
        for (CoreLabel token : sentence.get(TokensAnnotation.class)) {
            // Retrieve the POS tag for each token
            tags.add(token.get(PartOfSpeechAnnotation.class));
        }
    }
    return tags;
}
Example 7: findSentiment

import edu.stanford.nlp.util.CoreMap; // import the package/class this method depends on

public static int findSentiment(String tweet) {
    int mainSentiment = 0;
    if (tweet != null && tweet.length() > 0) {
        int longest = 0;
        Annotation annotation = pipeline.process(tweet);
        for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
            Tree tree = sentence.get(SentimentCoreAnnotations.SentimentAnnotatedTree.class);
            int sentiment = RNNCoreAnnotations.getPredictedClass(tree);
            String partText = sentence.toString();
            // keep the sentiment of the longest sentence as the tweet's overall sentiment
            if (partText.length() > longest) {
                mainSentiment = sentiment;
                longest = partText.length();
            }
        }
    }
    return mainSentiment;
}
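RNNCoreAnnotations.getPredictedClass returns an integer from 0 (very negative) to 4 (very positive). A small illustrative helper (the method name and labels are ours, not CoreNLP's) to turn the result into something readable:

// Illustrative helper: map the 0..4 sentiment class to a label.
static String sentimentLabel(int predictedClass) {
    switch (predictedClass) {
        case 0: return "Very negative";
        case 1: return "Negative";
        case 2: return "Neutral";
        case 3: return "Positive";
        case 4: return "Very positive";
        default: return "Unknown";
    }
}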
Example 8: extractNER

import edu.stanford.nlp.util.CoreMap; // import the package/class this method depends on

public static List<String> extractNER(String doc) {
    Annotation document = new Annotation(doc);
    pipeline.annotate(document);
    List<CoreMap> sentences = document.get(CoreAnnotations.SentencesAnnotation.class);
    List<String> result = new ArrayList<String>();
    for (CoreMap sentence : sentences) {
        // traversing the words in the current sentence;
        // a CoreLabel is a CoreMap with additional token-specific methods
        for (CoreLabel token : sentence.get(CoreAnnotations.TokensAnnotation.class)) {
            // the text of the token
            String word = token.get(CoreAnnotations.TextAnnotation.class);
            // the NER label of the token
            String ne = token.get(CoreAnnotations.NamedEntityTagAnnotation.class);
            result.add(ne);
            System.out.println(word + "\t" + ne);
        }
    }
    return result;
}
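CoreNLP tags non-entity tokens as "O", so callers usually filter those out before using the labels. A usage sketch (sample text is ours; the exact labels depend on the NER model):

List<String> labels = extractNER("Barack Obama was born in Hawaii.");
labels.removeIf(ne -> "O".equals(ne));
System.out.println(labels); // e.g. [PERSON, PERSON, STATE_OR_PROVINCE], depending on the model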
Example 9: getObservedMWEs

import edu.stanford.nlp.util.CoreMap; // import the package/class this method depends on

/**
 * Collect the multi-word expressions (MWEs) observed in the text.
 * @param props the pipeline properties
 * @param text the text
 * @return the observed MWEs in the text
 */
protected List<String> getObservedMWEs(Properties props, String text) {
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
    // create an Annotation with the text
    Annotation document = new Annotation(text);
    // run the Annotators on the text
    pipeline.annotate(document);
    // collect the surface form of every observed MWE
    List<CoreMap> sentences = document.get(SentencesAnnotation.class);
    List<String> mwe_observed = new ArrayList<>();
    for (CoreMap sentence : sentences) {
        for (IMWE<IToken> mwe : sentence.get(JMWEAnnotation.class)) {
            mwe_observed.add(mwe.getForm());
        }
    }
    return mwe_observed;
}
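JMWEAnnotation is not part of core CoreNLP; it comes from a jMWE bridge library that plugs in through CoreNLP's custom-annotator mechanism. A hedged wiring sketch: the customAnnotatorClass.<name> property is CoreNLP's real hook for third-party annotators, but the annotator class name and key below are illustrative and depend on the bridge you use.

Properties props = new Properties();
// class name is illustrative; substitute the jMWE bridge's actual annotator class
props.setProperty("customAnnotatorClass.jmwe", "edu.stanford.nlp.pipeline.JMWEAnnotator");
props.setProperty("annotators", "tokenize, ssplit, pos, lemma, jmwe");
List<String> observed = getObservedMWEs(props, "She looked up the answer.");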
Example 10: extract

import edu.stanford.nlp.util.CoreMap; // import the package/class this method depends on

public static HashMap<RelationTriple, String> extract(String doc) {
    Annotation ann = new Annotation(doc);
    pipeline.annotate(ann);
    HashMap<RelationTriple, String> relations = new HashMap<RelationTriple, String>();
    for (CoreMap sentence : ann.get(CoreAnnotations.SentencesAnnotation.class)) {
        for (RelationTriple r : sentence.get(CoreAnnotations.KBPTriplesAnnotation.class)) {
            // keep only employment/title relations
            if (r.relationGloss().trim().equals("per:title")
                    || r.relationGloss().trim().equals("per:employee_of")
                    || r.relationGloss().trim().equals("org:top_members/employees")) {
                relations.put(r, sentence.toString());
            }
        }
    }
    return relations;
}
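KBPTriplesAnnotation is produced by the kbp annotator, which needs most of the standard chain (tokenize through ner, plus coref in typical setups) upstream. A usage sketch of the method above; subjectGloss, relationGloss, and objectGloss are real RelationTriple accessors, while the sample text is ours:

HashMap<RelationTriple, String> rels = extract("Barack Obama is the president of the United States.");
for (Map.Entry<RelationTriple, String> e : rels.entrySet()) {
    RelationTriple r = e.getKey();
    System.out.println(r.subjectGloss() + " | " + r.relationGloss() + " | " + r.objectGloss());
}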
Example 11: getGender

import edu.stanford.nlp.util.CoreMap; // import the package/class this method depends on

@Override
public Gender getGender(String name) {
    Annotation document = new Annotation(name);
    pipeline.annotate(document);
    for (CoreMap sentence : document.get(CoreAnnotations.SentencesAnnotation.class)) {
        for (CoreLabel token : sentence.get(CoreAnnotations.TokensAnnotation.class)) {
            String gender = token.get(MachineReadingAnnotations.GenderAnnotation.class);
            // gender is null for tokens the annotator could not classify
            if (gender != null) {
                if (gender.equals("MALE")) {
                    return Gender.MALE;
                } else if (gender.equals("FEMALE")) {
                    return Gender.FEMALE;
                }
            }
        }
    }
    return Gender.UNKNOWN;
}
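The GenderAnnotation read here is filled in by CoreNLP's gender annotator, so the pipeline behind this method must include it; note that newer CoreNLP releases moved the result to CoreAnnotations.GenderAnnotation, while the MachineReadingAnnotations key used above matches older versions. A hedged setup sketch (exact annotator requirements vary by version):

Properties props = new Properties();
props.setProperty("annotators", "tokenize, ssplit, gender");
StanfordCoreNLP pipeline = new StanfordCoreNLP(props);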
Example 12: extract

import edu.stanford.nlp.util.CoreMap; // import the package/class this method depends on

public static HashMap<RelationTriple, String> extract(String doc) {
    // strip non-breaking spaces and zero-width characters that confuse the tokenizer
    Annotation ann = new Annotation(doc
            .replaceAll("\u00a0", " ")
            .replaceAll("\u200B|\u200C|\u200D|\uFEFF", ""));
    pipeline.annotate(ann);
    HashMap<RelationTriple, String> relations = new HashMap<RelationTriple, String>();
    for (CoreMap sentence : ann.get(CoreAnnotations.SentencesAnnotation.class)) {
        for (RelationTriple r : sentence.get(CoreAnnotations.KBPTriplesAnnotation.class)) {
            // same filter as Example 10, plus former titles
            if (r.relationGloss().trim().equals("per:title")
                    || r.relationGloss().trim().equals("per:employee_of")
                    || r.relationGloss().trim().equals("org:top_members/employees")
                    || r.relationGloss().trim().equals("per:former_title")) {
                relations.put(r, sentence.toString());
            }
        }
    }
    return relations;
}
Example 13: simpleTokenization

import edu.stanford.nlp.util.CoreMap; // import the package/class this method depends on

public LinkedList<String> simpleTokenization(String text) {
    LinkedList<String> res = new LinkedList<>();
    if (text != null) {
        Annotation qaTokens = new Annotation(text);
        pipelineTokens.annotate(qaTokens);
        List<CoreMap> qssTokens = qaTokens.get(CoreAnnotations.SentencesAnnotation.class);
        for (CoreMap sentenceTokens : qssTokens) {
            ArrayList<CoreLabel> tokens = (ArrayList<CoreLabel>) sentenceTokens.get(CoreAnnotations.TokensAnnotation.class);
            for (CoreLabel t : tokens) {
                String lemma = t.lemma();
                String pos = t.tag();
                // keep only noun and verb lemmas that are not stopwords
                if ((pos.startsWith("N") || pos.startsWith("V")) && !stopwords.contains(lemma)) {
                    res.add(lemma);
                }
            }
        }
    }
    return res;
}
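A usage sketch, assuming the pipelineTokens field runs tokenize, ssplit, pos, and lemma, and that stopwords is a populated Set<String> (both are fields of the enclosing class, not shown):

LinkedList<String> terms = simpleTokenization("The cats were chasing a red ball.");
System.out.println(terms); // e.g. [cat, be, chase, ball], depending on the stopword list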
Example 14: extractNER

import edu.stanford.nlp.util.CoreMap; // import the package/class this method depends on

public static List<String> extractNER(String doc) {
    Annotation document = new Annotation(doc);
    pipeline.annotate(document);
    List<CoreMap> sentences = document.get(CoreAnnotations.SentencesAnnotation.class);
    List<String> result = new ArrayList<String>();
    for (CoreMap sentence : sentences) {
        // traversing the words in the current sentence;
        // a CoreLabel is a CoreMap with additional token-specific methods
        for (CoreLabel token : sentence.get(CoreAnnotations.TokensAnnotation.class)) {
            // the text of the token (retrieved for illustration; not used below)
            String word = token.get(CoreAnnotations.TextAnnotation.class);
            // the POS tag of the token (likewise unused here)
            String pos = token.get(CoreAnnotations.PartOfSpeechAnnotation.class);
            // the NER label is the only value actually collected
            String ne = token.get(CoreAnnotations.NamedEntityTagAnnotation.class);
            result.add(ne);
        }
    }
    return result;
}
Example 15: annotate

import edu.stanford.nlp.util.CoreMap; // import the package/class this method depends on

@Override
public void annotate(Annotation annotation) {
    int tk = 0;
    if (annotation.containsKey(CoreAnnotations.SentencesAnnotation.class)) {
        for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
            List<CoreLabel> tokens = sentence.get(CoreAnnotations.TokensAnnotation.class);
            for (int i = 0, sz = tokens.size(); i < sz; i++) {
                CoreLabel thisToken = tokens.get(i);
                // pos is a field holding one pre-computed tag per token (not shown here)
                thisToken.set(CoreAnnotations.PartOfSpeechAnnotation.class, pos[tk++].toUpperCase());
            }
        }
    } else {
        throw new RuntimeException("unable to find words/tokens in: " + annotation);
    }
}
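This last example is a custom Annotator implementation that overwrites each token's POS tag from a pre-computed array. A hedged sketch of how such an annotator is typically registered with a pipeline; the annotator key and class name are illustrative, and a custom annotator class needs a (String name, Properties props) constructor:

Properties props = new Properties();
props.setProperty("customAnnotatorClass.fixedpos", "my.package.FixedPosAnnotator");
props.setProperty("annotators", "tokenize, ssplit, fixedpos");
StanfordCoreNLP pipeline = new StanfordCoreNLP(props);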