本文整理汇总了Java中edu.stanford.nlp.pipeline.Annotation.has方法的典型用法代码示例。如果您正苦于以下问题:Java Annotation.has方法的具体用法?Java Annotation.has怎么用?Java Annotation.has使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类edu.stanford.nlp.pipeline.Annotation
的用法示例。
在下文中一共展示了Annotation.has方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: annotate
import edu.stanford.nlp.pipeline.Annotation; //导入方法依赖的package包/类
@Override
public void annotate(Annotation annotation) {
    // Fallback lemmatizer: copies each token's surface text into its lemma slot
    // (identity lemmatization), so downstream annotators always find a lemma.
    // Throws RuntimeException when the annotation has no sentence list.
    if (annotation.has(CoreAnnotations.SentencesAnnotation.class)) {
        for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
            // Enhanced-for replaces the original indexed loop; the unused
            // local counter `tk` from the original has been removed.
            for (CoreLabel token : sentence.get(CoreAnnotations.TokensAnnotation.class)) {
                token.set(CoreAnnotations.LemmaAnnotation.class,
                        token.get(CoreAnnotations.TextAnnotation.class));
            }
        }
    }
    else {
        throw new RuntimeException("unable to find words/tokens in: " + annotation);
    }
}
示例2: annotate
import edu.stanford.nlp.pipeline.Annotation; //导入方法依赖的package包/类
@Override
public void annotate(Annotation annotation) {
    // Assigns pre-computed POS tags (from the `pos` field) to every token of
    // the document, consuming the tag array in document order.
    // Throws RuntimeException when the annotation has no sentence list.
    if (!annotation.has(CoreAnnotations.SentencesAnnotation.class)) {
        throw new RuntimeException("unable to find words/tokens in: " + annotation);
    }
    int cursor = 0; // global token index across all sentences
    for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
        for (CoreLabel token : sentence.get(CoreAnnotations.TokensAnnotation.class)) {
            token.set(CoreAnnotations.PartOfSpeechAnnotation.class, pos[cursor++].toUpperCase());
        }
    }
}
示例3: annotate
import edu.stanford.nlp.pipeline.Annotation; //导入方法依赖的package包/类
@Override
public void annotate(Annotation annotation) {
    // Derives a simplified POS category for each token that already carries a
    // fine-grained POS tag; tokens without a tag are left untouched.
    // Throws RuntimeException when the annotation has no sentence list.
    if (!annotation.has(CoreAnnotations.SentencesAnnotation.class)) {
        throw new RuntimeException("unable to find words/tokens in: " + annotation);
    }
    for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
        for (CoreLabel token : sentence.get(CoreAnnotations.TokensAnnotation.class)) {
            String tag = token.get(CoreAnnotations.PartOfSpeechAnnotation.class);
            if (tag != null) {
                token.set(PikesAnnotations.SimplePosAnnotation.class,
                        AnnotatorUtils.getSimplePos(tag));
            }
        }
    }
}
示例4: annotate
import edu.stanford.nlp.pipeline.Annotation; //导入方法依赖的package包/类
@Override
public void annotate(Annotation annotation) {
    // Copies basic-dependency parse information onto each token in CoNLL form:
    // dependency label plus a 0-based head index (-1 marks the root).
    // Throws RuntimeException when the annotation has no sentence list.
    if (annotation.has(CoreAnnotations.SentencesAnnotation.class)) {
        for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
            SemanticGraph dependencies = sentence.get(
                    SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class);
            if (dependencies != null) {
                // Build the parse info only after the null check: the original
                // constructed DepParseInfo(dependencies) unconditionally, risking
                // an NPE for sentences with no dependency graph attached. A stray
                // debug System.out.println(info) was also removed.
                DepParseInfo info = new DepParseInfo(dependencies);
                List<CoreLabel> tokens = sentence.get(CoreAnnotations.TokensAnnotation.class);
                for (int i = 0; i < tokens.size(); i++) {
                    CoreLabel token = tokens.get(i);
                    // DepParseInfo maps are 1-based; shift the head to 0-based.
                    token.set(CoreAnnotations.CoNLLDepTypeAnnotation.class,
                            info.getDepLabels().get(i + 1));
                    token.set(CoreAnnotations.CoNLLDepParentIndexAnnotation.class,
                            info.getDepParents().get(i + 1) - 1);
                }
            }
        }
    } else {
        throw new RuntimeException("unable to find words/tokens in: " + annotation);
    }
}
示例5: annotate
import edu.stanford.nlp.pipeline.Annotation; //导入方法依赖的package包/类
@Override
public void annotate(Annotation annotation) {
    // Like the per-sentence CoNLL dependency annotator, but tracks a running
    // document-wide token offset so heads stay sentence-relative when the
    // DepParseInfo indices span sentence boundaries. Heads pointing before the
    // current sentence are clamped to -1 (root).
    // Throws RuntimeException when the annotation has no sentence list.
    if (annotation.has(CoreAnnotations.SentencesAnnotation.class)) {
        int sentOffset = 0;
        for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
            SemanticGraph dependencies = sentence.get(
                    SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class);
            List<CoreLabel> tokens = sentence.get(CoreAnnotations.TokensAnnotation.class);
            if (dependencies != null) {
                // Constructed inside the null check: the original built
                // DepParseInfo(dependencies) before checking, risking an NPE
                // for sentences without a dependency graph.
                DepParseInfo info = new DepParseInfo(dependencies);
                for (int i = 0; i < tokens.size(); i++) {
                    CoreLabel token = tokens.get(i);
                    int j = i + sentOffset; // document-wide token index
                    String label = info.getDepLabels().get(j + 1);
                    int head = info.getDepParents().get(j + 1) - 1 - sentOffset;
                    if (head < -1) {
                        head = -1; // head falls before this sentence: treat as root
                    }
                    token.set(CoreAnnotations.CoNLLDepTypeAnnotation.class, label);
                    token.set(CoreAnnotations.CoNLLDepParentIndexAnnotation.class, head);
                }
            }
            sentOffset += tokens.size();
        }
    } else {
        throw new RuntimeException("unable to find words/tokens in: " + annotation);
    }
}
示例6: annotate
import edu.stanford.nlp.pipeline.Annotation; //导入方法依赖的package包/类
@Override
public void annotate(Annotation annotation) {
    // Stores a DepParseInfo summary of the basic dependency graph on every
    // sentence that has one; sentences without a graph are skipped.
    // Throws RuntimeException when the annotation has no sentence list.
    if (!annotation.has(CoreAnnotations.SentencesAnnotation.class)) {
        throw new RuntimeException("unable to find words/tokens in: " + annotation);
    }
    for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
        SemanticGraph graph =
                sentence.get(SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class);
        if (graph != null) {
            sentence.set(DepparseAnnotations.MstParserAnnotation.class, new DepParseInfo(graph));
        }
    }
}
示例7: annotate
import edu.stanford.nlp.pipeline.Annotation; //导入方法依赖的package包/类
@Override
public void annotate(Annotation annotation) {
    // Runs the UKB word-sense tagger over each sentence (skipping overlong
    // ones) and stores the resulting WordNet sense on every token.
    // Throws RuntimeException when the annotation has no sentence list.
    if (!annotation.has(CoreAnnotations.SentencesAnnotation.class)) {
        throw new RuntimeException("unable to find words/tokens in: " + annotation);
    }
    for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
        List<CoreLabel> tokens = sentence.get(CoreAnnotations.TokensAnnotation.class);
        if (maxLen > 0 && tokens.size() > maxLen) {
            continue; // sentence exceeds the configured length cap
        }
        // Tagger input: one map per token carrying simple POS and lemma.
        ArrayList<HashMap<String, String>> terms = new ArrayList<>();
        for (CoreLabel token : tokens) {
            HashMap<String, String> term = new HashMap<>();
            term.put("simple_pos", token.get(PikesAnnotations.SimplePosAnnotation.class));
            term.put("lemma", token.get(CoreAnnotations.LemmaAnnotation.class));
            terms.add(term);
        }
        try {
            // Presumably fills each term's "wordnet" entry in place — the loop
            // below reads it back; TODO confirm against UKB tagger docs.
            tagger.run(terms);
        } catch (IOException e) {
            e.printStackTrace();
        }
        for (int i = 0; i < tokens.size(); i++) {
            tokens.get(i).set(PikesAnnotations.UKBAnnotation.class, terms.get(i).get("wordnet"));
        }
    }
}
示例8: annotate
import edu.stanford.nlp.pipeline.Annotation; //导入方法依赖的package包/类
@Override
public void annotate(Annotation annotation) {
    // Parses each sentence with the MST parser from token surface forms and
    // POS tags, attaching the resulting DepParseInfo to the sentence.
    // Throws RuntimeException when the annotation has no sentence list.
    if (!annotation.has(CoreAnnotations.SentencesAnnotation.class)) {
        throw new RuntimeException("unable to find words/tokens in: " + annotation);
    }
    for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
        List<CoreLabel> tokens = sentence.get(CoreAnnotations.TokensAnnotation.class);
        if (maxLen > 0 && tokens.size() > maxLen) {
            continue; // skip sentences beyond the configured length cap
        }
        ArrayList<String> forms = new ArrayList<>();
        ArrayList<String> poss = new ArrayList<>();
        for (CoreLabel token : tokens) {
            forms.add(token.get(CoreAnnotations.TextAnnotation.class));
            poss.add(token.get(CoreAnnotations.PartOfSpeechAnnotation.class));
        }
        try {
            DepParseInfo depParseInfo = parser.tag(forms, poss);
            sentence.set(DepparseAnnotations.MstParserAnnotation.class, depParseInfo);
        } catch (Exception e) {
            // Best effort: a parse failure leaves this sentence unannotated.
            e.printStackTrace();
        }
    }
}
示例9: annotate
import edu.stanford.nlp.pipeline.Annotation; //导入方法依赖的package包/类
@Override
public void annotate(Annotation annotation) {
    // POS-tags each sentence with the external tagger held in `tt`. The tagger
    // and the shared `poss` buffer are instance fields, so the tagging section
    // is serialized with synchronized(this) to keep concurrent annotate()
    // calls from interleaving their buffers.
    // Throws RuntimeException when the annotation has no sentence list.
    if (annotation.has(CoreAnnotations.SentencesAnnotation.class)) {
        for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
            List<CoreLabel> tokens = sentence.get(CoreAnnotations.TokensAnnotation.class);
            synchronized (this) {
                try {
                    // `poss` is reset here and is presumably populated as a side
                    // effect of tt.process() (e.g. via a registered token handler)
                    // — TODO confirm; nothing in this method writes to it directly.
                    poss = new ArrayList<>();
                    ArrayList<String> stringTokens = new ArrayList<>();
                    for (int i = 0, sz = tokens.size(); i < sz; i++) {
                        stringTokens.add(tokens.get(i).originalText());
                    }
                    tt.process(stringTokens);
                    for (int i = 0, sz = tokens.size(); i < sz; i++) {
                        CoreLabel thisToken = tokens.get(i);
                        // Normalize the tagger's parenthesis tags to PTB-style
                        // bracket codes (per the helper's name — verify direction).
                        String pos = AnnotatorUtils.parenthesisToCode(poss.get(i));
                        thisToken.set(CoreAnnotations.PartOfSpeechAnnotation.class, pos);
                    }
                } catch (Exception e) {
                    // NOTE(review): broad catch with printStackTrace silently leaves
                    // tokens untagged on failure — consider logging via a logger.
                    e.printStackTrace();
                }
            }
        }
    }
    else {
        throw new RuntimeException("unable to find words/tokens in: " + annotation);
    }
}
示例10: annotate
import edu.stanford.nlp.pipeline.Annotation; //导入方法依赖的package包/类
@Override
public void annotate(Annotation annotation) {
    // Runs the Semafor frame-semantic parser on every sentence that already
    // carries an MST dependency parse, storing the parse result on the
    // sentence. Overlong sentences and sentences without a parse are skipped.
    // Throws RuntimeException when the annotation has no sentence list.
    if (!annotation.has(CoreAnnotations.SentencesAnnotation.class)) {
        throw new RuntimeException("unable to find words/tokens in: " + annotation);
    }
    for (CoreMap stanfordSentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
        List<CoreLabel> tokens = stanfordSentence.get(CoreAnnotations.TokensAnnotation.class);
        if (maxLen > 0 && tokens.size() > maxLen) {
            continue; // respect the configured sentence-length cap
        }
        DepParseInfo depParseInfo =
                stanfordSentence.get(DepparseAnnotations.MstParserAnnotation.class);
        if (depParseInfo == null) {
            continue; // no dependency parse available for this sentence
        }
        // Convert Stanford tokens plus dependency info into Semafor's Token model.
        List<Token> sentenceTokens = new ArrayList<>();
        for (int i = 0; i < tokens.size(); i++) {
            CoreLabel token = tokens.get(i);
            // DepParseInfo maps are 1-based, hence the i + 1 lookups.
            Token fnToken = new Token(
                    token.get(CoreAnnotations.TextAnnotation.class),
                    token.get(CoreAnnotations.PartOfSpeechAnnotation.class),
                    depParseInfo.getDepParents().get(i + 1),
                    depParseInfo.getDepLabels().get(i + 1));
            fnToken.setLemma(token.get(CoreAnnotations.LemmaAnnotation.class));
            sentenceTokens.add(fnToken);
        }
        try {
            SemaforParseResult results = parser.parseSentence(new Sentence(sentenceTokens));
            stanfordSentence.set(PikesAnnotations.SemaforAnnotation.class, results);
        } catch (Exception e) {
            // Best effort: a Semafor failure leaves this sentence unannotated.
            e.printStackTrace();
        }
    }
}