本文整理匯總了Java中edu.stanford.nlp.ling.CoreLabel.word方法的典型用法代碼示例。如果您正苦於以下問題:Java CoreLabel.word方法的具體用法?Java CoreLabel.word怎麽用?Java CoreLabel.word使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類edu.stanford.nlp.ling.CoreLabel
的用法示例。
在下文中一共展示了CoreLabel.word方法的6個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: ExtendedToken
import edu.stanford.nlp.ling.CoreLabel; //導入方法依賴的package包/類
/**
 * Wraps a CoreLabel token and flags it as a potential grant identifier
 * when its surface form contains at least one uppercase ASCII letter
 * (A-Z) together with at least one digit (0-9).
 *
 * NOTE(review): lowercase letters intentionally do not count as letters
 * here — presumably because grant codes are upper-cased; confirm against
 * the data this runs on.
 */
public ExtendedToken(CoreLabel token) {
    this.token = token;
    boolean hasUpper = false;
    boolean hasDigit = false;
    String surface = token.word();
    for (int pos = 0; pos < surface.length(); pos++) {
        char ch = surface.charAt(pos);
        if (ch >= 'A' && ch <= 'Z') {
            hasUpper = true;
        } else if (ch >= '0' && ch <= '9') {
            hasDigit = true;
        }
        if (hasUpper && hasDigit) {
            break; // both found — no need to scan the rest of the word
        }
    }
    grantCandidate = hasUpper && hasDigit;
}
示例2: annotateNerClass
import edu.stanford.nlp.ling.CoreLabel; //導入方法依賴的package包/類
@GET
@Produces(MediaType.APPLICATION_JSON)
@Path("ner")
/**
 * Annotates {@code text} with named-entity classes and returns a JSON
 * response containing one {@code Annotation} per entity span. Consecutive
 * tokens that share the same allowed NER class are merged into a single
 * space-joined annotation.
 *
 * @param text the raw input text to annotate
 * @return a JSON string: an invalid-attribute response when no tokens are
 *         produced, otherwise the list of merged annotations
 */
@GET
@Produces(MediaType.APPLICATION_JSON)
@Path("ner")
public String annotateNerClass(@QueryParam("text") String text) {
    List<CoreLabel> labels = annotationService.annotate(text).get(CoreAnnotations.TokensAnnotation.class);
    // Guard against a missing TokensAnnotation (null map entry) as well as an empty token list.
    if (labels == null || labels.isEmpty()) {
        return new InvalidAttributeResponse("text").respond();
    }
    List<Annotation> response = new LinkedList<>();
    Annotation last = null;
    for (CoreLabel label : labels) {
        // Extend the previous annotation when this token continues the same NER class.
        if (last != null && isAllowedType(label.ner()) && last.nerClass.equals(label.ner())) {
            last.token += " " + label.word();
        } else {
            last = new Annotation(label.word(), label.ner());
            response.add(last);
        }
    }
    return Response.success(response).respond();
}
示例3: annotatePipeline
import edu.stanford.nlp.ling.CoreLabel; //導入方法依賴的package包/類
@GET
@Produces(MediaType.APPLICATION_JSON)
/**
 * Runs the full annotation pipeline on {@code text} and returns a JSON
 * response with merged NER annotations plus the set of users produced by
 * alignment. Consecutive tokens sharing the same allowed NER class are
 * merged; each completed annotation is passed through
 * {@code processAlignment} before a new one is started.
 *
 * @param text the raw input text to annotate
 * @return a JSON string: an invalid-attribute response when no tokens are
 *         produced, otherwise the aggregated annotation response
 * @throws IOException        propagated from alignment processing
 * @throws URISyntaxException propagated from alignment processing
 */
@GET
@Produces(MediaType.APPLICATION_JSON)
public String annotatePipeline(@QueryParam("text") String text) throws IOException, URISyntaxException {
    List<CoreLabel> labels = annotationService.annotate(text).get(CoreAnnotations.TokensAnnotation.class);
    // Guard against a missing TokensAnnotation (null map entry) as well as an empty token list.
    if (labels == null || labels.isEmpty()) {
        return new InvalidAttributeResponse("text").respond();
    }
    AnnotationResponse response = new AnnotationResponse();
    response.annotations = new LinkedList<>();
    response.users = new HashSet<>();
    Annotation last = null;
    for (CoreLabel label : labels) {
        // Extend the previous annotation when this token continues the same NER class.
        if (last != null && isAllowedType(label.ner()) && last.nerClass.equals(label.ner())) {
            last.token += " " + label.word();
        } else {
            // Finalize the previous span (no-op on the first iteration, last == null).
            processAlignment(last, response.users, text);
            last = new Annotation(label.word(), label.ner());
            response.annotations.add(last);
        }
    }
    // Finalize the trailing span.
    processAlignment(last, response.users, text);
    return Response.success(response).respond();
}
示例4: addingContentWord
import edu.stanford.nlp.ling.CoreLabel; //導入方法依賴的package包/類
/**
 * Tallies this content word against the three difficulty levels of the
 * readability model, incrementing each level counter whose lemma set
 * contains the word.
 *
 * NOTE(review): despite the local name, the value matched against the
 * model's lemma sets is {@code token.word()} (the surface form), not the
 * LemmaAnnotation — a previously commented-out variant used the lemma
 * instead; confirm the surface form is intended.
 */
@Override public void addingContentWord(CoreLabel token) {
    super.addingContentWord(token);
    String lemma = token.word();
    if (model.getLevel1Lemmas().contains(lemma)) {
        level1WordSize++;
    }
    if (model.getLevel2Lemmas().contains(lemma)) {
        level2WordSize++;
    }
    if (model.getLevel3Lemmas().contains(lemma)) {
        level3WordSize++;
    }
}
示例5: addWord
import edu.stanford.nlp.ling.CoreLabel; //導入方法依賴的package包/類
/**
 * Registers a token in the document statistics: word and letter counters,
 * hyphenation (syllable) counts, content-word / easy-word tallies, and POS
 * frequency tables. Every token receives a HyphenationAnnotation — either
 * the computed hyphenation or, as a fallback, its original text.
 */
public void addWord(CoreLabel token) {
    token.set(ReadabilityAnnotations.ContentWord.class, false);
    token.set(ReadabilityAnnotations.LiteralWord.class, false);
    String posTag = token.get(CoreAnnotations.PartOfSpeechAnnotation.class);
    String surface = token.word();
    addingToken(token);
    if (isWordPos(posTag)) {
        addingWord(token);
        wordCount++;
        docLenLettersOnly += token.endPosition() - token.beginPosition();
        surface = flattenToAscii(surface);
        Hyphenation hyphenation = hyphenator.hyphenate(surface);
        boolean hyphenCounted = false;
        if (hyphenation != null) {
            try {
                String rendered = hyphenation.toString();
                incrementHyphenCount(hyphenation.length() + 1);
                token.set(ReadabilityAnnotations.HyphenationAnnotation.class, rendered);
                hyphenCounted = true;
                hyphenWordCount++;
            } catch (Exception ignored) {
                // best-effort: a failed hyphenation falls through to the short-word rule below
            }
        }
        // Short words (< 5 chars) that could not be hyphenated count as a single unit.
        if (!hyphenCounted && surface.length() < 5) {
            incrementHyphenCount(1);
            hyphenWordCount++;
        }
        if (isContentPos(posTag)) {
            contentWordSize++;
            addingContentWord(token);
        }
        if (isEasyPos(posTag)) {
            contentEasyWordSize++;
            addingEasyWord(token);
        }
    }
    // Fall back to the original text when no hyphenation was stored.
    if (token.get(ReadabilityAnnotations.HyphenationAnnotation.class) == null) {
        token.set(ReadabilityAnnotations.HyphenationAnnotation.class, token.originalText());
    }
    String coarsePos = getGenericPos(posTag);
    posStats.add(posTag);
    genericPosStats.add(coarsePos);
}
示例6: resolveCoRef
import edu.stanford.nlp.ling.CoreLabel; //導入方法依賴的package包/類
/**
 * Rewrites {@code text} by replacing personal and possessive pronouns
 * ({@code PRP}, {@code PP$}) with the head word of the representative
 * mention of their coreference chain. Non-pronoun tokens, tokens without a
 * chain, and tokens inside the representative mention itself are copied
 * through unchanged, preserving the original inter-token whitespace via
 * {@code token.after()}.
 *
 * @param text the input text to resolve
 * @return the text with pronouns replaced by their antecedents
 */
public String resolveCoRef(String text) {
    // StringBuilder avoids the O(n^2) cost of String += inside the token loop.
    StringBuilder resolved = new StringBuilder();
    // run the pipeline
    Annotation document = runPipeline(text);
    // get all coref chains and sentences
    Map<Integer, CorefChain> corefs = document.get(CorefChainAnnotation.class);
    List<CoreMap> sentences = document.get(SentencesAnnotation.class);
    // process each sentence
    for (CoreMap sentence : sentences) {
        int curSentIdx = sentence.get(SentenceIndexAnnotation.class);
        List<CoreLabel> tokens = sentence.get(TokensAnnotation.class);
        for (CoreLabel token : tokens) {
            // Only personal (PRP) and possessive (PP$) pronouns are replacement
            // candidates; constant-first equals also guards a null POS tag.
            String pos = token.get(PartOfSpeechAnnotation.class);
            boolean isPronoun = "PRP".equals(pos) || "PP$".equals(pos);
            Integer corefClustId = token.get(CorefClusterIdAnnotation.class);
            CorefChain chain = corefs.get(corefClustId);
            // No chain, a singleton chain, or a non-pronoun: copy the token through.
            if (chain == null || chain.getMentionsInTextualOrder().size() == 1 || !isPronoun) {
                resolved.append(token.word()).append(token.after());
            } else {
                CorefMention reprMent = chain.getRepresentativeMention();
                int sentIndx = reprMent.sentNum - 1;
                String rootWord = sentences.get(sentIndx)
                        .get(TokensAnnotation.class)
                        .get(reprMent.headIndex - 1)
                        .originalText();
                // Replace only when the token lies outside the representative mention,
                // otherwise we would rewrite the antecedent with itself.
                if (curSentIdx != sentIndx || token.index() < reprMent.startIndex
                        || token.index() > reprMent.endIndex) {
                    // Keep sentence-initial / proper-noun capitalization of the original token.
                    if (Character.isUpperCase(token.originalText().charAt(0))) {
                        rootWord = WordUtils.capitalize(rootWord);
                    }
                    resolved.append(rootWord).append(token.after());
                } else {
                    resolved.append(token.word()).append(token.after());
                }
            }
        }
    }
    return resolved.toString();
}