This article collects typical usage examples of the Java class edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.BasicDependenciesAnnotation. If you are wondering what the BasicDependenciesAnnotation class does, how to use it, or where to find examples of it, the curated code samples below may help.
The BasicDependenciesAnnotation class belongs to the edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations package. Below are 10 code examples of the class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java examples.
Example 1: parse
import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.BasicDependenciesAnnotation; // import the required package/class
/**
 * Given a CoreNLP pipeline and an input sentence, generate a dependency parse
 * for the sentence and return it as a SemanticGraph.
 * @param pipeline - CoreNLP pipeline
 * @param snt - input sentence
 * @return the dependency parse as a SemanticGraph
 */
public static SemanticGraph parse(StanfordCoreNLP pipeline, String snt) {
    Annotation document = new Annotation(snt);
    pipeline.annotate(document);
    // A CoreMap is a sentence with annotations.
    List<CoreMap> sentences = document.get(SentencesAnnotation.class);
    SemanticGraph semanticGraph = null;
    // The input is expected to be a single sentence; if ssplit yields more,
    // only the last sentence's graph is kept.
    for (CoreMap sentence : sentences) {
        semanticGraph = sentence.get(BasicDependenciesAnnotation.class);
    }
    return semanticGraphUniversalEnglishToEnglish(semanticGraph);
}
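A minimal usage sketch for this helper, assuming a pipeline configured with the depparse annotator (the pipeline setup below is illustrative, not part of the original project):

// Illustrative setup only; any pipeline that produces BasicDependenciesAnnotation works.
Properties props = new Properties();
props.setProperty("annotators", "tokenize, ssplit, pos, depparse");
StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
SemanticGraph graph = parse(pipeline, "John likes ice cream.");
System.out.println(graph.toString().trim());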
Example 2: PreNERCoreMapWrapper
import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.BasicDependenciesAnnotation; // import the required package/class
/**
 * Wraps a CoreMap, caching its parse tree and the three dependency-graph
 * flavors (basic, collapsed, collapsed-CC-processed) when present.
 */
public PreNERCoreMapWrapper(final CoreMap cm, final HeadFinder hf, final AnalyticUUIDGenerator gen) {
    this.wrapper = new CoreMapWrapper(cm, gen);
    this.hf = hf;
    this.tree = Optional.ofNullable(cm.get(TreeAnnotation.class));
    this.basicDeps = Optional.ofNullable(cm.get(BasicDependenciesAnnotation.class));
    this.colDeps = Optional.ofNullable(cm.get(CollapsedDependenciesAnnotation.class));
    this.colCCDeps = Optional.ofNullable(cm.get(CollapsedCCProcessedDependenciesAnnotation.class));
    this.gen = gen;
}
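A note on the Optional.ofNullable idiom above: CoreMap.get returns null when an annotation is absent (for instance when the dependency parser was never run), so wrapping the result makes the absence explicit for downstream callers. A minimal sketch of the same idiom in isolation:

// CoreMap.get(...) yields null for missing annotations; Optional avoids
// scattering null checks through the consuming code.
Optional<SemanticGraph> deps =
        Optional.ofNullable(cm.get(BasicDependenciesAnnotation.class));
deps.ifPresent(g -> System.out.println(g.toString().trim()));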
Example 3: main
import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.BasicDependenciesAnnotation; // import the required package/class
/**
 * Main function
 *
 * @param args
 * @throws Exception
 */
public static void main(String[] args) throws Exception {
    // data input
    String text = "John may like an ice cream cake of the shop very much.";
    // model loading
    StanfordNlpWrapper nlp = new StanfordNlpWrapper(Env.STANFORDNLP_CFG);
    nlp.loadAll("tokenize, ssplit, parse");
    // task run
    Annotation annotation = nlp.annotate(text);
    for (CoreMap sent : annotation.get(SentencesAnnotation.class))
        System.out.println(sent.get(BasicDependenciesAnnotation.class).toString().trim());
}
Example 4: DoAll
import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.BasicDependenciesAnnotation; // import the required package/class
public void DoAll(String data, String[] TokenizedData, String[] POSTags, String[] LEMMA,
        Map<String, String> parentEdge, Map<String, ArrayList<String>> childrenEdge) {
    //if(document == null)
    {
        document = new Annotation(data);
        pipelineTags.annotate(document);
    }
    // Fill the caller-supplied arrays with tokens, POS tags, and lemmas.
    int i = 0;
    List<CoreLabel> tokens = document.get(TokensAnnotation.class);
    for (CoreLabel token : tokens) {
        String wPOS = token.get(PartOfSpeechAnnotation.class);
        String wNER = token.get(NamedEntityTagAnnotation.class);
        String wLEMMA = token.get(LemmaAnnotation.class);
        TokenizedData[i] = token.toString();
        POSTags[i] = wPOS;
        LEMMA[i] = wLEMMA;
        i++;
    }
    // Build parent/child edge maps from the first sentence's dependency graph.
    List<CoreMap> sentences = document.get(SentencesAnnotation.class);
    if (sentences.size() > 0) {
        SemanticGraph tree = sentences.get(0).get(BasicDependenciesAnnotation.class);
        //System.out.println(data+"\n"+tree.toString()+"\n");
        createEdgeMap(tree, parentEdge, childrenEdge);
    }
}
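createEdgeMap is not shown in this example. A plausible sketch, under the assumption that it records each token's governor and dependents keyed by a "word-index" string (the key format is a guess):

// Hypothetical sketch of the missing helper: walk every edge of the graph and
// record parent and children relations keyed by "word-index".
static void createEdgeMap(SemanticGraph tree, Map<String, String> parentEdge,
        Map<String, ArrayList<String>> childrenEdge) {
    for (SemanticGraphEdge edge : tree.edgeIterable()) {
        String gov = edge.getGovernor().word() + "-" + edge.getGovernor().index();
        String dep = edge.getDependent().word() + "-" + edge.getDependent().index();
        parentEdge.put(dep, gov);
        childrenEdge.computeIfAbsent(gov, k -> new ArrayList<>()).add(dep);
    }
}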
Example 5: annote
import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.BasicDependenciesAnnotation; // import the required package/class
@Override
void annote(Annotation annotation, List<CoreMap> sentences) {
    List<CoreLabel> labels;
    CoreLabel label;
    CoreMap sentence;
    SemanticGraph graph;
    String[][] tokens, tags, lemmas;
    String[] words;
    DependencyTree[] dtree;
    Iterator<Lexel> lexels;
    List<Integer> indexes;
    Lexel lexel = null;
    String id;
    int size, length, position;
    size = sentences.size();
    tokens = new String[size][];
    tags = new String[size][];
    lemmas = new String[size][];
    dtree = new DependencyTree[size];
    lexels = annotation.iterator();
    id = annotation.getID();
    indexes = mIndex.get(id);
    for (int i = 0; i < size; i++) {
        sentence = sentences.get(i);
        labels = sentence.get(TokensAnnotation.class);
        length = labels.size();
        words = new String[length];
        tags[i] = new String[length];
        lemmas[i] = new String[length];
        for (int j = 0; j < length; j++) {
            label = labels.get(j);
            words[j] = label.get(TextAnnotation.class);
            tags[i][j] = label.get(PartOfSpeechAnnotation.class);
            lemmas[i][j] = label.get(LemmaAnnotation.class);
            position = label.beginPosition();
            // Close the open lexel once the token position passes the next recorded offset.
            if (!indexes.isEmpty() && position > indexes.get(0)) {
                lexel.setOffset(j - lexel.getTokenIndex() - 1);
                lexel = null;
                indexes.remove(0);
            }
            // Open the next lexel when a token starts exactly at the next recorded offset.
            if (!indexes.isEmpty() && position == indexes.get(0)) {
                lexel = lexels.next();
                lexel.setIndexes(i, j);
                indexes.remove(0);
            }
        }
        if (lexel != null)
            lexel.setOffset(length - lexel.getTokenIndex() - 1);
        tokens[i] = words;
        graph = sentence.get(BasicDependenciesAnnotation.class);
        if (graph != null)
            dtree[i] = getDependencyTree(graph);
    }
    annotation.annote(tokens, mPOS ? tags : null, mLemma ? lemmas : null, mDepParse ? dtree : null);
    mIndex.remove(id);
}
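getDependencyTree and DependencyTree are project-specific types not shown here. As a rough sketch, such a conversion typically extracts one head index and relation label per token; the constructor below is hypothetical and only illustrates the shape of the transformation:

// Hypothetical conversion: one head index and relation label per token.
// DependencyTree's real constructor may differ; this is only illustrative.
static DependencyTree getDependencyTree(SemanticGraph graph) {
    int n = graph.size();
    int[] heads = new int[n];
    String[] rels = new String[n];
    for (SemanticGraphEdge edge : graph.edgeIterable()) {
        int d = edge.getDependent().index() - 1; // CoreNLP indices are 1-based
        heads[d] = edge.getGovernor().index() - 1;
        rels[d] = edge.getRelation().toString();
    }
    return new DependencyTree(heads, rels); // assumed constructor
}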
Example 6: parse
import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.BasicDependenciesAnnotation; // import the required package/class
@Override
public ParseResult parse(String text, Set<Pair<Integer, Integer>> entities) {
    ParseResult result = new ParseResult();
    Annotation document = new Annotation(text);
    pipeline.annotate(document);
    int i = 0;
    List<CoreMap> annotations = document.get(SentencesAnnotation.class);
    for (CoreMap s : annotations) {
        i++;
        // Sentence string
        result.addSentence(i, s.toString());
        // Tokens and POS tags
        for (CoreLabel token : s.get(TokensAnnotation.class)) {
            result.addToken(i, token.index(), token.originalText());
            result.addPOS(i, token.index(), token.getString(PartOfSpeechAnnotation.class));
            // Mark named entities
            if (entities == null) {
                // Use Stanford NEs
                if (!token.get(NamedEntityTagAnnotation.class).equals("O")) {
                    result.addPOS(i, token.index(), "NE");
                }
            } else {
                // Use NEs provided in input
                for (Pair<Integer, Integer> entity : entities) {
                    if (entity.getLeft() <= token.beginPosition()
                            && entity.getRight() >= token.endPosition())
                        result.addPOS(i, token.index(), "NE");
                }
            }
        }
        // Dependency parse
        SemanticGraph dependencies = s.get(BasicDependenciesAnnotation.class);
        result.addParse(i, dependencies.toList().trim());
    }
    return result;
}
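SemanticGraph.toList() renders one typed dependency per line in the form rel(gov-idx, dep-idx). A hypothetical caller, assuming entity spans are given as character offsets and that Pair is org.apache.commons.lang3.tuple.Pair (inferred from the getLeft()/getRight() calls above):

// Illustrative only: "parser" is an instance of the class defining parse() above.
Set<Pair<Integer, Integer>> entities = new HashSet<>();
entities.add(Pair.of(0, 4)); // e.g. the character span of "John"
ParseResult res = parser.parse("John likes ice cream.", entities);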
Example 7: addDepsBasic
import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.BasicDependenciesAnnotation; // import the required package/class
@SuppressWarnings("rawtypes")
static void addDepsBasic(Map<String, Object> sent_info, CoreMap sentence) {
    SemanticGraph dependencies = sentence.get(BasicDependenciesAnnotation.class);
    List deps = jsonFriendlyDeps(dependencies);
    sent_info.put("deps_basic", deps);
}
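jsonFriendlyDeps is not shown. A plausible sketch that flattens each edge into a [relation, governorIndex, dependentIndex] triple; the exact shape the original produces is an assumption:

// Hypothetical helper: one [rel, govIdx, depIdx] triple per dependency edge.
static List<List<Object>> jsonFriendlyDeps(SemanticGraph dependencies) {
    List<List<Object>> deps = new ArrayList<>();
    for (SemanticGraphEdge edge : dependencies.edgeIterable()) {
        deps.add(Arrays.asList(
                edge.getRelation().toString(),
                edge.getGovernor().index(),
                edge.getDependent().index()));
    }
    return deps;
}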
Example 8: getDependenciesFromCoreMap
import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.BasicDependenciesAnnotation; // import the required package/class
public static HashMap<Integer, Integer> getDependenciesFromCoreMap(CoreMap annotation) {
    SemanticGraph semanticGraph = annotation.get(BasicDependenciesAnnotation.class);
    Collection<TypedDependency> dependencies = semanticGraph.typedDependencies();
    HashMap<Integer, Integer> reverseDependencies = new HashMap<Integer, Integer>();
    for (TypedDependency dep : dependencies) {
        // Convert CoreNLP's 1-based indices to 0-based; the sentence root ends
        // up mapped to -1 because its governor is the virtual ROOT node (index 0).
        int govIndex = dep.gov().index() - 1;
        int depIndex = dep.dep().index() - 1;
        reverseDependencies.put(depIndex, govIndex);
    }
    return reverseDependencies;
}
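A minimal usage sketch; the sentence CoreMap is assumed to come from an annotated document, as in the earlier examples:

// Look up the 0-based index of each token's head; the sentence root maps to -1.
HashMap<Integer, Integer> heads = getDependenciesFromCoreMap(sentence);
for (Map.Entry<Integer, Integer> e : heads.entrySet())
    System.out.println("token " + e.getKey() + " <- head " + e.getValue());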
Example 9: testStanfordNlpWrapperForPipeline
import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.BasicDependenciesAnnotation; // import the required package/class
/**
 * StanfordNlpWrapper Test for pipeline functions
 *
 * @throws IOException
 */
public void testStanfordNlpWrapperForPipeline() throws IOException {
    System.out.println("\n----- testStanfordNlpWrapperForPipeline() ------------------------------");
    if (!TEST_PIPELINE)
        return;
    String text = "Samsung Electronics is a South Korean multinational electronics company headquartered in Suwon, South Korea.";
    text += " It is the flagship subsidiary of the Samsung Group.";
    StanfordNlpWrapper nlp = new StanfordNlpWrapper(Env.STANFORDNLP_CFG);
    nlp.loadAll("tokenize, ssplit, pos, lemma, ner, regexner, parse, dcoref");
    assertTrue(nlp.annotator != null);
    Annotation annotation = nlp.annotate(text);
    System.out.println("-toXml--------------------------------------------------------------------------");
    System.out.println(nlp.toXml(annotation));
    System.out.println("-toPrettyStr--------------------------------------------------------------------");
    System.out.println(nlp.toPrettyStr(annotation));
    assertEquals(2, annotation.get(SentencesAnnotation.class).size());
    for (CoreMap sentence : annotation.get(SentencesAnnotation.class)) {
        System.out.println("-TextAnnotation-----------------------------------------------------------------");
        System.out.println(sentence.get(TextAnnotation.class));
        System.out.println("-toTokens-----------------------------------------------------------------------");
        System.out.println(JString.join("\n", StanfordNlpWrapper.toTokens(sentence, text)));
        System.out.println("-toPhrases-----------------------------------------------------------------------");
        System.out.println(JString.join("\n", StanfordNlpWrapper.toPhrases(sentence, text)));
        System.out.println("-TreeAnnotation-----------------------------------------------------------------");
        System.out.println(sentence.get(TreeAnnotation.class).pennString().trim());
        System.out.println("-BasicDependenciesAnnotation----------------------------------------------------");
        System.out.println(sentence.get(BasicDependenciesAnnotation.class).toString().trim());
        System.out.println("-CollapsedDependenciesAnnotation------------------------------------------------");
        System.out.println(sentence.get(CollapsedDependenciesAnnotation.class).toString().trim());
        System.out.println("-CollapsedCCProcessedDependenciesAnnotation-------------------------------------");
        System.out.println(sentence.get(CollapsedCCProcessedDependenciesAnnotation.class).toString().trim());
    }
    System.out.println("-toCoreferenceMap---------------------------------------------------------------");
    assertEquals(5, StanfordNlpWrapper.toCoreferenceMap(annotation).entrySet().size());
    for (Entry<Integer, List<CorefMention>> e : StanfordNlpWrapper.toCoreferenceMap(annotation).entrySet())
        for (CorefMention m : e.getValue())
            System.out.printf("%d\t%s\t%s\t%d\t%d\n", e.getKey(), m.mentionType, m.mentionSpan, m.sentNum, m.headIndex);
}
Example 10: evaluateSentence
import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.BasicDependenciesAnnotation; // import the required package/class
public double evaluateSentence(CoreMap sentence, int startNodeId, ArrayList<Integer> blockedNodes) {
    SemanticGraph dependencies = sentence.get(BasicDependenciesAnnotation.class);
    String[] dependenciesList = dependencies.toString("list").split("\n");
    return evaluateSentence(dependenciesList, startNodeId, blockedNodes);
}
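The "list" format used above is likely equivalent to SemanticGraph.toList(): one typed dependency per line, e.g. nsubj(likes-2, John-1), so the overload receiving String[] presumably parses lines of that shape. A hypothetical illustration of parsing one such line:

// Illustrative only: split a "list"-format line into relation, governor, dependent.
String line = "nsubj(likes-2, John-1)";
java.util.regex.Matcher m = java.util.regex.Pattern
        .compile("([^(]+)\\((.+)-(\\d+), (.+)-(\\d+)\\)").matcher(line);
if (m.matches()) {
    String rel = m.group(1);
    int govId = Integer.parseInt(m.group(3));
    int depId = Integer.parseInt(m.group(5));
    System.out.println(rel + ": " + govId + " -> " + depId);
}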