本文整理汇总了Java中edu.emory.clir.clearnlp.dependency.DEPNode类的典型用法代码示例。如果您正苦于以下问题:Java DEPNode类的具体用法?Java DEPNode怎么用?Java DEPNode使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
DEPNode类属于edu.emory.clir.clearnlp.dependency包,在下文中一共展示了DEPNode类的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: createTreeFromTokens
import edu.emory.clir.clearnlp.dependency.DEPNode; //导入依赖的package包/类
/**
 * Builds a ClearNLP {@link DEPTree} from the word tokens of one sentence.
 *
 * @param tokens
 *            the word tokens, in sentence order
 * @return the populated dependency tree
 */
private static DEPTree createTreeFromTokens(final List<WordToken> tokens) {
	final DEPTree tree = new DEPTree(tokens.size());
	int index = 0;
	for (final WordToken token : tokens) {
		// Node IDs are assigned in token order, starting at 0.
		final DEPNode depNode = new DEPNode(index++, token.getCoveredText());
		depNode.setPOSTag(token.getPartOfSpeech());
		// Carry over the first lemma if an upstream annotator produced one.
		final FSArray lemmaArray = token.getLemmas();
		if (lemmaArray != null && lemmaArray.size() > 0) {
			final WordLemma firstLemma = (WordLemma) lemmaArray.get(0);
			depNode.setLemma(firstLemma.getLemmaForm());
		}
		tree.add(depNode);
	}
	return tree;
}
示例2: process
import edu.emory.clir.clearnlp.dependency.DEPNode; //导入依赖的package包/类
/**
 * Runs the ClearNLP POS tagger over each window annotation in the CAS and
 * copies the predicted tags back onto the window's tokens.
 *
 * @param jCas the CAS to process
 * @throws AnalysisEngineProcessException on tagging failure
 */
@Override
public void process(JCas jCas) throws AnalysisEngineProcessException {
  for (Annotation window : JCasUtil.select(jCas, this.windowClass)) {
    List<TOKEN_TYPE> tokens = this.tokenOps.selectTokens(jCas, window);
    if (tokens.isEmpty()) {
      // BUG FIX: this was "return", which silently aborted processing of all
      // remaining windows as soon as one empty window was encountered.
      // An empty window should only skip itself.
      continue;
    }
    List<String> tokenStrings = JCasUtil.toText(tokens);
    // As of version 1.3.0, ClearNLP does all processing through its own dependency tree
    // structure
    DEPTree clearNlpDepTree = new DEPTree(tokenStrings);
    this.tagger.process(clearNlpDepTree);
    // Note the ClearNLP counts index 0 as the sentence dependency node, so the POS tag indices
    // are shifted by one from the token indices
    for (int i = 0; i < tokens.size(); i++) {
      TOKEN_TYPE token = tokens.get(i);
      DEPNode node = clearNlpDepTree.get(i + 1);
      this.tokenOps.setPos(jCas, token, node.getPOSTag());
    }
  }
}
示例3: process
import edu.emory.clir.clearnlp.dependency.DEPNode; //导入依赖的package包/类
/**
 * Dependency-parses each window of tokens: copies the tokens' text, lemma and
 * POS into a ClearNLP {@link DEPTree}, runs the parser, and writes the parse
 * back into the CAS via {@code addTreeToCas}.
 *
 * @param jCas the CAS to process
 * @throws AnalysisEngineProcessException on parse failure
 */
@Override
public void process(JCas jCas) throws AnalysisEngineProcessException {
  for (WINDOW_TYPE window : JCasUtil.select(jCas, this.windowClass)) {
    List<TOKEN_TYPE> tokens = this.tokenOps.selectTokens(jCas, window);
    // Extract data from CAS and stuff it into ClearNLP data structures.
    // ClearNLP token IDs start at 1; ID 0 is the artificial root.
    DEPTree tree = new DEPTree(tokens.size());
    int nodeId = 0;
    for (TOKEN_TYPE token : tokens) {
      nodeId++;
      String lemma = this.tokenOps.getLemma(jCas, token);
      String pos = this.tokenOps.getPos(jCas, token);
      tree.add(new DEPNode(nodeId, token.getCoveredText(), lemma, pos, new DEPFeat()));
    }
    // Run the parser
    this.parser.process(tree);
    // Convert ClearNLP output back into CAS type system annotations
    this.addTreeToCas(jCas, tree, window, tokens);
  }
}
示例4: lemmatize
import edu.emory.clir.clearnlp.dependency.DEPNode; //导入依赖的package包/类
/**
 * Assigns a lemma to every token: tokens containing any digit keep their
 * surface form, all others are lemmatized with the ClearNLP morphological
 * analyzer.
 *
 * @param jcas   the CAS (unused here, kept for the interface contract)
 * @param tokens the tokens to lemmatize, modified in place
 */
@Override
public void lemmatize(JCas jcas, List<Token> tokens) {
  for (Token token : tokens) {
    String coveredText = token.getCoveredText();
    String posTag = token.getPartOfSpeech();
    if (DIGIT_MATCHER.matchesAnyOf(coveredText)) {
      // Numeric-looking tokens keep their surface form as the lemma.
      token.setLemmaForm(coveredText);
    } else {
      // ID -1 marks a detached node; the analyzer fills in the lemma.
      DEPNode depNode = new DEPNode(-1, coveredText, null, posTag, new DEPFeat());
      mpAnalyzer.analyze(depNode);
      token.setLemmaForm(depNode.getLemma());
    }
  }
}
示例5: process
import edu.emory.clir.clearnlp.dependency.DEPNode; //导入依赖的package包/类
/**
 * Post-processes token lemmas for question text: fills missing lemmas via the
 * ClearNLP analyzer, restores capitalization on proper nouns (NNP/NNPS), and
 * de-normalizes numeric tokens (CD or any token containing a digit) back to
 * their surface form.
 *
 * @param jcas the CAS whose tokens are updated in place
 * @throws AnalysisEngineProcessException on failure
 */
@Override
public void process(JCas jcas) throws AnalysisEngineProcessException {
  List<Token> tokens = TypeUtil.getOrderedTokens(jcas);
  // use original ClearNLP lemmatizer in case missing
  tokens.stream().filter(token -> token.getLemmaForm() == null).forEach(token -> {
    DEPNode node = createNode(token);
    mpAnalyzer.analyze(node);
    token.setLemmaForm(node.getLemma());
  } );
  // try to de-downcase for proper nouns
  tokens.stream().filter(token -> equalsPosTag("NNP", token))
          .forEach(QuestionLemmaDedowncaserDenormalizer::setLemmaByText);
  tokens.stream().filter(token -> equalsPosTag("NNPS", token)).forEach(token -> {
    char[] tokenText = token.getCoveredText().toCharArray();
    char[] lemma = token.getLemmaForm().toCharArray();
    // BUG FIX: bound the loop by BOTH arrays. The lemma of a plural proper
    // noun is usually shorter than the surface form, but if it is ever longer
    // (or the same length is not guaranteed), indexing tokenText[i] with a
    // bound of lemma.length alone throws ArrayIndexOutOfBoundsException.
    int limit = Math.min(lemma.length, tokenText.length);
    for (int i = 0; i < limit; i++) {
      if (isUpperCase(tokenText[i]))
        lemma[i] = toUpperCase(lemma[i]);
    }
    token.setLemmaForm(new String(lemma));
  } );
  // de-normalization
  tokens.stream().filter(token -> equalsPosTag("CD", token))
          .forEach(QuestionLemmaDedowncaserDenormalizer::setLemmaByText);
  tokens.stream().filter(token -> CharMatcher.JAVA_DIGIT.matchesAnyOf(token.getCoveredText()))
          .forEach(QuestionLemmaDedowncaserDenormalizer::setLemmaByText);
  if (LOG.isTraceEnabled()) {
    tokens.forEach(token -> LOG.trace("{} {} {}", token.getCoveredText(), token.getLemmaForm(),
            token.getPartOfSpeech()));
  }
}
示例6: doProcess
import edu.emory.clir.clearnlp.dependency.DEPNode; //导入依赖的package包/类
/**
 * Dependency-parses every sentence in the CAS and converts the resulting
 * ClearNLP tree back into {@code Dependency} annotations.
 *
 * NOTE(review): this relies on {@code ClearNlpParser.createTreeFromTokens},
 * which assigns node IDs starting at 0, so {@code tokens.get(node.getID())}
 * below is assumed to be a valid 0-based lookup — confirm against that
 * builder, since ClearNLP itself conventionally reserves ID 0 for the
 * artificial root.
 *
 * @param jCas the CAS to process
 * @throws AnalysisEngineProcessException on parse failure
 */
@Override
protected void doProcess(final JCas jCas) throws AnalysisEngineProcessException {
for (final Sentence sentence : JCasUtil.select(jCas, Sentence.class)) {
final List<WordToken> tokens = JCasUtil.selectCovered(jCas, WordToken.class, sentence);
final DEPTree tree = ClearNlpParser.createTreeFromTokens(tokens);
// Perform parsing
depParser.process(tree);
// Convert tree back to our annotations
for (int i = 0; i < tree.size(); i++) {
final DEPNode node = tree.get(i);
// Logic taken from DKPro Core (ASL)
// https://github.com/dkpro/dkpro-core/blob/master/dkpro-core-clearnlp-asl/src/main/java/de/tudarmstadt/ukp/dkpro/core/clearnlp/ClearNlpParser.java
if (node.hasHead()) {
final Dependency dep = new Dependency(jCas);
if (node.getHead().getID() != 0) {
// Head is a real token: governor is the head token, label from the parser.
dep.setGovernor(tokens.get(node.getHead().getID()));
dep.setDependencyType(node.getLabel());
} else {
// Head is node 0: mark this token as its own governor with type ROOT.
dep.setGovernor(tokens.get(node.getID()));
dep.setDependencyType("ROOT");
}
dep.setDependent(tokens.get(node.getID()));
// The dependency annotation spans exactly its dependent token.
dep.setBegin(dep.getDependent().getBegin());
dep.setEnd(dep.getDependent().getEnd());
addToJCasIndex(dep);
}
}
}
}
示例7: createNode
import edu.emory.clir.clearnlp.dependency.DEPNode; //导入依赖的package包/类
/**
 * Wraps a token in a detached ClearNLP {@link DEPNode} (ID -1, lemma unset)
 * so it can be handed to the morphological analyzer.
 *
 * @param token the source token
 * @return a new node carrying the token's covered text and POS tag
 */
private static DEPNode createNode(Token token) {
  String text = token.getCoveredText();
  String posTag = token.getPartOfSpeech();
  return new DEPNode(-1, text, null, posTag, new DEPFeat());
}
示例8: extractSRLInfo
import edu.emory.clir.clearnlp.dependency.DEPNode; //导入依赖的package包/类
/**
 * Converts the output from the ClearParser Semantic Role Labeler to the ClearTK Predicate and
 * SemanticArgument types: one predicate per distinct semantic head, with all of its arguments
 * collected and attached at the end.
 *
 * @param jCas
 *          the CAS receiving the new annotations
 * @param tokens
 *          in-order list of tokens (token i corresponds to tree node i + 1)
 * @param tree
 *          dependency tree output by ClearParser SRLPredict
 */
private void extractSRLInfo(JCas jCas, List<TOKEN_TYPE> tokens, DEPTree tree) {
  Map<Integer, PREDICATE_TYPE> predicatesByHeadId = Maps.newHashMap();
  Map<PREDICATE_TYPE, List<ARGUMENT_TYPE>> argumentsByPredicate = Maps.newHashMap();
  // Node 0 is the artificial sentence head, so real tokens start at index 1.
  for (int nodeId = 1; nodeId < tree.size(); nodeId++) {
    DEPNode parserNode = tree.get(nodeId);
    TOKEN_TYPE argumentToken = tokens.get(nodeId - 1);
    // Each semantic-head arc is one (predicate, argument, label) triplet.
    for (SRLArc arc : parserNode.getSemanticHeadArcList()) {
      int headId = arc.getNode().getID();
      TOKEN_TYPE headToken = tokens.get(headId - 1);
      PREDICATE_TYPE predicate = predicatesByHeadId.get(headId);
      if (predicate == null) {
        // First arc pointing at this head: create its predicate annotation.
        String rolesetId = arc.getNode().getFeat(DEPLib.FEAT_PB);
        predicate = this.srlOps.createPredicate(jCas, headToken, rolesetId);
        predicatesByHeadId.put(headId, predicate);
        argumentsByPredicate.put(predicate, Lists.newArrayList());
      }
      argumentsByPredicate.get(predicate)
              .add(this.srlOps.createArgument(jCas, argumentToken, arc.getLabel()));
    }
  }
  // Store the collected arguments in their predicates.
  argumentsByPredicate.forEach(
          (predicate, arguments) -> this.srlOps.setPredicateArguments(jCas, predicate, arguments));
}