This page collects typical usage examples of the Java class edu.stanford.nlp.trees.TypedDependency. If you are unsure what TypedDependency does or how to use it, the curated code examples below should help.
TypedDependency belongs to the edu.stanford.nlp.trees package. 15 code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java examples.
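Before the examples, here is a minimal, self-contained sketch (assuming the standard englishPCFG model is on the classpath and a CoreNLP version where governors and dependents are IndexedWords, as the examples below assume) of how the TypedDependency lists consumed throughout this page are typically produced:

import edu.stanford.nlp.parser.lexparser.LexicalizedParser;
import edu.stanford.nlp.trees.GrammaticalStructure;
import edu.stanford.nlp.trees.GrammaticalStructureFactory;
import edu.stanford.nlp.trees.PennTreebankLanguagePack;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.TypedDependency;

public class TypedDependencyDemo {
    public static void main(String[] args) {
        LexicalizedParser parser = LexicalizedParser.loadModel(
                "edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz");
        Tree tree = parser.parse("The quick brown fox jumps over the lazy dog.");
        GrammaticalStructureFactory gsf =
                new PennTreebankLanguagePack().grammaticalStructureFactory();
        GrammaticalStructure gs = gsf.newGrammaticalStructure(tree);
        // Each TypedDependency pairs a grammatical relation with a governor and a dependent.
        for (TypedDependency td : gs.typedDependenciesCCprocessed()) {
            System.out.println(td.reln() + "(" + td.gov().word() + ", " + td.dep().word() + ")");
        }
    }
}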
Example 1: getSubject
import edu.stanford.nlp.trees.TypedDependency; // import the required package/class
private static String getSubject(final List<TypedDependency> dependencies) {
    String rootSubject = null, subject = null;
    // Scan in reverse so the subject head (any "subj" relation) is found before
    // its "compound" modifiers, which are prepended to form the full noun phrase.
    for (int i = dependencies.size() - 1; i >= 0; i--) {
        final TypedDependency dependency = dependencies.get(i);
        if (dependency.reln().toString().contains("subj")) {
            rootSubject = subject = dependency.dep().word();
        } else if (dependency.reln().toString().contains("compound") && dependency.gov().word().equals(rootSubject)) {
            subject = dependency.dep().word() + " " + subject;
        }
    }
    if (subject == null) {
        return null;
    }
    // Lemmatize the extracted phrase word by word.
    String lemmaSubject = "";
    for (final String lemma : new Sentence(subject).lemmas()) {
        lemmaSubject += lemma + " ";
    }
    return lemmaSubject.trim();
}
Example 2: getObject
import edu.stanford.nlp.trees.TypedDependency; // import the required package/class
private static String getObject(final List<TypedDependency> dependencies) {
    // Mirrors getSubject above, but keyed on the direct-object ("dobj") relation.
    String rootDirectObject = null, directObject = null;
    for (int i = dependencies.size() - 1; i >= 0; i--) {
        final TypedDependency dependency = dependencies.get(i);
        if (dependency.reln().toString().contains("dobj")) {
            rootDirectObject = directObject = dependency.dep().word();
        } else if (dependency.reln().toString().contains("compound") && dependency.gov().word().equals(rootDirectObject)) {
            directObject = dependency.dep().word() + " " + directObject;
        }
    }
    if (directObject == null) {
        return null;
    }
    String lemmaDirectObject = "";
    for (final String lemma : new Sentence(directObject).lemmas()) {
        lemmaDirectObject += lemma + " ";
    }
    return lemmaDirectObject.trim();
}
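Hypothetical usage of the two helpers above, assuming parser and gsf are initialized as in the intro sketch; the exact strings depend on the parse:

Tree tree = parser.parse("The committee approved the new budget.");
List<TypedDependency> deps =
        gsf.newGrammaticalStructure(tree).typedDependenciesCCprocessed();
System.out.println(getSubject(deps)); // likely "committee"
System.out.println(getObject(deps));  // likely "budget" ("new" is amod, not compound)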
Example 3: getAbbreviations
import edu.stanford.nlp.trees.TypedDependency; // import the required package/class
public static Map<String, String> getAbbreviations(final String text) {
    final List<TypedDependency> dependencies = structureFactory.newGrammaticalStructure(parser.parse(text))
            .typedDependenciesCCprocessed();
    final Map<String, String> abbreviations = new HashMap<>();
    String key = null, rootValue = null, value = null;
    // An appositive ("appos") dependent is treated as the abbreviation (key) of its
    // governor; "compound" modifiers of the governor are prepended to the expansion.
    for (int i = dependencies.size() - 1; i >= 0; i--) {
        final TypedDependency dependency = dependencies.get(i);
        if (dependency.reln().toString().contains("appos")) {
            if (key != null) {
                abbreviations.put(key, value);
            }
            key = dependency.dep().word();
            value = rootValue = dependency.gov().word();
        } else if (key != null && dependency.reln().toString().contains("compound") && dependency.gov().word().contains(rootValue)) {
            value = dependency.dep().word() + " " + value;
        }
    }
    if (key != null) {
        abbreviations.put(key, value);
    }
    return abbreviations;
}
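Hypothetical usage; the exact entries depend on how the parser attaches the appositive, but for a sentence like the one below we would expect {WHO=World Health Organization}:

Map<String, String> abbreviations = getAbbreviations(
        "The World Health Organization (WHO) issued new guidelines.");
System.out.println(abbreviations);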
Example 4: getSubgraphFromWords
import edu.stanford.nlp.trees.TypedDependency; // import the required package/class
/**
 * Given a sentence semantic graph and a list of words, get the subgraph containing
 * just the words in the list 'words'. Only typed dependencies whose governor and
 * dependent are both in the list are kept.
 * @param sg sentence semantic graph
 * @param words list of words to which the subgraph should be restricted
 * @return subgraph containing the words from 'words'
 * TODO: this needs to be double-checked! In some cases we get weird graphs with
 * missing words, e.g. sentence 120 from NYT ("The International ..."). Try this for
 * getting the subgraph when the source is detected.
 */
public static SemanticGraph getSubgraphFromWords(SemanticGraph sg, ObjectArrayList<IndexedWord> words){
    // Determine the root: the word with the smallest token index.
    int minInd = Integer.MAX_VALUE;
    IndexedWord root = new IndexedWord();
    for (IndexedWord w: words){
        if (w.index() < minInd){
            minInd = w.index();
            root = w;
        }
    }
    // Keep only the typed dependencies fully contained in 'words'.
    ObjectArrayList<TypedDependency> tds = new ObjectArrayList<TypedDependency>();
    for (TypedDependency td: sg.typedDependencies()){
        if (words.contains(td.gov()) && words.contains(td.dep()))
            tds.add(td);
    }
    // Create the semantic graph from the filtered dependencies.
    TreeGraphNode rootTGN = new TreeGraphNode(new CoreLabel(root));
    EnglishGrammaticalStructure gs = new EnglishGrammaticalStructure(tds, rootTGN);
    SemanticGraph phraseSg = SemanticGraphFactory.generateUncollapsedDependencies(gs);
    return phraseSg;
}
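A minimal sketch of driving getSubgraphFromWords from a CoreNLP depparse pipeline; the pipeline setup and the choice of the first three tokens are illustrative assumptions only (classes come from edu.stanford.nlp.pipeline, edu.stanford.nlp.ling, edu.stanford.nlp.semgraph, and fastutil):

Properties props = new Properties();
props.setProperty("annotators", "tokenize,ssplit,pos,depparse");
StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
Annotation doc = new Annotation("The quick brown fox jumps.");
pipeline.annotate(doc);
SemanticGraph sg = doc.get(CoreAnnotations.SentencesAnnotation.class).get(0)
        .get(SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class);
ObjectArrayList<IndexedWord> words = new ObjectArrayList<>();
for (IndexedWord w : sg.vertexListSorted()) {
    if (w.index() <= 3) words.add(w); // keep tokens 1..3
}
System.out.println(getSubgraphFromWords(sg, words).toCompactString());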
Example 5: getSubgraph
import edu.stanford.nlp.trees.TypedDependency; // import the required package/class
/**
 * Recursively collects into 'tds' the edges from 'parent' to those children that lie
 * within 'maxPathLength' of the root and are contained in 'words', then builds an
 * uncollapsed semantic graph from the collected dependencies.
 */
private static SemanticGraph getSubgraph(ObjectArrayList<TypedDependency> tds, SemanticGraph sg, IndexedWord parent,
        SemanticGraphEdge e, int maxPathLength, ObjectArrayList<IndexedWord> words){
    Set<IndexedWord> children = sg.getChildren(parent);
    for (IndexedWord child: children){
        if (((sg.getShortestDirectedPathEdges(sg.getFirstRoot(), child)).size() <= maxPathLength) &&
                words.contains(child)){
            e = sg.getEdge(parent, child);
            tds.add(new TypedDependency(e.getRelation(), parent, child));
            if (sg.hasChildren(child))
                getSubgraph(tds, sg, child, e, maxPathLength, words);
        } // else break;
    }
    TreeGraphNode rootTGN = new TreeGraphNode(new CoreLabel(parent));
    EnglishGrammaticalStructure gs = new EnglishGrammaticalStructure(tds, rootTGN);
    return SemanticGraphFactory.generateUncollapsedDependencies(gs);
}
Example 6: DepTree
import edu.stanford.nlp.trees.TypedDependency; // import the required package/class
/**
 * Recursively builds a dependency subtree rooted at 'root', mapping each relation's
 * short name to the child subtree; 'stack' tracks visited token indices to guard
 * against cycles in the dependency graph.
 */
public DepTree(TypedDependency root, Collection<TypedDependency> tds,
        List<? extends HasWord> sentence, int[] remapping, IntSet stack) {
    this.map = new HashMap<>();
    int t = root.dep().index();
    node = sentence.get(t - 1).word();
    //tag = root.dep().tag();
    tag = root.dep().label().tag();
    this.idx = remapping[t - 1];
    if (!stack.contains(t)) {
        IntSet stack2 = new IntRBTreeSet(stack);
        stack2.add(t);
        for (TypedDependency td : tds) {
            if (td.gov().index() == t && td.dep().index() != t) {
                map.put(td.reln().getShortName(), new DepTree(td, tds, sentence, remapping, stack2));
            }
        }
    }
}
Example 7: sortDependenciesByDependent
import edu.stanford.nlp.trees.TypedDependency; // import the required package/class
/**
 * Looks at the token indices and finds the dependency relations for each index.
 * It then stringifies these and returns them in order of index, from low to high.
 * The list is as long as the sentence, with null entries for token indices that
 * had no such relation.
 * @param deps The dependencies of the sentence.
 * @param tree The parse tree of the sentence, from which the tokens are pulled.
 * @return One list of stringified relations per token, in token order.
 */
private List<List<String>> sortDependenciesByDependent(Collection<TypedDependency> deps, Tree tree) {
    List<String> tokens = TreeOperator.stringLeavesFromTree(tree);
    int numTokens = tokens.size();
    List<List<String>> ordered = new ArrayList<List<String>>();
    for( int ii = 0; ii < numTokens; ii++ ) {
        // Get the relations. Dependency tokens are indexed from 1, not 0.
        List<String> relations = getRelationForDependent(deps, ii+1, tokens.get(ii), tree);
        ordered.add(relations);
    }
    return ordered;
}
Example 8: getDependenciesPerToken
import edu.stanford.nlp.trees.TypedDependency; // import the required package/class
/**
 * Pull out the dependencies for each token, and return them in order
 * of the tokens so they line up by index.
 * @param data The parsed document's file path.
 * @return A list of dependencies. null indicates the token had no parent in the graph.
 */
private List<String> getDependenciesPerToken(ProcessedData data) {
    List<String> tokens = new ArrayList<String>();
    List<Tree> trees = TreeOperator.stringsToTrees(data.getParseStrings());
    List<List<TypedDependency>> alldeps = data.getDependencies();
    int xx = 0;
    for( Collection<TypedDependency> sentdeps : alldeps ) {
        Tree tree = trees.get(xx++);
        List<List<String>> sortedDeps = sortDependenciesByDependent(sentdeps, tree);
        for( List<String> deps : sortedDeps ) {
            if( deps == null ) {          // the token had no parent in the graph
                tokens.add(null);
                continue;
            }
            for( String dep : deps )
                tokens.add(dep.toLowerCase());
        }
    }
    return tokens;
}
Example 9: dependencyPath
import edu.stanford.nlp.trees.TypedDependency; // import the required package/class
/**
 * Calculate the shortest dependency path from token index start to end.
 * Indices start at 1, so the first word in the sentence is index 1.
 * @return A single string representing the shortest path.
 */
public static String dependencyPath(int start, int end, List<TypedDependency> deps) {
    List<String> paths = paths(start, end, deps, null);
    // One path? Return it now!
    if( paths.size() == 1 )
        return paths.get(0);
    // More than one path. Find the shortest by counting "->" and "<-" hops.
    String shortest = null;
    int dist = Integer.MAX_VALUE;
    for( String path : paths ) {
        int count = path.split("->").length;
        count += path.split("<-").length;
        if( count < dist ) {
            dist = count;
            shortest = path;
        }
    }
    return shortest;
}
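Hypothetical usage; deps is assumed to come from a parse as in the intro sketch, and judging from the split logic above the returned path joins tokens with "->" and "<-" arrows:

// Shortest dependency path between the 1st and 5th tokens (1-based indices).
String path = dependencyPath(1, 5, deps);
System.out.println(path);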
Example 10: objectsInSentence
import edu.stanford.nlp.trees.TypedDependency; // import the required package/class
/**
 * Finds all words that have object relations, and returns a map from the word indices
 * to a list of all object strings with their indices in the sentence.
 * @return A map from token index (governor) to its list of objects.
 */
public static Map<Integer,List<WordPosition>> objectsInSentence(int sid, Collection<TypedDependency> sentDeps) {
    Map<Integer,List<WordPosition>> objects = new HashMap<Integer,List<WordPosition>>();
    for( TypedDependency dep : sentDeps ) {
        String reln = CountTokenPairs.normalizeRelation(dep.reln().toString(), false);
        if( reln.equals(WordEvent.DEP_OBJECT) ) {
            List<WordPosition> strs = objects.get(dep.gov().index());
            if( strs == null ) {
                strs = new ArrayList<WordPosition>();
                objects.put(dep.gov().index(), strs);
            }
            strs.add(new WordPosition(sid, dep.dep().index(), dep.dep().label().value().toLowerCase()));
        }
    }
    return objects;
}
Example 11: isDefinite
import edu.stanford.nlp.trees.TypedDependency; // import the required package/class
/**
 * @return True if the indexed word has a known definite determiner.
 * False otherwise, but this doesn't necessarily mean it's not definite.
 */
public static boolean isDefinite(Vector<TypedDependency> deps, int index) {
    for( TypedDependency dep : deps ) {
        int govIndex = dep.gov().index();
        if( govIndex == index ) {
            if( dep.reln().toString().equals("det") ) {
                String determiner = dep.dep().toString();
                // the, that, this, these, those, them
                if( determiner.startsWith("th") )
                    return true;
            }
        }
    }
    return false;
}
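A quick, hypothetical check of isDefinite, again assuming parser and gsf from the intro sketch (the dependency collection is wrapped in a Vector to match the signature):

Tree tree = parser.parse("I saw the dog.");
Vector<TypedDependency> deps =
        new Vector<>(gsf.newGrammaticalStructure(tree).typedDependencies());
System.out.println(isDefinite(deps, 4)); // token 4 is "dog"; expect true

Note that the startsWith("th") test is case-sensitive, so a sentence-initial "The" (stringified as something like "The-1") would not match.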
Example 12: isPossessive
import edu.stanford.nlp.trees.TypedDependency; // import the required package/class
/**
 * Determines if the given word index is the possessor in a possessive
 * relationship. This is either a "poss" reln, or a "prep_of" relation when
 * the index is a definite NP or an NER-recognized (proper) noun.
 *
 * NOTE: For MUC, most important possessives are event nouns, so we could just
 * call isNominative and not this function.
 */
public static boolean isPossessive(Tree tree, List<NERSpan> ners,
        Vector<TypedDependency> deps, int index) {
    Tree subtree = TreeOperator.indexToSubtree(tree, index);
    String posTag = subtree.label().value();
    if( posTag.startsWith("NN") ) {
        for( TypedDependency dep : deps ) {
            int depIndex = dep.dep().index();
            if( depIndex == index ) {
                String reln = dep.reln().toString();
                if( reln.equals("poss") ) return true;
                if( reln.equals("prep_of") ) {
                    if( isDefinite(deps, index) || isProper(ners, index) ) {
                        return true;
                    }
                }
            }
        }
    }
    return false;
}
Example 13: countTokenPairsWithCoref
import edu.stanford.nlp.trees.TypedDependency; // import the required package/class
/**
 * Count all pairs of tokens with coref arguments in the document.
 * This counts token pairs with their relations, not just tokens.
 */
private void countTokenPairsWithCoref(List<Tree> trees, List<List<TypedDependency>> deps, List<EntityMention> mentions, List<NERSpan> ners) {
    // Now get the "token:arg" events whose arg may corefer elsewhere.
    List<WordEvent> events = extractEvents(trees, deps, mentions, _wordnet, _tokenType, _fullPrep);
    // Count arguments of tokens with their objects (collocations).
    if( _countObjectCollocations ) {
        List<WordEvent> allNewEvents = new ArrayList<WordEvent>();
        for( WordEvent event : events ) {
            List<WordEvent> newEvents = getCollocations(event, deps.get(event.sentenceID()-1), ners, _wordnet);
            if( newEvents != null ) {
                allNewEvents.addAll(newEvents);
            }
        }
        events.addAll(allNewEvents);
    }
    // Count the pairs.
    countEventPairs(events, 10000);
}
Example 14: getDep
import edu.stanford.nlp.trees.TypedDependency; // import the required package/class
public static String getDep(String parse) {
    Tree t;
    StringBuilder sb = new StringBuilder();
    try {
        t = tf.newTreeReader(new StringReader(parse)).readTree();
        GrammaticalStructure gs = gsf.newGrammaticalStructure(t);
        Iterator<TypedDependency> it = gs.typedDependenciesCollapsed()
                .iterator();
        while (it.hasNext()) {
            sb.append(it.next()).append('\t');
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
    return sb.toString();
}
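Hypothetical usage with a bracketed Penn Treebank parse string; tf (a TreeReaderFactory) and gsf (a GrammaticalStructureFactory) are assumed to be initialized as class fields:

String parse = "(ROOT (S (NP (DT The) (NN dog)) (VP (VBZ barks)) (. .)))";
// Prints the collapsed typed dependencies, tab-separated.
System.out.println(getDep(parse));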
Example 15: getWords
import edu.stanford.nlp.trees.TypedDependency; // import the required package/class
public List<String> getWords(List<List<TypedDependency>> deps, List<NERSpan> ners) {
    List<String> words = new ArrayList<String>();
    for( NERSpan span : ners ) {
        List<TypedDependency> sentdeps = deps.get(span.sid());
        for( TypedDependency dep : sentdeps ) {
            int govindex = dep.gov().index();
            int depindex = dep.dep().index();
            // Collect any governor or dependent whose index falls inside the NER span.
            if( span.start() <= govindex && span.end() > govindex ) {
                words.add(dep.gov().value());
            }
            if( span.start() <= depindex && span.end() > depindex ) {
                words.add(dep.dep().value());
            }
        }
    }
    return words;
}