本文整理汇总了Java中edu.stanford.nlp.trees.HeadFinder类的典型用法代码示例。如果您正苦于以下问题:Java HeadFinder类的具体用法?Java HeadFinder怎么用?Java HeadFinder使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
HeadFinder类属于edu.stanford.nlp.trees包,在下文中一共展示了HeadFinder类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: makeConcreteCParse
import edu.stanford.nlp.trees.HeadFinder; //导入依赖的package包/类
/**
 * Converts a Stanford parse tree into a Concrete Parse, verifying that the
 * tree covers exactly the sentence's tokens. Whenever there's an empty parse,
 * the required constituent list is set to an empty list; it's up to the
 * caller what to do with the returned Parse.
 *
 * @param root the root of the Stanford parse tree
 * @param n the number of tokens in the sentence
 * @param tokenizationUUID UUID of the tokenization this parse annotates
 * @param hf head finder used when building constituents
 * @throws AnalyticException if the leaf count disagrees with {@code n}
 */
private Parse makeConcreteCParse(Tree root, int n, UUID tokenizationUUID, HeadFinder hf) throws AnalyticException {
  final int leafCount = root.getLeaves().size();
  if (leafCount != n) {
    throw new AnalyticException("number of leaves in the parse (" + leafCount
        + ") is not equal to the number of tokens in the sentence (" + n + ")");
  }
  final Parse parse = new ParseFactory(this.gen).create();
  // NOTE(review): deps is populated but never attached to the metadata below —
  // looks like a possible oversight; confirm against AnnotationMetadata's API.
  final TheoryDependencies deps = new TheoryDependencies();
  deps.addToTokenizationTheoryList(tokenizationUUID);
  parse.setMetadata(new AnnotationMetadata("Stanford CoreNLP", Timing.currentLocalTime(), 1));
  // Recursively build the constituent list, spanning tokens [0, leafCount).
  constructConstituent(root, 0, leafCount, n, parse, tokenizationUUID, hf);
  if (!parse.isSetConstituentList()) {
    LOGGER.warn("Setting constituent list to compensate for the empty parse for tokenization id {} and tree {}", tokenizationUUID, root);
    parse.setConstituentList(new ArrayList<Constituent>());
  }
  return parse;
}
示例2: UnlabeledAttachmentEval
import edu.stanford.nlp.trees.HeadFinder; //导入依赖的package包/类
/**
 * Constructs an unlabeled-attachment evaluator.
 *
 * @param str name of the metric
 * @param runningAverages whether to report running averages
 * @param headFinder head finder used to extract dependencies
 * @param punctRejectFilter word-level filter that accepts non-punctuation words
 */
public UnlabeledAttachmentEval(String str, boolean runningAverages, HeadFinder headFinder, Filter<String> punctRejectFilter) {
  super(str, runningAverages);
  this.headFinder = headFinder;
  this.punctRejectWordFilter = punctRejectFilter;
  // Semantics here are inverted-sounding: accept() == true means the dependent
  // is NOT punctuation, so this filter passes everything except punctuation
  // dependencies (it delegates to the word-level filter on the dependent word).
  this.punctRejectFilter = new Filter<Dependency<Label,Label,Object>>() {
    private static final long serialVersionUID = 649358302237611081L;
    @Override
    public boolean accept(Dependency<Label, Label, Object> dep) {
      return punctRejectWordFilter.accept(dep.dependent().value());
    }
  };
}
示例3: PreNERCoreMapWrapper
import edu.stanford.nlp.trees.HeadFinder; //导入依赖的package包/类
/**
 * Wraps a CoreNLP {@link CoreMap} sentence prior to NER processing,
 * caching its (possibly absent) parse tree and dependency annotations.
 *
 * @param cm the CoreNLP sentence to wrap
 * @param hf head finder to use for constituent head marking
 * @param gen UUID generator for produced Concrete objects
 */
public PreNERCoreMapWrapper(final CoreMap cm, final HeadFinder hf, final AnalyticUUIDGenerator gen) {
  this.hf = hf;
  this.gen = gen;
  this.wrapper = new CoreMapWrapper(cm, gen);
  // Any of these annotations may be missing from the CoreMap; wrap as Optional.
  this.tree = Optional.ofNullable(cm.get(TreeAnnotation.class));
  this.basicDeps = Optional.ofNullable(cm.get(BasicDependenciesAnnotation.class));
  this.colDeps = Optional.ofNullable(cm.get(CollapsedDependenciesAnnotation.class));
  this.colCCDeps = Optional.ofNullable(cm.get(CollapsedCCProcessedDependenciesAnnotation.class));
}
示例4: annotationToSentenceList
import edu.stanford.nlp.trees.HeadFinder; //导入依赖的package包/类
/**
 * Merges each CoreNLP sentence in {@code anno} with the corresponding
 * original Concrete sentence (matched by position) and returns the merged list.
 *
 * @param anno annotation whose SentencesAnnotation parallels {@code origSentListRef}
 * @param hf head finder passed to each sentence wrapper
 * @param origSentListRef original sentences, index-aligned with the annotation
 * @param gen UUID generator for produced Concrete objects
 * @throws AnalyticException if sentence conversion fails
 */
private static List<Sentence> annotationToSentenceList(Annotation anno, HeadFinder hf, final List<Sentence> origSentListRef, final AnalyticUUIDGenerator gen)
    throws AnalyticException {
  final List<CoreMap> coreMaps = anno.get(SentencesAnnotation.class);
  final List<Sentence> merged = new ArrayList<>(coreMaps.size());
  int idx = 0;
  for (CoreMap cm : coreMaps) {
    final Sentence original = origSentListRef.get(idx++);
    // Token offsets in the merged sentence are relative to the original's start.
    final int sentenceOffset = original.getTextSpan().getStart();
    merged.add(new PreNERCoreMapWrapper(cm, hf, gen).toSentence(sentenceOffset, original));
  }
  return merged;
}
示例5: headFinder
import edu.stanford.nlp.trees.HeadFinder; //导入依赖的package包/类
/**
 * Returns the head finder for this treebank: a {@link TigerHeadFinder}.
 * (Note: an earlier doc comment said NegraHeadFinder, but a TigerHeadFinder
 * is what this implementation actually vends.)
 */
@Override
public HeadFinder headFinder() {
  return new TigerHeadFinder();
}
示例6: tigerSemanticHeadFinder
import edu.stanford.nlp.trees.HeadFinder; //导入依赖的package包/类
/**
 * Vends a "semantic" TigerHeadFinder — one that disprefers modal/auxiliary
 * verbs as the heads of S or VP by reordering the head-finding rules so full
 * verbs are preferred over VMFIN/VAFIN/VAIMP.
 *
 * @return a TigerHeadFinder with "semantic" head rules for S, VP and VZ
 */
public static HeadFinder tigerSemanticHeadFinder() {
  final TigerHeadFinder hf = new TigerHeadFinder();
  // S: prefer finite full verbs, then VP, then modals/auxiliaries, then embedded S.
  hf.nonTerminalInfo.put("S", new String[][]{{hf.right, "VVFIN", "VVIMP"}, {"right", "VP", "CVP"}, {"right", "VMFIN", "VAFIN", "VAIMP"}, {"right", "S", "CS"}});
  // VP: prefer non-finite full verb forms over VZ and auxiliary participles.
  hf.nonTerminalInfo.put("VP", new String[][]{{"right", "VVINF", "VVIZU", "VVPP"}, {hf.right, "VZ", "VAINF", "VMINF", "VMPP", "VAPP", "PP"}});
  // VZ < VVIZU is very rare, maybe shouldn't even exist.
  hf.nonTerminalInfo.put("VZ", new String[][]{{hf.right, "VVINF", "VAINF", "VMINF", "VVFIN", "VVIZU"}});
  return hf;
}
示例7: TregexParser
import edu.stanford.nlp.trees.HeadFinder; //导入依赖的package包/类
/**
 * Constructs a TregexParser over {@code stream}, additionally configuring the
 * basic-category function and head finder used while resolving relations.
 */
public TregexParser(java.io.Reader stream,
    Function<String, String> basicCatFunction,
    HeadFinder headFinder) {
  this(stream);  // delegate stream setup first (must be the first statement)
  this.headFinder = headFinder;
  this.basicCatFunction = basicCatFunction;
}
示例8: getRelation
import edu.stanford.nlp.trees.HeadFinder; //导入依赖的package包/类
/**
 * Static factory method for all relations with no arguments. Includes:
 * DOMINATES, DOMINATED_BY, PARENT_OF, CHILD_OF, PRECEDES,
 * IMMEDIATELY_PRECEDES, HAS_LEFTMOST_DESCENDANT, HAS_RIGHTMOST_DESCENDANT,
 * LEFTMOST_DESCENDANT_OF, RIGHTMOST_DESCENDANT_OF, SISTER_OF, LEFT_SISTER_OF,
 * RIGHT_SISTER_OF, IMMEDIATE_LEFT_SISTER_OF, IMMEDIATE_RIGHT_SISTER_OF,
 * HEADS, HEADED_BY, IMMEDIATELY_HEADS, IMMEDIATELY_HEADED_BY, ONLY_CHILD_OF,
 * HAS_ONLY_CHILD, EQUALS
 *
 * @param s The String representation of the relation
 * @param basicCatFunction maps labels to basic categories
 * @param headFinder head finder for the head-based relations
 * @return The singleton static relation of the specified type
 * @throws ParseException If bad relation s
 */
static Relation getRelation(String s,
    Function<String, String> basicCatFunction,
    HeadFinder headFinder)
    throws ParseException {
  // Argument-free relations are interned up front in the simple-relations map.
  if (SIMPLE_RELATIONS_MAP.containsKey(s)) {
    return SIMPLE_RELATIONS_MAP.get(s);
  }
  // Shorthands that expand to relations with an explicit child-index argument.
  if (s.equals("<,")) {
    return getRelation("<", "1", basicCatFunction, headFinder);
  }
  if (parentOfLastChild.matcher(s).matches()) {
    return getRelation("<", "-1", basicCatFunction, headFinder);
  }
  if (s.equals(">,")) {
    return getRelation(">", "1", basicCatFunction, headFinder);
  }
  if (lastChildOfParent.matcher(s).matches()) {
    return getRelation(">", "-1", basicCatFunction, headFinder);
  }
  // Finally, the head relations, which need the HeadFinder.
  final Relation headRelation;
  if (s.equals(">>#")) {
    headRelation = new Heads(headFinder);
  } else if (s.equals("<<#")) {
    headRelation = new HeadedBy(headFinder);
  } else if (s.equals(">#")) {
    headRelation = new ImmediatelyHeads(headFinder);
  } else if (s.equals("<#")) {
    headRelation = new ImmediatelyHeadedBy(headFinder);
  } else {
    throw new ParseException("Unrecognized simple relation " + s);
  }
  return Interner.globalIntern(headRelation);
}
示例9: negraSemanticHeadFinder
import edu.stanford.nlp.trees.HeadFinder; //导入依赖的package包/类
/**
 * Vends a "semantic" NegraHeadFinder — one that disprefers modal/auxiliary
 * verbs as the heads of S or VP by reordering the head-finding rules so full
 * verbs are preferred over VMFIN/VAFIN/VAIMP.
 *
 * @return a NegraHeadFinder with "semantic" head rules for S, VP and VZ
 */
public static HeadFinder negraSemanticHeadFinder() {
  final NegraHeadFinder hf = new NegraHeadFinder();
  // S: prefer finite full verbs, then VP, then modals/auxiliaries, then embedded S.
  hf.nonTerminalInfo.put("S", new String[][]{{hf.right, "VVFIN", "VVIMP"}, {"right", "VP", "CVP"}, {"right", "VMFIN", "VAFIN", "VAIMP"}, {"right", "S", "CS"}});
  // VP: prefer non-finite full verb forms over VZ and auxiliary participles.
  hf.nonTerminalInfo.put("VP", new String[][]{{"right", "VVINF", "VVIZU", "VVPP"}, {hf.right, "VZ", "VAINF", "VMINF", "VMPP", "VAPP", "PP"}});
  // VZ < VVIZU is very rare, maybe shouldn't even exist.
  hf.nonTerminalInfo.put("VZ", new String[][]{{hf.right, "VVINF", "VAINF", "VMINF", "VVFIN", "VVIZU"}});
  return hf;
}
示例10: main
import edu.stanford.nlp.trees.HeadFinder; //导入依赖的package包/类
/**
 * Simple testing entry point: loads a treebank from {@code args[0]},
 * percolates heads through every tree with a NoPunctuationHeadFinder,
 * and pretty-prints each head-annotated tree.
 */
public static void main(String[] args) {
  Treebank bank = new DiskTreebank();
  CategoryWordTag.suppressTerminalDetails = true;
  bank.loadPath(args[0]);
  final HeadFinder headFinder = new NoPunctuationHeadFinder();
  bank.apply(new TreeVisitor() {
    public void visitTree(Tree tree) {
      tree.percolateHeads(headFinder);
      tree.pennPrint();
      System.out.println();
    }
  });
}
示例11: constructConstituent
import edu.stanford.nlp.trees.HeadFinder; //导入依赖的package包/类
/**
 * Recursively converts {@code root} and all of its descendants into Concrete
 * constituents, appending each to {@code p}'s constituent list. IDs are
 * assigned in pre-order: each constituent's ID is the list size at the moment
 * it is created, so the recursion order determines the numbering.
 *
 * @param root the (sub)tree to convert
 * @param left token index of the first leaf spanned by {@code root}
 * @param right token index one past the last leaf spanned by {@code root}
 * @param n
 * is the length of the sentence in tokens.
 * @param p the Parse being built; mutated by this call
 * @param tokenizationUUID threaded through the recursion; not read directly here
 * @param hf head finder used to pick each constituent's head child
 * @return The constituent ID
 * @throws AnalyticException
 */
private static int constructConstituent(Tree root, int left,
int right, int n, Parse p, UUID tokenizationUUID, HeadFinder hf)
throws AnalyticException {
Constituent constituent = new Constituent();
constituent.setId(p.getConstituentListSize());
constituent.setTag(root.value());
constituent.setStart(left);
constituent.setEnding(right);
p.addToConstituentList(constituent);
Tree headTree = null;
if (!root.isLeaf()) {
try {
// Head finding can reject unknown categories; fall back to the rightmost child.
headTree = hf.determineHead(root);
} catch (java.lang.IllegalArgumentException iae) {
LOGGER.warn("Failed to find head, falling back on rightmost constituent.", iae);
headTree = root.children()[root.numChildren() - 1];
}
}
int i = 0, headTreeIdx = -1;
int leftPtr = left;
for (Tree child : root.getChildrenAsList()) {
// Each child spans [leftPtr, leftPtr + width) in token coordinates.
int width = child.getLeaves().size();
int childId = constructConstituent(child, leftPtr, leftPtr
+ width, n, p, tokenizationUUID, hf);
constituent.addToChildList(childId);
leftPtr += width;
// Reference equality is intentional: determineHead returns one of root's
// own child Tree objects, so == identifies the head child exactly.
if (headTree != null && child == headTree) {
assert (headTreeIdx < 0);
headTreeIdx = i;
}
i++;
}
if (headTreeIdx >= 0)
constituent.setHeadChildIndex(headTreeIdx);
// Leaves never enter the loop above; give them an explicit empty child list.
if (!constituent.isSetChildList())
constituent.setChildList(new ArrayList<Integer>());
return constituent.getId();
}
示例12: headFinder
import edu.stanford.nlp.trees.HeadFinder; //导入依赖的package包/类
/** {@inheritDoc} */
@Override  // added: this implements the interface's headFinder(); matches sibling overrides
public HeadFinder headFinder() {
  return new TueBaDZHeadFinder();
}
示例13: headFinder
import edu.stanford.nlp.trees.HeadFinder; //导入依赖的package包/类
/** {@inheritDoc} */
@Override  // added: this implements the interface's headFinder(); matches sibling overrides
public HeadFinder headFinder() {
  return new TigerHeadFinder(this);
}
示例14: setHeadFinder
import edu.stanford.nlp.trees.HeadFinder; //导入依赖的package包/类
/**
 * Replaces the HeadFinder stored in the static {@code headFinder} field.
 * NOTE(review): this mutates class-level state, so it affects every user of
 * this class at once; presumably not intended for concurrent use — confirm.
 */
static void setHeadFinder(HeadFinder hf) {
headFinder = hf;
}
示例15: main
import edu.stanford.nlp.trees.HeadFinder; //导入依赖的package包/类
/**
 * For testing: loads the treebank at {@code args[0]} (restricted to the
 * number ranges in {@code args[1]}), then runs every relation's self-test on
 * every subtree, printing the first offending tree/subtree pair and exiting.
 */
public static void main(String[] args) {
  if (args.length < 2) {
    System.err.println("usage: Relation treebank numberRanges");
    return;
  }
  FileFilter rangeFilter = new NumberRangesFileFilter(args[1], true);
  TreeReaderFactory readerFactory = new PennTreeReaderFactory(new NPTmpRetainingTreeNormalizer());
  DiskTreebank testTreebank = new DiskTreebank(readerFactory);
  testTreebank.loadPath(new File(args[0]), rangeFilter);
  HeadFinder hf = new ModCollinsHeadFinder();
  // All argument-free relations, plus representatives of each parameterized kind.
  List<Relation> relations = new ArrayList<Relation>(Arrays.asList(SIMPLE_RELATIONS));
  relations.add(new HasIthChild(2));
  relations.add(new HasIthChild(-1));
  relations.add(new IthChildOf(1));
  relations.add(new IthChildOf(-2));
  relations.add(new HeadedBy(hf));
  relations.add(new Heads(hf));
  relations.add(new ImmediatelyHeadedBy(hf));
  relations.add(new ImmediatelyHeads(hf));
  relations.add(new UnbrokenCategoryDominates("NP"));
  relations.add(new UnbrokenCategoryDominates("VP"));
  relations.add(new UnbrokenCategoryIsDominatedBy("NP"));
  relations.add(new UnbrokenCategoryIsDominatedBy("VP"));
  int trees = 0;
  int subtrees = 0;
  for (Tree root : testTreebank) {
    for (Tree tree : root.subTrees()) {
      // Stop at the first failing relation (the original short-circuited the
      // remaining testRelation calls once an error was seen, so this break
      // performs the same work).
      boolean error = false;
      for (Relation relation : relations) {
        if (relation.testRelation(tree, root)) {
          error = true;
          break;
        }
      }
      if (error) {
        System.err.println("Tree: ");
        root.pennPrint(System.err);
        System.err.println();
        System.err.println("SubTree: ");
        tree.pennPrint(System.err);
        System.err.println();
        System.exit(0);
      }
      subtrees++;
    }
    trees++;
  }
  System.out.println("Tested all relations on " + subtrees
      + " subtrees in " + trees + " trees with no errors.");
}