This article collects typical usage examples of the Java method edu.stanford.nlp.util.Filter.accept. If you are wondering what Filter.accept does, how to use it, or where to find examples of it, the curated method examples below may help. You can also explore further usage examples of the containing class, edu.stanford.nlp.util.Filter.
The following presents 15 code examples of the Filter.accept method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
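Before the examples, a note on the interface itself: in pre-3.5 releases of Stanford CoreNLP, edu.stanford.nlp.util.Filter&lt;T&gt; is a single-method callback interface (later replaced by java.util.function.Predicate), whose accept(T) returns true for objects that should be kept. A minimal sketch of a custom filter follows; the class name NonTraceFilter and the "-NONE-" label test are illustrative, not part of the library:

import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.util.Filter;

// Illustrative filter: keep every node except "-NONE-" empty elements.
// Filter extends Serializable in this API, hence the serialVersionUID.
public class NonTraceFilter implements Filter<Tree> {
  private static final long serialVersionUID = 1L;
  @Override
  public boolean accept(Tree t) {
    return !"-NONE-".equals(t.value());
  }
}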
Example 1: spliceOutHelper
import edu.stanford.nlp.util.Filter; // import the package/class the method depends on
private List<Tree> spliceOutHelper(Filter<Tree> nodeFilter, TreeFactory tf) {
// recurse over all children first
Tree[] kids = children();
List<Tree> l = new ArrayList<Tree>();
for (int i = 0; i < kids.length; i++) {
l.addAll(kids[i].spliceOutHelper(nodeFilter, tf));
}
// check whether this node is to be spliced out
if (nodeFilter.accept(this)) {
// no, it is kept: rebuild this node above the already-spliced children and return
Tree t;
if ( ! l.isEmpty()) {
t = tf.newTreeNode(label(), l);
} else {
t = tf.newLeaf(label());
}
l = new ArrayList<Tree>(1);
l.add(t);
return l;
}
// yes, this node is spliced out, so just pass its children up
return l;
}
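This private helper backs the public Tree.spliceOut(Filter&lt;Tree&gt;) method, which removes filtered-out nodes and promotes their children. A hedged usage sketch, where tree and the "XP" label test are placeholders:

// Splice out nodes labeled "XP", attaching their children to the parent.
Filter<Tree> keepNonXP = new Filter<Tree>() {
  public boolean accept(Tree t) {
    return !"XP".equals(t.value());
  }
};
Tree spliced = tree.spliceOut(keepNonXP);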
Example 2: prune
import edu.stanford.nlp.util.Filter; // import the package/class the method depends on
/**
* Creates a deep copy of the tree, where all nodes that the filter
* does not accept and all children of such nodes are pruned. If all
* of a node's children are pruned, that node is cut as well.
* A <code>Filter</code> can assume
* that it will not be called with a <code>null</code> argument.
*
* @param filter the filter to be applied
* @param tf the TreeFactory to be used to make new Tree nodes if needed
* @return a filtered copy of the tree, including the possibility of
* <code>null</code> if the root node of the tree is filtered
*/
public Tree prune(Filter<Tree> filter, TreeFactory tf) {
// is the current node to be pruned?
if ( ! filter.accept(this)) {
return null;
}
// if not, recurse over all children
List<Tree> l = new ArrayList<Tree>();
Tree[] kids = children();
for (int i = 0; i < kids.length; i++) {
Tree prunedChild = kids[i].prune(filter, tf);
if (prunedChild != null) {
l.add(prunedChild);
}
}
// and check if this node has lost all its children
if (l.isEmpty() && kids.length != 0) {
return null;
}
// if we're still ok, copy the node
if (isLeaf()) {
return tf.newLeaf(label());
}
return tf.newTreeNode(label(), l);
}
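The one-argument public overload Tree.prune(Filter&lt;Tree&gt;) delegates here with the tree's own TreeFactory. A hedged usage sketch (tree is a placeholder for a parsed Tree):

// Prune away all "-NONE-" empty-element subtrees.
Tree pruned = tree.prune(new Filter<Tree>() {
  public boolean accept(Tree t) {
    return !"-NONE-".equals(t.value());
  }
});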
Example 3: constituents
import edu.stanford.nlp.util.Filter; // import the package/class the method depends on
/**
* Adds the constituents derived from <code>this</code> tree to
* the ordered <code>Constituent</code> <code>Set</code>, beginning
* numbering from the second argument and returning the number of
* the right edge. The reason for the return of the right frontier
* is in order to produce bracketings recursively by threading through
* the daughters of a given tree.
*
* @param constituentsSet set of constituents to add results of bracketing
* this tree to
* @param left left position to begin labeling the bracketings with
* @param cf ConstituentFactory used to build the Constituent objects
* @param charLevel If true, compute constituents without respect to whitespace. Otherwise, preserve whitespace boundaries.
* @param filter A filter to use to decide whether or not to add a tree as a constituent.
* @return Index of right frontier of Constituent
*/
private int constituents(Set<Constituent> constituentsSet, int left, ConstituentFactory cf, boolean charLevel, Filter<Tree> filter) {
if(isPreTerminal())
return left + ((charLevel) ? firstChild().value().length() : 1);
int position = left;
// System.err.println("In bracketing trees left is " + left);
// System.err.println(" label is " + label() +
// "; num daughters: " + children().length);
Tree[] kids = children();
for (Tree kid : kids) {
position = kid.constituents(constituentsSet, position, cf, charLevel, filter);
// System.err.println(" position went to " + position);
}
if (filter == null || filter.accept(this)) {
//Compute span of entire tree at the end of recursion
constituentsSet.add(cf.newConstituent(left, position - 1, label(), score()));
}
// System.err.println(" added " + label());
return position;
}
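This private method sits behind the public constituents(...) overloads. A hedged sketch of calling it through the public API, assuming your CoreNLP version exposes an overload that takes a filter (some versions have constituents(ConstituentFactory, Filter&lt;Tree&gt;)); LabeledScoredConstituentFactory is the stock factory and tree is a placeholder:

import edu.stanford.nlp.trees.LabeledScoredConstituentFactory;

// Collect spans for phrasal nodes only, skipping preterminals.
Set<Constituent> spans = tree.constituents(
    new LabeledScoredConstituentFactory(),
    new Filter<Tree>() {
      public boolean accept(Tree t) {
        return t.isPhrasal();
      }
    });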
Example 4: prune
import edu.stanford.nlp.util.Filter; // import the package/class the method depends on
/**
* Creates a deep copy of the tree, where all nodes that the filter
* does not accept and all children of such nodes are pruned. If all
* of a node's children are pruned, that node is cut as well.
* A <code>Filter</code> can assume
* that it will not be called with a <code>null</code> argument.
*
* @param filter the filter to be applied
* @param tf the TreeFactory to be used to make new Tree nodes if needed
* @return a filtered copy of the tree, including the possibility of
* <code>null</code> if the root node of the tree is filtered
*/
public Tree prune(Filter<Tree> filter, TreeFactory tf) {
// is the current node to be pruned?
if ( ! filter.accept(this)) {
return null;
}
// if not, recurse over all children
List<Tree> l = new ArrayList<Tree>();
Tree[] kids = children();
for (int i = 0; i < kids.length; i++) {
Tree prunedChild = kids[i].prune(filter, tf);
if (prunedChild != null) {
l.add(prunedChild);
}
}
// and check if this node has lost all its children
if (l.isEmpty() && kids.length != 0) {
return null;
}
// if we're still ok, copy the node
if (isLeaf()) {
return tf.newLeaf(label());
}
return tf.newTreeNode(label(), l);
}
Example 5: spliceOutHelper
import edu.stanford.nlp.util.Filter; // import the package/class the method depends on
private List<Tree> spliceOutHelper(Filter<Tree> nodeFilter, TreeFactory tf) {
// recurse over all children first
Tree[] kids = children();
List<Tree> l = new ArrayList<Tree>();
for (Tree kid : kids) {
l.addAll(kid.spliceOutHelper(nodeFilter, tf));
}
// check whether this node is to be spliced out
if (nodeFilter.accept(this)) {
// no, it is kept: rebuild this node above the already-spliced children and return
Tree t;
if ( ! l.isEmpty()) {
t = tf.newTreeNode(label(), l);
} else {
t = tf.newLeaf(label());
}
l = new ArrayList<Tree>(1);
l.add(t);
return l;
}
// yes, this node is spliced out, so just pass its children up
return l;
}
Example 6: totalIntCount
import edu.stanford.nlp.util.Filter; // import the package/class the method depends on
/**
* Returns the total count for all objects in this Counter that pass the
* given Filter. Passing in a filter that always returns true is equivalent
* to calling {@link #totalCount()}.
*/
public int totalIntCount(Filter<E> filter) {
int total = 0;
for (E key : map.keySet()) {
if (filter.accept(key)) {
total += getIntCount(key);
}
}
return total;
}
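A hedged usage sketch, assuming the enclosing class is a counter along the lines of edu.stanford.nlp.stats.IntCounter; tokenCounter is a placeholder:

// Total only the counts of purely alphabetic keys.
Filter<String> alphabeticOnly = new Filter<String>() {
  public boolean accept(String key) {
    return key.matches("[A-Za-z]+");
  }
};
int alphaTotal = tokenCounter.totalIntCount(alphabeticOnly);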
Example 7: mapDependencies
import edu.stanford.nlp.util.Filter; // import the package/class the method depends on
/**
* Return a set of Label-Label dependencies, represented as
* Dependency objects, for the Tree. The Labels are the ones of the leaf
* nodes of the tree, without mucking with them.
*
* @param f Dependencies that are not accepted by the Filter are excluded
* @param hf The HeadFinder to use to identify the head of constituents.
* The code assumes that it can use <code>headTerminal(hf)</code> to find a
* tag and word to make a CyclicCoreLabel.
* @return Set of dependencies (each a <code>Dependency</code> between two
* <code>CyclicCoreLabel</code>s, which each contain a tag(), word(),
* and value(), the last two of which are identical).
*/
public Set<Dependency<Label, Label, Object>> mapDependencies(Filter<Dependency<Label, Label, Object>> f, HeadFinder hf) {
if (hf == null) {
throw new IllegalArgumentException("mapDependencies: need headfinder");
}
Set<Dependency<Label, Label, Object>> deps = new HashSet<Dependency<Label, Label, Object>>();
for (Tree node : this) {
if (node.isLeaf() || node.children().length < 2) {
continue;
}
// every child with a different head (or repeated) is an argument
// Label l = node.label();
// System.err.println("doing kids of label: " + l);
//Tree hwt = node.headPreTerminal(hf);
Tree hwt = node.headTerminal(hf);
// System.err.println("have hf, found head preterm: " + hwt);
if (hwt == null) {
throw new IllegalStateException("mapDependencies: headFinder failed!");
}
for (Tree child : node.children()) {
// Label dl = child.label();
// Tree dwt = child.headPreTerminal(hf);
Tree dwt = child.headTerminal(hf);
if (dwt == null) {
throw new IllegalStateException("mapDependencies: headFinder failed!");
}
//System.err.println("kid is " + dl);
//System.err.println("transformed to " + dml.toString("value{map}"));
if (dwt != hwt) {
Dependency<Label, Label, Object> p = new UnnamedDependency(hwt.label(), dwt.label());
if (f.accept(p)) {
deps.add(p);
}
}
}
}
return deps;
}
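A hedged usage sketch; Filters.acceptFilter() (an accept-everything filter) and CollinsHeadFinder belong to the same-era CoreNLP API, and tree is a placeholder:

import edu.stanford.nlp.trees.CollinsHeadFinder;
import edu.stanford.nlp.util.Filters;

// Extract every head-dependent pair in the tree.
Set<Dependency<Label, Label, Object>> deps = tree.mapDependencies(
    Filters.<Dependency<Label, Label, Object>>acceptFilter(),
    new CollinsHeadFinder());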
Example 8: dependencies
import edu.stanford.nlp.util.Filter; // import the package/class the method depends on
/**
* Return a set of node-node dependencies, represented as Dependency
* objects, for the Tree.
*
* @param f Only dependencies accepted by this Filter are included
* @param hf The HeadFinder to use to identify the head of constituents.
* If this is <code>null</code>, then nodes are assumed to already
* be marked with their heads.
* @return Set of dependencies (each a <code>Dependency</code>)
*/
@Override
public Set<Dependency<Label, Label, Object>> dependencies(Filter<Dependency<Label, Label, Object>> f, HeadFinder hf) {
Set<Dependency<Label, Label, Object>> deps = Generics.newHashSet();
for (Tree t : this) {
TreeGraphNode node = safeCast(t);
if (node == null || node.isLeaf() || node.children().length < 2) {
continue;
}
TreeGraphNode headWordNode;
if (hf != null) {
headWordNode = safeCast(node.headTerminal(hf));
} else {
headWordNode = node.headWordNode();
}
for (Tree k : node.children()) {
TreeGraphNode kid = safeCast(k);
if (kid == null) {
continue;
}
TreeGraphNode kidHeadWordNode;
if (hf != null) {
kidHeadWordNode = safeCast(kid.headTerminal(hf));
} else {
kidHeadWordNode = kid.headWordNode();
}
if (headWordNode != null && headWordNode != kidHeadWordNode) {
Dependency<Label, Label, Object> d = new UnnamedDependency(headWordNode, kidHeadWordNode);
if (f.accept(d)) {
deps.add(d);
}
}
}
}
return deps;
}
Example 9: getDep
import edu.stanford.nlp.util.Filter; // import the package/class the method depends on
/** Looks through the tree t and adds to the List basicDep dependencies
* which aren't in it but which satisfy the filter f.
*
* @param t The tree to examine (not changed)
* @param basicDep The list of dependencies which may be augmented
* @param f Additional dependencies are added only if they pass this filter
*/
private static void getDep(TreeGraphNode t, List<TypedDependency> basicDep,
Filter<TypedDependency> f) {
if (t.numChildren() > 0) { // don't do leaves
Map<Class<? extends CoreAnnotation>, Set<TreeGraphNode>> depMap = getAllDependents(t);
for (Class<? extends CoreAnnotation> depName : depMap.keySet()) {
for (TreeGraphNode depNode : depMap.get(depName)) {
TreeGraphNode gov = t.headWordNode();
TreeGraphNode dep = depNode.headWordNode();
if (gov != dep) {
List<GrammaticalRelation> rels = getListGrammaticalRelation(t, depNode);
if (!rels.isEmpty()) {
for (GrammaticalRelation rel : rels) {
TypedDependency newDep = new TypedDependency(rel, gov, dep);
if (!basicDep.contains(newDep) && f.accept(newDep)) {
newDep.setExtra();
basicDep.add(newDep);
}
}
}
}
}
}
// now recurse into children
for (Tree kid : t.children()) {
getDep((TreeGraphNode) kid, basicDep, f);
}
}
}
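The filter passed to getDep is typically one that screens out unwanted relations. A hedged sketch of such a TypedDependency filter; the "punct" short-name test is illustrative:

// Reject dependencies whose grammatical relation is punctuation.
Filter<TypedDependency> noPunct = new Filter<TypedDependency>() {
  public boolean accept(TypedDependency td) {
    return !"punct".equals(td.reln().getShortName());
  }
};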
Example 10: mapDependencies
import edu.stanford.nlp.util.Filter; // import the package/class the method depends on
/**
* Return a set of Label-Label dependencies, represented as
* Dependency objects, for the Tree. The Labels are the ones of the leaf
* nodes of the tree, without mucking with them.
*
* @param f Dependencies that are not accepted by the Filter are excluded
* @param hf The HeadFinder to use to identify the head of constituents.
* The code assumes that it can use <code>headTerminal(hf)</code> to find a
* tag and word to make a CyclicCoreLabel.
* @return Set of dependencies (each a <code>Dependency</code> between two
* <code>CyclicCoreLabel</code>s, which each contain a tag(), word(),
* and value(), the last two of which are identical).
*/
public Set<Dependency<Label, Label, Object>> mapDependencies(Filter<Dependency<Label, Label, Object>> f, HeadFinder hf) {
if (hf == null) {
throw new IllegalArgumentException("mapDependencies: need headfinder");
}
Set<Dependency<Label, Label, Object>> deps = new HashSet<Dependency<Label, Label, Object>>();
for (Tree node : this) {
if (node.isLeaf() || node.children().length < 2) {
continue;
}
// Label l = node.label();
// System.err.println("doing kids of label: " + l);
//Tree hwt = node.headPreTerminal(hf);
Tree hwt = node.headTerminal(hf);
// System.err.println("have hf, found head preterm: " + hwt);
if (hwt == null) {
throw new IllegalStateException("mapDependencies: headFinder failed!");
}
for (Tree child : node.children()) {
// Label dl = child.label();
// Tree dwt = child.headPreTerminal(hf);
Tree dwt = child.headTerminal(hf);
if (dwt == null) {
throw new IllegalStateException("mapDependencies: headFinder failed!");
}
//System.err.println("kid is " + dl);
//System.err.println("transformed to " + dml.toString("value{map}"));
if (dwt != hwt) {
Dependency<Label, Label, Object> p = new UnnamedDependency(hwt.label(), dwt.label());
if (f.accept(p)) {
deps.add(p);
}
}
}
}
return deps;
}
Example 11: getTreeDeps
import edu.stanford.nlp.util.Filter; // import the package/class the method depends on
/** Looks through the tree t and adds to the List basicDep dependencies
* which aren't in it but which satisfy the filter puncTypedDepFilter.
*
* @param t The tree to examine (not changed)
* @param basicDep The list of dependencies which may be augmented
* @param puncTypedDepFilter Additional dependencies are added only if they pass this filter
* @param extraTreeDepFilter Additional dependencies must also pass this filter to be added
*/
private static void getTreeDeps(TreeGraphNode t, List<TypedDependency> basicDep,
Filter<TypedDependency> puncTypedDepFilter,
Filter<TypedDependency> extraTreeDepFilter) {
if (t.isPhrasal()) { // don't do leaves of POS tags (chris changed this from numChildren > 0 in 2010)
Map<Class<? extends GrammaticalRelationAnnotation>, Set<TreeGraphNode>> depMap = getAllDependents(t);
for (Class<? extends GrammaticalRelationAnnotation> depName : depMap.keySet()) {
for (TreeGraphNode depNode : depMap.get(depName)) {
TreeGraphNode gov = t.headWordNode();
TreeGraphNode dep = depNode.headWordNode();
if (gov != dep) {
List<GrammaticalRelation> rels = getListGrammaticalRelation(t, depNode);
if (!rels.isEmpty()) {
for (GrammaticalRelation rel : rels) {
TypedDependency newDep = new TypedDependency(rel, gov, dep);
if (!basicDep.contains(newDep) && puncTypedDepFilter.accept(newDep) && extraTreeDepFilter.accept(newDep)) {
newDep.setExtra();
basicDep.add(newDep);
}
}
}
}
}
}
// now recurse into children
for (Tree kid : t.children()) {
getTreeDeps((TreeGraphNode) kid, basicDep, puncTypedDepFilter, extraTreeDepFilter);
}
}
}
Example 12: checkTimeExpression
import edu.stanford.nlp.util.Filter; // import the package/class the method depends on
/**
* Checks a time expression against the list of filter rules for invalid time expressions.
* @param timeExpr the time expression to check
* @return true if every filter rule accepts the expression; false otherwise
*/
protected boolean checkTimeExpression(TimeExpression timeExpr)
{
for (Filter<TimeExpression> filterRule:filterRules) {
if (!filterRule.accept(timeExpr)) {
return false;
}
}
return true;
}
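A hedged sketch of a rule that could populate filterRules; it assumes TimeExpression inherits getText() from MatchedExpression, as in SUTime-era CoreNLP, so treat that accessor as an assumption:

// Illustrative rule: reject time expressions whose matched text is a single character.
Filter<TimeExpression> minLengthRule = new Filter<TimeExpression>() {
  public boolean accept(TimeExpression te) {
    String text = te.getText();
    return text != null && text.length() > 1;
  }
};
filterRules.add(minLengthRule);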
Example 13: getTreeDeps
import edu.stanford.nlp.util.Filter; // import the package/class the method depends on
/** Looks through the tree t and adds to the List basicDep dependencies
* which aren't in it but which satisfy the filter f.
*
* @param t The tree to examine (not changed)
* @param basicDep The list of dependencies which may be augmented
* @param f Additional dependencies are added only if they pass this filter
*/
private static void getTreeDeps(TreeGraphNode t, List<TypedDependency> basicDep,
Filter<TypedDependency> f) {
if (t.isPhrasal()) { // don't do leaves of POS tags (chris changed this from numChildren > 0 in 2010)
Map<Class<? extends GrammaticalRelationAnnotation>, Set<TreeGraphNode>> depMap = getAllDependents(t);
for (Class<? extends GrammaticalRelationAnnotation> depName : depMap.keySet()) {
for (TreeGraphNode depNode : depMap.get(depName)) {
TreeGraphNode gov = t.headWordNode();
TreeGraphNode dep = depNode.headWordNode();
if (gov != dep) {
List<GrammaticalRelation> rels = getListGrammaticalRelation(t, depNode);
if (!rels.isEmpty()) {
for (GrammaticalRelation rel : rels) {
TypedDependency newDep = new TypedDependency(rel, gov, dep);
if (!basicDep.contains(newDep) && f.accept(newDep)) {
newDep.setExtra();
basicDep.add(newDep);
}
}
}
}
}
}
// now recurse into children
for (Tree kid : t.children()) {
getTreeDeps((TreeGraphNode) kid, basicDep, f);
}
}
}
Example 14: dependencies
import edu.stanford.nlp.util.Filter; // import the package/class the method depends on
/**
* Return a set of node-node dependencies, represented as Dependency
* objects, for the Tree.
*
* @param f Only dependencies accepted by this Filter are included
* @param hf The HeadFinder to use to identify the head of constituents.
* If this is <code>null</code>, then nodes are assumed to already
* be marked with their heads.
* @return Set of dependencies (each a <code>Dependency</code>)
*/
public Set<Dependency<Label, Label, Object>> dependencies(Filter<Dependency<Label, Label, Object>> f, HeadFinder hf) {
Set<Dependency<Label, Label, Object>> deps = Generics.newHashSet();
for (Tree t : this) {
TreeGraphNode node = safeCast(t);
if (node == null || node.isLeaf() || node.children().length < 2) {
continue;
}
TreeGraphNode headWordNode;
if (hf != null) {
headWordNode = safeCast(node.headTerminal(hf));
} else {
headWordNode = node.headWordNode();
}
for (Tree k : node.children()) {
TreeGraphNode kid = safeCast(k);
if (kid == null) {
continue;
}
TreeGraphNode kidHeadWordNode;
if (hf != null) {
kidHeadWordNode = safeCast(kid.headTerminal(hf));
} else {
kidHeadWordNode = kid.headWordNode();
}
if (headWordNode != null && headWordNode != kidHeadWordNode && kidHeadWordNode != null) {
int headWordNodeIndex = headWordNode.index();
int kidHeadWordNodeIndex = kidHeadWordNode.index();
// If the two indices are equal, then the leaves haven't been indexed. Just return an ordinary
// UnnamedDependency. This mirrors the implementation of super.dependencies().
Dependency<Label, Label, Object> d = (headWordNodeIndex == kidHeadWordNodeIndex) ?
new UnnamedDependency(headWordNode, kidHeadWordNode) :
new UnnamedConcreteDependency(headWordNode, headWordNodeIndex, kidHeadWordNode, kidHeadWordNodeIndex);
if (f.accept(d)) {
deps.add(d);
}
}
}
}
return deps;
}
Example 15: getDeps
import edu.stanford.nlp.util.Filter; // import the package/class the method depends on
/**
* Builds a list of typed dependencies using
* information from a <code>GrammaticalStructure</code>.
*
* @param getExtra If true, the list of typed dependencies will contain extra ones.
* If false, the list of typed dependencies will respect the tree structure.
* @param puncTypedDepFilter Only dependencies accepted by this filter (typically
* one rejecting punctuation) are added
* @return The list of typed dependencies
*/
private List<TypedDependency> getDeps(boolean getExtra, Filter<TypedDependency> puncTypedDepFilter) {
List<TypedDependency> basicDep = Generics.newArrayList();
for (Dependency<Label, Label, Object> d : dependencies()) {
TreeGraphNode gov = (TreeGraphNode) d.governor();
TreeGraphNode dep = (TreeGraphNode) d.dependent();
//System.out.println("Gov: " + gov);
//System.out.println("Dep: " + dep);
GrammaticalRelation reln = getGrammaticalRelation(gov, dep);
//System.out.println("Reln: " + reln);
basicDep.add(new TypedDependency(reln, gov, dep));
}
// add the root
TreeGraphNode dependencyRoot = new TreeGraphNode(new Word("ROOT"));
dependencyRoot.setIndex(0);
TreeGraphNode rootDep = null;
Collection<TypedDependency> roots = getRoots(basicDep);
if (roots.size() == 0) {
// This can happen if the sentence has only one non-punctuation
// word. In that case, we still want to add the root->word
// dependency, but we won't find any roots using the getRoots()
// method. Instead we use the HeadFinder and the tree.
List<Tree> leaves = Trees.leaves(root());
if (leaves.size() > 0) {
Tree leaf = leaves.get(0);
if (!(leaf instanceof TreeGraphNode)) {
throw new AssertionError("Leaves should be TreeGraphNodes");
}
rootDep = (TreeGraphNode) leaf;
if (rootDep.headWordNode() != null) {
rootDep = rootDep.headWordNode();
}
}
} else {
// since roots.size() > 0, there must be at least one element
Iterator<TypedDependency> iterator = roots.iterator();
rootDep = iterator.next().gov();
}
if (rootDep != null) {
TypedDependency rootTypedDep =
new TypedDependency(ROOT, dependencyRoot, rootDep);
if (puncTypedDepFilter.accept(rootTypedDep)) {
basicDep.add(rootTypedDep);
}
}
postProcessDependencies(basicDep);
if (getExtra) {
getExtras(basicDep);
// adds stuff to basicDep based on the tregex patterns over the tree
getTreeDeps(root(), basicDep, puncTypedDepFilter, extraTreeDepFilter());
}
Collections.sort(basicDep);
return basicDep;
}
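For context, a hedged sketch of the public path that ends in getDeps(...); EnglishGrammaticalStructure and both accessors exist in the same-era API, and tree is a placeholder for a parsed Tree:

import edu.stanford.nlp.trees.EnglishGrammaticalStructure;

GrammaticalStructure gs = new EnglishGrammaticalStructure(tree);
Collection<TypedDependency> basic = gs.typedDependencies();         // respects the tree structure
Collection<TypedDependency> withExtras = gs.allTypedDependencies(); // the getExtra == true path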