This article collects typical usage examples of the Java method edu.stanford.nlp.util.Generics.newHashSet. If you have been wondering what Generics.newHashSet does, how to use it, or where to find examples of it in use, the curated code samples below may help. You can also read more about the enclosing class, edu.stanford.nlp.util.Generics.
The following presents 15 code examples of Generics.newHashSet, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code samples.
Example 1: DepParseInfo
import edu.stanford.nlp.util.Generics; // import the package/class this method depends on
public DepParseInfo(SemanticGraph dependencies) {
  Collection<IndexedWord> rootNodes = dependencies.getRoots();
  if (rootNodes.isEmpty()) {
    // Shouldn't happen, but return something!
    return;
  }
  StringBuilder sb = new StringBuilder();
  Set<IndexedWord> used = Generics.newHashSet();
  for (IndexedWord root : rootNodes) {
    depParents.put(root.index(), 0);
    depLabels.put(root.index(), "root");
    sb.append("-> ").append(root).append(" (root)\n");
    recToString(root, sb, 1, used, dependencies, depLabels, depParents);
  }
  // Pick up any vertices not reachable from a root (disconnected nodes).
  Set<IndexedWord> nodes = Generics.newHashSet(dependencies.vertexSet());
  nodes.removeAll(used);
  while (!nodes.isEmpty()) {
    IndexedWord node = nodes.iterator().next();
    sb.append(node).append("\n");
    recToString(node, sb, 1, used, dependencies, depLabels, depParents);
    nodes.removeAll(used);
  }
}
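A hedged usage sketch: DepParseInfo is the example's own class rather than a CoreNLP API, so this assumes the class compiles as excerpted; SemanticGraph.valueOf (a real CoreNLP factory) builds a toy graph to feed it:
import edu.stanford.nlp.semgraph.SemanticGraph;
public class DepParseInfoDemo {
  public static void main(String[] args) {
    // Toy dependency graph in CoreNLP's bracketed notation.
    SemanticGraph sg = SemanticGraph.valueOf("[ate subj>Bill dobj>muffins]");
    DepParseInfo info = new DepParseInfo(sg);
    // depParents / depLabels are now populated, with the root mapped to parent 0.
  }
}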
Example 2: getArc
import edu.stanford.nlp.util.Generics; // import the package/class this method depends on
/**
 * Slow implementation.
 */
public Arc getArc(Object source, Object target) {
  Set arcsFromSource = arcsBySource.get(source);
  Set arcsToTarget = arcsByTarget.get(target);
  // Generics.newHashSet() returns a Set, so the result must be declared
  // as Set (declaring it as HashSet would not compile).
  Set result = Generics.newHashSet();
  result.addAll(arcsFromSource);
  result.retainAll(arcsToTarget); // intersection
  if (result.isEmpty()) {
    return null;
  }
  if (result.size() > 1) {
    throw new RuntimeException("Problem in TransducerGraph data structures.");
  }
  // get the only member
  Iterator iterator = result.iterator();
  return (Arc) iterator.next();
}
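The copy-then-retainAll idiom above is the standard way to intersect two sets without mutating either input; a minimal standalone sketch:
import edu.stanford.nlp.util.Generics;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
public class IntersectDemo {
  public static void main(String[] args) {
    Set<String> fromSource = new HashSet<String>(Arrays.asList("a", "b", "c"));
    Set<String> toTarget = new HashSet<String>(Arrays.asList("b", "c", "d"));
    Set<String> result = Generics.newHashSet(); // fresh set; inputs stay untouched
    result.addAll(fromSource);
    result.retainAll(toTarget); // intersection
    System.out.println(result); // [b, c] in some order
  }
}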
Example 3: arcLabelsToNode
import edu.stanford.nlp.util.Generics; // import the package/class this method depends on
/**
 * Finds all arcs between this node and <code>destNode</code>,
 * and returns the <code>Set</code> of <code>Object</code>s which
 * label those arcs. If no such arcs exist, returns an empty
 * <code>Set</code>.
 *
 * @param destNode the destination node
 * @return the <code>Set</code> of <code>Object</code>s which
 *         label arcs between this node and <code>destNode</code>
 */
public Set<Class<? extends GrammaticalRelationAnnotation>> arcLabelsToNode(TreeGraphNode destNode) {
  Set<Class<? extends GrammaticalRelationAnnotation>> arcLabels = Generics.newHashSet();
  CyclicCoreLabel cl = label();
  for (Iterator<Class<?>> it = cl.keySet().iterator(); it.hasNext(); ) {
    Class<? extends CoreAnnotation> key = (Class<? extends CoreAnnotation>) it.next(); // javac doesn't compile properly if generics are fully specified (but eclipse does...)
    Object val = cl.get(key);
    // instanceof already implies val != null
    if (val instanceof Set && ((Set) val).contains(destNode) && key != null) {
      arcLabels.add((Class<? extends GrammaticalRelationAnnotation>) key);
    }
  }
  return arcLabels;
}
Example 4: findDocType
import edu.stanford.nlp.util.Generics; // import the package/class this method depends on
/** Find the document type: conversation or article. */
private DocType findDocType(Dictionaries dict) {
  boolean speakerChange = false;
  Set<Integer> discourseWithIorYou = Generics.newHashSet();
  for (CoreMap sent : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
    for (CoreLabel w : sent.get(CoreAnnotations.TokensAnnotation.class)) {
      int utterIndex = w.get(CoreAnnotations.UtteranceAnnotation.class);
      if (utterIndex != 0) speakerChange = true;
      if (speakerChange && utterIndex == 0) return DocType.ARTICLE;
      if (dict.firstPersonPronouns.contains(w.get(CoreAnnotations.TextAnnotation.class).toLowerCase())
          || dict.secondPersonPronouns.contains(w.get(CoreAnnotations.TextAnnotation.class).toLowerCase())) {
        discourseWithIorYou.add(utterIndex);
      }
      if (maxUtter < utterIndex) maxUtter = utterIndex;
    }
  }
  if (!speakerChange) return DocType.ARTICLE;
  return DocType.CONVERSATION; // in a conversation, the utterance index keeps increasing
}
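The decision rule depends only on the sequence of utterance indices, so it can be sketched without any CoreNLP types; a minimal standalone rendering of the same logic over hypothetical index arrays:
public class DocTypeSketch {
  enum DocType { ARTICLE, CONVERSATION }
  // An index returning to 0 after a speaker change, or never changing at all,
  // means ARTICLE; otherwise the indices keep increasing: CONVERSATION.
  static DocType findDocType(int[] utterIndices) {
    boolean speakerChange = false;
    for (int utterIndex : utterIndices) {
      if (utterIndex != 0) speakerChange = true;
      if (speakerChange && utterIndex == 0) return DocType.ARTICLE;
    }
    return speakerChange ? DocType.CONVERSATION : DocType.ARTICLE;
  }
  public static void main(String[] args) {
    System.out.println(findDocType(new int[]{0, 0, 1, 1, 2})); // CONVERSATION
    System.out.println(findDocType(new int[]{0, 0, 0}));       // ARTICLE
    System.out.println(findDocType(new int[]{0, 1, 1, 0}));    // ARTICLE
  }
}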
Example 5: getGovMaxChains
import edu.stanford.nlp.util.Generics; // import the package/class this method depends on
private static Set<List<TypedDependency>> getGovMaxChains(Map<TreeGraphNode,List<TypedDependency>> govToDepMap, TreeGraphNode gov, int depth) {
  Set<List<TypedDependency>> depLists = Generics.newHashSet();
  List<TypedDependency> children = govToDepMap.get(gov);
  if (depth > 0 && children != null) {
    for (TypedDependency child : children) {
      TreeGraphNode childNode = child.dep();
      if (childNode == null) continue;
      // Recurse with one less level of depth remaining.
      Set<List<TypedDependency>> childDepLists = getGovMaxChains(govToDepMap, childNode, depth - 1);
      if (!childDepLists.isEmpty()) {
        // Prepend this dependency to every chain found below the child.
        for (List<TypedDependency> childDepList : childDepLists) {
          List<TypedDependency> depList = new ArrayList<TypedDependency>(childDepList.size() + 1);
          depList.add(child);
          depList.addAll(childDepList);
          depLists.add(depList);
        }
      } else {
        depLists.add(Arrays.asList(child));
      }
    }
  }
  return depLists;
}
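The recursion is easier to follow on a plain string graph; a minimal standalone sketch of the same depth-limited chain enumeration over a hypothetical adjacency map:
import java.util.*;
public class MaxChains {
  // Enumerate all chains of length <= depth starting at gov.
  static Set<List<String>> maxChains(Map<String, List<String>> edges, String gov, int depth) {
    Set<List<String>> chains = new HashSet<List<String>>();
    List<String> children = edges.get(gov);
    if (depth > 0 && children != null) {
      for (String child : children) {
        Set<List<String>> childChains = maxChains(edges, child, depth - 1);
        if (!childChains.isEmpty()) {
          for (List<String> cc : childChains) {
            List<String> chain = new ArrayList<String>(cc.size() + 1);
            chain.add(child);
            chain.addAll(cc);
            chains.add(chain);
          }
        } else {
          chains.add(Collections.singletonList(child));
        }
      }
    }
    return chains;
  }
  public static void main(String[] args) {
    Map<String, List<String>> edges = new HashMap<String, List<String>>();
    edges.put("ate", Arrays.asList("Bill", "muffins"));
    edges.put("muffins", Arrays.asList("blueberry"));
    System.out.println(maxChains(edges, "ate", 2)); // [Bill] and [muffins, blueberry], in some order
  }
}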
Example 6: printFullFeatureMatrix
import edu.stanford.nlp.util.Generics; // import the package/class this method depends on
/**
 * Prints the full feature matrix in tab-delimited form. These can be BIG
 * matrices, so be careful! [Can also use printFullFeatureMatrixWithValues.]
 */
public void printFullFeatureMatrix(PrintWriter pw) {
  String sep = "\t";
  // Header row: all feature names.
  for (int i = 0; i < featureIndex.size(); i++) {
    pw.print(sep + featureIndex.get(i));
  }
  pw.println();
  for (int i = 0; i < labels.length; i++) {
    pw.print(labelIndex.get(i));
    // Collect the features present in datum i ...
    Set<Integer> feats = Generics.newHashSet();
    for (int j = 0; j < data[i].length; j++) {
      int feature = data[i][j];
      feats.add(Integer.valueOf(feature));
    }
    // ... then emit a dense 0/1 row via set membership.
    for (int j = 0; j < featureIndex.size(); j++) {
      if (feats.contains(Integer.valueOf(j))) {
        pw.print(sep + "1");
      } else {
        pw.print(sep + "0");
      }
    }
    pw.println();
  }
}
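The sparse-to-dense trick at the heart of the method, in isolation; a minimal sketch with hypothetical data:
import java.util.HashSet;
import java.util.Set;
public class DenseRow {
  public static void main(String[] args) {
    int numFeatures = 6;
    int[] activeFeatures = {0, 3, 5};       // sparse representation
    Set<Integer> feats = new HashSet<Integer>();
    for (int f : activeFeatures) feats.add(f);
    StringBuilder row = new StringBuilder();
    for (int j = 0; j < numFeatures; j++) { // dense 0/1 representation
      row.append(feats.contains(j) ? "\t1" : "\t0");
    }
    System.out.println(row); // prints 1,0,0,1,0,1 as a tab-separated row
  }
}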
Example 7: requirementsSatisfied
import edu.stanford.nlp.util.Generics; // import the package/class this method depends on
@Override
public Set<Requirement> requirementsSatisfied() {
  // Union of everything the wrapped annotators satisfy.
  Set<Requirement> satisfied = Generics.newHashSet();
  for (Annotator annotator : annotators) {
    satisfied.addAll(annotator.requirementsSatisfied());
  }
  return satisfied;
}
Example 8: dependencies
import edu.stanford.nlp.util.Generics; // import the package/class this method depends on
/**
 * Return a set of node-node dependencies, represented as Dependency
 * objects, for the Tree.
 *
 * @param hf The HeadFinder to use to identify the head of constituents.
 *           If this is <code>null</code>, then nodes are assumed to already
 *           be marked with their heads.
 * @return Set of dependencies (each a <code>Dependency</code>)
 */
@Override
public Set<Dependency<Label, Label, Object>> dependencies(Filter<Dependency<Label, Label, Object>> f, HeadFinder hf) {
  Set<Dependency<Label, Label, Object>> deps = Generics.newHashSet();
  for (Tree t : this) {
    TreeGraphNode node = safeCast(t);
    if (node == null || node.isLeaf() || node.children().length < 2) {
      continue;
    }
    // Head word of the current constituent.
    TreeGraphNode headWordNode;
    if (hf != null) {
      headWordNode = safeCast(node.headTerminal(hf));
    } else {
      headWordNode = node.headWordNode();
    }
    for (Tree k : node.children()) {
      TreeGraphNode kid = safeCast(k);
      if (kid == null) {
        continue;
      }
      // Head word of each child; a dependency links the two heads.
      TreeGraphNode kidHeadWordNode;
      if (hf != null) {
        kidHeadWordNode = safeCast(kid.headTerminal(hf));
      } else {
        kidHeadWordNode = kid.headWordNode();
      }
      if (headWordNode != null && headWordNode != kidHeadWordNode) {
        Dependency<Label, Label, Object> d = new UnnamedDependency(headWordNode, kidHeadWordNode);
        if (f.accept(d)) {
          deps.add(d);
        }
      }
    }
  }
  return deps;
}
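A usage sketch, hedged: it assumes the older Filter-based dependencies(Filter, HeadFinder) overload shown above is available on the tree at hand (newer CoreNLP releases replaced Filter with java.util.function.Predicate), and uses Filters.acceptFilter() to accept every dependency:
import edu.stanford.nlp.ling.Label;
import edu.stanford.nlp.trees.CollinsHeadFinder;
import edu.stanford.nlp.trees.Dependency;
import edu.stanford.nlp.trees.HeadFinder;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.util.Filters;
import java.util.Set;
public class DependenciesDemo {
  public static void main(String[] args) {
    Tree tree = Tree.valueOf("(S (NP (DT the) (NN dog)) (VP (VBZ barks)))");
    HeadFinder hf = new CollinsHeadFinder();
    Set<Dependency<Label, Label, Object>> deps =
        tree.dependencies(Filters.<Dependency<Label, Label, Object>>acceptFilter(), hf);
    for (Dependency<Label, Label, Object> d : deps) {
      System.out.println(d); // e.g. head -> dependent pairs like barks/VBZ -> dog/NN
    }
  }
}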
Example 9: keysBelow
import edu.stanford.nlp.util.Generics; // import the package/class this method depends on
/**
 * Returns the set of keys whose counts are at or below the given threshold.
 * This set may have 0 elements but will not be null.
 */
public static <E> Set<E> keysBelow(Counter<E> c, double countThreshold) {
  Set<E> keys = Generics.newHashSet();
  for (E key : c.keySet()) {
    if (c.getCount(key) <= countThreshold) {
      keys.add(key);
    }
  }
  return keys;
}
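A quick usage sketch, assuming this keysBelow is the one CoreNLP ships in edu.stanford.nlp.stats.Counters:
import edu.stanford.nlp.stats.ClassicCounter;
import edu.stanford.nlp.stats.Counter;
import edu.stanford.nlp.stats.Counters;
import java.util.Set;
public class KeysBelowDemo {
  public static void main(String[] args) {
    Counter<String> wordCounts = new ClassicCounter<String>();
    wordCounts.incrementCount("the", 50.0);
    wordCounts.incrementCount("dog", 3.0);
    wordCounts.incrementCount("zyzzyva"); // count 1.0
    // All keys with count <= 2.0 -- here, just "zyzzyva".
    Set<String> rare = Counters.keysBelow(wordCounts, 2.0);
    System.out.println(rare); // [zyzzyva]
  }
}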
Example 10: descendants
import edu.stanford.nlp.util.Generics; // import the package/class this method depends on
/**
 * Returns the set of descendants governed by this node in the graph.
 */
public Set<IndexedWord> descendants(IndexedWord vertex) {
  if (!vertexSet().contains(vertex)) {
    throw new IllegalArgumentException();
  }
  // Do a depth first search
  Set<IndexedWord> descendantSet = Generics.newHashSet();
  descendantsHelper(vertex, descendantSet);
  return descendantSet;
}
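A hedged usage sketch, assuming this descendants is SemanticGraph's and building a toy graph with SemanticGraph.valueOf:
import edu.stanford.nlp.ling.IndexedWord;
import edu.stanford.nlp.semgraph.SemanticGraph;
import java.util.Set;
public class DescendantsDemo {
  public static void main(String[] args) {
    // "ate" governs "Bill" and "muffins"; "muffins" governs "blueberry".
    SemanticGraph sg = SemanticGraph.valueOf("[ate subj>Bill dobj>[muffins compound>blueberry]]");
    IndexedWord root = sg.getFirstRoot();
    Set<IndexedWord> below = sg.descendants(root);
    System.out.println(below); // the root plus everything it governs, in some order
  }
}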
Example 11: getOpenTags
import edu.stanford.nlp.util.Generics; // import the package/class this method depends on
/**
 * Returns a set of all open-class tags.
 * @return set of open tags
 */
public Set<String> getOpenTags() {
  if (openTags == null) { // cache check: computed lazily on first call
    Set<String> open = Generics.newHashSet();
    for (String tag : index) {
      if (!closed.contains(tag)) { // open = not registered as closed-class
        open.add(tag);
      }
    }
    openTags = open;
  }
  return openTags;
}
Example 12: readGazette
import edu.stanford.nlp.util.Generics; // import the package/class this method depends on
/** Reads a gazette file. Each line of it consists of a class name
 *  (a String not containing whitespace characters), followed by whitespace
 *  characters, followed by a phrase, which is one or more tokens separated
 *  by a single space.
 *
 *  @param in Where to read the gazette from
 *  @throws IOException If IO errors
 */
private void readGazette(BufferedReader in) throws IOException {
  Pattern p = Pattern.compile("^(\\S+)\\s+(.+)$");
  for (String line; (line = in.readLine()) != null; ) {
    Matcher m = p.matcher(line);
    if (m.matches()) {
      String type = intern(m.group(1));
      String phrase = m.group(2);
      String[] words = phrase.split(" ");
      for (int i = 0; i < words.length; i++) {
        String word = intern(words[i]);
        if (flags.sloppyGazette) {
          Collection<String> entries = wordToGazetteEntries.get(word);
          if (entries == null) {
            entries = Generics.newHashSet();
            wordToGazetteEntries.put(word, entries);
          }
          // Each word gets the class feature, both length-specific and generic.
          String feature = intern(type + "-GAZ" + words.length);
          entries.add(feature);
          feature = intern(type + "-GAZ");
          entries.add(feature);
        }
        if (flags.cleanGazette) {
          Collection<GazetteInfo> infos = wordToGazetteInfos.get(word);
          if (infos == null) {
            infos = Generics.newHashSet();
            wordToGazetteInfos.put(word, infos);
          }
          // Clean matching also keeps the word's position and the whole phrase.
          GazetteInfo info = new GazetteInfo(intern(type + "-GAZ" + words.length), i, words);
          infos.add(info);
          info = new GazetteInfo(intern(type + "-GAZ"), i, words);
          infos.add(info);
        }
      }
    }
  }
}
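The gazette line format is easiest to see in isolation; a minimal sketch running the same regex over two hypothetical gazette lines:
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class GazetteLineDemo {
  public static void main(String[] args) {
    Pattern p = Pattern.compile("^(\\S+)\\s+(.+)$");
    String[] lines = {
        "LOCATION New York City", // class LOCATION, 3-token phrase
        "PERSON Ada Lovelace",    // class PERSON, 2-token phrase
    };
    for (String line : lines) {
      Matcher m = p.matcher(line);
      if (m.matches()) {
        String type = m.group(1);
        String[] words = m.group(2).split(" ");
        System.out.println(type + " -> " + words.length + " tokens"
            + " (e.g. feature " + type + "-GAZ" + words.length + ")");
      }
    }
  }
}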
Example 13: initializeTraining
import edu.stanford.nlp.util.Generics; // import the package/class this method depends on
@Override
public void initializeTraining(Options op, Lexicon lex,
                               Index<String> wordIndex,
                               Index<String> tagIndex, double totalTrees) {
  super.initializeTraining(op, lex, wordIndex, tagIndex, totalTrees);
  seenCounter = new ClassicCounter<IntTaggedWord>();
  unSeenCounter = new ClassicCounter<IntTaggedWord>();
  tagHash = Generics.newHashMap();
  tc = new ClassicCounter<Label>();
  c = Generics.newHashMap();
  seenEnd = Generics.newHashSet();
  // Which unknown-word signatures to use, derived from the lexicon options.
  useEnd = (op.lexOptions.unknownSuffixSize > 0 &&
            op.lexOptions.useUnknownWordSignatures > 0);
  useFirstCap = op.lexOptions.useUnknownWordSignatures > 0;
  useGT = (op.lexOptions.useUnknownWordSignatures == 0);
  useFirst = false;
  if (useFirst) {
    System.err.println("Including first letter for unknown words.");
  }
  if (useFirstCap) {
    System.err.println("Including whether first letter is capitalized for unknown words");
  }
  if (useEnd) {
    System.err.println("Classing unknown word as the average of their equivalents by identity of last " + op.lexOptions.unknownSuffixSize + " letters.");
  }
  if (useGT) {
    System.err.println("Using Good-Turing smoothing for unknown words.");
  }
  this.indexToStartUnkCounting = (totalTrees * op.trainOptions.fractionBeforeUnseenCounting);
  this.unknownGTTrainer = (useGT) ? new UnknownGTTrainer() : null;
  this.model = buildUWM();
}
Example 14: localTrees
import edu.stanford.nlp.util.Generics; // import the package/class this method depends on
/**
 * Returns a set of one-level <code>Tree</code>s that are the local trees
 * of the tree.
 * That is, for each phrasal node, it builds a new tree that copies the
 * mother and daughter nodes (but not their Labels) and zeroes out
 * their children.
 *
 * @return A set of local trees
 */
public Set<Tree> localTrees() {
  Set<Tree> set = Generics.newHashSet();
  for (Tree st : this) {
    if (st.isPhrasal()) {
      set.add(st.localTree());
    }
  }
  return set;
}
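A quick usage sketch, assuming this localTrees is the one on edu.stanford.nlp.trees.Tree and parsing a toy tree with Tree.valueOf:
import edu.stanford.nlp.trees.Tree;
import java.util.Set;
public class LocalTreesDemo {
  public static void main(String[] args) {
    Tree tree = Tree.valueOf("(S (NP (DT the) (NN dog)) (VP (VBZ barks)))");
    Set<Tree> locals = tree.localTrees();
    // One entry per phrasal node: (S NP VP), (NP DT NN), (VP VBZ), in some order.
    for (Tree local : locals) {
      System.out.println(local);
    }
  }
}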
Example 15: keysAt
import edu.stanford.nlp.util.Generics; // import the package/class this method depends on
/**
 * Returns the set of keys that have exactly the given count. This set may
 * have 0 elements but will not be null.
 */
public static <E> Set<E> keysAt(Counter<E> c, double count) {
  Set<E> keys = Generics.newHashSet();
  for (E key : c.keySet()) {
    if (c.getCount(key) == count) { // exact double comparison
      keys.add(key);
    }
  }
  return keys;
}
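As in Example 9, a usage sketch assuming this keysAt lives in edu.stanford.nlp.stats.Counters; note the exact double comparison, which makes it most useful with integral counts:
import edu.stanford.nlp.stats.ClassicCounter;
import edu.stanford.nlp.stats.Counter;
import edu.stanford.nlp.stats.Counters;
import java.util.Set;
public class KeysAtDemo {
  public static void main(String[] args) {
    Counter<String> c = new ClassicCounter<String>();
    c.incrementCount("a"); // 1.0
    c.incrementCount("b"); // 1.0
    c.incrementCount("b"); // now 2.0
    Set<String> singletons = Counters.keysAt(c, 1.0);
    System.out.println(singletons); // [a]
  }
}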