本文整理汇总了Java中edu.stanford.nlp.util.Generics.newArrayList方法的典型用法代码示例。如果您正苦于以下问题:Java Generics.newArrayList方法的具体用法?Java Generics.newArrayList怎么用?Java Generics.newArrayList使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类edu.stanford.nlp.util.Generics
的用法示例。
在下文中一共展示了Generics.newArrayList方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getDeps
import edu.stanford.nlp.util.Generics; //导入方法依赖的package包/类
/**
 * Builds the list of typed dependencies for this
 * <code>GrammaticalStructure</code>.
 *
 * @param getExtra If true, the list of typed dependencies will contain extra ones.
 * If false, the list of typed dependencies will respect the tree structure.
 * @param f filter applied while collecting the extra dependencies
 * @return the sorted list of typed dependencies
 */
private List<TypedDependency> getDeps(boolean getExtra, Filter<TypedDependency> f) {
  List<TypedDependency> deps = Generics.newArrayList();
  // Label each raw dependency with its grammatical relation.
  for (Dependency<Label, Label, Object> dependency : dependencies()) {
    TreeGraphNode governor = (TreeGraphNode) dependency.governor();
    TreeGraphNode dependent = (TreeGraphNode) dependency.dependent();
    GrammaticalRelation relation = getGrammaticalRelation(governor, dependent);
    deps.add(new TypedDependency(relation, governor, dependent));
  }
  if (getExtra) {
    // Recursively collects additional dependencies into deps.
    TreeGraphNode rootNode = root();
    getDep(rootNode, deps, f);
  }
  Collections.sort(deps);
  return deps;
}
示例2: UCPtransform
import edu.stanford.nlp.util.Generics; //导入方法依赖的package包/类
/**
 * Transforms t if it contains a UCP: the UCP tag is changed
 * into the phrasal tag of the first word of the UCP.
 * (UCP (JJ electronic) (, ,) (NN computer) (CC and) (NN building))
 * will become
 * (ADJP (JJ electronic) (, ,) (NN computer) (CC and) (NN building))
 *
 * @param t a tree to be transformed; may be null
 * @return t transformed, or null if t is null
 */
public static Tree UCPtransform(Tree t) {
  // Tolerate null input for consistency with the other variant of this
  // method elsewhere in the codebase (which already guards against null).
  if (t == null) {
    return null;
  }
  Tree firstChild = t.firstChild();
  if (firstChild == null) {
    // A childless tree cannot contain a UCP; return it unchanged.
    return t;
  }
  // Pair every match pattern with its corresponding tsurgeon operation
  // and run them all over the tree.
  List<Pair<TregexPattern,TsurgeonPattern>> ops = Generics.newArrayList();
  for (int i = 0; i < operations.length; i++) {
    for (TregexPattern pattern : matchPatterns[i]) {
      ops.add(Generics.newPair(pattern, operations[i]));
    }
  }
  return Tsurgeon.processPatternsOnTree(ops, t);
}
示例3: UCPtransform
import edu.stanford.nlp.util.Generics; //导入方法依赖的package包/类
/**
 * Transforms t if it contains an UCP, it will change the UCP tag
 * into the phrasal tag of the first word of the UCP
 * (UCP (JJ electronic) (, ,) (NN computer) (CC and) (NN building))
 * will become
 * (ADJP (JJ electronic) (, ,) (NN computer) (CC and) (NN building))
 *
 * @param t a tree to be transformed
 * @return t transformed
 */
public static Tree UCPtransform(Tree t) {
  if (t == null) {
    return null;
  }
  if (t.firstChild() == null) {
    // Nothing to rewrite in a childless tree.
    return t;
  }
  // Build the full list of (pattern, operation) pairs, then apply them.
  List<Pair<TregexPattern,TsurgeonPattern>> patternOps = Generics.newArrayList();
  for (int i = 0; i < operations.length; i++) {
    for (TregexPattern match : matchPatterns[i]) {
      patternOps.add(Generics.newPair(match, operations[i]));
    }
  }
  return Tsurgeon.processPatternsOnTree(patternOps, t);
}
示例4: CollocationFinder
import edu.stanford.nlp.util.Generics; //导入方法依赖的package包/类
/**
 * Construct a new {@code CollocationFinder} over the {@code Tree} t.
 *
 * @param t parse tree
 * @param w wordnet connection
 * @param hf {@link HeadFinder} to use
 * @param threadSafe whether to include synchronization, etc.
 */
public CollocationFinder(Tree t, WordNetConnection w, HeadFinder hf, boolean threadSafe) {
  this.wnConnect = w;
  this.hf = hf;
  // Normalize coordination structure before searching for collocations.
  this.qTree = new CoordinationTransformer().transformTree(t);
  this.collocationCollector = Generics.newArrayList();
  this.getCollocationsList(threadSafe);
  if (DEBUG) {
    System.err.println("Collected collocations: " + collocationCollector);
  }
}
示例5: getStemmedWordTagsFromTree
import edu.stanford.nlp.util.Generics; //导入方法依赖的package包/类
/**
 * Extracts stemmed word/tag pairs from the leaves of a tree.
 *
 * @param t a tree
 * @param threadSafe if true, use the synchronized variant of the stemmer
 * @return the WordTags corresponding to the leaves of the tree,
 * stemmed according to their POS tags in the tree.
 */
private static List<WordTag> getStemmedWordTagsFromTree(Tree t, boolean threadSafe) {
  List<WordTag> result = Generics.newArrayList();
  Sentence<TaggedWord> taggedYield = t.taggedYield();
  for (TaggedWord taggedWord : taggedYield) {
    if (threadSafe) {
      result.add(Morphology.stemStaticSynchronized(taggedWord.word(), taggedWord.tag()));
    } else {
      result.add(Morphology.stemStatic(taggedWord.word(), taggedWord.tag()));
    }
  }
  return result;
}
示例6: getNonStemmedWordTagsFromTree
import edu.stanford.nlp.util.Generics; //导入方法依赖的package包/类
/**
 * Returns the (unstemmed) word/tag pairs for the leaves of {@code t},
 * in leaf order.
 */
private static List<WordTag> getNonStemmedWordTagsFromTree(Tree t) {
  List<WordTag> result = Generics.newArrayList();
  Sentence<TaggedWord> taggedYield = t.taggedYield();
  for (TaggedWord taggedWord : taggedYield) {
    result.add(new WordTag(taggedWord.word(), taggedWord.tag()));
  }
  return result;
}
示例7: getNonStemmedWordTagsFromTree
import edu.stanford.nlp.util.Generics; //导入方法依赖的package包/类
/**
 * Collects one WordTag per leaf of the tree, without stemming.
 *
 * @param t the tree whose tagged yield is converted
 * @return the word/tag pairs in leaf order
 */
private static List<WordTag> getNonStemmedWordTagsFromTree(Tree t) {
  ArrayList<TaggedWord> taggedLeaves = t.taggedYield();
  List<WordTag> pairs = Generics.newArrayList();
  for (TaggedWord tagged : taggedLeaves) {
    pairs.add(new WordTag(tagged.word(), tagged.tag()));
  }
  return pairs;
}
示例8: process
import edu.stanford.nlp.util.Generics; //导入方法依赖的package包/类
/**
 * Splits the given words into sentences. In one-sentence mode the whole
 * input is wrapped and returned as a single sentence.
 */
@Override
public List<List<IN>> process(List<? extends IN> words) {
  if (!isOneSentence) {
    return wordsToSentences(words);
  }
  // One-sentence mode: copy the input so the result owns its tokens.
  List<List<IN>> result = Generics.newArrayList();
  result.add(new ArrayList<IN>(words));
  return result;
}
示例9: getParentsWithReln
import edu.stanford.nlp.util.Generics; //导入方法依赖的package包/类
/**
 * Returns a list of all parents bearing a certain grammatical relation, or an
 * empty list if none.
 *
 * @param vertex the node whose parents are sought
 * @param reln the grammatical relation the connecting edge must carry
 * @return the (possibly empty) mutable list of matching parents
 * @throws IllegalArgumentException if vertex is not in this graph
 */
public List<IndexedWord> getParentsWithReln(IndexedWord vertex, GrammaticalRelation reln) {
  if (vertex.equals(IndexedWord.NO_WORD)) {
    return new ArrayList<IndexedWord>();
  }
  if (!vertexSet().contains(vertex)) {
    // Include the offending vertex so the failure is diagnosable.
    throw new IllegalArgumentException("Vertex " + vertex + " not in graph");
  }
  List<IndexedWord> parentList = Generics.newArrayList();
  for (SemanticGraphEdge edge : incomingEdgeIterable(vertex)) {
    if (edge.getRelation().equals(reln)) {
      parentList.add(edge.getSource());
    }
  }
  return parentList;
}
示例10: process
import edu.stanford.nlp.util.Generics; //导入方法依赖的package包/类
/**
 * Splits a word list into sentences; in one-sentence mode the entire
 * input becomes a single sentence.
 */
public List<List<IN>> process(List<? extends IN> words) {
  if (isOneSentence) {
    List<List<IN>> singleton = Generics.newArrayList();
    singleton.add(new ArrayList<IN>(words));
    return singleton;
  }
  return wordsToSentences(words);
}
示例11: getStemmedWordTagsFromTree
import edu.stanford.nlp.util.Generics; //导入方法依赖的package包/类
/**
 * Stems the tagged yield of a tree.
 *
 * @param t a tree
 * @param threadSafe if true, use the synchronized stemmer variant
 * @return the WordTags corresponding to the leaves of the tree,
 * stemmed according to their POS tags in the tree.
 */
private static List<WordTag> getStemmedWordTagsFromTree(Tree t, boolean threadSafe) {
  ArrayList<TaggedWord> taggedWords = t.taggedYield();
  List<WordTag> stemmed = Generics.newArrayList();
  for (TaggedWord taggedWord : taggedWords) {
    WordTag stem = threadSafe
        ? Morphology.stemStaticSynchronized(taggedWord.word(), taggedWord.tag())
        : Morphology.stemStatic(taggedWord.word(), taggedWord.tag());
    stemmed.add(stem);
  }
  return stemmed;
}
示例12: toEnUncollapsedSentenceString
import edu.stanford.nlp.util.Generics; //导入方法依赖的package包/类
/**
 * Similar to <code>toRecoveredString</code>, but will fill in words that were
 * collapsed into relations (i.e. prep_for --> 'for'). Mostly to deal with
 * collapsed dependency trees.
 *
 * TODO: consider merging with toRecoveredString() NOTE: assumptions currently
 * are for English. NOTE: currently takes immediate successors to current word
 * and expands them. This assumption may not be valid for other conditions or
 * languages?
 */
public String toEnUncollapsedSentenceString() {
  List<IndexedWord> tokens = Generics.newLinkedList(vertexSet());
  // First gather every (specific, governed-word) pair, and only then apply
  // them, to avoid concurrent modification while walking the graph.
  List<Pair<String, IndexedWord>> insertions = Generics.newArrayList();
  for (IndexedWord word : vertexSet()) {
    for (SemanticGraphEdge edge : getIncomingEdgesSorted(word)) {
      GrammaticalRelation relation = edge.getRelation();
      // Extract the specific; NOTE(review): the relation may apparently be a
      // String or a GrammaticalRelation -- preserved from the original code.
      String specific = relation.getSpecific();
      if (specific == null && edge.getRelation().equals(EnglishGrammaticalRelations.AGENT)) {
        // English-specific: agents of passives are introduced by "by".
        specific = "by";
      }
      if (specific != null) {
        // Remember to insert the specific at the leftmost token that is not
        // governed by this node.
        insertions.add(new Pair<String, IndexedWord>(specific, word));
      }
    }
  }
  for (Pair<String, IndexedWord> insertion : insertions) {
    insertSpecificIntoList(insertion.first(), insertion.second(), tokens);
  }
  return StringUtils.join(tokens, " ");
}
示例13: stripEmptyNode
import edu.stanford.nlp.util.Generics; //导入方法依赖的package包/类
/**
 * Applies the class's tsurgeon {@code operation} wherever
 * {@code matchPattern} matches in {@code t}, returning the resulting tree.
 */
protected static Tree stripEmptyNode(Tree t) {
  List<Pair<TregexPattern, TsurgeonPattern>> surgery = Generics.newArrayList();
  surgery.add(Generics.newPair(matchPattern, operation));
  return Tsurgeon.processPatternsOnTree(surgery, t);
}
示例14: getDeps
import edu.stanford.nlp.util.Generics; //导入方法依赖的package包/类
/**
 * Builds the list of typed dependencies for this
 * <code>GrammaticalStructure</code>, including a synthetic ROOT dependency.
 *
 * @param getExtra If true, the list of typed dependencies will contain extra ones.
 * If false, the list of typed dependencies will respect the tree structure.
 * @param puncTypedDepFilter filter that decides whether the synthetic root
 * dependency is kept; presumably rejects punctuation-headed dependencies --
 * TODO confirm against callers
 * @return the sorted list of typed dependencies
 */
private List<TypedDependency> getDeps(boolean getExtra, Filter<TypedDependency> puncTypedDepFilter) {
// Label every raw dependency with its grammatical relation.
List<TypedDependency> basicDep = Generics.newArrayList();
for (Dependency<Label, Label, Object> d : dependencies()) {
TreeGraphNode gov = (TreeGraphNode) d.governor();
TreeGraphNode dep = (TreeGraphNode) d.dependent();
//System.out.println("Gov: " + gov);
//System.out.println("Dep: " + dep);
GrammaticalRelation reln = getGrammaticalRelation(gov, dep);
//System.out.println("Reln: " + reln);
basicDep.add(new TypedDependency(reln, gov, dep));
}
// add the root: a synthetic node with index 0 governing the sentence head.
TreeGraphNode dependencyRoot = new TreeGraphNode(new Word("ROOT"));
dependencyRoot.setIndex(0);
TreeGraphNode rootDep = null;
Collection<TypedDependency> roots = getRoots(basicDep);
if (roots.size() == 0) {
// This can happen if the sentence has only one non-punctuation
// word. In that case, we still want to add the root->word
// dependency, but we won't find any roots using the getRoots()
// method. Instead we use the HeadFinder and the tree.
List<Tree> leaves = Trees.leaves(root());
if (leaves.size() > 0) {
Tree leaf = leaves.get(0);
if (!(leaf instanceof TreeGraphNode)) {
throw new AssertionError("Leaves should be TreeGraphNodes");
}
rootDep = (TreeGraphNode) leaf;
// Prefer the head word of the leaf's constituent when one is known.
if (rootDep.headWordNode() != null) {
rootDep = rootDep.headWordNode();
}
}
} else {
// since roots.size() > 0, there must be at least one element
Iterator<TypedDependency> iterator = roots.iterator();
rootDep = iterator.next().gov();
}
// Attach ROOT -> head, but only if the filter accepts the dependency.
if (rootDep != null) {
TypedDependency rootTypedDep =
new TypedDependency(ROOT, dependencyRoot, rootDep);
if (puncTypedDepFilter.accept(rootTypedDep)) {
basicDep.add(rootTypedDep);
}
}
postProcessDependencies(basicDep);
if (getExtra) {
getExtras(basicDep);
// adds stuff to basicDep based on the tregex patterns over the tree
getTreeDeps(root(), basicDep, puncTypedDepFilter, extraTreeDepFilter());
}
Collections.sort(basicDep);
return basicDep;
}
示例15: getDeps
import edu.stanford.nlp.util.Generics; //导入方法依赖的package包/类
/**
 * Builds the list of typed dependencies for this
 * <code>GrammaticalStructure</code>, including a synthetic ROOT dependency.
 *
 * @param getExtra If true, the list of typed dependencies will contain extra ones.
 * If false, the list of typed dependencies will respect the tree structure.
 * @param f filter that decides whether the synthetic root dependency is kept
 * and which extra dependencies are collected; presumably rejects
 * punctuation-headed dependencies -- TODO confirm against callers
 * @return the sorted list of typed dependencies
 */
private List<TypedDependency> getDeps(boolean getExtra, Filter<TypedDependency> f) {
// Label every raw dependency with its grammatical relation.
List<TypedDependency> basicDep = Generics.newArrayList();
for (Dependency<Label, Label, Object> d : dependencies()) {
TreeGraphNode gov = (TreeGraphNode) d.governor();
TreeGraphNode dep = (TreeGraphNode) d.dependent();
//System.out.println("Gov: " + gov);
//System.out.println("Dep: " + dep);
GrammaticalRelation reln = getGrammaticalRelation(gov, dep);
//System.out.println("Reln: " + reln);
basicDep.add(new TypedDependency(reln, gov, dep));
}
// add the root: a synthetic node with index 0 governing the sentence head.
TreeGraphNode dependencyRoot = new TreeGraphNode(new Word("ROOT"));
dependencyRoot.setIndex(0);
TreeGraphNode rootDep = null;
Collection<TypedDependency> roots = getRoots(basicDep);
if (roots.size() == 0) {
// This can happen if the sentence has only one non-punctuation
// word. In that case, we still want to add the root->word
// dependency, but we won't find any roots using the getRoots()
// method. Instead we use the HeadFinder and the tree.
List<Tree> leaves = Trees.leaves(root());
if (leaves.size() > 0) {
Tree leaf = leaves.get(0);
if (!(leaf instanceof TreeGraphNode)) {
throw new AssertionError("Leaves should be TreeGraphNodes");
}
rootDep = (TreeGraphNode) leaf;
// Prefer the head word of the leaf's constituent when one is known.
if (rootDep.headWordNode() != null) {
rootDep = rootDep.headWordNode();
}
}
} else {
// since roots.size() > 0, there must be at least one element
Iterator<TypedDependency> iterator = roots.iterator();
rootDep = iterator.next().gov();
}
// Attach ROOT -> head, but only if the filter accepts the dependency.
if (rootDep != null) {
TypedDependency rootTypedDep =
new TypedDependency(ROOT, dependencyRoot, rootDep);
if (f.accept(rootTypedDep)) {
basicDep.add(rootTypedDep);
}
}
if (getExtra) {
TreeGraphNode rootTree = root();
getDep(rootTree, basicDep, f); // adds stuff to basicDep
}
Collections.sort(basicDep);
return basicDep;
}