本文整理汇总了Java中edu.stanford.nlp.util.IntPair类的典型用法代码示例。如果您正苦于以下问题:Java IntPair类的具体用法?Java IntPair怎么用?Java IntPair使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
IntPair类属于edu.stanford.nlp.util包,在下文中一共展示了IntPair类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getBins
import edu.stanford.nlp.util.IntPair; //导入依赖的package包/类
/**
 * Computes the binned sizes of the gaps between consecutive covered segments.
 *
 * @param cs the coverage set whose segment gaps are measured
 * @return one bin id (via {@code sizeToBin}) per gap between adjacent segments;
 *         empty if the set has fewer than two segments
 */
public static List<Integer> getBins(CoverageSet cs) {
  List<Integer> gapBins = new ArrayList<Integer>();
  IntPair previous = null;
  for (Iterator<IntPair> segments = cs.getSegmentIterator(); segments.hasNext(); ) {
    IntPair current = segments.next();
    if (previous != null) {
      // Gap size = words strictly between the end of the previous segment
      // and the start of the current one.
      int gap = current.getSource() - previous.getTarget() - 1;
      gapBins.add(sizeToBin(gap));
    }
    previous = current;
  }
  return gapBins;
}
示例2: getSegmentIterator
import edu.stanford.nlp.util.IntPair; //导入依赖的package包/类
/**
 * Returns an iterator over the maximal runs of set bits, each reported as an
 * inclusive {@code IntPair} (start, end) span.
 *
 * @return a read-only iterator of inclusive segment spans; {@code remove()} is unsupported
 */
public Iterator<IntPair> getSegmentIterator() {
  return new Iterator<IntPair>() {
    // Index of the first bit of the next segment; -1 when exhausted.
    int idx = nextSetBit(0);
    @Override
    public boolean hasNext() {
      return idx >= 0;
    }
    @Override
    public IntPair next() {
      // Iterator contract: signal exhaustion explicitly. Previously this fell
      // through to nextClearBit(-1), which throws IndexOutOfBoundsException.
      if (idx < 0) {
        throw new java.util.NoSuchElementException("no more segments");
      }
      int startIdx = idx;
      int endIdx = nextClearBit(idx);
      this.idx = nextSetBit(endIdx);
      // endIdx is the first clear bit, so the inclusive end is endIdx - 1.
      return new IntPair(startIdx, endIdx - 1);
    }
    @Override
    public void remove() {
      throw new UnsupportedOperationException();
    }
  };
}
示例3: constituentsNodes
import edu.stanford.nlp.util.IntPair; //导入依赖的package包/类
/**
 * Same as int constituents but just puts the span as an IntPair
 * in the CoreLabel of the nodes.
 *
 * @param left The left position to begin labeling from
 * @return The index of the right frontier of the constituent
 * @throws UnsupportedOperationException if any visited label is not a CoreLabel
 */
private int constituentsNodes(int left) {
  // Preterminal: span covers exactly its single leaf.
  if (isPreTerminal()) {
    if (!(label() instanceof CoreLabel)) {
      throw new UnsupportedOperationException("Can only set spans on trees which use CoreLabel");
    }
    ((CoreLabel) label()).set(CoreAnnotations.SpanAnnotation.class, new IntPair(left, left));
    return left + 1;
  }
  // Internal node: label children first, tracking the right frontier.
  int right = left;
  for (Tree child : children()) {
    right = child.constituentsNodes(right);
  }
  // Parent span is inclusive, hence right - 1.
  if (!(label() instanceof CoreLabel)) {
    throw new UnsupportedOperationException("Can only set spans on trees which use CoreLabel");
  }
  ((CoreLabel) label()).set(CoreAnnotations.SpanAnnotation.class, new IntPair(left, right - 1));
  return right;
}
示例4: Feature
import edu.stanford.nlp.util.IntPair; //导入依赖的package包/类
/**
 * Builds a feature from a dense table of values, keeping only the non-zero
 * entries in row-major (x, then y) order.
 *
 * @param vals a value for each (x,y) pair
 */
public Feature(Experiments e, double[][] vals, Index<IntPair> instanceIndex) {
  this.instanceIndex = instanceIndex;
  domain = e;
  // First pass: count non-zero entries so the arrays can be sized exactly.
  int nonZero = 0;
  for (int x = 0; x < e.xSize; x++) {
    for (int y = 0; y < e.ySize; y++) {
      if (vals[x][y] != 0) {
        nonZero++;
      }
    }
  }
  indexedValues = new int[nonZero];
  valuesI = new double[nonZero];
  // Second pass: record each non-zero entry's packed index and value.
  int next = 0;
  for (int x = 0; x < e.xSize; x++) {
    for (int y = 0; y < e.ySize; y++) {
      double v = vals[x][y];
      if (v != 0) {
        indexedValues[next] = indexOf(x, y);
        valuesI[next] = v;
        next++;
      }
    }
  }
}
示例5: extractPredictedMentions
import edu.stanford.nlp.util.IntPair; //导入依赖的package包/类
/** Main method of mention detection.
 * Extract all NP, PRP or NE, and filter out by manually written patterns.
 *
 * @param doc the annotated document whose sentences are scanned
 * @param _maxID highest mention id already in use; new ids continue from here
 * @param dict dictionaries used to remove spurious mentions
 * @return one list of detected mentions per sentence, in document order
 */
@Override
public List<List<Mention>> extractPredictedMentions(Annotation doc, int _maxID, Dictionaries dict){
  this.maxID = _maxID;
  List<List<Mention>> allMentions = new ArrayList<List<Mention>>();
  for (CoreMap sentence : doc.get(CoreAnnotations.SentencesAnnotation.class)) {
    List<Mention> sentenceMentions = new ArrayList<Mention>();
    allMentions.add(sentenceMentions);
    // Spans already claimed by a mention / by a named entity, shared across extractors.
    Set<IntPair> mentionSpans = Generics.newHashSet();
    Set<IntPair> namedEntitySpans = Generics.newHashSet();
    // Candidate extraction: named entities first, then NP/PRP subtrees, then enumerations.
    extractNamedEntityMentions(sentence, sentenceMentions, mentionSpans, namedEntitySpans);
    extractNPorPRP(sentence, sentenceMentions, mentionSpans, namedEntitySpans);
    extractEnumerations(sentence, sentenceMentions, mentionSpans, namedEntitySpans);
    // Post-processing on the collected candidates.
    findHead(sentence, sentenceMentions);
    setBarePlural(sentenceMentions);
    removeSpuriousMentions(sentence, sentenceMentions, dict);
  }
  return allMentions;
}
示例6: extractNPorPRP
import edu.stanford.nlp.util.IntPair; //导入依赖的package包/类
/**
 * Extracts NP and PRP subtrees of the sentence's parse as mention candidates,
 * skipping spans already claimed or contained inside a named entity.
 *
 * @param s the sentence (must carry tokens, a parse tree, and collapsed dependencies)
 * @param mentions output list receiving newly created mentions
 * @param mentionSpanSet spans already used by a mention; updated in place
 * @param namedEntitySpanSet spans covered by named entities (read-only here)
 */
protected void extractNPorPRP(CoreMap s, List<Mention> mentions, Set<IntPair> mentionSpanSet, Set<IntPair> namedEntitySpanSet) {
  List<CoreLabel> tokens = s.get(CoreAnnotations.TokensAnnotation.class);
  Tree tree = s.get(TreeCoreAnnotations.TreeAnnotation.class);
  tree.indexLeaves();
  SemanticGraph dependency = s.get(SemanticGraphCoreAnnotations.CollapsedDependenciesAnnotation.class);
  final String mentionPattern = "/^(?:NP|PRP)/";
  TregexPattern pattern = TregexPattern.compile(mentionPattern);
  TregexMatcher matcher = pattern.matcher(tree);
  while (matcher.find()) {
    Tree match = matcher.getMatch();
    List<Tree> leaves = match.getLeaves();
    // Token indices are 1-based; convert to a 0-based, end-exclusive span.
    int begin = ((CoreLabel) leaves.get(0).label()).get(CoreAnnotations.IndexAnnotation.class) - 1;
    int end = ((CoreLabel) leaves.get(leaves.size() - 1).label()).get(CoreAnnotations.IndexAnnotation.class);
    IntPair span = new IntPair(begin, end);
    if (!mentionSpanSet.contains(span) && !insideNE(span, namedEntitySpanSet)) {
      int mentionID = assignIds ? ++maxID : -1;
      Mention m = new Mention(mentionID, begin, end, dependency,
          new ArrayList<CoreLabel>(tokens.subList(begin, end)), match);
      mentions.add(m);
      mentionSpanSet.add(span);
    }
  }
}
示例7: constituentsNodes
import edu.stanford.nlp.util.IntPair; //导入依赖的package包/类
/**
 * Same as int constituents but just puts the span as an IntPair
 * in the CoreLabel of the nodes.
 * Nodes whose label is not a CoreLabel are silently skipped (no exception,
 * unlike some other variants of this method).
 *
 * @param left The left position to begin labeling from
 * @return The index of the right frontier of the constituent
 */
private int constituentsNodes(int left) {
  // Preterminal: span covers exactly its single leaf.
  if (isPreTerminal()) {
    if (label() instanceof CoreLabel) {
      ((CoreLabel) label()).set(CoreAnnotations.SpanAnnotation.class, new IntPair(left, left));
    }
    return left + 1;
  }
  // Internal node: label children first, tracking the right frontier.
  int right = left;
  for (Tree child : children()) {
    right = child.constituentsNodes(right);
  }
  // Parent span is inclusive, hence right - 1.
  if (label() instanceof CoreLabel) {
    ((CoreLabel) label()).set(CoreAnnotations.SpanAnnotation.class, new IntPair(left, right - 1));
  }
  return right;
}
示例8: Feature
import edu.stanford.nlp.util.IntPair; //导入依赖的package包/类
/**
 * This is if we are given an array of double with a value for each training sample in the order of their occurrence.
 * Duplicate (x,y) points must carry the same value; otherwise the feature is ill-specified.
 *
 * @throws IllegalStateException if the same (x,y) point appears with two different values
 */
public Feature(Experiments e, double[] vals, Index<IntPair> instanceIndex) {
  this.instanceIndex = instanceIndex;
  // Collect the non-zero values keyed by packed (x,y) index, checking consistency.
  Map<Integer, Double> nonZeros = Generics.newHashMap();
  for (int i = 0; i < vals.length; i++) {
    if (vals[i] != 0.0) {
      Integer key = Integer.valueOf(indexOf(e.get(i)[0], e.get(i)[1]));
      Double previous = nonZeros.put(key, Double.valueOf(vals[i]));
      if (previous != null && previous.doubleValue() != vals[i]) {
        throw new IllegalStateException("Incorrect function specification: Feature has two values at one point: " + previous + " and " + vals[i]);
      }
    }
  }
  // Flatten the map into the parallel index/value arrays.
  indexedValues = new int[nonZeros.size()];
  valuesI = new double[nonZeros.size()];
  int slot = 0;
  for (Map.Entry<Integer, Double> entry : nonZeros.entrySet()) {
    indexedValues[slot] = entry.getKey().intValue();
    valuesI[slot] = entry.getValue().doubleValue();
    slot++;
  }
  domain = e;
}
示例9: constituentsNodes
import edu.stanford.nlp.util.IntPair; //导入依赖的package包/类
/**
 * Same as int constituents but just puts the span as an IntPair
 * in the CoreLabel of the nodes.
 * Nodes whose label is not a CoreLabel are silently skipped.
 *
 * @param left The left position to begin labeling from
 * @return The index of the right frontier of the constituent
 */
private int constituentsNodes(int left) {
  // Preterminal: span covers exactly its single leaf.
  if (isPreTerminal()) {
    if (label() instanceof CoreLabel) {
      ((CoreLabel) label()).set(SpanAnnotation.class, new IntPair(left, left));
    }
    return left + 1;
  }
  // Internal node: label children first, tracking the right frontier.
  int right = left;
  for (Tree child : children()) {
    right = child.constituentsNodes(right);
  }
  // Parent span is inclusive, hence right - 1.
  if (label() instanceof CoreLabel) {
    ((CoreLabel) label()).set(SpanAnnotation.class, new IntPair(left, right - 1));
  }
  return right;
}
示例10: print
import edu.stanford.nlp.util.IntPair; //导入依赖的package包/类
/**
 * Prints every stored feature value, one "x, y value" line per entry.
 *
 * @param pf the stream to print to
 */
public void print(PrintStream pf) {
  for (int i = 0; i < indexedValues.length; i++) {
    // Unpack the stored index back into its (x, y) coordinates.
    IntPair pair = getPair(indexedValues[i]);
    pf.println(pair.get(0) + ", " + pair.get(1) + ' ' + valuesI[i]);
  }
}
示例11: createIndex
import edu.stanford.nlp.util.IntPair; //导入依赖的package包/类
/**
 * Builds an index over every (x, y) pair, where for each x the outcomes y
 * range over {@code numY(x)}.
 *
 * @return a fresh index containing all (x, y) pairs in row-major order
 */
public Index<IntPair> createIndex() {
  Index<IntPair> pairIndex = new HashIndex<IntPair>();
  for (int x = 0; x < px.length; x++) {
    int outcomes = numY(x);
    for (int y = 0; y < outcomes; y++) {
      pairIndex.add(new IntPair(x, y));
    }
  }
  return pairIndex;
}
示例12: filterPredictedMentions
import edu.stanford.nlp.util.IntPair; //导入依赖的package包/类
/** When mention boundaries are given.
 * Copies the gold mentions per sentence and runs the usual post-processing
 * (head finding, bare-plural marking, spurious-mention removal).
 *
 * @param allGoldMentions gold mentions, one list per sentence
 * @param doc the annotated document supplying the sentences
 * @param dict dictionaries used to remove spurious mentions
 * @return one post-processed mention list per sentence
 */
public List<List<Mention>> filterPredictedMentions(List<List<Mention>> allGoldMentions, Annotation doc, Dictionaries dict){
  List<List<Mention>> predicted = new ArrayList<List<Mention>>();
  for (int i = 0; i < allGoldMentions.size(); i++) {
    CoreMap sentence = doc.get(CoreAnnotations.SentencesAnnotation.class).get(i);
    List<Mention> mentions = new ArrayList<Mention>();
    predicted.add(mentions);
    mentions.addAll(allGoldMentions.get(i));
    findHead(sentence, mentions);
    // todo [cdm 2013]: This block seems to do nothing - the two sets are never used
    // (kept as-is: dropping it would also drop the headWord NE-tag reads it performs)
    Set<IntPair> mentionSpanSet = Generics.newHashSet();
    Set<IntPair> namedEntitySpanSet = Generics.newHashSet();
    for (Mention m : mentions) {
      IntPair span = new IntPair(m.startIndex, m.endIndex);
      mentionSpanSet.add(span);
      if (!m.headWord.get(CoreAnnotations.NamedEntityTagAnnotation.class).equals("O")) {
        namedEntitySpanSet.add(span);
      }
    }
    setBarePlural(mentions);
    removeSpuriousMentions(sentence, mentions, dict);
  }
  return predicted;
}
示例13: extractEnumerations
import edu.stanford.nlp.util.IntPair; //导入依赖的package包/类
/** Extract enumerations (A, B, and C).
 * Each conjunct (m1, m2) of a coordinated NP becomes a candidate mention,
 * unless its span is already claimed or sits inside a named entity.
 *
 * @param s the sentence (must carry tokens, a parse tree, and collapsed dependencies)
 * @param mentions output list receiving newly created mentions
 * @param mentionSpanSet spans already used by a mention; updated in place
 * @param namedEntitySpanSet spans covered by named entities (read-only here)
 */
protected void extractEnumerations(CoreMap s, List<Mention> mentions, Set<IntPair> mentionSpanSet, Set<IntPair> namedEntitySpanSet){
  List<CoreLabel> tokens = s.get(CoreAnnotations.TokensAnnotation.class);
  Tree tree = s.get(TreeCoreAnnotations.TreeAnnotation.class);
  SemanticGraph dependency = s.get(SemanticGraphCoreAnnotations.CollapsedDependenciesAnnotation.class);
  final String mentionPattern = "NP < (/^(?:NP|NNP|NML)/=m1 $.. (/^CC|,/ $.. /^(?:NP|NNP|NML)/=m2))";
  TregexPattern pattern = TregexPattern.compile(mentionPattern);
  TregexMatcher matcher = pattern.matcher(tree);
  Map<IntPair, Tree> subTreeBySpan = Generics.newHashMap();
  while (matcher.find()) {
    matcher.getMatch();
    // Record both conjuncts of the coordination under their token spans.
    for (Tree conjunct : new Tree[]{ matcher.getNode("m1"), matcher.getNode("m2") }) {
      List<Tree> leaves = conjunct.getLeaves();
      // Token indices are 1-based; convert to a 0-based, end-exclusive span.
      int begin = ((CoreLabel) leaves.get(0).label()).get(CoreAnnotations.IndexAnnotation.class) - 1;
      int end = ((CoreLabel) leaves.get(leaves.size() - 1).label()).get(CoreAnnotations.IndexAnnotation.class);
      subTreeBySpan.put(new IntPair(begin, end), conjunct);
    }
  }
  for (IntPair span : subTreeBySpan.keySet()) {
    if (!mentionSpanSet.contains(span) && !insideNE(span, namedEntitySpanSet)) {
      int mentionID = assignIds ? ++maxID : -1;
      Mention m = new Mention(mentionID, span.get(0), span.get(1), dependency,
          new ArrayList<CoreLabel>(tokens.subList(span.get(0), span.get(1))), subTreeBySpan.get(span));
      mentions.add(m);
      mentionSpanSet.add(span);
    }
  }
}
示例14: insideNE
import edu.stanford.nlp.util.IntPair; //导入依赖的package包/类
/** Check whether a mention is inside of a named entity.
 * A mention is "inside" iff some NE span contains it on both ends (inclusive).
 *
 * @param mSpan the mention's token span
 * @param namedEntitySpanSet the spans of all named entities in the sentence
 * @return true iff some NE span fully contains {@code mSpan}
 */
private static boolean insideNE(IntPair mSpan, Set<IntPair> namedEntitySpanSet) {
  for (IntPair neSpan : namedEntitySpanSet) {
    boolean startsWithin = neSpan.get(0) <= mSpan.get(0);
    boolean endsWithin = mSpan.get(1) <= neSpan.get(1);
    if (startsWithin && endsWithin) {
      return true;
    }
  }
  return false;
}
示例15: CorefChain
import edu.stanford.nlp.util.IntPair; //导入依赖的package包/类
/**
 * Builds a coreference chain from a cluster: wraps each cluster mention,
 * groups mentions by (sentence, head index), tracks the most representative
 * mention, and finally sorts the mention list.
 *
 * @param c the source cluster; its id becomes the chain id
 * @param positions document positions for each mention in the cluster
 */
public CorefChain(CorefCluster c, Map<Mention, IntTuple> positions){
  chainID = c.clusterID;
  mentions = new ArrayList<CorefMention>();
  mentionMap = Generics.newHashMap();
  for (Mention mention : c.getCorefMentions()) {
    CorefMention wrapped = new CorefMention(mention, positions.get(mention));
    mentions.add(wrapped);
    // Key mentions by (sentence, head index) so co-located mentions group together.
    IntPair key = new IntPair(wrapped.sentNum, wrapped.headIndex);
    Set<CorefMention> atKey = mentionMap.get(key);
    if (atKey == null) {
      atKey = Generics.newHashSet();
      mentionMap.put(key, atKey);
    }
    atKey.add(wrapped);
    if (wrapped.moreRepresentativeThan(representative)) {
      representative = wrapped;
    }
  }
  Collections.sort(mentions, new MentionComparator());
}