本文整理汇总了Java中edu.stanford.nlp.util.Pair.second方法的典型用法代码示例。如果您正苦于以下问题:Java Pair.second方法的具体用法?Java Pair.second怎么用?Java Pair.second使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类edu.stanford.nlp.util.Pair
的用法示例。
在下文中一共展示了Pair.second方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: train
import edu.stanford.nlp.util.Pair; //导入方法依赖的package包/类
/**
 * Trains the co-occurrence tables from a collection of (seen, hidden) pairs.
 * For each pair, increments the count of the seen item under its hidden item
 * in {@code hiddenToSeen}, and vice versa in {@code seenToHidden}.
 *
 * @param data a collection of {@code Pair} objects; first() is the observed
 *             item, second() is the hidden item
 */
public void train(Collection data) {
  for (Object datum : data) {
    Pair p = (Pair) datum;
    Object seen = p.first();
    Object hidden = p.second();
    // containsKey is the direct membership test; keySet().contains() builds a view needlessly
    if (!hiddenToSeen.containsKey(hidden)) {
      hiddenToSeen.put(hidden, new ClassicCounter());
    }
    hiddenToSeen.get(hidden).incrementCount(seen);
    if (!seenToHidden.containsKey(seen)) {
      seenToHidden.put(seen, new ClassicCounter());
    }
    seenToHidden.get(seen).incrementCount(hidden);
  }
}
示例2: processPatternsOnTree
import edu.stanford.nlp.util.Pair; //导入方法依赖的package包/类
/**
 * Applies each (pattern, operation) pair to the tree until the pattern no
 * longer matches, then moves on to the next pair. Sets the static flag
 * {@code matchedOnTree} if any pattern matched at least once.
 *
 * @param ops list of tregex-pattern / tsurgeon-operation pairs to apply in order
 * @param t the tree to transform (may be mutated and/or replaced)
 * @return the transformed tree, or null if an operation deleted the whole tree
 */
public static Tree processPatternsOnTree(List<Pair<TregexPattern, TsurgeonPattern>> ops, Tree t) {
  matchedOnTree = false;
  for (Pair<TregexPattern, TsurgeonPattern> operation : ops) {
    TregexPattern pattern = operation.first();
    TsurgeonPattern surgery = operation.second();
    try {
      TregexMatcher matcher = pattern.matcher(t);
      // Re-run the pattern from scratch after every edit, since the surgery
      // may have restructured the tree under the matcher's feet.
      while (matcher.find()) {
        matchedOnTree = true;
        t = surgery.evaluate(t, matcher);
        if (t == null) {
          return null;
        }
        matcher = pattern.matcher(t);
      }
    } catch (NullPointerException npe) {
      throw new RuntimeException("Tsurgeon.processPatternsOnTree failed to match label for pattern: " + operation.first() + ", " + operation.second(), npe);
    }
  }
  return t;
}
示例3: eval
import edu.stanford.nlp.util.Pair; //导入方法依赖的package包/类
/**
 * Evaluates the guesses against the gold standard, accumulating both the
 * per-call counters ({@code previous*}) and the running totals.
 * Recall is computed by calling {@code evalPrecision} with the arguments
 * swapped.
 *
 * @param guesses Collection of guessed objects
 * @param golds Collection of gold-standard objects
 * @param pw {@link PrintWriter} to print eval stats
 */
public void eval(Collection<IN> guesses, Collection<IN> golds, PrintWriter pw) {
  if (verbose) {
    System.out.println("evaluating precision...");
  }
  // Precision: fraction of guesses that appear in the gold standard.
  Pair<ClassicCounter<OUT>, ClassicCounter<OUT>> precisionCounts = evalPrecision(guesses, golds);
  previousGuessed = precisionCounts.first();
  previousGuessedCorrect = precisionCounts.second();
  Counters.addInPlace(guessed, previousGuessed);
  Counters.addInPlace(guessedCorrect, previousGuessedCorrect);
  if (verbose) {
    System.out.println("evaluating recall...");
  }
  // Recall: same computation with guesses and golds exchanged.
  Pair<ClassicCounter<OUT>, ClassicCounter<OUT>> recallCounts = evalPrecision(golds, guesses);
  previousGold = recallCounts.first();
  previousGoldCorrect = recallCounts.second();
  Counters.addInPlace(gold, previousGold);
  Counters.addInPlace(goldCorrect, previousGoldCorrect);
}
示例4: processPatternsOnTree
import edu.stanford.nlp.util.Pair; //导入方法依赖的package包/类
/**
 * Applies each (pattern, operation) pair to the tree repeatedly until the
 * pattern stops matching, then proceeds to the next pair. Sets the static
 * flag {@code matchedOnTree} if any pattern matched.
 *
 * @param ops ordered list of tregex-pattern / tsurgeon-operation pairs
 * @param t the tree to transform
 * @return the transformed tree, or null if an operation removed the whole tree
 */
public static Tree processPatternsOnTree(List<Pair<TregexPattern, TsurgeonPattern>> ops, Tree t) {
  matchedOnTree = false;
  for (Pair<TregexPattern, TsurgeonPattern> operation : ops) {
    TregexPattern pattern = operation.first();
    TsurgeonPattern surgery = operation.second();
    try {
      if (DEBUG) {
        System.err.println("Running pattern " + operation.first());
      }
      TregexMatcher matcher = pattern.matcher(t);
      // Restart matching after each edit: the surgery may have changed the
      // tree in ways that invalidate the current matcher.
      while (matcher.find()) {
        matchedOnTree = true;
        t = surgery.evaluate(t, matcher);
        if (t == null) {
          return null;
        }
        matcher = pattern.matcher(t);
      }
    } catch (NullPointerException npe) {
      throw new RuntimeException("Tsurgeon.processPatternsOnTree failed to match label for pattern: " + operation.first() + ", " + operation.second(), npe);
    }
  }
  return t;
}
示例5: BisequenceEmpiricalNERPrior
import edu.stanford.nlp.util.Pair; //导入方法依赖的package包/类
/**
 * Builds the prior from the class/tag indices, the document, and the
 * empirical entity/sub-entity co-occurrence matrices.
 *
 * @param backgroundSymbol the label used for non-entity tokens
 * @param classIndex index over output classes
 * @param tagIndex index over entity tags
 * @param doc the document tokens; their text is cached in {@code wordDoc}
 * @param matrices pair of (entity matrix, sub-entity matrix)
 * @param flags classifier configuration flags
 */
public BisequenceEmpiricalNERPrior(String backgroundSymbol, Index<String> classIndex, Index<String> tagIndex, List<IN> doc, Pair<double[][], double[][]> matrices, SeqClassifierFlags flags) {
  this.flags = flags;
  this.classIndex = classIndex;
  this.tagIndex = tagIndex;
  this.backgroundSymbolIndex = classIndex.indexOf(backgroundSymbol);
  this.numClasses = classIndex.size();
  this.numTags = tagIndex.size();
  // Each class is a possible value for every position.
  this.possibleValues = new int[numClasses];
  for (int c = 0; c < numClasses; c++) {
    possibleValues[c] = c;
  }
  // Cache the raw token strings so we don't repeatedly pull annotations.
  this.wordDoc = new ArrayList<String>(doc.size());
  for (IN token : doc) {
    wordDoc.add(token.get(CoreAnnotations.TextAnnotation.class));
  }
  entityMatrix = matrices.first();
  subEntityMatrix = matrices.second();
}
示例6: copy
import edu.stanford.nlp.util.Pair; //导入方法依赖的package包/类
/**
 * Copies the Auxiliary tree. Also, puts the new names->nodes map in the
 * TsurgeonPattern that called copy.
 *
 * @param p the pattern requesting the copy; receives the fresh name map
 * @return a new AuxiliaryTree over the copied tree and its copied foot node
 */
public AuxiliaryTree copy(TsurgeonPattern p) {
  Map<String,Tree> namesToNodes = new HashMap<String,Tree>();
  // copyHelper returns (copied tree root, copied foot node).
  Pair<Tree,Tree> copied = copyHelper(tree, namesToNodes);
  p.root.newNodeNames.putAll(namesToNodes);
  return new AuxiliaryTree(copied.first(), copied.second(), namesToNodes, originalTreeString);
}
示例7: copyHelper
import edu.stanford.nlp.util.Pair; //导入方法依赖的package包/类
/**
 * Recursively deep-copies the subtree rooted at {@code node}, recording any
 * named nodes into {@code newNamesToNodes}.
 *
 * @param node the subtree to copy
 * @param newNamesToNodes out-parameter mapping names to their copied nodes
 * @return a pair of (copy of node, copy of the foot node if it lies in this
 *         subtree, else null)
 */
private Pair<Tree,Tree> copyHelper(Tree node, Map<String,Tree> newNamesToNodes) {
  Tree copied;
  Tree footCopy = null;
  if (node.isLeaf()) {
    if (node == foot) {
      // The foot is copied as an (empty) internal node and passed up.
      copied = node.treeFactory().newTreeNode(node.label(), new ArrayList<Tree>(0));
      footCopy = copied;
    } else {
      copied = node.treeFactory().newLeaf(node.label().labelFactory().newLabel(node.label()));
    }
  } else {
    List<Tree> copiedChildren = new ArrayList<Tree>(node.children().length);
    for (Tree child : node.children()) {
      Pair<Tree,Tree> childResult = copyHelper(child, newNamesToNodes);
      copiedChildren.add(childResult.first());
      if (childResult.second() != null) {
        // At most one subtree should contain the foot; warn if we see two.
        if (footCopy != null) {
          System.err.println("Error -- two feet found when copying auxiliary tree " + tree.toString() + "; using last foot found.");
        }
        footCopy = childResult.second();
      }
    }
    copied = node.treeFactory().newTreeNode(node.label().labelFactory().newLabel(node.label()), copiedChildren);
    if (nodesToNames.containsKey(node)) {
      newNamesToNodes.put(nodesToNames.get(node), copied);
    }
  }
  return new Pair<Tree,Tree>(copied, footCopy);
}
示例8: train
import edu.stanford.nlp.util.Pair; //导入方法依赖的package包/类
/**
 * Trains the co-occurrence tables from a collection of (seen, hidden) pairs.
 * For each pair, increments the count of the seen item under its hidden item
 * in {@code hiddenToSeen}, and vice versa in {@code seenToHidden}.
 *
 * @param data pairs whose first() is the observed item and second() is the
 *             hidden item
 */
public void train(Collection<Pair<?,?>> data) {
  for (Pair p : data) {
    Object seen = p.first();
    Object hidden = p.second();
    // containsKey is the direct membership test; keySet().contains() builds a view needlessly
    if (!hiddenToSeen.containsKey(hidden)) {
      hiddenToSeen.put(hidden, new ClassicCounter());
    }
    hiddenToSeen.get(hidden).incrementCount(seen);
    if (!seenToHidden.containsKey(seen)) {
      seenToHidden.put(seen, new ClassicCounter());
    }
    seenToHidden.get(seen).incrementCount(hidden);
  }
}
示例9: samplePosition
import edu.stanford.nlp.util.Pair; //导入方法依赖的package包/类
/**
 * Samples a single position in the sequence.
 * Destructively modifies the sequence in place.
 *
 * @param model the model scoring the sequence
 * @param sequence the sequence to start with
 * @param pos the position to sample.
 * @param temperature the temperature to control annealing
 * @return the score of the new sequence
 */
public double samplePosition(SequenceModel model, int[] sequence, int pos, double temperature) {
  int previousTag = sequence[pos];
  // Helper returns (sampled tag, score of resulting sequence).
  Pair<Integer, Double> sampled = samplePositionHelper(model, sequence, pos, temperature);
  sequence[pos] = sampled.first();
  // Notify the listener with the tag that was overwritten.
  listener.updateSequenceElement(sequence, pos, previousTag);
  return sampled.second();
}
示例10: next
import edu.stanford.nlp.util.Pair; //导入方法依赖的package包/类
/**
 * Returns the next cross-validation fold as (train, test, saved state).
 * Note: returns null (rather than throwing) once all k folds are exhausted.
 *
 * @return a triple of the training split, the held-out split, and the
 *         saved state for this fold, or null when iteration is done
 */
public Triple<GeneralDataset<L, F>,GeneralDataset<L, F>,SavedState> next()
{
  if (iter == kFold) {
    return null;
  }
  // Fold boundaries partition the data into k nearly-equal contiguous slices.
  int foldStart = originalTrainData.size() * iter / kFold;
  int foldEnd = originalTrainData.size() * (iter + 1) / kFold;
  Pair<GeneralDataset<L, F>, GeneralDataset<L, F>> split = originalTrainData.split(foldStart, foldEnd);
  return new Triple<GeneralDataset<L, F>,GeneralDataset<L, F>,SavedState>(split.first(), split.second(), savedStates[iter++]);
}
示例11: EmpiricalNERPriorBIO
import edu.stanford.nlp.util.Pair; //导入方法依赖的package包/类
/**
 * Builds the BIO NER prior, unpacking the empirical entity and sub-entity
 * matrices and caching the tag indices for ORG and LOC.
 *
 * @param backgroundSymbol the label used for non-entity tokens
 * @param classIndex index over output classes
 * @param tagIndex index over entity tags
 * @param doc the document tokens
 * @param matrices pair of (entity matrix, sub-entity matrix)
 * @param flags classifier configuration flags
 */
public EmpiricalNERPriorBIO(String backgroundSymbol, Index<String> classIndex, Index<String> tagIndex, List<IN> doc, Pair<double[][], double[][]> matrices, SeqClassifierFlags flags) {
  super(backgroundSymbol, classIndex, tagIndex, doc);
  this.flags = flags;
  entityMatrix = matrices.first();
  subEntityMatrix = matrices.second();
  // Cache frequently consulted tag indices (-1 if the tag is absent).
  ORGIndex = tagIndex.indexOf("ORG");
  LOCIndex = tagIndex.indexOf("LOC");
}
示例12: copy
import edu.stanford.nlp.util.Pair; //导入方法依赖的package包/类
/**
 * Copies the Auxiliary tree. Also, puts the new names->nodes map in the
 * TsurgeonPattern that called copy.
 *
 * @param p the pattern requesting the copy; receives the fresh name map
 * @return a new AuxiliaryTree over the copied tree and its copied foot node
 */
public AuxiliaryTree copy(TsurgeonPattern p) {
  Map<String,Tree> namesToNodes = Generics.newHashMap();
  // copyHelper returns (copied tree root, copied foot node).
  Pair<Tree,Tree> copied = copyHelper(tree, namesToNodes);
  p.root.newNodeNames.putAll(namesToNodes);
  return new AuxiliaryTree(copied.first(), copied.second(), namesToNodes, originalTreeString);
}
示例13: incrementMonth
import edu.stanford.nlp.util.Pair; //导入方法依赖的package包/类
/**
 * Shifts the month of the reference date by {@code relation.second()} months,
 * carrying into the year when the shift crosses a year boundary. Writes the
 * resulting ISO date string into {@code isoDate}.
 *
 * Fixes two defects in the previous version: a plain {@code % 12} could
 * produce month 0 (e.g. June + 18 gave 24 % 12 = 0), and the year carry used
 * {@code ceil(delta / 12.0)} which over-counted for large positive shifts and
 * mishandled negative ones. Zero-based month arithmetic with floorMod/floorDiv
 * handles both directions uniformly.
 *
 * @param referenceDate the date to shift from; its start date is read
 * @param relation pair whose second element is the signed month offset
 */
private void incrementMonth(ISODateInstance referenceDate, Pair<DateField, Integer> relation) {
  String origDateString = referenceDate.getStartDate();
  String monthString = origDateString.substring(4, 6);
  // A wildcard month cannot be shifted; keep the original date unchanged.
  if (monthString.contains("*")) {
    isoDate = origDateString;
    return;
  }
  int monthNum = Integer.parseInt(monthString);
  // Work zero-based so wrap-around and year carry fall out of floorMod/floorDiv.
  int zeroBasedMonth = monthNum - 1 + relation.second();
  int newMonthNum = Math.floorMod(zeroBasedMonth, 12) + 1;   // always 1..12
  int yearDelta = Math.floorDiv(zeroBasedMonth, 12);         // signed year carry
  isoDate = makeStringMonthChange(origDateString, newMonthNum);
  if (yearDelta != 0) {
    String yearString = origDateString.substring(0, 4);
    // A wildcard year cannot absorb the carry; leave it as-is.
    if (!yearString.contains("*")) {
      isoDate = makeStringYearChange(isoDate, Integer.parseInt(yearString) + yearDelta);
    }
  }
}
示例14: getParserDataFromTreebank
import edu.stanford.nlp.util.Pair; //导入方法依赖的package包/类
/**
 * Trains all parser components (PCFG, lexicon, and optionally a dependency
 * grammar) from the given treebank and bundles them into a ParserData.
 *
 * @param trainTreebank the treebank to train on
 * @param compactor optional grammar compactor; skipped if null
 * @param tuneTreebank optional treebank for tuning the dependency model;
 *                     only used when op.doDep is set and this is non-null
 * @return the assembled ParserData (lexicon, grammars, numberers, options)
 */
public final ParserData getParserDataFromTreebank(Treebank trainTreebank,
                                                  GrammarCompactor compactor,
                                                  Treebank tuneTreebank) {
  System.err.println("Currently " + new Date());
  printOptions(true, op);
  // Annotate and binarize both treebanks up front; first() is train, second() is tune.
  Pair<List<Tree>,List<Tree>> pair = getAnnotatedBinaryTreebankFromTreebank(trainTreebank, tuneTreebank, op);
  List<Tree> binaryTrainTrees = pair.first();
  List<Tree> binaryTuneTrees = pair.second();
  // extract grammars
  //Extractor bgExtractor = new SmoothedBinaryGrammarExtractor();
  Extractor<Pair<UnaryGrammar,BinaryGrammar>> bgExtractor = new BinaryGrammarExtractor();
  // Extractor lexExtractor = new LexiconExtractor();
  Extractor<DependencyGrammar> dgExtractor = op.tlpParams.dependencyGrammarExtractor(op);
  //TreeExtractor uwmExtractor = new UnknownWordModelExtractor(binaryTrainTrees.size());
  System.err.print("Extracting PCFG...");
  // bgug holds (unary grammar, binary grammar) extracted from the binarized training trees.
  Pair<UnaryGrammar,BinaryGrammar> bgug = bgExtractor.extract(binaryTrainTrees);
  Timing.tick("done.");
  if (compactor != null) {
    System.err.print("Compacting grammar...");
    bgug = compactor.compactGrammar(bgug);
    Timing.tick("done.");
  }
  System.err.print("Compiling grammar...");
  BinaryGrammar bg = bgug.second;
  bg.splitRules();
  UnaryGrammar ug = bgug.first;
  // System.err.println("\nUnary grammar built by BinaryGrammarExtractor");
  // ug.writeAllData(new OutputStreamWriter(System.err));
  ug.purgeRules();
  // System.err.println("Unary grammar after purgeRules");
  // ug.writeAllData(new OutputStreamWriter(System.err));
  Timing.tick("done");
  System.err.print("Extracting Lexicon...");
  Lexicon lex = op.tlpParams.lex(op.lexOptions);
  lex.train(binaryTrainTrees);
  Timing.tick("done.");
  // Dependency grammar is optional; dg stays null when op.doDep is false.
  DependencyGrammar dg = null;
  if (op.doDep) {
    System.err.print("Extracting Dependencies...");
    dg = dgExtractor.extract(binaryTrainTrees);
    // ((ChineseSimWordAvgDepGrammar)dg).setLex(lex);
    Timing.tick("done.");
    //System.out.println(dg);
    //System.err.print("Extracting Unknown Word Model...");
    //UnknownWordModel uwm = (UnknownWordModel)uwmExtractor.extract(binaryTrainTrees);
    //Timing.tick("done.");
    if (tuneTreebank != null) {
      System.err.print("Tuning Dependency Model...");
      dg.setLexicon(lex); // MG2008: needed if using PwGt model
      dg.tune(binaryTuneTrees);
      Timing.tick("done.");
    }
  }
  Map<String,Numberer> numbs = Numberer.getNumberers();
  System.err.println("Done training parser.");
  // Optionally persist the binarized training trees for later inspection/reuse.
  if (Train.trainTreeFile!=null) {
    try {
      System.err.print("Writing out binary trees to "+ Train.trainTreeFile+"...");
      IOUtils.writeObjectToFile(binaryTrainTrees, Train.trainTreeFile);
      Timing.tick("done.");
    } catch (Exception e) {
      // Best-effort write: failure here should not abort training.
      System.err.println("Problem writing out binary trees.");
    }
  }
  return new ParserData(lex, bg, ug, dg, numbs, op);
}
示例15: toTiedRankCounter
import edu.stanford.nlp.util.Pair; //导入方法依赖的package包/类
/**
 * Converts a counter to tied ranks; ranks start from 1
 *
 * @return A counter where the count is the rank in the original counter; when values are tied, the rank is the average of the ranks of the tied values
 */
public static <E> Counter<E> toTiedRankCounter(Counter<E> c) {
  Counter<E> rankCounter = new ClassicCounter<E>();
  // Sorted (key, count) pairs, presumably in descending count order — rank 1 first.
  List<Pair<E, Double>> sortedList = toSortedListWithCounts(c);
  int i = 0;  // zero-based index of the current element in sortedList
  Iterator<Pair<E, Double>> it = sortedList.iterator();
  while(it.hasNext()) {
    Pair<E, Double> iEn = it.next();
    double icount = iEn.second();
    E iKey = iEn.first();
    // Collect the run of entries tied with the current one: their would-be
    // ranks (1-based) go in l, their keys in keys.
    List<Integer> l = new ArrayList<Integer>();
    List<E> keys = new ArrayList<E>();
    l.add(i+1);
    keys.add(iKey);
    // Look ahead by index for ties; exact double equality is used, so only
    // identical stored counts are treated as tied.
    for(int j = i +1; j < sortedList.size(); j++){
      Pair<E, Double> jEn = sortedList.get(j);
      if( icount == jEn.second()){
        l.add(j+1);
        keys.add(jEn.first());
      }else
        break;
    }
    if(l.size() > 1){
      // Tie group: every member gets the average of the tied ranks.
      double sum = 0;
      for(Integer d: l)
        sum += d;
      double avgRank = sum/l.size();
      for(int k = 0; k < l.size(); k++){
        rankCounter.setCount(keys.get(k), avgRank);
        // Advance the outer iterator past the tied entries we already ranked
        // (all but the first, which it.next() consumed at the top of the loop).
        if(k != l.size()-1 && it.hasNext())
          it.next();
        i++;
      }
    }else{
      // No tie: rank is simply the 1-based position.
      rankCounter.setCount(iKey, i+1);
      i++;
    }
  }
  return rankCounter;
}