This page collects typical usage examples of the Java method edu.stanford.nlp.util.Pair.first. If you are wondering what Pair.first does, how to call it, or where it is used in practice, the curated code samples below should help. You can also explore the enclosing class, edu.stanford.nlp.util.Pair, for further details.
Below, 15 code examples of Pair.first are presented, sorted by popularity by default.
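Before the examples, here is a minimal self-contained sketch (our own illustration, not taken from the listing below; the class name PairFirstDemo is invented) showing what first() and second() return:

import edu.stanford.nlp.util.Pair;

public class PairFirstDemo {
    public static void main(String[] args) {
        // Pair is a lightweight generic 2-tuple; first() and second() are plain accessors.
        Pair<String, Integer> wordAndIndex = new Pair<>("token", 3);
        String word = wordAndIndex.first();     // "token"
        int index = wordAndIndex.second();      // 3
        System.out.println(word + "/" + index); // prints: token/3
    }
}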
Example 1: train
import edu.stanford.nlp.util.Pair; // import the package/class this method depends on
public void train(Collection data) {
for (Iterator i = data.iterator(); i.hasNext();) {
Pair p = (Pair) i.next();
Object seen = p.first();
Object hidden = p.second();
if (!hiddenToSeen.keySet().contains(hidden)) {
hiddenToSeen.put(hidden, new ClassicCounter());
}
hiddenToSeen.get(hidden).incrementCount(seen);
if (!seenToHidden.keySet().contains(seen)) {
seenToHidden.put(seen, new ClassicCounter());
}
seenToHidden.get(seen).incrementCount(hidden);
}
}
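A hedged usage sketch for this trainer: each Pair couples a seen observation with its hidden label, and train fills both co-occurrence maps. The counter variable stands in for an instance of the (unshown) enclosing class, and the word/tag strings are invented:

List<Pair> data = new ArrayList<>();
data.add(new Pair("the", "DT")); // (seen word, hidden tag)
data.add(new Pair("dog", "NN"));
counter.train(data);             // populates hiddenToSeen and seenToHidden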
Example 2: processPatternsOnTree
import edu.stanford.nlp.util.Pair; // import the package/class this method depends on
public static Tree processPatternsOnTree(List<Pair<TregexPattern, TsurgeonPattern>> ops, Tree t) {
matchedOnTree = false;
for (Pair<TregexPattern,TsurgeonPattern> op : ops) {
try {
TregexMatcher m = op.first().matcher(t);
while (m.find()) {
matchedOnTree = true;
t = op.second().evaluate(t,m);
if (t == null) {
return null;
}
m = op.first().matcher(t);
}
} catch (NullPointerException npe) {
throw new RuntimeException("Tsurgeon.processPatternsOnTree failed to match label for pattern: " + op.first() + ", " + op.second(), npe);
}
}
return t;
}
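Here is a hedged sketch of building the ops list this method consumes, using the standard Tregex/Tsurgeon entry points (the pattern strings are illustrative, and tree stands in for an already-parsed Tree):

import java.util.ArrayList;
import java.util.List;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.tregex.TregexPattern;
import edu.stanford.nlp.trees.tregex.tsurgeon.Tsurgeon;
import edu.stanford.nlp.trees.tregex.tsurgeon.TsurgeonPattern;
import edu.stanford.nlp.util.Pair;

List<Pair<TregexPattern, TsurgeonPattern>> ops = new ArrayList<>();
TregexPattern match = TregexPattern.compile("/^-NONE-$/=empty");  // match empty elements
TsurgeonPattern surgery = Tsurgeon.parseOperation("prune empty"); // and delete them
ops.add(new Pair<>(match, surgery));
Tree pruned = Tsurgeon.processPatternsOnTree(ops, tree);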
Example 3: eval
import edu.stanford.nlp.util.Pair; // import the package/class this method depends on
/**
* @param guesses Collection of guessed objects
* @param golds Collection of gold-standard objects
* @param pw {@link PrintWriter} to print eval stats
*/
public void eval(Collection<IN> guesses, Collection<IN> golds, PrintWriter pw) {
if (verbose) {
System.out.println("evaluating precision...");
}
Pair<ClassicCounter<OUT>, ClassicCounter<OUT>> precision = evalPrecision(guesses, golds);
previousGuessed = precision.first();
Counters.addInPlace(guessed, previousGuessed);
previousGuessedCorrect = precision.second();
Counters.addInPlace(guessedCorrect, previousGuessedCorrect);
if (verbose) {
System.out.println("evaluating recall...");
}
Pair<ClassicCounter<OUT>, ClassicCounter<OUT>> recall = evalPrecision(golds, guesses);
previousGold = recall.first();
Counters.addInPlace(gold, previousGold);
previousGoldCorrect = recall.second();
Counters.addInPlace(goldCorrect, previousGoldCorrect);
}
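Note the symmetry in the second half: recall is computed by calling evalPrecision with the guesses and golds swapped, since the recall of the guesses against the golds is exactly the precision of the golds against the guesses.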
Example 4: processPatternsOnTree
import edu.stanford.nlp.util.Pair; // import the package/class this method depends on
public static Tree processPatternsOnTree(List<Pair<TregexPattern, TsurgeonPattern>> ops, Tree t) {
matchedOnTree = false;
for (Pair<TregexPattern,TsurgeonPattern> op : ops) {
try {
if (DEBUG) {
System.err.println("Running pattern " + op.first());
}
TregexMatcher m = op.first().matcher(t);
while (m.find()) {
matchedOnTree = true;
t = op.second().evaluate(t,m);
if (t == null) {
return null;
}
m = op.first().matcher(t);
}
} catch (NullPointerException npe) {
throw new RuntimeException("Tsurgeon.processPatternsOnTree failed to match label for pattern: " + op.first() + ", " + op.second(), npe);
}
}
return t;
}
Example 5: makeVertex
import edu.stanford.nlp.util.Pair; // import the package/class this method depends on
private IndexedWord makeVertex(String word) {
Integer index; // initialized below
Pair<String, Integer> wordAndIndex = readWordAndIndex(word);
if (wordAndIndex != null) {
word = wordAndIndex.first();
index = wordAndIndex.second();
} else {
index = getNextFreeIndex();
}
indexesUsed.add(index);
// Note that, despite the use of indexesUsed and getNextFreeIndex(),
// nothing is actually enforcing that no indexes are used twice. This
// could occur if some words in the string representation being parsed
// come with index markers and some do not.
IndexedWord ifl = new IndexedWord(null, 0, index);
// System.err.println("SemanticGraphParsingTask>>> word = " + word);
// System.err.println("SemanticGraphParsingTask>>> index = " + index);
// System.err.println("SemanticGraphParsingTask>>> indexesUsed = " +
// indexesUsed);
String[] wordAndTag = word.split("/");
ifl.set(TextAnnotation.class, wordAndTag[0]);
if (wordAndTag.length > 1)
ifl.set(PartOfSpeechAnnotation.class, wordAndTag[1]);
return ifl;
}
Example 6: ISODateInstance
import edu.stanford.nlp.util.Pair; // import the package/class this method depends on
/**
* Construct a new ISODate based on its relation to a referenceDate.
* relativeDate should be something like "today" or "tomorrow" or "last year"
* and the resulting ISODate will be the same as the referenceDate, a day later,
* or a year earlier, respectively.
*
*/
public ISODateInstance(ISODateInstance referenceDate, String relativeDate) {
Pair<DateField, Integer> relation = relativeDateMap.get(relativeDate.toLowerCase());
if (relation != null) {
switch (relation.first()) {
case DAY:
incrementDay(referenceDate, relation);
break;
case MONTH:
incrementMonth(referenceDate, relation);
break;
case YEAR:
incrementYear(referenceDate, relation);
break;
}
}
}
Example 7: copy
import edu.stanford.nlp.util.Pair; // import the package/class this method depends on
/**
* Copies the Auxiliary tree. Also, puts the new names->nodes map in the TsurgeonPattern that called copy.
*/
public AuxiliaryTree copy(TsurgeonPattern p) {
Map<String,Tree> newNamesToNodes = new HashMap<String,Tree>();
Pair<Tree,Tree> result = copyHelper(tree,newNamesToNodes);
//if(! result.first().dominates(result.second()))
//System.err.println("Error -- aux tree copy doesn't dominate foot copy.");
p.root.newNodeNames.putAll(newNamesToNodes);
return new AuxiliaryTree(result.first(), result.second(), newNamesToNodes, originalTreeString);
}
Example 8: copy
import edu.stanford.nlp.util.Pair; // import the package/class this method depends on
/**
* Copies the Auxiliary tree. Also, puts the new names->nodes map in the TsurgeonPattern that called copy.
*/
public AuxiliaryTree copy(TsurgeonPattern p) {
Map<String,Tree> newNamesToNodes = Generics.newHashMap();
Pair<Tree,Tree> result = copyHelper(tree,newNamesToNodes);
//if(! result.first().dominates(result.second()))
//System.err.println("Error -- aux tree copy doesn't dominate foot copy.");
p.root.newNodeNames.putAll(newNamesToNodes);
return new AuxiliaryTree(result.first(), result.second(), newNamesToNodes, originalTreeString);
}
Example 9: samplePosition
import edu.stanford.nlp.util.Pair; // import the package/class this method depends on
/**
 * Samples a single position in the sequence.
 * Destructively modifies the sequence in place.
 *
 * @param sequence the sequence to start with
 * @param pos the position to sample
 * @param temperature the temperature to control annealing
 * @return the score of the new sequence
 */
public double samplePosition(SequenceModel model, int[] sequence, int pos, double temperature) {
int oldTag = sequence[pos];
Pair<Integer, Double> newPosProb = samplePositionHelper(model, sequence, pos, temperature);
int newTag = newPosProb.first();
// System.out.println("Sampled " + oldTag + "->" + newTag);
sequence[pos] = newTag;
listener.updateSequenceElement(sequence, pos, oldTag);
return newPosProb.second();
}
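A hedged sketch of how samplePosition is typically driven (the sampler variable stands in for the enclosing sampler object, and the simple linear cooling schedule is our assumption, not the library's default):

double score = 0.0;
for (int pass = 0; pass < numPasses; pass++) {
    double temperature = 1.0 / (pass + 1);   // cool down as passes accumulate
    for (int pos = 0; pos < sequence.length; pos++) {
        // each call resamples one tag in place and reports the new sequence score
        score = sampler.samplePosition(model, sequence, pos, temperature);
    }
}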
Example 10: train
import edu.stanford.nlp.util.Pair; // import the package/class this method depends on
public void train(Collection<Pair<?,?>> data) {
for (Pair p : data) {
Object seen = p.first();
Object hidden = p.second();
if (!hiddenToSeen.keySet().contains(hidden)) {
hiddenToSeen.put(hidden, new ClassicCounter());
}
hiddenToSeen.get(hidden).incrementCount(seen);
if (!seenToHidden.keySet().contains(seen)) {
seenToHidden.put(seen, new ClassicCounter());
}
seenToHidden.get(seen).incrementCount(hidden);
}
}
Example 11: next
import edu.stanford.nlp.util.Pair; // import the package/class this method depends on
public Triple<GeneralDataset<L, F>,GeneralDataset<L, F>,SavedState> next()
{
if (iter == kFold) return null;
int start = originalTrainData.size() * iter / kFold;
int end = originalTrainData.size() * (iter + 1) / kFold;
//System.err.println("##train data size: " + originalTrainData.size() + " start " + start + " end " + end);
Pair<GeneralDataset<L, F>, GeneralDataset<L, F>> split = originalTrainData.split(start, end);
return new Triple<GeneralDataset<L, F>,GeneralDataset<L, F>,SavedState>(split.first(),split.second(),savedStates[iter++]);
}
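A hedged sketch of driving this k-fold iterator (crossValidator stands in for the enclosing object; L and F are the label and feature types bound by the caller):

Triple<GeneralDataset<L, F>, GeneralDataset<L, F>, SavedState> fold;
while ((fold = crossValidator.next()) != null) {
    GeneralDataset<L, F> trainSet = fold.first();
    GeneralDataset<L, F> heldOut  = fold.second();
    // fit a classifier on trainSet and evaluate on heldOut;
    // fold.third() carries state that can be reused across folds
}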
Example 12: reln
import edu.stanford.nlp.util.Pair; // import the package/class this method depends on
/**
* Returns the relation that node a has with node b.
*
*/
public GrammaticalRelation reln(IndexedWord a, IndexedWord b) {
if (!vertexSet().contains(a)) {
throw new IllegalArgumentException();
}
List<Pair<GrammaticalRelation, IndexedWord>> pairs = childPairs(a);
for (Pair<GrammaticalRelation, IndexedWord> p : pairs)
if (p.second().equals(b))
return p.first();
return null;
}
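A hedged usage sketch (graph, governor, and dependent are placeholders for a real SemanticGraph and two of its vertices):

GrammaticalRelation rel = graph.reln(governor, dependent);
if (rel != null) {
    System.out.println(governor.word() + " --" + rel + "--> " + dependent.word());
}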
Example 13: compactGrammar
import edu.stanford.nlp.util.Pair; // import the package/class this method depends on
/**
* Compacts the grammar specified by the Pair.
*
* @param grammar a Pair of grammars, ordered UnaryGrammar BinaryGrammar.
* @param allTrainPaths a Map from String passive constituents to Lists of paths
* @param allTestPaths a Map from String passive constituents to Lists of paths
* @return a Triple of the new state Index, UnaryGrammar, and BinaryGrammar.
*/
public Triple<Index<String>, UnaryGrammar, BinaryGrammar> compactGrammar(Pair<UnaryGrammar,BinaryGrammar> grammar, Map<String, List<List<String>>> allTrainPaths, Map<String, List<List<String>>> allTestPaths, Index<String> originalStateIndex) {
inputPrior = computeInputPrior(allTrainPaths); // computed once for the whole grammar
// BinaryGrammar bg = grammar.second;
this.stateIndex = originalStateIndex;
List<List<String>> trainPaths, testPaths;
Set<UnaryRule> unaryRules = new HashSet<UnaryRule>();
Set<BinaryRule> binaryRules = new HashSet<BinaryRule>();
Map<String, TransducerGraph> graphs = convertGrammarToGraphs(grammar, unaryRules, binaryRules);
compactedGraphs = new HashSet<TransducerGraph>();
if (verbose) {
System.out.println("There are " + graphs.size() + " categories to compact.");
}
int i = 0;
for (Iterator<Entry<String, TransducerGraph>> graphIter = graphs.entrySet().iterator(); graphIter.hasNext();) {
Map.Entry<String, TransducerGraph> entry = graphIter.next();
String cat = entry.getKey();
TransducerGraph graph = entry.getValue();
if (verbose) {
System.out.println("About to compact grammar for " + cat + " with numNodes=" + graph.getNodes().size());
}
trainPaths = allTrainPaths.remove(cat);// to save memory
if (trainPaths == null) {
trainPaths = new ArrayList<List<String>>();
}
testPaths = allTestPaths.remove(cat);// to save memory
if (testPaths == null) {
testPaths = new ArrayList<List<String>>();
}
TransducerGraph compactedGraph = doCompaction(graph, trainPaths, testPaths);
i++;
if (verbose) {
System.out.println(i + ". Compacted grammar for " + cat + " from " + graph.getArcs().size() + " arcs to " + compactedGraph.getArcs().size() + " arcs.");
}
graphIter.remove(); // to save memory, remove the last thing
compactedGraphs.add(compactedGraph);
}
Pair<UnaryGrammar, BinaryGrammar> ugbg = convertGraphsToGrammar(compactedGraphs, unaryRules, binaryRules);
return new Triple<Index<String>, UnaryGrammar, BinaryGrammar>(newStateIndex, ugbg.first(), ugbg.second());
}
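Note the memory discipline here: each category's paths are pulled out with allTrainPaths.remove(cat) / allTestPaths.remove(cat), and the graph entry is dropped via graphIter.remove() as soon as it has been compacted, so peak memory stays proportional to a single category rather than to the whole grammar.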
Example 14: CaseExpression
import edu.stanford.nlp.util.Pair; // import the package/class this method depends on
public CaseExpression(List<Pair<Expression,Expression>> conds, Expression elseExpr) {
if (conds.size() == 0) {
throw new IllegalArgumentException("No conditions!");
} else {
expr = elseExpr;
for (int i = conds.size()-1; i>=0; i--) {
Pair<Expression,Expression> p = conds.get(i);
expr = new IfExpression(p.first(), p.second(), expr);
}
}
}
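Note the reversed loop: for conds = [(c1, e1), (c2, e2)] and elseExpr = e3, the constructor builds new IfExpression(c1, e1, new IfExpression(c2, e2, e3)), so conditions are still tested in their original order.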
Example 15: analyze
import edu.stanford.nlp.util.Pair; // import the package/class this method depends on
public TextAnalyzer analyze() {
// Stanford CoreNLP: avoid lemmatization, as it's very slow; use Porter2 stemming
// instead. (Porter -> Snowball (Porter2) -> Lancaster is the order of increasing
// stemming aggressiveness.)
//
// other ideas
// - remove top 10k most common english words
Properties props = new Properties();
props.put("annotators", "tokenize, ssplit, stopword");
props.setProperty("customAnnotatorClass.stopword", "com.asimihsan.handytrowel.nlp.StopwordAnnotator");
List<String> stopWords = null;
try {
stopWords = WordReader.wordReaderWithResourcePath("/nlp/top1000words.txt").getWords();
} catch (IOException e) {
e.printStackTrace();
return this;
}
String customStopWordList = Joiner.on(",").join(stopWords);
props.setProperty(StopwordAnnotator.STOPWORDS_LIST, customStopWordList);
StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
Annotation document = new Annotation(body);
pipeline.annotate(document);
List<CoreLabel> inputTokens = document.get(CoreAnnotations.TokensAnnotation.class);
SnowballStemmer stemmer = new englishStemmer();
for (CoreLabel token : inputTokens) {
Pair<Boolean, Boolean> stopword = token.get(StopwordAnnotator.class);
if (stopword.first())
continue;
String word = token.word().toLowerCase();
//!!AI TODO: this is crude; it should be its own optional annotator.
// Note that full stops are matched here too, so sentence boundary information is lost.
if (punctuation.matcher(word).matches())
continue;
//!AI TODO: again, this would better be its own optional annotator.
word = number.matcher(word).replaceAll("NUMBER");
stemmer.setCurrent(word);
stemmer.stem();
word = stemmer.getCurrent();
tokens.add(word);
}
return this;
}
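Two details worth noting in this example: the customAnnotatorClass.stopword property is CoreNLP's standard hook for registering a third-party annotator under the name used in the annotators list, and the Pair<Boolean, Boolean> stored by StopwordAnnotator is consumed here only through first(), which flags the token as a stopword.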