This article collects typical usage examples of the Java class edu.stanford.nlp.util.Pair. If you are wondering what the Pair class does, how to use it, or what real-world usage looks like, the curated examples below may help.
The Pair class belongs to the edu.stanford.nlp.util package. 14 code examples of the class are shown below, sorted by popularity by default.
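Before the examples, here is a minimal sketch of the Pair API as it is used throughout this page: construction, the public first/second fields and their accessor methods, and the static makePair factory. This snippet is illustrative only and is not taken from any of the projects below.

import edu.stanford.nlp.util.Pair;

public class PairBasics {
    public static void main(String[] args) {
        // Construct directly or via the static factory.
        Pair<String, Double> p = new Pair<String, Double>("score", 0.9);
        Pair<String, Double> q = Pair.makePair("score", 0.9);
        // first/second are exposed both as public fields and as methods.
        String key = p.first();
        double value = p.second;
        System.out.println(key + " = " + value + ", equal: " + p.equals(q));
    }
}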
Example 1: computeTopicSimilarity
import edu.stanford.nlp.util.Pair; // import the required package/class
private List<Pair<String, Double>> computeTopicSimilarity(Concept c, int topic) {
    if (simMeasures == null) {
        simMeasures = new HashMap<String, ConceptSimilarityMeasure>();
        simMeasures.put("topic_jaccard", new JaccardDistance());
        simMeasures.put("topic_wn", new WordBasedMeasure(WNSimMeasure.RES));
        simMeasures.put("topic_w2v", new WordEmbeddingDistance(EmbeddingType.WORD2VEC, 300, false));
    }
    String[] topicDesc = this.topicDescriptions.get(topic);
    Concept dummy = new Concept(StringUtils.join(topicDesc));
    dummy = NonUIMAPreprocessor.getInstance().preprocess(dummy);
    List<Pair<String, Double>> scores = new ArrayList<Pair<String, Double>>();
    for (String sim : simMeasures.keySet()) {
        double score = Muter.callMuted(simMeasures.get(sim)::computeSimilarity, c, dummy);
        scores.add(new Pair<String, Double>(sim, score));
    }
    return scores;
}
Example 2: compute
import edu.stanford.nlp.util.Pair; // import the required package/class
public void compute() {
    if (!computed) {
        MapBuilder mb = this.parent.getComponent(MapBuilder.class);
        ArrayList<Pair<Concept, Concept>> mappedPairs = new ArrayList<Pair<Concept, Concept>>();
        for (Pair<Concept, Concept> pair : pairs) {
            Concept c1 = mb.getConcept(pair.first());
            Concept c2 = mb.getConcept(pair.second());
            if (c1 != null && c2 != null)
                mappedPairs.add(new Pair<Concept, Concept>(c1, c2));
        }
        this.textRank.initializeFromConceptPairs(mappedPairs, counted);
        this.textRank.run();
        List<TermRank> termRanks = this.textRank.getTermRanks();
        for (TermRank termRank : termRanks) {
            this.scores.put(termRank.getStringRepresentation(), termRank.getScore());
        }
        computed = true;
    }
}
Example 3: processSentence
import edu.stanford.nlp.util.Pair; // import the required package/class
@Override
public void processSentence(JCas jcas, Sentence sent) {
    for (CC ca : JCasUtil.selectCovered(jcas, CC.class, sent)) {
        Concept c = this.parent.getComponent(ConceptExtractor.class).getConcept(ca);
        if (c != null) {
            for (Concept cn : this.lastConcepts) {
                this.pairs.add(new Pair<Concept, Concept>(cn, c));
            }
            this.lastConcepts.offer(c);
            if (this.lastConcepts.size() > windowSize)
                this.lastConcepts.poll();
        }
    }
}
Example 4: interseciton2
import edu.stanford.nlp.util.Pair; // import the required package/class
public static Boolean interseciton2() {
    List<Integer> temp = new ArrayList<Integer>();
    for (int i = 0; i < numberOfSentence + 1; i++) {
        temp.add(i);
    }
    for (Pair<String, Pair<String, String>> L : tobeCheked) {
        if (DependencySentence.containsKey(L)) {
            temp = intersection2(temp, DependencySentence.get(L));
        } else {
            return false;
        }
    }
    return !temp.isEmpty();
}
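The intersection2(List, List) helper that this method relies on is not part of the excerpt; a minimal sketch, under the assumption that it returns the elements common to both index lists, might look like this:

// Hypothetical helper, not from the original project: keeps only the
// sentence indices that appear in both lists.
private static List<Integer> intersection2(List<Integer> a, List<Integer> b) {
    List<Integer> result = new ArrayList<Integer>(a);
    result.retainAll(b);
    return result;
}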
Example 5: classify
import edu.stanford.nlp.util.Pair; // import the required package/class
@Override
public Pair<String, Double> classify(KBPInput input) {
    for (RelationType rel : RelationType.values()) {
        if (rules.containsKey(rel) &&
            rel.entityType == input.subjectType &&
            rel.validNamedEntityLabels.contains(input.objectType)) {
            Collection<SemgrexPattern> rulesForRel = rules.get(rel);
            CoreMap sentence = input.sentence.asCoreMap(Sentence::nerTags, Sentence::dependencyGraph);
            boolean matches
                = matches(sentence, rulesForRel, input,
                      sentence.get(SemanticGraphCoreAnnotations.EnhancedPlusPlusDependenciesAnnotation.class)) ||
                  matches(sentence, rulesForRel, input,
                      sentence.get(SemanticGraphCoreAnnotations.AlternativeDependenciesAnnotation.class));
            if (matches) {
                // logger.log("MATCH for " + rel + ". " + sentence + " with rules for " + rel);
                return Pair.makePair(rel.canonicalName, 1.0);
            }
        }
    }
    return Pair.makePair(NO_RELATION, 1.0);
}
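A hedged usage sketch of the returned pair (the classifier and input instances are assumptions, not shown in the excerpt):

Pair<String, Double> prediction = classifier.classify(input); // hypothetical instances
String relation = prediction.first;    // e.g. NO_RELATION when no rule matched
double confidence = prediction.second; // this rule-based extractor always reports 1.0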
Example 6: classify
import edu.stanford.nlp.util.Pair; // import the required package/class
@Override
public Pair<String, Double> classify(KBPInput input) {
    switch (ensembleStrategy) {
        case DEFAULT:
            return classifyDefault(input);
        case HIGHEST_SCORE:
            return classifyWithHighestScore(input);
        case VOTE:
            return classifyWithVote(input);
        case WEIGHTED_VOTE:
            return classifyWithWeightedVote(input);
        case HIGH_RECALL:
            return classifyWithHighRecall(input);
        case HIGH_PRECISION:
            return classifyWithHighPrecision(input);
        default:
            throw new UnsupportedOperationException(ensembleStrategy + " not supported");
    }
}
Example 7: parse
import edu.stanford.nlp.util.Pair; // import the required package/class
public static void parse(FigerSystem sys, int lineId, String text) {
    Annotation annotation = new Annotation(text);
    Preprocessing.pipeline.annotate(annotation);
    // for each sentence
    int sentId = 0;
    for (CoreMap sentence : annotation.get(SentencesAnnotation.class)) {
        // System.out.println("[l" + lineId + "][s" + sentId + "]tokenized sentence="
        //         + StringUtils.joinWithOriginalWhiteSpace(sentence.get(TokensAnnotation.class)));
        List<Pair<Integer, Integer>> entityMentionOffsets = getNamedEntityMentions(sentence);
        for (Pair<Integer, Integer> offset : entityMentionOffsets) {
            String label = sys.predict(annotation, sentId,
                    offset.first, offset.second);
            String mention = StringUtils.joinWithOriginalWhiteSpace(sentence.get(
                    TokensAnnotation.class).subList(offset.first, offset.second));
            System.out.println("[l" + lineId + "][s" + sentId + "]mention("
                    + offset.first + "," + offset.second + ") = " + mention
                    + ", pred = " + label);
        }
        sentId++;
    }
}
Example 8: parse
import edu.stanford.nlp.util.Pair; // import the required package/class
public static void parse(ParseStanfordFigerReverb sys, int lineId, String text) {
    Annotation annotation = new Annotation(text);
    Preprocessing.pipeline.annotate(annotation);
    // for each sentence
    int sentId = 0;
    for (CoreMap sentence : annotation.get(SentencesAnnotation.class)) {
        // System.out.println("[l" + lineId + "][s" + sentId + "]tokenized sentence="
        //         + StringUtils.joinWithOriginalWhiteSpace(sentence.get(TokensAnnotation.class)));
        List<Pair<Integer, Integer>> entityMentionOffsets = getNamedEntityMentions(sentence);
        for (Pair<Integer, Integer> offset : entityMentionOffsets) {
            String label = sys.predict(annotation, sentId, offset.first, offset.second);
            String mention = StringUtils.joinWithOriginalWhiteSpace(
                    sentence.get(TokensAnnotation.class).subList(offset.first, offset.second));
            System.out.println("[l" + lineId + "][s" + sentId + "]mention(" + offset.first + ","
                    + offset.second + ") = " + mention + ", pred = " + label);
        }
        sentId++;
    }
}
Example 9: loadDependencies
import edu.stanford.nlp.util.Pair; // import the required package/class
public void loadDependencies(String filename) throws IOException {
    LineNumberReader reader = IOTools.getReaderFromFile(filename);
    forwardDependenciesCache = new HashMap<Integer, Map<Integer, HashSet<Integer>>>();
    reverseDependenciesCache = new HashMap<Integer, Map<Integer, Integer>>();
    reachableNodesCache = new HashMap<Integer, Map<Integer, Set<Integer>>>();
    HashMap<Integer, Pair<IndexedWord, List<Integer>>> deps;
    int i = 0;
    while ((deps = DependencyUtils.getDependenciesFromCoNLLFileReader(reader, true, true)) != null) {
        reverseDependenciesCache.put(i, DependencyUtils.getReverseDependencies(deps));
        Map<Integer, HashSet<Integer>> forwardDeps = new HashMap<Integer, HashSet<Integer>>();
        for (Integer gov : deps.keySet()) {
            // The second element of each pair holds the governor's children.
            forwardDeps.put(gov, new HashSet<Integer>(deps.get(gov).second));
        }
        forwardDependenciesCache.put(i, forwardDeps);
        i++;
    }
    reader.close();
}
Example 10: addTranslationRow
import edu.stanford.nlp.util.Pair; // import the required package/class
public boolean addTranslationRow(String name, String trans, Color bgColor) {
    JLabel label = new JLabel(trans);
    label.setOpaque(true);
    label.setBackground(bgColor);
    label.setForeground(Color.WHITE);
    GridBagConstraints c = new GridBagConstraints();
    c.fill = GridBagConstraints.HORIZONTAL;
    c.gridx = 0;
    c.ipady = 20;
    c.gridwidth = numColumns;
    if (unusedRows.isEmpty()) {
        ++numFullTranslationRows;
        c.gridy = numRows + numFullTranslationRows;
    } else {
        c.gridy = unusedRows.removeFirst();
    }
    if (panel != null)
        panel.add(label, c);
    fullTranslations.put(name, new Pair<Integer, JLabel>(c.gridy, label));
    return true;
}
Example 11: _unpronoun
import edu.stanford.nlp.util.Pair; // import the required package/class
private static Map<Integer, Pair<CorefMention, CorefMention>> _unpronoun(Phrase p) {
    Stream<Pair<CorefMention, CorefMention>> s =
        Stream.of(p.memo(Phrase.coreNLP).get(CorefChainAnnotation.class))
            .filter(Objects::nonNull) // do nothing if the annotation is absent
            .flatMap(chains -> chains.entrySet().stream()) // disassemble the map
            .flatMap(entry -> {
                // Link each mention in the chain to its representative mention
                CorefMention main = entry.getValue().getRepresentativeMention();
                return entry.getValue().getMentionsInTextualOrder().stream()
                    .filter(mention -> mention != main)
                    .map(mention -> makePair(mention, main));
            });
    // Type inference chokes here, so collect into a variable and then return it.
    // (The no-op combiner assumes a sequential stream.)
    return s.collect(HashMap::new,
        (m, pair) -> m.put(pair.first.headIndex, pair),
        (l, r) -> {});
}
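The resulting map is keyed by the head token index of each non-representative mention. A hypothetical usage sketch (the phrase instance and token index are assumptions):

// Resolve the mention headed at token index 5 to its antecedent, if any.
Map<Integer, Pair<CorefMention, CorefMention>> links = _unpronoun(phrase);
Pair<CorefMention, CorefMention> link = links.get(5);
if (link != null) {
    System.out.println(link.first.mentionSpan + " -> " + link.second.mentionSpan);
}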
Example 12: getStanfordSentence
import edu.stanford.nlp.util.Pair; // import the required package/class
/**
 * @param sentence
 *            input sentence, space delimited
 * @return a pair containing <list of word-pos terms, remaining unhandled terms>;
 *         stopwords are discarded when the discardStopwords field is set
 */
public Pair<List<String>, List<String>> getStanfordSentence(String sentence)
{
    List<WordLemmaTag> wlts = SentenceProcessor.getInstance().processSentence(sentence, false);
    List<String> terms = null;
    StanfordSentence sSentence = StanfordSentence.fromLine(Strings.join(wlts, " "));
    try
    {
        terms = sSentence.getTerms(TAGS,
                Language.EN,
                null,
                MultiwordBelongingTo.WORDNET,
                CompoundingParameter.ALLOW_MULTIWORD_EXPRESSIONS,
                CompoundingParameter.APPEND_POS);
    }
    catch (Exception e)
    {
        e.printStackTrace();
    }
    // discards OOVs, and tries to map incorrect pos-tags to the correct ones
    return fixTerms(terms, discardStopwords);
}
Example 13: fixAllCasings
import edu.stanford.nlp.util.Pair; // import the required package/class
public static void fixAllCasings(List<Pair<String, String>> pairs, String path)
{
    try
    {
        BufferedWriter bw = new BufferedWriter(new FileWriter(path, false));
        for (Pair<String, String> aPair : pairs)
        {
            Pair<String, String> fixedPair = caseFixer(aPair);
            bw.write(fixedPair.first + "\t" + fixedPair.second + "\n");
        }
        bw.close();
    }
    catch (Exception e)
    {
        e.printStackTrace();
    }
}
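caseFixer is not included in this excerpt; a minimal hypothetical sketch, assuming it simply normalizes both sides of the pair to lower case, could look like this (the real helper may apply more sophisticated truecasing rules):

// Hypothetical helper, not from the original project.
private static Pair<String, String> caseFixer(Pair<String, String> p)
{
    return new Pair<String, String>(p.first.toLowerCase(), p.second.toLowerCase());
}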
Example 14: findTree
import edu.stanford.nlp.util.Pair; // import the required package/class
/**
 * Finds the parent nodes of the leaves in tree t whose character offsets
 * match the start and end of the given range.
 *
 * @param t the parse tree to search
 * @param range the character offset range [from, to]
 * @return a pair of the two parent nodes, or null if either end is not found
 */
public static Pair<Tree, Tree> findTree(Tree t, Range<Integer> range) {
    Tree tnF = null;
    Tree tnT = null;
    for (Tree leaf : t.getLeaves()) {
        OffsetLabel label = (OffsetLabel) leaf.label();
        if (range.getMinimum() == label.beginPosition()) {
            tnF = leaf.parent(t);
        }
        if (range.getMaximum() == label.endPosition()) {
            tnT = leaf.parent(t);
        }
    }
    if (tnF == null || tnT == null) {
        return null;
    }
    return new Pair<Tree, Tree>(tnF, tnT);
}