

Java Pair Class Code Examples

This article collects typical usage examples of the Java class edu.stanford.nlp.util.Pair. If you are wondering what the Pair class does, how to use it, or what real-world Pair code looks like, the curated class code examples below should help.


The Pair class belongs to the edu.stanford.nlp.util package. A total of 15 Pair code examples are presented below, sorted by popularity by default.
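
Before diving into the examples, here is a minimal sketch of the Pair API itself as it ships in Stanford CoreNLP: direct construction, the static makePair factory, the first()/second() accessors, and the public first/second fields. The examples below use both access styles interchangeably; the demo class name is ours, not part of the library.

import edu.stanford.nlp.util.Pair;

public class PairBasics {
	public static void main(String[] args) {
		// Construct directly or via the static factory.
		Pair<String, Double> p = new Pair<String, Double>("topic_jaccard", 0.42);
		Pair<String, Double> q = Pair.makePair("topic_jaccard", 0.42);

		// Both accessor methods and public fields are available.
		String name = p.first();  // accessor style
		double score = p.second;  // public-field style

		// equals/hashCode are defined element-wise, so structurally
		// equal pairs compare equal and can serve as collection keys.
		System.out.println(name + " = " + score + ", equal: " + p.equals(q));
	}
}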

Example 1: computeTopicSimilarity

import edu.stanford.nlp.util.Pair; // import the required package/class
private List<Pair<String, Double>> computeTopicSimilarity(Concept c, int topic) {

		if (simMeasures == null) {
			simMeasures = new HashMap<String, ConceptSimilarityMeasure>();
			simMeasures.put("topic_jaccard", new JaccardDistance());
			simMeasures.put("topic_wn", new WordBasedMeasure(WNSimMeasure.RES));
			simMeasures.put("topic_w2v", new WordEmbeddingDistance(EmbeddingType.WORD2VEC, 300, false));
		}

		String[] topicDesc = this.topicDescriptions.get(topic);
		Concept dummy = new Concept(StringUtils.join(topicDesc));
		dummy = NonUIMAPreprocessor.getInstance().preprocess(dummy);

		List<Pair<String, Double>> scores = new ArrayList<Pair<String, Double>>();
		for (String sim : simMeasures.keySet()) {
			double score = Muter.callMuted(simMeasures.get(sim)::computeSimilarity, c, dummy);
			scores.add(new Pair<String, Double>(sim, score));
		}
		return scores;
	}
 
Developer: UKPLab, Project: ijcnlp2017-cmaps, Lines of code: 21, Source file: FeatureExtractor.java

Example 2: compute

import edu.stanford.nlp.util.Pair; // import the required package/class
public void compute() {
	if (!computed) {

		MapBuilder mb = this.parent.getComponent(MapBuilder.class);
		ArrayList<Pair<Concept, Concept>> mappedPairs = new ArrayList<Pair<Concept, Concept>>();
		for (Pair<Concept, Concept> pair : pairs) {
			Concept c1 = mb.getConcept(pair.first());
			Concept c2 = mb.getConcept(pair.second());
			if (c1 != null && c2 != null)
				mappedPairs.add(new Pair<Concept, Concept>(c1, c2));
		}

		this.textRank.initializeFromConceptPairs(mappedPairs, counted);
		this.textRank.run();
		List<TermRank> termRanks = this.textRank.getTermRanks();
		for (TermRank termRank : termRanks) {
			this.scores.put(termRank.getStringRepresentation(), termRank.getScore());
		}

		computed = true;
	}
}
 
Developer: UKPLab, Project: emnlp2017-cmapsum-corpus, Lines of code: 23, Source file: TextRankScorer.java

Example 3: processSentence

import edu.stanford.nlp.util.Pair; // import the required package/class
@Override
public void processSentence(JCas jcas, Sentence sent) {

	for (CC ca : JCasUtil.selectCovered(jcas, CC.class, sent)) {
		Concept c = this.parent.getComponent(ConceptExtractor.class).getConcept(ca);
		if (c != null) {
			for (Concept cn : this.lastConcepts) {
				this.pairs.add(new Pair<Concept, Concept>(cn, c));
			}
			this.lastConcepts.offer(c);
			if (this.lastConcepts.size() > windowSize)
				this.lastConcepts.poll();
		}
	}

}
 
Developer: UKPLab, Project: emnlp2017-cmapsum-corpus, Lines of code: 17, Source file: TextRankScorer.java

Example 4: interseciton2

import edu.stanford.nlp.util.Pair; // import the required package/class
public static Boolean interseciton2() {
	List<Integer> temp = new ArrayList<Integer>();
	for (int i = 0; i < numberOfSentence + 1; i++) {
		temp.add(i);
	}
	for (Pair<String, Pair<String, String>> L : tobeCheked) {
		if (DependencySentence.containsKey(L)) {
			temp = intersection2(temp, DependencySentence.get(L));
		} else {
			return false;
		}
	}
	// the constraints are satisfiable iff at least one sentence index survives
	return !temp.isEmpty();
}
 
Developer: kunal15595, Project: smart-question-answering-nlp, Lines of code: 21, Source file: processMain.java
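
Example 4 above works because a Pair can serve as a HashMap key: Pair defines equals and hashCode element-wise, so a structurally equal nested Pair<String, Pair<String, String>> retrieves the same entry. A minimal standalone sketch of that pattern follows; the dependency-triple strings and the index name are hypothetical, not taken from the project.

import edu.stanford.nlp.util.Pair;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class PairAsMapKey {
	public static void main(String[] args) {
		// Map a (relation, (governor, dependent)) triple to sentence indices.
		Map<Pair<String, Pair<String, String>>, List<Integer>> index =
				new HashMap<Pair<String, Pair<String, String>>, List<Integer>>();
		index.put(new Pair<String, Pair<String, String>>("nsubj",
				new Pair<String, String>("eats", "cat")),
				new ArrayList<Integer>(Arrays.asList(0, 3)));

		// A structurally equal key, built via the factory, finds the entry.
		Pair<String, Pair<String, String>> probe =
				Pair.makePair("nsubj", Pair.makePair("eats", "cat"));
		System.out.println(index.get(probe)); // prints [0, 3]
	}
}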

Example 5: classify

import edu.stanford.nlp.util.Pair; // import the required package/class
@Override
public Pair<String, Double> classify(KBPInput input) {
    for (RelationType rel : RelationType.values()) {

        if (rules.containsKey(rel) &&
                rel.entityType == input.subjectType &&
                rel.validNamedEntityLabels.contains(input.objectType)) {
            Collection<SemgrexPattern> rulesForRel = rules.get(rel);
            CoreMap sentence = input.sentence.asCoreMap(Sentence::nerTags, Sentence::dependencyGraph);
            boolean matches
                    = matches(sentence, rulesForRel, input,
                    sentence.get(SemanticGraphCoreAnnotations.EnhancedPlusPlusDependenciesAnnotation.class)) ||
                    matches(sentence, rulesForRel, input,
                            sentence.get(SemanticGraphCoreAnnotations.AlternativeDependenciesAnnotation.class));
            if (matches) {
                // logger.log("MATCH for " + rel + ": " + sentence + " with rules for " + rel);
                return Pair.makePair(rel.canonicalName, 1.0);
            }
        }
    }

    return Pair.makePair(NO_RELATION, 1.0);
}
 
Developer: intel-analytics, Project: InformationExtraction, Lines of code: 24, Source file: IntelKBPSemgrexExtractor.java

Example 6: classify

import edu.stanford.nlp.util.Pair; // import the required package/class
@Override
public Pair<String, Double> classify(KBPInput input) {
    switch (ensembleStrategy) {
        
        case DEFAULT:
            return classifyDefault(input);
        case HIGHEST_SCORE:
            return classifyWithHighestScore(input);
        case VOTE:
            return classifyWithVote(input);
        case WEIGHTED_VOTE:
            return classifyWithWeightedVote(input);
        case HIGH_RECALL:
            return classifyWithHighRecall(input);
        case HIGH_PRECISION:
            return classifyWithHighPrecision(input);
        default:
            throw new UnsupportedOperationException(ensembleStrategy + " not supported");
    }
}
 
Developer: intel-analytics, Project: InformationExtraction, Lines of code: 21, Source file: IntelKBPEnsembleExtractor.java

Example 7: classify

import edu.stanford.nlp.util.Pair; // import the required package/class
@Override
public Pair<String, Double> classify(KBPInput input) {
  for (RelationType rel : RelationType.values()) {

    if (rules.containsKey(rel) &&
        rel.entityType == input.subjectType &&
        rel.validNamedEntityLabels.contains(input.objectType)) {
      Collection<SemgrexPattern> rulesForRel = rules.get(rel);
      CoreMap sentence = input.sentence.asCoreMap(Sentence::nerTags, Sentence::dependencyGraph);
      boolean matches
          = matches(sentence, rulesForRel, input,
          sentence.get(SemanticGraphCoreAnnotations.EnhancedPlusPlusDependenciesAnnotation.class)) ||
          matches(sentence, rulesForRel, input,
              sentence.get(SemanticGraphCoreAnnotations.AlternativeDependenciesAnnotation.class));
      if (matches) {
        // logger.log("MATCH for " + rel + ": " + sentence + " with rules for " + rel);
        return Pair.makePair(rel.canonicalName, 1.0);
      }
    }
  }

  return Pair.makePair(NO_RELATION, 1.0);
}
 
Developer: intel-analytics, Project: InformationExtraction, Lines of code: 24, Source file: KBPSemgrexExtractor.java
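
Examples 5-7 all report their prediction as a Pair<String, Double> holding the relation name and a confidence score. Here is a hedged sketch of how a caller might unpack such a result; the relation string below is a made-up placeholder, and a real call would obtain the pair from the extractor's classify(input) rather than constructing it directly.

import edu.stanford.nlp.util.Pair;

public class ClassifyResultDemo {
	public static void main(String[] args) {
		// Stand-in for the extractor's return value.
		Pair<String, Double> prediction = Pair.makePair("per:title", 1.0);

		String relation = prediction.first;     // relation name, or NO_RELATION
		double confidence = prediction.second;  // fixed at 1.0 in these rule-based extractors
		System.out.println(relation + " @ " + confidence);
	}
}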

Example 8: parse

import edu.stanford.nlp.util.Pair; // import the required package/class
public static void parse(FigerSystem sys, int lineId, String text) {
	Annotation annotation = new Annotation(text);
	Preprocessing.pipeline.annotate(annotation);
	// for each sentence
	int sentId = 0;
	for (CoreMap sentence : annotation.get(SentencesAnnotation.class)) {
		// System.out.println("[l" + i + "][s"
		// + sentId + "]tokenized sentence="
		// + StringUtils.joinWithOriginalWhiteSpace(sentence
		// .get(TokensAnnotation.class)));
		List<Pair<Integer, Integer>> entityMentionOffsets = getNamedEntityMentions(sentence);
		for (Pair<Integer, Integer> offset : entityMentionOffsets) {
			String label = sys.predict(annotation, sentId,
					offset.first, offset.second);
			String mention = StringUtils.joinWithOriginalWhiteSpace(sentence.get(
					TokensAnnotation.class).subList(offset.first, offset.second));
			System.out.println("[l" + lineId + "][s" + sentId + "]mention"
					+ mention + "(" + offset.first + ","
					+ offset.second + ") = " + mention + ", pred = "
					+ label);
		}
		sentId++;
	}
}
 
Developer: zhangcongle, Project: NewsSpikeRe, Lines of code: 25, Source file: FigerSystem.java

Example 9: parse

import edu.stanford.nlp.util.Pair; // import the required package/class
public static void parse(ParseStanfordFigerReverb sys, int lineId, String text) {
	Annotation annotation = new Annotation(text);
	Preprocessing.pipeline.annotate(annotation);
	// for each sentence
	int sentId = 0;
	for (CoreMap sentence : annotation.get(SentencesAnnotation.class)) {
		// System.out.println("[l" + i + "][s"
		// + sentId + "]tokenized sentence="
		// + StringUtils.joinWithOriginalWhiteSpace(sentence
		// .get(TokensAnnotation.class)));
		List<Pair<Integer, Integer>> entityMentionOffsets = getNamedEntityMentions(sentence);
		for (Pair<Integer, Integer> offset : entityMentionOffsets) {
			String label = sys.predict(annotation, sentId, offset.first, offset.second);
			String mention = StringUtils.joinWithOriginalWhiteSpace(
					sentence.get(TokensAnnotation.class).subList(offset.first, offset.second));
			System.out.println("[l" + lineId + "][s" + sentId + "]mention" + mention + "(" + offset.first + ","
					+ offset.second + ") = " + mention + ", pred = " + label);
		}
		sentId++;
	}
}
 
Developer: zhangcongle, Project: NewsSpikeRe, Lines of code: 22, Source file: ParseStanfordFigerReverb.java

Example 10: loadDependencies

import edu.stanford.nlp.util.Pair; // import the required package/class
public void loadDependencies(String filename) throws IOException {
  LineNumberReader reader = IOTools.getReaderFromFile(filename);
  forwardDependenciesCache = new HashMap<Integer, Map<Integer, HashSet<Integer>>>();
  reverseDependenciesCache = new HashMap<Integer, Map<Integer, Integer>>();
  reachableNodesCache = new HashMap<Integer, Map<Integer, Set<Integer>>>();

  
  HashMap<Integer, Pair<IndexedWord, List<Integer>>> deps;
  int i = 0;
  while ((deps = DependencyUtils.getDependenciesFromCoNLLFileReader(reader, true, true)) != null) {
    reverseDependenciesCache.put(i,DependencyUtils.getReverseDependencies(deps));
    Map<Integer, HashSet<Integer>> forwardDeps = new HashMap<Integer, HashSet<Integer>>();
    for (Integer gov : deps.keySet()) {
      List<Integer> children = deps.get(gov).second;
      forwardDeps.put(gov, new HashSet<Integer>());
      for (Integer child : children) {
        forwardDeps.get(gov).add(child);
      }
    }
    forwardDependenciesCache.put(i, forwardDeps);
    i++;
  }
  
  reader.close();
}
 
Developer: stanfordnlp, Project: phrasal, Lines of code: 26, Source file: AbstractDependencyLanguageModelFeaturizer.java

Example 11: addTranslationRow

import edu.stanford.nlp.util.Pair; // import the required package/class
public boolean addTranslationRow(String name, String trans, Color bgColor) {
  JLabel label = new JLabel(trans);
  label.setOpaque(true);
  label.setBackground(bgColor);
  label.setForeground(Color.WHITE);

  GridBagConstraints c = new GridBagConstraints();
  c.fill = GridBagConstraints.HORIZONTAL;
  c.gridx = 0;
  c.ipady = 20;
  c.gridwidth = numColumns;

  if (unusedRows.isEmpty()) {
    ++numFullTranslationRows;
    c.gridy = numRows + numFullTranslationRows;
  } else {
    c.gridy = unusedRows.removeFirst();
  }

  if (panel != null)
    panel.add(label, c);
  fullTranslations.put(name, new Pair<Integer, JLabel>(c.gridy, label));

  return true;
}
 
Developer: stanfordnlp, Project: phrasal, Lines of code: 26, Source file: TranslationLayout.java

Example 12: _unpronoun

import edu.stanford.nlp.util.Pair; // import the required package/class
private static Map<Integer, Pair<CorefMention, CorefMention>> _unpronoun(Phrase p) {
	Stream<Pair<CorefMention, CorefMention>> s =
			Stream.of(p.memo(Phrase.coreNLP).get(CorefChainAnnotation.class))
		.filter(Objects::nonNull)  // Do nothing with an empty map
		.flatMap(chains -> chains.entrySet().stream()) // Disassemble the map
	    .flatMap(entry -> {
			// Link each entry to its main mention
			CorefMention main = entry.getValue().getRepresentativeMention();
			return entry.getValue().getMentionsInTextualOrder().stream()
				.filter(mention -> mention != main)
				.map(mention -> makePair(mention, main));
		});
	// Type inference chokes here so write it down then return.
	return s.collect(HashMap::new,
			(m, pair) -> m.put(pair.first.headIndex, pair),
			(l, r) -> {});
}
 
Developer: SeanTater, Project: uncc2014watsonsim, Lines of code: 18, Source file: Phrase.java

Example 13: getStanfordSentence

import edu.stanford.nlp.util.Pair; // import the required package/class
/**
 * Processes a sentence and extracts its terms.
 *
 * @param sentence
 *            input sentence, space-delimited
 * @return a pair containing the list of word-pos terms and the list of
 *         remaining unhandled terms; stopwords are discarded according
 *         to the discardStopwords field
 */
public Pair<List<String>, List<String>> getStanfordSentence(String sentence)
{
	List<WordLemmaTag> wlts = SentenceProcessor.getInstance().processSentence(sentence, false);
	
	List<String> terms = null;
	StanfordSentence sSentence = StanfordSentence.fromLine(Strings.join(wlts," "));
	
	try
	{
		 terms = sSentence.getTerms(TAGS, 
				 Language.EN, 
				 null, 
				 MultiwordBelongingTo.WORDNET, 
				 CompoundingParameter.ALLOW_MULTIWORD_EXPRESSIONS,
				 CompoundingParameter.APPEND_POS);	 
	}
	catch(Exception e)
	{
		e.printStackTrace();
	}

	//discards OOVs, and tries to map incorrect pos-tags to the correct ones
	return fixTerms(terms, discardStopwords);
}
 
Developer: pschuette22, Project: Zeppa-AppEngine, Lines of code: 35, Source file: TextualSimilarity.java

Example 14: fixAllCasings

import edu.stanford.nlp.util.Pair; // import the required package/class
public static void fixAllCasings(List<Pair<String,String>> pairs, String path)
{
	try
	{
		BufferedWriter bw = new BufferedWriter(new FileWriter(path, false)); 
		
		for(Pair<String,String> aPair : pairs)
		{
			Pair<String,String> fixedPair = caseFixer(aPair);
			
			bw.write(fixedPair.first+"\t"+fixedPair.second+"\n");
		}
		
		bw.close();
	}
	catch(Exception e)
	{
		e.printStackTrace();
	}
}
 
Developer: pschuette22, Project: Zeppa-AppEngine, Lines of code: 21, Source file: Preprocess.java

Example 15: findTree

import edu.stanford.nlp.util.Pair; // import the required package/class
/**
 * Finds the parent subtrees of the leaves in t whose character offsets
 * match the boundaries of the given range.
 *
 * @param t the parse tree to search
 * @param range the begin/end character offsets to match
 * @return the pair of boundary subtrees, or null if nothing is found
 */
public static Pair<Tree, Tree> findTree(Tree t, Range<Integer> range) {
  Tree tnF = null;
  Tree tnT = null;
  for (Tree leaf : t.getLeaves()) {
    OffsetLabel label = (OffsetLabel) leaf.label();
    if (range.getMinimum() == label.beginPosition()) {
      tnF = leaf.parent(t);
    }
    if (range.getMaximum() == label.endPosition()) {
      tnT = leaf.parent(t);
    }
  }
  if (tnF == null || tnT == null) {
    return null;
  }
  return new Pair<Tree, Tree>(tnF, tnT);
}
 
Developer: leebird, Project: legonlp, Lines of code: 25, Source file: Utils.java


Note: The edu.stanford.nlp.util.Pair class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and the source code copyright belongs to the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.