本文整理汇总了Java中org.apache.solr.common.util.Pair类的典型用法代码示例。如果您正苦于以下问题:Java Pair类的具体用法?Java Pair怎么用?Java Pair使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
Pair类属于org.apache.solr.common.util包,在下文中一共展示了Pair类的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: readAlgorithmResults
import org.apache.solr.common.util.Pair; //导入依赖的package包/类
/**
* the program will look for files in the pattern '[algorithm_name].[ext]'
*
* @param inFolder that contains output of algorithms
* @param algAndWeight a map that contains algorithm name and its weight
* @return
*/
/**
 * Loads the ranked output of each algorithm and pairs it with that algorithm's
 * voting weight. The program will look for files in the pattern
 * '[algorithm_name].[ext]' inside the given folder.
 *
 * @param inFolder     folder that contains the output files of the algorithms
 * @param algAndWeight map of algorithm output file name to its voting weight
 * @param algAndReader map of algorithm output file name to the reader able to parse it
 * @return one Pair per algorithm: key = parsed term list (never null), value = weight
 */
public Pair[] readAlgorithmResults(String inFolder,
                                   Map<String, Double> algAndWeight,
                                   Map<String, FileOutputReader> algAndReader) {
    Pair[] pairs = new Pair[algAndWeight.size()];
    int i = 0;
    for (Map.Entry<String, Double> en : algAndWeight.entrySet()) {
        File f = new File(inFolder + File.separator + en.getKey());
        FileOutputReader reader = algAndReader.get(en.getKey());
        // Fix: the original left 'terms' null when read() threw, producing a
        // Pair with a null key that NPEs later (e.g. in vote()). Fall back to
        // an empty list so the algorithm simply contributes no votes.
        List<JATETerm> terms = new ArrayList<>();
        try {
            terms = reader.read(f.toString());
        } catch (IOException e) {
            e.printStackTrace();
        }
        pairs[i] = new Pair<>(terms, en.getValue());
        i++;
    }
    return pairs;
}
示例2: iterateAddDomainTermFields
import org.apache.solr.common.util.Pair; //导入依赖的package包/类
/**
 * Replaces the document's domain-term field values with the filtered
 * candidate terms, optionally boosting each value by the term's score.
 *
 * @param isBoosted              whether to use each term's score as the field boost
 * @param domainTermsFieldName   name of the schema field that stores domain terms
 * @param indexSchema            schema used to create the field values
 * @param doc                    the document being updated
 * @param filteredCandidateTerms (term, score) pairs to write into the field
 */
private void iterateAddDomainTermFields(boolean isBoosted, String domainTermsFieldName,
                                        IndexSchema indexSchema, Document doc,
                                        List<Pair<String, Double>> filteredCandidateTerms) {
    // Drop any values written by a previous pass before re-adding.
    doc.removeFields(domainTermsFieldName);
    for (Pair<String, Double> candidate : filteredCandidateTerms) {
        if (candidate == null) {
            continue;
        }
        if (isBoosted) {
            // Use the term's own score as the boost value.
            doc.add(indexSchema.getField(domainTermsFieldName)
                    .createField(candidate.getKey(), candidate.getValue().floatValue()));
        } else {
            doc.add(indexSchema.getField(domainTermsFieldName)
                    .createField(candidate.getKey(), DEFAULT_BOOST_VALUE));
        }
    }
}
示例3: applyPairAnalysis
import org.apache.solr.common.util.Pair; //导入依赖的package包/类
/**
 * Applies a pair analysis to this time series and the time series produced by
 * the sub-query, recording the result in the given function value map.
 *
 * @param analysis           the pair analysis to execute
 * @param subQueryTimeSeries the second (sub-query) time series of the pair
 * @param functionValues     collector for the analysis result
 */
@Override
public void applyPairAnalysis(ChronixPairAnalysis analysis, ChronixTimeSeries subQueryTimeSeries, FunctionValueMap functionValues) {
    // The framework hands the analysis in untyped; the cast is unavoidable,
    // so suppress the warning on the narrowest possible scope.
    @SuppressWarnings("unchecked")
    ChronixPairAnalysis<Pair<MetricTimeSeries, MetricTimeSeries>> pairAnalysis =
            (ChronixPairAnalysis<Pair<MetricTimeSeries, MetricTimeSeries>>) analysis;
    ChronixMetricTimeSeries secondMetricTimeSeries = (ChronixMetricTimeSeries) subQueryTimeSeries;
    // Fix: use a typed Pair (was a raw 'new Pair(...)', which compiles with an
    // unchecked warning) — behavior is unchanged.
    pairAnalysis.execute(new Pair<>(timeSeries, secondMetricTimeSeries.timeSeries), functionValues);
}
示例4: execute
import org.apache.solr.common.util.Pair; //导入依赖的package包/类
/**
 * Runs fast dynamic time warping (DTW) on the two metric time series of the
 * pair and records whether their normalized warping distance is within the
 * configured maximum cost.
 *
 * @param timeSeriesPair   the pair of time series to compare
 * @param functionValueMap collector for the boolean analysis result
 */
@Override
public void execute(Pair<MetricTimeSeries, MetricTimeSeries> timeSeriesPair, FunctionValueMap functionValueMap) {
    // The DTW library operates on multivariate series, so wrap both inputs first.
    MultivariateTimeSeries base = buildMultiVariateTimeSeries(timeSeriesPair.first());
    MultivariateTimeSeries candidate = buildMultiVariateTimeSeries(timeSeriesPair.second());
    // Delegate the warping-distance computation to the fast DTW library.
    TimeWarpInfo warpInfo = FastDTW.getWarpInfoBetween(base, candidate, searchRadius, distanceFunction);
    // The pair "matches" when the normalized distance does not exceed the threshold;
    // report the result under the second (sub-query) series' name.
    boolean withinCost = warpInfo.getNormalizedDistance() <= maxNormalizedWarpingCost;
    functionValueMap.add(this, withinCost, timeSeriesPair.second().getName());
}
示例5: computeSingleWorker
import org.apache.solr.common.util.Pair; //导入依赖的package包/类
/**
 * For every term in the task list, finds the longer candidate terms that
 * contain it as a whole word and records each containment pair in the feature.
 *
 * @param taskTerms the terms assigned to this worker
 * @return a two-element array: {terms processed, total task size}
 */
@Override
protected int[] computeSingleWorker(List<String> taskTerms) {
    int processed = 0;
    LOG.info("Total terms to process=" + taskTerms.size());
    for (String term : taskTerms) {
        String[] termTokens = term.split(" ");
        // Collect candidate parent terms that are strictly longer (in tokens)
        // than the current term. getSorted() yields candidates in descending
        // token-count order, so we can stop at the first non-longer one.
        Set<String> parents = new HashSet<>();
        for (String token : termTokens) {
            for (Pair<String, Integer> candidate : featureTermCompIndex.getSorted(token)) {
                if (candidate.getValue() <= termTokens.length) {
                    break;
                }
                parents.add(candidate.getKey());
            }
        }
        // Whole-word containment: the term must not be adjacent to word characters.
        Pattern containment = Pattern.compile("(?<!\\w)" + Pattern.quote(term) + "(?!\\w)");
        for (String parent : parents) {
            if (containment.matcher(parent).find()) { // ref term contains term
                feature.add(term, parent);
            }
        }
        processed++;
        if (processed % 2000 == 0) {
            LOG.debug(processed + "/" + taskTerms.size());
        }
    }
    return new int[]{processed, taskTerms.size()};
}
示例6: add
import org.apache.solr.common.util.Pair; //导入依赖的package包/类
/**
 * Registers a (term, tokenCount) pair under one of the term's unigrams.
 * Synchronized because multiple workers add to the index concurrently.
 *
 * @param unigram   a single word contained in the term
 * @param term      the full term string
 * @param numTokens the number of tokens the term consists of
 */
public synchronized void add(String unigram, String term, int numTokens) {
    // computeIfAbsent replaces the get / null-check / put sequence; behavior
    // is identical but the lookup happens only once.
    index.computeIfAbsent(unigram, k -> new ArrayList<>()).add(new Pair<>(term, numTokens));
}
示例7: getSorted
import org.apache.solr.common.util.Pair; //导入依赖的package包/类
/**
 * Returns the (term, tokenCount) pairs indexed under the given unigram,
 * sorted by token count in descending order. Returns an empty list when the
 * unigram is unknown.
 *
 * NOTE(review): add() is synchronized but this read is not — presumably reads
 * only happen after indexing completes; confirm against callers.
 *
 * @param unigram the unigram to look up
 * @return a fresh, descending-sorted copy of the indexed pairs
 */
public List<Pair<String, Integer>> getSorted(String unigram) {
    List<Pair<String, Integer>> indexed = index.get(unigram);
    // Copy so the caller can never mutate the index's own list.
    List<Pair<String, Integer>> sorted =
            indexed == null ? new ArrayList<>() : new ArrayList<>(indexed);
    // Descending by token count.
    sorted.sort((a, b) -> b.getValue().compareTo(a.getValue()));
    return sorted;
}
示例8: main
import org.apache.solr.common.util.Pair; //导入依赖的package包/类
/**
 * Reads the seed-term output of eleven algorithms, combines them by weighted
 * voting, and writes the merged ranking to a JSON file.
 *
 * @param args unused
 * @throws IOException if the voted result cannot be written
 */
public static void main(String[] args) throws IOException {
    String inFolder = "/home/zqz/GDrive/papers/cicling2017/data/seed-terms/genia";
    String outFile = "/home/zqz/GDrive/papers/cicling2017/data/seed-terms/genia/voted.json";

    FileOutputReader jsonFileOutputReader = new JSONFileOutputReader(new Gson());
    FileOutputReader csvFileOutputReader = new CSVFileOutputReader(CSVFormat.DEFAULT);

    Map<String, Double> weights = new HashMap<>();
    Map<String, FileOutputReader> readers = new HashMap<>();

    // All JSON outputs get the same weight (1.0) and the same reader; the
    // duplicated put() pairs of the original are collapsed into one loop.
    String[] jsonOutputs = {
            "genia_attf_seed_terms.json",
            "genia_chisquare_seed_terms.json",
            "genia_cvalue_seed_terms.json",
            "genia_cvalue_seed_terms_mttf1.json",
            "genia_glossex_seed_terms.json",
            "genia_rake_seed_terms.json",
            "genia_termex_seed_terms.json",
            "genia_tfidf_seed_terms.json",
            "genia_ttf_seed_terms.json",
            "genia_weirdness_seed_terms.json"
    };
    for (String name : jsonOutputs) {
        weights.put(name, 1.0);
        readers.put(name, jsonFileOutputReader);
    }
    // The text-rank output is CSV and needs a different reader.
    weights.put("genia_text_rank_result.csv", 1.0);
    readers.put("genia_text_rank_result.csv", csvFileOutputReader);

    Voting voting = new Voting();
    Pair[] results = voting.readAlgorithmResults(inFolder, weights, readers);
    List<JATETerm> newResult = voting.vote(results);
    // Fix: try-with-resources — the original leaked the writer when toJson threw.
    try (Writer w = IOUtil.getUTF8Writer(outFile)) {
        new Gson().toJson(newResult, w);
    }
}
示例9: vote
import org.apache.solr.common.util.Pair; //导入依赖的package包/类
/**
 * Combines the ranked outputs of several algorithms into a single list using
 * weighted reciprocal-rank voting: a term at 0-based rank i in an algorithm's
 * output contributes weight * 1/(i+1) to that term's final score.
 *
 * @param algResultWithWeight one Pair per algorithm; key = ranked term list, value = weight
 * @return the merged terms, sorted by JATETerm's natural ordering
 */
public List<JATETerm> vote(Pair... algResultWithWeight) {
    if (algResultWithWeight.length == 0)
        return new ArrayList<>();
    Map<String, Double> voteScores = new HashMap<>();
    List<JATETerm> out = new ArrayList<>();
    for (Pair result : algResultWithWeight) {
        @SuppressWarnings("unchecked")
        Pair<List<JATETerm>, Double> pair = (Pair<List<JATETerm>, Double>) result;
        // Fix: skip algorithms whose output could not be read — the original
        // NPE'd on a null term list (see readAlgorithmResults' catch branch).
        if (pair == null || pair.getKey() == null)
            continue;
        for (int i = 0; i < pair.getKey().size(); i++) {
            JATETerm jt = pair.getKey().get(i);
            String termStr = jt.getString();
            double rankScore = 1.0 / (i + 1) * pair.getValue();
            // merge() accumulates the reciprocal-rank contributions per term,
            // replacing the get / null-check / put sequence of the original.
            voteScores.merge(termStr, rankScore, Double::sum);
        }
    }
    for (Map.Entry<String, Double> entry : voteScores.entrySet()) {
        out.add(new JATETerm(entry.getKey(), entry.getValue()));
    }
    Collections.sort(out);
    return out;
}
示例10: getSelectedWeightedCandidates
import org.apache.solr.common.util.Pair; //导入依赖的package包/类
/**
 * Selects, from the filtered (weighted) terms, those that also appear in the
 * candidate-term list (case-insensitive), pairing each match with its score.
 *
 * <p>Fix: the original populated a plain ArrayList from nested
 * parallelStream().forEach() calls — an unsynchronized concurrent mutation
 * that can drop elements or corrupt the list. The matching work per element
 * is trivial, so a sequential nested loop is both correct and simpler.
 *
 * @param filteredTerms  terms that survived filtering, carrying scores
 * @param candidateTerms raw candidate term strings
 * @return one (term, score) pair per case-insensitive match
 */
private List<Pair<String, Double>> getSelectedWeightedCandidates(List<JATETerm> filteredTerms, List<String> candidateTerms) {
    List<Pair<String, Double>> filteredCandidateTerms = new ArrayList<>();
    for (String candidateTerm : candidateTerms) {
        if (candidateTerm == null) {
            continue;
        }
        for (JATETerm filteredTerm : filteredTerms) {
            if (filteredTerm != null && filteredTerm.getString() != null
                    && filteredTerm.getString().equalsIgnoreCase(candidateTerm)) {
                filteredCandidateTerms.add(
                        new Pair<>(filteredTerm.getString(), filteredTerm.getScore()));
            }
        }
    }
    return filteredCandidateTerms;
}
示例11: computeSingleWorker
import org.apache.solr.common.util.Pair; //导入依赖的package包/类
/**
 * Computes a RAKE score for every candidate term assigned to this worker.
 * A term's score is the sum of the scores of its component words, where each
 * word's score is degree/frequency (see inline comments for how degree is
 * accumulated from parent terms).
 *
 * @param candidates the candidate term strings assigned to this worker
 * @return one JATETerm (term string + RAKE score) per input candidate
 */
@Override
protected List<JATETerm> computeSingleWorker(List<String> candidates) {
List<JATETerm> result = new ArrayList<>();
int count=0;
for (String tString : candidates) {
/* if(tString.equals("receptors in human mononuclear leukocyte"))
System.out.println();*/
String[] elements = tString.split(" ");
double score = 0;
//a term's RAKE score is the sum of its elements
for (String e : elements) {
//now compute RAKE score of individual words
/* if(e.equals("t"))
System.out.printf("here");*/
//first, frequency
int freq = fFeatureWords.getTTF(e);
if(freq==0) //composing word can be stop words that have been filtered
continue;
//second, degree. Degree adds up frequency
int degree = freq;
//for the remaining part of degree, it depends on terms (parent term) that contain this element
List<Pair<String, Integer>> parentTerms=fTermCompIndex.getSorted(e);
for(Pair<String, Integer> pTerm: parentTerms){
String pTermStr = pTerm.getKey();
if(pTerm.getValue()==1) //we are only interested in multi-word expressions for computing degree
continue;
// NOTE(review): getSorted(e) presumably returns only terms containing the
// unigram e — confirm against the index's add() callers.
int pTF = fFeatureTerms.getTTF(pTermStr); //how many times this parent term appear in corpus
String[] pTermElements = pTermStr.split(" "); //components of this parent term
// degree gains pTF once per co-occurring word in the parent term, i.e.
// pTF * (occurrences of other words alongside e in that term)
for(String ep: pTermElements){
if (ep.equals(e)) //discount the word element itself
continue;
//does stop words matter?
degree+=pTF;
}
}
double wScore = (double) degree / freq; //score of this element word
score += wScore;
}
JATETerm term = new JATETerm(tString, score);
result.add(term);
count++;
// progress logging every 2000 processed candidates
if(count%2000==0) {
LOG.info("done ="+count+"/"+candidates.size());
}
}
return result;
}