本文整理汇总了Java中org.elasticsearch.action.search.SearchResponse.getAggregations方法的典型用法代码示例。如果您正苦于以下问题:Java SearchResponse.getAggregations方法的具体用法?Java SearchResponse.getAggregations怎么用?Java SearchResponse.getAggregations使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.elasticsearch.action.search.SearchResponse
的用法示例。
在下文中一共展示了SearchResponse.getAggregations方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testSimpleSubAggregations
import org.elasticsearch.action.search.SearchResponse; //导入方法依赖的package包/类
@Test
public void testSimpleSubAggregations() throws Exception {
    // Verifies that a SQL GROUP BY over (gender) and (state) yields the expected
    // top-level terms aggregations, while DOCS_WITH_AGGREGATION(10) still returns hits.
    final String query = String.format("SELECT /*! DOCS_WITH_AGGREGATION(10) */ * FROM %s/account GROUP BY (gender), (state) ", TEST_INDEX);
    SqlElasticSearchRequestBuilder select = getSearchRequestBuilder(query);
    SearchResponse response = (SearchResponse) select.get();
    Aggregations result = response.getAggregations();

    Terms gender = result.get("gender");
    for (Terms.Bucket genderBucket : gender.getBuckets()) {
        String genderKey = genderBucket.getKey().toString();
        Assert.assertTrue("Gender should be m or f", genderKey.equals("m") || genderKey.equals("f"));
    }
    Assert.assertEquals(2, gender.getBuckets().size());

    Terms state = result.get("state");
    for (Terms.Bucket stateBucket : state.getBuckets()) {
        if (stateBucket.getKey().toString().equalsIgnoreCase("ak")) {
            // assertEquals reports the mismatching value on failure, unlike assertTrue on ==.
            Assert.assertEquals("There are 22 entries for state ak", 22, stateBucket.getDocCount());
        }
    }
    // BUGFIX: JUnit's assertEquals takes (expected, actual); the original had them swapped,
    // which produces misleading failure messages.
    Assert.assertEquals(1000, response.getHits().totalHits());
    Assert.assertEquals(10, response.getHits().hits().length);
}
示例2: mapResults
import org.elasticsearch.action.search.SearchResponse; //导入方法依赖的package包/类
@Override
public <T> FacetedPageImpl<T> mapResults(SearchResponse response, Class<T> clazz, Pageable pageable) {
    // Builds a faceted page from the "term" terms aggregation: each bucket's
    // top-hits sub-aggregation (named by `name`) is mapped into entities.
    Terms termsAgg = response.getAggregations().get(term);
    long total = termsAgg.getSumOfOtherDocCounts();
    List<T> content = new ArrayList<T>();
    if (termsAgg.getBuckets() != null && !termsAgg.getBuckets().isEmpty()) {
        for (Terms.Bucket bucket : termsAgg.getBuckets()) {
            // Skip buckets without the expected top-hits sub-aggregation.
            if (StringUtils.isBlank(name) || bucket.getAggregations().get(name) == null) {
                continue;
            }
            TopHits topHits = bucket.getAggregations().get(name);
            for (SearchHit hit : topHits.getHits().getHits()) {
                T entity = mapEntity(hit.getSourceAsString(), hit, clazz);
                if (entity instanceof UKAgg) {
                    // Propagate the bucket's hit count onto aggregate rows.
                    ((UKAgg) entity).setRowcount((int) topHits.getHits().getTotalHits());
                }
                content.add(entity);
            }
        }
    }
    return new FacetedPageImpl<T>(content, pageable, total);
}
示例3: SearchResult
import org.elasticsearch.action.search.SearchResponse; //导入方法依赖的package包/类
public SearchResult(SearchResponse resp, Select select) throws SqlParseException {
    // Flattens an aggregation response into `results` rows and `total`.
    // An enclosing "filter" aggregation, when present, is unwrapped first so the
    // grouping (or metric) aggregations are read from the correct level.
    Aggregations aggs = resp.getAggregations();
    InternalFilter filterAgg = aggs.get("filter");
    if (filterAgg != null) {
        aggs = filterAgg.getAggregations();
    }
    InternalTerms groupBy = aggs.get("group by");
    if (groupBy == null) {
        // No GROUP BY: produce a single row of metric-name -> converted value.
        this.total = 1;
        results = new ArrayList<>(1);
        Map<String, Object> row = new HashMap<>();
        for (Aggregation aggregation : aggs) {
            row.put(aggregation.getName(), covenValue(aggregation));
        }
        results.add(row);
    } else {
        // One row per terms bucket, carrying its sub-aggregations plus doc count.
        Collection<Bucket> buckets = groupBy.getBuckets();
        this.total = buckets.size();
        results = new ArrayList<>(buckets.size());
        for (Bucket bucket : buckets) {
            Map<String, Object> row = toAggsMap(bucket.getAggregations().getAsMap());
            row.put("docCount", bucket.getDocCount());
            results.add(row);
        }
    }
}
示例4: testSubAggregations
import org.elasticsearch.action.search.SearchResponse; //导入方法依赖的package包/类
@Test
public void testSubAggregations() throws Exception {
    // Expected keys of the nested "age" terms aggregation: every integer in [20, 40].
    Set<Integer> expectedAges = new HashSet<>(ContiguousSet.create(Range.closed(20, 40), DiscreteDomain.integers()));
    final String query = String.format("SELECT /*! DOCS_WITH_AGGREGATION(10) */" +
            " * FROM %s/account GROUP BY (gender, terms('field'='age','size'=200,'alias'='age')), (state) LIMIT 200,200", TEST_INDEX);
    Map<String, Set<Integer>> buckets = new HashMap<>();
    SqlElasticSearchRequestBuilder select = getSearchRequestBuilder(query);
    SearchResponse response = (SearchResponse) select.get();
    Aggregations result = response.getAggregations();

    // Collect the "age" sub-aggregation keys per gender bucket.
    Terms gender = result.get("gender");
    for (Terms.Bucket genderBucket : gender.getBuckets()) {
        String genderKey = genderBucket.getKey().toString();
        buckets.put(genderKey, new HashSet<Integer>());
        Terms ageBuckets = (Terms) genderBucket.getAggregations().get("age");
        for (Terms.Bucket ageBucket : ageBuckets.getBuckets()) {
            buckets.get(genderKey).add(Integer.parseInt(ageBucket.getKey().toString()));
        }
    }
    Assert.assertEquals(2, buckets.keySet().size());
    Assert.assertEquals(expectedAges, buckets.get("m"));
    Assert.assertEquals(expectedAges, buckets.get("f"));

    Terms state = result.get("state");
    for (Terms.Bucket stateBucket : state.getBuckets()) {
        if (stateBucket.getKey().toString().equalsIgnoreCase("ak")) {
            // assertEquals reports the mismatching value on failure, unlike assertTrue on ==.
            Assert.assertEquals("There are 22 entries for state ak", 22, stateBucket.getDocCount());
        }
    }
    // BUGFIX: JUnit's assertEquals takes (expected, actual); the original had them swapped,
    // which produces misleading failure messages.
    Assert.assertEquals(1000, response.getHits().totalHits());
    Assert.assertEquals(10, response.getHits().hits().length);
}
示例5: mapResults
import org.elasticsearch.action.search.SearchResponse; //导入方法依赖的package包/类
@Override
public <T> AggregatedPage<T> mapResults(SearchResponse response, Class<T> clazz, Pageable pageable) {
    // Maps each hit's _source into a File, truncating "content" to a bounded summary.
    List<File> result = new ArrayList<>();
    long totalHits = response.getHits().getTotalHits();
    for (SearchHit searchHit : response.getHits()) {
        // BUGFIX: the original checked `response.getHits().getHits().length <= 0`
        // here and returned null — but that branch can never execute once the loop
        // body runs (iterating implies at least one hit). Removed as dead code;
        // an empty response still falls through to an empty page, as before.
        String content = (String) searchHit.getSource().get("content");
        File oneFile = new File(
                (String) searchHit.getSource().get("id"),
                (String) searchHit.getSource().get("name"),
                (String) searchHit.getSource().get("extension"),
                (String) searchHit.getSource().get("path"),
                (String) searchHit.getSource().get("project"),
                content == null ? null : content.substring(0, Math.min(Constants.TRUNCATED_CONTENT, content.length())),
                (String) searchHit.getSource().get("version"),
                // "size" may deserialize as Integer or Long depending on magnitude;
                // a direct (Long) cast throws ClassCastException for Integer values,
                // so convert via the String representation instead.
                Long.valueOf(searchHit.getSource().get("size").toString())
        );
        result.add(oneFile);
    }
    return new AggregatedPageImpl<>((List<T>) result, pageable, totalHits, response.getAggregations());
}
示例6: testBackgroundVsSeparateSet
import org.elasticsearch.action.search.SearchResponse; //导入方法依赖的package包/类
// Checks that significance scores computed against the full index as background
// (superset heuristic, one terms+significant_terms query) equal the scores
// computed against an explicit background filter on the opposite class
// (separate-sets heuristic, two filter+significant_terms sub-queries).
public void testBackgroundVsSeparateSet(SignificanceHeuristic significanceHeuristicExpectingSuperset,
SignificanceHeuristic significanceHeuristicExpectingSeparateSets) throws Exception {
// Query 1: bucket docs by CLASS_FIELD, then find significant terms of
// TEXT_FIELD per class, scored by the superset-style heuristic.
SearchResponse response1 = client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE)
.addAggregation(terms("class")
.field(CLASS_FIELD)
.subAggregation(
significantTerms("sig_terms")
.field(TEXT_FIELD)
.minDocCount(1)
.significanceHeuristic(
significanceHeuristicExpectingSuperset)))
.execute()
.actionGet();
assertSearchResponse(response1);
// Query 2: one filter aggregation per class, each with the OTHER class as the
// explicit backgroundFilter, scored by the separate-sets heuristic.
SearchResponse response2 = client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE)
.addAggregation(filter("0", QueryBuilders.termQuery(CLASS_FIELD, "0"))
.subAggregation(significantTerms("sig_terms")
.field(TEXT_FIELD)
.minDocCount(1)
.backgroundFilter(QueryBuilders.termQuery(CLASS_FIELD, "1"))
.significanceHeuristic(significanceHeuristicExpectingSeparateSets)))
.addAggregation(filter("1", QueryBuilders.termQuery(CLASS_FIELD, "1"))
.subAggregation(significantTerms("sig_terms")
.field(TEXT_FIELD)
.minDocCount(1)
.backgroundFilter(QueryBuilders.termQuery(CLASS_FIELD, "0"))
.significanceHeuristic(significanceHeuristicExpectingSeparateSets)))
.execute()
.actionGet();
// Extract the per-class, per-term scores from query 1 (background = superset).
StringTerms classes = response1.getAggregations().get("class");
SignificantTerms sigTerms0 = ((SignificantTerms) (classes.getBucketByKey("0").getAggregations().asMap().get("sig_terms")));
assertThat(sigTerms0.getBuckets().size(), equalTo(2));
double score00Background = sigTerms0.getBucketByKey("0").getSignificanceScore();
double score01Background = sigTerms0.getBucketByKey("1").getSignificanceScore();
SignificantTerms sigTerms1 = ((SignificantTerms) (classes.getBucketByKey("1").getAggregations().asMap().get("sig_terms")));
double score10Background = sigTerms1.getBucketByKey("0").getSignificanceScore();
double score11Background = sigTerms1.getBucketByKey("1").getSignificanceScore();
// Extract the corresponding scores from query 2 (explicit separate sets).
Aggregations aggs = response2.getAggregations();
sigTerms0 = (SignificantTerms) ((InternalFilter) aggs.get("0")).getAggregations().getAsMap().get("sig_terms");
double score00SeparateSets = sigTerms0.getBucketByKey("0").getSignificanceScore();
double score01SeparateSets = sigTerms0.getBucketByKey("1").getSignificanceScore();
sigTerms1 = (SignificantTerms) ((InternalFilter) aggs.get("1")).getAggregations().getAsMap().get("sig_terms");
double score10SeparateSets = sigTerms1.getBucketByKey("0").getSignificanceScore();
double score11SeparateSets = sigTerms1.getBucketByKey("1").getSignificanceScore();
// Both formulations must agree exactly for every (class, term) pair.
assertThat(score00Background, equalTo(score00SeparateSets));
assertThat(score01Background, equalTo(score01SeparateSets));
assertThat(score10Background, equalTo(score10SeparateSets));
assertThat(score11Background, equalTo(score11SeparateSets));
}
示例7: onResponse
import org.elasticsearch.action.search.SearchResponse; //导入方法依赖的package包/类
@Override
public void onResponse(SearchResponse response) {
    // Async callback for the partition-bucketed ES query: drains the "docs"
    // top-hits of each "partition" bucket into the shared buffer, skipping URLs
    // that are already being processed, then records metrics and releases the
    // query-in-flight flag.
    long timeTaken = System.currentTimeMillis() - timeStartESQuery;
    Aggregations aggregs = response.getAggregations();
    Terms agg = aggregs.get("partition");
    int numhits = 0;
    int numBuckets = 0;
    int alreadyprocessed = 0;
    synchronized (buffer) {
        // For each partition bucket
        for (Terms.Bucket entry : agg.getBuckets()) {
            String key = (String) entry.getKey(); // bucket key
            long docCount = entry.getDocCount(); // Doc count
            int hitsForThisBucket = 0;
            // Filter results so that we don't include URLs that are already
            // being processed.
            TopHits topHits = entry.getAggregations().get("docs");
            for (SearchHit hit : topHits.getHits().getHits()) {
                hitsForThisBucket++;
                Map<String, Object> keyValues = hit.sourceAsMap();
                String url = (String) keyValues.get("url");
                LOG.debug("{} -> id [{}], _source [{}]", logIdprefix,
                        hit.getId(), hit.getSourceAsString());
                // Already being processed - skip it!
                if (beingProcessed.containsKey(url)) {
                    alreadyprocessed++;
                    continue;
                }
                Metadata metadata = fromKeyValues(keyValues);
                buffer.add(new Values(url, metadata));
            }
            if (hitsForThisBucket > 0)
                numBuckets++;
            numhits += hitsForThisBucket;
            // BUGFIX: the format string had 4 placeholders but 5 arguments, so
            // SLF4J silently dropped `alreadyprocessed` — placeholder added.
            LOG.debug("{} key [{}], hits[{}], doc_count [{}], already processed [{}]",
                    logIdprefix, key, hitsForThisBucket, docCount, alreadyprocessed);
        }
        // Shuffle the URLs so that we don't get blocks of URLs from the
        // same host or domain.
        Collections.shuffle((List) buffer);
    }
    LOG.info(
            "{} ES query returned {} hits from {} buckets in {} msec with {} already being processed",
            logIdprefix, numhits, numBuckets, timeTaken, alreadyprocessed);
    esQueryTimes.addMeasurement(timeTaken);
    eventCounter.scope("already_being_processed").incrBy(alreadyprocessed);
    eventCounter.scope("ES_queries").incrBy(1);
    eventCounter.scope("ES_docs").incrBy(numhits);
    // Remove the query-in-flight lock.
    isInESQuery.set(false);
}
示例8: mapResults
import org.elasticsearch.action.search.SearchResponse; //导入方法依赖的package包/类
@Override
public <T> AggregatedPage<T> mapResults(SearchResponse response, Class<T> clazz, Pageable pageable) {
    // Maps each hit into a File, preferring highlighted fragments for the
    // content summary and path when the response carries them.
    List<File> result = new ArrayList<>();
    long totalHits = response.getHits().getTotalHits();
    for (SearchHit searchHit : response.getHits()) {
        // BUGFIX: the original checked `response.getHits().getHits().length <= 0`
        // here and returned null — but that branch can never execute once the loop
        // body runs (iterating implies at least one hit). Removed as dead code;
        // an empty response still falls through to an empty page, as before.
        String summaryWithHighlight = null;
        String pathWithHighlight = null;
        HighlightField highlightFieldContent = searchHit.getHighlightFields().get("content");
        HighlightField highlightFieldPath = searchHit.getHighlightFields().get("path");
        if (highlightFieldContent != null) {
            // Join all highlighted content fragments, normalising to UTF-8.
            summaryWithHighlight = Arrays.stream(highlightFieldContent.fragments())
                    .map(text -> EncodingUtil.convertToUTF8(text.toString()))
                    .collect(Collectors.joining("\n[...]\n"));
        }
        if (highlightFieldPath != null && highlightFieldPath.fragments() != null) {
            pathWithHighlight = EncodingUtil.unEscapeString(highlightFieldPath.fragments()[0].toString());
        }
        File oneFile = new File(
                (String) searchHit.getSource().get("id"),
                (String) searchHit.getSource().get("name"),
                (String) searchHit.getSource().get("extension"),
                pathWithHighlight != null ? pathWithHighlight : (String) searchHit.getSource().get("path"),
                (String) searchHit.getSource().get("project"),
                summaryWithHighlight,
                (String) searchHit.getSource().get("version"),
                // "size" may deserialize as Integer or Long depending on magnitude;
                // a direct (Long) cast throws ClassCastException for Integer values,
                // so convert via the String representation instead.
                Long.valueOf(searchHit.getSource().get("size").toString())
        );
        oneFile.setScore(searchHit.getScore());
        result.add(oneFile);
    }
    return new AggregatedPageImpl<>((List<T>) result, pageable, totalHits, response.getAggregations());
}