This article collects typical usage examples of the Java method org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.getBuckets. If you are unsure what MultiBucketsAggregation.getBuckets does or how to use it, the curated code examples below may help. You can also explore further usage examples of the containing class, org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.
The following presents five code examples of MultiBucketsAggregation.getBuckets, ordered by popularity by default.
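Before the curated examples, here is a minimal sketch of the most common client-side way getBuckets is consumed: iterating the buckets of a multi-bucket aggregation returned in a SearchResponse. The aggregation name "by_category" and the method name are illustrative assumptions, not part of the examples below.

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;

// Minimal sketch: print key and doc count for every bucket of a
// multi-bucket aggregation (terms, histogram, date_histogram, ...).
public static void printBucketCounts(SearchResponse response) {
    MultiBucketsAggregation agg = response.getAggregations().get("by_category");
    for (MultiBucketsAggregation.Bucket bucket : agg.getBuckets()) {
        System.out.println(bucket.getKeyAsString() + " -> " + bucket.getDocCount());
    }
}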
Example 1: reduce
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; // import the package/class the method depends on
@Override
public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {
    MultiBucketsAggregation histo = (MultiBucketsAggregation) aggregation;
    List<? extends Bucket> buckets = histo.getBuckets();
    HistogramFactory factory = (HistogramFactory) histo;
    List<Bucket> newBuckets = new ArrayList<>();
    double sum = 0;
    for (Bucket bucket : buckets) {
        // resolve the metric named by buckets_path, treating gaps as zero
        Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], GapPolicy.INSERT_ZEROS);
        sum += thisBucketValue;
        // copy the bucket's sub-aggregations and append the running cumulative sum
        List<InternalAggregation> aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false)
                .map((p) -> (InternalAggregation) p)
                .collect(Collectors.toList());
        aggs.add(new InternalSimpleValue(name(), sum, formatter, new ArrayList<PipelineAggregator>(), metaData()));
        Bucket newBucket = factory.createBucket(factory.getKey(bucket), bucket.getDocCount(), new InternalAggregations(aggs));
        newBuckets.add(newBucket);
    }
    return factory.createAggregation(newBuckets);
}
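This reduce is the server-side phase of the cumulative_sum pipeline aggregation. For context, a hedged request-side sketch of how such a pipeline might be attached to a date_histogram; package locations and builder method names vary across Elasticsearch versions (this follows the 5.x/6.x Java API), and the field and aggregation names are assumptions.

import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;

// Sketch: a per-day histogram with a "sales" sum sub-aggregation and a
// cumulative_sum pipeline whose buckets_path points at "sales".
SearchSourceBuilder source = new SearchSourceBuilder()
        .query(QueryBuilders.matchAllQuery())
        .aggregation(
                AggregationBuilders.dateHistogram("per_day")
                        .field("timestamp")
                        .dateHistogramInterval(DateHistogramInterval.DAY)
                        .subAggregation(AggregationBuilders.sum("sales").field("price"))
                        .subAggregation(PipelineAggregatorBuilders.cumulativeSum("cumulative_sales", "sales")));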
Example 2: reduce
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; // import the package/class the method depends on
@Override
public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {
    MultiBucketsAggregation histo = (MultiBucketsAggregation) aggregation;
    List<? extends Bucket> buckets = histo.getBuckets();
    HistogramFactory factory = (HistogramFactory) histo;
    List<Bucket> newBuckets = new ArrayList<>();
    Number lastBucketKey = null;
    Double lastBucketValue = null;
    for (Bucket bucket : buckets) {
        Number thisBucketKey = factory.getKey(bucket);
        Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy);
        if (lastBucketValue != null && thisBucketValue != null) {
            // difference to the previous bucket, optionally normalized by the x-axis unit
            double gradient = thisBucketValue - lastBucketValue;
            double xDiff = -1;
            if (xAxisUnits != null) {
                xDiff = (thisBucketKey.doubleValue() - lastBucketKey.doubleValue()) / xAxisUnits;
            }
            final List<InternalAggregation> aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false)
                    .map((p) -> (InternalAggregation) p)
                    .collect(Collectors.toList());
            aggs.add(new InternalDerivative(name(), gradient, xDiff, formatter, new ArrayList<PipelineAggregator>(), metaData()));
            Bucket newBucket = factory.createBucket(factory.getKey(bucket), bucket.getDocCount(), new InternalAggregations(aggs));
            newBuckets.add(newBucket);
        } else {
            // first bucket (or a gap): no previous value, so keep the bucket unchanged
            newBuckets.add(bucket);
        }
        lastBucketKey = thisBucketKey;
        lastBucketValue = thisBucketValue;
    }
    return factory.createAggregation(newBuckets);
}
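On the response side, every bucket after the first one produced by this reduce carries an InternalDerivative under the pipeline's name. A minimal consumption sketch, assuming the aggregation names from the previous sketch and a derivative named "sales_rate"; normalizedValue() is only meaningful when a unit was configured on the derivative builder.

import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
import org.elasticsearch.search.aggregations.pipeline.derivative.Derivative;

Histogram perDay = response.getAggregations().get("per_day");
for (Histogram.Bucket bucket : perDay.getBuckets()) {
    Derivative deriv = bucket.getAggregations().get("sales_rate");
    if (deriv != null) { // the first bucket has no derivative
        System.out.println(bucket.getKeyAsString() + " -> " + deriv.value()
                + " (per unit: " + deriv.normalizedValue() + ")");
    }
}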
Example 3: reduce
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; // import the package/class the method depends on
@Override
public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {
    MultiBucketsAggregation histo = (MultiBucketsAggregation) aggregation;
    List<? extends Bucket> buckets = histo.getBuckets();
    HistogramFactory factory = (HistogramFactory) histo;
    List<Bucket> newBuckets = new ArrayList<>();
    EvictingQueue<Double> lagWindow = new EvictingQueue<>(lag);
    int counter = 0;
    for (Bucket bucket : buckets) {
        Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy);
        Bucket newBucket = bucket;
        counter += 1;
        // Still inside the initial lag period: add nothing and move on
        Double lagValue;
        if (counter <= lag) {
            lagValue = Double.NaN;
        } else {
            lagValue = lagWindow.peek(); // peek here, because we rely on adding to always move the window
        }
        // Normalize nulls to NaN
        if (thisBucketValue == null) {
            thisBucketValue = Double.NaN;
        }
        // Both have values: calculate the diff and replace the "empty" bucket
        if (!Double.isNaN(thisBucketValue) && !Double.isNaN(lagValue)) {
            double diff = thisBucketValue - lagValue;
            List<InternalAggregation> aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false)
                    .map((p) -> (InternalAggregation) p)
                    .collect(Collectors.toList());
            aggs.add(new InternalSimpleValue(name(), diff, formatter, new ArrayList<PipelineAggregator>(), metaData()));
            newBucket = factory.createBucket(factory.getKey(bucket), bucket.getDocCount(), new InternalAggregations(aggs));
        }
        newBuckets.add(newBucket);
        lagWindow.add(thisBucketValue);
    }
    return factory.createAggregation(newBuckets);
}
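The lag window above backs the serial_diff pipeline aggregation. A hedged request-side sketch (5.x/6.x Java API), assuming a date_histogram builder like the one in the earlier sketch and a "sales" metric as buckets_path; the lag of 7 is illustrative.

// histogramBuilder is the assumed date_histogram builder from the earlier sketch.
// lag(7) compares each bucket with the value seven buckets earlier, which is
// exactly the window the EvictingQueue in the reduce above maintains.
histogramBuilder.subAggregation(
        PipelineAggregatorBuilders.diff("weekly_diff", "sales").lag(7));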
Example 4: handleAggregations
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; // import the package/class the method depends on
private void handleAggregations(Aggregations aggregations, List<String> headers, List<List<String>> lines) throws CsvExtractorException {
    if (allNumericAggregations(aggregations)) {
        lines.get(this.currentLineIndex).addAll(fillHeaderAndCreateLineForNumericAggregations(aggregations, headers));
        return;
    }
    // only one aggregation per level is supported, unless all of them are numeric metrics
    List<Aggregation> aggregationList = aggregations.asList();
    if (aggregationList.size() > 1) {
        throw new CsvExtractorException("currently support only one aggregation at same level (Except for numeric metrics)");
    }
    Aggregation aggregation = aggregationList.get(0);
    // skip single-bucket aggregations (nested, reverse_nested, filter) and recurse into their sub-aggregations
    if (aggregation instanceof SingleBucketAggregation) {
        Aggregations singleBucketAggs = ((SingleBucketAggregation) aggregation).getAggregations();
        handleAggregations(singleBucketAggs, headers, lines);
        return;
    }
    if (aggregation instanceof NumericMetricsAggregation) {
        handleNumericMetricAggregation(headers, lines.get(currentLineIndex), aggregation);
        return;
    }
    if (aggregation instanceof GeoBounds) {
        handleGeoBoundsAggregation(headers, lines, (GeoBounds) aggregation);
        return;
    }
    if (aggregation instanceof TopHits) {
        // TODO: handle this; it returns hits... maybe map them back to normal rows?
        // TODO: read about its usages
        // TopHits topHitsAggregation = (TopHits) aggregation;
    }
    if (aggregation instanceof MultiBucketsAggregation) {
        MultiBucketsAggregation bucketsAggregation = (MultiBucketsAggregation) aggregation;
        String name = bucketsAggregation.getName();
        // check first, because this method can be reached again from a sub-aggregation
        if (!headers.contains(name)) {
            headers.add(name);
        }
        Collection<? extends MultiBucketsAggregation.Bucket> buckets = bucketsAggregation.getBuckets();
        // clone the current line, then recurse with currentLineIndex advanced
        List<String> currentLine = lines.get(this.currentLineIndex);
        List<String> clonedLine = new ArrayList<>(currentLine);
        boolean firstLine = true;
        for (MultiBucketsAggregation.Bucket bucket : buckets) {
            // every bucket except the first adds a new line copied from the cloned line
            String key = bucket.getKeyAsString();
            if (firstLine) {
                firstLine = false;
            } else {
                currentLineIndex++;
                currentLine = new ArrayList<String>(clonedLine);
                lines.add(currentLine);
            }
            currentLine.add(key);
            handleAggregations(bucket.getAggregations(), headers, lines);
        }
    }
}
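handleAggregations mutates the shared headers/lines state and relies on a currentLineIndex field. A hedged sketch of how a caller inside the same extractor class might seed that state; the driver shape, the response variable, and the gender/avg_age example are illustrative assumptions, not part of the original class.

// Hypothetical driver inside the same extractor class.
List<String> headers = new ArrayList<>();
List<List<String>> lines = new ArrayList<>();
lines.add(new ArrayList<String>()); // row 0; further rows are cloned in as buckets fan out
this.currentLineIndex = 0;
handleAggregations(response.getAggregations(), headers, lines);
// e.g. a terms aggregation "gender" with an avg sub-aggregation "avg_age" yields
// headers [gender, avg_age] and one row per bucket, such as [male, 34.2].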
Example 5: handleAggregations
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; // import the package/class the method depends on
private void handleAggregations(Aggregations aggregations, List<String> headers, List<List<Object>> lines) throws ObjectResultsExtractException {
    if (allNumericAggregations(aggregations)) {
        lines.get(this.currentLineIndex).addAll(fillHeaderAndCreateLineForNumericAggregations(aggregations, headers));
        return;
    }
    // only one aggregation per level is supported, unless all of them are numeric metrics
    List<Aggregation> aggregationList = aggregations.asList();
    if (aggregationList.size() > 1) {
        throw new ObjectResultsExtractException("currently support only one aggregation at same level (Except for numeric metrics)");
    }
    Aggregation aggregation = aggregationList.get(0);
    // skip single-bucket aggregations (nested, reverse_nested, filter) and recurse into their sub-aggregations
    if (aggregation instanceof SingleBucketAggregation) {
        Aggregations singleBucketAggs = ((SingleBucketAggregation) aggregation).getAggregations();
        handleAggregations(singleBucketAggs, headers, lines);
        return;
    }
    if (aggregation instanceof NumericMetricsAggregation) {
        handleNumericMetricAggregation(headers, lines.get(currentLineIndex), aggregation);
        return;
    }
    if (aggregation instanceof GeoBounds) {
        handleGeoBoundsAggregation(headers, lines, (GeoBounds) aggregation);
        return;
    }
    if (aggregation instanceof TopHits) {
        // TODO: handle this; it returns hits... maybe map them back to normal rows?
        // TODO: read about its usages
        // TopHits topHitsAggregation = (TopHits) aggregation;
    }
    if (aggregation instanceof MultiBucketsAggregation) {
        MultiBucketsAggregation bucketsAggregation = (MultiBucketsAggregation) aggregation;
        String name = bucketsAggregation.getName();
        // check first, because this method can be reached again from a sub-aggregation
        if (!headers.contains(name)) {
            headers.add(name);
        }
        Collection<? extends MultiBucketsAggregation.Bucket> buckets = bucketsAggregation.getBuckets();
        // clone the current line, then recurse with currentLineIndex advanced
        List<Object> currentLine = lines.get(this.currentLineIndex);
        List<Object> clonedLine = new ArrayList<>(currentLine);
        boolean firstLine = true;
        for (MultiBucketsAggregation.Bucket bucket : buckets) {
            // every bucket except the first adds a new line copied from the cloned line
            String key = bucket.getKeyAsString();
            if (firstLine) {
                firstLine = false;
            } else {
                currentLineIndex++;
                currentLine = new ArrayList<Object>(clonedLine);
                lines.add(currentLine);
            }
            currentLine.add(key);
            handleAggregations(bucket.getAggregations(), headers, lines);
        }
    }
}