This page collects typical usage examples of the Java method org.elasticsearch.search.aggregations.bucket.histogram.Histogram.getBuckets. If you are unsure how to use Histogram.getBuckets, or are looking for concrete examples of it, the curated code samples below should help. You can also explore the containing class, org.elasticsearch.search.aggregations.bucket.histogram.Histogram, for related usage.
The following presents 15 code examples of Histogram.getBuckets, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
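Before diving into the examples, here is a minimal sketch of the pattern they all share: run a search with a histogram (or date_histogram) aggregation, fetch the Histogram from the response by name, and iterate over Histogram.getBuckets(). The sketch assumes a 5.x/6.x Java transport-client setup; the index name "idx", the field "price", and the printHistogramBuckets method are hypothetical and used only for illustration.

import java.util.List;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;

// Minimal usage sketch (assumed setup, not taken from the examples below).
public static void printHistogramBuckets(Client client) {
    SearchResponse response = client.prepareSearch("idx")            // hypothetical index name
            .setSize(0)                                               // only the aggregation is needed, not the hits
            .addAggregation(AggregationBuilders.histogram("histo")
                    .field("price")                                   // hypothetical numeric field
                    .interval(10))
            .get();
    // Look the aggregation up under the name it was registered with ("histo").
    Histogram histo = response.getAggregations().get("histo");
    // getBuckets() returns one Histogram.Bucket per interval, each carrying a key and a doc count.
    List<? extends Histogram.Bucket> buckets = histo.getBuckets();
    for (Histogram.Bucket bucket : buckets) {
        System.out.println(bucket.getKeyAsString() + " -> " + bucket.getDocCount());
    }
}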
Example 1: testSingleValuedFieldOrderedByKeyAsc
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; // import required for this method
public void testSingleValuedFieldOrderedByKeyAsc() throws Exception {
    SearchResponse response = client().prepareSearch("idx")
            .addAggregation(dateHistogram("histo")
                    .field("date")
                    .dateHistogramInterval(DateHistogramInterval.MONTH)
                    .order(Histogram.Order.KEY_ASC))
            .execute().actionGet();
    assertSearchResponse(response);
    Histogram histo = response.getAggregations().get("histo");
    assertThat(histo, notNullValue());
    assertThat(histo.getName(), equalTo("histo"));
    List<? extends Bucket> buckets = histo.getBuckets();
    assertThat(buckets.size(), equalTo(3));
    int i = 0;
    for (Histogram.Bucket bucket : buckets) {
        assertThat(((DateTime) bucket.getKey()), equalTo(new DateTime(2012, i + 1, 1, 0, 0, DateTimeZone.UTC)));
        i++;
    }
}
Example 2: testSingleValuedFieldOrderedBySubAggregationAsc
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; // import required for this method
public void testSingleValuedFieldOrderedBySubAggregationAsc() throws Exception {
    SearchResponse response = client().prepareSearch("idx")
            .addAggregation(dateHistogram("histo")
                    .field("date")
                    .dateHistogramInterval(DateHistogramInterval.MONTH)
                    .order(Histogram.Order.aggregation("sum", true))
                    .subAggregation(max("sum").field("value")))
            .execute().actionGet();
    assertSearchResponse(response);
    Histogram histo = response.getAggregations().get("histo");
    assertThat(histo, notNullValue());
    assertThat(histo.getName(), equalTo("histo"));
    assertThat(histo.getBuckets().size(), equalTo(3));
    int i = 0;
    for (Histogram.Bucket bucket : histo.getBuckets()) {
        assertThat(((DateTime) bucket.getKey()), equalTo(new DateTime(2012, i + 1, 1, 0, 0, DateTimeZone.UTC)));
        i++;
    }
}
Example 3: testScriptSingleValue
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; // import required for this method
public void testScriptSingleValue() throws Exception {
    SearchResponse response = client().prepareSearch("idx")
            .addAggregation(
                    histogram("histo")
                            .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['l_value'].value", emptyMap()))
                            .interval(interval))
            .execute().actionGet();
    assertSearchResponse(response);
    Histogram histo = response.getAggregations().get("histo");
    assertThat(histo, notNullValue());
    assertThat(histo.getName(), equalTo("histo"));
    List<? extends Bucket> buckets = histo.getBuckets();
    assertThat(buckets.size(), equalTo(numValueBuckets));
    for (int i = 0; i < numValueBuckets; ++i) {
        Histogram.Bucket bucket = buckets.get(i);
        assertThat(bucket, notNullValue());
        assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval));
        assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
    }
}
Example 4: testDSTEndTransition
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; // import required for this method
/**
 * When DST ends, local time turns back one hour, so between 2am and 4am wall time we should have four buckets:
 * "2015-10-25T02:00:00.000+02:00",
 * "2015-10-25T02:00:00.000+01:00",
 * "2015-10-25T03:00:00.000+01:00",
 * "2015-10-25T04:00:00.000+01:00".
 */
public void testDSTEndTransition() throws Exception {
    SearchResponse response = client().prepareSearch("idx")
            .setQuery(new MatchNoneQueryBuilder())
            .addAggregation(dateHistogram("histo").field("date").timeZone(DateTimeZone.forID("Europe/Oslo"))
                    .dateHistogramInterval(DateHistogramInterval.HOUR).minDocCount(0).extendedBounds(
                            new ExtendedBounds("2015-10-25T02:00:00.000+02:00", "2015-10-25T04:00:00.000+01:00")))
            .execute().actionGet();
    Histogram histo = response.getAggregations().get("histo");
    List<? extends Bucket> buckets = histo.getBuckets();
    assertThat(buckets.size(), equalTo(4));
    assertThat(((DateTime) buckets.get(1).getKey()).getMillis() - ((DateTime) buckets.get(0).getKey()).getMillis(), equalTo(3600000L));
    assertThat(((DateTime) buckets.get(2).getKey()).getMillis() - ((DateTime) buckets.get(1).getKey()).getMillis(), equalTo(3600000L));
    assertThat(((DateTime) buckets.get(3).getKey()).getMillis() - ((DateTime) buckets.get(2).getKey()).getMillis(), equalTo(3600000L));
}
Example 5: testMultiValuedFieldOrderedByKeyDesc
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; // import required for this method
public void testMultiValuedFieldOrderedByKeyDesc() throws Exception {
    SearchResponse response = client().prepareSearch("idx")
            .addAggregation(histogram("histo").field(MULTI_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.KEY_DESC))
            .execute().actionGet();
    assertSearchResponse(response);
    Histogram histo = response.getAggregations().get("histo");
    assertThat(histo, notNullValue());
    assertThat(histo.getName(), equalTo("histo"));
    assertThat(histo.getBuckets().size(), equalTo(numValuesBuckets));
    // TODO: use diamond once JI-9019884 is fixed
    List<Histogram.Bucket> buckets = new ArrayList<>(histo.getBuckets());
    for (int i = 0; i < numValuesBuckets; ++i) {
        Histogram.Bucket bucket = buckets.get(numValuesBuckets - i - 1);
        assertThat(bucket, notNullValue());
        assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval));
        assertThat(bucket.getDocCount(), equalTo(valuesCounts[i]));
    }
}
Example 6: testScriptMultiValued
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; // import required for this method
public void testScriptMultiValued() throws Exception {
    SearchResponse response = client().prepareSearch("idx")
            .addAggregation(
                    histogram("histo")
                            .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['l_values']", emptyMap()))
                            .interval(interval))
            .execute().actionGet();
    assertSearchResponse(response);
    Histogram histo = response.getAggregations().get("histo");
    assertThat(histo, notNullValue());
    assertThat(histo.getName(), equalTo("histo"));
    List<? extends Bucket> buckets = histo.getBuckets();
    assertThat(buckets.size(), equalTo(numValuesBuckets));
    for (int i = 0; i < numValuesBuckets; ++i) {
        Histogram.Bucket bucket = buckets.get(i);
        assertThat(bucket, notNullValue());
        assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval));
        assertThat(bucket.getDocCount(), equalTo(valuesCounts[i]));
    }
}
Example 7: testMultiValuedFieldOrderedByKeyDesc
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; // import required for this method
public void testMultiValuedFieldOrderedByKeyDesc() throws Exception {
    SearchResponse response = client().prepareSearch("idx")
            .addAggregation(dateHistogram("histo")
                    .field("dates")
                    .dateHistogramInterval(DateHistogramInterval.MONTH)
                    .order(Histogram.Order.COUNT_DESC))
            .execute().actionGet();
    assertSearchResponse(response);
    Histogram histo = response.getAggregations().get("histo");
    assertThat(histo, notNullValue());
    assertThat(histo.getName(), equalTo("histo"));
    assertThat(histo.getBuckets().size(), equalTo(4));
    // TODO: use diamond once JI-9019884 is fixed
    List<Histogram.Bucket> buckets = new ArrayList<>(histo.getBuckets());
    Histogram.Bucket bucket = buckets.get(0);
    assertThat(bucket, notNullValue());
    assertThat(bucket.getDocCount(), equalTo(5L));
    bucket = buckets.get(1);
    assertThat(bucket, notNullValue());
    assertThat(bucket.getDocCount(), equalTo(3L));
    bucket = buckets.get(2);
    assertThat(bucket, notNullValue());
    assertThat(bucket.getDocCount(), equalTo(3L));
    bucket = buckets.get(3);
    assertThat(bucket, notNullValue());
    assertThat(bucket.getDocCount(), equalTo(1L));
}
Example 8: testPartiallyUnmappedWithExtendedBounds
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; // import required for this method
public void testPartiallyUnmappedWithExtendedBounds() throws Exception {
    SearchResponse response = client()
            .prepareSearch("idx", "idx_unmapped")
            .addAggregation(
                    histogram("histo")
                            .field(SINGLE_VALUED_FIELD_NAME)
                            .interval(interval)
                            .extendedBounds(-1 * 2 * interval, valueCounts.length * interval))
            .get();
    assertSearchResponse(response);
    Histogram histo = response.getAggregations().get("histo");
    assertThat(histo, notNullValue());
    assertThat(histo.getName(), equalTo("histo"));
    List<? extends Bucket> buckets = histo.getBuckets();
    assertThat(buckets.size(), equalTo(numValueBuckets + 3));
    Histogram.Bucket bucket = buckets.get(0);
    assertThat(bucket, notNullValue());
    assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) -1 * 2 * interval));
    assertThat(bucket.getDocCount(), equalTo(0L));
    bucket = buckets.get(1);
    assertThat(bucket, notNullValue());
    assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) -1 * interval));
    assertThat(bucket.getDocCount(), equalTo(0L));
    for (int i = 2; i < numValueBuckets + 2; ++i) {
        bucket = buckets.get(i);
        assertThat(bucket, notNullValue());
        assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) (i - 2) * interval));
        assertThat(bucket.getDocCount(), equalTo(valueCounts[i - 2]));
    }
}
Example 9: testSingleValuedFieldOrderedByCountAsc
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; // import required for this method
public void testSingleValuedFieldOrderedByCountAsc() throws Exception {
    SearchResponse response = client().prepareSearch("idx")
            .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.COUNT_ASC))
            .execute().actionGet();
    assertSearchResponse(response);
    Histogram histo = response.getAggregations().get("histo");
    assertThat(histo, notNullValue());
    assertThat(histo.getName(), equalTo("histo"));
    assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
    LongHashSet buckets = new LongHashSet();
    // TODO: use diamond once JI-9019884 is fixed
    List<Histogram.Bucket> histoBuckets = new ArrayList<>(histo.getBuckets());
    long previousCount = Long.MIN_VALUE;
    for (int i = 0; i < numValueBuckets; ++i) {
        Histogram.Bucket bucket = histoBuckets.get(i);
        assertThat(bucket, notNullValue());
        long key = ((Number) bucket.getKey()).longValue();
        assertEquals(0, key % interval);
        assertTrue(buckets.add(key));
        assertThat(bucket.getDocCount(), equalTo(valueCounts[(int) (key / interval)]));
        assertThat(bucket.getDocCount(), greaterThanOrEqualTo(previousCount));
        previousCount = bucket.getDocCount();
    }
}
Example 10: testSingleValuedField_normalised
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; // import required for this method
/**
 * Test the first and second derivative on the single-valued field.
 */
public void testSingleValuedField_normalised() {
    SearchResponse response = client()
            .prepareSearch("idx")
            .addAggregation(
                    histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0)
                            .subAggregation(derivative("deriv", "_count").unit("1ms"))
                            .subAggregation(derivative("2nd_deriv", "deriv").unit("10ms"))).execute().actionGet();
    assertSearchResponse(response);
    Histogram deriv = response.getAggregations().get("histo");
    assertThat(deriv, notNullValue());
    assertThat(deriv.getName(), equalTo("histo"));
    List<? extends Bucket> buckets = deriv.getBuckets();
    assertThat(buckets.size(), equalTo(numValueBuckets));
    for (int i = 0; i < numValueBuckets; ++i) {
        Histogram.Bucket bucket = buckets.get(i);
        checkBucketKeyAndDocCount("InternalBucket " + i, bucket, i * interval, valueCounts[i]);
        Derivative docCountDeriv = bucket.getAggregations().get("deriv");
        if (i > 0) {
            assertThat(docCountDeriv, notNullValue());
            assertThat(docCountDeriv.value(), closeTo((firstDerivValueCounts[i - 1]), 0.00001));
            assertThat(docCountDeriv.normalizedValue(), closeTo((double) (firstDerivValueCounts[i - 1]) / 5, 0.00001));
        } else {
            assertThat(docCountDeriv, nullValue());
        }
        Derivative docCount2ndDeriv = bucket.getAggregations().get("2nd_deriv");
        if (i > 1) {
            assertThat(docCount2ndDeriv, notNullValue());
            assertThat(docCount2ndDeriv.value(), closeTo((secondDerivValueCounts[i - 2]), 0.00001));
            assertThat(docCount2ndDeriv.normalizedValue(), closeTo((double) (secondDerivValueCounts[i - 2]) * 2, 0.00001));
        } else {
            assertThat(docCount2ndDeriv, nullValue());
        }
    }
}
Example 11: testPartiallyUnmapped
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; // import required for this method
public void testPartiallyUnmapped() throws Exception {
    SearchResponse response = client()
            .prepareSearch("idx", "idx_unmapped")
            .addAggregation(
                    histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
                            .subAggregation(derivative("deriv", "_count"))).execute().actionGet();
    assertSearchResponse(response);
    Histogram deriv = response.getAggregations().get("histo");
    assertThat(deriv, notNullValue());
    assertThat(deriv.getName(), equalTo("histo"));
    List<? extends Bucket> buckets = deriv.getBuckets();
    assertThat(deriv.getBuckets().size(), equalTo(numValueBuckets));
    for (int i = 0; i < numValueBuckets; ++i) {
        Histogram.Bucket bucket = buckets.get(i);
        checkBucketKeyAndDocCount("InternalBucket " + i, bucket, i * interval, valueCounts[i]);
        SimpleValue docCountDeriv = bucket.getAggregations().get("deriv");
        if (i > 0) {
            assertThat(docCountDeriv, notNullValue());
            assertThat(docCountDeriv.value(), equalTo((double) firstDerivValueCounts[i - 1]));
        } else {
            assertThat(docCountDeriv, nullValue());
        }
    }
}
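Note: examples 10 and 11 call a checkBucketKeyAndDocCount(...) helper that is not reproduced on this page. The sketch below is an assumption inferred from the call sites (message, bucket, expected key, expected doc count), using Hamcrest matchers; the actual helper in the test class may differ.

private void checkBucketKeyAndDocCount(final String msg, final Histogram.Bucket bucket,
        final long expectedKey, final long expectedDocCount) {
    // Every bucket must exist and carry the expected numeric key and doc count.
    assertThat(msg, bucket, notNullValue());
    assertThat(msg + " key", ((Number) bucket.getKey()).longValue(), equalTo(expectedKey));
    assertThat(msg + " docCount", bucket.getDocCount(), equalTo(expectedDocCount));
}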
Example 12: testDocCountTopLevel
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; // import required for this method
public void testDocCountTopLevel() throws Exception {
    SearchResponse response = client().prepareSearch("idx")
            .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
                    .extendedBounds(minRandomValue, maxRandomValue))
            .addAggregation(statsBucket("stats_bucket", "histo>_count")).execute().actionGet();
    assertSearchResponse(response);
    Histogram histo = response.getAggregations().get("histo");
    assertThat(histo, notNullValue());
    assertThat(histo.getName(), equalTo("histo"));
    List<? extends Bucket> buckets = histo.getBuckets();
    assertThat(buckets.size(), equalTo(numValueBuckets));
    double sum = 0;
    int count = 0;
    double min = Double.POSITIVE_INFINITY;
    double max = Double.NEGATIVE_INFINITY;
    for (int i = 0; i < numValueBuckets; ++i) {
        Histogram.Bucket bucket = buckets.get(i);
        assertThat(bucket, notNullValue());
        assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval));
        assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
        count++;
        sum += bucket.getDocCount();
        min = Math.min(min, bucket.getDocCount());
        max = Math.max(max, bucket.getDocCount());
    }
    double avgValue = count == 0 ? Double.NaN : (sum / count);
    StatsBucket statsBucketValue = response.getAggregations().get("stats_bucket");
    assertThat(statsBucketValue, notNullValue());
    assertThat(statsBucketValue.getName(), equalTo("stats_bucket"));
    assertThat(statsBucketValue.getAvg(), equalTo(avgValue));
    assertThat(statsBucketValue.getMin(), equalTo(min));
    assertThat(statsBucketValue.getMax(), equalTo(max));
}
Example 13: testSingleValuedFieldNormalised_timeZone_CET_DstEnd
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; // import required for this method
/**
 * Do a derivative on a date histogram with time zone CET at DST end.
 */
public void testSingleValuedFieldNormalised_timeZone_CET_DstEnd() throws Exception {
    createIndex(IDX_DST_END);
    DateTimeZone timezone = DateTimeZone.forID("CET");
    List<IndexRequestBuilder> builders = new ArrayList<>();
    addNTimes(1, IDX_DST_END, new DateTime("2012-10-27T01:00:00", timezone), builders);
    addNTimes(2, IDX_DST_END, new DateTime("2012-10-28T01:00:00", timezone), builders); // day with dst shift -1h, 25h long
    addNTimes(3, IDX_DST_END, new DateTime("2012-10-29T01:00:00", timezone), builders);
    addNTimes(4, IDX_DST_END, new DateTime("2012-10-30T01:00:00", timezone), builders);
    indexRandom(true, builders);
    ensureSearchable();
    SearchResponse response = client()
            .prepareSearch(IDX_DST_END)
            .addAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY)
                    .timeZone(timezone).minDocCount(0)
                    .subAggregation(derivative("deriv", "_count").unit(DateHistogramInterval.HOUR)))
            .execute()
            .actionGet();
    assertSearchResponse(response);
    Histogram deriv = response.getAggregations().get("histo");
    assertThat(deriv, notNullValue());
    assertThat(deriv.getName(), equalTo("histo"));
    List<? extends Bucket> buckets = deriv.getBuckets();
    assertThat(buckets.size(), equalTo(4));
    assertBucket(buckets.get(0), new DateTime("2012-10-27", timezone).toDateTime(DateTimeZone.UTC), 1L, nullValue(), null, null);
    assertBucket(buckets.get(1), new DateTime("2012-10-28", timezone).toDateTime(DateTimeZone.UTC), 2L, notNullValue(), 1d, 1d / 24d);
    // the following is normalized using a 25h bucket width
    assertBucket(buckets.get(2), new DateTime("2012-10-29", timezone).toDateTime(DateTimeZone.UTC), 3L, notNullValue(), 1d, 1d / 25d);
    assertBucket(buckets.get(3), new DateTime("2012-10-30", timezone).toDateTime(DateTimeZone.UTC), 4L, notNullValue(), 1d, 1d / 24d);
}
Example 14: testSingleValuedFieldNormalised_timeZone_AsiaKathmandu
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; // import required for this method
/**
 * Also check for time zone shifts that are not one hour, e.g.
 * "Asia/Kathmandu, 1 Jan 1986 - Time Zone Change (IST → NPT), at 00:00:00 clocks were turned forward 00:15 minutes".
 */
public void testSingleValuedFieldNormalised_timeZone_AsiaKathmandu() throws Exception {
    createIndex(IDX_DST_KATHMANDU);
    DateTimeZone timezone = DateTimeZone.forID("Asia/Kathmandu");
    List<IndexRequestBuilder> builders = new ArrayList<>();
    addNTimes(1, IDX_DST_KATHMANDU, new DateTime("1985-12-31T22:30:00", timezone), builders);
    // the shift happens during the next bucket, which includes the 45min that do not start on the full hour
    addNTimes(2, IDX_DST_KATHMANDU, new DateTime("1985-12-31T23:30:00", timezone), builders);
    addNTimes(3, IDX_DST_KATHMANDU, new DateTime("1986-01-01T01:30:00", timezone), builders);
    addNTimes(4, IDX_DST_KATHMANDU, new DateTime("1986-01-01T02:30:00", timezone), builders);
    indexRandom(true, builders);
    ensureSearchable();
    SearchResponse response = client()
            .prepareSearch(IDX_DST_KATHMANDU)
            .addAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.HOUR)
                    .timeZone(timezone).minDocCount(0)
                    .subAggregation(derivative("deriv", "_count").unit(DateHistogramInterval.MINUTE)))
            .execute()
            .actionGet();
    assertSearchResponse(response);
    Histogram deriv = response.getAggregations().get("histo");
    assertThat(deriv, notNullValue());
    assertThat(deriv.getName(), equalTo("histo"));
    List<? extends Bucket> buckets = deriv.getBuckets();
    assertThat(buckets.size(), equalTo(4));
    assertBucket(buckets.get(0), new DateTime("1985-12-31T22:00:00", timezone).toDateTime(DateTimeZone.UTC), 1L, nullValue(), null,
            null);
    assertBucket(buckets.get(1), new DateTime("1985-12-31T23:00:00", timezone).toDateTime(DateTimeZone.UTC), 2L, notNullValue(), 1d,
            1d / 60d);
    // the following is normalized using a 105min bucket width
    assertBucket(buckets.get(2), new DateTime("1986-01-01T01:00:00", timezone).toDateTime(DateTimeZone.UTC), 3L, notNullValue(), 1d,
            1d / 105d);
    assertBucket(buckets.get(3), new DateTime("1986-01-01T02:00:00", timezone).toDateTime(DateTimeZone.UTC), 4L, notNullValue(), 1d,
            1d / 60d);
}
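Examples 13 and 14 also depend on helpers that this page does not show: addNTimes(...), which queues a number of identical index requests, and assertBucket(...), which checks a bucket's key, doc count, and derivative values. The following is only a sketch inferred from the call sites above; the document type "type", the field name "date", and the assumed static imports (XContentFactory.jsonBuilder plus the Hamcrest matchers) are illustrative guesses, not code from the original test class.

private void addNTimes(int amount, String index, DateTime dateTime, List<IndexRequestBuilder> builders) throws Exception {
    for (int i = 0; i < amount; i++) {
        // "type" and the "date" field are assumptions; they must match the mapping the test index uses.
        builders.add(client().prepareIndex(index, "type")
                .setSource(jsonBuilder().startObject().field("date", dateTime).endObject()));
    }
}

private void assertBucket(Histogram.Bucket bucket, DateTime expectedKey, long expectedDocCount,
        Matcher<Object> derivativeMatcher, Double derivative, Double normalizedDerivative) {
    // Check the bucket itself, then the "deriv" sub-aggregation against the supplied matcher and expected values.
    assertThat(bucket, notNullValue());
    assertThat((DateTime) bucket.getKey(), equalTo(expectedKey));
    assertThat(bucket.getDocCount(), equalTo(expectedDocCount));
    Derivative docCountDeriv = bucket.getAggregations().get("deriv");
    assertThat(docCountDeriv, derivativeMatcher);
    if (docCountDeriv != null) {
        assertThat(docCountDeriv.value(), closeTo(derivative, 0.00001));
        assertThat(docCountDeriv.normalizedValue(), closeTo(normalizedDerivative, 0.00001));
    }
}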
Example 15: testDocCountTopLevel
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; // import required for this method
public void testDocCountTopLevel() throws Exception {
    SearchResponse response = client().prepareSearch("idx")
            .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
                    .extendedBounds(minRandomValue, maxRandomValue))
            .addAggregation(minBucket("min_bucket", "histo>_count")).execute().actionGet();
    assertSearchResponse(response);
    Histogram histo = response.getAggregations().get("histo");
    assertThat(histo, notNullValue());
    assertThat(histo.getName(), equalTo("histo"));
    List<? extends Bucket> buckets = histo.getBuckets();
    assertThat(buckets.size(), equalTo(numValueBuckets));
    List<String> minKeys = new ArrayList<>();
    double minValue = Double.POSITIVE_INFINITY;
    for (int i = 0; i < numValueBuckets; ++i) {
        Histogram.Bucket bucket = buckets.get(i);
        assertThat(bucket, notNullValue());
        assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval));
        assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
        if (bucket.getDocCount() < minValue) {
            minValue = bucket.getDocCount();
            minKeys = new ArrayList<>();
            minKeys.add(bucket.getKeyAsString());
        } else if (bucket.getDocCount() == minValue) {
            minKeys.add(bucket.getKeyAsString());
        }
    }
    InternalBucketMetricValue minBucketValue = response.getAggregations().get("min_bucket");
    assertThat(minBucketValue, notNullValue());
    assertThat(minBucketValue.getName(), equalTo("min_bucket"));
    assertThat(minBucketValue.value(), equalTo(minValue));
    assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[minKeys.size()])));
}