This article collects typical usage examples of the Java class io.druid.query.aggregation.AggregatorFactory. If you are wondering what AggregatorFactory is for, or how and where to use it, the examples curated below should help.
The AggregatorFactory class belongs to the io.druid.query.aggregation package. Fifteen code examples are shown below, ordered by popularity.
Example 1: createDruidSegments
import io.druid.query.aggregation.AggregatorFactory; // import the required package/class
public static QueryableIndex createDruidSegments() throws IOException {
    // Create druid segments from raw data
    Reader reader = new BufferedReader(new FileReader(new File("./src/test/resources/report.csv")));
    List<String> columns = Arrays.asList("colo", "pool", "report", "URL", "TS", "metric", "value", "count", "min", "max", "sum");
    List<String> exclusions = Arrays.asList("_Timestamp", "_Machine", "_ThreadId", "_Query");
    List<String> metrics = Arrays.asList("value", "count", "min", "max", "sum");
    // Dimensions are all columns except the excluded and metric columns. Filter the
    // names before wrapping them in StringDimensionSchema: calling removeAll with
    // String elements on a List<DimensionSchema> would silently remove nothing.
    List<DimensionSchema> dimensions = new ArrayList<DimensionSchema>();
    for (String dim : columns) {
        if (!exclusions.contains(dim) && !metrics.contains(dim)) {
            dimensions.add(new StringDimensionSchema(dim));
        }
    }
    Loader loader = new CSVLoader(reader, columns, columns, "TS");
    DimensionsSpec dimensionsSpec = new DimensionsSpec(dimensions, null, null);
    AggregatorFactory[] metricsAgg = new AggregatorFactory[] {
        new LongSumAggregatorFactory("agg_count", "count"),
        new LongMaxAggregatorFactory("agg_max", "max"),
        new LongMinAggregatorFactory("agg_min", "min"),
        new DoubleSumAggregatorFactory("agg_sum", "sum"),
    };
    IncrementalIndexSchema indexSchema = new IncrementalIndexSchema(0, QueryGranularity.fromString("ALL"), dimensionsSpec, metricsAgg);
    return IndexHelper.getQueryableIndex(loader, indexSchema);
}
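Once the segments are built, the returned index can be inspected directly. A minimal sketch, assuming the getNumRows() and getAvailableDimensions() accessors of io.druid.segment.QueryableIndex:
QueryableIndex index = createDruidSegments();
System.out.println("rows: " + index.getNumRows());
for (String dim : index.getAvailableDimensions()) { // Indexed<String> is Iterable
    System.out.println("dimension: " + dim);
}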
Example 2: AggregatorFactoryAdapter
import io.druid.query.aggregation.AggregatorFactory; // import the required package/class
public AggregatorFactoryAdapter(String aggFactorySpec, String metricType) {
    ObjectMapper jsonMapper = HadoopDruidIndexerConfig.JSON_MAPPER;
    try {
        this.aggFactory = jsonMapper.readValue(aggFactorySpec, AggregatorFactory.class);
    } catch (IOException ex) {
        throw new IllegalArgumentException("failed to create aggregator factory", ex);
    }
    if (DruidUtils.isComplex(metricType)) {
        ComplexMetricSerde cms = ComplexMetrics.getSerdeForType(metricType);
        if (cms != null) {
            strategy = cms.getObjectStrategy();
        } else {
            throw new IllegalArgumentException("failed to find object strategy for " + metricType);
        }
    } else {
        strategy = null;
    }
}
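The factory spec passed in is ordinary Druid aggregator JSON. A hedged usage sketch; the spec values and the "longSum" metric type (a non-complex type, so strategy stays null) are illustrative:
String spec = "{\"type\": \"longSum\", \"name\": \"agg_sum\", \"fieldName\": \"sum\"}";
AggregatorFactoryAdapter adapter = new AggregatorFactoryAdapter(spec, "longSum");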
Example 3: getMergingFactory
import io.druid.query.aggregation.AggregatorFactory; // import the required package/class
@Override
public AggregatorFactory getMergingFactory(AggregatorFactory other) throws AggregatorFactoryNotMergeableException
{
    if (other.getName().equals(this.getName()) && this.getClass() == other.getClass()) {
        return getCombiningFactory();
    } else {
        throw new AggregatorFactoryNotMergeableException(this, other);
    }
}
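A caller merging per-segment schemas exercises this method as below. A minimal sketch, assuming two hypothetical factories factoryA and factoryB:
try {
    // Same name and same concrete class: the combining factory is returned.
    AggregatorFactory merged = factoryA.getMergingFactory(factoryB);
} catch (AggregatorFactoryNotMergeableException e) {
    // Name or class differed, so the two schemas cannot be merged.
}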
Example 4: topNQuery
import io.druid.query.aggregation.AggregatorFactory; // import the required package/class
@Test
public void topNQuery() throws IOException {
    QueryableIndex index = createDruidSegments();
    List<DimFilter> filters = new ArrayList<DimFilter>();
    filters.add(DimFilters.dimEquals("report", "URLTransaction"));
    filters.add(DimFilters.dimEquals("pool", "r1cart"));
    filters.add(DimFilters.dimEquals("metric", "Duration"));
    TopNQuery query =
        new TopNQueryBuilder()
            .threshold(5)
            .metric("agg_count")
            .dataSource("test")
            .intervals(QuerySegmentSpecs.create(new Interval(0, new DateTime().getMillis())))
            .granularity(QueryGranularity.fromString("NONE"))
            .dimension("colo")
            .aggregators(
                Arrays.<AggregatorFactory>asList(
                    new LongSumAggregatorFactory("agg_count", "agg_count"),
                    new LongMaxAggregatorFactory("agg_max", "agg_max"),
                    new LongMinAggregatorFactory("agg_min", "agg_min"),
                    new DoubleSumAggregatorFactory("agg_sum", "agg_sum")))
            .filters(DimFilters.and(filters))
            .build();
    @SuppressWarnings("unchecked")
    Sequence<Result> sequence = QueryHelper.run(query, index);
    ArrayList<Result> results = Sequences.toList(sequence, Lists.<Result>newArrayList());
    // JUnit's assertEquals takes (expected, actual), in that order.
    Assert.assertEquals(1, results.size());
}
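Each element of the result list pairs a timestamp with the top-N rows for that time bucket. A small sketch of reading the sequence back, using the same raw Result type as the test:
for (Result result : results) {
    System.out.println(result.getTimestamp() + " -> " + result.getValue());
}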
Example 5: makeBeam
import io.druid.query.aggregation.AggregatorFactory; // import the required package/class
@Override
public Beam<Map<String, Object>> makeBeam(Map<?, ?> conf, IMetricsContext metrics) {
    List<AggregatorFactory> aggregator = getAggregatorList();
    // Tranquility needs to be able to extract timestamps from your object type (in this case, Map<String, Object>).
    final Timestamper<Map<String, Object>> timestamper = new StreamlineTimestamper(timestampField);
    // Tranquility uses ZooKeeper (through Curator) for coordination.
    final CuratorFramework curator = CuratorFrameworkFactory
        .builder()
        .connectString(tranquilityZKconnect) // we can use Storm conf to get config values
        .retryPolicy(new ExponentialBackoffRetry(1000, 20, 30000))
        .build();
    curator.start();
    // The JSON serialization of your object must have a timestamp field in a format that Druid understands.
    // By default, Druid expects the field to be called "timestamp" and to be an ISO8601 timestamp.
    final TimestampSpec timestampSpec = new TimestampSpec(timestampField, "auto", null);
    // Tranquility needs to be able to serialize your object type to JSON for transmission to Druid. By default
    // this is done with Jackson; an alternate serializer can be supplied via .objectWriter(...). Here we rely on Jackson.
    final Beam<Map<String, Object>> beam = DruidBeams
        .builder(timestamper)
        .curator(curator)
        .discoveryPath(discoveryPath)
        .location(DruidLocation.create(indexService, dataSource))
        .timestampSpec(timestampSpec)
        .rollup(DruidRollup.create(DruidDimensions.specific(getTrimmedDimensions(dimensions)), aggregator, getQueryGranularity()))
        .tuning(
            ClusteredBeamTuning
                .builder()
                .segmentGranularity(getSegmentGranularity())
                .windowPeriod(new Period(windowPeriod))
                .partitions(clusterPartitions)
                .replicants(clusterReplication)
                .build())
        .druidBeamConfig(
            DruidBeamConfig
                .builder()
                .indexRetryPeriod(new Period(indexRetryPeriod))
                .build())
        .buildBeam();
    return beam;
}
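An event sent through this beam is just a Map whose timestamp field satisfies the "auto" TimestampSpec above. A minimal sketch with hypothetical payload fields:
Map<String, Object> event = new HashMap<String, Object>();
event.put(timestampField, new DateTime().toString()); // ISO8601, parseable by the "auto" spec
event.put("colo", "phx"); // hypothetical dimension value
event.put("count", 1L);   // hypothetical metric value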
Example 6: build
import io.druid.query.aggregation.AggregatorFactory; // import the required package/class
public static List<AggregatorFactory> build(String rawAggregators) {
    // TODO pending implementation: parse the comma-separated rawAggregators
    // string into one AggregatorFactory per entry. For now every event is
    // simply counted.
    List<AggregatorFactory> aggregators = new ArrayList<AggregatorFactory>();
    aggregators.add(new CountAggregatorFactory("count"));
    return aggregators;
}
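One way the TODO could be filled in, as a hedged sketch only: assume each comma-separated entry has the form type:name:fieldName, a format invented here for illustration:
public static List<AggregatorFactory> build(String rawAggregators) {
    List<AggregatorFactory> aggregators = new ArrayList<AggregatorFactory>();
    for (String spec : rawAggregators.split(",")) {
        String[] parts = spec.trim().split(":"); // e.g. "longSum:agg_sum:sum" (hypothetical format)
        if ("longSum".equals(parts[0])) {
            aggregators.add(new LongSumAggregatorFactory(parts[1], parts[2]));
        } else if ("doubleSum".equals(parts[0])) {
            aggregators.add(new DoubleSumAggregatorFactory(parts[1], parts[2]));
        } else {
            aggregators.add(new CountAggregatorFactory(parts[1]));
        }
    }
    return aggregators;
}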
Example 7: getCombiningFactory
import io.druid.query.aggregation.AggregatorFactory; // import the required package/class
@Override
public AggregatorFactory getCombiningFactory()
{
    return new ExampleSumAggregatorFactory(name, name);
}
Example 8: getRequiredColumns
import io.druid.query.aggregation.AggregatorFactory; // import the required package/class
@Override
public List<AggregatorFactory> getRequiredColumns()
{
    return ImmutableList.of(new ExampleSumAggregatorFactory(fieldName, fieldName));
}
Example 9: configure
import io.druid.query.aggregation.AggregatorFactory; // import the required package/class
@Override
public void configure(Context context) {
    indexService = context.getString(INDEX_SERVICE);
    discoveryPath = context.getString(DISCOVERY_PATH);
    dimensions = Arrays.asList(context.getString(DIMENSIONS).split(","));
    firehosePattern = context.getString(FIREHOSE_PATTERN, DEFAULT_FIREHOSE);
    dataSource = context.getString(DATA_SOURCE, DEFAUL_DATASOURCE);
    aggregators = AggregatorsHelper.build(context.getString(AGGREGATORS));
    queryGranularity = QueryGranularity.fromString(context.getString(QUERY_GRANULARITY, DEFAULT_QUERY_GRANULARITY));
    segmentGranularity = Granularity.valueOf(context.getString(SEGMENT_GRANULARITY, DEFAULT_SEGMENT_GRANULARITY));
    windowPeriod = context.getString(WINDOW_PERIOD, DEFAULT_PERIOD);
    partitions = context.getInteger(PARTITIONS, DEFAULT_PARTITIONS);
    replicants = context.getInteger(REPLICANTS, DEFAULT_REPLICANTS);
    timestampField = context.getString(TIMESTAMP_FIELD, DEFAULT_TIMESTAMP_FIELD);
    timestampFormat = context.getString(TIMESTAMP_FORMAT, null);
    zookeeperLocation = context.getString(ZOOKEEPER_LOCATION, DEFAULT_ZOOKEEPER_LOCATION);
    baseSleepTime = context.getInteger(ZOOKEEPPER_BASE_SLEEP_TIME, DEFAULT_ZOOKEEPER_BASE_SLEEP);
    maxRetries = context.getInteger(ZOOKEEPER_MAX_RETRIES, DEFAULT_ZOOKEEPER_MAX_RETRIES);
    maxSleep = context.getInteger(ZOOKEEPER_MAX_SLEEP, DEFAULT_ZOOKEEPER_MAX_SLEEP);
    batchSize = context.getInteger(BATCH_SIZE, DEFAULT_BATCH_SIZE);
    druidService = sinkStrategy.getDruidService();
    if (druidService != null) {
        sinkCounter = new SinkCounter(this.getName());
        // timestampFormat has no default and may be null; guard before calling equals().
        if (timestampFormat == null || timestampFormat.equals("millis")) {
            dateTimeFormatter = null; // raw epoch millis, nothing to parse
        } else if (timestampFormat.equals("auto")) {
            dateTimeFormatter = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'");
        } else {
            dateTimeFormatter = DateTimeFormat.forPattern(timestampFormat);
        }
        // Filter defined fields
        Set<String> filter = new HashSet<String>();
        filter.add(timestampField);
        filter.addAll(dimensions);
        for (AggregatorFactory aggregatorFactory : aggregators) {
            filter.addAll(aggregatorFactory.requiredFields());
        }
        eventParser = new FlumeEventParser(timestampField, dateTimeFormatter, filter);
    }
}
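The keys read above come from the Flume sink context. A hedged test sketch driving configure(); the literal key strings and values are assumptions, since the source does not show what the constants resolve to:
Map<String, String> params = new HashMap<String, String>();
params.put("indexService", "druid/overlord");    // hypothetical value of INDEX_SERVICE
params.put("discoveryPath", "/druid/discovery"); // hypothetical value of DISCOVERY_PATH
params.put("dimensions", "colo,pool,report");
params.put("aggregators", "count");
Context context = new Context(params); // org.apache.flume.Context
sink.configure(context);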
Example 10: getLongMinAggregator
import io.druid.query.aggregation.AggregatorFactory; // import the required package/class
private AggregatorFactory getLongMinAggregator(Map<String, String> map) {
    return new LongMinAggregatorFactory(map.get("name"), map.get("fieldName"));
}
Example 11: getLongMaxAggregator
import io.druid.query.aggregation.AggregatorFactory; // import the required package/class
private AggregatorFactory getLongMaxAggregator(Map<String, String> map) {
    return new LongMaxAggregatorFactory(map.get("name"), map.get("fieldName"));
}
Example 12: getLongSumAggregator
import io.druid.query.aggregation.AggregatorFactory; // import the required package/class
private AggregatorFactory getLongSumAggregator(Map<String, String> map) {
    return new LongSumAggregatorFactory(map.get("name"), map.get("fieldName"));
}
Example 13: getDoubleMinAggregator
import io.druid.query.aggregation.AggregatorFactory; // import the required package/class
private AggregatorFactory getDoubleMinAggregator(Map<String, String> map) {
    return new DoubleMinAggregatorFactory(map.get("name"), map.get("fieldName"));
}
Example 14: getDoubleMaxAggregator
import io.druid.query.aggregation.AggregatorFactory; // import the required package/class
private AggregatorFactory getDoubleMaxAggregator(Map<String, String> map) {
    return new DoubleMaxAggregatorFactory(map.get("name"), map.get("fieldName"));
}
Example 15: getDoubleSumAggregator
import io.druid.query.aggregation.AggregatorFactory; // import the required package/class
private AggregatorFactory getDoubleSumAggregator(Map<String, String> map) {
    return new DoubleSumAggregatorFactory(map.get("name"), map.get("fieldName"));
}
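Examples 10 through 15 differ only in the factory they construct. A hedged sketch of a dispatcher that selects one by a "type" key; the key name and the wiring are assumptions, not part of the original class:
private AggregatorFactory getAggregator(Map<String, String> map) {
    String type = map.get("type"); // hypothetical key naming the aggregator kind
    switch (type) {
        case "longMin":   return getLongMinAggregator(map);
        case "longMax":   return getLongMaxAggregator(map);
        case "longSum":   return getLongSumAggregator(map);
        case "doubleMin": return getDoubleMinAggregator(map);
        case "doubleMax": return getDoubleMaxAggregator(map);
        case "doubleSum": return getDoubleSumAggregator(map);
        default: throw new IllegalArgumentException("unknown aggregator type: " + type);
    }
}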