This article collects typical usage examples of the Java class io.druid.timeline.partition.PartitionChunk. If you are wondering what the PartitionChunk class does or how to use it, the curated examples below may help.
The PartitionChunk class belongs to the io.druid.timeline.partition package. The 4 code examples below show the class in use, sorted by popularity by default.
Example 1: getShardSpec
import io.druid.timeline.partition.PartitionChunk; // import the required package/class
private static ShardSpec getShardSpec(final int partitionNum)
{
  return new ShardSpec()
  {
    @Override
    public <T> PartitionChunk<T> createChunk(T obj)
    {
      return null;
    }

    @Override
    public boolean isInChunk(long timestamp, InputRow inputRow)
    {
      return false;
    }

    @Override
    public int getPartitionNum()
    {
      return partitionNum;
    }

    @Override
    public ShardSpecLookup getLookup(List<ShardSpec> shardSpecs)
    {
      return null;
    }
  };
}
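The stub above returns null from createChunk, which is fine for tests that only need the partition number. For a sense of what a working ShardSpec produces, here is a minimal sketch, assuming Druid's LinearShardSpec (an existing io.druid.timeline.partition class); the String payload stands in for a DataSegment:

import io.druid.timeline.partition.LinearShardSpec;
import io.druid.timeline.partition.PartitionChunk;

public class ShardSpecSketch
{
  public static void main(String[] args)
  {
    // LinearShardSpec carries only a partition number, like the stub above,
    // but its createChunk wraps the payload in a real chunk.
    final LinearShardSpec spec = new LinearShardSpec(3);
    final PartitionChunk<String> chunk = spec.createChunk("segment-payload");
    System.out.println(spec.getPartitionNum() + " -> " + chunk.getObject()); // 3 -> segment-payload
  }
}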
Example 2: getSplits
import io.druid.timeline.partition.PartitionChunk; // import the required package/class
@Override
public List<InputSplit> getSplits(final JobContext jobContext) throws IOException, InterruptedException
{
  final Configuration conf = jobContext.getConfiguration();
  final String dataSource = getDataSource(conf);
  final List<Interval> intervals = getIntervals(conf);

  final List<DataSegment> segments;
  try (final HttpClientHolder httpClient = HttpClientHolder.create()) {
    segments = new DruidMetadataClient(
        httpClient.get(),
        objectMapper(),
        getCoordinatorHost(conf)
    ).usedSegments(dataSource, intervals);
  }
  catch (Exception e) {
    throw Throwables.propagate(e);
  }

  log.info(
      "Got %,d used segments for dataSource[%s], intervals[%s] from coordinator.",
      segments.size(),
      dataSource,
      Joiner.on(", ").join(intervals)
  );

  // Window the DataSegments by putting them in a timeline.
  final VersionedIntervalTimeline<String, DataSegment> timeline = new VersionedIntervalTimeline<>(Ordering.natural());
  for (DataSegment segment : segments) {
    timeline.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(segment));
  }

  final List<InputSplit> splits = Lists.newArrayList();
  for (Interval interval : intervals) {
    final List<TimelineObjectHolder<String, DataSegment>> lookup = timeline.lookup(interval);
    for (final TimelineObjectHolder<String, DataSegment> holder : lookup) {
      for (final PartitionChunk<DataSegment> chunk : holder.getObject()) {
        final WindowedDataSegment windowedDataSegment = new WindowedDataSegment(
            chunk.getObject(),
            holder.getInterval()
        );
        splits.add(DruidInputSplit.create(windowedDataSegment));
      }
    }
  }

  log.info(
      "Found %,d splits for dataSource[%s], intervals[%s].",
      splits.size(),
      dataSource,
      Joiner.on(", ").join(intervals)
  );

  return splits;
}
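The timeline step is what makes getSplits correct in the face of reindexing: when two segment versions cover the same interval, VersionedIntervalTimeline.lookup returns only the newer one, so overshadowed segments never become splits. A self-contained sketch of that behavior, using plain Strings in place of DataSegments and SingleElementPartitionChunk (an existing io.druid chunk type for unsharded values):

import com.google.common.collect.Ordering;
import io.druid.timeline.TimelineObjectHolder;
import io.druid.timeline.VersionedIntervalTimeline;
import io.druid.timeline.partition.PartitionChunk;
import io.druid.timeline.partition.SingleElementPartitionChunk;
import org.joda.time.Interval;

public class TimelineSketch
{
  public static void main(String[] args)
  {
    final VersionedIntervalTimeline<String, String> timeline =
        new VersionedIntervalTimeline<>(Ordering.natural());
    final Interval day = Interval.parse("2017-01-01/2017-01-02");

    // Two versions of the same day: v2 overshadows v1.
    timeline.add(day, "v1", new SingleElementPartitionChunk<>("old-segment"));
    timeline.add(day, "v2", new SingleElementPartitionChunk<>("new-segment"));

    for (TimelineObjectHolder<String, String> holder : timeline.lookup(day)) {
      for (PartitionChunk<String> chunk : holder.getObject()) {
        // Prints only: v2 -> new-segment
        System.out.println(holder.getVersion() + " -> " + chunk.getObject());
      }
    }
  }
}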
Example 3: createChunk
import io.druid.timeline.partition.PartitionChunk; // import the required package/class
@Override
public <T> PartitionChunk<T> createChunk(T obj) {
  throw new NotImplementedException("createChunk method is not implemented");
}
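For contrast with this not-implemented stub, a hedged sketch of what a working implementation usually looks like, modeled on Druid's numbered shard specs; the partitionNum and partitions fields here are illustrative, and NumberedPartitionChunk is assumed to come from io.druid.timeline.partition:

import io.druid.timeline.partition.NumberedPartitionChunk;
import io.druid.timeline.partition.PartitionChunk;

public class NumberedShardSpecSketch
{
  private final int partitionNum = 0; // this chunk's index among its siblings
  private final int partitions = 2;   // total number of chunks in the interval

  public <T> PartitionChunk<T> createChunk(T obj)
  {
    // Wrap the payload in a chunk that knows its position and sibling count.
    return NumberedPartitionChunk.make(partitionNum, partitions, obj);
  }
}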
Example 4: getSplits
import io.druid.timeline.partition.PartitionChunk; // import the required package/class
@Override
public List<InputSplit> getSplits(JobContext context) throws IOException, InterruptedException
{
  Configuration conf = context.getConfiguration();

  String overlordUrl = conf.get(CONF_DRUID_OVERLORD_HOSTPORT);
  Preconditions.checkArgument(
      overlordUrl != null && !overlordUrl.isEmpty(),
      CONF_DRUID_OVERLORD_HOSTPORT + " not defined"
  );
  logger.info("druid overlord url = " + overlordUrl);

  String schemaStr = conf.get(CONF_DRUID_SCHEMA);
  Preconditions.checkArgument(
      schemaStr != null && !schemaStr.isEmpty(),
      "schema undefined, provide " + CONF_DRUID_SCHEMA
  );
  logger.info("schema = " + schemaStr);

  DatasourceIngestionSpec ingestionSpec = HadoopDruidIndexerConfig.JSON_MAPPER.readValue(
      schemaStr,
      DatasourceIngestionSpec.class
  );

  String segmentsStr = getSegmentsToLoad(
      ingestionSpec.getDataSource(),
      Iterables.getOnlyElement(ingestionSpec.getIntervals()),
      overlordUrl
  );
  logger.info(String.format("segments list received from overlord = [%s]", segmentsStr));

  List<DataSegment> segmentsList = (HadoopDruidIndexerConfig.JSON_MAPPER.readValue(
      segmentsStr,
      Segments.class
  )).getSegments();

  VersionedIntervalTimeline<String, DataSegment> timeline = new VersionedIntervalTimeline<>(Ordering.natural());
  for (DataSegment segment : segmentsList) {
    timeline.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(segment));
  }

  final List<TimelineObjectHolder<String, DataSegment>> timeLineSegments = timeline.lookup(
      Iterables.getOnlyElement(ingestionSpec.getIntervals())
  );

  final List<WindowedDataSegment> windowedSegments = new ArrayList<>();
  for (TimelineObjectHolder<String, DataSegment> holder : timeLineSegments) {
    for (PartitionChunk<DataSegment> chunk : holder.getObject()) {
      windowedSegments.add(new WindowedDataSegment(chunk.getObject(), holder.getInterval()));
    }
  }

  conf.set(CONF_INPUT_SEGMENTS, HadoopDruidIndexerConfig.JSON_MAPPER.writeValueAsString(windowedSegments));
  return super.getSplits(context);
}
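Note that this getSplits stashes the windowed segment list in the Configuration under CONF_INPUT_SEGMENTS rather than encoding it in each split, so a later consumer must deserialize it again. A hedged sketch of that read side, under the assumption that the same Configuration and mapper are available; the TypeReference is the standard Jackson idiom for deserializing a generic list:

import com.fasterxml.jackson.core.type.TypeReference;
import java.util.List;

// Somewhere on the consuming side, given the same Configuration `conf`:
List<WindowedDataSegment> windowed = HadoopDruidIndexerConfig.JSON_MAPPER.readValue(
    conf.get(CONF_INPUT_SEGMENTS),
    new TypeReference<List<WindowedDataSegment>>() {}
);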