本文整理汇总了Java中backtype.storm.task.WorkerTopologyContext类的典型用法代码示例。如果您正苦于以下问题:Java WorkerTopologyContext类的具体用法?Java WorkerTopologyContext怎么用?Java WorkerTopologyContext使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
WorkerTopologyContext类属于backtype.storm.task包,在下文中一共展示了WorkerTopologyContext类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: prepare
import backtype.storm.task.WorkerTopologyContext; //导入依赖的package包/类
/**
 * Prepares this HBase-aware partitioner: records the target task ids, opens an
 * {@code HTable} for {@code tableName} and caches the table's region layout
 * (via {@code refreshRegionInfo}) so later tuples can be routed by region.
 *
 * @param context     worker topology context (not used here)
 * @param stream      the stream this grouping is attached to (logged only)
 * @param targetTasks task ids of the consuming component
 * @throws RuntimeException if the HBase table cannot be opened or its region
 *                          info cannot be loaded
 */
public void prepare(WorkerTopologyContext context, GlobalStreamId stream, List<Integer> targetTasks) {
  System.out.println("preparing HBaseStreamPartitioner for streamId " + stream.get_streamId());
  this.targetTasks = targetTasks;
  this.targetTasksSize = this.targetTasks.size();
  Configuration conf = HBaseConfiguration.create();
  try {
    hTable = new HTable(conf, tableName);
    refreshRegionInfo(tableName);
    System.out.println("regionStartKeyRegionNameMap: " + regionStartKeyRegionNameMap);
  } catch (IOException e) {
    // Fail fast with the cause attached: the original printStackTrace() swallow
    // left hTable null and produced an obscure NPE on the first routed tuple.
    throw new RuntimeException("Failed to initialize HBase table '" + tableName + "'", e);
  }
}
示例2: prepare
import backtype.storm.task.WorkerTopologyContext; //导入依赖的package包/类
/**
 * Captures the target task list and precomputes shard-routing state: each
 * shard owns a contiguous group of {@code tasksPerShard} tasks, a uniform
 * random distribution picks a task within a shard, and the doc router's hash
 * ranges are partitioned once up front.
 */
public void prepare(WorkerTopologyContext context, GlobalStreamId stream, List<Integer> targetTasks) {
  this.targetTasks = targetTasks;
  final int taskCount = targetTasks.size();
  if (taskCount % numShards != 0) {
    throw new IllegalArgumentException("Number of tasks ("+taskCount+") should be a multiple of the number of shards ("+numShards+")!");
  }
  this.tasksPerShard = taskCount / numShards;
  this.random = new UniformIntegerDistribution(0, tasksPerShard - 1);
  CompositeIdRouter idRouter = new CompositeIdRouter();
  this.ranges = idRouter.partitionRange(numShards, idRouter.fullRange());
}
示例3: prepare
import backtype.storm.task.WorkerTopologyContext; //导入依赖的package包/类
/**
 * Captures the target task list, discovers the shard layout via
 * {@code initShardInfo()}, and precomputes the per-shard task count plus a
 * uniform random distribution used to pick a task within a shard.
 */
public void prepare(WorkerTopologyContext context, GlobalStreamId stream, List<Integer> targetTasks) {
  this.targetTasks = targetTasks;
  final int taskCount = targetTasks.size();
  // Resolve shard-to-task mapping before validating divisibility.
  final int shardCount = initShardInfo();
  if (taskCount % shardCount != 0) {
    throw new IllegalArgumentException("Number of tasks ("+taskCount+") should be a multiple of the number of shards ("+shardCount+")!");
  }
  this.tasksPerShard = taskCount / shardCount;
  this.random = new UniformIntegerDistribution(0, tasksPerShard - 1);
}
示例4: prepare
import backtype.storm.task.WorkerTopologyContext; //导入依赖的package包/类
@Override
public void prepare(WorkerTopologyContext context, GlobalStreamId stream, List<Integer> tasks) {
    // Identity grouping: after sorting both sides, pair the i-th source task
    // with the i-th target task one-to-one.
    List<Integer> sortedSources = new ArrayList<Integer>(context.getComponentTasks(stream.get_componentId()));
    Collections.sort(sortedSources);
    if (sortedSources.size() != tasks.size()) {
        throw new RuntimeException("Can only do an identity grouping when source and target have same number of tasks");
    }
    List<Integer> sortedTargets = new ArrayList<Integer>(tasks);
    Collections.sort(sortedTargets);
    for (int idx = 0; idx < sortedSources.size(); idx++) {
        _precomputed.put(sortedSources.get(idx), Arrays.asList(sortedTargets.get(idx)));
    }
}
示例5: prepare
import backtype.storm.task.WorkerTopologyContext; //导入依赖的package包/类
@Override
public void prepare(WorkerTopologyContext context, GlobalStreamId stream,
        List<Integer> targetTasks) {
    // Keep only the _n lowest task ids as this grouping's output task set.
    List<Integer> sorted = new ArrayList<Integer>(targetTasks);
    Collections.sort(sorted);
    _outTasks = new ArrayList<Integer>(sorted.subList(0, _n));
}
示例6: prepare
import backtype.storm.task.WorkerTopologyContext; //导入依赖的package包/类
@Override
public void prepare(WorkerTopologyContext context, GlobalStreamId stream, List<Integer> tasks) {
    // Identity grouping: sort both task lists and map source[i] -> target[i].
    List<Integer> orderedSources = new ArrayList<Integer>(context.getComponentTasks(stream.get_componentId()));
    Collections.sort(orderedSources);
    if (orderedSources.size() != tasks.size()) {
        throw new RuntimeException("Can only do an identity grouping when source and target have same number of tasks");
    }
    List<Integer> orderedTargets = new ArrayList<Integer>(tasks);
    Collections.sort(orderedTargets);
    int pairCount = orderedSources.size();
    for (int idx = 0; idx < pairCount; idx++) {
        _precomputed.put(orderedSources.get(idx), Arrays.asList(orderedTargets.get(idx)));
    }
}
示例7: prepare
import backtype.storm.task.WorkerTopologyContext; //导入依赖的package包/类
@Override
public void prepare(WorkerTopologyContext context, GlobalStreamId stream, List<Integer> targetTasks) {
    // Remember the consumers and allocate one stats counter per target task.
    this.targetTasks = targetTasks;
    targetTaskStats = new long[targetTasks.size()];
    // Field-based grouping configured: resolve the source stream's declared
    // output fields so tuple values can be extracted later.
    if (this.fields != null) {
        this.outFields = context.getComponentOutputFields(stream);
    }
}
示例8: prepare
import backtype.storm.task.WorkerTopologyContext; //导入依赖的package包/类
@Override
public void prepare(WorkerTopologyContext context, GlobalStreamId stream, List<Integer> targetTasks) {
    // Sort the targets and retain the first _n as the output tasks.
    List<Integer> ordered = new ArrayList<Integer>(targetTasks);
    Collections.sort(ordered);
    _outTasks = new ArrayList<Integer>(ordered.subList(0, _n));
}
示例9: prepare
import backtype.storm.task.WorkerTopologyContext; //导入依赖的package包/类
@Override
public void prepare(WorkerTopologyContext context, GlobalStreamId stream, List<Integer> targetTasks) {
    // Assign each source task a target task round-robin (wrap with modulo
    // when there are more sources than targets).
    List<Integer> sourceTasks = context.getComponentTasks(stream.get_componentId());
    int targetCount = targetTasks.size();
    for (int idx = 0; idx < sourceTasks.size(); idx++) {
        idMapping.put(sourceTasks.get(idx), targetTasks.get(idx % targetCount));
    }
}
示例10: prepare
import backtype.storm.task.WorkerTopologyContext; //导入依赖的package包/类
@Override
public void prepare(WorkerTopologyContext context, GlobalStreamId stream, List<Integer> targetTasks) {
    this.targetTasks = targetTasks;
    // Scan the source component's declared output fields once and remember the
    // positions of the stream-id and sequence-number fields for later routing.
    Fields declaredFields = context.getComponentOutputFields(stream);
    for (int idx = 0; idx < declaredFields.size(); idx++) {
        String fieldName = declaredFields.get(idx);
        if (fieldName.equals(CVParticleSerializer.STREAMID)) {
            streamIdIndex = idx;
        } else if (fieldName.equals(CVParticleSerializer.SEQUENCENR)) {
            sequenceNrIndex = idx;
        }
    }
}
示例11: prepare
import backtype.storm.task.WorkerTopologyContext; //导入依赖的package包/类
@Override
public void prepare(WorkerTopologyContext context, GlobalStreamId stream, List<Integer> targetTasks) {
    // The _n smallest task ids (after sorting) become the output task set.
    List<Integer> sortedTargets = new ArrayList<Integer>(targetTasks);
    Collections.sort(sortedTargets);
    _outTasks = new ArrayList<Integer>(sortedTargets.subList(0, _n));
}
示例12: prepare
import backtype.storm.task.WorkerTopologyContext; //导入依赖的package包/类
/**
 * Initializes this grouping from the thrift {@code Grouping} definition:
 * either a fields grouping (wrap the declared fields) or a custom serialized
 * grouping (deserialize it and delegate preparation to it).
 */
public void prepare(WorkerTopologyContext context,
        GlobalStreamId stream, List<Integer> targetTasks) {
    this.targetTasks = targetTasks;
    this.num_tasks = targetTasks.size();
    if (gr.is_set_fields()) {
        gr_fields = new FFields(gr.get_fields());
    } else if (gr.is_set_custom_serialized()) {
        wrapped = (CustomStreamGrouping) Utils.deserialize(gr.get_custom_serialized());
        wrapped.prepare(context, stream, targetTasks);
    }
}
示例13: makeWorkerTopologyContext
import backtype.storm.task.WorkerTopologyContext; //导入依赖的package包/类
/**
 * Assembles a {@link WorkerTopologyContext} for the given topology from the
 * worker-level state (conf, task/component maps, resource paths and ports)
 * held by this maker.
 */
public WorkerTopologyContext makeWorkerTopologyContext(StormTopology topology) {
    Map conf = workerData.getStormConf();
    String id = workerData.getTopologyId();
    HashMap<String, Map<String, Fields>> streamFields =
            workerData.generateComponentToStreamToFields(topology);
    return new WorkerTopologyContext(topology, conf, workerData.getTasksToComponent(),
            workerData.getComponentToSortedTasks(), streamFields, id, resourcePath,
            workerId, workerData.getPort(), workerTasks,
            workerData.getDefaultResources(), workerData.getUserResources());
}
示例14: updateKryoSerializer
import backtype.storm.task.WorkerTopologyContext; //导入依赖的package包/类
/**
 * Rebuilds the kryo tuple serializer/deserializer against a freshly-made
 * worker topology context and publishes both atomically for readers.
 */
public void updateKryoSerializer() {
    WorkerTopologyContext workerTopologyContext = contextMaker.makeWorkerTopologyContext(sysTopology);
    KryoTupleDeserializer kryoTupleDeserializer =
            new KryoTupleDeserializer(stormConf, workerTopologyContext, workerTopologyContext.getRawTopology());
    KryoTupleSerializer kryoTupleSerializer =
            new KryoTupleSerializer(stormConf, workerTopologyContext.getRawTopology());
    // set() instead of getAndSet(): the previous values were discarded anyway.
    atomKryoDeserializer.set(kryoTupleDeserializer);
    atomKryoSerializer.set(kryoTupleSerializer);
}
示例15: setDeserializeThreads
import backtype.storm.task.WorkerTopologyContext; //导入依赖的package包/类
/**
 * Spawns the worker-level deserialize threads. The thread count is derived
 * from the configured threads-per-task ratio; each thread receives a starting
 * task index computed so tasks are spread evenly across threads.
 *
 * @return the accumulated list of deserialize threads
 */
protected List<AsyncLoopThread> setDeserializeThreads() {
    WorkerTopologyContext workerContext = contextMaker.makeWorkerTopologyContext(sysTopology);
    int taskCount = shutdownTasks.size();
    double threadRatio = ConfigExtension.getWorkerDeserializeThreadRatio(stormConf);
    int threadCount = Utils.getInt(Math.ceil(threadRatio * taskCount));
    if (threadCount > 0 && taskCount > 0) {
        // Evenly partition taskCount tasks over threadCount threads; rounding
        // via Math.rint keeps partition boundaries balanced.
        double tasksPerThread = taskCount / (double) threadCount;
        for (int threadIdx = 0; threadIdx < threadCount; threadIdx++) {
            int firstTaskIndex = Utils.getInt(Math.rint(tasksPerThread * threadIdx));
            deserializeThreads.add(new AsyncLoopThread(new WorkerDeserializeRunnable(
                    shutdownTasks, stormConf, workerContext, firstTaskIndex, threadIdx)));
        }
    }
    return deserializeThreads;
}