This page collects typical usage examples of the Java method backtype.storm.task.TopologyContext.getThisTaskId. If you are wondering what TopologyContext.getThisTaskId does, how to call it, or what its real-world usage looks like, the curated code samples below should help. You can also read further about the enclosing class, backtype.storm.task.TopologyContext.
Below, 9 code examples of TopologyContext.getThisTaskId are shown, sorted by popularity by default. You can upvote the examples you find useful; ratings feed back into how examples are ranked.
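Before the project-specific examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the class and field names are illustrative) of the most common pattern: getThisTaskId() returns the integer id of the task running this component instance, and it is often combined with getThisComponentId() to build a task-unique identifier, e.g. for logging.

import java.util.Map;

import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Tuple;

// Hypothetical bolt illustrating the pattern used in several examples below:
// combine getThisComponentId() and getThisTaskId() into a task-unique name.
public class TaskIdAwareBolt extends BaseRichBolt {
    private OutputCollector collector;
    private String myId;

    @Override
    public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
        // e.g. "my-bolt-7": the component id plus the id of this particular task
        this.myId = context.getThisComponentId() + "-" + context.getThisTaskId();
    }

    @Override
    public void execute(Tuple input) {
        // a real bolt would do its work here; this sketch just acks the tuple
        collector.ack(input);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // this sketch emits nothing, so there are no output fields to declare
    }
}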
Example 1: prepare
import backtype.storm.task.TopologyContext; // import the package/class the method depends on
public void prepare(Map conf, TopologyContext ctx, OutputCollector collector) {
    this.collector = collector;
    this.myId = ctx.getThisComponentId() + "-" + ctx.getThisTaskId();
    this.summary = new Summary();
    this.publisher = new ZkPublisher();
    try {
        this.publisher.init(conf);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    this.lastPublishedTimestamp = 0;
}
Example 2: open
import backtype.storm.task.TopologyContext; // import the package/class the method depends on
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    MessageDigest md;
    int counter;
    this.thisTaskIndex = context.getThisTaskIndex();
    this.numSpouts = context.getComponentTasks(context.getThisComponentId()).size();
    counter = 0;
    try {
        md = MessageDigest.getInstance("MD5");
    } catch (NoSuchAlgorithmException e) {
        throw new RuntimeException("Couldn't find MD5 algorithm.", e);
    }
    // We want to create a message that hashes to exactly one of the spouts. As there are the same number
    // of bolts on each level as there are spouts, we just keep looking until we find a UUID whose hash code
    // would be assigned to the id of this spout (if it were a bolt).
    do {
        if (++counter > 1000 * 1000) {
            throw new RuntimeException("Unable to generate required UUID in 1 mio tries.");
        }
        byte[] bytes = md.digest(UUID.randomUUID().toString().getBytes());
        this.uuid = new String(bytes);
    } while (this.uuid.hashCode() % this.numSpouts != this.thisTaskIndex);
    this.collector = collector;
    if (!this.disableAniello) {
        // this will create/configure the worker monitor once per worker
        WorkerMonitor.getInstance().setContextInfo(context);
        // this object is used in the emit/execute method to compute the number of inter-node messages
        this.taskMonitor = new TaskMonitor(context.getThisTaskId());
    }
}
Example 3: prepare
import backtype.storm.task.TopologyContext; // import the package/class the method depends on
@Override
public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
    this.collector = collector;
    if (!this.disableAniello) {
        // this will create/configure the worker monitor once per worker
        WorkerMonitor.getInstance().setContextInfo(context);
        // this object is used in the emit/execute method to compute the number of inter-node messages
        this.taskMonitor = new TaskMonitor(context.getThisTaskId());
    }
}
Example 4: prepare
import backtype.storm.task.TopologyContext; // import the package/class the method depends on
@Override
public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
    // this object is used in the emit/execute method to compute the number of inter-node messages
    this.taskMonitor = new TaskMonitor(context.getThisTaskId());
    this.collector = collector;
    if (!this.disableAniello) {
        // this will create/configure the worker monitor once per worker
        WorkerMonitor.getInstance().setContextInfo(context);
        // this object is used in the emit/execute method to compute the number of inter-node messages
        this.taskMonitor = new TaskMonitor(context.getThisTaskId());
    }
}
Example 5: open
import backtype.storm.task.TopologyContext; // import the package/class the method depends on
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    this.collector = collector;
    this.rnd = new Random();
    if (!this.disableAniello) {
        // this will create/configure the worker monitor once per worker
        WorkerMonitor.getInstance().setContextInfo(context);
        // this object is used in the emit/execute method to compute the number of inter-node messages
        this.taskMonitor = new TaskMonitor(context.getThisTaskId());
    }
}
Example 6: prepare
import backtype.storm.task.TopologyContext; // import the package/class the method depends on
public void prepare(Map conf, TopologyContext ctx, OutputCollector collector) {
    this.collector = collector;
    this.myId = ctx.getThisComponentId() + "-" + ctx.getThisTaskId();
    this.sentimentData = (Map<String, Long>) conf.get("sentimentData");
    if (this.sentimentData != null) {
        LOG.info("SentiCalcBolt " + myId + " has received sentimentData");
    }
}
Example 7: prepare
import backtype.storm.task.TopologyContext; // import the package/class the method depends on
@SuppressWarnings("unchecked")
@Override
public void prepare(Map conf, TopologyContext topologyContext) {
    this.componentId = topologyContext.getThisComponentId();
    this.taskId = topologyContext.getThisTaskId();
    try {
        this.host = Utils.localHostname();
    } catch (UnknownHostException e) {
        throw new RuntimeException(e);
    }
}
Example 8: prepare
import backtype.storm.task.TopologyContext; // import the package/class the method depends on
@Override
public void prepare(Map conf, final TopologyContext context) {
    if (context.getThisTaskId() < 0) {
        LOG.debug("Skipping installation of metrics hook for negative task id {}", context.getThisTaskId());
    } else {
        int intervalSecs;
        LOG.info("Initializing metrics hook for task {}", context.getThisTaskId());
        this.sendgraphRef = new AtomicReference<>();
        intervalSecs = getConfiguredSchedulingIntervalSecs(conf);
        /*
         * We register one metric for each task. The full send graph will then be built up in the metric
         * consumer.
         */
        context.registerMetric(METRIC_EMITTED_MESSAGES, new IMetric() {
            @Override
            public Object getValueAndReset() {
                Map<Integer, AtomicLong> currentValue;
                // don't reset sendgraph! todo: make this configurable
                // currentValue = SchedulingMetricsCollectionHook.this.sendgraphRef.getAndSet(createEmptySendgraphMap());
                currentValue = SchedulingMetricsCollectionHook.this.sendgraphRef.get();
                LOG.trace("Reset values for task {} and returning: {}", context.getThisTaskId(), currentValue.toString());
                return currentValue;
            }
        }, intervalSecs); // call every n seconds
        // put an empty send graph object.
        this.sendgraphRef.compareAndSet(null, createEmptySendgraphMap());
        // Put a zero weight for the task at hand, so we have a complete send graph in the metrics. Without this
        // step, tasks that don't send or receive anything (for example the metrics consumers) would not be
        // contained in the send graph. todo: change the schedule format to contain task=>partition assignments, so
        // we could do away with this workaround
        this.sendgraphRef.get().get(context.getThisTaskId()).set(0);
    }
}
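The comment in Example 8 notes that the full send graph is assembled in a metric consumer. As rough orientation only (this consumer is not part of the quoted project; the class name and the printed format are illustrative), a registered backtype.storm.metric.api.IMetricsConsumer would receive the per-task map published above roughly like this:

import java.util.Collection;
import java.util.Map;

import backtype.storm.metric.api.IMetricsConsumer;
import backtype.storm.task.IErrorReporter;
import backtype.storm.task.TopologyContext;

// Hypothetical consumer sketch: it only shows where the sendgraph metric registered by the
// hook in Example 8 arrives; a real consumer would aggregate the maps into a full send graph.
public class SendgraphConsumerSketch implements IMetricsConsumer {
    @Override
    public void prepare(Map stormConf, Object registrationArgument, TopologyContext context, IErrorReporter errorReporter) {
        // no state needed for this sketch
    }

    @Override
    public void handleDataPoints(TaskInfo taskInfo, Collection<DataPoint> dataPoints) {
        for (DataPoint dataPoint : dataPoints) {
            // the value is the Map<Integer, AtomicLong> returned by getValueAndReset() in Example 8
            System.out.println("task " + taskInfo.srcTaskId + ": " + dataPoint.name + " = " + dataPoint.value);
        }
    }

    @Override
    public void cleanup() {
        // nothing to release
    }
}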
Example 9: prepare
import backtype.storm.task.TopologyContext; // import the package/class the method depends on
@Override
public void prepare(Map conf, TopologyContext topologyContext) {
    this.componentId = topologyContext.getThisComponentId();
    this.taskId = topologyContext.getThisTaskId();
}