本文整理汇总了Java中com.alibaba.jstorm.daemon.worker.WorkerData类的典型用法代码示例。如果您正苦于以下问题:Java WorkerData类的具体用法?Java WorkerData怎么用?Java WorkerData使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
WorkerData类属于com.alibaba.jstorm.daemon.worker包,在下文中一共展示了WorkerData类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: MkLocalShuffer
import com.alibaba.jstorm.daemon.worker.WorkerData; //导入依赖的package包/类
/**
 * Builds a local shuffle dispatcher. If any target task is co-located in this
 * worker, shuffling is restricted to those local tasks; otherwise it falls
 * back to shuffling across every output task.
 */
public MkLocalShuffer(List<Integer> workerTasks, List<Integer> allOutTasks, WorkerData workerData) {
    super(workerData);
    List<Integer> sameWorkerTargets = new ArrayList<Integer>();
    for (Integer candidate : allOutTasks) {
        if (workerTasks.contains(candidate)) {
            sameWorkerTargets.add(candidate);
        }
    }
    if (sameWorkerTargets.isEmpty()) {
        // No co-located targets: shuffle across the full output-task set.
        this.outTasks = new ArrayList<Integer>();
        this.outTasks.addAll(allOutTasks);
        isLocal = false;
    } else {
        this.outTasks = sameWorkerTargets;
        isLocal = true;
    }
    // Random selector sized to however many candidate tasks remain.
    randomrange = new RandomRange(outTasks.size());
}
示例2: MkGrouper
import com.alibaba.jstorm.daemon.worker.WorkerData; //导入依赖的package包/类
/**
 * Captures the grouping configuration of one outbound stream and resolves the
 * concrete grouping type from the thrift definition.
 */
public MkGrouper(TopologyContext _topology_context, Fields _out_fields,
        Grouping _thrift_grouping, List<Integer> _outTasks,
        String streamId, WorkerData workerData) {
    this.topology_context = _topology_context;
    this.out_fields = _out_fields;
    this.thrift_grouping = _thrift_grouping;
    this.streamId = streamId;
    // Private sorted copy so grouping decisions are deterministic and
    // insulated from later mutation of the caller's list.
    this.out_tasks = new ArrayList<Integer>(_outTasks);
    Collections.sort(this.out_tasks);
    this.local_tasks = _topology_context.getThisWorkerTasks();
    this.fields = Thrift.groupingType(thrift_grouping);
    this.grouptype = this.parseGroupType(workerData);
    String logTag = _topology_context.getThisTaskId() + ":" + streamId;
    LOG.info(logTag + " grouptype is " + grouptype);
}
示例3: WorkerHeartbeatRunable
import com.alibaba.jstorm.daemon.worker.WorkerData; //导入依赖的package包/类
/**
 * Prepares the worker heartbeat loop: snapshots the worker's identity
 * (id/port/topology/tasks) and computes the heartbeat frequency from config.
 */
public WorkerHeartbeatRunable(WorkerData workerData) {
    this.workerData = workerData;
    this.conf = workerData.getStormConf();
    this.worker_id = workerData.getWorkerId();
    this.port = workerData.getPort();
    this.topologyId = workerData.getTopologyId();
    // Copy-on-write set: the heartbeat thread iterates while others may mutate.
    this.task_ids = new CopyOnWriteArraySet<Integer>(workerData.getTaskids());
    this.shutdown = workerData.getShutdown();
    // Default to a 10s heartbeat when the config key is absent or unparsable.
    frequence = JStormUtils.parseInt(conf.get(Config.WORKER_HEARTBEAT_FREQUENCY_SECS), 10);
    this.workerStates = new HashMap<String, LocalState>();
}
示例4: MkLocalShuffer
import com.alibaba.jstorm.daemon.worker.WorkerData; //导入依赖的package包/类
/**
 * Builds a local shuffle dispatcher with a periodic (60s) re-check of which
 * tasks are local. Targets co-located in this worker take priority; otherwise
 * shuffling spans all output tasks.
 */
public MkLocalShuffer(List<Integer> workerTasks, List<Integer> allOutTasks,
        WorkerData workerData) {
    super(workerData);
    List<Integer> sameWorkerTargets = new ArrayList<Integer>();
    for (Integer candidate : allOutTasks) {
        if (workerTasks.contains(candidate)) {
            sameWorkerTargets.add(candidate);
        }
    }
    this.workerData = workerData;
    // Re-evaluate locality once per minute.
    intervalCheck = new IntervalCheck();
    intervalCheck.setInterval(60);
    if (sameWorkerTargets.isEmpty()) {
        this.outTasks = new ArrayList<Integer>();
        this.outTasks.addAll(allOutTasks);
        // Seed the node-local task view before first use.
        refreshLocalNodeTasks();
        isLocal = false;
    } else {
        this.outTasks = sameWorkerTargets;
        isLocal = true;
    }
    randomrange = new RandomRange(outTasks.size());
}
示例5: MkGrouper
import com.alibaba.jstorm.daemon.worker.WorkerData; //导入依赖的package包/类
/**
 * Captures the grouping configuration of one outbound stream, resolves the
 * grouping type, and logs the resulting task routing for diagnostics.
 */
public MkGrouper(TopologyContext _topology_context, Fields _out_fields, Grouping _thrift_grouping, List<Integer> _outTasks, String streamId,
        WorkerData workerData) {
    this.topology_context = _topology_context;
    this.out_fields = _out_fields;
    this.thrift_grouping = _thrift_grouping;
    this.streamId = streamId;
    // Private sorted copy, insulated from the caller's list.
    this.out_tasks = new ArrayList<Integer>(_outTasks);
    Collections.sort(this.out_tasks);
    this.local_tasks = _topology_context.getThisWorkerTasks();
    this.fields = Thrift.groupingType(thrift_grouping);
    this.grouptype = this.parseGroupType(workerData);
    String logTag = _topology_context.getThisTaskId() + ":" + streamId;
    LOG.info(logTag + " grouptype is " + grouptype + ", out_tasks is " + out_tasks + ", local_tasks" + local_tasks);
}
示例6: MkLocalShuffer
import com.alibaba.jstorm.daemon.worker.WorkerData; //导入依赖的package包/类
/**
 * Builds a local shuffle dispatcher. Records the complete target set, prefers
 * targets co-located in this worker, and re-checks locality every 60 seconds.
 */
public MkLocalShuffer(List<Integer> workerTasks, List<Integer> allOutTasks, WorkerData workerData) {
    super(workerData);
    List<Integer> sameWorkerTargets = new ArrayList<>();
    // Remember every potential target regardless of locality.
    allTargetTasks.addAll(allOutTasks);
    for (Integer candidate : allOutTasks) {
        if (workerTasks.contains(candidate)) {
            sameWorkerTargets.add(candidate);
        }
    }
    this.workerData = workerData;
    // Re-evaluate locality once per minute.
    intervalCheck = new IntervalCheck();
    intervalCheck.setInterval(60);
    if (sameWorkerTargets.isEmpty()) {
        this.outTasks = new ArrayList<>();
        this.outTasks.addAll(allOutTasks);
        // Seed the node-local task view before first use.
        refreshLocalNodeTasks();
        isLocal = false;
    } else {
        this.outTasks = sameWorkerTargets;
        isLocal = true;
    }
    randomrange = new RandomRange(outTasks.size());
}
示例7: MkGrouper
import com.alibaba.jstorm.daemon.worker.WorkerData; //导入依赖的package包/类
/**
 * Captures the grouping configuration for one outbound stream toward
 * {@code targetComponent}: looks up that component's tasks, resolves the
 * grouping type, and logs the resulting routing for diagnostics.
 */
public MkGrouper(TopologyContext _topology_context, Fields _out_fields, Grouping _thrift_grouping,
        String targetComponent, String streamId, WorkerData workerData) {
    this.topologyContext = _topology_context;
    this.outFields = _out_fields;
    this.thriftGrouping = _thrift_grouping;
    this.streamId = streamId;
    this.targetComponent = targetComponent;
    // Sorted private copy of the target component's task ids.
    List<Integer> targetTasks = topologyContext.getComponentTasks(targetComponent);
    this.outTasks = new ArrayList<>(targetTasks);
    Collections.sort(this.outTasks);
    this.localTasks = _topology_context.getThisWorkerTasks();
    this.fields = Thrift.groupingType(thriftGrouping);
    this.groupType = this.parseGroupType(workerData);
    String logTag = _topology_context.getThisTaskId() + ":" + streamId;
    LOG.info(logTag + " groupType is " + groupType + ", outTasks is " + this.outTasks + ", localTasks" + localTasks);
}
示例8: StormMetricReporter
import com.alibaba.jstorm.daemon.worker.WorkerData; //导入依赖的package包/类
/**
 * Private constructor used by the builder: delegates scheduling/registry
 * wiring to the superclass and keeps the logging sink plus worker handle.
 */
private StormMetricReporter(MetricRegistry registry,
        Logger logger,
        Marker marker,
        TimeUnit rateUnit,
        TimeUnit durationUnit,
        MetricFilter filter,
        WorkerData workerData) {
    super(registry, "logger-reporter", filter, rateUnit, durationUnit);
    // Destination and context for reported metrics.
    this.workerData = workerData;
    this.marker = marker;
    this.logger = logger;
}
示例9: MetricReporter
import com.alibaba.jstorm.daemon.worker.WorkerData; //导入依赖的package包/类
// Wires up the two periodic metric reporters for this worker:
// a 1-minute reporter over the main metrics registry (worker-aware), and a
// 10-minute SLF4J reporter over the jstack registry. Neither is started here.
public MetricReporter(WorkerData workerData) {
this.workerData = workerData;
// Worker-aware reporter; rates in events/sec, durations in milliseconds.
reporter1Minute = StormMetricReporter.forRegistry(Metrics.getMetrics())
.outputTo(LoggerFactory.getLogger(MetricReporter.class))
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.setWorkerData(workerData).build();
// Plain SLF4J reporter for jstack metrics, same units, no worker context.
reporter10Minute = Slf4jReporter.forRegistry(Metrics.getJstack())
.outputTo(LoggerFactory.getLogger(MetricReporter.class))
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
示例10: WorkerHeartbeatRunable
import com.alibaba.jstorm.daemon.worker.WorkerData; //导入依赖的package包/类
/**
 * Prepares the worker heartbeat loop: snapshots the worker's identity
 * (id/port/topology/tasks), keeps the active flag, and computes the heartbeat
 * frequency from config (default 10s).
 */
public WorkerHeartbeatRunable(WorkerData workerData) {
    this.workerData = workerData;
    this.conf = workerData.getConf();
    this.worker_id = workerData.getWorkerId();
    this.port = workerData.getPort();
    this.topologyId = workerData.getTopologyId();
    // Copy-on-write set: the heartbeat thread iterates while others may mutate.
    this.task_ids = new CopyOnWriteArraySet<Integer>(
            workerData.getTaskids());
    this.active = workerData.getActive();
    frequence = JStormUtils.parseInt(conf.get(Config.WORKER_HEARTBEAT_FREQUENCY_SECS), 10);
}
示例11: TaskHeartbeatRunable
import com.alibaba.jstorm.daemon.worker.WorkerData; //导入依赖的package包/类
/**
 * Prepares the task heartbeat loop: captures the ZK cluster handle, topology
 * id, uptime tracker, config, and active flag, and computes the heartbeat
 * frequency from config (default 10s).
 */
public TaskHeartbeatRunable(WorkerData workerData) {
    this.zkCluster = workerData.getZkCluster();
    this.topology_id = workerData.getTopologyId();
    // Fixed: removed stray empty statement (";;") after this initializer.
    this.uptime = new UptimeComputer();
    this.storm_conf = workerData.getStormConf();
    this.active = workerData.getActive();
    // Default to a 10s heartbeat when the key is absent or unparsable.
    frequence = JStormUtils.parseInt(storm_conf.get(Config.TASK_HEARTBEAT_FREQUENCY_SECS), 10);
}
示例12: Task
import com.alibaba.jstorm.daemon.worker.WorkerData; //导入依赖的package包/类
/**
 * Assembles one task (spout/bolt) inside the worker: builds system and user
 * topology contexts, wires send/receive queues, connects to ZK state, merges
 * component config, and instantiates the user's task object under the
 * topology classloader.
 *
 * @param workerData shared state of the hosting worker
 * @param taskId     id of the task being created
 * @throws Exception on context/ZK/task-object creation failure
 */
public Task(WorkerData workerData, int taskId) throws Exception {
    openOrPrepareWasCalled = new Atom(Boolean.valueOf(false));
    this.workerData = workerData;
    // System context (includes system streams) vs. user-visible context.
    this.topologyContext = workerData.getContextMaker()
            .makeTopologyContext(workerData.getSysTopology(), taskId,
                    openOrPrepareWasCalled);
    this.userContext = workerData.getContextMaker().makeTopologyContext(
            workerData.getRawTopology(), taskId, openOrPrepareWasCalled);
    this.taskid = taskId;
    this.componentid = topologyContext.getThisComponentId();
    this.taskStatus = new TaskStatus();
    this.taskTransfer = getSendingTransfer(workerData);
    this.innerTaskTransfer = workerData.getInnerTaskTransfer();
    this.deserializeQueues = workerData.getDeserializeQueues();
    this.topologyid = workerData.getTopologyId();
    this.context = workerData.getContext();
    this.workHalt = workerData.getWorkHalt();
    this.zkCluster = new StormZkClusterState(workerData.getZkClusterstate());
    // Component-level config layered over the topology config.
    this.stormConf = Common.component_conf(workerData.getStormConf(),
            topologyContext, componentid);
    WorkerClassLoader.switchThreadContext();
    try {
        // get real task object -- spout/bolt/spoutspec
        this.taskObj = Common.get_task_object(topologyContext.getRawTopology(),
                componentid, WorkerClassLoader.getInstance());
    } finally {
        // Fixed: always restore the thread-context classloader, even when
        // task-object creation throws; previously a failure leaked the
        // switched context onto this thread.
        WorkerClassLoader.restoreThreadContext();
    }
    int samplerate = StormConfig.sampling_rate(stormConf);
    this.taskStats = new CommonStatsRolling(samplerate);
    LOG.info("Loading task " + componentid + ":" + taskid);
}
示例13: getSendingTransfer
import com.alibaba.jstorm.daemon.worker.WorkerData; //导入依赖的package包/类
/**
 * Creates the outbound transfer object for this task: every tuple the task
 * emits is serialized by the returned transfer's Kryo serializer.
 */
private TaskTransfer getSendingTransfer(WorkerData workerData) {
    String threadName = JStormServerUtils.getName(componentid, taskid);
    // Serializer for all tuples this task sends out.
    KryoTupleSerializer tupleSerializer = new KryoTupleSerializer(
            workerData.getStormConf(), topologyContext);
    return new TaskTransfer(threadName, tupleSerializer, taskStatus, workerData);
}
示例14: TaskTransfer
import com.alibaba.jstorm.daemon.worker.WorkerData; //导入依赖的package包/类
// Sets up the per-task sending pipeline: a disruptor serialize queue sized by
// config, queue/timer metrics, and a background thread that drains the queue.
// NOTE: initialization order matters — the queue must exist and have
// consumerStarted() called before the serialize thread is launched.
public TaskTransfer(String taskName,
KryoTupleSerializer serializer, TaskStatus taskStatus,
WorkerData workerData) {
this.taskName = taskName;
this.serializer = serializer;
this.taskStatus = taskStatus;
this.storm_conf = workerData.getConf();
this.transferQueue = workerData.getTransferQueue();
this.innerTaskTransfer = workerData.getInnerTaskTransfer();
// Queue capacity comes from the executor send-buffer config.
int queue_size = Utils.getInt(storm_conf
.get(Config.TOPOLOGY_EXECUTOR_SEND_BUFFER_SIZE));
// Wait strategy class name is configurable; instantiated reflectively.
WaitStrategy waitStrategy = (WaitStrategy) Utils
.newInstance((String) storm_conf
.get(Config.TOPOLOGY_DISRUPTOR_WAIT_STRATEGY));
// MULTI producer: several emitters may publish into this queue.
this.serializeQueue = DisruptorQueue.mkInstance(taskName, ProducerType.MULTI,
queue_size, waitStrategy);
this.serializeQueue.consumerStarted();
// taskName is "<component>:<taskId>"; extract the id part for metrics keys.
String taskId = taskName.substring(taskName.indexOf(":") + 1);
Metrics.registerQueue(taskName, MetricDef.SERIALIZE_QUEUE, serializeQueue, taskId, Metrics.MetricType.TASK);
timer = Metrics.registerTimer(taskName, MetricDef.SERIALIZE_TIME, taskId, Metrics.MetricType.TASK);
// Background loop that serializes and forwards queued tuples.
serializeThread = new AsyncLoopThread(new TransferRunnable());
LOG.info("Successfully start TaskTransfer thread");
}
示例15: Task
import com.alibaba.jstorm.daemon.worker.WorkerData; //导入依赖的package包/类
/**
 * Assembles one task (spout/bolt) inside the worker: builds system and user
 * topology contexts, wires send/receive queues, connects to ZK state, merges
 * component config, and instantiates the user's task object under the
 * topology classloader.
 *
 * @param workerData shared state of the hosting worker
 * @param taskId     id of the task being created
 * @throws Exception on context/ZK/task-object creation failure
 */
@SuppressWarnings("rawtypes")
public Task(WorkerData workerData, int taskId) throws Exception {
    openOrPrepareWasCalled = new Atom(Boolean.valueOf(false));
    this.workerData = workerData;
    // System context (includes system streams) vs. user-visible context.
    this.topologyContext = workerData.getContextMaker()
            .makeTopologyContext(workerData.getSysTopology(), taskId,
                    openOrPrepareWasCalled);
    this.userContext = workerData.getContextMaker().makeTopologyContext(
            workerData.getRawTopology(), taskId, openOrPrepareWasCalled);
    this.taskid = taskId;
    this.componentid = topologyContext.getThisComponentId();
    this.taskStatus = new TaskStatus();
    this.taskTransfer = getSendingTransfer(workerData);
    this.innerTaskTransfer = workerData.getInnerTaskTransfer();
    this.deserializeQueues = workerData.getDeserializeQueues();
    this.topologyid = workerData.getTopologyId();
    this.context = workerData.getContext();
    this.workHalt = workerData.getWorkHalt();
    this.zkCluster = new StormZkClusterState(workerData.getZkClusterstate());
    // Component-level config layered over the topology config.
    this.stormConf = Common.component_conf(workerData.getStormConf(),
            topologyContext, componentid);
    WorkerClassLoader.switchThreadContext();
    try {
        // get real task object -- spout/bolt/spoutspec
        this.taskObj = Common.get_task_object(topologyContext.getRawTopology(),
                componentid, WorkerClassLoader.getInstance());
    } finally {
        // Fixed: always restore the thread-context classloader, even when
        // task-object creation throws; previously a failure leaked the
        // switched context onto this thread.
        WorkerClassLoader.restoreThreadContext();
    }
    int samplerate = StormConfig.sampling_rate(stormConf);
    this.taskStats = new CommonStatsRolling(samplerate);
    LOG.info("Loading task " + componentid + ":" + taskid);
}