本文整理汇总了Java中backtype.storm.generated.Grouping类的典型用法代码示例。如果您正苦于以下问题:Java Grouping类的具体用法?Java Grouping怎么用?Java Grouping使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
Grouping类属于backtype.storm.generated包,在下文中一共展示了Grouping类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: MkGrouper
import backtype.storm.generated.Grouping; //导入依赖的package包/类
/**
 * Builds the grouper that routes tuples of one output stream to the set of
 * consuming tasks.
 *
 * @param _topology_context context of the emitting task
 * @param _out_fields       declared output fields of the stream
 * @param _thrift_grouping  thrift-level grouping definition for the stream
 * @param _outTasks         ids of the tasks consuming this stream
 * @param streamId          id of the output stream being grouped
 * @param workerData        worker-level state used when resolving the group type
 */
public MkGrouper(TopologyContext _topology_context, Fields _out_fields,
Grouping _thrift_grouping, List<Integer> _outTasks,
String streamId, WorkerData workerData) {
this.topology_context = _topology_context;
this.out_fields = _out_fields;
this.thrift_grouping = _thrift_grouping;
this.streamId = streamId;
// Copy and sort the target task ids so downstream index-based routing is
// deterministic regardless of the order the caller supplied them in.
this.out_tasks = new ArrayList<Integer>();
this.out_tasks.addAll(_outTasks);
Collections.sort(this.out_tasks);
this.local_tasks = _topology_context.getThisWorkerTasks();
// NOTE(review): groupingType() result is stored in a field named "fields" —
// presumably the thrift union discriminator; confirm against Thrift helper.
this.fields = Thrift.groupingType(thrift_grouping);
this.grouptype = this.parseGroupType(workerData);
String id = _topology_context.getThisTaskId() + ":" + streamId;
LOG.info(id + " grouptype is " + grouptype);
}
示例2: getTargets
import backtype.storm.generated.Grouping; //导入依赖的package包/类
/**
 * Gets information about who is consuming the outputs of the specified
 * component, and how.
 *
 * @param componentId component whose consumers are being looked up
 * @return Map from stream id to consuming component id to the Grouping used.
 */
public Map<String, Map<String, Grouping>> getTargets(String componentId) {
    Map<String, Map<String, Grouping>> result = new HashMap<String, Map<String, Grouping>>();
    // Scan every component's declared inputs for references back to componentId.
    for (String consumerId : getComponentIds()) {
        Map<GlobalStreamId, Grouping> inputs = getComponentCommon(consumerId).get_inputs();
        for (Entry<GlobalStreamId, Grouping> input : inputs.entrySet()) {
            GlobalStreamId stream = input.getKey();
            if (!stream.get_componentId().equals(componentId)) {
                continue;
            }
            Map<String, Grouping> byConsumer = result.get(stream.get_streamId());
            if (byConsumer == null) {
                byConsumer = new HashMap<String, Grouping>();
                result.put(stream.get_streamId(), byConsumer);
            }
            byConsumer.put(consumerId, input.getValue());
        }
    }
    return result;
}
示例3: prepare
import backtype.storm.generated.Grouping; //导入依赖的package包/类
@Override
public void prepare(@SuppressWarnings("rawtypes") Map arg0, TopologyContext arg1, OutputCollector arg2) {
    // Each task of each producer component feeds this bolt one logical input
    // partition; collect all of their task ids.
    LinkedList<Integer> producerTaskIds = new LinkedList<Integer>();
    for (Entry<GlobalStreamId, Grouping> source : arg1.getThisSources().entrySet()) {
        String producerComponent = source.getKey().get_componentId();
        producerTaskIds.addAll(arg1.getComponentTasks(producerComponent));
    }
    logger.debug("Detected producer tasks: {}", producerTaskIds);
    // Exactly one timestamp source is expected to be configured: a field
    // index, a field name, or an extractor. Build the merger accordingly.
    if (this.tsIndex != -1) {
        assert (this.tsAttributeName == null && this.tsExtractor == null);
        this.merger = new StreamMerger<Tuple>(producerTaskIds, this.tsIndex);
    } else if (this.tsAttributeName != null) {
        assert (this.tsExtractor == null);
        this.merger = new StreamMerger<Tuple>(producerTaskIds, this.tsAttributeName);
    } else {
        assert (this.tsExtractor != null);
        this.merger = new StreamMerger<Tuple>(producerTaskIds, this.tsExtractor);
    }
    this.wrappedBolt.prepare(arg0, arg1, arg2);
}
示例4: worker_output_tasks
import backtype.storm.generated.Grouping; //导入依赖的package包/类
/**
 * Collects the ids of all tasks that receive output from any task running
 * inside this worker.
 *
 * @param workerData worker whose outbound task set is computed
 * @return set of downstream task ids
 */
public static Set<Integer> worker_output_tasks(WorkerData workerData) {
    ContextMaker contextMaker = workerData.getContextMaker();
    StormTopology sysTopology = workerData.getSysTopology();
    Set<Integer> downstream = new HashSet<Integer>();
    for (Integer taskId : workerData.getTaskids()) {
        TopologyContext ctx = contextMaker.makeTopologyContext(sysTopology, taskId, null);
        // Targets are keyed <StreamId, <ComponentId, Grouping>>; we only need
        // the consuming component ids to resolve their task lists.
        for (Map<String, Grouping> byComponent : ctx.getThisTargets().values()) {
            for (String componentId : byComponent.keySet()) {
                downstream.addAll(ctx.getComponentTasks(componentId));
            }
        }
    }
    return downstream;
}
示例5: prepare
import backtype.storm.generated.Grouping; //导入依赖的package包/类
// Sets up timeout-aware tuple tracking and coordination bookkeeping before
// delegating to the wrapped bolt.
public void prepare(Map config, TopologyContext context, OutputCollector collector) {
// If the delegate wants to hear about timeouts, expired cache entries are
// forwarded to it via this callback; otherwise expirations are silent.
TimeCacheMap.ExpiredCallback<Object, TrackingInfo> callback = null;
if (_delegate instanceof TimeoutCallback) {
callback = new TimeoutItems();
}
// Tracked entries live at most the topology's max message timeout.
_tracked = new TimeCacheMap<Object, TrackingInfo>(context.maxTopologyMessageTimeout(), callback);
_collector = collector;
// The delegate emits through a coordination-aware wrapper collector.
_delegate.prepare(config, context, new OutputCollector(new CoordinatedOutputCollector(collector)));
// Record every task that consumes our coordination stream (empty map
// default when no component subscribes to it).
for (String component : Utils.get(context.getThisTargets(), Constants.COORDINATED_STREAM_ID, new HashMap<String, Grouping>()).keySet()) {
for (Integer task : context.getComponentTasks(component)) {
_countOutTasks.add(task);
}
}
if (!_sourceArgs.isEmpty()) {
// Expected source reports: one per source with singleCount set,
// otherwise one per task of that source component.
_numSourceReports = 0;
for (Entry<String, SourceArgs> entry : _sourceArgs.entrySet()) {
if (entry.getValue().singleCount) {
_numSourceReports += 1;
} else {
_numSourceReports += context.getComponentTasks(entry.getKey()).size();
}
}
}
}
示例6: fieldGrouping
import backtype.storm.generated.Grouping; //导入依赖的package包/类
/**
 * Returns the field names of a fields grouping.
 *
 * @param grouping thrift grouping to inspect
 * @return the grouping's field list
 * @throws IllegalArgumentException if the grouping is not a fields grouping
 */
public static List<String> fieldGrouping(Grouping grouping) {
    if (Grouping._Fields.FIELDS.equals(groupingType(grouping))) {
        return grouping.get_fields();
    }
    throw new IllegalArgumentException(
            "Tried to get grouping fields from non fields grouping");
}
示例7: isGlobalGrouping
import backtype.storm.generated.Grouping; //导入依赖的package包/类
/**
 * Tells whether the given grouping is a global grouping, which is encoded
 * as a fields grouping with an empty field list.
 */
public static boolean isGlobalGrouping(Grouping grouping) {
    return Grouping._Fields.FIELDS.equals(groupingType(grouping))
            && fieldGrouping(grouping).isEmpty();
}
示例8: mkComponentcommon
import backtype.storm.generated.Grouping; //导入依赖的package包/类
/**
 * Assembles a ComponentCommon from the given inputs and output spec,
 * applying the parallelism hint only when one is supplied.
 *
 * @param inputs           stream-to-grouping input wiring
 * @param output_spec      declared output streams
 * @param parallelism_hint optional parallelism; null leaves the hint unset
 */
private static ComponentCommon mkComponentcommon(
        Map<GlobalStreamId, Grouping> inputs,
        HashMap<String, StreamInfo> output_spec, Integer parallelism_hint) {
    ComponentCommon common = new ComponentCommon(inputs, output_spec);
    if (parallelism_hint != null) {
        common.set_parallelism_hint(parallelism_hint);
    }
    return common;
}
示例9: worker_output_tasks
import backtype.storm.generated.Grouping; //导入依赖的package包/类
/**
 * Gets the ids of every task that consumes output from the tasks running
 * inside this worker.
 *
 * @return set of downstream (output) task ids for this worker
 */
public Set<Integer> worker_output_tasks() {
ContextMaker context_maker = workerData.getContextMaker();
Set<Integer> task_ids = workerData.getTaskids();
StormTopology topology = workerData.getSysTopology();
Set<Integer> rtn = new HashSet<Integer>();
// Build a per-task context to resolve that task's consumers.
for (Integer taskid : task_ids) {
TopologyContext context = context_maker.makeTopologyContext(
topology, taskid, null);
// Targets are keyed <StreamId, <ComponentId, Grouping>>.
Map<String, Map<String, Grouping>> targets = context
.getThisTargets();
for (Map<String, Grouping> e : targets.values()) {
for (String componentId : e.keySet()) {
List<Integer> tasks = context
.getComponentTasks(componentId);
rtn.addAll(tasks);
}
}
}
return rtn;
}
示例10: add_system_components
import backtype.storm.generated.Grouping; //导入依赖的package包/类
/**
 * Registers the system bolt and its system streams (tick, metrics,
 * credentials) with the given topology.
 *
 * @param topology topology to mutate
 * @return the same topology instance, with the system component added
 */
public static StormTopology add_system_components(StormTopology topology) {
    // The system bolt consumes nothing; it only emits the system streams.
    Map<GlobalStreamId, Grouping> inputs = new HashMap<GlobalStreamId, Grouping>();
    HashMap<String, StreamInfo> outputs = new HashMap<String, StreamInfo>();
    outputs.put(Constants.SYSTEM_TICK_STREAM_ID,
            Thrift.outputFields(JStormUtils.mk_list("rate_secs")));
    outputs.put(Constants.METRICS_TICK_STREAM_ID,
            Thrift.outputFields(JStormUtils.mk_list("interval")));
    outputs.put(Constants.CREDENTIALS_CHANGED_STREAM_ID,
            Thrift.outputFields(JStormUtils.mk_list("creds")));
    // Parallelism 0: the system bolt runs implicitly, not as scheduled tasks.
    Bolt bolt = Thrift.mkBolt(inputs, new SystemBolt(), outputs,
            Integer.valueOf(0));
    topology.put_to_bolts(Constants.SYSTEM_COMPONENT_ID, bolt);
    add_system_streams(topology);
    return topology;
}
示例11: buildSpoutOutoputAndBoltInputMap
import backtype.storm.generated.Grouping; //导入依赖的package包/类
/**
 * Builds the producer/consumer relationship map: for each bolt, the set of
 * components it reads from; for each spout, the set of bolts it feeds.
 * <Bolt_name, <Input_name>> / <Spout_name, <Output_name>>
 *
 * @param context assignment context holding the raw topology
 * @return component name mapped to its related component names
 */
public static Map<String, Set<String>> buildSpoutOutoputAndBoltInputMap(
        DefaultTopologyAssignContext context) {
    Set<String> boltNames = context.getRawTopology().get_bolts().keySet();
    Set<String> spoutNames = context.getRawTopology().get_spouts().keySet();
    Map<String, Set<String>> relationship = new HashMap<String, Set<String>>();
    for (Entry<String, Bolt> boltEntry : context.getRawTopology().get_bolts()
            .entrySet()) {
        String boltName = boltEntry.getKey();
        Set<String> boltInputs = new HashSet<String>();
        relationship.put(boltName, boltInputs);
        Map<GlobalStreamId, Grouping> inputs = boltEntry.getValue()
                .get_common().get_inputs();
        for (Entry<GlobalStreamId, Grouping> in : inputs.entrySet()) {
            String producer = in.getKey().get_componentId();
            boltInputs.add(producer);
            if (boltNames.contains(producer)) {
                continue; // producer is another bolt, not a spout
            }
            // Producer is a spout: record this bolt among its outputs.
            Set<String> spoutOutputs = relationship.get(producer);
            if (spoutOutputs == null) {
                spoutOutputs = new HashSet<String>();
                relationship.put(producer, spoutOutputs);
            }
            spoutOutputs.add(boltName);
        }
    }
    // Components with no recorded relationship map to an empty set.
    for (String spout : spoutNames) {
        if (!relationship.containsKey(spout)) {
            relationship.put(spout, new HashSet<String>());
        }
    }
    for (String bolt : boltNames) {
        if (!relationship.containsKey(bolt)) {
            relationship.put(bolt, new HashSet<String>());
        }
    }
    return relationship;
}
示例12: partition
import backtype.storm.generated.Grouping; //导入依赖的package包/类
/**
 * Repartitions the stream according to the given grouping.
 */
public Stream partition(Grouping grouping) {
    // A PartitionNode cannot be repartitioned directly: insert a no-op
    // "each" first so the new partitioning applies to a fresh node.
    if (!(_node instanceof PartitionNode)) {
        return _topology.addSourcedNode(this,
                new PartitionNode(_node.streamId, _name, getOutputFields(), grouping));
    }
    return each(new Fields(), new TrueFilter()).partition(grouping);
}
示例13: mkAckerBolt
import backtype.storm.generated.Grouping; //导入依赖的package包/类
/**
 * Builds the thrift Bolt structure for the acker: the bolt implementation
 * travels as serialized Java bytes inside a ComponentObject.
 *
 * @param inputs stream-to-grouping wiring of the acker's inputs
 * @param bolt   acker bolt implementation to serialize
 * @param output declared output streams
 * @param p      parallelism hint (null leaves it unset)
 */
public static Bolt mkAckerBolt(Map<GlobalStreamId, Grouping> inputs,
        IBolt bolt, HashMap<String, StreamInfo> output, Integer p) {
    ComponentObject component = ComponentObject.serialized_java(Utils.serialize(bolt));
    return new Bolt(component, mkAckerComponentcommon(inputs, output, p));
}
示例14: prepare
import backtype.storm.generated.Grouping; //导入依赖的package包/类
@Override
// Sets up timeout-aware tuple tracking and coordination bookkeeping before
// delegating to the wrapped bolt.
public void prepare(Map config, TopologyContext context,
OutputCollector collector) {
// If the delegate wants to hear about timeouts, expired cache entries
// are forwarded to it via this callback; otherwise expirations are silent.
TimeCacheMap.ExpiredCallback<Object, TrackingInfo> callback = null;
if (_delegate instanceof TimeoutCallback) {
callback = new TimeoutItems();
}
// Tracked entries live at most the topology's max message timeout.
_tracked = new TimeCacheMap<Object, TrackingInfo>(
context.maxTopologyMessageTimeout(), callback);
_collector = collector;
// The delegate emits through a coordination-aware wrapper collector.
_delegate.prepare(config, context, new OutputCollector(
new CoordinatedOutputCollector(collector)));
// Record every task that consumes our coordination stream (empty map
// default when no component subscribes to it).
for (String component : Utils.get(context.getThisTargets(),
Constants.COORDINATED_STREAM_ID,
new HashMap<String, Grouping>()).keySet()) {
for (Integer task : context.getComponentTasks(component)) {
_countOutTasks.add(task);
}
}
if (!_sourceArgs.isEmpty()) {
// Expected source reports: one per source with singleCount set,
// otherwise one per task of that source component.
_numSourceReports = 0;
for (Entry<String, SourceArgs> entry : _sourceArgs.entrySet()) {
if (entry.getValue().singleCount) {
_numSourceReports += 1;
} else {
_numSourceReports += context.getComponentTasks(
entry.getKey()).size();
}
}
}
}
示例15: initCommon
import backtype.storm.generated.Grouping; //导入依赖的package包/类
/**
 * Initializes and registers the ComponentCommon for a newly added component.
 *
 * @param id          component id to register under
 * @param component   component whose configuration is serialized into the common
 * @param parallelism optional parallelism hint; defaults to 1 when null
 */
private void initCommon(String id, IComponent component, Number parallelism) {
    ComponentCommon common = new ComponentCommon();
    // Inputs start empty; they are wired up later.
    common.set_inputs(new HashMap<GlobalStreamId, Grouping>());
    common.set_parallelism_hint(parallelism == null ? 1 : parallelism.intValue());
    Map conf = component.getComponentConfiguration();
    if (conf != null) {
        common.set_json_conf(Utils.to_json(conf));
    }
    _commons.put(id, common);
}