本文整理汇总了Java中backtype.storm.generated.GlobalStreamId类的典型用法代码示例。如果您正苦于以下问题:Java GlobalStreamId类的具体用法?Java GlobalStreamId怎么用?Java GlobalStreamId使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
GlobalStreamId类属于backtype.storm.generated包,在下文中一共展示了GlobalStreamId类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: recv_tuple
import backtype.storm.generated.GlobalStreamId; //导入依赖的package包/类
/**
 * Samples a received tuple for the given component/stream pair and, when the
 * per-stream sampler reports a TPS value, records it as recv_tps.
 */
public void recv_tuple(String component, String stream) {
	if (!enable) {
		return;
	}

	// Lazily build the nested component -> stream -> sampler structure.
	Map<String, EventSampler> perStream = recvTpsSamplers.get(component);
	if (perStream == null) {
		perStream = new HashMap<String, EventSampler>();
		recvTpsSamplers.put(component, perStream);
	}
	EventSampler streamSampler = perStream.get(stream);
	if (streamSampler == null) {
		streamSampler = new EventSampler(rate);
		perStream.put(stream, streamSampler);
	}

	// tpsCheck() returns null until the sampler decides to emit a value.
	Integer tps = streamSampler.tpsCheck();
	if (tps == null) {
		return;
	}
	update_task_stat(StaticsType.recv_tps,
			new GlobalStreamId(component, stream), tps);
}
示例2: execute
import backtype.storm.generated.GlobalStreamId; //导入依赖的package包/类
/**
 * Buffers one tuple per source stream under the tuple's join-id fields; once
 * every source has contributed, emits the joined output fields anchored to
 * all buffered tuples and acks each of them.
 */
@Override
public void execute(Tuple tuple) {
	List<Object> id = tuple.select(_idFields);
	GlobalStreamId source =
			new GlobalStreamId(tuple.getSourceComponent(), tuple.getSourceStreamId());

	Map<GlobalStreamId, Tuple> parts = _pending.get(id);
	if (parts == null) {
		parts = new HashMap<GlobalStreamId, Tuple>();
		_pending.put(id, parts);
	}
	if (parts.containsKey(source)) {
		throw new RuntimeException("Received same side of single join twice");
	}
	parts.put(source, tuple);

	if (parts.size() != _numSources) {
		return; // still waiting for the other side(s) of the join
	}
	_pending.remove(id);

	// Assemble the output values from whichever source owns each field.
	List<Object> joined = new ArrayList<Object>();
	for (String outField : _outFields) {
		GlobalStreamId owner = _fieldLocations.get(outField);
		joined.add(parts.get(owner).getValueByField(outField));
	}
	_collector.emit(new ArrayList<Tuple>(parts.values()), joined);
	for (Tuple piece : parts.values()) {
		_collector.ack(piece);
	}
}
示例3: getTargets
import backtype.storm.generated.GlobalStreamId; //导入依赖的package包/类
/**
 * Gets information about who is consuming the outputs of the specified
 * component, and how.
 *
 * @param componentId the producer component whose consumers are wanted
 * @return Map from stream id to consumer component id to the Grouping used.
 */
public Map<String, Map<String, Grouping>> getTargets(String componentId) {
	Map<String, Map<String, Grouping>> ret = new HashMap<String, Map<String, Grouping>>();
	for (String consumerId : getComponentIds()) {
		Map<GlobalStreamId, Grouping> inputs =
				getComponentCommon(consumerId).get_inputs();
		for (Entry<GlobalStreamId, Grouping> input : inputs.entrySet()) {
			GlobalStreamId sourceStream = input.getKey();
			// Only inputs fed by the requested component are of interest.
			if (!sourceStream.get_componentId().equals(componentId)) {
				continue;
			}
			Map<String, Grouping> consumers = ret.get(sourceStream.get_streamId());
			if (consumers == null) {
				consumers = new HashMap<String, Grouping>();
				ret.put(sourceStream.get_streamId(), consumers);
			}
			consumers.put(consumerId, input.getValue());
		}
	}
	return ret;
}
示例4: spout_failed_tuple
import backtype.storm.generated.GlobalStreamId; //导入依赖的package包/类
/**
 * Samples a failed spout tuple on the given stream and, when the sampler
 * fires, records the failure count keyed by the acker component's stream id.
 */
public void spout_failed_tuple(String stream) {
	if (!enable) {
		return;
	}

	EventSampler streamSampler = spoutFailedSamplers.get(stream);
	if (streamSampler == null) {
		streamSampler = new EventSampler(rate);
		spoutFailedSamplers.put(stream, streamSampler);
	}

	// timesCheck() returns null until the sampler decides to emit a count.
	Integer failures = streamSampler.timesCheck();
	if (failures != null) {
		update_task_stat(StaticsType.failed,
				new GlobalStreamId(Common.ACKER_COMPONENT_ID, stream), failures);
	}
}
示例5: bolt_failed_tuple
import backtype.storm.generated.GlobalStreamId; //导入依赖的package包/类
/**
 * Samples a failed bolt tuple for the given component/stream pair and, when
 * the sampler fires, records the failure count under that stream id.
 */
public void bolt_failed_tuple(String component, String stream) {
	if (!enable) {
		return;
	}

	// Lazily build the nested component -> stream -> sampler structure.
	Map<String, EventSampler> perStream = boltFailedSamplers.get(component);
	if (perStream == null) {
		perStream = new HashMap<String, EventSampler>();
		boltFailedSamplers.put(component, perStream);
	}
	EventSampler streamSampler = perStream.get(stream);
	if (streamSampler == null) {
		streamSampler = new EventSampler(rate);
		perStream.put(stream, streamSampler);
	}

	// timesCheck() returns null until the sampler decides to emit a count.
	Integer failures = streamSampler.timesCheck();
	if (failures != null) {
		update_task_stat(StaticsType.failed,
				new GlobalStreamId(component, stream), failures);
	}
}
示例6: prepare
import backtype.storm.generated.GlobalStreamId; //导入依赖的package包/类
/**
 * Gathers the task ids of every upstream producer, builds the stream merger
 * from whichever timestamp configuration was supplied (index, attribute name,
 * or extractor -- exactly one is expected), then prepares the wrapped bolt.
 */
@Override
public void prepare(@SuppressWarnings("rawtypes") Map arg0, TopologyContext arg1, OutputCollector arg2) {
	// One input partition per producer task: expand each logical input
	// stream into the concrete task ids of its producer component.
	LinkedList<Integer> producerTasks = new LinkedList<Integer>();
	for(Entry<GlobalStreamId, Grouping> input : arg1.getThisSources().entrySet()) {
		producerTasks.addAll(arg1.getComponentTasks(input.getKey().get_componentId()));
	}
	logger.debug("Detected producer tasks: {}", producerTasks);

	if(this.tsIndex != -1) {
		// timestamp taken from a fixed tuple index
		assert (this.tsAttributeName == null && this.tsExtractor == null);
		this.merger = new StreamMerger<Tuple>(producerTasks, this.tsIndex);
	} else if(this.tsAttributeName != null) {
		// timestamp taken from a named attribute
		assert (this.tsExtractor == null);
		this.merger = new StreamMerger<Tuple>(producerTasks, this.tsAttributeName);
	} else {
		// timestamp computed by a user-supplied extractor
		assert (this.tsExtractor != null);
		this.merger = new StreamMerger<Tuple>(producerTasks, this.tsExtractor);
	}
	this.wrappedBolt.prepare(arg0, arg1, arg2);
}
示例7: MkCustomGrouper
import backtype.storm.generated.GlobalStreamId; //导入依赖的package包/类
/**
 * Wraps a user-supplied CustomStreamGrouping for this task, preparing it
 * immediately for the given stream and target task list.
 */
public MkCustomGrouper(TopologyContext context,
		CustomStreamGrouping _grouping, GlobalStreamId stream,
		List<Integer> targetTask, int myTaskId) {
	this.grouping = _grouping;
	this.myTaskId = myTaskId;
	// The custom grouping must be prepared before it can route tuples.
	this.grouping.prepare(context, stream, targetTask);
}
示例8: prepare
import backtype.storm.generated.GlobalStreamId; //导入依赖的package包/类
/**
 * Validates that the target task count divides evenly across shards and
 * seeds a uniform distribution used to pick one task within a shard.
 *
 * @throws IllegalArgumentException if tasks are not a multiple of shards
 */
public void prepare(WorkerTopologyContext context, GlobalStreamId stream, List<Integer> targetTasks) {
	this.targetTasks = targetTasks;
	int taskCount = targetTasks.size();
	int shardCount = initShardInfo(); // setup for doing shard to task mapping
	if (taskCount % shardCount != 0) {
		throw new IllegalArgumentException("Number of tasks ("+taskCount+") should be a multiple of the number of shards ("+shardCount+")!");
	}
	this.tasksPerShard = taskCount / shardCount;
	// Uniform over [0, tasksPerShard-1]: random task index inside a shard.
	this.random = new UniformIntegerDistribution(0, this.tasksPerShard - 1);
}
示例9: mkComponentcommon
import backtype.storm.generated.GlobalStreamId; //导入依赖的package包/类
/**
 * Builds a ComponentCommon from the given inputs and output declarations,
 * applying the parallelism hint only when one was provided.
 */
private static ComponentCommon mkComponentcommon(
		Map<GlobalStreamId, Grouping> inputs,
		HashMap<String, StreamInfo> output_spec, Integer parallelism_hint) {
	ComponentCommon common = new ComponentCommon(inputs, output_spec);
	if (parallelism_hint != null) {
		common.set_parallelism_hint(parallelism_hint);
	}
	return common;
}
示例10: mkBolt
import backtype.storm.generated.GlobalStreamId; //导入依赖的package包/类
/**
 * Serializes the bolt implementation and wraps it, together with its common
 * component metadata, into a Thrift Bolt structure.
 */
public static Bolt mkBolt(Map<GlobalStreamId, Grouping> inputs,
		IBolt bolt, HashMap<String, StreamInfo> output, Integer p) {
	byte[] serialized = Utils.serialize(bolt);
	ComponentObject object = ComponentObject.serialized_java(serialized);
	return new Bolt(object, mkComponentcommon(inputs, output, p));
}
示例11: get_process_latencie
import backtype.storm.generated.GlobalStreamId; //导入依赖的package包/类
/**
 * Returns process latencies per time window, dividing each raw value by
 * LATENCY_MS_RATIO; missing (null) raw entries are reported as 0.
 */
public Map<String, Map<GlobalStreamId, Double>> get_process_latencie() {
	Map<Integer, Object> statics = staticsMap
			.get(StaticsType.process_latencies);
	// Sample instances tell convertKey the key/value types to produce.
	GlobalStreamId sampleKey = new GlobalStreamId("", "");
	Map<String, Map<GlobalStreamId, Double>> raw =
			convertKey(statics, sampleKey, Double.valueOf(0));

	Map<String, Map<GlobalStreamId, Double>> result =
			new HashMap<String, Map<GlobalStreamId, Double>>();
	for (Entry<String, Map<GlobalStreamId, Double>> window : raw.entrySet()) {
		Map<GlobalStreamId, Double> converted =
				new HashMap<GlobalStreamId, Double>();
		for (Entry<GlobalStreamId, Double> latency : window.getValue().entrySet()) {
			Double value = latency.getValue();
			if (value == null) {
				// Missing samples count as zero latency.
				converted.put(latency.getKey(), Double.valueOf(0));
			} else {
				converted.put(latency.getKey(), value / LATENCY_MS_RATIO);
			}
		}
		result.put(window.getKey(), converted);
	}
	return result;
}
示例12: get_total_recv_tps
import backtype.storm.generated.GlobalStreamId; //导入依赖的package包/类
/**
 * Sums the receive TPS of every input stream over the ALL time window.
 *
 * @return total receive tuples-per-second across all streams
 */
public Double get_total_recv_tps() {
	// Accumulate in a primitive double: the original used the deprecated
	// `new Double(0)` constructor and a boxed accumulator, re-boxing the
	// running total on every iteration.
	double total = 0.0;
	Map<GlobalStreamId, Double> allTimeRecvTps = get_recv_tps().get(StatBuckets.ALL_WINDOW_STR);
	for (Entry<GlobalStreamId, Double> entry : allTimeRecvTps.entrySet()) {
		total += entry.getValue();
	}
	return Double.valueOf(total);
}
示例13: buildSpoutOutoputAndBoltInputMap
import backtype.storm.generated.GlobalStreamId; //导入依赖的package包/类
/**
 * get all bolts' inputs and spouts' outputs <Bolt_name, <Input_name>>
 * <Spout_name, <Output_name>>
 *
 * Every bolt maps to the set of components it consumes from; every spout
 * maps to the set of bolts consuming its output. Components without any
 * connection map to an empty set.
 *
 * @param context assignment context holding the raw topology
 * @return all bolts' inputs and spouts' outputs
 */
public static Map<String, Set<String>> buildSpoutOutoputAndBoltInputMap(
		DefaultTopologyAssignContext context) {
	Set<String> boltNames = context.getRawTopology().get_bolts().keySet();
	Set<String> spoutNames = context.getRawTopology().get_spouts().keySet();
	Map<String, Set<String>> relationship = new HashMap<String, Set<String>>();

	for (Entry<String, Bolt> boltEntry : context.getRawTopology().get_bolts()
			.entrySet()) {
		String boltName = boltEntry.getKey();
		Set<String> boltInputs = new HashSet<String>();
		relationship.put(boltName, boltInputs);

		Map<GlobalStreamId, Grouping> inputs = boltEntry.getValue()
				.get_common().get_inputs();
		for (GlobalStreamId inputStream : inputs.keySet()) {
			String producer = inputStream.get_componentId();
			boltInputs.add(producer);
			// A producer that is not a bolt must be a spout: record this
			// bolt as one of its outputs.
			if (!boltNames.contains(producer)) {
				Set<String> spoutOutputs = relationship.get(producer);
				if (spoutOutputs == null) {
					spoutOutputs = new HashSet<String>();
					relationship.put(producer, spoutOutputs);
				}
				spoutOutputs.add(boltName);
			}
		}
	}

	// Guarantee every spout and bolt appears, even when unconnected.
	for (String spout : spoutNames) {
		if (relationship.get(spout) == null) {
			relationship.put(spout, new HashSet<String>());
		}
	}
	for (String bolt : boltNames) {
		if (relationship.get(bolt) == null) {
			relationship.put(bolt, new HashSet<String>());
		}
	}
	return relationship;
}
示例14: prepare
import backtype.storm.generated.GlobalStreamId; //导入依赖的package包/类
/**
 * Computes the join id fields (the intersection of all source streams'
 * output fields), records which source stream supplies each requested
 * output field, and sets up the pending-tuple cache keyed by the id fields.
 *
 * @throws RuntimeException if some requested output field is not produced
 *         by any source stream
 */
@Override
public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
	_collector = collector;
	_fieldLocations = new HashMap<String, GlobalStreamId>();
	int timeout = ((Number) conf.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS)).intValue();
	_pending = new TimeCacheMap<List<Object>, Map<GlobalStreamId, Tuple>>(timeout, new ExpireCallback());
	_numSources = context.getThisSources().size();

	Set<String> idFields = null;
	for (GlobalStreamId source : context.getThisSources().keySet()) {
		Fields fields = context.getComponentOutputFields(source.get_componentId(), source.get_streamId());
		Set<String> sourceFields = new HashSet<String>(fields.toList());
		// Id fields are whatever is common to every source stream.
		if (idFields == null) {
			idFields = sourceFields;
		} else {
			idFields.retainAll(sourceFields);
		}
		// Remember which source stream provides each requested output field.
		for (String outField : _outFields) {
			if (sourceFields.contains(outField)) {
				_fieldLocations.put(outField, source);
			}
		}
	}
	_idFields = new Fields(new ArrayList<String>(idFields));

	if (_fieldLocations.size() != _outFields.size()) {
		throw new RuntimeException("Cannot find all outfields among sources");
	}
}
示例15: execute
import backtype.storm.generated.GlobalStreamId; //导入依赖的package包/类
/**
 * Collects one tuple per source stream under the join id; when all sources
 * have contributed, emits the join result anchored to every buffered tuple
 * and acks each of them.
 */
@Override
public void execute(Tuple tuple) {
	List<Object> joinId = tuple.select(_idFields);
	GlobalStreamId origin = new GlobalStreamId(
			tuple.getSourceComponent(), tuple.getSourceStreamId());

	Map<GlobalStreamId, Tuple> buffered = _pending.get(joinId);
	if (buffered == null) {
		buffered = new HashMap<GlobalStreamId, Tuple>();
		_pending.put(joinId, buffered);
	}
	if (buffered.containsKey(origin)) {
		throw new RuntimeException(
				"Received same side of single join twice");
	}
	buffered.put(origin, tuple);

	if (buffered.size() != _numSources) {
		return; // still waiting for the remaining source stream(s)
	}
	_pending.remove(joinId);

	// Pull each output field from the tuple of the stream that owns it.
	List<Object> output = new ArrayList<Object>();
	for (String outField : _outFields) {
		Tuple holder = buffered.get(_fieldLocations.get(outField));
		output.add(holder.getValueByField(outField));
	}
	_collector.emit(new ArrayList<Tuple>(buffered.values()), output);
	for (Tuple contributor : buffered.values()) {
		_collector.ack(contributor);
	}
}