This article collects typical usage examples of the Java class storm.trident.util.TridentUtils. If you are wondering what the TridentUtils class does, how to use it, or what real usage of it looks like, the curated code examples below may help.
The TridentUtils class belongs to the storm.trident.util package. 15 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help surface better Java code examples.
Example 1: each
import storm.trident.util.TridentUtils; // import the required package/class
@Override
public Stream each(Fields inputFields, Function function, Fields functionFields) {
    projectionValidation(inputFields);
    return _topology.addSourcedNode(this,
            new ProcessorNode(_topology.getUniqueStreamId(),
                    _name,
                    TridentUtils.fieldsConcat(getOutputFields(), functionFields),
                    functionFields,
                    new EachProcessor(inputFields, function)));
}
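In Example 1, TridentUtils.fieldsConcat appends the function's output fields to the stream's existing output fields when declaring the ProcessorNode. The standalone sketch below only illustrates that concatenation; the field names are made up, and it assumes fieldsConcat simply appends the field lists in order.

import backtype.storm.tuple.Fields;
import storm.trident.util.TridentUtils;

public class FieldsConcatSketch {
    public static void main(String[] args) {
        // Hypothetical field sets, purely for illustration.
        Fields streamFields = new Fields("sentence", "word");
        Fields functionFields = new Fields("count");

        // Assuming fieldsConcat appends the lists in order,
        // this prints [sentence, word, count].
        Fields allOutput = TridentUtils.fieldsConcat(streamFields, functionFields);
        System.out.println(allOutput.toList());
    }
}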
Example 2: stateQuery
import storm.trident.util.TridentUtils; // import the required package/class
public Stream stateQuery(TridentState state, Fields inputFields, QueryFunction function, Fields functionFields) {
    projectionValidation(inputFields);
    String stateId = state._node.stateInfo.id;
    Node n = new ProcessorNode(_topology.getUniqueStreamId(),
            _name,
            TridentUtils.fieldsConcat(getOutputFields(), functionFields),
            functionFields,
            new StateQueryProcessor(stateId, inputFields, function));
    _topology._colocate.get(stateId).add(n);
    return _topology.addSourcedNode(this, n);
}
Example 3: persistentAggregate
import storm.trident.util.TridentUtils; // import the required package/class
public TridentState persistentAggregate(StateSpec spec, Fields inputFields, CombinerAggregator agg, Fields functionFields) {
    return aggregate(inputFields, agg, functionFields)
            .partitionPersist(spec,
                    TridentUtils.fieldsUnion(_groupFields, functionFields),
                    new MapCombinerAggStateUpdater(agg, _groupFields, functionFields),
                    TridentUtils.fieldsConcat(_groupFields, functionFields));
}
Example 4: partitionAggregate
import storm.trident.util.TridentUtils; // import the required package/class
@Override
public IAggregatableStream partitionAggregate(Fields inputFields, Aggregator agg, Fields functionFields) {
    Aggregator groupedAgg = new GroupedAggregator(agg, _groupFields, inputFields, functionFields.size());
    Fields allInFields = TridentUtils.fieldsUnion(_groupFields, inputFields);
    Fields allOutFields = TridentUtils.fieldsConcat(_groupFields, functionFields);
    Stream s = _stream.partitionAggregate(allInFields, groupedAgg, allOutFields);
    return new GroupedStream(s, _groupFields);
}
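Examples 3 and 4 use the two helpers side by side: fieldsUnion builds the input field set (deduplicating any overlap between the group fields and the aggregator's own input fields), while fieldsConcat builds the declared output fields in order. Below is a minimal sketch of the difference, with made-up field names; it assumes the union is set-based, so the ordering of its result should not be relied on.

import backtype.storm.tuple.Fields;
import storm.trident.util.TridentUtils;

public class UnionVsConcatSketch {
    public static void main(String[] args) {
        Fields groupFields = new Fields("userId");
        Fields inputFields = new Fields("userId", "amount");
        Fields functionFields = new Fields("total");

        // Union deduplicates, so "userId" appears only once.
        Fields allInFields = TridentUtils.fieldsUnion(groupFields, inputFields);
        // Concat keeps order and appends: [userId, total].
        Fields allOutFields = TridentUtils.fieldsConcat(groupFields, functionFields);

        System.out.println(allInFields.toList());
        System.out.println(allOutFields.toList());
    }
}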
Example 5: declareOutputFields
import storm.trident.util.TridentUtils; // import the required package/class
@Override
public void declareOutputFields(OutputFieldsDeclarer declarer) {
    Fields outFields = TridentUtils.getSingleOutputStreamFields(_delegate);
    outFields = TridentUtils.fieldsConcat(new Fields("$id$"), outFields);
    declarer.declareStream(_stream, outFields);
    // try to find a way to merge this code with what's already done in TridentBoltExecutor
    declarer.declareStream(_coordStream, true, new Fields("id", "count"));
}
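Example 5 first asks TridentUtils.getSingleOutputStreamFields for the fields of the delegate's single declared stream, then prepends a "$id$" field before re-declaring. The sketch below shows the helper on a hypothetical spout that declares exactly one default stream; the WordSpout class is invented here purely for illustration.

import java.util.Map;

import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichSpout;
import backtype.storm.tuple.Fields;
import storm.trident.util.TridentUtils;

public class SingleStreamFieldsSketch {
    // Hypothetical spout that declares one default stream with two fields.
    static class WordSpout extends BaseRichSpout {
        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            declarer.declare(new Fields("word", "count"));
        }
        @Override
        public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {}
        @Override
        public void nextTuple() {}
    }

    public static void main(String[] args) {
        // Reads the component's declared streams and returns the Fields of
        // its single stream: [word, count] for the spout above.
        Fields fields = TridentUtils.getSingleOutputStreamFields(new WordSpout());
        System.out.println(fields.toList());
    }
}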
Example 6: outgoingNodes
import storm.trident.util.TridentUtils; // import the required package/class
public Set<Node> outgoingNodes() {
    Set<Node> ret = new HashSet<Node>();
    for(Node n: nodes) {
        ret.addAll(TridentUtils.getChildren(graph, n));
    }
    return ret;
}
Example 7: incomingNodes
import storm.trident.util.TridentUtils; // import the required package/class
public Set<Node> incomingNodes() {
    Set<Node> ret = new HashSet<Node>();
    for(Node n: nodes) {
        ret.addAll(TridentUtils.getParents(graph, n));
    }
    return ret;
}
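Examples 6 and 7 walk the topology's underlying jgrapht directed graph: TridentUtils.getChildren collects the targets of a node's outgoing edges, and getParents collects the sources of its incoming edges. The sketch below uses plain jgrapht rather than the Trident helpers (which operate on the topology's own node and edge types) and only illustrates the children/parents idea on a toy graph.

import org.jgrapht.Graphs;
import org.jgrapht.graph.DefaultDirectedGraph;
import org.jgrapht.graph.DefaultEdge;

public class GraphNeighborsSketch {
    public static void main(String[] args) {
        // Toy graph standing in for the topology's node graph.
        DefaultDirectedGraph<String, DefaultEdge> graph =
                new DefaultDirectedGraph<String, DefaultEdge>(DefaultEdge.class);
        graph.addVertex("spout");
        graph.addVertex("each");
        graph.addVertex("aggregate");
        graph.addEdge("spout", "each");
        graph.addEdge("each", "aggregate");

        // Children of "each" -> [aggregate]; parents of "each" -> [spout].
        System.out.println(Graphs.successorListOf(graph, "each"));
        System.out.println(Graphs.predecessorListOf(graph, "each"));
    }
}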
Example 8: newDRPCStream
import storm.trident.util.TridentUtils; // import the required package/class
private Stream newDRPCStream(DRPCSpout spout) {
    // TODO: consider adding a shuffle grouping after the spout to avoid so much routing of the args/return-info all over the place
    // (at least until it's possible to just pack bolt logic into the spout itself)
    Node n = new SpoutNode(getUniqueStreamId(), TridentUtils.getSingleOutputStreamFields(spout), null, spout, SpoutNode.SpoutType.DRPC);
    Stream nextStream = addNode(n);
    // later on, this will be joined back with return-info and all the results
    return nextStream.project(new Fields("args"));
}
Example 9: multiReduce
import storm.trident.util.TridentUtils; // import the required package/class
public Stream multiReduce(List<Fields> inputFields, List<GroupedStream> groupedStreams, GroupedMultiReducer function, Fields outputFields) {
    List<Fields> fullInputFields = new ArrayList<Fields>();
    List<Stream> streams = new ArrayList<Stream>();
    List<Fields> fullGroupFields = new ArrayList<Fields>();
    for(int i=0; i<groupedStreams.size(); i++) {
        GroupedStream gs = groupedStreams.get(i);
        Fields groupFields = gs.getGroupFields();
        fullGroupFields.add(groupFields);
        streams.add(gs.toStream().partitionBy(groupFields));
        fullInputFields.add(TridentUtils.fieldsUnion(groupFields, inputFields.get(i)));
    }
    return multiReduce(fullInputFields, streams, new GroupedMultiReducerExecutor(function, fullGroupFields, inputFields), outputFields);
}
Example 10: strippedInputFields
import storm.trident.util.TridentUtils; // import the required package/class
private static List<Fields> strippedInputFields(List<Stream> streams, List<Fields> joinFields) {
    List<Fields> ret = new ArrayList<Fields>();
    for(int i=0; i<streams.size(); i++) {
        ret.add(TridentUtils.fieldsSubtract(streams.get(i).getOutputFields(), joinFields.get(i)));
    }
    return ret;
}
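Example 10 strips each stream's join fields out of its output fields, keeping only the non-key fields before the join. Below is a minimal sketch of fieldsSubtract with made-up field names, assuming it returns the remaining fields of the first argument in their original order.

import backtype.storm.tuple.Fields;
import storm.trident.util.TridentUtils;

public class FieldsSubtractSketch {
    public static void main(String[] args) {
        Fields outputFields = new Fields("userId", "name", "age");
        Fields joinFields = new Fields("userId");

        // Removes the join fields, leaving [name, age].
        Fields stripped = TridentUtils.fieldsSubtract(outputFields, joinFields);
        System.out.println(stripped.toList());
    }
}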
Example 11: newDRPCStream
import storm.trident.util.TridentUtils; // import the required package/class
private Stream newDRPCStream(DRPCSpout spout) {
    // TODO: consider adding a shuffle grouping after the spout to avoid so much routing of the args/return-info all over the place
    // (at least until it's possible to just pack bolt logic into the spout itself)
    Node n = new SpoutNode(getUniqueStreamId(), TridentUtils.getSingleOutputStreamFields(spout), null, spout, SpoutNode.SpoutType.DRPC);
    Stream nextStream = addNode(n);
    // later on, this will be joined back with return-info and all the results
    return nextStream.project(new Fields("args"));
}
Example 12: each
import storm.trident.util.TridentUtils; // import the required package/class
@Override
public Stream each(Fields inputFields, Function function, Fields functionFields) {
    projectionValidation(inputFields);
    return _topology.addSourcedNode(this,
            new ProcessorNode(_topology.getUniqueStreamId(), _name, TridentUtils.fieldsConcat(getOutputFields(), functionFields), functionFields,
                    new EachProcessor(inputFields, function)));
}
Example 13: stateQuery
import storm.trident.util.TridentUtils; // import the required package/class
public Stream stateQuery(TridentState state, Fields inputFields, QueryFunction function, Fields functionFields) {
    projectionValidation(inputFields);
    String stateId = state._node.stateInfo.id;
    Node n =
            new ProcessorNode(_topology.getUniqueStreamId(), _name, TridentUtils.fieldsConcat(getOutputFields(), functionFields), functionFields,
                    new StateQueryProcessor(stateId, inputFields, function));
    _topology._colocate.get(stateId).add(n);
    return _topology.addSourcedNode(this, n);
}
Example 14: outgoingNodes
import storm.trident.util.TridentUtils; // import the required package/class
public Set<Node> outgoingNodes() {
    Set<Node> ret = new HashSet<Node>();
    for (Node n : nodes) {
        ret.addAll(TridentUtils.getChildren(graph, n));
    }
    return ret;
}
Example 15: incomingNodes
import storm.trident.util.TridentUtils; // import the required package/class
public Set<Node> incomingNodes() {
    Set<Node> ret = new HashSet<Node>();
    for (Node n : nodes) {
        ret.addAll(TridentUtils.getParents(graph, n));
    }
    return ret;
}