This page collects typical usage examples of the Java method org.apache.storm.topology.TopologyBuilder.createTopology. If you are wondering how TopologyBuilder.createTopology is used in practice, the curated code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.storm.topology.TopologyBuilder.
The following 15 code examples of TopologyBuilder.createTopology are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Java code examples.
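Before the examples, here is a minimal, self-contained sketch of the typical workflow around createTopology: wire spouts and bolts with a TopologyBuilder, call createTopology() to obtain an immutable StormTopology, and submit it either to a cluster or to an in-process LocalCluster. The component names, parallelism hints, and the PrintBolt class are illustrative assumptions, not code from the examples below; TestWordSpout is Storm's built-in test spout.

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.generated.StormTopology;
import org.apache.storm.testing.TestWordSpout;
import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.topology.base.BaseBasicBolt;
import org.apache.storm.tuple.Tuple;

public class CreateTopologyExample {

    // Trivial terminal bolt for illustration: logs each word and emits nothing.
    public static class PrintBolt extends BaseBasicBolt {
        @Override
        public void execute(Tuple input, BasicOutputCollector collector) {
            System.out.println(input.getStringByField("word"));
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            // no output stream
        }
    }

    public static void main(String[] args) throws Exception {
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("word-spout", new TestWordSpout(), 2);
        builder.setBolt("print-bolt", new PrintBolt(), 4).shuffleGrouping("word-spout");

        // createTopology() freezes the wiring into a serializable StormTopology.
        StormTopology topology = builder.createTopology();

        Config conf = new Config();
        conf.setNumWorkers(2);
        if (args.length > 0) {
            // Submit to a running cluster under the given name.
            StormSubmitter.submitTopology(args[0], conf, topology);
        } else {
            // Or run in-process for a quick local test.
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("local-test", conf, topology);
            Thread.sleep(10_000);
            cluster.shutdown();
        }
    }
}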
Example 1: createTopology
import org.apache.storm.topology.TopologyBuilder; // import the package/class the method depends on
@Override
public StormTopology createTopology() {
    final String topic = config.getKafkaSpeakerTopic();
    final String name = String.format("%s_%s_%d", getTopologyName(), topic, System.currentTimeMillis());
    final Integer parallelism = config.getParallelism();
    TopologyBuilder builder = new TopologyBuilder();
    String spoutId = "KafkaSpout-" + topic;
    builder.setSpout(spoutId, createKafkaSpout(topic, name), parallelism);
    LoggerBolt logger = new LoggerBolt()
            .withLevel(config.getLoggerLevel())
            .withWatermark(config.getLoggerWatermark());
    builder.setBolt("Logger", logger, parallelism)
            .shuffleGrouping(spoutId);
    return builder.createTopology();
}
Example 2: createTopology
import org.apache.storm.topology.TopologyBuilder; // import the package/class the method depends on
@Override
public StormTopology createTopology() {
    final String directory = config.getFilterDirectory();
    final String name = String.format("%s_%s_%s_%d", getTopologyName(), topic, directory, System.currentTimeMillis());
    String spoutId = "KafkaSpout-" + topic;
    int parallelism = 1;
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(spoutId, createKafkaSpout(topic, name), parallelism);
    filer = new FilerBolt().withFileName("utils-" + topic + ".log");
    if (directory.length() != 0)
        filer.withDir(new File(directory));
    builder.setBolt("utils", filer, parallelism)
            .shuffleGrouping(spoutId);
    return builder.createTopology();
}
Example 3: getTopology
import org.apache.storm.topology.TopologyBuilder; // import the package/class the method depends on
@Override
public StormTopology getTopology(Config config) {
    final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
    final int spBoltNum = BenchmarkUtils.getInt(config, SPLIT_NUM, DEFAULT_SP_BOLT_NUM);
    final int rcBoltNum = BenchmarkUtils.getInt(config, COUNTER_NUM, DEFAULT_RC_BOLT_NUM);
    final int windowLength = BenchmarkUtils.getInt(config, WINDOW_LENGTH,
            RollingBolt.DEFAULT_SLIDING_WINDOW_IN_SECONDS);
    final int emitFreq = BenchmarkUtils.getInt(config, EMIT_FREQ,
            RollingBolt.DEFAULT_EMIT_FREQUENCY_IN_SECONDS);
    spout = new KafkaSpout(KafkaUtils.getSpoutConfig(
            config, new SchemeAsMultiScheme(new StringScheme())));
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SPOUT_ID, spout, spoutNum);
    builder.setBolt(SPLIT_ID, new WordCount.SplitSentence(), spBoltNum)
            .localOrShuffleGrouping(SPOUT_ID);
    builder.setBolt(COUNTER_ID, new RollingCountBolt(windowLength, emitFreq), rcBoltNum)
            .fieldsGrouping(SPLIT_ID, new Fields(WordCount.SplitSentence.FIELDS));
    return builder.createTopology();
}
Example 4: getSpeedTopolgy
import org.apache.storm.topology.TopologyBuilder; // import the package/class the method depends on
/**
 * @return the topology to run
 */
protected StormTopology getSpeedTopolgy() {
    final TopologyBuilder tp = new TopologyBuilder();
    // consume from the truck_speed_events topic
    tp.setSpout("kafka_spout", new KafkaSpout<>(getKafkaSpoutConfig()), 1);
    // parse pipe-delimited speed events into a POJO
    tp.setBolt("parse_speed_event", new ParseSpeedEventBolt())
            .shuffleGrouping("kafka_spout");
    // calculate the average speed for driver-route over a 10 second window
    tp.setBolt("average_speed", new AverageSpeedBolt().withTumblingWindow(new BaseWindowedBolt.Duration(WINDOW_SIZE_MS, TimeUnit.MILLISECONDS)))
            .shuffleGrouping("parse_speed_event");
            // new Fields(ParseSpeedEventBolt.FIELD_DRIVER_ID, ParseSpeedEventBolt.FIELD_ROUTE_ID));
    // send results back to Kafka results topic
    tp.setBolt("kakfa_bolt", getKafkaBolt())
            .shuffleGrouping("average_speed");
    return tp.createTopology();
}
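Example 4 calls two helper methods, getKafkaSpoutConfig() and getKafkaBolt(), that are not shown on this page. A rough sketch of what such helpers could look like with the storm-kafka-client API follows; the broker address, consumer group, result topic name, and mapped field names are assumptions, and the exact builder methods vary slightly between storm-kafka-client versions.

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.storm.kafka.bolt.KafkaBolt;
import org.apache.storm.kafka.bolt.mapper.FieldNameBasedTupleToKafkaMapper;
import org.apache.storm.kafka.bolt.selector.DefaultTopicSelector;
import org.apache.storm.kafka.spout.KafkaSpoutConfig;

// Consumer side: subscribe to the truck_speed_events topic (broker/group values are placeholders).
protected KafkaSpoutConfig<String, String> getKafkaSpoutConfig() {
    return KafkaSpoutConfig.builder("localhost:9092", "truck_speed_events")
            .setProp(ConsumerConfig.GROUP_ID_CONFIG, "speed-topology")
            .setFirstPollOffsetStrategy(KafkaSpoutConfig.FirstPollOffsetStrategy.UNCOMMITTED_EARLIEST)
            .build();
}

// Producer side: write the averaged results to an assumed results topic.
protected KafkaBolt<String, String> getKafkaBolt() {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092");
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    return new KafkaBolt<String, String>()
            .withProducerProperties(props)
            .withTopicSelector(new DefaultTopicSelector("average_speed_results"))
            .withTupleToKafkaMapper(new FieldNameBasedTupleToKafkaMapper<>("key", "message"));
}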
Example 5: getActionTopology
import org.apache.storm.topology.TopologyBuilder; // import the package/class the method depends on
protected StormTopology getActionTopology(String zkhost) {
    BrokerHosts hosts = new ZkHosts(zkhost);
    TopologyBuilder builder = new TopologyBuilder();
    // trigger spout settings
    SpoutConfig triggerConfig = new SpoutConfig(hosts, TOPICS[3], "/" + TOPICS[3], UUID.randomUUID().toString());
    triggerConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    triggerConfig.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
    triggerConfig.ignoreZkOffsets = true;
    // status spout settings
    SpoutConfig statusConfig = new SpoutConfig(hosts, TOPICS[4], "/" + TOPICS[4], UUID.randomUUID().toString());
    statusConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    statusConfig.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
    statusConfig.ignoreZkOffsets = true;
    // set spouts
    builder.setSpout("trigger-spout", new KafkaSpout(triggerConfig));
    builder.setSpout("status-spout", new KafkaSpout(statusConfig));
    // set bolts
    builder.setBolt("scheduling-bolt", new SchedulingBolt())
            .shuffleGrouping("trigger-spout");
    builder.setBolt("status-bolt", new StatusBolt(), 4)
            .shuffleGrouping("status-spout");
    builder.setBolt("execute-code-bolt", new ExecutingBolt()).fieldsGrouping("scheduling-bolt", new Fields("roadMapId"));
    builder.setBolt("provisioning-bolt", new ProvisioningBolt()).fieldsGrouping("execute-code-bolt", new Fields("roadMapId"));
    builder.setBolt("calling-feed-bolt", new CallingFeedBolt()).fieldsGrouping("provisioning-bolt", new Fields("roadMapId"));
    return builder.createTopology();
}
Example 6: getTopology
import org.apache.storm.topology.TopologyBuilder; // import the package/class the method depends on
@Override
public StormTopology getTopology(Config config) {
    final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
    final int matBoltNum = BenchmarkUtils.getInt(config, FM_NUM, DEFAULT_MAT_BOLT_NUM);
    final int cntBoltNum = BenchmarkUtils.getInt(config, CM_NUM, DEFAULT_CNT_BOLT_NUM);
    final String ptnString = (String) Utils.get(config, PATTERN_STRING, DEFAULT_PATTERN_STR);
    spout = new KafkaSpout(KafkaUtils.getSpoutConfig(config, new SchemeAsMultiScheme(new StringScheme())));
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SPOUT_ID, spout, spoutNum);
    builder.setBolt(FM_ID, new FindMatchingSentence(ptnString), matBoltNum)
            .localOrShuffleGrouping(SPOUT_ID);
    builder.setBolt(CM_ID, new CountMatchingSentence(), cntBoltNum)
            .fieldsGrouping(FM_ID, new Fields(FindMatchingSentence.FIELDS));
    return builder.createTopology();
}
Example 7: getActionTopology
import org.apache.storm.topology.TopologyBuilder; // import the package/class the method depends on
protected StormTopology getActionTopology(String zkhost) {
    BrokerHosts hosts = new ZkHosts(zkhost);
    TopologyBuilder builder = new TopologyBuilder();
    // trigger spout settings
    SpoutConfig triggerConfig = new SpoutConfig(hosts, TOPICS[3], "/" + TOPICS[3], UUID.randomUUID().toString());
    triggerConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    triggerConfig.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
    triggerConfig.ignoreZkOffsets = true;
    // status spout settings
    SpoutConfig statusConfig = new SpoutConfig(hosts, TOPICS[4], "/" + TOPICS[4], UUID.randomUUID().toString());
    statusConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    statusConfig.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
    statusConfig.ignoreZkOffsets = true;
    // set spouts
    builder.setSpout("trigger-spout", new KafkaSpout(triggerConfig));
    builder.setSpout("status-spout", new KafkaSpout(statusConfig));
    // set bolts
    builder.setBolt("scheduling-bolt", new SchedulingBolt())
            .allGrouping("trigger-spout");
    builder.setBolt("status-bolt", new StatusBolt(), 4)
            .allGrouping("status-spout");
    builder.setBolt("execute-code-bolt", new ExecutingBolt()).fieldsGrouping("scheduling-bolt", new Fields("roadMapId"));
    builder.setBolt("provisioning-bolt", new ProvisioningBolt()).fieldsGrouping("execute-code-bolt", new Fields("roadMapId"));
    builder.setBolt("calling-feed-bolt", new CallingFeedBolt()).fieldsGrouping("provisioning-bolt", new Fields("roadMapId"));
    return builder.createTopology();
}
Example 8: getTopology
import org.apache.storm.topology.TopologyBuilder; // import the package/class the method depends on
@Override
public StormTopology getTopology(Config config) {
    final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
    final int spBoltNum = BenchmarkUtils.getInt(config, SPLIT_NUM, DEFAULT_SP_BOLT_NUM);
    final int rcBoltNum = BenchmarkUtils.getInt(config, COUNTER_NUM, DEFAULT_RC_BOLT_NUM);
    final int windowLength = BenchmarkUtils.getInt(config, WINDOW_LENGTH,
            RollingBolt.DEFAULT_SLIDING_WINDOW_IN_SECONDS);
    final int emitFreq = BenchmarkUtils.getInt(config, EMIT_FREQ,
            RollingBolt.DEFAULT_EMIT_FREQUENCY_IN_SECONDS);
    final String filename = Utils.getString(Utils.get(config, FILE_CONFIG, "/A_Tale_of_Two_City.txt"));
    spout = new FileReadSpout(BenchmarkUtils.ifAckEnabled(config), filename);
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SPOUT_ID, spout, spoutNum);
    builder.setBolt(SPLIT_ID, new WordCount.SplitSentence(), spBoltNum)
            .localOrShuffleGrouping(SPOUT_ID);
    builder.setBolt(COUNTER_ID, new WordCount.Count(), rcBoltNum)
            .shuffleGrouping(SPLIT_ID);
    builder.setBolt("aggregator", new WordCount.ACount(), 1)
            .fieldsGrouping(COUNTER_ID, new Fields(WordCount.SplitSentence.FIELDS));
    // builder.setBolt(COUNTER_ID, new RollingCountBolt(windowLength, emitFreq), rcBoltNum)
    //         .fieldsGrouping(SPLIT_ID, new Fields(WordCount.SplitSentence.FIELDS));
    // builder.setBolt("file sink", new FileSink(), 3).localOrShuffleGrouping(COUNTER_ID);
    return builder.createTopology();
}
Example 9: getTopology
import org.apache.storm.topology.TopologyBuilder; // import the package/class the method depends on
@Override
public StormTopology getTopology(Config config) {
    final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
    final int spBoltNum = BenchmarkUtils.getInt(config, SPLIT_NUM, DEFAULT_SP_BOLT_NUM);
    final int rcBoltNum = BenchmarkUtils.getInt(config, COUNTER_NUM, DEFAULT_RC_BOLT_NUM);
    final int windowLength = BenchmarkUtils.getInt(config, WINDOW_LENGTH,
            RollingBolt.DEFAULT_SLIDING_WINDOW_IN_SECONDS);
    final int emitFreq = BenchmarkUtils.getInt(config, EMIT_FREQ,
            RollingBolt.DEFAULT_EMIT_FREQUENCY_IN_SECONDS);
    final String filename = Utils.getString(Utils.get(config, FILE_CONFIG, "/A_Tale_of_Two_City.txt"));
    spout = new FileReadSpout(BenchmarkUtils.ifAckEnabled(config), filename);
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SPOUT_ID, spout, spoutNum);
    builder.setBolt(SPLIT_ID, new WordCount.SplitSentence(), spBoltNum)
            .localOrShuffleGrouping(SPOUT_ID);
    builder.setBolt(COUNTER_ID, new PartialWindowCountBolt().withWindow(new Count(windowLength), new Count(emitFreq)), rcBoltNum)
            .shuffleGrouping(SPLIT_ID);
    builder.setBolt("aggregator", new MergeCountBolt().withWindow(new Count(windowLength), new Count(emitFreq)), 6)
            .fieldsGrouping(COUNTER_ID, new Fields("obj", "count", "timestamp"));
    // builder.setBolt(COUNTER_ID, new RollingCountBolt(windowLength, emitFreq), rcBoltNum)
    //         .fieldsGrouping(SPLIT_ID, new Fields(WordCount.SplitSentence.FIELDS));
    // builder.setBolt("file sink", new FileSink(), 3).localOrShuffleGrouping(COUNTER_ID);
    return builder.createTopology();
}
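PartialWindowCountBolt and MergeCountBolt in Example 9 (and WindowCountBolt in Example 12 below) are project-specific classes not shown on this page. As a point of reference, a minimal windowed word-count bolt built on Storm's BaseWindowedBolt might look roughly like the sketch below; the input field name "word" and the output fields ("obj", "count", "timestamp") are assumptions chosen to match the fieldsGrouping above.

import java.util.HashMap;
import java.util.Map;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseWindowedBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;
import org.apache.storm.windowing.TupleWindow;

public class SimpleWindowCountBolt extends BaseWindowedBolt {
    private OutputCollector collector;

    @Override
    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
    }

    @Override
    public void execute(TupleWindow window) {
        // Count occurrences of the (assumed) "word" field within the current window.
        Map<String, Long> counts = new HashMap<>();
        for (Tuple tuple : window.get()) {
            counts.merge(tuple.getStringByField("word"), 1L, Long::sum);
        }
        long timestamp = System.currentTimeMillis();
        for (Map.Entry<String, Long> entry : counts.entrySet()) {
            collector.emit(new Values(entry.getKey(), entry.getValue(), timestamp));
        }
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("obj", "count", "timestamp"));
    }
}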
Example 10: getTopology
import org.apache.storm.topology.TopologyBuilder; // import the package/class the method depends on
@Override
public StormTopology getTopology(Config config) {
    final int numLevels = BenchmarkUtils.getInt(config, TOPOLOGY_LEVEL, DEFAULT_NUM_LEVELS);
    final int msgSize = BenchmarkUtils.getInt(config, RandomMessageSpout.MESSAGE_SIZE,
            RandomMessageSpout.DEFAULT_MESSAGE_SIZE);
    final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
    final int boltNum = BenchmarkUtils.getInt(config, BOLT_NUM, DEFAULT_BOLT_NUM);
    spout = new RandomMessageSpout(msgSize, BenchmarkUtils.ifAckEnabled(config));
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SPOUT_ID, spout, spoutNum);
    builder.setBolt(BOLT_ID + 1, new ConstBolt(), boltNum)
            .shuffleGrouping(SPOUT_ID);
    for (int levelNum = 2; levelNum <= numLevels - 1; levelNum++) {
        builder.setBolt(BOLT_ID + levelNum, new ConstBolt(), boltNum)
                .shuffleGrouping(BOLT_ID + (levelNum - 1));
    }
    return builder.createTopology();
}
Example 11: getTopology
import org.apache.storm.topology.TopologyBuilder; // import the package/class the method depends on
@Override
public StormTopology getTopology(Config config) {
    final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
    final int nopBoltNum = BenchmarkUtils.getInt(config, SPLIT_NUM, DEFAULT_SP_BOLT_NUM);
    // final int rcBoltNum = BenchmarkUtils.getInt(config, COUNTER_NUM, DEFAULT_RC_BOLT_NUM);
    final int windowLength = BenchmarkUtils.getInt(config, WINDOW_LENGTH,
            RollingBolt.DEFAULT_SLIDING_WINDOW_IN_SECONDS);
    final int emitFreq = BenchmarkUtils.getInt(config, EMIT_FREQ,
            RollingBolt.DEFAULT_EMIT_FREQUENCY_IN_SECONDS);
    spout = new FileReadSpout(BenchmarkUtils.ifAckEnabled(config));
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SPOUT_ID, spout, spoutNum);
    builder.setBolt(SPLIT_ID, new Nop(), nopBoltNum)
            .localOrShuffleGrouping(SPOUT_ID);
    return builder.createTopology();
}
Example 12: getTopology
import org.apache.storm.topology.TopologyBuilder; // import the package/class the method depends on
@Override
public StormTopology getTopology(Config config) {
    final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
    final int spBoltNum = BenchmarkUtils.getInt(config, SPLIT_NUM, DEFAULT_SP_BOLT_NUM);
    final int rcBoltNum = BenchmarkUtils.getInt(config, COUNTER_NUM, DEFAULT_RC_BOLT_NUM);
    final int windowLength = BenchmarkUtils.getInt(config, WINDOW_LENGTH,
            RollingBolt.DEFAULT_SLIDING_WINDOW_IN_SECONDS);
    final int emitFreq = BenchmarkUtils.getInt(config, EMIT_FREQ,
            RollingBolt.DEFAULT_EMIT_FREQUENCY_IN_SECONDS);
    final String filename = Utils.getString(Utils.get(config, FILE_CONFIG, "/A_Tale_of_Two_City.txt"));
    spout = new FileReadSpout(BenchmarkUtils.ifAckEnabled(config), filename);
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SPOUT_ID, spout, spoutNum);
    builder.setBolt(SPLIT_ID, new WordCount.SplitSentence(), spBoltNum)
            .localOrShuffleGrouping(SPOUT_ID);
    builder.setBolt(COUNTER_ID, new WindowCountBolt().withWindow(new Count(windowLength), new Count(emitFreq)), rcBoltNum)
            .fieldsGrouping(SPLIT_ID, new Fields(WordCount.SplitSentence.FIELDS));
    // builder.setBolt(COUNTER_ID, new RollingCountBolt(windowLength, emitFreq), rcBoltNum)
    //         .fieldsGrouping(SPLIT_ID, new Fields(WordCount.SplitSentence.FIELDS));
    // builder.setBolt("file sink", new FileSink(), 3).localOrShuffleGrouping(COUNTER_ID);
    return builder.createTopology();
}
Example 13: getTopology
import org.apache.storm.topology.TopologyBuilder; // import the package/class the method depends on
@Override
public StormTopology getTopology(Config config) {
    final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
    final int pvBoltNum = BenchmarkUtils.getInt(config, VIEW_NUM, DEFAULT_PV_BOLT_NUM);
    final int uvBoltNum = BenchmarkUtils.getInt(config, UNIQUER_NUM, DEFAULT_UV_BOLT_NUM);
    final int winLen = BenchmarkUtils.getInt(config, WINDOW_LENGTH, DEFAULT_WINDOW_LENGTH_IN_SEC);
    final int emitFreq = BenchmarkUtils.getInt(config, EMIT_FREQ, DEFAULT_EMIT_FREQ_IN_SEC);
    spout = new KafkaSpout(KafkaUtils.getSpoutConfig(
            config, new SchemeAsMultiScheme(new StringScheme())));
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SPOUT_ID, spout, spoutNum);
    builder.setBolt(VIEW_ID, new PageViewBolt(Item.URL, Item.USER), pvBoltNum)
            .localOrShuffleGrouping(SPOUT_ID);
    builder.setBolt(UNIQUER_ID, new UniqueVisitorBolt(winLen, emitFreq), uvBoltNum)
            .fieldsGrouping(VIEW_ID, new Fields(Item.URL.toString()));
    return builder.createTopology();
}
Example 14: getTopology
import org.apache.storm.topology.TopologyBuilder; // import the package/class the method depends on
@Override
public StormTopology getTopology(Config config) {
    final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
    final int spBoltNum = BenchmarkUtils.getInt(config, SPLIT_NUM, DEFAULT_SP_BOLT_NUM);
    final int rcBoltNum = BenchmarkUtils.getInt(config, COUNTER_NUM, DEFAULT_RC_BOLT_NUM);
    final int windowLength = BenchmarkUtils.getInt(config, WINDOW_LENGTH,
            RollingBolt.DEFAULT_SLIDING_WINDOW_IN_SECONDS);
    final int emitFreq = BenchmarkUtils.getInt(config, EMIT_FREQ,
            RollingBolt.DEFAULT_EMIT_FREQUENCY_IN_SECONDS);
    final String filename = Utils.getString(Utils.get(config, FILE_CONFIG, "/A_Tale_of_Two_City.txt"));
    spout = new FileReadSpout(BenchmarkUtils.ifAckEnabled(config), filename);
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SPOUT_ID, spout, spoutNum);
    builder.setBolt(SPLIT_ID, new WordCount.SplitSentence(), spBoltNum)
            .localOrShuffleGrouping(SPOUT_ID);
    builder.setBolt(COUNTER_ID, new RollingCountBolt(windowLength, emitFreq), rcBoltNum)
            .fieldsGrouping(SPLIT_ID, new Fields(WordCount.SplitSentence.FIELDS));
    // builder.setBolt("file sink", new FileSink(), 3).localOrShuffleGrouping(COUNTER_ID);
    return builder.createTopology();
}
Example 15: getTopology
import org.apache.storm.topology.TopologyBuilder; // import the package/class the method depends on
@Override
public StormTopology getTopology(Config config) {
    final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
    final int spBoltNum = BenchmarkUtils.getInt(config, SPLIT_NUM, DEFAULT_SP_BOLT_NUM);
    final int rcBoltNum = BenchmarkUtils.getInt(config, COUNTER_NUM, DEFAULT_RC_BOLT_NUM);
    final int windowLength = BenchmarkUtils.getInt(config, WINDOW_LENGTH,
            RollingBolt.DEFAULT_SLIDING_WINDOW_IN_SECONDS);
    final int emitFreq = BenchmarkUtils.getInt(config, EMIT_FREQ,
            RollingBolt.DEFAULT_EMIT_FREQUENCY_IN_SECONDS);
    final String filename = Utils.getString(Utils.get(config, FILE_CONFIG, "/A_Tale_of_Two_City.txt"));
    spout = new FileReadSpout(BenchmarkUtils.ifAckEnabled(config), filename);
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SPOUT_ID, spout, spoutNum);
    builder.setBolt(SPLIT_ID, new WordCount.SplitSentence(), spBoltNum)
            .localOrShuffleGrouping(SPOUT_ID);
    builder.setBolt(COUNTER_ID, new WordCount.Count(), rcBoltNum)
            .fieldsGrouping(SPLIT_ID, new Fields(WordCount.SplitSentence.FIELDS));
    // builder.setBolt(COUNTER_ID, new RollingCountBolt(windowLength, emitFreq), rcBoltNum)
    //         .fieldsGrouping(SPLIT_ID, new Fields(WordCount.SplitSentence.FIELDS));
    // builder.setBolt("file sink", new FileSink(), 3).localOrShuffleGrouping(COUNTER_ID);
    return builder.createTopology();
}