

Java TopologyBuilder.setSpout Method Code Examples

This article collects typical usage examples of the Java method org.apache.storm.topology.TopologyBuilder.setSpout. If you are wondering what TopologyBuilder.setSpout does, how to call it, or what real-world usages look like, the curated examples below should help. You can also browse further usage examples of the enclosing class, org.apache.storm.topology.TopologyBuilder.


A total of 15 code examples of the TopologyBuilder.setSpout method are shown below, sorted by popularity by default.
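Before the examples, here is a minimal sketch of the two most common setSpout overloads: setSpout(id, spout) registers a spout (a stream source) under a component id with the default parallelism of 1, while setSpout(id, spout, parallelismHint) also requests a number of executors. This sketch is not taken from any of the projects below; it assumes Storm's bundled test spout org.apache.storm.testing.TestWordSpout is on the classpath, and the component ids and topology name are arbitrary placeholders. It mirrors the local-mode submission pattern used in several examples further down.

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.testing.TestWordSpout;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.utils.Utils;

public class SetSpoutSketch {
    public static void main(String[] args) {
        TopologyBuilder builder = new TopologyBuilder();

        // setSpout(id, spout): register a spout with the default parallelism of 1
        builder.setSpout("words", new TestWordSpout());

        // setSpout(id, spout, parallelismHint): request two executors for this spout
        builder.setSpout("more-words", new TestWordSpout(), 2);

        // Run briefly on an in-process cluster, then shut it down
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("set-spout-sketch", new Config(), builder.createTopology());
        Utils.sleep(10000);
        cluster.killTopology("set-spout-sketch");
        cluster.shutdown();
    }
}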

Example 1: main

import org.apache.storm.topology.TopologyBuilder; // import the package/class the method depends on
public static void main(String args[]) throws InterruptedException {
	Config config = new Config();
	config.setNumWorkers(3);
	TopologyBuilder topologyBuilder = new TopologyBuilder();
	
	String zkConnString = "localhost:2181";
	String topicName = "sensor-data";
	
	BrokerHosts hosts = new ZkHosts(zkConnString);
	SpoutConfig spoutConfig = new SpoutConfig(hosts, topicName , "/" + topicName, UUID.randomUUID().toString());
	spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());

	KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);
	topologyBuilder.setSpout("spout", kafkaSpout, 1);
       topologyBuilder.setBolt("es-bolt", new ElasticSearchBolt(), 1).shuffleGrouping("spout");
       
       LocalCluster cluster = new LocalCluster();
       cluster.submitTopology("storm-es-example", config, topologyBuilder.createTopology());
}
 
Developer ID: PacktPublishing, Project: Practical-Real-time-Processing-and-Analytics, Lines of code: 20, Source file: SensorTopology.java

Example 2: main

import org.apache.storm.topology.TopologyBuilder; // import the package/class the method depends on
public static void main(String[] args) {
	Config config = new Config();
	config.setNumWorkers(3);
	TopologyBuilder topologyBuilder = new TopologyBuilder();
	
	String zkConnString = "localhost:2181";
	String topicName = "storm-diy";
	
	BrokerHosts hosts = new ZkHosts(zkConnString);
	SpoutConfig spoutConfig = new SpoutConfig(hosts, topicName , "/" + topicName, UUID.randomUUID().toString());
	spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());

	KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);
	topologyBuilder.setSpout("spout", kafkaSpout, 1);
	topologyBuilder.setBolt("parser", new ParseAndUsageBolt(), 1).shuffleGrouping("spout");
	topologyBuilder.setBolt("usageCassandra", new UsageCassandraBolt("localhost", "usage"), 1).shuffleGrouping("parser", "usagestream");
	topologyBuilder.setBolt("tdrCassandra", new TDRCassandraBolt("localhost", "tdr"), 1).shuffleGrouping("parser", "tdrstream");
	
	LocalCluster cluster = new LocalCluster();
       cluster.submitTopology("storm-diy", config, topologyBuilder.createTopology());
}
 
Developer ID: PacktPublishing, Project: Practical-Real-time-Processing-and-Analytics, Lines of code: 22, Source file: TelecomProcessorTopology.java

Example 3: createTopology

import org.apache.storm.topology.TopologyBuilder; // import the package/class the method depends on
@Override
public StormTopology createTopology() {
    final String topic = config.getKafkaSpeakerTopic();
    final String name = String.format("%s_%s_%d", getTopologyName(), topic, System.currentTimeMillis());
    final Integer parallelism = config.getParallelism();

    TopologyBuilder builder = new TopologyBuilder();

    String spoutId = "KafkaSpout-" + topic;
    builder.setSpout(spoutId, createKafkaSpout(topic, name), parallelism);
    LoggerBolt logger = new LoggerBolt()
            .withLevel(config.getLoggerLevel())
            .withWatermark(config.getLoggerWatermark());

    builder.setBolt("Logger", logger, parallelism)
            .shuffleGrouping(spoutId);

    return builder.createTopology();
}
 
Developer ID: telstra, Project: open-kilda, Lines of code: 20, Source file: KafkaLoggerTopology.java

Example 4: createTopology

import org.apache.storm.topology.TopologyBuilder; // import the package/class the method depends on
@Override
public StormTopology createTopology() {
    final String directory = config.getFilterDirectory();
    final String name = String.format("%s_%s_%s_%d", getTopologyName(), topic, directory, System.currentTimeMillis());

    String spoutId = "KafkaSpout-" + topic;
    int parallelism = 1;

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(spoutId, createKafkaSpout(topic, name), parallelism);
    filer = new FilerBolt().withFileName("utils-" + topic + ".log");
    if (directory.length() != 0)
        filer.withDir(new File(directory));

    builder.setBolt("utils", filer, parallelism)
            .shuffleGrouping(spoutId);
    return builder.createTopology();
}
 
Developer ID: telstra, Project: open-kilda, Lines of code: 19, Source file: KafkaFilerTopology.java

Example 5: createCtrlBranch

import org.apache.storm.topology.TopologyBuilder; // import the package/class the method depends on
protected void createCtrlBranch(TopologyBuilder builder, List<CtrlBoltRef> targets)
        throws StreamNameCollisionException {
    checkAndCreateTopic(config.getKafkaCtrlTopic());

    org.apache.storm.kafka.KafkaSpout kafkaSpout;
    kafkaSpout = createKafkaSpout(config.getKafkaCtrlTopic(), SPOUT_ID_CTRL);
    builder.setSpout(SPOUT_ID_CTRL, kafkaSpout);

    RouteBolt route = new RouteBolt(getTopologyName());
    builder.setBolt(BOLT_ID_CTRL_ROUTE, route)
            .shuffleGrouping(SPOUT_ID_CTRL);

    KafkaBolt kafkaBolt = createKafkaBolt(config.getKafkaCtrlTopic());
    BoltDeclarer outputSetup = builder.setBolt(BOLT_ID_CTRL_OUTPUT, kafkaBolt)
            .shuffleGrouping(BOLT_ID_CTRL_ROUTE, route.STREAM_ID_ERROR);

    for (CtrlBoltRef ref : targets) {
        String boltId = ref.getBoltId();
        ref.getDeclarer().allGrouping(BOLT_ID_CTRL_ROUTE, route.registerEndpoint(boltId));
        outputSetup.shuffleGrouping(boltId, ref.getBolt().getCtrlStreamId());
    }
}
 
Developer ID: telstra, Project: open-kilda, Lines of code: 23, Source file: AbstractTopology.java

Example 6: getTopology

import org.apache.storm.topology.TopologyBuilder; // import the package/class the method depends on
@Override
public StormTopology getTopology(Config config) {
  final int numLevels = BenchmarkUtils.getInt(config, TOPOLOGY_LEVEL, DEFAULT_NUM_LEVELS);
  final int msgSize = BenchmarkUtils.getInt(config, RandomMessageSpout.MESSAGE_SIZE,
          RandomMessageSpout.DEFAULT_MESSAGE_SIZE);
  final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
  final int boltNum = BenchmarkUtils.getInt(config, BOLT_NUM, DEFAULT_BOLT_NUM);

  spout = new RandomMessageSpout(msgSize, BenchmarkUtils.ifAckEnabled(config));

  TopologyBuilder builder = new TopologyBuilder();

  builder.setSpout(SPOUT_ID, spout, spoutNum);
  builder.setBolt(BOLT_ID + 1, new ConstBolt(), boltNum)
      .shuffleGrouping(SPOUT_ID);
  for (int levelNum = 2; levelNum <= numLevels - 1; levelNum++) {
    builder.setBolt(BOLT_ID + levelNum, new ConstBolt(), boltNum)
      .shuffleGrouping(BOLT_ID + (levelNum - 1));
  }
  return builder.createTopology();
}
 
Developer ID: MBtech, Project: stormbenchmark, Lines of code: 22, Source file: SOL.java

Example 7: getTopology

import org.apache.storm.topology.TopologyBuilder; // import the package/class the method depends on
@Override
public StormTopology getTopology(Config config) {

  final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
  final int viewBoltNum = BenchmarkUtils.getInt(config, VIEW_NUM, DEFAULT_VIEW_BOLT_NUM);
  final int cntBoltNum = BenchmarkUtils.getInt(config, COUNT_NUM, DEFAULT_COUNT_BOLT_NUM);

  spout = new KafkaSpout(KafkaUtils.getSpoutConfig(
          config, new SchemeAsMultiScheme(new StringScheme())));

  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout(SPOUT_ID, spout, spoutNum);
  builder.setBolt(VIEW_ID, new PageViewBolt(Item.URL, Item.ONE), viewBoltNum)
         .localOrShuffleGrouping(SPOUT_ID);
  builder.setBolt(COUNT_ID, new WordCount.Count(), cntBoltNum)
          .fieldsGrouping(VIEW_ID, new Fields(Item.URL.toString()));
  return builder.createTopology();
}
 
Developer ID: MBtech, Project: stormbenchmark, Lines of code: 19, Source file: PageViewCount.java

Example 8: main

import org.apache.storm.topology.TopologyBuilder; // import the package/class the method depends on
public static void main(String[] args){
    TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.setSpout(SPOUT_ID,new SentenceSpout());
    topologyBuilder.setBolt(BOLT_ID_SENTENCE_SPLIT,new SentenceSplitBolt()).shuffleGrouping(SPOUT_ID);
    topologyBuilder.setBolt(BOLT_ID_WORD_COUNT,new WordCountBlot()).fieldsGrouping(BOLT_ID_SENTENCE_SPLIT,new Fields("word"));
    topologyBuilder.setBolt(BOLT_ID_COUNT_REPORT,new WordsReportBolt()).globalGrouping(BOLT_ID_WORD_COUNT);

    Config config  = new Config();
    LocalCluster localCluster = new LocalCluster();

    localCluster.submitTopology(TOPOLOGY_ID,config,topologyBuilder.createTopology());
    //
    Utils.sleep(10000);
    localCluster.killTopology(TOPOLOGY_ID);
    localCluster.shutdown();
}
 
Developer ID: yangboz, Project: SpringBootKafkaStorm, Lines of code: 17, Source file: WordCountTopology.java

Example 9: main

import org.apache.storm.topology.TopologyBuilder; // import the package/class the method depends on
public static void main(String[] args){
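    // NOTE: these hardcoded defaults overwrite any command-line arguments, so the length check below never triggers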
    args = new String[] {"localhost:2181", "storm-test-topic", "/brokers", "storm-consumer"};
    // Log program usages and exit if there are less than 4 command line arguments
    if(args.length < 4) {
        LOG.fatal("Incorrect number of arguments. Required arguments: <zk-hosts> <kafka-topic> <zk-path> <clientid>");
        System.exit(1);
    }
    // Build Spout configuration using input command line parameters
    final BrokerHosts zkrHosts = new ZkHosts(args[0]);
    final String kafkaTopic = args[1];
    final String zkRoot = args[2];
    final String clientId = args[3];
    final SpoutConfig spoutConfig = new SpoutConfig(zkrHosts,kafkaTopic,zkRoot,clientId);
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    // Build the topology: consume messages from Kafka and log them to the console
    final TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.setSpout("kafka-spout", new KafkaSpout(spoutConfig));
    // Route the output of the Kafka spout to the logger bolt, which logs the consumed messages
    topologyBuilder.setBolt("kafka-message", new LoggerBolt()).globalGrouping("kafka-spout");
    final LocalCluster localCluster = new LocalCluster();
    localCluster.submitTopology("kafka-toology",new Config(),topologyBuilder.createTopology());
}
 
Developer ID: yangboz, Project: SpringBootKafkaStorm, Lines of code: 25, Source file: LoggerTopology.java

Example 10: getTopology

import org.apache.storm.topology.TopologyBuilder; // import the package/class the method depends on
@Override
public StormTopology getTopology(Config config) {
  final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
  final int pvBoltNum = BenchmarkUtils.getInt(config, VIEW_NUM, DEFAULT_PV_BOLT_NUM);
  final int filterBoltNum = BenchmarkUtils.getInt(config, FILTER_NUM, DEFAULT_FILTER_BOLT_NUM);
  spout = new KafkaSpout(KafkaUtils.getSpoutConfig(
          config, new SchemeAsMultiScheme(new StringScheme())));

  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout(SPOUT_ID, spout, spoutNum);
  builder.setBolt(VIEW_ID, new PageViewBolt(Item.STATUS, Item.ALL), pvBoltNum)
          .localOrShuffleGrouping(SPOUT_ID);
  builder.setBolt(FILTER_ID, new FilterBolt<Integer>(404), filterBoltNum)
          .fieldsGrouping(VIEW_ID, new Fields(Item.STATUS.toString()));
  return builder.createTopology();
}
 
Developer ID: MBtech, Project: stormbenchmark, Lines of code: 17, Source file: DataClean.java

Example 11: getTopology

import org.apache.storm.topology.TopologyBuilder; // import the package/class the method depends on
@Override
public StormTopology getTopology(Config config) {

  final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
  final int pvBoltNum = BenchmarkUtils.getInt(config, VIEW_NUM, DEFAULT_PV_BOLT_NUM);
  final int uvBoltNum = BenchmarkUtils.getInt(config, UNIQUER_NUM, DEFAULT_UV_BOLT_NUM);
  final int winLen = BenchmarkUtils.getInt(config, WINDOW_LENGTH, DEFAULT_WINDOW_LENGTH_IN_SEC);
  final int emitFreq = BenchmarkUtils.getInt(config, EMIT_FREQ, DEFAULT_EMIT_FREQ_IN_SEC);
  spout = new KafkaSpout(KafkaUtils.getSpoutConfig(
          config, new SchemeAsMultiScheme(new StringScheme())));

  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout(SPOUT_ID, spout, spoutNum);
  builder.setBolt(VIEW_ID, new PageViewBolt(Item.URL, Item.USER), pvBoltNum)
          .localOrShuffleGrouping(SPOUT_ID);
  builder.setBolt(UNIQUER_ID, new UniqueVisitorBolt(winLen, emitFreq), uvBoltNum)
          .fieldsGrouping(VIEW_ID, new Fields(Item.URL.toString()));
  return builder.createTopology();
}
 
Developer ID: MBtech, Project: stormbenchmark, Lines of code: 20, Source file: UniqueVisitor.java

Example 12: buildTopology

import org.apache.storm.topology.TopologyBuilder; // import the package/class the method depends on
public static TopologyBuilder buildTopology() {
	final TopologyBuilder builder = new TopologyBuilder();

	builder.setSpout(spoutId, new RandomSpout(true, seed));
	builder.setBolt(evenVerifierId, new VerifyAndEnrichBolt(true)).shuffleGrouping(spoutId,
			RandomSpout.EVEN_STREAM);
	builder.setBolt(oddVerifierId, new VerifyAndEnrichBolt(false)).shuffleGrouping(spoutId,
			RandomSpout.ODD_STREAM);

	// emit result
	if (outputPath != null) {
		// use the last token of the given output path as the sink file name
		final String[] tokens = outputPath.split(":");
		final String outputFile = tokens[tokens.length - 1];
		builder.setBolt(sinkId, new BoltFileSink(outputFile, formatter))
			.shuffleGrouping(evenVerifierId).shuffleGrouping(oddVerifierId);
	} else {
		builder.setBolt(sinkId, new BoltPrintSink(formatter), 4)
			.shuffleGrouping(evenVerifierId).shuffleGrouping(oddVerifierId);
	}

	return builder;
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 24, Source file: SplitSpoutTopology.java

Example 13: getActionTopology

import org.apache.storm.topology.TopologyBuilder; // import the package/class the method depends on
protected StormTopology getActionTopology(String zkhost) {
    BrokerHosts hosts = new ZkHosts(zkhost);
    TopologyBuilder builder = new TopologyBuilder();
    // trigger spouts setting
    SpoutConfig triggerConfig = new SpoutConfig(hosts, TOPICS[3], "/" + TOPICS[3], UUID.randomUUID().toString());
    triggerConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    triggerConfig.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
    triggerConfig.ignoreZkOffsets = true;
    // status spouts setting
    SpoutConfig statusConfig = new SpoutConfig(hosts, TOPICS[4], "/" + TOPICS[4], UUID.randomUUID().toString());
    statusConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    statusConfig.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
    statusConfig.ignoreZkOffsets = true;
    // Set spouts
    builder.setSpout("trigger-spout", new KafkaSpout(triggerConfig));
    builder.setSpout("status-spout", new KafkaSpout(statusConfig));
    /* Set bolts */
    builder.setBolt("scheduling-bolt", new SchedulingBolt())
            .shuffleGrouping("trigger-spout");
    builder.setBolt("status-bolt", new StatusBolt(), 4)
            .shuffleGrouping("status-spout");
    builder.setBolt("execute-code-bolt", new ExecutingBolt()).fieldsGrouping("scheduling-bolt",new Fields("roadMapId"));
    builder.setBolt("provisioning-bolt", new ProvisioningBolt()).fieldsGrouping("execute-code-bolt",new Fields("roadMapId"));
    builder.setBolt("calling-feed-bolt", new CallingFeedBolt()).fieldsGrouping("provisioning-bolt",new Fields("roadMapId"));
    return builder.createTopology();
}
 
Developer ID: ENOW-IJI, Project: ENOW-server, Lines of code: 27, Source file: DockerSubmitter.java

Example 14: getActionTopology

import org.apache.storm.topology.TopologyBuilder; // import the package/class the method depends on
protected StormTopology getActionTopology(String zkhost) {
    BrokerHosts hosts = new ZkHosts(zkhost);
    TopologyBuilder builder = new TopologyBuilder();
    // trigger spouts setting
    SpoutConfig triggerConfig = new SpoutConfig(hosts, TOPICS[3], "/" + TOPICS[3], UUID.randomUUID().toString());
    triggerConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    triggerConfig.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
    triggerConfig.ignoreZkOffsets = true;
    // status spouts setting
    SpoutConfig statusConfig = new SpoutConfig(hosts, TOPICS[4], "/" + TOPICS[4], UUID.randomUUID().toString());
    statusConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    statusConfig.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
    statusConfig.ignoreZkOffsets = true;
    // Set spouts
    builder.setSpout("trigger-spout", new KafkaSpout(triggerConfig));
    builder.setSpout("status-spout", new KafkaSpout(statusConfig));
    /* Set bolts */
    builder.setBolt("scheduling-bolt", new SchedulingBolt())
            .allGrouping("trigger-spout");
    builder.setBolt("status-bolt", new StatusBolt(), 4)
            .allGrouping("status-spout");
    builder.setBolt("execute-code-bolt", new ExecutingBolt()).fieldsGrouping("scheduling-bolt",new Fields("roadMapId"));
    builder.setBolt("provisioning-bolt", new ProvisioningBolt()).fieldsGrouping("execute-code-bolt",new Fields("roadMapId"));
    builder.setBolt("calling-feed-bolt", new CallingFeedBolt()).fieldsGrouping("provisioning-bolt",new Fields("roadMapId"));
    return builder.createTopology();
}
 
Developer ID: ENOW-IJI, Project: ENOW-server, Lines of code: 27, Source file: RemoteSubmitter.java

Example 15: getTopology

import org.apache.storm.topology.TopologyBuilder; // import the package/class the method depends on
@Override
public StormTopology getTopology(Config config) {

  final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
  final int nopBoltNum = BenchmarkUtils.getInt(config, SPLIT_NUM, DEFAULT_SP_BOLT_NUM);
  //final int rcBoltNum = BenchmarkUtils.getInt(config, COUNTER_NUM, DEFAULT_RC_BOLT_NUM);
  final int windowLength = BenchmarkUtils.getInt(config, WINDOW_LENGTH,
          RollingBolt.DEFAULT_SLIDING_WINDOW_IN_SECONDS);
  final int emitFreq = BenchmarkUtils.getInt(config, EMIT_FREQ,
          RollingBolt.DEFAULT_EMIT_FREQUENCY_IN_SECONDS);

  spout = new FileReadSpout(BenchmarkUtils.ifAckEnabled(config));

  TopologyBuilder builder = new TopologyBuilder();

  builder.setSpout(SPOUT_ID, spout, spoutNum);
  builder.setBolt(SPLIT_ID, new Nop(), nopBoltNum)
          .localOrShuffleGrouping(SPOUT_ID);
  return builder.createTopology();
}
 
Developer ID: MBtech, Project: stormbenchmark, Lines of code: 21, Source file: NOPFileReader.java


Note: The org.apache.storm.topology.TopologyBuilder.setSpout method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution and use must follow each project's license. Do not reproduce without permission.