当前位置: 首页>>代码示例>>Java>>正文


Java TopologyBuilder类代码示例

本文整理汇总了Java中org.apache.storm.topology.TopologyBuilder的典型用法代码示例。如果您正苦于以下问题:Java TopologyBuilder类的具体用法?Java TopologyBuilder怎么用?Java TopologyBuilder使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。


TopologyBuilder类属于org.apache.storm.topology包,在下文中一共展示了TopologyBuilder类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: main

import org.apache.storm.topology.TopologyBuilder; //导入依赖的package包/类
/**
 * Entry point: wires a Kafka spout reading the "sensor-data" topic into an
 * Elasticsearch writer bolt and submits the topology to an in-process
 * LocalCluster.
 *
 * @param args command-line arguments (unused)
 * @throws InterruptedException declared for compatibility with existing callers
 */
public static void main(String[] args) throws InterruptedException {
	Config config = new Config();
	config.setNumWorkers(3);
	TopologyBuilder topologyBuilder = new TopologyBuilder();

	// Local ZooKeeper and the Kafka topic to consume.
	String zkConnString = "localhost:2181";
	String topicName = "sensor-data";

	BrokerHosts hosts = new ZkHosts(zkConnString);
	// Random client id so repeated local runs do not pick up stale ZK offsets.
	SpoutConfig spoutConfig = new SpoutConfig(hosts, topicName, "/" + topicName, UUID.randomUUID().toString());
	spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());

	KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);
	topologyBuilder.setSpout("spout", kafkaSpout, 1);
	topologyBuilder.setBolt("es-bolt", new ElasticSearchBolt(), 1).shuffleGrouping("spout");

	// In-process cluster; intentionally left running (no shutdown), as before.
	LocalCluster cluster = new LocalCluster();
	cluster.submitTopology("storm-es-example", config, topologyBuilder.createTopology());
}
 
开发者ID:PacktPublishing,项目名称:Practical-Real-time-Processing-and-Analytics,代码行数:20,代码来源:SensorTopology.java

示例2: main

import org.apache.storm.topology.TopologyBuilder; //导入依赖的package包/类
/**
 * Builds and locally submits the telecom processing topology:
 * Kafka spout -> parser bolt -> Cassandra writers for the usage and TDR streams.
 */
public static void main(String[] args) {
	Config stormConfig = new Config();
	stormConfig.setNumWorkers(3);

	// Kafka spout wired against a local ZooKeeper/broker setup.
	BrokerHosts brokerHosts = new ZkHosts("localhost:2181");
	String topic = "storm-diy";
	// Fresh client id per run so stale ZooKeeper offsets are never reused.
	SpoutConfig kafkaConfig = new SpoutConfig(brokerHosts, topic, "/" + topic, UUID.randomUUID().toString());
	kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());

	TopologyBuilder builder = new TopologyBuilder();
	builder.setSpout("spout", new KafkaSpout(kafkaConfig), 1);
	builder.setBolt("parser", new ParseAndUsageBolt(), 1).shuffleGrouping("spout");
	// The parser emits two named streams, each consumed by its own Cassandra writer.
	builder.setBolt("usageCassandra", new UsageCassandraBolt("localhost", "usage"), 1).shuffleGrouping("parser", "usagestream");
	builder.setBolt("tdrCassandra", new TDRCassandraBolt("localhost", "tdr"), 1).shuffleGrouping("parser", "tdrstream");

	new LocalCluster().submitTopology("storm-diy", stormConfig, builder.createTopology());
}
 
开发者ID:PacktPublishing,项目名称:Practical-Real-time-Processing-and-Analytics,代码行数:22,代码来源:TelecomProcessorTopology.java

示例3: buildTopology

import org.apache.storm.topology.TopologyBuilder; //导入依赖的package包/类
/**
 * Wires the single-node real-time topology: Kafka spout -> extract bolt ->
 * statistic bolt (fields-grouped on "hashKeys"). All connection settings and
 * parallelism hints are read from the external Configuration store.
 *
 * @return the populated builder, ready for createTopology()
 * @throws Exception if any configuration key is missing or unreadable
 */
private static TopologyBuilder buildTopology() throws Exception {
    TopologyBuilder topology = new TopologyBuilder();

    // Kafka consumer coordinates, all configuration-driven.
    String topic = Configuration.getConfig().getString("rtc.mq.spout.topic");
    String group = Configuration.getConfig().getString("rtc.mq.spout.group");
    BrokerHosts brokerHosts = new ZkHosts(Configuration.getConfig().getString("rtc.zk.hosts"));

    SpoutConfig kafkaConf = new SpoutConfig(brokerHosts, topic, "/consumers", group);
    kafkaConf.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
    kafkaConf.zkServers = Arrays.asList(Configuration.getConfig().getString("rtc.storm.zkServers").split(","));
    kafkaConf.zkPort = Configuration.getConfig().getInt("rtc.storm.zkPort");
    kafkaConf.scheme = new SchemeAsMultiScheme(new StringScheme());

    topology.setSpout("MQSpout", new KafkaSpout(kafkaConf),
            Configuration.getConfig().getInt("rtc.storm.spout.parallelismHint"))
            .setNumTasks(Configuration.getConfig().getInt("rtc.storm.spout.task"));
    topology.setBolt("ExtractBolt", new ExtractBolt(),
            Configuration.getConfig().getInt("rtc.storm.extract.bolt.parallelismHint"))
            .setNumTasks(Configuration.getConfig().getInt("rtc.storm.extract.bolt.task"))
            .shuffleGrouping("MQSpout");
    topology.setBolt("Statistic", new StatisticBolt(),
            Configuration.getConfig().getInt("rtc.storm.statistic.bolt.parallelismHint"))
            .setNumTasks(Configuration.getConfig().getInt("rtc.storm.statistic.bolt.task"))
            .fieldsGrouping("ExtractBolt", new Fields(new String[]{"hashKeys"}));
    // Alarm bolt kept disabled, exactly as in the original wiring:
//        topology.setBolt("Alarm", new AlarmBolt(), Configuration.getConfig().getInt("rtc.storm.alarm.bolt.parallelismHint")).setNumTasks(Configuration.getConfig().getInt("rtc.storm.alarm.bolt.task")).fieldsGrouping("Statistic", new Fields(new String[]{"EventName"}));
    return topology;
}
 
开发者ID:JiuzhouSec,项目名称:nightwatch,代码行数:18,代码来源:SingleTopology.java

示例4: buildTopology

import org.apache.storm.topology.TopologyBuilder; //导入依赖的package包/类
/**
 * Builds the cluster variant of the real-time topology: a Kafka spout feeding
 * an extract bolt, which feeds a statistic bolt grouped on the "hashKeys"
 * field. Parallelism hints, task counts, and all connection settings come
 * from the external {@code Configuration} store.
 *
 * @return the populated builder, ready for {@code createTopology()}
 * @throws Exception if any configuration key is missing or unreadable
 */
private static TopologyBuilder buildTopology() throws Exception {
        TopologyBuilder builder = new TopologyBuilder();
        // Kafka consumer coordinates, all read from configuration.
        String topicName = Configuration.getConfig().getString("rtc.mq.spout.topic");
        String groupName = Configuration.getConfig().getString("rtc.mq.spout.group");
        BrokerHosts hosts = new ZkHosts(Configuration.getConfig().getString("rtc.zk.hosts"));
        SpoutConfig spoutConfig = new SpoutConfig(hosts, topicName, "/consumers", groupName);

        // Start from the latest offset; track consumer state in the configured ZK quorum.
        spoutConfig.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
        spoutConfig.zkServers = Arrays.asList(Configuration.getConfig().getString("rtc.storm.zkServers").split(","));
        spoutConfig.zkPort = Configuration.getConfig().getInt("rtc.storm.zkPort");
        spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
        KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);
        // Spout and bolts with configuration-driven parallelism/task counts.
        builder.setSpout("MQSpout", kafkaSpout, Configuration.getConfig().getInt("rtc.storm.spout.parallelismHint")).setNumTasks(Configuration.getConfig().getInt("rtc.storm.spout.task"));
        builder.setBolt("ExtractBolt", new ExtractBolt(), Configuration.getConfig().getInt("rtc.storm.extract.bolt.parallelismHint")).setNumTasks(Configuration.getConfig().getInt("rtc.storm.extract.bolt.task")).shuffleGrouping("MQSpout");
        // Fields grouping on "hashKeys" keeps tuples for the same key on the same task.
        builder.setBolt("Statistic", new StatisticBolt(), Configuration.getConfig().getInt("rtc.storm.statistic.bolt.parallelismHint")).setNumTasks(Configuration.getConfig().getInt("rtc.storm.statistic.bolt.task")).fieldsGrouping("ExtractBolt", new Fields(new String[]{"hashKeys"}));
        // Alarm bolt intentionally disabled in this build.
//        builder.setBolt("Alarm", new AlarmBolt(), Configuration.getConfig().getInt("rtc.storm.alarm.bolt.parallelismHint")).setNumTasks(Configuration.getConfig().getInt("rtc.storm.alarm.bolt.task")).fieldsGrouping("Statistic", new Fields(new String[]{"EventName"}));
        return builder;
    }
 
开发者ID:JiuzhouSec,项目名称:nightwatch,代码行数:19,代码来源:ClusterTopology.java

示例5: createTopology

import org.apache.storm.topology.TopologyBuilder; //导入依赖的package包/类
/**
 * Assembles a topology that drains the configured Kafka "speaker" topic into
 * a LoggerBolt whose level and watermark are taken from the topology config.
 *
 * @return the ready-to-submit topology
 */
@Override
public StormTopology createTopology() {
    final String topic = config.getKafkaSpeakerTopic();
    // Unique spout name: topology name + topic + submission timestamp.
    final String name = String.format("%s_%s_%d", getTopologyName(), topic, System.currentTimeMillis());
    final Integer parallelism = config.getParallelism();
    final String spoutId = "KafkaSpout-" + topic;

    LoggerBolt logger = new LoggerBolt()
            .withLevel(config.getLoggerLevel())
            .withWatermark(config.getLoggerWatermark());

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(spoutId, createKafkaSpout(topic, name), parallelism);
    builder.setBolt("Logger", logger, parallelism).shuffleGrouping(spoutId);
    return builder.createTopology();
}
 
开发者ID:telstra,项目名称:open-kilda,代码行数:20,代码来源:KafkaLoggerTopology.java

示例6: createTopology

import org.apache.storm.topology.TopologyBuilder; //导入依赖的package包/类
/**
 * Builds a topology that copies everything from the Kafka topic into a local
 * log file ("utils-&lt;topic&gt;.log"), optionally placed inside the configured
 * filter directory.
 *
 * @return the ready-to-submit topology
 */
@Override
public StormTopology createTopology() {
    final String directory = config.getFilterDirectory();
    final String name = String.format("%s_%s_%s_%d", getTopologyName(), topic, directory, System.currentTimeMillis());
    final String spoutId = "KafkaSpout-" + topic;
    final int parallelism = 1;

    // File sink; kept in a field so callers can inspect it after construction.
    filer = new FilerBolt().withFileName("utils-" + topic + ".log");
    if (!directory.isEmpty()) {
        filer.withDir(new File(directory));
    }

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(spoutId, createKafkaSpout(topic, name), parallelism);
    builder.setBolt("utils", filer, parallelism).shuffleGrouping(spoutId);
    return builder.createTopology();
}
 
开发者ID:telstra,项目名称:open-kilda,代码行数:19,代码来源:KafkaFilerTopology.java

示例7: createCtrlBranch

import org.apache.storm.topology.TopologyBuilder; //导入依赖的package包/类
/**
 * Adds the shared "ctrl" branch to a topology: a spout consuming the kafka
 * ctrl topic, a route bolt that fans messages out to registered endpoints,
 * and an output bolt that publishes responses (and routing errors) back to
 * the same kafka topic. Each target bolt is subscribed to its routed stream
 * and its ctrl output is funneled into the kafka output bolt.
 *
 * @param builder topology under construction
 * @param targets bolts participating in the ctrl protocol
 * @throws StreamNameCollisionException if endpoint registration collides
 */
protected void createCtrlBranch(TopologyBuilder builder, List<CtrlBoltRef> targets)
        throws StreamNameCollisionException {
    checkAndCreateTopic(config.getKafkaCtrlTopic());

    // Spout: consume the ctrl topic.
    org.apache.storm.kafka.KafkaSpout ctrlSpout =
            createKafkaSpout(config.getKafkaCtrlTopic(), SPOUT_ID_CTRL);
    builder.setSpout(SPOUT_ID_CTRL, ctrlSpout);

    // Route bolt: dispatches ctrl messages to registered endpoints.
    RouteBolt route = new RouteBolt(getTopologyName());
    builder.setBolt(BOLT_ID_CTRL_ROUTE, route).shuffleGrouping(SPOUT_ID_CTRL);

    // Output bolt: writes responses back to kafka; also receives routing errors.
    KafkaBolt kafkaBolt = createKafkaBolt(config.getKafkaCtrlTopic());
    BoltDeclarer outputSetup = builder.setBolt(BOLT_ID_CTRL_OUTPUT, kafkaBolt)
            .shuffleGrouping(BOLT_ID_CTRL_ROUTE, route.STREAM_ID_ERROR);

    // Subscribe every target to its routed stream and collect its ctrl output.
    for (CtrlBoltRef ref : targets) {
        final String boltId = ref.getBoltId();
        ref.getDeclarer().allGrouping(BOLT_ID_CTRL_ROUTE, route.registerEndpoint(boltId));
        outputSetup.shuffleGrouping(boltId, ref.getBolt().getCtrlStreamId());
    }
}
 
开发者ID:telstra,项目名称:open-kilda,代码行数:23,代码来源:AbstractTopology.java

示例8: main

import org.apache.storm.topology.TopologyBuilder; //导入依赖的package包/类
/**
 * Demo pipeline: random integers -> sliding-window sum (window 30, slide 10)
 * -> tumbling-window average (window 3) -> console printer. With an argument
 * the topology is submitted to a cluster under args[0]; otherwise it runs
 * locally for 40 seconds and shuts down.
 */
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("integer", new RandomIntegerSpout(), 1);
    builder.setBolt("slidingsum",
            new SlidingWindowSumBolt().withWindow(new Count(30), new Count(10)), 1)
            .shuffleGrouping("integer");
    builder.setBolt("tumblingavg",
            new TumblingWindowAvgBolt().withTumblingWindow(new Count(3)), 1)
            .shuffleGrouping("slidingsum");
    builder.setBolt("printer", new PrinterBolt(), 1).shuffleGrouping("tumblingavg");

    Config conf = new Config();
    conf.setDebug(true);

    boolean remote = args != null && args.length > 0;
    if (remote) {
        conf.setNumWorkers(1);
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    } else {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("test", conf, builder.createTopology());
        Utils.sleep(40000);
        cluster.killTopology("test");
        cluster.shutdown();
    }
}
 
开发者ID:bigdataFlySQL,项目名称:SQLonStorm,代码行数:22,代码来源:SlidingWindowTopology.java

示例9: main

import org.apache.storm.topology.TopologyBuilder; //导入依赖的package包/类
/**
 * Submits the sample topology (SampleSpout -> SampleBolt) to a remote Storm
 * cluster under the name given in args[0].
 *
 * @param args args[0] = topology name (required)
 */
public static void main(String[] args) throws AlreadyAliveException,
		InvalidTopologyException {
	// Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException
	// when no topology name is supplied.
	if (args == null || args.length < 1) {
		System.err.println("Usage: SampleStormClusterTopology <topology-name>");
		return;
	}
	// create an instance of TopologyBuilder class
	TopologyBuilder builder = new TopologyBuilder();
	// set the spout class
	builder.setSpout("SampleSpout", new SampleSpout(), 2);
	// set the bolt class
	builder.setBolt("SampleBolt", new SampleBolt(), 4).shuffleGrouping(
			"SampleSpout");
	Config conf = new Config();
	conf.setNumWorkers(3);
	// Submit to the remote cluster; submission failures are reported rather
	// than rethrown, keeping the original best-effort behavior (and replacing
	// the auto-generated TODO catch stub).
	try {
		StormSubmitter.submitTopology(args[0], conf,
				builder.createTopology());
	} catch (AlreadyAliveException | InvalidTopologyException | AuthorizationException e) {
		System.out.println(e);
	}
}
 
开发者ID:PacktPublishing,项目名称:Mastering-Apache-Storm,代码行数:26,代码来源:SampleStormClusterTopology.java

示例10: main

import org.apache.storm.topology.TopologyBuilder; //导入依赖的package包/类
/**
 * Word -> exclaim -> print demo. With arguments the topology is submitted to
 * a cluster under args[0]; without, it runs for 60 seconds in a LocalCluster
 * and then shuts down.
 */
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("word", new WordSpout(), 1);
    // Tuple flow: word -> exclaim -> print.
    builder.setBolt("exclaim", new ExclamationBolt(), 1).shuffleGrouping("word");
    builder.setBolt("print", new PrintBolt(), 1).shuffleGrouping("exclaim");

    Config conf = new Config();
    conf.setDebug(true);

    if (args == null || args.length == 0) {
        // Local run: the storm dependency must not be <scope>provided</scope>.
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("test3", conf, builder.createTopology());
        Utils.sleep(60 * 1000);
        cluster.killTopology("test3");
        cluster.shutdown();
    } else {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    }
}
 
开发者ID:xuxueli,项目名称:xxl-incubator,代码行数:24,代码来源:ExclamationTopology.java

示例11: main

import org.apache.storm.topology.TopologyBuilder; //导入依赖的package包/类
/**
 * Classic word count: sentence spout -> split bolt -> count bolt (grouped by
 * "word") -> global report bolt. Runs for 10 seconds in a LocalCluster and
 * then shuts down.
 */
public static void main(String[] args){
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SPOUT_ID, new SentenceSpout());
    builder.setBolt(BOLT_ID_SENTENCE_SPLIT, new SentenceSplitBolt()).shuffleGrouping(SPOUT_ID);
    // Fields grouping on "word" sends all tuples for a word to the same counter task.
    builder.setBolt(BOLT_ID_WORD_COUNT, new WordCountBlot()).fieldsGrouping(BOLT_ID_SENTENCE_SPLIT, new Fields("word"));
    // A single reporter receives every count via global grouping.
    builder.setBolt(BOLT_ID_COUNT_REPORT, new WordsReportBolt()).globalGrouping(BOLT_ID_WORD_COUNT);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology(TOPOLOGY_ID, new Config(), builder.createTopology());

    Utils.sleep(10000);
    cluster.killTopology(TOPOLOGY_ID);
    cluster.shutdown();
}
 
开发者ID:yangboz,项目名称:SpringBootKafkaStorm,代码行数:17,代码来源:WordCountTopology.java

示例12: main

import org.apache.storm.topology.TopologyBuilder; //导入依赖的package包/类
/**
 * Consumes messages from a Kafka topic and logs them through LoggerBolt on a
 * LocalCluster.
 *
 * Arguments: &lt;zk-hosts&gt; &lt;kafka-topic&gt; &lt;zk-path&gt; &lt;clientid&gt;.
 * The original code overwrote {@code args} unconditionally, which made the
 * argument check below dead code; defaults are now applied only when too few
 * arguments are supplied, preserving local-run convenience while restoring
 * real command-line use.
 */
public static void  main(String[] args){
    if (args == null || args.length < 4) {
        // Local-development defaults (previously hard-coded over args).
        args = new String[] {"localhost:2181", "storm-test-topic", "/brokers", "storm-consumer"};
    }
    // Build Spout configuration using input command line parameters
    final BrokerHosts zkrHosts = new ZkHosts(args[0]);
    final String kafkaTopic = args[1];
    final String zkRoot = args[2];
    final String clientId = args[3];
    final SpoutConfig spoutConfig = new SpoutConfig(zkrHosts, kafkaTopic, zkRoot, clientId);
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    // Build topology to consume messages from kafka and print them on console.
    final TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.setSpout("kafka-spout", new KafkaSpout(spoutConfig));
    // Route the Kafka spout output to the logger bolt.
    topologyBuilder.setBolt("kafka-message", new LoggerBolt()).globalGrouping("kafka-spout");
    final LocalCluster localCluster = new LocalCluster();
    // NOTE(review): topology name "kafka-toology" kept verbatim (typo in original).
    localCluster.submitTopology("kafka-toology", new Config(), topologyBuilder.createTopology());
}
 
开发者ID:yangboz,项目名称:SpringBootKafkaStorm,代码行数:25,代码来源:LoggerTopology.java

示例13: main

import org.apache.storm.topology.TopologyBuilder; //导入依赖的package包/类
/**
 * Main. Launches a remote Storm topology.
 *
 * Parses the command line, builds the record spout with the requested
 * parallelism and CPU/memory resource loads, and submits the topology.
 *
 * @param args The input args.
 * @throws Exception if any.
 */
public static void main(String[] args) throws Exception {
    OptionSet options = PARSER.parse(args);
    if (!options.hasOptions() || options.has(HELP_ARG) || !options.has(SPOUT_ARG)) {
        printHelp();
        return;
    }

    // Spout description and resource knobs taken off the command line.
    String spoutClass = (String) options.valueOf(SPOUT_ARG);
    List<String> spoutArguments = (List<String>) options.valuesOf(ARGUMENT_ARG);
    Integer parallelism = (Integer) options.valueOf(PARALLELISM_ARG);
    Double cpuLoad = (Double) options.valueOf(CPU_LOAD_ARG);
    Double onHeapMemoryLoad = (Double) options.valueOf(ON_HEAP_MEMORY_LOAD_ARG);
    Double offHeapMemoryLoad = (Double) options.valueOf(OFF_HEAP_MEMORY_LOAD_ARG);
    String configuration = (String) options.valueOf(CONFIGURATION_ARG);

    BulletStormConfig bulletStormConfig = new BulletStormConfig(configuration);

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(TopologyConstants.RECORD_COMPONENT, getSpout(spoutClass, spoutArguments), parallelism)
           .setCPULoad(cpuLoad)
           .setMemoryLoad(onHeapMemoryLoad, offHeapMemoryLoad);
    log.info("Added spout " + spoutClass + " with parallelism " + parallelism + ", CPU load " + cpuLoad +
             ", On-heap memory " + onHeapMemoryLoad + ", Off-heap memory " + offHeapMemoryLoad);

    submit(bulletStormConfig, TopologyConstants.RECORD_COMPONENT, builder);
}
 
开发者ID:yahoo,项目名称:bullet-storm,代码行数:30,代码来源:Topology.java

示例14: getSpeedTopolgy

import org.apache.storm.topology.TopologyBuilder; //导入依赖的package包/类
/**
 * Assembles the speed-analytics topology: Kafka spout -> speed-event parser
 * -> tumbling-window average -> Kafka results bolt.
 * (Method name typo "Topolgy" is preserved for caller compatibility.)
 *
 * @return the topology to run
 */
protected StormTopology getSpeedTopolgy() {
    final TopologyBuilder topology = new TopologyBuilder();

    // Consume from the truck_speed_events topic.
    topology.setSpout("kafka_spout", new KafkaSpout<>(getKafkaSpoutConfig()), 1);

    // Parse pipe-delimited speed events into a POJO.
    topology.setBolt("parse_speed_event", new ParseSpeedEventBolt())
            .shuffleGrouping("kafka_spout");

    // Average speed per driver-route over a tumbling window of WINDOW_SIZE_MS.
    topology.setBolt("average_speed",
            new AverageSpeedBolt().withTumblingWindow(
                    new BaseWindowedBolt.Duration(WINDOW_SIZE_MS, TimeUnit.MILLISECONDS)))
            .shuffleGrouping("parse_speed_event");
            // Fields grouping alternative left disabled, as in the original:
            //new Fields(ParseSpeedEventBolt.FIELD_DRIVER_ID, ParseSpeedEventBolt.FIELD_ROUTE_ID));

    // Publish results back to the Kafka results topic.
    topology.setBolt("kakfa_bolt", getKafkaBolt())
            .shuffleGrouping("average_speed");

    return topology.createTopology();
}
 
开发者ID:bbende,项目名称:hdf-trucking-app,代码行数:25,代码来源:SpeedTopology.java

示例15: getActionTopology

import org.apache.storm.topology.TopologyBuilder; //导入依赖的package包/类
/**
 * Builds the action topology: trigger and status Kafka spouts feeding the
 * scheduling -> executing -> provisioning -> calling-feed bolt chain, with
 * all inter-bolt routing fields-grouped on "roadMapId".
 *
 * @param zkhost ZooKeeper connect string for the Kafka brokers
 * @return the assembled topology
 */
protected StormTopology getActionTopology(String zkhost) {
    BrokerHosts hosts = new ZkHosts(zkhost);

    // Trigger spout: always start from the latest offset, ignoring stored ZK offsets.
    SpoutConfig triggerConfig = new SpoutConfig(hosts, TOPICS[3], "/" + TOPICS[3], UUID.randomUUID().toString());
    triggerConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    triggerConfig.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
    triggerConfig.ignoreZkOffsets = true;

    // Status spout: identical settings against the status topic.
    SpoutConfig statusConfig = new SpoutConfig(hosts, TOPICS[4], "/" + TOPICS[4], UUID.randomUUID().toString());
    statusConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    statusConfig.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
    statusConfig.ignoreZkOffsets = true;

    TopologyBuilder topology = new TopologyBuilder();
    topology.setSpout("trigger-spout", new KafkaSpout(triggerConfig));
    topology.setSpout("status-spout", new KafkaSpout(statusConfig));

    topology.setBolt("scheduling-bolt", new SchedulingBolt())
            .shuffleGrouping("trigger-spout");
    topology.setBolt("status-bolt", new StatusBolt(), 4)
            .shuffleGrouping("status-spout");
    // Downstream chain is keyed on roadMapId so each road map stays on one task.
    topology.setBolt("execute-code-bolt", new ExecutingBolt()).fieldsGrouping("scheduling-bolt", new Fields("roadMapId"));
    topology.setBolt("provisioning-bolt", new ProvisioningBolt()).fieldsGrouping("execute-code-bolt", new Fields("roadMapId"));
    topology.setBolt("calling-feed-bolt", new CallingFeedBolt()).fieldsGrouping("provisioning-bolt", new Fields("roadMapId"));
    return topology.createTopology();
}
 
开发者ID:ENOW-IJI,项目名称:ENOW-server,代码行数:27,代码来源:DockerSubmitter.java


注:本文中的org.apache.storm.topology.TopologyBuilder类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。