

Java TopologyBuilder Class Code Examples

This article collects typical usage examples of the Java class backtype.storm.topology.TopologyBuilder. If you are wondering what exactly the TopologyBuilder class does, how to use it, or where to find usage examples, the hand-picked code examples below should help.


The TopologyBuilder class belongs to the backtype.storm.topology package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Java code examples.
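Before turning to the collected examples, here is a minimal, self-contained sketch of the typical TopologyBuilder workflow: register a spout and a bolt, connect them with a grouping, build the StormTopology with createTopology(), and run it on an in-process LocalCluster. GreetingSpout and PrintBolt are throwaway components written for this sketch only and do not come from any of the projects cited below.

import java.util.Map;

import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.TopologyBuilder;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.topology.base.BaseRichSpout;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;
import backtype.storm.utils.Utils;

public class MinimalTopology {

    // A trivial spout that keeps emitting the same greeting.
    public static class GreetingSpout extends BaseRichSpout {
        private SpoutOutputCollector collector;

        @Override
        public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
            this.collector = collector;
        }

        @Override
        public void nextTuple() {
            Utils.sleep(100);
            collector.emit(new Values("hello storm"));
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            declarer.declare(new Fields("greeting"));
        }
    }

    // A trivial bolt that just prints whatever it receives.
    public static class PrintBolt extends BaseBasicBolt {
        @Override
        public void execute(Tuple input, BasicOutputCollector collector) {
            System.out.println(input.getStringByField("greeting"));
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            // This bolt emits nothing downstream.
        }
    }

    public static void main(String[] args) {
        // Wire the components together; component ids are arbitrary and groupings reference them.
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("greeting-spout", new GreetingSpout(), 1);
        builder.setBolt("print-bolt", new PrintBolt(), 2).shuffleGrouping("greeting-spout");

        Config conf = new Config();
        conf.setDebug(true);

        // Run the assembled StormTopology in-process for a few seconds.
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("minimal-topology", conf, builder.createTopology());
        Utils.sleep(10000);
        cluster.killTopology("minimal-topology");
        cluster.shutdown();
    }
}

On a real cluster you would call StormSubmitter.submitTopology(...) instead of using LocalCluster, as several of the examples below do.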

Example 1: buildAndSubmit

import backtype.storm.topology.TopologyBuilder; // import the required package/class
public void buildAndSubmit() throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    Config config = new Config();
    config.setDebug(true);
    // String nimbusHost = topologyConfig.getProperty("nimbus.host");
    config.put(Config.NIMBUS_HOST, "localhost");

    configureKafkaSpout(builder);
    //configureRouteBolt(builder);
    configurePhoenixTest(builder);

    /*
    builder.setBolt("submitter", new SubmitBolt())
       .shuffleGrouping(ROUTE_BOLT);
    */

    try {
        StormSubmitter.submitTopology("simple-topology", config, builder.createTopology());
    } catch (Exception e) {
        LOG.error("Error submitting Topology", e);
    }
}
 
Developer ID: bucaojit, Project: RealEstate-Streaming, Lines of code: 24, Source file: PhoenixTest.java
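configureKafkaSpout, configureRouteBolt and configurePhoenixTest are private helpers of PhoenixTest whose bodies are not shown on this page. For orientation only, a Kafka spout is usually attached to the builder roughly as in the following sketch based on the old storm-kafka module; the ZooKeeper address, topic name and component ids are placeholders, not values taken from the RealEstate-Streaming project.

import backtype.storm.spout.SchemeAsMultiScheme;
import backtype.storm.topology.TopologyBuilder;
import storm.kafka.KafkaSpout;
import storm.kafka.SpoutConfig;
import storm.kafka.StringScheme;
import storm.kafka.ZkHosts;

public void configureKafkaSpout(TopologyBuilder builder) {
    // ZooKeeper ensemble used by the Kafka brokers (placeholder address).
    ZkHosts zkHosts = new ZkHosts("localhost:2181");

    // Topic name, ZooKeeper root path and consumer id are placeholders.
    SpoutConfig spoutConfig = new SpoutConfig(zkHosts, "real-estate-events", "/kafka-spout", "phoenix-test");
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());

    // Register the spout under an id that downstream bolts reference in their groupings.
    builder.setSpout("kafka-spout", new KafkaSpout(spoutConfig), 1);
}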

Example 2: build

import backtype.storm.topology.TopologyBuilder; // import the required package/class
public static StormTopology build() {

        String json1 = "{\"reason\" : \"business\",\"airport\" : \"SFO\"}";
        String json2 = "{\"participants\" : 5,\"airport\" : \"OTP\"}";

        Map<String, Object> conf = new HashMap<>();
        /*
         * Configuration: https://www.elastic.co/guide/en/elasticsearch/hadoop/current/configuration.html
         */
        conf.put("es.nodes", "192.168.1.101");
        conf.put("es.port", 9200);
        conf.put("es.input.json", "true");
        conf.put("es.batch.size.entries", "100");

        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("json-spout", new StringSpout(json1, json2));
        builder.setBolt("es-bolt", new EsBolt("storm/json-trips", conf)).shuffleGrouping("json-spout");

        return builder.createTopology();
    }
 
Developer ID: asdf2014, Project: yuzhouwan, Lines of code: 21, Source file: CreditCardTopologyBuilder.java
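StringSpout is a small helper class from the yuzhouwan project whose source is not reproduced here. A minimal stand-in that simply emits each supplied JSON string once could look like the sketch below; the "json" output field name is an assumption, since EsBolt with es.input.json set to true only needs the tuple to carry the raw JSON string.

import java.util.Map;

import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichSpout;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;

public class StringSpout extends BaseRichSpout {

    private final String[] messages;
    private SpoutOutputCollector collector;
    private int next = 0;

    public StringSpout(String... messages) {
        this.messages = messages;
    }

    @Override
    public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
        this.collector = collector;
    }

    @Override
    public void nextTuple() {
        // Emit each string once; afterwards this spout goes idle.
        if (next < messages.length) {
            collector.emit(new Values(messages[next++]));
        }
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // The field name is an assumption, not taken from the yuzhouwan source.
        declarer.declare(new Fields("json"));
    }
}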

Example 3: main

import backtype.storm.topology.TopologyBuilder; // import the required package/class
public static void main(String[] args) throws SQLException {

        // tableName is the name of the table in splice to insert records to
        // server is the server instance running splice
        String tableName = "students";
        String server = "localhost";
        TopologyBuilder builder = new TopologyBuilder();

        // set the spout for the topology
        builder.setSpout("seedDataFromMySql", new MySqlSpout());

        // dump the stream data into splice       
        builder.setBolt("dbRowProcessing", new MySqlSpliceBolt(server, tableName), 1).shuffleGrouping("seedDataFromMySql");

        Config conf = new Config();
        conf.setDebug(true);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("mysql-splice-topology", conf, builder.createTopology());
        Utils.sleep(3000);
        cluster.shutdown();
    }
 
Developer ID: splicemachine, Project: splice-community-sample-code, Lines of code: 22, Source file: MySqlToSpliceTopology.java

Example 4: main

import backtype.storm.topology.TopologyBuilder; // import the required package/class
public static void main(String[] args) throws Exception {

        Config conf = new Config();
        int spout_Parallelism_hint = 1;
        int split_Parallelism_hint = 2;
        int count_Parallelism_hint = 2;

        TopologyBuilder builder = new TopologyBuilder();

        builder.setSpout("spout", new RaceSentenceSpout(), spout_Parallelism_hint);
        builder.setBolt("split", new SplitSentence(), split_Parallelism_hint).shuffleGrouping("spout");
        builder.setBolt("count", new WordCount(), count_Parallelism_hint).fieldsGrouping("split", new Fields("word"));
        String topologyName = RaceConfig.JstormTopologyName;

        try {
            StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
            //begin by Young
            
            //end by Young
        } catch (Exception e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }
    }
 
Developer ID: yangliguang, Project: preliminary.demo, Lines of code: 25, Source file: RaceTopology.java
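RaceSentenceSpout, SplitSentence and WordCount are classes of the preliminary.demo project and are not shown on this page. To illustrate why the topology uses fieldsGrouping("split", new Fields("word")), here is a minimal sentence-splitting bolt of the kind SplitSentence represents; it is a sketch under that assumption, not the project's actual implementation.

import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;

public class SplitSentence extends BaseBasicBolt {

    @Override
    public void execute(Tuple input, BasicOutputCollector collector) {
        // Split the incoming sentence on whitespace and emit one tuple per word.
        String sentence = input.getString(0);
        for (String word : sentence.split("\\s+")) {
            collector.emit(new Values(word));
        }
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // The "word" field is what the downstream fieldsGrouping keys on,
        // so all occurrences of the same word reach the same WordCount task.
        declarer.declare(new Fields("word"));
    }
}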

Example 5: testSimpleWriteTopology

import backtype.storm.topology.TopologyBuilder; // import the required package/class
@Test
public void testSimpleWriteTopology() throws Exception {
    List doc1 = Collections.singletonList(ImmutableMap.of("one", 1, "two", 2));
    List doc2 = Collections.singletonList(ImmutableMap.of("OTP", "Otopeni", "SFO", "San Fran"));

    String target = index + "/simple-write";
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("test-spout-1", new TestSpout(ImmutableList.of(doc2, doc1), new Fields("doc")));
    builder.setBolt("es-bolt-1", new TestBolt(new EsBolt(target, conf))).shuffleGrouping("test-spout-1");

    MultiIndexSpoutStormSuite.run(index + "simple", builder.createTopology(), COMPONENT_HAS_COMPLETED);

    COMPONENT_HAS_COMPLETED.waitFor(1, TimeValue.timeValueSeconds(10));

    RestUtils.refresh(index);
    assertTrue(RestUtils.exists(target));
    String results = RestUtils.get(target + "/_search?");
    assertThat(results, containsString("SFO"));
}
 
Developer ID: xushjie1987, Project: es-hadoop-v2.2.0, Lines of code: 20, Source file: AbstractStormSimpleBoltTests.java

Example 6: testSimpleWriteTopology

import backtype.storm.topology.TopologyBuilder; // import the required package/class
@Test
public void testSimpleWriteTopology() throws Exception {
    List doc1 = Collections.singletonList("{\"reason\" : \"business\",\"airport\" : \"SFO\"}");
    List doc2 = Collections.singletonList("{\"participants\" : 5,\"airport\" : \"OTP\"}");

    String target = index + "/json-simple-write";
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("test-spout-1", new TestSpout(ImmutableList.of(doc1, doc2), new Fields("json")));
    builder.setBolt("es-bolt-1", new TestBolt(new EsBolt(target, conf))).shuffleGrouping("test-spout-1");

    MultiIndexSpoutStormSuite.run(index + "json-simple", builder.createTopology(), COMPONENT_HAS_COMPLETED);

    COMPONENT_HAS_COMPLETED.waitFor(1, TimeValue.timeValueSeconds(10));

    RestUtils.refresh(index);
    assertTrue(RestUtils.exists(target));
    String results = RestUtils.get(target + "/_search?");
    assertThat(results, containsString("SFO"));
}
 
Developer ID: xushjie1987, Project: es-hadoop-v2.2.0, Lines of code: 20, Source file: AbstractStormJsonSimpleBoltTests.java

Example 7: testSimpleRead

import backtype.storm.topology.TopologyBuilder; // import the required package/class
@Test
public void testSimpleRead() throws Exception {
    String target = index + "/basic-read";

    RestUtils.touch(index);
    RestUtils.postData(target, "{\"message\" : \"Hello World\",\"message_date\" : \"2014-05-25\"}".getBytes());
    RestUtils.postData(target, "{\"message\" : \"Goodbye World\",\"message_date\" : \"2014-05-25\"}".getBytes());
    RestUtils.refresh(index);

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("es-spout", new TestSpout(new EsSpout(target)));
    builder.setBolt("test-bolt", new CapturingBolt()).shuffleGrouping("es-spout");

    MultiIndexSpoutStormSuite.run(index + "simple", builder.createTopology(), COMPONENT_HAS_COMPLETED);

    COMPONENT_HAS_COMPLETED.waitFor(1, TimeValue.timeValueSeconds(10));

    assertTrue(RestUtils.exists(target));
    String results = RestUtils.get(target + "/_search?");
    assertThat(results, containsString("Hello"));
    assertThat(results, containsString("Goodbye"));

    System.out.println(CapturingBolt.CAPTURED);
    assertThat(CapturingBolt.CAPTURED.size(), is(2));
}
 
Developer ID: xushjie1987, Project: es-hadoop-v2.2.0, Lines of code: 26, Source file: AbstractSpoutSimpleRead.java

Example 8: testSimpleRead

import backtype.storm.topology.TopologyBuilder; // import the required package/class
@Test
public void testSimpleRead() throws Exception {
    String target = index + "/basic-read";

    RestUtils.touch(index);
    RestUtils.postData(target, "{\"message\" : \"Hello World\",\"message_date\" : \"2014-05-25\"}".getBytes());
    RestUtils.postData(target, "{\"message\" : \"Goodbye World\",\"message_date\" : \"2014-05-25\"}".getBytes());
    RestUtils.refresh(index);

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("es-spout", new TestSpout(new EsSpout(target, "?q=*")));
    builder.setBolt("test-bolt", new CapturingBolt()).shuffleGrouping("es-spout");

    MultiIndexSpoutStormSuite.run(index + "simple", builder.createTopology(), COMPONENT_HAS_COMPLETED);

    COMPONENT_HAS_COMPLETED.waitFor(1, TimeValue.timeValueSeconds(10));

    assertTrue(RestUtils.exists(target));
    String results = RestUtils.get(target + "/_search?");
    assertThat(results, containsString("Hello"));
    assertThat(results, containsString("Goodbye"));

    System.out.println(CapturingBolt.CAPTURED);
    assertThat(CapturingBolt.CAPTURED.size(), is(2));
}
 
Developer ID: xushjie1987, Project: es-hadoop-v2.2.0, Lines of code: 26, Source file: AbstractSpoutSimpleReadWithQuery.java

Example 9: testMultiIndexRead

import backtype.storm.topology.TopologyBuilder; // import the required package/class
@Test
public void testMultiIndexRead() throws Exception {

    counter++;

    RestUtils.postData(index + "/foo",
            "{\"message\" : \"Hello World\",\"message_date\" : \"2014-05-25\"}".getBytes());
    RestUtils.postData(index + "/bar",
            "{\"message\" : \"Goodbye World\",\"message_date\" : \"2014-05-25\"}".getBytes());
    RestUtils.refresh(index);

    String target = "_all/foo";
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("es-spout", new TestSpout(new EsSpout(target)));
    builder.setBolt("test-bolt", new CapturingBolt()).shuffleGrouping("es-spout");

    MultiIndexSpoutStormSuite.run(index + "multi", builder.createTopology(), COMPONENT_HAS_COMPLETED);

    COMPONENT_HAS_COMPLETED.waitFor(1, TimeValue.timeValueSeconds(10));

    String results = RestUtils.get(target + "/_search?");
    assertThat(results, containsString("Hello"));

    assertThat(CapturingBolt.CAPTURED.size(), greaterThanOrEqualTo(counter));
    System.out.println(CapturingBolt.CAPTURED);
}
 
Developer ID: xushjie1987, Project: es-hadoop-v2.2.0, Lines of code: 27, Source file: AbstractSpoutMultiIndexRead.java

Example 10: test2WriteWithId

import backtype.storm.topology.TopologyBuilder; // import the required package/class
@Test
public void test2WriteWithId() throws Exception {
    List doc1 = ImmutableList.of("one", "fo1", "two", "fo2", "number", 1);
    List doc2 = ImmutableList.of("OTP", "Otopeni", "SFO", "San Fran", "number", 2);

    Map localCfg = new LinkedHashMap(conf);
    localCfg.put(ConfigurationOptions.ES_MAPPING_ID, "number");

    String target = index + "/id-write";
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("test-spout-2", new TestSpout(ImmutableList.of(doc2, doc1), new Fields("key1", "valo1", "key2",
            "valo2", "key3", "number")));
    builder.setBolt("es-bolt-2", new TestBolt(new EsBolt(target, localCfg))).shuffleGrouping("test-spout-2");

    MultiIndexSpoutStormSuite.run(index + "id-write", builder.createTopology(), COMPONENT_HAS_COMPLETED);

    COMPONENT_HAS_COMPLETED.waitFor(1, TimeValue.timeValueSeconds(10));

    RestUtils.refresh(index);
    Thread.sleep(1000);
    assertTrue(RestUtils.exists(target + "/1"));
    assertTrue(RestUtils.exists(target + "/2"));

    String results = RestUtils.get(target + "/_search?");
    assertThat(results, containsString("two"));
}
 
Developer ID: xushjie1987, Project: es-hadoop-v2.2.0, Lines of code: 27, Source file: AbstractStormIdMappingBoltTests.java

Example 11: test1WriteIndexPattern

import backtype.storm.topology.TopologyBuilder; // import the required package/class
@Test
public void test1WriteIndexPattern() throws Exception {
    List doc1 = ImmutableList.of("one", "1", "two", "2", "number", 1);
    List doc2 = ImmutableList.of("OTP", "Otopeni", "SFO", "San Fran", "number", 2);

    String target = index + "/write-{number}";
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("test-spout-3", new TestSpout(ImmutableList.of(doc2, doc1), new Fields("key1", "val1", "key2",
            "val2", "key3", "number")));
    builder.setBolt("es-bolt-3", new TestBolt(new EsBolt(target, conf))).shuffleGrouping("test-spout-3");

    MultiIndexSpoutStormSuite.run(index + "write-pattern", builder.createTopology(), COMPONENT_HAS_COMPLETED);

    COMPONENT_HAS_COMPLETED.waitFor(1, TimeValue.timeValueSeconds(20));

    Thread.sleep(1000);
    RestUtils.refresh(index);
    assertTrue(RestUtils.exists(index + "/write-1"));
    assertTrue(RestUtils.exists(index + "/write-2"));

    String results = RestUtils.get(index + "/write-1" + "/_search?");
    assertThat(results, containsString("two"));

    results = RestUtils.get(index + "/write-2" + "/_search?");
    assertThat(results, containsString("SFO"));
}
 
Developer ID: xushjie1987, Project: es-hadoop-v2.2.0, Lines of code: 27, Source file: AbstractStormIndexPatternBoltTests.java

Example 12: buildAndSubmit

import backtype.storm.topology.TopologyBuilder; // import the required package/class
public void buildAndSubmit() throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    Config config = new Config();
    config.setDebug(true);
    // String nimbusHost = topologyConfig.getProperty("nimbus.host");
    config.put(Config.NIMBUS_HOST, "localhost");

    configureKafkaSpout(builder);
    configureRouteBolt(builder);
    configureInsertBolt(builder);

    //builder.setBolt("submitter", new SubmitBolt())
    //   .shuffleGrouping(ROUTE_BOLT);

    try {
        StormSubmitter.submitTopology("realestate-topology", config, builder.createTopology());
    } catch (Exception e) {
        LOG.error("Error submitting Topology", e);
    }
}
 
Developer ID: bucaojit, Project: RealEstate-Streaming, Lines of code: 22, Source file: KafkaPhoenixTopology.java

Example 13: main

import backtype.storm.topology.TopologyBuilder; // import the required package/class
public static void main(String[] args) {
    Config config = new Config();

    HdfsBolt hdfsBolt = makeHdfsBolt();
    KafkaSpout kafkaSpout = makeKafkaSpout(TOPIC, TOPOLOGY_NAME);

    LOG.info("Topology name is {}", TOPOLOGY_NAME);

    TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.setSpout(KAFKA_SPOUT_ID, kafkaSpout, 10);
    topologyBuilder.setBolt(CROP_BOLT_ID, new CropBolt(), 10).shuffleGrouping(KAFKA_SPOUT_ID);
    topologyBuilder.setBolt(SPLIT_FIELDS_BOLT_ID, new SplitFieldsBolt(), 10).shuffleGrouping(CROP_BOLT_ID);
    topologyBuilder.setBolt(STORM_HDFS_BOLT_ID, hdfsBolt, 4).fieldsGrouping(SPLIT_FIELDS_BOLT_ID, new Fields("timestamp", "fieldvalues"));

    if (args != null && args.length > 0) {
        config.setDebug(false);
        config.setNumWorkers(3);

        try {
            StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
        } catch (InvalidTopologyException | AlreadyAliveException | AuthorizationException e) {
            e.printStackTrace();
        }
    }
}
 
Developer ID: lovelock, Project: storm-demo, Lines of code: 26, Source file: LogStatisticsTopology.java
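Example 13 only submits the topology when a name is passed on the command line; without arguments it silently does nothing. A common pattern for local testing, not part of the original LogStatisticsTopology, is to fall back to an in-process LocalCluster, for example via a small helper like this sketch:

import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.generated.StormTopology;
import backtype.storm.utils.Utils;

public final class LocalRunner {

    // Runs a topology in-process for the given number of milliseconds, then tears it down.
    public static void runLocally(String name, Config config, StormTopology topology, long runtimeMs) {
        config.setDebug(true);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(name, config, topology);
        Utils.sleep(runtimeMs);
        cluster.killTopology(name);
        cluster.shutdown();
    }
}

In the example's main method, an else branch could then call something like runLocally(TOPOLOGY_NAME, config, topologyBuilder.createTopology(), 60000).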

Example 14: build

import backtype.storm.topology.TopologyBuilder; // import the required package/class
public StormTopology build(StreamingApp app) throws Exception {
  SpringSpout eventsimSpout = new SpringSpout("eventsimSpout", spoutFields);
  SpringBolt collectionPerTimeFrameSolrBolt = new SpringBolt("collectionPerTimeFrameSolrBoltAction",
      app.tickRate("collectionPerTimeFrameSolrBoltAction"));

  // Send all docs for the same hash range to the same bolt instance,
  // which allows us to use a streaming approach to send docs to the leader
  int numShards = Integer.parseInt(String.valueOf(app.getStormConfig().get("spring.eventsimNumShards")));
  HashRangeGrouping hashRangeGrouping = new HashRangeGrouping(app.getStormConfig(), numShards);
  int tasksPerShard = hashRangeGrouping.getNumShards()*2;

  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout("eventsimSpout", eventsimSpout, app.parallelism("eventsimSpout"));
  builder.setBolt("collectionPerTimeFrameSolrBolt", collectionPerTimeFrameSolrBolt, tasksPerShard)
         .customGrouping("eventsimSpout", hashRangeGrouping);

  return builder.createTopology();
}
 
Developer ID: lucidworks, Project: storm-solr, Lines of code: 19, Source file: EventsimTopology.java
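HashRangeGrouping comes from the storm-solr project and is not reproduced here; like any custom grouping passed to customGrouping(...), it implements backtype.storm.grouping.CustomStreamGrouping. To illustrate that contract (this is not the storm-solr implementation), here is a simple grouping that routes each tuple by the hash of its first field, so equal keys always reach the same bolt task:

import java.util.Collections;
import java.util.List;

import backtype.storm.generated.GlobalStreamId;
import backtype.storm.grouping.CustomStreamGrouping;
import backtype.storm.task.WorkerTopologyContext;

public class FirstFieldHashGrouping implements CustomStreamGrouping {

    private List<Integer> targetTasks;

    @Override
    public void prepare(WorkerTopologyContext context, GlobalStreamId stream, List<Integer> targetTasks) {
        // Storm tells the grouping which downstream task ids it may choose from.
        this.targetTasks = targetTasks;
    }

    @Override
    public List<Integer> chooseTasks(int taskId, List<Object> values) {
        // Route by the hash of the first tuple value so equal keys always hit the same task.
        int bucket = Math.abs(values.get(0).hashCode() % targetTasks.size());
        return Collections.singletonList(targetTasks.get(bucket));
    }
}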

Example 15: configureESBolts

import backtype.storm.topology.TopologyBuilder; // import the required package/class
private void configureESBolts(TopologyBuilder builder, Config config) {
	HashMap<String, Object> esConfig = new HashMap<String, Object>();
	esConfig.put(ESIndexBolt.ES_CLUSTER_NAME, topologyConfig.getProperty(ESIndexBolt.ES_CLUSTER_NAME));
	esConfig.put(ESIndexBolt.ES_NODES, topologyConfig.getProperty(ESIndexBolt.ES_NODES));
	esConfig.put(ESIndexBolt.ES_SHIELD_ENABLED, topologyConfig.getProperty(ESIndexBolt.ES_SHIELD_ENABLED));
	esConfig.put(ESIndexBolt.ES_SHIELD_USER, topologyConfig.getProperty(ESIndexBolt.ES_SHIELD_USER));
	esConfig.put(ESIndexBolt.ES_SHIELD_PASS, topologyConfig.getProperty(ESIndexBolt.ES_SHIELD_PASS));
	esConfig.put(ESIndexBolt.ES_INDEX_NAME, topologyConfig.getProperty(ESIndexBolt.ES_INDEX_NAME));
	esConfig.put(ESIndexBolt.ES_INDEX_TYPE, topologyConfig.getProperty(ESIndexBolt.ES_INDEX_TYPE));
	esConfig.put(ESIndexBolt.ES_ASYNC_ENABLED, topologyConfig.getProperty(ESIndexBolt.ES_ASYNC_ENABLED));
	config.put("es.conf", esConfig);
	ESIndexBolt esBolt = new ESIndexBolt().withConfigKey("es.conf");
	final int boltThreads = Integer.valueOf(topologyConfig.getProperty("bolt.ESIndexBolt.threads"));

	builder.setBolt(ESINDEX_BOLT_ID, esBolt, boltThreads).shuffleGrouping(KAFKA_SPOUT_ID).setDebug(DEBUG);
}
 
Developer ID: desp0916, Project: LearnStorm, Lines of code: 17, Source file: LogAnalyzer.java
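The topologyConfig field used throughout Example 15 (and in Examples 1 and 12) is a java.util.Properties object whose initialization is not shown on this page. A plausible way to populate it, assuming the settings live in a properties file on the classpath (the file name below is an assumption, not taken from the LearnStorm project), is:

import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

// Loads topology settings from a classpath resource; the resource name is an assumption.
private static Properties loadTopologyConfig() throws IOException {
    Properties props = new Properties();
    try (InputStream in = LogAnalyzer.class.getResourceAsStream("/loganalyzer.properties")) {
        props.load(in);
    }
    return props;
}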


Note: The backtype.storm.topology.TopologyBuilder class examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please consult each project's license before redistributing or using the code, and do not republish this article without permission.