This article collects typical usage examples of the Java method backtype.storm.topology.TopologyBuilder.setSpout. If you are wondering what TopologyBuilder.setSpout does, how to use it, or where to find examples, the curated code samples below may help. You can also read further about the enclosing class, backtype.storm.topology.TopologyBuilder.
Fifteen code examples of TopologyBuilder.setSpout are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
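Before the examples, here is a minimal sketch of the two setSpout overloads that recur throughout them: registering a spout under a component id, and optionally passing a parallelism hint. It assumes the Storm 0.x backtype namespace used throughout this article; TestWordSpout ships with Storm's testing utilities, and the component ids are placeholders chosen for illustration.
import backtype.storm.testing.TestWordSpout;
import backtype.storm.topology.TopologyBuilder;

public class SetSpoutSketch {
    public static void main(String[] args) {
        TopologyBuilder builder = new TopologyBuilder();
        // register a spout under the component id "words"; downstream bolts
        // reference this id in their groupings (e.g. shuffleGrouping("words"))
        builder.setSpout("words", new TestWordSpout());
        // the optional third argument is a parallelism hint (initial executor count)
        builder.setSpout("words-parallel", new TestWordSpout(), 4);
    }
}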
Example 1: test1WriteIndexPattern
import backtype.storm.topology.TopologyBuilder; // import the package/class this method depends on
@Test
public void test1WriteIndexPattern() throws Exception {
    List<Object> doc1 = ImmutableList.of("one", "1", "two", "2", "number", 1);
    List<Object> doc2 = ImmutableList.of("OTP", "Otopeni", "SFO", "San Fran", "number", 2);
    String target = index + "/write-{number}";
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("test-spout-3", new TestSpout(ImmutableList.of(doc2, doc1), new Fields("key1", "val1", "key2",
            "val2", "key3", "number")));
    builder.setBolt("es-bolt-3", new TestBolt(new EsBolt(target, conf))).shuffleGrouping("test-spout-3");
    MultiIndexSpoutStormSuite.run(index + "write-pattern", builder.createTopology(), COMPONENT_HAS_COMPLETED);
    COMPONENT_HAS_COMPLETED.waitFor(1, TimeValue.timeValueSeconds(20));
    Thread.sleep(1000);
    RestUtils.refresh(index);
    assertTrue(RestUtils.exists(index + "/write-1"));
    assertTrue(RestUtils.exists(index + "/write-2"));
    String results = RestUtils.get(index + "/write-1" + "/_search?");
    assertThat(results, containsString("two"));
    results = RestUtils.get(index + "/write-2" + "/_search?");
    assertThat(results, containsString("SFO"));
}
Example 2: main
import backtype.storm.topology.TopologyBuilder; // import the package/class this method depends on
public static void main(String[] args) {
    Config config = new Config();
    HdfsBolt hdfsBolt = makeHdfsBolt();
    KafkaSpout kafkaSpout = makeKafkaSpout(TOPIC, TOPOLOGY_NAME);
    LOG.info("Topology name is {}", TOPOLOGY_NAME);
    TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.setSpout(KAFKA_SPOUT_ID, kafkaSpout, 10);
    topologyBuilder.setBolt(CROP_BOLT_ID, new CropBolt(), 10).shuffleGrouping(KAFKA_SPOUT_ID);
    topologyBuilder.setBolt(SPLIT_FIELDS_BOLT_ID, new SplitFieldsBolt(), 10).shuffleGrouping(CROP_BOLT_ID);
    topologyBuilder.setBolt(STORM_HDFS_BOLT_ID, hdfsBolt, 4).fieldsGrouping(SPLIT_FIELDS_BOLT_ID, new Fields("timestamp", "fieldvalues"));
    if (args != null && args.length > 0) {
        config.setDebug(false);
        config.setNumWorkers(3);
        try {
            StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
        } catch (InvalidTopologyException | AlreadyAliveException | AuthorizationException e) {
            e.printStackTrace();
        }
    }
}
Example 3: testSimpleWriteTopology
import backtype.storm.topology.TopologyBuilder; // import the package/class this method depends on
@Test
public void testSimpleWriteTopology() throws Exception {
    List<String> doc1 = Collections.singletonList("{\"reason\" : \"business\",\"airport\" : \"SFO\"}");
    List<String> doc2 = Collections.singletonList("{\"participants\" : 5,\"airport\" : \"OTP\"}");
    String target = index + "/json-simple-write";
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("test-spout-1", new TestSpout(ImmutableList.of(doc1, doc2), new Fields("json")));
    builder.setBolt("es-bolt-1", new TestBolt(new EsBolt(target, conf))).shuffleGrouping("test-spout-1");
    MultiIndexSpoutStormSuite.run(index + "json-simple", builder.createTopology(), COMPONENT_HAS_COMPLETED);
    COMPONENT_HAS_COMPLETED.waitFor(1, TimeValue.timeValueSeconds(10));
    RestUtils.refresh(index);
    assertTrue(RestUtils.exists(target));
    String results = RestUtils.get(target + "/_search?");
    assertThat(results, containsString("SFO"));
}
Example 4: testSimpleRead
import backtype.storm.topology.TopologyBuilder; // import the package/class this method depends on
@Test
public void testSimpleRead() throws Exception {
    String target = index + "/basic-read";
    RestUtils.touch(index);
    RestUtils.postData(target, "{\"message\" : \"Hello World\",\"message_date\" : \"2014-05-25\"}".getBytes());
    RestUtils.postData(target, "{\"message\" : \"Goodbye World\",\"message_date\" : \"2014-05-25\"}".getBytes());
    RestUtils.refresh(index);
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("es-spout", new TestSpout(new EsSpout(target)));
    builder.setBolt("test-bolt", new CapturingBolt()).shuffleGrouping("es-spout");
    MultiIndexSpoutStormSuite.run(index + "simple", builder.createTopology(), COMPONENT_HAS_COMPLETED);
    COMPONENT_HAS_COMPLETED.waitFor(1, TimeValue.timeValueSeconds(10));
    assertTrue(RestUtils.exists(target));
    String results = RestUtils.get(target + "/_search?");
    assertThat(results, containsString("Hello"));
    assertThat(results, containsString("Goodbye"));
    System.out.println(CapturingBolt.CAPTURED);
    assertThat(CapturingBolt.CAPTURED.size(), is(2));
}
Example 5: main
import backtype.storm.topology.TopologyBuilder; // import the package/class this method depends on
public static void main(String[] args) throws WorkloadException {
    TopologyBuilder builder = new TopologyBuilder();
    BrokerHosts hosts = new ZkHosts("localhost:2181");
    SpoutConfig spoutConfig = new SpoutConfig(hosts, "WordCount", "/" + "WordCount", UUID.randomUUID().toString());
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    spoutConfig.ignoreZkOffsets = true;
    builder.setSpout("spout", new KafkaSpout(spoutConfig));
    builder.setBolt("split", new SplitSentence()).shuffleGrouping("spout");
    builder.setBolt("counter", new CounterBolt(), 3).fieldsGrouping("split", new Fields("wordCountPair"));
    Config conf = new Config();
    conf.setDebug(true);
    conf.setNumWorkers(3);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("kafka-spout", conf, builder.createTopology());
}
Example 6: main
import backtype.storm.topology.TopologyBuilder; // import the package/class this method depends on
public static void main(String[] args) throws WorkloadException {
    TopologyBuilder builder = new TopologyBuilder();
    BrokerHosts hosts = new ZkHosts("localhost:2181");
    SpoutConfig spoutConfig = new SpoutConfig(hosts, "WordCount", "/" + "WordCount", UUID.randomUUID().toString());
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    spoutConfig.ignoreZkOffsets = true;
    builder.setSpout("spout", new KafkaSpout(spoutConfig));
    builder.setBolt("split", new SplitSentence()).shuffleGrouping("spout");
    builder.setBolt("counter", new CounterBolt(), 3).shuffleGrouping("split");
    builder.setBolt("aggregator", new AggregatorBolt(), 1)
            .fieldsGrouping("counter", Utils.DEFAULT_STREAM_ID, new Fields("word"))
            .allGrouping("counter", "tick");
    Config conf = new Config();
    conf.setDebug(true);
    conf.setNumWorkers(3);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("kafka-spout", conf, builder.createTopology());
}
Example 7: main
import backtype.storm.topology.TopologyBuilder; // import the package/class this method depends on
public static void main(String[] args) {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new KafkaSpoutTest(""), 1);
    builder.setBolt("bolt1", new Bolt1(), 2).shuffleGrouping("spout");
    builder.setBolt("bolt2", new Bolt2(), 2).fieldsGrouping("bolt1", new Fields("word"));
    Map<String, Object> conf = new HashMap<>();
    conf.put(Config.TOPOLOGY_WORKERS, 1);
    conf.put(Config.TOPOLOGY_DEBUG, true);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("flume-kafka-storm-integration", conf, builder.createTopology());
    // let the topology run for five minutes, then shut down
    Utils.sleep(1000 * 60 * 5);
    cluster.shutdown();
}
Example 8: run
import backtype.storm.topology.TopologyBuilder; // import the package/class this method depends on
void run(String[] args) {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("word_spout", new TestWordSpout(), 2);
    // "Function" and "Double" appear to be project-specific classes here,
    // not java.lang.Double (which has no no-arg constructor)
    builder.setBolt("tuple_double", new Function(new Double(), new Fields("word1", "word2")), 2).shuffleGrouping("word_spout");
    Map<String, Object> conf = new HashMap<>();
    conf.put(Config.TOPOLOGY_WORKERS, 4);
    conf.put(Config.TOPOLOGY_DEBUG, true);
    if (args.length == 0) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("MockIngest", conf, builder.createTopology());
    } else {
        try {
            StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
Example 9: main
import backtype.storm.topology.TopologyBuilder; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.setSpout("spout", new FileSpout("/tmp/device-data.txt"), 1);
    topologyBuilder.setBolt("parser", new ParserBolt(), 1).shuffleGrouping("spout");
    topologyBuilder.setBolt("tdrCassandra", new TDRCassandraBolt("localhost", "tdr"), 1).shuffleGrouping("parser", "tdrstream");
    FlinkTopology.createTopology(topologyBuilder).execute();
}
Author: PacktPublishing; Project: Practical-Real-time-Processing-and-Analytics; Source: FlinkStormExample.java
Example 10: main
import backtype.storm.topology.TopologyBuilder; // import the package/class this method depends on
public static void main(String[] args) throws AlreadyAliveException,
        InvalidTopologyException {
    TopologyBuilder builder = new TopologyBuilder();
    // note: zks and cFs are assembled here but not used by this snippet
    List<String> zks = new ArrayList<String>();
    zks.add("192.168.41.122");
    List<String> cFs = new ArrayList<String>();
    cFs.add("personal");
    cFs.add("company");
    // set the spout class
    builder.setSpout("spout", new SampleSpout(), 2);
    // set the bolt class
    builder.setBolt("bolt", new StormRedisBolt("192.168.41.122", 2181), 2).shuffleGrouping("spout");
    Config conf = new Config();
    conf.setDebug(true);
    // create an instance of LocalCluster class for
    // executing the topology in local mode.
    LocalCluster cluster = new LocalCluster();
    // "StormRedisTopology" is the name of the submitted topology.
    cluster.submitTopology("StormRedisTopology", conf,
            builder.createTopology());
    try {
        Thread.sleep(10000);
    } catch (Exception exception) {
        System.out.println("Thread interrupted exception : " + exception);
    }
    // kill the StormRedisTopology
    cluster.killTopology("StormRedisTopology");
    // shut down the storm test cluster
    cluster.shutdown();
}
Example 11: configureKafkaSpout
import backtype.storm.topology.TopologyBuilder; // import the package/class this method depends on
public int configureKafkaSpout(TopologyBuilder builder) {
    KafkaSpout kafkaSpout = constructKafkaSpout();
    //int spoutCount = Integer.valueOf(topologyConfig.getProperty("spout.thread.count"));
    //int boltCount = Integer.valueOf(topologyConfig.getProperty("bolt.thread.count"));
    int spoutCount = 1;
    int boltCount = 1;
    builder.setSpout("kafkaSpout", kafkaSpout, spoutCount);
    return boltCount;
}
Example 12: main
import backtype.storm.topology.TopologyBuilder; // import the package/class this method depends on
public static void main(String[] args) throws SQLException {
    ArrayList<String> columnNames = new ArrayList<String>();
    ArrayList<String> columnTypes = new ArrayList<String>();
    // this table must exist in splice:
    // create table testTable (word varchar(100), number int);
    String tableName = "testTable";
    String server = "localhost";
    // add the column names and the respective types to the two arraylists
    columnNames.add("word");
    columnNames.add("number");
    // add the types
    columnTypes.add("varchar (100)");
    columnTypes.add("int");
    TopologyBuilder builder = new TopologyBuilder();
    // set the spout for the topology
    builder.setSpout("spout", new SpliceIntegerSpout(), 10);
    // dump the stream data into splice
    SpliceDumperBolt dumperBolt = new SpliceDumperBolt(server, tableName);
    builder.setBolt("dumperBolt", dumperBolt, 1).shuffleGrouping("spout");
    Config conf = new Config();
    conf.setDebug(true);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("splice-topology", conf, builder.createTopology());
    Utils.sleep(10000);
    cluster.shutdown();
}
Example 13: main
import backtype.storm.topology.TopologyBuilder; // import the package/class this method depends on
public static void main(String[] args) {
    LocalCluster cluster = new LocalCluster();
    /* begin young-define */
    Config conf = new Config();
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new SpoutLocal(), 1);
    builder.setBolt("split", new SplitSentenceLocal(), 1).shuffleGrouping("spout");
    builder.setBolt("count", new WordCountLocal(), 1).fieldsGrouping("split", new Fields("word"));
    /* end young-define */
    // recommended: this line caps the parallelism of every bolt/spout at 1
    conf.put(Config.TOPOLOGY_MAX_TASK_PARALLELISM, 1);
    // submit the topology
    cluster.submitTopology("SequenceTest", conf, builder.createTopology());
    // wait one minute, after which the topology and cluster are stopped;
    // increase this value as needed when debugging
    try {
        Thread.sleep(60000);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
    // kill the topology
    cluster.killTopology("SequenceTest");
    cluster.shutdown();
}
Example 14: main
import backtype.storm.topology.TopologyBuilder; // import the package/class this method depends on
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
    TopologyBuilder builder = new TopologyBuilder();
    LOGGER.info("Starting..");
    builder.setSpout("trade", new DeliveryCheckSpout(), 1);
    builder.setBolt("eligibility", new DeliveryCheckBolt(), 10).shuffleGrouping("trade");
    builder.setBolt("odd", new DeliveryCheckOddBolt(), 10).shuffleGrouping("eligibility",
            "oddstream");
    builder.setBolt("even", new DeliveryCheckEvenBolt(), 10).shuffleGrouping("eligibility",
            "evenstream");
    Config conf = new Config();
    conf.setDebug(false);
    conf.setMaxSpoutPending(5);
    if (args != null && args.length > 0) {
        conf.setNumWorkers(1);
        LOGGER.info("Submitting DeliveryTopology");
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    } else {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("DeliveryTopology", conf, builder.createTopology());
        Utils.sleep(100000000);
        cluster.killTopology("DeliveryTopology");
        cluster.shutdown();
    }
}
Example 15: main
import backtype.storm.topology.TopologyBuilder; // import the package/class this method depends on
public static void main(String[] args) {
    try {
        TopologyBuilder topologyBuilder = new TopologyBuilder();
        topologyBuilder.setSpout("Spout", new EmitMessageSpout(), 1);
        topologyBuilder.setBolt("generate", new ParseLoopBolt(), 1)
                .shuffleGrouping("Spout");
        topologyBuilder.setBolt("Store", new PrintBolt(), 1)
                .shuffleGrouping("generate");
        Config config = new Config();
        config.setDebug(false);
        if (args != null && args.length > 0) {
            config.setNumWorkers(4);
            StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
        } else {
            config.setMaxTaskParallelism(2);
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("test", config, topologyBuilder.createTopology());
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}