This article collects typical usage examples of the Java method org.apache.storm.StormSubmitter.submitTopology. If you are unsure what StormSubmitter.submitTopology does, how to call it, or where to find real-world usages, the curated code examples below may help. You can also read further about the enclosing class, org.apache.storm.StormSubmitter.
The following presents 15 code examples of StormSubmitter.submitTopology, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java examples.
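Before the examples, it may help to see the pattern most of them share: build a StormTopology with TopologyBuilder, then pass it to StormSubmitter.submitTopology for a real cluster or to LocalCluster.submitTopology for in-process testing. The following is a minimal sketch of that pattern, not taken from any of the examples below; the LogBolt class and the topology names are illustrative placeholders, while TestWordSpout is the stock test spout that Example 10 also uses.
import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.testing.TestWordSpout;
import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.topology.base.BaseBasicBolt;
import org.apache.storm.tuple.Tuple;

public class SubmitTopologySketch {

    // Trivial sink bolt that just logs each word it receives.
    public static class LogBolt extends BaseBasicBolt {
        @Override
        public void execute(Tuple tuple, BasicOutputCollector collector) {
            System.out.println("received: " + tuple.getString(0));
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            // No output stream; this bolt is a sink.
        }
    }

    public static void main(String[] args) throws Exception {
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("word", new TestWordSpout(), 1);
        builder.setBolt("log", new LogBolt(), 2).shuffleGrouping("word");

        Config conf = new Config();
        if (args.length > 0) {
            // Remote submission: the first argument is used as the topology name.
            conf.setNumWorkers(2);
            StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
        } else {
            // Local testing: run in-process for a few seconds, then shut down.
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("local-test", conf, builder.createTopology());
            Thread.sleep(10000);
            cluster.killTopology("local-test");
            cluster.shutdown();
        }
    }
}
Note that StormSubmitter.submitTopology throws AlreadyAliveException if a topology with the same name is already running, which is why several of the examples below catch it explicitly.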
Example 1: start
import org.apache.storm.StormSubmitter; // import the package/class the method depends on
private void start(StormTopology topology, boolean runAsLocal) throws Exception {
    Config conf = new Config();
    conf.put(Constants.StormConfigKey.FULL_SPLITTER_TOPOLOGY_ID, fullSplitterTopologyId);
    conf.put(Constants.StormConfigKey.FULL_PULLER_TOPOLOGY_ID, fullPullerTopologyId);
    conf.put(Constants.StormConfigKey.ZKCONNECT, this.zkConnect);
    conf.setMessageTimeoutSecs(3600);
    conf.setMaxSpoutPending(30);
    conf.setDebug(true);
    conf.setNumWorkers(1);
    if (runAsLocal) {
        conf.setMaxTaskParallelism(3);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(topologyName, conf, topology);
    } else {
        StormSubmitter.submitTopology(topologyName, conf, topology);
    }
}
Example 2: main
import org.apache.storm.StormSubmitter; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setMaxSpoutPending(20);
    LocalDRPC drpc = new LocalDRPC();
    if (args.length == 0) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("CountryCount", conf, buildTopology(drpc));
        Thread.sleep(2000);
        for (int i = 0; i < 100; i++) {
            System.out.println("Result - " + drpc.execute("Count", "Japan India Europe"));
            Thread.sleep(1000);
        }
    } else {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[0], conf, buildTopology(null));
        Thread.sleep(2000);
        DRPCClient client = new DRPCClient(conf, "RRPC-Server", 1234);
        System.out.println(client.execute("Count", "Japan India Europe"));
    }
}
Example 3: main
import org.apache.storm.StormSubmitter; // import the package/class the method depends on
public static void main(String[] args) throws AlreadyAliveException,
        InvalidTopologyException {
    // create an instance of the TopologyBuilder class
    TopologyBuilder builder = new TopologyBuilder();
    // set the spout class
    builder.setSpout("SampleSpout", new SampleSpout(), 2);
    // set the bolt class
    builder.setBolt("SampleBolt", new SampleBolt(), 4).shuffleGrouping(
            "SampleSpout");
    Config conf = new Config();
    conf.setNumWorkers(3);
    // This statement submits the topology to the remote cluster.
    // args[0] = name of the topology
    try {
        StormSubmitter.submitTopology(args[0], conf,
                builder.createTopology());
    } catch (AlreadyAliveException alreadyAliveException) {
        System.out.println(alreadyAliveException);
    } catch (InvalidTopologyException invalidTopologyException) {
        System.out.println(invalidTopologyException);
    } catch (AuthorizationException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
}
Example 4: run
import org.apache.storm.StormSubmitter; // import the package/class the method depends on
public static int run(String[] args) throws Exception {
    CommandLineParser parser = new BasicParser();
    Options options = getCommonRequiredOptions();
    CommandLine cmd = parser.parse(options, args);
    Map<String, String> dsConf = getAccumuloDataStoreConf(cmd);
    String featureName = cmd.getOptionValue(FEATURE_NAME);
    SimpleFeatureType featureType = DataUtilities.createType(featureName, "geom:Point:srid=4326");
    DataStore ds = DataStoreFinder.getDataStore(dsConf);
    ds.createSchema(featureType);
    TopologyBuilder topologyBuilder = new TopologyBuilder();
    String topic = cmd.getOptionValue(TOPIC);
    String groupId = topic;
    dsConf.put(OSMIngest.FEATURE_NAME, featureName);
    OSMKafkaSpout OSMKafkaSpout = new OSMKafkaSpout(dsConf, groupId, topic);
    topologyBuilder.setSpout("Spout", OSMKafkaSpout, 10).setNumTasks(10);
    OSMKafkaBolt OSMKafkaBolt = new OSMKafkaBolt(dsConf, groupId, topic);
    topologyBuilder.setBolt("Bolt", OSMKafkaBolt, 20).shuffleGrouping("Spout");
    Config stormConf = new Config();
    stormConf.setNumWorkers(10);
    stormConf.setDebug(true);
    StormSubmitter.submitTopology(topic, stormConf, topologyBuilder.createTopology());
    return 0;
}
Example 5: buildAndSubmit
import org.apache.storm.StormSubmitter; // import the package/class the method depends on
public void buildAndSubmit() throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    Config config = new Config();
    config.setDebug(true);
    // String nimbusHost = topologyConfig.getProperty("nimbus.host");
    config.put(Config.NIMBUS_HOST, "localhost");
    configureKafkaSpout(builder);
    //configureRouteBolt(builder);
    configurePhoenixTest(builder);
    /*
    builder.setBolt("submitter", new SubmitBolt())
           .shuffleGrouping(ROUTE_BOLT);
    */
    try {
        StormSubmitter.submitTopology("simple-topology", config, builder.createTopology());
    } catch (Exception e) {
        LOG.error("Error submitting topology", e);
    }
}
Example 6: buildAndSubmit
import org.apache.storm.StormSubmitter; // import the package/class the method depends on
public void buildAndSubmit() throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    Config config = new Config();
    config.setDebug(true);
    // String nimbusHost = topologyConfig.getProperty("nimbus.host");
    config.put(Config.NIMBUS_HOST, "localhost");
    configureKafkaSpout(builder);
    configureRouteBolt(builder);
    configureInsertBolt(builder);
    //builder.setBolt("submitter", new SubmitBolt())
    //       .shuffleGrouping(ROUTE_BOLT);
    try {
        StormSubmitter.submitTopology("realestate-topology", config, builder.createTopology());
    } catch (Exception e) {
        LOG.error("Error submitting topology", e);
    }
}
Example 7: main
import org.apache.storm.StormSubmitter; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
    String name = "fast-word-count-topology";
    if (args != null && args.length > 0) {
        name = args[0];
    }
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new FastRandomSentenceSpout(), 1);
    builder.setBolt("split", new SplitSentence(), 2).shuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), 2).fieldsGrouping("split", new Fields("word"));
    Config conf = new Config();
    StormSubmitter.submitTopology(name, conf, builder.createTopology());
}
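The SplitSentence and WordCount bolts referenced above come from the example's own project and are not shown here. As a rough sketch of why the fieldsGrouping on "word" matters, a count bolt compatible with this wiring might look roughly like the following (class name and field handling are assumptions for illustration, not the project's actual code).
import java.util.HashMap;
import java.util.Map;
import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseBasicBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;

// Illustrative count bolt: fieldsGrouping("split", new Fields("word")) guarantees that
// all tuples with the same "word" value reach the same task, so this per-task map
// holds the complete count for each word the task owns.
public class WordCountSketch extends BaseBasicBolt {
    private final Map<String, Integer> counts = new HashMap<>();

    @Override
    public void execute(Tuple tuple, BasicOutputCollector collector) {
        String word = tuple.getStringByField("word");
        int count = counts.merge(word, 1, Integer::sum);
        collector.emit(new Values(word, count));
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("word", "count"));
    }
}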
Example 8: main
import org.apache.storm.StormSubmitter; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("integer", new RandomIntegerSpout(), 1);
    builder.setBolt("slidingsum", new SlidingWindowSumBolt()
            .withWindow(BaseWindowedBolt.Count.of(30), BaseWindowedBolt.Count.of(10)), 1)
            .shuffleGrouping("integer");
    builder.setBolt("tumblingavg", new TumblingWindowAvgBolt()
            .withTumblingWindow(BaseWindowedBolt.Count.of(3)), 1)
            .shuffleGrouping("slidingsum");
    builder.setBolt("printer", new PrinterBolt(), 1)
            .shuffleGrouping("tumblingavg");
    Config conf = new Config();
    conf.setDebug(true);
    String topoName = "test";
    if (args != null && args.length > 0) {
        topoName = args[0];
    }
    StormSubmitter.submitTopology(topoName, conf, builder.createTopology());
}
Example 9: main
import org.apache.storm.StormSubmitter; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
    if (args.length != 1) {
        throw new RuntimeException("Specify topology name");
    }
    TopologyBuilder builder = new TopologyBuilder();
    int spouts = 2;
    int bolts = 2;
    builder.setSpout("word", new AckingTestWordSpout(), spouts);
    builder.setBolt("exclaim1", new ExclamationBolt(), bolts)
            .shuffleGrouping("word");
    Config conf = new Config();
    conf.setDebug(true);
    // Put an arbitrarily large number here if you don't want to slow the topology down.
    conf.setMaxSpoutPending(1000 * 1000 * 1000);
    // To enable acking, set the number of ackers to at least one.
    conf.setNumAckers(1);
    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");
    // Set the number of workers or stream managers.
    conf.setNumWorkers(2);
    StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
}
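Setting the number of ackers only switches the tracking machinery on; for at-least-once delivery the spout must emit tuples with message ids, and the bolts must anchor their emits and ack (or fail) each input. The ExclamationBolt used above is not shown in this example, so the following is only a hedged sketch of what the bolt side of acking generally looks like (class and field names are illustrative, and it assumes the spout emits a single string field).
import java.util.Map;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;

// Illustrative acking bolt: each output tuple is anchored to its input, and the
// input is acked (or failed) so the topology's ackers can track its fate.
public class AckingExclamationBolt extends BaseRichBolt {
    private OutputCollector collector;

    @Override
    public void prepare(Map topoConf, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
    }

    @Override
    public void execute(Tuple input) {
        try {
            // Anchored emit: ties the new tuple to the input for failure tracking.
            collector.emit(input, new Values(input.getString(0) + "!!!"));
            collector.ack(input);
        } catch (Exception e) {
            // Failing the input causes the spout to replay it (at-least-once semantics).
            collector.fail(input);
        }
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("word"));
    }
}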
Example 10: main
import org.apache.storm.StormSubmitter; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("word0", new TestWordSpout(), 2);
    builder.setSpout("word1", new TestWordSpout(), 2);
    builder.setSpout("word2", new TestWordSpout(), 2);
    builder.setBolt("exclaim1", new ExclamationBolt(), 2)
            .shuffleGrouping("word0")
            .shuffleGrouping("word1")
            .shuffleGrouping("word2");
    Config conf = new Config();
    conf.setDebug(true);
    conf.setMaxSpoutPending(10);
    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");
    if (args != null && args.length > 0) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } else {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("test", conf, builder.createTopology());
        Utils.sleep(10000);
        cluster.killTopology("test");
        cluster.shutdown();
    }
}
Example 11: main
import org.apache.storm.StormSubmitter; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    DRPCSpout drpcSpout = new DRPCSpout("simpledrpc");
    builder.setSpout("drpc-input", drpcSpout, 1);
    builder.setBolt("simple", new SimpleDRPC(), 2)
            .noneGrouping("drpc-input");
    builder.setBolt("return", new ReturnResults(), 1)
            .noneGrouping("simple");
    Config conf = new Config();
    conf.setDebug(true);
    conf.setMaxTaskParallelism(1);
    try {
        StormSubmitter.submitTopology("drpc-q", conf, builder.createTopology());
    } catch (Exception e) {
        e.printStackTrace();
    }
}
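Once this topology is running on a cluster, the "simpledrpc" function is invoked through a DRPC server rather than by talking to the topology directly. Below is a hedged sketch of the client side; the host name and argument are placeholders, and 3772 is assumed to be the DRPC server's port (Storm's default).
import org.apache.storm.Config;
import org.apache.storm.utils.DRPCClient;

public class SimpleDrpcClientSketch {
    public static void main(String[] args) throws Exception {
        Config conf = new Config();
        // Same constructor style as in Example 2: (conf, host, port).
        DRPCClient client = new DRPCClient(conf, "drpc-server-host", 3772);
        // "simpledrpc" must match the function name passed to DRPCSpout above.
        System.out.println(client.execute("simpledrpc", "hello"));
        client.close();
    }
}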
Example 12: main
import org.apache.storm.StormSubmitter; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    DRPCSpout drpcSpout = new DRPCSpout("simplepydrpc");
    builder.setSpout("drpc-input", drpcSpout, 1);
    builder.setBolt("simple", new SimpleDRPC(), 2)
            .noneGrouping("drpc-input");
    builder.setBolt("return", new ReturnResults(), 1)
            .noneGrouping("simple");
    Config conf = new Config();
    conf.setDebug(true);
    conf.setMaxTaskParallelism(1);
    try {
        StormSubmitter.submitTopology("drpc-py", conf, builder.createTopology());
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 13: submit
import org.apache.storm.StormSubmitter; // import the package/class the method depends on
/** Submits the topology under a specific name **/
protected int submit(String name, Config conf, TopologyBuilder builder) {
    // register Metadata for serialization with FieldsSerializer
    Config.registerSerialization(conf, Metadata.class);
    if (isLocal) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(name, conf, builder.createTopology());
        if (ttl != -1) {
            Utils.sleep(ttl * 1000);
            cluster.shutdown();
        }
    } else {
        try {
            StormSubmitter.submitTopology(name, conf,
                    builder.createTopology());
        } catch (Exception e) {
            e.printStackTrace();
            return -1;
        }
    }
    return 0;
}
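Config.registerSerialization registers the class with Kryo so that Metadata instances can cross worker boundaries without falling back to slower Java serialization. A minimal sketch of the same registration on a hypothetical value class follows (MyValue is not part of this example).
import java.io.Serializable;
import org.apache.storm.Config;

public class SerializationConfigSketch {

    // Hypothetical value class that tuples will carry between workers.
    public static class MyValue implements Serializable {
        public String key;
        public long count;
    }

    public static Config buildConf() {
        Config conf = new Config();
        // Register the class with Kryo, as Example 13 does for Metadata.
        Config.registerSerialization(conf, MyValue.class);
        // Optional: fail fast if a tuple carries a class that was never registered.
        conf.setFallBackOnJavaSerialization(false);
        return conf;
    }
}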
Example 14: SetRemoteTopology
import org.apache.storm.StormSubmitter; // import the package/class the method depends on
public static void SetRemoteTopology() throws Exception {
    String streamName = (String) conf.get(Config.TOPOLOGY_NAME);
    if (streamName == null) {
        String[] className = Thread.currentThread().getStackTrace()[1].getClassName().split("\\.");
        streamName = className[className.length - 1];
    }
    TopologyBuilder builder = new TopologyBuilder();
    int spout_Parallelism_hint = Utils.getInt(conf.get(TOPOLOGY_SPOUT_PARALLELISM_HINT), 1);
    int bolt_Parallelism_hint = Utils.getInt(conf.get(TOPOLOGY_BOLT_PARALLELISM_HINT), 2);
    builder.setSpout("spout", new TestSpout(), spout_Parallelism_hint);
    BoltDeclarer boltDeclarer = builder.setBolt("bolt", new TestBolt(), bolt_Parallelism_hint);
    // localFirstGrouping is only for jstorm
    // boltDeclarer.localFirstGrouping(SequenceTopologyDef.SEQUENCE_SPOUT_NAME);
    boltDeclarer.shuffleGrouping("spout");
    // .addConfiguration(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 60);
    StormSubmitter.submitTopology(streamName, conf, builder.createTopology());
}
Example 15: main
import org.apache.storm.StormSubmitter; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
    String mode = "";
    if (args.length > 0) {
        mode = args[0];
    }
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("sentence-spout", new FixedSentenceSpout());
    // SentenceSpout --> SplitSentenceBolt
    builder.setBolt("split-bolt", new SplitSentenceBolt()).shuffleGrouping("sentence-spout");
    // SplitSentenceBolt --> WordCountBolt
    builder.setBolt("count-bolt", new WordCountBolt()).fieldsGrouping("split-bolt", new Fields("word"));
    // WordCountBolt --> DisplayBolt
    builder.setBolt("display-bolt", new DisplayBolt()).globalGrouping("count-bolt");
    Config config = new Config();
    if (mode.equals("cluster")) {
        System.out.println("submitting on cluster mode");
        StormSubmitter.submitTopology("word-count-topology", config, builder.createTopology());
    } else {
        System.out.println("submitting on local mode");
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("word-count-topology", config, builder.createTopology());
        Thread.sleep(20000);
        cluster.killTopology("word-count-topology");
        cluster.shutdown();
    }
}
Developer ID: PacktPublishing, Project: Practical-Real-time-Processing-and-Analytics, Lines of code: 33, Source file: BasicStormWordCountExample.java