本文整理匯總了Java中backtype.storm.Config.setNumWorkers方法的典型用法代碼示例。如果您正苦於以下問題:Java Config.setNumWorkers方法的具體用法?Java Config.setNumWorkers怎麼用?Java Config.setNumWorkers使用的例子?那麼, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類backtype.storm.Config
的用法示例。
在下文中一共展示了Config.setNumWorkers方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Java代碼示例。
示例1: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Entry point for the Trident file topology.
 *
 * <p>With one argument (the HDFS url) the topology runs on an in-process
 * {@link LocalCluster} for one minute and then shuts down; with two
 * arguments it is submitted to a remote cluster under the name in
 * {@code args[1]}. Any other argument count prints usage.
 *
 * @throws Exception on submission or interruption problems
 */
public static void main(String[] args) throws Exception {
    Config topoConf = new Config();
    topoConf.setMaxSpoutPending(5);
    switch (args.length) {
        case 1: {
            // Local test run: one minute, then tear everything down.
            LocalCluster localCluster = new LocalCluster();
            localCluster.submitTopology("wordCounter", topoConf, buildTopology(args[0]));
            Thread.sleep(60 * 1000);
            localCluster.killTopology("wordCounter");
            localCluster.shutdown();
            System.exit(0);
            break;
        }
        case 2: {
            // Remote submission: three workers, name taken from args[1].
            topoConf.setNumWorkers(3);
            StormSubmitter.submitTopology(args[1], topoConf, buildTopology(args[0]));
            break;
        }
        default:
            System.out.println("Usage: TridentFileTopology <hdfs url> [topology name]");
    }
}
示例2: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Builds the Kafka -> crop -> split -> HDFS topology and submits it to a
 * remote cluster when a topology name is supplied as {@code args[0]}.
 * Without a topology name the method builds the topology and returns
 * without submitting anything.
 */
public static void main(String[] args) {
    Config stormConf = new Config();
    HdfsBolt hdfsBolt = makeHdfsBolt();
    KafkaSpout kafkaSpout = makeKafkaSpout(TOPIC, TOPOLOGY_NAME);
    LOG.info("Topology name is {}", TOPOLOGY_NAME);
    TopologyBuilder wiring = new TopologyBuilder();
    wiring.setSpout(KAFKA_SPOUT_ID, kafkaSpout, 10);
    wiring.setBolt(CROP_BOLT_ID, new CropBolt(), 10).shuffleGrouping(KAFKA_SPOUT_ID);
    wiring.setBolt(SPLIT_FIELDS_BOLT_ID, new SplitFieldsBolt(), 10).shuffleGrouping(CROP_BOLT_ID);
    wiring.setBolt(STORM_HDFS_BOLT_ID, hdfsBolt, 4).fieldsGrouping(SPLIT_FIELDS_BOLT_ID, new Fields("timestamp", "fieldvalues"));
    // No topology-name argument -> nothing to submit.
    if (args == null || args.length == 0) {
        return;
    }
    stormConf.setDebug(false);
    stormConf.setNumWorkers(3);
    try {
        StormSubmitter.submitTopology(args[0], stormConf, wiring.createTopology());
    } catch (InvalidTopologyException | AlreadyAliveException | AuthorizationException e) {
        e.printStackTrace();
    }
}
示例3: buildAndSubmit
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Assembles the log-analyzer topology (Kafka spout + Elasticsearch bolts)
 * and submits it to the cluster as "ApLogAnalyzerV1".
 *
 * @throws AlreadyAliveException if a topology with the same name is running
 * @throws InvalidTopologyException if the assembled topology is invalid
 * @throws AuthorizationException if the submitter lacks permission
 */
private void buildAndSubmit() throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
    final int numWorkers = Integer.parseInt(topologyConfig.getProperty("num.workers"));
    Config conf = new Config();
    conf.setDebug(DEBUG);
    conf.setNumWorkers(numWorkers);
    conf.setMaxSpoutPending(1000000);
    // https://github.com/apache/storm/tree/v0.10.0/external/storm-kafka
    // Must be larger than the KafkaSpout retryDelayMaxMs (60 s by default);
    // Storm's 30 s default would expire tuples before a retry happens.
    conf.setMessageTimeoutSecs(600);
    TopologyBuilder builder = new TopologyBuilder();
    configureKafkaSpout(builder, conf);
    configureESBolts(builder, conf);
    // configureHBaseBolts(builder, config);
    // conf.put(Config.NIMBUS_HOST, "hdp01.localdomain");
    // System.setProperty("storm.jar", "/root/workspace//LearnStorm/target/LearnStorm-0.0.1-SNAPSHOT.jar");
    // System.setProperty("hadoop.home.dir", "/tmp");
    // LocalCluster cluster = new LocalCluster();
    StormSubmitter.submitTopology("ApLogAnalyzerV1", conf, builder.createTopology());
}
示例4: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Wires a word spout through two exclamation bolts. Submits remotely when
 * a topology name is given in {@code args[0]}; otherwise runs locally for
 * ten seconds and then shuts the local cluster down.
 *
 * @throws Exception on submission problems
 */
public static void main(String[] args) throws Exception {
    TopologyBuilder tb = new TopologyBuilder();
    tb.setSpout("word", new TestWordSpout(), 10);
    tb.setBolt("exclaim1", new ExclamationBolt(), 3).shuffleGrouping("word");
    tb.setBolt("exclaim2", new ExclamationBolt(), 2).shuffleGrouping("exclaim1");
    Config topoConf = new Config();
    topoConf.setDebug(true);
    boolean remote = args != null && args.length > 0;
    if (!remote) {
        // Local test run: ten seconds, then tear down.
        LocalCluster local = new LocalCluster();
        local.submitTopology("www_nginx_accesslog_stat", topoConf, tb.createTopology());
        Utils.sleep(10000);
        local.killTopology("www_nginx_accesslog_stat");
        local.shutdown();
    } else {
        topoConf.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar(args[0], topoConf, tb.createTopology());
    }
}
示例5: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Runs the points/assign/aggregator clustering topology on an in-process
 * local cluster. The aggregator broadcasts centroids back to the assign
 * bolt on the "centroids" stream.
 *
 * @throws WorkloadException on workload setup problems
 */
public static void main(String[] args) throws WorkloadException {
    TopologyBuilder topology = new TopologyBuilder();
    topology.setSpout("points", new PointSpout());
    topology.setBolt("assign", new Assign())
            .shuffleGrouping("points")
            .allGrouping("aggregator", "centroids");
    topology.setBolt("aggregator", new Aggregator())
            .fieldsGrouping("assign", new Fields("centroid_index"));
    Config settings = new Config();
    settings.setDebug(true);
    settings.setNumWorkers(3);
    new LocalCluster().submitTopology("kafka-spout", settings, topology.createTopology());
}
示例6: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Creates a standalone topology.
 *
 * <p>With a topology name in {@code args[0]} the topology is submitted to
 * a remote cluster; otherwise it runs on an in-process local cluster under
 * {@code Naming.PIPELINE_NAME}.
 *
 * @param args the topology arguments (args[0] = topology name, optional)
 * @throws Exception in case of creation problems
 */
public static void main(String[] args) throws Exception {
    Config config = new Config();
    config.setMessageTimeoutSecs(100);
    PipelineOptions options = new PipelineOptions(args);
    RecordingTopologyBuilder b = new RecordingTopologyBuilder(options);
    createTopology(b);
    // BUG FIX: args[0] was previously dereferenced unconditionally here,
    // which threw ArrayIndexOutOfBoundsException whenever no argument was
    // given and made the local-mode branch below unreachable. Close under
    // the same name the topology is submitted with in each mode.
    String topologyName = (args != null && args.length > 0) ? args[0] : Naming.PIPELINE_NAME;
    b.close(topologyName, config);
    // main topology: int numWorkers = options.getNumberOfWorkers(2);
    options.toConf(config);
    if (args != null && args.length > 0) {
        config.setNumWorkers(2);
        StormSubmitter.submitTopology(args[0], config, b.createTopology());
    } else {
        config.setMaxTaskParallelism(2);
        final LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(Naming.PIPELINE_NAME, config, b.createTopology());
    }
}
示例7: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Main method: submits a word/consumer topology. The first argument is the
 * required topology name; the optional second argument sets parallelism
 * for the spout, the bolt, and the worker count (default 1).
 *
 * @throws AlreadyAliveException if the topology name is already running
 * @throws InvalidTopologyException if the topology is invalid
 */
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
    if (args.length < 1) {
        throw new RuntimeException("Specify topology name");
    }
    final int parallelism = args.length > 1 ? Integer.parseInt(args[1]) : 1;
    TopologyBuilder topology = new TopologyBuilder();
    topology.setSpout("word", new WordSpout(), parallelism);
    topology.setBolt("consumer", new ConsumerBolt(), parallelism)
            .fieldsGrouping("word", new Fields("word"));
    Config settings = new Config();
    settings.setNumWorkers(parallelism);
    StormSubmitter.submitTopology(args[0], settings, topology.createTopology());
}
示例8: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Creates a standalone topology.
 *
 * @param args the topology arguments (args[0] = topology name for remote
 *        submission; empty = local run under "testGenPip")
 * @param topo the topology instance
 * @throws Exception in case of creation problems
 */
public static void main(String[] args, AbstractTopology topo) throws Exception {
    Config config = new Config();
    config.setMessageTimeoutSecs(100);
    PipelineOptions options = new PipelineOptions(args);
    RecordingTopologyBuilder builder = new RecordingTopologyBuilder(options);
    topo.createTopology(config, builder);
    // main topology: int numWorkers = options.getNumberOfWorkers(2);
    options.toConf(config);
    boolean submitRemotely = args != null && args.length > 0;
    if (submitRemotely) {
        config.setNumWorkers(2);
        StormSubmitter.submitTopology(args[0], config, builder.createTopology());
    } else {
        // Local test run under a fixed name.
        config.setMaxTaskParallelism(2);
        new LocalCluster().submitTopology("testGenPip", config, builder.createTopology());
    }
}
示例9: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Word-count topology: random sentences are split and counted. Submits
 * remotely when a topology name is given in {@code args[0]}; otherwise
 * runs on a local cluster for ten seconds and shuts down.
 *
 * @throws Exception on submission or interruption problems
 */
public static void main(String[] args) throws Exception {
    TopologyBuilder wordCount = new TopologyBuilder();
    wordCount.setSpout("spout", new RandomSentenceSpout(), 5);
    wordCount.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
    wordCount.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));
    Config settings = new Config();
    settings.setDebug(true);
    if (args == null || args.length == 0) {
        // Local test run.
        settings.setMaxTaskParallelism(3);
        LocalCluster local = new LocalCluster();
        local.submitTopology("word-count", settings, wordCount.createTopology());
        Thread.sleep(10000);
        local.shutdown();
    } else {
        settings.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar(args[0], settings, wordCount.createTopology());
    }
}
示例10: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Reach DRPC topology. With no arguments it runs an in-process DRPC demo
 * against a few sample URLs and prints the reach of each; with a topology
 * name in {@code args[0]} it submits the remote topology.
 *
 * @throws Exception on submission problems
 */
public static void main(String[] args) throws Exception {
    LinearDRPCTopologyBuilder reach = construct();
    Config settings = new Config();
    boolean remote = args != null && args.length > 0;
    if (remote) {
        settings.setNumWorkers(6);
        StormSubmitter.submitTopologyWithProgressBar(args[0], settings, reach.createRemoteTopology());
    } else {
        settings.setMaxTaskParallelism(3);
        LocalDRPC drpc = new LocalDRPC();
        LocalCluster local = new LocalCluster();
        local.submitTopology("reach-drpc", settings, reach.createLocalTopology(drpc));
        // Query the local DRPC server for each sample URL.
        for (String url : new String[]{ "foo.com/blog/1", "engineering.twitter.com/blog/5", "notaurl.com" }) {
            System.out.println("Reach of " + url + ": " + drpc.execute("reach", url));
        }
        local.shutdown();
        drpc.shutdown();
    }
}
示例11: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Creates a standalone topology.
 *
 * <p>With a topology name in {@code args[0]} the topology is submitted to
 * a remote cluster; otherwise it runs on an in-process local cluster under
 * {@code PIP_NAME}.
 *
 * @param args the topology arguments (args[0] = topology name, optional)
 * @throws Exception in case of creation problems
 */
public static void main(String[] args) throws Exception {
    Config config = new Config();
    Naming.setDefaultInitializeAlgorithms(config, defaultInitAlgorithms);
    config.setMessageTimeoutSecs(100);
    PipelineOptions options = new PipelineOptions(args);
    RecordingTopologyBuilder b = new RecordingTopologyBuilder(options);
    createTopology(b);
    // BUG FIX: args[0] was previously dereferenced unconditionally here,
    // which threw ArrayIndexOutOfBoundsException whenever no argument was
    // given and made the local-mode branch below unreachable. Close under
    // the same name the topology is submitted with in each mode.
    String topologyName = (args != null && args.length > 0) ? args[0] : PIP_NAME;
    b.close(topologyName, config);
    // main topology: int numWorkers = options.getNumberOfWorkers(2);
    options.toConf(config);
    if (args != null && args.length > 0) {
        config.setNumWorkers(2);
        StormSubmitter.submitTopology(args[0], config, b.createTopology());
    } else {
        config.setMaxTaskParallelism(2);
        final LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(PIP_NAME, config, b.createTopology());
    }
}
示例12: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Exclamation DRPC demo. With no arguments it runs a local DRPC
 * round-trip for two sample words and prints each result; with a topology
 * name in {@code args[0]} it submits the remote topology.
 *
 * @throws Exception on submission or interruption problems
 */
public static void main(String[] args) throws Exception {
    LinearDRPCTopologyBuilder drpcBuilder = new LinearDRPCTopologyBuilder("exclamation");
    drpcBuilder.addBolt(new ExclaimBolt(), 3);
    Config settings = new Config();
    if (args != null && args.length > 0) {
        settings.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar(args[0], settings, drpcBuilder.createRemoteTopology());
    } else {
        LocalDRPC drpc = new LocalDRPC();
        LocalCluster local = new LocalCluster();
        local.submitTopology("drpc-demo", settings, drpcBuilder.createLocalTopology(drpc));
        for (String word : new String[] { "hello", "goodbye" }) {
            System.out.println("Result for \"" + word + "\": " + drpc.execute("exclamation", word));
        }
        Thread.sleep(10000);
        drpc.shutdown();
        local.shutdown();
    }
}
示例13: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Kafka order-processing topology: consumes the "order" topic from the
 * configured zookeeper quorum, then checks and counts orders.
 *
 * @param args args[0] = topology name for remote submission; empty = local run
 * http://www.programcreek.com/java-api-examples/index.php?api=storm.kafka.KafkaSpout
 */
public static void main(String[] args) {
    try {
        // Spout configuration; the spout parallelism controls how many
        // consumer threads run in the cluster.
        String zkhost = "wxb-1:2181,wxb-2:2181,wxb-3:2181";
        String topic = "order";
        String groupId = "id";
        int spoutNum = 3;
        int boltNum = 1;
        ZkHosts zkHosts = new ZkHosts(zkhost); // zookeeper quorum hosting kafka
        SpoutConfig spoutConfig = new SpoutConfig(zkHosts, topic, "/order", groupId); // create /order /id
        spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
        KafkaSpout orderSpout = new KafkaSpout(spoutConfig);
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("spout", orderSpout, spoutNum);
        builder.setBolt("check", new CheckOrderBolt(), boltNum).shuffleGrouping("spout");
        builder.setBolt("counter", new CounterBolt(), boltNum).shuffleGrouping("check");
        Config config = new Config();
        config.setDebug(true);
        if (args == null || args.length == 0) {
            // Local run for roughly eight minutes, then shut down.
            config.setMaxTaskParallelism(2);
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("Wordcount-Topology", config, builder.createTopology());
            Thread.sleep(500000);
            cluster.shutdown();
        } else {
            config.setNumWorkers(2);
            StormSubmitter.submitTopology(args[0], config, builder.createTopology());
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
示例14: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Spout -> parse -> print pipeline. Submits remotely when a topology name
 * is supplied in {@code args[0]}; otherwise runs on a local cluster under
 * the name "test".
 */
public static void main(String[] args) {
    try {
        TopologyBuilder pipeline = new TopologyBuilder();
        pipeline.setSpout("Spout", new EmitMessageSpout(), 1);
        pipeline.setBolt("generate", new ParseLoopBolt(), 1)
                .shuffleGrouping("Spout");
        pipeline.setBolt("Store", new PrintBolt(), 1)
                .shuffleGrouping("generate");
        Config conf = new Config();
        conf.setDebug(false);
        if (args == null || args.length == 0) {
            conf.setMaxTaskParallelism(2);
            new LocalCluster().submitTopology("test", conf, pipeline.createTopology());
        } else {
            conf.setNumWorkers(4);
            StormSubmitter.submitTopology(args[0], conf, pipeline.createTopology());
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
示例15: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Three typed record spouts (numbers, strings, signs) feed a splitter, a
 * per-type distributor, and three saver bolts. Submits remotely when a
 * topology name is supplied in {@code args[0]}; otherwise runs locally
 * under the name "test".
 */
public static void main(String[] args) {
    try {
        TopologyBuilder wiring = new TopologyBuilder();
        wiring.setSpout("spout-number", new ProduceRecordSpout(Type.NUMBER, new String[]{"111 222 333", "80966 31"}), 1);
        wiring.setSpout("spout-string", new ProduceRecordSpout(Type.STRING, new String[]{"abc ddd fasko", "hello the world"}), 1);
        wiring.setSpout("spout-sign", new ProduceRecordSpout(Type.SIGN, new String[]{"++ -*% *** @@", "{+-} ^#######"}), 1);
        // One splitter consumes all three spouts.
        wiring.setBolt("bolt-splitter", new SplitRecordBolt(), 2)
                .shuffleGrouping("spout-number")
                .shuffleGrouping("spout-string")
                .shuffleGrouping("spout-sign");
        // Route words by their "type" field to the matching saver stream.
        wiring.setBolt("bolt-distributor", new DistributeWordByTypeBolt(), 1)
                .fieldsGrouping("bolt-splitter", new Fields("type"));
        wiring.setBolt("bolt-number-saver", new SaveDataBolt(Type.NUMBER), 1)
                .shuffleGrouping("bolt-distributor", "stream-number-saver");
        wiring.setBolt("bolt-string-saver", new SaveDataBolt(Type.STRING), 1)
                .shuffleGrouping("bolt-distributor", "stream-string-saver");
        wiring.setBolt("bolt-sign-saver", new SaveDataBolt(Type.SIGN), 1)
                .shuffleGrouping("bolt-distributor", "stream-sign-saver");
        Config conf = new Config();
        conf.setDebug(false);
        if (args == null || args.length == 0) {
            conf.setMaxTaskParallelism(2);
            new LocalCluster().submitTopology("test", conf, wiring.createTopology());
        } else {
            conf.setNumWorkers(4);
            StormSubmitter.submitTopology(args[0], conf, wiring.createTopology());
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}