This page collects typical usage examples of the Java method org.apache.storm.Config.setMaxSpoutPending. If you are unsure what Config.setMaxSpoutPending does, or how and when to use it, the curated method examples below should help. You can also read more about the enclosing class, org.apache.storm.Config.
The following 15 code examples of Config.setMaxSpoutPending are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
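Before the examples, here is a minimal, self-contained sketch (not taken from any example below; the class, topology, and component names as well as the values are hypothetical) of the typical pattern. setMaxSpoutPending caps how many tuples a single spout task may have emitted but not yet acked or failed, and the cap only takes effect when acking is enabled:
import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.testing.TestWordSpout;
import org.apache.storm.topology.TopologyBuilder;

public class MaxSpoutPendingSketch {
    public static void main(String[] args) throws Exception {
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("word", new TestWordSpout(), 1);

        Config conf = new Config();
        // At most 500 tuples per spout task may be pending (emitted but not
        // yet acked or failed); nextTuple() is not called beyond this cap.
        conf.setMaxSpoutPending(500);
        // The cap is enforced only when acking is on, i.e. ackers > 0.
        conf.setNumAckers(1);

        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("max-spout-pending-sketch", conf, builder.createTopology());
        // ... let it run, then cluster.killTopology(...) and cluster.shutdown()
    }
}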
Example 1: start
import org.apache.storm.Config; // import the package/class this method depends on
private void start(StormTopology topology, boolean runAsLocal) throws Exception {
    Config conf = new Config();
    conf.put(Constants.StormConfigKey.FULL_SPLITTER_TOPOLOGY_ID, fullSplitterTopologyId);
    conf.put(Constants.StormConfigKey.FULL_PULLER_TOPOLOGY_ID, fullPullerTopologyId);
    conf.put(Constants.StormConfigKey.ZKCONNECT, this.zkConnect);
    conf.setMessageTimeoutSecs(3600);
    conf.setMaxSpoutPending(30);
    conf.setDebug(true);
    conf.setNumWorkers(1);
    if (runAsLocal) {
        conf.setMaxTaskParallelism(3);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(topologyName, conf, topology);
    } else {
        StormSubmitter.submitTopology(topologyName, conf, topology);
    }
}
Example 2: main
import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setMaxSpoutPending(20);
    LocalDRPC drpc = new LocalDRPC();
    if (args.length == 0) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("CountryCount", conf, buildTopology(drpc));
        Thread.sleep(2000);
        for (int i = 0; i < 100; i++) {
            System.out.println("Result - " + drpc.execute("Count", "Japan India Europe"));
            Thread.sleep(1000);
        }
    } else {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[0], conf, buildTopology(null));
        Thread.sleep(2000);
        DRPCClient client = new DRPCClient(conf, "RRPC-Server", 1234);
        System.out.println(client.execute("Count", "Japan India Europe"));
    }
}
Example 3: main
import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    if (args.length != 1) {
        throw new RuntimeException("Specify topology name");
    }
    TopologyBuilder builder = new TopologyBuilder();
    int spouts = 2;
    int bolts = 2;
    builder.setSpout("word", new AckingTestWordSpout(), spouts);
    builder.setBolt("exclaim1", new ExclamationBolt(), bolts)
            .shuffleGrouping("word");
    Config conf = new Config();
    conf.setDebug(true);
    // Put an arbitrarily large number here if you don't want the cap to slow the topology down
    conf.setMaxSpoutPending(1000 * 1000 * 1000);
    // To enable acking, set the number of ackers to at least one
    conf.setNumAckers(1);
    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");
    // Set the number of workers (stream managers)
    conf.setNumWorkers(2);
    StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
}
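A note on example 3: topology.max.spout.pending only throttles spouts that emit anchored (reliable) tuples, so it has no effect unless acking is enabled via setNumAckers (the Config.TOPOLOGY_ACKER_EXECUTORS setting). Setting it to a huge value, as above, keeps acking on while effectively disabling the throttle.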
Example 4: main
import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("word0", new TestWordSpout(), 2);
    builder.setSpout("word1", new TestWordSpout(), 2);
    builder.setSpout("word2", new TestWordSpout(), 2);
    builder.setBolt("exclaim1", new ExclamationBolt(), 2)
            .shuffleGrouping("word0")
            .shuffleGrouping("word1")
            .shuffleGrouping("word2");
    Config conf = new Config();
    conf.setDebug(true);
    conf.setMaxSpoutPending(10);
    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");
    if (args != null && args.length > 0) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } else {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("test", conf, builder.createTopology());
        Utils.sleep(10000);
        cluster.killTopology("test");
        cluster.shutdown();
    }
}
Example 5: main
import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
    TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("global-count", "spout", spout, 3);
    builder.setBolt("partial-count", new BatchCount(), 5).noneGrouping("spout");
    builder.setBolt("sum", new UpdateGlobalCount()).globalGrouping("partial-count");

    LocalCluster cluster = new LocalCluster();
    Config config = new Config();
    config.setDebug(true);
    config.setMaxSpoutPending(3);

    cluster.submitTopology("global-count-topology", config, builder.buildTopology());
    Thread.sleep(3000);
    cluster.shutdown();
}
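A note on example 5: for transactional topologies (and for the Trident topologies in the following examples), max spout pending does not count individual tuples; it bounds how many batches may be in flight in the pipeline at once. That is why small values such as 3 or 20 are typical here, while plain spout topologies often use much larger values.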
Example 6: main
import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setMaxSpoutPending(20);
    if (args.length == 0) {
        LocalDRPC drpc = new LocalDRPC();
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("wordCounter", conf, buildTopology(drpc));
        for (int i = 0; i < 100; i++) {
            System.out.println("DRPC RESULT: " + drpc.execute("words", "cat the dog jumped"));
            Thread.sleep(1000);
        }
    } else {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, buildTopology(null));
    }
}
Example 7: main
import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setMaxSpoutPending(20);
    if (args.length == 0) {
        LocalDRPC drpc = new LocalDRPC();
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("wordCounter", conf, buildTopology(drpc));
        for (int i = 0; i < 100; i++) {
            System.out.println("DRPC RESULT: " + drpc.execute("words", "CAT THE DOG JUMPED"));
            Thread.sleep(1000);
        }
    } else {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, buildTopology(null));
    }
}
Example 8: main
import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setMaxSpoutPending(20);
    conf.put(Config.TOPOLOGY_TRIDENT_WINDOWING_INMEMORY_CACHE_LIMIT, 100);
    // window-state table should already be created with cf:tuples column
    HBaseWindowsStoreFactory windowStoreFactory = new HBaseWindowsStoreFactory(
            new HashMap<String, Object>(), "window-state", "cf".getBytes("UTF-8"), "tuples".getBytes("UTF-8"));
    if (args.length == 0) {
        LocalCluster cluster = new LocalCluster();
        String topologyName = "wordCounterWithWindowing";
        cluster.submitTopology(topologyName, conf, buildTopology(windowStoreFactory));
        Utils.sleep(120 * 1000);
        cluster.killTopology(topologyName);
        cluster.shutdown();
        System.exit(0);
    } else {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, buildTopology(windowStoreFactory));
    }
}
Example 9: main
import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    StormTopology topology = buildVehiclesTopology();
    Config conf = new Config();
    conf.setMaxSpoutPending(20);
    if (args.length == 0) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("vehicles-topology", conf, topology);
        Utils.sleep(60 * 1000);
        cluster.shutdown();
        System.exit(0);
    } else {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar("vehicles-topology", conf, topology);
    }
}
Example 10: main
import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    StormTopology topology = buildDevicesTopology();
    Config conf = new Config();
    conf.setMaxSpoutPending(20);
    if (args.length == 0) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("devices-topology", conf, topology);
        Utils.sleep(60 * 1000);
        cluster.shutdown();
        System.exit(0);
    } else {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar("devices-topology", conf, topology);
    }
}
Example 11: main
import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
    TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("top-n-words", "spout", spout, 2);
    builder.setBolt("count", new KeyedCountUpdater(), 5).fieldsGrouping("spout", new Fields("word"));
    builder.setBolt("bucketize", new Bucketize()).noneGrouping("count");
    builder.setBolt("buckets", new BucketCountUpdater(), 5).fieldsGrouping("bucketize", new Fields("bucket"));

    LocalCluster cluster = new LocalCluster();
    Config config = new Config();
    config.setDebug(true);
    config.setMaxSpoutPending(3);

    cluster.submitTopology("top-n-topology", config, builder.buildTopology());
    Thread.sleep(3000);
    cluster.shutdown();
}
Example 12: main
import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setMaxSpoutPending(20);
    if (args.length == 0) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("Count", conf, buildTopology());
    } else {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[0], conf, buildTopology());
    }
}
Example 13: createStormConf
import org.apache.storm.Config; // import the package/class this method depends on
public static Config createStormConf() {
    Boolean limitHitsPerSelector = Boolean.parseBoolean(SystemConfiguration.getProperty("pir.limitHitsPerSelector"));
    Integer maxHitsPerSelector = Integer.parseInt(SystemConfiguration.getProperty("pir.maxHitsPerSelector"));
    Integer rowDivisions = Integer.parseInt(SystemConfiguration.getProperty("storm.rowDivs", "1"));

    Config conf = new Config();
    conf.setNumAckers(Integer.parseInt(SystemConfiguration.getProperty("storm.numAckers", numWorkers.toString())));
    conf.setMaxSpoutPending(Integer.parseInt(SystemConfiguration.getProperty("storm.maxSpoutPending", "300")));
    conf.setNumWorkers(numWorkers);
    conf.setDebug(false);
    // conf.setNumEventLoggers(2);

    conf.put(Config.TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE, SystemConfiguration.getIntProperty("storm.executor.receiveBufferSize", 1024));
    conf.put(Config.TOPOLOGY_EXECUTOR_SEND_BUFFER_SIZE, SystemConfiguration.getIntProperty("storm.executor.sendBufferSize", 1024));
    conf.put(Config.TOPOLOGY_TRANSFER_BUFFER_SIZE, SystemConfiguration.getIntProperty("storm.transferBufferSize", 32));
    conf.put(Config.WORKER_HEAP_MEMORY_MB, SystemConfiguration.getIntProperty("storm.worker.heapMemory", 750));
    conf.put(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB, Double.parseDouble(SystemConfiguration.getProperty("storm.componentOnheapMem", "128")));

    // Pirk parameters to send to bolts
    conf.put(StormConstants.ALLOW_ADHOC_QSCHEMAS_KEY, SystemConfiguration.getProperty("pir.allowAdHocQuerySchemas", "false").equals("true"));
    conf.put(StormConstants.QSCHEMA_KEY, SystemConfiguration.getProperty("query.schemas"));
    conf.put(StormConstants.DSCHEMA_KEY, SystemConfiguration.getProperty("data.schemas"));
    conf.put(StormConstants.HDFS_URI_KEY, hdfsUri);
    conf.put(StormConstants.QUERY_FILE_KEY, queryFile);
    conf.put(StormConstants.USE_HDFS, useHdfs);
    conf.put(StormConstants.OUTPUT_FILE_KEY, outputPath);
    conf.put(StormConstants.LIMIT_HITS_PER_SEL_KEY, limitHitsPerSelector);
    conf.put(StormConstants.MAX_HITS_PER_SEL_KEY, maxHitsPerSelector);
    conf.put(StormConstants.SPLIT_PARTITIONS_KEY, splitPartitions);
    conf.put(StormConstants.SALT_COLUMNS_KEY, saltColumns);
    conf.put(StormConstants.ROW_DIVISIONS_KEY, rowDivisions);
    conf.put(StormConstants.ENCROWCALCBOLT_PARALLELISM_KEY, encrowcalcboltParallelism);
    conf.put(StormConstants.ENCCOLMULTBOLT_PARALLELISM_KEY, enccolmultboltParallelism);

    return conf;
}
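Example 13 also illustrates a useful pattern: rather than hard-coding the value, it reads storm.maxSpoutPending (defaulting to 300) from an external SystemConfiguration, so back-pressure can be tuned per deployment without recompiling.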
Example 14: main
import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    int parallelism = 2;
    int spouts = parallelism;
    builder.setSpout("word", new TestWordSpout(Duration.ofMillis(50)), spouts);
    int bolts = 2 * parallelism;
    builder.setBolt("exclaim1", new ExclamationBolt(), bolts)
            .shuffleGrouping("word");
    Config conf = new Config();
    conf.setDebug(true);
    conf.setMaxSpoutPending(10);
    conf.setMessageTimeoutSecs(600);
    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");
    if (args != null && args.length > 0) {
        conf.setNumWorkers(parallelism);
        StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } else {
        System.out.println("Topology name not provided as an argument, running in simulator mode.");
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("test", conf, builder.createTopology());
        Utils.sleep(10000);
        cluster.killTopology("test");
        cluster.shutdown();
    }
}
Example 15: getConsumerConfig
import org.apache.storm.Config; // import the package/class this method depends on
/**
 * Return the consumer topology config.
 *
 * @return the topology config
 */
public Config getConsumerConfig() {
    Config conf = new Config();
    conf.setMaxSpoutPending(20);
    // conf.setDebug(true);
    return conf;
}
}