本文整理匯總了Java中backtype.storm.Config.setMaxSpoutPending方法的典型用法代碼示例。如果您正苦於以下問題:Java Config.setMaxSpoutPending方法的具體用法?Java Config.setMaxSpoutPending怎麽用?Java Config.setMaxSpoutPending使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類backtype.storm.Config
的用法示例。
在下文中一共展示了Config.setMaxSpoutPending方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Entry point for the Trident word-counter topology.
 * One argument (hdfs url): run on a local cluster for one minute, then exit.
 * Two arguments (hdfs url, topology name): submit to a remote cluster.
 * Anything else: print usage.
 */
public static void main(String[] args) throws Exception {
    Config topologyConf = new Config();
    topologyConf.setMaxSpoutPending(5);
    switch (args.length) {
        case 1: {
            // Local run: give the topology 60 seconds of work, then tear down.
            LocalCluster localCluster = new LocalCluster();
            localCluster.submitTopology("wordCounter", topologyConf, buildTopology(args[0]));
            Thread.sleep(60 * 1000);
            localCluster.killTopology("wordCounter");
            localCluster.shutdown();
            System.exit(0);
            break;
        }
        case 2:
            // Remote run: args[1] is the topology name, args[0] the HDFS url.
            topologyConf.setNumWorkers(3);
            StormSubmitter.submitTopology(args[1], topologyConf, buildTopology(args[0]));
            break;
        default:
            System.out.println("Usage: TridentFileTopology <hdfs url> [topology name]");
    }
}
示例2: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Builds the transactional global-count topology (spout -> partial-count ->
 * global sum) and submits it to a remote cluster with acking disabled.
 */
public static void main(String[] args) throws Exception {
    MemoryTransactionalSpout wordSpout =
            new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
    TransactionalTopologyBuilder topology =
            new TransactionalTopologyBuilder("global-count", "spout", wordSpout, 2);
    topology.setBolt("partial-count", new BatchCount(), 3).noneGrouping("spout");
    topology.setBolt("sum", new UpdateGlobalCount(), 1).globalGrouping("partial-count");

    Config conf = new Config();
    conf.setDebug(true);
    conf.setMaxSpoutPending(3);
    conf.put(Config.TOPOLOGY_WORKERS, 9);
    // Transactional topologies manage their own acking; turn ackers off.
    Config.setNumAckers(conf, 0);

    StormSubmitter.submitTopology("global-count-topology", conf, topology.buildTopology());
}
示例3: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Runs the transactional global-count topology on a local cluster for a few
 * seconds, then shuts the cluster down.
 */
public static void main(String[] args) throws Exception {
    MemoryTransactionalSpout wordSpout =
            new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
    TransactionalTopologyBuilder topology =
            new TransactionalTopologyBuilder("global-count", "spout", wordSpout, 3);
    topology.setBolt("partial-count", new BatchCount(), 5).noneGrouping("spout");
    topology.setBolt("sum", new UpdateGlobalCount()).globalGrouping("partial-count");

    Config conf = new Config();
    conf.setDebug(true);
    conf.setMaxSpoutPending(3);

    LocalCluster localCluster = new LocalCluster();
    localCluster.submitTopology("global-count-topology", conf, topology.buildTopology());
    Thread.sleep(3000);
    localCluster.shutdown();
}
示例4: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Entry point for the DRPC word-counter example.
 * No arguments: run locally with an in-process DRPC server and issue 100
 * sample queries. With an argument: submit remotely under that name.
 */
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setMaxSpoutPending(20);

    if (args.length > 0) {
        // Remote submission path; no local DRPC instance is needed.
        conf.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, buildTopology(null));
        return;
    }

    LocalDRPC drpc = new LocalDRPC();
    LocalCluster localCluster = new LocalCluster();
    localCluster.submitTopology("wordCounter", conf, buildTopology(drpc));
    int query = 0;
    while (query++ < 100) {
        System.out.println("DRPC RESULT: " + drpc.execute("words", "cat the dog jumped"));
        Thread.sleep(1000);
    }
}
示例5: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Runs the transactional top-n-words topology (keyed count -> bucketize ->
 * bucket count) on a local cluster for a few seconds, then shuts down.
 */
public static void main(String[] args) throws Exception {
    MemoryTransactionalSpout wordSpout =
            new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
    TransactionalTopologyBuilder topology =
            new TransactionalTopologyBuilder("top-n-words", "spout", wordSpout, 2);
    topology.setBolt("count", new KeyedCountUpdater(), 5).fieldsGrouping("spout", new Fields("word"));
    topology.setBolt("bucketize", new Bucketize()).noneGrouping("count");
    topology.setBolt("buckets", new BucketCountUpdater(), 5).fieldsGrouping("bucketize", new Fields("bucket"));

    Config conf = new Config();
    conf.setDebug(true);
    conf.setMaxSpoutPending(3);

    LocalCluster localCluster = new LocalCluster();
    localCluster.submitTopology("top-n-topology", conf, topology.buildTopology());
    Thread.sleep(3000);
    localCluster.shutdown();
}
示例6: test_transaction_word
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Smoke test: runs the transactional top-n-words topology on a local cluster
 * for 60 seconds; any exception during setup or execution fails the test.
 */
@Test
public void test_transaction_word() {
    try {
        MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
        TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("top-n-words", "spout", spout, 2);
        builder.setBolt("count", new KeyedCountUpdater(), 5).fieldsGrouping("spout", new Fields("word"));
        builder.setBolt("bucketize", new Bucketize()).shuffleGrouping("count");
        builder.setBolt("buckets", new BucketCountUpdater(), 5).fieldsGrouping("bucketize", new Fields("bucket"));
        LocalCluster cluster = new LocalCluster();
        Config config = new Config();
        config.setDebug(true);
        config.setMaxSpoutPending(3);
        cluster.submitTopology("top-n-topology", config, builder.buildTopology());
        JStormUtils.sleepMs(60 * 1000);
        cluster.shutdown();
    } catch (Exception e) {
        // The original message discarded the caught exception entirely;
        // append it so a failure report says WHY the topology failed.
        Assert.fail("Failed to run simple transaction: " + e);
    }
}
示例7: buildAndSubmit
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Assembles the log-analyzer topology (Kafka spout feeding Elasticsearch
 * bolts) and submits it to the cluster as "LogAnalyzerV1".
 *
 * @throws AlreadyAliveException   if a topology with this name is running
 * @throws InvalidTopologyException if the built topology is malformed
 * @throws AuthorizationException  if submission is not permitted
 */
private void buildAndSubmit() throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
    Config stormConf = new Config();
    stormConf.setDebug(DEBUG);
    stormConf.setNumWorkers(Integer.valueOf(topologyConfig.getProperty("num.workers")));
    stormConf.setMaxSpoutPending(1000000);
    // https://github.com/apache/storm/tree/v0.10.0/external/storm-kafka
    // The message timeout (default 30s) must exceed KafkaSpout's
    // retryDelayMaxMs (default 60s), hence the generous 600s here.
    stormConf.setMessageTimeoutSecs(600);

    TopologyBuilder topology = new TopologyBuilder();
    configureKafkaSpout(topology, stormConf);
    configureESBolts(topology, stormConf);
    StormSubmitter.submitTopology("LogAnalyzerV1", stormConf, topology.createTopology());
}
示例8: configureKafkaBolt
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Wires a KafkaBolt producer into the topology: builds the producer
 * properties, stores them in the topology config, and attaches the bolt
 * downstream of the spout.
 */
private void configureKafkaBolt(TopologyBuilder builder, Config config) {
    Properties producerProps = new Properties();
    producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerUrl);
    producerProps.put(ProducerConfig.CLIENT_ID_CONFIG, "storm-kafka-producer");
    producerProps.put("metadata.broker.list", brokerUrl);
    producerProps.put("serializer.class", "kafka.serializer.StringEncoder");
    producerProps.put("request.required.acks", "1");

    config.setMaxSpoutPending(20);
    config.put(KafkaBolt.KAFKA_BROKER_PROPERTIES, producerProps);

    String targetTopic = topologyConfig.getProperty("kafka.topic");
    KafkaBolt<String, String> sink = new KafkaBolt<String, String>()
            .withTopicSelector(new DefaultTopicSelector(targetTopic))
            .withTupleToKafkaMapper(new FieldNameBasedTupleToKafkaMapper<String, String>("key", "log"));
    builder.setBolt("KafkaBolt", sink, 3).shuffleGrouping(SPOUT_ID).setDebug(DEBUG);
}
示例9: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Runs a local demo topology joining two random-field spouts, lets it run
 * for ten seconds, then kills the topology and shuts the cluster down.
 */
public static void main(String[] args) {
    final Config topoConf = new Config();
    topoConf.setDebug(false);
    topoConf.setNumWorkers(2);
    topoConf.setMaxSpoutPending(1);
    topoConf.setFallBackOnJavaSerialization(false);
    topoConf.setSkipMissingKryoRegistrations(false);

    final TopologyBuilder builder = new TopologyBuilder();
    // RandomFieldSpout arguments: (nfields, seed, min, max)
    builder.setSpout("randomSpout1", new RandomFieldSpout(2, 0, 0, 1));
    builder.setSpout("randomSpout2", new RandomFieldSpout(2, 10, 0, 1));
    JoinBolt.connectNewBolt(builder);

    final LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("playTopology", topoConf, builder.createTopology());
    Utils.sleep(10000);
    cluster.killTopology("playTopology");
    cluster.shutdown();
}
示例10: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Entry point for the DRPC word-counter example.
 * No arguments: run locally and issue 100 sample queries against an
 * in-process DRPC server. With an argument: submit remotely under that name.
 */
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setMaxSpoutPending(20);

    if (args.length > 0) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, buildTopology(null));
        return;
    }

    LocalDRPC drpc = new LocalDRPC();
    LocalCluster localCluster = new LocalCluster();
    localCluster.submitTopology("wordCounter", conf, buildTopology(drpc));
    int query = 0;
    while (query++ < 100) {
        // The query input is a "$$"-separated word list; the result is the
        // sum of the counts for those words.
        System.out.println("DRPC RESULT: " + drpc.execute("words", "cat$$dog$$the$$man"));
        Thread.sleep(1000);
    }
}
示例11: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Builds the delivery-check topology (trade spout -> eligibility bolt
 * splitting into odd/even streams) and either submits it remotely (when an
 * argument is given) or runs it indefinitely on a local cluster.
 */
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
    LOGGER.info("Starting..");
    TopologyBuilder topology = new TopologyBuilder();
    topology.setSpout("trade", new DeliveryCheckSpout(), 1);
    topology.setBolt("eligibility", new DeliveryCheckBolt(), 10).shuffleGrouping("trade");
    topology.setBolt("odd", new DeliveryCheckOddBolt(), 10)
            .shuffleGrouping("eligibility", "oddstream");
    topology.setBolt("even", new DeliveryCheckEvenBolt(), 10)
            .shuffleGrouping("eligibility", "evenstream");

    Config conf = new Config();
    conf.setDebug(false);
    conf.setMaxSpoutPending(5);

    boolean remote = args != null && args.length > 0;
    if (remote) {
        conf.setNumWorkers(1);
        LOGGER.info("Submitting DeliveryTopology");
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, topology.createTopology());
    } else {
        LocalCluster localCluster = new LocalCluster();
        localCluster.submitTopology("DeliveryTopology", conf, topology.createTopology());
        Utils.sleep(100000000);
        localCluster.killTopology("DeliveryTopology");
        localCluster.shutdown();
    }
}
示例12: test_transaction_word
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Smoke test: runs the transactional top-n-words topology on a local cluster
 * for 60 seconds; any exception during setup or execution fails the test.
 */
@Test
public void test_transaction_word(){
    try {
        MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA,
                new Fields("word"), PARTITION_TAKE_PER_BATCH);
        TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder(
                "top-n-words", "spout", spout, 2);
        builder.setBolt("count", new KeyedCountUpdater(), 5).fieldsGrouping(
                "spout", new Fields("word"));
        builder.setBolt("bucketize", new Bucketize()).shuffleGrouping("count");
        builder.setBolt("buckets", new BucketCountUpdater(), 5).fieldsGrouping(
                "bucketize", new Fields("bucket"));
        LocalCluster cluster = new LocalCluster();
        Config config = new Config();
        config.setDebug(true);
        config.setMaxSpoutPending(3);
        cluster.submitTopology("top-n-topology", config,
                builder.buildTopology());
        JStormUtils.sleepMs(60 * 1000);
        cluster.shutdown();
    }catch(Exception e) {
        // The original message discarded the caught exception entirely;
        // append it so a failure report says WHY the topology failed.
        Assert.fail("Failed to run simple transaction: " + e);
    }
}
示例13: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Builds the transactional global-count topology. With no arguments it runs
 * on a local cluster for a while; otherwise it merges an optional YAML config
 * file (args[0], best-effort) and submits to a remote cluster.
 */
public static void main(String[] args) throws Exception {
    MemoryTransactionalSpout wordSpout =
            new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
    TransactionalTopologyBuilder topology =
            new TransactionalTopologyBuilder("global-count", "spout", wordSpout, 3);
    topology.setBolt("partial-count", new BatchCount(), 5).noneGrouping("spout");
    topology.setBolt("sum", new UpdateGlobalCount()).globalGrouping("partial-count");

    Config conf = new Config();
    conf.setDebug(true);
    conf.setMaxSpoutPending(3);

    boolean runLocally = (args.length == 0);
    if (runLocally) {
        LocalCluster localCluster = new LocalCluster();
        localCluster.submitTopology("global-count-topology", conf, topology.buildTopology());
        Thread.sleep(100000);
        localCluster.shutdown();
    } else {
        conf.setNumWorkers(3);
        // Best-effort: a bad/missing YAML file is reported, not fatal.
        try {
            Map yamlConf = LoadConf.LoadYaml(args[0]);
            if (yamlConf != null) {
                conf.putAll(yamlConf);
            }
        } catch (Exception e) {
            System.out.println("Input " + args[0] + " isn't one yaml ");
        }
        StormSubmitter.submitTopology("global", conf, topology.buildTopology());
    }
}
示例14: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Runs two DRPC topologies (word counter and fruit counter) on one local
 * cluster, sharing a single LocalDRPC instance, and issues 100 rounds of
 * sample queries against each.
 */
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setMaxSpoutPending(4);

    LocalDRPC drpc = new LocalDRPC();
    LocalCluster localCluster = new LocalCluster();
    localCluster.submitTopology("wordCounter", conf, buildTopology1(drpc));
    localCluster.submitTopology("fruitCounter", conf, buildTopology2(drpc));

    int round = 0;
    while (round++ < 100) {
        System.out.println("DRPC RESULT 1: " + drpc.execute("words", "cat the dog jumped"));
        System.out.println("DRPC RESULT 2: " + drpc.execute("fruits", "2 orange"));
        Thread.sleep(1000);
    }
}
示例15: getConsumerConfig
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Builds the topology configuration used by the consumer, capping the number
 * of in-flight (unacked) spout tuples at 20.
 *
 * @return a fresh {@link Config} for the consumer topology
 */
public Config getConsumerConfig() {
    final Config consumerConf = new Config();
    consumerConf.setMaxSpoutPending(20);
    return consumerConf;
}