This article collects typical usage examples of the Java method org.apache.storm.Config.put. If you are wondering what Config.put does, how to call it, or want to see it in context, the curated examples below should help. You can also explore further usages of the enclosing class org.apache.storm.Config.
The following presents 15 code examples of the Config.put method, sorted by popularity by default.
Example 1: setupOnce
import org.apache.storm.Config; // import the package/class required by this method
@BeforeClass
public static void setupOnce() throws Exception {
System.out.println("------> Creating Sheep \uD83D\uDC11\n");
clusterParam = new MkClusterParam();
clusterParam.setSupervisors(1);
Config daemonConfig = new Config();
daemonConfig.put(Config.STORM_LOCAL_MODE_ZMQ, false);
clusterParam.setDaemonConf(daemonConfig);
makeConfigFile();
Config conf = new Config();
conf.setNumWorkers(1);
completeTopologyParam = new CompleteTopologyParam();
completeTopologyParam.setStormConf(conf);
}
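For context, here is a minimal sketch (not part of the original source; the spout/bolt ids and test data are assumptions) of how the clusterParam and completeTopologyParam prepared above are typically consumed with Storm's Testing utilities:
@Test
public void exampleCompleteTopologyTest() throws Exception {
    Testing.withSimulatedTimeLocalCluster(clusterParam, new TestJob() {
        @Override
        public void run(ILocalCluster cluster) throws Exception {
            TopologyBuilder builder = new TopologyBuilder();
            // ... wire up the spouts and bolts under test here ...
            MockedSources mockedSources = new MockedSources();
            mockedSources.addMockData("spout", new Values("sheep")); // hypothetical spout id
            completeTopologyParam.setMockedSources(mockedSources);
            // runs the topology to completion and captures all emitted tuples
            Map result = Testing.completeTopology(cluster, builder.createTopology(), completeTopologyParam);
            // assert on Testing.readTuples(result, "bolt") ...
        }
    });
}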
Example 2: start
import org.apache.storm.Config; // import the package/class required by this method
private void start(StormTopology topology, boolean runAsLocal) throws Exception {
Config conf = new Config();
conf.put(Constants.StormConfigKey.FULL_SPLITTER_TOPOLOGY_ID, fullSplitterTopologyId);
conf.put(Constants.StormConfigKey.FULL_PULLER_TOPOLOGY_ID, fullPullerTopologyId);
conf.put(Constants.StormConfigKey.ZKCONNECT, this.zkConnect);
conf.setMessageTimeoutSecs(3600);
conf.setMaxSpoutPending(30);
conf.setDebug(true);
conf.setNumWorkers(1);
if (runAsLocal) {
conf.setMaxTaskParallelism(3);
LocalCluster cluster = new LocalCluster();
cluster.submitTopology(topologyName, conf, topology);
} else {
StormSubmitter.submitTopology(topologyName, conf, topology);
}
}
Example 3: submitTopology
import org.apache.storm.Config; // import the package/class required by this method
private LocalCluster submitTopology() throws AlreadyAliveException,
InvalidTopologyException {
Config stormConfig = new Config();
stormConfig.setDebug(false);
stormConfig.setFallBackOnJavaSerialization(true);
stormConfig.setNumWorkers(1);
// enable backpressure since the spouts can move at a much faster speed than the bolts
stormConfig.put(Config.TOPOLOGY_BACKPRESSURE_ENABLE, true);
stormConfig.put(Config.TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE, 2048);
stormConfig.put(Config.TOPOLOGY_EXECUTOR_SEND_BUFFER_SIZE, 2048);
stormConfig.put(Config.TOPOLOGY_SLEEP_SPOUT_WAIT_STRATEGY_TIME_MS, 1000);
stormConfig.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, 10);
stormConfig.setSkipMissingKryoRegistrations(true);
stormConfig.put(ConfigurationConstants.CHATALYTICS_CONFIG.txt,
YamlUtils.writeYaml(chatalyticsConfig));
LocalCluster cluster = new LocalCluster();
cluster.submitTopology(TOPOLOGY_NAME, stormConfig, chatTopology);
return cluster;
}
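Since this helper returns the LocalCluster, a caller (hypothetical, not from the original source) is expected to tear it down once the test finishes, for example:
LocalCluster cluster = submitTopology();
try {
    Utils.sleep(30 * 1000); // let the topology process for a while
} finally {
    cluster.killTopology(TOPOLOGY_NAME);
    cluster.shutdown();
}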
Example 4: testMetricsAdditionNotReplacement
import org.apache.storm.Config; // import the package/class required by this method
@Test
public void testMetricsAdditionNotReplacement() {
Config config = new Config();
Map<String, String> metrics = new HashMap<>();
metrics.put("foo", "foo.bar.baz");
config.put(Config.TOPOLOGY_WORKER_METRICS, metrics);
SigarLoggingMetricsConsumer.register(config, null);
Assert.assertNotNull(config.get(Config.TOPOLOGY_WORKER_METRICS));
Map<String, String> actual = (Map<String, String>) config.get(Config.TOPOLOGY_WORKER_METRICS);
Assert.assertTrue(actual.keySet().containsAll(SigarLoggingMetricsConsumer.METRICS.keySet()));
Assert.assertTrue(actual.values().containsAll(SigarLoggingMetricsConsumer.METRICS.values()));
Assert.assertEquals(actual.get("foo"), "foo.bar.baz");
}
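The register implementation this test exercises merges its metrics into any existing Config.TOPOLOGY_WORKER_METRICS entry rather than overwriting it. A minimal sketch of that merge-not-replace pattern (an illustration, not SigarLoggingMetricsConsumer's actual code):
public static void register(Config config, Object argument) {
    @SuppressWarnings("unchecked")
    Map<String, String> existing = (Map<String, String>) config.get(Config.TOPOLOGY_WORKER_METRICS);
    Map<String, String> merged = new HashMap<>();
    if (existing != null) {
        merged.putAll(existing); // keep metrics the user already registered
    }
    merged.putAll(METRICS); // add this consumer's metrics on top
    config.put(Config.TOPOLOGY_WORKER_METRICS, merged);
}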
Example 5: getConfig
import org.apache.storm.Config; // import the package/class required by this method
protected Config getConfig(String[] settings) {
Config config = new Config();
config.put("mongodb.ip", settings[0]);
config.put("mongodb.port", Integer.parseInt(settings[1]));
config.put("redis.ip", settings[2]);
config.put("redis.port", Integer.parseInt(settings[3]));
config.put("kafka.properties", settings[4]);
config.setDebug(true);
config.setNumWorkers(2);
return config;
}
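A hypothetical invocation (the host and port values are placeholders) would be:
String[] settings = {"127.0.0.1", "27017", "127.0.0.1", "6379", "kafka.properties"};
Config config = getConfig(settings);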
Example 6: buildAndSubmit
import org.apache.storm.Config; // import the package/class required by this method
public void buildAndSubmit() throws Exception {
TopologyBuilder builder = new TopologyBuilder();
Config config = new Config();
config.setDebug(true);
// String nimbusHost = topologyConfig.getProperty("nimbus.host");
config.put(Config.NIMBUS_HOST, "localhost");
configureKafkaSpout(builder);
//configureRouteBolt(builder);
configurePhoenixTest(builder);
/*
builder.setBolt("submitter", new SubmitBolt())
.shuffleGrouping(ROUTE_BOLT);
*/
try {
StormSubmitter.submitTopology("simple-topology", config, builder.createTopology());
} catch (Exception e) {
LOG.error("Error submitting topology", e);
}
}
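Every value put into the Config this way is serialized with the topology and handed back to each component through prepare(). As a minimal sketch (the bolt and the key it reads are assumptions, not from the original source):
import java.util.Map;
import org.apache.storm.Config;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Tuple;

public class ConfigAwareBolt extends BaseRichBolt {
    private transient OutputCollector collector;
    private String nimbusHost;

    @Override
    public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
        // reads back the value that buildAndSubmit put into the topology config
        this.nimbusHost = (String) conf.get(Config.NIMBUS_HOST);
    }

    @Override
    public void execute(Tuple tuple) {
        collector.ack(tuple);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // this bolt emits nothing
    }
}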
Example 7: runTopology
import org.apache.storm.Config; // import the package/class required by this method
private void runTopology(File responderFile) throws Exception
{
MkClusterParam mkClusterParam = new MkClusterParam();
// The test sometimes fails because of timing issues when more than one supervisor is set.
mkClusterParam.setSupervisors(1);
// Maybe using "withSimulatedTimeLocalCluster" would be better to avoid worrying about timing.
Config conf = PirkTopology.createStormConf();
conf.put(StormConstants.OUTPUT_FILE_KEY, responderFile.getAbsolutePath());
conf.put(StormConstants.N_SQUARED_KEY, nSquared.toString());
conf.put(StormConstants.QUERY_INFO_KEY, queryInfo.toMap());
// conf.setDebug(true);
mkClusterParam.setDaemonConf(conf);
TestJob testJob = createPirkTestJob(conf);
Testing.withLocalCluster(mkClusterParam, testJob);
// Testing.withSimulatedTimeLocalCluster(mkClusterParam, testJob);
}
Example 8: main
import org.apache.storm.Config; // import the package/class required by this method
public static void main(final String[] args) throws Exception {
if (!ExclamationTopology.parseParameters(args)) {
return;
}
// build Topology the Storm way
final TopologyBuilder builder = ExclamationTopology.buildTopology();
// execute program locally
Config conf = new Config();
conf.put(ExclamationBolt.EXCLAMATION_COUNT, ExclamationTopology.getExclamation());
conf.put(FlinkLocalCluster.SUBMIT_BLOCKING, true); // only required to stabilize integration test
final FlinkLocalCluster cluster = FlinkLocalCluster.getLocalCluster();
cluster.submitTopology(TOPOLOGY_ID, conf, FlinkTopology.createTopology(builder));
cluster.shutdown();
}
Example 9: main
import org.apache.storm.Config; // import the package/class required by this method
public static void main(final String[] args) throws AlreadyAliveException, InvalidTopologyException,
NotAliveException {
if (!WordCountTopology.parseParameters(args)) {
return;
}
// build Topology the Storm way
final TopologyBuilder builder = WordCountTopology.buildTopology();
// execute program on Flink cluster
final Config conf = new Config();
// can be changed to remote address
conf.put(Config.NIMBUS_HOST, "localhost");
// use the default Flink jobmanager.rpc.port
conf.put(Config.NIMBUS_THRIFT_PORT, 6123);
final FlinkClient cluster = FlinkClient.getConfiguredClient(conf);
cluster.submitTopology(topologyId, uploadedJarLocation, FlinkTopology.createTopology(builder));
Utils.sleep(5 * 1000);
cluster.killTopology(topologyId);
}
Example 10: testProgram
import org.apache.storm.Config; // import the package/class required by this method
@Override
protected void testProgram() throws Exception {
final String[] tokens = this.resultPath.split(":");
final String outputFile = tokens[tokens.length - 1];
final TopologyBuilder builder = new TopologyBuilder();
builder.setSpout(spoutId, new FiniteRandomSpout(0, 10, 2));
builder.setBolt(boltId, new TaskIdBolt(), 2).fieldsGrouping(
spoutId, FiniteRandomSpout.STREAM_PREFIX + 0, new Fields("number"));
builder.setBolt(sinkId, new BoltFileSink(outputFile)).shuffleGrouping(boltId);
final FlinkLocalCluster cluster = FlinkLocalCluster.getLocalCluster();
Config conf = new Config();
conf.put(FlinkLocalCluster.SUBMIT_BLOCKING, true); // only required to stabilize integration test
cluster.submitTopology(topologyId, conf, FlinkTopology.createTopology(builder));
cluster.shutdown();
}
Example 11: testProgram
import org.apache.storm.Config; // import the package/class required by this method
@Override
protected void testProgram() throws Exception {
final TopologyBuilder builder = new TopologyBuilder();
// get input data
builder.setSpout(spoutId1, new FiniteRandomSpout(0, 10));
builder.setSpout(spoutId2, new FiniteRandomSpout(1, 8));
builder.setSpout(spoutId3, new FiniteRandomSpout(2, 13));
builder.setBolt(boltId, new MergerBolt())
.shuffleGrouping(spoutId1, FiniteRandomSpout.STREAM_PREFIX + 0)
.shuffleGrouping(spoutId2, FiniteRandomSpout.STREAM_PREFIX + 0)
.shuffleGrouping(spoutId3, FiniteRandomSpout.STREAM_PREFIX + 0);
final String[] tokens = this.resultPath.split(":");
final String outputFile = tokens[tokens.length - 1];
builder.setBolt(sinkId, new BoltFileSink(outputFile)).shuffleGrouping(boltId);
// execute program locally
final FlinkLocalCluster cluster = FlinkLocalCluster.getLocalCluster();
Config conf = new Config();
conf.put(FlinkLocalCluster.SUBMIT_BLOCKING, true); // only required to stabilize integration test
cluster.submitTopology(topologyId, conf, FlinkTopology.createTopology(builder));
cluster.shutdown();
}
Example 12: main
import org.apache.storm.Config; // import the package/class required by this method
public static void main(String[] args) throws Exception {
if (args.length != 1) {
throw new RuntimeException("Specify topology name");
}
TopologyBuilder builder = new TopologyBuilder();
int spouts = 2;
int bolts = 2;
builder.setSpout("word", new AckingTestWordSpout(), spouts);
builder.setBolt("exclaim1", new ExclamationBolt(), bolts)
.shuffleGrouping("word");
Config conf = new Config();
conf.setDebug(true);
// Put an arbitrary large number here if you don't want to slow the topology down
conf.setMaxSpoutPending(1000 * 1000 * 1000);
// Acking is enabled by configuring at least one acker executor
conf.setNumAckers(1);
conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");
// Set the number of workers
conf.setNumWorkers(2);
StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
}
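With ackers enabled, the bolts must anchor and ack for the at-least-once guarantee to hold. A minimal sketch of an ExclamationBolt following that pattern (an assumption; the original bolt's code is not shown here):
public class ExclamationBolt extends BaseRichBolt {
    private transient OutputCollector collector;

    @Override
    public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
    }

    @Override
    public void execute(Tuple tuple) {
        // anchoring the output to the input tuple lets the acker track the tuple tree
        collector.emit(tuple, new Values(tuple.getString(0) + "!!!"));
        collector.ack(tuple);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("word"));
    }
}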
Example 13: main
import org.apache.storm.Config; // import the package/class required by this method
public static void main(String[] args) throws Exception {
TopologyBuilder builder = new TopologyBuilder();
builder.setSpout("word0", new TestWordSpout(), 2);
builder.setSpout("word1", new TestWordSpout(), 2);
builder.setSpout("word2", new TestWordSpout(), 2);
builder.setBolt("exclaim1", new ExclamationBolt(), 2)
.shuffleGrouping("word0")
.shuffleGrouping("word1")
.shuffleGrouping("word2");
Config conf = new Config();
conf.setDebug(true);
conf.setMaxSpoutPending(10);
conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");
if (args != null && args.length > 0) {
conf.setNumWorkers(3);
StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
} else {
LocalCluster cluster = new LocalCluster();
cluster.submitTopology("test", conf, builder.createTopology());
Utils.sleep(10000);
cluster.killTopology("test");
cluster.shutdown();
}
}
Example 14: main
import org.apache.storm.Config; // import the package/class required by this method
public static void main(String[] args) throws Exception {
Config conf = new Config();
conf.setMaxSpoutPending(20);
conf.put(Config.TOPOLOGY_TRIDENT_WINDOWING_INMEMORY_CACHE_LIMIT, 100);
// window-state table should already be created with cf:tuples column
HBaseWindowsStoreFactory windowStoreFactory = new HBaseWindowsStoreFactory(
        new HashMap<String, Object>(), "window-state",
        "cf".getBytes("UTF-8"), "tuples".getBytes("UTF-8"));
if (args.length == 0) {
LocalCluster cluster = new LocalCluster();
String topologyName = "wordCounterWithWindowing";
cluster.submitTopology(topologyName, conf, buildTopology(windowStoreFactory));
Utils.sleep(120 * 1000);
cluster.killTopology(topologyName);
cluster.shutdown();
System.exit(0);
} else {
conf.setNumWorkers(3);
StormSubmitter.submitTopologyWithProgressBar(args[0], conf, buildTopology(windowStoreFactory));
}
}
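As the comment above notes, the window-state table with a cf column family must exist before the topology is submitted; with the standard HBase shell that is create 'window-state', 'cf'.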
Example 15: buildTopology
import org.apache.storm.Config; // import the package/class required by this method
public void buildTopology(String[] args) {
if (parseCommandArgs(args) != 0) {
return;
}
TopologyBuilder builder = new TopologyBuilder();
builder.setSpout("CanalClientSpout", new CanalClientSpout(), 1);
builder.setBolt("KafkaProducerBolt", new KafkaProducerBolt(), 1).shuffleGrouping("CanalClientSpout");
Config conf = new Config();
conf.put(Constants.ZOOKEEPER_SERVERS, zkServers);
conf.put(Constants.EXTRACTOR_TOPOLOGY_ID, extractorTopologyId);
logger.info(Constants.ZOOKEEPER_SERVERS + "=" + zkServers);
logger.info(Constants.EXTRACTOR_TOPOLOGY_ID + "=" + extractorTopologyId);
conf.setNumWorkers(1);
conf.setMessageTimeoutSecs(60);
if (!runAsLocal) {
conf.setDebug(false);
try {
StormSubmitter.submitTopology(extractorTopologyId, conf, builder.createTopology());
} catch (Exception e) {
logger.error("Failed to submit topology " + extractorTopologyId, e);
}
} else {
conf.setDebug(false);
LocalCluster cluster = new LocalCluster();
cluster.submitTopology(extractorTopologyId, conf, builder.createTopology());
}
}