This article collects typical usage examples of the Java method backtype.storm.Config.put. If you have been wondering how to use Config.put, what it does, or where to find examples of it, the curated code samples below should help. You can also read further about its enclosing class, backtype.storm.Config.
The following 15 code examples of the Config.put method are sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
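Before diving into the examples, a quick orientation: backtype.storm.Config extends java.util.HashMap<String, Object>, so put simply stores a key/value pair that is serialized into the topology configuration at submit time. The minimal sketch below (the class name and the "myapp.debug" key are hypothetical, for illustration only) shows the two styles you will see throughout the examples: predefined String constants on Config, and free-form application-defined keys.

import backtype.storm.Config;

public class ConfigPutSketch {
    public static void main(String[] args) {
        Config conf = new Config();
        // Predefined keys are exposed as String constants on Config:
        conf.put(Config.TOPOLOGY_MAX_TASK_PARALLELISM, 1);
        // Because Config extends HashMap<String, Object>, arbitrary
        // application-defined keys work as well (hypothetical key name):
        conf.put("myapp.debug", true);
        System.out.println(conf);
    }
}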
Example 1: buildAndSubmit
import backtype.storm.Config; // import the package/class this method depends on
public void buildAndSubmit() throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    Config config = new Config();
    config.setDebug(true);
    // String nimbusHost = topologyConfig.getProperty("nimbus.host");
    config.put(Config.NIMBUS_HOST, "localhost");
    configureKafkaSpout(builder);
    //configureRouteBolt(builder);
    configurePhoenixTest(builder);
    /*
    builder.setBolt("submitter", new SubmitBolt())
           .shuffleGrouping(ROUTE_BOLT);
    */
    try {
        StormSubmitter.submitTopology("simple-topology", config, builder.createTopology());
    } catch (Exception e) {
        LOG.error("Error submitting topology", e);
    }
}
Example 2: buildAndSubmit
import backtype.storm.Config; // import the package/class this method depends on
public void buildAndSubmit() throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    Config config = new Config();
    config.setDebug(true);
    // String nimbusHost = topologyConfig.getProperty("nimbus.host");
    config.put(Config.NIMBUS_HOST, "localhost");
    configureKafkaSpout(builder);
    configureRouteBolt(builder);
    configureInsertBolt(builder);
    //builder.setBolt("submitter", new SubmitBolt())
    //       .shuffleGrouping(ROUTE_BOLT);
    try {
        StormSubmitter.submitTopology("realestate-topology", config, builder.createTopology());
    } catch (Exception e) {
        LOG.error("Error submitting topology", e);
    }
}
Example 3: testSingleEmptyPartition
import backtype.storm.Config; // import the package/class this method depends on
@Test
public void testSingleEmptyPartition() {
    @SuppressWarnings("unchecked")
    List<Deque<String>> data = new LinkedList<Deque<String>>(Arrays.asList(new LinkedList<String>()));
    TestOrderedInputSpout spout = new TestOrderedInputSpout(data, this.r);
    Config conf = new Config();
    conf.put(TestOrderedInputSpout.NUMBER_OF_PARTITIONS, new Integer(1));
    TestSpoutOutputCollector col = new TestSpoutOutputCollector();
    spout.open(conf, mock(TopologyContext.class), new SpoutOutputCollector(col));
    spout.nextTuple();
    spout.nextTuple();
    spout.nextTuple();
    Assert.assertEquals(0, col.output.size());
}
Example 4: configureKafkaBolt
import backtype.storm.Config; // import the package/class this method depends on
private void configureKafkaBolt(TopologyBuilder builder, Config config) {
    String topic = topologyConfig.getProperty("kafka.topic");
    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerUrl);
    props.put(ProducerConfig.CLIENT_ID_CONFIG, "storm-kafka-producer");
    props.put("metadata.broker.list", brokerUrl);
    props.put("serializer.class", "kafka.serializer.StringEncoder");
    props.put("request.required.acks", "1");
    config.setMaxSpoutPending(20);
    config.put(KafkaBolt.KAFKA_BROKER_PROPERTIES, props);
    KafkaBolt<String, String> kafkaBolt = new KafkaBolt<String, String>()
            .withTopicSelector(new DefaultTopicSelector(topic))
            .withTupleToKafkaMapper(new FieldNameBasedTupleToKafkaMapper<String, String>("key", "log"));
    builder.setBolt("KafkaBolt", kafkaBolt, 3).shuffleGrouping(SPOUT_ID).setDebug(DEBUG);
}
Example 5: testAllPartitionsEmpty
import backtype.storm.Config; // import the package/class this method depends on
@Test
public void testAllPartitionsEmpty() {
    @SuppressWarnings("unchecked")
    List<Deque<String>> data = new LinkedList<Deque<String>>(Arrays.asList(new LinkedList<String>(),
            new LinkedList<String>(), new LinkedList<String>()));
    TestOrderedInputSpout spout = new TestOrderedInputSpout(data, this.r);
    Config conf = new Config();
    conf.put(TestOrderedInputSpout.NUMBER_OF_PARTITIONS, new Integer(3));
    TestSpoutOutputCollector col = new TestSpoutOutputCollector();
    spout.open(conf, mock(TopologyContext.class), new SpoutOutputCollector(col));
    spout.nextTuple();
    spout.nextTuple();
    spout.nextTuple();
    spout.nextTuple();
    spout.nextTuple();
    Assert.assertEquals(0, col.output.size());
}
Example 6: testSingleEmptyPartition
import backtype.storm.Config; // import the package/class this method depends on
@Test
public void testSingleEmptyPartition() {
    TestOrderedFileInputSpout spout = new TestOrderedFileInputSpout();
    Config conf = new Config();
    conf.put(TestOrderedFileInputSpout.NUMBER_OF_PARTITIONS, new Integer(1));
    TestSpoutOutputCollector col = new TestSpoutOutputCollector();
    spout.open(conf, mock(TopologyContext.class), new SpoutOutputCollector(col));
    spout.nextTuple();
    spout.nextTuple();
    spout.nextTuple();
    Assert.assertEquals(0, col.output.size());
}
Example 7: macVendorTest
import backtype.storm.Config; // import the package/class this method depends on
@Test
public void macVendorTest() throws FileNotFoundException {
    File fileFlow = new File(Thread.currentThread().getContextClassLoader().getResource("inputData/flows.json").getPath());
    File checkFlow = new File(Thread.currentThread().getContextClassLoader().getResource("dataCheck/macVendorFlows.json").getPath());
    Scanner flows = new Scanner(fileFlow);
    Scanner checkFlows = new Scanner(checkFlow);
    MacVendorFunction._ouiFilePath = Thread.currentThread().getContextClassLoader().getResource("db/oui-vendors").getPath();
    List<String> fieldsFlow = new ArrayList<String>();
    fieldsFlow.add("flows");
    fieldsFlow.add("macVendor");
    LocalDRPC drpc = new LocalDRPC();
    TridentTopology topology = new TridentTopology();
    topology.newDRPCStream("test", drpc)
            .each(new Fields("args"), new MapperFunction("rb_test"), new Fields("flows"))
            .each(new Fields("flows"), new MacVendorFunction(), new Fields("macVendor"))
            .each(new Fields(fieldsFlow), new MergeMapsFunction(), new Fields("finalMap"))
            .project(new Fields("finalMap"))
            .each(new Fields("finalMap"), new MapToJSONFunction(), new Fields("jsonString"));
    Config conf = new Config();
    conf.put("rbDebug", true);
    conf.setMaxTaskParallelism(1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("testing-topology", conf, topology.build());
    while (flows.hasNextLine()) {
        String stormFlow = drpc.execute("test", flows.nextLine());
        stormFlow = stormFlow.substring(stormFlow.indexOf("{"), stormFlow.indexOf("}") + 1);
        Assert.assertEquals(checkFlows.nextLine(), stormFlow);
    }
}
Example 8: nonTimestampTest
import backtype.storm.Config; // import the package/class this method depends on
@Test
public void nonTimestampTest() throws FileNotFoundException {
    File fileFlow = new File(Thread.currentThread().getContextClassLoader().getResource("inputData/nonTimestampFlows.json").getPath());
    Scanner flows = new Scanner(fileFlow);
    List<String> fieldsFlow = new ArrayList<String>();
    fieldsFlow.add("flows");
    LocalDRPC drpc = new LocalDRPC();
    TridentTopology topology = new TridentTopology();
    topology.newDRPCStream("test", drpc)
            .each(new Fields("args"), new MapperFunction("rb_test"), new Fields("flows"))
            .each(new Fields("flows"), new CheckTimestampFunction(), new Fields("finalMap"))
            .project(new Fields("finalMap"))
            .each(new Fields("finalMap"), new MapToJSONFunction(), new Fields("jsonString"));
    Config conf = new Config();
    conf.put("rbDebug", true);
    conf.setMaxTaskParallelism(1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("testing-topology", conf, topology.build());
    while (flows.hasNextLine()) {
        String stormFlow = drpc.execute("test", flows.nextLine());
        stormFlow = stormFlow.substring(stormFlow.indexOf("{"), stormFlow.indexOf("}") + 1);
        Assert.assertEquals(true, stormFlow.contains("timestamp"));
    }
}
Example 9: analizeHttpUrlTest
import backtype.storm.Config; // import the package/class this method depends on
@Test
public void analizeHttpUrlTest() throws FileNotFoundException {
    File fileFlow = new File(Thread.currentThread().getContextClassLoader().getResource("inputData/httpFlows.json").getPath());
    File checkFlow = new File(Thread.currentThread().getContextClassLoader().getResource("dataCheck/httpFlows.json").getPath());
    Scanner flows = new Scanner(fileFlow);
    Scanner checkFlows = new Scanner(checkFlow);
    List<String> fieldsFlow = new ArrayList<String>();
    fieldsFlow.add("flows");
    fieldsFlow.add("httpUrlMap");
    LocalDRPC drpc = new LocalDRPC();
    TridentTopology topology = new TridentTopology();
    topology.newDRPCStream("test", drpc)
            .each(new Fields("args"), new MapperFunction("rb_test"), new Fields("flows"))
            .each(new Fields("flows"), new AnalizeHttpUrlFunction(), new Fields("httpUrlMap"))
            .each(new Fields(fieldsFlow), new MergeMapsFunction(), new Fields("finalMap"))
            .project(new Fields("finalMap"))
            .each(new Fields("finalMap"), new MapToJSONFunction(), new Fields("jsonString"));
    Config conf = new Config();
    conf.put("rbDebug", true);
    conf.setMaxTaskParallelism(1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("testing-topology", conf, topology.build());
    while (flows.hasNextLine()) {
        String stormFlow = drpc.execute("test", flows.nextLine());
        stormFlow = stormFlow.substring(stormFlow.indexOf("{"), stormFlow.indexOf("}") + 1);
        Assert.assertEquals(checkFlows.nextLine(), stormFlow);
    }
}
Example 10: setTopologyStormConfig
import backtype.storm.Config; // import the package/class this method depends on
private static Config setTopologyStormConfig(JSONObject topologyProperties) throws ConfigurationException {
    Config conf = new Config();
    Iterator<?> keys = topologyProperties.keys();
    while (keys.hasNext()) {
        String stormProperty = (String) keys.next();
        conf.put(stormProperty, topologyProperties.get(stormProperty));
    }
    return conf;
}
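For context, a hypothetical caller of this helper might look like the sketch below. It assumes the JSONObject is a type with keys()/get() accessors like org.json's JSONObject (the original source does not show which JSON library is used); the property names shown are standard Storm configuration keys.

// Hypothetical usage of setTopologyStormConfig (assumes org.json.JSONObject):
JSONObject topologyProperties = new JSONObject();
topologyProperties.put("topology.workers", 2);
topologyProperties.put("topology.debug", false);
Config conf = setTopologyStormConfig(topologyProperties);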
Example 11: main
import backtype.storm.Config; // import the package/class this method depends on
public static void main(String[] args) {
    LocalCluster cluster = new LocalCluster();
    /* begin young-define */
    Config conf = new Config();
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new SpoutLocal(), 1);
    builder.setBolt("split", new SplitSentenceLocal(), 1).shuffleGrouping("spout");
    builder.setBolt("count", new WordCountLocal(), 1).fieldsGrouping("split", new Fields("word"));
    /* end young-define */
    // Recommended: add this line so every bolt/spout runs with a parallelism of 1
    conf.put(Config.TOPOLOGY_MAX_TASK_PARALLELISM, 1);
    // submit the topology
    cluster.submitTopology("SequenceTest", conf, builder.createTopology());
    // wait one minute, after which the topology and cluster are stopped;
    // increase this value as needed while debugging
    try {
        Thread.sleep(60000);
    } catch (InterruptedException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
    // kill the topology
    cluster.killTopology("SequenceTest");
    cluster.shutdown();
}
Example 12: main
import backtype.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new RandomSpout());
    builder.setBolt("exclaim", new ProxyBolt()).shuffleGrouping("spout");
    builder.setBolt("print", new PrintBolt()).shuffleGrouping("exclaim");
    Config conf = new Config();
    conf.setDebug(false);
    /* The Redis connection settings are carried in the Config */
    conf.put("ip", "127.0.0.1");
    conf.put("port", "6379");
    conf.put("password", "password");
    if (args != null && args.length > 0) {
        conf.setNumWorkers(1);
        StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } else {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("test", conf, builder.createTopology());
        Utils.sleep(10 * 1000);
        cluster.killTopology("test");
        cluster.shutdown();
    }
}
Example 13: copyPropertiesIntoCfg
import backtype.storm.Config; // import the package/class this method depends on
private static void copyPropertiesIntoCfg(Config cfg) {
    Properties props = TestSettings.TESTING_PROPS;
    for (String property : props.stringPropertyNames()) {
        cfg.put(property, props.get(property));
    }
}
Example 14: main
import backtype.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.put("redisServerIP", args[0]);
    conf.put("redisServerPort", args[1]);
    conf.put("phraseCount", "4");
    StormSubmitter.submitTopology("trident-eg7", conf, buildTopology());
}
Example 15: getProducerConfig
import backtype.storm.Config; // import the package/class this method depends on
/**
 * Returns the Storm config for the topology that publishes sentences to the Kafka "test" topic using a KafkaBolt.
 * The KAFKA_BROKER_PROPERTIES entry is required by the KafkaBolt.
 *
 * @return the topology config
 */
public Config getProducerConfig() {
    Config conf = new Config();
    conf.setMaxSpoutPending(20);
    Properties props = new Properties();
    props.put("metadata.broker.list", brokerUrl);
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerUrl);
    // props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
    // props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
    props.put("serializer.class", "kafka.serializer.StringEncoder");
    props.put(ProducerConfig.CLIENT_ID_CONFIG, "storm-kafka-producer");
    conf.put(KafkaBolt.KAFKA_BROKER_PROPERTIES, props);
    return conf;
}