本文整理匯總了Java中backtype.storm.Config.setMessageTimeoutSecs方法的典型用法代碼示例。如果您正苦於以下問題:Java Config.setMessageTimeoutSecs方法的具體用法?Java Config.setMessageTimeoutSecs怎麽用?Java Config.setMessageTimeoutSecs使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類backtype.storm.Config
的用法示例。
在下文中一共展示了Config.setMessageTimeoutSecs方法的9個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Creates a standalone topology.
 *
 * @param args the topology arguments
 * @param topo the topology instance
 * @throws Exception in case of creation problems
 */
public static void main(String[] args, AbstractTopology topo) throws Exception {
    final Config config = new Config();
    config.setMessageTimeoutSecs(100);
    final PipelineOptions options = new PipelineOptions(args);
    final RecordingTopologyBuilder builder = new RecordingTopologyBuilder(options);
    topo.createTopology(config, builder);
    // main topology: int numWorkers = options.getNumberOfWorkers(2);
    options.toConf(config);
    final boolean submitRemotely = args != null && args.length > 0;
    if (submitRemotely) {
        // remote submission: the first argument is the topology name
        config.setNumWorkers(2);
        StormSubmitter.submitTopology(args[0], config, builder.createTopology());
    } else {
        // local test run inside an in-process cluster
        config.setMaxTaskParallelism(2);
        final LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("testGenPip", config, builder.createTopology());
    }
}
示例2: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Creates a standalone topology.
 *
 * @param args the topology arguments; when non-empty, {@code args[0]} is the
 *     topology name for remote submission, otherwise a local cluster is used
 * @throws Exception in case of creation problems
 */
public static void main(String[] args) throws Exception {
    Config config = new Config();
    config.setMessageTimeoutSecs(100);
    PipelineOptions options = new PipelineOptions(args);
    RecordingTopologyBuilder b = new RecordingTopologyBuilder(options);
    createTopology(b);
    // BUGFIX: the original called b.close(args[0], config) unconditionally,
    // crashing with NPE/ArrayIndexOutOfBounds when args was null or empty even
    // though the local-cluster branch below explicitly supports that case.
    boolean remote = args != null && args.length > 0;
    b.close(remote ? args[0] : Naming.PIPELINE_NAME, config);
    // main topology: int numWorkers = options.getNumberOfWorkers(2);
    options.toConf(config);
    if (remote) {
        config.setNumWorkers(2);
        StormSubmitter.submitTopology(args[0], config, b.createTopology());
    } else {
        config.setMaxTaskParallelism(2);
        final LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(Naming.PIPELINE_NAME, config, b.createTopology());
    }
}
示例3: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Creates a standalone topology.
 *
 * @param args the topology arguments; when non-empty, {@code args[0]} is the
 *     topology name for remote submission, otherwise a local cluster is used
 * @throws Exception in case of creation problems
 */
public static void main(String[] args) throws Exception {
    Config config = new Config();
    Naming.setDefaultInitializeAlgorithms(config, defaultInitAlgorithms);
    config.setMessageTimeoutSecs(100);
    PipelineOptions options = new PipelineOptions(args);
    RecordingTopologyBuilder b = new RecordingTopologyBuilder(options);
    createTopology(b);
    // BUGFIX: the original called b.close(args[0], config) unconditionally,
    // crashing with NPE/ArrayIndexOutOfBounds when args was null or empty even
    // though the local-cluster branch below explicitly supports that case.
    boolean remote = args != null && args.length > 0;
    b.close(remote ? args[0] : PIP_NAME, config);
    // main topology: int numWorkers = options.getNumberOfWorkers(2);
    options.toConf(config);
    if (remote) {
        config.setNumWorkers(2);
        StormSubmitter.submitTopology(args[0], config, b.createTopology());
    } else {
        config.setMaxTaskParallelism(2);
        final LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(PIP_NAME, config, b.createTopology());
    }
}
示例4: buildAndSubmit
import backtype.storm.Config; //導入方法依賴的package包/類
private void buildAndSubmit() throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
final int numWorkers = Integer.valueOf(topologyConfig.getProperty("num.workers"));
Config config = new Config();
config.setDebug(DEBUG);
config.setNumWorkers(numWorkers);
config.setMaxSpoutPending(1000000);
// https://github.com/apache/storm/tree/v0.10.0/external/storm-kafka
config.setMessageTimeoutSecs(600); // This value(30 secs by default) must
// be larger than retryDelayMaxMs
// (60 secs by default) in
/// KafkaSpout.
TopologyBuilder builder = new TopologyBuilder();
configureKafkaSpout(builder, config);
configureESBolts(builder, config);
// LocalCluster cluster = new LocalCluster();
StormSubmitter.submitTopology("LogAnalyzerV1", config, builder.createTopology());
}
示例5: buildAndSubmit
import backtype.storm.Config; //導入方法依賴的package包/類
private void buildAndSubmit() throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
final int numWorkers = Integer.valueOf(topologyConfig.getProperty("num.workers"));
Config config = new Config();
config.setDebug(DEBUG);
config.setNumWorkers(numWorkers);
config.setMaxSpoutPending(1000000);
// https://github.com/apache/storm/tree/v0.10.0/external/storm-kafka
config.setMessageTimeoutSecs(600); // This value(30 secs by default) must
// be larger than retryDelayMaxMs
// (60 secs by default) in
// KafkaSpout.
TopologyBuilder builder = new TopologyBuilder();
configureKafkaSpout(builder, config);
configureESBolts(builder, config);
// configureHBaseBolts(builder, config);
// conf.put(Config.NIMBUS_HOST, "hdp01.localdomain");
// System.setProperty("storm.jar", "/root/workspace//LearnStorm/target/LearnStorm-0.0.1-SNAPSHOT.jar");
// System.setProperty("hadoop.home.dir", "/tmp");
// LocalCluster cluster = new LocalCluster();
StormSubmitter.submitTopology("ApLogAnalyzerV1", config, builder.createTopology());
}
示例6: createMainTopology
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Creates the main test topology: a single intermediary spout whose stream
 * is shuffle-grouped into a single end bolt.
 *
 * @return the topology output carrying the config, the builder and the
 *     requested number of workers (1)
 */
@Override
public TopologyOutput createMainTopology() {
    final Config config = new Config();
    config.setMessageTimeoutSecs(100);
    Config.registerSerialization(config, DataItem.class, DataItemSerializer.class);
    final RecordingTopologyBuilder builder = new RecordingTopologyBuilder(options);
    builder
        .setSpout("IntermediarySpout",
            new TestIntermediarySpout("IntermediarySpout", TOPOLOGY_NAME, "IntermediarySpoutStreamId"), 1)
        .setNumTasks(1);
    builder
        .setBolt("EndBolt", new TestEndBolt("EndBolt", TOPOLOGY_NAME, "EndBoltStreamId"), 1)
        .shuffleGrouping("IntermediarySpout", "IntermediarySpoutStreamId");
    return new TopologyOutput(config, builder, 1);
}
示例7: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Entry point: wires the crawler topology (begin/loop spouts, URL
 * generation, proxy, fetch, parse, store bolts) and submits it either to a
 * remote cluster (when arguments are given) or to a local test cluster.
 *
 * @param args optional arguments; {@code args[0]} is the remote topology name
 */
public static void main(String[] args) {
    try {
        TopologyBuilder builder = new TopologyBuilder();
        // two sources: one seeds fresh crawls, one re-feeds looped URLs
        builder.setSpout("beginspout", new BeginSpout(), PlatformParas.begin_spout_num).setMaxSpoutPending(200); //1,500
        builder.setSpout("loopspout", new LoopSpout(), PlatformParas.loop_spout_num).setMaxSpoutPending(200);
        builder.setBolt("generateurl", new GenerateUrlBolt(), PlatformParas.generateurl_bolt_num) //2
                .shuffleGrouping("beginspout")
                .shuffleGrouping("loopspout");
        builder.setBolt("generateurl-loop-bolt", new GenerateUrlBolt(), PlatformParas.generateurl_bolt_num)
                .shuffleGrouping("parse", "generate-loop");
        builder.setBolt("proxy", new ProxyBolt(), PlatformParas.proxy_bolt_num)
                .shuffleGrouping("generateurl")
                .shuffleGrouping("generateurl-loop-bolt");
        builder.setBolt("fetch", new FetchBolt(), PlatformParas.fetch_bolt_num)
                .shuffleGrouping("proxy");
        builder.setBolt("parse", new ParseBolt(), PlatformParas.parse_bolt_num)
                .shuffleGrouping("fetch");
        builder.setBolt("store", new StoreBolt(), PlatformParas.store_bolt_num)
                .shuffleGrouping("parse", "store");
        Config conf = new Config();
        conf.setDebug(false);
        // default: 30s
        conf.setMessageTimeoutSecs(PlatformParas.message_timeout_secs);
        //conf.setMaxSpoutPending(2000);
        boolean remote = args != null && args.length > 0;
        if (remote) {
            conf.setNumWorkers(PlatformParas.work_num);
            StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
        } else {
            conf.setMaxTaskParallelism(2);
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("test", conf, builder.createTopology());
        }
    } catch (Exception e) {
        // best-effort launcher: report and return instead of propagating
        e.printStackTrace();
    }
}
示例8: pipelineOptionsTest
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Tests the pipeline options: configures the coordination layer from
 * properties, converts the options to command line arguments, re-parses them
 * as a topology would at startup, and asserts that the configured values
 * end up in the resulting Storm {@link Config}.
 */
@Test
public void pipelineOptionsTest() {
PipelineOptions opts = new PipelineOptions();
opts.setNumberOfWorkers(5);
// stage the coordination-layer settings that should survive the round trip
Properties prop = new Properties();
prop.put(CoordinationConfiguration.PIPELINE_START_SOURCE_AUTOCONNECT, "true");
prop.put(CoordinationConfiguration.INIT_MODE, InitializationMode.DYNAMIC.name());
prop.put(Configuration.HOST_EVENT, "local");
prop.put(Configuration.PORT_EVENT, 1234);
prop.put(Configuration.EVENT_DISABLE_LOGGING, "aaa,bbb");
prop.put(Configuration.PIPELINE_INTERCONN_PORTS, "10-20");
CoordinationConfiguration.configure(prop, false);
System.out.println("Configured " + prop);
// during submission
@SuppressWarnings("rawtypes")
Map stormConf = Utils.readStormConfig();
StormPipelineOptionsSetter optSetter = new StormPipelineOptionsSetter(stormConf, opts);
StormUtils.doCommonConfiguration(optSetter);
System.out.println("Conf " + stormConf);
System.out.println("OPTS " + opts);
// serialize the options to args, as they would be passed to a topology main
String[] args = opts.toArgs("pip");
System.out.println("ARGS " + java.util.Arrays.toString(args));
// in topology
PipelineOptions options = new PipelineOptions(args);
Config config = new Config();
config.setMessageTimeoutSecs(100);
config.setDebug(false);
config.put("windowSize", 1 * 30); // Window size (in secs)
config.put("windowAdvance", 1); // Advance of the window (in secs)
config.put("SUBPIPELINE.NAME", "pip"); //sub-pipeline namespace
//The settings to optimize the storm performance.
config.put(Config.TOPOLOGY_RECEIVER_BUFFER_SIZE, 8);
config.put(Config.TOPOLOGY_TRANSFER_BUFFER_SIZE, 32);
config.put(Config.TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE, 16384);
config.put(Config.TOPOLOGY_EXECUTOR_SEND_BUFFER_SIZE, 16384);
config.put(Configuration.HOST_EVENT, Configuration.getEventHost());
config.put(Configuration.PORT_EVENT, Configuration.getEventPort());
config.put(Configuration.EVENT_DISABLE_LOGGING, Configuration.getEventDisableLogging());
config.put(Configuration.PIPELINE_INTERCONN_PORTS, Configuration.getPipelinePorts());
options.toConf(config);
System.out.println("Pip Config " + config);
// the values staged above must now be visible through the Storm config
Assert.assertEquals("true", config.get(Constants.CONFIG_KEY_SOURCE_AUTOCONNECT));
Assert.assertEquals(InitializationMode.DYNAMIC.name(), config.get(Constants.CONFIG_KEY_INIT_MODE));
Assert.assertEquals("local", config.get(Configuration.HOST_EVENT));
Assert.assertEquals("1234", config.get(Configuration.PORT_EVENT));
Assert.assertEquals("aaa,bbb", config.get(Configuration.EVENT_DISABLE_LOGGING));
Assert.assertEquals("10-20", config.get(Configuration.PIPELINE_INTERCONN_PORTS));
// reset global state so later tests see a clean configuration
CoordinationConfiguration.clear();
}
示例9: run
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Builds and submits the stock-streaming topology: a Kafka spout feeding a
 * stock-processing bolt that in turn feeds a Hive streaming bolt.
 *
 * @param args command line arguments (unused)
 */
public void run(String... args){
    // Kafka source: begin consuming from the current time's offset
    final String kafkaTopic = "stock_topic";
    SpoutConfig kafkaConf = new SpoutConfig(new ZkHosts("127.0.0.1"),
            kafkaTopic, "/kafka_storm", "StormSpout");
    kafkaConf.useStartOffsetTimeIfOffsetOutOfRange = true;
    kafkaConf.startOffsetTime = System.currentTimeMillis();
    KafkaSpout kafkaSpout = new KafkaSpout(kafkaConf);
    // Hive connection configuration
    final String metaStoreURI = "thrift://one.hdp:9083";
    final String dbName = "default";
    final String tblName = "stock_prices";
    // Fields for possible partition
    final String[] partNames = {"name"};
    // Fields for possible column data
    final String[] colNames = {"day", "open", "high", "low", "close", "volume","adj_close"};
    // Record Writer configuration
    DelimitedRecordHiveMapper mapper = new DelimitedRecordHiveMapper()
            .withColumnFields(new Fields(colNames))
            .withPartitionFields(new Fields(partNames));
    HiveOptions hiveOptions = new HiveOptions(metaStoreURI, dbName, tblName, mapper)
            .withTxnsPerBatch(2)
            .withBatchSize(100)
            .withIdleTimeout(10)
            .withCallTimeout(10000000);
            //.withKerberosKeytab(path_to_keytab)
            //.withKerberosPrincipal(krb_principal);
    // wire the topology: spout -> processing bolt -> Hive bolt
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(KAFKA_SPOUT_ID, kafkaSpout);
    builder.setBolt(STOCK_PROCESS_BOLT_ID, new StockDataBolt()).shuffleGrouping(KAFKA_SPOUT_ID);
    builder.setBolt(HIVE_BOLT_ID, new HiveBolt(hiveOptions)).shuffleGrouping(STOCK_PROCESS_BOLT_ID);
    Config config = new Config();
    config.setNumWorkers(1);
    config.setMessageTimeoutSecs(60);
    String topologyName = "StormHiveStreamingTopo";
    try {
        StormSubmitter.submitTopology(topologyName, config, builder.createTopology());
    } catch (AlreadyAliveException | InvalidTopologyException | AuthorizationException ex) {
        Logger.getLogger(Topology.class.getName()).log(Level.SEVERE, null, ex);
    }
}