This article collects typical usage examples of the Java method org.apache.storm.Config.setMaxTaskParallelism. If you have been wondering what Config.setMaxTaskParallelism does and how to use it, the curated code examples below should help; you can also explore the enclosing class, org.apache.storm.Config, for more context.
The following 15 code examples show Config.setMaxTaskParallelism in use, sorted by popularity by default.
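Before diving into the examples, a quick orientation: setMaxTaskParallelism sets TOPOLOGY_MAX_TASK_PARALLELISM, which caps the parallelism of any single component in the topology, overriding larger parallelism hints. As the examples below show, it is most often used to keep LocalCluster test runs small. Here is a minimal sketch of that pattern (the topology name and sleep duration are illustrative, and TestWordSpout is assumed to be available from storm-core's testing utilities):
import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.testing.TestWordSpout;
import org.apache.storm.topology.TopologyBuilder;

public class MaxTaskParallelismSketch {
public static void main(String[] args) throws Exception {
TopologyBuilder builder = new TopologyBuilder();
// parallelism hint of 10, but the cap below clamps this component to 3 tasks
builder.setSpout("words", new TestWordSpout(), 10);
Config conf = new Config();
conf.setMaxTaskParallelism(3); // no component may exceed 3 tasks
LocalCluster cluster = new LocalCluster();
cluster.submitTopology("parallelism-demo", conf, builder.createTopology());
Thread.sleep(10000);
cluster.shutdown();
}
}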
Example 1: start
import org.apache.storm.Config; // import the package/class the method depends on
private void start(StormTopology topology, boolean runAsLocal) throws Exception {
Config conf = new Config();
conf.put(Constants.StormConfigKey.FULL_SPLITTER_TOPOLOGY_ID, fullSplitterTopologyId);
conf.put(Constants.StormConfigKey.FULL_PULLER_TOPOLOGY_ID, fullPullerTopologyId);
conf.put(Constants.StormConfigKey.ZKCONNECT, this.zkConnect);
conf.setMessageTimeoutSecs(3600);
conf.setMaxSpoutPending(30);
conf.setDebug(true);
conf.setNumWorkers(1);
if (runAsLocal) {
conf.setMaxTaskParallelism(3);
LocalCluster cluster = new LocalCluster();
cluster.submitTopology(topologyName, conf, topology);
} else {
StormSubmitter.submitTopology(topologyName, conf, topology);
}
}
Example 2: main
import org.apache.storm.Config; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
TopologyBuilder builder = new TopologyBuilder();
DRPCSpout drpcSpout = new DRPCSpout("simpledrpc");
builder.setSpout("drpc-input", drpcSpout,1);
builder.setBolt("simple", new SimpleDRPC(), 2)
.noneGrouping("drpc-input");
builder.setBolt("return", new ReturnResults(),1)
.noneGrouping("simple");
Config conf = new Config();
conf.setDebug(true);
conf.setMaxTaskParallelism(1);
try
{
StormSubmitter.submitTopology("drpc-q", conf,builder.createTopology());
}
catch (Exception e)
{
e.printStackTrace();
}
}
Example 3: main
import org.apache.storm.Config; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
LinearDRPCTopologyBuilder builder = construct();
Config conf = new Config();
if (args == null || args.length == 0) {
conf.setMaxTaskParallelism(3);
LocalDRPC drpc = new LocalDRPC();
LocalCluster cluster = new LocalCluster();
cluster.submitTopology("reach-drpc", conf, builder.createLocalTopology(drpc));
String[] urlsToTry = new String[]{ "foo.com/blog/1", "engineering.twitter.com/blog/5", "notaurl.com" };
for (String url : urlsToTry) {
System.out.println("Reach of " + url + ": " + drpc.execute("reach", url));
}
cluster.shutdown();
drpc.shutdown();
}
else {
conf.setNumWorkers(6);
StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createRemoteTopology());
}
}
Example 4: main
import org.apache.storm.Config; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
TopologyBuilder builder = new TopologyBuilder();
DRPCSpout drpcSpout = new DRPCSpout("simplepydrpc");
builder.setSpout("drpc-input", drpcSpout,1);
builder.setBolt("simple", new SimpleDRPC(), 2)
.noneGrouping("drpc-input");
builder.setBolt("return", new ReturnResults(),1)
.noneGrouping("simple");
Config conf = new Config();
conf.setDebug(true);
conf.setMaxTaskParallelism(1);
try
{
StormSubmitter.submitTopology("drpc-py", conf,builder.createTopology());
}
catch (Exception e)
{
e.printStackTrace();
}
}
Example 5: makeStormConfig
import org.apache.storm.Config; // import the package/class the method depends on
protected Config makeStormConfig() {
Config stormConfig = new Config();
stormConfig.setNumWorkers(config.getWorkers());
if (config.getLocal()) {
stormConfig.setMaxTaskParallelism(config.getParallelism());
}
return stormConfig;
}
Example 6: stormConfig
import org.apache.storm.Config; // import the package/class the method depends on
protected static Config stormConfig() {
Config config = new Config();
config.setDebug(false);
config.setMaxTaskParallelism(1);
config.setNumWorkers(1);
return config;
}
Example 7: main
import org.apache.storm.Config; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
String zkConnString = "localhost:2181";
String topic = "words";
BrokerHosts hosts = new ZkHosts(zkConnString);
SpoutConfig kafkaSpoutConfig = new SpoutConfig(hosts, topic, "/" + topic,
"wordcountID");
kafkaSpoutConfig.startOffsetTime = kafka.api.OffsetRequest.EarliestTime();
kafkaSpoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
TopologyBuilder topologyBuilder = new TopologyBuilder();
topologyBuilder.setSpout("kafkaspout", new KafkaSpout(kafkaSpoutConfig));
topologyBuilder.setBolt("stringsplit", new StringToWordsSpliterBolt()).shuffleGrouping("kafkaspout");
topologyBuilder.setBolt("counter", new WordCountCalculatorBolt()).shuffleGrouping("stringsplit");
Config config = new Config();
config.setDebug(true);
if (args != null && args.length > 1) {
config.setNumWorkers(3);
StormSubmitter.submitTopology(args[1], config, topologyBuilder.createTopology());
} else {
// Cap the maximum number of executors that can be spawned
// for a component to 3
config.setMaxTaskParallelism(3);
// LocalCluster is used to run locally
LocalCluster cluster = new LocalCluster();
cluster.submitTopology("KafkaLocal", config, topologyBuilder.createTopology());
// sleep
try {
Thread.sleep(10000);
} catch (InterruptedException e) {
// interrupted while sleeping: kill the locally submitted topology
cluster.killTopology("KafkaLocal");
cluster.shutdown();
}
cluster.shutdown();
}
}
Developer: PacktPublishing, Project: Building-Data-Streaming-Applications-with-Apache-Kafka, Lines: 41, Source: KafkaStormWordCountTopology.java
Example 8: localSubmit
import org.apache.storm.Config; // import the package/class the method depends on
/**
* Used for debugging: runs the topology on a LocalCluster
* @param name topology name
* @param builder topology builder
* @param conf topology configuration
* @throws InterruptedException
*/
private static void localSubmit(String name,TopologyBuilder builder, Config conf)
throws InterruptedException {
conf.setDebug(true);
conf.setMaxTaskParallelism(3);
LocalCluster cluster = new LocalCluster();
cluster.submitTopology(name, conf, builder.createTopology());
Thread.sleep(100000);
cluster.shutdown();
}
Example 9: submitTopology
import org.apache.storm.Config; // import the package/class the method depends on
public static Config submitTopology(ILocalCluster stormCluster, String topologyName,
StormTopology stormTopology) throws Exception {
Config stormConf = new Config();
stormConf.putAll(Utils.readDefaultConfig());
stormConf.put("storm.cluster.mode", "local");
stormConf.setDebug(true);
stormConf.setMaxTaskParallelism(3);
stormConf.put(Config.STORM_TOPOLOGY_SUBMISSION_NOTIFIER_PLUGIN,
org.apache.atlas.storm.hook.StormAtlasHook.class.getName());
stormCluster.submitTopology(topologyName, stormConf, stormTopology);
Thread.sleep(10000);
return stormConf;
}
Example 10: runLocalTopology
import org.apache.storm.Config; // import the package/class the method depends on
/**
* Run the topology locally (inside Eclipse on your desktop, for example)
* @param builder topology builder
* @param conf topology configuration
* @throws InterruptedException
*/
private static void runLocalTopology(TopologyBuilder builder, Config conf) throws InterruptedException {
conf.setMaxTaskParallelism(3);
// turning debug on prints out when tuples are emitted
//conf.setDebug(true);
LocalCluster cluster = new LocalCluster();
LOG.info("Submitting local topology, will be shutdown in 60s");
cluster.submitTopology("CountingTopology", conf, builder.createTopology());
Thread.sleep(60000);
cluster.shutdown();
}
Example 11: main
import org.apache.storm.Config; // import the package/class the method depends on
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
Intialize(args[0]);
logger.info("Successfully loaded Configuration ");
BrokerHosts hosts = new ZkHosts(zkhost);
SpoutConfig spoutConfig = new SpoutConfig(hosts, inputTopic, "/" + KafkaBroker, consumerGroup);
spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
spoutConfig.startOffsetTime = kafka.api.OffsetRequest.EarliestTime();
KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);
String[] partNames = {"status_code"};
String[] colNames = {"date", "request_url", "protocol_type", "status_code"};
DelimitedRecordHiveMapper mapper = new DelimitedRecordHiveMapper().withColumnFields(new Fields(colNames))
.withPartitionFields(new Fields(partNames));
HiveOptions hiveOptions;
// make sure to adjust the batch size and other parameters to your requirements
hiveOptions = new HiveOptions(metaStoreURI, dbName, tblName, mapper).withTxnsPerBatch(250).withBatchSize(2)
.withIdleTimeout(10).withCallTimeout(10000000);
logger.info("Creating Storm Topology");
TopologyBuilder builder = new TopologyBuilder();
builder.setSpout("KafkaSpout", kafkaSpout, 1);
builder.setBolt("frauddetect", new FraudDetectorBolt()).shuffleGrouping("KafkaSpout");
builder.setBolt("KafkaOutputBolt",
new IPFraudKafkaBolt(zkhost, "kafka.serializer.StringEncoder", KafkaBroker, outputTopic), 1)
.shuffleGrouping("frauddetect");
builder.setBolt("HiveOutputBolt", new IPFraudHiveBolt(), 1).shuffleGrouping("frauddetect");
builder.setBolt("HiveBolt", new HiveBolt(hiveOptions)).shuffleGrouping("HiveOutputBolt");
Config conf = new Config();
if (args != null && args.length > 1) {
conf.setNumWorkers(3);
logger.info("Submiting topology to storm cluster");
StormSubmitter.submitTopology(args[1], conf, builder.createTopology());
} else {
// Cap the maximum number of executors that can be spawned
// for a component to 3
conf.setMaxTaskParallelism(3);
// LocalCluster is used to run locally
LocalCluster cluster = new LocalCluster();
logger.info("Submitting topology to local cluster");
cluster.submitTopology("KafkaLocal", conf, builder.createTopology());
// sleep
try {
Thread.sleep(10000);
} catch (InterruptedException e) {
// interrupted while sleeping: log and kill the locally submitted topology
logger.error("Exception occurred: " + e);
cluster.killTopology("KafkaLocal");
logger.info("Shutting down cluster");
cluster.shutdown();
}
cluster.shutdown();
}
}
Developer: PacktPublishing, Project: Building-Data-Streaming-Applications-with-Apache-Kafka, Lines: 65, Source: IPFraudDetectionTopology.java
Example 12: main
import org.apache.storm.Config; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
//Used to build the topology
TopologyBuilder builder = new TopologyBuilder();
//Add the spout, with a name of 'spout'
//and parallelism hint of 5 executors
builder.setSpout("spout", new RandomSentenceSpout(), 5);
//Add the SplitSentence bolt, with a name of 'split'
//and parallelism hint of 8 executors
//shufflegrouping subscribes to the spout, and equally distributes
//tuples (sentences) across instances of the SplitSentence bolt
builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
//Add the counter, with a name of 'count'
//and parallelism hint of 12 executors
//fieldsgrouping subscribes to the split bolt, and
//ensures that the same word is sent to the same instance (group by field 'word')
builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));
//new configuration
Config conf = new Config();
//Set to false to disable debug information when
// running in production on a cluster
conf.setDebug(false);
//If there are arguments, we are running on a cluster
if (args != null && args.length > 0) {
//parallelism hint to set the number of workers
conf.setNumWorkers(3);
//submit the topology
StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
}
//Otherwise, we are running locally
else {
//Cap the maximum number of executors that can be spawned
//for a component to 3
conf.setMaxTaskParallelism(3);
//LocalCluster is used to run locally
LocalCluster cluster = new LocalCluster();
//submit the topology
cluster.submitTopology("word-count", conf, builder.createTopology());
//sleep
Thread.sleep(10000);
//shut down the cluster
cluster.shutdown();
}
}
Example 13: main
import org.apache.storm.Config; // import the package/class the method depends on
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException, InterruptedException, AuthorizationException {
com.typesafe.config.Config config = ConfigPropertieUtil.getInstance();
// ZooKeeper connection string
String zks = config.getString("kafka.zk.hosts");
// topic name
String topic = config.getString("kafka.app.topic");
// ZooKeeper root path for the Kafka broker
String zkRoot = config.getString("kafka.zk.broker.path");
// ZooKeeper server addresses
String zkHosts = config.getString("kafka.zk.servers");
// ZooKeeper port
Integer zkPort = config.getInt("kafka.zk.port");
// number of Storm workers
Integer numWorkers = config.getInt("storm.topology.workers");
// HDFS URL
String hdfsUrl = config.getString("hdfs.url");
// Storm window settings
Integer windowDuration = config.getInt("storm.topology.window.duration");
Integer windowLag = config.getInt("storm.topology.window.lag");
Integer watermarkInterval = config.getInt("storm.topology.watermark.interval");
String id = config.getString("kafka.zk.kafka.group");
BrokerHosts brokerHosts = new ZkHosts(zks);
SpoutConfig spoutConf = new SpoutConfig(brokerHosts, topic, zkRoot, id);
spoutConf.scheme = new SchemeAsMultiScheme(new StringScheme());
spoutConf.zkServers = Arrays.asList(zkHosts.split(","));
spoutConf.zkPort = zkPort;
HdfsBolt hdfsCallerBolt = buildHdfsBolt(hdfsUrl, "log_caller_info_", new Fields("caller_info"));
HdfsBolt hdfsRespParamBolt = buildHdfsBolt(hdfsUrl, "log_resp_param_", new Fields("resp_param"));
HdfsBolt hdfsRespTimeBolt = buildHdfsBolt(hdfsUrl, "log_resp_time_", new Fields("resp_time"));
HdfsBolt hdfsServiceInfoBolt = buildHdfsBolt(hdfsUrl, "log_service_info_", new Fields("service_info"));
TopologyBuilder builder = new TopologyBuilder();
builder.setSpout("kafka-reader", new KafkaSpout(spoutConf), 5);
builder.setBolt("json-analysis", new KafkaJsonAnalysis(), 4).shuffleGrouping("kafka-reader");
// tumbling-window operation based on the log event time, with a 2 s window lag
builder.setBolt("ss-sliding-window",
new ServerServiceWindowBolt()
.withTumblingWindow(new Duration(windowDuration, TimeUnit.SECONDS))
.withWatermarkInterval(new Duration(watermarkInterval, TimeUnit.SECONDS))
.withLag(new Duration(windowLag, TimeUnit.SECONDS)).withTimestampField("timestamp"), 4).fieldsGrouping("json-analysis", new Fields("server_service"));
// builder.setBolt("json-analysis", new StatisticsTimeLineBold(), 5).shuffleGrouping("sliding-window");
builder.setBolt("hdfs-bolt", new SaveHDFSBold(), 1).shuffleGrouping("json-analysis");
builder.setBolt("hdfs-caller-bolt", hdfsCallerBolt, 1).fieldsGrouping("hdfs-bolt", new Fields("caller_info"));
builder.setBolt("hdfs-resp-param-bolt", hdfsRespParamBolt, 1).fieldsGrouping("hdfs-bolt", new Fields("resp_param"));
builder.setBolt("hdfs-resp-time-bolt", hdfsRespTimeBolt, 1).fieldsGrouping("hdfs-bolt", new Fields("resp_time"));
builder.setBolt("hdfs-service-info-bolt", hdfsServiceInfoBolt, 1).fieldsGrouping("hdfs-bolt", new Fields("service_info"));
// builder.setBolt("redis-remove-bolt", new RemoveRedisBolt(), 1);
Config conf = new Config();
//conf.setDebug(true);
if (args != null && args.length > 0) {
conf.setNumWorkers(numWorkers);
StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
} else {
String name = KafkaTopology.class.getSimpleName();
conf.setMaxTaskParallelism(numWorkers);
LocalCluster cluster = new LocalCluster();
cluster.submitTopology(name, conf, builder.createTopology());
Thread.sleep(60000);
cluster.shutdown();
}
}
Example 14: main
import org.apache.storm.Config; // import the package/class the method depends on
public static void main(String[] args)
throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
TopologyBuilder builder = new TopologyBuilder();
LOGGER.info("Building Trade Processing Topology..");
builder.setSpout(TRD_COLLECTOR_SPOUT, new TradeCollectorSpout(),
CONFIG.getNumber("TRD_COLLECTOR_SPOUT_PARALLELISM"));
builder
.setBolt(TRD_ELIGIBILITY_BOLT, new TradeEligibilityBolt(),
CONFIG.getNumber("TRD_ELIGIBILITY_BOLT_PARALLELISM"))
.shuffleGrouping(TRD_COLLECTOR_SPOUT);
builder
.setBolt(TRD_REPORTING_BOLT, new TradeReportPersistenceBolt(),
CONFIG.getNumber("TRD_REPORTING_BOLT_PARALLELISM"))
.shuffleGrouping(TRD_ELIGIBILITY_BOLT, REPORT_STREAM);
builder
.setBolt(TRD_EXCLUSION_BOLT, new TradeExclusionPersistenceBolt(),
CONFIG.getNumber("TRD_EXCLUSION_BOLT_PARALLELISM"))
.shuffleGrouping(TRD_ELIGIBILITY_BOLT, EXCLUDE_STREAM);
Config conf = new Config();
conf.setDebug(CONFIG.is("DEBUG_FLAG"));
conf.setNumWorkers(CONFIG.getInt("NUMBER_OF_WORKERS"));
conf.setMaxTaskParallelism(CONFIG.getInt("MAX_TASK_PARALLELISM"));
conf.setMaxSpoutPending(CONFIG.getInt("MAX_SPOUT_PENDING"));
conf.put(Config.TOPOLOGY_SLEEP_SPOUT_WAIT_STRATEGY_TIME_MS,
CONFIG.getInt("MAX_SPOUT_PENDING_WAIT_MS"));
LOGGER.info("Submitting Trade Processing Topology..");
if (args != null && args.length > 0) {
conf.put(Config.NIMBUS_SEEDS, CONFIG.get("LOCAL_NIMBUS_HOST"));
conf.put(Config.NIMBUS_THRIFT_PORT, CONFIG.getInt("LOCAL_NIMBUS_PORT"));
conf.put(Config.STORM_ZOOKEEPER_PORT, CONFIG.getInt("LOCAL_ZOOKEEPER_PORT"));
StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
} else {
LocalCluster cluster = new LocalCluster();
cluster.submitTopology(TRD_PROCESSING_TOPOLOGY, conf, builder.createTopology());
Utils.sleep(CONFIG.getLong("LOCAL_CLUSTER_RUNTIME"));
cluster.killTopology(TRD_PROCESSING_TOPOLOGY);
cluster.shutdown();
}
}
Example 15: getComponentConfiguration
import org.apache.storm.Config; // import the package/class the method depends on
@Override
public Map<String, Object> getComponentConfiguration() {
Config ret = new Config();
ret.setMaxTaskParallelism(1);
return ret;
}
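Example 15 is the per-component variant of the same knob: a spout or bolt can return a Config from getComponentConfiguration() to cap its own parallelism, independent of the topology-level configuration (Config extends HashMap, so it satisfies the Map return type). A minimal sketch of a bolt using this hook (the class name and single-task rationale are illustrative assumptions):
import java.util.Map;
import org.apache.storm.Config;
import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseBasicBolt;
import org.apache.storm.tuple.Tuple;

// hypothetical bolt that must run single-threaded, e.g. because it holds
// state that cannot be shared across tasks
public class SingleTaskBolt extends BaseBasicBolt {
@Override
public Map<String, Object> getComponentConfiguration() {
Config conf = new Config();
conf.setMaxTaskParallelism(1); // cap this component to a single task
return conf;
}

@Override
public void execute(Tuple input, BasicOutputCollector collector) {
// process the tuple here
}

@Override
public void declareOutputFields(OutputFieldsDeclarer declarer) {
// this sketch emits nothing, so no fields to declare
}
}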