This article collects typical usage examples of the Java method org.apache.storm.Config.setNumWorkers. If you are unsure what Config.setNumWorkers does, how to call it, or where to find working examples, the curated snippets below should help. You can also explore further usage of its enclosing class, org.apache.storm.Config.
The following presents 15 code examples of the Config.setNumWorkers method, sorted by popularity by default.
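Before the examples, a minimal sketch of the method in isolation may help: setNumWorkers(n) asks the cluster to run the topology in n worker JVM processes (internally it sets Config.TOPOLOGY_WORKERS). The class name and topology name below are illustrative placeholders, not taken from any of the examples:

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.topology.TopologyBuilder;

public class NumWorkersSketch {
    public static void main(String[] args) throws Exception {
        TopologyBuilder builder = new TopologyBuilder();
        // ... declare spouts and bolts on the builder here; a real run needs at least one spout ...

        Config conf = new Config();
        conf.setNumWorkers(3); // request 3 worker processes (Config.TOPOLOGY_WORKERS = 3)

        // In local mode the worker count is largely advisory; on a real cluster,
        // StormSubmitter.submitTopology would spread the executors across 3 workers.
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("num-workers-sketch", conf, builder.createTopology());
    }
}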
Example 1: main
import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) {
    FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence"), 3,
            new Values("this is simple example of trident topology"),
            new Values("this example count same words"));
    spout.setCycle(true);
    //TransactionalTridentKafkaSpout spout = new TransactionalTridentKafkaSpout(new TridentKafkaConfig(new ZkHosts("localhost:9091"), "test"));
    TridentTopology topology = new TridentTopology();
    MemoryMapState.Factory stateFactory = new MemoryMapState.Factory();
    topology.newStream("spout1", spout)
            .each(new Fields("sentence"), new Split(), new Fields("word"))
            .groupBy(new Fields("word"))
            .persistentAggregate(stateFactory, new Count(), new Fields("count"))
            .newValuesStream()
            .filter(new DisplayOutputFilter()).parallelismHint(6);
    Config config = new Config();
    config.setNumWorkers(3);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("storm-trident-example", config, topology.build());
}
Developer: PacktPublishing | Project: Practical-Real-time-Processing-and-Analytics | Lines: 22 | Source: BasicTridentTopology.java
Example 2: main
import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) {
    TridentKafkaConfig config = new TridentKafkaConfig(
            new ZkHosts("localhost:2181"), "storm-trident-diy");
    config.scheme = new SchemeAsMultiScheme(new StringScheme());
    config.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
    TransactionalTridentKafkaSpout spout = new TransactionalTridentKafkaSpout(config);
    TridentTopology topology = new TridentTopology();
    topology.newStream("spout", spout).filter(new ExcludePhoneNumber())
            .each(new Fields("str"), new DeviceInfoExtractor(), new Fields("phone", "bytes"))
            .groupBy(new Fields("phone"))
            .aggregate(new Fields("bytes", "phone"), new Sum(), new Fields("sum"))
            .applyAssembly(new FirstN(10, "sum"))
            .each(new Fields("phone", "sum"), new Debug());
    Config config1 = new Config();
    config1.setNumWorkers(3);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("storm-trident-diy", config1, topology.build());
}
Developer: PacktPublishing | Project: Practical-Real-time-Processing-and-Analytics | Lines: 21 | Source: TridentDIY.java
Example 3: main
import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws InterruptedException {
    Config config = new Config();
    config.setNumWorkers(3);
    TopologyBuilder topologyBuilder = new TopologyBuilder();
    String zkConnString = "localhost:2181";
    String topicName = "sensor-data";
    BrokerHosts hosts = new ZkHosts(zkConnString);
    SpoutConfig spoutConfig = new SpoutConfig(hosts, topicName, "/" + topicName, UUID.randomUUID().toString());
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);
    topologyBuilder.setSpout("spout", kafkaSpout, 1);
    topologyBuilder.setBolt("es-bolt", new ElasticSearchBolt(), 1).shuffleGrouping("spout");
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("storm-es-example", config, topologyBuilder.createTopology());
}
Developer: PacktPublishing | Project: Practical-Real-time-Processing-and-Analytics | Lines: 20 | Source: SensorTopology.java
Example 4: main
import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) {
    Config config = new Config();
    config.setNumWorkers(3);
    TopologyBuilder topologyBuilder = new TopologyBuilder();
    String zkConnString = "localhost:2181";
    String topicName = "storm-diy";
    BrokerHosts hosts = new ZkHosts(zkConnString);
    SpoutConfig spoutConfig = new SpoutConfig(hosts, topicName, "/" + topicName, UUID.randomUUID().toString());
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);
    topologyBuilder.setSpout("spout", kafkaSpout, 1);
    topologyBuilder.setBolt("parser", new ParseAndUsageBolt(), 1).shuffleGrouping("spout");
    topologyBuilder.setBolt("usageCassandra", new UsageCassandraBolt("localhost", "usage"), 1).shuffleGrouping("parser", "usagestream");
    topologyBuilder.setBolt("tdrCassandra", new TDRCassandraBolt("localhost", "tdr"), 1).shuffleGrouping("parser", "tdrstream");
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("storm-diy", config, topologyBuilder.createTopology());
}
Developer: PacktPublishing | Project: Practical-Real-time-Processing-and-Analytics | Lines: 22 | Source: TelecomProcessorTopology.java
Example 5: setupOnce
import org.apache.storm.Config; // import the package/class this method depends on
@BeforeClass
public static void setupOnce() throws Exception {
    System.out.println("------> Creating Sheep \uD83D\uDC11\n");
    clusterParam = new MkClusterParam();
    clusterParam.setSupervisors(1);
    Config daemonConfig = new Config();
    daemonConfig.put(Config.STORM_LOCAL_MODE_ZMQ, false);
    clusterParam.setDaemonConf(daemonConfig);
    makeConfigFile();
    Config conf = new Config();
    conf.setNumWorkers(1);
    completeTopologyParam = new CompleteTopologyParam();
    completeTopologyParam.setStormConf(conf);
}
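For context, the clusterParam and completeTopologyParam prepared above are typically consumed through the org.apache.storm.Testing API (org.apache.storm.Testing, org.apache.storm.ILocalCluster, org.apache.storm.testing.TestJob). A hedged sketch of such a test method, assuming a hypothetical buildTopology() helper that is not part of the original class:

@Test
public void topologyCompletesOnSimulatedCluster() throws Exception {
    Testing.withSimulatedTimeLocalCluster(clusterParam, new TestJob() {
        @Override
        public void run(ILocalCluster cluster) throws Exception {
            // buildTopology() is a hypothetical helper supplying the topology under test
            Map result = Testing.completeTopology(cluster, buildTopology(), completeTopologyParam);
            // inspect emitted tuples here, e.g. via Testing.readTuples(result, "spout-id")
        }
    });
}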
Example 6: main
import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
    // create an instance of TopologyBuilder class
    TopologyBuilder builder = new TopologyBuilder();
    // set the spout class
    builder.setSpout("SampleSpout", new SampleSpout(), 2);
    // set the bolt class
    builder.setBolt("SampleBolt", new SampleBolt(), 4).shuffleGrouping("SampleSpout");
    Config conf = new Config();
    conf.setNumWorkers(3);
    // Submit the topology to the remote cluster; args[0] is the topology name
    try {
        StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } catch (AlreadyAliveException alreadyAliveException) {
        System.out.println(alreadyAliveException);
    } catch (InvalidTopologyException invalidTopologyException) {
        System.out.println(invalidTopologyException);
    } catch (AuthorizationException e) {
        e.printStackTrace();
    }
}
Example 7: main
import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setMaxSpoutPending(20);
    LocalDRPC drpc = new LocalDRPC();
    if (args.length == 0) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("CountryCount", conf, buildTopology(drpc));
        Thread.sleep(2000);
        for (int i = 0; i < 100; i++) {
            System.out.println("Result - " + drpc.execute("Count", "Japan India Europe"));
            Thread.sleep(1000);
        }
    } else {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[0], conf, buildTopology(null));
        Thread.sleep(2000);
        DRPCClient client = new DRPCClient(conf, "RRPC-Server", 1234);
        System.out.println(client.execute("Count", "Japan India Europe"));
    }
}
Example 8: main
import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("integer", new RandomIntegerSpout(), 1);
    builder.setBolt("slidingsum", new SlidingWindowSumBolt().withWindow(new Count(30), new Count(10)), 1)
            .shuffleGrouping("integer");
    builder.setBolt("tumblingavg", new TumblingWindowAvgBolt().withTumblingWindow(new Count(3)), 1)
            .shuffleGrouping("slidingsum");
    builder.setBolt("printer", new PrinterBolt(), 1).shuffleGrouping("tumblingavg");
    Config conf = new Config();
    conf.setDebug(true);
    if (args != null && args.length > 0) {
        conf.setNumWorkers(1);
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    } else {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("test", conf, builder.createTopology());
        Utils.sleep(40000);
        cluster.killTopology("test");
        cluster.shutdown();
    }
}
Example 9: main
import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("word", new WordSpout(), 1);
    builder.setBolt("exclaim", new ExclamationBolt(), 1).shuffleGrouping("word"); // tuple flow: word -> exclaim
    builder.setBolt("print", new PrintBolt(), 1).shuffleGrouping("exclaim"); // exclaim -> print
    Config conf = new Config();
    conf.setDebug(true);
    if (args != null && args.length > 0) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    } else {
        // the storm dependency is <scope>provided</scope>; comment that scope out for local development
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("test3", conf, builder.createTopology());
        Utils.sleep(60 * 1000);
        cluster.killTopology("test3");
        cluster.shutdown();
    }
}
Example 10: submitTopology
import org.apache.storm.Config; // import the package/class this method depends on
private LocalCluster submitTopology() throws AlreadyAliveException, InvalidTopologyException {
    Config stormConfig = new Config();
    stormConfig.setDebug(false);
    stormConfig.setFallBackOnJavaSerialization(true);
    stormConfig.setNumWorkers(1);
    // enable backpressure since the spouts can move at a much faster speed than the bolts
    stormConfig.put(Config.TOPOLOGY_BACKPRESSURE_ENABLE, true);
    stormConfig.put(Config.TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE, 2048);
    stormConfig.put(Config.TOPOLOGY_EXECUTOR_SEND_BUFFER_SIZE, 2048);
    stormConfig.put(Config.TOPOLOGY_SLEEP_SPOUT_WAIT_STRATEGY_TIME_MS, 1000);
    stormConfig.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, 10);
    stormConfig.setSkipMissingKryoRegistrations(true);
    stormConfig.put(ConfigurationConstants.CHATALYTICS_CONFIG.txt,
            YamlUtils.writeYaml(chatalyticsConfig));
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology(TOPOLOGY_NAME, stormConfig, chatTopology);
    return cluster;
}
Example 11: getConfig
import org.apache.storm.Config; // import the package/class this method depends on
protected Config getConfig(String[] SETTINGS) {
    Config config = new Config();
    // config.put("mongodb.ip", args[2]);
    // config.put("mongodb.port", Integer.parseInt(args[3]));
    // config.put("redis.ip", args[4]);
    // config.put("redis.port", Integer.parseInt(args[5]));
    // config.put("kafka.properties", args[6]);
    config.put("mongodb.ip", SETTINGS[0]);
    config.put("mongodb.port", Integer.parseInt(SETTINGS[1]));
    config.put("redis.ip", SETTINGS[2]);
    config.put("redis.port", Integer.parseInt(SETTINGS[3]));
    config.put("kafka.properties", SETTINGS[4]);
    config.setDebug(true);
    config.setNumWorkers(2);
    return config;
}
Example 12: getConfig
import org.apache.storm.Config; // import the package/class this method depends on
protected Config getConfig(String[] SETTINGS) {
    Config config = new Config();
    // config.put("mongodb.ip", args[2]);
    // config.put("mongodb.port", Integer.parseInt(args[3]));
    // config.put("redis.ip", args[4]);
    // config.put("redis.port", Integer.parseInt(args[5]));
    // config.put("kafka.properties", args[6]);
    config.put("mongodb.ip", SETTINGS[0]);
    config.put("mongodb.port", Integer.parseInt(SETTINGS[1]));
    config.put("redis.ip", SETTINGS[2]);
    config.put("redis.port", Integer.parseInt(SETTINGS[3]));
    config.put("kafka.properties", SETTINGS[4]);
    config.setDebug(true);
    config.setNumWorkers(4);
    return config;
}
Example 13: run
import org.apache.storm.Config; // import the package/class this method depends on
public static int run(String[] args) throws Exception {
    CommandLineParser parser = new BasicParser();
    Options options = getCommonRequiredOptions();
    CommandLine cmd = parser.parse(options, args);
    Map<String, String> dsConf = getAccumuloDataStoreConf(cmd);
    String featureName = cmd.getOptionValue(FEATURE_NAME);
    SimpleFeatureType featureType = DataUtilities.createType(featureName, "geom:Point:srid=4326");
    DataStore ds = DataStoreFinder.getDataStore(dsConf);
    ds.createSchema(featureType);
    TopologyBuilder topologyBuilder = new TopologyBuilder();
    String topic = cmd.getOptionValue(TOPIC);
    String groupId = topic;
    dsConf.put(OSMIngest.FEATURE_NAME, featureName);
    OSMKafkaSpout osmKafkaSpout = new OSMKafkaSpout(dsConf, groupId, topic);
    topologyBuilder.setSpout("Spout", osmKafkaSpout, 10).setNumTasks(10);
    OSMKafkaBolt osmKafkaBolt = new OSMKafkaBolt(dsConf, groupId, topic);
    topologyBuilder.setBolt("Bolt", osmKafkaBolt, 20).shuffleGrouping("Spout");
    Config stormConf = new Config();
    stormConf.setNumWorkers(10);
    stormConf.setDebug(true);
    StormSubmitter.submitTopology(topic, stormConf, topologyBuilder.createTopology());
    return 0;
}
Example 14: main
import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    StormTopology topology = buildTopology();
    Config conf = new Config();
    conf.setDebug(false);
    if (args != null && args.length > 0) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, topology);
    } else {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("wordcount", conf, topology);
        try {
            System.out.println("PRESS ENTER TO STOP");
            new BufferedReader(new InputStreamReader(System.in)).readLine();
            cluster.killTopology("wordcount");
            cluster.shutdown();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
Example 15: main
import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    if (args.length != 1) {
        throw new RuntimeException("Specify topology name");
    }
    TopologyBuilder builder = new TopologyBuilder();
    int spouts = 2;
    int bolts = 2;
    builder.setSpout("word", new AckingTestWordSpout(), spouts);
    builder.setBolt("exclaim1", new ExclamationBolt(), bolts)
            .shuffleGrouping("word");
    Config conf = new Config();
    conf.setDebug(true);
    // Put an arbitrarily large number here if you don't want to slow the topology down
    conf.setMaxSpoutPending(1000 * 1000 * 1000);
    // Enable acking by configuring at least one acker executor
    conf.setNumAckers(1);
    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");
    // Set the number of worker processes
    conf.setNumWorkers(2);
    StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
}