本文整理匯總了Java中backtype.storm.Config.setMaxTaskParallelism方法的典型用法代碼示例。如果您正苦於以下問題:Java Config.setMaxTaskParallelism方法的具體用法?Java Config.setMaxTaskParallelism怎麽用?Java Config.setMaxTaskParallelism使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類backtype.storm.Config
的用法示例。
在下文中一共展示了Config.setMaxTaskParallelism方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Entry point for the reach topology. With no arguments it runs the topology
 * on an in-process cluster against a LocalDRPC server and prints the reach of
 * a few sample URLs; with an argument it submits the topology remotely under
 * that name.
 */
public static void main(String[] args) throws Exception {
    LinearDRPCTopologyBuilder topologyBuilder = construct();
    Config topologyConf = new Config();
    boolean runLocally = (args == null || args.length == 0);
    if (!runLocally) {
        // Remote submission: args[0] is the topology name on the cluster.
        topologyConf.setNumWorkers(6);
        StormSubmitter.submitTopologyWithProgressBar(args[0], topologyConf, topologyBuilder.createRemoteTopology());
        return;
    }
    topologyConf.setMaxTaskParallelism(3);
    LocalDRPC localDrpc = new LocalDRPC();
    LocalCluster localCluster = new LocalCluster();
    localCluster.submitTopology("reach-drpc", topologyConf, topologyBuilder.createLocalTopology(localDrpc));
    String[] urlsToTry = { "foo.com/blog/1", "engineering.twitter.com/blog/5", "notaurl.com" };
    for (String url : urlsToTry) {
        System.out.println("Reach of " + url + ": " + localDrpc.execute("reach", url));
    }
    localCluster.shutdown();
    localDrpc.shutdown();
}
示例2: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Wires the word-count topology (reader -> normalizer -> counter), runs it on
 * an in-process cluster for 30 seconds, then shuts the cluster down.
 */
public static void main(String[] args) throws InterruptedException {
    TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.setSpout("word-reader", new WordReader());
    topologyBuilder.setBolt("word-normalizer", new WordNormalizer()).shuffleGrouping("word-reader");
    // Two counter tasks; fields grouping keeps each word on the same task.
    topologyBuilder.setBolt("word-counter", new WordCounter(), 2).fieldsGrouping("word-normalizer", new Fields("word"));

    Config topologyConf = new Config();
    topologyConf.setMaxTaskParallelism(3);
    topologyConf.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, 3);
    topologyConf.setDebug(false);

    // Run locally, give the topology half a minute of work, then tear down.
    LocalCluster localCluster = new LocalCluster();
    localCluster.submitTopology("storm-wordcount", topologyConf, topologyBuilder.createTopology());
    Thread.sleep(30000);
    localCluster.shutdown();
}
示例3: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Creates a standalone topology.
 *
 * <p>Builds the pipeline with a {@code RecordingTopologyBuilder}, records it
 * under the name {@code args[0]}, then either submits it to a remote cluster
 * (when arguments are present) or runs it on an in-process cluster.
 *
 * @param args the topology arguments; {@code args[0]} is used as the topology name
 * @throws Exception in case of creation problems
 */
public static void main(String[] args) throws Exception {
Config config = new Config();
Naming.setDefaultInitializeAlgorithms(config, defaultInitAlgorithms);
config.setMessageTimeoutSecs(100);
PipelineOptions options = new PipelineOptions(args);
RecordingTopologyBuilder b = new RecordingTopologyBuilder(options);
createTopology(b);
// NOTE(review): args[0] is read here BEFORE the args-length guard below, so an
// empty args array throws ArrayIndexOutOfBoundsException and the local-cluster
// branch can never actually run with zero arguments — confirm intended usage.
b.close(args[0], config);
// main topology: int numWorkers = options.getNumberOfWorkers(2);
options.toConf(config);
if (args != null && args.length > 0) {
config.setNumWorkers(2);
StormSubmitter.submitTopology(args[0], config, b.createTopology());
} else {
config.setMaxTaskParallelism(2);
final LocalCluster cluster = new LocalCluster();
cluster.submitTopology(PIP_NAME, config, b.createTopology());
}
}
示例4: macVendorTest
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Verifies that {@code MacVendorFunction} enriches each input flow with the
 * MAC vendor: every processed line must equal the corresponding expected
 * JSON line from the check file.
 *
 * <p>Fix over the original: the two {@code Scanner}s are now closed via
 * try-with-resources, and the in-process cluster and DRPC server are shut
 * down in a {@code finally} block instead of being leaked.
 *
 * @throws FileNotFoundException if an input or expected-data resource is missing
 */
@Test
public void macVendorTest() throws FileNotFoundException {
    File fileFlow = new File(Thread.currentThread().getContextClassLoader().getResource("inputData/flows.json").getPath());
    File checkFlow = new File(Thread.currentThread().getContextClassLoader().getResource("dataCheck/macVendorFlows.json").getPath());
    MacVendorFunction._ouiFilePath = Thread.currentThread().getContextClassLoader().getResource("db/oui-vendors").getPath();

    List<String> fieldsFlow = new ArrayList<String>();
    fieldsFlow.add("flows");
    fieldsFlow.add("macVendor");

    LocalDRPC drpc = new LocalDRPC();
    TridentTopology topology = new TridentTopology();
    topology.newDRPCStream("test", drpc)
            .each(new Fields("args"), new MapperFunction("rb_test"), new Fields("flows"))
            .each(new Fields("flows"), new MacVendorFunction(), new Fields("macVendor"))
            .each(new Fields(fieldsFlow), new MergeMapsFunction(), new Fields("finalMap"))
            .project(new Fields("finalMap"))
            .each(new Fields("finalMap"), new MapToJSONFunction(), new Fields("jsonString"));

    Config conf = new Config();
    conf.put("rbDebug", true);
    conf.setMaxTaskParallelism(1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("testing-topology", conf, topology.build());
    try (Scanner flows = new Scanner(fileFlow); Scanner checkFlows = new Scanner(checkFlow)) {
        while (flows.hasNextLine()) {
            String stormFlow = drpc.execute("test", flows.nextLine());
            // Trim the DRPC result envelope down to the single JSON object.
            stormFlow = stormFlow.substring(stormFlow.indexOf("{"), stormFlow.indexOf("}") + 1);
            Assert.assertEquals(checkFlows.nextLine(), stormFlow);
        }
    } finally {
        cluster.shutdown();
        drpc.shutdown();
    }
}
示例5: nonTimestampTest
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Verifies that {@code CheckTimestampFunction} adds a {@code timestamp} field
 * to flows that arrive without one: every processed line must contain the
 * string "timestamp".
 *
 * <p>Fix over the original: the input {@code Scanner} is now closed via
 * try-with-resources, and the in-process cluster and DRPC server are shut
 * down in a {@code finally} block instead of being leaked.
 *
 * @throws FileNotFoundException if the input resource is missing
 */
@Test
public void nonTimestampTest() throws FileNotFoundException {
    File fileFlow = new File(Thread.currentThread().getContextClassLoader().getResource("inputData/nonTimestampFlows.json").getPath());

    List<String> fieldsFlow = new ArrayList<String>();
    fieldsFlow.add("flows");

    LocalDRPC drpc = new LocalDRPC();
    TridentTopology topology = new TridentTopology();
    topology.newDRPCStream("test", drpc)
            .each(new Fields("args"), new MapperFunction("rb_test"), new Fields("flows"))
            .each(new Fields("flows"), new CheckTimestampFunction(), new Fields("finalMap"))
            .project(new Fields("finalMap"))
            .each(new Fields("finalMap"), new MapToJSONFunction(), new Fields("jsonString"));

    Config conf = new Config();
    conf.put("rbDebug", true);
    conf.setMaxTaskParallelism(1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("testing-topology", conf, topology.build());
    try (Scanner flows = new Scanner(fileFlow)) {
        while (flows.hasNextLine()) {
            String stormFlow = drpc.execute("test", flows.nextLine());
            // Trim the DRPC result envelope down to the single JSON object.
            stormFlow = stormFlow.substring(stormFlow.indexOf("{"), stormFlow.indexOf("}") + 1);
            Assert.assertEquals(true, stormFlow.contains("timestamp"));
        }
    } finally {
        cluster.shutdown();
        drpc.shutdown();
    }
}
示例6: analizeHttpUrlTest
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Verifies that {@code AnalizeHttpUrlFunction} enriches HTTP flows with the
 * parsed URL map: every processed line must equal the corresponding expected
 * JSON line from the check file.
 *
 * <p>Fix over the original: the two {@code Scanner}s are now closed via
 * try-with-resources, and the in-process cluster and DRPC server are shut
 * down in a {@code finally} block instead of being leaked.
 *
 * @throws FileNotFoundException if an input or expected-data resource is missing
 */
@Test
public void analizeHttpUrlTest() throws FileNotFoundException {
    File fileFlow = new File(Thread.currentThread().getContextClassLoader().getResource("inputData/httpFlows.json").getPath());
    File checkFlow = new File(Thread.currentThread().getContextClassLoader().getResource("dataCheck/httpFlows.json").getPath());

    List<String> fieldsFlow = new ArrayList<String>();
    fieldsFlow.add("flows");
    fieldsFlow.add("httpUrlMap");

    LocalDRPC drpc = new LocalDRPC();
    TridentTopology topology = new TridentTopology();
    topology.newDRPCStream("test", drpc)
            .each(new Fields("args"), new MapperFunction("rb_test"), new Fields("flows"))
            .each(new Fields("flows"), new AnalizeHttpUrlFunction(), new Fields("httpUrlMap"))
            .each(new Fields(fieldsFlow), new MergeMapsFunction(), new Fields("finalMap"))
            .project(new Fields("finalMap"))
            .each(new Fields("finalMap"), new MapToJSONFunction(), new Fields("jsonString"));

    Config conf = new Config();
    conf.put("rbDebug", true);
    conf.setMaxTaskParallelism(1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("testing-topology", conf, topology.build());
    try (Scanner flows = new Scanner(fileFlow); Scanner checkFlows = new Scanner(checkFlow)) {
        while (flows.hasNextLine()) {
            String stormFlow = drpc.execute("test", flows.nextLine());
            // Trim the DRPC result envelope down to the single JSON object.
            stormFlow = stormFlow.substring(stormFlow.indexOf("{"), stormFlow.indexOf("}") + 1);
            Assert.assertEquals(checkFlows.nextLine(), stormFlow);
        }
    } finally {
        cluster.shutdown();
        drpc.shutdown();
    }
}
示例7: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
* @param args
* http://www.programcreek.com/java-api-examples/index.php?api=storm.kafka.KafkaSpout
*/
/**
 * Reads "order" messages from Kafka and counts them: Kafka spout -> check
 * bolt -> counter bolt. Submits remotely when a topology name is supplied as
 * args[0], otherwise runs on an in-process cluster for a while.
 *
 * @param args optional; args[0] is the remote topology name
 * See http://www.programcreek.com/java-api-examples/index.php?api=storm.kafka.KafkaSpout
 */
public static void main(String[] args) {
    try {
        // Kafka / ZooKeeper connection details and component parallelism.
        String zkConnect = "wxb-1:2181,wxb-2:2181,wxb-3:2181";
        String kafkaTopic = "order";
        String consumerGroup = "id";
        int spoutParallelism = 3;
        int boltParallelism = 1;

        ZkHosts brokerHosts = new ZkHosts(zkConnect); // ZooKeeper ensemble tracking the Kafka brokers
        SpoutConfig kafkaConf = new SpoutConfig(brokerHosts, kafkaTopic, "/order", consumerGroup); // create /order /id
        kafkaConf.scheme = new SchemeAsMultiScheme(new StringScheme());
        KafkaSpout orderSpout = new KafkaSpout(kafkaConf);

        TopologyBuilder topologyBuilder = new TopologyBuilder();
        topologyBuilder.setSpout("spout", orderSpout, spoutParallelism);
        topologyBuilder.setBolt("check", new CheckOrderBolt(), boltParallelism).shuffleGrouping("spout");
        topologyBuilder.setBolt("counter", new CounterBolt(), boltParallelism).shuffleGrouping("check");

        Config stormConf = new Config();
        stormConf.setDebug(true);
        if (args != null && args.length > 0) {
            // Remote submission under the name given on the command line.
            stormConf.setNumWorkers(2);
            StormSubmitter.submitTopology(args[0], stormConf, topologyBuilder.createTopology());
        } else {
            // Local run: cap parallelism, let it process, then stop.
            stormConf.setMaxTaskParallelism(2);
            LocalCluster localCluster = new LocalCluster();
            localCluster.submitTopology("Wordcount-Topology", stormConf, topologyBuilder.createTopology());
            Thread.sleep(500000);
            localCluster.shutdown();
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
示例8: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Builds a three-stage pipeline (message spout -> parse bolt -> print bolt)
 * and either submits it remotely under args[0] or runs it locally.
 *
 * @param args optional; args[0] is the remote topology name
 */
public static void main(String[] args) {
    try {
        TopologyBuilder pipeline = new TopologyBuilder();
        pipeline.setSpout("Spout", new EmitMessageSpout(), 1);
        pipeline.setBolt("generate", new ParseLoopBolt(), 1)
                .shuffleGrouping("Spout");
        pipeline.setBolt("Store", new PrintBolt(), 1)
                .shuffleGrouping("generate");

        Config stormConf = new Config();
        stormConf.setDebug(false);
        boolean remote = (args != null && args.length > 0);
        if (remote) {
            stormConf.setNumWorkers(4);
            StormSubmitter.submitTopology(args[0], stormConf, pipeline.createTopology());
        } else {
            stormConf.setMaxTaskParallelism(2);
            LocalCluster localCluster = new LocalCluster();
            localCluster.submitTopology("test", stormConf, pipeline.createTopology());
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
示例9: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Wires a typed record pipeline: three spouts (numbers, strings, signs) feed
 * a splitter, a distributor routes words by type onto named streams, and one
 * saver bolt per type consumes its stream. Submits remotely under args[0] or
 * runs locally when no arguments are given.
 *
 * @param args optional; args[0] is the remote topology name
 */
public static void main(String[] args) {
    try {
        TopologyBuilder pipeline = new TopologyBuilder();
        // One spout per record type, each with fixed sample data.
        pipeline.setSpout("spout-number", new ProduceRecordSpout(Type.NUMBER, new String[]{"111 222 333", "80966 31"}), 1);
        pipeline.setSpout("spout-string", new ProduceRecordSpout(Type.STRING, new String[]{"abc ddd fasko", "hello the world"}), 1);
        pipeline.setSpout("spout-sign", new ProduceRecordSpout(Type.SIGN, new String[]{"++ -*% *** @@", "{+-} ^#######"}), 1);
        // Splitter fans in from all three spouts.
        pipeline.setBolt("bolt-splitter", new SplitRecordBolt(), 2)
                .shuffleGrouping("spout-number")
                .shuffleGrouping("spout-string")
                .shuffleGrouping("spout-sign");
        // Distributor keeps records of one type on one task via fields grouping.
        pipeline.setBolt("bolt-distributor", new DistributeWordByTypeBolt(), 1)
                .fieldsGrouping("bolt-splitter", new Fields("type"));
        // One saver per type, each subscribed to its dedicated stream.
        pipeline.setBolt("bolt-number-saver", new SaveDataBolt(Type.NUMBER), 1)
                .shuffleGrouping("bolt-distributor", "stream-number-saver");
        pipeline.setBolt("bolt-string-saver", new SaveDataBolt(Type.STRING), 1)
                .shuffleGrouping("bolt-distributor", "stream-string-saver");
        pipeline.setBolt("bolt-sign-saver", new SaveDataBolt(Type.SIGN), 1)
                .shuffleGrouping("bolt-distributor", "stream-sign-saver");

        Config stormConf = new Config();
        stormConf.setDebug(false);
        boolean remote = (args != null && args.length > 0);
        if (remote) {
            stormConf.setNumWorkers(4);
            StormSubmitter.submitTopology(args[0], stormConf, pipeline.createTopology());
        } else {
            stormConf.setMaxTaskParallelism(2);
            LocalCluster localCluster = new LocalCluster();
            localCluster.submitTopology("test", stormConf, pipeline.createTopology());
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
示例10: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Builds a URL-crawling pipeline with a feedback loop: the parse bolt emits
 * new URLs on the "loop" stream back into a second generator, and finished
 * results on the "store" stream into the store bolt. Submits remotely under
 * args[0] or runs locally when no arguments are given.
 *
 * @param args optional; args[0] is the remote topology name
 */
public static void main(String[] args) {
    try {
        TopologyBuilder pipeline = new TopologyBuilder();
        pipeline.setSpout("Spout", new EmitMessageSpout(), 1);
        pipeline.setBolt("generate", new GenerateUrlBolt(), 1)
                .shuffleGrouping("Spout");
        // Second generator fed from the parser's "loop" stream (the feedback edge).
        pipeline.setBolt("generate_loop", new GenerateUrlBolt(), 1)
                .shuffleGrouping("Parse", "loop");
        pipeline.setBolt("Parse", new ParseLoopBolt(), 1)
                .shuffleGrouping("generate")
                .shuffleGrouping("generate_loop");
        pipeline.setBolt("Store", new StoreTestBolt(), 1)
                .shuffleGrouping("Parse", "store");

        Config stormConf = new Config();
        stormConf.setDebug(false);
        boolean remote = (args != null && args.length > 0);
        if (remote) {
            stormConf.setNumWorkers(4);
            StormSubmitter.submitTopology(args[0], stormConf, pipeline.createTopology());
        } else {
            stormConf.setMaxTaskParallelism(2);
            LocalCluster localCluster = new LocalCluster();
            localCluster.submitTopology("test", stormConf, pipeline.createTopology());
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
示例11: createConfig
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Builds the Storm configuration for either a local or a remote run: the
 * configured worker count caps task parallelism locally and sets the number
 * of workers remotely.
 *
 * @param local whether the topology will run on an in-process cluster
 * @return the populated configuration (debug enabled)
 */
private static Config createConfig(boolean local) {
    Config conf = new Config();
    conf.setDebug(true);
    int workerCount = Properties.getInt("sa.storm.workers");
    if (local) {
        conf.setMaxTaskParallelism(workerCount);
    } else {
        conf.setNumWorkers(workerCount);
    }
    return conf;
}
示例12: getComponentConfiguration
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Caps this component at a single task and registers serialization for
 * {@code TransactionAttempt} instances.
 *
 * @return the per-component configuration map
 */
@Override
public Map<String, Object> getComponentConfiguration() {
    Config componentConf = new Config();
    componentConf.setMaxTaskParallelism(1);
    componentConf.registerSerialization(TransactionAttempt.class);
    return componentConf;
}
示例13: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Wires the file-based word-count topology (spout -> split -> normalize ->
 * count), runs it on an in-process cluster for two minutes, then shuts the
 * cluster down.
 *
 * @param args args[0] is the input file path handed to the spout
 * @throws Exception on topology submission problems
 */
public static void main(String[] args) throws Exception {
    // Component IDs required by the exercise wiring.
    final String spoutId = "spout";
    final String splitId = "split";
    final String countId = "count";
    final String normalizeId = "normalize";

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(spoutId, new FileReaderSpout(args[0]), 5);
    builder.setBolt(splitId, new SplitSentenceBolt(), 8).shuffleGrouping(spoutId);
    // Fields grouping on "word" keeps each word on a fixed downstream task.
    builder.setBolt(normalizeId, new NormalizerBolt(), 12).fieldsGrouping(splitId, new Fields("word"));
    builder.setBolt(countId, new WordCountBolt(), 12).fieldsGrouping(normalizeId, new Fields("word"));

    Config config = new Config();
    config.setDebug(true);
    config.setMaxTaskParallelism(3);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("word-count", config, builder.createTopology());
    // Let the topology run for 2 minutes before killing the local cluster.
    Thread.sleep(2 * 60 * 1000);
    cluster.shutdown();
}
示例14: geoIpTest
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Verifies that {@code GeoIpFunction} enriches each input flow with GeoIP
 * data (city/ASN, v4 and v6): every processed line must equal the
 * corresponding expected JSON line from the check file.
 *
 * <p>Fix over the original: the two {@code Scanner}s are now closed via
 * try-with-resources, and the in-process cluster and DRPC server are shut
 * down in a {@code finally} block instead of being leaked.
 *
 * @throws FileNotFoundException if an input or expected-data resource is missing
 */
@Test
public void geoIpTest() throws FileNotFoundException {
    File fileFlow = new File(Thread.currentThread().getContextClassLoader().getResource("inputData/flows.json").getPath());
    File checkFlow = new File(Thread.currentThread().getContextClassLoader().getResource("dataCheck/geoIpFlows.json").getPath());
    // Point the function at the bundled GeoIP databases.
    GeoIpFunction.CITY_DB_PATH = Thread.currentThread().getContextClassLoader().getResource("db/city.dat").getPath();
    GeoIpFunction.CITY_V6_DB_PATH = Thread.currentThread().getContextClassLoader().getResource("db/cityv6.dat").getPath();
    GeoIpFunction.ASN_DB_PATH = Thread.currentThread().getContextClassLoader().getResource("db/asn.dat").getPath();
    GeoIpFunction.ASN_V6_DB_PATH = Thread.currentThread().getContextClassLoader().getResource("db/asnv6.dat").getPath();

    List<String> fieldsFlow = new ArrayList<String>();
    fieldsFlow.add("flows");
    fieldsFlow.add("geoIPMap");

    LocalDRPC drpc = new LocalDRPC();
    TridentTopology topology = new TridentTopology();
    topology.newDRPCStream("test", drpc)
            .each(new Fields("args"), new MapperFunction("rb_test"), new Fields("flows"))
            .each(new Fields("flows"), new GeoIpFunction(), new Fields("geoIPMap"))
            .each(new Fields(fieldsFlow), new MergeMapsFunction(), new Fields("finalMap"))
            .project(new Fields("finalMap"))
            .each(new Fields("finalMap"), new MapToJSONFunction(), new Fields("jsonString"));

    Config conf = new Config();
    conf.put("rbDebug", true);
    conf.setMaxTaskParallelism(1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("testing-topology", conf, topology.build());
    try (Scanner flows = new Scanner(fileFlow); Scanner checkFlows = new Scanner(checkFlow)) {
        while (flows.hasNextLine()) {
            String stormFlow = drpc.execute("test", flows.nextLine());
            // Trim the DRPC result envelope down to the single JSON object.
            stormFlow = stormFlow.substring(stormFlow.indexOf("{"), stormFlow.indexOf("}") + 1);
            Assert.assertEquals(checkFlows.nextLine(), stormFlow);
        }
    } finally {
        cluster.shutdown();
        drpc.shutdown();
    }
}
示例15: main
import backtype.storm.Config; //導入方法依賴的package包/類
/**
 * Kafka-to-HDFS pipeline: a Kafka spout reads "order" messages, check and
 * counter bolts process them, and an HdfsBolt persists the results under
 * /tmp/ with "|"-delimited records, 1k-tuple sync, and 5 MB file rotation.
 * Submits remotely under args[0] or runs locally when no arguments are given.
 *
 * @param args optional; args[0] is the remote topology name
 */
public static void main(String[] args) {
    try {
        // Kafka / ZooKeeper connection details and component parallelism.
        String zkConnect = "wxb-1:2181,wxb-2:2181,wxb-3:2181";
        String kafkaTopic = "order";
        String consumerGroup = "id";
        int spoutParallelism = 3;
        int boltParallelism = 1;

        ZkHosts brokerHosts = new ZkHosts(zkConnect); // ZooKeeper ensemble tracking the Kafka brokers
        SpoutConfig kafkaConf = new SpoutConfig(brokerHosts, kafkaTopic, "/order", consumerGroup); // create /order /id
        kafkaConf.scheme = new SchemeAsMultiScheme(new StringScheme());
        KafkaSpout orderSpout = new KafkaSpout(kafkaConf);

        // HDFS sink: "|" field delimiter, sync every 1000 tuples, rotate at 5 MB.
        RecordFormat recordFormat = new DelimitedRecordFormat()
                .withFieldDelimiter("|");
        SyncPolicy syncPolicy = new CountSyncPolicy(1000);
        FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, FileSizeRotationPolicy.Units.MB);
        FileNameFormat fileNameFormat = new DefaultFileNameFormat()
                .withPath("/tmp/").withPrefix("order_").withExtension(".log");
        HdfsBolt hdfsBolt = new HdfsBolt()
                .withFsUrl("hdfs://wxb-1:8020")
                .withFileNameFormat(fileNameFormat)
                .withRecordFormat(recordFormat)
                .withRotationPolicy(rotationPolicy)
                .withSyncPolicy(syncPolicy);

        TopologyBuilder topologyBuilder = new TopologyBuilder();
        topologyBuilder.setSpout("spout", orderSpout, spoutParallelism);
        topologyBuilder.setBolt("check", new CheckOrderBolt(), boltParallelism).shuffleGrouping("spout");
        topologyBuilder.setBolt("counter", new CounterBolt(), boltParallelism).shuffleGrouping("check");
        topologyBuilder.setBolt("hdfs", hdfsBolt, boltParallelism).shuffleGrouping("counter");

        Config stormConf = new Config();
        stormConf.setDebug(true);
        if (args != null && args.length > 0) {
            // Remote submission under the name given on the command line.
            stormConf.setNumWorkers(2);
            StormSubmitter.submitTopology(args[0], stormConf, topologyBuilder.createTopology());
        } else {
            // Local run: cap parallelism, let it process, then stop.
            stormConf.setMaxTaskParallelism(2);
            LocalCluster localCluster = new LocalCluster();
            localCluster.submitTopology("Wordcount-Topology", stormConf, topologyBuilder.createTopology());
            Thread.sleep(500000);
            localCluster.shutdown();
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}