

Java Config.setMaxSpoutPending Method Code Examples

This article collects typical usage examples of the Java method org.apache.storm.Config.setMaxSpoutPending. If you are wondering what Config.setMaxSpoutPending does, how to call it, or where to find examples of it in use, the curated code samples below should help. You can also browse further usage examples of the enclosing class, org.apache.storm.Config.


The sections below present 15 code examples of Config.setMaxSpoutPending, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
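Before the project examples, here is a minimal, self-contained sketch of the typical call pattern. It is not taken from any of the projects below; the class names MaxSpoutPendingSketch and NoOpBolt are made up for illustration. setMaxSpoutPending caps the number of tuples a spout task may have in flight (emitted but not yet acked or failed), and it only throttles spouts that emit tuples with message IDs while acking is enabled.

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.testing.TestWordSpout;
import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.topology.base.BaseBasicBolt;
import org.apache.storm.tuple.Tuple;

public class MaxSpoutPendingSketch {

    // Trivial sink bolt: BaseBasicBolt acks each input tuple automatically after execute() returns
    public static class NoOpBolt extends BaseBasicBolt {
        @Override
        public void execute(Tuple input, BasicOutputCollector collector) {
            // consume the tuple and emit nothing
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            // no output streams
        }
    }

    public static void main(String[] args) throws Exception {
        TopologyBuilder builder = new TopologyBuilder();
        // TestWordSpout is a stand-in from Storm's testing utilities; note that the pending limit
        // only throttles spouts that emit reliably (with message IDs)
        builder.setSpout("word", new TestWordSpout(), 1);
        builder.setBolt("sink", new NoOpBolt(), 2).shuffleGrouping("word");

        Config conf = new Config();
        conf.setNumAckers(1);            // acking must be enabled, otherwise the pending limit is ignored
        conf.setMaxSpoutPending(500);    // at most 500 un-acked tuples in flight per spout task
        conf.setMessageTimeoutSecs(60);  // un-acked tuples fail (and may be replayed) after 60 seconds

        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("max-spout-pending-demo", conf, builder.createTopology());
    }
}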

Example 1: start

import org.apache.storm.Config; // import the package/class this method depends on
private void start(StormTopology topology, boolean runAsLocal) throws Exception {
    Config conf = new Config();
    conf.put(Constants.StormConfigKey.FULL_SPLITTER_TOPOLOGY_ID, fullSplitterTopologyId);
    conf.put(Constants.StormConfigKey.FULL_PULLER_TOPOLOGY_ID, fullPullerTopologyId);
    conf.put(Constants.StormConfigKey.ZKCONNECT, this.zkConnect);
    conf.setMessageTimeoutSecs(3600);
    conf.setMaxSpoutPending(30);
    conf.setDebug(true);
    conf.setNumWorkers(1);

    if (runAsLocal) {
        conf.setMaxTaskParallelism(3);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(topologyName, conf, topology);
    } else {
        StormSubmitter.submitTopology(topologyName, conf, topology);
    }
}
 
Developer: BriData, Project: DBus, Lines of code: 19, Source file: FullPullerTopology.java

Example 2: main

import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
	Config conf = new Config();
	conf.setMaxSpoutPending(20);
	LocalDRPC drpc = new LocalDRPC();
	if (args.length == 0) {
		
		LocalCluster cluster = new LocalCluster();
		cluster.submitTopology("CountryCount", conf, buildTopology(drpc));
		Thread.sleep(2000);
		for (int i = 0; i < 100; i++) {
			System.out.println("Result - " + drpc.execute("Count", "Japan India Europe"));
			Thread.sleep(1000);
		}
	} else {
		conf.setNumWorkers(3);
		StormSubmitter.submitTopology(args[0], conf, buildTopology(null));
		Thread.sleep(2000);
		DRPCClient client = new DRPCClient(conf, "RRPC-Server", 1234);
		System.out.println(client.execute("Count", "Japan India Europe"));
	}
}
 
Developer: PacktPublishing, Project: Mastering-Apache-Storm, Lines of code: 22, Source file: DistributedRPC.java

Example 3: main

import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
  if (args.length != 1) {
    throw new RuntimeException("Specify topology name");
  }
  TopologyBuilder builder = new TopologyBuilder();

  int spouts = 2;
  int bolts = 2;
  builder.setSpout("word", new AckingTestWordSpout(), spouts);
  builder.setBolt("exclaim1", new ExclamationBolt(), bolts)
      .shuffleGrouping("word");

  Config conf = new Config();
  conf.setDebug(true);

  // Put an arbitrary large number here if you don't want to slow the topology down
  conf.setMaxSpoutPending(1000 * 1000 * 1000);

  // To enable acking, the topology needs at least one acker
  conf.setNumAckers(1);
  conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");

  // Set the number of workers or stream managers
  conf.setNumWorkers(2);
  StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
}
 
Developer: twitter, Project: heron, Lines of code: 27, Source file: AckingTopology.java

Example 4: main

import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
  TopologyBuilder builder = new TopologyBuilder();

  builder.setSpout("word0", new TestWordSpout(), 2);
  builder.setSpout("word1", new TestWordSpout(), 2);
  builder.setSpout("word2", new TestWordSpout(), 2);
  builder.setBolt("exclaim1", new ExclamationBolt(), 2)
      .shuffleGrouping("word0")
      .shuffleGrouping("word1")
      .shuffleGrouping("word2");

  Config conf = new Config();
  conf.setDebug(true);
  conf.setMaxSpoutPending(10);
  conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");
  if (args != null && args.length > 0) {
    conf.setNumWorkers(3);
    StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
  } else {
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, builder.createTopology());
    Utils.sleep(10000);
    cluster.killTopology("test");
    cluster.shutdown();
  }
}
 
Developer: twitter, Project: heron, Lines of code: 27, Source file: MultiSpoutExclamationTopology.java

Example 5: main

import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
  MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
  TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("global-count", "spout", spout, 3);
  builder.setBolt("partial-count", new BatchCount(), 5).noneGrouping("spout");
  builder.setBolt("sum", new UpdateGlobalCount()).globalGrouping("partial-count");

  LocalCluster cluster = new LocalCluster();

  Config config = new Config();
  config.setDebug(true);
  config.setMaxSpoutPending(3);

  cluster.submitTopology("global-count-topology", config, builder.buildTopology());

  Thread.sleep(3000);
  cluster.shutdown();
}
 
Developer: ziyunhx, Project: storm-net-adapter, Lines of code: 18, Source file: TransactionalGlobalCount.java

Example 6: main

import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
  Config conf = new Config();
  conf.setMaxSpoutPending(20);
  if (args.length == 0) {
    LocalDRPC drpc = new LocalDRPC();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("wordCounter", conf, buildTopology(drpc));
    for (int i = 0; i < 100; i++) {
      System.out.println("DRPC RESULT: " + drpc.execute("words", "cat the dog jumped"));
      Thread.sleep(1000);
    }
  }
  else {
    conf.setNumWorkers(3);
    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, buildTopology(null));
  }
}
 
Developer: ziyunhx, Project: storm-net-adapter, Lines of code: 18, Source file: TridentWordCount.java

Example 7: main

import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setMaxSpoutPending(20);
    if (args.length == 0) {
        LocalDRPC drpc = new LocalDRPC();
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("wordCounter", conf, buildTopology(drpc));
        for (int i = 0; i < 100; i++) {
            System.out.println("DRPC RESULT: " + drpc.execute("words", "CAT THE DOG JUMPED"));
            Thread.sleep(1000);
        }
    } else {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, buildTopology(null));
    }
}
 
Developer: ziyunhx, Project: storm-net-adapter, Lines of code: 17, Source file: TridentMapExample.java

Example 8: main

import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setMaxSpoutPending(20);
    conf.put(Config.TOPOLOGY_TRIDENT_WINDOWING_INMEMORY_CACHE_LIMIT, 100);

    // window-state table should already be created with cf:tuples column
    HBaseWindowsStoreFactory windowStoreFactory = new HBaseWindowsStoreFactory(new HashMap<String, Object>(), "window-state", "cf".getBytes("UTF-8"), "tuples".getBytes("UTF-8"));

    if (args.length == 0) {
        LocalCluster cluster = new LocalCluster();
        String topologyName = "wordCounterWithWindowing";
        cluster.submitTopology(topologyName, conf, buildTopology(windowStoreFactory));
        Utils.sleep(120 * 1000);
        cluster.killTopology(topologyName);
        cluster.shutdown();
        System.exit(0);
    } else {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, buildTopology(windowStoreFactory));
    }
}
 
Developer: ziyunhx, Project: storm-net-adapter, Lines of code: 22, Source file: TridentHBaseWindowingStoreTopology.java

Example 9: main

import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    StormTopology topology = buildVehiclesTopology();
    Config conf = new Config();
    conf.setMaxSpoutPending(20);
    if (args.length == 0) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("vehicles-topology", conf, topology);
        Utils.sleep(60 * 1000);
        cluster.shutdown();
        System.exit(0);
    } else {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar("vehicles-topology", conf, topology);
    }
}
 
Developer: ziyunhx, Project: storm-net-adapter, Lines of code: 17, Source file: TridentMinMaxOfVehiclesTopology.java

Example 10: main

import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    StormTopology topology = buildDevicesTopology();
    Config conf = new Config();
    conf.setMaxSpoutPending(20);
    if (args.length == 0) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("devices-topology", conf, topology);
        Utils.sleep(60 * 1000);
        cluster.shutdown();
        System.exit(0);
    } else {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar("devices-topology", conf, topology);
    }
}
 
Developer: ziyunhx, Project: storm-net-adapter, Lines of code: 17, Source file: TridentMinMaxOfDevicesTopology.java

Example 11: main

import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
  MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
  TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("top-n-words", "spout", spout, 2);
  builder.setBolt("count", new KeyedCountUpdater(), 5).fieldsGrouping("spout", new Fields("word"));
  builder.setBolt("bucketize", new Bucketize()).noneGrouping("count");
  builder.setBolt("buckets", new BucketCountUpdater(), 5).fieldsGrouping("bucketize", new Fields("bucket"));


  LocalCluster cluster = new LocalCluster();

  Config config = new Config();
  config.setDebug(true);
  config.setMaxSpoutPending(3);

  cluster.submitTopology("top-n-topology", config, builder.buildTopology());

  Thread.sleep(3000);
  cluster.shutdown();
}
 
Developer: ziyunhx, Project: storm-net-adapter, Lines of code: 20, Source file: TransactionalWords.java

Example 12: main

import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
	Config conf = new Config();
	conf.setMaxSpoutPending(20);
	if (args.length == 0) {
		LocalCluster cluster = new LocalCluster();
		cluster.submitTopology("Count", conf, buildTopology());
	} else {
		conf.setNumWorkers(3);
		StormSubmitter.submitTopology(args[0], conf, buildTopology());
	}
}
 
Developer: PacktPublishing, Project: Mastering-Apache-Storm, Lines of code: 12, Source file: TridentHelloWorldTopology.java

Example 13: createStormConf

import org.apache.storm.Config; // import the package/class this method depends on
public static Config createStormConf()
{

  Boolean limitHitsPerSelector = Boolean.parseBoolean(SystemConfiguration.getProperty("pir.limitHitsPerSelector"));
  Integer maxHitsPerSelector = Integer.parseInt(SystemConfiguration.getProperty("pir.maxHitsPerSelector"));
  Integer rowDivisions = Integer.parseInt(SystemConfiguration.getProperty("storm.rowDivs", "1"));

  Config conf = new Config();
  conf.setNumAckers(Integer.parseInt(SystemConfiguration.getProperty("storm.numAckers", numWorkers.toString())));
  conf.setMaxSpoutPending(Integer.parseInt(SystemConfiguration.getProperty("storm.maxSpoutPending", "300")));
  conf.setNumWorkers(numWorkers);
  conf.setDebug(false);
  // conf.setNumEventLoggers(2);

  conf.put(Config.TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE, SystemConfiguration.getIntProperty("storm.executor.receiveBufferSize", 1024));
  conf.put(Config.TOPOLOGY_EXECUTOR_SEND_BUFFER_SIZE, SystemConfiguration.getIntProperty("storm.executor.sendBufferSize", 1024));
  conf.put(Config.TOPOLOGY_TRANSFER_BUFFER_SIZE, SystemConfiguration.getIntProperty("storm.transferBufferSize", 32));
  conf.put(Config.WORKER_HEAP_MEMORY_MB, SystemConfiguration.getIntProperty("storm.worker.heapMemory", 750));
  conf.put(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB, Double.parseDouble(SystemConfiguration.getProperty("storm.componentOnheapMem", "128")));

  // Pirk parameters to send to bolts
  conf.put(StormConstants.ALLOW_ADHOC_QSCHEMAS_KEY, SystemConfiguration.getProperty("pir.allowAdHocQuerySchemas", "false").equals("true"));
  conf.put(StormConstants.QSCHEMA_KEY, SystemConfiguration.getProperty("query.schemas"));
  conf.put(StormConstants.DSCHEMA_KEY, SystemConfiguration.getProperty("data.schemas"));
  conf.put(StormConstants.HDFS_URI_KEY, hdfsUri);
  conf.put(StormConstants.QUERY_FILE_KEY, queryFile);
  conf.put(StormConstants.USE_HDFS, useHdfs);
  conf.put(StormConstants.OUTPUT_FILE_KEY, outputPath);
  conf.put(StormConstants.LIMIT_HITS_PER_SEL_KEY, limitHitsPerSelector);
  conf.put(StormConstants.MAX_HITS_PER_SEL_KEY, maxHitsPerSelector);
  conf.put(StormConstants.SPLIT_PARTITIONS_KEY, splitPartitions);
  conf.put(StormConstants.SALT_COLUMNS_KEY, saltColumns);
  conf.put(StormConstants.ROW_DIVISIONS_KEY, rowDivisions);
  conf.put(StormConstants.ENCROWCALCBOLT_PARALLELISM_KEY, encrowcalcboltParallelism);
  conf.put(StormConstants.ENCCOLMULTBOLT_PARALLELISM_KEY, enccolmultboltParallelism);

  return conf;
}
 
Developer: apache, Project: incubator-pirk, Lines of code: 39, Source file: PirkTopology.java

Example 14: main

import org.apache.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
  TopologyBuilder builder = new TopologyBuilder();
  int parallelism = 2;

  int spouts = parallelism;
  builder.setSpout("word", new TestWordSpout(Duration.ofMillis(50)), spouts);
  int bolts = 2 * parallelism;
  builder.setBolt("exclaim1", new ExclamationBolt(), bolts)
      .shuffleGrouping("word");

  Config conf = new Config();
  conf.setDebug(true);
  conf.setMaxSpoutPending(10);
  conf.setMessageTimeoutSecs(600);
  conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");

  if (args != null && args.length > 0) {
    conf.setNumWorkers(parallelism);
    StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
  } else {
    System.out.println("Topology name not provided as an argument, running in simulator mode.");
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, builder.createTopology());
    Utils.sleep(10000);
    cluster.killTopology("test");
    cluster.shutdown();
  }
}
 
Developer: twitter, Project: heron, Lines of code: 29, Source file: ExclamationTopology.java

Example 15: getConsumerConfig

import org.apache.storm.Config; // import the package/class this method depends on
/**
 * Return the consumer topology config.
 *
 * @return the topology config
 */
public Config getConsumerConfig() {
    Config conf = new Config();
    conf.setMaxSpoutPending(20);
    //  conf.setDebug(true);
    return conf;
}
 
Developer: ziyunhx, Project: storm-net-adapter, Lines of code: 12, Source file: TridentKafkaWordCount.java


Note: The org.apache.storm.Config.setMaxSpoutPending examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution and use are subject to each project's License. Please do not reproduce this article without permission.