

Java StormSubmitter.submitTopologyWithProgressBar Method Code Examples

This article collects typical usage examples of the Java method backtype.storm.StormSubmitter.submitTopologyWithProgressBar. If you have been wondering how StormSubmitter.submitTopologyWithProgressBar is used in practice, or what real calls to it look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, backtype.storm.StormSubmitter.


Shown below are 15 code examples of the StormSubmitter.submitTopologyWithProgressBar method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
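Before the individual examples, here is a minimal, self-contained sketch of the pattern nearly all of them share: build a topology, then either submit it to a remote cluster with StormSubmitter.submitTopologyWithProgressBar when a topology name is passed on the command line, or run it in a LocalCluster for local testing. submitTopologyWithProgressBar behaves like submitTopology but additionally prints upload progress while the topology jar is sent to Nimbus. The class and bolt names SubmitSketch and EchoBolt are placeholders for illustration, not taken from any of the projects listed below.

import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.StormSubmitter;
import backtype.storm.testing.TestWordSpout;
import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.TopologyBuilder;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;
import backtype.storm.utils.Utils;

public class SubmitSketch {

    // Trivial pass-through bolt, defined here only to keep the sketch self-contained.
    public static class EchoBolt extends BaseBasicBolt {
        @Override
        public void execute(Tuple tuple, BasicOutputCollector collector) {
            collector.emit(new Values(tuple.getString(0)));
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            declarer.declare(new Fields("word"));
        }
    }

    public static void main(String[] args) throws Exception {
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("word", new TestWordSpout(), 1);
        builder.setBolt("echo", new EchoBolt(), 2).shuffleGrouping("word");

        Config conf = new Config();

        if (args != null && args.length > 0) {
            // Remote submission: args[0] is the topology name shown in the Storm UI;
            // a progress bar is printed while the topology jar is uploaded to Nimbus.
            conf.setNumWorkers(3);
            StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
        } else {
            // Local test run, torn down after ten seconds.
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("sketch", conf, builder.createTopology());
            Utils.sleep(10000);
            cluster.killTopology("sketch");
            cluster.shutdown();
        }
    }
}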

Example 1: main

import backtype.storm.StormSubmitter; // import the package/class this method depends on
public static void main(String[] args) throws Exception {

        if(args.length < 1){
            System.err.println("[ERROR] Configuration File Required");
            System.exit(1);
        }
        Config conf = new Config();

        // Store all the configuration in the Storm conf object
        conf.putAll(readConfigFile(args[0]));

        //Second arg should be local in order to run locally
        if(args.length  < 2 || (args.length  == 2 && !args[1].equals("local"))) {
            StormSubmitter.submitTopologyWithProgressBar("crawler_topology", conf, buildTopology(conf, null));
        }
        else {
            LocalDRPC drpc = new LocalDRPC();
            LocalCluster localcluster = new LocalCluster();
            localcluster.submitTopology("crawler_topology",conf,buildTopology(conf, drpc));

            String searchQuery = "elasticsearch";
            System.out.println("---* Result (search): " + drpc.execute("search",  searchQuery));
        }
    }
 
Developer: skalmadka, Project: web-crawler, Lines: 24, Source: WebCrawlerTopology.java

Example 2: main

import backtype.storm.StormSubmitter; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
	Config conf = new Config();
	conf.setMaxSpoutPending(20);
	if (args.length == 0) {
		LocalDRPC drpc = new LocalDRPC();
		LocalCluster cluster = new LocalCluster();
		cluster.submitTopology("wordCounter", conf, buildTopology(drpc));
		for (int i = 0; i < 100; i++) {
			// The query takes as input a whitespace-separated list of words
			// and returns the sum of the counts for those words.
			System.out.println("DRPC RESULT: " + drpc.execute("words", "cat$$dog$$the$$man"));
			Thread.sleep(1000);
		}
	} else {
		conf.setNumWorkers(3);
		StormSubmitter.submitTopologyWithProgressBar(args[0], conf, buildTopology(null));
	}
}
 
Developer: desp0916, Project: LearnStorm, Lines: 19, Source: TridentWordCount.java

Example 3: main

import backtype.storm.StormSubmitter; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
  Config conf = new Config();
  conf.setMaxSpoutPending(20);
  if (args.length == 0) {
    LocalDRPC drpc = new LocalDRPC();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("wordCounter", conf, buildTopology(drpc));
    for (int i = 0; i < 100; i++) {
      System.out.println("DRPC RESULT: " + drpc.execute("words", "cat the dog jumped"));
      Thread.sleep(1000);
    }
  }
  else {
    conf.setNumWorkers(3);
    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, buildTopology(null));
  }
}
 
Developer: luozhaoyu, Project: big-data-system, Lines: 18, Source: TridentWordCount.java

Example 4: main

import backtype.storm.StormSubmitter; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
  LinearDRPCTopologyBuilder builder = construct();


  Config conf = new Config();

  if (args == null || args.length == 0) {
    conf.setMaxTaskParallelism(3);
    LocalDRPC drpc = new LocalDRPC();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("reach-drpc", conf, builder.createLocalTopology(drpc));

    String[] urlsToTry = new String[]{ "foo.com/blog/1", "engineering.twitter.com/blog/5", "notaurl.com" };
    for (String url : urlsToTry) {
      System.out.println("Reach of " + url + ": " + drpc.execute("reach", url));
    }

    cluster.shutdown();
    drpc.shutdown();
  }
  else {
    conf.setNumWorkers(6);
    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createRemoteTopology());
  }
}
 
Developer: luozhaoyu, Project: big-data-system, Lines: 26, Source: ReachTopology.java

Example 5: main

import backtype.storm.StormSubmitter; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("word", new TestWordSpout(), 10);
    builder.setBolt("exclaim1", new ExclamationBolt(), 3).shuffleGrouping("word");
    builder.setBolt("exclaim2", new ExclamationBolt(), 2).shuffleGrouping("exclaim1");

    Config conf = new Config();
    conf.setDebug(true);

    if (args != null && args.length > 0) {
        conf.setNumWorkers(3);

        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    }
    else {

        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("www_nginx_accesslog_stat", conf, builder.createTopology());
        Utils.sleep(10000);
        cluster.killTopology("www_nginx_accesslog_stat");
        cluster.shutdown();
    }
}
 
Developer: coodoing, Project: LogRTA, Lines: 25, Source: ExclamationTopology.java

Example 6: main

import backtype.storm.StormSubmitter; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
  TopologyBuilder builder = new TopologyBuilder();

  builder.setSpout("word", new TestWordSpout(), 10);
  builder.setBolt("exclaim1", new ExclamationBolt(), 3).shuffleGrouping("word");
  builder.setBolt("exclaim2", new ExclamationBolt(), 2).shuffleGrouping("exclaim1");

  Config conf = new Config();
  conf.setDebug(true);

  if (args != null && args.length > 0) {
    conf.setNumWorkers(3);

    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
  }
  else {

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, builder.createTopology());
    Utils.sleep(10000);
    cluster.killTopology("test");
    cluster.shutdown();
  }
}
 
Developer: linkshare, Project: cdh-storm, Lines: 25, Source: ExclamationTopology.java

Example 7: main

import backtype.storm.StormSubmitter; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
  LinearDRPCTopologyBuilder builder = new LinearDRPCTopologyBuilder("exclamation");
  builder.addBolt(new ExclaimBolt(), 3);

  Config conf = new Config();

  if (args == null || args.length == 0) {
    LocalDRPC drpc = new LocalDRPC();
    LocalCluster cluster = new LocalCluster();

    cluster.submitTopology("drpc-demo", conf, builder.createLocalTopology(drpc));

    for (String word : new String[]{ "hello", "goodbye" }) {
      System.out.println("Result for \"" + word + "\": " + drpc.execute("exclamation", word));
    }

    cluster.shutdown();
    drpc.shutdown();
  }
  else {
    conf.setNumWorkers(3);
    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createRemoteTopology());
  }
}
 
Developer: linkshare, Project: cdh-storm, Lines: 25, Source: BasicDRPCTopology.java

Example 8: main

import backtype.storm.StormSubmitter; // import the package/class this method depends on
public static void main(String args[]) throws Exception {
	LinearDRPCTopologyBuilder builder = new LinearDRPCTopologyBuilder("exclamation");
	builder.addBolt(new ExclaimBolt(), 3);

	Config conf = new Config();

	if (args == null || args.length == 0) {
		LocalDRPC drpc = new LocalDRPC();
		LocalCluster cluster = new LocalCluster();
		cluster.submitTopology("drpc-demo", conf, builder.createLocalTopology(drpc));
		for (String word : new String[] { "hello", "goodbye" }) {
			System.out.println("Result for \"" + word + "\": " + drpc.execute("exclamation", word));
		}

		Thread.sleep(10000);
		drpc.shutdown();
		cluster.shutdown();
	} else {
		conf.setNumWorkers(3);
		StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createRemoteTopology());
	}
}
 
Developer: desp0916, Project: LearnStorm, Lines: 23, Source: DRPCTest.java

Example 9: main

import backtype.storm.StormSubmitter; // import the package/class this method depends on
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
	Config conf = new Config();
	conf.setDebug(true);

	if (args != null && args.length > 0) {
		conf.setNumWorkers(3);

		StormSubmitter.submitTopologyWithProgressBar(args[0], conf, buildTopology(null));
	} else {
		conf.setMaxTaskParallelism(3);
		LocalDRPC drpc = new LocalDRPC();
		LocalCluster cluster = new LocalCluster();
		cluster.submitTopology(SmashBrosTwitterTopology.class.getSimpleName(), conf, buildTopology(drpc));

		Utils.sleep(10 * 60 * 1000); // 10 minutes
		cluster.shutdown();
	}
}
 
Developer: danielgimenes, Project: SmashBrosTwitterAnalytics, Lines: 19, Source: SmashBrosTwitterTopology.java

Example 10: main

import backtype.storm.StormSubmitter; // import the package/class this method depends on
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
  TopologyBuilder builder = new TopologyBuilder();

  LOGGER.info("Starting..");
  builder.setSpout("trade", new DeliveryCheckSpout(), 1);
  builder.setBolt("eligibility", new DeliveryCheckBolt(), 10).shuffleGrouping("trade");
  builder.setBolt("odd", new DeliveryCheckOddBolt(), 10).shuffleGrouping("eligibility",
      "oddstream");
  builder.setBolt("even", new DeliveryCheckEvenBolt(), 10).shuffleGrouping("eligibility",
      "evenstream");

  Config conf = new Config();
  conf.setDebug(false);
  conf.setMaxSpoutPending(5);

  if (args != null && args.length > 0) {
    conf.setNumWorkers(1);
    LOGGER.info("Submitting DeliveryTopology");
    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
  } else {

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("DeliveryTopology", conf, builder.createTopology());
    Utils.sleep(100000000);
    cluster.killTopology("DeliveryTopology");
    cluster.shutdown();
  }
}
 
Developer: techysoul, Project: java, Lines: 29, Source: DeliveryTopology.java

Example 11: main

import backtype.storm.StormSubmitter; // import the package/class this method depends on
public static void main(String[] args) throws Exception{
    if(args.length != 2){
        System.err.println("[ERROR] Configuration File Required");
        System.exit(1);
    }
    Config conf = new Config();

    //Map topologyConfig = readConfigFile(args[0]);
    //conf.putAll(topologyConfig);

    // Store all the configuration in the Storm conf object
    conf.putAll(ConfigReader.readConfigFile(args[0]));

    //Second arg should be local in order to run locally
    if(args[1].equals("local"))
    {
        LocalDRPC drpc = new LocalDRPC();
        LocalCluster localcluster = new LocalCluster();
        localcluster.submitTopology("doc_event_processing",conf,buildTopology(conf, drpc));

        String searchQuery = "HoloLens crawl_test";
        System.out.println("---* Result: " + drpc.execute("search",  searchQuery));
    }
    else
    {
        StormSubmitter.submitTopologyWithProgressBar("doc_event_processing", conf, buildTopology(conf, null));
    }
}
 
Developer: preems, Project: realtime-event-processing, Lines: 28, Source: DocEventProcessingTopology.java

Example 12: main

import backtype.storm.StormSubmitter; // import the package/class this method depends on
public static void main(String[] args) throws Exception {

    TopologyBuilder builder = new TopologyBuilder();

    Map<String, String> env = System.getenv();
    for (String envName : env.keySet()) {
      LOG.info(envName + "=" + env.get(envName));
    }

    String esHost = "elasticsearch";
    LOG.info("Elasticsearch Host: " + esHost);

    Map<String, String> esConf = new HashMap<String, String>();
    esConf.put("es.nodes", esHost);
    esConf.put("es.storm.bolt.flush.entries.size", "100");
    esConf.put("es.batch.size.entries", "100");
    esConf.put("es.input.json", "true");

    String zkConnString = "kafka:2181";
    String topicName = "maxwell";
    BrokerHosts hosts = new ZkHosts(zkConnString);
    SpoutConfig spoutConfig = new SpoutConfig(hosts, topicName, "", "storm");
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);

    builder.setSpout("kafka", kafkaSpout, 1);
    builder.setBolt("binlog-expander", new BinlogPayloadExpander(), 1).shuffleGrouping("kafka");
    builder.setBolt("es-bolt", new EsBolt("maxwell/BINLOG", esConf), 1).shuffleGrouping("binlog-expander");

    Config conf = new Config();
    conf.put(Config.TOPOLOGY_DEBUG, true);
    conf.setNumWorkers(1);

    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
  }
 
Developer: iaintshine, Project: cdc, Lines: 36, Source: KafkaEsTopology.java

Example 13: main

import backtype.storm.StormSubmitter; // import the package/class this method depends on
public static void main(String[] args) throws Exception {

        if(args.length != 2){
            System.err.println("[ERROR] Configuration File Required");
            System.exit(1);
        }
        Config conf = new Config();

        //Map topologyConfig = readConfigFile(args[0]);
        //conf.putAll(topologyConfig);

        // Store all the configuration in the Storm conf object
        conf.putAll(ConfigReader.readConfigFile(args[0]));

        //Second arg should be local in order to run locally
        if(args[1].equals("local"))
        {
            LocalDRPC drpc = new LocalDRPC();
            LocalCluster localcluster = new LocalCluster();
            localcluster.submitTopology("url_event_processing",conf,buildTopology(conf, drpc));

            String searchQuery = "HoloLens crawl_test";
            System.out.println("---* Result: " + drpc.execute("search",  searchQuery));
        }
        else
        {
            StormSubmitter.submitTopologyWithProgressBar("url_event_processing", conf, buildTopology(conf, null));
        }
    }
 
Developer: preems, Project: realtime-event-processing, Lines: 29, Source: URLEventProcessingTopology.java

Example 14: main

import backtype.storm.StormSubmitter; // import the package/class this method depends on
public static void main(String[] args) throws Exception {

        if (args.length < 7) {
            System.out.println("USAGE: storm jar </path/to/topo.jar> <com.package.TopologyMainClass> " +
                    "<topo_display_name> <zookeeper_host:port[,zookeeper_host:port]> " +
                    "<kafka_topic_name> <offset_time_to_start_from> <hdfs_field_delimiter> " +
                    "<hdfs_output_path> <hdfs_uri>");
            System.exit(1);
        }

        TopologyBuilder builder = new TopologyBuilder();

        // Setup the Kafka Spout
        ConfigureKafkaSpout.configureKafkaSpout(builder, args[1], args[2], args[3]);

        // Setup the HDFS Bolt
        ConfigureHdfsBolt.configureHdfsBolt(builder, args[4], args[5], args[6]);

        // Topology
        Config conf = new Config();
        conf.setDebug(true);
        conf.setNumWorkers(1);

        // Submit the topology
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());

    }
 
Developer: sakserv, Project: storm-kafka-hdfs-starter, Lines: 28, Source: KafkaHdfsTopology.java

Example 15: main

import backtype.storm.StormSubmitter; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
	Config conf = new Config();
	conf.put(Config.TOPOLOGY_DEBUG, false);

	TopologyBuilder builder = new TopologyBuilder();
	builder.setSpout("spout", new SensorStormSpout(conf,
			new BlockFetcher(), true, 1000), 1);
	builder.setBolt(
			"average",
			new SensorStormBolt(conf, 1000, WindowBatcher.class,
					AverageOperation.class, "sensorId"), 2).customGrouping(
			"spout", new SensorStormFieldGrouping("sensorId"));
	builder.setBolt(
			"printspeed",
			new SensorStormBolt(conf, 1000,
					PrintParticleSpeedOperation.class, null), 2)
			.customGrouping("average", new SensorStormShuffleGrouping());
	builder.setBolt("printparticle",
			new SensorStormBolt(conf, 1000, PrintOperation.class, null), 2)
			.customGrouping("printspeed", new SensorStormShuffleGrouping());

	if ((args != null) && (args.length > 0)) {
		conf.setNumWorkers(3);
		StormSubmitter.submitTopologyWithProgressBar(args[0], conf,
				builder.createTopology());
	} else {
		LocalCluster cluster = new LocalCluster();
		cluster.submitTopology("test", conf, builder.createTopology());
		Utils.sleep(10000);
		cluster.killTopology("test");
		cluster.shutdown();
	}
}
 
Developer: sensorstorm, Project: SensorStorm, Lines: 34, Source: Main.java


Note: the backtype.storm.StormSubmitter.submitTopologyWithProgressBar examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their authors; copyright of the source code remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.