This article collects typical usage examples of the Java class backtype.storm.LocalCluster. If you have been wondering what LocalCluster is for or how to use it, the curated examples below should help.
The LocalCluster class belongs to the backtype.storm package. Fifteen code examples of the class are presented below, ordered by popularity.
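Before the individual examples, here is a minimal, self-contained sketch of the typical LocalCluster lifecycle: build a topology, submit it to an in-process cluster, let it run for a while, then shut everything down. The class and topology names are hypothetical placeholders; TestWordSpout is Storm's built-in test spout, used here only so the sketch runs on its own.

import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.testing.TestWordSpout;
import backtype.storm.topology.TopologyBuilder;
import backtype.storm.utils.Utils;

public class LocalClusterLifecycle {
    public static void main(String[] args) {
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("words", new TestWordSpout()); // emits random words
        Config conf = new Config();
        conf.setDebug(true);
        // LocalCluster simulates a Storm cluster inside the current JVM,
        // so no Nimbus, Supervisor, or external ZooKeeper is needed.
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("demo-topology", conf, builder.createTopology());
        Utils.sleep(10 * 1000);                // let the topology process tuples
        cluster.killTopology("demo-topology"); // optional explicit kill
        cluster.shutdown();                    // tear down the in-process cluster
    }
}

The same pattern recurs throughout the examples below; the main variations are how the topology is built and whether the code can also submit to a real cluster via StormSubmitter.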
Example 1: main
import backtype.storm.LocalCluster; // import the required package/class
public static void main(String[] args) throws Exception {
    LinearDRPCTopologyBuilder builder = construct();
    Config conf = new Config();
    if (args == null || args.length == 0) {
        conf.setMaxTaskParallelism(3);
        LocalDRPC drpc = new LocalDRPC();
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("reach-drpc", conf, builder.createLocalTopology(drpc));
        String[] urlsToTry = new String[]{ "foo.com/blog/1", "engineering.twitter.com/blog/5", "notaurl.com" };
        for (String url : urlsToTry) {
            System.out.println("Reach of " + url + ": " + drpc.execute("reach", url));
        }
        cluster.shutdown();
        drpc.shutdown();
    } else {
        conf.setNumWorkers(6);
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createRemoteTopology());
    }
}
Example 2: main
import backtype.storm.LocalCluster; // import the required package/class
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setMaxSpoutPending(5);
    if (args.length == 1) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("wordCounter", conf, buildTopology(args[0]));
        Thread.sleep(60 * 1000);
        cluster.killTopology("wordCounter");
        cluster.shutdown();
        System.exit(0);
    } else if (args.length == 2) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[1], conf, buildTopology(args[0]));
    } else {
        System.out.println("Usage: TridentFileTopology <hdfs url> [topology name]");
    }
}
Example 3: main
import backtype.storm.LocalCluster; // import the required package/class
public static void main(String[] args) throws SQLException {
    // tableName is the name of the Splice table to insert records into
    // server is the host running Splice
    String tableName = "students";
    String server = "localhost";
    TopologyBuilder builder = new TopologyBuilder();
    // set the spout for the topology
    builder.setSpout("seedDataFromMySql", new MySqlSpout());
    // dump the stream data into Splice
    builder.setBolt("dbRowProcessing", new MySqlSpliceBolt(server, tableName), 1).shuffleGrouping("seedDataFromMySql");
    Config conf = new Config();
    conf.setDebug(true);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("mysql-splice-topology", conf, builder.createTopology());
    Utils.sleep(3000);
    cluster.shutdown();
}
Example 4: main
import backtype.storm.LocalCluster; // import the required package/class
public static void main(String[] args) {
    Config config = new Config();
    config.setDebug(true);
    StormTopology topology = buildTopology();
    // Runs locally (comment this out when submitting to a real cluster):
    LocalCluster localCluster = new LocalCluster();
    localCluster.submitTopology("local-moving-avg", config, topology);
    // Un-comment to run as part of a Storm cluster instead:
    // try {
    //     StormSubmitter.submitTopology("cluster-moving-average",
    //                                   config,
    //                                   topology);
    // } catch (AlreadyAliveException e) {
    //     e.printStackTrace();
    // } catch (InvalidTopologyException e) {
    //     e.printStackTrace();
    // }
}
Example 5: submitTopology
import backtype.storm.LocalCluster; // import the required package/class
private static void submitTopology(TopologyBuilder builder) {
    try {
        if (local_mode(conf)) {
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology(
                    String.valueOf(conf.get("topology.name")), conf,
                    builder.createTopology());
            Thread.sleep(200000);
            cluster.shutdown();
        } else {
            StormSubmitter.submitTopology(
                    String.valueOf(conf.get("topology.name")), conf,
                    builder.createTopology());
        }
    } catch (Exception e) {
        // log the exception itself so the full stack trace is preserved
        LOG.error(e.getMessage(), e);
    }
}
Example 6: main
import backtype.storm.LocalCluster; // import the required package/class
public static void main(String[] args) throws Exception {
    if (args.length < 1) {
        System.err.println("[ERROR] Configuration File Required");
        return; // exit before args[0] is dereferenced below
    }
    Config conf = new Config();
    // Store all the configuration in the Storm conf object
    conf.putAll(readConfigFile(args[0]));
    // Second arg should be "local" in order to run locally
    if (args.length < 2 || (args.length == 2 && !args[1].equals("local"))) {
        StormSubmitter.submitTopologyWithProgressBar("crawler_topology", conf, buildTopology(conf, null));
    } else {
        LocalDRPC drpc = new LocalDRPC();
        LocalCluster localcluster = new LocalCluster();
        localcluster.submitTopology("crawler_topology", conf, buildTopology(conf, drpc));
        String searchQuery = "elasticsearch";
        System.out.println("---* Result (search): " + drpc.execute("search", searchQuery));
    }
}
Example 7: main
import backtype.storm.LocalCluster; // import the required package/class
public static void main(String[] args) throws WorkloadException {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new NumberSpout());
    builder.setBolt("minusone", new MinusOne())
           .shuffleGrouping("spout")
           .shuffleGrouping("DoNothing", "GreaterThanZero");
    builder.setBolt("DoNothing", new Filter())
           .shuffleGrouping("minusone");
    Config conf = new Config();
    conf.setDebug(true);
    conf.setNumWorkers(3);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("kafka-spout", conf, builder.createTopology());
}
Example 8: main
import backtype.storm.LocalCluster; // import the required package/class
public static void main(String[] args) throws WorkloadException {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("points", new PointSpout());
    builder.setBolt("assign", new Assign())
           .shuffleGrouping("points")
           .allGrouping("aggregator", "centroids");
    builder.setBolt("aggregator", new Aggregator())
           .fieldsGrouping("assign", new Fields("centroid_index"));
    Config conf = new Config();
    conf.setDebug(true);
    conf.setNumWorkers(3);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("kafka-spout", conf, builder.createTopology());
}
Example 9: main
import backtype.storm.LocalCluster; // import the required package/class
public static void main(String[] args) throws WorkloadException {
    TopologyBuilder builder = new TopologyBuilder();
    BrokerHosts hosts = new ZkHosts("localhost:2181");
    SpoutConfig spoutConfig = new SpoutConfig(hosts, "WordCount", "/" + "WordCount", UUID.randomUUID().toString());
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    spoutConfig.ignoreZkOffsets = true;
    builder.setSpout("spout", new KafkaSpout(spoutConfig));
    builder.setBolt("split", new SplitSentence()).shuffleGrouping("spout");
    builder.setBolt("counter", new CounterBolt(), 3).shuffleGrouping("split");
    builder.setBolt("aggregator", new AggregatorBolt(), 1)
           .fieldsGrouping("counter", Utils.DEFAULT_STREAM_ID, new Fields("word"))
           .allGrouping("counter", "tick");
    Config conf = new Config();
    conf.setDebug(true);
    conf.setNumWorkers(3);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("kafka-spout", conf, builder.createTopology());
}
Example 10: main
import backtype.storm.LocalCluster; // import the required package/class
public static void main(String[] args) throws WorkloadException {
    TopologyBuilder builder = new TopologyBuilder();
    BrokerHosts hosts = new ZkHosts("localhost:2181");
    SpoutConfig spoutConfig = new SpoutConfig(hosts, "WordCount", "/" + "WordCount", UUID.randomUUID().toString());
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    spoutConfig.ignoreZkOffsets = true;
    builder.setSpout("spout", new KafkaSpout(spoutConfig));
    builder.setBolt("split", new SplitSentence()).shuffleGrouping("spout");
    builder.setBolt("counter", new CounterBolt(), 3).fieldsGrouping("split", new Fields("wordCountPair"));
    Config conf = new Config();
    conf.setDebug(true);
    conf.setNumWorkers(3);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("kafka-spout", conf, builder.createTopology());
}
Example 11: main
import backtype.storm.LocalCluster; // import the required package/class
public static void main(String[] args) {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new KafkaSpoutTest(""), 1);
    builder.setBolt("bolt1", new Bolt1(), 2).shuffleGrouping("spout");
    builder.setBolt("bolt2", new Bolt2(), 2).fieldsGrouping("bolt1", new Fields("word"));
    Map<String, Object> conf = new HashMap<String, Object>();
    conf.put(Config.TOPOLOGY_WORKERS, 1);
    conf.put(Config.TOPOLOGY_DEBUG, true);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("flume-kafka-storm-integration", conf, builder.createTopology());
    Utils.sleep(1000 * 60 * 5);
    cluster.shutdown();
}
Example 12: main
import backtype.storm.LocalCluster; // import the required package/class
public static void main(String[] args) throws Exception {
    // check validity of command line arguments
    if (args.length != 2) {
        System.out.println("Command line arguments missing\n");
        System.out.println("Pass redis IP and port\n");
        return;
    }
    // configure the topology
    Config conf = new Config();
    conf.setDebug(false);
    conf.setNumWorkers(numWorkers);
    LocalCluster cluster = new LocalCluster();
    StormTopology topology = buildTopology(args[0], args[1]);
    cluster.submitTopology("crawler", conf, topology);
    System.out.println("\n>>>> TOPOLOGY - STATUS OK\n");
}
Example 13: main
import backtype.storm.LocalCluster; // import the required package/class
public static void main(String[] args) throws InterruptedException {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("word-reader", new WordReader());
    builder.setBolt("word-normalizer", new WordNormalizer()).shuffleGrouping("word-reader");
    builder.setBolt("word-counter", new WordCounter(), 2).fieldsGrouping("word-normalizer", new Fields("word"));
    Config conf = new Config();
    conf.setMaxTaskParallelism(3);
    conf.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, 3);
    conf.setDebug(false);
    // run the topology locally
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("storm-wordcount", conf, builder.createTopology());
    Thread.sleep(30000);
    cluster.shutdown();
}
Example 14: submit
import backtype.storm.LocalCluster; // import the required package/class
protected int submit(String name, Config conf, TopologyBuilder builder) {
    // register Metadata for serialization with FieldsSerializer
    Config.registerSerialization(conf, Metadata.class);
    if (isLocal) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(name, conf, builder.createTopology());
        if (ttl != -1) {
            Utils.sleep(ttl * 1000);
            cluster.shutdown();
        }
    } else {
        try {
            StormSubmitter.submitTopology(name, conf, builder.createTopology());
        } catch (Exception e) {
            e.printStackTrace();
            return -1;
        }
    }
    return 0;
}
Example 15: main
import backtype.storm.LocalCluster; // import the required package/class
public static void main(String[] args) {
    NginxSplitBolt nginxBolt = new NginxSplitBolt();
    ServiceLogBolt serviceBolt = new ServiceLogBolt();
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("nginx", new KafkaSpoutTest("log.accesslog"), 1);
    builder.setSpout("service", new KafkaSpoutTest("log.servicelog"), 1);
    builder.setBolt("nginxlog", nginxBolt).shuffleGrouping("nginx");
    builder.setBolt("servicelog", serviceBolt).shuffleGrouping("service");
    builder.setBolt("join", new SingleJoinBolt(new Fields("method", "time", "usetime", "params")))
           .fieldsGrouping("nginxlog", new Fields("ip", "utime"))
           .fieldsGrouping("servicelog", new Fields("ip", "utime"));
    Config conf = new Config();
    conf.setDebug(true);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("log-join", conf, builder.createTopology());
    Utils.sleep(2000);
    cluster.shutdown();
}