This article collects typical usage examples of the Java method backtype.storm.Config.setDebug. If you are wondering what Config.setDebug does, how to call it, or what real-world uses look like, the curated method examples here may help. You can also explore further usage examples of the enclosing class, backtype.storm.Config.
The following 15 code examples of Config.setDebug are shown below, sorted by popularity by default.
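Before the examples, here is a minimal sketch of the method in local mode. The class and topology names are placeholders chosen for illustration; TestWordSpout ships with storm-core's backtype.storm.testing package. setDebug(true) simply sets Config.TOPOLOGY_DEBUG, which makes workers log every tuple that is emitted or acked, so it is typically enabled during local development and disabled on a shared cluster.

import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.testing.TestWordSpout;
import backtype.storm.topology.TopologyBuilder;
import backtype.storm.utils.Utils;

public class SetDebugSketch {
    public static void main(String[] args) {
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("words", new TestWordSpout());
        Config config = new Config();
        // Equivalent to config.put(Config.TOPOLOGY_DEBUG, true):
        // workers will log every emitted and acked tuple.
        config.setDebug(true);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("debug-sketch", config, builder.createTopology());
        Utils.sleep(5000);
        cluster.killTopology("debug-sketch");
        cluster.shutdown();
    }
}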
Example 1: buildAndSubmit
import backtype.storm.Config; // import the package/class this method depends on
public void buildAndSubmit() throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    Config config = new Config();
    config.setDebug(true);
    // String nimbusHost = topologyConfig.getProperty("nimbus.host");
    config.put(Config.NIMBUS_HOST, "localhost");
    configureKafkaSpout(builder);
    // configureRouteBolt(builder);
    configurePhoenixTest(builder);
    /*
    builder.setBolt("submitter", new SubmitBolt())
           .shuffleGrouping(ROUTE_BOLT);
    */
    try {
        StormSubmitter.submitTopology("simple-topology", config, builder.createTopology());
    } catch (Exception e) {
        LOG.error("Error submitting topology", e);
    }
}
Example 2: main
import backtype.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws SQLException {
    // tableName is the name of the Splice table to insert records into
    // server is the server instance running Splice
    String tableName = "students";
    String server = "localhost";
    TopologyBuilder builder = new TopologyBuilder();
    // set the spout for the topology
    builder.setSpout("seedDataFromMySql", new MySqlSpout());
    // dump the stream data into Splice
    builder.setBolt("dbRowProcessing", new MySqlSpliceBolt(server, tableName), 1)
           .shuffleGrouping("seedDataFromMySql");
    Config conf = new Config();
    conf.setDebug(true);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("mysql-splice-topology", conf, builder.createTopology());
    Utils.sleep(3000);
    cluster.shutdown();
}
Example 3: main
import backtype.storm.Config; // import the package/class this method depends on
public static void main(String[] args) {
    Config config = new Config();
    config.setDebug(true);
    StormTopology topology = buildTopology();
    // Un-comment to run locally:
    LocalCluster localCluster = new LocalCluster();
    localCluster.submitTopology("local-moving-avg", config, topology);
    // Un-comment to run as part of a Storm cluster:
    // try {
    //     StormSubmitter.submitTopology("cluster-moving-average",
    //                                   config,
    //                                   topology);
    // } catch (AlreadyAliveException e) {
    //     e.printStackTrace();
    // } catch (InvalidTopologyException e) {
    //     e.printStackTrace();
    // }
}
Example 4: buildAndSubmit
import backtype.storm.Config; // import the package/class this method depends on
public void buildAndSubmit() throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    Config config = new Config();
    config.setDebug(true);
    // String nimbusHost = topologyConfig.getProperty("nimbus.host");
    config.put(Config.NIMBUS_HOST, "localhost");
    configureKafkaSpout(builder);
    configureRouteBolt(builder);
    configureInsertBolt(builder);
    // builder.setBolt("submitter", new SubmitBolt())
    //        .shuffleGrouping(ROUTE_BOLT);
    try {
        StormSubmitter.submitTopology("realestate-topology", config, builder.createTopology());
    } catch (Exception e) {
        LOG.error("Error submitting topology", e);
    }
}
Example 5: main
import backtype.storm.Config; // import the package/class this method depends on
public static void main(String[] args) {
    Config config = new Config();
    HdfsBolt hdfsBolt = makeHdfsBolt();
    KafkaSpout kafkaSpout = makeKafkaSpout(TOPIC, TOPOLOGY_NAME);
    LOG.info("Topology name is {}", TOPOLOGY_NAME);
    TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.setSpout(KAFKA_SPOUT_ID, kafkaSpout, 10);
    topologyBuilder.setBolt(CROP_BOLT_ID, new CropBolt(), 10).shuffleGrouping(KAFKA_SPOUT_ID);
    topologyBuilder.setBolt(SPLIT_FIELDS_BOLT_ID, new SplitFieldsBolt(), 10).shuffleGrouping(CROP_BOLT_ID);
    topologyBuilder.setBolt(STORM_HDFS_BOLT_ID, hdfsBolt, 4).fieldsGrouping(SPLIT_FIELDS_BOLT_ID, new Fields("timestamp", "fieldvalues"));
    if (args != null && args.length > 0) {
        // debug logging is disabled for the cluster run
        config.setDebug(false);
        config.setNumWorkers(3);
        try {
            StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
        } catch (InvalidTopologyException | AlreadyAliveException | AuthorizationException e) {
            e.printStackTrace();
        }
    }
}
Example 6: main
import backtype.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("word", new TestWordSpout(), 10);
    builder.setBolt("exclaim1", new ExclamationBolt(), 3).shuffleGrouping("word");
    builder.setBolt("exclaim2", new ExclamationBolt(), 2).shuffleGrouping("exclaim1");
    Config conf = new Config();
    conf.setDebug(true);
    if (args != null && args.length > 0) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    }
    else {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("test", conf, builder.createTopology());
        Utils.sleep(10000);
        cluster.killTopology("test");
        cluster.shutdown();
    }
}
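Example 6 follows the pattern used by Storm's own starter topologies: passing a topology name as the first program argument submits to a real cluster with more workers, while running with no arguments spins up an in-process LocalCluster, and setDebug(true) keeps per-tuple logging on for local inspection.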
Example 7: test_transaction_word
import backtype.storm.Config; // import the package/class this method depends on
@Test
public void test_transaction_word() {
    try {
        MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
        TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("top-n-words", "spout", spout, 2);
        builder.setBolt("count", new KeyedCountUpdater(), 5).fieldsGrouping("spout", new Fields("word"));
        builder.setBolt("bucketize", new Bucketize()).shuffleGrouping("count");
        builder.setBolt("buckets", new BucketCountUpdater(), 5).fieldsGrouping("bucketize", new Fields("bucket"));
        LocalCluster cluster = new LocalCluster();
        Config config = new Config();
        config.setDebug(true);
        config.setMaxSpoutPending(3);
        cluster.submitTopology("top-n-topology", config, builder.buildTopology());
        JStormUtils.sleepMs(60 * 1000);
        cluster.shutdown();
    } catch (Exception e) {
        Assert.fail("Failed to run simple transaction");
    }
}
Example 8: main
import backtype.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws WorkloadException {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new NumberSpout());
    builder.setBolt("minusone", new MinusOne())
           .shuffleGrouping("spout")
           .shuffleGrouping("DoNothing", "GreaterThanZero");
    builder.setBolt("DoNothing", new Filter())
           .shuffleGrouping("minusone");
    Config conf = new Config();
    conf.setDebug(true);
    conf.setNumWorkers(3);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("kafka-spout", conf, builder.createTopology());
}
Example 9: main
import backtype.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws WorkloadException {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("points", new PointSpout());
    builder.setBolt("assign", new Assign())
           .shuffleGrouping("points")
           .allGrouping("aggregator", "centroids");
    builder.setBolt("aggregator", new Aggregator())
           .fieldsGrouping("assign", new Fields("centroid_index"));
    Config conf = new Config();
    conf.setDebug(true);
    conf.setNumWorkers(3);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("kafka-spout", conf, builder.createTopology());
}
Example 10: main
import backtype.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws WorkloadException {
    TopologyBuilder builder = new TopologyBuilder();
    BrokerHosts hosts = new ZkHosts("localhost:2181");
    SpoutConfig spoutConfig = new SpoutConfig(hosts, "WordCount", "/" + "WordCount", UUID.randomUUID().toString());
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    spoutConfig.ignoreZkOffsets = true;
    builder.setSpout("spout", new KafkaSpout(spoutConfig));
    builder.setBolt("split", new SplitSentence()).shuffleGrouping("spout");
    builder.setBolt("counter", new CounterBolt(), 3).shuffleGrouping("split");
    builder.setBolt("aggregator", new AggregatorBolt(), 1)
           .fieldsGrouping("counter", Utils.DEFAULT_STREAM_ID, new Fields("word"))
           .allGrouping("counter", "tick");
    Config conf = new Config();
    conf.setDebug(true);
    conf.setNumWorkers(3);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("kafka-spout", conf, builder.createTopology());
}
Example 11: main
import backtype.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws WorkloadException {
    TopologyBuilder builder = new TopologyBuilder();
    BrokerHosts hosts = new ZkHosts("localhost:2181");
    SpoutConfig spoutConfig = new SpoutConfig(hosts, "WordCount", "/" + "WordCount", UUID.randomUUID().toString());
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    spoutConfig.ignoreZkOffsets = true;
    builder.setSpout("spout", new KafkaSpout(spoutConfig));
    builder.setBolt("split", new SplitSentence()).shuffleGrouping("spout");
    builder.setBolt("counter", new CounterBolt(), 3).fieldsGrouping("split", new Fields("wordCountPair"));
    Config conf = new Config();
    conf.setDebug(true);
    conf.setNumWorkers(3);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("kafka-spout", conf, builder.createTopology());
}
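A note on examples 10 and 11: the storm-kafka SpoutConfig arguments are (brokerHosts, topic, zkRoot, consumerId), and setting ignoreZkOffsets to true tells the spout to disregard any offsets already stored in ZooKeeper and start from its configured start offset, which makes test runs repeatable.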
Example 12: main
import backtype.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new RandomSentenceSpout(), 5);
    builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));
    Config conf = new Config();
    conf.setDebug(true);
    if (args != null && args.length > 0) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    } else {
        conf.setMaxTaskParallelism(3);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("word-count", conf, builder.createTopology());
        Thread.sleep(10000);
        cluster.shutdown();
    }
}
Example 13: buildAndSubmit
import backtype.storm.Config; // import the package/class this method depends on
private void buildAndSubmit() throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
    final int numWorkers = Integer.valueOf(topologyConfig.getProperty("num.workers"));
    Config config = new Config();
    config.setDebug(DEBUG);
    config.setNumWorkers(numWorkers);
    config.setMaxSpoutPending(1000000);
    // https://github.com/apache/storm/tree/v0.10.0/external/storm-kafka
    // This value (30 secs by default) must be larger than retryDelayMaxMs
    // (60 secs by default) in KafkaSpout.
    config.setMessageTimeoutSecs(600);
    TopologyBuilder builder = new TopologyBuilder();
    configureKafkaSpout(builder, config);
    configureESBolts(builder, config);
    // LocalCluster cluster = new LocalCluster();
    StormSubmitter.submitTopology("LogAnalyzerV1", config, builder.createTopology());
}
Example 14: main
import backtype.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws AlreadyAliveException,
        InvalidTopologyException {
    TopologyBuilder builder = new TopologyBuilder();
    List<String> zks = new ArrayList<String>();
    zks.add("192.168.41.122");
    List<String> cFs = new ArrayList<String>();
    cFs.add("personal");
    cFs.add("company");
    // set the spout class
    builder.setSpout("spout", new SampleSpout(), 2);
    // set the bolt class
    builder.setBolt("bolt", new StormRedisBolt("192.168.41.122", 2181), 2).shuffleGrouping("spout");
    Config conf = new Config();
    conf.setDebug(true);
    // create an instance of LocalCluster for
    // executing the topology in local mode
    LocalCluster cluster = new LocalCluster();
    // StormRedisTopology is the name of the submitted topology
    cluster.submitTopology("StormRedisTopology", conf,
            builder.createTopology());
    try {
        Thread.sleep(10000);
    } catch (Exception exception) {
        System.out.println("Thread interrupted exception : " + exception);
    }
    // kill the StormRedisTopology
    cluster.killTopology("StormRedisTopology");
    // shut down the local test cluster
    cluster.shutdown();
}
Example 15: main
import backtype.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    LOG.info("Reading JSON file configuration...");
    JSONProperties config = new JSONProperties("/topology.json");
    TopologyBuilder builder = new TopologyBuilder();
    /* Spout configuration */
    JSONArray spouts = config.getSpouts();
    configureSpouts(builder, spouts);
    /* Bolt configuration */
    JSONArray bolts = config.getBolts();
    configureBolts(builder, bolts);
    /* Drain configuration */
    JSONArray drains = config.getDrains();
    configureDrains(builder, drains);
    /* Configure more Storm options */
    Config conf = setTopologyStormConfig(config.getProperties());
    if (config.getProperty("name") != null) {
        StormSubmitter.submitTopology((String) config.getProperty("name"), conf, builder.createTopology());
    } else {
        conf.setDebug(true);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("test", conf, builder.createTopology());
        Utils.sleep(1000000); // keep alive for 1,000 seconds = 1,000,000 ms
        cluster.killTopology("test");
        cluster.shutdown();
    }
}