This article collects typical usage examples of the Java class backtype.storm.generated.InvalidTopologyException. If you are wondering what InvalidTopologyException is for and how to use it, the curated class examples below may help.
InvalidTopologyException belongs to the backtype.storm.generated package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
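Before the repository examples, here is a minimal sketch of the basic pattern: InvalidTopologyException is a Thrift-generated checked exception carrying a message, so it is thrown by validation code and must be caught or declared by callers. The class and method names below are illustrative only and are not taken from any of the examples.

import backtype.storm.generated.InvalidTopologyException;

import java.util.Map;

public class TopologyValidationSketch {
    // Throws InvalidTopologyException when a required task mapping is missing or empty.
    static void checkTaskAssignments(Map<Integer, String> taskToComponent)
            throws InvalidTopologyException {
        if (taskToComponent == null || taskToComponent.isEmpty()) {
            throw new InvalidTopologyException("task assignment map is null or empty");
        }
    }

    public static void main(String[] args) {
        try {
            checkTaskAssignments(null);
        } catch (InvalidTopologyException e) {
            // Checked exception: callers must catch it or declare it in their throws clause.
            System.err.println("Invalid topology: " + e);
        }
    }
}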
Example 1: main
import backtype.storm.generated.InvalidTopologyException; // import the required package/class
public static void main(String[] args) {
    Config config = new Config();
    HdfsBolt hdfsBolt = makeHdfsBolt();
    KafkaSpout kafkaSpout = makeKafkaSpout(TOPIC, TOPOLOGY_NAME);
    LOG.info("Topology name is {}", TOPOLOGY_NAME);
    TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.setSpout(KAFKA_SPOUT_ID, kafkaSpout, 10);
    topologyBuilder.setBolt(CROP_BOLT_ID, new CropBolt(), 10).shuffleGrouping(KAFKA_SPOUT_ID);
    topologyBuilder.setBolt(SPLIT_FIELDS_BOLT_ID, new SplitFieldsBolt(), 10).shuffleGrouping(CROP_BOLT_ID);
    topologyBuilder.setBolt(STORM_HDFS_BOLT_ID, hdfsBolt, 4).fieldsGrouping(SPLIT_FIELDS_BOLT_ID, new Fields("timestamp", "fieldvalues"));
    if (args != null && args.length > 0) {
        config.setDebug(false);
        config.setNumWorkers(3);
        try {
            StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
        } catch (InvalidTopologyException | AlreadyAliveException | AuthorizationException e) {
            e.printStackTrace();
        }
    }
}
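Example 1 only covers remote submission; when no arguments are passed, main returns without doing anything. A local-mode fallback inside an else branch of the args check could look roughly like the sketch below, reusing config, topologyBuilder, and TOPOLOGY_NAME from the method above; the LocalCluster usage and the run duration are assumptions, not part of the original example.

import backtype.storm.LocalCluster;
import backtype.storm.utils.Utils;

// Sketch of an else-branch for the args check above: run the topology in-process.
config.setDebug(true);
LocalCluster cluster = new LocalCluster();
cluster.submitTopology(TOPOLOGY_NAME, config, topologyBuilder.createTopology());
Utils.sleep(60 * 1000); // let the local topology run for a minute
cluster.killTopology(TOPOLOGY_NAME);
cluster.shutdown();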
Example 2: setupZkTaskInfo
import backtype.storm.generated.InvalidTopologyException; // import the required package/class
/**
 * Generate TaskInfo for every bolt or spout under the ZK path /ZK/tasks/topologyId/xxx
 *
 * @param conf              nimbus configuration
 * @param topologyId        the topology id
 * @param stormClusterState ZK-backed cluster state store
 * @throws Exception
 */
public void setupZkTaskInfo(Map<Object, Object> conf, String topologyId,
        StormClusterState stormClusterState) throws Exception {
    // mkdir /ZK/taskbeats/topologyId
    stormClusterState.setup_heartbeats(topologyId);
    Map<Integer, TaskInfo> taskToComponetId = mkTaskComponentAssignments(
            conf, topologyId);
    if (taskToComponetId == null || taskToComponetId.size() == 0) {
        throw new InvalidTopologyException("Failed to generate TaskIDs map");
    }
    for (Entry<Integer, TaskInfo> entry : taskToComponetId.entrySet()) {
        // key is the task id, value is the TaskInfo
        stormClusterState.set_task(topologyId, entry.getKey(), entry.getValue());
    }
}
Example 3: mkTaskComponentAssignments
import backtype.storm.generated.InvalidTopologyException; // import the required package/class
/**
 * Generate a task id (Integer) for every task.
 *
 * @param conf       cluster configuration
 * @param topologyid the topology id
 * @return Map<Integer, TaskInfo>: from task id to TaskInfo
 * @throws IOException
 * @throws InvalidTopologyException
 */
public Map<Integer, TaskInfo> mkTaskComponentAssignments(
        Map<Object, Object> conf, String topologyid) throws IOException,
        InvalidTopologyException {
    // @@@ minor issue: stormConf could be passed in directly from the submit method
    Map<Object, Object> stormConf = StormConfig.read_nimbus_topology_conf(
            conf, topologyid);
    StormTopology stopology = StormConfig.read_nimbus_topology_code(conf,
            topologyid);
    // use a TreeMap so tasks are kept in task-id order
    Map<Integer, TaskInfo> rtn = new TreeMap<Integer, TaskInfo>();
    StormTopology topology = Common.system_topology(stormConf, stopology);
    Integer count = 0;
    count = mkTaskMaker(stormConf, topology.get_bolts(), rtn, count);
    count = mkTaskMaker(stormConf, topology.get_spouts(), rtn, count);
    count = mkTaskMaker(stormConf, topology.get_state_spouts(), rtn, count);
    return rtn;
}
Example 4: SetRemoteTopology
import backtype.storm.generated.InvalidTopologyException; // import the required package/class
public void SetRemoteTopology() throws AlreadyAliveException,
        InvalidTopologyException, TopologyAssignException {
    Config conf = getConf();
    StormTopology topology = buildTopology();
    conf.put(Config.STORM_CLUSTER_MODE, "distributed");
    String streamName = (String) conf.get(Config.TOPOLOGY_NAME);
    if (streamName == null) {
        streamName = "SequenceTest";
    }
    if (streamName.contains("zeromq")) {
        conf.put(Config.STORM_MESSAGING_TRANSPORT,
                "com.alibaba.jstorm.message.zeroMq.MQContext");
    } else {
        conf.put(Config.STORM_MESSAGING_TRANSPORT,
                "com.alibaba.jstorm.message.netty.NettyContext");
    }
    StormSubmitter.submitTopology(streamName, conf, topology);
}
Example 5: submitTopology
import backtype.storm.generated.InvalidTopologyException; // import the required package/class
public static void submitTopology(String name, Map stormConf,
        StormTopology topology, SubmitOptions opts, List<File> jarFiles)
        throws AlreadyAliveException, InvalidTopologyException {
    if (jarFiles == null) {
        jarFiles = new ArrayList<File>();
    }
    Map<String, String> jars = new HashMap<String, String>(jarFiles.size());
    List<String> names = new ArrayList<String>(jarFiles.size());
    for (File f : jarFiles) {
        if (!f.exists()) {
            LOG.info(f.getName() + " does not exist: "
                    + f.getAbsolutePath());
            continue;
        }
        jars.put(f.getName(), f.getAbsolutePath());
        names.add(f.getName());
    }
    LOG.info("Files: " + names + " will be loaded");
    stormConf.put(GenericOptionsParser.TOPOLOGY_LIB_PATH, jars);
    stormConf.put(GenericOptionsParser.TOPOLOGY_LIB_NAME, names);
    submitTopology(name, stormConf, topology, opts);
}
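A possible call site for Example 5 is sketched below. The topology name and jar paths are placeholders for illustration, the call assumes a TopologyBuilder named builder is in scope, and submitTopology is invoked unqualified as if from the same class as Example 5.

import backtype.storm.Config;
import backtype.storm.generated.AlreadyAliveException;
import backtype.storm.generated.InvalidTopologyException;
import backtype.storm.generated.SubmitOptions;
import backtype.storm.generated.TopologyInitialStatus;

import java.io.File;
import java.util.Arrays;

// Hypothetical caller: submit a topology together with two extra library jars.
Config conf = new Config();
conf.setNumWorkers(2);
SubmitOptions opts = new SubmitOptions(TopologyInitialStatus.ACTIVE);
try {
    submitTopology("lib-demo-topology", conf, builder.createTopology(), opts,
            Arrays.asList(new File("/opt/app/lib/extra-udf.jar"),
                          new File("/opt/app/lib/extra-codec.jar")));
} catch (AlreadyAliveException | InvalidTopologyException e) {
    e.printStackTrace();
}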
Example 6: main
import backtype.storm.generated.InvalidTopologyException; // import the required package/class
/**
 * Main method.
 * @param args the arguments
 */
public static void main(String[] args) {
    // create the main topology
    options = new PipelineOptions(args);
    MainTopologyCreator topoCreator = new MainTopologyCreator();
    TopologyOutput topo = topoCreator.createMainTopology();
    // get the topology information
    config = topo.getConfig();
    TopologyBuilder builder = topo.getBuilder();
    int defNumWorkers = topo.getNumWorkers();
    options.toConf(config);
    if (args != null && args.length > 0) {
        config.setNumWorkers(defNumWorkers);
        try {
            StormSubmitter.submitTopology(args[0], config, builder.createTopology());
        } catch (AlreadyAliveException | InvalidTopologyException e) {
            e.printStackTrace();
        }
    } else {
        final LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(TOPOLOGY_NAME, config, builder.createTopology());
    }
}
Example 7: submitTopology
import backtype.storm.generated.InvalidTopologyException; // import the required package/class
public static void submitTopology(String name, Map stormConf,
        StormTopology topology, SubmitOptions opts, List<File> jarFiles)
        throws AlreadyAliveException, InvalidTopologyException {
    // check for null before calling jarFiles.size() to avoid a NullPointerException
    if (jarFiles == null) {
        jarFiles = new ArrayList<File>();
    }
    Map<String, String> jars = new HashMap<String, String>(jarFiles.size());
    List<String> names = new ArrayList<String>(jarFiles.size());
    for (File f : jarFiles) {
        if (!f.exists()) {
            LOG.info(f.getName() + " does not exist: "
                    + f.getAbsolutePath());
            continue;
        }
        jars.put(f.getName(), f.getAbsolutePath());
        names.add(f.getName());
    }
    LOG.info("Files: " + names + " will be loaded");
    stormConf.put(GenericOptionsParser.TOPOLOGY_LIB_PATH, jars);
    stormConf.put(GenericOptionsParser.TOPOLOGY_LIB_NAME, names);
    submitTopology(name, stormConf, topology, opts);
}
Example 8: setupZkTaskInfo
import backtype.storm.generated.InvalidTopologyException; // import the required package/class
/**
 * Generate TaskInfo for every bolt or spout under the ZK path /ZK/tasks/topologyId/xxx
 *
 * @param conf              nimbus configuration
 * @param topologyId        the topology id
 * @param stormClusterState ZK-backed cluster state store
 * @throws Exception
 */
public void setupZkTaskInfo(Map<Object, Object> conf, String topologyId,
        StormClusterState stormClusterState) throws Exception {
    // mkdir /ZK/taskbeats/topologyId
    stormClusterState.setup_heartbeats(topologyId);
    Map<Integer, TaskInfo> taskToComponetId = mkTaskComponentAssignments(
            conf, topologyId);
    if (taskToComponetId == null) {
        throw new InvalidTopologyException("Failed to generate TaskIDs map");
    }
    for (Entry<Integer, TaskInfo> entry : taskToComponetId.entrySet()) {
        // key is the task id, value is the TaskInfo
        stormClusterState.set_task(topologyId, entry.getKey(), entry.getValue());
    }
}
Example 9: buildAndSubmit
import backtype.storm.generated.InvalidTopologyException; // import the required package/class
private void buildAndSubmit() throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
    final int numWorkers = Integer.valueOf(topologyConfig.getProperty("num.workers"));
    Config config = new Config();
    config.setDebug(DEBUG);
    config.setNumWorkers(numWorkers);
    config.setMaxSpoutPending(1000000);
    // https://github.com/apache/storm/tree/v0.10.0/external/storm-kafka
    // The topology message timeout (30 s by default) must be larger than the
    // KafkaSpout retryDelayMaxMs (60 s by default).
    config.setMessageTimeoutSecs(600);
    TopologyBuilder builder = new TopologyBuilder();
    configureKafkaSpout(builder, config);
    configureESBolts(builder, config);
    // LocalCluster cluster = new LocalCluster();
    StormSubmitter.submitTopology("LogAnalyzerV1", config, builder.createTopology());
}
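The comment in Example 9 refers to storm-kafka's exponential-backoff retry settings. The real configureKafkaSpout is not shown in the original source; the sketch below shows what such a method might set, assuming storm-kafka 0.10.x, with placeholder ZooKeeper host, topic, ZK root, and consumer id values.

import backtype.storm.Config;
import backtype.storm.spout.SchemeAsMultiScheme;
import backtype.storm.topology.TopologyBuilder;
import storm.kafka.BrokerHosts;
import storm.kafka.KafkaSpout;
import storm.kafka.SpoutConfig;
import storm.kafka.StringScheme;
import storm.kafka.ZkHosts;

// Sketch only: the original configureKafkaSpout implementation is not shown above.
private void configureKafkaSpout(TopologyBuilder builder, Config config) {
    BrokerHosts hosts = new ZkHosts("zk1:2181");               // placeholder ZK quorum
    SpoutConfig spoutConfig = new SpoutConfig(hosts, "ap-log", // placeholder topic
            "/kafka-spout", "log-analyzer");                   // placeholder ZK root and consumer id
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    // Keep the spout's maximum retry delay below the topology message timeout
    // (600 s above); otherwise failed tuples can time out before the spout retries them.
    spoutConfig.retryInitialDelayMs = 1000;
    spoutConfig.retryDelayMultiplier = 2.0;
    spoutConfig.retryDelayMaxMs = 60 * 1000;
    builder.setSpout("kafkaSpout", new KafkaSpout(spoutConfig), 1);
}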
Example 10: buildAndSubmit
import backtype.storm.generated.InvalidTopologyException; // import the required package/class
private void buildAndSubmit() throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
    final int numWorkers = Integer.valueOf(topologyConfig.getProperty("num.workers"));
    Config config = new Config();
    config.setDebug(DEBUG);
    config.setNumWorkers(numWorkers);
    config.setMaxSpoutPending(1000000);
    // https://github.com/apache/storm/tree/v0.10.0/external/storm-kafka
    // The topology message timeout (30 s by default) must be larger than the
    // KafkaSpout retryDelayMaxMs (60 s by default).
    config.setMessageTimeoutSecs(600);
    TopologyBuilder builder = new TopologyBuilder();
    configureKafkaSpout(builder, config);
    configureESBolts(builder, config);
    // configureHBaseBolts(builder, config);
    // conf.put(Config.NIMBUS_HOST, "hdp01.localdomain");
    // System.setProperty("storm.jar", "/root/workspace//LearnStorm/target/LearnStorm-0.0.1-SNAPSHOT.jar");
    // System.setProperty("hadoop.home.dir", "/tmp");
    // LocalCluster cluster = new LocalCluster();
    StormSubmitter.submitTopology("ApLogAnalyzerV1", config, builder.createTopology());
}
Example 11: main
import backtype.storm.generated.InvalidTopologyException; // import the required package/class
/**
 * Main method
 */
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
    if (args.length < 1) {
        throw new RuntimeException("Specify topology name");
    }
    int parallelism = 1;
    if (args.length > 1) {
        parallelism = Integer.parseInt(args[1]);
    }
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("word", new WordSpout(), parallelism);
    builder.setBolt("consumer", new ConsumerBolt(), parallelism)
            .fieldsGrouping("word", new Fields("word"));
    Config conf = new Config();
    conf.setNumWorkers(parallelism);
    StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
}
Example 12: submitTopology
import backtype.storm.generated.InvalidTopologyException; // import the required package/class
@Override
@SuppressWarnings({"rawtypes", "unchecked"})
public void submitTopology(String topoName,
        Map config,
        StormTopology stormTopology)
        throws AlreadyAliveException, InvalidTopologyException {
    assertNotAlive();
    this.topologyName = topoName;
    this.conf = config;
    this.topology = stormTopology;
    simulator.submitTopology(topoName,
            ConfigUtils.translateConfig(config),
            stormTopology.getStormTopology());
}
Example 13: main
import backtype.storm.generated.InvalidTopologyException; // import the required package/class
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
    Config conf = new Config();
    conf.setDebug(true);
    if (args != null && args.length > 0) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, buildTopology(null));
    } else {
        conf.setMaxTaskParallelism(3);
        LocalDRPC drpc = new LocalDRPC();
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(SmashBrosTwitterTopology.class.getSimpleName(), conf, buildTopology(drpc));
        Utils.sleep(10 * 60 * 1000); // 10 minutes
        cluster.shutdown();
    }
}
Example 14: main
import backtype.storm.generated.InvalidTopologyException; // import the required package/class
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
    Config conf = readConfig(new File(args[1]));
    if (conf == null) {
        throw new RuntimeException("cannot find conf file " + args[1]);
    }
    ResaConfig resaConfig = ResaConfig.create();
    resaConfig.putAll(conf);
    StormTopology topology = createTopology(conf);
    if (args[0].equals("[local]")) {
        resaConfig.setDebug(false);
        LocalCluster localCluster = new LocalCluster();
        localCluster.submitTopology("local", resaConfig, topology);
    } else {
        resaConfig.addDrsSupport();
        resaConfig.registerMetricsConsumer(RedisMetricsCollector.class);
        StormSubmitter.submitTopology(args[0], resaConfig, topology);
    }
}
Example 15: run
import backtype.storm.generated.InvalidTopologyException; // import the required package/class
private static void run(String name)
        throws ClassNotFoundException, IllegalAccessException,
        InstantiationException, AlreadyAliveException, InvalidTopologyException {
    LOG.info("running benchmark " + name);
    IBenchmark benchmark = (IBenchmark) Runner.getApplicationFromName(PACKAGE + "." + name);
    Config config = new Config();
    config.putAll(Utils.readStormConfig());
    config.setDebug(true);
    StormTopology topology = benchmark.getTopology(config);
    LocalCluster localCluster = new LocalCluster();
    localCluster.submitTopology(name, config, topology);
    final int runtime = BenchmarkUtils.getInt(config, MetricsCollectorConfig.METRICS_TOTAL_TIME,
            MetricsCollectorConfig.DEFAULT_TOTAL_TIME);
    IMetricsCollector collector = benchmark.getMetricsCollector(config, topology);
    collector.run();
    try {
        Thread.sleep(runtime);
    } catch (InterruptedException e) {
        LOG.error("benchmark interrupted", e);
    }
    localCluster.shutdown();
}