This article collects typical usage examples of the Java class backtype.storm.Config. If you are wondering what the Config class is for or how to use it, the curated examples below may help.
The Config class belongs to the backtype.storm package. Fifteen code examples are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code samples.
Example 1: buildAndSubmit

import backtype.storm.Config; // import the required package/class

public void buildAndSubmit() throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    Config config = new Config();
    config.setDebug(true);
    // String nimbusHost = topologyConfig.getProperty("nimbus.host");
    config.put(Config.NIMBUS_HOST, "localhost");
    configureKafkaSpout(builder);
    //configureRouteBolt(builder);
    configurePhoenixTest(builder);
    /*
    builder.setBolt("submitter", new SubmitBolt())
           .shuffleGrouping(ROUTE_BOLT);
    */
    try {
        StormSubmitter.submitTopology("simple-topology", config, builder.createTopology());
    } catch (Exception e) {
        LOG.error("Error submitting Topology", e);
    }
}
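The configureKafkaSpout(builder) helper referenced above is not shown in this listing. Below is a minimal sketch of what such a helper could look like with the storm-kafka API; the ZooKeeper address, topic name, offset root, and component ids are assumptions, not values from the original code.

import backtype.storm.spout.SchemeAsMultiScheme;
import backtype.storm.topology.TopologyBuilder;
import storm.kafka.KafkaSpout;
import storm.kafka.SpoutConfig;
import storm.kafka.StringScheme;
import storm.kafka.ZkHosts;

private void configureKafkaSpout(TopologyBuilder builder) {
    // Assumed ZooKeeper address, topic, and offset root; adjust to your cluster.
    ZkHosts zkHosts = new ZkHosts("localhost:2181");
    SpoutConfig spoutConfig = new SpoutConfig(zkHosts, "events", "/kafka-offsets", "kafka-spout-id");
    // Deserialize raw Kafka bytes into a single "str" tuple field.
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    builder.setSpout("kafkaSpout", new KafkaSpout(spoutConfig), 1);
}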
Example 2: createProcessBuilder

import backtype.storm.Config; // import the required package/class

/**
 * Create a process builder to launch the log viewer.
 * @return a ProcessBuilder configured to start the logviewer process
 */
protected ProcessBuilder createProcessBuilder() {
    ProcessBuilder pb = new ProcessBuilder(
            Paths.get(System.getProperty("user.dir"), "/bin/storm").toString(),
            "logviewer",
            "-c",
            "storm.log.dir=" + System.getenv("MESOS_SANDBOX") + "/logs",
            "-c",
            Config.LOGVIEWER_PORT + "=" + port
    );
    // If anything goes wrong at startup we want to see it.
    Path logPath = Paths.get(System.getenv("MESOS_SANDBOX"), "/logs");
    if (!logPath.toFile().exists() && !logPath.toFile().mkdirs()) {
        throw new RuntimeException("Couldn't create log directory");
    }
    File log = Paths.get(System.getenv("MESOS_SANDBOX"), "/logs/logviewer-startup.log").toFile();
    pb.redirectErrorStream(true);
    pb.redirectOutput(Redirect.appendTo(log));
    return pb;
}
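As a usage sketch (not part of the original class), the returned builder is started like any other ProcessBuilder; the caller must handle IOException and InterruptedException:

// Launch the logviewer; its stdout/stderr are appended to logviewer-startup.log as configured above.
Process logviewer = createProcessBuilder().start();
// Optionally block until the process exits and inspect the status code.
int exitCode = logviewer.waitFor();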
Example 3: commit

import backtype.storm.Config; // import the required package/class

public void commit() {
    long lastCompletedOffset = lastCompletedOffset();
    if (_committedTo != lastCompletedOffset) {
        LOG.debug("Writing last completed offset (" + lastCompletedOffset + ") to ZK for " + _partition + " for topology: " + _topologyInstanceId);
        Map<Object, Object> data = (Map<Object, Object>) ImmutableMap.builder()
                .put("topology", ImmutableMap.of("id", _topologyInstanceId,
                        "name", _stormConf.get(Config.TOPOLOGY_NAME)))
                .put("offset", lastCompletedOffset)
                .put("partition", _partition.partition)
                .put("broker", ImmutableMap.of("host", _partition.host.host,
                        "port", _partition.host.port))
                .put("topic", _spoutConfig.topic).build();
        _state.writeJSON(committedPath(), data);
        _committedTo = lastCompletedOffset;
        LOG.debug("Wrote last completed offset (" + lastCompletedOffset + ") to ZK for " + _partition + " for topology: " + _topologyInstanceId);
    } else {
        LOG.debug("No new offset for " + _partition + " for topology: " + _topologyInstanceId);
    }
}
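The committedPath() helper is not shown here. In storm-kafka's PartitionManager the path is typically derived from the spout's ZooKeeper settings; a sketch under that assumption:

private String committedPath() {
    // zkRoot and id come from the SpoutConfig; the partition id names the per-partition ZK node.
    return _spoutConfig.zkRoot + "/" + _spoutConfig.id + "/" + _partition.getId();
}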
Example 4: main

import backtype.storm.Config; // import the required package/class

public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setMaxSpoutPending(5);
    if (args.length == 1) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("wordCounter", conf, buildTopology(args[0]));
        Thread.sleep(60 * 1000);
        cluster.killTopology("wordCounter");
        cluster.shutdown();
        System.exit(0);
    } else if (args.length == 2) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[1], conf, buildTopology(args[0]));
    } else {
        System.out.println("Usage: TridentFileTopology <hdfs url> [topology name]");
    }
}
Example 5: main

import backtype.storm.Config; // import the required package/class

public static void main(String[] args) throws SQLException {
    // tableName is the name of the table in Splice to insert records into
    // server is the server instance running Splice
    String tableName = "students";
    String server = "localhost";
    TopologyBuilder builder = new TopologyBuilder();
    // set the spout for the topology
    builder.setSpout("seedDataFromMySql", new MySqlSpout());
    // dump the stream data into Splice
    builder.setBolt("dbRowProcessing", new MySqlSpliceBolt(server, tableName), 1).shuffleGrouping("seedDataFromMySql");
    Config conf = new Config();
    conf.setDebug(true);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("mysql-splice-topology", conf, builder.createTopology());
    Utils.sleep(3000);
    cluster.shutdown();
}
Example 6: main

import backtype.storm.Config; // import the required package/class

public static void main(String[] args) throws Exception {
    Config conf = new Config();
    int spout_Parallelism_hint = 1;
    int split_Parallelism_hint = 2;
    int count_Parallelism_hint = 2;
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new RaceSentenceSpout(), spout_Parallelism_hint);
    builder.setBolt("split", new SplitSentence(), split_Parallelism_hint).shuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), count_Parallelism_hint).fieldsGrouping("split", new Fields("word"));
    String topologyName = RaceConfig.JstormTopologyName;
    try {
        StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 7: main

import backtype.storm.Config; // import the required package/class

public static void main(String[] args) {
    Config config = new Config();
    config.setDebug(true);
    StormTopology topology = buildTopology();
    // Un-comment to run locally:
    LocalCluster localCluster = new LocalCluster();
    localCluster.submitTopology("local-moving-avg", config, topology);
    // Un-comment to run as part of a Storm cluster:
    // try {
    //     StormSubmitter.submitTopology("cluster-moving-average",
    //             config,
    //             topology);
    // } catch (AlreadyAliveException e) {
    //     e.printStackTrace();
    // } catch (InvalidTopologyException e) {
    //     e.printStackTrace();
    // }
}
Example 8: buildAndSubmit

import backtype.storm.Config; // import the required package/class

public void buildAndSubmit() throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    Config config = new Config();
    config.setDebug(true);
    // String nimbusHost = topologyConfig.getProperty("nimbus.host");
    config.put(Config.NIMBUS_HOST, "localhost");
    configureKafkaSpout(builder);
    configureRouteBolt(builder);
    configureInsertBolt(builder);
    //builder.setBolt("submitter", new SubmitBolt())
    //       .shuffleGrouping(ROUTE_BOLT);
    try {
        StormSubmitter.submitTopology("realestate-topology", config, builder.createTopology());
    } catch (Exception e) {
        LOG.error("Error submitting Topology", e);
    }
}
Example 9: main

import backtype.storm.Config; // import the required package/class

public static void main(String[] args) {
    Config config = new Config();
    HdfsBolt hdfsBolt = makeHdfsBolt();
    KafkaSpout kafkaSpout = makeKafkaSpout(TOPIC, TOPOLOGY_NAME);
    LOG.info("Topology name is {}", TOPOLOGY_NAME);
    TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.setSpout(KAFKA_SPOUT_ID, kafkaSpout, 10);
    topologyBuilder.setBolt(CROP_BOLT_ID, new CropBolt(), 10).shuffleGrouping(KAFKA_SPOUT_ID);
    topologyBuilder.setBolt(SPLIT_FIELDS_BOLT_ID, new SplitFieldsBolt(), 10).shuffleGrouping(CROP_BOLT_ID);
    topologyBuilder.setBolt(STORM_HDFS_BOLT_ID, hdfsBolt, 4).fieldsGrouping(SPLIT_FIELDS_BOLT_ID, new Fields("timestamp", "fieldvalues"));
    if (args != null && args.length > 0) {
        config.setDebug(false);
        config.setNumWorkers(3);
        try {
            StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
        } catch (InvalidTopologyException | AlreadyAliveException | AuthorizationException e) {
            e.printStackTrace();
        }
    }
}
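The makeHdfsBolt() and makeKafkaSpout(...) helpers are not shown in this listing. Below is a minimal sketch of what makeHdfsBolt() might look like with the storm-hdfs builder API; the NameNode URL, output path, delimiter, and rotation thresholds are assumptions:

import org.apache.storm.hdfs.bolt.HdfsBolt;
import org.apache.storm.hdfs.bolt.format.DefaultFileNameFormat;
import org.apache.storm.hdfs.bolt.format.DelimitedRecordFormat;
import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy;
import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy.Units;
import org.apache.storm.hdfs.bolt.sync.CountSyncPolicy;

private static HdfsBolt makeHdfsBolt() {
    return new HdfsBolt()
            .withFsUrl("hdfs://localhost:8020")                                    // assumed NameNode URL
            .withFileNameFormat(new DefaultFileNameFormat().withPath("/storm/"))   // assumed output path
            .withRecordFormat(new DelimitedRecordFormat().withFieldDelimiter(",")) // comma-separated tuple fields
            .withRotationPolicy(new FileSizeRotationPolicy(5.0f, Units.MB))        // rotate output files at 5 MB
            .withSyncPolicy(new CountSyncPolicy(1000));                            // sync to HDFS every 1000 tuples
}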
Example 10: main

import backtype.storm.Config; // import the required package/class

@SuppressWarnings("unchecked")
public static void main(String[] args) throws Exception {
    Map conf = new HashMap();
    conf.put(Config.TOPOLOGY_SUBMITTER_PRINCIPAL, args[0]); // principal with realm, e.g. storm@EXAMPLE.COM
    conf.put(STORM_USER_NAME_KEY, args[1]);                 // principal with realm, e.g. hdfs@EXAMPLE.COM
    conf.put(STORM_KEYTAB_FILE_KEY, args[2]);               // e.g. /etc/security/keytabs/storm.keytab
    Configuration configuration = new Configuration();
    AutoHDFS autoHDFS = new AutoHDFS();
    autoHDFS.prepare(conf);
    Map<String, String> creds = new HashMap<String, String>();
    autoHDFS.populateCredentials(creds, conf);
    LOG.info("Got HDFS credentials {}", autoHDFS.getCredentials(creds));
    Subject s = new Subject();
    autoHDFS.populateSubject(s, creds);
    LOG.info("Got a Subject " + s);
    autoHDFS.renew(creds, conf);
    LOG.info("Renewed credentials {}", autoHDFS.getCredentials(creds));
}
Example 11: getMaxParallelism

import backtype.storm.Config; // import the required package/class

private static Integer getMaxParallelism(Set<Group> groups) {
    Integer ret = null;
    for (Group g : groups) {
        if (isSpoutGroup(g)) {
            SpoutNode n = (SpoutNode) g.nodes.iterator().next();
            Map conf = getSpoutComponentConfig(n.spout);
            if (conf == null) conf = new HashMap();
            Number maxP = (Number) conf.get(Config.TOPOLOGY_MAX_TASK_PARALLELISM);
            if (maxP != null) {
                if (ret == null) ret = maxP.intValue();
                else ret = Math.min(ret, maxP.intValue());
            }
        }
    }
    return ret;
}
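This helper reads Config.TOPOLOGY_MAX_TASK_PARALLELISM from each spout's component config and returns the smallest cap it finds. As a hedged usage sketch, that cap can be set either topology-wide or per component; the spout class below is a hypothetical placeholder:

Config conf = new Config();
conf.setMaxTaskParallelism(8); // topology-wide cap, stored under Config.TOPOLOGY_MAX_TASK_PARALLELISM

TopologyBuilder builder = new TopologyBuilder();
// Per-component cap; this is the value getMaxParallelism() reads from the spout's component config.
builder.setSpout("spout", new MySpout(), 4).setMaxTaskParallelism(2);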
Example 12: SetRemoteTopology

import backtype.storm.Config; // import the required package/class

public void SetRemoteTopology() throws AlreadyAliveException,
        InvalidTopologyException, TopologyAssignException {
    Config conf = getConf();
    StormTopology topology = buildTopology();
    conf.put(Config.STORM_CLUSTER_MODE, "distributed");
    String streamName = (String) conf.get(Config.TOPOLOGY_NAME);
    if (streamName == null) {
        streamName = "SequenceTest";
    }
    if (streamName.contains("zeromq")) {
        conf.put(Config.STORM_MESSAGING_TRANSPORT,
                "com.alibaba.jstorm.message.zeroMq.MQContext");
    } else {
        conf.put(Config.STORM_MESSAGING_TRANSPORT,
                "com.alibaba.jstorm.message.netty.NettyContext");
    }
    StormSubmitter.submitTopology(streamName, conf, topology);
}
Example 13: main

import backtype.storm.Config; // import the required package/class

public static void main(String[] args) {
    String consumerKey = args[0];
    String consumerSecret = args[1];
    String accessToken = args[2];
    String accessTokenSecret = args[3];
    String[] arguments = args.clone();
    String[] keyWords = Arrays.copyOfRange(arguments, 4, arguments.length);
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("twitter", new TwitterSampleSpout(consumerKey, consumerSecret,
            accessToken, accessTokenSecret, keyWords));
    builder.setBolt("print", new PrinterBolt())
            .shuffleGrouping("twitter");
    Config conf = new Config();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, builder.createTopology());
    Utils.sleep(10000);
    cluster.shutdown();
}
Example 14: main

import backtype.storm.Config; // import the required package/class

public static void main(String[] args) throws Exception {
    LinearDRPCTopologyBuilder builder = construct();
    Config conf = new Config();
    if (args == null || args.length == 0) {
        conf.setMaxTaskParallelism(3);
        LocalDRPC drpc = new LocalDRPC();
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("reach-drpc", conf, builder.createLocalTopology(drpc));
        String[] urlsToTry = new String[]{ "foo.com/blog/1", "engineering.twitter.com/blog/5", "notaurl.com" };
        for (String url : urlsToTry) {
            System.out.println("Reach of " + url + ": " + drpc.execute("reach", url));
        }
        cluster.shutdown();
        drpc.shutdown();
    } else {
        conf.setNumWorkers(6);
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createRemoteTopology());
    }
}
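The construct() helper that wires the DRPC bolt chain is not shown in this listing. Below is a minimal sketch of how such a builder could be assembled; the bolt classes are hypothetical placeholders (the full storm-starter ReachTopology chains several more bolts with field groupings):

private static LinearDRPCTopologyBuilder construct() {
    // "reach" is the DRPC function name used by drpc.execute("reach", url) above.
    LinearDRPCTopologyBuilder builder = new LinearDRPCTopologyBuilder("reach");
    // Hypothetical bolts: expand each URL to the users who tweeted it, then to their followers.
    builder.addBolt(new GetTweetersBolt(), 4);
    builder.addBolt(new GetFollowersBolt(), 12).shuffleGrouping();
    return builder;
}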
Example 15: initThrift

import backtype.storm.Config; // import the required package/class

private void initThrift(Map conf) throws TTransportException {
    Integer thrift_port = JStormUtils.parseInt(conf.get(Config.NIMBUS_THRIFT_PORT));
    TNonblockingServerSocket socket = new TNonblockingServerSocket(thrift_port);
    Integer maxReadBufSize = JStormUtils.parseInt(conf.get(Config.NIMBUS_THRIFT_MAX_BUFFER_SIZE));
    THsHaServer.Args args = new THsHaServer.Args(socket);
    args.workerThreads(ServiceHandler.THREAD_NUM);
    args.protocolFactory(new TBinaryProtocol.Factory(false, true, maxReadBufSize));
    args.processor(new Nimbus.Processor<Iface>(serviceHandler));
    args.maxReadBufferBytes = maxReadBufSize;
    thriftServer = new THsHaServer(args);
    LOG.info("Successfully started nimbus: started Thrift server...");
    thriftServer.serve();
}