This article collects typical usage examples of the Java class storm.kafka.ZkHosts. If you are wondering what ZkHosts is for, how to use it, or where to find it in real code, the curated examples below should help.
The ZkHosts class belongs to the storm.kafka package. Fifteen code examples are shown below, sorted by popularity by default.
Example 1: constructKafkaSpoutConf
import storm.kafka.ZkHosts; // import the required package/class
private SpoutConfig constructKafkaSpoutConf() {
    // BrokerHosts hosts = new ZkHosts(topologyConfig.getProperty("kafka.zookeeper.host.port"));
    BrokerHosts hosts = new ZkHosts("localhost:2181");
    /*
    String topic = topologyConfig.getProperty("kafka.topic");
    String zkRoot = topologyConfig.getProperty("kafka.zkRoot");
    String consumerGroupId = topologyConfig.getProperty("kafka.consumer.group.id");
    */
    String topic = "addresses";
    String zkRoot = "";
    String consumerGroupId = "group1";
    SpoutConfig spoutConfig = new SpoutConfig(hosts, topic, zkRoot, consumerGroupId);
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    return spoutConfig;
}
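A SpoutConfig built this way is typically handed to a KafkaSpout and wired into a topology. A minimal sketch of that wiring, assuming hypothetical component names and a PrinterBolt like the one used in Example 6:

// Hypothetical wiring; the component names "kafka-spout" and "print-bolt" are illustrative only.
TopologyBuilder builder = new TopologyBuilder();
builder.setSpout("kafka-spout", new KafkaSpout(constructKafkaSpoutConf()), 1);
builder.setBolt("print-bolt", new PrinterBolt(), 1).shuffleGrouping("kafka-spout");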
Example 2: configureKafkaSpout
import storm.kafka.ZkHosts; // import the required package/class
public static void configureKafkaSpout(TopologyBuilder builder, String zkHostString, String kafkaTopic,
                                       String kafkaStartOffset, int parallelismHint, String spoutName,
                                       String spoutScheme) {
    LOG.info("KAFKASPOUT: Configuring the KafkaSpout");
    // Configure the KafkaSpout
    SpoutConfig spoutConfig = new SpoutConfig(new ZkHosts(zkHostString),
            kafkaTopic,                    // Kafka topic to read from
            "/" + kafkaTopic,              // root path in ZooKeeper where the spout stores consumer offsets
            UUID.randomUUID().toString()); // ID for storing consumer offsets in ZooKeeper
    try {
        spoutConfig.scheme = new SchemeAsMultiScheme(getSchemeFromClassName(spoutScheme));
    } catch (Exception e) {
        LOG.error("ERROR: Unable to create instance of scheme: " + spoutScheme, e);
    }
    setKafkaOffset(spoutConfig, kafkaStartOffset);
    KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);
    // Add the spout to the topology
    builder.setSpout(spoutName, kafkaSpout, parallelismHint);
}
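The helpers getSchemeFromClassName and setKafkaOffset are referenced above but not shown. A plausible reconstruction under stated assumptions — the scheme is loaded reflectively, and the offset logic mirrors Example 3 (both bodies are guesses, not the original source):

// Assumed helper: load a Scheme implementation by its fully qualified class name.
private static Scheme getSchemeFromClassName(String spoutScheme) throws Exception {
    return (Scheme) Class.forName(spoutScheme).newInstance();
}

// Assumed helper: "-2" rewinds to the earliest offset; any other non-null
// value is parsed as an explicit start offset time.
private static void setKafkaOffset(SpoutConfig spoutConfig, String kafkaStartOffset) {
    if ("-2".equals(kafkaStartOffset)) {
        spoutConfig.forceFromStart = true;
    } else if (kafkaStartOffset != null) {
        spoutConfig.startOffsetTime = Long.parseLong(kafkaStartOffset);
    }
}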
Example 3: configureKafkaSpout
import storm.kafka.ZkHosts; // import the required package/class
public static void configureKafkaSpout(TopologyBuilder builder, String zkHostString, String kafkaTopic, String kafkaStartOffset) {
    // Configure the KafkaSpout
    SpoutConfig spoutConfig = new SpoutConfig(new ZkHosts(zkHostString),
            kafkaTopic,                    // Kafka topic to read from
            "/" + kafkaTopic,              // root path in ZooKeeper where the spout stores consumer offsets
            UUID.randomUUID().toString()); // ID for storing consumer offsets in ZooKeeper
    //spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    spoutConfig.scheme = new SchemeAsMultiScheme(new JsonScheme());
    // Allow for passing in an offset time.
    // startOffsetTime has a bug that ignores the special -2 value, so handle it explicitly.
    if ("-2".equals(kafkaStartOffset)) { // compare strings with equals(), not ==
        spoutConfig.forceFromStart = true;
    } else if (kafkaStartOffset != null) {
        spoutConfig.startOffsetTime = Long.parseLong(kafkaStartOffset);
    }
    KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);
    // Add the spout to the topology
    builder.setSpout("kafkaspout", kafkaSpout, 1);
}
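The magic values here come from Kafka's offset API: -2 means the earliest available offset and -1 the latest. Since the old kafka.api classes are a storm-kafka dependency, the named constants can replace the literals; a small sketch:

// Same logic with named constants instead of magic numbers.
if ("-2".equals(kafkaStartOffset)) {
    spoutConfig.forceFromStart = true;
    spoutConfig.startOffsetTime = kafka.api.OffsetRequest.EarliestTime(); // -2
} else if (kafkaStartOffset != null) {
    spoutConfig.startOffsetTime = Long.parseLong(kafkaStartOffset);       // LatestTime() is -1
}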
Example 4: initializeKafkaSpout
import storm.kafka.ZkHosts; // import the required package/class
private boolean initializeKafkaSpout(String name) {
    try {
        BrokerHosts zk = new ZkHosts(config.getString("kafka.zk"));
        String inputTopic = config.getString("spout.kafka.topic");
        SpoutConfig kafkaConfig = new SpoutConfig(zk, inputTopic, "", inputTopic);
        kafkaConfig.scheme = new SchemeAsMultiScheme(new RawScheme());
        kafkaConfig.forceFromStart = true;
        kafkaConfig.startOffsetTime = -1;
        builder.setSpout(name, new KafkaSpout(kafkaConfig),
                config.getInt("spout.kafka.parallelism.hint")).setNumTasks(
                config.getInt("spout.kafka.num.tasks"));
    } catch (Exception e) {
        e.printStackTrace();
        System.exit(1); // exit with a non-zero status on failure
    }
    return true;
}
Example 5: constructKafkaSpoutConf
import storm.kafka.ZkHosts; // import the required package/class
/**
 * Construct the SpoutConfig for the Kafka spout.
 *
 * @return the configured SpoutConfig
 */
private SpoutConfig constructKafkaSpoutConf() {
    BrokerHosts hosts = new ZkHosts(topologyConfig.getProperty("kafka.zookeeper.host.port"));
    String topic = topologyConfig.getProperty("kafka.topic");
    String zkRoot = topologyConfig.getProperty("kafka.zkRoot");
    String consumerGroupId = topologyConfig.getProperty("kafka.consumer.group.id");
    SpoutConfig spoutConfig = new SpoutConfig(hosts, topic, zkRoot, consumerGroupId);
    /* Custom TruckScheme that takes a Kafka message holding a single truckEvent
     * and emits a 2-tuple of (driverId, truckEvent). The driverId is needed for a
     * fieldsGrouping so that all events for a given driver go to the same bolt tasks. */
    spoutConfig.scheme = new SchemeAsMultiScheme(new TruckScheme2());
    return spoutConfig;
}
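TruckScheme2 itself is not shown. As a rough illustration of what such a Scheme looks like — assuming the storm 0.9-era backtype.storm.spout.Scheme interface and a comma-separated payload whose first field is the driverId (the real class may differ):

// Hypothetical sketch of a TruckScheme2-style Scheme; the message layout is an assumption.
// Assumes backtype.storm.spout.Scheme and backtype.storm.tuple.{Fields, Values}.
public class TruckScheme2 implements Scheme {
    @Override
    public List<Object> deserialize(byte[] ser) {
        String truckEvent = new String(ser, StandardCharsets.UTF_8);
        String driverId = truckEvent.split(",")[0]; // assumed: driverId is the first CSV field
        return new Values(driverId, truckEvent);    // 2-tuple: (driverId, truckEvent)
    }

    @Override
    public Fields getOutputFields() {
        return new Fields("driverId", "truckEvent");
    }
}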
Example 6: main
import storm.kafka.ZkHosts; // import the required package/class
public static void main(String[] args) {
    TopologyBuilder builder = new TopologyBuilder();
    SpoutConfig spoutConf = new SpoutConfig(new ZkHosts("localhost:2181", "/brokers"), "test", "/kafkastorm", "KafkaSpout");
    spoutConf.scheme = new SchemeAsMultiScheme(new StringScheme());
    spoutConf.forceFromStart = true;
    builder.setSpout("KafkaSpout", new KafkaSpout(spoutConf), 3);
    builder.setBolt("KafkaBolt", new PrinterBolt(), 3).shuffleGrouping("KafkaSpout");
    Config conf = new Config();
    // conf.setDebug(true);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("kafka-test", conf, builder.createTopology());
    Utils.sleep(60000);
    cluster.shutdown();
}
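PrinterBolt is not part of storm-kafka; it is presumably a trivial debugging bolt. A minimal sketch, assuming the backtype.storm BaseBasicBolt API:

// Hypothetical PrinterBolt: prints every tuple it receives and emits nothing.
public class PrinterBolt extends BaseBasicBolt {
    @Override
    public void execute(Tuple tuple, BasicOutputCollector collector) {
        System.out.println(tuple);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // no output fields declared
    }
}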
Example 7: buildTopology
import storm.kafka.ZkHosts; // import the required package/class
public static StormTopology buildTopology(String hdfsUrl) {
    TridentKafkaConfig tridentKafkaConfig = new TridentKafkaConfig(new ZkHosts(ZKHOST, "/brokers"), KAFKA_TOPIC);
    tridentKafkaConfig.scheme = new SchemeAsMultiScheme(new RawScheme());
    tridentKafkaConfig.startOffsetTime = -1; // read latest messages from Kafka (same as forceStartOffsetTime(-1))
    TransactionalTridentKafkaSpout tridentKafkaSpout = new TransactionalTridentKafkaSpout(tridentKafkaConfig);
    TridentTopology topology = new TridentTopology();
    Stream stream = topology.newStream("stream", tridentKafkaSpout);
    FileNameFormat fileNameFormat = new DefaultFileNameFormat().withPath(HDFS_OUT_PATH).withPrefix("trident").withExtension(".txt");
    FileRotationPolicy rotationPolicy = new FileSizeCountRotationPolicy(5.0f, FileSizeRotationPolicy.Units.MB, 10);
    HdfsState.Options seqOpts = new HdfsState.HdfsFileOptions().withFileNameFormat(fileNameFormat)
            .withRecordFormat(new DelimitedRecordFormat().withFieldDelimiter("|").withFields(new Fields("json")))
            .withRotationPolicy(rotationPolicy).withFsUrl(hdfsUrl)
            // .addRotationAction(new MoveFileAction().toDestination(HDFS_ROTATE_PATH))
            // .addRotationAction(new AddSuffixFileAction().withSuffix("-processed"))
            .addRotationAction(new MD5FileAction());
    StateFactory factory = new HdfsStateFactory().withOptions(seqOpts);
    stream.each(new Fields("bytes"), new JacksonJsonParser(), new Fields("json")).partitionPersist(factory, new Fields("json"),
            new HdfsUpdater(), new Fields());
    return topology.build();
}
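MD5FileAction is a custom rotation action that is not shown here. Assuming the storm-hdfs RotationAction contract (an execute(FileSystem, Path) callback on each rotated file), a hypothetical version might rename the file with an MD5 suffix:

// Hypothetical sketch only; the real MD5FileAction may behave differently.
public class MD5FileAction implements RotationAction {
    @Override
    public void execute(FileSystem fileSystem, Path filePath) throws IOException {
        String digest;
        try (InputStream in = fileSystem.open(filePath)) {
            digest = DigestUtils.md5Hex(in); // commons-codec
        }
        fileSystem.rename(filePath, new Path(filePath + "." + digest));
    }
}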
Example 8: main
import storm.kafka.ZkHosts; // import the required package/class
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    if (args.length == 2) {
        // Ready & submit the topology
        String name = args[0];
        BrokerHosts hosts = new ZkHosts(args[1]);
        TransactionalTridentKafkaSpout kafkaSpout = TestUtils.testTweetSpout(hosts);
        StormSubmitter.submitTopology(name, conf, buildTopology(kafkaSpout));
    } else {
        System.err.println("Usage: <topologyName> <zookeeperHost>");
    }
}
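TestUtils.testTweetSpout is referenced in Examples 8-10 but not shown. A plausible sketch, assuming it builds a transactional Trident spout over a tweet topic (the topic name and client id are invented):

// Hypothetical reconstruction; "test-tweet-topic" and "tweet-spout" are assumptions.
public static TransactionalTridentKafkaSpout testTweetSpout(BrokerHosts hosts) {
    TridentKafkaConfig config = new TridentKafkaConfig(hosts, "test-tweet-topic", "tweet-spout");
    config.scheme = new SchemeAsMultiScheme(new StringScheme());
    return new TransactionalTridentKafkaSpout(config);
}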
Example 9: main
import storm.kafka.ZkHosts; // import the required package/class
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    if (args.length == 2) {
        // Ready & submit the topology
        String name = args[0];
        BrokerHosts hosts = new ZkHosts(args[1]);
        TransactionalTridentKafkaSpout kafkaSpout = TestUtils.testTweetSpout(hosts);
        StormSubmitter.submitTopology(name, conf, buildTopology(kafkaSpout));
    } else {
        System.err.println("Usage: <topologyName> <zookeeperHost>");
    }
}
Example 10: main
import storm.kafka.ZkHosts; // import the required package/class
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setNumWorkers(6);
    if (args.length == 2) {
        // Ready & submit the topology
        String name = args[0];
        BrokerHosts hosts = new ZkHosts(args[1]);
        TransactionalTridentKafkaSpout kafkaSpout = TestUtils.testTweetSpout(hosts);
        StormSubmitter.submitTopology(name, conf, buildTopology(kafkaSpout));
    } else {
        System.err.println("Usage: <topologyName> <zookeeperHost>");
    }
}
Example 11: main
import storm.kafka.ZkHosts; // import the required package/class
public static void main(String[] args) {
    Config conf = new Config();
    // conf.put(Config.TOPOLOGY_DEBUG, true);
    conf.put(StormElasticSearchConstants.ES_CLUSTER_NAME, "elasticsearch");
    conf.put(StormElasticSearchConstants.ES_HOST, "localhost");
    conf.put(StormElasticSearchConstants.ES_PORT, 9300);
    ZkHosts zooHosts = new ZkHosts("localhost:50003");
    TopologyBuilder builder = new TopologyBuilder();
    SpoutConfig spoutConfig = new SpoutConfig(zooHosts, "test", "", "STORM-ID");
    // spoutConfig.scheme = new StringScheme();
    // spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    KafkaSpout spout1 = new KafkaSpout(spoutConfig);
    builder.setSpout("source", spout1, 1);
    builder.setBolt("echo", new EchoBolt(), 1).shuffleGrouping("source");
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("basic_primitives", conf, builder.createTopology());
}
Example 12: ZkBrokerReader
import storm.kafka.ZkHosts; // import the required package/class
public ZkBrokerReader(Map conf, String topic, ZkHosts hosts) {
    try {
        reader = new DynamicBrokersReader(conf, hosts.brokerZkStr, hosts.brokerZkPath, topic);
        cachedBrokers = reader.getBrokerInfo();
        lastRefreshTimeMs = System.currentTimeMillis();
        refreshMillis = hosts.refreshFreqSecs * 1000L;
    } catch (java.net.SocketTimeoutException e) {
        LOG.warn("Failed to update brokers", e);
    }
}
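For context, in the storm-kafka source this constructor pairs with a getCurrentBrokers() method that re-reads broker info from ZooKeeper once refreshMillis has elapsed; roughly:

@Override
public GlobalPartitionInformation getCurrentBrokers() {
    long currTime = System.currentTimeMillis();
    if (currTime > lastRefreshTimeMs + refreshMillis) {
        try {
            // refresh the cached broker info once the refresh interval has passed
            cachedBrokers = reader.getBrokerInfo();
            lastRefreshTimeMs = currTime;
        } catch (java.net.SocketTimeoutException e) {
            LOG.warn("Failed to update brokers", e);
        }
    }
    return cachedBrokers;
}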
Example 13: TridentKafkaSpout
import storm.kafka.ZkHosts; // import the required package/class
/**
 * Constructor.
 *
 * @param config  Config file to read properties from
 * @param section Section of the Kafka config file to read properties from
 */
public TridentKafkaSpout(ConfigData config, String section) {
    _kafkaConfig = new TridentKafkaConfig(new ZkHosts(config.getZkHost()), config.getTopic(section), "stormKafka");
    _kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    _kafkaConfig.bufferSizeBytes = config.getFetchSizeKafka();
    _kafkaConfig.fetchSizeBytes = config.getFetchSizeKafka();
    _kafkaConfig.forceFromStart = false;
}
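These wrapper classes (Examples 13-15) only populate _kafkaConfig; the configured spout is presumably exposed elsewhere in the class. A hypothetical accessor could look like this (the method name is an assumption):

// Hypothetical accessor; storm-kafka's OpaqueTridentKafkaSpout accepts a TridentKafkaConfig.
public OpaqueTridentKafkaSpout getSpout() {
    return new OpaqueTridentKafkaSpout(_kafkaConfig);
}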
Example 14: TridentKafkaSpoutNmsp
import storm.kafka.ZkHosts; // import the required package/class
/**
 * Constructor.
 *
 * @param config  Config file to read properties from
 * @param section Section of the Kafka config file to read properties from
 */
public TridentKafkaSpoutNmsp(ConfigData config, String section) {
    _kafkaConfig = new TridentKafkaConfig(new ZkHosts(config.getZkHost()), config.getTopic(section), "stormKafka");
    _kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    _kafkaConfig.bufferSizeBytes = config.getFetchSizeKafkaNmsp();
    _kafkaConfig.fetchSizeBytes = config.getFetchSizeKafkaNmsp();
    _kafkaConfig.forceFromStart = false;
}
Example 15: TridentKafkaSpoutLocation
import storm.kafka.ZkHosts; // import the required package/class
/**
 * Constructor.
 *
 * @param config  Config file to read properties from
 * @param section Section of the Kafka config file to read properties from
 */
public TridentKafkaSpoutLocation(ConfigData config, String section) {
    _kafkaConfig = new TridentKafkaConfig(new ZkHosts(config.getZkHost()), config.getTopic(section), "stormKafka");
    _kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    _kafkaConfig.bufferSizeBytes = config.getFetchSizeKafkaLocation();
    _kafkaConfig.fetchSizeBytes = config.getFetchSizeKafkaLocation();
    _kafkaConfig.forceFromStart = false;
}