This page collects typical usage examples of the Java class org.apache.storm.kafka.ZkHosts. If you have been wondering what the ZkHosts class does, how to use it, or what real code that uses it looks like, the curated class examples below should help.
The ZkHosts class belongs to the org.apache.storm.kafka package. 15 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
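Before the examples, a quick orientation: ZkHosts tells the storm-kafka spout which ZooKeeper ensemble the Kafka brokers register their metadata with; it is wrapped in a SpoutConfig (or TridentKafkaConfig) that is then handed to a spout. A minimal sketch of this recurring pattern, with placeholder host, topic, and consumer-id values:

import org.apache.storm.kafka.BrokerHosts;
import org.apache.storm.kafka.KafkaSpout;
import org.apache.storm.kafka.SpoutConfig;
import org.apache.storm.kafka.StringScheme;
import org.apache.storm.kafka.ZkHosts;
import org.apache.storm.spout.SchemeAsMultiScheme;

// Placeholder values; adjust for your cluster.
BrokerHosts hosts = new ZkHosts("localhost:2181");                // ZooKeeper connect string
SpoutConfig spoutConfig = new SpoutConfig(hosts, "my-topic",      // topic to consume
        "/my-topic",                                              // ZooKeeper root for offset storage
        "my-consumer-id");                                        // consumer/client id
spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme()); // deserialize messages as single-string tuples
KafkaSpout spout = new KafkaSpout(spoutConfig);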
Example 1: buildTopology
import org.apache.storm.kafka.ZkHosts; // import the required package/class
public static StormTopology buildTopology(WindowsStoreFactory windowStore, WindowConfig windowConfig) throws Exception {
    TridentKafkaConfig config = new TridentKafkaConfig(new ZkHosts("localhost:2181"), "test");
    config.scheme = new SchemeAsMultiScheme(new StringScheme());
    config.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
    TransactionalTridentKafkaSpout spout = new TransactionalTridentKafkaSpout(config);

    TridentTopology topology = new TridentTopology();
    Stream stream = topology.newStream("spout1", spout)
            .each(new Fields("str"), new Split(), new Fields("word"))
            .window(windowConfig, windowStore, new Fields("word"), new CountAsAggregator(), new Fields("count"))
            .peek(new Consumer() {
                @Override
                public void accept(TridentTuple tuple) {
                    System.out.print("[");
                    for (int index = 0; index < tuple.size(); index++) {
                        System.out.print(tuple.get(index));
                        if (index < (tuple.size() - 1))
                            System.out.print(",");
                    }
                    System.out.println("]");
                }
            });
    return topology.build();
}
Developer ID: PacktPublishing, Project: Practical-Real-time-Processing-and-Analytics, Lines: 26, Source file: TridentWindowingInmemoryStoreTopology.java
Example 2: main
import org.apache.storm.kafka.ZkHosts; // import the required package/class
public static void main(String[] args) {
    TridentKafkaConfig config = new TridentKafkaConfig(new ZkHosts("localhost:2181"), "storm-trident-diy");
    config.scheme = new SchemeAsMultiScheme(new StringScheme());
    config.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
    TransactionalTridentKafkaSpout spout = new TransactionalTridentKafkaSpout(config);

    TridentTopology topology = new TridentTopology();
    topology.newStream("spout", spout).filter(new ExcludePhoneNumber())
            .each(new Fields("str"), new DeviceInfoExtractor(), new Fields("phone", "bytes"))
            .groupBy(new Fields("phone"))
            .aggregate(new Fields("bytes", "phone"), new Sum(), new Fields("sum"))
            .applyAssembly(new FirstN(10, "sum"))
            .each(new Fields("phone", "sum"), new Debug());

    Config config1 = new Config();
    config1.setNumWorkers(3);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("storm-trident-diy", config1, topology.build());
}
Developer ID: PacktPublishing, Project: Practical-Real-time-Processing-and-Analytics, Lines: 21, Source file: TridentDIY.java
Example 3: main
import org.apache.storm.kafka.ZkHosts; // import the required package/class
public static void main(String[] args) throws InterruptedException {
    Config config = new Config();
    config.setNumWorkers(3);
    TopologyBuilder topologyBuilder = new TopologyBuilder();

    String zkConnString = "localhost:2181";
    String topicName = "sensor-data";
    BrokerHosts hosts = new ZkHosts(zkConnString);
    SpoutConfig spoutConfig = new SpoutConfig(hosts, topicName, "/" + topicName, UUID.randomUUID().toString());
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);

    topologyBuilder.setSpout("spout", kafkaSpout, 1);
    topologyBuilder.setBolt("es-bolt", new ElasticSearchBolt(), 1).shuffleGrouping("spout");

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("storm-es-example", config, topologyBuilder.createTopology());
}
Developer ID: PacktPublishing, Project: Practical-Real-time-Processing-and-Analytics, Lines: 20, Source file: SensorTopology.java
Example 4: main
import org.apache.storm.kafka.ZkHosts; // import the required package/class
public static void main(String[] args) {
    Config config = new Config();
    config.setNumWorkers(3);
    TopologyBuilder topologyBuilder = new TopologyBuilder();

    String zkConnString = "localhost:2181";
    String topicName = "storm-diy";
    BrokerHosts hosts = new ZkHosts(zkConnString);
    SpoutConfig spoutConfig = new SpoutConfig(hosts, topicName, "/" + topicName, UUID.randomUUID().toString());
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);

    topologyBuilder.setSpout("spout", kafkaSpout, 1);
    topologyBuilder.setBolt("parser", new ParseAndUsageBolt(), 1).shuffleGrouping("spout");
    topologyBuilder.setBolt("usageCassandra", new UsageCassandraBolt("localhost", "usage"), 1).shuffleGrouping("parser", "usagestream");
    topologyBuilder.setBolt("tdrCassandra", new TDRCassandraBolt("localhost", "tdr"), 1).shuffleGrouping("parser", "tdrstream");

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("storm-diy", config, topologyBuilder.createTopology());
}
Developer ID: PacktPublishing, Project: Practical-Real-time-Processing-and-Analytics, Lines: 22, Source file: TelecomProcessorTopology.java
Example 5: constructKafkaSpoutConf
import org.apache.storm.kafka.ZkHosts; // import the required package/class
private SpoutConfig constructKafkaSpoutConf() {
    // BrokerHosts hosts = new ZkHosts(topologyConfig.getProperty("kafka.zookeeper.host.port"));
    BrokerHosts hosts = new ZkHosts("localhost:2181");
    /*
    String topic = topologyConfig.getProperty("kafka.topic");
    String zkRoot = topologyConfig.getProperty("kafka.zkRoot");
    String consumerGroupId = topologyConfig.getProperty("kafka.consumer.group.id");
    */
    String topic = "addresses";
    String zkRoot = "";
    String consumerGroupId = "group1";

    SpoutConfig spoutConfig = new SpoutConfig(hosts, topic, zkRoot, consumerGroupId);
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    return spoutConfig;
}
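A sketch of how this helper might be wired into a topology; the spout name and parallelism below are illustrative, not from the source:

TopologyBuilder builder = new TopologyBuilder();
// constructKafkaSpoutConf() is the method above
builder.setSpout("kafka-spout", new KafkaSpout(constructKafkaSpoutConf()), 1);
// downstream bolts would then subscribe with shuffleGrouping("kafka-spout")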
Example 6: createKafkaSpout
import org.apache.storm.kafka.ZkHosts; // import the required package/class
/**
 * Creates Kafka spout.
 *
 * @param topic Kafka topic
 * @param spoutId spout identifier
 * @return {@link KafkaSpout}
 */
protected org.apache.storm.kafka.KafkaSpout createKafkaSpout(String topic, String spoutId) {
    String zkRoot = String.format("/%s/%s", getTopologyName(), topic);
    ZkHosts hosts = new ZkHosts(config.getZookeeperHosts());
    SpoutConfig cfg = new SpoutConfig(hosts, topic, zkRoot, spoutId);
    cfg.startOffsetTime = OffsetRequest.EarliestTime();
    cfg.scheme = new SchemeAsMultiScheme(new StringScheme());
    cfg.bufferSizeBytes = 1024 * 1024 * 4;
    cfg.fetchSizeBytes = 1024 * 1024 * 4;
    return new org.apache.storm.kafka.KafkaSpout(cfg);
}
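Note that the 4 MB bufferSizeBytes and fetchSizeBytes override storm-kafka's defaults (1 MB each); larger fetches reduce broker round trips at the cost of per-spout memory. EarliestTime() makes a fresh consumer replay the topic from the beginning rather than only tailing new messages.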
Example 7: extractSpoutConfig
import org.apache.storm.kafka.ZkHosts; // import the required package/class
/**
 * Extract spout config from the topology configuration.
 *
 * @param props the topology configuration
 * @param topic the Kafka topic to consume
 * @return a SpoutConfig object
 */
private static SpoutConfig extractSpoutConfig(Properties props, String topic)
        throws ClassNotFoundException, IllegalAccessException, InstantiationException {
    // Zookeeper hosts
    ZkHosts zkHosts = new ZkHosts(props.getProperty("zookeeper.connect"));

    // Client ID: fall back to a random UUID when the property is empty
    String clientId = props.getProperty("consumer.id");
    clientId = clientId.length() > 0 ? clientId : UUID.randomUUID().toString();

    // Spout config
    SpoutConfig conf = new SpoutConfig(zkHosts, topic, "/kafka-mirror/" + topic, clientId);

    // Get scheme classname from properties or use StringScheme by default
    String schemeClassname = (String) props.getOrDefault("spout.scheme",
            "org.apache.storm.kafka.StringScheme");
    Class<?> clazz = Class.forName(schemeClassname);
    conf.scheme = new SchemeAsMultiScheme((Scheme) clazz.newInstance());
    return conf;
}
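A sketch of the properties a caller might supply; the key names come from the method body, while the values are placeholders:

Properties props = new Properties();
props.setProperty("zookeeper.connect", "localhost:2181");                 // passed to ZkHosts
props.setProperty("consumer.id", "mirror-consumer");                      // empty value falls back to a random UUID
props.setProperty("spout.scheme", "org.apache.storm.kafka.StringScheme"); // optional; this is the default
SpoutConfig conf = extractSpoutConfig(props, "my-topic");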
Example 8: runPirkTopology
import org.apache.storm.kafka.ZkHosts; // import the required package/class
public static void runPirkTopology() throws PIRException
{
    // Set up Kafka parameters
    logger.info("Configuring Kafka.");
    String zkRoot = "/" + kafkaTopic + "_pirk_storm";
    BrokerHosts zkHosts = new ZkHosts(brokerZk);
    SpoutConfig kafkaConfig = new SpoutConfig(zkHosts, kafkaTopic, zkRoot, kafkaClientId);
    kafkaConfig.ignoreZkOffsets = forceFromStart;

    // Create conf
    logger.info("Retrieving Query and generating Storm conf.");
    Config conf = createStormConf();
    Query query = StormUtils.getQuery(useHdfs, hdfsUri, queryFile);
    conf.put(StormConstants.N_SQUARED_KEY, query.getNSquared().toString());
    conf.put(StormConstants.QUERY_INFO_KEY, query.getQueryInfo().toMap());

    // Configure this for different types of input data on Kafka.
    kafkaConfig.scheme = new SchemeAsMultiScheme(new PirkHashScheme(conf));

    // Create topology
    StormTopology topology = getPirkTopology(kafkaConfig);

    // Run topology
    logger.info("Submitting Pirk topology to Storm...");
    try
    {
        StormSubmitter.submitTopologyWithProgressBar(topologyName, conf, topology);
    } catch (AlreadyAliveException | InvalidTopologyException | AuthorizationException e)
    {
        throw new PIRException(e);
    }
}
Example 9: setUpTestKafkaSpout
import org.apache.storm.kafka.ZkHosts; // import the required package/class
private SpoutConfig setUpTestKafkaSpout(Config conf)
{
    ZkHosts zkHost = new ZkHosts(zookeeperLocalCluster.getConnectString());
    SpoutConfig kafkaConfig = new SpoutConfig(zkHost, topic, "/pirk_test_root", "pirk_integr_test_spout");
    kafkaConfig.scheme = new SchemeAsMultiScheme(new PirkHashScheme(conf));
    logger.info("KafkaConfig initialized...");
    return kafkaConfig;
}
Example 10: main
import org.apache.storm.kafka.ZkHosts; // import the required package/class
public static void main(String[] args)
        throws InterruptedException, InvalidTopologyException, AuthorizationException, AlreadyAliveException {
    String topologyName = "TSAS"; // topology name
    // ZooKeeper host addresses; one of them is picked automatically
    ZkHosts zkHosts = new ZkHosts("192.168.230.128:2181,192.168.230.129:2181,192.168.230.131:2181");
    String topic = "trademx";
    String zkRoot = "/storm"; // Storm's root path in ZooKeeper
    String id = "tsaPro";

    // Create the SpoutConfig object
    SpoutConfig spoutConfig = new SpoutConfig(zkHosts, topic, zkRoot, id);

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("kafka", new KafkaSpout(spoutConfig), 2);
    builder.setBolt("AccBolt", new AccBolt()).shuffleGrouping("kafka");
    builder.setBolt("ToDbBolt", new ToDbBolt()).shuffleGrouping("AccBolt");

    Config config = new Config();
    config.setDebug(false);

    if (args.length == 0) { // run locally, for testing
        LocalCluster localCluster = new LocalCluster();
        localCluster.submitTopology(topologyName, config, builder.createTopology());
        Thread.sleep(1000 * 3600);
        localCluster.killTopology(topologyName);
        localCluster.shutdown();
    } else { // submit to the cluster
        StormSubmitter.submitTopology(topologyName, config, builder.createTopology());
    }
}
Example 11: constructKafkaSpoutConf
import org.apache.storm.kafka.ZkHosts; // import the required package/class
private SpoutConfig constructKafkaSpoutConf() {
    // BrokerHosts hosts = new ZkHosts(topologyConfig.getProperty("kafka.zookeeper.host.port"));
    BrokerHosts hosts = new ZkHosts("localhost:2181");
    String topic = "properties";
    String zkRoot = "";
    String consumerGroupId = "group1";

    SpoutConfig spoutConfig = new SpoutConfig(hosts, topic, zkRoot, consumerGroupId);
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    return spoutConfig;
}
Example 12: getSpoutConfig
import org.apache.storm.kafka.ZkHosts; // import the required package/class
public static SpoutConfig getSpoutConfig(Map options, MultiScheme scheme) throws IllegalArgumentException {
    String zkServers = (String) Utils.get(options, ZOOKEEPER_SERVERS, "localhost:2181");
    String kafkaRoot = (String) Utils.get(options, KAFKA_ROOT_PATH, "/kafka");
    String connectString = zkServers + kafkaRoot;

    BrokerHosts hosts = new ZkHosts(connectString);
    String topic = (String) Utils.get(options, TOPIC, DEFAULT_TOPIC);
    String appId = (String) Utils.get(options, CLIENT_ID, "storm-app");
    SpoutConfig config = new SpoutConfig(hosts, topic, kafkaRoot, appId);
    //config.forceFromStart = true;

    // Collect the ZooKeeper hosts and verify they all share one port
    config.zkServers = new ArrayList<String>();
    String[] servers = zkServers.split(",");
    for (int i = 0; i < servers.length; i++) {
        String[] serverAndPort = servers[i].split(":");
        config.zkServers.add(serverAndPort[0]);
        int port = Integer.parseInt(serverAndPort[1]);
        if (i == 0) {
            config.zkPort = port;
        }
        if (config.zkPort != port) {
            throw new IllegalArgumentException("The ZooKeeper port must be the same on every server");
        }
    }
    config.scheme = scheme;
    return config;
}
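A usage sketch; the option-key constants (ZOOKEEPER_SERVERS, KAFKA_ROOT_PATH, TOPIC, CLIENT_ID) are defined elsewhere in the source class, so they are referenced here rather than guessed at:

Map<String, Object> options = new HashMap<>();
options.put(ZOOKEEPER_SERVERS, "zk1:2181,zk2:2181"); // every server must use the same port
options.put(KAFKA_ROOT_PATH, "/kafka");
options.put(TOPIC, "my-topic");
options.put(CLIENT_ID, "storm-app");
SpoutConfig cfg = getSpoutConfig(options, new SchemeAsMultiScheme(new StringScheme()));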
Example 13: getTridentKafkaConfig
import org.apache.storm.kafka.ZkHosts; // import the required package/class
public static TridentKafkaConfig getTridentKafkaConfig(Map options, MultiScheme scheme) {
    String zkServers = (String) Utils.get(options, ZOOKEEPER_SERVERS, "localhost:2181");
    String kafkaRoot = (String) Utils.get(options, KAFKA_ROOT_PATH, "/kafka");
    String connectString = zkServers + kafkaRoot;

    BrokerHosts hosts = new ZkHosts(connectString);
    String topic = (String) Utils.get(options, TOPIC, DEFAULT_TOPIC);
    String appId = (String) Utils.get(options, CLIENT_ID, "storm-app");
    TridentKafkaConfig config = new TridentKafkaConfig(hosts, topic, appId);
    config.scheme = scheme;
    return config;
}
Example 14: createKafkaSpout
import org.apache.storm.kafka.ZkHosts; // import the required package/class
/**
 * Creates a transactional kafka spout that consumes any new data published to the "test" topic.
 * <p/>
 * For more info on transactional spouts
 * see the "Transactional spouts" section in the
 * <a href="https://storm.apache.org/documentation/Trident-state"> Trident state</a> doc.
 *
 * @return a transactional trident kafka spout.
 */
private TransactionalTridentKafkaSpout createKafkaSpout() {
    ZkHosts hosts = new ZkHosts(zkUrl);
    TridentKafkaConfig config = new TridentKafkaConfig(hosts, "test");
    config.scheme = new SchemeAsMultiScheme(new StringScheme());

    // Consume new data from the topic
    config.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
    return new TransactionalTridentKafkaSpout(config);
}
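A sketch of consuming this spout from a Trident topology; the stream name is illustrative. StringScheme emits each Kafka message as a single field named "str":

TridentTopology topology = new TridentTopology();
topology.newStream("kafka-stream", createKafkaSpout())
        .each(new Fields("str"), new Debug()); // print each message for inspection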
Example 15: main
import org.apache.storm.kafka.ZkHosts; // import the required package/class
public static void main(String[] args) {
    if (args.length < 1) {
        System.out.println("Please mention deployment mode, either local or cluster");
        System.exit(1);
    }
    String deploymentMode = args[0];

    Config config = new Config();
    config.setNumWorkers(3);
    TopologyBuilder topologyBuilder = new TopologyBuilder();

    String zkConnString = "localhost:2181";
    String topicName = "vehicle-data";
    String hcHostName = "localhost";
    String hcPort = "5701";
    String esClusterName = "cluster.name";
    String esApplicationName = "my-application";
    String esHostName = "localhost";
    int esPort = 9300;

    BrokerHosts hosts = new ZkHosts(zkConnString);
    SpoutConfig spoutConfig = new SpoutConfig(hosts, topicName, "/" + topicName, UUID.randomUUID().toString());
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);

    topologyBuilder.setSpout("spout", kafkaSpout, 1);
    topologyBuilder.setBolt("parser", new ParseBolt(), 1).shuffleGrouping("spout");
    topologyBuilder.setBolt("checkAndAlert", new CheckDistanceAndAlertBolt(hcHostName, hcPort), 1).shuffleGrouping("parser", "parsedstream");
    topologyBuilder.setBolt("saveTDR", new ElasticSearchBolt("vehicle-tdr", "tdr", esClusterName, esApplicationName, esHostName, esPort), 1).shuffleGrouping("parser", "parsedstream");
    topologyBuilder.setBolt("generateAlert", new GenerateAlertBolt(hcHostName, hcPort), 1).shuffleGrouping("checkAndAlert", "alertInfo");
    topologyBuilder.setBolt("saveAlert", new ElasticSearchBolt("vehicle-alert", "alert", esClusterName, esApplicationName, esHostName, esPort), 1).shuffleGrouping("generateAlert", "generatedAlertInfo");

    if (deploymentMode.equalsIgnoreCase("local")) {
        System.out.println("Submitting topology on local");
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(topicName, config, topologyBuilder.createTopology());
    } else {
        try {
            System.out.println("Submitting topology on cluster");
            StormSubmitter.submitTopology(topicName, config, topologyBuilder.createTopology());
        } catch (AlreadyAliveException | InvalidTopologyException | AuthorizationException e) {
            e.printStackTrace();
        }
    }
}
Developer ID: PacktPublishing, Project: Practical-Real-time-Processing-and-Analytics, Lines: 48, Source file: GeoFencingProcessorTopology.java