

Java ZkHosts Class Code Examples

This article collects typical usage examples of the Java class storm.kafka.ZkHosts. If you are wondering what exactly the ZkHosts class does, how to use it, and what real-world code that uses it looks like, the curated class examples below should help.


The ZkHosts class belongs to the storm.kafka package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code samples.
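Before the examples, here is a minimal sketch of where ZkHosts fits, assuming the pre-1.0 storm-kafka API used throughout this article: the class points the spout at the ZooKeeper ensemble that holds Kafka's broker metadata. The connect string, topic, and ids below are placeholders.

import backtype.storm.spout.SchemeAsMultiScheme;
import storm.kafka.BrokerHosts;
import storm.kafka.SpoutConfig;
import storm.kafka.StringScheme;
import storm.kafka.ZkHosts;

public class ZkHostsSketch {
    public static void main(String[] args) {
        // One-arg form: broker metadata is read from the default "/brokers" path in ZooKeeper.
        BrokerHosts hosts = new ZkHosts("localhost:2181");
        // Two-arg form: the broker root path is given explicitly.
        BrokerHosts hostsExplicit = new ZkHosts("localhost:2181", "/brokers");

        // ZkHosts is then handed to a SpoutConfig, as the examples below show.
        SpoutConfig spoutConfig = new SpoutConfig(hosts, "my-topic", "/kafkastorm", "my-consumer-id");
        spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    }
}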

Example 1: constructKafkaSpoutConf

import storm.kafka.ZkHosts; // import the required package/class
private SpoutConfig constructKafkaSpoutConf() {
    // BrokerHosts hosts = new ZkHosts(topologyConfig.getProperty("kafka.zookeeper.host.port"));
    BrokerHosts hosts = new ZkHosts("localhost:2181");
    /*
    String topic = topologyConfig.getProperty("kafka.topic");
    String zkRoot = topologyConfig.getProperty("kafka.zkRoot");
    String consumerGroupId = topologyConfig.getProperty("kafka.consumer.group.id");
    */
    String topic = "addresses";
    String zkRoot = "";
    String consumerGroupId = "group1";

    SpoutConfig spoutConfig = new SpoutConfig(hosts, topic, zkRoot, consumerGroupId);
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());

    return spoutConfig;
}
 
Developer: bucaojit, Project: RealEstate-Streaming, Lines: 18, Source: PhoenixTest.java

Example 2: configureKafkaSpout

import storm.kafka.ZkHosts; // import the required package/class
public static void configureKafkaSpout(TopologyBuilder builder, String zkHostString, String kafkaTopic, 
                                       String kafkaStartOffset, int parallelismHint, String spoutName,
                                       String spoutScheme) {

    LOG.info("KAFKASPOUT: Configuring the KafkaSpout");

    // Configure the KafkaSpout
    SpoutConfig spoutConfig = new SpoutConfig(new ZkHosts(zkHostString),
            kafkaTopic,      // Kafka topic to read from
            "/" + kafkaTopic, // Root path in Zookeeper for the spout to store consumer offsets
            UUID.randomUUID().toString());  // ID for storing consumer offsets in Zookeeper
    try {
        spoutConfig.scheme = new SchemeAsMultiScheme(getSchemeFromClassName(spoutScheme));
    } catch (Exception e) {
        LOG.error("ERROR: Unable to create instance of scheme: " + spoutScheme, e);
    }
    setKafkaOffset(spoutConfig, kafkaStartOffset);
    
    KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);

    // Add the spout and bolt to the topology
    builder.setSpout(spoutName, kafkaSpout, parallelismHint);

}
 
Developer: sakserv, Project: storm-topology-examples, Lines: 26, Source: ConfigureKafkaSpout.java
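The getSchemeFromClassName and setKafkaOffset helpers called above are not shown in the source. The following is a hypothetical reconstruction of both, as members of the same ConfigureKafkaSpout class, assuming reflection for the first and the -2/-1 offset conventions seen elsewhere in this article; the names and details are assumptions, not the project's actual code.

import backtype.storm.spout.Scheme;
import storm.kafka.SpoutConfig;

// Hypothetical reconstruction: instantiate a Scheme implementation from its class name.
private static Scheme getSchemeFromClassName(String spoutScheme) throws Exception {
    return (Scheme) Class.forName(spoutScheme).newInstance();
}

// Hypothetical reconstruction: "-2" means replay from the earliest offset (via the
// forceFromStart workaround), any other non-null value is passed through as a
// startOffsetTime (-1 = latest; cf. kafka.api.OffsetRequest.EarliestTime()/LatestTime()).
private static void setKafkaOffset(SpoutConfig spoutConfig, String kafkaStartOffset) {
    if ("-2".equals(kafkaStartOffset)) {
        spoutConfig.forceFromStart = true;
    } else if (kafkaStartOffset != null) {
        spoutConfig.startOffsetTime = Long.parseLong(kafkaStartOffset);
    }
}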

Example 3: configureKafkaSpout

import storm.kafka.ZkHosts; // import the required package/class
public static void configureKafkaSpout(TopologyBuilder builder, String zkHostString, String kafkaTopic, String kafkaStartOffset) {

        // Configure the KafkaSpout
        SpoutConfig spoutConfig = new SpoutConfig(new ZkHosts(zkHostString),
                kafkaTopic,      // Kafka topic to read from
                "/" + kafkaTopic, // Root path in Zookeeper for the spout to store consumer offsets
                UUID.randomUUID().toString());  // ID for storing consumer offsets in Zookeeper
        //spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
        spoutConfig.scheme = new SchemeAsMultiScheme(new JsonScheme());

        // Allow for passing in an offset time.
        // startOffsetTime has a bug that ignores the special -2 value, hence the
        // forceFromStart workaround; note Strings must be compared with equals(), not ==.
        if ("-2".equals(kafkaStartOffset)) {
            spoutConfig.forceFromStart = true;
        } else if (kafkaStartOffset != null) {
            spoutConfig.startOffsetTime = Long.parseLong(kafkaStartOffset);
        }
        KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);

        // Add the spout and bolt to the topology
        builder.setSpout("kafkaspout", kafkaSpout, 1);

    }
 
Developer: sakserv, Project: storm-kafka-hdfs-starter, Lines: 24, Source: ConfigureKafkaSpout.java
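A minimal usage sketch for the method above; the ZooKeeper address and topic are placeholders, and the local-cluster wiring is an assumption, not code from the original project:

import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.topology.TopologyBuilder;

public class ConfigureKafkaSpoutUsage {
    public static void main(String[] args) {
        TopologyBuilder builder = new TopologyBuilder();
        // "-2" requests the earliest offset via the forceFromStart workaround above.
        ConfigureKafkaSpout.configureKafkaSpout(builder, "localhost:2181", "events", "-2");
        // ... attach bolts to the "kafkaspout" spout here ...
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("kafka-hdfs-starter", new Config(), builder.createTopology());
    }
}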

Example 4: initializeKafkaSpout

import storm.kafka.ZkHosts; // import the required package/class
private boolean initializeKafkaSpout(String name) {
	try {

		BrokerHosts zk = new ZkHosts(config.getString("kafka.zk"));
		String input_topic = config.getString("spout.kafka.topic");
		SpoutConfig kafkaConfig = new SpoutConfig(zk, input_topic, "",
				input_topic);
		kafkaConfig.scheme = new SchemeAsMultiScheme(new RawScheme());
		kafkaConfig.forceFromStart = true; // ignore offsets committed in ZooKeeper...
		kafkaConfig.startOffsetTime = -1;  // ...and start from the latest offset instead

		builder.setSpout(name, new KafkaSpout(kafkaConfig),
				config.getInt("spout.kafka.parallelism.hint")).setNumTasks(
				config.getInt("spout.kafka.num.tasks"));

	} catch (Exception e) {
		e.printStackTrace();
		System.exit(1); // signal failure with a non-zero exit code
	}

	return true;
}
 
Developer: OpenSOC, Project: opensoc-streaming, Lines: 23, Source: TopologyRunner.java

Example 5: constructKafkaSpoutConf

import storm.kafka.ZkHosts; // import the required package/class
/**
 * Constructs the Kafka spout configuration from the topology properties.
 * @return the configured SpoutConfig
 */
private SpoutConfig constructKafkaSpoutConf() {
	BrokerHosts hosts = new ZkHosts(topologyConfig.getProperty("kafka.zookeeper.host.port"));
	String topic = topologyConfig.getProperty("kafka.topic");
	String zkRoot = topologyConfig.getProperty("kafka.zkRoot");
	String consumerGroupId = topologyConfig.getProperty("kafka.consumer.group.id");
	
	SpoutConfig spoutConfig = new SpoutConfig(hosts, topic, zkRoot, consumerGroupId);
	
	/* Custom TruckScheme2 that takes a Kafka message holding a single truckEvent
	 * and emits a 2-tuple of (driverId, truckEvent). The driverId is required for a
	 * fieldsGrouping so that all events for a given driver go to the same set of bolts. */
	spoutConfig.scheme = new SchemeAsMultiScheme(new TruckScheme2());
	
	return spoutConfig;
}
 
Developer: patw, Project: storm-sample, Lines: 20, Source: TruckEventProcessorKafkaTopology.java
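TruckScheme2 itself is not shown in the source. The following is a hypothetical sketch of such a scheme, assuming the pre-1.0 backtype.storm.spout.Scheme interface and a CSV-encoded truck event whose first field is the driverId (both assumptions):

import java.util.List;

import backtype.storm.spout.Scheme;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;

public class TruckSchemeSketch implements Scheme {
    // Decode one Kafka message into a (driverId, truckEvent) 2-tuple.
    @Override
    public List<Object> deserialize(byte[] bytes) {
        String truckEvent = new String(bytes);
        String driverId = truckEvent.split(",")[0]; // assumes driverId is the first CSV field
        return new Values(driverId, truckEvent);
    }

    @Override
    public Fields getOutputFields() {
        return new Fields("driverId", "truckEvent");
    }
}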

Example 6: main

import storm.kafka.ZkHosts; // import the required package/class
public static void main(String[] args) {
    TopologyBuilder builder = new TopologyBuilder();

    SpoutConfig spoutConf = new SpoutConfig(new ZkHosts("localhost:2181", "/brokers"), "test", "/kafkastorm", "KafkaSpout");
    spoutConf.scheme = new SchemeAsMultiScheme(new StringScheme());
    spoutConf.forceFromStart = true;

    builder.setSpout("KafkaSpout", new KafkaSpout(spoutConf), 3);
    builder.setBolt("KafkaBolt", new PrinterBolt(), 3).shuffleGrouping("KafkaSpout");

    Config conf = new Config();
    // conf.setDebug(true);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("kafka-test", conf, builder.createTopology());

    Utils.sleep(60000);
    cluster.shutdown();
}
 
Developer: ogidogi, Project: kafka-storm-hive, Lines: 20, Source: KafkaStormTopology.java

Example 7: buildTopology

import storm.kafka.ZkHosts; // import the required package/class
public static StormTopology buildTopology(String hdfsUrl) {
    TridentKafkaConfig tridentKafkaConfig = new TridentKafkaConfig(new ZkHosts(ZKHOST, "/brokers"), KAFKA_TOPIC);
    tridentKafkaConfig.scheme = new SchemeAsMultiScheme(new RawScheme());
    tridentKafkaConfig.startOffsetTime = -1; // equivalent to forceStartOffsetTime(-1): read the latest messages from Kafka

    TransactionalTridentKafkaSpout tridentKafkaSpout = new TransactionalTridentKafkaSpout(tridentKafkaConfig);

    TridentTopology topology = new TridentTopology();

    Stream stream = topology.newStream("stream", tridentKafkaSpout);

    FileNameFormat fileNameFormat = new DefaultFileNameFormat().withPath(HDFS_OUT_PATH).withPrefix("trident").withExtension(".txt");
    FileRotationPolicy rotationPolicy = new FileSizeCountRotationPolicy(5.0f, FileSizeRotationPolicy.Units.MB, 10);
    HdfsState.Options seqOpts = new HdfsState.HdfsFileOptions().withFileNameFormat(fileNameFormat)
            .withRecordFormat(new DelimitedRecordFormat().withFieldDelimiter("|").withFields(new Fields("json")))
            .withRotationPolicy(rotationPolicy).withFsUrl(hdfsUrl)
            // .addRotationAction(new MoveFileAction().toDestination(HDFS_ROTATE_PATH));
            // .addRotationAction(new AddSuffixFileAction().withSuffix("-processed"));
            .addRotationAction(new MD5FileAction());
    StateFactory factory = new HdfsStateFactory().withOptions(seqOpts);

    stream.each(new Fields("bytes"), new JacksonJsonParser(), new Fields("json")).partitionPersist(factory, new Fields("json"),
            new HdfsUpdater(), new Fields());

    return topology.build();
}
 
Developer: ogidogi, Project: kafka-storm-hive, Lines: 27, Source: HDFSSequenceTopology.java
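MD5FileAction (like FileSizeCountRotationPolicy) is a custom class whose source is not shown. A hypothetical sketch follows, assuming it implements storm-hdfs's RotationAction interface (execute(FileSystem, Path)); the digest handling is an assumption:

import java.io.IOException;
import java.io.InputStream;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.storm.hdfs.common.rotation.RotationAction;

public class MD5FileActionSketch implements RotationAction {
    // Called by HdfsState each time a file is rotated out.
    @Override
    public void execute(FileSystem fileSystem, Path filePath) throws IOException {
        try (InputStream in = fileSystem.open(filePath)) {
            MessageDigest md5 = MessageDigest.getInstance("MD5");
            byte[] buf = new byte[8192];
            for (int n = in.read(buf); n > 0; n = in.read(buf)) {
                md5.update(buf, 0, n);
            }
            // A real implementation would persist the digest, e.g. to a ".md5" sidecar file.
        } catch (NoSuchAlgorithmException e) {
            throw new IOException(e);
        }
    }
}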

Example 8: main

import storm.kafka.ZkHosts; // import the required package/class
public static void main(String[] args) throws Exception {
    Config conf = new Config();

    if (args.length == 2) {
        // Ready & submit the topology
        String name = args[0];
        BrokerHosts hosts = new ZkHosts(args[1]);
        TransactionalTridentKafkaSpout kafkaSpout = TestUtils.testTweetSpout(hosts);

        StormSubmitter.submitTopology(name, conf, buildTopology(kafkaSpout));

    } else {
        System.err.println("Usage: <topologyName> <zookeeperHost>");
    }

}
 
Developer: eshioji, Project: trident-tutorial, Lines: 17, Source: TopHashtagByFollowerClass.java
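Examples 8-10 all rely on a TestUtils.testTweetSpout helper whose source is not shown. A hypothetical sketch of what it plausibly does, assuming the topic name and client id (the real tutorial may differ):

import backtype.storm.spout.SchemeAsMultiScheme;
import storm.kafka.BrokerHosts;
import storm.kafka.StringScheme;
import storm.kafka.trident.TransactionalTridentKafkaSpout;
import storm.kafka.trident.TridentKafkaConfig;

public final class TestUtils {
    // Hypothetical reconstruction: build a transactional Trident spout over a tweet topic.
    public static TransactionalTridentKafkaSpout testTweetSpout(BrokerHosts hosts) {
        TridentKafkaConfig config = new TridentKafkaConfig(hosts, "test", "storm"); // topic and client id are assumptions
        config.scheme = new SchemeAsMultiScheme(new StringScheme());
        return new TransactionalTridentKafkaSpout(config);
    }
}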

Example 9: main

import storm.kafka.ZkHosts; // import the required package/class
public static void main(String[] args) throws Exception {
    Config conf = new Config();


    if (args.length == 2) {
        // Ready & submit the topology
        String name = args[0];
        BrokerHosts hosts = new ZkHosts(args[1]);
        TransactionalTridentKafkaSpout kafkaSpout = TestUtils.testTweetSpout(hosts);

        StormSubmitter.submitTopology(name, conf, buildTopology(kafkaSpout));

    } else {
        System.err.println("Usage: <topologyName> <zookeeperHost>");
    }

}
 
Developer: eshioji, Project: trident-tutorial, Lines: 18, Source: RealTimeTextSearch.java

Example 10: main

import storm.kafka.ZkHosts; // import the required package/class
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setNumWorkers(6);

    if (args.length == 2) {
        // Ready & submit the topology
        String name = args[0];
        BrokerHosts hosts = new ZkHosts(args[1]);
        TransactionalTridentKafkaSpout kafkaSpout = TestUtils.testTweetSpout(hosts);

        StormSubmitter.submitTopology(name, conf, buildTopology(kafkaSpout));

    } else {
        System.err.println("Usage: <topologyName> <zookeeperHost>");
    }

}
 
Developer: eshioji, Project: trident-tutorial, Lines: 18, Source: TopHashtagFollowerCountGrouping.java

Example 11: main

import storm.kafka.ZkHosts; // import the required package/class
public static void main(String[] args) {

	Config conf = new Config();
	// conf.put(Config.TOPOLOGY_DEBUG, true);
	conf.put(StormElasticSearchConstants.ES_CLUSTER_NAME, "elasticsearch");
	conf.put(StormElasticSearchConstants.ES_HOST, "localhost");
	conf.put(StormElasticSearchConstants.ES_PORT, 9300);

	ZkHosts zooHosts = new ZkHosts("localhost:50003");
	TopologyBuilder builder = new TopologyBuilder();
	SpoutConfig spoutConfig = new SpoutConfig(zooHosts, "test", "", "STORM-ID");

	// spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
	KafkaSpout spout1 = new KafkaSpout(spoutConfig);
	builder.setSpout("source", spout1, 1);
	builder.setBolt("echo", new EchoBolt(), 1).shuffleGrouping("source");

	LocalCluster cluster = new LocalCluster();
	cluster.submitTopology("basic_primitives", conf, builder.createTopology());
}
 
Developer: Produban, Project: openbus, Lines: 24, Source: kafkaSpoutTest.java
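Note that with the scheme assignment left commented out, the spout falls back to storm-kafka's default: KafkaConfig initializes scheme to a RawMultiScheme, so "source" emits single-field tuples of raw message bytes rather than decoded strings. Uncommenting the StringScheme line would emit UTF-8 strings instead.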

Example 12: ZkBrokerReader

import storm.kafka.ZkHosts; // import the required package/class
public ZkBrokerReader(Map conf, String topic, ZkHosts hosts) {
	try {
		reader = new DynamicBrokersReader(conf, hosts.brokerZkStr, hosts.brokerZkPath, topic);
		cachedBrokers = reader.getBrokerInfo();
		lastRefreshTimeMs = System.currentTimeMillis();
		refreshMillis = hosts.refreshFreqSecs * 1000L;
	} catch (java.net.SocketTimeoutException e) {
		LOG.warn("Failed to update brokers", e);
	}
}
 
Developer: redBorder, Project: rb-bi, Lines: 12, Source: ZkBrokerReader.java
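The constructor above only primes the broker cache. In the stock storm-kafka ZkBrokerReader, a companion getCurrentBrokers() method refreshes it once refreshMillis has elapsed; a sketch of that logic, reusing the fields declared in the class above:

import storm.kafka.trident.GlobalPartitionInformation;

// Sketch of the companion refresh method (fields as in the constructor above).
public GlobalPartitionInformation getCurrentBrokers() {
    long currTime = System.currentTimeMillis();
    if (currTime > lastRefreshTimeMs + refreshMillis) {
        try {
            cachedBrokers = reader.getBrokerInfo();  // re-query ZooKeeper for broker info
            lastRefreshTimeMs = currTime;
        } catch (java.net.SocketTimeoutException e) {
            LOG.warn("Failed to update brokers", e); // keep serving the cached value
        }
    }
    return cachedBrokers;
}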

Example 13: TridentKafkaSpout

import storm.kafka.ZkHosts; // import the required package/class
/**
 * Constructor
 *
 * @param config Config file to read properties from
 * @param section Section of the kafka config file to read properties from.
 */
public TridentKafkaSpout(ConfigData config, String section) {
    _kafkaConfig = new TridentKafkaConfig(new ZkHosts(config.getZkHost()), config.getTopic(section), "stormKafka");
    _kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    _kafkaConfig.bufferSizeBytes = config.getFetchSizeKafka();
    _kafkaConfig.fetchSizeBytes = config.getFetchSizeKafka();
    _kafkaConfig.forceFromStart = false;
}
 
Developer: redBorder, Project: rb-bi, Lines: 14, Source: TridentKafkaSpout.java

Example 14: TridentKafkaSpoutNmsp

import storm.kafka.ZkHosts; // import the required package/class
/**
 * Constructor
 *
 * @param config Config file to read properties from
 * @param section Section of the kafka config file to read properties from.
 */
public TridentKafkaSpoutNmsp(ConfigData config, String section) {
    _kafkaConfig = new TridentKafkaConfig(new ZkHosts(config.getZkHost()), config.getTopic(section), "stormKafka");
    _kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    _kafkaConfig.bufferSizeBytes = config.getFetchSizeKafkaNmsp();
    _kafkaConfig.fetchSizeBytes = config.getFetchSizeKafkaNmsp();
    _kafkaConfig.forceFromStart = false;
}
 
Developer: redBorder, Project: rb-bi, Lines: 14, Source: TridentKafkaSpoutNmsp.java

Example 15: TridentKafkaSpoutLocation

import storm.kafka.ZkHosts; // import the required package/class
/**
 * Constructor
 *
 * @param config Config file to read properties from
 * @param section Section of the kafka config file to read properties from.
 */
public TridentKafkaSpoutLocation(ConfigData config, String section) {
    _kafkaConfig = new TridentKafkaConfig(new ZkHosts(config.getZkHost()), config.getTopic(section), "stormKafka");
    _kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    _kafkaConfig.bufferSizeBytes = config.getFetchSizeKafkaLocation();
    _kafkaConfig.fetchSizeBytes = config.getFetchSizeKafkaLocation();
    _kafkaConfig.forceFromStart = false;
}
 
Developer: redBorder, Project: rb-bi, Lines: 14, Source: TridentKafkaSpoutLocation.java
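Examples 13-15 differ only in which fetch-size property they read. For completeness, a minimal sketch of how such a config typically feeds a Trident topology; the topic, ZooKeeper address, and wiring are placeholders, not code from the rb-bi project:

import backtype.storm.spout.SchemeAsMultiScheme;
import storm.kafka.StringScheme;
import storm.kafka.ZkHosts;
import storm.kafka.trident.TransactionalTridentKafkaSpout;
import storm.kafka.trident.TridentKafkaConfig;
import storm.trident.TridentTopology;

public class TridentSpoutUsageSketch {
    public static void main(String[] args) {
        TridentKafkaConfig kafkaConfig =
                new TridentKafkaConfig(new ZkHosts("localhost:2181"), "rb_flow", "stormKafka");
        kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
        kafkaConfig.forceFromStart = false; // resume from committed offsets, as above

        TridentTopology topology = new TridentTopology();
        topology.newStream("kafka-stream", new TransactionalTridentKafkaSpout(kafkaConfig));
        // ... .each(...) / .partitionPersist(...) processing would follow here ...
    }
}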


Note: The storm.kafka.ZkHosts class examples in this article were compiled by 纯净天空 from open-source code and documentation hosting platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers, and copyright in the code remains with the original authors. Consult the corresponding project's License before distributing or using the code, and do not republish without permission.