

Java ConsumerConfig Class Code Examples

This article collects typical usage examples of the Java class kafka.consumer.ConsumerConfig. If you have been wondering what ConsumerConfig is for, or how to use it in practice, the curated class code examples below may help.


The ConsumerConfig class belongs to the kafka.consumer package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code samples.
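Before working through the examples, note the pattern they all share: populate a java.util.Properties object with at least zookeeper.connect and group.id, wrap it in a ConsumerConfig, and pass that to Consumer.createJavaConsumerConnector to obtain message streams. The following minimal sketch condenses that pattern; the ZooKeeper address, group id, and topic name are placeholder assumptions for illustration, not values taken from any example below.

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

public class ConsumerConfigQuickstart {
    public static void main(String[] args) {
        // Minimal required settings; "localhost:2181" and "demo-group" are
        // placeholder values, not defaults of the library.
        Properties props = new Properties();
        props.put("zookeeper.connect", "localhost:2181");
        props.put("group.id", "demo-group");
        props.put("auto.commit.interval.ms", "1000");

        // ConsumerConfig reads the Properties once at construction time.
        ConsumerConfig config = new ConsumerConfig(props);
        ConsumerConnector connector = Consumer.createJavaConsumerConnector(config);

        // Request a single stream for the topic and iterate over incoming messages.
        Map<String, List<KafkaStream<byte[], byte[]>>> streams =
            connector.createMessageStreams(Collections.singletonMap("demo-topic", 1));
        ConsumerIterator<byte[], byte[]> it = streams.get("demo-topic").get(0).iterator();
        while (it.hasNext()) {
            System.out.println(new String(it.next().message()));
        }
    }
}

As the examples below show, all tuning (session timeouts, offset reset policy, commit interval) happens through the same Properties object before the ConsumerConfig constructor is called.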

Example 1: OldApiTopicConsumer

import kafka.consumer.ConsumerConfig; // import the required package/class
/**
 * Builds a topic consumer on the old (ZooKeeper-based) consumer API.
 *
 * @param context consumer context carrying the properties, message handlers and process-thread limit
 */
@SuppressWarnings("unchecked")
public OldApiTopicConsumer(ConsumerContext context) {

    this.consumerContext = context;
    try {
        Class<?> deserializerClass = Class
            .forName(context.getProperties().getProperty("value.deserializer"));
        deserializer = (Deserializer<Object>) deserializerClass.newInstance();
    } catch (Exception e) {
        logger.error("Failed to instantiate the configured value.deserializer", e);
    }
    this.connector = kafka.consumer.Consumer
        .createJavaConsumerConnector(new ConsumerConfig(context.getProperties()));

    int poolSize = consumerContext.getMessageHandlers().size();
    this.fetchExecutor = new StandardThreadExecutor(poolSize, poolSize, 0, TimeUnit.SECONDS,
        poolSize, new StandardThreadFactory("KafkaFetcher"));

    this.defaultProcessExecutor = new StandardThreadExecutor(1, context.getMaxProcessThreads(),
        30, TimeUnit.SECONDS, context.getMaxProcessThreads(),
        new StandardThreadFactory("KafkaProcessor"), new PoolFullRunsPolicy());

    logger.info(
        "Kafka Conumer ThreadPool initialized,fetchPool Size:{},defalutProcessPool Size:{} ",
        poolSize, context.getMaxProcessThreads());
}
 
Developer ID: warlock-china, Project: azeroth, Lines: 32, Source: OldApiTopicConsumer.java

Example 2: KafkaDataProvider

import kafka.consumer.ConsumerConfig; // import the required package/class
public KafkaDataProvider(String zookeeper, String topic, String groupId) {
  super(MessageAndMetadata.class);
  Properties props = new Properties();
  props.put("zookeeper.connect", zookeeper);
  props.put("group.id", groupId);
  props.put("zookeeper.session.timeout.ms", "30000");
  props.put("auto.commit.interval.ms", "1000");
  props.put("fetch.message.max.bytes", "4194304");
  consumer = kafka.consumer.Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
  Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
  topicCountMap.put(topic, 1);
  Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
  KafkaStream<byte[], byte[]> stream = consumerMap.get(topic).get(0);

  iter = stream.iterator();
}
 
Developer ID: XiaoMi, Project: linden, Lines: 17, Source: KafkaDataProvider.java

Example 3: createConsumerConfig

import kafka.consumer.ConsumerConfig; // import the required package/class
/**
 * Builds the kafka-consumer configuration.
 *
 * @param zookeeper ZooKeeper address with port
 * @param groupId   kafka-consumer consumer group
 * @return a ConsumerConfig object
 */
private static ConsumerConfig createConsumerConfig(String zookeeper, String groupId) {
    Properties props = new Properties();
    props.put("zookeeper.connect", zookeeper);
    props.put("group.id", groupId);
    props.put("zookeeper.session.timeout.ms", "400");
    props.put("zookeeper.sync.time.ms", "200");
    props.put("auto.commit.interval.ms", "1000");
    return new ConsumerConfig(props);
}
 
Developer ID: zhai3516, Project: storm-demos, Lines: 23, Source: KafkaDataSpout.java

Example 4: createConsumerConfig

import kafka.consumer.ConsumerConfig; // import the required package/class
private ConsumerConfig createConsumerConfig(String groupId,
		String consumerId) {
	final Properties props = new Properties();
	props.put("zookeeper.connect", fZooKeeper);
	props.put("group.id", groupId);
	props.put("consumer.id", consumerId);
	//props.put("auto.commit.enable", "false");
	// additional settings: start with our defaults, then pull in configured
	// overrides
	props.putAll(KafkaInternalDefaults);
	for (String key : KafkaConsumerKeys) {
		transferSettingIfProvided(props, key, "kafka");
	}

	return new ConsumerConfig(props);
}
 
Developer ID: att, Project: dmaap-framework, Lines: 17, Source: DMaaPKafkaConsumerFactory.java

Example 5: open

import kafka.consumer.ConsumerConfig; // import the required package/class
public void open(Map map, TopologyContext topologyContext, SpoutOutputCollector spoutOutputCollector) {
    _collector = spoutOutputCollector;
    Properties props = new Properties();
    props.put("zookeeper.connect", conf.get(OSMIngest.ZOOKEEPERS));
    props.put("group.id", groupId);
    props.put("zookeeper.sync.time.ms", "200");
    props.put("auto.commit.interval.ms", "1000");
    ConsumerConfig consumerConfig = new ConsumerConfig(props);
    ConsumerConnector consumer = Consumer.createJavaConsumerConnector(consumerConfig);
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(topic, 1);
    Map<String, List<KafkaStream<String, String>>> consumerMap = consumer.createMessageStreams(topicCountMap, new StringDecoder(new VerifiableProperties()), new StringDecoder(new VerifiableProperties()));
    List<KafkaStream<String, String>> streams = consumerMap.get(topic);
    KafkaStream<String, String> stream;
    if (streams.size() == 1) {
        stream = streams.get(0);
    } else {
        log.error("Streams should be of size 1");
        throw new IllegalStateException("Expected exactly one stream for topic " + topic + " but got " + streams.size());
    }
    kafkaIterator = stream.iterator();
}
 
Developer ID: geomesa, Project: geomesa-tutorials, Lines: 22, Source: OSMKafkaSpout.java

Example 6: readTopicToList

import kafka.consumer.ConsumerConfig; // import the required package/class
/**
 * Read topic to list, only using Kafka code.
 */
private static List<MessageAndMetadata<byte[], byte[]>> readTopicToList(String topicName, ConsumerConfig config, final int stopAfter) {
	ConsumerConnector consumerConnector = Consumer.createJavaConsumerConnector(config);
	// we request only one stream per consumer instance. Kafka will make sure that each consumer group
	// will see each message only once.
	Map<String,Integer> topicCountMap = Collections.singletonMap(topicName, 1);
	Map<String, List<KafkaStream<byte[], byte[]>>> streams = consumerConnector.createMessageStreams(topicCountMap);
	if (streams.size() != 1) {
		throw new RuntimeException("Expected only one message stream but got "+streams.size());
	}
	List<KafkaStream<byte[], byte[]>> kafkaStreams = streams.get(topicName);
	if (kafkaStreams == null) {
		throw new RuntimeException("Requested stream not available. Available streams: "+streams.toString());
	}
	if (kafkaStreams.size() != 1) {
		throw new RuntimeException("Requested 1 stream from Kafka, bot got "+kafkaStreams.size()+" streams");
	}
	LOG.info("Opening Consumer instance for topic '{}' on group '{}'", topicName, config.groupId());
	ConsumerIterator<byte[], byte[]> iteratorToRead = kafkaStreams.get(0).iterator();

	List<MessageAndMetadata<byte[], byte[]>> result = new ArrayList<>();
	int read = 0;
	while(iteratorToRead.hasNext()) {
		read++;
		result.add(iteratorToRead.next());
		if (read == stopAfter) {
			LOG.info("Read "+read+" elements");
			return result;
		}
	}
	return result;
}
 
Developer ID: axbaretto, Project: flink, Lines: 35, Source: KafkaConsumerTestBase.java

Example 7: prepare

import kafka.consumer.ConsumerConfig; // import the required package/class
@SuppressWarnings("unchecked")
public void prepare() {
	Properties props = geneConsumerProp();
	
	for(String topicName : topic.keySet()){
		ConsumerConnector consumer = kafka.consumer.Consumer
				.createJavaConsumerConnector(new ConsumerConfig(props));
		
		consumerConnMap.put(topicName, consumer);
	}
	if(distributed!=null){
		try {
			logger.warn("zkDistributed is start...");
			zkDistributed = ZkDistributed.getSingleZkDistributed(distributed);
			zkDistributed.zkRegistration();
		} catch (Exception e) {
			logger.error("zkRegistration fail:{}", ExceptionUtil.getErrorMessage(e));
		}
	}
}
 
Developer ID: DTStack, Project: jlogstash-input-plugin, Lines: 22, Source: KafkaDistributed.java

Example 8: reconnConsumer

import kafka.consumer.ConsumerConfig; // import the required package/class
public void reconnConsumer(String topicName){
		
		// stop the ConsumerConnector for this topic
		ConsumerConnector consumerConn = consumerConnMap.get(topicName);
		consumerConn.commitOffsets(true);
		consumerConn.shutdown();
		consumerConnMap.remove(topicName);
		
		// stop the stream-consuming threads for this topic
		ExecutorService es = executorMap.get(topicName);
		es.shutdownNow();
		executorMap.remove(topicName);

		Properties prop = geneConsumerProp();
		ConsumerConnector newConsumerConn = kafka.consumer.Consumer
				.createJavaConsumerConnector(new ConsumerConfig(prop));
		consumerConnMap.put(topicName, newConsumerConn);

		addNewConsumer(topicName, topic.get(topicName));
}
 
Developer ID: DTStack, Project: jlogstash-input-plugin, Lines: 21, Source: KafkaDistributed.java

Example 9: reconnConsumer

import kafka.consumer.ConsumerConfig; // import the required package/class
public void reconnConsumer(String topicName){
	
	// stop the ConsumerConnector for this topic
	ConsumerConnector consumerConn = consumerConnMap.get(topicName);
	consumerConn.commitOffsets(true);
	consumerConn.shutdown();
	consumerConnMap.remove(topicName);
	
	// stop the stream-consuming threads for this topic
	ExecutorService es = executorMap.get(topicName);
	es.shutdownNow();	
	executorMap.remove(topicName);
	
	Properties prop = geneConsumerProp();
	ConsumerConnector newConsumerConn = kafka.consumer.Consumer
			.createJavaConsumerConnector(new ConsumerConfig(prop));
	consumerConnMap.put(topicName, newConsumerConn);
	
	addNewConsumer(topicName, topic.get(topicName));
}
 
Developer ID: DTStack, Project: jlogstash-input-plugin, Lines: 21, Source: Kafka.java

Example 10: KafkaConsumerConnector

import kafka.consumer.ConsumerConfig; // import the required package/class
public KafkaConsumerConnector(String zk, String groupName) {
    //Get group id which should be unique for table so as to keep offsets clean for multiple runs.
    String groupId = "voltdb-" + groupName;
    //TODO: Should get this from properties file or something as override?
    Properties props = new Properties();
    props.put("zookeeper.connect", zk);
    props.put("group.id", groupId);
    props.put("zookeeper.session.timeout.ms", "400");
    props.put("zookeeper.sync.time.ms", "200");
    props.put("auto.commit.interval.ms", "1000");
    props.put("auto.commit.enable", "true");
    props.put("auto.offset.reset", "smallest");
    props.put("rebalance.backoff.ms", "10000");

    m_consumerConfig = new ConsumerConfig(props);

    m_consumer = kafka.consumer.Consumer.createJavaConsumerConnector(m_consumerConfig);
}
 
Developer ID: anhnv-3991, Project: VoltDB, Lines: 19, Source: KafkaLoader.java

Example 11: startConsumers

import kafka.consumer.ConsumerConfig; // import the required package/class
@Override
public CompletionService<Histogram> startConsumers() {
    final ConsumerConfig consumerConfig = new ConsumerConfig(props);

    consumerConnector = Consumer.createJavaConsumerConnector(consumerConfig);

    // Create message streams
    final Map<String, Integer> topicMap = new HashMap<>();
    topicMap.put(topic, numThreads);

    final Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumerConnector.createMessageStreams(topicMap);
    final List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);

    // Pass each stream to a consumer that will read from the stream in its own thread.
    for (final KafkaStream<byte[], byte[]> stream : streams) {
        executorCompletionService.submit(new BlockingKafkaMessageConsumer(stream));
    }

    return executorCompletionService;
}
 
Developer ID: eHarmony, Project: benchmarkio, Lines: 21, Source: BlockingKafkaMessageConsumerCoordinator.java

Example 12: initialize

import kafka.consumer.ConsumerConfig; // import the required package/class
/**
 * {@inheritDoc}
 */
@Override
public void initialize()
    throws StreamingException
{
    ConsumerConfig consumerConfig = new ConsumerConfig(kafkaProperties);
    consumerConnector = Consumer.createJavaConsumerConnector(consumerConfig);

    Map<String, Integer> topicCountMap = Maps.newHashMap();
    topicCountMap.put(topic, TOPIC_COUNT);

    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
        consumerConnector.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(topic).get(0);
    consumerIterator = stream.iterator();
}
 
Developer ID: HuaweiBigData, Project: StreamCQL, Lines: 19, Source: KafkaSourceOp.java

Example 13: main

import kafka.consumer.ConsumerConfig; // import the required package/class
public static void main(String[] args) throws Exception {
    if (id == null) throw new IllegalStateException("Undefined HC_ID");
    if (zk == null) throw new IllegalStateException("Undefined HC_ZK");

    out.println("Starting " + HttpClient.class.getSimpleName());
    out.println("Using zk:" + zk + ", id:" + id);

    Properties props = new Properties();
    props.put("zookeeper.connect", zk);
    props.put("group.id", id);
    props.put("zookeeper.session.timeout.ms", "400");
    props.put("zookeeper.sync.time.ms", "200");

    ConsumerConnector consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
    KafkaStream<byte[],byte[]> stream = consumer.createMessageStreams(Collections.singletonMap(id, 1)).get(id).get(0);

    consume(consumer, stream);
}
 
Developer ID: stealthly, Project: punxsutawney, Lines: 19, Source: HttpClient.java

Example 14: createDefaultConsumerConfig

import kafka.consumer.ConsumerConfig; // import the required package/class
/**
 * Creates default consumer config.
 *
 * @param zooKeeper ZooKeeper address &lt;server:port&gt;.
 * @param grpId Group Id for kafka subscriber.
 * @return Kafka consumer configuration.
 */
private ConsumerConfig createDefaultConsumerConfig(String zooKeeper, String grpId) {
    A.notNull(zooKeeper, "zookeeper");
    A.notNull(grpId, "groupId");

    Properties props = new Properties();

    props.put("zookeeper.connect", zooKeeper);
    props.put("group.id", grpId);
    props.put("zookeeper.session.timeout.ms", "400");
    props.put("zookeeper.sync.time.ms", "200");
    props.put("auto.commit.interval.ms", "1000");
    props.put("auto.offset.reset", "smallest");

    return new ConsumerConfig(props);
}
 
Developer ID: apache, Project: ignite, Lines: 23, Source: KafkaIgniteStreamerSelfTest.java

Example 15: createKafkaStream

import kafka.consumer.ConsumerConfig; // import the required package/class
public List<KafkaStream<byte[], byte[]>> createKafkaStream(
    String zookeeperConnectString,
    String topic,
    int partitions
) {
  //create consumer
  Properties consumerProps = new Properties();
  consumerProps.put("zookeeper.connect", zookeeperConnectString);
  consumerProps.put("group.id", "testClient");
  consumerProps.put("zookeeper.session.timeout.ms", "6000");
  consumerProps.put("zookeeper.sync.time.ms", "200");
  consumerProps.put("auto.commit.interval.ms", "1000");
  consumerProps.put("consumer.timeout.ms", "500");
  ConsumerConfig consumerConfig = new ConsumerConfig(consumerProps);
  ConsumerConnector consumer = Consumer.createJavaConsumerConnector(consumerConfig);
  Map<String, Integer> topicCountMap = new HashMap<>();
  topicCountMap.put(topic, partitions);
  Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
  return consumerMap.get(topic);
}
 
Developer ID: streamsets, Project: datacollector, Lines: 21, Source: SdcKafkaTestUtil.java


Note: The kafka.consumer.ConsumerConfig class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by the community, and copyright remains with the original authors. Refer to each project's license before distributing or using the code; do not reproduce without permission.