Java ConsumerConnector Class Code Examples

This article collects typical usage examples of the kafka.javaapi.consumer.ConsumerConnector class in Java. If you are wondering what the ConsumerConnector class does, how to use it, or want to see it in action, the curated examples below should help.


The ConsumerConnector class belongs to the kafka.javaapi.consumer package. Fifteen code examples of the class are shown below, sorted by popularity by default.
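All fifteen examples follow the same high-level (ZooKeeper-based, Kafka 0.8-era) consumer lifecycle: build a kafka.consumer.ConsumerConfig from Properties, create the ConsumerConnector, request one or more KafkaStreams per topic, iterate over messages, then commit offsets and shut down. The following minimal sketch shows that lifecycle end to end; the topic name, ZooKeeper address, and group id are placeholders:

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;

public class ConsumerLifecycleSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("zookeeper.connect", "localhost:2181"); // placeholder address
        props.put("group.id", "demo-group");              // placeholder group id

        ConsumerConnector connector =
                Consumer.createJavaConsumerConnector(new ConsumerConfig(props));

        // Request a single stream for the topic; the map is (topic -> #streams).
        Map<String, List<KafkaStream<byte[], byte[]>>> streams =
                connector.createMessageStreams(Collections.singletonMap("demo-topic", 1));
        ConsumerIterator<byte[], byte[]> it = streams.get("demo-topic").get(0).iterator();

        // hasNext() blocks until a message arrives; read a few and stop.
        for (int i = 0; i < 10 && it.hasNext(); i++) {
            MessageAndMetadata<byte[], byte[]> msg = it.next();
            System.out.println(new String(msg.message()));
        }

        connector.commitOffsets(); // persist consumed offsets
        connector.shutdown();
    }
}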

Example 1: run

import kafka.javaapi.consumer.ConsumerConnector; // import the required package/class
/**
 * When an object implementing interface <code>Runnable</code> is used
 * to create a thread, starting the thread causes the object's
 * <code>run</code> method to be called in that separately executing
 * thread.
 * <p>
 * The general contract of the method <code>run</code> is that it may
 * take any action whatsoever.
 *
 * @see Thread#run()
 */
@Override
public void run() {
    ConsumerConnector consumerConnector = KafkaUtils.createConsumerConnector(zkAddr, group);
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(CONSUMER_OFFSET_TOPIC, 1);
    KafkaStream<byte[], byte[]> offsetMsgStream = consumerConnector.createMessageStreams(topicCountMap).get(CONSUMER_OFFSET_TOPIC).get(0);

    ConsumerIterator<byte[], byte[]> it = offsetMsgStream.iterator();
    while (true) {

        MessageAndMetadata<byte[], byte[]> offsetMsg = it.next();
        // Key schema versions 0 and 1 are offset commits; version 2 is group metadata.
        if (ByteBuffer.wrap(offsetMsg.key()).getShort() < 2) {
            try {
                GroupTopicPartition commitKey = readMessageKey(ByteBuffer.wrap(offsetMsg.key()));
                if (offsetMsg.message() == null) {
                    continue;
                }
                kafka.common.OffsetAndMetadata commitValue = readMessageValue(ByteBuffer.wrap(offsetMsg.message()));
                kafkaConsumerOffsets.put(commitKey, commitValue);
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }
}
 
Author: dubin555, Project: Kafka-Insight, Lines: 37, Source: KafkaOffsetGetter.java
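KafkaUtils.createConsumerConnector, readMessageKey, and readMessageValue above are helpers local to the Kafka-Insight project, not part of the Kafka client API. A plausible sketch of the connector helper, assuming it merely wraps the standard Properties-based construction used in the later examples:

import java.util.Properties;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.javaapi.consumer.ConsumerConnector;

public final class KafkaUtils {
    // Hypothetical reconstruction: build a high-level consumer from a
    // ZooKeeper address and a consumer group, as Example 1 requires.
    public static ConsumerConnector createConsumerConnector(String zkAddr, String group) {
        Properties props = new Properties();
        props.put("zookeeper.connect", zkAddr);
        props.put("group.id", group);
        // Start from the earliest offset so no existing commit messages are skipped.
        props.put("auto.offset.reset", "smallest");
        return Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
    }
}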

Example 2: newConsumerConnector

import kafka.javaapi.consumer.ConsumerConnector; // import the required package/class
@Override
public ConsumerConnector newConsumerConnector(String name, ConsumerConfig configOverrides) {

    Properties mergedProps = new Properties();

    Map<String, String> config = configs.get(name);

    if (config != null) {
        mergedProps.putAll(config);
    }

    if (configOverrides != null) {
        mergedProps.putAll(configOverrides.createConsumerConfig());
    }

    return Consumer.createJavaConsumerConnector(new kafka.consumer.ConsumerConfig(mergedProps));
}
 
Author: bootique, Project: bootique-kafka-client, Lines: 18, Source: DefaultConsumerFactory.java
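Note the merge order: configOverrides is applied after the named configuration, so per-call overrides always win over the values registered under name.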

Example 3: release

import kafka.javaapi.consumer.ConsumerConnector; // import the required package/class
@Override
public void release() {
	try {
		// Commit offsets before shutting down each connector so progress is not lost.
		for(ConsumerConnector consumer : consumerConnMap.values()){
			consumer.commitOffsets(true);
			consumer.shutdown();
		}
		for(ExecutorService executor : executorMap.values()){
			executor.shutdownNow();
		}

		if(scheduleExecutor != null){
			scheduleExecutor.shutdownNow();
		}

		this.zkDistributed.realse();
	} catch (Exception e) {
		logger.error(ExceptionUtil.getErrorMessage(e));
	}
}
 
Author: DTStack, Project: jlogstash-input-plugin, Lines: 22, Source: KafkaDistributed.java

Example 4: KafkaConsumer

import kafka.javaapi.consumer.ConsumerConnector; // import the required package/class
/**
 * Constructor. It takes the following four parameters:
 * @param topic the topic to consume from
 * @param group the consumer group id
 * @param id the consumer instance id
 * @param cc the underlying ConsumerConnector
 * 
 */

public KafkaConsumer(String topic, String group, String id, ConsumerConnector cc) {
	fTopic = topic;
	fGroup = group;
	fId = id;
	fConnector = cc;

	fCreateTimeMs = System.currentTimeMillis();
	fLastTouch = fCreateTimeMs;

	fLogTag = fGroup + "(" + fId + ")/" + fTopic;
	offset = 0;

	state = KafkaConsumer.State.OPENED;

	final Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
	topicCountMap.put(fTopic, 1);
	final Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = fConnector
			.createMessageStreams(topicCountMap);
	final List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(fTopic);
	fStream = streams.iterator().next();
}
 
Author: att, Project: dmaap-framework, Lines: 31, Source: KafkaConsumer.java

Example 5: close

import kafka.javaapi.consumer.ConsumerConnector; // import the required package/class
@Override
public synchronized void close() throws IOException {
    logger.debug("Stop kafka fetcher. [topic: {}]", topics);
    ConsumerConnector connector = this.connector;
    this.connector = null;
    if (connector != null) {
        connector.commitOffsets();
        connector.shutdown();
    }

    IOUtil.closeQuietly(eventItr);
    // Some events could still exist in the buffer; try to save them.
    List<byte[]> remaining = new ArrayList<>();
    try {
        while (eventItr.hasNext()) {
            remaining.add(eventItr.next());
        }
    } catch (Exception e) {
        // Ignore
    }
    eventItr = null;
    if (!remaining.isEmpty()) {
        this.remaining = remaining;
    }
}
 
Author: shunfei, Project: indexr, Lines: 26, Source: Kafka08Fetcher.java

Example 6: open

import kafka.javaapi.consumer.ConsumerConnector; // import the required package/class
public void open(Map map, TopologyContext topologyContext, SpoutOutputCollector spoutOutputCollector) {
    _collector = spoutOutputCollector;
    Properties props = new Properties();
    props.put("zookeeper.connect", conf.get(OSMIngest.ZOOKEEPERS));
    props.put("group.id", groupId);
    props.put("zookeeper.sync.time.ms", "200");
    props.put("auto.commit.interval.ms", "1000");
    ConsumerConfig consumerConfig = new ConsumerConfig(props);
    ConsumerConnector consumer = Consumer.createJavaConsumerConnector(consumerConfig);
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(topic, 1);
    Map<String, List<KafkaStream<String, String>>> consumerMap = consumer.createMessageStreams(topicCountMap, new StringDecoder(new VerifiableProperties()), new StringDecoder(new VerifiableProperties()));
    List<KafkaStream<String, String>> streams = consumerMap.get(topic);
    KafkaStream<String, String> stream = null;
    if (streams.size() == 1) {
        stream = streams.get(0);
    } else {
        log.error("Streams should be of size 1");
    }
    kafkaIterator = stream.iterator();
}
 
Author: geomesa, Project: geomesa-tutorials, Lines: 22, Source: OSMKafkaSpout.java

Example 7: readTopicToList

import kafka.javaapi.consumer.ConsumerConnector; // import the required package/class
/**
 * Read topic to list, only using Kafka code.
 */
private static List<MessageAndMetadata<byte[], byte[]>> readTopicToList(String topicName, ConsumerConfig config, final int stopAfter) {
	ConsumerConnector consumerConnector = Consumer.createJavaConsumerConnector(config);
	// we request only one stream per consumer instance. Kafka will make sure that each consumer group
	// will see each message only once.
	Map<String,Integer> topicCountMap = Collections.singletonMap(topicName, 1);
	Map<String, List<KafkaStream<byte[], byte[]>>> streams = consumerConnector.createMessageStreams(topicCountMap);
	if (streams.size() != 1) {
		throw new RuntimeException("Expected only one message stream but got "+streams.size());
	}
	List<KafkaStream<byte[], byte[]>> kafkaStreams = streams.get(topicName);
	if (kafkaStreams == null) {
		throw new RuntimeException("Requested stream not available. Available streams: "+streams.toString());
	}
	if (kafkaStreams.size() != 1) {
		throw new RuntimeException("Requested 1 stream from Kafka, bot got "+kafkaStreams.size()+" streams");
	}
	LOG.info("Opening Consumer instance for topic '{}' on group '{}'", topicName, config.groupId());
	ConsumerIterator<byte[], byte[]> iteratorToRead = kafkaStreams.get(0).iterator();

	List<MessageAndMetadata<byte[], byte[]>> result = new ArrayList<>();
	int read = 0;
	while(iteratorToRead.hasNext()) {
		read++;
		result.add(iteratorToRead.next());
		if (read == stopAfter) {
			LOG.info("Read "+read+" elements");
			return result;
		}
	}
	return result;
}
 
Author: axbaretto, Project: flink, Lines: 35, Source: KafkaConsumerTestBase.java
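A hedged usage sketch for the method above; the connection settings are placeholders, and consumer.timeout.ms makes the blocking iterator throw ConsumerTimeoutException instead of waiting forever when fewer messages arrive:

Properties props = new Properties();
props.put("zookeeper.connect", "localhost:2181"); // placeholder
props.put("group.id", "test-reader");             // placeholder
props.put("auto.offset.reset", "smallest");       // read the topic from the beginning
props.put("consumer.timeout.ms", "10000");        // give up instead of blocking forever

List<MessageAndMetadata<byte[], byte[]>> first100 =
		readTopicToList("my-topic", new ConsumerConfig(props), 100);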

Example 8: prepare

import kafka.javaapi.consumer.ConsumerConnector; // import the required package/class
@SuppressWarnings("unchecked")
public void prepare() {
	Properties props = geneConsumerProp();
	
	for(String topicName : topic.keySet()){
		ConsumerConnector consumer = kafka.consumer.Consumer
				.createJavaConsumerConnector(new ConsumerConfig(props));
		
		consumerConnMap.put(topicName, consumer);
	}
	if(distributed!=null){
		try {
			logger.warn("zkDistributed is start...");
			zkDistributed = ZkDistributed.getSingleZkDistributed(distributed);
			zkDistributed.zkRegistration();
		} catch (Exception e) {
			logger.error("zkRegistration fail:{}",ExceptionUtil.getErrorMessage(e));
		}
	}
}
 
Author: DTStack, Project: jlogstash-input-plugin, Lines: 22, Source: KafkaDistributed.java

Example 9: addNewConsumer

import kafka.javaapi.consumer.ConsumerConnector; // import the required package/class
public void addNewConsumer(String topic, Integer threads){
	ConsumerConnector consumer = consumerConnMap.get(topic);
	Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = null;
	
	Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
	topicCountMap.put(topic, threads);
	consumerMap = consumer.createMessageStreams(topicCountMap);
	
	List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
	ExecutorService executor = Executors.newFixedThreadPool(threads);

	for (final KafkaStream<byte[], byte[]> stream : streams) {
		executor.submit(new Consumer(stream, this));
	}
	
	executorMap.put(topic, executor);
}
 
Author: DTStack, Project: jlogstash-input-plugin, Lines: 18, Source: KafkaDistributed.java

Example 10: reconnConsumer

import kafka.javaapi.consumer.ConsumerConnector; // import the required package/class
public void reconnConsumer(String topicName){

	// Stop the connector for this topic.
	ConsumerConnector consumerConn = consumerConnMap.get(topicName);
	consumerConn.commitOffsets(true);
	consumerConn.shutdown();
	consumerConnMap.remove(topicName);

	// Stop the stream-consuming threads for this topic.
	ExecutorService es = executorMap.get(topicName);
	es.shutdownNow();
	executorMap.remove(topicName);

	Properties prop = geneConsumerProp();
	ConsumerConnector newConsumerConn = kafka.consumer.Consumer
			.createJavaConsumerConnector(new ConsumerConfig(prop));
	consumerConnMap.put(topicName, newConsumerConn);

	addNewConsumer(topicName, topic.get(topicName));
}
 
Author: DTStack, Project: jlogstash-input-plugin, Lines: 21, Source: KafkaDistributed.java

Example 11: reconnConsumer

import kafka.javaapi.consumer.ConsumerConnector; // import the required package/class
public void reconnConsumer(String topicName){

	// Stop the connector for this topic.
	ConsumerConnector consumerConn = consumerConnMap.get(topicName);
	consumerConn.commitOffsets(true);
	consumerConn.shutdown();
	consumerConnMap.remove(topicName);

	// Stop the stream-consuming threads for this topic.
	ExecutorService es = executorMap.get(topicName);
	es.shutdownNow();
	executorMap.remove(topicName);

	Properties prop = geneConsumerProp();
	ConsumerConnector newConsumerConn = kafka.consumer.Consumer
			.createJavaConsumerConnector(new ConsumerConfig(prop));
	consumerConnMap.put(topicName, newConsumerConn);

	addNewConsumer(topicName, topic.get(topicName));
}
 
Author: DTStack, Project: jlogstash-input-plugin, Lines: 21, Source: Kafka.java

Example 12: create

import kafka.javaapi.consumer.ConsumerConnector; // import the required package/class
@Override
public void create()
{
  super.create();
  if (standardConsumer == null) {
    standardConsumer = new HashMap<String, ConsumerConnector>();
  }

  // This is important to let Kafka know how to distribute reads among
  // consumers in the same consumer group.
  // Don't reuse an id across restarts: ZooKeeper takes a while to notice
  // that the old consumer is dead and to delete its entry, and reusing the
  // id while the stale entry exists triggers rebalancing errors.
  consumerConfig.put("consumer.id", "consumer" + System.currentTimeMillis());
  if (initialOffset.equalsIgnoreCase("earliest")) {
    consumerConfig.put("auto.offset.reset", "smallest");
  } else {
    consumerConfig.put("auto.offset.reset", "largest");
  }

}
 
Author: apache, Project: apex-malhar, Lines: 23, Source: HighlevelKafkaConsumer.java
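Note the translation in create(): the 0.8 high-level consumer spells the reset policy "smallest"/"largest" rather than the newer "earliest"/"latest", so the operator-facing values must be mapped. A minimal sketch of the equivalent raw configuration:

Properties props = new Properties();
props.put("consumer.id", "consumer" + System.currentTimeMillis()); // never reuse an id across restarts
props.put("auto.offset.reset", "smallest"); // 0.8 spelling of "earliest"; "largest" means "latest"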

Example 13: consume

import kafka.javaapi.consumer.ConsumerConnector; // import the required package/class
/** Consume messages from the given topic.
 *
 * @param topicName the topic (queue) name
 * @param groupId the consumer group name
 * @return an iterator over the received messages
 */
static MsgIterator consume(String topicName, String groupId) {
	ConsumerConnector consumerConnector = KafkaHelper.getConsumer(groupId);

	Map<String, Integer> topicCountMap = new HashMap<String, Integer>();	// (topic, #streams) pairs
	topicCountMap.put(topicName, Integer.valueOf(1));

	// TODO: could consume multiple topics at once
	Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumerConnector.createMessageStreams(topicCountMap);	// using the default decoder
	List<KafkaStream<byte[], byte[]>> streamList = consumerMap.get(topicName);	// one entry per stream; each stream supports an iterator over message/metadata pairs
	KafkaStream<byte[], byte[]> stream = streamList.get(0);

	// KafkaStream[K,V]: K is the partition key type, V is the message value type
	ConsumerIterator<byte[], byte[]> it = stream.iterator();
	MsgIterator iter = new MsgIterator(it);
	return iter;
}
 
Author: linzhaoming, Project: easyframe-msg, Lines: 23, Source: KafkaHelper.java

Example 14: getConsumer

import kafka.javaapi.consumer.ConsumerConnector; // import the required package/class
public static ConsumerConnector getConsumer(String groupId) {
	// The thread name is part of the key so that each thread holds exactly one
	// Consumer, while different threads get independent Consumers and can
	// therefore consume different partitions.
	String consumerKey = groupId + "|" + Thread.currentThread().getName();
	ConsumerConnector msgConnector = groupConsumers.get(consumerKey);
	if (msgConnector == null) {
		consumerLock.lock(); // acquire before the try block so the finally only runs while the lock is held
		try {
			msgConnector = groupConsumers.get(consumerKey);
			if (msgConnector == null) {
				msgConnector = Consumer.createJavaConsumerConnector(getConsumerRealConfig(groupId));
				groupConsumers.put(consumerKey, msgConnector);
			}
		} finally {
			consumerLock.unlock();
		}
	}

	return msgConnector;
}
 
Author: linzhaoming, Project: easyframe-msg, Lines: 20, Source: KafkaHelper.java
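groupConsumers, consumerLock, and getConsumerRealConfig are members of KafkaHelper that this excerpt does not show. A hypothetical sketch of the supporting declarations the pattern implies (the ZooKeeper address is a placeholder):

import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReentrantLock;

import kafka.consumer.ConsumerConfig;
import kafka.javaapi.consumer.ConsumerConnector;

public class KafkaHelper {
	// Hypothetical reconstruction of the fields used by getConsumer().
	private static final Map<String, ConsumerConnector> groupConsumers =
			new ConcurrentHashMap<String, ConsumerConnector>();
	private static final ReentrantLock consumerLock = new ReentrantLock();

	// Hypothetical: builds the per-group config; the real code would load these settings.
	private static ConsumerConfig getConsumerRealConfig(String groupId) {
		Properties props = new Properties();
		props.put("zookeeper.connect", "localhost:2181"); // placeholder
		props.put("group.id", groupId);
		return new ConsumerConfig(props);
	}
}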

Example 15: main

import kafka.javaapi.consumer.ConsumerConnector; // import the required package/class
public static void main(String[] args) throws Exception {
    if (id == null) throw new IllegalStateException("Undefined HC_ID");
    if (zk == null) throw new IllegalStateException("Undefined HC_ZK");

    out.println("Starting " + HttpClient.class.getSimpleName());
    out.println("Using zk:" + zk + ", id:" + id);

    Properties props = new Properties();
    props.put("zookeeper.connect", zk);
    props.put("group.id", id);
    props.put("zookeeper.session.timeout.ms", "400");
    props.put("zookeeper.sync.time.ms", "200");

    ConsumerConnector consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
    KafkaStream<byte[],byte[]> stream = consumer.createMessageStreams(Collections.singletonMap(id, 1)).get(id).get(0);

    consume(consumer, stream);
}
 
Author: stealthly, Project: punxsutawney, Lines: 19, Source: HttpClient.java
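consume(consumer, stream) is defined elsewhere in HttpClient and is not shown. A hypothetical sketch of such a drain loop, assuming the usual kafka.consumer.ConsumerIterator and kafka.message.MessageAndMetadata imports and the class's static import of System.out; the per-message handling is an assumption:

// Hypothetical sketch of the elided method.
static void consume(ConsumerConnector consumer, KafkaStream<byte[], byte[]> stream) {
    ConsumerIterator<byte[], byte[]> it = stream.iterator();
    while (it.hasNext()) {
        MessageAndMetadata<byte[], byte[]> msg = it.next();
        out.println("received: " + new String(msg.message())); // assumed handling
        consumer.commitOffsets(); // commit after each handled message
    }
}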


Note: the kafka.javaapi.consumer.ConsumerConnector examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from community-contributed open-source projects; copyright of the source code belongs to the original authors, and distribution and use should follow each project's License. Do not repost without permission.