This article collects typical usage examples of the Java method kafka.javaapi.consumer.ConsumerConnector.commitOffsets. If you are wondering what ConsumerConnector.commitOffsets does, how to call it, or what real invocations look like, the curated examples below should help. You can also explore the enclosing class, kafka.javaapi.consumer.ConsumerConnector.
The following 10 distinct code examples of ConsumerConnector.commitOffsets are shown, ordered by popularity.
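Before the individual examples, here is a minimal sketch of the typical commitOffsets lifecycle, assuming the legacy high-level consumer API (kafka.javaapi.consumer, Kafka 0.8.x). The ZooKeeper address, topic name, group id, and the process helper are illustrative, not taken from any of the projects below.

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

public class CommitOffsetsSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("zookeeper.connect", "localhost:2181"); // illustrative address
        props.put("group.id", "example-group");           // illustrative group id
        props.put("auto.commit.enable", "false");         // commit manually instead

        ConsumerConnector connector =
                Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
        Map<String, List<KafkaStream<byte[], byte[]>>> streams =
                connector.createMessageStreams(Collections.singletonMap("example-topic", 1));
        ConsumerIterator<byte[], byte[]> it = streams.get("example-topic").get(0).iterator();

        try {
            while (it.hasNext()) {
                process(it.next().message());   // hypothetical application logic
                connector.commitOffsets();      // persist the consumed offsets
            }
        } finally {
            connector.commitOffsets();          // flush once more before shutdown
            connector.shutdown();
        }
    }

    private static void process(byte[] payload) { /* hypothetical helper */ }
}

commitOffsets persists the latest consumed offsets for every partition the connector owns; with auto-commit disabled, it is the only way those offsets get saved.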
Example 1: release
import kafka.javaapi.consumer.ConsumerConnector; // import for the method's dependencies
@Override
public void release() {
    try {
        for (ConsumerConnector consumer : consumerConnMap.values()) {
            consumer.commitOffsets(true); // flush offsets before shutting down
            consumer.shutdown();
        }
        for (ExecutorService executor : executorMap.values()) {
            executor.shutdownNow();
        }
        if (scheduleExecutor != null) {
            scheduleExecutor.shutdownNow();
        }
        this.zkDistributed.realse();
    } catch (Exception e) {
        logger.error(ExceptionUtil.getErrorMessage(e));
    }
}
Example 2: close
import kafka.javaapi.consumer.ConsumerConnector; // import for the method's dependencies
@Override
public synchronized void close() throws IOException {
    logger.debug("Stop kafka fetcher. [topic: {}]", topics);
    ConsumerConnector connector = this.connector;
    this.connector = null;
    if (connector != null) {
        connector.commitOffsets(); // persist consumed offsets before shutdown
        connector.shutdown();
    }
    IOUtil.closeQuietly(eventItr);
    // Some events may still exist in the buffer; try to save them.
    List<byte[]> remaining = new ArrayList<>();
    try {
        while (eventItr.hasNext()) {
            remaining.add(eventItr.next());
        }
    } catch (Exception e) {
        // ignore: the iterator is already closed
    }
    eventItr = null;
    if (!remaining.isEmpty()) {
        this.remaining = remaining;
    }
}
Example 3: reconnConsumer
import kafka.javaapi.consumer.ConsumerConnector; // import for the method's dependencies
public void reconnConsumer(String topicName) {
    // shut down the connector for this topic
    ConsumerConnector consumerConn = consumerConnMap.get(topicName);
    consumerConn.commitOffsets(true); // flush offsets before reconnecting
    consumerConn.shutdown();
    consumerConnMap.remove(topicName);
    // stop the stream-consuming threads for this topic
    ExecutorService es = executorMap.get(topicName);
    es.shutdownNow();
    executorMap.remove(topicName);
    // rebuild the connector and re-register the consumer
    Properties prop = geneConsumerProp();
    ConsumerConnector newConsumerConn = kafka.consumer.Consumer
            .createJavaConsumerConnector(new ConsumerConfig(prop));
    consumerConnMap.put(topicName, newConsumerConn);
    addNewConsumer(topicName, topic.get(topicName));
}
Example 4: commit
import kafka.javaapi.consumer.ConsumerConnector; // import for the method's dependencies
@Override
public void commit() {
    // copy the field to a local so a concurrent close() cannot null it mid-call
    ConsumerConnector connector = this.connector;
    if (connector != null) {
        connector.commitOffsets();
    }
}
Example 5: main
import kafka.javaapi.consumer.ConsumerConnector; // import for the method's dependencies
public static void main(String[] args) {
    // NOTE: hard-coded arguments for local testing; they override anything
    // passed on the command line, so remove this line for real use.
    args = new String[] { "zookeeper0:2181/kafka", "topic1", "group2", "consumer1" };
    if (args == null || args.length != 4) {
        System.err.println("Usage:\n\tjava -jar kafka_consumer.jar ${zookeeper_list} ${topic_name} ${group_name} ${consumer_id}");
        System.exit(1);
    }
    String zk = args[0];
    String topic = args[1];
    String groupid = args[2];
    String consumerid = args[3];
    Properties props = new Properties();
    props.put("zookeeper.connect", zk);
    props.put("group.id", groupid);
    props.put("client.id", "test");
    props.put("consumer.id", consumerid);
    props.put("auto.offset.reset", "largest");
    props.put("auto.commit.enable", "false"); // offsets are committed manually below
    props.put("auto.commit.interval.ms", "60000");
    ConsumerConfig consumerConfig = new ConsumerConfig(props);
    ConsumerConnector consumerConnector = Consumer.createJavaConsumerConnector(consumerConfig);
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumerConnector.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream1 = consumerMap.get(topic).get(0);
    ConsumerIterator<byte[], byte[]> iterator = stream1.iterator();
    while (iterator.hasNext()) {
        MessageAndMetadata<byte[], byte[]> messageAndMetadata = iterator.next();
        String message = String.format(
                "Topic:%s, GroupID:%s, Consumer ID:%s, PartitionID:%s, Offset:%s, Message Key:%s, Message Payload: %s",
                messageAndMetadata.topic(), groupid, consumerid, messageAndMetadata.partition(),
                messageAndMetadata.offset(), new String(messageAndMetadata.key()),
                new String(messageAndMetadata.message()));
        System.out.println(message);
        consumerConnector.commitOffsets(); // commit after every message
    }
}
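Example 5 commits after every message, which issues one offset write per record. A common variation, sketched below against the same iterator and connector (the batchSize value and handle helper are hypothetical), is to commit every N messages and once more when the loop ends:

int batchSize = 100; // hypothetical batch size
int uncommitted = 0;
while (iterator.hasNext()) {
    MessageAndMetadata<byte[], byte[]> record = iterator.next();
    handle(record); // hypothetical application logic
    if (++uncommitted >= batchSize) {
        consumerConnector.commitOffsets(); // one offset write per batch, not per record
        uncommitted = 0;
    }
}
if (uncommitted > 0) {
    consumerConnector.commitOffsets(); // flush the tail of the last batch
}

The trade-off: the larger the batch, the more messages may be redelivered if the process crashes between commits.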
Example 6: release
import kafka.javaapi.consumer.ConsumerConnector; // import for the method's dependencies
@Override
public void release() {
    for (ConsumerConnector consumer : consumerConnMap.values()) {
        consumer.commitOffsets(true); // flush offsets before shutting down
        consumer.shutdown();
    }
    for (ExecutorService executor : executorMap.values()) {
        executor.shutdownNow();
    }
    scheduleExecutor.shutdownNow();
}
Example 7: commitOffset
import kafka.javaapi.consumer.ConsumerConnector; // import for the method's dependencies
@Override
protected void commitOffset() {
    // Commit the offsets at checkpoint so that the high-level consumer
    // doesn't have to re-receive too many duplicate messages after a restart.
    if (standardConsumer != null && standardConsumer.values() != null) {
        for (ConsumerConnector consumerConnector : standardConsumer.values()) {
            consumerConnector.commitOffsets();
        }
    }
}
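Example 7 ties commits to operator checkpoints. Where no checkpoint mechanism exists, a fixed-rate scheduler gives a similar bound on duplicates; a sketch reusing the standardConsumer map from the example above, with an illustrative 30-second interval (requires java.util.concurrent.Executors, ScheduledExecutorService, and TimeUnit):

ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
scheduler.scheduleAtFixedRate(() -> {
    for (ConsumerConnector consumerConnector : standardConsumer.values()) {
        consumerConnector.commitOffsets(); // bounds redelivery to one interval
    }
}, 30, 30, TimeUnit.SECONDS);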
Example 8: commitConsumerOffsets
import kafka.javaapi.consumer.ConsumerConnector; // import for the method's dependencies
static void commitConsumerOffsets(String groupId) {
    // connectors are cached per (group, thread) pair
    String consumerKey = groupId + "|" + Thread.currentThread().getName();
    ConsumerConnector consumerConnector = groupConsumers.get(consumerKey);
    if (consumerConnector != null) {
        consumerConnector.commitOffsets();
        LOG.debug("[EF-Msg] Commit: " + groupId);
    }
}
Example 9: consume
import kafka.javaapi.consumer.ConsumerConnector; // import for the method's dependencies
@SuppressWarnings("InfiniteLoopStatement")
private static void consume(ConsumerConnector consumer, KafkaStream<byte[], byte[]> stream) throws Exception {
    for (ConsumerIterator<byte[], byte[]> i = stream.iterator(); ; ) {
        out.println("Waiting for requests on \"" + id + "\" topic ...");
        MessageAndMetadata<byte[], byte[]> message = i.next();
        Requests requests = new Requests(message.message());
        sendRequests(requests);
        consumer.commitOffsets(); // commit only after the requests were sent
    }
}
Example 10: consume
import kafka.javaapi.consumer.ConsumerConnector; // import for the method's dependencies
@GET
@Timed
public Response consume(
        @QueryParam("topic") String topic,
        @QueryParam("timeout") Integer timeout
) {
    if (Strings.isNullOrEmpty(topic))
        return Response.status(400)
                .entity(new String[]{"Undefined topic"})
                .build();
    Properties props = (Properties) consumerCfg.clone();
    if (timeout != null) props.put("consumer.timeout.ms", "" + timeout);
    ConsumerConfig config = new ConsumerConfig(props);
    ConsumerConnector connector = Consumer.createJavaConsumerConnector(config);
    Map<String, Integer> streamCounts = Collections.singletonMap(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> streams = connector.createMessageStreams(streamCounts);
    KafkaStream<byte[], byte[]> stream = streams.get(topic).get(0);
    List<Message> messages = new ArrayList<>();
    try {
        // drain messages until consumer.timeout.ms fires
        for (MessageAndMetadata<byte[], byte[]> messageAndMetadata : stream)
            messages.add(new Message(messageAndMetadata));
    } catch (ConsumerTimeoutException ignore) {
        // expected: signals that no message arrived within the timeout
    } finally {
        connector.commitOffsets();
        connector.shutdown();
    }
    return Response.ok(messages).build();
}
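All of the examples above use the legacy high-level consumer, which was deprecated and then removed in Kafka 2.0. The modern counterpart of ConsumerConnector.commitOffsets is KafkaConsumer.commitSync (or commitAsync). A minimal sketch, with an illustrative broker address, group id, and topic name:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class CommitSyncSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // illustrative address
        props.put("group.id", "example-group");           // illustrative group id
        props.put("enable.auto.commit", "false");         // commit manually
        props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("example-topic"));
            while (true) { // poll loop; break on your own shutdown condition
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(1));
                for (ConsumerRecord<byte[], byte[]> record : records) {
                    // application logic
                }
                consumer.commitSync(); // equivalent of ConsumerConnector.commitOffsets()
            }
        }
    }
}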