本文整理汇总了Java中org.apache.kafka.clients.consumer.KafkaConsumer.position方法的典型用法代码示例。如果您正苦于以下问题:Java KafkaConsumer.position方法的具体用法?Java KafkaConsumer.position怎么用?Java KafkaConsumer.position使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.kafka.clients.consumer.KafkaConsumer
的用法示例。
在下文中一共展示了KafkaConsumer.position方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: main
import org.apache.kafka.clients.consumer.KafkaConsumer; //导入方法依赖的package包/类
/**
 * Demo: assign a single partition, seek to roughly 1000 records before the log
 * end, then tail the partition and print every record.
 *
 * @throws IOException          if consumer.properties cannot be loaded
 * @throws InterruptedException if the polling thread is interrupted while sleeping
 */
public static void main(String[] args) throws IOException, InterruptedException {
    Properties properties = PropertiesUtils.getProps("consumer.properties");
    properties.setProperty("client.id", "whtestconsumer");
    properties.setProperty("group.id", "whtestconsumer");
    properties.setProperty("bootstrap.servers", "localhost:9092");
    //properties.setProperty("auto.offset.reset", "earliest");

    String topic = "uav-test.monitor.result";
    TopicPartition topicPartition = new TopicPartition(topic, 0);
    List<TopicPartition> topics = Arrays.asList(topicPartition);

    // try-with-resources: the original never closed the consumer, leaking its
    // network resources if anything below threw.
    try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties)) {
        consumer.assign(topics);
        consumer.seekToEnd(topics);
        long end = consumer.position(topicPartition);
        // BUG FIX: seeking to (end - 1000) threw IllegalArgumentException when
        // the partition held fewer than 1000 records; clamp the target at 0.
        consumer.seek(topicPartition, Math.max(0L, end - 1000));
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
            }
            Thread.sleep(1);
        }
    }
}
示例2: readKafkaTopic
import org.apache.kafka.clients.consumer.KafkaConsumer; //导入方法依赖的package包/类
@GET
@Path("/readKafkaTopic")
/**
 * Reads (at most) the last ~1000 records of partition 0 of the requested topic
 * and returns their values as a JSON list.
 *
 * @param map request parameters; must contain a "topic" entry
 * @return 200 with the list of record values, or 204 with an error Result
 */
public Response readKafkaTopic(Map<String, Object> map) {
    try {
        // Validate input up front instead of NPE-ing inside the try block.
        Object topicObj = (map == null) ? null : map.get("topic");
        if (topicObj == null) {
            return Response.status(204).entity(new Result(-1, "missing required parameter: topic")).build();
        }
        String topic = topicObj.toString();

        Properties properties = PropertiesUtils.getProps("consumer.properties");
        properties.setProperty("client.id", "readKafkaTopic");
        properties.setProperty("group.id", "readKafkaTopic");
        //properties.setProperty("bootstrap.servers", "localhost:9092");

        TopicPartition topicPartition = new TopicPartition(topic, 0);
        List<TopicPartition> topics = Arrays.asList(topicPartition);

        // BUG FIX: the original closed the consumer only on the success path,
        // leaking it whenever seek/poll threw; try-with-resources always closes.
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties)) {
            consumer.assign(topics);
            consumer.seekToEnd(topics);
            long end = consumer.position(topicPartition);
            // Rewind ~1000 records, clamped so we never seek to a negative offset.
            long current = Math.max(0L, end - 1000);
            consumer.seek(topicPartition, current);

            List<String> result = new ArrayList<>();
            while (current < end) {
                ConsumerRecords<String, String> records = consumer.poll(1000);
                for (ConsumerRecord<String, String> record : records) {
                    result.add(record.value());
                }
                current = consumer.position(topicPartition);
            }
            return Response.ok().entity(result).build();
        }
    } catch (Exception e) {
        logger.error("Error encountered while readKafkaTopic with parameter:{}", JSON.toJSONString(map), e);
        return Response.status(204).entity(new Result(-1, e.getMessage())).build();
    }
}
示例3: messagesRemaining
import org.apache.kafka.clients.consumer.KafkaConsumer; //导入方法依赖的package包/类
/**
 * Computes how many messages remain between the consumer's current position on
 * {@code partition} and that partition's log-end offset.
 *
 * @param consumer  consumer already assigned to {@code partition}
 * @param partition the partition to measure
 * @return log-end offset minus current position, or 0 if no end offset is known
 */
private static long messagesRemaining(KafkaConsumer<String, String> consumer, TopicPartition partition) {
    long position = consumer.position(partition);
    Long endOffset = consumer.endOffsets(singleton(partition)).get(partition);
    return (endOffset == null) ? 0 : endOffset - position;
}
示例4: KafkaSource
import org.apache.kafka.clients.consumer.KafkaConsumer; //导入方法依赖的package包/类
/**
 * Initializes the statistics consumer: assigns partition 0 of the statistic
 * topic and positions it according to the "kafka.offset" config property
 * ("none" = keep current, "begin", "end", or a numeric offset).
 *
 * @throws IOException       if a properties file cannot be read
 * @throws PropertyException if the statistic-topic property is missing
 */
public KafkaSource() throws IOException, PropertyException {
    Properties configs = ConfUtils.getProps(CONFIG_PROPERTIES);
    statTopic = configs.getProperty(Constants.STATISTIC_TOPIC);
    if (statTopic == null) {
        throw new PropertyException("配置参数文件内容不能为空! " + Constants.STATISTIC_TOPIC);
    }
    statTopicPartition = new TopicPartition(statTopic, 0);

    Properties statProps = ConfUtils.getProps(CONSUMER_PROPERTIES);
    // Offsets are managed manually here; never auto-commit.
    statProps.setProperty("enable.auto.commit", "false");
    List<TopicPartition> topics = Arrays.asList(statTopicPartition);
    // Diamond instead of the raw-type constructor call.
    consumer = new KafkaConsumer<>(statProps);
    consumer.assign(topics);
    long beforeOffset = consumer.position(statTopicPartition);

    // BUG FIX: the original NPE'd on equalsIgnoreCase when "kafka.offset" was
    // absent from the config; default to "none" (keep the resolved position).
    String offset = configs.getProperty("kafka.offset", "none");
    if (offset.equalsIgnoreCase("none")) {
        // keep the position resolved above
    } else if (offset.equalsIgnoreCase("begin")) {
        consumer.seekToBeginning(Lists.newArrayList(statTopicPartition));
    } else if (offset.equalsIgnoreCase("end")) {
        consumer.seekToEnd(Lists.newArrayList(statTopicPartition));
    } else {
        // Any other value is treated as an explicit numeric offset.
        consumer.seek(statTopicPartition, Long.parseLong(offset));
    }
    long afferOffset = consumer.position(statTopicPartition);
    LOG.info(String.format("init kafkaSoure OK. beforeOffset %d, afferOffset=%d", beforeOffset, afferOffset));
}
示例5: run
import org.apache.kafka.clients.consumer.KafkaConsumer; //导入方法依赖的package包/类
/**
 * Periodically (every 60s) discovers all topic partitions, seeks a probe
 * consumer to the end of each, and records the log-end offset in
 * {@code logEndOffsetMap}. Runs until the thread is interrupted.
 *
 * @see Thread#run()
 */
@Override
public void run() {
    String group = "kafka-insight-logOffsetListener";
    int sleepTime = 60000;
    // BUG FIX: the original declared KafkaConsumer<Array<Byte>, Array<Byte>>.
    // Array<Byte> is not a Java type (a Scala leftover) and does not compile;
    // byte[] is the raw key/value type for an offset-probing consumer.
    KafkaConsumer<byte[], byte[]> kafkaConsumer = null;
    while (!Thread.currentThread().isInterrupted()) {
        try {
            if (kafkaConsumer == null) {
                kafkaConsumer = KafkaUtils.createNewKafkaConsumer(brokersInfo, group);
            }
            Map<String, List<PartitionInfo>> topicPartitionsMap = kafkaConsumer.listTopics();
            for (List<PartitionInfo> partitionInfoList : topicPartitionsMap.values()) {
                for (PartitionInfo partitionInfo : partitionInfoList) {
                    TopicPartition topicPartition = new TopicPartition(partitionInfo.topic(), partitionInfo.partition());
                    Collection<TopicPartition> topicPartitions = Arrays.asList(topicPartition);
                    kafkaConsumer.assign(topicPartitions);
                    kafkaConsumer.seekToEnd(topicPartitions);
                    // position() after seekToEnd yields the log-end offset.
                    Long logEndOffset = kafkaConsumer.position(topicPartition);
                    logEndOffsetMap.put(topicPartition, logEndOffset);
                }
            }
            Thread.sleep(sleepTime);
        } catch (InterruptedException e) {
            // BUG FIX: the broad catch below swallowed interruption, making the
            // thread unstoppable; restore the interrupt flag and exit the loop.
            Thread.currentThread().interrupt();
        } catch (Exception e) {
            e.printStackTrace();
            // Drop the consumer so the next iteration creates a fresh one.
            if (null != kafkaConsumer) {
                kafkaConsumer.close();
                kafkaConsumer = null;
            }
        }
    }
    if (null != kafkaConsumer) {
        kafkaConsumer.close();
    }
}
示例6: getLogSize
import org.apache.kafka.clients.consumer.KafkaConsumer; //导入方法依赖的package包/类
/**
 * Returns the log-end offset ("log size") of the given topic partition by
 * assigning the supplied consumer to it and seeking to the end.
 * Note: this reassigns the consumer to the single given partition (side effect).
 *
 * @param kafkaConsumer consumer used to probe the offset
 * @param topic         topic name
 * @param partition     partition number
 * @return the partition's log-end offset
 */
protected long getLogSize(KafkaConsumer<String, Serializable> kafkaConsumer, String topic,
    int partition) {
    TopicPartition target = new TopicPartition(topic, partition);
    List<TopicPartition> assignment = Arrays.asList(target);
    kafkaConsumer.assign(assignment);
    kafkaConsumer.seekToEnd(assignment);
    return kafkaConsumer.position(target);
}