This article collects and summarizes typical usage examples of the Java method org.apache.kafka.clients.consumer.KafkaConsumer.seekToEnd. If you have been wondering what exactly KafkaConsumer.seekToEnd does, how to call it, or what real-world usage looks like, the curated code samples here may help. You can also explore further usage examples of its enclosing class, org.apache.kafka.clients.consumer.KafkaConsumer.
The following presents 6 code examples of KafkaConsumer.seekToEnd, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code samples.
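Before walking through the examples, here is a minimal sketch of the core pattern they all share: assign a partition, call seekToEnd, then read the resulting position. Note that seekToEnd evaluates lazily; the seek only takes effect once position() or poll() is called. The broker address and topic name below are placeholder assumptions, not values taken from the examples.

// Minimal sketch; requires java.util.Collections, java.util.Properties,
// org.apache.kafka.clients.consumer.KafkaConsumer and org.apache.kafka.common.TopicPartition.
Properties props = new Properties();
props.setProperty("bootstrap.servers", "localhost:9092"); // assumed broker address
props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
TopicPartition tp = new TopicPartition("demo-topic", 0); // assumed topic
consumer.assign(Collections.singletonList(tp));
consumer.seekToEnd(Collections.singletonList(tp)); // lazy: no network call yet
long endOffset = consumer.position(tp);            // forces the seek and returns the log end offset
consumer.close();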
Example 1: main
import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class the method depends on
public static void main(String[] args) throws IOException, InterruptedException {
    Properties properties = PropertiesUtils.getProps("consumer.properties");
    properties.setProperty("client.id", "whtestconsumer");
    properties.setProperty("group.id", "whtestconsumer");
    properties.setProperty("bootstrap.servers", "localhost:9092");
    //properties.setProperty("auto.offset.reset", "earliest");

    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
    String topic = "uav-test.monitor.result";
    TopicPartition topicPartition = new TopicPartition(topic, 0);
    List<TopicPartition> topics = Arrays.asList(topicPartition);
    consumer.assign(topics);
    consumer.seekToEnd(topics);

    // position() forces the lazy seekToEnd to evaluate and returns the log end offset.
    long current = consumer.position(topicPartition);
    // Rewind 1000 records, clamped at 0 so we never seek to a negative offset.
    consumer.seek(topicPartition, Math.max(0L, current - 1000));

    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        for (ConsumerRecord<String, String> record : records) {
            System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
        }
        Thread.sleep(1);
    }
}
Example 2: readKafkaTopic
import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class the method depends on
@GET
@Path("/readKafkaTopic")
public Response readKafkaTopic(Map<String, Object> map) {
    try {
        Properties properties = PropertiesUtils.getProps("consumer.properties");
        properties.setProperty("client.id", "readKafkaTopic");
        properties.setProperty("group.id", "readKafkaTopic");
        //properties.setProperty("bootstrap.servers", "localhost:9092");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        String topic = map.get("topic").toString();
        TopicPartition topicPartition = new TopicPartition(topic, 0);
        List<TopicPartition> topics = Arrays.asList(topicPartition);
        consumer.assign(topics);
        consumer.seekToEnd(topics);

        // Record the end offset, then rewind up to 1000 records (clamped at offset 0).
        long current = consumer.position(topicPartition);
        long end = current;
        current -= 1000;
        if (current < 0) current = 0;
        consumer.seek(topicPartition, current);

        List<String> result = new ArrayList<>();
        while (current < end) {
            ConsumerRecords<String, String> records = consumer.poll(1000);
            for (ConsumerRecord<String, String> record : records) {
                result.add(record.value());
            }
            current = consumer.position(topicPartition);
        }
        consumer.close();

        return Response.ok().entity(result).build();
    } catch (Exception e) {
        logger.error("Error encountered while readKafkaTopic with parameter:{}", JSON.toJSONString(map), e);
        return Response.status(204).entity(new Result(-1, e.getMessage())).build();
    }
}
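In effect this endpoint returns roughly the last 1,000 records of partition 0 of the requested topic: it seeks to the end to learn the end offset, rewinds 1,000 records (clamped at 0), and polls until the consumer's position reaches the recorded end. One caveat: the error branch pairs HTTP status 204 (No Content) with a response body, which most HTTP stacks will silently drop; an error status such as 500 would likely express the failure more faithfully.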
Example 3: maybeSeekToEnd
import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class the method depends on
private void maybeSeekToEnd(final KafkaConsumer<byte[], byte[]> client, final Set<TopicPartition> intermediateTopicPartitions) {
    // options, dryRun and allTopics are fields of the enclosing class; this is an excerpt from a larger tool.
    final String groupId = options.valueOf(applicationIdOption);
    final List<String> intermediateTopics = options.valuesOf(intermediateTopicsOption);
    if (intermediateTopicPartitions.size() > 0) {
        if (!dryRun) {
            client.seekToEnd(intermediateTopicPartitions);
        } else {
            System.out.println("Following intermediate topics offsets will be reset to end (for consumer group " + groupId + ")");
            for (final String topic : intermediateTopics) {
                if (allTopics.contains(topic)) {
                    System.out.println("Topic: " + topic);
                }
            }
        }
    }
}
Example 4: KafkaSource
import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class the method depends on
public KafkaSource() throws IOException, PropertyException {
    Properties configs = ConfUtils.getProps(CONFIG_PROPERTIES);
    statTopic = configs.getProperty(Constants.STATISTIC_TOPIC);
    if (statTopic == null) {
        throw new PropertyException("Configuration property must not be empty! " + Constants.STATISTIC_TOPIC);
    }
    statTopicPartition = new TopicPartition(statTopic, 0);

    Properties statProps = ConfUtils.getProps(CONSUMER_PROPERTIES);
    statProps.setProperty("enable.auto.commit", "false");
    List<TopicPartition> topics = Arrays.asList(statTopicPartition);

    // The consumer field is declared in the enclosing class.
    consumer = new KafkaConsumer<>(statProps);
    consumer.assign(topics);

    long beforeOffset = consumer.position(statTopicPartition);
    String offset = configs.getProperty("kafka.offset");
    if (offset == null || offset.equalsIgnoreCase("none")) {
        // keep the consumer's current position
    } else if (offset.equalsIgnoreCase("begin")) {
        consumer.seekToBeginning(Lists.newArrayList(statTopicPartition));
    } else if (offset.equalsIgnoreCase("end")) {
        consumer.seekToEnd(Lists.newArrayList(statTopicPartition));
    } else {
        long nOffset = Long.parseLong(offset);
        consumer.seek(statTopicPartition, nOffset);
    }
    long afterOffset = consumer.position(statTopicPartition);
    LOG.info(String.format("init kafkaSource OK. beforeOffset=%d, afterOffset=%d", beforeOffset, afterOffset));
}
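In this constructor the kafka.offset property selects the starting position: "none" keeps the position the consumer already has, "begin" and "end" map to seekToBeginning and seekToEnd, and any other value is parsed as an absolute offset for seek. Because seeks are evaluated lazily, the final position() call is also what forces them to take effect before the source logs its start offset.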
Example 5: run
import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class the method depends on
/**
 * When an object implementing interface <code>Runnable</code> is used
 * to create a thread, starting the thread causes the object's
 * <code>run</code> method to be called in that separately executing
 * thread.
 * <p>
 * The general contract of the method <code>run</code> is that it may
 * take any action whatsoever.
 *
 * @see Thread#run()
 */
@Override
public void run() {
    String group = "kafka-insight-logOffsetListener";
    int sleepTime = 60000;
    KafkaConsumer<byte[], byte[]> kafkaConsumer = null;

    while (true) {
        try {
            if (null == kafkaConsumer) {
                kafkaConsumer = KafkaUtils.createNewKafkaConsumer(brokersInfo, group);
            }

            // For every partition of every topic, record the log end offset.
            Map<String, List<PartitionInfo>> topicPartitionsMap = kafkaConsumer.listTopics();
            for (List<PartitionInfo> partitionInfoList : topicPartitionsMap.values()) {
                for (PartitionInfo partitionInfo : partitionInfoList) {
                    TopicPartition topicPartition = new TopicPartition(partitionInfo.topic(), partitionInfo.partition());
                    Collection<TopicPartition> topicPartitions = Arrays.asList(topicPartition);
                    kafkaConsumer.assign(topicPartitions);
                    kafkaConsumer.seekToEnd(topicPartitions);
                    Long logEndOffset = kafkaConsumer.position(topicPartition);
                    logEndOffsetMap.put(topicPartition, logEndOffset);
                }
            }

            Thread.sleep(sleepTime);
        } catch (Exception e) {
            e.printStackTrace();
            if (null != kafkaConsumer) {
                kafkaConsumer.close();
                kafkaConsumer = null;
            }
        }
    }
}
Example 6: getLogSize
import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class the method depends on
protected long getLogSize(KafkaConsumer<String, Serializable> kafkaConsumer, String topic,
        int partition) {
    TopicPartition topicPartition = new TopicPartition(topic, partition);
    List<TopicPartition> asList = Arrays.asList(topicPartition);
    kafkaConsumer.assign(asList);
    kafkaConsumer.seekToEnd(asList);
    long logEndOffset = kafkaConsumer.position(topicPartition);
    return logEndOffset;
}
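Examples 5 and 6 use assign + seekToEnd + position purely to discover log end offsets, which mutates the consumer's assignment and position as a side effect. Since Kafka 0.10.1 the client also offers KafkaConsumer.endOffsets, which answers the same question without seeking. A minimal sketch, assuming an already-constructed consumer and a placeholder topic:

// Sketch: query the log end offset without disturbing the consumer's position.
// "demo-topic" and partition 0 are assumed placeholders; requires java.util.Collections and java.util.Map.
TopicPartition tp = new TopicPartition("demo-topic", 0);
Map<TopicPartition, Long> endOffsets = consumer.endOffsets(Collections.singletonList(tp));
long logEndOffset = endOffsets.get(tp); // the same value Example 6 computes via seekToEnd + position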