This article collects typical usage examples of the Java method org.apache.kafka.clients.consumer.KafkaConsumer.seekToBeginning. If you have been wondering what KafkaConsumer.seekToBeginning does and how to use it, the curated examples below should help. You can also read further about the enclosing class, org.apache.kafka.clients.consumer.KafkaConsumer.
The following shows 6 code examples of KafkaConsumer.seekToBeginning, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
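Before diving into the examples, a minimal sketch of the basic call pattern may be useful. Note that seekToBeginning(Collection&lt;TopicPartition&gt;) only affects partitions currently assigned to the consumer, and the seek is lazy: it takes effect on the next poll() or position() call. The broker address, topic name, and group id below are placeholders, and poll(Duration) assumes kafka-clients 2.0 or newer.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class SeekToBeginningSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker
        props.put("group.id", "seek-demo");                // placeholder group id
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            TopicPartition tp = new TopicPartition("demo-topic", 0); // placeholder topic
            consumer.assign(Collections.singletonList(tp));
            // Lazy: the reset is applied on the next poll()/position() call.
            consumer.seekToBeginning(Collections.singletonList(tp));
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(2));
            records.forEach(r -> System.out.printf("offset=%d key=%s value=%s%n", r.offset(), r.key(), r.value()));
        }
    }
}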
Example 1: resetToLastCommittedPositions
import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
private static void resetToLastCommittedPositions(KafkaConsumer<String, String> consumer) {
    for (TopicPartition topicPartition : consumer.assignment()) {
        OffsetAndMetadata offsetAndMetadata = consumer.committed(topicPartition);
        if (offsetAndMetadata != null)
            consumer.seek(topicPartition, offsetAndMetadata.offset());
        else
            consumer.seekToBeginning(singleton(topicPartition));
    }
}
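This pattern appears in transactional consume-process-produce loops: after a transaction is aborted, the consumer rewinds every assigned partition to its last committed offset and reprocesses; partitions with no committed offset yet fall back to the start of the log via seekToBeginning. Note that committed(TopicPartition) makes a blocking remote call per partition; if you are on kafka-clients 2.4+, the batched committed(Set&lt;TopicPartition&gt;) overload avoids the per-partition round trips.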
Example 2: consumeAllRecordsFromTopic
import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
/**
 * This will consume all records from only the partitions given.
 * @param topic Topic to consume from.
 * @param partitionIds Collection of PartitionIds to consume.
 * @return List of ConsumerRecords consumed.
 */
public List<ConsumerRecord<byte[], byte[]>> consumeAllRecordsFromTopic(final String topic, Collection<Integer> partitionIds) {
    // Create topic partitions
    List<TopicPartition> topicPartitions = new ArrayList<>();
    for (Integer partitionId : partitionIds) {
        topicPartitions.add(new TopicPartition(topic, partitionId));
    }

    // Connect consumer
    KafkaConsumer<byte[], byte[]> kafkaConsumer =
        kafkaTestServer.getKafkaConsumer(ByteArrayDeserializer.class, ByteArrayDeserializer.class);

    // Assign topic partitions & seek to the head of them
    kafkaConsumer.assign(topicPartitions);
    kafkaConsumer.seekToBeginning(topicPartitions);

    // Pull records from kafka; keep polling until we get nothing back
    final List<ConsumerRecord<byte[], byte[]>> allRecords = new ArrayList<>();
    ConsumerRecords<byte[], byte[]> records;
    do {
        // Grab records from kafka
        records = kafkaConsumer.poll(2000L);
        logger.info("Found {} records in kafka", records.count());

        // Add to our array list
        records.forEach(allRecords::add);
    } while (!records.isEmpty());

    // Close consumer
    kafkaConsumer.close();

    // Return all records
    return allRecords;
}
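One caveat with the loop above: the first empty poll is treated as "caught up", so a slow broker can end the loop before all records have arrived. A stricter variant, sketched below under the assumption of kafka-clients 2.0+ (for poll(Duration) and endOffsets), keeps polling until the consumer's position reaches the end offsets captured up front. drainToEnd is a hypothetical helper name; the imports match the example above plus java.time.Duration and java.util.Map.

// Hypothetical helper: drain everything up to the end offsets captured at call time.
private static List<ConsumerRecord<byte[], byte[]>> drainToEnd(
        final KafkaConsumer<byte[], byte[]> consumer,
        final Collection<TopicPartition> partitions) {
    // Snapshot the current end offset of every partition.
    final Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
    final List<ConsumerRecord<byte[], byte[]>> out = new ArrayList<>();
    // Keep polling while any partition is still behind its snapshotted end offset.
    while (partitions.stream().anyMatch(tp -> consumer.position(tp) < endOffsets.get(tp))) {
        consumer.poll(Duration.ofMillis(500)).forEach(out::add);
    }
    return out;
}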
Example 3: maybeSeekToBeginning
import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
private void maybeSeekToBeginning(final KafkaConsumer<byte[], byte[]> client,
                                  final Set<TopicPartition> inputTopicPartitions) {
    final List<String> inputTopics = options.valuesOf(inputTopicsOption);
    final String groupId = options.valueOf(applicationIdOption);

    if (inputTopicPartitions.size() > 0) {
        if (!dryRun) {
            client.seekToBeginning(inputTopicPartitions);
        } else {
            System.out.println("Following input topics offsets will be reset to beginning (for consumer group " + groupId + ")");
            for (final String topic : inputTopics) {
                if (allTopics.contains(topic)) {
                    System.out.println("Topic: " + topic);
                }
            }
        }
    }
}
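This method follows the shape of Kafka Streams' application reset tool: in a dry run it only prints which input topics would be rewound for the given consumer group, while a real run calls seekToBeginning on the already-assigned partitions. Keep in mind that the seek alone changes nothing durable; the new positions only become the group's committed offsets once the caller subsequently commits (or polls with auto-commit enabled).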
Example 4: KafkaSource
import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public KafkaSource() throws IOException, PropertyException {
    Properties configs = ConfUtils.getProps(CONFIG_PROPERTIES);
    statTopic = configs.getProperty(Constants.STATISTIC_TOPIC);
    if (statTopic == null) {
        throw new PropertyException("Configuration property must not be empty! " + Constants.STATISTIC_TOPIC);
    }
    statTopicPartition = new TopicPartition(statTopic, 0);

    Properties statProps = ConfUtils.getProps(CONSUMER_PROPERTIES);
    statProps.setProperty("enable.auto.commit", "false");
    List<TopicPartition> topics = Arrays.asList(statTopicPartition);
    consumer = new KafkaConsumer<>(statProps);
    consumer.assign(topics);

    long beforeOffset = consumer.position(statTopicPartition);
    String offset = configs.getProperty("kafka.offset");
    if (offset.equalsIgnoreCase("none")) {
        // do nothing: keep the position resolved from the committed offset / auto.offset.reset
    } else if (offset.equalsIgnoreCase("begin")) {
        consumer.seekToBeginning(Lists.newArrayList(statTopicPartition));
    } else if (offset.equalsIgnoreCase("end")) {
        consumer.seekToEnd(Lists.newArrayList(statTopicPartition));
    } else {
        // an explicit numeric offset
        long nOffset = Long.parseLong(offset);
        consumer.seek(statTopicPartition, nOffset);
    }
    long afterOffset = consumer.position(statTopicPartition);
    LOG.info(String.format("init KafkaSource OK. beforeOffset=%d, afterOffset=%d", beforeOffset, afterOffset));
}
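The kafka.offset switch accepts four spellings: none (keep whatever position the committed offset or auto.offset.reset policy yields), begin, end, or an absolute numeric offset. Also note that position() is what forces the lazy seek to resolve, which is why afterOffset in the log line reflects the reset. A hypothetical excerpt of the config.properties file being read (the key name is taken from the code; the value is illustrative):

# config.properties (illustrative)
kafka.offset=begin    # one of: none | begin | end | <absolute offset, e.g. 42>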
Example 5: testProducerAndConsumer
import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
/**
 * Test that KafkaServer works as expected!
 *
 * This also serves as a decent example of how to use the producer and consumer.
 */
@Test
public void testProducerAndConsumer() throws Exception {
    final int partitionId = 0;

    // Define our message
    final String expectedKey = "my-key";
    final String expectedValue = "my test message";

    // Define the record we want to produce
    ProducerRecord<String, String> producerRecord = new ProducerRecord<>(topicName, partitionId, expectedKey, expectedValue);

    // Create a new producer
    KafkaProducer<String, String> producer =
        getKafkaTestServer().getKafkaProducer(StringSerializer.class, StringSerializer.class);

    // Produce it & wait for it to complete.
    Future<RecordMetadata> future = producer.send(producerRecord);
    producer.flush();
    while (!future.isDone()) {
        Thread.sleep(500L);
    }
    logger.info("Produce completed");

    // Close producer!
    producer.close();

    KafkaConsumer<String, String> kafkaConsumer =
        getKafkaTestServer().getKafkaConsumer(StringDeserializer.class, StringDeserializer.class);

    final List<TopicPartition> topicPartitionList = Lists.newArrayList();
    for (final PartitionInfo partitionInfo : kafkaConsumer.partitionsFor(topicName)) {
        topicPartitionList.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
    }
    kafkaConsumer.assign(topicPartitionList);
    kafkaConsumer.seekToBeginning(topicPartitionList);

    // Pull records from kafka; keep polling until we get nothing back
    ConsumerRecords<String, String> records;
    do {
        records = kafkaConsumer.poll(2000L);
        logger.info("Found {} records in kafka", records.count());
        for (ConsumerRecord<String, String> record : records) {
            // Validate
            assertEquals("Key matches expected", expectedKey, record.key());
            assertEquals("value matches expected", expectedValue, record.value());
        }
    } while (!records.isEmpty());

    // Close consumer
    kafkaConsumer.close();
}
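Using assign() plus seekToBeginning() rather than subscribe() keeps this test deterministic: there is no consumer-group rebalance to wait for, and the read always starts from the head of each partition regardless of any previously committed offsets.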
Example 6: consume
import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public void consume(String topic) throws Exception {
    if (maybeSetupPhase(topic, "simple-benchmark-consumer-load", true)) {
        return;
    }

    Properties props = setProduceConsumeProperties("simple-benchmark-consumer");
    KafkaConsumer<Integer, byte[]> consumer = new KafkaConsumer<>(props);

    List<TopicPartition> partitions = getAllPartitions(consumer, topic);
    consumer.assign(partitions);
    consumer.seekToBeginning(partitions);

    Integer key = null;
    long startTime = System.currentTimeMillis();

    while (true) {
        ConsumerRecords<Integer, byte[]> records = consumer.poll(POLL_MS);
        if (records.isEmpty()) {
            if (processedRecords == numRecords)
                break;
        } else {
            for (ConsumerRecord<Integer, byte[]> record : records) {
                processedRecords++;
                processedBytes += record.value().length + Integer.SIZE;
                // Track the highest key seen so far.
                Integer recKey = record.key();
                if (key == null || key < recKey)
                    key = recKey;
                // Stop once the expected number of records has been consumed.
                if (processedRecords == numRecords)
                    break;
            }
        }
        if (processedRecords == numRecords)
            break;
    }
    long endTime = System.currentTimeMillis();

    consumer.close();
    printResults("Consumer Performance [records/latency/rec-sec/MB-sec read]: ", endTime - startTime);
}
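Two details are worth noting in this benchmark. Termination relies on the externally maintained processedRecords counter reaching numRecords rather than on end offsets, so the loop spins on empty polls until the expected count arrives. And processedBytes adds Integer.SIZE (which is 32, the key size in bits, not bytes) per record, so the reported MB/sec figure is an approximation.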