

Java ConsumerRecords.partitions Method Code Examples

This article collects representative usage examples of the Java method org.apache.kafka.clients.consumer.ConsumerRecords.partitions. If you are unsure what ConsumerRecords.partitions does or how to call it, the curated examples below should help; you can also explore further usage of the enclosing class, org.apache.kafka.clients.consumer.ConsumerRecords.


Nine code examples of the ConsumerRecords.partitions method are shown below, ordered by popularity by default.
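Before the collected examples, here is a minimal, self-contained sketch of the canonical partitions() pattern: poll a batch, iterate the Set of TopicPartitions it covers, then fetch each partition's slice with records(tp). This is an illustrative sketch only; the broker address, group id, and topic name are placeholder values, not taken from any of the examples below.

import java.util.Arrays;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

public class PartitionsSketch {
    public static void main(String[] args) {
        Properties configs = new Properties();
        configs.put("bootstrap.servers", "localhost:9092"); // placeholder broker address
        configs.put("group.id", "partitions-demo");         // placeholder group id
        configs.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        configs.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(configs)) {
            consumer.subscribe(Arrays.asList("demo-topic")); // placeholder topic
            ConsumerRecords<String, String> records = consumer.poll(1000);
            // partitions() returns the set of TopicPartitions that have data in this batch
            for (TopicPartition tp : records.partitions()) {
                List<ConsumerRecord<String, String>> partitionRecords = records.records(tp);
                System.out.println(tp + " -> " + partitionRecords.size() + " record(s)");
            }
        }
    }
}

Iterating a ConsumerRecords object directly walks all records regardless of partition; partitions() together with records(tp) instead lets you process, and commit, one partition at a time, as several of the examples below demonstrate.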

Example 1: main

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the package/class this method depends on
public static void main(String[] args) {
    Map<String, Object> configs = new HashMap<String, Object>();
    configs.put(ConsumerConstants.BOOTSTRAP_SERVERS, ConsumerConstants.BROKER_CLUSTER_LIST);
    configs.put(ConsumerConstants.ZK_CONNECT, ConsumerConstants.ZK_CLUSTER_LIST);
    configs.put(ConsumerConstants.GROUP_ID, ConsumerConstants.GROUPID_TEST);
    LatestConsumer consumer = LatestConsumer.getInstance(ConsumerConstants.TOPIC_TEST, configs);
    consumer.subscribe();
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll();
        // partitions() returns the set of TopicPartitions that have data in this batch
        records.partitions();
        for (ConsumerRecord<String, String> record : records) {
            System.out.printf("offset = %d, partition = %d, value = %s%n",
                    record.offset(), record.partition(), record.value());
        }
    }
}
 
Developer: wngn123, Project: wngn-jms-kafka, Lines: 18, Source: LatestConsumer.java

Example 2: onConsume

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the package/class this method depends on
@Override
@SuppressWarnings("deprecation")
public ConsumerRecords<String, String> onConsume(ConsumerRecords<String, String> records) {

    // This will ensure that we get the cluster metadata when onConsume is called for the first time
    // as subsequent compareAndSet operations will fail.
    CLUSTER_ID_BEFORE_ON_CONSUME.compareAndSet(NO_CLUSTER_ID, CLUSTER_META.get());

    Map<TopicPartition, List<ConsumerRecord<String, String>>> recordMap = new HashMap<>();
    for (TopicPartition tp : records.partitions()) {
        List<ConsumerRecord<String, String>> lst = new ArrayList<>();
        for (ConsumerRecord<String, String> record: records.records(tp)) {
            lst.add(new ConsumerRecord<>(record.topic(), record.partition(), record.offset(),
                                         record.timestamp(), record.timestampType(),
                                         record.checksum(), record.serializedKeySize(),
                                         record.serializedValueSize(),
                                         record.key(), record.value().toUpperCase(Locale.ROOT)));
        }
        recordMap.put(tp, lst);
    }
    return new ConsumerRecords<String, String>(recordMap);
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 23, Source: MockConsumerInterceptor.java

Example 3: main

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the package/class this method depends on
public static void main(String[] args) {
    Map<String, Object> configs = new HashMap<String, Object>();
    // bootstrap.servers lists one or more brokers; there is no need to name them all,
    // as the client discovers the remaining brokers in the cluster automatically.
    configs.put("bootstrap.servers", "192.168.0.107:9092,192.168.0.108:9092,192.168.0.109:9092");
    configs.put("group.id", "kafka-test");
    // whether offsets are committed automatically
    configs.put("enable.auto.commit", "false");
    // interval between automatic offset commits
    configs.put("auto.commit.interval.ms", "1000");
    configs.put("session.timeout.ms", "30000");

    configs.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    configs.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

    KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(configs);
    // topics the consumer subscribes to; several topics may be subscribed at once
    consumer.subscribe(Arrays.asList("kafka-test"));

    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Long.MAX_VALUE);
        for (TopicPartition partition : records.partitions()) {
            List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
            for (ConsumerRecord<String, String> record : partitionRecords) {
                System.out.println(record.offset() + ": " + record.value());
            }
            /* synchronously commit this partition's last processed offset */
            long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
            consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)));
        }
    }
}
 
Developer: wngn123, Project: wngn-jms-kafka, Lines: 35, Source: ComsumerDemo3.java

Example 4: main

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the package/class this method depends on
public static void main(String[] args) {
    Map<String, Object> configs = new HashMap<String, Object>();
    // bootstrap.servers lists one or more brokers; there is no need to name them all,
    // as the client discovers the remaining brokers in the cluster automatically.
    configs.put("bootstrap.servers", "192.168.0.107:9092,192.168.0.108:9092,192.168.0.109:9092");
    configs.put("zookeeper.connect", "192.168.0.107:2182,192.168.0.108:2182,192.168.0.109:2182");
    configs.put("group.id", "test");
    configs.put("auto.offset.reset", "earliest"); //必须要加要读旧数据
    // whether offsets are committed automatically
    configs.put("enable.auto.commit", "true");
    // interval between automatic offset commits
    configs.put("auto.commit.interval.ms", "1000");
    configs.put("session.timeout.ms", "30000");

    configs.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    configs.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

    KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(configs);
    // topics the consumer subscribes to; several topics may be subscribed at once
    consumer.subscribe(Arrays.asList("kafka-test"));
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        // partitions() returns the set of TopicPartitions that have data in this batch
        records.partitions();
        for (ConsumerRecord<String, String> record : records) {
            System.out.printf("offset = %d, partition = %d, value = %s%n",
                    record.offset(), record.partition(), record.value());
        }
    }
}
 
Developer: wngn123, Project: wngn-jms-kafka, Lines: 29, Source: ComsumerDemo.java

Example 5: main

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the package/class this method depends on
public static void main(String[] args) {
    Map<String, Object> configs = new HashMap<String, Object>();
    // bootstrap.servers lists one or more brokers; there is no need to name them all,
    // as the client discovers the remaining brokers in the cluster automatically.
    configs.put("bootstrap.servers", "192.168.0.107:9092,192.168.0.108:9092,192.168.0.109:9092");
    configs.put("group.id", "kafka-test");
    // whether offsets are committed automatically
    configs.put("enable.auto.commit", "false");
    // interval between automatic offset commits
    configs.put("auto.commit.interval.ms", "1000");
    configs.put("session.timeout.ms", "30000");

    configs.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    configs.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

    KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(configs);
    String topic = "kafka-test";
    TopicPartition partition0 = new TopicPartition(topic, 0);
    TopicPartition partition1 = new TopicPartition(topic, 1);
    consumer.assign(Arrays.asList(partition0, partition1));
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Long.MAX_VALUE);
        for (TopicPartition partition : records.partitions()) {
            List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
            for (ConsumerRecord<String, String> record : partitionRecords) {
                System.out.println(record.offset() + ": " + record.value());
            }
            /* synchronously commit this partition's last processed offset */
            long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
            consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)));
        }
    }
}
 
Developer: wngn123, Project: wngn-jms-kafka, Lines: 33, Source: ComsumerDemo4.java

Example 6: setWaitingToEmit

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the package/class this method depends on
public void setWaitingToEmit(ConsumerRecords<K, V> consumerRecords) {
    // flatten the polled batch into a single list, partition by partition
    List<ConsumerRecord<K, V>> waitingToEmitList = new LinkedList<>();
    for (TopicPartition tp : consumerRecords.partitions()) {
        waitingToEmitList.addAll(consumerRecords.records(tp));
    }
    waitingToEmit = waitingToEmitList.iterator();
}
 
Developer: Paleozoic, Project: storm_spring_boot_demo, Lines: 8, Source: KafkaSpout.java

Example 7: addRecordsToTasks

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the package/class this method depends on
/**
 * Take records and add them to each respective task
 * @param records Records, can be null
 */
private void addRecordsToTasks(final ConsumerRecords<byte[], byte[]> records) {
    if (records != null && !records.isEmpty()) {
        int numAddedRecords = 0;

        for (final TopicPartition partition : records.partitions()) {
            final StreamTask task = activeTasksByPartition.get(partition);
            numAddedRecords += task.addRecords(partition, records.records(partition));
        }
        streamsMetrics.skippedRecordsSensor.record(records.count() - numAddedRecords, timerStartedMs);
    }
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 16, Source: StreamThread.java

Example 8: onConsume

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the package/class this method depends on
@Override
public ConsumerRecords<K, V> onConsume(ConsumerRecords<K, V> records) {
    onConsumeCount++;
    if (throwExceptionOnConsume)
        throw new KafkaException("Injected exception in FilterConsumerInterceptor.onConsume.");

    // drop every topic-partition whose partition number equals filterPartition
    Map<TopicPartition, List<ConsumerRecord<K, V>>> recordMap = new HashMap<>();
    for (TopicPartition tp : records.partitions()) {
        if (tp.partition() != filterPartition)
            recordMap.put(tp, records.records(tp));
    }
    return new ConsumerRecords<K, V>(recordMap);
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 15, Source: ConsumerInterceptorsTest.java

Example 9: onRecordsReceived

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the package/class this method depends on
private Map<TopicPartition, OffsetAndMetadata> onRecordsReceived(ConsumerRecords<String, String> records) {
    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();

    List<RecordSetSummary> summaries = new ArrayList<>();
    for (TopicPartition tp : records.partitions()) {
        List<ConsumerRecord<String, String>> partitionRecords = records.records(tp);

        if (hasMessageLimit() && consumedMessages + partitionRecords.size() > maxMessages)
            partitionRecords = partitionRecords.subList(0, maxMessages - consumedMessages);

        if (partitionRecords.isEmpty())
            continue;

        long minOffset = partitionRecords.get(0).offset();
        long maxOffset = partitionRecords.get(partitionRecords.size() - 1).offset();

        offsets.put(tp, new OffsetAndMetadata(maxOffset + 1));
        summaries.add(new RecordSetSummary(tp.topic(), tp.partition(),
                partitionRecords.size(), minOffset, maxOffset));

        if (verbose) {
            for (ConsumerRecord<String, String> record : partitionRecords)
                printJson(new RecordData(record));
        }

        consumedMessages += partitionRecords.size();
        if (isFinished())
            break;
    }

    printJson(new RecordsConsumed(records.count(), summaries));
    return offsets;
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 34, Source: VerifiableConsumer.java


Note: the org.apache.kafka.clients.consumer.ConsumerRecords.partitions examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from community open-source projects; copyright remains with the original authors, and any use or distribution should follow the corresponding project's license. Do not reproduce without permission.