

Java ConsumerRecords.partitions Method Code Examples

This article collects typical usage examples of the Java method org.apache.kafka.clients.consumer.ConsumerRecords.partitions, drawn from open-source projects. If you are unsure what ConsumerRecords.partitions does or how to use it, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.kafka.clients.consumer.ConsumerRecords.


The following presents 9 code examples of the ConsumerRecords.partitions method, sorted by popularity by default.
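Before the examples, here is a minimal sketch of the idiom they all share: partitions() returns the set of TopicPartitions present in the polled batch, and records(TopicPartition) then yields that partition's records in offset order. The broker address, group id, and topic name below are placeholders.

import java.util.Arrays;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

public class PartitionsSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker
        props.put("group.id", "partitions-demo");         // placeholder group id
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        consumer.subscribe(Arrays.asList("demo-topic"));  // placeholder topic

        ConsumerRecords<String, String> records = consumer.poll(1000);
        // partitions(): every TopicPartition that contributed records to this batch
        for (TopicPartition tp : records.partitions()) {
            // records(tp): that partition's slice of the batch, in offset order
            List<ConsumerRecord<String, String>> partitionRecords = records.records(tp);
            System.out.println(tp + " -> " + partitionRecords.size() + " record(s)");
        }
        consumer.close();
    }
}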

Example 1: main

import org.apache.kafka.clients.consumer.ConsumerRecords; // the import this method depends on
public static void main(String[] args) {
    Map<String, Object> configs = new HashMap<String, Object>();
    configs.put(ConsumerConstants.BOOTSTRAP_SERVERS, ConsumerConstants.BROKER_CLUSTER_LIST);
    configs.put(ConsumerConstants.ZK_CONNECT, ConsumerConstants.ZK_CLUSTER_LIST);
    configs.put(ConsumerConstants.GROUP_ID, ConsumerConstants.GROUPID_TEST);
    LatestConsumer consumer = LatestConsumer.getInstance(ConsumerConstants.TOPIC_TEST, configs);
    consumer.subscribe();
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll();
        records.partitions(); // the set of TopicPartitions in this batch (unused here)
        for (ConsumerRecord<String, String> record : records) {
            System.out.printf("offset = %d, key = %s, value = %s%n",
                    record.offset(), record.key(), record.value());
        }
    }
}
 
Developer ID: wngn123, Project: wngn-jms-kafka, Lines: 18, Source: LatestConsumer.java

Example 2: onConsume

import org.apache.kafka.clients.consumer.ConsumerRecords; // the import this method depends on
@Override
@SuppressWarnings("deprecation")
public ConsumerRecords<String, String> onConsume(ConsumerRecords<String, String> records) {

    // This will ensure that we get the cluster metadata when onConsume is called for the first time
    // as subsequent compareAndSet operations will fail.
    CLUSTER_ID_BEFORE_ON_CONSUME.compareAndSet(NO_CLUSTER_ID, CLUSTER_META.get());

    Map<TopicPartition, List<ConsumerRecord<String, String>>> recordMap = new HashMap<>();
    for (TopicPartition tp : records.partitions()) {
        List<ConsumerRecord<String, String>> lst = new ArrayList<>();
        for (ConsumerRecord<String, String> record: records.records(tp)) {
            lst.add(new ConsumerRecord<>(record.topic(), record.partition(), record.offset(),
                                         record.timestamp(), record.timestampType(),
                                         record.checksum(), record.serializedKeySize(),
                                         record.serializedValueSize(),
                                         record.key(), record.value().toUpperCase(Locale.ROOT)));
        }
        recordMap.put(tp, lst);
    }
    return new ConsumerRecords<String, String>(recordMap);
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 23, Source: MockConsumerInterceptor.java
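An interceptor like the one above is never called directly; the consumer instantiates it from configuration. A minimal sketch of wiring it in, assuming MockConsumerInterceptor is on the classpath (broker address and group id are placeholders):

Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092"); // placeholder broker
props.put("group.id", "interceptor-demo");        // placeholder group id
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
// interceptor.classes takes a comma-separated list of ConsumerInterceptor implementations;
// onConsume then runs on every batch returned by poll(), before the application sees it
props.put("interceptor.classes", MockConsumerInterceptor.class.getName());
KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);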

Example 3: main

import org.apache.kafka.clients.consumer.ConsumerRecords; // the import this method depends on
public static void main(String[] args) {
    Map<String, Object> configs = new HashMap<String, Object>();
    // bootstrap.servers names one or more brokers; the full cluster need not be listed,
    // the remaining brokers are discovered automatically.
    configs.put("bootstrap.servers", "192.168.0.107:9092,192.168.0.108:9092,192.168.0.109:9092");
    configs.put("group.id", "kafka-test");
    // whether offsets are committed automatically
    configs.put("enable.auto.commit", "false");
    // interval between automatic offset commits (ignored while auto-commit is off)
    configs.put("auto.commit.interval.ms", "1000");
    configs.put("session.timeout.ms", "30000");

    configs.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    configs.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

    KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(configs);
    // topics to subscribe to; several may be subscribed at once
    consumer.subscribe(Arrays.asList("kafka-test"));

    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Long.MAX_VALUE);
        for (TopicPartition partition : records.partitions()) {
            List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
            for (ConsumerRecord<String, String> record : partitionRecords) {
                System.out.println(record.offset() + ": " + record.value());
            }
            /* synchronously commit this partition's offset; the committed value is the
               position of the next record to read, hence lastOffset + 1 */
            long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
            consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)));
        }
    }
}
 
Developer ID: wngn123, Project: wngn-jms-kafka, Lines: 35, Source: ComsumerDemo3.java
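When a synchronous commit per partition costs too much latency, the same offset map can be handed to commitAsync, which does not block the poll loop. A minimal sketch reusing the names from the example above (the callback merely logs failures; it also requires importing org.apache.kafka.clients.consumer.OffsetCommitCallback):

consumer.commitAsync(
        Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)),
        new OffsetCommitCallback() {
            @Override
            public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
                // a null exception means the commit succeeded
                if (exception != null)
                    System.err.println("commit failed for " + offsets + ": " + exception);
            }
        });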

Example 4: main

import org.apache.kafka.clients.consumer.ConsumerRecords; // the import this method depends on
public static void main(String[] args) {
    Map<String, Object> configs = new HashMap<String, Object>();
    // bootstrap.servers names one or more brokers; the full cluster need not be listed,
    // the remaining brokers are discovered automatically.
    configs.put("bootstrap.servers", "192.168.0.107:9092,192.168.0.108:9092,192.168.0.109:9092");
    // zookeeper.connect is ignored by the new (Java) consumer and could be dropped
    configs.put("zookeeper.connect", "192.168.0.107:2182,192.168.0.108:2182,192.168.0.109:2182");
    configs.put("group.id", "test");
    // required so a group without committed offsets starts from the oldest data
    configs.put("auto.offset.reset", "earliest");
    // whether offsets are committed automatically
    configs.put("enable.auto.commit", "true");
    // interval between automatic offset commits
    configs.put("auto.commit.interval.ms", "1000");
    configs.put("session.timeout.ms", "30000");

    configs.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    configs.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

    KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(configs);
    // topics to subscribe to; several may be subscribed at once
    consumer.subscribe(Arrays.asList("kafka-test"));
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        records.partitions(); // the set of TopicPartitions in this batch (unused here)
        for (ConsumerRecord<String, String> record : records) {
            System.out.printf("offset = %d, key = %s, value = %s%n",
                    record.offset(), record.key(), record.value());
        }
    }
}
 
Developer ID: wngn123, Project: wngn-jms-kafka, Lines: 29, Source: ComsumerDemo.java

Example 5: main

import org.apache.kafka.clients.consumer.ConsumerRecords; // the import this method depends on
public static void main(String[] args) {
    Map<String, Object> configs = new HashMap<String, Object>();
    // bootstrap.servers names one or more brokers; the full cluster need not be listed,
    // the remaining brokers are discovered automatically.
    configs.put("bootstrap.servers", "192.168.0.107:9092,192.168.0.108:9092,192.168.0.109:9092");
    configs.put("group.id", "kafka-test");
    // whether offsets are committed automatically
    configs.put("enable.auto.commit", "false");
    // interval between automatic offset commits (ignored while auto-commit is off)
    configs.put("auto.commit.interval.ms", "1000");
    configs.put("session.timeout.ms", "30000");

    configs.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    configs.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

    KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(configs);
    // assign two partitions of the topic manually instead of subscribing
    String topic = "kafka-test";
    TopicPartition partition0 = new TopicPartition(topic, 0);
    TopicPartition partition1 = new TopicPartition(topic, 1);
    consumer.assign(Arrays.asList(partition0, partition1));
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Long.MAX_VALUE);
        for (TopicPartition partition : records.partitions()) {
            List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
            for (ConsumerRecord<String, String> record : partitionRecords) {
                System.out.println(record.offset() + ": " + record.value());
            }
            /* synchronously commit this partition's offset; the committed value is the
               position of the next record to read, hence lastOffset + 1 */
            long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
            consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)));
        }
    }
}
 
Developer ID: wngn123, Project: wngn-jms-kafka, Lines: 33, Source: ComsumerDemo4.java

Example 6: setWaitingToEmit

import org.apache.kafka.clients.consumer.ConsumerRecords; // the import this method depends on
public void setWaitingToEmit(ConsumerRecords<K,V> consumerRecords) {
    List<ConsumerRecord<K,V>> waitingToEmitList = new LinkedList<>();
    // flatten the batch partition by partition, preserving per-partition offset order
    for (TopicPartition tp : consumerRecords.partitions()) {
        waitingToEmitList.addAll(consumerRecords.records(tp));
    }
    waitingToEmit = waitingToEmitList.iterator();
}
 
Developer ID: Paleozoic, Project: storm_spring_boot_demo, Lines: 8, Source: KafkaSpout.java

Example 7: addRecordsToTasks

import org.apache.kafka.clients.consumer.ConsumerRecords; // the import this method depends on
/**
 * Take records and add them to each respective task
 * @param records Records, can be null
 */
private void addRecordsToTasks(final ConsumerRecords<byte[], byte[]> records) {
    if (records != null && !records.isEmpty()) {
        int numAddedRecords = 0;

        for (final TopicPartition partition : records.partitions()) {
            final StreamTask task = activeTasksByPartition.get(partition);
            numAddedRecords += task.addRecords(partition, records.records(partition));
        }
        streamsMetrics.skippedRecordsSensor.record(records.count() - numAddedRecords, timerStartedMs);
    }
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 16, Source: StreamThread.java

Example 8: onConsume

import org.apache.kafka.clients.consumer.ConsumerRecords; // the import this method depends on
@Override
public ConsumerRecords<K, V> onConsume(ConsumerRecords<K, V> records) {
    onConsumeCount++;
    if (throwExceptionOnConsume)
        throw new KafkaException("Injected exception in FilterConsumerInterceptor.onConsume.");

    // filters out topic/partitions with partition == FILTER_PARTITION
    Map<TopicPartition, List<ConsumerRecord<K, V>>> recordMap = new HashMap<>();
    for (TopicPartition tp : records.partitions()) {
        if (tp.partition() != filterPartition)
            recordMap.put(tp, records.records(tp));
    }
    return new ConsumerRecords<K, V>(recordMap);
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 15, Source: ConsumerInterceptorsTest.java

Example 9: onRecordsReceived

import org.apache.kafka.clients.consumer.ConsumerRecords; // the import this method depends on
private Map<TopicPartition, OffsetAndMetadata> onRecordsReceived(ConsumerRecords<String, String> records) {
    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();

    List<RecordSetSummary> summaries = new ArrayList<>();
    for (TopicPartition tp : records.partitions()) {
        List<ConsumerRecord<String, String>> partitionRecords = records.records(tp);

        if (hasMessageLimit() && consumedMessages + partitionRecords.size() > maxMessages)
            partitionRecords = partitionRecords.subList(0, maxMessages - consumedMessages);

        if (partitionRecords.isEmpty())
            continue;

        long minOffset = partitionRecords.get(0).offset();
        long maxOffset = partitionRecords.get(partitionRecords.size() - 1).offset();

        offsets.put(tp, new OffsetAndMetadata(maxOffset + 1));
        summaries.add(new RecordSetSummary(tp.topic(), tp.partition(),
                partitionRecords.size(), minOffset, maxOffset));

        if (verbose) {
            for (ConsumerRecord<String, String> record : partitionRecords)
                printJson(new RecordData(record));
        }

        consumedMessages += partitionRecords.size();
        if (isFinished())
            break;
    }

    printJson(new RecordsConsumed(records.count(), summaries));
    return offsets;
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 34, Source: VerifiableConsumer.java


Note: the org.apache.kafka.clients.consumer.ConsumerRecords.partitions examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors, and redistribution or use should follow the corresponding project's license. Do not reproduce without permission.