

Java ConsumerRecords.count Method Code Examples

This article collects typical usage examples of the Java method org.apache.kafka.clients.consumer.ConsumerRecords.count, which returns the total number of records in a polled batch across all topic partitions. If you are unsure what ConsumerRecords.count does or how to use it, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.kafka.clients.consumer.ConsumerRecords.


The following presents 15 code examples of the ConsumerRecords.count method, sorted by popularity by default.
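For orientation, here is a minimal, self-contained sketch of the method itself. It is not taken from any of the projects cited below; the broker address, group id, and topic name are placeholder assumptions, and it assumes Kafka clients 2.0+ for the Duration-based poll.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class CountQuickStart {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed broker address
        props.put("group.id", "count-quickstart");        // assumed group id
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("demo-topic")); // assumed topic name
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            // count() sums the records over every partition contained in this batch;
            // it is the emptiness/size check used throughout the examples below.
            System.out.println("Fetched " + records.count() + " record(s)");
        }
    }
}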

Example 1: run

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the package/class this method depends on
@Override
public void run() {
    try {
        consumer.subscribe(Collections.singletonList(Config.getProperty("input_topic")));
        while (!closed.get()) {
            ConsumerRecords<String, String> records = consumer.poll(3000);
            try {
                consumer.commitSync(); // commit the freshly polled offsets before handling; if the handler fails, these records are not redelivered (at-most-once)
            } catch (Exception ignored) {
            }
            if (records.count() > 0) {
                handler.consume(records);
            }
        }
    } catch (WakeupException e) {
        if (!closed.get()) throw e;
    } finally {
        consumer.close();
    }
}
 
Developer: Zephery, Project: newblog, Lines: 21, Source: KafkaConsumerRunner.java
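A note that applies to this and most of the examples below: they call the long-based consumer.poll(timeoutMs) overload, which has been deprecated since Kafka clients 2.0 in favor of poll(java.time.Duration). On a current client the call above would read as follows (a drop-in sketch, not from the original project):

    // Equivalent call on Kafka clients 2.0+: Duration replaces the raw millisecond timeout.
    ConsumerRecords<String, String> records = consumer.poll(java.time.Duration.ofMillis(3000));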

Example 2: poll

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the package/class this method depends on
public List<StatMessage> poll() {
    /* Quick fetch: return immediately if nothing is available */
    ConsumerRecords<String, String> records = consumer.poll(1000);
    if (records.count() == 0) {
        count++;
        if (count % 60 == 0) {
            count = 0;
            LOG.info(String.format("running on %s (offset=%d).......", statTopic,  consumer.position(statTopicPartition)));
        }
        return null;
    }

    LOG.info(String.format("KafkaSource got %d records......", records.count()));

    List<StatMessage> list = new ArrayList<>();
    for (ConsumerRecord<String, String> record : records) {
        String key = record.key();
        long offset = record.offset();

        StatMessage msg = StatMessage.parse(record.value());
        list.add(msg);
        //logger.info(String.format("KafkaSource got record key=%s, offset=%d......", key, offset));
    }

    return list;
}
 
Developer: BriData, Project: DBus, Lines: 27, Source: KafkaSource.java

Example 3: main

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.setProperty("bootstrap.servers", args[0]);
    props.setProperty("group.id", UUID.randomUUID().toString());
    props.setProperty("key.deserializer", LongDeserializer.class.getName());
    props.setProperty("value.deserializer", TradeDeserializer.class.getName());
    props.setProperty("auto.offset.reset", "earliest");
    KafkaConsumer<Long, Trade> consumer = new KafkaConsumer<>(props);
    List<String> topics = Arrays.asList(args[1]);
    consumer.subscribe(topics);
    System.out.println("Subscribed to topics " + topics);
    long count = 0;
    long start = System.nanoTime();
    while (true) {
        ConsumerRecords<Long, Trade> poll = consumer.poll(5000);
        System.out.println("Partitions in batch: " + poll.partitions());
        LongSummaryStatistics stats = StreamSupport.stream(poll.spliterator(), false)
                .mapToLong(r -> r.value().getTime()).summaryStatistics();
        System.out.println("Oldest record time: " + stats.getMin() + ", newest record: " + stats.getMax());
        count += poll.count();
        long elapsed = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
        long rate = (long) ((double) count / elapsed * 1000);
        System.out.printf("Total count: %,d in %,dms. Average rate: %,d records/s %n", count, elapsed, rate);

    }
}
 
Developer: hazelcast, Project: big-data-benchmark, Lines: 27, Source: TradeTestConsumer.java

Example 4: pollAndCommitTransactionsBatch

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the package/class this method depends on
private void pollAndCommitTransactionsBatch() {
    ConsumerRecords<ByteBuffer, ByteBuffer> records = consumer.poll(POLL_TIMEOUT);
    List<TransactionScope> scopes = new ArrayList<>(records.count());
    for (ConsumerRecord<ByteBuffer, ByteBuffer> record : records) {
        TransactionScope transactionScope = serializer.deserialize(record.key());
        if (transactionScope.getScope().isEmpty()) {
            LOGGER.warn("[R] {} polled empty transaction {}", readerId, transactionScope.getTransactionId());
        }
        TopicPartition topicPartition = new TopicPartition(record.topic(), record.partition());
        buffer.put(transactionScope.getTransactionId(),
                new TransactionData(transactionScope, record.value(), topicPartition, record.offset()));
        scopes.add(transactionScope);
        committedOffsetMap.computeIfAbsent(topicPartition, COMMITTED_OFFSET).notifyRead(record.offset());
    }
    if (!scopes.isEmpty()) {
        scopes.sort(SCOPE_COMPARATOR);
        LOGGER.trace("[R] {} polled {}", readerId, scopes);
    }
    approveAndCommitTransactionsBatch(scopes);
}
 
Developer: epam, Project: Lagerta, Lines: 21, Source: Reader.java

Example 5: pollCommunicateOnce

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the package/class this method depends on
private void pollCommunicateOnce(Consumer<ByteBuffer, ByteBuffer> consumer) {
    ConsumerRecords<ByteBuffer, ByteBuffer> records = consumer.poll(POLL_TIMEOUT);

    if (records.isEmpty()) {
        if (!stalled && checkStalled(consumer)) {
            LOGGER.info("[I] Loader stalled {} / {}", f(leadId), f(localLoaderId));
            stalled = true;
            lead.notifyLocalLoaderStalled(leadId, localLoaderId);
        }
        // ToDo: Consider sending empty messages for heartbeat sake.
        return;
    }
    if (stalled) {
        stalled = false;
    }
    MutableLongList committedIds = new LongArrayList(records.count());

    for (ConsumerRecord<ByteBuffer, ByteBuffer> record : records) {
        committedIds.add(record.timestamp());
    }
    committedIds.sortThis();
    lead.updateInitialContext(localLoaderId, committedIds);
    consumer.commitSync();
}
 
Developer: epam, Project: Lagerta, Lines: 25, Source: LocalLeadContextLoader.java

Example 6: run

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the package/class this method depends on
/**
 * Check the performance of pulling a whole table.
 *
 * @throws Exception if reading from Kafka fails
 */
public void run() throws Exception {
    int readCount = 0;
    try {
        this.consumer = createConsumer();
        // Fetch data from the consumer
        while (running) {
            // Wait up to 1000 ms for new records
            ConsumerRecords<String, String> records = consumer.poll(1000);
            if (records.count() == 0) {
                System.out.print(".");
                continue;
            }
            for (ConsumerRecord<String, String> record : records) {
                if (readCount >= maxLength) {
                    running = false;
                    break;
                }
                readCount++;

                System.out.println("");
                System.out.println("offset: " + record.offset() + ", key:" + record.key());
                System.out.println(record.value());
            }
        }

    } catch (Exception e) {
        logger.error("Exception was caught when read kafka", e);
        throw e;
    } finally {
        System.out.println("");

        consumer.close();
        logger.info("Finished read kafka");
    }
}
 
Developer: BriData, Project: DBus, Lines: 41, Source: KafkaReader.java

Example 7: main

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the package/class this method depends on
public static void main(String[] args) throws InterruptedException {

        Properties props = new Properties();
        props.put(BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(GROUP_ID_CONFIG, "b");
        props.put(ENABLE_AUTO_COMMIT_CONFIG, "true");
        props.put(AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        props.put(SESSION_TIMEOUT_MS_CONFIG, "30000");
        props.put(KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        consumer.subscribe(Arrays.asList("produktion"));

        System.out.println("Consumer B gestartet!");

        while(true) {

            ConsumerRecords<String, String> records = consumer.poll(1000);
            if (records.count() == 0)
                continue;

            for (ConsumerRecord<String, String> record : records)
                System.out.printf("offset= %d, key= %s, value= %s\n", record.offset(), record.key(), record.value());

        }
    }
 
Developer: predic8, Project: apache-kafka-demos, Lines: 29, Source: Consumer_B.java

Example 8: main

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the package/class this method depends on
public static void main(String[] args) throws InterruptedException {

        Properties props = new Properties();
        props.put(BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(GROUP_ID_CONFIG, "c");
        props.put(ENABLE_AUTO_COMMIT_CONFIG, "true");
        props.put(AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        props.put(SESSION_TIMEOUT_MS_CONFIG, "30000");
        props.put(KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        consumer.subscribe(Arrays.asList("produktion"));

        System.out.println("Consumer C gestartet!");

        while(true) {

            ConsumerRecords<String, String> records = consumer.poll(1000);
            if (records.count() == 0)
                continue;

            for (ConsumerRecord<String, String> record : records)
                System.out.printf("offset= %d, key= %s, value= %s\n", record.offset(), record.key(), record.value());

        }
    }
 
Developer: predic8, Project: apache-kafka-demos, Lines: 29, Source: Consumer_C.java

Example 9: getOutputRecords

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the package/class this method depends on
private static Map<String, Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>>> getOutputRecords(final KafkaConsumer<byte[], byte[]> consumer,
                                                                                                       final Map<TopicPartition, Long> committedOffsets) {
    final Map<String, Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>>> recordPerTopicPerPartition = new HashMap<>();

    long maxWaitTime = System.currentTimeMillis() + MAX_IDLE_TIME_MS;
    boolean allRecordsReceived = false;
    while (!allRecordsReceived && System.currentTimeMillis() < maxWaitTime) {
        final ConsumerRecords<byte[], byte[]> receivedRecords = consumer.poll(500);

        for (final ConsumerRecord<byte[], byte[]> record : receivedRecords) {
            maxWaitTime = System.currentTimeMillis() + MAX_IDLE_TIME_MS;
            addRecord(record, recordPerTopicPerPartition);
        }

        if (receivedRecords.count() > 0) {
            allRecordsReceived =
                receivedAllRecords(
                    recordPerTopicPerPartition.get("data"),
                    recordPerTopicPerPartition.get("echo"),
                    committedOffsets);
        }
    }

    if (!allRecordsReceived) {
        throw new RuntimeException("FAIL: did not receive all records after 30 sec idle time.");
    }

    return recordPerTopicPerPartition;
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 30, Source: EosTestDriver.java

Example 10: main

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the package/class this method depends on
public static void main(String[] args) throws InterruptedException {

        Properties props = new Properties();
        props.put(BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(GROUP_ID_CONFIG, "a");
        props.put(ENABLE_AUTO_COMMIT_CONFIG, "true");
        props.put(AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        props.put(SESSION_TIMEOUT_MS_CONFIG, "30000");
        props.put(KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        consumer.subscribe(Arrays.asList("produktion"), new OffsetBeginningRebalanceListener(consumer, "produktion"));

        while(true) {

            ConsumerRecords<String, String> records = consumer.poll(1000);
            if (records.count() == 0)
                continue;

            System.out.println(" Count: " + records.count());

            for (ConsumerRecord<String, String> rec : records)
                System.out.printf("partition= %d, offset= %d, key= %s, value= %s\n", rec.partition(), rec.offset(), rec.key(), rec.value());

        }
    }
 
Developer: predic8, Project: apache-kafka-demos, Lines: 29, Source: SimpleConsumer.java

Example 11: main

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the package/class this method depends on
public static void main(String[] args) throws InterruptedException {

        Properties props = new Properties();
        props.put(BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(GROUP_ID_CONFIG, "a");
        props.put(ENABLE_AUTO_COMMIT_CONFIG, "true");
        props.put(AUTO_COMMIT_INTERVAL_MS_CONFIG, 1000);
        props.put(SESSION_TIMEOUT_MS_CONFIG, 30000);
        props.put(KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        consumer.subscribe(Arrays.asList("produktion"), new SeekToBeginningRebalanceListener(consumer));

        int num = 0;
        int numOld = -1;
        while (num != numOld) {
            ConsumerRecords<String, String> records = consumer.poll(1000);

            numOld = num;
            num += records.count();

            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("Key: %s Offset: %s\n", record.key(), record.offset());
            }

            System.out.println("Gelesene Nachrichten:" + num);

        }

        consumer.close();

    }
 
Developer: predic8, Project: apache-kafka-demos, Lines: 35, Source: RetentionCompactConsumer.java

Example 12: main

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the package/class this method depends on
public static void main(String[] args) throws InterruptedException {

        Properties props = new Properties();
        props.put(BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(GROUP_ID_CONFIG, "k");
        props.put(ENABLE_AUTO_COMMIT_CONFIG, "true");
        props.put(AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        props.put(KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.LongDeserializer");
        props.put(VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");


        try {
            KafkaConsumer<Long, String> consumer = new KafkaConsumer<>(props);

            consumer.subscribe(Arrays.asList("produktion"), new LogRebalanceListener());

            while (true) {

                ConsumerRecords<Long, String> records = consumer.poll(1000);
                if (records.count() == 0)
                    continue;

                System.out.print("Partitions: " + records.partitions());
                System.out.println(" Count: " + records.count());

                for (ConsumerRecord<Long, String> record : records)
                    System.out.printf("partition=%d, offset= %d, key= %s, value= %s\n", record.partition(), record.offset(), record.key(), record.value());

            }
        } catch (RuntimeException e) {
            System.out.println("e = " + e);
        } finally {
            System.out.println("Closing!");
        }
    }
 
Developer: predic8, Project: apache-kafka-demos, Lines: 36, Source: SimpleConsumer.java

Example 13: run

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the package/class this method depends on
@Override
public TaskReport run(TaskSource taskSource, Schema schema, int taskIndex, PageOutput output) {
    PluginTask task = taskSource.loadTask(PluginTask.class);

    BufferAllocator allocator = task.getBufferAllocator();
    PageBuilder builder = new PageBuilder(allocator, schema, output);
    KafkaInputColumns columns = new KafkaInputColumns(task);

    KafkaProperties props = new KafkaProperties(task);
    KafkaConsumer<?, ?> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(task.getTopics());
    setOffsetPosition(consumer, task);

    long readRecords = 0;
    long showReadRecords = 500;
    while(true) {
        ConsumerRecords<?,?> records = consumer.poll(task.getPollTimeoutSec() * 1000);
        if(records.count() == 0) {
            break;
        }
        readRecords += records.count();
        columns.setOutputRecords(builder, records);
        builder.flush();
        if(readRecords >= showReadRecords) {
            logger.info(String.format("Read %d record(s) in task-%d", readRecords, taskIndex));
            showReadRecords *= 2;
        }
    }
    builder.finish();
    builder.close();
    logger.info(String.format("Finishing task-%d.Total %d record(s) read in this task", taskIndex, readRecords));
    consumer.close();

    return Exec.newTaskReport();
}
 
Developer: sasakitoa, Project: embulk-input-kafka, Lines: 36, Source: KafkaInputPlugin.java

Example 14: main

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the package/class this method depends on
public static void main(String[] args) throws InterruptedException {

        Properties props = new Properties();
        props.put(BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(GROUP_ID_CONFIG, "a");
        props.put(ENABLE_AUTO_COMMIT_CONFIG, "true");
        props.put(AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        props.put(SESSION_TIMEOUT_MS_CONFIG, "30000");
        props.put(KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        consumer.subscribe(Arrays.asList("produktion"));

        System.out.println("Consumer gestartet!");

        while(true) {

            ConsumerRecords<String, String> records = consumer.poll(1000);
            if (records.count() == 0)
                continue;

            for (ConsumerRecord<String, String> record : records)
                System.out.printf("offset= %d, key= %s, timestamp=%d, timestampType=%s, value= %s\n", record.offset(), record.key(), record.timestamp(), record.timestampType().toString(), record.value());

        }
    }
 
Developer: predic8, Project: apache-kafka-demos, Lines: 29, Source: TimestampConsumer.java

Example 15: main

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the package/class this method depends on
public static void main(String[] args) throws InterruptedException {

        Properties props = new Properties();
        props.put(BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(GROUP_ID_CONFIG, "a");
        props.put(ENABLE_AUTO_COMMIT_CONFIG, "true");
        props.put(AUTO_COMMIT_INTERVAL_MS_CONFIG, 1000);
        props.put(SESSION_TIMEOUT_MS_CONFIG, 30000);
        props.put(KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        consumer.subscribe(Arrays.asList("produktion"), new SeekToBeginningRebalanceListener(consumer));

        int num = 0;
        int numOld = -1;
        while (num != numOld) {
            ConsumerRecords<String, String> records = consumer.poll(1000);

            numOld = num;
            num += records.count();

            System.out.println("Gelesene Nachrichten: " + num);

        }

        consumer.close();

    }
 
Developer: predic8, Project: apache-kafka-demos, Lines: 31, Source: RetentionDeleteConsumer.java


Note: the org.apache.kafka.clients.consumer.ConsumerRecords.count examples in this article were collected from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Please do not reproduce without permission.