

Java Consumer.commitSync Method Code Examples

This article collects typical usage examples of the Java method org.apache.kafka.clients.consumer.Consumer.commitSync. If you are wondering what exactly Consumer.commitSync does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples of org.apache.kafka.clients.consumer.Consumer itself.


Seven code examples of the Consumer.commitSync method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
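Before the examples, here is a minimal self-contained sketch of the two commitSync variants that appear below. The broker address localhost:9092, the group id commit-sync-demo, and the topic my-topic are illustrative assumptions, not taken from any project in this article: the no-arg overload commits the positions returned by the last poll(), while the Map overload commits explicit per-partition offsets.

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class CommitSyncSketch {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed broker address
        props.put("group.id", "commit-sync-demo");        // hypothetical group id
        props.put("enable.auto.commit", "false");         // commit manually via commitSync
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());

        try (Consumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("my-topic")); // assumed topic

            ConsumerRecords<String, String> records = consumer.poll(100);

            // Variant 1: the no-arg overload commits, for every assigned partition,
            // the position after the last record returned by the last poll().
            consumer.commitSync();

            // Variant 2: the Map overload commits explicit offsets. By convention the
            // committed offset is that of the next record to read, hence the +1.
            Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
            for (ConsumerRecord<String, String> record : records) {
                offsets.put(new TopicPartition(record.topic(), record.partition()),
                        new OffsetAndMetadata(record.offset() + 1));
            }
            if (!offsets.isEmpty()) {
                consumer.commitSync(offsets);
            }
        }
    }
}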

Example 1: pollCommunicateOnce

import org.apache.kafka.clients.consumer.Consumer; // import the package/class this method depends on
private void pollCommunicateOnce(Consumer<ByteBuffer, ByteBuffer> consumer) {
    ConsumerRecords<ByteBuffer, ByteBuffer> records = consumer.poll(POLL_TIMEOUT);

    if (records.isEmpty()) {
        if (!stalled && checkStalled(consumer)) {
            LOGGER.info("[I] Loader stalled {} / {}", f(leadId), f(localLoaderId));
            stalled = true;
            lead.notifyLocalLoaderStalled(leadId, localLoaderId);
        }
        // ToDo: Consider sending empty messages for heartbeat sake.
        return;
    }
    if (stalled) {
        stalled = false;
    }
    MutableLongList committedIds = new LongArrayList(records.count());

    for (ConsumerRecord<ByteBuffer, ByteBuffer> record : records) {
        committedIds.add(record.timestamp());
    }
    committedIds.sortThis();
    lead.updateInitialContext(localLoaderId, committedIds);
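    // No-arg commitSync(): commits the positions from the last poll() for all assigned partitions.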
    consumer.commitSync();
}
 
Developer: epam, Project: Lagerta, Lines: 25, Source: LocalLeadContextLoader.java

Example 2: commitOffsets

import org.apache.kafka.clients.consumer.Consumer; // import the package/class this method depends on
static void commitOffsets(Consumer<?, ?> consumer, Map<TopicPartition, OffsetAndMetadata> offsetsToCommit,
                          boolean async) {

    if (offsetsToCommit == null || offsetsToCommit.isEmpty()) {
        return;
    }

    OffsetCommitCallback callback = (offsets, exception) -> {
        if (exception != null) {
            LOG.warn("Unable to commit offsets for {} TopicPartition(s) {}: {}",
                    offsets.size(),
                    offsetsAsString(offsets),
                    exception.getMessage(),
                    exception);
        } else {
            LOG.debug("Successfully committed offset(s) for {} TopicPartition(s): {}",
                    offsets.size(), offsetsAsString(offsets));
        }
    };
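    // A synchronous commit throws on failure; the asynchronous path reports the result through the callback above.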

    if (async) {
        consumer.commitAsync(offsetsToCommit, callback);
    } else {
        consumer.commitSync(offsetsToCommit);
    }
}
 
Developer: rmap-project, Project: rmap, Lines: 27, Source: KafkaUtils.java

Example 3: main

import org.apache.kafka.clients.consumer.Consumer; // import the package/class this method depends on
public static void main(String[] args) {
    String topic = "persistent://sample/standalone/ns/my-topic";

    Properties props = new Properties();
    props.put("bootstrap.servers", "pulsar://localhost:6650");
    props.put("group.id", "my-subscription-name");
    props.put("enable.auto.commit", "false");
    props.put("key.deserializer", IntegerDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());

    Consumer<Integer, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList(topic));

    while (true) {
        ConsumerRecords<Integer, String> records = consumer.poll(100);
        records.forEach(record -> {
            log.info("Received record: {}", record);
        });

        // Commit last offset
        consumer.commitSync();
    }
}
 
Developer: apache, Project: incubator-pulsar, Lines: 24, Source: ConsumerExample.java

Example 4: shiftToLastCommitted

import org.apache.kafka.clients.consumer.Consumer; // import the package/class this method depends on
private void shiftToLastCommitted(Consumer<?, ?> consumer, long commitId) {
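    // Interpret commitId as a timestamp: map every assigned partition to it, then look up the earliest offset at or after that time.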
    Map<TopicPartition, Long> partitionsAndTimestamps = getTopicPartitionStream(consumer)
            .collect(Collectors.toMap(identity(), v -> commitId));
    Map<TopicPartition, OffsetAndMetadata> partitionsAndOffsets = consumer
            .offsetsForTimes(partitionsAndTimestamps)
            .entrySet()
            .stream()
            .collect(Collectors.toMap(
                    Map.Entry::getKey,
                    v -> v.getValue() != null ? new OffsetAndMetadata(v.getValue().offset()) : new OffsetAndMetadata(0)
            ));
    consumer.commitSync(partitionsAndOffsets);
}
 
Developer: epam, Project: Lagerta, Lines: 14, Source: LeadStateLoader.java

Example 5: commitOffsets

import org.apache.kafka.clients.consumer.Consumer; // import the package/class this method depends on
public static void commitOffsets(TransactionsBuffer buffer, OffsetCalculator offsetCalculator,
    Consumer<ByteBuffer, ByteBuffer> consumer) {
    List<List<TransactionWrapper>> allTransactions = buffer.getCommittedTxs();
    Map<TopicPartition, OffsetAndMetadata> toCommit = offsetCalculator.calculateChangedOffsets(allTransactions);

    if (!toCommit.isEmpty()) {
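        // Commit to Kafka first, then prune the buffer, so offsets are never dropped uncommitted.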
        consumer.commitSync(toCommit);
        buffer.removeCommitted(allTransactions);
    }
}
 
Developer: epam, Project: Lagerta, Lines: 11, Source: KafkaHelper.java

Example 6: testSimpleConsumer

import org.apache.kafka.clients.consumer.Consumer; // import the package/class this method depends on
@Test
public void testSimpleConsumer() throws Exception {
    String topic = "persistent://sample/standalone/ns/testSimpleConsumer";

    Properties props = new Properties();
    props.put("bootstrap.servers", lookupUrl.toString());
    props.put("group.id", "my-subscription-name");
    props.put("enable.auto.commit", "false");
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());

    Consumer<String, String> consumer = new PulsarKafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList(topic));

    Producer pulsarProducer = pulsarClient.createProducer(topic);

    for (int i = 0; i < 10; i++) {
        Message msg = MessageBuilder.create().setKey(Integer.toString(i)).setContent(("hello-" + i).getBytes())
                .build();
        pulsarProducer.send(msg);
    }

    for (int i = 0; i < 10; i++) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        assertEquals(records.count(), 1);
        int idx = i;
        records.forEach(record -> {
            assertEquals(record.key(), Integer.toString(idx));
            assertEquals(record.value(), "hello-" + idx);
        });

        consumer.commitSync();
    }

    consumer.close();
}
 
Developer: apache, Project: incubator-pulsar, Lines: 37, Source: KafkaConsumerTest.java

Example 7: read

import org.apache.kafka.clients.consumer.Consumer; // import the package/class this method depends on
/**
 * Reads all JSON messages from the replicate info topic and creates the
 * latest catalog for the PLOG monitor to use when controlling Kafka task
 * configuration. This is only done during source connector startup,
 * so it creates a new consumer on each call.
 *
 * @return synchronized sorted catalog of replicate info
 * @throws Exception for any consumer errors
 */
public synchronized Map<String, ReplicateInfo> read() throws Exception {
    /* Short-lived consumer: it always re-consumes all messages,
     * the message volume is low, and the consumer doesn't need to be
     * coordinated, so there is no need to keep it alive. */
    Consumer<String, String> consumer = new KafkaConsumer<String, String> (
        consumerProps
    );
    consumer.subscribe(Arrays.asList(topicName));
    
    Map<String, ReplicateInfo> catalog = Collections.synchronizedMap(
        new LinkedHashMap<String, ReplicateInfo> ()
    );
    try {
        logger.debug (
            "Polling topic: " + topicName + " for existing replicate " +
            "catalog"
        );
        
        /* greedy implementation, always fetch all replicate info 
         * messages when source connector is started */
        ConsumerRecords<String, String> records = consumer.poll(
            REPLICATE_INFO_POLL_TIMEOUT
        );
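        // Commit right after poll(), before processing the records (at-most-once for this startup read).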
        consumer.commitSync();
        
        if (records != null) {
            for (ConsumerRecord<String, String> record : records) {
                logger.trace (
                    "Read " + topicName + " record, key: " + 
                    record.key() + " value: " + record.value()
                );
                String identifier  = record.key();
                ReplicateInfo info = ReplicateInfo.fromJSONString(
                    record.value()
                );
                /* all messages are consumed in order, so always overwrite
                 * with the latest info */
                catalog.put (identifier, info);
            }
        }
    }
    catch (Exception e) {
        throw new Exception (
            "Failed to read replicate info records from topic: " + 
            topicName + ", reason: " + e.getMessage(),
            e
        );
    }
    finally {
        consumer.close();
    }
    
    logger.debug (
        "Read replicate catalog: " + catalog.toString() + " " +
        "from topic: " + topicName
    );
    return catalog;
}
 
Developer: dbvisitsoftware, Project: replicate-connector-for-kafka, Lines: 68, Source: ReplicateInfoTopic.java


Note: the org.apache.kafka.clients.consumer.Consumer.commitSync examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. For redistribution and use, refer to the corresponding project's license; do not reproduce this article without permission.