

Java Consumer.commitSync Method Code Examples

This article collects typical usage examples of the Java method org.apache.kafka.clients.consumer.Consumer.commitSync, drawn from open-source projects. If you are wondering what Consumer.commitSync does, or how to use it in practice, the curated examples below should help. You can also browse further usage examples of the enclosing interface, org.apache.kafka.clients.consumer.Consumer.


The following presents 7 code examples of the Consumer.commitSync method, sorted by popularity.
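Before the examples, here is a minimal primer sketch of the canonical commitSync workflow (poll, process, then synchronously commit the offsets of the records just polled). The broker address, topic, and group id are placeholders, and it assumes a Kafka clients version whose poll accepts a Duration:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class CommitSyncPrimer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker
        props.put("group.id", "example-group");           // placeholder group
        props.put("enable.auto.commit", "false");         // commit manually via commitSync
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (Consumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("example-topic"));
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                for (ConsumerRecord<String, String> record : records) {
                    // process the record before committing, so a crash here means re-delivery
                    System.out.printf("offset=%d value=%s%n", record.offset(), record.value());
                }
                // no-arg commitSync commits the offsets of everything returned by the last poll
                consumer.commitSync();
            }
        }
    }
}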

Example 1: pollCommunicateOnce

import org.apache.kafka.clients.consumer.Consumer; // import the package/class the method depends on
private void pollCommunicateOnce(Consumer<ByteBuffer, ByteBuffer> consumer) {
    ConsumerRecords<ByteBuffer, ByteBuffer> records = consumer.poll(POLL_TIMEOUT);

    if (records.isEmpty()) {
        if (!stalled && checkStalled(consumer)) {
            LOGGER.info("[I] Loader stalled {} / {}", f(leadId), f(localLoaderId));
            stalled = true;
            lead.notifyLocalLoaderStalled(leadId, localLoaderId);
        }
        // ToDo: Consider sending empty messages for heartbeat sake.
        return;
    }
    if (stalled) {
        stalled = false;
    }
    MutableLongList committedIds = new LongArrayList(records.count());

    for (ConsumerRecord<ByteBuffer, ByteBuffer> record : records) {
        // the record timestamp field carries the id reported to the lead
        committedIds.add(record.timestamp());
    }
    committedIds.sortThis();
    lead.updateInitialContext(localLoaderId, committedIds);
    consumer.commitSync();
}
 
Developer ID: epam, Project: Lagerta, Lines: 25, Source: LocalLeadContextLoader.java

Example 2: commitOffsets

import org.apache.kafka.clients.consumer.Consumer; // import the package/class the method depends on
static void commitOffsets(Consumer<?, ?> consumer, Map<TopicPartition, OffsetAndMetadata> offsetsToCommit,
                          boolean async) {

    if (offsetsToCommit == null || offsetsToCommit.isEmpty()) {
        return;
    }

    OffsetCommitCallback callback = (offsets, exception) -> {
        if (exception != null) {
            LOG.warn("Unable to commit offsets for {} TopicPartition(s) {}: {}",
                    offsets.size(),
                    offsetsAsString(offsets),
                    exception.getMessage(),
                    exception);
        } else {
            LOG.debug("Successfully committed offset(s) for {} TopicPartition(s): {}",
                    offsets.size(), offsetsAsString(offsets));
        }
    };

    if (async) {
        consumer.commitAsync(offsetsToCommit, callback);
    } else {
        consumer.commitSync(offsetsToCommit);
    }
}
 
Developer ID: rmap-project, Project: rmap, Lines: 27, Source: KafkaUtils.java
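A hypothetical call site for the helper above (commitPolledRecords and its arguments are illustrative names, not code from the rmap project): the offset map is typically built from the records just processed, committing record offset + 1 because a committed offset denotes the next record to read.

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

static void commitPolledRecords(Consumer<?, ?> consumer, ConsumerRecords<?, ?> records) {
    Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = new HashMap<>();
    for (ConsumerRecord<?, ?> record : records) {
        // the committed offset is the position of the next record to read, hence + 1
        offsetsToCommit.put(
                new TopicPartition(record.topic(), record.partition()),
                new OffsetAndMetadata(record.offset() + 1));
    }
    // async = false routes to consumer.commitSync(offsetsToCommit)
    commitOffsets(consumer, offsetsToCommit, false);
}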

Example 3: main

import org.apache.kafka.clients.consumer.Consumer; // import the package/class the method depends on
public static void main(String[] args) {
    String topic = "persistent://sample/standalone/ns/my-topic";

    Properties props = new Properties();
    props.put("bootstrap.servers", "pulsar://localhost:6650");
    props.put("group.id", "my-subscription-name");
    props.put("enable.auto.commit", "false");
    props.put("key.deserializer", IntegerDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());

    Consumer<Integer, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList(topic));

    while (true) {
        ConsumerRecords<Integer, String> records = consumer.poll(100);
        records.forEach(record -> {
            log.info("Received record: {}", record);
        });

        // Commit the offsets of the records returned by the last poll
        consumer.commitSync();
    }
}
 
Developer ID: apache, Project: incubator-pulsar, Lines: 24, Source: ConsumerExample.java
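One detail worth noting in Example 3: the loop issues a blocking commit on every iteration, even when the poll returned no records. A common variant (a sketch, not the Pulsar example's actual code) skips the commit for empty polls:

    while (true) {
        ConsumerRecords<Integer, String> records = consumer.poll(100);
        records.forEach(record -> log.info("Received record: {}", record));

        if (!records.isEmpty()) {
            // avoid a blocking broker round trip when nothing new was consumed
            consumer.commitSync();
        }
    }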

Example 4: shiftToLastCommitted

import org.apache.kafka.clients.consumer.Consumer; // import the package/class the method depends on
private void shiftToLastCommitted(Consumer<?, ?> consumer, long commitId) {
    Map<TopicPartition, Long> partitionsAndTimestamps = getTopicPartitionStream(consumer)
            .collect(Collectors.toMap(identity(), v -> commitId));
    Map<TopicPartition, OffsetAndMetadata> partitionsAndOffsets = consumer
            .offsetsForTimes(partitionsAndTimestamps)
            .entrySet()
            .stream()
            .collect(Collectors.toMap(
                    Map.Entry::getKey,
                    v -> v.getValue() != null ? new OffsetAndMetadata(v.getValue().offset()) : new OffsetAndMetadata(0)
            ));
    consumer.commitSync(partitionsAndOffsets);
}
 
Developer ID: epam, Project: Lagerta, Lines: 14, Source: LeadStateLoader.java
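The getTopicPartitionStream helper is not shown in the Example 4 snippet; a plausible implementation (an assumption, not necessarily Lagerta's actual code) simply streams the consumer's current assignment. Note also that offsetsForTimes maps a partition to null when it holds no message at or after the given timestamp, which is why the snippet falls back to OffsetAndMetadata(0).

import java.util.stream.Stream;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;

private static Stream<TopicPartition> getTopicPartitionStream(Consumer<?, ?> consumer) {
    // partitions currently assigned to this consumer instance
    return consumer.assignment().stream();
}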

Example 5: commitOffsets

import org.apache.kafka.clients.consumer.Consumer; // import the package/class the method depends on
public static void commitOffsets(TransactionsBuffer buffer, OffsetCalculator offsetCalculator,
    Consumer<ByteBuffer, ByteBuffer> consumer) {
    List<List<TransactionWrapper>> allTransactions = buffer.getCommittedTxs();
    Map<TopicPartition, OffsetAndMetadata> toCommit = offsetCalculator.calculateChangedOffsets(allTransactions);

    if (!toCommit.isEmpty()) {
        consumer.commitSync(toCommit);
        buffer.removeCommitted(allTransactions);
    }
}
 
Developer ID: epam, Project: Lagerta, Lines: 11, Source: KafkaHelper.java

Example 6: testSimpleConsumer

import org.apache.kafka.clients.consumer.Consumer; // import the package/class the method depends on
@Test
public void testSimpleConsumer() throws Exception {
    String topic = "persistent://sample/standalone/ns/testSimpleConsumer";

    Properties props = new Properties();
    props.put("bootstrap.servers", lookupUrl.toString());
    props.put("group.id", "my-subscription-name");
    props.put("enable.auto.commit", "false");
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());

    Consumer<String, String> consumer = new PulsarKafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList(topic));

    Producer pulsarProducer = pulsarClient.createProducer(topic);

    for (int i = 0; i < 10; i++) {
        Message msg = MessageBuilder.create().setKey(Integer.toString(i)).setContent(("hello-" + i).getBytes())
                .build();
        pulsarProducer.send(msg);
    }

    for (int i = 0; i < 10; i++) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        assertEquals(records.count(), 1);
        int idx = i;
        records.forEach(record -> {
            assertEquals(record.key(), Integer.toString(idx));
            assertEquals(record.value(), "hello-" + idx);
        });

        consumer.commitSync();
    }

    consumer.close();
}
 
Developer ID: apache, Project: incubator-pulsar, Lines: 37, Source: KafkaConsumerTest.java

Example 7: read

import org.apache.kafka.clients.consumer.Consumer; // import the package/class the method depends on
/**
 * Reads all JSON messages from the replicate info topic and creates the
 * latest catalog for the PLOG monitor to use when controlling Kafka task
 * configuration. This is only done during source connector startup,
 * so a new consumer is created on each call.
 * 
 * @return synchronized sorted catalog of replicate info
 * @throws Exception for any consumer errors
 */
public synchronized Map <String, ReplicateInfo> read () throws Exception {
    /* short-lived consumer; no need to keep it alive, seeing that
     * it always consumes all messages again, the message volume
     * is low, and the consumer doesn't need to be coordinated */
    Consumer<String, String> consumer = new KafkaConsumer<String, String> (
        consumerProps
    );
    consumer.subscribe(Arrays.asList(topicName));
    
    Map<String, ReplicateInfo> catalog = Collections.synchronizedMap(
        new LinkedHashMap<String, ReplicateInfo> ()
    );
    try {
        logger.debug (
            "Polling topic: " + topicName + " for existing replicate " +
            "catalog"
        );
        
        /* greedy implementation, always fetch all replicate info 
         * messages when source connector is started */
        ConsumerRecords<String, String> records = consumer.poll(
            REPLICATE_INFO_POLL_TIMEOUT
        );
        consumer.commitSync();
        
        if (records != null) {
            for (ConsumerRecord<String, String> record : records) {
                logger.trace (
                    "Read " + topicName + " record, key: " + 
                    record.key() + " value: " + record.value()
                );
                String identifier  = record.key();
                ReplicateInfo info = ReplicateInfo.fromJSONString(
                    record.value()
                );
                /* all messages are consumed in order, so always overwrite
                 * with the latest info */
                catalog.put (identifier, info);
            }
        }
    }
    catch (Exception e) {
        throw new Exception (
            "Failed to read replicate info records from topic: " + 
            topicName + ", reason: " + e.getMessage(),
            e
        );
    }
    finally {
        consumer.close();
    }
    
    logger.debug (
        "Read replicate catalog: " + catalog.toString() + " " +
        "from topic: " + topicName
    );
    return catalog;
}
 
Developer ID: dbvisitsoftware, Project: replicate-connector-for-kafka, Lines: 68, Source: ReplicateInfoTopic.java


Note: The org.apache.kafka.clients.consumer.Consumer.commitSync method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers, and copyright in the source code remains with the original authors. For distribution and use, refer to the corresponding project's license; do not reproduce without permission.