

Java KafkaConsumer.commitSync Method Code Examples

This article collects typical usage examples of the Java method org.apache.kafka.clients.consumer.KafkaConsumer.commitSync. If you are wondering what KafkaConsumer.commitSync does, how to call it, or what it looks like in real code, the hand-picked examples below should help. You can also explore further usage examples of the containing class, org.apache.kafka.clients.consumer.KafkaConsumer.


The following presents 13 code examples of the KafkaConsumer.commitSync method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
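Before the examples, here is a minimal baseline sketch (not taken from any of the projects cited below) of the pattern most of them follow: disable enable.auto.commit, poll records, process them, and only then call commitSync() so offsets are committed after processing succeeds. The broker address, group id, and topic name are placeholder values.

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class CommitSyncSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "commit-sync-demo");        // placeholder group id
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");         // commit offsets manually
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("demo-topic")); // placeholder topic
            while (true) {
                // poll(long) as used by the examples in this article; newer clients also offer poll(Duration)
                ConsumerRecords<String, String> records = consumer.poll(100);
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("offset=%d value=%s%n", record.offset(), record.value());
                }
                // blocks until the offsets returned by the last poll are committed,
                // or throws an exception (e.g. CommitFailedException) if the commit fails
                consumer.commitSync();
            }
        }
    }
}

The examples below vary this pattern: committing only after a batch has been persisted (Examples 1 and 9), committing explicit per-partition offsets via commitSync(Map) (Examples 2, 4, 7 and 8), or committing offsets migrated from ZooKeeper (Examples 5 and 6).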

Example 1: receive

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public String receive() {
    KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(properties);
    consumer.subscribe(Arrays.asList(properties.getProperty("topic")));
    final int minBatchSize = 200;
    List<ConsumerRecord<String, String>> buffer = new ArrayList<ConsumerRecord<String, String>>();
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);

        for (ConsumerRecord<String, String> record : records) {
            buffer.add(record);
            System.err.println(buffer.size() + "----->" + record);

        }
        if (buffer.size() >= minBatchSize) {
            writeFileToHadoop(buffer); // first write the buffer contents to a file
            consumer.commitSync();
            buffer.clear();
        }
    }
}
 
Developer: wanghan0501, Project: WiFiProbeAnalysis, Lines: 21, Source: KafkaConsumerForHive.java

Example 2: main

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public static void main(String[] args) {
    OffsetSetterConfig config = null;

    try {
        config = createOffsetSetterConfig(args);
    } catch (ParseException e) {
        System.err.println(e.getMessage());
        System.exit(1);
    }

    Map<TopicPartition, OffsetAndMetadata> m = new HashMap<>();
    m.put(new TopicPartition(config.kafkaTopic, config.kafkaPartition), new OffsetAndMetadata(config.kafkaOffset));

    System.out.println("Creating Kafka consumer ...");
    KafkaConsumer<String, String> kc = new org.apache.kafka.clients.consumer.KafkaConsumer<>(config.kafkaProperties);
    System.out.println("Committing offset " + config.kafkaOffset + " to topic " + config.kafkaTopic + ", partition " + config.kafkaPartition + " ...");
    kc.commitSync(m);
    System.out.println("Closing Kafka consumer ...");
    kc.close();
    System.out.println("Done!");
}
 
Developer: lovromazgon, Project: kafka-offset-setter, Lines: 22, Source: OffsetSetter.java

Example 3: cleanUp

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
private void cleanUp(KafkaConsumer<StatEventKey, StatAggregate> kafkaConsumer, int unCommittedRecCount) {

    // force a flush of anything in the aggregator
    if (statAggregator != null) {
        LOGGER.debug("Forcing a flush of aggregator {} on processor {}", statAggregator, this);
        flushAggregator();
    }
    if (kafkaConsumer != null) {
        if (unCommittedRecCount > 0) {
            LOGGER.debug("Committing kafka offset on processor {}", this);
            kafkaConsumer.commitSync();
        }
        LOGGER.debug("Closing kafka consumer on processor {}", this);
        kafkaConsumer.close();
    }
}
 
Developer: gchq, Project: stroom-stats, Lines: 17, Source: StatisticsAggregationProcessor.java

Example 4: commitInvalidOffsets

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
private void commitInvalidOffsets() {
    final KafkaConsumer consumer = new KafkaConsumer(TestUtils.consumerConfig(
        CLUSTER.bootstrapServers(),
        streamsConfiguration.getProperty(StreamsConfig.APPLICATION_ID_CONFIG),
        StringDeserializer.class,
        StringDeserializer.class));

    final Map<TopicPartition, OffsetAndMetadata> invalidOffsets = new HashMap<>();
    invalidOffsets.put(new TopicPartition(TOPIC_1_2, 0), new OffsetAndMetadata(5, null));
    invalidOffsets.put(new TopicPartition(TOPIC_2_2, 0), new OffsetAndMetadata(5, null));
    invalidOffsets.put(new TopicPartition(TOPIC_A_2, 0), new OffsetAndMetadata(5, null));
    invalidOffsets.put(new TopicPartition(TOPIC_C_2, 0), new OffsetAndMetadata(5, null));
    invalidOffsets.put(new TopicPartition(TOPIC_Y_2, 0), new OffsetAndMetadata(5, null));
    invalidOffsets.put(new TopicPartition(TOPIC_Z_2, 0), new OffsetAndMetadata(5, null));

    consumer.commitSync(invalidOffsets);

    consumer.close();
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 20, Source: KStreamsFineGrainedAutoResetIntegrationTest.java

Example 5: migrateOffsets

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
private void migrateOffsets(String topicStr) {
  ZkUtils zkUtils = ZkUtils.apply(zookeeperConnect, ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT,
      JaasUtils.isZkSecurityEnabled());
  KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(kafkaProps);
  try {
    Map<TopicPartition, OffsetAndMetadata> kafkaOffsets =
        getKafkaOffsets(consumer, topicStr);
    if (!kafkaOffsets.isEmpty()) {
      log.info("Found Kafka offsets for topic " + topicStr +
          ". Will not migrate from zookeeper");
      log.debug("Offsets found: {}", kafkaOffsets);
      return;
    }

    log.info("No Kafka offsets found. Migrating zookeeper offsets");
    Map<TopicPartition, OffsetAndMetadata> zookeeperOffsets =
        getZookeeperOffsets(zkUtils, topicStr);
    if (zookeeperOffsets.isEmpty()) {
      log.warn("No offsets to migrate found in Zookeeper");
      return;
    }

    log.info("Committing Zookeeper offsets to Kafka");
    log.debug("Offsets to commit: {}", zookeeperOffsets);
    consumer.commitSync(zookeeperOffsets);
    // Read the offsets to verify they were committed
    Map<TopicPartition, OffsetAndMetadata> newKafkaOffsets =
        getKafkaOffsets(consumer, topicStr);
    log.debug("Offsets committed: {}", newKafkaOffsets);
    if (!newKafkaOffsets.keySet().containsAll(zookeeperOffsets.keySet())) {
      throw new FlumeException("Offsets could not be committed");
    }
  } finally {
    zkUtils.close();
    consumer.close();
  }
}
 
Developer: moueimei, Project: flume-release-1.7.0, Lines: 38, Source: KafkaSource.java

Example 6: migrateOffsets

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
private void migrateOffsets() {
  ZkUtils zkUtils = ZkUtils.apply(zookeeperConnect, ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT,
      JaasUtils.isZkSecurityEnabled());
  KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(consumerProps);
  try {
    Map<TopicPartition, OffsetAndMetadata> kafkaOffsets = getKafkaOffsets(consumer);
    if (!kafkaOffsets.isEmpty()) {
      logger.info("Found Kafka offsets for topic " + topicStr +
          ". Will not migrate from zookeeper");
      logger.debug("Offsets found: {}", kafkaOffsets);
      return;
    }

    logger.info("No Kafka offsets found. Migrating zookeeper offsets");
    Map<TopicPartition, OffsetAndMetadata> zookeeperOffsets = getZookeeperOffsets(zkUtils);
    if (zookeeperOffsets.isEmpty()) {
      logger.warn("No offsets to migrate found in Zookeeper");
      return;
    }

    logger.info("Committing Zookeeper offsets to Kafka");
    logger.debug("Offsets to commit: {}", zookeeperOffsets);
    consumer.commitSync(zookeeperOffsets);
    // Read the offsets to verify they were committed
    Map<TopicPartition, OffsetAndMetadata> newKafkaOffsets = getKafkaOffsets(consumer);
    logger.debug("Offsets committed: {}", newKafkaOffsets);
    if (!newKafkaOffsets.keySet().containsAll(zookeeperOffsets.keySet())) {
      throw new FlumeException("Offsets could not be committed");
    }
  } finally {
    zkUtils.close();
    consumer.close();
  }
}
 
Developer: moueimei, Project: flume-release-1.7.0, Lines: 35, Source: KafkaChannel.java

Example 7: main

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public static void main(String[] args) {
    Map<String, Object> configs = new HashMap<String, Object>();
    // bootstrap.servers lists one or more brokers; the full list is not required, the client discovers the rest of the cluster automatically
    configs.put("bootstrap.servers", "192.168.0.107:9092,192.168.0.108:9092,192.168.0.109:9092");
    configs.put("group.id", "kafka-test");
    // whether offsets are committed automatically
    configs.put("enable.auto.commit", "false");
    // interval between automatic offset commits
    configs.put("auto.commit.interval.ms", "1000");
    configs.put("session.timeout.ms", "30000");

    configs.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    configs.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

    KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(configs);
    // topics the consumer subscribes to; multiple topics can be subscribed at once
    consumer.subscribe(Arrays.asList("kafka-test"));

    final int minBatchSize = 200;
    List<ConsumerRecord<String, String>> buffer = new ArrayList<ConsumerRecord<String, String>>();

    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Long.MAX_VALUE);
        for (TopicPartition partition : records.partitions()) {
            List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
            for (ConsumerRecord<String, String> record : partitionRecords) {
                System.out.println(record.offset() + ": " + record.value());
            }
            /* synchronously commit a specific offset for this partition */
            long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
            consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)));
        }
    }
}
 
Developer: wngn123, Project: wngn-jms-kafka, Lines: 35, Source: ComsumerDemo3.java

Example 8: main

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public static void main(String[] args) {
    Map<String, Object> configs = new HashMap<String, Object>();
    // bootstrap.servers lists one or more brokers; the full list is not required, the client discovers the rest of the cluster automatically
    configs.put("bootstrap.servers", "192.168.0.107:9092,192.168.0.108:9092,192.168.0.109:9092");
    configs.put("group.id", "kafka-test");
    // whether offsets are committed automatically
    configs.put("enable.auto.commit", "false");
    // interval between automatic offset commits
    configs.put("auto.commit.interval.ms", "1000");
    configs.put("session.timeout.ms", "30000");

    configs.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    configs.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

    KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(configs);
    String topic = "kafka-test";
    TopicPartition partition0 = new TopicPartition(topic, 0);
    TopicPartition partition1 = new TopicPartition(topic, 1);
    consumer.assign(Arrays.asList(partition0, partition1));
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Long.MAX_VALUE);
        for (TopicPartition partition : records.partitions()) {
            List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
            for (ConsumerRecord<String, String> record : partitionRecords) {
                System.out.println(record.offset() + ": " + record.value());
            }
            /* synchronously commit a specific offset for this partition */
            long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
            consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)));
        }
    }
}
 
Developer: wngn123, Project: wngn-jms-kafka, Lines: 33, Source: ComsumerDemo4.java

Example 9: main

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public static void main(String[] args) {
    Map<String, Object> configs = new HashMap<String, Object>();
    // bootstrap.servers lists one or more brokers; the full list is not required, the client discovers the rest of the cluster automatically
    configs.put("bootstrap.servers", "192.168.0.107:9092,192.168.0.108:9092,192.168.0.109:9092");
    configs.put("group.id", "kafka-test");
    // whether offsets are committed automatically
    configs.put("enable.auto.commit", "false");
    // interval between automatic offset commits
    configs.put("auto.commit.interval.ms", "1000");
    configs.put("session.timeout.ms", "30000");

    configs.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    configs.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

    KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(configs);
    // topics the consumer subscribes to; multiple topics can be subscribed at once
    consumer.subscribe(Arrays.asList("kafka-test"));

    final int minBatchSize = 200;
    List<ConsumerRecord<String, String>> buffer = new ArrayList<ConsumerRecord<String, String>>();

    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        for (ConsumerRecord<String, String> record : records) {
            buffer.add(record);
        }
        // once the batch reaches the required size, write it to the DB and synchronously commit the offset
        if (buffer.size() >= minBatchSize) {
            // insertIntoDb(buffer);
            consumer.commitSync();
            buffer.clear();
        }
    }
}
 
Developer: wngn123, Project: wngn-jms-kafka, Lines: 35, Source: ComsumerDemo2.java

Example 10: flush

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
void flush(final KafkaConsumer<StatEventKey, StatAggregate> kafkaConsumer) {

    flushAggregator();
    kafkaConsumer.commitSync();
}
 
Developer: gchq, Project: stroom-stats, Lines: 6, Source: StatisticsAggregationProcessor.java

Example 11: main

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public static void main(String[] args) throws IOException, ParseException {

    Properties properties = new Properties();

    // set the Kafka bootstrap server
    properties.setProperty("bootstrap.servers", KafkaProperties.KAFKA_SERVER_URL);
    // tell the client whether the key and value are strings or something else
    properties.setProperty("key.deserializer", StringDeserializer.class.getName());
    properties.setProperty("value.deserializer", StringDeserializer.class.getName());
    properties.setProperty("group.id", "test");
    properties.setProperty("enable.auto.commit", "true");
    properties.setProperty("auto.commit.interval.ms", "1000");
    properties.setProperty("auto.offset.reset", "earliest");
    properties.setProperty("security.protocol", KafkaProperties.SECURITY_PROTOCOL);
    properties.setProperty("ssl.truststore.location", KafkaProperties.TRUSTSTORE_LOCATION);
    properties.setProperty("ssl.truststore.password", KafkaProperties.TRUSTSTORE_PASSWORD);
    properties.setProperty("ssl.endpoint.identification.algorithm", KafkaProperties.ENDPOINT_ALGORITHM);

    KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<String, String>(properties);
    // change the topic here
    kafkaConsumer.subscribe(Arrays.asList(KafkaProperties.TOPIC));

    while (true) {
        ConsumerRecords<String, String> consumerRecords = kafkaConsumer.poll(100);
        for (ConsumerRecord<String, String> consumerRecord : consumerRecords) {
            // consumerRecord.value();
            // consumerRecord.key();
            // consumerRecord.offset();
            // consumerRecord.partition();
            // consumerRecord.topic();
            // consumerRecord.timestamp();

            System.out.println("Partition: " + consumerRecord.partition() +
                    ", Offset: " + consumerRecord.offset() +
                    ", Key: " + consumerRecord.key() +
                    ", Value: " + consumerRecord.value());
        }
        kafkaConsumer.commitSync();
    }
}
 
Developer: koerbaecher, Project: docker-kafka-demo, Lines: 42, Source: ConsumeKafka.java

Example 12: consume

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
private void consume(ObjectMapper mapper, String brokers) throws IOException {
    KafkaConsumer<String, String> consumer;
    try (InputStream props = Resources.getResource("consumer.props").openStream()) {
        Properties properties = new Properties();
        properties.load(props);
        if (properties.getProperty("group.id") == null) {
            properties.setProperty("group.id", "group-" + new Random().nextInt(100000));
        }
        if (brokers != null && !brokers.isEmpty()) {
            properties.put("bootstrap.servers", brokers);
        }
        consumer = new KafkaConsumer<>(properties);
    }
    consumer.subscribe(Arrays.asList("fast-messages", "summary-markers"));
    //consumer.assign(Collections.singleton(new TopicPartition("fast-messages", 1)));
    int timeouts = 0;
    //noinspection InfiniteLoopStatement
    while (true) {
        // read records with a short timeout. If we time out, we don't really care.
        ConsumerRecords<String, String> records = consumer.poll(10000);
        Thread.yield();
        if (records.count() == 0) {
            timeouts++;
        } else {
            System.out.printf("Got %d records after %d timeouts\n", records.count(), timeouts);
            timeouts = 0;
        }
        for (ConsumerRecord<String, String> record : records) {
            switch (record.topic()) {
                case "fast-messages":
                    // the send time is encoded inside the message
                    JsonNode msg = mapper.readTree(record.value());
                    switch (msg.get("type").asText()) {
                        case "test":
                            SimpleDateFormat sdf = new SimpleDateFormat("dd.MM.yyyy HH:mm:ss");
                            Date date = new Date(msg.get("t").asLong());
                            System.out.printf("Thread: %s, Topic:%s, partition:%d, Value: %d, time: %s \n",
                                    Thread.currentThread().getName(),
                                    record.topic(), record.partition(),
                                    msg.get("k").asInt(), sdf.format(date));
                            break;
                        case "marker":
                            break;
                        default:
                            throw new IllegalArgumentException("Illegal message type: " + msg.get("type"));
                    }
                    break;
                default:
                    throw new IllegalStateException("Shouldn't be possible to get message on topic " + record.topic());
            }
        }
        consumer.commitSync();
    }
}
 
Developer: javaronok, Project: kafka-mgd-sample, Lines: 55, Source: Consumer.java

Example 13: main

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public static void main(String[] args) {
  final String tweetsEndpoint = System.getenv("TWEETS_ENDPOINT");

  if (tweetsEndpoint == null || tweetsEndpoint.trim().isEmpty()) {
    throw new RuntimeException("TWEETS_ENDPOINT env variable empty");
  }

  final Properties consumerConfigs = new Properties();
  consumerConfigs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "tweets-source-kafka:9092");
  consumerConfigs.put(ConsumerConfig.GROUP_ID_CONFIG, System.getenv("GROUP_ID"));
  consumerConfigs.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
  consumerConfigs.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);

  final KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(consumerConfigs, new StringDeserializer(), new StringDeserializer());

  kafkaConsumer.subscribe(Collections.singletonList("tweets"));

  final HttpClient httpClient = HttpClientBuilder.create().build();

  while (true) {
    final ConsumerRecords<String, String> consumerRecords = kafkaConsumer.poll(Long.MAX_VALUE);

    for (final ConsumerRecord<String, String> consumerRecord : consumerRecords) {
      final String value = consumerRecord.value();

      try {
        final JsonNode valueNode = objectMapper.readTree(value);
        out.println(valueNode.toString());
        final JsonNode payloadNode = valueNode.get("payload");
        ObjectNode node = (ObjectNode) payloadNode;
        node.remove("lang");
        ((ObjectNode) node.get("entities")).remove("user_mentions");
        ((ObjectNode) node.get("entities")).remove("media");
        ((ObjectNode) node.get("entities")).remove("urls");
        ((ObjectNode) node.get("user")).remove("friends_count");
        ((ObjectNode) node.get("user")).remove("followers_count");
        ((ObjectNode) node.get("user")).remove("statuses_count");
        out.println(node.toString());
        final String payloadValue = node.toString();
        final HttpPost httpPost = new HttpPost(tweetsEndpoint);
        final HttpEntity entity = new NStringEntity(payloadValue, ContentType.APPLICATION_JSON);
        httpPost.setEntity(entity);
        HttpResponse response = httpClient.execute(httpPost);
        out.println("Response: " + response.getStatusLine().getStatusCode());
        out.println("Response: " + IOUtils.toString(response.getEntity().getContent(), "UTF-8"));
      } catch (Exception e) {
        e.printStackTrace();
      }

    }

    kafkaConsumer.commitSync();
  }
}
 
Developer: jeqo, Project: talk-observing-distributed-systems, Lines: 55, Source: TweetsProducer.java


Note: The org.apache.kafka.clients.consumer.KafkaConsumer.commitSync examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Please follow the corresponding project's license when sharing or using the code, and do not republish without permission.