

Java KafkaTestUtils.consumerProps Method Code Examples

This article collects typical usage examples of the Java method org.springframework.kafka.test.utils.KafkaTestUtils.consumerProps. If you are wondering what KafkaTestUtils.consumerProps does, how to call it, or what working usage looks like, the curated examples below should help. You can also explore other usage examples of org.springframework.kafka.test.utils.KafkaTestUtils, the class that declares this method.


The following shows 14 code examples of the KafkaTestUtils.consumerProps method, sorted by popularity by default.
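For quick orientation before the examples: consumerProps(group, autoCommit, embeddedKafka) returns a Map<String, Object> pre-wired with the embedded broker's bootstrap servers, the given consumer group id and auto-commit flag, and test defaults (by default, Integer keys and String values). A minimal sketch of the recurring pattern, with an illustrative topic name:

Map<String, Object> props = KafkaTestUtils.consumerProps("testGroup", "false", embeddedKafka);
// rewind to the earliest offset so the consumer cannot miss messages sent before it joined
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
try (KafkaConsumer<Integer, String> consumer = new KafkaConsumer<>(props)) {
    consumer.subscribe(Collections.singletonList("test-topic")); // "test-topic" is illustrative
    ConsumerRecords<Integer, String> records = consumer.poll(1000);
    records.forEach(r -> System.out.println(r.key() + " -> " + r.value()));
}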

Example 1: nullKey

import org.springframework.kafka.test.utils.KafkaTestUtils; // import the package/class required by the method
@Test
public void nullKey() throws Exception {
  Producer<Integer, String> producer = createProducer();

  ProducerRecord<Integer, String> record = new ProducerRecord<>("messages", "test");
  producer.send(record);

  final Map<String, Object> consumerProps = KafkaTestUtils
      .consumerProps("sampleRawConsumer", "false", embeddedKafka);
  consumerProps.put("auto.offset.reset", "earliest");
  // note: this map is unused below; createConsumer() (Example 2) builds its own copy

  final CountDownLatch latch = new CountDownLatch(1);
  createConsumer(latch, null);

  producer.close();
}
 
Author: opentracing-contrib | Project: java-kafka-client | Lines: 17 | Source: TracingKafkaTest.java
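Example 1 also calls a createProducer() helper that the snippet does not show. A plausible sketch, assuming it mirrors the tracing wrapper applied to the consumer in Example 2 (the wiring below is an assumption, not the project's verbatim code):

private Producer<Integer, String> createProducer() {
  // senderProps pre-wires bootstrap servers and test-default serializers (assumed)
  Map<String, Object> senderProps = KafkaTestUtils.senderProps(embeddedKafka.getBrokersAsString());
  KafkaProducer<Integer, String> kafkaProducer = new KafkaProducer<>(senderProps);
  // decorate with OpenTracing, the producer-side counterpart of TracingKafkaConsumer
  return new TracingKafkaProducer<>(kafkaProducer, mockTracer);
}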

Example 2: createConsumer

import org.springframework.kafka.test.utils.KafkaTestUtils; // import the package/class required by the method
private void createConsumer(final CountDownLatch latch, final Integer key)
    throws InterruptedException {
  ExecutorService executorService = Executors.newSingleThreadExecutor();

  final Map<String, Object> consumerProps = KafkaTestUtils
      .consumerProps("sampleRawConsumer", "false", embeddedKafka);
  consumerProps.put("auto.offset.reset", "earliest");

  executorService.execute(() -> {
    KafkaConsumer<Integer, String> kafkaConsumer = new KafkaConsumer<>(consumerProps);
    TracingKafkaConsumer<Integer, String> tracingKafkaConsumer = new TracingKafkaConsumer<>(
        kafkaConsumer, mockTracer);

    tracingKafkaConsumer.subscribe(Collections.singletonList("messages"));

    while (latch.getCount() > 0) {
      ConsumerRecords<Integer, String> records = tracingKafkaConsumer.poll(100);
      for (ConsumerRecord<Integer, String> record : records) {
        SpanContext spanContext = TracingKafkaUtils
            .extractSpanContext(record.headers(), mockTracer);
        assertNotNull(spanContext);
        assertEquals("test", record.value());
        if (key != null) {
          assertEquals(key, record.key());
        }
        tracingKafkaConsumer.commitSync();
        latch.countDown();
      }
    }
    kafkaConsumer.close();
  });

  assertTrue(latch.await(30, TimeUnit.SECONDS));

}
 
Author: opentracing-contrib | Project: java-kafka-client | Lines: 36 | Source: TracingKafkaTest.java

Example 3: consumerFactory

import org.springframework.kafka.test.utils.KafkaTestUtils; // import the package/class required by the method
@Bean
public ConsumerFactory<Integer, String> consumerFactory() {
  final Map<String, Object> consumerProps = KafkaTestUtils
      .consumerProps("sampleRawConsumer", "false", embeddedKafka);
  consumerProps.put("auto.offset.reset", "earliest");

  return new TracingConsumerFactory<>(new DefaultKafkaConsumerFactory<>(consumerProps), tracer());
}
 
Author: opentracing-contrib | Project: java-kafka-client | Lines: 9 | Source: TestConfiguration.java
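In a test configuration like this, the tracing consumer factory is typically handed to a listener container factory so that @KafkaListener methods receive tracing-wrapped consumers. A minimal sketch of that follow-on bean (name and generics are assumptions):

@Bean
public ConcurrentKafkaListenerContainerFactory<Integer, String> kafkaListenerContainerFactory() {
  ConcurrentKafkaListenerContainerFactory<Integer, String> factory =
      new ConcurrentKafkaListenerContainerFactory<>();
  factory.setConsumerFactory(consumerFactory());
  return factory;
}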

Example 4: setup

import org.springframework.kafka.test.utils.KafkaTestUtils; // import the package/class required by the method
@Before
public void setup() throws Exception {
    Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("testT", "false", embeddedKafka);
    DefaultKafkaConsumerFactory<String, String> cf =
            new DefaultKafkaConsumerFactory<>(consumerProps);
    ContainerProperties containerProperties = new ContainerProperties(TEST_TOPIC);
    container = new KafkaMessageListenerContainer<>(cf, containerProperties);
    final BlockingQueue<ConsumerRecord<String, String>> records = new LinkedBlockingQueue<>();
    container.setupMessageListener((MessageListener<String, String>) record -> {
        log.error("Message received: " + record);
        records.add(record);
    });
    container.start();
    ContainerTestUtils.waitForAssignment(container, embeddedKafka.getPartitionsPerTopic());
    Map<String, Object> senderProps = KafkaTestUtils.senderProps(embeddedKafka.getBrokersAsString());
    ProducerFactory<String, String> pf =
            new DefaultKafkaProducerFactory<>(senderProps);
    template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(TEST_TOPIC);
}
 
Author: underscorenico | Project: skeleton-oms-java | Lines: 21 | Source: HelloProcessTest.java
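Note that the snippet declares the records queue as a local variable; for a test method to read received messages it would normally be a field. Assuming that, a companion test is a send through the template followed by a poll on the queue; a minimal sketch (message value and assertions are illustrative):

@Test
public void sendAndReceive() throws Exception {
    template.sendDefault("hello"); // goes to TEST_TOPIC, the template's default topic
    ConsumerRecord<String, String> received = records.poll(10, TimeUnit.SECONDS);
    assertNotNull(received);
    assertEquals("hello", received.value());
}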

Example 5: consumerProps

import org.springframework.kafka.test.utils.KafkaTestUtils; // import the package/class required by the method
private Map<String, Object> consumerProps(Object keyDeserializer, Object valueDeserializer) {
    Map<String, Object> props = KafkaTestUtils.consumerProps("group", "true", kafkaEmbedded);
    // the commented-out lines show settings that consumerProps already fills in
//  props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
//  props.put(ConsumerConfig.GROUP_ID_CONFIG, "group");
//  props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
    props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "100");
    props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "15000");
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer);
    return props;
}
 
Author: rmap-project | Project: rmap | Lines: 12 | Source: SimpleKafkaIT.java
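A call site for this helper might look like the following (the deserializer choice and generics are illustrative):

Map<String, Object> props = consumerProps(StringDeserializer.class, StringDeserializer.class);
Consumer<String, String> consumer =
        new DefaultKafkaConsumerFactory<String, String>(props).createConsumer();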

Example 6: setUp

import org.springframework.kafka.test.utils.KafkaTestUtils; // import the package/class required by the method
@BeforeClass
public static void setUp() throws Exception {
	Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("group-id", "false", embeddedKafka);
	consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
	DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
	consumer = cf.createConsumer();
	embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "counts-id");
}
 
Author: spring-cloud | Project: spring-cloud-stream-binder-kafka | Lines: 9 | Source: KstreamBinderPojoInputStringOutputIntegrationTests.java
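Once setUp has subscribed the shared consumer to the output topic, the test methods can drain it with KafkaTestUtils helpers; a minimal sketch (availability of these helpers depends on the spring-kafka-test version):

// fetch whatever has arrived on the subscribed topics
ConsumerRecords<String, String> received = KafkaTestUtils.getRecords(consumer);
// or, when exactly one record is expected:
ConsumerRecord<String, String> single = KafkaTestUtils.getSingleRecord(consumer, "counts-id");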

Example 7: setUp

import org.springframework.kafka.test.utils.KafkaTestUtils; // import the package/class required by the method
@BeforeClass
public static void setUp() throws Exception {
	Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("group", "false", embeddedKafka);
	consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
	DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
	consumer = cf.createConsumer();
	embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "counts");
}
 
Author: spring-cloud | Project: spring-cloud-stream-binder-kafka | Lines: 9 | Source: KStreamBinderWordCountIntegrationTests.java

Example 8: setUp

import org.springframework.kafka.test.utils.KafkaTestUtils; // import the package/class required by the method
@BeforeClass
public static void setUp() throws Exception {
	Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("group-id", "false", embeddedKafka);
	//consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, Deserializer.class.getName());
	consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
	DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
	consumer = cf.createConsumer();
	embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "counts-id");
}
 
Author: spring-cloud | Project: spring-cloud-stream-binder-kafka | Lines: 10 | Source: KStreamBinderPojoInputAndPrimitiveTypeOutputTests.java

Example 9: setUp

import org.springframework.kafka.test.utils.KafkaTestUtils; // import the package/class required by the method
@Before
public void setUp() throws Exception {
  // set up the Kafka consumer properties
  Map<String, Object> consumerProperties =
      KafkaTestUtils.consumerProps("sender", "false", embeddedKafka);

  // create a Kafka consumer factory
  DefaultKafkaConsumerFactory<String, String> consumerFactory =
      new DefaultKafkaConsumerFactory<String, String>(consumerProperties);

  // set the topic that needs to be consumed
  ContainerProperties containerProperties = new ContainerProperties(SENDER_TOPIC);

  // create a Kafka MessageListenerContainer
  container = new KafkaMessageListenerContainer<>(consumerFactory, containerProperties);

  // create a thread safe queue to store the received message
  records = new LinkedBlockingQueue<>();

  // setup a Kafka message listener
  container.setupMessageListener(new MessageListener<String, String>() {
    @Override
    public void onMessage(ConsumerRecord<String, String> record) {
      LOGGER.debug("test-listener received message='{}'", record.toString());
      records.add(record);
    }
  });

  // start the container and underlying message listener
  container.start();

  // wait until the container has the required number of assigned partitions
  ContainerTestUtils.waitForAssignment(container, embeddedKafka.getPartitionsPerTopic());
}
 
Author: code-not-found | Project: spring-kafka | Lines: 35 | Source: SpringKafkaSenderTest.java

Example 10: noddyProducerConsumerTest

import org.springframework.kafka.test.utils.KafkaTestUtils; // import the package/class required by the method
/**
 * Put some items on the queue and make sure they can be consumed
 */
@Test
public void noddyProducerConsumerTest() throws ExecutionException, InterruptedException {

    String[] topics = {TOPIC_MESSAGES};
    KafkaEmbededUtils.createTopics(kafkaEmbedded, topics);

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(kafkaEmbedded);
    KafkaProducer<Integer, String> producer = new KafkaProducer<>(senderProps);
    producer.send(new ProducerRecord<>(TOPIC_MESSAGES, 0, 0, "message0")).get();
    producer.send(new ProducerRecord<>(TOPIC_MESSAGES, 0, 1, "message1")).get();
    producer.send(new ProducerRecord<>(TOPIC_MESSAGES, 1, 2, "message2")).get();
    producer.send(new ProducerRecord<>(TOPIC_MESSAGES, 1, 3, "message3")).get();

    Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("noddyProducerConsumerTest-consumer",
            "false",
            kafkaEmbedded);
    //earliest ensures we can start the consumer at any point without it missing messages
    consumerProps.put("auto.offset.reset", "earliest");

    final CountDownLatch latch = new CountDownLatch(4);
    final CountDownLatch consumerShutdownLatch = new CountDownLatch(1);
    ExecutorService executorService = Executors.newSingleThreadExecutor();
    executorService.execute(() -> {
        KafkaConsumer<Integer, String> kafkaConsumer = new KafkaConsumer<>(consumerProps);
        kafkaConsumer.subscribe(Collections.singletonList(TOPIC_MESSAGES));
        try {
            while (true) {
                ConsumerRecords<Integer, String> records = kafkaConsumer.poll(100);
                for (ConsumerRecord<Integer, String> record : records) {
                    LOGGER.info("consuming from topic = {}, partition = {}, offset = {}, key = {}, value = {}",
                            record.topic(), record.partition(), record.offset(), record.key(), record.value());
                    latch.countDown();
                }
                if (latch.getCount() == 0) {
                    break;
                }
            }
        } finally {
            kafkaConsumer.close();
            consumerShutdownLatch.countDown();
        }
    });

    assertThat(latch.await(90, TimeUnit.SECONDS)).isTrue();
    consumerShutdownLatch.await();
    producer.close();

    KafkaEmbededUtils.deleteTopics(kafkaEmbedded, topics);
}
 
Author: gchq | Project: stroom-stats | Lines: 53 | Source: EmbeddedKafkaIT.java
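Two producer-side helpers appear across these examples: producerProps(kafkaEmbedded) takes the embedded broker bean directly, while senderProps(brokers) (see Example 4) takes a bootstrap-server string; both return a ready-to-use property map. A minimal sketch of the equivalence, assuming the same embedded broker:

Map<String, Object> viaBean = KafkaTestUtils.producerProps(kafkaEmbedded);
Map<String, Object> viaString = KafkaTestUtils.senderProps(kafkaEmbedded.getBrokersAsString());
// both carry bootstrap servers plus the test-default serializers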

Example 11: test_TwoGoodCountEventsRollUpAll

import org.springframework.kafka.test.utils.KafkaTestUtils; // import the package/class required by the method
    @Test
    public void test_TwoGoodCountEventsRollUpAll() throws ExecutionException, InterruptedException, DatatypeConfigurationException {

        setAppIdPrefixes("");
        module = initStreamProcessing();

        Map<String, Object> senderProps = KafkaTestUtils.producerProps(kafkaEmbedded);
        KafkaProducer<String, String> producer = new KafkaProducer<>(senderProps, Serdes.String().serializer(), Serdes.String().serializer());

        StatisticType statisticType = StatisticType.COUNT;
        String topic = INPUT_TOPICS_MAP.get(statisticType);

        EventStoreTimeIntervalEnum interval = EventStoreTimeIntervalEnum.SECOND;

        addStatConfig(module.getMockStatisticConfigurationService(),
                GOOD_STAT_UUID,
                GOOD_STAT_NAME,
                statisticType,
                Arrays.asList(TAG_1, TAG_2),
                interval);

        ZonedDateTime time = ZonedDateTime.now(ZoneOffset.UTC);

        Statistics statistics = StatisticsHelper.buildStatistics(
                StatisticsHelper.buildCountStatistic(time, 1L,
                        StatisticsHelper.buildTagType(TAG_1, TAG_1 + "val1"),
                        StatisticsHelper.buildTagType(TAG_2, TAG_2 + "val1")
                ),
                StatisticsHelper.buildCountStatistic(time.plusDays(2), 1L,
                        StatisticsHelper.buildTagType(TAG_1, TAG_1 + "val1"),
                        StatisticsHelper.buildTagType(TAG_2, TAG_2 + "val1")
                )
        );

        //Set a long purge retention to stop events being bumped up into the next interval
        setPurgeRetention(interval, Integer.MAX_VALUE);

        Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("dummyGroup", "false", kafkaEmbedded);
        consumerProps.put("auto.offset.reset", "earliest");

//        startAllTopicsConsumer(consumerProps);

        ConcurrentMap<String, List<ConsumerRecord<StatEventKey, StatAggregate>>> topicToMsgsMap = new ConcurrentHashMap<>();
        List<BadStatMessage> badEvents = new ArrayList<>();

        //2 input msgs, each one is rolled up to 4 perms so expect 8
        int expectedGoodMsgCount = 2 * 4;
        int expectedBadMsgCount = 0;
        CountDownLatch intervalTopicsLatch = startIntervalTopicsConsumer(StatisticType.COUNT, consumerProps, expectedGoodMsgCount, topicToMsgsMap, true, 100);
        CountDownLatch badTopicsLatch = startBadEventsConsumer(consumerProps, expectedBadMsgCount, badEvents);

        //give the consumers and streams enough time to spin up
//        ThreadUtil.sleepAtLeastIgnoreInterrupts(1_000);

        LOGGER.info("Sending to {} stat events to topic {}", statistics.getStatistic().size(), topic);
        producer.send(buildProducerRecord(topic, GOOD_STAT_UUID, statistics)).get();
        producer.close();

        //Wait for the expected numbers of messages to arrive or timeout if not
        assertThat(intervalTopicsLatch.await(30, TimeUnit.SECONDS)).isTrue();
        assertThat(badTopicsLatch.await(30, TimeUnit.SECONDS)).isTrue();

        //both events go to same interval topic
        assertThat(topicToMsgsMap).hasSize(1);

        String topicName = topicToMsgsMap.keySet().stream().findFirst().get();
        assertThat(topicName).isEqualTo(TopicNameFactory.getIntervalTopicName(STATISTIC_ROLLUP_PERMS_TOPIC_PREFIX, statisticType, interval));
        List<ConsumerRecord<StatEventKey, StatAggregate>> messages = topicToMsgsMap.values().stream().findFirst().get();
        assertThat(messages).hasSize(expectedGoodMsgCount);

        //no bad events
        assertThat(badEvents).hasSize(expectedBadMsgCount);
    }
 
Author: gchq | Project: stroom-stats | Lines: 74 | Source: StatisticsFlatMappingServiceIT.java

Example 12: test_TwoGoodValueEventsRollUpAll

import org.springframework.kafka.test.utils.KafkaTestUtils; // import the package/class required by the method
    @Test
    public void test_TwoGoodValueEventsRollUpAll() throws ExecutionException, InterruptedException, DatatypeConfigurationException {
        module = initStreamProcessing();

        Map<String, Object> senderProps = KafkaTestUtils.producerProps(kafkaEmbedded);
        KafkaProducer<String, String> producer = new KafkaProducer<>(senderProps, Serdes.String().serializer(), Serdes.String().serializer());

        StatisticType statisticType = StatisticType.VALUE;
        String topic = INPUT_TOPICS_MAP.get(statisticType);

        EventStoreTimeIntervalEnum interval = EventStoreTimeIntervalEnum.SECOND;

        addStatConfig(module.getMockStatisticConfigurationService(),
                GOOD_STAT_UUID,
                GOOD_STAT_NAME,
                statisticType,
                Arrays.asList(TAG_1, TAG_2),
                interval);

        ZonedDateTime time = ZonedDateTime.now(ZoneOffset.UTC);

        Statistics statistics = StatisticsHelper.buildStatistics(
                StatisticsHelper.buildValueStatistic(time, 1.5,
                        StatisticsHelper.buildTagType(TAG_1, TAG_1 + "val1"),
                        StatisticsHelper.buildTagType(TAG_2, TAG_2 + "val1")
                ),
                StatisticsHelper.buildValueStatistic(time.plusHours(2), 1.5,
                        StatisticsHelper.buildTagType(TAG_1, TAG_1 + "val1"),
                        StatisticsHelper.buildTagType(TAG_2, TAG_2 + "val1")
                )
        );
        dumpStatistics(statistics);

        //Set a long purge retention to stop events being bumped up into the next interval
        setPurgeRetention(interval, Integer.MAX_VALUE);


        Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("dummyGroup", "false", kafkaEmbedded);
        consumerProps.put("auto.offset.reset", "earliest");

//        startAllTopicsConsumer(consumerProps);

        ConcurrentMap<String, List<ConsumerRecord<StatEventKey, StatAggregate>>> topicToMsgsMap = new ConcurrentHashMap<>();
        List<BadStatMessage> badEvents = new ArrayList<>();

        //2 input msgs, each one is rolled up to 4 perms so expect 8
        int expectedGoodMsgCount = 8;
        int expectedBadMsgCount = 0;
        CountDownLatch intervalTopicsLatch = startIntervalTopicsConsumer(statisticType, consumerProps, expectedGoodMsgCount, topicToMsgsMap, true, 100);
        CountDownLatch badTopicsLatch = startBadEventsConsumer(consumerProps, expectedBadMsgCount, badEvents);

        //give the consumers and streams enough time to spin up
//        ThreadUtil.sleepAtLeastIgnoreInterrupts(1_000);

        LOGGER.info("Sending to {} stat events to topic {}", statistics.getStatistic().size(), topic);
        producer.send(buildProducerRecord(topic, GOOD_STAT_UUID, statistics)).get();
        producer.close();

        //Wait for the expected numbers of messages to arrive or timeout if not
        assertThat(intervalTopicsLatch.await(30, TimeUnit.SECONDS)).isTrue();
        assertThat(badTopicsLatch.await(30, TimeUnit.SECONDS)).isTrue();

        //both events go to same interval topic
        assertThat(topicToMsgsMap).hasSize(1);
        String topicName = topicToMsgsMap.keySet().stream().findFirst().get();
        assertThat(topicName).isEqualTo(TopicNameFactory.getIntervalTopicName(STATISTIC_ROLLUP_PERMS_TOPIC_PREFIX, statisticType, interval));
        List<ConsumerRecord<StatEventKey, StatAggregate>> messages = topicToMsgsMap.values().stream().findFirst().get();
        messages.stream()
                .map(ConsumerRecord::toString)
                .forEach(LOGGER::debug);
        assertThat(messages).hasSize(expectedGoodMsgCount);

        //no bad events
        assertThat(badEvents).hasSize(expectedBadMsgCount);
    }
 
Author: gchq | Project: stroom-stats | Lines: 76 | Source: StatisticsFlatMappingServiceIT.java

Example 13: test_cantUnmarshall

import org.springframework.kafka.test.utils.KafkaTestUtils; // import the package/class required by the method
@Test
public void test_cantUnmarshall() throws ExecutionException, InterruptedException, DatatypeConfigurationException {
    module = initStreamProcessing();

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(kafkaEmbedded);
    KafkaProducer<String, String> producer = new KafkaProducer<>(senderProps, Serdes.String().serializer(), Serdes.String().serializer());

    StatisticType statisticType = StatisticType.COUNT;
    String topic = INPUT_TOPICS_MAP.get(statisticType);

    EventStoreTimeIntervalEnum interval = EventStoreTimeIntervalEnum.MINUTE;

    addStatConfig(module.getMockStatisticConfigurationService(),
            GOOD_STAT_UUID,
            GOOD_STAT_NAME,
            statisticType,
            Arrays.asList(TAG_1, TAG_2),
            interval);

    ZonedDateTime time = ZonedDateTime.now(ZoneOffset.UTC);

    Statistics statistics = StatisticsHelper.buildStatistics(
            //the good, at this point
            StatisticsHelper.buildCountStatistic(time, 1L,
                    StatisticsHelper.buildTagType(TAG_1, TAG_1 + "val1"),
                    StatisticsHelper.buildTagType(TAG_2, TAG_2 + "val1")
            )
    );


    Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("dummyGroup", "false", kafkaEmbedded);
    consumerProps.put("auto.offset.reset", "earliest");

    ConcurrentMap<String, List<ConsumerRecord<StatEventKey, StatAggregate>>> topicToMsgsMap = new ConcurrentHashMap<>();
    List<BadStatMessage> badEvents = new ArrayList<>();

    int expectedBadMsgCount = 1;

    CountDownLatch badTopicsLatch = startBadEventsConsumer(consumerProps, expectedBadMsgCount, badEvents);


    LOGGER.info("Sending to {} stat events to topic {}", statistics.getStatistic().size(), topic);
    String statKey = GOOD_STAT_UUID;
    //corrupt the xml by renaming one of the element names
    String msgValue = statisticsMarshaller.marshallToXml(statistics)
            .replaceAll("statistic", "badElementName");
    ProducerRecord<String, String> producerRecord = new ProducerRecord<>(topic, statKey, msgValue);
    producer.send(producerRecord).get();
    producer.close();

    //Wait for the expected numbers of messages to arrive or timeout if not
    assertThat(badTopicsLatch.await(30, TimeUnit.SECONDS)).isTrue();

    assertThat(badEvents)
            .hasSize(expectedBadMsgCount);
    assertThat(
            badEvents.stream()
                    .map(BadStatMessage::getKey)
                    .distinct()
                    .collect(Collectors.toList()))
            .containsExactly(GOOD_STAT_UUID);
    assertThat(badEvents.get(0).getValue())
            .contains(StatisticsFlatMappingStreamFactory.UNMARSHALLING_ERROR_TEXT);
}
 
Author: gchq | Project: stroom-stats | Lines: 65 | Source: StatisticsFlatMappingServiceIT.java

Example 14: setUp

import org.springframework.kafka.test.utils.KafkaTestUtils; // import the package/class required by the method
@Before
public void setUp() throws Exception {
  // set up the Kafka consumer properties
  Map<String, Object> consumerProperties =
      KafkaTestUtils.consumerProps("sender_group", "false", AllSpringKafkaTests.embeddedKafka);

  // create a Kafka consumer factory
  DefaultKafkaConsumerFactory<String, String> consumerFactory =
      new DefaultKafkaConsumerFactory<String, String>(consumerProperties);

  // set the topic that needs to be consumed
  ContainerProperties containerProperties =
      new ContainerProperties(AllSpringKafkaTests.SENDER_TOPIC);

  // create a Kafka MessageListenerContainer
  container = new KafkaMessageListenerContainer<>(consumerFactory, containerProperties);

  // create a thread safe queue to store the received message
  records = new LinkedBlockingQueue<>();

  // setup a Kafka message listener
  container.setupMessageListener(new MessageListener<String, String>() {
    @Override
    public void onMessage(ConsumerRecord<String, String> record) {
      LOGGER.debug("test-listener received message='{}'", record.toString());
      records.add(record);
    }
  });

  // start the container and underlying message listener
  container.start();
  // wait until the container has the required number of assigned partitions
  ContainerTestUtils.waitForAssignment(container,
      AllSpringKafkaTests.embeddedKafka.getPartitionsPerTopic());
}
 
Author: code-not-found | Project: spring-kafka | Lines: 36 | Source: SpringKafkaSenderTest.java


Note: The org.springframework.kafka.test.utils.KafkaTestUtils.consumerProps examples above were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects; copyright remains with the original authors, and redistribution or use should follow each project's license. Do not reproduce without permission.