

Java KafkaTestUtils.producerProps Method Code Examples

This article collects typical usage examples of the Java method org.springframework.kafka.test.utils.KafkaTestUtils.producerProps. If you have been wondering what KafkaTestUtils.producerProps does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore the other usage examples for the containing class, org.springframework.kafka.test.utils.KafkaTestUtils.


The sections below present 11 code examples of the KafkaTestUtils.producerProps method, sorted by popularity by default.
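Before the examples, a minimal sketch of what the method provides: producerProps returns a Map&lt;String, Object&gt; pre-populated with the embedded broker's bootstrap servers, basic batching settings, and Integer/String serializers, so a test producer can be built in a couple of lines. The embeddedKafka field and the topic name "demo-topic" below are illustrative assumptions, not taken from any example on this page.

import java.util.Map;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.kafka.test.utils.KafkaTestUtils;

// assumes a broker started by the test harness, e.g.:
// @ClassRule public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "demo-topic");
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
KafkaProducer<Integer, String> producer = new KafkaProducer<>(senderProps);
producer.send(new ProducerRecord<>("demo-topic", 1, "hello")); // hypothetical topic
producer.close();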

Example 1: receiveAndValidateFoo

import org.springframework.kafka.test.utils.KafkaTestUtils; // import the package/class the method depends on
private void receiveAndValidateFoo(ConfigurableApplicationContext context) throws Exception {
	Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
	DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
	KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
	template.setDefaultTopic("foos");
	template.sendDefault("{\"id\":\"123\"}");
	ConsumerRecord<Integer, String> cr = KafkaTestUtils.getSingleRecord(consumer, "counts-id");

	// AssertJ's assertThat(boolean) asserts nothing without .isTrue(); compare values directly instead
	assertThat(cr.key()).isEqualTo(123);
	ObjectMapper om = new ObjectMapper();
	Long aLong = om.readValue(cr.value(), Long.class);
	assertThat(aLong).isEqualTo(1L);
}
 
Developer: spring-cloud, Project: spring-cloud-stream-binder-kafka, Lines: 14, Source: KStreamBinderPojoInputAndPrimitiveTypeOutputTests.java
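Note that the consumer handed to KafkaTestUtils.getSingleRecord above is a test fixture field this excerpt does not show. A hedged sketch of a typical construction, assuming the same embedded broker and output topic ("test-group" is a placeholder group id):

import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.test.utils.KafkaTestUtils;

Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("test-group", "false", embeddedKafka);
// start from the earliest offset so the assertion cannot miss records sent before subscription
consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
Consumer<Integer, String> consumer =
        new DefaultKafkaConsumerFactory<Integer, String>(consumerProps).createConsumer();
embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "counts-id");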

Example 2: test

import org.springframework.kafka.test.utils.KafkaTestUtils; // import the package/class the method depends on
@Test
public void test() throws Exception {
  Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);

  Properties config = new Properties();
  config.put(StreamsConfig.APPLICATION_ID_CONFIG, "stream-app");
  config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, senderProps.get("bootstrap.servers"));
  config.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.Integer().getClass());
  config.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

  Producer<Integer, String> producer = createProducer();
  ProducerRecord<Integer, String> record = new ProducerRecord<>("stream-test", 1, "test");
  producer.send(record);

  final Serde<String> stringSerde = Serdes.String();
  final Serde<Integer> intSerde = Serdes.Integer();

  KStreamBuilder builder = new KStreamBuilder();
  KStream<Integer, String> kStream = builder
      .stream(intSerde, stringSerde, "stream-test");

  kStream.map((key, value) -> new KeyValue<>(key, value + "map")).to("stream-out");

  KafkaStreams streams = new KafkaStreams(builder, new StreamsConfig(config),
      new TracingKafkaClientSupplier(mockTracer));
  streams.start();

  await().atMost(15, TimeUnit.SECONDS).until(reportedSpansSize(), equalTo(3));

  streams.close();
  producer.close();

  List<MockSpan> spans = mockTracer.finishedSpans();
  assertEquals(3, spans.size());
  checkSpans(spans);

  assertNull(mockTracer.activeSpan());
}
 
Developer: opentracing-contrib, Project: java-kafka-client, Lines: 39, Source: TracingKafkaStreamsTest.java

Example 3: receiveAndValidateFoo

import org.springframework.kafka.test.utils.KafkaTestUtils; // import the package/class the method depends on
private void receiveAndValidateFoo(ConfigurableApplicationContext context) throws Exception {
	Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
	DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
	KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
	template.setDefaultTopic("foos");
	template.sendDefault("{\"id\":\"123\"}");
	ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer, "counts-id");
	assertThat(cr.value().contains("Count for product with ID 123: 1")).isTrue();
}
 
Developer: spring-cloud, Project: spring-cloud-stream-binder-kafka, Lines: 10, Source: KstreamBinderPojoInputStringOutputIntegrationTests.java

Example 4: receiveAndValidate

import org.springframework.kafka.test.utils.KafkaTestUtils; // import the package/class the method depends on
private void receiveAndValidate(ConfigurableApplicationContext context) throws Exception {
	Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
	DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
	KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
	template.setDefaultTopic("words");
	template.sendDefault("foobar");
	ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer, "counts");
	assertThat(cr.value().contains("\"word\":\"foobar\",\"count\":1")).isTrue();
}
 
Developer: spring-cloud, Project: spring-cloud-stream-binder-kafka, Lines: 10, Source: KStreamBinderWordCountIntegrationTests.java

Example 5: receiveAndValidateFoo

import org.springframework.kafka.test.utils.KafkaTestUtils; // import the package/class the method depends on
private void receiveAndValidateFoo(ConfigurableApplicationContext context) throws Exception {
	Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
	DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
	KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
	template.setDefaultTopic("foos");
	template.sendDefault("{\"id\":\"123\"}");
	ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer, "counts-id");
	assertThat(cr.value().contains("Count for product with ID 123: 1")).isTrue();

	ProductCountApplication.Foo foo = context.getBean(ProductCountApplication.Foo.class);
	assertThat(foo.getProductStock(123)).isEqualTo(1L); // assertThat(boolean) alone would be a no-op assertion
}
 
Developer: spring-cloud, Project: spring-cloud-stream-binder-kafka, Lines: 13, Source: KStreamInteractiveQueryIntegrationTests.java

Example 6: createProducer

import org.springframework.kafka.test.utils.KafkaTestUtils; // import the package/class the method depends on
private Producer<Integer, String> createProducer() {
  Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
  KafkaProducer<Integer, String> kafkaProducer = new KafkaProducer<>(senderProps);
  return new TracingKafkaProducer<>(kafkaProducer, mockTracer);
}
 
Developer: opentracing-contrib, Project: java-kafka-client, Lines: 6, Source: TracingKafkaTest.java

Example 7: producerFactory

import org.springframework.kafka.test.utils.KafkaTestUtils; // import the package/class the method depends on
@Bean
public ProducerFactory<Integer, String> producerFactory() {
  return new TracingProducerFactory<>(new DefaultKafkaProducerFactory<>(
      KafkaTestUtils.producerProps(embeddedKafka)), tracer());
}
 
Developer: opentracing-contrib, Project: java-kafka-client, Lines: 6, Source: TestConfiguration.java
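The ProducerFactory bean above is normally paired with a KafkaTemplate bean in the same configuration class; a brief sketch of the companion wiring (the bean method name and the import of org.springframework.kafka.core.KafkaTemplate are assumptions, not from the original project):

@Bean
public KafkaTemplate<Integer, String> kafkaTemplate() {
    // the template creates its producers through the tracing-aware factory above
    return new KafkaTemplate<>(producerFactory());
}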

Example 8: noddyProducerConsumerTest

import org.springframework.kafka.test.utils.KafkaTestUtils; // import the package/class the method depends on
/**
 * Put some items on the queue and make sure they can be consumed
 */
@Test
public void noddyProducerConsumerTest() throws ExecutionException, InterruptedException {

    String[] topics = {TOPIC_MESSAGES};
    KafkaEmbededUtils.createTopics(kafkaEmbedded, topics);

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(kafkaEmbedded);
    KafkaProducer<Integer, String> producer = new KafkaProducer<>(senderProps);
    producer.send(new ProducerRecord<>(TOPIC_MESSAGES, 0, 0, "message0")).get();
    producer.send(new ProducerRecord<>(TOPIC_MESSAGES, 0, 1, "message1")).get();
    producer.send(new ProducerRecord<>(TOPIC_MESSAGES, 1, 2, "message2")).get();
    producer.send(new ProducerRecord<>(TOPIC_MESSAGES, 1, 3, "message3")).get();

    Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("noddyProducerConsumerTest-consumer",
            "false",
            kafkaEmbedded);
    //earliest ensures we can start the consumer at any point without it missing messages
    consumerProps.put("auto.offset.reset", "earliest");

    final CountDownLatch latch = new CountDownLatch(4);
    final CountDownLatch consumerShutdownLatch = new CountDownLatch(1);
    ExecutorService executorService = Executors.newSingleThreadExecutor();
    executorService.execute(() -> {
        KafkaConsumer<Integer, String> kafkaConsumer = new KafkaConsumer<>(consumerProps);
        kafkaConsumer.subscribe(Collections.singletonList(TOPIC_MESSAGES));
        try {
            while (true) {
                ConsumerRecords<Integer, String> records = kafkaConsumer.poll(100);
                for (ConsumerRecord<Integer, String> record : records) {
                    LOGGER.info("consuming from topic = {}, partition = {}, offset = {}, key = {}, value = {}",
                            record.topic(), record.partition(), record.offset(), record.key(), record.value());
                    latch.countDown();
                }
                if (latch.getCount() == 0) {
                    break;
                }
            }
        } finally {
            kafkaConsumer.close();
            consumerShutdownLatch.countDown();
        }
    });

    assertThat(latch.await(90, TimeUnit.SECONDS)).isTrue();
    consumerShutdownLatch.await();
    producer.close();

    KafkaEmbededUtils.deleteTopics(kafkaEmbedded, topics);
}
 
Developer: gchq, Project: stroom-stats, Lines: 53, Source: EmbeddedKafkaIT.java

Example 9: test_TwoGoodCountEventsRollUpAll

import org.springframework.kafka.test.utils.KafkaTestUtils; // import the package/class the method depends on
@Test
    public void test_TwoGoodCountEventsRollUpAll() throws ExecutionException, InterruptedException, DatatypeConfigurationException {

        setAppIdPrefixes("");
        module = initStreamProcessing();

        Map<String, Object> senderProps = KafkaTestUtils.producerProps(kafkaEmbedded);
        KafkaProducer<String, String> producer = new KafkaProducer<>(senderProps, Serdes.String().serializer(), Serdes.String().serializer());

        StatisticType statisticType = StatisticType.COUNT;
        String topic = INPUT_TOPICS_MAP.get(statisticType);

        EventStoreTimeIntervalEnum interval = EventStoreTimeIntervalEnum.SECOND;

        addStatConfig(module.getMockStatisticConfigurationService(),
                GOOD_STAT_UUID,
                GOOD_STAT_NAME,
                statisticType,
                Arrays.asList(TAG_1, TAG_2),
                interval);

        ZonedDateTime time = ZonedDateTime.now(ZoneOffset.UTC);

        Statistics statistics = StatisticsHelper.buildStatistics(
                StatisticsHelper.buildCountStatistic(time, 1L,
                        StatisticsHelper.buildTagType(TAG_1, TAG_1 + "val1"),
                        StatisticsHelper.buildTagType(TAG_2, TAG_2 + "val1")
                ),
                StatisticsHelper.buildCountStatistic(time.plusDays(2), 1L,
                        StatisticsHelper.buildTagType(TAG_1, TAG_1 + "val1"),
                        StatisticsHelper.buildTagType(TAG_2, TAG_2 + "val1")
                )
        );

        //Set a long purge retention to stop events being bumped up into the next interval
        setPurgeRetention(interval, Integer.MAX_VALUE);

        Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("dummyGroup", "false", kafkaEmbedded);
        consumerProps.put("auto.offset.reset", "earliest");

//        startAllTopicsConsumer(consumerProps);

        ConcurrentMap<String, List<ConsumerRecord<StatEventKey, StatAggregate>>> topicToMsgsMap = new ConcurrentHashMap<>();
        List<BadStatMessage> badEvents = new ArrayList<>();

        //2 input msgs, each one is rolled up to 4 perms so expect 8
        int expectedGoodMsgCount = 2 * 4;
        int expectedBadMsgCount = 0;
        CountDownLatch intervalTopicsLatch = startIntervalTopicsConsumer(StatisticType.COUNT, consumerProps, expectedGoodMsgCount, topicToMsgsMap, true, 100);
        CountDownLatch badTopicsLatch = startBadEventsConsumer(consumerProps, expectedBadMsgCount, badEvents);

        //give the consumers and streams enough time to spin up
//        ThreadUtil.sleepAtLeastIgnoreInterrupts(1_000);

        LOGGER.info("Sending to {} stat events to topic {}", statistics.getStatistic().size(), topic);
        producer.send(buildProducerRecord(topic, GOOD_STAT_UUID, statistics)).get();
        producer.close();

        //Wait for the expected numbers of messages to arrive or timeout if not
        assertThat(intervalTopicsLatch.await(30, TimeUnit.SECONDS)).isTrue();
        assertThat(badTopicsLatch.await(30, TimeUnit.SECONDS)).isTrue();

        //both events go to same interval topic
        assertThat(topicToMsgsMap).hasSize(1);

        String topicName = topicToMsgsMap.keySet().stream().findFirst().get();
        assertThat(topicName).isEqualTo(TopicNameFactory.getIntervalTopicName(STATISTIC_ROLLUP_PERMS_TOPIC_PREFIX, statisticType, interval));
        List<ConsumerRecord<StatEventKey, StatAggregate>> messages = topicToMsgsMap.values().stream().findFirst().get();
        assertThat(messages).hasSize(expectedGoodMsgCount);

        //no bad events
        assertThat(badEvents).hasSize(expectedBadMsgCount);
    }
 
Developer: gchq, Project: stroom-stats, Lines: 74, Source: StatisticsFlatMappingServiceIT.java

Example 10: test_TwoGoodValueEventsRollUpAll

import org.springframework.kafka.test.utils.KafkaTestUtils; // import the package/class the method depends on
@Test
    public void test_TwoGoodValueEventsRollUpAll() throws ExecutionException, InterruptedException, DatatypeConfigurationException {
        module = initStreamProcessing();

        Map<String, Object> senderProps = KafkaTestUtils.producerProps(kafkaEmbedded);
        KafkaProducer<String, String> producer = new KafkaProducer<>(senderProps, Serdes.String().serializer(), Serdes.String().serializer());

        StatisticType statisticType = StatisticType.VALUE;
        String topic = INPUT_TOPICS_MAP.get(statisticType);

        EventStoreTimeIntervalEnum interval = EventStoreTimeIntervalEnum.SECOND;

        addStatConfig(module.getMockStatisticConfigurationService(),
                GOOD_STAT_UUID,
                GOOD_STAT_NAME,
                statisticType,
                Arrays.asList(TAG_1, TAG_2),
                interval);

        ZonedDateTime time = ZonedDateTime.now(ZoneOffset.UTC);

        Statistics statistics = StatisticsHelper.buildStatistics(
                StatisticsHelper.buildValueStatistic(time, 1.5,
                        StatisticsHelper.buildTagType(TAG_1, TAG_1 + "val1"),
                        StatisticsHelper.buildTagType(TAG_2, TAG_2 + "val1")
                ),
                StatisticsHelper.buildValueStatistic(time.plusHours(2), 1.5,
                        StatisticsHelper.buildTagType(TAG_1, TAG_1 + "val1"),
                        StatisticsHelper.buildTagType(TAG_2, TAG_2 + "val1")
                )
        );
        dumpStatistics(statistics);

        //Set a long purge retention to stop events being bumped up into the next interval
        setPurgeRetention(interval, Integer.MAX_VALUE);


        Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("dummyGroup", "false", kafkaEmbedded);
        consumerProps.put("auto.offset.reset", "earliest");

//        startAllTopicsConsumer(consumerProps);

        ConcurrentMap<String, List<ConsumerRecord<StatEventKey, StatAggregate>>> topicToMsgsMap = new ConcurrentHashMap<>();
        List<BadStatMessage> badEvents = new ArrayList<>();

        //2 input msgs, each one is rolled up to 4 perms so expect 8
        int expectedGoodMsgCount = 8;
        int expectedBadMsgCount = 0;
        CountDownLatch intervalTopicsLatch = startIntervalTopicsConsumer(statisticType, consumerProps, expectedGoodMsgCount, topicToMsgsMap, true, 100);
        CountDownLatch badTopicsLatch = startBadEventsConsumer(consumerProps, expectedBadMsgCount, badEvents);

        //give the consumers and streams enough time to spin up
//        ThreadUtil.sleepAtLeastIgnoreInterrupts(1_000);

        LOGGER.info("Sending to {} stat events to topic {}", statistics.getStatistic().size(), topic);
        producer.send(buildProducerRecord(topic, GOOD_STAT_UUID, statistics)).get();
        producer.close();

        //Wait for the expected numbers of messages to arrive or timeout if not
        assertThat(intervalTopicsLatch.await(30, TimeUnit.SECONDS)).isTrue();
        assertThat(badTopicsLatch.await(30, TimeUnit.SECONDS)).isTrue();

        //both events go to same interval topic
        assertThat(topicToMsgsMap).hasSize(1);
        String topicName = topicToMsgsMap.keySet().stream().findFirst().get();
        assertThat(topicName).isEqualTo(TopicNameFactory.getIntervalTopicName(STATISTIC_ROLLUP_PERMS_TOPIC_PREFIX, statisticType, interval));
        List<ConsumerRecord<StatEventKey, StatAggregate>> messages = topicToMsgsMap.values().stream().findFirst().get();
        messages.stream()
                .map(ConsumerRecord::toString)
                .forEach(LOGGER::debug);
        assertThat(messages).hasSize(expectedGoodMsgCount);

        //no bad events
        assertThat(badEvents).hasSize(expectedBadMsgCount);
    }
 
Developer: gchq, Project: stroom-stats, Lines: 76, Source: StatisticsFlatMappingServiceIT.java

Example 11: test_cantUnmarshall

import org.springframework.kafka.test.utils.KafkaTestUtils; // import the package/class the method depends on
@Test
public void test_cantUnmarshall() throws ExecutionException, InterruptedException, DatatypeConfigurationException {
    module = initStreamProcessing();

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(kafkaEmbedded);
    KafkaProducer<String, String> producer = new KafkaProducer<>(senderProps, Serdes.String().serializer(), Serdes.String().serializer());

    StatisticType statisticType = StatisticType.COUNT;
    String topic = INPUT_TOPICS_MAP.get(statisticType);

    EventStoreTimeIntervalEnum interval = EventStoreTimeIntervalEnum.MINUTE;

    addStatConfig(module.getMockStatisticConfigurationService(),
            GOOD_STAT_UUID,
            GOOD_STAT_NAME,
            statisticType,
            Arrays.asList(TAG_1, TAG_2),
            interval);

    ZonedDateTime time = ZonedDateTime.now(ZoneOffset.UTC);

    Statistics statistics = StatisticsHelper.buildStatistics(
            //the good, at this point
            StatisticsHelper.buildCountStatistic(time, 1L,
                    StatisticsHelper.buildTagType(TAG_1, TAG_1 + "val1"),
                    StatisticsHelper.buildTagType(TAG_2, TAG_2 + "val1")
            )
    );


    Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("dummyGroup", "false", kafkaEmbedded);
    consumerProps.put("auto.offset.reset", "earliest");

    ConcurrentMap<String, List<ConsumerRecord<StatEventKey, StatAggregate>>> topicToMsgsMap = new ConcurrentHashMap<>();
    List<BadStatMessage> badEvents = new ArrayList<>();

    int expectedBadMsgCount = 1;

    CountDownLatch badTopicsLatch = startBadEventsConsumer(consumerProps, expectedBadMsgCount, badEvents);


    LOGGER.info("Sending to {} stat events to topic {}", statistics.getStatistic().size(), topic);
    String statKey = GOOD_STAT_UUID;
    //corrupt the xml by renaming one of the element names
    String msgValue = statisticsMarshaller.marshallToXml(statistics)
            .replaceAll("statistic", "badElementName");
    ProducerRecord<String, String> producerRecord = new ProducerRecord<>(topic, statKey, msgValue);
    producer.send(producerRecord).get();
    producer.close();

    //Wait for the expected numbers of messages to arrive or timeout if not
    assertThat(badTopicsLatch.await(30, TimeUnit.SECONDS)).isTrue();

    assertThat(badEvents)
            .hasSize(expectedBadMsgCount);
    assertThat(
            badEvents.stream()
                    .map(BadStatMessage::getKey)
                    .distinct()
                    .collect(Collectors.toList()))
            .containsExactly(GOOD_STAT_UUID);
    assertThat(badEvents.get(0).getValue())
            .contains(StatisticsFlatMappingStreamFactory.UNMARSHALLING_ERROR_TEXT);
}
 
Developer: gchq, Project: stroom-stats, Lines: 65, Source: StatisticsFlatMappingServiceIT.java


Note: The org.springframework.kafka.test.utils.KafkaTestUtils.producerProps examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. For redistribution and use, refer to the license of the corresponding project; please do not republish without permission.