This page collects typical usage examples of the Java class org.springframework.kafka.listener.MessageListener. If you are wondering what the MessageListener class is for, how to use it, or are looking for working examples, the code samples selected below may help.
The MessageListener class belongs to the org.springframework.kafka.listener package. A total of 13 code examples of the class are shown below, ordered by popularity by default.
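For orientation before the examples, here is a minimal sketch of the contract itself (not taken from the listing): the single method to implement on MessageListener<K, V> is onMessage(ConsumerRecord<K, V>), so it can be implemented as a class or supplied as a lambda, as several of the examples below do.
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.listener.MessageListener;

// minimal record-at-a-time listener: onMessage is invoked once per polled record
public class LoggingMessageListener implements MessageListener<String, String> {

    @Override
    public void onMessage(ConsumerRecord<String, String> record) {
        System.out.println("received key=" + record.key() + ", value=" + record.value());
    }
}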
Example 1: createSystemConsumer
import org.springframework.kafka.listener.MessageListener; // import the required package/class
private void createSystemConsumer(String name, MessageListener<String, String> consumeEvent) {
    log.info("Creating kafka consumer for topic {}", name);
    ContainerProperties containerProps = new ContainerProperties(name);
    Map<String, Object> props = kafkaProperties.buildConsumerProperties();
    if (name.equals(applicationProperties.getKafkaSystemTopic())) {
        props.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    }
    ConsumerFactory<String, String> factory = new DefaultKafkaConsumerFactory<>(props);
    ConcurrentMessageListenerContainer<String, String> container =
        new ConcurrentMessageListenerContainer<>(factory, containerProps);
    container.setupMessageListener(consumeEvent);
    container.start();
    log.info("Successfully created kafka consumer for topic {}", name);
}
Example 2: createKafkaConsumer
import org.springframework.kafka.listener.MessageListener; // import the required package/class
/**
 * Create topic consumer.
 * @param tenant the kafka topic
 */
public void createKafkaConsumer(String tenant) {
    StopWatch stopWatch = StopWatch.createStarted();
    try {
        log.info("START - SETUP:CreateTenant:kafka consumer tenantKey: {}", tenant);
        ConcurrentMessageListenerContainer<String, String> container = consumers.get(tenant);
        if (container != null) {
            if (!container.isRunning()) {
                container.start();
            }
        } else {
            ContainerProperties containerProps = new ContainerProperties(tenant);
            container = new ConcurrentMessageListenerContainer<>(consumerFactory, containerProps);
            container.setupMessageListener((MessageListener<String, String>) consumer::consumeEvent);
            container.setBeanName(tenant);
            container.start();
            consumers.put(tenant, container);
        }
        log.info("STOP - SETUP:CreateTenant:kafka consumer tenantKey: {}, result: OK, time = {} ms",
            tenant, stopWatch.getTime());
    } catch (Exception e) {
        log.error("STOP - SETUP:CreateTenant:kafka consumer tenantKey: {}, result: FAIL, error: {}, time = {} ms",
            tenant, e.getMessage(), stopWatch.getTime(), e);
    }
}
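Example 2 registers each started container in a consumers map so it can be looked up again later. The listing does not show how a tenant's consumer is torn down; a hypothetical counterpart (method name and shape are assumptions, reusing the same class and imports as Example 2) might look like this:
// hypothetical counterpart to createKafkaConsumer: stop and unregister a tenant's container
public void removeKafkaConsumer(String tenant) {
    ConcurrentMessageListenerContainer<String, String> container = consumers.remove(tenant);
    if (container != null && container.isRunning()) {
        container.stop();
    }
}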
Example 3: messageListenerContainer
import org.springframework.kafka.listener.MessageListener; // import the required package/class
@Bean
public MessageListenerContainer messageListenerContainer(
        ConsumerFactory<String, DefaultAvMessage> consumerFactory,
        MessageListener<String, AvMessage> messageListener,
        ThreadPoolTaskScheduler kafkaClientThreadPoolTaskScheduler
) {
    ContainerProperties props = new ContainerProperties(resultTopic);
    // shouldn't be necessary, but the default scheduler is not destroyed after shutdown
    props.setScheduler(kafkaClientThreadPoolTaskScheduler);
    MessageListenerContainer container = new ConcurrentMessageListenerContainer<>(
        consumerFactory,
        props
    );
    container.setupMessageListener(messageListener);
    return container;
}
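The kafkaClientThreadPoolTaskScheduler injected above is not part of the listing. A plausible companion bean (an assumption; only the parameter name comes from the example) is a Spring-managed ThreadPoolTaskScheduler, which is shut down together with the application context, unlike the container's default scheduler mentioned in the comment:
import org.springframework.context.annotation.Bean;
import org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler;

// hypothetical scheduler bean; being container-managed, it is destroyed when the context closes
@Bean
public ThreadPoolTaskScheduler kafkaClientThreadPoolTaskScheduler() {
    ThreadPoolTaskScheduler scheduler = new ThreadPoolTaskScheduler();
    scheduler.setThreadNamePrefix("kafka-client-");
    scheduler.setPoolSize(1);
    return scheduler;
}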
Example 4: testAutoCommit
import org.springframework.kafka.listener.MessageListener; // import the required package/class
@Test
public void testAutoCommit() throws Exception {
    LOG.info("Start testAutoCommit");
    ContainerProperties containerProps = new ContainerProperties("topic3", "topic4");
    final CountDownLatch latch = new CountDownLatch(4);
    containerProps.setMessageListener((MessageListener<Integer, String>) message -> {
        LOG.info("received: " + message);
        latch.countDown();
    });
    KafkaMessageListenerContainer<Integer, String> container = createContainer(containerProps,
        IntegerDeserializer.class, StringDeserializer.class);
    container.setBeanName("testAutoCommit");
    container.start();
    Thread.sleep(5000); // wait a bit for the container to start
    KafkaTemplate<Integer, String> template = createTemplate(IntegerSerializer.class, StringSerializer.class);
    template.setDefaultTopic("topic3");
    template.sendDefault(0, "foo");
    template.sendDefault(2, "bar");
    template.sendDefault(0, "baz");
    template.sendDefault(2, "qux");
    template.flush();
    assertTrue(latch.await(60, TimeUnit.SECONDS));
    container.stop();
    LOG.info("Stop testAutoCommit");
}
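The createContainer and createTemplate helpers used in this test are not shown in the listing. Here is a sketch of what createContainer might look like against an embedded broker (the group id, auto-commit flag, and embeddedKafka field are assumptions; ContainerProperties is imported as in the test above):
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.KafkaMessageListenerContainer;
import org.springframework.kafka.test.utils.KafkaTestUtils;

// hypothetical helper: build a container for the embedded broker with the given deserializers
private KafkaMessageListenerContainer<Integer, String> createContainer(
        ContainerProperties containerProps, Class<?> keyDeserializer, Class<?> valueDeserializer) {
    Map<String, Object> props = KafkaTestUtils.consumerProps("testAutoCommitGroup", "true", embeddedKafka);
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer);
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    return new KafkaMessageListenerContainer<>(cf, containerProps);
}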
Example 5: fileServerMessageListenerContainer
import org.springframework.kafka.listener.MessageListener; // import the required package/class
@Bean
public MessageListenerContainer fileServerMessageListenerContainer(
        ConsumerFactory<String, DefaultAvMessage> consumerFactory,
        MessageListener<String, AvMessage> fileServerMessageListener,
        ThreadPoolTaskScheduler kafkaServerThreadPoolTaskScheduler
) {
    ContainerProperties props = new ContainerProperties(fileTopic);
    // shouldn't be necessary, but the default scheduler is not destroyed after shutdown
    props.setScheduler(kafkaServerThreadPoolTaskScheduler);
    MessageListenerContainer container = new ConcurrentMessageListenerContainer<>(
        consumerFactory,
        props
    );
    container.setupMessageListener(fileServerMessageListener);
    return container;
}
Example 6: createConsumer
import org.springframework.kafka.listener.MessageListener; // import the required package/class
private void createConsumer(String name) {
    log.info("Creating kafka consumer for tenant {}", name);
    ContainerProperties containerProps = new ContainerProperties(name);
    ConcurrentMessageListenerContainer<String, String> container =
        new ConcurrentMessageListenerContainer<>(consumerFactory, containerProps);
    container.setupMessageListener((MessageListener<String, String>) timelineConsumer::consumeEvent);
    container.start();
    log.info("Successfully created kafka consumer for tenant {}", name);
}
Example 7: createCommandConsumer
import org.springframework.kafka.listener.MessageListener; // import the required package/class
private void createCommandConsumer(String name) {
    log.info("Creating kafka command consumer for topic {}", name);
    ContainerProperties containerProps = new ContainerProperties(name);
    Map<String, Object> props = kafkaProperties.buildConsumerProperties();
    props.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    ConsumerFactory<String, String> factory = new DefaultKafkaConsumerFactory<>(props);
    ConcurrentMessageListenerContainer<String, String> container =
        new ConcurrentMessageListenerContainer<>(factory, containerProps);
    container.setupMessageListener((MessageListener<String, String>) commandConsumer::consumeEvent);
    container.start();
    log.info("Successfully created kafka command consumer for topic {}", name);
}
Example 8: setup
import org.springframework.kafka.listener.MessageListener; // import the required package/class
@Before
public void setup() throws Exception {
    Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("testT", "false", embeddedKafka);
    DefaultKafkaConsumerFactory<String, String> cf =
        new DefaultKafkaConsumerFactory<>(consumerProps);
    ContainerProperties containerProperties = new ContainerProperties(TEST_TOPIC);
    container = new KafkaMessageListenerContainer<>(cf, containerProperties);
    final BlockingQueue<ConsumerRecord<String, String>> records = new LinkedBlockingQueue<>();
    container.setupMessageListener((MessageListener<String, String>) record -> {
        log.error("Message received: " + record);
        records.add(record);
    });
    container.start();
    ContainerTestUtils.waitForAssignment(container, embeddedKafka.getPartitionsPerTopic());
    Map<String, Object> senderProps = KafkaTestUtils.senderProps(embeddedKafka.getBrokersAsString());
    ProducerFactory<String, String> pf =
        new DefaultKafkaProducerFactory<>(senderProps);
    template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(TEST_TOPIC);
}
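The container started in this fixture is a field on the test class, so a matching cleanup step is usually added as well; a minimal sketch, not part of the original example:
import org.junit.After;

// stop the listener container after each test so consumers do not leak between tests
@After
public void tearDown() {
    container.stop();
}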
Example 9: setUp
import org.springframework.kafka.listener.MessageListener; // import the required package/class
@Before
public void setUp() throws Exception {
    // set up the Kafka consumer properties
    Map<String, Object> consumerProperties =
        KafkaTestUtils.consumerProps("sender", "false", embeddedKafka);
    // create a Kafka consumer factory
    DefaultKafkaConsumerFactory<String, String> consumerFactory =
        new DefaultKafkaConsumerFactory<String, String>(consumerProperties);
    // set the topic that needs to be consumed
    ContainerProperties containerProperties = new ContainerProperties(SENDER_TOPIC);
    // create a Kafka MessageListenerContainer
    container = new KafkaMessageListenerContainer<>(consumerFactory, containerProperties);
    // create a thread-safe queue to store the received messages
    records = new LinkedBlockingQueue<>();
    // set up a Kafka message listener
    container.setupMessageListener(new MessageListener<String, String>() {
        @Override
        public void onMessage(ConsumerRecord<String, String> record) {
            LOGGER.debug("test-listener received message='{}'", record.toString());
            records.add(record);
        }
    });
    // start the container and underlying message listener
    container.start();
    // wait until the container has the required number of assigned partitions
    ContainerTestUtils.waitForAssignment(container, embeddedKafka.getPartitionsPerTopic());
}
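Example 9 only shows the fixture; the records queue is then drained by the test methods. A hypothetical test shape, assuming a sender component under test with a send(topic, payload) method (neither the component nor the method is part of the listing):
import java.util.concurrent.TimeUnit;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;

// hypothetical test built on the fixture above; 'sender' is the assumed component under test
@Test
public void whenSend_thenMessageIsReceivedByListener() throws Exception {
    sender.send(SENDER_TOPIC, "hello");
    ConsumerRecord<String, String> received = records.poll(10, TimeUnit.SECONDS);
    assertNotNull(received);
    assertEquals("hello", received.value());
}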
Example 10: testSendIndexDTO
import org.springframework.kafka.listener.MessageListener; // import the required package/class
@Test
public void testSendIndexDTO() throws Exception {
    LOG.info("Start testSendIndexDTO");
    ContainerProperties containerProps = new ContainerProperties("topic1", "topic2");
    List<IndexDTO> dtos = prepareIndexableDtos(rdfHandler, "/data/discos/rmd18mddcw", null)
        .collect(Collectors.toList());
    Queue<IndexDTO> expectedDtos = new ArrayDeque<>(dtos);
    Queue<ExpectedActualDTOPair> receivedDtos = new ArrayDeque<>();
    final CountDownLatch latch = new CountDownLatch(3);
    containerProps.setMessageListener((MessageListener<Integer, IndexDTO>) message -> {
        LOG.info("received: " + message);
        IndexDTO expected = expectedDtos.remove();
        LOG.debug("expected: " + expected);
        IndexDTO actual = message.value();
        LOG.debug("actual: " + actual);
        receivedDtos.add(new ExpectedActualDTOPair(expected, actual));
        LOG.debug("Decrementing latch.");
        latch.countDown();
    });
    KafkaMessageListenerContainer<Integer, IndexDTO> container =
        createContainerForDto(containerProps, IntegerDeserializer.class, GenericJvmObjectDeserializer.class);
    container.setBeanName("testSendIndexDTO");
    container.start();
    Thread.sleep(5000); // wait a bit for the container to start
    KafkaTemplate<Integer, IndexDTO> template =
        createTemplate(IntegerSerializer.class, GenericJvmObjectSerializer.class);
    template.setDefaultTopic("topic1");
    prepareIndexableDtos(rdfHandler, "/data/discos/rmd18mddcw", null)
        .peek(dto -> LOG.debug("Prepared DTO {}", dto))
        .forEach(template::sendDefault);
    // do anything with the future returned by the template?
    template.flush();
    assertTrue(latch.await(120, TimeUnit.SECONDS));
    container.stop();
    LOG.info("Stop testSendIndexDTO");
    receivedDtos.forEach(pair -> assertEquals(pair.expected, pair.actual));
}
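The ExpectedActualDTOPair helper referenced above is not shown in the listing; judging from the pair.expected and pair.actual accesses, it is presumably a small value holder along these lines (an assumption):
// hypothetical value holder matching the pair.expected / pair.actual accesses in the test
private static class ExpectedActualDTOPair {
    final IndexDTO expected;
    final IndexDTO actual;

    ExpectedActualDTOPair(IndexDTO expected, IndexDTO actual) {
        this.expected = expected;
        this.actual = actual;
    }
}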
Example 11: messageListener
import org.springframework.kafka.listener.MessageListener; // import the required package/class
@Bean
public MessageListener<String, AvMessage> messageListener(AvNetworkComponent avNetworkComponent) {
    return avNetworkComponent;
}
Example 12: fileServerMessageListener
import org.springframework.kafka.listener.MessageListener; // import the required package/class
@Bean
public MessageListener<String, AvMessage> fileServerMessageListener(
        AvNetworkComponent avNetworkComponent
) {
    return avNetworkComponent;
}
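Examples 11 and 12 simply expose an existing AvNetworkComponent as a MessageListener<String, AvMessage> bean. The component itself is not part of the listing; all it has to satisfy is the interface, roughly like this sketch (the real implementation will differ):
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.listener.MessageListener;

// hypothetical sketch of a component that could be returned from the bean methods above
public class AvNetworkComponent implements MessageListener<String, AvMessage> {

    @Override
    public void onMessage(ConsumerRecord<String, AvMessage> record) {
        AvMessage message = record.value();
        handle(message);
    }

    private void handle(AvMessage message) {
        // application-specific processing (assumed)
    }
}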
Example 13: setUp
import org.springframework.kafka.listener.MessageListener; // import the required package/class
@Before
public void setUp() throws Exception {
    // set up the Kafka consumer properties
    Map<String, Object> consumerProperties =
        KafkaTestUtils.consumerProps("sender_group", "false", AllSpringKafkaTests.embeddedKafka);
    // create a Kafka consumer factory
    DefaultKafkaConsumerFactory<String, String> consumerFactory =
        new DefaultKafkaConsumerFactory<String, String>(consumerProperties);
    // set the topic that needs to be consumed
    ContainerProperties containerProperties =
        new ContainerProperties(AllSpringKafkaTests.SENDER_TOPIC);
    // create a Kafka MessageListenerContainer
    container = new KafkaMessageListenerContainer<>(consumerFactory, containerProperties);
    // create a thread-safe queue to store the received messages
    records = new LinkedBlockingQueue<>();
    // set up a Kafka message listener
    container.setupMessageListener(new MessageListener<String, String>() {
        @Override
        public void onMessage(ConsumerRecord<String, String> record) {
            LOGGER.debug("test-listener received message='{}'", record.toString());
            records.add(record);
        }
    });
    // start the container and underlying message listener
    container.start();
    // wait until the container has the required number of assigned partitions
    ContainerTestUtils.waitForAssignment(container,
        AllSpringKafkaTests.embeddedKafka.getPartitionsPerTopic());
}