

Java ConsumerRecord Class Code Examples

This article collects typical usage examples of the Java class org.apache.kafka.clients.consumer.ConsumerRecord. If you are wondering what ConsumerRecord is for, how to use it, or what real-world usage looks like, the curated examples below should help.


The ConsumerRecord class belongs to the org.apache.kafka.clients.consumer package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
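Before working through the examples, here is a minimal, self-contained sketch of the most common ConsumerRecord pattern: poll a topic and read each record's coordinates, offset, timestamp, and deserialized key/value. The broker address, group id, and topic name are placeholders, and the poll(long) overload is used to match the Kafka 0.10/0.11-era clients that appear in the examples below.

import java.util.Arrays;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class ConsumerRecordSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker address
        props.put("group.id", "example-group");           // placeholder consumer group
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Arrays.asList("example-topic")); // placeholder topic
            ConsumerRecords<String, String> records = consumer.poll(1000);
            for (ConsumerRecord<String, String> record : records) {
                // Each ConsumerRecord carries its topic/partition coordinates,
                // the offset, a timestamp, and the deserialized key and value.
                System.out.printf("topic=%s partition=%d offset=%d timestamp=%d key=%s value=%s%n",
                        record.topic(), record.partition(), record.offset(),
                        record.timestamp(), record.key(), record.value());
            }
        }
    }
}

A long-running consumer would poll in a loop rather than once; several of the examples below show that pattern.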

Example 1: testProducerConsumer

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the required package/class
@Test
public void testProducerConsumer() {
  String containerIpAddress = cp.getContainerIpAddress();
  System.out.println("containerIpAddress = " + containerIpAddress);
  Integer zookeeperPort = zooCp.getMappedPort(2181);
  System.out.println("zookeeperPort = " + zookeeperPort);
  Integer kafkaPort = cp.getMappedPort(9092);
  System.out.println("kafkaPort = " + kafkaPort);

  HelloProducer helloProducer = new HelloProducer();
  helloProducer.createProducer(cp.kafkaUrl());

  HelloConsumer helloConsumer = new HelloConsumer(cp.kafkaUrl());
  helloConsumer.consume();
  Collection<ConsumerRecord> messages = helloConsumer.getReceivedRecords();

  Assert.assertEquals("message consumed", messages.size(), 5);
  messages.forEach(stringStringConsumerRecord -> {
    Assert.assertEquals(stringStringConsumerRecord.key(), "testContainers");
    Assert.assertEquals(stringStringConsumerRecord.value(), "AreAwesome");
  });
}
 
Developer: gAmUssA, Project: testcontainers-java-module-confluent-platform, Lines: 23, Source: ProducerConsumerTest.java

Example 2: testProducerConsumer

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the required package/class
@Test
public void testProducerConsumer() {
  String host = environment.getServiceHost("kafka_1",29092);
  Integer port = environment.getServicePort("kafka_1", 29092);

  HelloProducer helloProducer = new HelloProducer();
  helloProducer.createProducer(host+":"+port);

  HelloConsumer helloConsumer = new HelloConsumer(host+":"+port);
  helloConsumer.consume();
  Collection<ConsumerRecord> messages = helloConsumer.getReceivedRecords();

  Assert.assertEquals("message consumed", messages.size(), 5);
  messages.forEach(stringStringConsumerRecord -> {
    Assert.assertEquals(stringStringConsumerRecord.key(), "testContainers");
    Assert.assertEquals(stringStringConsumerRecord.value(), "AreAwesome");
  });
}
 
Developer: gAmUssA, Project: testcontainers-java-module-confluent-platform, Lines: 19, Source: KafkaSingleNodeComposeTest.java

Example 3: executeJobs

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the required package/class
private void executeJobs(List<MessageHandler> listJob) throws InterruptedException {
	List<Future<ConsumerRecord<String, byte[]>>> invokeAll = threadPool.invokeAll(listJob);

	// Check the status of each invocation; if one failed, resubmit it until every invocation has completed
	for (Future<ConsumerRecord<String, byte[]>> future : invokeAll) {
		Future<ConsumerRecord<String, byte[]>> localFuture = future;
		boolean futureSuccess = false;
		while (!futureSuccess) {
			try {
				// A null result means success; a non-null result is the failed record, so retry it
				futureSuccess = localFuture.get() == null;
				if (!futureSuccess) {
					localFuture = threadPool.submit(new MessageHandler(localFuture.get()));
					Thread.sleep(1000); // back off to avoid hammering a failing handler
				}
			} catch (ExecutionException e) {
				// By design the handler should never throw
				throw new RuntimeException("Unexpected: the handler should not throw", e);
			}
		}
	}
}
 
Developer: QNJR-GROUP, Project: EasyTransaction, Lines: 23, Source: KafkaEasyTransMsgConsumerImpl.java

Example 4: convertMessages

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the required package/class
private void convertMessages(ConsumerRecords<byte[], byte[]> msgs) {
    for (ConsumerRecord<byte[], byte[]> msg : msgs) {
        log.trace("Consuming message with key {}, value {}", msg.key(), msg.value());
        SchemaAndValue keyAndSchema = keyConverter.toConnectData(msg.topic(), msg.key());
        SchemaAndValue valueAndSchema = valueConverter.toConnectData(msg.topic(), msg.value());
        SinkRecord record = new SinkRecord(msg.topic(), msg.partition(),
                keyAndSchema.schema(), keyAndSchema.value(),
                valueAndSchema.schema(), valueAndSchema.value(),
                msg.offset(),
                ConnectUtils.checkAndConvertTimestamp(msg.timestamp()),
                msg.timestampType());
        record = transformationChain.apply(record);
        if (record != null) {
            messageBatch.add(record);
        }
    }
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 18, Source: WorkerSinkTask.java

Example 5: processMessage

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the required package/class
private static boolean processMessage(ConsumerRecord<ByteBuffer, ByteBuffer> rawMsg) {
    int msg = rawMsg.value().getInt();
    // Roughly 90% of messages are processed; the other 10% are dropped
    if (random.nextInt(10) != 0) {
        // Simulate work by sleeping up to 2.4 seconds
        LOG.info("Processing message: " + msg);
        try {
            Thread.sleep(random.nextInt(25)*100L);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt flag
            e.printStackTrace();
        }

        Integer previous = processedMessages.put(msg, msg);
        if (previous != null) {
            LOG.warn(String.format("Message %d was already processed!", msg));
        }

        int total = totalProcessed.incrementAndGet();
        LOG.info(String.format("Done processing message: %d. Total processed: %d.", msg, total));

        return true;
    } else {
        LOG.info("Dropping message: " + msg);
        return false;
    }
}
 
Developer: softwaremill, Project: kmq, Lines: 27, Source: StandaloneProcessor.java

Example 6: receive

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the required package/class
public List<String> receive() {
    KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(properties);
    consumer.subscribe(Arrays.asList(properties.getProperty("topic")));
    List<String> buffer = new ArrayList<String>();
    // Poll once, collect the message values, then close the consumer.
    // (The original wrapped this in a while(true) loop that always returned
    // on the first iteration, so the loop has been removed.)
    ConsumerRecords<String, String> records = consumer.poll(100);
    for (ConsumerRecord<String, String> record : records) {
        buffer.add(record.value());
    }
    consumer.close();
    return buffer;
}
 
Developer: wanghan0501, Project: WiFiProbeAnalysis, Lines: 18, Source: KafkaConsumers.java

Example 7: onMessage

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the required package/class
@Override
public void onMessage(ConsumerRecord<K, V> data, Acknowledgment acknowledgment) {
    logger.info(cachingDateFormatter.format(System.currentTimeMillis()) + "-" + data.toString());
    // router topic
    String topic = data.topic();

    MessageHandler<K, V> messageHandler = messageHandlers.get(topic);
    if (null == messageHandler) {
        // TODO: handle the case where no MessageHandler is registered for this topic
        throw new RuntimeException("no MessageHandler instance found for topic: " + topic);
    }
    // Resolve the handler's generic message type at runtime
    Type messageType = ((ParameterizedType) messageHandler.getClass().getGenericInterfaces()[0]).getActualTypeArguments()[0];
    // Create the MessageChannel and MessageBuilder
    messageChannel = new KafkaMessageChannel(acknowledgment);
    messageChannel.putMessage(data);
    Message message = MessageBuilder.build(messageType, messageChannel).createMessage(data.key());
    messageHandler.handler(message);
}
 
Developer: ailang323, Project: tankms, Lines: 20, Source: KafkaMessageListenerAdapter.java

Example 8: main

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the required package/class
public static void main(String[] args) {

        KafkaConsumer<String, String> consumer = KafkaConsumerUtil.createConsumer();
        consumer.subscribe(Arrays.asList(TOPIC));
        boolean flag = true;

        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(100);

            // The first poll() triggers partition assignment; once assigned,
            // rewind every partition to the beginning.
            if (flag) {
                Set<TopicPartition> assignments = consumer.assignment();
                assignments.forEach(topicPartition ->
                        consumer.seekToBeginning(
                                Arrays.asList(topicPartition)));
                flag = false;
            }

            for (ConsumerRecord<String, String> record : records)
                System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
        }
    }
 
Developer: jeqo, Project: post-kafka-rewind-consumer-offset, Lines: 22, Source: KafkaConsumerFromBeginning.java

Example 9: createFlowCommandBoltTest

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the required package/class
@Test
public void createFlowCommandBoltTest() throws Exception {
    ConsumerRecord<String, String> record;
    String flowId = UUID.randomUUID().toString();

    createFlow(flowId);

    record = cacheConsumer.pollMessage();
    assertNotNull(record);
    assertNotNull(record.value());

    InfoData infoData = objectMapper.readValue(record.value(), InfoData.class);
    ImmutablePair<Flow, Flow> flow = ((FlowInfoData) infoData).getPayload();
    assertNotNull(flow);

    record = nbConsumer.pollMessage();
    assertNotNull(record);
    assertNotNull(record.value());

    InfoMessage infoMessage = objectMapper.readValue(record.value(), InfoMessage.class);
    FlowResponse response = (FlowResponse) infoMessage.getData();
    assertNotNull(response);
}
 
Developer: telstra, Project: open-kilda, Lines: 24, Source: FlowTopologyTest.java

Example 10: expectOnePoll

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the required package/class
@SuppressWarnings("unchecked")
private IExpectationSetters<Object> expectOnePoll() {
    // Currently the SinkTask's put() method will not be invoked unless we provide some data, so instead of
    // returning empty data, we return one record. The expectation is that the data will be ignored by the
    // response behavior specified using the return value of this method.
    EasyMock.expect(consumer.poll(EasyMock.anyLong())).andAnswer(
            new IAnswer<ConsumerRecords<byte[], byte[]>>() {
                @Override
                public ConsumerRecords<byte[], byte[]> answer() throws Throwable {
                    // "Sleep" so time will progress
                    time.sleep(1L);
                    ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(
                            Collections.singletonMap(
                                    new TopicPartition(TOPIC, PARTITION),
                                    Arrays.asList(
                                            new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturned, TIMESTAMP, TIMESTAMP_TYPE, 0L, 0, 0, RAW_KEY, RAW_VALUE)
                                    )));
                    recordsReturned++;
                    return records;
                }
            });
    EasyMock.expect(keyConverter.toConnectData(TOPIC, RAW_KEY)).andReturn(new SchemaAndValue(KEY_SCHEMA, KEY));
    EasyMock.expect(valueConverter.toConnectData(TOPIC, RAW_VALUE)).andReturn(new SchemaAndValue(VALUE_SCHEMA, VALUE));
    sinkTask.put(EasyMock.anyObject(Collection.class));
    return EasyMock.expectLastCall();
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 27, Source: WorkerSinkTaskThreadedTest.java

Example 11: main

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the required package/class
public static void main(String[] args) throws InterruptedException {
    Map<String, Object> kafkaParams = new HashMap<>();
    kafkaParams.put("bootstrap.servers", "localhost:9092");
    kafkaParams.put("key.deserializer", StringDeserializer.class);
    kafkaParams.put("value.deserializer", StringDeserializer.class);
    kafkaParams.put("group.id", "use_a_separate_group_id_for_each_stream");
    kafkaParams.put("auto.offset.reset", "latest");
    kafkaParams.put("enable.auto.commit", false);

    Collection<String> topics = Arrays.asList("data-in");

    SparkConf sparkConf = new SparkConf().setAppName("JavaKafkaSpark");
    JavaStreamingContext streamingContext = new JavaStreamingContext(sparkConf, Durations.seconds(5));

    final JavaInputDStream<ConsumerRecord<String, String>> stream =
            KafkaUtils.createDirectStream(
                    streamingContext,
                    LocationStrategies.PreferConsistent(),
                    ConsumerStrategies.<String, String>Subscribe(topics, kafkaParams)
            );

    JavaPairDStream<String, Integer>  countOfMessageKeys = stream
            .map((ConsumerRecord<String, String> record) -> record.key())
            .mapToPair((String s) -> new Tuple2<>(s, 1))
            .reduceByKey((Integer i1, Integer i2)-> i1 + i2);

    countOfMessageKeys.print();

    // Start the computation
    streamingContext.start();
    streamingContext.awaitTermination();
}
 
Developer: ebi-wp, Project: kafka-streams-api-websockets, Lines: 33, Source: SparkConsume.java

Example 12: dumpFlowsTopologyEngineBoltTest

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the required package/class
@Test
@Ignore
public void dumpFlowsTopologyEngineBoltTest() throws Exception {
    ConsumerRecord<String, String> nbRecord;
    String flowId = UUID.randomUUID().toString();

    List<Flow> payload = dumpFlowCommand(flowId);

    nbRecord = nbConsumer.pollMessage();
    assertNotNull(nbRecord);
    assertNotNull(nbRecord.value());

    InfoMessage response = objectMapper.readValue(nbRecord.value(), InfoMessage.class);
    assertNotNull(response);

    FlowsResponse responseData = (FlowsResponse) response.getData();
    assertNotNull(responseData);
    assertEquals(payload, responseData.getPayload());
}
 
Developer: telstra, Project: open-kilda, Lines: 20, Source: FlowTopologyTest.java

Example 13: cacheReceivesWfmTopologyUpdatesAndSendsToTopologyEngine

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the required package/class
@Test
public void cacheReceivesWfmTopologyUpdatesAndSendsToTopologyEngine() throws Exception {
    System.out.println("Network Update Test");

    sendSwitchUpdate(sw);

    ConsumerRecord<String, String> record = teConsumer.pollMessage();

    assertNotNull(record);
    assertNotNull(record.value());

    InfoMessage infoMessage = objectMapper.readValue(record.value(), InfoMessage.class);
    SwitchInfoData data = (SwitchInfoData) infoMessage.getData();
    assertNotNull(data);

    assertEquals(sw, data);
}
 
Developer: telstra, Project: open-kilda, Lines: 18, Source: CacheTopologyTest.java

Example 14: getProcessor

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the required package/class
public static Consumer.Processor<String, Task> getProcessor() {
    return new Consumer.Processor<String, Task>() {
        @Override
        protected Boolean process(ConsumerRecords<String, Task> records) {
            for (ConsumerRecord<String, Task> record : records) {
                if (record.key() == null) {
                    log.error("Wrong task encountered. Task meta: {}", record);
                    continue;
                }
                IRequestServer requestServer = provider.getRequestServer(record.key());
                if (requestServer == null) {
                    log.error("Request Server not found for request type: {}", record.key());
                    continue;
                }
                log.info("Request server found: {}", requestServer.getClass());
                try {
                    requestServer.serve(record.value());
                } catch (ServiceException se) {
                    log.error("Service Exception occurred while serving request. Error: ", se);
                    continue;
                }
            }
            return true;
        }
    };
}
 
Developer: dixantmittal, Project: scalable-task-scheduler, Lines: 27, Source: RequestConsumers.java

Example 15: ctrlDumpHandler

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the required package/class
@Test
@Ignore // TODO: ignoring on 2018.01.04 - failing in GCP but not Mac - needs troubleshooting
public void ctrlDumpHandler() throws Exception {
    CtrlRequest request = new CtrlRequest(
            "cachetopology/*", new RequestData("dump"), 1, "dump-correlation-id", Destination.WFM_CTRL);

    sendMessage(request, topology.getConfig().getKafkaCtrlTopic());

    ConsumerRecord<String, String> raw = ctrlConsumer.pollMessage();

    assertNotNull(raw);   // TODO: FAILED
    assertNotNull(raw.value());

    Message responseGeneric = objectMapper.readValue(raw.value(), Message.class);
    CtrlResponse response = (CtrlResponse) responseGeneric;
    ResponseData payload = response.getData();

    assertEquals(request.getCorrelationId(), response.getCorrelationId());
    assertEquals(CacheTopology.BOLT_ID_CACHE, payload.getComponent());
    assertTrue(payload instanceof DumpStateResponseData);
}
 
Developer: telstra, Project: open-kilda, Lines: 22, Source: CacheTopologyTest.java


Note: The org.apache.kafka.clients.consumer.ConsumerRecord class examples in this article were curated by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Consult each project's license before distributing or using the code, and do not republish without permission.