

Java ConsumerRecords Class Code Examples

This article collects typical usage examples of the Java class org.apache.kafka.clients.consumer.ConsumerRecords. If you have been wondering what exactly ConsumerRecords does and how to use it, the curated class examples below should help.


The ConsumerRecords class belongs to the org.apache.kafka.clients.consumer package. The sections below show 15 code examples of the class, ordered by popularity.
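Before the examples, here is a minimal sketch of where ConsumerRecords fits in the consumer API (the broker address and topic name below are placeholders): poll() returns one ConsumerRecords batch, which can be counted, iterated record by record, or sliced per partition.

import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class ConsumerRecordsSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker
        props.put("group.id", "sketch-group");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Collections.singletonList("demo-topic")); // placeholder topic

        ConsumerRecords<String, String> records = consumer.poll(100); // one batch
        System.out.println("records in this batch: " + records.count());
        for (ConsumerRecord<String, String> record : records) {
            System.out.printf("partition = %d, offset = %d, value = %s%n",
                    record.partition(), record.offset(), record.value());
        }
        consumer.close();
    }
}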

Example 1: receive

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the required package/class
public String receive() {
    KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(properties);
    consumer.subscribe(Arrays.asList(properties.getProperty("topic")));
    final int minBatchSize = 200;
    List<ConsumerRecord<String, String>> buffer = new ArrayList<ConsumerRecord<String, String>>();
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);

        for (ConsumerRecord<String, String> record : records) {
            buffer.add(record);
            System.err.println(buffer.size() + "----->" + record);

        }
        if (buffer.size() >= minBatchSize) {
            writeFileToHadoop(buffer); // write the buffered records to a file first
            consumer.commitSync();
            buffer.clear();
        }
    }
}
 
Developer: wanghan0501, Project: WiFiProbeAnalysis, Lines: 21, Source: KafkaConsumerForHive.java

Example 2: main

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the required package/class
public static void main(String[] args) {
    KafkaConsumer<String, String> consumer = createConsumer();
    consumer.subscribe(Arrays.asList(TOPIC));

    boolean flag = true;

    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        if (flag) {
            // partitions are only assigned during poll(), so seek lazily, exactly once
            Set<TopicPartition> assignments = consumer.assignment();
            assignments.forEach(topicPartition -> consumer.seek(topicPartition, 90));
            flag = false;
        }

        for (ConsumerRecord<String, String> record : records)
            System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
    }
}
 
Developer: jeqo, Project: post-kafka-rewind-consumer-offset, Lines: 26, Source: KafkaConsumerFromOffset.java
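A note on the design above: the flag dance is needed because partitions are only assigned inside poll(). As a sketch of an alternative (assuming the same TOPIC constant and target offset), a ConsumerRebalanceListener can perform the seek at the exact moment partitions are assigned:

import java.util.Arrays;
import java.util.Collection;

import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.common.TopicPartition;

// seek as soon as partitions are assigned, instead of checking a flag on every poll
consumer.subscribe(Arrays.asList(TOPIC), new ConsumerRebalanceListener() {
    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        partitions.forEach(tp -> consumer.seek(tp, 90)); // same target offset as above
    }

    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        // nothing to clean up in this sketch
    }
});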

Example 3: run

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the required package/class
@Override
public void run() {
    while (true) {

        ConsumerRecords<byte[], ChangeDataRecord> changeRecords = consumer.poll(KAFKA_CONSUMER_POLL_TIMEOUT);
        for (ConsumerRecord<byte[], ChangeDataRecord> consumerRecord : changeRecords) {

            // The ChangeDataRecord contains all the changes made to a document
            ChangeDataRecord changeDataRecord = consumerRecord.value();
            String documentId = changeDataRecord.getId().getString();

            // Handle 'RECORD_INSERT'
            if (changeDataRecord.getType() == ChangeDataRecordType.RECORD_INSERT && this.onInsert != null) {
                this.onInsert.handle(documentId);
            }

            // Handle 'RECORD_DELETE'
            if (changeDataRecord.getType() == ChangeDataRecordType.RECORD_DELETE && this.onDelete != null) {
                this.onDelete.handle(documentId);
            }

        }
    }
}
 
Developer: mapr-demos, Project: mapr-music, Lines: 25, Source: CdcStatisticService.java

Example 4: run

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the required package/class
public void run() {
    try {
        printJson(new StartupComplete());
        consumer.subscribe(Collections.singletonList(topic), this);

        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Long.MAX_VALUE);
            Map<TopicPartition, OffsetAndMetadata> offsets = onRecordsReceived(records);

            if (!useAutoCommit) {
                if (useAsyncCommit)
                    consumer.commitAsync(offsets, this);
                else
                    commitSync(offsets);
            }
        }
    } catch (WakeupException e) {
        // ignore, we are closing
    } finally {
        consumer.close();
        printJson(new ShutdownComplete());
        shutdownLatch.countDown();
    }
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 25, Source: VerifiableConsumer.java
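In this example the enclosing class doubles as the commit callback (note the `this` passed to commitAsync). A standalone sketch of such a callback, assuming we only want to log failed commits, could look like:

import java.util.Map;

import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetCommitCallback;
import org.apache.kafka.common.TopicPartition;

OffsetCommitCallback logOnFailure = new OffsetCommitCallback() {
    @Override
    public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
        if (exception != null) {
            // commitAsync does not retry; a later successful commit usually supersedes this one
            System.err.println("async commit failed for " + offsets + ": " + exception);
        }
    }
};
consumer.commitAsync(offsets, logOnFailure);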

Example 5: receive

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the required package/class
public List<String> receive() {
    KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(properties);
    consumer.subscribe(Arrays.asList(properties.getProperty("topic")));
    List<String> buffer = new ArrayList<String>();
    // a single poll: collect whatever arrived within the timeout, then close
    ConsumerRecords<String, String> records = consumer.poll(100);
    for (ConsumerRecord<String, String> record : records) {
        buffer.add(record.value());
    }
    consumer.close();
    return buffer;
}
 
Developer: wanghan0501, Project: WiFiProbeAnalysis, Lines: 18, Source: KafkaConsumers.java

Example 6: run

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the required package/class
@Override
public void run() {
    try {
        consumer.subscribe(Collections.singletonList(Config.getProperty("input_topic")));
        while (!closed.get()) {
            ConsumerRecords<String, String> records = consumer.poll(3000);
            try {
                consumer.commitSync(); // commit
            } catch (Exception ignored) {
            }
            if (records.count() > 0) {
                handler.consume(records);
            }
        }
    } catch (WakeupException e) {
        if (!closed.get()) throw e;
    } finally {
        consumer.close();
    }
}
 
Developer: Zephery, Project: newblog, Lines: 21, Source: KafkaConsumerRunner.java
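Note that this runner commits offsets before handing the records to the handler, so a crash inside consume() loses that batch (at-most-once delivery). A sketch of the reverse ordering, trading possible re-processing for at-least-once delivery, keeps the same loop structure:

while (!closed.get()) {
    ConsumerRecords<String, String> records = consumer.poll(3000);
    if (records.count() > 0) {
        handler.consume(records); // process first...
        try {
            consumer.commitSync(); // ...then commit, so a crash replays instead of dropping
        } catch (Exception ignored) {
        }
    }
}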

Example 7: fillRowSet

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the required package/class
@SuppressWarnings("unchecked")
@Override
public void fillRowSet(RowSet rowSet) throws BiremeException {
  CommitCallback callback = changeSet.callback;
  HashMap<String, Long> offsets = ((KafkaCommitCallback) callback).partitionOffset;
  Row row = null;

  for (ConsumerRecord<String, String> change :
      (ConsumerRecords<String, String>) changeSet.changes) {
    row = new Row();

    if (!transform(change, row)) {
      continue;
    }

    addToRowSet(row, rowSet);
    offsets.put(change.topic() + "+" + change.partition(), change.offset());
    callback.setNewestRecord(row.produceTime);
  }

  callback.setNumOfTables(rowSet.rowBucket.size());
  rowSet.callback = callback;
}
 
Developer: HashDataInc, Project: bireme, Lines: 24, Source: KafkaPipeLine.java

Example 8: handleMessages

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the required package/class
/**
 * Processes a batch of messages fetched via the Kafka consumer.
 *
 * @param records the batch of messages to be processed
 */
public void handleMessages(ConsumerRecords<String, byte[]> records) {

    // process the records one by one
    for (ConsumerRecord<String, byte[]> record : records) {
        // filter out messages from paused topics
        if (consumerListener.filterPausedTopic(record.topic())) {
            listener.increaseFlowSize(record.serializedValueSize());
            try {
                this.chooseProcessor(record.key(), record.topic()).process(record);
            } catch (Exception e) {
                logger.error("sport process error", e);
                consumerListener.seek(record);
                break;
            }
        } else {
            listener.reduceFlowSize(record.serializedValueSize());
            consumerListener.syncOffset(record);
            logger.info("The record of topic {} was skipped whose offset is {}", record.topic(), record.offset());
        }
    }
}
 
Developer: BriData, Project: DBus, Lines: 27, Source: AbstractMessageHandler.java

Example 9: readKafkaTopic

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the required package/class
@GET
@Path("/readKafkaTopic")
public Response readKafkaTopic(Map<String, Object > map) {
    try {
        Properties properties = PropertiesUtils.getProps("consumer.properties");
        properties.setProperty("client.id","readKafkaTopic");
        properties.setProperty("group.id","readKafkaTopic");
        //properties.setProperty("bootstrap.servers", "localhost:9092");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        String topic = map.get("topic").toString();
        //System.out.println("topic="+topic);
        TopicPartition topicPartition = new TopicPartition(topic, 0);
        List<TopicPartition> topics = Arrays.asList(topicPartition);
        consumer.assign(topics);
        consumer.seekToEnd(topics);
        long end = consumer.position(topicPartition);
        // rewind to (at most) the last 1000 messages of partition 0
        long current = Math.max(0, end - 1000);
        consumer.seek(topicPartition, current);
        List<String> result = new ArrayList<>();
        while (current < end) {
            //System.out.println("topic position = "+current);
            ConsumerRecords<String, String> records = consumer.poll(1000);
            for (ConsumerRecord<String, String> record : records) {
                result.add(record.value());
            }
            current = consumer.position(topicPartition);
        }
        consumer.close();
        return Response.ok().entity(result).build();
    } catch (Exception e) {
        logger.error("Error encountered while readKafkaTopic with parameter:{}", JSON.toJSONString(map), e);
        return Response.status(204).entity(new Result(-1, e.getMessage())).build();
    }
}
 
Developer: BriData, Project: DBus, Lines: 38, Source: DataTableResource.java

Example 10: pollRequests

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the required package/class
/**
 * Get the next batch of records by polling.
 * @return Next batch of records or null if no records available.
 */
private ConsumerRecords<byte[], byte[]> pollRequests() {
    ConsumerRecords<byte[], byte[]> records = null;

    try {
        records = consumer.poll(pollTimeMs);
    } catch (final InvalidOffsetException e) {
        resetInvalidOffsets(e);
    }

    if (rebalanceException != null) {
        if (!(rebalanceException instanceof ProducerFencedException)) {
            throw new StreamsException(logPrefix + " Failed to rebalance.", rebalanceException);
        }
    }

    return records;
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 22, Source: StreamThread.java

Example 11: readKeyValues

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the required package/class
/**
 * Returns up to `maxMessages` key-value pairs read from the given topic, using a consumer
 * created from the provided configuration.
 *
 * @param topic          Kafka topic to read messages from
 * @param consumerConfig Kafka consumer configuration
 * @param maxMessages    Maximum number of messages to read via the consumer
 * @return The KeyValue elements retrieved via the consumer
 */
public static <K, V> List<KeyValue<K, V>> readKeyValues(String topic, Properties consumerConfig, int maxMessages) {
  KafkaConsumer<K, V> consumer = new KafkaConsumer<>(consumerConfig);
  consumer.subscribe(Collections.singletonList(topic));
  int pollIntervalMs = 100;
  int maxTotalPollTimeMs = 2000;
  int totalPollTimeMs = 0;
  List<KeyValue<K, V>> consumedValues = new ArrayList<>();
  while (totalPollTimeMs < maxTotalPollTimeMs && continueConsuming(consumedValues.size(), maxMessages)) {
    totalPollTimeMs += pollIntervalMs;
    ConsumerRecords<K, V> records = consumer.poll(pollIntervalMs);
    for (ConsumerRecord<K, V> record : records) {
      consumedValues.add(new KeyValue<>(record.key(), record.value()));
    }
  }
  consumer.close();
  return consumedValues;
}
 
Developer: kaiwaehner, Project: kafka-streams-machine-learning-examples, Lines: 27, Source: IntegrationTestUtils.java
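A hypothetical call site for this helper (the broker address, topic name, and expected message count are placeholders) might look like:

Properties consumerConfig = new Properties();
consumerConfig.put("bootstrap.servers", "localhost:9092"); // placeholder broker
consumerConfig.put("group.id", "integration-test");
consumerConfig.put("auto.offset.reset", "earliest"); // read the topic from the start
consumerConfig.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
consumerConfig.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

List<KeyValue<String, String>> consumed =
        IntegrationTestUtils.readKeyValues("output-topic", consumerConfig, 10);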

Example 12: expectOnePoll

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the required package/class
@SuppressWarnings("unchecked")
private IExpectationSetters<Object> expectOnePoll() {
    // Currently the SinkTask's put() method will not be invoked unless we provide some data, so instead of
    // returning empty data, we return one record. The expectation is that the data will be ignored by the
    // response behavior specified using the return value of this method.
    EasyMock.expect(consumer.poll(EasyMock.anyLong())).andAnswer(
            new IAnswer<ConsumerRecords<byte[], byte[]>>() {
                @Override
                public ConsumerRecords<byte[], byte[]> answer() throws Throwable {
                    // "Sleep" so time will progress
                    time.sleep(1L);
                    ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(
                            Collections.singletonMap(
                                    new TopicPartition(TOPIC, PARTITION),
                                    Arrays.asList(
                                            new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturned, TIMESTAMP, TIMESTAMP_TYPE, 0L, 0, 0, RAW_KEY, RAW_VALUE)
                                    )));
                    recordsReturned++;
                    return records;
                }
            });
    EasyMock.expect(keyConverter.toConnectData(TOPIC, RAW_KEY)).andReturn(new SchemaAndValue(KEY_SCHEMA, KEY));
    EasyMock.expect(valueConverter.toConnectData(TOPIC, RAW_VALUE)).andReturn(new SchemaAndValue(VALUE_SCHEMA, VALUE));
    sinkTask.put(EasyMock.anyObject(Collection.class));
    return EasyMock.expectLastCall();
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 27, Source: WorkerSinkTaskThreadedTest.java

Example 13: main

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the required package/class
public static void main(String[] args) throws Exception {
    Properties props = new Properties();

    props.put("bootstrap.servers", "192.168.77.7:9092,192.168.77.7:9093,192.168.77.7:9094");
    props.put("group.id", "test-group-id");
    props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

    consumer.subscribe(Collections.singletonList("test"));

    System.out.println("Subscribed to topic test");

    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        for (ConsumerRecord<String, String> record : records)

            System.out.println(String.format("offset = %s, key = %s, value = %s", record.offset(), record.key(), record.value()));
    }
}
 
Developer: bpark, Project: kafka-docker-demo, Lines: 21, Source: ConsumerDemo.java

Example 14: main

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the required package/class
public static void main(String[] args) {
	Properties props = new Properties();
	props.put("bootstrap.servers", "123.207.61.225:9092");
	props.put("group.id", "test-consumer-group");
	props.put("enable.auto.commit", "true");
	props.put("auto.commit.interval.ms", "1000");
	props.put("session.timeout.ms", "30000");
	props.put("log.level", "info");
	props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
	props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
	consumer = new KafkaConsumer<String, String>(props);
	consumer.subscribe(Arrays.asList("test"));
	while (true) {
		ConsumerRecords<String, String> records = consumer.poll(100);
		for (ConsumerRecord<String, String> record : records)
			System.out.printf("offset = %d, key = %s, value = %s\n", record.offset(), record.key(), record.value());
	}
}
 
Developer: netkiller, Project: ipo, Lines: 20, Source: Kafka.java

Example 15: retrieveOneMessage

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the required package/class
private static ConsumerRecord<byte[], byte[]> retrieveOneMessage(KafkaConsumer<byte[], byte[]> kafkaConsumer,
                                                                 TopicPartition topicPartition,
                                                                 long offset) {
  kafkaConsumer.seek(topicPartition, offset);
  ConsumerRecords<byte[], byte[]> records;
  ConsumerRecord<byte[], byte[]> record = null;
  while (record == null) {
    records = kafkaConsumer.poll(100);
    if (!records.isEmpty()) {
      LOG.debug("records.count() = {}", records.count());
      List<ConsumerRecord<byte[], byte[]>> reclist = records.records(topicPartition);
      if (reclist != null && !reclist.isEmpty()) {
        record = reclist.get(0);
        break;
      } else {
        LOG.info("recList is null or empty");
      }
    }
  }
  return record;
}
 
Developer: pinterest, Project: doctorkafka, Lines: 22, Source: ReplicaStatsManager.java


Note: The org.apache.kafka.clients.consumer.ConsumerRecords examples in this article were compiled from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects by various contributors; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not republish without permission.