This article collects typical usage examples of the Java method org.apache.kafka.clients.consumer.ConsumerRecord.value. If you are unsure what ConsumerRecord.value does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore the enclosing class, org.apache.kafka.clients.consumer.ConsumerRecord, for further reference.
The following presents 15 code examples of the ConsumerRecord.value method, sorted by popularity by default.
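As background for the examples, ConsumerRecord.value() simply returns the already-deserialized value of one record fetched by the consumer's poll() call. A minimal, self-contained sketch of that pattern, where the broker address, group id, and topic name are placeholders:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092"); // placeholder broker
props.put("group.id", "demo-group");              // placeholder group id
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
    consumer.subscribe(Collections.singletonList("demo-topic")); // placeholder topic
    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
    for (ConsumerRecord<String, String> record : records) {
        System.out.println(record.value()); // the deserialized record value
    }
}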
Example 1: run
import org.apache.kafka.clients.consumer.ConsumerRecord; // import the package/class this method depends on

@Override
public void run() {
    while (true) {
        ConsumerRecords<byte[], ChangeDataRecord> changeRecords = consumer.poll(KAFKA_CONSUMER_POLL_TIMEOUT);
        for (ConsumerRecord<byte[], ChangeDataRecord> consumerRecord : changeRecords) {
            // The ChangeDataRecord contains all the changes made to a document
            ChangeDataRecord changeDataRecord = consumerRecord.value();
            String documentId = changeDataRecord.getId().getString();

            // Handle 'RECORD_INSERT'
            if (changeDataRecord.getType() == ChangeDataRecordType.RECORD_INSERT && this.onInsert != null) {
                this.onInsert.handle(documentId);
            }

            // Handle 'RECORD_DELETE'
            if (changeDataRecord.getType() == ChangeDataRecordType.RECORD_DELETE && this.onDelete != null) {
                this.onDelete.handle(documentId);
            }
        }
    }
}
Example 2: ack
import org.apache.kafka.clients.consumer.ConsumerRecord; // import the package/class this method depends on

@Override
public void ack(Object msgId) {
    try {
        if (msgId != null && ConsumerRecord.class.isInstance(msgId)) {
            flowedMsgCount--;
            ConsumerRecord<String, byte[]> record = getMessageId(msgId);
            String recordString = new String(record.value());
            JSONObject jsonObject = JSONObject.parseObject(recordString);
            String dataSourceInfo = jsonObject.getString(DataPullConstants.DATA_SOURCE_INFO);
            String dsKey = FullPullHelper.getDataSourceKey(JSONObject.parseObject(dataSourceInfo));
            String splitIndex = jsonObject.getString(DataPullConstants.DATA_CHUNK_SPLIT_INDEX);
            LOG.info("Acked record offset is: {}, {}: split index is {}", record.offset(), dsKey, splitIndex);
        }
        processedCount++;
        super.ack(msgId);
    } catch (Exception e) {
        LOG.error("DataPullingSpout: ack threw an exception!", e);
    }
}
Example 3: computeNext
import org.apache.kafka.clients.consumer.ConsumerRecord; // import the package/class this method depends on

@Override
protected KeyMessage<K,V> computeNext() {
    if (iterator == null || !iterator.hasNext()) {
        try {
            long timeout = MIN_POLL_MS;
            ConsumerRecords<K, V> records;
            // Poll with an exponentially growing timeout (capped at MAX_POLL_MS) until records arrive
            while ((records = consumer.poll(timeout)).isEmpty()) {
                timeout = Math.min(MAX_POLL_MS, timeout * 2);
            }
            iterator = records.iterator();
        } catch (Exception e) {
            consumer.close();
            return endOfData();
        }
    }
    ConsumerRecord<K,V> mm = iterator.next();
    return new KeyMessageImpl<>(mm.key(), mm.value());
}
Example 4: decodePayload
import org.apache.kafka.clients.consumer.ConsumerRecord; // import the package/class this method depends on

public static <K, V> Payload<K, V> decodePayload(Deserializer<V> valueDeserializer, ConsumerRecord<K, byte[]> originConsumerRecord) {
    TracingHeader tracingHeader = null;
    ConsumerRecord<K, V> dataRecord = null;
    boolean sampled = false;
    byte[] data = originConsumerRecord.value();
    byte[] vData = null;
    if (data.length <= HEADER_LENGTH) {
        vData = data;
    } else {
        // Frame layout: [magic: short][header length: short][tracing header][payload]
        ByteBuffer byteBuf = ByteBuffer.wrap(data);
        short magic = byteBuf.getShort(0);
        short tpLen = byteBuf.getShort(2);
        if (magic == MAGIC && tpLen == TracingHeader.LENGTH) {
            byte[] tpBytes = new byte[tpLen];
            System.arraycopy(byteBuf.array(), HEADER_LENGTH, tpBytes, 0, tpLen);
            tracingHeader = TracingHeader.fromBytes(tpBytes);
            sampled = true;
            int dataOffset = tpLen + HEADER_LENGTH;
            vData = new byte[byteBuf.array().length - dataOffset];
            System.arraycopy(byteBuf.array(), dataOffset, vData, 0, vData.length);
        } else {
            // Magic or length mismatch: treat the whole value as payload
            vData = data;
        }
    }
    dataRecord = new ConsumerRecord<>(originConsumerRecord.topic(),
            originConsumerRecord.partition(), originConsumerRecord.offset(),
            originConsumerRecord.key(), valueDeserializer.deserialize(originConsumerRecord.topic(), vData));
    return new Payload<>(tracingHeader, dataRecord, sampled);
}
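For context, the producing side of this scheme would have to prepend the magic number and header length before the tracing header and payload. A minimal sketch of what that could look like; the method name encodePayload is hypothetical, and MAGIC, HEADER_LENGTH (4 bytes), and a TracingHeader.toBytes() serializer are assumed from the example above:

// Hypothetical counterpart to decodePayload above; MAGIC, HEADER_LENGTH and
// TracingHeader.toBytes() are assumptions based on the decode logic.
public static byte[] encodePayload(TracingHeader header, byte[] payload) {
    byte[] tpBytes = header.toBytes();
    ByteBuffer buf = ByteBuffer.allocate(HEADER_LENGTH + tpBytes.length + payload.length);
    buf.putShort(MAGIC);                  // magic: short at offset 0
    buf.putShort((short) tpBytes.length); // header length: short at offset 2
    buf.put(tpBytes);                     // tracing header
    buf.put(payload);                     // original serialized value
    return buf.array();
}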
Example 5: consume
import org.apache.kafka.clients.consumer.ConsumerRecord; // import the package/class this method depends on

@Override
public void consume(ConsumerRecords<String, String> records) {
    for (ConsumerRecord<String, String> record : records) {
        String msg = record.value();
        logger.info(msg);
    }
}
Example 6: transform
import org.apache.kafka.clients.consumer.ConsumerRecord; // import the package/class this method depends on

@Override
public boolean transform(ConsumerRecord<String, String> change, Row row)
        throws BiremeException {
    MaxwellRecord record = new MaxwellRecord(change.value());
    if (filter(record)) {
        return false;
    }
    Table table = cxt.tablesInfo.get(getMappedTableName(record));
    row.type = record.type;
    row.produceTime = record.produceTime;
    row.originTable = getOriginTableName(record);
    row.mappedTable = getMappedTableName(record);
    row.keys = formatColumns(record, table, table.keyNames, false);
    if (row.type == RowType.INSERT || row.type == RowType.UPDATE) {
        row.tuple = formatColumns(record, table, table.columnName, false);
    }
    if (row.type == RowType.UPDATE) {
        row.oldKeys = formatColumns(record, table, table.keyNames, true);
        // If the update did not change any key column, old keys are not needed
        if (row.keys.equals(row.oldKeys)) {
            row.oldKeys = null;
        }
    }
    return true;
}
Example 7: readLine
import org.apache.kafka.clients.consumer.ConsumerRecord; // import the package/class this method depends on

@Override
public String readLine() {
    String line = "";
    ConsumerRecords<String, String> records = this.consumer.poll(1000);
    Iterator<ConsumerRecord<String, String>> iterator = records.iterator();
    if (iterator.hasNext()) {
        ConsumerRecord<String, String> record = iterator.next();
        logger.info(String.format("offset = %d, key = %s, value = %s\n", record.offset(), record.key(), record.value()));
        line = record.value();
    }
    return line;
}
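A side note on the poll(1000) call above: poll(long) was deprecated in Kafka 2.0 in favor of poll(Duration), so on current client versions the equivalent call would be:

import java.time.Duration;

// Equivalent call on Kafka clients 2.0+, where poll(long) is deprecated
ConsumerRecords<String, String> records = this.consumer.poll(Duration.ofMillis(1000));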
Example 8: run
import org.apache.kafka.clients.consumer.ConsumerRecord; // import the package/class this method depends on

@Override
public void run() {
    consumer.subscribe(Collections.singletonList(topic));
    try {
        while (running) {
            ConsumerRecords<ByteBuffer, ByteBuffer> records = consumer.poll(POLL_TIMEOUT);
            List<MessageMetadata> metadatas = new ArrayList<>(records.count());
            for (ConsumerRecord<ByteBuffer, ByteBuffer> record : records) {
                MessageMetadata metadata = (MessageMetadata) serializer.deserialize(record.key());
                TransactionWrapper wrapper = new TransactionWrapper(metadata, record.value());
                metadatas.add(metadata);
                buffer.put(metadata.getTransactionId(), wrapper);
            }
            Collections.sort(metadatas, new MetadataComparator());
            List<Long> transactionsToCommit = leadProxy.notifyTransactionsRead(consumerId, metadatas);
            if (!transactionsToCommit.isEmpty()) {
                committer.commit(transactionsToCommit, deserializerClosure, doneNotifier);
            }
            // TODO: commit transaction only after applying in Ignite
            consumer.commitSync();
        }
    } finally {
        consumer.unsubscribe();
    }
}
Example 9: processSingleRecord
import org.apache.kafka.clients.consumer.ConsumerRecord; // import the package/class this method depends on

private void processSingleRecord(List<Long> txIds, ConsumerRecord<ByteBuffer, ByteBuffer> record) {
    long txId = record.timestamp();
    boolean found = txIds.remove(txId);
    if (found) {
        ProducerRecord<ByteBuffer, ByteBuffer> producerRecord =
                new ProducerRecord<>(clusterConfig.getGapTopic(), record.key(), record.value());
        producer.send(producerRecord);
    }
}
Example 10: consume
import org.apache.kafka.clients.consumer.ConsumerRecord; // import the package/class this method depends on

private List<KafkaResult> consume() {
    final List<KafkaResult> kafkaResultList = new ArrayList<>();
    final ConsumerRecords consumerRecords = kafkaConsumer.poll(clientConfig.getPollTimeoutMs());
    logger.info("Consumed {} records", consumerRecords.count());
    final Iterator<ConsumerRecord> recordIterator = consumerRecords.iterator();
    while (recordIterator.hasNext()) {
        // Get the next record
        final ConsumerRecord consumerRecord = recordIterator.next();
        // Convert to KafkaResult
        final KafkaResult kafkaResult = new KafkaResult(
                consumerRecord.partition(),
                consumerRecord.offset(),
                consumerRecord.timestamp(),
                consumerRecord.key(),
                consumerRecord.value()
        );
        // Add to the result list
        kafkaResultList.add(kafkaResult);
    }
    // Commit offsets
    commit();
    return kafkaResultList;
}
Example 11: TransactionWrapper
import org.apache.kafka.clients.consumer.ConsumerRecord; // import the package/class this method depends on

public TransactionWrapper(ConsumerRecord<ByteBuffer, ByteBuffer> record, TransactionMetadata deserializedMetadata) {
    GridArgumentCheck.notNull(deserializedMetadata, "metadata cannot be null");
    this.value = record.value();
    this.key = record.key();
    this.topicPartition = new TopicPartition(record.topic(), record.partition());
    this.offset = record.offset();
    this.deserializedMetadata = deserializedMetadata;
}
Example 12: extractAndConvertValue
import org.apache.kafka.clients.consumer.ConsumerRecord; // import the package/class this method depends on

@Override
protected Object extractAndConvertValue(ConsumerRecord<?, ?> record, Type type) {
    Object value = record.value();
    if (value instanceof PublishedEventWrapper) {
        try {
            PublishedEventWrapper eventWrapper = (PublishedEventWrapper) value;
            userContext.extractUserContext(eventWrapper.getUserContext());
            operationContext.switchContext(eventWrapper.getOpId());
            return objectMapper.readValue(eventWrapper.getEvent(), TypeFactory.rawClass(type));
        } catch (IOException e) {
            throw new SerializationException(e);
        }
    } else {
        return super.extractAndConvertValue(record, type);
    }
}
Example 13: processConsumerRecords
import org.apache.kafka.clients.consumer.ConsumerRecord; // import the package/class this method depends on

/**
 * Processes a single record in two phases: a synchronous phase-one handler,
 * then an asynchronous phase-two handler on a worker thread.
 *
 * @param record the consumer record to process
 */
private void processConsumerRecords(final ConsumerRecord<String, Serializable> record) {
    final MessageHandler messageHandler = topicHandlers.get(record.topic());
    consumerContext.saveOffsetsBeforeProcessed(record.topic(), record.partition(),
            record.offset());
    // Handle values that are not wrapped in a DefaultMessage
    final DefaultMessage message = record.value() instanceof DefaultMessage
            ? (DefaultMessage) record.value()
            : new DefaultMessage((Serializable) record.value());
    // Phase-one processing (synchronous)
    messageHandler.p1Process(message);
    // Phase-two processing (asynchronous)
    processExecutor.submit(new Runnable() {
        @Override
        public void run() {
            try {
                messageHandler.p2Process(message);
                consumerContext.saveOffsetsAfterProcessed(record.topic(),
                        record.partition(), record.offset());
            } catch (Exception e) {
                boolean processed = messageHandler.onProcessError(message);
                if (!processed) {
                    errorMessageProcessor.submit(message, messageHandler);
                }
                logger.error("[" + messageHandler.getClass().getSimpleName()
                        + "] process Topic[" + record.topic() + "] error",
                        e);
            }
        }
    });
}
Example 14: producerRecordFromConsumerRecord
import org.apache.kafka.clients.consumer.ConsumerRecord; // import the package/class this method depends on

private static ProducerRecord<String, String> producerRecordFromConsumerRecord(String topic, ConsumerRecord<String, String> record) {
    return new ProducerRecord<>(topic, record.key(), record.value());
}
Example 15: extract
import org.apache.kafka.clients.consumer.ConsumerRecord; // import the package/class this method depends on

@Override
public long extract(ConsumerRecord<Object, Object> record, long previousTimestamp) {
    if (record.value() instanceof Integer) {
        return Long.valueOf(String.valueOf(record.value()));
    }
    return record.timestamp();
}