当前位置: 首页>>代码示例>>Java>>正文


Java ConsumerRecord.value方法代码示例

本文整理汇总了Java中org.apache.kafka.clients.consumer.ConsumerRecord.value方法的典型用法代码示例。如果您正苦于以下问题:Java ConsumerRecord.value方法的具体用法?Java ConsumerRecord.value怎么用?Java ConsumerRecord.value使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.kafka.clients.consumer.ConsumerRecord的用法示例。


在下文中一共展示了ConsumerRecord.value方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: run

import org.apache.kafka.clients.consumer.ConsumerRecord; //导入方法依赖的package包/类
@Override
public void run() {
    // Poll the change-data stream forever and dispatch every event to the
    // registered insert/delete handlers.
    // NOTE(review): there is no shutdown flag — the loop only ends when the
    // thread is killed; confirm this is intended.
    while (true) {

        ConsumerRecords<byte[], ChangeDataRecord> polled = consumer.poll(KAFKA_CONSUMER_POLL_TIMEOUT);
        for (ConsumerRecord<byte[], ChangeDataRecord> record : polled) {

            // The record value describes all changes made to one document.
            ChangeDataRecord change = record.value();
            String documentId = change.getId().getString();

            // Dispatch 'RECORD_INSERT' events when a handler is registered.
            if (change.getType() == ChangeDataRecordType.RECORD_INSERT && this.onInsert != null) {
                this.onInsert.handle(documentId);
            }

            // Dispatch 'RECORD_DELETE' events when a handler is registered.
            if (change.getType() == ChangeDataRecordType.RECORD_DELETE && this.onDelete != null) {
                this.onDelete.handle(documentId);
            }

        }
    }
}
 
开发者ID:mapr-demos,项目名称:mapr-music,代码行数:25,代码来源:CdcStatisticService.java

示例2: ack

import org.apache.kafka.clients.consumer.ConsumerRecord; //导入方法依赖的package包/类
/**
 * Acknowledges a successfully processed tuple. When the message id is the
 * original Kafka record, additionally logs which data-pull split the record
 * belonged to before delegating to the superclass ack.
 */
@Override
public void ack(Object msgId) {
    try {
        if (msgId != null && ConsumerRecord.class.isInstance(msgId)) {
            flowedMsgCount--;

            ConsumerRecord<String, byte[]> record = getMessageId(msgId);
            // Decode with an explicit charset: the previous new String(byte[])
            // used the platform default encoding, which is not portable.
            String recordString = new String(record.value(), java.nio.charset.StandardCharsets.UTF_8);
            JSONObject jsonObject = JSONObject.parseObject(recordString);
            String dataSourceInfo = jsonObject.getString(DataPullConstants.DATA_SOURCE_INFO);
            String dsKey = FullPullHelper.getDataSourceKey(JSONObject.parseObject(dataSourceInfo));
            String splitIndex = jsonObject.getString(DataPullConstants.DATA_CHUNK_SPLIT_INDEX);
            LOG.info("Acked Record offset--------is:{}, {}:split index is {}", record.offset(), dsKey, splitIndex);
        }

        processedCount++;
        super.ack(msgId);
    } catch (Exception e) {
        // Ack bookkeeping must never kill the spout; log and continue.
        LOG.error("DataPullingSpout:ack throwed exception!", e);
    }
}
 
开发者ID:BriData,项目名称:DBus,代码行数:22,代码来源:DataPullingSpout.java

示例3: computeNext

import org.apache.kafka.clients.consumer.ConsumerRecord; //导入方法依赖的package包/类
@Override
protected KeyMessage<K,V> computeNext() {
  // Refill the local iterator from Kafka when it is exhausted, doubling the
  // poll timeout after every empty poll (bounded by MAX_POLL_MS).
  if (iterator == null || !iterator.hasNext()) {
    try {
      long pollMs = MIN_POLL_MS;
      ConsumerRecords<K, V> batch = consumer.poll(pollMs);
      while (batch.isEmpty()) {
        pollMs = Math.min(MAX_POLL_MS, pollMs * 2);
        batch = consumer.poll(pollMs);
      }
      iterator = batch.iterator();
    } catch (Exception e) {
      // Any poll failure is treated as end-of-stream; release the consumer.
      consumer.close();
      return endOfData();
    }
  }
  ConsumerRecord<K,V> next = iterator.next();
  return new KeyMessageImpl<>(next.key(), next.value());
}
 
开发者ID:oncewang,项目名称:oryx2,代码行数:20,代码来源:ConsumeDataIterator.java

示例4: decodePayload

import org.apache.kafka.clients.consumer.ConsumerRecord; //导入方法依赖的package包/类
/**
 * Splits a raw Kafka record into an optional tracing header and the decoded
 * business payload.
 *
 * Wire layout when tracing is sampled: [magic:2][headerLen:2][header][payload];
 * otherwise the whole value is the payload.
 *
 * @param valueDeserializer    deserializer applied to the payload bytes.
 * @param originConsumerRecord the record as consumed from Kafka.
 * @return payload holding the (possibly null) tracing header, the re-built
 *         data record and whether the message was sampled for tracing.
 */
public static <K, V> Payload<K, V> decodePayload(Deserializer<V> valueDeserializer, ConsumerRecord<K, byte[]> originConsumerRecord) {
    TracingHeader tracingHeader = null;
    ConsumerRecord<K, V> dataRecord = null;
    boolean sampled = false;
    byte[] data = originConsumerRecord.value();
    byte[] vData = null;
    // Kafka tombstone records carry a null value; pass it straight through to
    // the deserializer instead of dereferencing it (the old code threw an NPE
    // on data.length here).
    if (data == null || data.length <= HEADER_LENGTH) {
        vData = data;
    } else {
        ByteBuffer byteBuf = ByteBuffer.wrap(data);
        short magic = byteBuf.getShort(0);
        short tpLen = byteBuf.getShort(2);
        if (magic == MAGIC && tpLen == TracingHeader.LENGTH) {
            byte[] tpBytes = new byte[tpLen];
            System.arraycopy(data, HEADER_LENGTH, tpBytes, 0, tpLen);
            tracingHeader = TracingHeader.fromBytes(tpBytes);
            sampled = true;
            int dataOffset = tpLen + HEADER_LENGTH;
            vData = new byte[data.length - dataOffset];
            System.arraycopy(data, dataOffset, vData, 0, vData.length);
        } else {
            // Magic/length mismatch: treat the whole value as payload.
            vData = data;
        }
    }
    // NOTE(review): this copy preserves only topic/partition/offset/key/value —
    // timestamp and headers of the original record are dropped; confirm
    // downstream does not rely on them.
    dataRecord = new ConsumerRecord<>(originConsumerRecord.topic(),
            originConsumerRecord.partition(), originConsumerRecord.offset(),
            originConsumerRecord.key(), valueDeserializer.deserialize(originConsumerRecord.topic(), vData));
    return new Payload<>(tracingHeader, dataRecord, sampled);
}
 
开发者ID:YanXs,项目名称:nighthawk,代码行数:30,代码来源:PayloadCodec.java

示例5: consume

import org.apache.kafka.clients.consumer.ConsumerRecord; //导入方法依赖的package包/类
@Override
public void consume(ConsumerRecords<String, String> records) {
    // Log the value of every record in the polled batch.
    for (ConsumerRecord<String, String> item : records) {
        logger.info(item.value());
    }
}
 
开发者ID:Zephery,项目名称:newblog,代码行数:8,代码来源:KafkaConsumerHandlerImpl.java

示例6: transform

import org.apache.kafka.clients.consumer.ConsumerRecord; //导入方法依赖的package包/类
@Override
public boolean transform(ConsumerRecord<String, String> change, Row row)
    throws BiremeException {
  // Parse the Maxwell JSON payload carried in the record value.
  MaxwellRecord maxwellRecord = new MaxwellRecord(change.value());

  // Records rejected by the filter produce no row.
  if (filter(maxwellRecord)) {
    return false;
  }

  Table targetTable = cxt.tablesInfo.get(getMappedTableName(maxwellRecord));

  row.type = maxwellRecord.type;
  row.produceTime = maxwellRecord.produceTime;
  row.originTable = getOriginTableName(maxwellRecord);
  row.mappedTable = getMappedTableName(maxwellRecord);
  row.keys = formatColumns(maxwellRecord, targetTable, targetTable.keyNames, false);

  boolean isUpdate = row.type == RowType.UPDATE;

  // Inserts and updates carry the full column tuple.
  if (row.type == RowType.INSERT || isUpdate) {
    row.tuple = formatColumns(maxwellRecord, targetTable, targetTable.columnName, false);
  }

  // Updates also carry the previous key set — but only when it differs from
  // the current one.
  if (isUpdate) {
    row.oldKeys = formatColumns(maxwellRecord, targetTable, targetTable.keyNames, true);

    if (row.keys.equals(row.oldKeys)) {
      row.oldKeys = null;
    }
  }

  return true;
}
 
开发者ID:HashDataInc,项目名称:bireme,代码行数:32,代码来源:MaxwellPipeLine.java

示例7: readLine

import org.apache.kafka.clients.consumer.ConsumerRecord; //导入方法依赖的package包/类
@Override
public String readLine() {
	// Poll Kafka once (1 second timeout) and return the value of the first
	// record, or an empty string when nothing arrived. Any further records in
	// the same batch are intentionally ignored by this implementation.
	String line = "";
	ConsumerRecords<String, String> batch = this.consumer.poll(1000);
	for (ConsumerRecord<String, String> record : batch) {
		logger.info(String.format("offset = %d, key = %s, value = %s\n", record.offset(), record.key(), record.value()));
		line = record.value();
		break;
	}
	return line;
}
 
开发者ID:netkiller,项目名称:ipo,代码行数:13,代码来源:KafkaInput.java

示例8: run

import org.apache.kafka.clients.consumer.ConsumerRecord; //导入方法依赖的package包/类
/**
 * Main consume loop: reads transaction records from the subscribed topic,
 * buffers them by transaction id, reports the sorted metadata to the lead
 * proxy, and commits whatever transactions the lead approves. Unsubscribes
 * from the topic when the loop exits.
 */
@Override public void run() {
    consumer.subscribe(Collections.singletonList(topic));
    try {
        while (running) {
            ConsumerRecords<ByteBuffer, ByteBuffer> records = consumer.poll(POLL_TIMEOUT);
            List<MessageMetadata> metadatas = new ArrayList<>(records.count());

            for (ConsumerRecord<ByteBuffer, ByteBuffer> record : records) {
                // Record key holds the serialized metadata; the value holds the
                // raw transaction bytes.
                MessageMetadata metadata = (MessageMetadata)serializer.deserialize(record.key());
                TransactionWrapper wrapper = new TransactionWrapper(metadata, record.value());

                metadatas.add(metadata);
                // Keep the wrapper until the lead decides the commit order.
                buffer.put(metadata.getTransactionId(), wrapper);
            }
            // Metadata is reported to the lead in sorted order; the lead
            // returns the ids of transactions that are now safe to commit.
            Collections.sort(metadatas, new MetadataComparator());
            List<Long> transactionsToCommit = leadProxy.notifyTransactionsRead(consumerId, metadatas);

            if (!transactionsToCommit.isEmpty()) {
                committer.commit(transactionsToCommit, deserializerClosure, doneNotifier);
            }
            // TODO commit transaction only after applying in ignite
            consumer.commitSync();
        }
    }
    finally {
        consumer.unsubscribe();
    }
}
 
开发者ID:epam,项目名称:Lagerta,代码行数:29,代码来源:SubscriberConsumer.java

示例9: processSingleRecord

import org.apache.kafka.clients.consumer.ConsumerRecord; //导入方法依赖的package包/类
/**
 * Re-publishes the record to the gap topic when its timestamp matches one of
 * the transaction ids still being looked for; a matched id is removed from
 * the list as a side effect.
 */
private void processSingleRecord(List<Long> txIds, ConsumerRecord<ByteBuffer, ByteBuffer> record) {
    // The record timestamp carries the transaction id in this pipeline.
    long txId = record.timestamp();
    if (!txIds.remove(txId)) {
        return;
    }
    producer.send(new ProducerRecord<>(clusterConfig.getGapTopic(), record.key(), record.value()));
}
 
开发者ID:epam,项目名称:Lagerta,代码行数:10,代码来源:ReconcilerImpl.java

示例10: consume

import org.apache.kafka.clients.consumer.ConsumerRecord; //导入方法依赖的package包/类
/**
 * Polls a single batch from Kafka, converts every record into a
 * {@code KafkaResult}, and commits the consumed offsets.
 *
 * @return the converted records of one poll; empty when nothing arrived.
 */
private List<KafkaResult> consume() {
    final List<KafkaResult> kafkaResultList = new ArrayList<>();
    // Wildcard generics instead of raw types: key/value are only passed
    // through to KafkaResult, so <?, ?> suffices and keeps type checking on.
    final ConsumerRecords<?, ?> consumerRecords = kafkaConsumer.poll(clientConfig.getPollTimeoutMs());

    logger.info("Consumed {} records", consumerRecords.count());
    // Enhanced for-loop replaces the manual Iterator plumbing.
    for (final ConsumerRecord<?, ?> consumerRecord : consumerRecords) {
        // Convert to KafkaResult.
        final KafkaResult kafkaResult = new KafkaResult(
            consumerRecord.partition(),
            consumerRecord.offset(),
            consumerRecord.timestamp(),
            consumerRecord.key(),
            consumerRecord.value()
        );

        // Add to list.
        kafkaResultList.add(kafkaResult);
    }

    // Commit offsets so the next poll continues after this batch.
    commit();
    return kafkaResultList;
}
 
开发者ID:SourceLabOrg,项目名称:kafka-webview,代码行数:28,代码来源:WebKafkaConsumer.java

示例11: TransactionWrapper

import org.apache.kafka.clients.consumer.ConsumerRecord; //导入方法依赖的package包/类
/**
 * Captures a consumed record's raw key/value together with its source
 * position (topic, partition, offset) and the already-deserialized
 * transaction metadata. The metadata must not be null.
 */
public TransactionWrapper(ConsumerRecord<ByteBuffer, ByteBuffer> record, TransactionMetadata deserializedMetadata) {
    GridArgumentCheck.notNull(deserializedMetadata, "metadata cannot be null");
    this.deserializedMetadata = deserializedMetadata;
    this.key = record.key();
    this.value = record.value();
    this.offset = record.offset();
    this.topicPartition = new TopicPartition(record.topic(), record.partition());
}
 
开发者ID:epam,项目名称:Lagerta,代码行数:9,代码来源:TransactionWrapper.java

示例12: extractAndConvertValue

import org.apache.kafka.clients.consumer.ConsumerRecord; //导入方法依赖的package包/类
@Override
protected Object extractAndConvertValue(ConsumerRecord<?, ?> record, Type type) {
    // Unwrap PublishedEventWrapper payloads: restore the user and operation
    // contexts from the wrapper, then deserialize the embedded event JSON into
    // the requested type. Any other value falls back to the default conversion.
    Object value = record.value();
    if (!(value instanceof PublishedEventWrapper)) {
        return super.extractAndConvertValue(record, type);
    }
    PublishedEventWrapper eventWrapper = (PublishedEventWrapper) value;
    try {
        userContext.extractUserContext(eventWrapper.getUserContext());
        operationContext.switchContext(eventWrapper.getOpId());
        return objectMapper.readValue(eventWrapper.getEvent(), TypeFactory.rawClass(type));
    } catch (IOException e) {
        throw new SerializationException(e);
    }
}
 
开发者ID:kloiasoft,项目名称:eventapis,代码行数:16,代码来源:EventMessageConverter.java

示例13: processConsumerRecords

import org.apache.kafka.clients.consumer.ConsumerRecord; //导入方法依赖的package包/类
/**
 * Dispatches one consumed record to the handler registered for its topic.
 * The offset is saved as "before processed" up front; only after the
 * asynchronous second-phase processing succeeds is it saved as "processed".
 *
 * @param record the Kafka record to process; its value may or may not
 *               already be wrapped in a DefaultMessage.
 */
private void processConsumerRecords(final ConsumerRecord<String, Serializable> record) {
    final MessageHandler messageHandler = topicHandlers.get(record.topic());

    consumerContext.saveOffsetsBeforeProcessed(record.topic(), record.partition(),
        record.offset());
    // Compatibility: accept both wrapped (DefaultMessage) and bare payloads.
    final DefaultMessage message = record.value() instanceof DefaultMessage
        ? (DefaultMessage) record.value()
        : new DefaultMessage((Serializable) record.value());
    // Phase 1: synchronous processing on the polling thread.
    messageHandler.p1Process(message);
    // Phase 2: asynchronous processing on the executor.
    processExecutor.submit(new Runnable() {
        @Override
        public void run() {
            try {
                messageHandler.p2Process(message);
                //
                consumerContext.saveOffsetsAfterProcessed(record.topic(),
                    record.partition(), record.offset());
            } catch (Exception e) {
                // Let the handler deal with the failure first; if it does not,
                // hand the message to the error processor (presumably for
                // retry — TODO confirm).
                boolean processed = messageHandler.onProcessError(message);
                if (processed == false) {
                    errorMessageProcessor.submit(message, messageHandler);
                }
                logger.error("[" + messageHandler.getClass().getSimpleName()
                             + "] process Topic[" + record.topic() + "] error",
                    e);
            }
        }
    });
}
 
开发者ID:warlock-china,项目名称:azeroth,代码行数:36,代码来源:NewApiTopicConsumer.java

示例14: producerRecordFromConsumerRecord

import org.apache.kafka.clients.consumer.ConsumerRecord; //导入方法依赖的package包/类
/**
 * Copies a consumed record's key and value into a new producer record aimed
 * at the given output topic.
 */
private static ProducerRecord<String, String> producerRecordFromConsumerRecord(String topic, ConsumerRecord<String, String> record) {
    final String key = record.key();
    final String value = record.value();
    return new ProducerRecord<>(topic, key, value);
}
 
开发者ID:YMCoding,项目名称:kafka-0.11.0.0-src-with-comment,代码行数:4,代码来源:TransactionalMessageCopier.java

示例15: extract

import org.apache.kafka.clients.consumer.ConsumerRecord; //导入方法依赖的package包/类
/**
 * Test timestamp extractor: integer-valued records encode their own
 * timestamp in the value; any other record keeps its native timestamp.
 *
 * @param record            the consumed record.
 * @param previousTimestamp unused; kept for the extractor interface.
 * @return the value as a long when it is an Integer, otherwise the record
 *         timestamp.
 */
@Override
public long extract(ConsumerRecord<Object, Object> record, long previousTimestamp) {
    Object value = record.value();
    if (value instanceof Integer) {
        // Direct widening instead of the old Integer -> String -> Long
        // round trip; the result is identical for every int value.
        return ((Integer) value).longValue();
    }
    return record.timestamp();
}
 
开发者ID:carlosmenezes,项目名称:mockafka,代码行数:7,代码来源:TestTimestampExtractor.java


注:本文中的org.apache.kafka.clients.consumer.ConsumerRecord.value方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。