

Java ConsumerRecord.key Method Code Examples

This article compiles typical usage examples of the Java method org.apache.kafka.clients.consumer.ConsumerRecord.key, collected from open-source projects. If you are wondering what ConsumerRecord.key does, how to call it, or what real-world invocations look like, the curated examples below should help. You can also explore the broader usage of org.apache.kafka.clients.consumer.ConsumerRecord, the class this method belongs to.


The following presents 15 code examples of the ConsumerRecord.key method, ordered by popularity.
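
Before the project-specific examples, here is a minimal, self-contained sketch of the typical call pattern: poll a topic and read each record's key via ConsumerRecord.key. The broker address, group id, and topic name (localhost:9092, demo-group, demo-topic) are placeholder assumptions, not values taken from any example below.

import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class ConsumerRecordKeyDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker address
        props.put("group.id", "demo-group");              // placeholder consumer group
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("demo-topic")); // placeholder topic
            ConsumerRecords<String, String> records = consumer.poll(1000);
            for (ConsumerRecord<String, String> record : records) {
                // key() returns the record key, or null when the producer sent the record without one
                String key = record.key();
                System.out.printf("key=%s, value=%s, offset=%d%n", key, record.value(), record.offset());
            }
        }
    }
}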

Example 1: execute

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the class the method depends on
@Override
public void execute(Tuple input) {
    try {
        if (input.getValueByField("reloadControl") instanceof ConsumerRecord) {
            ConsumerRecord<String, byte[]> reloadRecord = (ConsumerRecord<String, byte[]>) input.getValueByField("reloadControl");
            String key = reloadRecord.key();
            if ("EXTRACTOR_RELOAD_CONF".equals(key)) {
                String json = new String(reloadRecord.value(), "utf-8");
                logger.info("kafka producer bolt received reload configure control, the event is {}.", json);
                reloadConfig(json);
                return;
            }
        }
        MessageVo msgVo = (MessageVo) input.getValueByField("message");
        if (msgVo != null) {
            logger.debug("execute kafka send the message which batchId is {}", msgVo.getBatchId());
            sendDataToKafka(msgVo.getBatchId(), msgVo.getMessage(), input);
        }
    } catch (Exception e) {
        logger.error("execute error", e);
    }
}
 
Author: BriData, Project: DBus, File: KafkaProducerBolt.java

Example 2: processControlCommand

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the class the method depends on
private void processControlCommand(ConsumerRecord<String, byte[]> record, Tuple input) {
    try {
        String key = record.key();
        String json = new String(record.value(), "utf-8");
        ControlType cmd = ControlType.getCommand(key);
        switch (cmd) {
            case DISPATCHER_RELOAD_CONFIG:
                reloadConfig(json);
                break;

            default:
                /* do nothing */
                break;
        }
        this.collector.ack(input);
    } catch (Exception ex) {
        logger.error("DispatcherBout processControlCommand():", ex);
        collector.reportError(ex);
        this.collector.fail(input);
    }

}
 
Author: BriData, Project: DBus, File: DispatcherBout.java

Example 3: poll

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the class the method depends on
public List<StatMessage> poll() {
    /* fast poll: return immediately if nothing is available */
    ConsumerRecords<String, String> records = consumer.poll(1000);
    if (records.count() == 0) {
        count++;
        if (count % 60 == 0) {
            count = 0;
            LOG.info(String.format("running on %s (offset=%d).......", statTopic, consumer.position(statTopicPartition)));
        }
        return null;
    }

    LOG.info(String.format("KafkaSource got %d records......", records.count()));

    List<StatMessage> list = new ArrayList<>();
    for (ConsumerRecord<String, String> record : records) {
        String key = record.key();
        long offset = record.offset();
        LOG.debug(String.format("KafkaSource got record key=%s, offset=%d......", key, offset));

        StatMessage msg = StatMessage.parse(record.value());
        list.add(msg);
    }

    return list;
}
 
Author: BriData, Project: DBus, File: KafkaSource.java

Example 4: consume

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the class the method depends on
public boolean consume(ConsumerRecords<String, String> records) {
  for (ConsumerRecord<String, String> consumerRecord : records) {
    log.info("topic:{}, key:{}, value:{}", consumerRecord.topic(), consumerRecord.key(), consumerRecord.value());
    String deviceId = consumerRecord.key();
    String payload = consumerRecord.value();
    Optional<Device> deviceOpt = authService.findDeviceById(DeviceId.fromString(deviceId));

    if (deviceOpt.isPresent()) {
      Device device = deviceOpt.get();
      JsonObject root = (JsonObject) new JsonParser().parse(payload);
      int messageId = root.getAsJsonPrimitive("messageId").getAsInt();
      FromDeviceMsg msg = JsonConverter.convertToTelemetry(root.get("d"), messageId);
      BasicToDeviceActorSessionMsg basicToDeviceActorSessionMsg = new BasicToDeviceActorSessionMsg(device,
          new BasicAdaptorToSessionActorMsg(deviceSessionCtx, msg));
      processor.process(basicToDeviceActorSessionMsg);
    }

  }
  return true;
}
 
Author: osswangxining, Project: iotplatform, File: KafkaMsgReceiver4MQTT.java

Example 5: computeNext

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the class the method depends on
@Override
protected KeyMessage<K,V> computeNext() {
  if (iterator == null || !iterator.hasNext()) {
    try {
      long timeout = MIN_POLL_MS;
      ConsumerRecords<K, V> records;

      while ((records = consumer.poll(timeout)).isEmpty()) {
        timeout = Math.min(MAX_POLL_MS, timeout * 2);
      }
      iterator = records.iterator();
    } catch (Exception e) {
      consumer.close();
      return endOfData();
    }
  }
  ConsumerRecord<K,V> mm = iterator.next();
  return new KeyMessageImpl<>(mm.key(), mm.value());
}
 
Author: oncewang, Project: oryx2, File: ConsumeDataIterator.java

Example 6: getProcessor

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the class the method depends on
public static Consumer.Processor<String, Task> getProcessor() {
    return new Consumer.Processor<String, Task>() {
        @Override
        protected Boolean process(ConsumerRecords<String, Task> records) {
            for (ConsumerRecord<String, Task> record : records) {
                if (record.key() == null) {
                    log.error("Wrong task encountered. Task meta: {}", record);
                    continue;
                }
                IRequestServer requestServer = provider.getRequestServer(record.key());
                if (requestServer == null) {
                    log.error("Request Server not found for request type: {}", record.key());
                    continue;
                }
                log.info("Request server found: {}", requestServer.getClass());
                try {
                    requestServer.serve(record.value());
                } catch (ServiceException se) {
                    log.error("Service Exception occurred while serving request. Error: ", se);
                    continue;
                }
            }
            return true;
        }
    };
}
 
Author: dixantmittal, Project: scalable-task-scheduler, File: RequestConsumers.java

Example 7: reconsumeLater

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the class the method depends on
private void reconsumeLater(ConsumerRecord<String, byte[]> consumeRecord) throws InterruptedException, ExecutionException {

    // copy all headers to headerList except RETRY_COUNT
    Headers headers = consumeRecord.headers();
    List<Header> headerList = new ArrayList<Header>(8);
    Iterator<Header> iterator = headers.iterator();
    Integer retryCount = -1;
    boolean hasOriginalHeader = false;
    while (iterator.hasNext()) {
        Header next = iterator.next();
        if (next.key().equals(RETRY_COUNT_KEY)) {
            retryCount = serializer.deserialize(next.value());
            continue;
        }
        if (next.key().equals(ORGINAL_TOPIC)) {
            hasOriginalHeader = true;
        }
        headerList.add(next);
    }

    // increment RETRY_COUNT and add it back to the headers
    retryCount++;
    headerList.add(new RecordHeader(RETRY_COUNT_KEY, serializer.serialization(retryCount)));

    if (!hasOriginalHeader) {
        headerList.add(new RecordHeader(ORGINAL_TOPIC, serializer.serialization(consumeRecord.topic())));
    }

    // route the message to the retry queue that matches its retry count
    String retryTopic = calcRetryTopic(consumeRecord.topic(), retryCount);

    ProducerRecord<String, byte[]> record = new ProducerRecord<>(retryTopic,
            consumeRecord.partition() % retryQueuePartitionCount.get(retryTopic), null, consumeRecord.key(),
            consumeRecord.value(), headerList);
    Future<RecordMetadata> publishKafkaMessage = retryQueueMsgProducer.publishKafkaMessage(record);
    publishKafkaMessage.get();
}
 
Author: QNJR-GROUP, Project: EasyTransaction, File: KafkaEasyTransMsgConsumerImpl.java

Example 8: decodePayload

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the class the method depends on
public static <K, V> Payload<K, V> decodePayload(Deserializer<V> valueDeserializer, ConsumerRecord<K, byte[]> originConsumerRecord) {
    TracingHeader tracingHeader = null;
    ConsumerRecord<K, V> dataRecord = null;
    boolean sampled = false;
    byte[] data = originConsumerRecord.value();
    byte[] vData = null;
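    // a payload shorter than the tracing header cannot carry one, so treat the whole array as plain data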
    if (data.length <= HEADER_LENGTH) {
        vData = data;
    } else {
        ByteBuffer byteBuf = ByteBuffer.wrap(data);
        short magic = byteBuf.getShort(0);
        short tpLen = byteBuf.getShort(2);
        if (magic == MAGIC && tpLen == TracingHeader.LENGTH) {
            byte[] tpBytes = new byte[tpLen];
            System.arraycopy(byteBuf.array(), HEADER_LENGTH, tpBytes, 0, tpLen);
            tracingHeader = TracingHeader.fromBytes(tpBytes);
            sampled = true;
            int dataOffset = tpLen + HEADER_LENGTH;
            vData = new byte[byteBuf.array().length - dataOffset];
            System.arraycopy(byteBuf.array(), dataOffset, vData, 0, vData.length);
        } else {
            vData = data;
        }
    }
    dataRecord = new ConsumerRecord<>(originConsumerRecord.topic(),
            originConsumerRecord.partition(), originConsumerRecord.offset(),
            originConsumerRecord.key(), valueDeserializer.deserialize(originConsumerRecord.topic(), vData));
    return new Payload<>(tracingHeader, dataRecord, sampled);
}
 
Author: YanXs, Project: nighthawk, File: PayloadCodec.java

Example 9: consume

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the class the method depends on
private List<KafkaResult> consume() {
    final List<KafkaResult> kafkaResultList = new ArrayList<>();
    final ConsumerRecords consumerRecords = kafkaConsumer.poll(clientConfig.getPollTimeoutMs());

    logger.info("Consumed {} records", consumerRecords.count());
    final Iterator<ConsumerRecord> recordIterator = consumerRecords.iterator();
    while (recordIterator.hasNext()) {
        // Get next record
        final ConsumerRecord consumerRecord = recordIterator.next();

        // Convert to KafkaResult.
        final KafkaResult kafkaResult = new KafkaResult(
            consumerRecord.partition(),
            consumerRecord.offset(),
            consumerRecord.timestamp(),
            consumerRecord.key(),
            consumerRecord.value()
        );

        // Add to list.
        kafkaResultList.add(kafkaResult);
    }

    // Commit offsets
    commit();
    return kafkaResultList;
}
 
Author: SourceLabOrg, Project: kafka-webview, File: WebKafkaConsumer.java

Example 10: processSingleRecord

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the class the method depends on
private void processSingleRecord(List<Long> txIds, ConsumerRecord<ByteBuffer, ByteBuffer> record) {
    long txId = record.timestamp();
    boolean found = txIds.remove(txId);
    if (found) {
        ProducerRecord<ByteBuffer, ByteBuffer> producerRecord =
                new ProducerRecord<>(clusterConfig.getGapTopic(), record.key(), record.value());
        producer.send(producerRecord);
    }
}
 
Author: epam, Project: Lagerta, File: ReconcilerImpl.java

Example 11: TransactionWrapper

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the class the method depends on
public TransactionWrapper(ConsumerRecord<ByteBuffer, ByteBuffer> record, TransactionMetadata deserializedMetadata) {
    GridArgumentCheck.notNull(deserializedMetadata, "metadata cannot be null");
    this.value = record.value();
    this.key = record.key();
    this.topicPartition = new TopicPartition(record.topic(), record.partition());
    this.offset = record.offset();
    this.deserializedMetadata = deserializedMetadata;
}
 
Author: epam, Project: Lagerta, File: TransactionWrapper.java

Example 12: read

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the class the method depends on
void read(ConsumerRecord<String, byte[]> record) {
    String key = record.key();
    if (key.startsWith(CONNECTOR_STATUS_PREFIX)) {
        readConnectorStatus(key, record.value());
    } else if (key.startsWith(TASK_STATUS_PREFIX)) {
        readTaskStatus(key, record.value());
    } else {
        log.warn("Discarding record with invalid key {}", key);
    }
}
 
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, File: KafkaStatusBackingStore.java

Example 13: processNext

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the class the method depends on
private long processNext(final List<ConsumerRecord<byte[], byte[]>> records, final StateRestorer restorer, final Long endOffset) {
    for (final ConsumerRecord<byte[], byte[]> record : records) {
        final long offset = record.offset();
        if (restorer.hasCompleted(offset, endOffset)) {
            return offset;
        }
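        // a changelog record with a null key cannot be restored into the store, so skip it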
        if (record.key() != null) {
            restorer.restore(record.key(), record.value());
        }
    }
    return consumer.position(restorer.partition());
}
 
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, File: StoreChangelogReader.java

Example 14: processControlEvent

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the class the method depends on
private void processControlEvent(ConsumerRecord<String, byte[]> record) throws Exception {
    Object processed = cache.getIfPresent(record.offset());
    if (processed == null) {
        String key = record.key();
        Command cmd = Command.parse(key);
        String event = new String(record.value(), "utf-8");
        ControlMessage message = ControlMessage.parse(event);
        logger.info("Received a ControlMessage {key:{}, event:{}}", key, event);
        switch (cmd) {
            case APPENDER_TOPIC_RESUME:

                final CtrlCommand ctrlCmd = CtrlCommand.parse(Command.APPENDER_TOPIC_RESUME, message);
                EmitData emitData = ctrlCmd.execCmd((cmd1, args) -> {
                    TopicResumeCmd rCmd = (TopicResumeCmd) cmd1;

                    consumerListener.resumeTopic(rCmd.getTopic(), message);

                    EmitData data = new EmitData();
                    data.add(EmitData.MESSAGE, message);
                    data.add(EmitData.CTRL_CMD, rCmd);

                    return data;
                });

                // send the command to the bolt: start emitting data and stop filtering the terminated tables
                List<Object> values = new Values(emitData, Command.APPENDER_TOPIC_RESUME);
                listener.emitData(values, record);
                return; // no further processing is needed

            case APPENDER_RELOAD_CONFIG:

                listener.reduceFlowSize(record.serializedValueSize());
                listener.markReloading(record, Collections.singletonMap("message", message));
                consumerListener.syncOffset(record);
                break;

            case PAUSE_APPENDER_DATA_TOPIC:
                listener.reduceFlowSize(record.serializedValueSize());
                consumerListener.pauseAppender();
                consumerListener.syncOffset(record);
                logger.info("All of the data topics are paused");
                break;
            case RESUME_APPENDER:
                listener.reduceFlowSize(record.serializedValueSize());
                consumerListener.resumeAppender();
                consumerListener.syncOffset(record);
                logger.info("All of the data topics are resumed");
                break;
            default:
                listener.reduceFlowSize(record.serializedValueSize());
                consumerListener.syncOffset(record);
                break;
        }
    } else {
        listener.reduceFlowSize(record.serializedValueSize());
        consumerListener.syncOffset(record);
        logger.info("Data have bean processed->{}", record.toString());
    }
}
 
Author: BriData, Project: DBus, File: CtrlEventProcessor.java

Example 15: extract

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the class the method depends on
@Override
public long extract(final ConsumerRecord<Object, Object> record) {
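    // assumes the record key is a boxed Long; any other key type fails with a ClassCastException at runtime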
    return (long) record.key();
}
 
Author: gchq, Project: stroom-stats, File: KafkaStreamsSandbox.java


Note: The org.apache.kafka.clients.consumer.ConsumerRecord.key method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. Observe each project's license when distributing or using the code, and do not republish without permission.