

Java ConsumerRecord.offset Method Code Examples

This article collects typical usage examples of the Java method org.apache.kafka.clients.consumer.ConsumerRecord.offset. If you are wondering what exactly ConsumerRecord.offset does, how to use it, or where to find examples, the curated method examples below may help. You can also explore further usage examples of org.apache.kafka.clients.consumer.ConsumerRecord, the class this method belongs to.


The following shows 15 code examples of the ConsumerRecord.offset method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
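
Before the examples, here is a minimal, self-contained sketch of what ConsumerRecord.offset returns: the record's position within its partition, assigned by the broker and strictly increasing per partition. The broker address, group ID, and topic name below are placeholders, not taken from any of the examples.

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class OffsetDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker
        props.put("group.id", "offset-demo");             // placeholder group ID
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("demo-topic")); // placeholder topic
            ConsumerRecords<String, String> records = consumer.poll(1000);
            for (ConsumerRecord<String, String> record : records) {
                // offset() identifies this record's position within its partition
                System.out.printf("partition=%d offset=%d value=%s%n",
                        record.partition(), record.offset(), record.value());
            }
        }
    }
}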

Example 1: poll

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the class this method depends on
public List<StatMessage> poll() {
    /* Fetch quickly; if nothing is available, return right away */
    ConsumerRecords<String, String> records = consumer.poll(1000);
    if (records.count() == 0) {
        count++;
        if (count % 60 == 0) {
            count = 0;
            LOG.info(String.format("running on %s (offset=%d).......", statTopic,  consumer.position(statTopicPartition)));
        }
        return null;
    }

    LOG.info(String.format("KafkaSource got %d records......", records.count()));

    List<StatMessage> list = new ArrayList<>();
    for (ConsumerRecord<String, String> record : records) {
        String key = record.key();
        long offset = record.offset(); // the record's position within its partition

        StatMessage msg = StatMessage.parse(record.value());
        list.add(msg);
        //LOG.info(String.format("KafkaSource got record key=%s, offset=%d......", key, offset));
    }

    return list;
}
 
Developer: BriData, Project: DBus, Lines: 27, Source: KafkaSource.java

Example 2: pollNextTransaction

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the class this method depends on
@Override
public TransactionInput pollNextTransaction(long timeout, TimeUnit unit) {
    Iterator<ConsumerRecord<Integer, byte[]>> iterator = m_consumer.poll(unit.toMillis(timeout)).iterator();
    if (!iterator.hasNext()) {
        return null;
    }

    ConsumerRecord<Integer, byte[]> firstRecord = iterator.next();
    if (iterator.hasNext()) {
        throw new IllegalStateException("Kafka should not return more than one record");
    }

    return new TransactionInput() {
        @Override
        public InputStream getInputStream() throws IOException {
            return new ByteArrayInputStream(firstRecord.value());
        }

        @Override
        public long getTransactionId() {
            return firstRecord.offset();
        }
    };
}
 
Developer: Axway, Project: iron, Lines: 25, Source: KafkaTransactionStore.java
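
The IllegalStateException above only makes sense if each poll returns at most one record, which suggests the consumer is configured with max.poll.records=1. That configuration is not part of the snippet, so the sketch below is an assumption about how m_consumer might be created, with placeholder broker and group values.

import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;

// Hypothetical setup for m_consumer; the real configuration in
// KafkaTransactionStore is not shown in the snippet above.
private KafkaConsumer<Integer, byte[]> createSingleRecordConsumer() {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092"); // placeholder broker
    props.put("group.id", "iron-tx-store");           // placeholder group ID
    props.put("max.poll.records", "1");               // at most one record per poll()
    props.put("key.deserializer", "org.apache.kafka.common.serialization.IntegerDeserializer");
    props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
    return new KafkaConsumer<>(props);
}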

Example 3: fromKafka

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the class this method depends on
static Message<? extends com.google.protobuf.Message> fromKafka(com.google.protobuf.Message protoMessage, Envelope envelope, ConsumerRecord<String, byte[]> record) {
    boolean wasReceived = true;

    Topic topic = new Topic(record.topic());
    String partitioningKey = record.key();
    int partitionId = record.partition();
    long offset = record.offset();

    String messageId = envelope.getMessageId();
    String correlationId = envelope.getCorrelationId();

    MessageType type = MessageType.of(protoMessage);

    String requestCorrelationId = envelope.getRequestCorrelationId();
    Topic replyTo = new Topic(envelope.getReplyTo());

    Metadata meta = new Metadata(wasReceived, topic, partitioningKey, partitionId, offset, messageId, correlationId, requestCorrelationId, replyTo, type);
    return new Message<>(protoMessage, meta);
}
 
Developer: Sixt, Project: ja-micro, Lines: 20, Source: Messages.java

Example 4: convertMessages

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the class this method depends on
private void convertMessages(ConsumerRecords<byte[], byte[]> msgs) {
    for (ConsumerRecord<byte[], byte[]> msg : msgs) {
        log.trace("Consuming message with key {}, value {}", msg.key(), msg.value());
        SchemaAndValue keyAndSchema = keyConverter.toConnectData(msg.topic(), msg.key());
        SchemaAndValue valueAndSchema = valueConverter.toConnectData(msg.topic(), msg.value());
        SinkRecord record = new SinkRecord(msg.topic(), msg.partition(),
                keyAndSchema.schema(), keyAndSchema.value(),
                valueAndSchema.schema(), valueAndSchema.value(),
                msg.offset(),
                ConnectUtils.checkAndConvertTimestamp(msg.timestamp()),
                msg.timestampType());
        record = transformationChain.apply(record);
        if (record != null) {
            messageBatch.add(record);
        }
    }
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 18, Source: WorkerSinkTask.java
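
Downstream, the offset captured in convertMessages is exposed again through SinkRecord.kafkaOffset(). As an illustrative sketch of how a sink might use it for its own bookkeeping (the field and method names here are hypothetical, not taken from WorkerSinkTask):

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.sink.SinkRecord;

// Illustrative bookkeeping: remember the highest offset delivered per partition.
private final Map<TopicPartition, Long> lastDelivered = new HashMap<>();

private void track(SinkRecord record) {
    lastDelivered.put(
            new TopicPartition(record.topic(), record.kafkaPartition()),
            record.kafkaOffset()); // the ConsumerRecord.offset() captured in convertMessages
}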

Example 5: decodePayload

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the class this method depends on
public static <K, V> Payload<K, V> decodePayload(Deserializer<V> valueDeserializer, ConsumerRecord<K, byte[]> originConsumerRecord) {
    TracingHeader tracingHeader = null;
    ConsumerRecord<K, V> dataRecord = null;
    boolean sampled = false;
    byte[] data = originConsumerRecord.value();
    byte[] vData = null;
    if (data.length <= HEADER_LENGTH) {
        vData = data;
    } else {
        ByteBuffer byteBuf = ByteBuffer.wrap(data);
        short magic = byteBuf.getShort(0);
        short tpLen = byteBuf.getShort(2);
        if (magic == MAGIC && tpLen == TracingHeader.LENGTH) {
            byte[] tpBytes = new byte[tpLen];
            System.arraycopy(byteBuf.array(), HEADER_LENGTH, tpBytes, 0, tpLen);
            tracingHeader = TracingHeader.fromBytes(tpBytes);
            sampled = true;
            int dataOffset = tpLen + HEADER_LENGTH;
            vData = new byte[byteBuf.array().length - dataOffset];
            System.arraycopy(byteBuf.array(), dataOffset, vData, 0, vData.length);
        } else {
            vData = data;
        }
    }
    dataRecord = new ConsumerRecord<>(originConsumerRecord.topic(),
            originConsumerRecord.partition(), originConsumerRecord.offset(),
            originConsumerRecord.key(), valueDeserializer.deserialize(originConsumerRecord.topic(), vData));
    return new Payload<>(tracingHeader, dataRecord, sampled);
}
 
Developer: YanXs, Project: nighthawk, Lines: 30, Source: PayloadCodec.java
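
Judging from the decode logic, the wire format is a 2-byte magic value, a 2-byte tracing-header length, the tracing header itself, and then the payload. The encode side is not part of the snippet; below is a hypothetical counterpart, assuming HEADER_LENGTH is 4 (the two shorts) and that TracingHeader exposes a toBytes() method.

import java.nio.ByteBuffer;

// Hypothetical encoder mirroring decodePayload; the real one in
// PayloadCodec is not shown above.
public static byte[] encodePayload(TracingHeader tracingHeader, byte[] valueData) {
    byte[] tpBytes = tracingHeader.toBytes();     // assumed serialization method
    ByteBuffer buf = ByteBuffer.allocate(HEADER_LENGTH + tpBytes.length + valueData.length);
    buf.putShort(MAGIC);                          // 2-byte magic marker
    buf.putShort((short) tpBytes.length);         // 2-byte tracing-header length
    buf.put(tpBytes);                             // the tracing header itself
    buf.put(valueData);                           // the original record value
    return buf.array();
}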

Example 6: consume

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the class this method depends on
private List<KafkaResult> consume() {
    final List<KafkaResult> kafkaResultList = new ArrayList<>();
    final ConsumerRecords consumerRecords = kafkaConsumer.poll(clientConfig.getPollTimeoutMs());

    logger.info("Consumed {} records", consumerRecords.count());
    final Iterator<ConsumerRecord> recordIterator = consumerRecords.iterator();
    while (recordIterator.hasNext()) {
        // Get next record
        final ConsumerRecord consumerRecord = recordIterator.next();

        // Convert to KafkaResult.
        final KafkaResult kafkaResult = new KafkaResult(
            consumerRecord.partition(),
            consumerRecord.offset(),
            consumerRecord.timestamp(),
            consumerRecord.key(),
            consumerRecord.value()
        );

        // Add to list.
        kafkaResultList.add(kafkaResult);
    }

    // Commit offsets
    commit();
    return kafkaResultList;
}
 
Developer: SourceLabOrg, Project: kafka-webview, Lines: 28, Source: WebKafkaConsumer.java
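
The commit() helper called at the end of consume() is not included in the snippet; presumably it delegates to the consumer's synchronous commit. A minimal sketch of what it might look like:

// Hypothetical implementation of the commit() helper used above;
// the actual method in WebKafkaConsumer is not shown.
private void commit() {
    // Synchronously commit the offsets of the records returned by the last poll()
    kafkaConsumer.commitSync();
}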

Example 7: TransactionWrapper

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the class this method depends on
public TransactionWrapper(ConsumerRecord<ByteBuffer, ByteBuffer> record, TransactionMetadata deserializedMetadata) {
    GridArgumentCheck.notNull(deserializedMetadata, "metadata cannot be null");
    this.value = record.value();
    this.key = record.key();
    this.topicPartition = new TopicPartition(record.topic(), record.partition());
    this.offset = record.offset();
    this.deserializedMetadata = deserializedMetadata;
}
 
Developer: epam, Project: Lagerta, Lines: 9, Source: TransactionWrapper.java

Example 8: update

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the class this method depends on
@SuppressWarnings("unchecked")
@Override
public void update(final ConsumerRecord<byte[], byte[]> record) {
    final SourceNodeAndDeserializer sourceNodeAndDeserializer = deserializers.get(record.topic());
    final ConsumerRecord<Object, Object> deserialized = sourceNodeAndDeserializer.deserializer.deserialize(record);
    final ProcessorRecordContext recordContext =
            new ProcessorRecordContext(deserialized.timestamp(),
                                       deserialized.offset(),
                                       deserialized.partition(),
                                       deserialized.topic());
    processorContext.setRecordContext(recordContext);
    processorContext.setCurrentNode(sourceNodeAndDeserializer.sourceNode);
    sourceNodeAndDeserializer.sourceNode.process(deserialized.key(), deserialized.value());
    offsets.put(new TopicPartition(record.topic(), record.partition()), deserialized.offset() + 1);
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 16, Source: GlobalStateUpdateTask.java

Example 9: restoreState

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the class this method depends on
private void restoreState(final StateRestoreCallback stateRestoreCallback,
                          final List<TopicPartition> topicPartitions,
                          final Map<TopicPartition, Long> highWatermarks) {
    for (final TopicPartition topicPartition : topicPartitions) {
        consumer.assign(Collections.singletonList(topicPartition));
        final Long checkpoint = checkpointableOffsets.get(topicPartition);
        if (checkpoint != null) {
            consumer.seek(topicPartition, checkpoint);
        } else {
            consumer.seekToBeginning(Collections.singletonList(topicPartition));
        }

        long offset = consumer.position(topicPartition);
        final Long highWatermark = highWatermarks.get(topicPartition);

        while (offset < highWatermark) {
            final ConsumerRecords<byte[], byte[]> records = consumer.poll(100);
            for (ConsumerRecord<byte[], byte[]> record : records) {
                offset = record.offset() + 1; // checkpoint the next offset to read, not the last one read
                if (record.key() != null) {
                    stateRestoreCallback.restore(record.key(), record.value());
                }
            }
        }
        checkpointableOffsets.put(topicPartition, offset);
    }
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 28, Source: GlobalStateManagerImpl.java

Example 10: processNext

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the class this method depends on
private long processNext(final List<ConsumerRecord<byte[], byte[]>> records, final StateRestorer restorer, final Long endOffset) {
    for (final ConsumerRecord<byte[], byte[]> record : records) {
        final long offset = record.offset();
        if (restorer.hasCompleted(offset, endOffset)) {
            return offset;
        }
        if (record.key() != null) {
            restorer.restore(record.key(), record.value());
        }
    }
    return consumer.position(restorer.partition());
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 13, Source: StoreChangelogReader.java

Example 11: updateStandbyStates

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the class this method depends on
List<ConsumerRecord<byte[], byte[]>> updateStandbyStates(final TopicPartition storePartition,
                                                         final List<ConsumerRecord<byte[], byte[]>> records) {
    final long limit = offsetLimit(storePartition);
    List<ConsumerRecord<byte[], byte[]>> remainingRecords = null;

    // restore states from changelog records
    final StateRestoreCallback restoreCallback = restoreCallbacks.get(storePartition.topic());

    long lastOffset = -1L;
    int count = 0;
    for (final ConsumerRecord<byte[], byte[]> record : records) {
        if (record.offset() < limit) {
            try {
                restoreCallback.restore(record.key(), record.value());
            } catch (final Exception e) {
                throw new ProcessorStateException(String.format("%s exception caught while trying to restore state from %s", logPrefix, storePartition), e);
            }
            lastOffset = record.offset();
        } else {
            if (remainingRecords == null) {
                remainingRecords = new ArrayList<>(records.size() - count);
            }

            remainingRecords.add(record);
        }
        count++;
    }

    // record the restored offset for its change log partition
    restoredOffsets.put(storePartition, lastOffset + 1);

    return remainingRecords;
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 34, Source: ProcessorStateManager.java

Example 12: bufferRecord

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the class this method depends on
public void bufferRecord(ConsumerRecord<Integer, Integer> record) {
    recordBuffer.add(
        new ConsumerRecord<>(record.topic(), record.partition(), record.offset(), 0L,
                             TimestampType.CREATE_TIME, 0L, 0, 0,
                             serializer.serialize(record.topic(), record.key()),
                             serializer.serialize(record.topic(), record.value())));
    endOffset = record.offset();

    super.updateEndOffsets(Collections.singletonMap(assignedPartition, endOffset));
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 11, Source: MockRestoreConsumer.java

Example 13: fromRecord

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the class this method depends on
public static MarkerKey fromRecord(ConsumerRecord r) {
    return new MarkerKey(r.partition(), r.offset());
}
 
Developer: softwaremill, Project: kmq, Lines: 4, Source: MarkerKey.java

Example 14: execute

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the class this method depends on
@Override
public void execute(Tuple input) {
    ConsumerRecord<String, byte[]> record = (ConsumerRecord<String, byte[]>) input.getValueByField("record");

    long kafkaOffset = record.offset();
    String fromTopic = record.topic();

    FullyOffset currentOffset = new FullyOffset(0, 0, 0);

    try {
        // Handle data from the ctrl topic
        if (fromTopic.equalsIgnoreCase(dsInfo.getCtrlTopic())) {
            processControlCommand(record, input);
            return;
        }

        // a. Read the message data
        processor.preProcess(record);

        // b. Read one partition's worth of data at a time
        List<DispatcherPackage> list;
        int partitionOffset = 0;

        do {
            partitionOffset++;
            list = processor.getNextList();
            if (list == null) {
                break;
            }

            // Sub-packages split out by schema
            int subOffset = 1;
            for (DispatcherPackage subPackage : list) {
                currentOffset = new FullyOffset(kafkaOffset, partitionOffset, subOffset);

                // 1. Get the data
                String key = subPackage.getKey();
                byte[] content = subPackage.getContent();
                int msgCount = subPackage.getMsgCount();
                String schemaName = subPackage.getSchemaName();
                String toTopic = subPackage.getToTopic();

                ContinuousFullyOffset continuousOffset = getSchemaFullyOffset(schemaName);
                continuousOffset.setProcessingOffset(currentOffset);
                if (key == null) {
                    // 2. Build the data message's key, recording the previous offset; used mainly for troubleshooting via logs
                    subPackage.setKey(continuousOffset.toString());
                    key = subPackage.getKey();
                }

                logger.debug(String.format("  currentOffset=%s, from_topic: %s, (to_topic:%s, schemaName=%s), Key=%s, msg_count=%d",
                        currentOffset.toString(), fromTopic, toTopic, schemaName, key, msgCount));
                this.collector.emit(input, new Values(subPackage, currentOffset));

                continuousOffset.setProcessedOffset(currentOffset);

                subOffset++;
            }
        } while (true);

        this.collector.ack(input);

    } catch (Exception ex) {
        // Print something in the log
        logger.error(String.format("FAIL! Dispatcher bolt fails at offset (%s).", currentOffset.toString()));
        // Call fail
        this.collector.fail(input);

        collector.reportError(ex);
        throw new RuntimeException(ex);
    }
}
 
Developer: BriData, Project: DBus, Lines: 74, Source: DispatcherBout.java

Example 15: QueueElement

import org.apache.kafka.clients.consumer.ConsumerRecord; // import the class this method depends on
public QueueElement(ConsumerRecord<String, byte[]> record) {
    this.key = record.offset();
    setRecord(record);
}
 
Developer: BriData, Project: DBus, Lines: 5, Source: QueueElement.java


Note: The org.apache.kafka.clients.consumer.ConsumerRecord.offset method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code fragments were selected from open-source projects contributed by many developers; the source code copyright belongs to the original authors. For redistribution and use, please refer to the License of the corresponding project; do not repost without permission.