This article compiles typical usage examples of the Java method org.apache.kafka.connect.sink.SinkRecord.kafkaOffset. If you are unsure what SinkRecord.kafkaOffset does or how to call it, the selected examples below should help; you can also look further into the enclosing class, org.apache.kafka.connect.sink.SinkRecord, for context.
Two code examples of the SinkRecord.kafkaOffset method are shown below, ordered by popularity.
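Before the two examples, here is a minimal sketch of where kafkaOffset() typically appears: a sink task reads it from each record handed to put(). The class name LoggingSinkTask is hypothetical and exists only for illustration.

import java.util.Collection;
import java.util.Map;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;

// Hypothetical task that logs each record's source coordinates.
public class LoggingSinkTask extends SinkTask {
    @Override
    public void put(Collection<SinkRecord> records) {
        for (SinkRecord record : records) {
            // kafkaOffset() returns the offset the record had in its source topic partition.
            System.out.printf("%s-%d@%d%n",
                    record.topic(), record.kafkaPartition(), record.kafkaOffset());
        }
    }

    @Override public String version() { return "1.0"; }
    @Override public void start(Map<String, String> props) { }
    @Override public void stop() { }
}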
Example 1: project
import org.apache.kafka.connect.sink.SinkRecord; // import the package/class this method depends on
public static SinkRecord project(SinkRecord record, Schema currentSchema, Compatibility compatibility) {
    switch (compatibility) {
        case BACKWARD:
        case FULL:
        case FORWARD:
            Schema sourceSchema = record.valueSchema();
            Object value = record.value();
            // The record already uses the current schema; no projection needed.
            if (sourceSchema == currentSchema || sourceSchema.equals(currentSchema)) {
                return record;
            }
            // Project the value onto the current schema and rebuild the record,
            // preserving the original topic, partition, key, and Kafka offset.
            Object projected = SchemaProjector.project(sourceSchema, value, currentSchema);
            return new SinkRecord(record.topic(), record.kafkaPartition(), record.keySchema(),
                record.key(), currentSchema, projected, record.kafkaOffset());
        default:
            return record;
    }
}
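A minimal, hypothetical calling sketch may make the behavior concrete. The schemas and field names below are invented for illustration, and Compatibility is assumed to be the enum from the same connector codebase as this example; the key point is that the projected record keeps the original record's topic, partition, and kafkaOffset().

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.sink.SinkRecord;

// Hypothetical schemas: currentSchema adds an optional field relative to the
// schema the incoming record was produced with.
Schema oldSchema = SchemaBuilder.struct().name("user")
    .field("id", Schema.INT64_SCHEMA)
    .build();
Schema currentSchema = SchemaBuilder.struct().name("user")
    .field("id", Schema.INT64_SCHEMA)
    .field("email", Schema.OPTIONAL_STRING_SCHEMA)
    .build();

Struct value = new Struct(oldSchema).put("id", 42L);
SinkRecord incoming = new SinkRecord("users", 0, null, null, oldSchema, value, 100L);

// The returned record holds a value projected onto currentSchema but still
// reports topic "users", partition 0, and kafkaOffset() == 100.
SinkRecord upgraded = project(incoming, currentSchema, Compatibility.BACKWARD);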
Example 2: writeRecord
import org.apache.kafka.connect.sink.SinkRecord; // import the package/class this method depends on
private void writeRecord(SinkRecord record) throws IOException {
    long expectedOffset = offset + recordCounter;
    if (offset == -1) {
        // First record since this writer started: adopt its offset as the base.
        offset = record.kafkaOffset();
    } else if (record.kafkaOffset() != expectedOffset) {
        // Currently it's possible to see stale data with the wrong offset after a rebalance when you
        // rewind, which we do since we manage our own offsets. See KAFKA-2894.
        if (!sawInvalidOffset) {
            log.info(
                "Ignoring stale out-of-order record in {}-{}. Has offset {} instead of expected offset {}",
                record.topic(), record.kafkaPartition(), record.kafkaOffset(), expectedOffset);
        }
        sawInvalidOffset = true;
        return;
    }

    if (sawInvalidOffset) {
        log.info(
            "Recovered from stale out-of-order records in {}-{} with offset {}",
            record.topic(), record.kafkaPartition(), expectedOffset);
        sawInvalidOffset = false;
    }

    String encodedPartition = partitioner.encodePartition(record);
    RecordWriter<SinkRecord> writer = getWriter(record, encodedPartition);
    writer.write(record);

    // Track the first and latest Kafka offset written to each encoded output partition.
    if (!startOffsets.containsKey(encodedPartition)) {
        startOffsets.put(encodedPartition, record.kafkaOffset());
    }
    offsets.put(encodedPartition, record.kafkaOffset());

    recordCounter++;
}
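A design note: this method trusts its own offset bookkeeping rather than the framework's, which is why stale records seen after a rebalance are skipped instead of written twice. The fragment below is a hypothetical sketch, not the connector's actual code, of how such manually tracked offsets might be reported back to Connect at commit time (for example from SinkTask.preCommit()) so that a restart rewinds to exactly the first unwritten record.

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

// Hypothetical helper: report the next offset to consume for one partition,
// derived from the base offset plus the number of records actually written.
private Map<TopicPartition, OffsetAndMetadata> committableOffsets(
        TopicPartition tp, long offset, long recordCounter) {
    Map<TopicPartition, OffsetAndMetadata> committable = new HashMap<>();
    committable.put(tp, new OffsetAndMetadata(offset + recordCounter));
    return committable;
}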