This article collects typical usages of the Java method org.apache.kafka.connect.sink.SinkRecord.key. If you are wondering what SinkRecord.key does, how to call it, and what real uses look like, the curated method examples below may help. You can also read further about the containing class, org.apache.kafka.connect.sink.SinkRecord.
Four code examples of the SinkRecord.key method are shown below, ordered by popularity.
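Before the examples, here is a minimal sketch of the method's contract. The helper name logKey is hypothetical and not taken from the examples below; the point is that record.key() returns the deserialized Kafka message key, which may be null and should be checked before use:

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.sink.SinkRecord;

// Minimal sketch (hypothetical helper): the key is whatever the Kafka message
// carried, so it can be null and must be null-checked by the caller.
static void logKey(SinkRecord record) {
    Object key = record.key();             // deserialized key, or null
    Schema keySchema = record.keySchema(); // key schema, or null for schemaless data
    System.out.printf("topic=%s key=%s schema=%s%n", record.topic(), key, keySchema);
}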
Example 1: project
import org.apache.kafka.connect.sink.SinkRecord; // import for the method's package/class dependency

public static SinkRecord project(SinkRecord record, Schema currentSchema, Compatibility compatibility) {
    switch (compatibility) {
        case BACKWARD:
        case FULL:
        case FORWARD:
            Schema sourceSchema = record.valueSchema();
            Object value = record.value();
            if (sourceSchema == currentSchema || sourceSchema.equals(currentSchema)) {
                return record;
            }
            // Project only the value onto the current schema; the key and its
            // schema are carried over to the new record unchanged.
            Object projected = SchemaProjector.project(sourceSchema, value, currentSchema);
            return new SinkRecord(record.topic(), record.kafkaPartition(), record.keySchema(),
                    record.key(), currentSchema, projected, record.kafkaOffset());
        default:
            return record;
    }
}
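A hypothetical call site might look like the following, where currentSchema and the compatibility mode are assumed to come from the connector's configuration rather than from this example:

// Hypothetical usage: normalize the record to the schema the writer currently expects.
SinkRecord compatible = project(record, currentSchema, Compatibility.BACKWARD);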
Example 2: put
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
import org.apache.kafka.connect.sink.SinkRecord; // import for the method's package/class dependency

public void put(SinkRecord record) {
    try {
        ByteArrayOutputStream resultStream = new ByteArrayOutputStream();
        Writer writer = new OutputStreamWriter(resultStream);
        // Serialize the record as a CSV-style line "key,value\n";
        // the key and the comma are omitted when the key is null.
        Object key = record.key();
        if (key != null) {
            writer.write(key.toString());
            writer.write(',');
        }
        Object value = record.value();
        if (value != null) {
            writer.write(value.toString());
        }
        writer.write('\n');
        writer.close();
        this.buffer.put(resultStream.toByteArray());
    } catch (IOException exception) {
        // TODO: check exception
        throw new RuntimeException(exception);
    }
}
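As a rough illustration of what this method buffers, the following hypothetical call (the topic name, key, value, and offset are made up) would append the bytes of the line id-1,hello to the buffer:

// Hypothetical usage: buffers the bytes of the CSV-style line "id-1,hello\n".
SinkRecord record = new SinkRecord("my-topic", 0, null, "id-1", null, "hello", 42L);
put(record);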
Example 3: put
import org.apache.kafka.connect.sink.SinkRecord; // import for the method's package/class dependency

@Override
public void put(Collection<SinkRecord> sinkRecords) {
    // If the KinesisProducers cannot write to Kinesis Streams (because of
    // connectivity issues, access issues, or misconfigured shards), pause
    // consumption of messages until the backlog is cleared.
    validateOutStandingRecords();
    String partitionKey;
    for (SinkRecord sinkRecord : sinkRecords) {
        ListenableFuture<UserRecordResult> f;
        // Kinesis does not allow an empty partition key, so fall back to the Kafka partition number.
        if (sinkRecord.key() != null && !sinkRecord.key().toString().trim().equals("")) {
            partitionKey = sinkRecord.key().toString().trim();
        } else {
            partitionKey = Integer.toString(sinkRecord.kafkaPartition());
        }
        if (singleKinesisProducerPerPartition) {
            f = addUserRecord(producerMap.get(sinkRecord.kafkaPartition() + "@" + sinkRecord.topic()), streamName,
                    partitionKey, usePartitionAsHashKey, sinkRecord);
        } else {
            f = addUserRecord(kinesisProducer, streamName, partitionKey, usePartitionAsHashKey, sinkRecord);
        }
        Futures.addCallback(f, callback);
    }
}
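One design detail worth noting: because records with a null or blank Kafka key all fall back to the Kafka partition number as their Kinesis partition key, records from the same Kafka partition appear to land on the same Kinesis shard, which should preserve their relative order downstream.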
Example 4: batchWrite
import org.apache.kafka.connect.sink.SinkRecord; // import for the method's package/class dependency

public void batchWrite(final Collection<SinkRecord> records) throws SQLException {
    if (records == null || records.isEmpty()) {
        return;
    }
    List<String> sqlList = new ArrayList<>();
    for (SinkRecord record : records) {
        String topic = record.topic();
        /** What is the key when the row has no PK? **/
        String key = (String) record.key();
        String val = (String) record.value();
        log.info("===>>>topic:{},partition:{},offset:{},\n===>>>key:{},value:{}",
                topic, record.kafkaPartition(), record.kafkaOffset(), record.key(), record.value());
        RowMapPK rowMapPK = getRowMapPK(key);
        RowMap rowMap = JSON.parseObject(val, RowMap.class);
        /** Data filtering **/
        if (filter.match(rowMap)) {
            // Write the old and new data to the designated file
            ExportRowMap exportRowMap = new ExportRowMap(rowMapPK, rowMap);
            datalog.info(exportRowMap.toString());
            rowMap = dbRedirector.redirectDb(topic, rowMap);
            log.info("===>>>Assembler RowMap:{}", rowMap.toString());
            String sql = assembler.getSql(rowMapPK, rowMap);
            log.info("===>>>Assembler GET SQL:{}", sql);
            if (StringUtils.isNotEmpty(sql)) {
                sqlList.add(sql);
            }
        }
    }
    flush(sqlList);
}