This article collects and summarizes typical usages of the Java method org.apache.kafka.connect.sink.SinkRecord.topic. Wondering what SinkRecord.topic does, how to call it, or where it is used in practice? The curated code examples below may help. You can also explore further usage examples of the method's enclosing class, org.apache.kafka.connect.sink.SinkRecord.
Three code examples of the SinkRecord.topic method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
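Before the examples, a quick orientation: SinkRecord.topic() (inherited from ConnectRecord) returns the name of the Kafka topic the record was read from. The minimal sketch below constructs a record by hand just to show the call; the field values are illustrative, since in a real connector the framework builds SinkRecords and passes them to SinkTask.put.

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.sink.SinkRecord;

public class SinkRecordTopicDemo {
    public static void main(String[] args) {
        // Normally Kafka Connect builds SinkRecords; here one is built by hand.
        SinkRecord record = new SinkRecord(
                "orders",             // topic name (illustrative)
                0,                    // kafkaPartition
                Schema.STRING_SCHEMA, // keySchema
                "order-42",           // key
                Schema.STRING_SCHEMA, // valueSchema
                "{\"amount\":10}",    // value
                123L);                // kafkaOffset

        // topic() returns the source topic name, here "orders".
        System.out.println("record came from topic: " + record.topic());
    }
}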
Example 1: project
import org.apache.kafka.connect.sink.SinkRecord; // import the package/class the method depends on
public static SinkRecord project(SinkRecord record, Schema currentSchema, Compatibility compatibility) {
    switch (compatibility) {
        case BACKWARD:
        case FULL:
        case FORWARD:
            Schema sourceSchema = record.valueSchema();
            Object value = record.value();
            // No projection needed when the record already uses the current schema.
            if (sourceSchema == currentSchema || sourceSchema.equals(currentSchema)) {
                return record;
            }
            // Project the value from its source schema onto the current schema.
            Object projected = SchemaProjector.project(sourceSchema, value, currentSchema);
            // Rebuild the record with the projected value, preserving topic,
            // partition, key, and offset via record.topic() and friends.
            return new SinkRecord(record.topic(), record.kafkaPartition(), record.keySchema(),
                    record.key(), currentSchema, projected, record.kafkaOffset());
        default:
            return record;
    }
}
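For context, a caller would apply project whenever the schema it writes with may lag behind or lead the records it receives. The snippet below is a hypothetical caller, not part of the original source: currentSchema, the Compatibility constant, and writer are all assumed names.

// Hypothetical caller: project each incoming record onto the writer's
// current schema so the output stays schema-consistent.
public void put(Collection<SinkRecord> records) {
    for (SinkRecord record : records) {
        SinkRecord projected = project(record, currentSchema, Compatibility.BACKWARD);
        writer.write(projected); // 'writer' is a stand-in for the connector's output path
    }
}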
Example 2: batchWrite
import org.apache.kafka.connect.sink.SinkRecord; // import the package/class the method depends on
public void batchWrite(final Collection<SinkRecord> records) throws SQLException {
    if (records == null || records.isEmpty()) {
        return;
    }
    List<String> sqlList = new ArrayList<>();
    for (SinkRecord record : records) {
        String topic = record.topic();
        /* What does the key look like when the table has no primary key? */
        String key = (String) record.key();
        String val = (String) record.value();
        log.info("===>>>topic:{},partition:{},offset:{},\n===>>>key:{},value:{}", topic,
                record.kafkaPartition(), record.kafkaOffset(), record.key(), record.value());
        RowMapPK rowMapPK = getRowMapPK(key);
        RowMap rowMap = JSON.parseObject(val, RowMap.class);
        /* Data filtering. */
        if (filter.match(rowMap)) {
            // Write the old and new data to the designated file.
            ExportRowMap exportRowMap = new ExportRowMap(rowMapPK, rowMap);
            datalog.info(exportRowMap.toString());
            rowMap = dbRedirector.redirectDb(topic, rowMap);
            log.info("===>>>Assembler RowMap:{}", rowMap.toString());
            String sql = assembler.getSql(rowMapPK, rowMap);
            log.info("===>>>Assembler GET SQL:{}", sql);
            if (StringUtils.isNotEmpty(sql)) {
                sqlList.add(sql);
            }
        }
    }
    flush(sqlList);
}
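Both batchWrite variants finish by handing the accumulated statements to flush(sqlList), which this page does not show. Below is a minimal sketch of what such a flush could look like with plain JDBC batching; the connection field and the transaction handling are assumptions, not the original implementation.

// Hypothetical flush: run the accumulated SQL as one JDBC batch in a transaction.
// 'connection' is an assumed java.sql.Connection field on the writer class.
private void flush(List<String> sqlList) throws SQLException {
    if (sqlList.isEmpty()) {
        return;
    }
    boolean autoCommit = connection.getAutoCommit();
    connection.setAutoCommit(false);
    try (Statement stmt = connection.createStatement()) {
        for (String sql : sqlList) {
            stmt.addBatch(sql);
        }
        stmt.executeBatch();
        connection.commit();
    } catch (SQLException e) {
        connection.rollback(); // keep the batch atomic on failure
        throw e;
    } finally {
        connection.setAutoCommit(autoCommit);
    }
}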
Example 3: batchWrite
import org.apache.kafka.connect.sink.SinkRecord; // import the package/class the method depends on
public void batchWrite(final Collection<SinkRecord> records) throws SQLException {
    if (records == null || records.isEmpty()) {
        return;
    }
    final List<String> sqlList = new ArrayList<>(records.size());
    for (SinkRecord record : records) {
        String topic = record.topic();
        /* What does the key look like when the table has no primary key? */
        String key = StrUtils.valueOf(record.key());
        String val = StrUtils.valueOf(record.value());
        log.info("===>>>topic:{},partition:{},offset:{},\n===>>>key:{},value:{}", topic, record.kafkaPartition(),
                record.kafkaOffset(), record.key(), record.value());
        RowMapPK rowMapPK = getRowMapPK(key);
        RowMap rowMap = JSON.parseObject(val, RowMap.class);
        MatchCase matchCase = filterChain.matchCase(rowMap);
        // Export and back up the old and new data before rewriting.
        ExportRowMap exportRowMap = new ExportRowMap(rowMapPK, rowMap);
        datalog.info(exportRowMap.toString());
        LogUtil.backUp(exportRowMap);
        rowMap = dbTableReplacer.replace(rowMap);
        log.info("===>>>Assembler RowMap:{}", rowMap.toString());
        String sql = assembler.getSql(matchCase, rowMapPK, rowMap);
        log.info("===>>>Assembler GET SQL:{}", sql);
        if (StringUtils.isNotEmpty(sql)) {
            sqlList.add(sql);
        }
    }
    flush(sqlList);
}
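In a running connector, a batchWrite like the ones above would typically be called from SinkTask.put, with SQLException translated into Kafka Connect's RetriableException so the framework redelivers the same batch. A minimal sketch under that assumption (BatchWriter is a hypothetical stand-in for the class owning batchWrite):

import java.sql.SQLException;
import java.util.Collection;
import java.util.Map;
import org.apache.kafka.connect.errors.RetriableException;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;

public class JdbcLikeSinkTask extends SinkTask {
    private BatchWriter writer; // hypothetical owner of batchWrite

    @Override
    public void start(Map<String, String> props) {
        writer = new BatchWriter(props); // assumed constructor
    }

    @Override
    public void put(Collection<SinkRecord> records) {
        try {
            writer.batchWrite(records);
        } catch (SQLException e) {
            // Surface as retriable so Connect retries the same batch later.
            throw new RetriableException(e);
        }
    }

    @Override
    public void stop() {
    }

    @Override
    public String version() {
        return "1.0";
    }
}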