本文整理汇总了Java中org.apache.kafka.connect.sink.SinkRecord.valueSchema方法的典型用法代码示例。如果您正苦于以下问题:Java SinkRecord.valueSchema方法的具体用法?Java SinkRecord.valueSchema怎么用?Java SinkRecord.valueSchema使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.kafka.connect.sink.SinkRecord
的用法示例。
在下文中一共展示了SinkRecord.valueSchema方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: project
import org.apache.kafka.connect.sink.SinkRecord; //导入方法依赖的package包/类
/**
 * Projects a sink record's value onto {@code currentSchema} when schema compatibility
 * rules (BACKWARD / FULL / FORWARD) require it; otherwise returns the record unchanged.
 *
 * @param record        the incoming sink record to (possibly) project
 * @param currentSchema the schema the value should be projected onto
 * @param compatibility the configured compatibility mode
 * @return the original record when no projection is needed (or compatibility is NONE),
 *         otherwise a new {@link SinkRecord} carrying the projected value
 */
public static SinkRecord project(SinkRecord record, Schema currentSchema, Compatibility compatibility) {
switch (compatibility) {
case BACKWARD:
case FULL:
case FORWARD:
Schema sourceSchema = record.valueSchema();
Object value = record.value();
// Fast path: same schema instance or equal schemas need no projection.
// Null-guard the equals call: schemaless records have a null valueSchema(),
// which previously caused an NPE here.
if (sourceSchema == currentSchema
    || (sourceSchema != null && sourceSchema.equals(currentSchema))) {
return record;
}
Object projected = SchemaProjector.project(sourceSchema, value, currentSchema);
return new SinkRecord(record.topic(), record.kafkaPartition(), record.keySchema(),
record.key(), currentSchema, projected, record.kafkaOffset());
default:
// Compatibility mode that requires no projection (e.g. NONE).
return record;
}
}
示例2: convertRecord
import org.apache.kafka.connect.sink.SinkRecord; //导入方法依赖的package包/类
/**
 * Converts a sink record into a {@link DeletableRecord} describing a delete event.
 *
 * <p>Unless {@code ignoreSchema} is set, the record's schema and value are first run
 * through the pre-processing helpers before being serialized to a JSON payload.
 *
 * @param record       the sink record to convert
 * @param ignoreSchema when true, use the record's schema/value as-is
 * @param versionType  the version type attached to the resulting deletable record
 * @return the deletable record, or null when the serialized payload is blank
 */
public static DeletableRecord convertRecord(SinkRecord record, boolean ignoreSchema, String versionType) {
final Schema effectiveSchema;
final Object effectiveValue;
if (ignoreSchema) {
effectiveSchema = record.valueSchema();
effectiveValue = record.value();
} else {
effectiveSchema = preProcessSchema(record.valueSchema());
effectiveValue = preProcessValue(record.value(), record.valueSchema(), effectiveSchema);
}
byte[] serialized = JSON_CONVERTER.fromConnectData(record.topic(), effectiveSchema, effectiveValue);
final String payload = new String(serialized, StandardCharsets.UTF_8);
// A blank payload carries no delete event to act on.
if (!StringUtils.isNotBlank(payload)) {
return null;
}
DeleteEvent deleteEvent = GSON.fromJson(payload, DeleteEvent.class);
Key key = new Key(deleteEvent.getIndex(), deleteEvent.getType(), deleteEvent.getId());
return new DeletableRecord(key, deleteEvent.getVersion(), versionType);
}
示例3: encodePartition
import org.apache.kafka.connect.sink.SinkRecord; //导入方法依赖的package包/类
/**
 * Encodes the partition for a sink record by extracting a timestamp from the
 * configured field of the record's Struct value and bucketing it by the
 * configured partition duration.
 *
 * @param sinkRecord the record whose value supplies the partition timestamp
 * @return the formatted time-bucket string for this record
 * @throws PartitionException when the value is not a Struct or the field's
 *         schema type cannot be interpreted as a timestamp
 */
@Override
public String encodePartition(SinkRecord sinkRecord) {
Object value = sinkRecord.value();
Schema valueSchema = sinkRecord.valueSchema();
long timestamp;
if (value instanceof Struct) {
Struct struct = (Struct) value;
Object partitionKey = struct.get(fieldName);
Schema.Type type = valueSchema.field(fieldName).schema().type();
switch (type) {
case INT8:
case INT16:
case INT32:
case INT64:
// Any integral type is widened to a long millisecond timestamp.
timestamp = ((Number) partitionKey).longValue();
break;
case STRING:
// parseLong avoids the needless boxing of Long.valueOf(...).longValue().
timestamp = Long.parseLong((String) partitionKey);
break;
default:
log.error("Type {} is not supported as a partition key.", type.getName());
throw new PartitionException("Error encoding partition.");
}
} else {
log.error("Value is not Struct type.");
throw new PartitionException("Error encoding partition.");
}
// Round the timestamp down to its partition-duration bucket in the formatter's zone.
DateTime bucket = new DateTime(getPartition(partitionDurationMs, timestamp, formatter.getZone()));
return bucket.toString(formatter);
}
示例4: encodePartition
import org.apache.kafka.connect.sink.SinkRecord; //导入方法依赖的package包/类
/**
 * Encodes the partition for a sink record as {@code fieldName=value}, where the
 * value is read from the configured field of the record's Struct value.
 *
 * @param sinkRecord the record whose value supplies the partition field
 * @return the {@code field=value} partition string
 * @throws PartitionException when the value is not a Struct or the field's
 *         schema type is unsupported as a partition key
 */
@Override
public String encodePartition(SinkRecord sinkRecord) {
Object value = sinkRecord.value();
// Guard clause: only Struct values carry a named partition field.
if (!(value instanceof Struct)) {
log.error("Value is not Struct type.");
throw new PartitionException("Error encoding partition.");
}
Object partitionKey = ((Struct) value).get(fieldName);
Type keyType = sinkRecord.valueSchema().field(fieldName).schema().type();
switch (keyType) {
case INT8:
case INT16:
case INT32:
case INT64:
return fieldName + "=" + ((Number) partitionKey).toString();
case STRING:
return fieldName + "=" + (String) partitionKey;
case BOOLEAN:
return fieldName + "=" + (boolean) partitionKey;
default:
log.error("Type {} is not supported as a partition key.", keyType.getName());
throw new PartitionException("Error encoding partition.");
}
}