This article collects and summarizes typical usage of the Java method org.apache.kafka.connect.data.Struct.get. If you are wondering what Struct.get does, how to call it, and what real-world usage looks like, the curated code examples below should help. You can also read further about its enclosing class, org.apache.kafka.connect.data.Struct.
The following presents 7 code examples of Struct.get, ordered by popularity by default.
Example 1: applyWithSchema
import org.apache.kafka.connect.data.Struct; // import the package/class this method depends on
private R applyWithSchema(R record) {
    Schema valueSchema = operatingSchema(record);
    Schema updatedSchema = getOrBuildSchema(valueSchema);

    // Whole-record casting
    if (wholeValueCastType != null)
        return newRecord(record, updatedSchema, castValueToType(operatingValue(record), wholeValueCastType));

    // Casting within a struct
    final Struct value = requireStruct(operatingValue(record), PURPOSE);
    final Struct updatedValue = new Struct(updatedSchema);
    for (Field field : value.schema().fields()) {
        final Object origFieldValue = value.get(field);
        final Schema.Type targetType = casts.get(field.name());
        final Object newFieldValue = targetType != null ? castValueToType(origFieldValue, targetType) : origFieldValue;
        updatedValue.put(updatedSchema.field(field.name()), newFieldValue);
    }
    return newRecord(record, updatedSchema, updatedValue);
}
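The snippet above depends on helpers from the surrounding Cast-style transform (operatingSchema, getOrBuildSchema, castValueToType, casts, PURPOSE), so it will not compile on its own. As a minimal, self-contained sketch of what Struct.get returns and what a per-field cast amounts to, the following builds a Struct by hand; the schema, field names, and the widening conversion are all illustrative:

import org.apache.kafka.connect.data.Field;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;

public class StructGetCastSketch {
    public static void main(String[] args) {
        Schema schema = SchemaBuilder.struct()
                .field("id", Schema.INT32_SCHEMA)
                .field("name", Schema.STRING_SCHEMA)
                .build();
        Struct value = new Struct(schema)
                .put("id", 42)
                .put("name", "alice");

        for (Field field : schema.fields()) {
            // Struct.get(Field) returns the raw field value as an Object
            Object raw = value.get(field);
            if (field.name().equals("id")) {
                // a "cast" in the transform's sense is just an explicit conversion
                long widened = ((Number) raw).longValue();
                System.out.println(field.name() + " -> " + widened);
            } else {
                System.out.println(field.name() + " -> " + raw);
            }
        }
    }
}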
Example 2: applyWithSchema
import org.apache.kafka.connect.data.Struct; // import the package/class this method depends on
private R applyWithSchema(R record) {
    final Struct value = requireStruct(operatingValue(record), PURPOSE);

    Schema updatedSchema = schemaUpdateCache.get(value.schema());
    if (updatedSchema == null) {
        updatedSchema = makeUpdatedSchema(value.schema());
        schemaUpdateCache.put(value.schema(), updatedSchema);
    }

    final Struct updatedValue = new Struct(updatedSchema);
    for (Field field : updatedSchema.fields()) {
        final Object fieldValue = value.get(reverseRenamed(field.name()));
        updatedValue.put(field.name(), fieldValue);
    }
    return newRecord(record, updatedSchema, updatedValue);
}
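Here reverseRenamed maps a field name in the updated (renamed) schema back to the original name, so that value.get can locate the source field. A minimal sketch of that lookup, assuming the transform keeps a reverse map built from its rename configuration (the class name and map contents below are hypothetical):

import java.util.HashMap;
import java.util.Map;

class RenameSketch {
    // hypothetical reverse mapping: renamed (output) name -> original name
    private final Map<String, String> reverseRenames = new HashMap<>();

    RenameSketch() {
        reverseRenames.put("user_id", "id"); // e.g. config renamed "id" to "user_id"
    }

    String reverseRenamed(String fieldName) {
        // fall back to the same name when the field was not renamed
        return reverseRenames.getOrDefault(fieldName, fieldName);
    }
}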
Example 3: encodePartition
import org.apache.kafka.connect.data.Struct; // import the package/class this method depends on
@Override
public String encodePartition(SinkRecord sinkRecord) {
    Object value = sinkRecord.value();
    Schema valueSchema = sinkRecord.valueSchema();
    long timestamp;
    if (value instanceof Struct) {
        Struct struct = (Struct) value;
        Object partitionKey = struct.get(fieldName);
        Schema.Type type = valueSchema.field(fieldName).schema().type();
        switch (type) {
            case INT8:
            case INT16:
            case INT32:
            case INT64:
                timestamp = ((Number) partitionKey).longValue();
                break;
            case STRING:
                String timestampStr = (String) partitionKey;
                timestamp = Long.valueOf(timestampStr).longValue();
                break;
            default:
                log.error("Type {} is not supported as a partition key.", type.getName());
                throw new PartitionException("Error encoding partition.");
        }
    } else {
        log.error("Value is not Struct type.");
        throw new PartitionException("Error encoding partition.");
    }
    DateTime bucket = new DateTime(getPartition(partitionDurationMs, timestamp, formatter.getZone()));
    return bucket.toString(formatter);
}
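partitionDurationMs, formatter, and getPartition belong to the enclosing TimeBasedPartitioner-style class, so this method is not runnable in isolation. The core idea of getPartition is rounding the extracted timestamp down to the start of its duration window; a rough sketch under that assumption (the real implementation also folds in the time-zone offset, and the hourly duration and path pattern here are illustrative):

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;

public class BucketSketch {
    public static void main(String[] args) {
        long partitionDurationMs = 3_600_000L; // hypothetical hourly buckets
        long timestamp = System.currentTimeMillis();
        // round down to the start of the current window (UTC here; the real
        // code adjusts by the zone offset before taking the remainder)
        long bucketStart = timestamp - (timestamp % partitionDurationMs);
        DateTimeFormatter formatter =
                DateTimeFormat.forPattern("'year'=yyyy/'month'=MM/'day'=dd/'hour'=HH")
                              .withZone(DateTimeZone.UTC);
        System.out.println(new DateTime(bucketStart, DateTimeZone.UTC).toString(formatter));
    }
}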
Example 4: encodePartition
import org.apache.kafka.connect.data.Struct; // import the package/class this method depends on
@Override
public String encodePartition(SinkRecord sinkRecord) {
    Object value = sinkRecord.value();
    Schema valueSchema = sinkRecord.valueSchema();
    if (value instanceof Struct) {
        Struct struct = (Struct) value;
        Object partitionKey = struct.get(fieldName);
        Type type = valueSchema.field(fieldName).schema().type();
        switch (type) {
            case INT8:
            case INT16:
            case INT32:
            case INT64:
                Number record = (Number) partitionKey;
                return fieldName + "=" + record.toString();
            case STRING:
                return fieldName + "=" + (String) partitionKey;
            case BOOLEAN:
                boolean booleanRecord = (boolean) partitionKey;
                return fieldName + "=" + Boolean.toString(booleanRecord);
            default:
                log.error("Type {} is not supported as a partition key.", type.getName());
                throw new PartitionException("Error encoding partition.");
        }
    } else {
        log.error("Value is not Struct type.");
        throw new PartitionException("Error encoding partition.");
    }
}
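A quick way to see what this field-based encoder produces is to feed it a record whose value is a Struct. Assuming fieldName is configured as "region", the snippet below performs the same STRING-branch lookup by hand and prints the Hive-style string region=emea; the topic, offset, and field name are illustrative:

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.sink.SinkRecord;

public class FieldPartitionSketch {
    public static void main(String[] args) {
        Schema valueSchema = SchemaBuilder.struct()
                .field("region", Schema.STRING_SCHEMA)
                .build();
        Struct value = new Struct(valueSchema).put("region", "emea");
        SinkRecord record = new SinkRecord("topic", 0, null, null, valueSchema, value, 123L);

        // the same lookup encodePartition performs for a STRING field
        Struct struct = (Struct) record.value();
        String encoded = "region" + "=" + (String) struct.get("region");
        System.out.println(encoded); // region=emea
    }
}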
Example 5: applyValueWithSchema
import org.apache.kafka.connect.data.Struct; // import the package/class this method depends on
private Struct applyValueWithSchema(Struct value, Schema updatedSchema) {
    Struct updatedValue = new Struct(updatedSchema);
    for (Field field : value.schema().fields()) {
        final Object updatedFieldValue;
        if (field.name().equals(config.field)) {
            updatedFieldValue = convertTimestamp(value.get(field), timestampTypeFromSchema(field.schema()));
        } else {
            updatedFieldValue = value.get(field);
        }
        updatedValue.put(field.name(), updatedFieldValue);
    }
    return updatedValue;
}
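convertTimestamp, timestampTypeFromSchema, and config.field are internals of the TimestampConverter transform. The underlying pattern, copying a Struct while rewriting exactly one field, can be sketched independently; the field names and the Date-to-epoch-millis conversion standing in for convertTimestamp are illustrative:

import java.util.Date;
import org.apache.kafka.connect.data.Field;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.data.Timestamp;

public class CopyWithConversionSketch {
    public static void main(String[] args) {
        Schema schema = SchemaBuilder.struct()
                .field("id", Schema.INT64_SCHEMA)
                .field("created_at", Timestamp.SCHEMA)
                .build();
        Struct value = new Struct(schema)
                .put("id", 1L)
                .put("created_at", new Date(0L));

        Schema updatedSchema = SchemaBuilder.struct()
                .field("id", Schema.INT64_SCHEMA)
                .field("created_at", Schema.INT64_SCHEMA) // converted to epoch millis
                .build();

        Struct updatedValue = new Struct(updatedSchema);
        for (Field field : value.schema().fields()) {
            Object v = value.get(field);
            if (field.name().equals("created_at")) {
                v = ((Date) v).getTime(); // stand-in for convertTimestamp
            }
            updatedValue.put(field.name(), v);
        }
        System.out.println(updatedValue);
    }
}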
Example 6: buildUpdatedSchema
import org.apache.kafka.connect.data.Struct; // import the package/class this method depends on
/**
 * Build an updated Struct Schema which flattens all nested fields into a single struct, handling cases where
 * optionality and default values of the flattened fields are affected by the optionality and default values of
 * parent/ancestor schemas (e.g. flattened field is optional because the parent schema was optional, even if the
 * schema itself is marked as required).
 *
 * @param schema the schema to translate
 * @param fieldNamePrefix the prefix to use on field names, i.e. the delimiter-joined set of ancestor field names
 * @param newSchema the flattened schema being built
 * @param optional true if any ancestor schema is optional
 * @param defaultFromParent the default value, if any, included via the parent/ancestor schemas
 */
private void buildUpdatedSchema(Schema schema, String fieldNamePrefix, SchemaBuilder newSchema, boolean optional, Struct defaultFromParent) {
    for (Field field : schema.fields()) {
        final String fieldName = fieldName(fieldNamePrefix, field.name());
        final boolean fieldIsOptional = optional || field.schema().isOptional();
        Object fieldDefaultValue = null;
        if (field.schema().defaultValue() != null) {
            fieldDefaultValue = field.schema().defaultValue();
        } else if (defaultFromParent != null) {
            fieldDefaultValue = defaultFromParent.get(field);
        }
        switch (field.schema().type()) {
            case INT8:
            case INT16:
            case INT32:
            case INT64:
            case FLOAT32:
            case FLOAT64:
            case BOOLEAN:
            case STRING:
            case BYTES:
                newSchema.field(fieldName, convertFieldSchema(field.schema(), fieldIsOptional, fieldDefaultValue));
                break;
            case STRUCT:
                buildUpdatedSchema(field.schema(), fieldName, newSchema, fieldIsOptional, (Struct) fieldDefaultValue);
                break;
            default:
                throw new DataException("Flatten transformation does not support " + field.schema().type()
                        + " for record with schemas (for field " + fieldName + ").");
        }
    }
}
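The fieldName helper referenced above joins ancestor field names into the flattened name. A minimal sketch of that convention, assuming a "." delimiter (the delimiter is configurable in the actual Flatten transform, and the class name here is illustrative):

class FlattenNameSketch {
    // hypothetical helper mirroring fieldName(fieldNamePrefix, field.name())
    static String fieldName(String prefix, String name) {
        return prefix.isEmpty() ? name : prefix + "." + name;
    }

    public static void main(String[] args) {
        // a nested struct address { city } flattens to "address.city"
        System.out.println(fieldName(fieldName("", "address"), "city"));
    }
}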
Example 7: applyWithSchema
import org.apache.kafka.connect.data.Struct; // import the package/class this method depends on
private R applyWithSchema(R record) {
    final Struct value = requireStruct(operatingValue(record), PURPOSE);
    final Struct updatedValue = new Struct(value.schema());
    for (Field field : value.schema().fields()) {
        final Object origFieldValue = value.get(field);
        updatedValue.put(field, maskedFields.contains(field.name()) ? masked(origFieldValue) : origFieldValue);
    }
    return newRecord(record, updatedValue);
}
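masked() is a helper of the MaskField-style transform that replaces a field value with a type-appropriate "empty" stand-in. A simplified sketch of that idea, covering only a handful of types (the real transform keys off the exact Java class of every Connect type, which this does not attempt):

import java.util.Collections;
import java.util.List;
import java.util.Map;

class MaskSketch {
    // illustrative stand-in for masked(): blank the value based on its runtime type
    static Object masked(Object value) {
        if (value == null) return null;
        if (value instanceof String) return "";
        if (value instanceof Number) return 0; // real code returns the matching numeric class
        if (value instanceof List) return Collections.emptyList();
        if (value instanceof Map) return Collections.emptyMap();
        throw new IllegalArgumentException("Unsupported type: " + value.getClass());
    }
}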