本文整理汇总了Java中org.apache.kafka.connect.data.SchemaBuilder.field方法的典型用法代码示例。如果您正苦于以下问题:Java SchemaBuilder.field方法的具体用法?Java SchemaBuilder.field怎么用?Java SchemaBuilder.field使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.kafka.connect.data.SchemaBuilder的用法示例。
在下文中一共展示了SchemaBuilder.field方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: makeUpdatedSchema
import org.apache.kafka.connect.data.SchemaBuilder; //导入方法依赖的package包/类
/**
 * Creates a copy of the given Struct schema with each configured insertion
 * field (topic, partition, offset, timestamp, static) appended after the
 * original fields, honoring each field's configured optionality.
 *
 * @param schema the original value schema to extend
 * @return a new schema containing the original fields plus the insertion fields
 */
private Schema makeUpdatedSchema(Schema schema) {
    final SchemaBuilder updated = SchemaUtil.copySchemaBasics(schema, SchemaBuilder.struct());

    // Carry over every existing field unchanged.
    for (Field existing : schema.fields()) {
        updated.field(existing.name(), existing.schema());
    }

    // Append each configured insertion field; each may be declared optional.
    if (topicField != null) {
        updated.field(
            topicField.name,
            topicField.optional ? Schema.OPTIONAL_STRING_SCHEMA : Schema.STRING_SCHEMA);
    }
    if (partitionField != null) {
        updated.field(
            partitionField.name,
            partitionField.optional ? Schema.OPTIONAL_INT32_SCHEMA : Schema.INT32_SCHEMA);
    }
    if (offsetField != null) {
        updated.field(
            offsetField.name,
            offsetField.optional ? Schema.OPTIONAL_INT64_SCHEMA : Schema.INT64_SCHEMA);
    }
    if (timestampField != null) {
        updated.field(
            timestampField.name,
            timestampField.optional ? OPTIONAL_TIMESTAMP_SCHEMA : Timestamp.SCHEMA);
    }
    if (staticField != null) {
        updated.field(
            staticField.name,
            staticField.optional ? Schema.OPTIONAL_STRING_SCHEMA : Schema.STRING_SCHEMA);
    }

    return updated.build();
}
示例2: valueSchema
import org.apache.kafka.connect.data.SchemaBuilder; //导入方法依赖的package包/类
/**
 * Derives a Connect Struct schema for the given SObject descriptor. Text-area
 * fields are skipped; synthetic object-type and event-type fields are appended
 * as optional strings.
 *
 * @param descriptor the SObject metadata to convert
 * @return the value schema for records of this SObject type
 */
public static Schema valueSchema(SObjectDescriptor descriptor) {
    final String schemaName =
        String.format("%s.%s", SObjectHelper.class.getPackage().getName(), descriptor.name());
    final SchemaBuilder struct = SchemaBuilder.struct();
    struct.name(schemaName);

    for (SObjectDescriptor.Field descriptorField : descriptor.fields()) {
        // Text areas are intentionally excluded from the schema.
        if (!isTextArea(descriptorField)) {
            struct.field(descriptorField.name(), schema(descriptorField));
        }
    }

    return struct
        .field(FIELD_OBJECT_TYPE, Schema.OPTIONAL_STRING_SCHEMA)
        .field(FIELD_EVENT_TYPE, Schema.OPTIONAL_STRING_SCHEMA)
        .build();
}
示例3: buildProjectNode
import org.apache.kafka.connect.data.SchemaBuilder; //导入方法依赖的package包/类
/**
 * Builds a ProjectNode whose schema has one field per SELECT expression,
 * named by the expression's alias and typed by the inferred expression type.
 *
 * @param inputSchema    schema of the rows flowing into the projection
 * @param sourcePlanNode upstream plan node feeding the projection
 * @return the projection plan node
 */
private ProjectNode buildProjectNode(final Schema inputSchema, final PlanNode sourcePlanNode) {
    final ExpressionTypeManager typeManager =
        new ExpressionTypeManager(inputSchema, functionRegistry);
    SchemaBuilder projection = SchemaBuilder.struct();

    final int selectCount = analysis.getSelectExpressions().size();
    for (int index = 0; index < selectCount; index++) {
        final Expression selectExpression = analysis.getSelectExpressions().get(index);
        final String alias = analysis.getSelectExpressionAlias().get(index);
        projection = projection.field(alias, typeManager.getExpressionType(selectExpression));
    }

    return new ProjectNode(
        new PlanNodeId("Project"), sourcePlanNode, projection, analysis.getSelectExpressions());
}
示例4: createSelectValueMapperAndSchema
import org.apache.kafka.connect.data.SchemaBuilder; //导入方法依赖的package包/类
/**
 * Compiles each (alias, expression) pair into an evaluator and assembles the
 * resulting value schema alongside a SelectValueMapper over those evaluators.
 *
 * @param expressionPairList aliases paired with the expressions to evaluate
 * @return the built schema together with the mapper producing projected rows
 * @throws KsqlException if code generation fails for any expression
 */
Pair<Schema, SelectValueMapper> createSelectValueMapperAndSchema(final List<Pair<String, Expression>> expressionPairList) {
    try {
        final CodeGenRunner codeGen = new CodeGenRunner(schema, functionRegistry);
        final SchemaBuilder valueSchemaBuilder = SchemaBuilder.struct();
        final List<ExpressionMetadata> evaluators = new ArrayList<>();

        for (final Pair<String, Expression> aliasedExpression : expressionPairList) {
            final ExpressionMetadata evaluator =
                codeGen.buildCodeGenFromParseTree(aliasedExpression.getRight());
            valueSchemaBuilder.field(aliasedExpression.getLeft(), evaluator.getExpressionType());
            evaluators.add(evaluator);
        }

        return new Pair<>(
            valueSchemaBuilder.build(),
            new SelectValueMapper(genericRowValueTypeEnforcer, expressionPairList, evaluators));
    } catch (Exception e) {
        throw new KsqlException("Code generation failed for SelectValueMapper", e);
    }
}
示例5: buildDatasourceSchema
import org.apache.kafka.connect.data.SchemaBuilder; //导入方法依赖的package包/类
/**
 * Builds a Struct schema named {@code name} from a JSON array of field
 * definitions. Each element carries a "name" and a "type"; when the type is a
 * JSON array (a union), its first member is used as the effective type.
 *
 * @param name   name to assign to the resulting schema
 * @param fields JSON array of field definition nodes
 * @return the assembled schema
 */
private Schema buildDatasourceSchema(String name, ArrayNode fields) {
    final SchemaBuilder dataSourceBuilder = SchemaBuilder.struct().name(name);
    for (int index = 0; index < fields.size(); index++) {
        final String fieldName = fields.get(index).get("name").textValue();
        // Union types arrive as JSON arrays; take the first branch as the type.
        final String fieldType = fields.get(index).get("type").isArray()
            ? fields.get(index).get("type").get(0).textValue()
            : fields.get(index).get("type").textValue();
        dataSourceBuilder.field(fieldName, getKsqlType(fieldType));
    }
    return dataSourceBuilder.build();
}
示例6: getResultDatasource
import org.apache.kafka.connect.data.SchemaBuilder; //导入方法依赖的package包/类
/**
 * Builds a placeholder result stream for the sink table of a query. Every
 * single-column select item contributes a BOOLEAN-typed field named by its
 * alias.
 *
 * @param select the SELECT clause supplying the projected columns
 * @param into   the sink table the results are written into
 * @return a KsqlStream describing the result datasource
 */
private StructuredDataSource getResultDatasource(Select select, Table into) {
    SchemaBuilder streamSchema = SchemaBuilder.struct().name(into.toString());
    for (SelectItem selectItem : select.getSelectItems()) {
        if (!(selectItem instanceof SingleColumn)) {
            continue;
        }
        // NOTE(review): assumes the alias Optional is always present for a
        // SingleColumn at this point — mirrors the original unchecked get().
        final String fieldName = ((SingleColumn) selectItem).getAlias().get();
        streamSchema = streamSchema.field(fieldName, Schema.BOOLEAN_SCHEMA);
    }

    final KsqlTopic ksqlTopic =
        new KsqlTopic(into.getName().toString(), into.getName().toString(), null);
    return new KsqlStream(
        "AstBuilder-Into",
        into.getName().toString(),
        streamSchema.schema(),
        streamSchema.fields().get(0),
        null,
        ksqlTopic);
}
示例7: getOrBuildSchema
import org.apache.kafka.connect.data.SchemaBuilder; //导入方法依赖的package包/类
/**
 * Returns the cast-adjusted schema for {@code valueSchema}, building and
 * caching it on first use.
 *
 * <p>If a whole-value cast is configured, the result is simply the target
 * type with the original schema's basics copied over. Otherwise each field is
 * copied, applying any per-field cast while preserving the field's
 * optionality and (re-cast) default value.
 *
 * @param valueSchema the incoming record's value schema
 * @return the schema with casts applied
 */
private Schema getOrBuildSchema(Schema valueSchema) {
    Schema updatedSchema = schemaUpdateCache.get(valueSchema);
    if (updatedSchema != null) {
        return updatedSchema;
    }

    final SchemaBuilder builder;
    if (wholeValueCastType != null) {
        builder = SchemaUtil.copySchemaBasics(valueSchema, convertFieldType(wholeValueCastType));
    } else {
        builder = SchemaUtil.copySchemaBasics(valueSchema, SchemaBuilder.struct());
        for (Field field : valueSchema.fields()) {
            // Single lookup: use the configured cast target if present,
            // otherwise keep the field's own type.
            SchemaBuilder fieldBuilder =
                convertFieldType(casts.getOrDefault(field.name(), field.schema().type()));
            if (field.schema().isOptional()) {
                fieldBuilder.optional();
            }
            if (field.schema().defaultValue() != null) {
                // Defaults must be re-cast so they stay valid for the new type.
                fieldBuilder.defaultValue(
                    castValueToType(field.schema().defaultValue(), fieldBuilder.type()));
            }
            builder.field(field.name(), fieldBuilder.build());
        }
    }

    if (valueSchema.isOptional()) {
        builder.optional();
    }
    if (valueSchema.defaultValue() != null) {
        builder.defaultValue(castValueToType(valueSchema.defaultValue(), builder.type()));
    }

    updatedSchema = builder.build();
    schemaUpdateCache.put(valueSchema, updatedSchema);
    return updatedSchema;
}
示例8: buildUpdatedSchema
import org.apache.kafka.connect.data.SchemaBuilder; //导入方法依赖的package包/类
/**
 * Build an updated Struct Schema which flattens all nested fields into a single struct, handling cases where
 * optionality and default values of the flattened fields are affected by the optionality and default values of
 * parent/ancestor schemas (e.g. flattened field is optional because the parent schema was optional, even if the
 * schema itself is marked as required).
 * @param schema the schema to translate
 * @param fieldNamePrefix the prefix to use on field names, i.e. the delimiter-joined set of ancestor field names
 * @param newSchema the flattened schema being built
 * @param optional true if any ancestor schema is optional
 * @param defaultFromParent the default value, if any, included via the parent/ancestor schemas
 */
private void buildUpdatedSchema(Schema schema, String fieldNamePrefix, SchemaBuilder newSchema, boolean optional, Struct defaultFromParent) {
    for (Field field : schema.fields()) {
        final String fieldName = fieldName(fieldNamePrefix, field.name());
        // A field is optional if it says so itself OR any ancestor struct was optional.
        final boolean fieldIsOptional = optional || field.schema().isOptional();
        // The field's own default wins; otherwise fall back to the value the
        // parent's default struct supplies for this field, if any.
        Object fieldDefaultValue = null;
        if (field.schema().defaultValue() != null) {
            fieldDefaultValue = field.schema().defaultValue();
        } else if (defaultFromParent != null) {
            fieldDefaultValue = defaultFromParent.get(field);
        }
        switch (field.schema().type()) {
            case INT8:
            case INT16:
            case INT32:
            case INT64:
            case FLOAT32:
            case FLOAT64:
            case BOOLEAN:
            case STRING:
            case BYTES:
                // Primitive leaf: emit it directly with adjusted optionality/default.
                newSchema.field(fieldName, convertFieldSchema(field.schema(), fieldIsOptional, fieldDefaultValue));
                break;
            case STRUCT:
                // Nested struct: recurse, propagating optionality and defaults downward.
                buildUpdatedSchema(field.schema(), fieldName, newSchema, fieldIsOptional, (Struct) fieldDefaultValue);
                break;
            default:
                // Fixed copy-paste in the message: this overload handles records
                // WITH schemas (the schemaless path is handled elsewhere).
                throw new DataException("Flatten transformation does not support " + field.schema().type()
                    + " for record with schemas (for field " + fieldName + ").");
        }
    }
}
示例9: makeUpdatedSchema
import org.apache.kafka.connect.data.SchemaBuilder; //导入方法依赖的package包/类
/**
 * Builds a copy of {@code schema} that keeps only the fields accepted by
 * {@code filter}, applying {@code renamed} to each surviving field's name.
 *
 * @param schema the original value schema
 * @return the filtered/renamed schema
 */
private Schema makeUpdatedSchema(Schema schema) {
    final SchemaBuilder updated = SchemaUtil.copySchemaBasics(schema, SchemaBuilder.struct());
    for (Field candidate : schema.fields()) {
        if (!filter(candidate.name())) {
            continue; // Field is configured to be dropped.
        }
        updated.field(renamed(candidate.name()), candidate.schema());
    }
    return updated.build();
}
示例10: testOptionalFieldStruct
import org.apache.kafka.connect.data.SchemaBuilder; //导入方法依赖的package包/类
@Test
public void testOptionalFieldStruct() {
    final Flatten<SourceRecord> xform = new Flatten.Value<>();
    xform.configure(Collections.<String, String>emptyMap());

    // One-level nesting: outer struct "B" wraps a struct with a single
    // optional int32 field that is deliberately left null.
    final Schema innerSchema =
        SchemaBuilder.struct().field("opt_int32", Schema.OPTIONAL_INT32_SCHEMA).build();
    final Schema outerSchema = SchemaBuilder.struct().field("B", innerSchema).build();

    final Struct inner = new Struct(innerSchema);
    inner.put("opt_int32", null);
    final Struct outer = new Struct(outerSchema);
    outer.put("B", inner);

    final SourceRecord transformed =
        xform.apply(new SourceRecord(null, null, "topic", 0, outerSchema, outer));

    // Flattening prefixes the child field with its parent's name and
    // preserves the null value.
    assertEquals(Schema.Type.STRUCT, transformed.valueSchema().type());
    assertNull(((Struct) transformed.value()).get("B.opt_int32"));
}
示例11: testOptionalAndDefaultValuesNested
import org.apache.kafka.connect.data.SchemaBuilder; //导入方法依赖的package包/类
@Test
public void testOptionalAndDefaultValuesNested() {
    // If an entire sub-Struct is optional, all flattened fields generated from
    // its children must also be optional; a default value on the parent must
    // propagate to the flattened children as well.
    final Flatten<SourceRecord> xform = new Flatten.Value<>();
    xform.configure(Collections.<String, String>emptyMap());

    final SchemaBuilder nested = SchemaBuilder.struct().optional();
    nested.field("req_field", Schema.STRING_SCHEMA);
    nested.field("opt_field",
        SchemaBuilder.string().optional().defaultValue("child_default").build());

    // Parent-level default supplies a value for the required child field.
    // (The Struct is created against the still-unbuilt builder on purpose,
    // matching how the schema default must reference its own schema.)
    final Struct parentDefault = new Struct(nested);
    parentDefault.put("req_field", "req_default");
    nested.defaultValue(parentDefault);
    final Schema schema = nested.build();

    // Deliberately empty value — legal because the whole struct is optional.
    final SourceRecord transformed =
        xform.apply(new SourceRecord(null, null, "topic", 0, schema, new Struct(schema)));
    assertNotNull(transformed);

    final Schema flattened = transformed.valueSchema();
    assertEquals(Schema.Type.STRUCT, flattened.type());
    assertEquals(2, flattened.fields().size());

    // req_field becomes optional (inherited) and picks up the parent's default.
    assertEquals(
        SchemaBuilder.string().optional().defaultValue("req_default").build(),
        flattened.field("req_field").schema());

    // opt_field stays optional and keeps its own declared default, since the
    // parent default did not override it explicitly.
    assertEquals(
        SchemaBuilder.string().optional().defaultValue("child_default").build(),
        flattened.field("opt_field").schema());
}
示例12: addField
import org.apache.kafka.connect.data.SchemaBuilder; //导入方法依赖的package包/类
/**
 * Adds an optional field of the given Connect type to {@code builder}.
 *
 * @param builder    schema builder receiving the new field
 * @param name       name of the field to add
 * @param schemaType Connect type of the field
 */
void addField(SchemaBuilder builder, String name, Schema.Type schemaType) {
    log.trace("addField() - name = {} schemaType = {}", name, schemaType);
    final Schema fieldSchema = SchemaBuilder.type(schemaType).optional().build();
    builder.field(name, fieldSchema);
}
示例13: getAvroSerdeKsqlSchema
import org.apache.kafka.connect.data.SchemaBuilder; //导入方法依赖的package包/类
/**
 * Returns a copy of {@code schema} whose field names have every '.' replaced
 * with '_' so they are consistent with the internal column names.
 *
 * @param schema the schema whose field names should be normalized
 * @return a new Struct schema with the renamed fields
 */
public static Schema getAvroSerdeKsqlSchema(Schema schema) {
    final SchemaBuilder normalized = SchemaBuilder.struct();
    for (final Field sourceField : schema.fields()) {
        normalized.field(sourceField.name().replace(".", "_"), sourceField.schema());
    }
    return normalized.build();
}
示例14: buildChangeSetFields
import org.apache.kafka.connect.data.SchemaBuilder; //导入方法依赖的package包/类
/**
 * Build the fields for a change set kafka message.
 *
 * <p>When key publishing is enabled, key fields are added for key columns —
 * or, if the table has no key constraint, for columns that satisfy the
 * supplemental-log key requirements. Every column is also added as an
 * optional field of the CHANGE_SET sub-struct, which is published as a
 * string-keyed map under the CHANGE_DATA field.
 *
 * @param kbuilder schema builder to build key fields
 * @param vbuilder schema builder to build message fields
 */
private void buildChangeSetFields(SchemaBuilder kbuilder, SchemaBuilder vbuilder) {
    // Sub-struct builder: every field added to it is forced optional.
    final SchemaBuilder sbuilder = SchemaBuilder.struct().name("CHANGE_SET");

    for (Column column : metadata.getColumns().values()) {
        // When publishing keys add only key columns, or all columns when the
        // table has no key constraints present and the column meets the
        // suplog requirements.
        final boolean usableAsKey =
            column.isKey() || (!metadata.hasKey() && column.canUseAsSuplogKey());
        if (mode.publishKeys() && usableAsKey) {
            logger.debug("Adding column: " + column.getSafeName() + " as key");
            buildFieldFromColumn(kbuilder, column);
        }
        // Optional field for all KEY/NEW/OLD/LOB values in the map.
        buildFieldFromColumn(sbuilder, column, true);
    }

    vbuilder.field("CHANGE_DATA", SchemaBuilder.map(Schema.STRING_SCHEMA, sbuilder.build()));
}
示例15: updateChangeSetFields
import org.apache.kafka.connect.data.SchemaBuilder; //导入方法依赖的package包/类
/**
 * Update the fields for a change set kafka message and key, if needed.
 *
 * <p>Rebuilds the CHANGE_SET sub-struct by comparing the previous
 * CHANGE_DATA value schema against the current column metadata, then
 * re-attaches it as a string-keyed map under the CHANGE_DATA field.
 *
 * @param prevSchema   previous schema to use in field comparison
 * @param keyBuilder   schema builder to build key fields
 * @param valueBuilder schema builder to build message fields
 * @throws Exception if the schema fields cannot be updated
 */
private void updateChangeSetFields(
    Schema prevSchema,
    SchemaBuilder keyBuilder,
    SchemaBuilder valueBuilder
) throws Exception {
    // Sub-struct builder: every field added to it is forced optional.
    final SchemaBuilder subBuilder = SchemaBuilder.struct().name("CHANGE_SET");

    updateSchemaFields(
        prevSchema.field("CHANGE_DATA").schema().valueSchema(),
        keyBuilder,
        subBuilder,
        metadata.getColumns().values());

    valueBuilder.field(
        "CHANGE_DATA",
        SchemaBuilder.map(Schema.STRING_SCHEMA, subBuilder.build()));
}