This article collects typical usage examples of the Java method org.apache.kafka.connect.data.Schema.type. If you are unsure what Schema.type does, how to use it, or what it looks like in practice, the curated method examples below should help. You can also read further about the enclosing class, org.apache.kafka.connect.data.Schema.
Eight code examples of Schema.type are shown below, ordered by popularity.
Example 1: apply
import org.apache.kafka.connect.data.Schema; // import the class this method depends on
@Override
public R apply(R record) {
final Schema schema = operatingSchema(record);
requireSchema(schema, "updating schema metadata");
final boolean isArray = schema.type() == Schema.Type.ARRAY;
final boolean isMap = schema.type() == Schema.Type.MAP;
final Schema updatedSchema = new ConnectSchema(
schema.type(),
schema.isOptional(),
schema.defaultValue(),
schemaName != null ? schemaName : schema.name(),
schemaVersion != null ? schemaVersion : schema.version(),
schema.doc(),
schema.parameters(),
schema.fields(),
isMap ? schema.keySchema() : null,
isMap || isArray ? schema.valueSchema() : null
);
return newRecord(record, updatedSchema);
}
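The isMap/isArray guards above matter because keySchema() and valueSchema() are only meaningful for those container types. A minimal sketch with the standard Kafka Connect SchemaBuilder API (the schema name and version here are made up):
import org.apache.kafka.connect.data.SchemaBuilder;

Schema mapSchema = SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.INT32_SCHEMA)
    .name("com.example.Counts").version(1).build();
boolean isMap = mapSchema.type() == Schema.Type.MAP;     // true
Schema keySchema = isMap ? mapSchema.keySchema() : null; // Schema.STRING_SCHEMA
Schema valueSchema = mapSchema.valueSchema();            // Schema.INT32_SCHEMA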
Example 2: processValue
import org.apache.kafka.connect.data.Schema; // import the class this method depends on
private Object processValue(Schema schema, Object value) {
switch (schema.type()) {
case BOOLEAN:
case FLOAT32:
case FLOAT64:
case INT8:
case INT16:
case INT32:
case INT64:
case BYTES:
case STRING:
return value;
case MAP:
case ARRAY:
case STRUCT:
throw new DataException("Unsupported schema type: " + schema.type());
default:
throw new DataException("Unknown schema type: " + schema.type());
}
}
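Note that Schema.Type also provides isPrimitive(), which covers exactly the pass-through cases in the switch above; a condensed sketch of the same guard (not the original project's code):
import org.apache.kafka.connect.errors.DataException;

private Object processValue(Schema schema, Object value) {
    if (schema.type().isPrimitive()) {
        return value; // primitive values pass through unchanged
    }
    throw new DataException("Unsupported schema type: " + schema.type());
}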
Example 3: convert
import org.apache.kafka.connect.data.Schema; // import the class this method depends on
public static TypeInfo convert(Schema schema) {
// TODO: throw an error on recursive types
switch (schema.type()) {
case STRUCT:
return convertStruct(schema);
case ARRAY:
return convertArray(schema);
case MAP:
return convertMap(schema);
default:
return convertPrimitive(schema);
}
}
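Assuming TypeInfo here is Hive's org.apache.hadoop.hive.serde2.typeinfo.TypeInfo (as in Hive-oriented connectors), a nested schema exercises every branch of the dispatch; a sketch:
Schema nested = SchemaBuilder.struct()
    .field("tags", SchemaBuilder.array(Schema.STRING_SCHEMA).build())
    .field("counts", SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.INT64_SCHEMA).build())
    .build();
TypeInfo typeInfo = convert(nested); // convertStruct recurses through the ARRAY and MAP branches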
Example 4: fromConnectData
import org.apache.kafka.connect.data.Schema; // import the class this method depends on
@Override
public byte[] fromConnectData(String topic, Schema schema, Object value) {
if (schema != null && schema.type() != Schema.Type.BYTES)
throw new DataException("Invalid schema type for ByteArrayConverter: " + schema.type().toString());
if (value != null && !(value instanceof byte[]))
throw new DataException("ByteArrayConverter is not compatible with objects of type " + value.getClass());
return (byte[]) value;
}
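This mirrors Kafka's own org.apache.kafka.connect.converters.ByteArrayConverter: a byte[] passes through untouched, and anything else is rejected. A usage sketch (topic name is made up):
import java.nio.charset.StandardCharsets;

ByteArrayConverter converter = new ByteArrayConverter();
byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
byte[] out = converter.fromConnectData("my-topic", Schema.BYTES_SCHEMA, payload); // returns payload unchanged
// fromConnectData("my-topic", Schema.STRING_SCHEMA, "hello") would throw a DataException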
Example 5: copySchemaBasics
import org.apache.kafka.connect.data.Schema; // import the class this method depends on
private static SchemaBuilder copySchemaBasics(Schema source, SchemaBuilder target) {
if (source.isOptional()) {
target.optional();
}
if (source.defaultValue() != null && source.type() != Schema.Type.STRUCT) {
final Object preProcessedDefaultValue = preProcessValue(source.defaultValue(), source, target);
target.defaultValue(preProcessedDefaultValue);
}
return target;
}
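A sketch of the intended effect, assuming preProcessValue is the enclosing transform's own helper for converting a default value to the target schema:
Schema source = SchemaBuilder.int32().optional().defaultValue(42).build();
SchemaBuilder target = SchemaBuilder.int32();
copySchemaBasics(source, target);
// target.build() is now optional and carries the default value 42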
Example 6: build
import org.apache.kafka.connect.data.Schema; // import the class this method depends on
public MsSqlChange build(TableMetadataProvider.TableMetadata tableMetadata, ResultSet resultSet, Time time) throws SQLException {
MsSqlChange change = new MsSqlChange();
change.timestamp = time.milliseconds();
change.databaseName = tableMetadata.databaseName();
change.schemaName = tableMetadata.schemaName();
change.tableName = tableMetadata.tableName();
final long sysChangeVersion = resultSet.getLong("__metadata_sys_change_version");
final long sysChangeCreationVersion = resultSet.getLong("__metadata_sys_change_creation_version");
final String changeOperation = resultSet.getString("__metadata_sys_change_operation");
change.metadata = ImmutableMap.of(
"sys_change_operation", changeOperation,
"sys_change_creation_version", String.valueOf(sysChangeCreationVersion),
"sys_change_version", String.valueOf(sysChangeVersion)
);
switch (changeOperation) {
case "I":
change.changeType = ChangeType.INSERT;
break;
case "U":
change.changeType = ChangeType.UPDATE;
break;
case "D":
change.changeType = ChangeType.DELETE;
break;
default:
throw new UnsupportedOperationException(
String.format("Unsupported sys_change_operation of '%s'", changeOperation)
);
}
log.trace("build() - changeType = {}", change.changeType);
change.keyColumns = new ArrayList<>(tableMetadata.keyColumns().size());
change.valueColumns = new ArrayList<>(tableMetadata.columnSchemas().size());
for (Map.Entry<String, Schema> kvp : tableMetadata.columnSchemas().entrySet()) {
String columnName = kvp.getKey();
Schema schema = kvp.getValue();
Object value;
if (Schema.Type.INT8 == schema.type()) {
// SQL Server stores TINYINT as a single unsigned byte (0-255), so read it
// with getByte() rather than letting the driver widen it to a short.
value = resultSet.getByte(columnName);
} else if (Schema.Type.INT32 == schema.type() &&
Date.LOGICAL_NAME.equals(schema.name())) {
value = new java.util.Date(
resultSet.getDate(columnName, calendar).getTime()
);
} else if (Schema.Type.INT32 == schema.type() &&
org.apache.kafka.connect.data.Time.LOGICAL_NAME.equals(schema.name())) {
value = new java.util.Date(
resultSet.getTime(columnName, calendar).getTime()
);
} else {
value = resultSet.getObject(columnName);
}
log.trace("build() - columnName = '{}' value = '{}'", columnName, value);
MsSqlColumnValue columnValue = new MsSqlColumnValue(columnName, schema, value);
change.valueColumns.add(columnValue);
if (tableMetadata.keyColumns().contains(columnName)) {
change.keyColumns.add(columnValue);
}
}
return change;
}
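The paired INT32 checks work because Connect's Date and Time logical types are INT32-backed schemas distinguished only by their name:
Schema dateSchema = org.apache.kafka.connect.data.Date.SCHEMA;
boolean isLogicalDate = dateSchema.type() == Schema.Type.INT32
    && org.apache.kafka.connect.data.Date.LOGICAL_NAME.equals(dateSchema.name()); // true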
Example 7: asJsonSchema
import org.apache.kafka.connect.data.Schema; // import the class this method depends on
public ObjectNode asJsonSchema(Schema schema) {
if (schema == null)
return null;
ObjectNode cached = fromConnectSchemaCache.get(schema);
if (cached != null)
return cached;
final ObjectNode jsonSchema;
switch (schema.type()) {
case BOOLEAN:
jsonSchema = JsonSchema.BOOLEAN_SCHEMA.deepCopy();
break;
case BYTES:
jsonSchema = JsonSchema.BYTES_SCHEMA.deepCopy();
break;
case FLOAT64:
jsonSchema = JsonSchema.DOUBLE_SCHEMA.deepCopy();
break;
case FLOAT32:
jsonSchema = JsonSchema.FLOAT_SCHEMA.deepCopy();
break;
case INT8:
jsonSchema = JsonSchema.INT8_SCHEMA.deepCopy();
break;
case INT16:
jsonSchema = JsonSchema.INT16_SCHEMA.deepCopy();
break;
case INT32:
jsonSchema = JsonSchema.INT32_SCHEMA.deepCopy();
break;
case INT64:
jsonSchema = JsonSchema.INT64_SCHEMA.deepCopy();
break;
case STRING:
jsonSchema = JsonSchema.STRING_SCHEMA.deepCopy();
break;
case ARRAY:
jsonSchema = JsonNodeFactory.instance.objectNode().put(JsonSchema.SCHEMA_TYPE_FIELD_NAME, JsonSchema.ARRAY_TYPE_NAME);
jsonSchema.set(JsonSchema.ARRAY_ITEMS_FIELD_NAME, asJsonSchema(schema.valueSchema()));
break;
case MAP:
jsonSchema = JsonNodeFactory.instance.objectNode().put(JsonSchema.SCHEMA_TYPE_FIELD_NAME, JsonSchema.MAP_TYPE_NAME);
jsonSchema.set(JsonSchema.MAP_KEY_FIELD_NAME, asJsonSchema(schema.keySchema()));
jsonSchema.set(JsonSchema.MAP_VALUE_FIELD_NAME, asJsonSchema(schema.valueSchema()));
break;
case STRUCT:
jsonSchema = JsonNodeFactory.instance.objectNode().put(JsonSchema.SCHEMA_TYPE_FIELD_NAME, JsonSchema.STRUCT_TYPE_NAME);
ArrayNode fields = JsonNodeFactory.instance.arrayNode();
for (Field field : schema.fields()) {
ObjectNode fieldJsonSchema = asJsonSchema(field.schema()).deepCopy();
fieldJsonSchema.put(JsonSchema.STRUCT_FIELD_NAME_FIELD_NAME, field.name());
fields.add(fieldJsonSchema);
}
jsonSchema.set(JsonSchema.STRUCT_FIELDS_FIELD_NAME, fields);
break;
default:
throw new DataException("Couldn't translate unsupported schema type " + schema + ".");
}
jsonSchema.put(JsonSchema.SCHEMA_OPTIONAL_FIELD_NAME, schema.isOptional());
if (schema.name() != null)
jsonSchema.put(JsonSchema.SCHEMA_NAME_FIELD_NAME, schema.name());
if (schema.version() != null)
jsonSchema.put(JsonSchema.SCHEMA_VERSION_FIELD_NAME, schema.version());
if (schema.doc() != null)
jsonSchema.put(JsonSchema.SCHEMA_DOC_FIELD_NAME, schema.doc());
if (schema.parameters() != null) {
ObjectNode jsonSchemaParams = JsonNodeFactory.instance.objectNode();
for (Map.Entry<String, String> prop : schema.parameters().entrySet())
jsonSchemaParams.put(prop.getKey(), prop.getValue());
jsonSchema.set(JsonSchema.SCHEMA_PARAMETERS_FIELD_NAME, jsonSchemaParams);
}
if (schema.defaultValue() != null)
jsonSchema.set(JsonSchema.SCHEMA_DEFAULT_FIELD_NAME, convertToJson(schema, schema.defaultValue()));
fromConnectSchemaCache.put(schema, jsonSchema);
return jsonSchema;
}
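For a simple schema the resulting envelope is compact. Assuming the JsonSchema constants above carry Kafka's usual field names ("type", "optional", "name", "version"), the output looks roughly like this:
Schema s = SchemaBuilder.string().optional().name("com.example.Name").version(2).build();
ObjectNode node = asJsonSchema(s);
// node ~ {"type":"string","optional":true,"name":"com.example.Name","version":2}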
Example 8: convertToConnect
import org.apache.kafka.connect.data.Schema; // import the class this method depends on
private static Object convertToConnect(Schema schema, JsonNode jsonValue) {
final Schema.Type schemaType;
if (schema != null) {
schemaType = schema.type();
if (jsonValue.isNull()) {
if (schema.defaultValue() != null)
return schema.defaultValue(); // any logical type conversions should already have been applied
if (schema.isOptional())
return null;
throw new DataException("Invalid null value for required " + schemaType + " field");
}
} else {
switch (jsonValue.getNodeType()) {
case NULL:
// Special case: with no schema, a JSON null simply maps to a Java null
return null;
case BOOLEAN:
schemaType = Schema.Type.BOOLEAN;
break;
case NUMBER:
if (jsonValue.isIntegralNumber())
schemaType = Schema.Type.INT64;
else
schemaType = Schema.Type.FLOAT64;
break;
case ARRAY:
schemaType = Schema.Type.ARRAY;
break;
case OBJECT:
schemaType = Schema.Type.MAP;
break;
case STRING:
schemaType = Schema.Type.STRING;
break;
case BINARY:
case MISSING:
case POJO:
default:
schemaType = null;
break;
}
}
final JsonToConnectTypeConverter typeConverter = TO_CONNECT_CONVERTERS.get(schemaType);
if (typeConverter == null)
throw new DataException("Unknown schema type: " + String.valueOf(schemaType));
Object converted = typeConverter.convert(schema, jsonValue);
if (schema != null && schema.name() != null) {
LogicalTypeConverter logicalConverter = TO_CONNECT_LOGICAL_CONVERTERS.get(schema.name());
if (logicalConverter != null)
converted = logicalConverter.convert(schema, converted);
}
return converted;
}
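With a null schema, the type is inferred from the JSON node itself, so an integral number lands on INT64. A quick sketch using Jackson's node factory:
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;

JsonNode intNode = JsonNodeFactory.instance.numberNode(42);
Object converted = convertToConnect(null, intNode); // inferred as Schema.Type.INT64, returns 42L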