This page collects typical usage examples of the Java method org.apache.kafka.connect.data.Schema.fields. If you are wondering what Schema.fields does, how to call it, or where to find real-world examples, the curated snippets below may help. You can also explore further usage examples of the enclosing class, org.apache.kafka.connect.data.Schema.
Below are 15 code examples of the Schema.fields method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
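Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing what Schema.fields() returns: the ordered list of Field entries of a STRUCT schema, each exposing its name, index, and nested schema. The class and field names are illustrative only.

import org.apache.kafka.connect.data.Field;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;

public class SchemaFieldsDemo {
    public static void main(String[] args) {
        // Build a simple STRUCT schema with two fields.
        Schema schema = SchemaBuilder.struct().name("com.example.User")
            .field("name", Schema.STRING_SCHEMA)
            .field("age", Schema.OPTIONAL_INT32_SCHEMA)
            .build();

        // Populate a Struct that conforms to the schema.
        Struct user = new Struct(schema).put("name", "alice").put("age", 30);

        // Schema.fields() yields the fields in declaration order.
        for (Field field : schema.fields()) {
            System.out.printf("%d: %s (%s) = %s%n",
                field.index(), field.name(), field.schema().type(), user.get(field));
        }
    }
}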
Example 1: convert
import org.apache.kafka.connect.data.Schema; // import the package/class the method depends on
@Override
public Object convert(Schema schema, JsonNode value) {
    if (!value.isObject())
        throw new DataException("Structs should be encoded as JSON objects, but found " + value.getNodeType());
    // We only have ISchema here but need Schema, so we need to materialize the actual schema. Using ISchema
    // avoids having to materialize the schema for non-Struct types but it cannot be avoided for Structs since
    // they require a schema to be provided at construction. However, the schema is only a SchemaBuilder during
    // translation of schemas to JSON; during the more common translation of data to JSON, the call to schema.schema()
    // just returns the schema Object and has no overhead.
    Struct result = new Struct(schema.schema());
    for (Field field : schema.fields())
        result.put(field, convertToConnect(field.schema(), value.get(field.name())));
    return result;
}
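The convertToConnect helper used above is not part of this snippet. As an illustration only, here is a simplified stand-in that applies the same per-field population pattern to a plain Map instead of a Jackson JsonNode; the class and method names are hypothetical.

import java.util.Map;
import org.apache.kafka.connect.data.Field;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.errors.DataException;

public final class MapToStructExample {
    // Hypothetical helper: populate a Struct field-by-field from a Map,
    // mirroring the loop over schema.fields() in the converter above.
    public static Struct toStruct(Schema schema, Map<String, Object> value) {
        if (schema.type() != Schema.Type.STRUCT)
            throw new DataException("Expected a STRUCT schema but found " + schema.type());
        Struct result = new Struct(schema);
        for (Field field : schema.fields()) {
            // Struct.put() validates each value against field.schema().
            result.put(field, value.get(field.name()));
        }
        return result;
    }
}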
Example 2: makeUpdatedSchema
import org.apache.kafka.connect.data.Schema; // import the package/class the method depends on
private Schema makeUpdatedSchema(Schema schema) {
    final SchemaBuilder builder = SchemaUtil.copySchemaBasics(schema, SchemaBuilder.struct());
    for (Field field : schema.fields()) {
        builder.field(field.name(), field.schema());
    }
    if (topicField != null) {
        builder.field(topicField.name, topicField.optional ? Schema.OPTIONAL_STRING_SCHEMA : Schema.STRING_SCHEMA);
    }
    if (partitionField != null) {
        builder.field(partitionField.name, partitionField.optional ? Schema.OPTIONAL_INT32_SCHEMA : Schema.INT32_SCHEMA);
    }
    if (offsetField != null) {
        builder.field(offsetField.name, offsetField.optional ? Schema.OPTIONAL_INT64_SCHEMA : Schema.INT64_SCHEMA);
    }
    if (timestampField != null) {
        builder.field(timestampField.name, timestampField.optional ? OPTIONAL_TIMESTAMP_SCHEMA : Timestamp.SCHEMA);
    }
    if (staticField != null) {
        builder.field(staticField.name, staticField.optional ? Schema.OPTIONAL_STRING_SCHEMA : Schema.STRING_SCHEMA);
    }
    return builder.build();
}
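SchemaUtil.copySchemaBasics (from Kafka Connect's transforms utilities) seeds the new builder with the metadata of the source schema before the fields are re-added. A rough manual equivalent is sketched below; it copies name, version, and doc, though the real helper may carry over additional metadata such as schema parameters.

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;

final class SchemaBasics {
    // Rough manual equivalent of SchemaUtil.copySchemaBasics(schema, SchemaBuilder.struct()).
    static SchemaBuilder copyBasics(Schema source) {
        SchemaBuilder builder = SchemaBuilder.struct();
        if (source.name() != null) builder.name(source.name());
        if (source.version() != null) builder.version(source.version());
        if (source.doc() != null) builder.doc(source.doc());
        return builder;
    }
}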
Example 3: applyWithSchema
import org.apache.kafka.connect.data.Schema; // import the package/class the method depends on
private R applyWithSchema(R record) {
    final Struct value = requireStruct(operatingValue(record), PURPOSE);

    Schema updatedSchema = schemaUpdateCache.get(value.schema());
    if (updatedSchema == null) {
        updatedSchema = makeUpdatedSchema(value.schema());
        schemaUpdateCache.put(value.schema(), updatedSchema);
    }

    final Struct updatedValue = new Struct(updatedSchema);
    for (Field field : updatedSchema.fields()) {
        final Object fieldValue = value.get(reverseRenamed(field.name()));
        updatedValue.put(field.name(), fieldValue);
    }
    return newRecord(record, updatedSchema, updatedValue);
}
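The schemaUpdateCache seen above is declared elsewhere in the transformation. In Kafka's bundled transformations it is typically a small LRU cache keyed by the incoming value schema, so makeUpdatedSchema() runs once per distinct schema rather than once per record. A sketch of that wiring, assuming a cache size of 16:

import org.apache.kafka.common.cache.Cache;
import org.apache.kafka.common.cache.LRUCache;
import org.apache.kafka.common.cache.SynchronizedCache;
import org.apache.kafka.connect.data.Schema;

class SchemaCacheExample {
    private Cache<Schema, Schema> schemaUpdateCache;

    void configure() {
        // Wrap an LRU cache in a synchronized view, as Connect SMTs commonly do.
        schemaUpdateCache = new SynchronizedCache<>(new LRUCache<Schema, Schema>(16));
    }
}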
Example 4: convertSchema
import org.apache.kafka.connect.data.Schema; // import the package/class the method depends on
public static List<FieldSchema> convertSchema(Schema schema) {
    List<FieldSchema> columns = new ArrayList<>();
    if (Schema.Type.STRUCT.equals(schema.type())) {
        for (Field field: schema.fields()) {
            columns.add(new FieldSchema(
                field.name(), convert(field.schema()).getTypeName(), field.schema().doc()));
        }
    }
    return columns;
}
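The convert() helper is not shown here; the project delegates to Hive's TypeInfo classes to produce the column type name. Purely as an illustration of the idea, below is a hypothetical, simplified mapping from primitive Connect types to Hive type-name strings; it is not the project's actual converter.

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.errors.DataException;

final class HiveTypeNames {
    // Hypothetical stand-in for convert(field.schema()).getTypeName():
    // map primitive Connect types to Hive type-name strings.
    static String typeName(Schema schema) {
        switch (schema.type()) {
            case BOOLEAN: return "boolean";
            case INT8:    return "tinyint";
            case INT16:   return "smallint";
            case INT32:   return "int";
            case INT64:   return "bigint";
            case FLOAT32: return "float";
            case FLOAT64: return "double";
            case STRING:  return "string";
            case BYTES:   return "binary";
            default:
                throw new DataException("Unsupported type: " + schema.type());
        }
    }
}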
Example 5: convertStruct
import org.apache.kafka.connect.data.Schema; // import the package/class the method depends on
public static TypeInfo convertStruct(Schema schema) {
    final List<Field> fields = schema.fields();
    final List<String> names = new ArrayList<>(fields.size());
    final List<TypeInfo> types = new ArrayList<>(fields.size());
    for (Field field : fields) {
        names.add(field.name());
        types.add(convert(field.schema()));
    }
    return TypeInfoFactory.getStructTypeInfo(names, types);
}
Example 6: testCreateTable
import org.apache.kafka.connect.data.Schema; // import the package/class the method depends on
@Test
public void testCreateTable() throws Exception {
    prepareData(TOPIC, PARTITION);
    Partitioner partitioner = HiveTestUtils.getPartitioner();
    Schema schema = createSchema();
    hive.createTable(hiveDatabase, TOPIC, schema, partitioner);
    String location = "partition=" + String.valueOf(PARTITION);
    hiveMetaStore.addPartition(hiveDatabase, TOPIC, location);

    List<String> expectedColumnNames = new ArrayList<>();
    for (Field field: schema.fields()) {
        expectedColumnNames.add(field.name());
    }
    Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC);
    List<String> actualColumnNames = new ArrayList<>();
    for (FieldSchema column: table.getSd().getCols()) {
        actualColumnNames.add(column.getName());
    }
    assertEquals(expectedColumnNames, actualColumnNames);

    List<FieldSchema> partitionCols = table.getPartitionKeys();
    assertEquals(1, partitionCols.size());
    assertEquals("partition", partitionCols.get(0).getName());

    String[] expectedResult = {"true", "12", "12", "12.2", "12.2", "12"};
    String result = HiveTestUtils.runHive(hiveExec, "SELECT * FROM " + TOPIC);
    String[] rows = result.split("\n");
    // Only 6 of the 7 records should have been delivered due to flush_size = 3
    assertEquals(6, rows.length);
    for (String row: rows) {
        String[] parts = HiveTestUtils.parseOutput(row);
        for (int j = 0; j < expectedResult.length; ++j) {
            assertEquals(expectedResult[j], parts[j]);
        }
    }
}
Example 7: testAlterSchema
import org.apache.kafka.connect.data.Schema; // import the package/class the method depends on
@Test
public void testAlterSchema() throws Exception {
    prepareData(TOPIC, PARTITION);
    Partitioner partitioner = HiveTestUtils.getPartitioner();
    Schema schema = createSchema();
    hive.createTable(hiveDatabase, TOPIC, schema, partitioner);
    String location = "partition=" + String.valueOf(PARTITION);
    hiveMetaStore.addPartition(hiveDatabase, TOPIC, location);

    List<String> expectedColumnNames = new ArrayList<>();
    for (Field field: schema.fields()) {
        expectedColumnNames.add(field.name());
    }
    Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC);
    List<String> actualColumnNames = new ArrayList<>();
    for (FieldSchema column: table.getSd().getCols()) {
        actualColumnNames.add(column.getName());
    }
    assertEquals(expectedColumnNames, actualColumnNames);

    Schema newSchema = createNewSchema();
    hive.alterSchema(hiveDatabase, TOPIC, newSchema);

    String[] expectedResult = {"true", "12", "12", "12.2", "12.2", "abc", "12"};
    String result = HiveTestUtils.runHive(hiveExec, "SELECT * from " + TOPIC);
    String[] rows = result.split("\n");
    // Only 6 of the 7 records should have been delivered due to flush_size = 3
    assertEquals(6, rows.length);
    for (String row: rows) {
        String[] parts = HiveTestUtils.parseOutput(row);
        for (int j = 0; j < expectedResult.length; ++j) {
            assertEquals(expectedResult[j], parts[j]);
        }
    }
}
Example 8: testAlterSchema
import org.apache.kafka.connect.data.Schema; // import the package/class the method depends on
@Test
public void testAlterSchema() throws Exception {
    prepareData(TOPIC, PARTITION);
    Partitioner partitioner = HiveTestUtils.getPartitioner();
    Schema schema = createSchema();
    hive.createTable(hiveDatabase, TOPIC, schema, partitioner);
    String location = "partition=" + String.valueOf(PARTITION);
    hiveMetaStore.addPartition(hiveDatabase, TOPIC, location);

    List<String> expectedColumnNames = new ArrayList<>();
    for (Field field: schema.fields()) {
        expectedColumnNames.add(field.name());
    }
    Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC);
    List<String> actualColumnNames = new ArrayList<>();
    for (FieldSchema column: table.getSd().getCols()) {
        actualColumnNames.add(column.getName());
    }
    assertEquals(expectedColumnNames, actualColumnNames);

    Schema newSchema = createNewSchema();
    hive.alterSchema(hiveDatabase, TOPIC, newSchema);

    String[] expectedResult = {"true", "12", "12", "12.2", "12.2", "NULL", "12"};
    String result = HiveTestUtils.runHive(hiveExec, "SELECT * from " + TOPIC);
    String[] rows = result.split("\n");
    // Only 6 of the 7 records should have been delivered due to flush_size = 3
    assertEquals(6, rows.length);
    for (String row: rows) {
        String[] parts = HiveTestUtils.parseOutput(row);
        for (int j = 0; j < expectedResult.length; ++j) {
            assertEquals(expectedResult[j], parts[j]);
        }
    }
}
Example 9: buildUpdatedSchema
import org.apache.kafka.connect.data.Schema; // import the package/class the method depends on
/**
 * Build an updated Struct Schema which flattens all nested fields into a single struct, handling cases where
 * optionality and default values of the flattened fields are affected by the optionality and default values of
 * parent/ancestor schemas (e.g. flattened field is optional because the parent schema was optional, even if the
 * schema itself is marked as required).
 *
 * @param schema the schema to translate
 * @param fieldNamePrefix the prefix to use on field names, i.e. the delimiter-joined set of ancestor field names
 * @param newSchema the flattened schema being built
 * @param optional true if any ancestor schema is optional
 * @param defaultFromParent the default value, if any, included via the parent/ancestor schemas
 */
private void buildUpdatedSchema(Schema schema, String fieldNamePrefix, SchemaBuilder newSchema, boolean optional, Struct defaultFromParent) {
    for (Field field : schema.fields()) {
        final String fieldName = fieldName(fieldNamePrefix, field.name());
        final boolean fieldIsOptional = optional || field.schema().isOptional();

        Object fieldDefaultValue = null;
        if (field.schema().defaultValue() != null) {
            fieldDefaultValue = field.schema().defaultValue();
        } else if (defaultFromParent != null) {
            fieldDefaultValue = defaultFromParent.get(field);
        }

        switch (field.schema().type()) {
            case INT8:
            case INT16:
            case INT32:
            case INT64:
            case FLOAT32:
            case FLOAT64:
            case BOOLEAN:
            case STRING:
            case BYTES:
                newSchema.field(fieldName, convertFieldSchema(field.schema(), fieldIsOptional, fieldDefaultValue));
                break;
            case STRUCT:
                buildUpdatedSchema(field.schema(), fieldName, newSchema, fieldIsOptional, (Struct) fieldDefaultValue);
                break;
            default:
                throw new DataException("Flatten transformation does not support " + field.schema().type()
                    + " for record without schemas (for field " + fieldName + ").");
        }
    }
}
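The convertFieldSchema(...) helper called for primitive fields is not shown above. A sketch of what such a helper could look like follows: it rebuilds the field's schema, forcing it optional when any ancestor was optional and pushing down a default value inherited from the parent Struct. This is an assumption about its behavior, not the project's actual code.

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;

final class FieldSchemaConversion {
    // Hypothetical convertFieldSchema: copy the original schema's basics, then apply
    // the optionality and default value propagated from ancestor schemas.
    static Schema convertFieldSchema(Schema orig, boolean optional, Object defaultFromParent) {
        SchemaBuilder builder = SchemaBuilder.type(orig.type());
        if (orig.name() != null) builder.name(orig.name());
        if (orig.version() != null) builder.version(orig.version());
        if (orig.doc() != null) builder.doc(orig.doc());
        if (optional) builder.optional();
        if (defaultFromParent != null) builder.defaultValue(defaultFromParent);
        return builder.build();
    }
}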
Example 10: makeUpdatedSchema
import org.apache.kafka.connect.data.Schema; // import the package/class the method depends on
private Schema makeUpdatedSchema(Schema schema) {
    final SchemaBuilder builder = SchemaUtil.copySchemaBasics(schema, SchemaBuilder.struct());
    for (Field field : schema.fields()) {
        if (filter(field.name())) {
            builder.field(renamed(field.name()), field.schema());
        }
    }
    return builder.build();
}
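The filter(...) and renamed(...) helpers belong to the surrounding transformation (this is the field include/rename pattern seen in ReplaceField-style SMTs). As a hypothetical illustration only, they could be backed by a whitelist set and a rename map along these lines:

import java.util.Map;
import java.util.Set;

final class FieldSelection {
    // Hypothetical helpers standing in for filter(...) and renamed(...) above.
    private final Set<String> includedFields;   // empty set = keep every field
    private final Map<String, String> renames;  // old name -> new name

    FieldSelection(Set<String> includedFields, Map<String, String> renames) {
        this.includedFields = includedFields;
        this.renames = renames;
    }

    boolean filter(String fieldName) {
        return includedFields.isEmpty() || includedFields.contains(fieldName);
    }

    String renamed(String fieldName) {
        return renames.getOrDefault(fieldName, fieldName);
    }
}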
Example 11: assertMatchingSchema
import org.apache.kafka.connect.data.Schema; // import the package/class the method depends on
protected void assertMatchingSchema(Struct value, Schema schema) {
    assertSame(schema, value.schema());
    assertEquals(schema.name(), value.schema().name());
    for (Field field : schema.fields()) {
        String fieldName = field.name();
        assertEquals(schema.field(fieldName).name(), value.schema().field(fieldName).name());
        assertEquals(schema.field(fieldName).index(), value.schema().field(fieldName).index());
        assertSame(schema.field(fieldName).schema(), value.schema().field(fieldName).schema());
    }
}
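Note that the first assertion is assertSame, so the Struct must be built from the exact same Schema instance, not merely an equal one. A brief illustration of the passing case, with illustrative names:

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;

class MatchingSchemaNote {
    static void demo() {
        Schema schema = SchemaBuilder.struct().field("id", Schema.INT64_SCHEMA).build();
        Struct value = new Struct(schema).put("id", 1L);
        // assertMatchingSchema(value, schema) passes here because value.schema() == schema.
        // An equal-but-distinct Schema instance would fail the assertSame check.
    }
}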
Example 12: testSyncWithHiveAvro
import org.apache.kafka.connect.data.Schema; // import the package/class the method depends on
@Test
public void testSyncWithHiveAvro() throws Exception {
    DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData);
    hdfsWriter.recover(TOPIC_PARTITION);

    String key = "key";
    Schema schema = createSchema();
    Struct record = createRecord(schema);
    Collection<SinkRecord> sinkRecords = new ArrayList<>();
    for (long offset = 0; offset < 7; offset++) {
        SinkRecord sinkRecord =
            new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset);
        sinkRecords.add(sinkRecord);
    }
    hdfsWriter.write(sinkRecords);
    hdfsWriter.close(assignment);
    hdfsWriter.stop();

    Map<String, String> props = createProps();
    props.put(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG, "true");
    HdfsSinkConnectorConfig config = new HdfsSinkConnectorConfig(props);
    hdfsWriter = new DataWriter(config, context, avroData);
    hdfsWriter.syncWithHive();

    List<String> expectedColumnNames = new ArrayList<>();
    for (Field field: schema.fields()) {
        expectedColumnNames.add(field.name());
    }
    Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC);
    List<String> actualColumnNames = new ArrayList<>();
    for (FieldSchema column: table.getSd().getCols()) {
        actualColumnNames.add(column.getName());
    }
    assertEquals(expectedColumnNames, actualColumnNames);

    List<String> expectedPartitions = new ArrayList<>();
    String directory = TOPIC + "/" + "partition=" + String.valueOf(PARTITION);
    expectedPartitions.add(FileUtils.directoryName(url, topicsDir, directory));
    List<String> partitions = hiveMetaStore.listPartitions(hiveDatabase, TOPIC, (short)-1);
    assertEquals(expectedPartitions, partitions);

    hdfsWriter.close(assignment);
    hdfsWriter.stop();
}
Example 13: testHiveIntegrationAvro
import org.apache.kafka.connect.data.Schema; // import the package/class the method depends on
@Test
public void testHiveIntegrationAvro() throws Exception {
    Map<String, String> props = createProps();
    props.put(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG, "true");
    HdfsSinkConnectorConfig config = new HdfsSinkConnectorConfig(props);
    DataWriter hdfsWriter = new DataWriter(config, context, avroData);
    hdfsWriter.recover(TOPIC_PARTITION);

    String key = "key";
    Schema schema = createSchema();
    Struct record = createRecord(schema);
    Collection<SinkRecord> sinkRecords = new ArrayList<>();
    for (long offset = 0; offset < 7; offset++) {
        SinkRecord sinkRecord =
            new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset);
        sinkRecords.add(sinkRecord);
    }
    hdfsWriter.write(sinkRecords);
    hdfsWriter.close(assignment);
    hdfsWriter.stop();

    Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC);
    List<String> expectedColumnNames = new ArrayList<>();
    for (Field field: schema.fields()) {
        expectedColumnNames.add(field.name());
    }
    List<String> actualColumnNames = new ArrayList<>();
    for (FieldSchema column: table.getSd().getCols()) {
        actualColumnNames.add(column.getName());
    }
    assertEquals(expectedColumnNames, actualColumnNames);

    List<String> expectedPartitions = new ArrayList<>();
    String directory = TOPIC + "/" + "partition=" + String.valueOf(PARTITION);
    expectedPartitions.add(FileUtils.directoryName(url, topicsDir, directory));
    List<String> partitions = hiveMetaStore.listPartitions(hiveDatabase, TOPIC, (short)-1);
    assertEquals(expectedPartitions, partitions);
}
Example 14: testHiveIntegrationTopicWithDotsAvro
import org.apache.kafka.connect.data.Schema; // import the package/class the method depends on
@Test
public void testHiveIntegrationTopicWithDotsAvro() throws Exception {
    assignment.add(TOPIC_WITH_DOTS_PARTITION);

    Map<String, String> props = createProps();
    props.put(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG, "true");
    HdfsSinkConnectorConfig config = new HdfsSinkConnectorConfig(props);
    DataWriter hdfsWriter = new DataWriter(config, context, avroData);
    hdfsWriter.recover(TOPIC_WITH_DOTS_PARTITION);

    String key = "key";
    Schema schema = createSchema();
    Struct record = createRecord(schema);
    Collection<SinkRecord> sinkRecords = new ArrayList<>();
    for (long offset = 0; offset < 7; offset++) {
        SinkRecord sinkRecord =
            new SinkRecord(TOPIC_WITH_DOTS, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset);
        sinkRecords.add(sinkRecord);
    }
    hdfsWriter.write(sinkRecords);
    hdfsWriter.close(assignment);
    hdfsWriter.stop();

    Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC_WITH_DOTS);
    List<String> expectedColumnNames = new ArrayList<>();
    for (Field field: schema.fields()) {
        expectedColumnNames.add(field.name());
    }
    List<String> actualColumnNames = new ArrayList<>();
    for (FieldSchema column: table.getSd().getCols()) {
        actualColumnNames.add(column.getName());
    }
    assertEquals(expectedColumnNames, actualColumnNames);

    List<String> expectedPartitions = new ArrayList<>();
    String directory = TOPIC_WITH_DOTS + "/" + "partition=" + String.valueOf(PARTITION);
    expectedPartitions.add(FileUtils.directoryName(url, topicsDir, directory));
    List<String> partitions = hiveMetaStore.listPartitions(hiveDatabase, TOPIC_WITH_DOTS, (short)-1);
    assertEquals(expectedPartitions, partitions);
}
Example 15: testHiveIntegrationFieldPartitionerAvro
import org.apache.kafka.connect.data.Schema; // import the package/class the method depends on
@Test
public void testHiveIntegrationFieldPartitionerAvro() throws Exception {
    Map<String, String> props = createProps();
    props.put(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG, "true");
    props.put(HdfsSinkConnectorConfig.PARTITIONER_CLASS_CONFIG, FieldPartitioner.class.getName());
    props.put(HdfsSinkConnectorConfig.PARTITION_FIELD_NAME_CONFIG, "int");
    HdfsSinkConnectorConfig config = new HdfsSinkConnectorConfig(props);
    DataWriter hdfsWriter = new DataWriter(config, context, avroData);

    String key = "key";
    Schema schema = createSchema();
    Struct[] records = createRecords(schema);
    ArrayList<SinkRecord> sinkRecords = new ArrayList<>();
    long offset = 0;
    for (Struct record : records) {
        for (long count = 0; count < 3; count++) {
            SinkRecord sinkRecord = new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record,
                offset + count);
            sinkRecords.add(sinkRecord);
        }
        offset = offset + 3;
    }
    hdfsWriter.write(sinkRecords);
    hdfsWriter.close(assignment);
    hdfsWriter.stop();

    Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC);
    List<String> expectedColumnNames = new ArrayList<>();
    for (Field field: schema.fields()) {
        expectedColumnNames.add(field.name());
    }
    List<String> actualColumnNames = new ArrayList<>();
    for (FieldSchema column: table.getSd().getCols()) {
        actualColumnNames.add(column.getName());
    }
    assertEquals(expectedColumnNames, actualColumnNames);

    String partitionFieldName = config.getString(HdfsSinkConnectorConfig.PARTITION_FIELD_NAME_CONFIG);
    String directory1 = TOPIC + "/" + partitionFieldName + "=" + String.valueOf(16);
    String directory2 = TOPIC + "/" + partitionFieldName + "=" + String.valueOf(17);
    String directory3 = TOPIC + "/" + partitionFieldName + "=" + String.valueOf(18);
    List<String> expectedPartitions = new ArrayList<>();
    expectedPartitions.add(FileUtils.directoryName(url, topicsDir, directory1));
    expectedPartitions.add(FileUtils.directoryName(url, topicsDir, directory2));
    expectedPartitions.add(FileUtils.directoryName(url, topicsDir, directory3));
    List<String> partitions = hiveMetaStore.listPartitions(hiveDatabase, TOPIC, (short)-1);
    assertEquals(expectedPartitions, partitions);

    ArrayList<String[]> expectedResult = new ArrayList<>();
    for (int i = 16; i <= 18; ++i) {
        String[] part = {"true", String.valueOf(i), "12", "12.2", "12.2"};
        for (int j = 0; j < 3; ++j) {
            expectedResult.add(part);
        }
    }
    String result = HiveTestUtils.runHive(hiveExec, "SELECT * FROM " + TOPIC);
    String[] rows = result.split("\n");
    assertEquals(9, rows.length);
    for (int i = 0; i < rows.length; ++i) {
        String[] parts = HiveTestUtils.parseOutput(rows[i]);
        for (int j = 0; j < expectedResult.get(i).length; ++j) {
            assertEquals(expectedResult.get(i)[j], parts[j]);
        }
    }
}