This article collects typical usage examples of the Java class org.apache.kafka.connect.data.Struct. If you are wondering what the Struct class does, how to use it, or want to see it in real code, the curated examples below may help.
The Struct class belongs to the org.apache.kafka.connect.data package. A total of 15 code examples of the class are shown below, sorted by popularity by default.
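Before the examples, here is a minimal, self-contained sketch of the core Struct API (the schema name and field values are hypothetical, chosen only for illustration): a Struct is always built against a Schema, fields are set with put(), and validate() checks that every required field has a value.
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;

// Define a schema with two fields, then build a Struct that conforms to it.
Schema personSchema = SchemaBuilder.struct()
        .name("com.example.Person")              // hypothetical schema name
        .field("name", Schema.STRING_SCHEMA)
        .field("age", Schema.INT32_SCHEMA)
        .build();

Struct person = new Struct(personSchema)
        .put("name", "Alice")
        .put("age", 30);

person.validate();                               // throws DataException if a required field is missing
String name = person.getString("name");         // typed accessors mirror the schema field types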
Example 1: delete
import org.apache.kafka.connect.data.Struct; // import the required package/class

@Test
public void delete() {
    final Schema keySchema = SchemaBuilder.struct()
            .field("id", Schema.STRING_SCHEMA)
            .build();
    final Struct key = new Struct(keySchema)
            .put("id", "asdf");
    // A SinkRecord with a null value schema and null value is the conventional tombstone for a delete.
    final SinkRecord record = new SinkRecord(
            "testing",
            1,
            keySchema,
            key,
            null,
            null,
            123L
    );
    this.task.put(Arrays.asList(record));
}
Example 2: readerWithProjection
import org.apache.kafka.connect.data.Struct; // import the required package/class

@Test(expected = DataException.class)
public void readerWithProjection() throws Throwable {
    Map<String, Object> cfg = new HashMap<String, Object>() {{
        put(ParquetFileReader.FILE_READER_PARQUET_PROJECTION, projectionSchema.toString());
        put(AgnosticFileReader.FILE_READER_AGNOSTIC_EXTENSIONS_PARQUET, getFileExtension());
    }};
    reader = getReader(FileSystem.newInstance(fsUri, new Configuration()), dataFile, cfg);
    while (reader.hasNext()) {
        Struct record = reader.next();
        assertNotNull(record.schema().field(FIELD_INDEX));
        assertNotNull(record.schema().field(FIELD_NAME));
        // The projected schema omits FIELD_SURNAME, so it must not appear in the record schema.
        assertNull(record.schema().field(FIELD_SURNAME));
    }
    reader = getReader(FileSystem.newInstance(fsUri, new Configuration()), dataFile, cfg);
    // Reading all fields back should trigger the DataException declared in @Test(expected = ...).
    readAllData();
}
Example 3: readAllDataWithoutHeader
import org.apache.kafka.connect.data.Struct; // import the required package/class

@Test
public void readAllDataWithoutHeader() throws Throwable {
    Path file = createDataFile(false);
    FileReader reader = getReader(fs, file, new HashMap<String, Object>() {{
        put(DelimitedTextFileReader.FILE_READER_DELIMITED_TOKEN, ",");
        put(DelimitedTextFileReader.FILE_READER_DELIMITED_HEADER, "false");
        put(AgnosticFileReader.FILE_READER_AGNOSTIC_EXTENSIONS_DELIMITED, getFileExtension());
    }});
    assertTrue(reader.hasNext());
    int recordCount = 0;
    while (reader.hasNext()) {
        Struct record = reader.next();
        checkData(record, recordCount);
        recordCount++;
    }
    assertEquals("The number of records in the file does not match", NUM_RECORDS, recordCount);
}
Example 4: simpleKeyValue
import org.apache.kafka.connect.data.Struct; // import the required package/class

@Test
public void simpleKeyValue() {
    Map<String, String> map = new HashMap<>();
    map.put(FluentdSourceConnectorConfig.FLUENTD_SCHEMAS_ENABLE, "true");
    FluentdSourceConnectorConfig config = new FluentdSourceConnectorConfig(map);
    EventEntry eventEntry = EventEntry.of(
            Instant.now(),
            ValueFactory.newMap(
                    ValueFactory.newString("message"),
                    ValueFactory.newString("This is a message."),
                    ValueFactory.newString("flag"),
                    ValueFactory.newBoolean(true)));
    MessagePackConverver converter = new MessagePackConverver(config);
    SourceRecord sourceRecord = converter.convert("topic", "tag", 0L, eventEntry);
    // The Fluentd tag becomes the record key, and the topic name becomes the value schema name.
    assertEquals(Schema.STRING_SCHEMA, sourceRecord.keySchema());
    assertEquals("tag", sourceRecord.key());
    assertEquals("topic", sourceRecord.valueSchema().name());
    Struct struct = (Struct) sourceRecord.value();
    assertEquals("This is a message.", struct.get("message"));
    assertTrue(struct.getBoolean("flag"));
}
Example 5: readerWithProjection
import org.apache.kafka.connect.data.Struct; // import the required package/class

@Test(expected = DataException.class)
public void readerWithProjection() throws Throwable {
    Map<String, Object> cfg = new HashMap<String, Object>() {{
        put(ParquetFileReader.FILE_READER_PARQUET_PROJECTION, projectionSchema.toString());
    }};
    reader = getReader(FileSystem.newInstance(fsUri, new Configuration()), dataFile, cfg);
    while (reader.hasNext()) {
        Struct record = reader.next();
        assertNotNull(record.schema().field(FIELD_INDEX));
        assertNotNull(record.schema().field(FIELD_NAME));
        assertNull(record.schema().field(FIELD_SURNAME));
    }
    reader = getReader(FileSystem.newInstance(fsUri, new Configuration()), dataFile, cfg);
    readAllData();
}
Example 6: expectStart
import org.apache.kafka.connect.data.Struct; // import the required package/class

private void expectStart(final List<ConsumerRecord<String, byte[]>> preexistingRecords,
                         final Map<byte[], Struct> deserializations) throws Exception {
    storeLog.start();
    PowerMock.expectLastCall().andAnswer(new IAnswer<Object>() {
        @Override
        public Object answer() throws Throwable {
            // Replay any pre-existing records through the captured consume callback on start().
            for (ConsumerRecord<String, byte[]> rec : preexistingRecords)
                capturedConsumedCallback.getValue().onCompletion(null, rec);
            return null;
        }
    });
    for (Map.Entry<byte[], Struct> deserializationEntry : deserializations.entrySet()) {
        // Note null schema because default settings for internal serialization are schema-less
        EasyMock.expect(converter.toConnectData(EasyMock.eq(TOPIC), EasyMock.aryEq(deserializationEntry.getKey())))
                .andReturn(new SchemaAndValue(null, structToMap(deserializationEntry.getValue())));
    }
}
Example 7: readAllDataWithoutHeader
import org.apache.kafka.connect.data.Struct; // import the required package/class

@Test
public void readAllDataWithoutHeader() throws Throwable {
    Path file = createDataFile(false);
    FileReader reader = getReader(fs, file, new HashMap<String, Object>() {{
        put(DelimitedTextFileReader.FILE_READER_DELIMITED_TOKEN, ",");
        put(DelimitedTextFileReader.FILE_READER_DELIMITED_HEADER, "false");
    }});
    assertTrue(reader.hasNext());
    int recordCount = 0;
    while (reader.hasNext()) {
        Struct record = reader.next();
        checkData(record, recordCount);
        recordCount++;
    }
    assertEquals("The number of records in the file does not match", NUM_RECORDS, recordCount);
}
Example 8: defaultFieldNames
import org.apache.kafka.connect.data.Struct; // import the required package/class

@Test
public void defaultFieldNames() throws Throwable {
    Map<String, Object> customReaderCfg = new HashMap<>();
    reader = getReader(fs, dataFile, customReaderCfg);
    assertTrue(reader.getFilePath().equals(dataFile));
    assertTrue(reader.hasNext());
    int recordCount = 0;
    while (reader.hasNext()) {
        Struct record = reader.next();
        checkData(SequenceFileReader.FIELD_NAME_KEY_DEFAULT, SequenceFileReader.FIELD_NAME_VALUE_DEFAULT, record, recordCount);
        recordCount++;
    }
    assertEquals("The number of records in the file does not match", NUM_RECORDS, recordCount);
}
Example 9: updateSchemaOfStruct
import org.apache.kafka.connect.data.Struct; // import the required package/class

@Test
public void updateSchemaOfStruct() {
    final String fieldName1 = "f1";
    final String fieldName2 = "f2";
    final String fieldValue1 = "value1";
    final int fieldValue2 = 1;
    final Schema schema = SchemaBuilder.struct()
            .name("my.orig.SchemaDefn")
            .field(fieldName1, Schema.STRING_SCHEMA)
            .field(fieldName2, Schema.INT32_SCHEMA)
            .build();
    final Struct value = new Struct(schema).put(fieldName1, fieldValue1).put(fieldName2, fieldValue2);
    final Schema newSchema = SchemaBuilder.struct()
            .name("my.updated.SchemaDefn")
            .field(fieldName1, Schema.STRING_SCHEMA)
            .field(fieldName2, Schema.INT32_SCHEMA)
            .build();
    // The Struct keeps its field values but is re-attached to the renamed schema.
    Struct newValue = (Struct) SetSchemaMetadata.updateSchemaIn(value, newSchema);
    assertMatchingSchema(newValue, newSchema);
}
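For context, here is a hedged sketch of how the SetSchemaMetadata transform exercised above is typically wired up at runtime. The property values are assumptions chosen for illustration; schema.name and schema.version are the transform's standard configuration keys.
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.transforms.SetSchemaMetadata;

// Configure the value-side variant of the transform and apply it to a record.
SetSchemaMetadata<SinkRecord> transform = new SetSchemaMetadata.Value<>();
Map<String, String> props = new HashMap<>();
props.put("schema.name", "my.updated.SchemaDefn");   // same rename as in the test above
props.put("schema.version", "2");                    // hypothetical version bump
transform.configure(props);
SinkRecord renamed = transform.apply(someRecord);     // someRecord is a placeholder SinkRecord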
Example 10: verify
import org.apache.kafka.connect.data.Struct; // import the required package/class

private void verify(Set<Path> expectedFiles, Struct[] records, Schema schema) throws IOException {
    Path path = new Path(FileUtils.topicDirectory(url, topicsDir, TOPIC));
    FileStatus[] statuses = FileUtils.traverse(storage, path, new CommittedFileFilter());
    assertEquals(expectedFiles.size(), statuses.length);
    int index = 0;
    for (FileStatus status : statuses) {
        Path filePath = status.getPath();
        assertTrue(expectedFiles.contains(status.getPath()));
        Collection<Object> avroRecords = schemaFileReader.readData(conf, filePath);
        assertEquals(3, avroRecords.size());
        for (Object avroRecord : avroRecords) {
            // Each Avro record must equal the expected Struct after Connect-to-Avro conversion.
            assertEquals(avroData.fromConnectData(schema, records[index]), avroRecord);
        }
        index++;
    }
}
Example 11: prepareData
import org.apache.kafka.connect.data.Struct; // import the required package/class

private void prepareData(String topic, int partition) throws Exception {
    TopicPartition tp = new TopicPartition(topic, partition);
    DataWriter hdfsWriter = createWriter(context, avroData);
    hdfsWriter.recover(tp);
    String key = "key";
    Schema schema = createSchema();
    Struct record = createRecord(schema);
    // Write seven copies of the same Struct at consecutive offsets.
    Collection<SinkRecord> sinkRecords = new ArrayList<>();
    for (long offset = 0; offset < 7; offset++) {
        SinkRecord sinkRecord =
                new SinkRecord(topic, partition, Schema.STRING_SCHEMA, key, schema, record, offset);
        sinkRecords.add(sinkRecord);
    }
    hdfsWriter.write(sinkRecords);
    hdfsWriter.close(assignment);
    hdfsWriter.stop();
}
Example 12: envelope
import org.apache.kafka.connect.data.Struct; // import the required package/class

@Test
public void envelope() {
    final Envelope input = new Envelope(
            13246312L,
            true,
            "exchange",
            "routingKey"
    );
    final Struct actual = MessageConverter.envelope(input);
    assertNotNull(actual, "actual should not be null.");
    assertField(input.getDeliveryTag(), actual, MessageConverter.FIELD_ENVELOPE_DELIVERYTAG);
    assertField(input.getExchange(), actual, MessageConverter.FIELD_ENVELOPE_EXCHANGE);
    assertField(input.getRoutingKey(), actual, MessageConverter.FIELD_ENVELOPE_ROUTINGKEY);
    assertField(input.isRedeliver(), actual, MessageConverter.FIELD_ENVELOPE_ISREDELIVER);
}
Example 13: applyWithSchema
import org.apache.kafka.connect.data.Struct; // import the required package/class

private R applyWithSchema(R record) {
    final Struct value = requireStruct(operatingValue(record), PURPOSE);
    // Cache the flattened schema so repeated records with the same schema are not rebuilt.
    Schema updatedSchema = schemaUpdateCache.get(value.schema());
    if (updatedSchema == null) {
        final SchemaBuilder builder = SchemaUtil.copySchemaBasics(value.schema(), SchemaBuilder.struct());
        Struct defaultValue = (Struct) value.schema().defaultValue();
        buildUpdatedSchema(value.schema(), "", builder, value.schema().isOptional(), defaultValue);
        updatedSchema = builder.build();
        schemaUpdateCache.put(value.schema(), updatedSchema);
    }
    final Struct updatedValue = new Struct(updatedSchema);
    buildWithSchema(value, "", updatedValue);
    return newRecord(record, updatedSchema, updatedValue);
}
Example 14: putSafeWithNoPreviousValueIsPropagated
import org.apache.kafka.connect.data.Struct; // import the required package/class

@Test
public void putSafeWithNoPreviousValueIsPropagated() {
    final Converter converter = mock(Converter.class);
    final KafkaBasedLog<String, byte[]> kafkaBasedLog = mock(KafkaBasedLog.class);
    final KafkaStatusBackingStore store = new KafkaStatusBackingStore(new MockTime(), converter, STATUS_TOPIC, kafkaBasedLog);
    final byte[] value = new byte[0];
    // Capture the Struct the store serializes so its fields can be checked after the call.
    final Capture<Struct> statusValueStruct = newCapture();
    converter.fromConnectData(eq(STATUS_TOPIC), anyObject(Schema.class), capture(statusValueStruct));
    EasyMock.expectLastCall().andReturn(value);
    kafkaBasedLog.send(eq("status-connector-" + CONNECTOR), eq(value), anyObject(Callback.class));
    expectLastCall();
    replayAll();
    final ConnectorStatus status = new ConnectorStatus(CONNECTOR, ConnectorStatus.State.FAILED, WORKER_ID, 0);
    store.putSafe(status);
    verifyAll();
    assertEquals(status.state().toString(), statusValueStruct.getValue().get(KafkaStatusBackingStore.STATE_KEY_NAME));
    assertEquals(status.workerId(), statusValueStruct.getValue().get(KafkaStatusBackingStore.WORKER_ID_KEY_NAME));
    assertEquals(status.generation(), statusValueStruct.getValue().get(KafkaStatusBackingStore.GENERATION_KEY_NAME));
}
Example 15: apply
import org.apache.kafka.connect.data.Struct; // import the required package/class

@Override
public R apply(R record) {
    final Schema schema = operatingSchema(record);
    final Object value = operatingValue(record);
    if (schema == null) {
        // Schemaless data: wrap the value in a single-entry map keyed by the configured field name.
        return newRecord(record, null, Collections.singletonMap(fieldName, value));
    } else {
        Schema updatedSchema = schemaUpdateCache.get(schema);
        if (updatedSchema == null) {
            updatedSchema = SchemaBuilder.struct().field(fieldName, schema).build();
            schemaUpdateCache.put(schema, updatedSchema);
        }
        // Schema-based data: wrap the value in a single-field Struct.
        final Struct updatedValue = new Struct(updatedSchema).put(fieldName, value);
        return newRecord(record, updatedSchema, updatedValue);
    }
}
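This apply() implementation has the same shape as Kafka Connect's built-in HoistField transform, which wraps an entire key or value into a single-field Struct. A hedged usage sketch follows, assuming that transform; the field name "line" and the record variable are arbitrary choices for illustration.
import java.util.Collections;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.transforms.HoistField;

// Wrap the whole record value into a Struct with a single field named "line".
HoistField<SinkRecord> hoist = new HoistField.Value<>();
hoist.configure(Collections.singletonMap("field", "line"));
SinkRecord hoisted = hoist.apply(originalRecord);   // originalRecord is a placeholder SinkRecord
// With no value schema the result is a Map; with a schema it is Struct{line=<original value>}.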