This article collects typical usage examples of the Java class io.confluent.connect.avro.AvroData: what AvroData is for, and how it is used in practice. The AvroData class belongs to the io.confluent.connect.avro package. Fifteen code examples are shown below, ordered by popularity.
Example 1: putRecords

import io.confluent.connect.avro.AvroData; // import of the dependent package/class

public static byte[] putRecords(Collection<SinkRecord> records, AvroData avroData) throws IOException {
  final DataFileWriter<Object> writer = new DataFileWriter<>(new GenericDatumWriter<>());
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  Schema schema = null;
  for (SinkRecord record : records) {
    if (schema == null) {
      schema = record.valueSchema();
      org.apache.avro.Schema avroSchema = avroData.fromConnectSchema(schema);
      writer.create(avroSchema, out);
    }
    Object value = avroData.fromConnectData(schema, record.value());
    // AvroData wraps primitive types so their schema can be included. We need to unwrap
    // NonRecordContainers to just their value to properly handle these types.
    if (value instanceof NonRecordContainer) {
      value = ((NonRecordContainer) value).getValue();
    }
    writer.append(value);
  }
  writer.flush();
  return out.toByteArray();
}
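For context, a caller could exercise putRecords along these lines. This is an illustrative sketch, not part of the original source: the topic name, schema, and record contents are invented for the example.

import java.util.Collections;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.sink.SinkRecord;

// Build a single record with a one-field struct value (hypothetical data).
Schema valueSchema = SchemaBuilder.struct().name("testrecord")
    .field("f1", Schema.STRING_SCHEMA).build();
Struct value = new Struct(valueSchema).put("f1", "hello");
SinkRecord record = new SinkRecord("kafka-topic", 0, null, null, valueSchema, value, 0L);

// A cache size of 1 suffices when only one schema is converted.
byte[] avroContainerBytes = putRecords(Collections.singletonList(record), new AvroData(1));

The returned bytes form a complete Avro object container file, so they can be read back with Avro's DataFileReader to verify the round trip.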
Example 2: testRetrieveSchema

import io.confluent.connect.avro.AvroData; // import of the dependent package/class

@Test
public void testRetrieveSchema() throws Exception {
  final TableId table = TableId.of("test", "kafka_topic");
  final String testTopic = "kafka-topic";
  final String testSubject = "kafka-topic-value";
  final String testAvroSchemaString =
      "{\"type\": \"record\", "
      + "\"name\": \"testrecord\", "
      + "\"fields\": [{\"name\": \"f1\", \"type\": \"string\"}]}";
  final SchemaMetadata testSchemaMetadata = new SchemaMetadata(1, 1, testAvroSchemaString);

  SchemaRegistryClient schemaRegistryClient = mock(SchemaRegistryClient.class);
  when(schemaRegistryClient.getLatestSchemaMetadata(testSubject)).thenReturn(testSchemaMetadata);

  SchemaRegistrySchemaRetriever testSchemaRetriever = new SchemaRegistrySchemaRetriever(
      schemaRegistryClient,
      new AvroData(0)
  );

  Schema expectedKafkaConnectSchema =
      SchemaBuilder.struct().field("f1", Schema.STRING_SCHEMA).name("testrecord").build();
  assertEquals(expectedKafkaConnectSchema, testSchemaRetriever.retrieveSchema(table, testTopic));
}
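Under the hood, the retriever's job boils down to parsing the registered Avro schema string and mapping it to a Kafka Connect schema via AvroData. A minimal sketch of that step, assuming the standard Avro parser and AvroData's toConnectSchema API (this is not the connector's literal code):

// Parse the schema string fetched from the registry, then convert it.
org.apache.avro.Schema avroSchema =
    new org.apache.avro.Schema.Parser().parse(testAvroSchemaString);
// toConnectSchema maps the Avro record schema to the equivalent Connect schema.
Schema kafkaConnectSchema = new AvroData(0).toConnectSchema(avroSchema);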
Example 3: TopicPartitionWriter

import io.confluent.connect.avro.AvroData; // import of the dependent package/class

public TopicPartitionWriter(
    TopicPartition tp,
    Storage storage,
    RecordWriterProvider writerProvider,
    Partitioner partitioner,
    HdfsSinkConnectorConfig connectorConfig,
    SinkTaskContext context,
    AvroData avroData) {
  this(tp, storage, writerProvider, partitioner, connectorConfig, context, avroData,
      null, null, null, null, null);
}
Example 4: configureConnector

import io.confluent.connect.avro.AvroData; // import of the dependent package/class

protected void configureConnector() {
  connectorConfig = new HdfsSinkConnectorConfig(connectorProps);
  topicsDir = connectorConfig.getString(HdfsSinkConnectorConfig.TOPICS_DIR_CONFIG);
  logsDir = connectorConfig.getString(HdfsSinkConnectorConfig.LOGS_DIR_CONFIG);
  int schemaCacheSize = connectorConfig.getInt(HdfsSinkConnectorConfig.SCHEMA_CACHE_SIZE_CONFIG);
  avroData = new AvroData(schemaCacheSize);
}
Example 5: getRecordWriter

import io.confluent.connect.avro.AvroData; // import of the dependent package/class

@Override
public RecordWriter<SinkRecord> getRecordWriter(
    Configuration conf, final String fileName, SinkRecord record, final AvroData avroData)
    throws IOException {
  final Map<String, List<Object>> data = Data.getData();
  if (!data.containsKey(fileName)) {
    data.put(fileName, new LinkedList<>());
  }
  return new MemoryRecordWriter(fileName);
}
Example 6: calcByteSize

import io.confluent.connect.avro.AvroData; // import of the dependent package/class

private int calcByteSize(List<SinkRecord> sinkRecords) throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  DataFileWriter<Object> writer = new DataFileWriter<>(new GenericDatumWriter<>());
  AvroData avroData = new AvroData(1);
  boolean writerInit = false;
  for (SinkRecord sinkRecord : sinkRecords) {
    if (!writerInit) {
      writer.create(avroData.fromConnectSchema(sinkRecord.valueSchema()), baos);
      writerInit = true;
    }
    writer.append(avroData.fromConnectData(sinkRecord.valueSchema(), sinkRecord.value()));
  }
  // Close the writer so any buffered records are flushed into the stream;
  // otherwise the reported size can miss the last buffered block.
  writer.close();
  return baos.size();
}
Example 7: configure

import io.confluent.connect.avro.AvroData; // import of the dependent package/class

@Override
public void configure(Map<String, String> properties) {
  SchemaRegistrySchemaRetrieverConfig config =
      new SchemaRegistrySchemaRetrieverConfig(properties);
  schemaRegistryClient =
      new CachedSchemaRegistryClient(config.getString(config.LOCATION_CONFIG), 0);
  avroData = new AvroData(config.getInt(config.AVRO_DATA_CACHE_SIZE_CONFIG));
}
Example 8: GenericRecordToStruct

import io.confluent.connect.avro.AvroData; // import of the dependent package/class

public GenericRecordToStruct() {
  this.avroData = new AvroData(CACHE_SIZE);
}
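Judging by its name, this class converts Avro GenericRecords into Kafka Connect Structs. A minimal sketch of what such a conversion method could look like, assuming AvroData's toConnectData API; the method name toStruct is hypothetical, as the original class's conversion method is not shown here:

import org.apache.avro.generic.GenericRecord;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.data.Struct;

public Struct toStruct(GenericRecord record) {
  // toConnectData derives the Connect schema from the record's own Avro
  // schema and converts the value in a single step.
  SchemaAndValue converted = avroData.toConnectData(record.getSchema(), record);
  return (Struct) converted.value();
}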
Example 9: HiveUtil

import io.confluent.connect.avro.AvroData; // import of the dependent package/class

public HiveUtil(HdfsSinkConnectorConfig connectorConfig, AvroData avroData, HiveMetaStore hiveMetaStore) {
  this.url = connectorConfig.getString(HdfsSinkConnectorConfig.HDFS_URL_CONFIG);
  this.topicsDir = connectorConfig.getString(HdfsSinkConnectorConfig.TOPICS_DIR_CONFIG);
  this.avroData = avroData;
  this.hiveMetaStore = hiveMetaStore;
}
Example 10: AvroHiveUtil

import io.confluent.connect.avro.AvroData; // import of the dependent package/class

public AvroHiveUtil(HdfsSinkConnectorConfig connectorConfig, AvroData avroData, HiveMetaStore hiveMetaStore) {
  super(connectorConfig, avroData, hiveMetaStore);
}
Example 11: AvroFileReader

import io.confluent.connect.avro.AvroData; // import of the dependent package/class

public AvroFileReader(AvroData avroData) {
  this.avroData = avroData;
}
Example 12: getSchemaFileReader

import io.confluent.connect.avro.AvroData; // import of the dependent package/class

public SchemaFileReader getSchemaFileReader(AvroData avroData) {
  return new AvroFileReader(avroData);
}
Example 13: getHiveUtil

import io.confluent.connect.avro.AvroData; // import of the dependent package/class

public HiveUtil getHiveUtil(HdfsSinkConnectorConfig config, AvroData avroData, HiveMetaStore hiveMetaStore) {
  return new AvroHiveUtil(config, avroData, hiveMetaStore);
}
Example 14: getAvroData

import io.confluent.connect.avro.AvroData; // import of the dependent package/class

public AvroData getAvroData() {
  return avroData;
}
Example 15: ParquetHiveUtil

import io.confluent.connect.avro.AvroData; // import of the dependent package/class

public ParquetHiveUtil(HdfsSinkConnectorConfig connectorConfig, AvroData avroData, HiveMetaStore hiveMetaStore) {
  super(connectorConfig, avroData, hiveMetaStore);
}