This article collects typical usage examples of the Java class io.confluent.kafka.schemaregistry.client.SchemaRegistryClient. If you are unsure what SchemaRegistryClient is for or how to use it, the curated code examples below may help.
SchemaRegistryClient belongs to the io.confluent.kafka.schemaregistry.client package. Fifteen code examples are presented below, ordered by popularity by default.
Example 1: testRetrieveSchema
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; // import the dependent package/class
@Test
public void testRetrieveSchema() throws Exception {
final TableId table = TableId.of("test", "kafka_topic");
final String testTopic = "kafka-topic";
final String testSubject = "kafka-topic-value";
final String testAvroSchemaString =
"{\"type\": \"record\", "
+ "\"name\": \"testrecord\", "
+ "\"fields\": [{\"name\": \"f1\", \"type\": \"string\"}]}";
final SchemaMetadata testSchemaMetadata = new SchemaMetadata(1, 1, testAvroSchemaString);
SchemaRegistryClient schemaRegistryClient = mock(SchemaRegistryClient.class);
when(schemaRegistryClient.getLatestSchemaMetadata(testSubject)).thenReturn(testSchemaMetadata);
SchemaRegistrySchemaRetriever testSchemaRetriever = new SchemaRegistrySchemaRetriever(
schemaRegistryClient,
new AvroData(0)
);
Schema expectedKafkaConnectSchema =
SchemaBuilder.struct().field("f1", Schema.STRING_SCHEMA).name("testrecord").build();
assertEquals(expectedKafkaConnectSchema, testSchemaRetriever.retrieveSchema(table, testTopic));
}
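Example 1 swaps in a Mockito mock. Against a live registry, the same lookup goes through CachedSchemaRegistryClient, the standard caching implementation of the interface. A minimal sketch, assuming a registry at localhost:8081 and the same "<topic>-value" subject convention used above (both are placeholders):

import io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient;
import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;

public class SchemaRegistryLookup {
  public static void main(String[] args) throws Exception {
    // A caching client against a real registry; URL and cache capacity are placeholders.
    SchemaRegistryClient client =
        new CachedSchemaRegistryClient("http://localhost:8081", 100);
    // Value schemas are registered under the "<topic>-value" subject.
    SchemaMetadata metadata = client.getLatestSchemaMetadata("kafka-topic-value");
    System.out.println("id=" + metadata.getId()
        + " version=" + metadata.getVersion()
        + " schema=" + metadata.getSchema());
  }
}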
Example 2: buildStream
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; // import the dependent package/class
@Override
public SchemaKStream buildStream(final StreamsBuilder builder,
final KsqlConfig ksqlConfig,
final KafkaTopicClient kafkaTopicClient,
final MetastoreUtil metastoreUtil,
final FunctionRegistry functionRegistry,
final Map<String, Object> props,
final SchemaRegistryClient schemaRegistryClient) {
final SchemaKStream schemaKStream = getSource().buildStream(builder,
ksqlConfig,
kafkaTopicClient,
metastoreUtil,
functionRegistry,
props, schemaRegistryClient);
schemaKStream.setOutputNode(this);
return schemaKStream.toQueue(getLimit());
}
Example 3: tableForJoin
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; // import the dependent package/class
SchemaKTable tableForJoin(
final StreamsBuilder builder,
final KsqlConfig ksqlConfig,
final KafkaTopicClient kafkaTopicClient,
final MetastoreUtil metastoreUtil,
final FunctionRegistry functionRegistry,
final Map<String, Object> props,
final SchemaRegistryClient schemaRegistryClient) {
final Map<String, Object> joinTableProps = new HashMap<>(props);
joinTableProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
final SchemaKStream schemaKStream = right.buildStream(builder, ksqlConfig, kafkaTopicClient,
metastoreUtil, functionRegistry,
joinTableProps, schemaRegistryClient);
if (!(schemaKStream instanceof SchemaKTable)) {
throw new KsqlException("Unsupported Join. Only stream-table joins are supported, but was "
+ getLeft() + "-" + getRight());
}
return (SchemaKTable) schemaKStream;
}
Example 4: KsqlEngine
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; // import the dependent package/class
public KsqlEngine(final KsqlConfig ksqlConfig,
    final KafkaTopicClient topicClient,
    final SchemaRegistryClient schemaRegistryClient) {
Objects.requireNonNull(ksqlConfig, "Streams properties map cannot be null as it may be mutated later on");
this.ksqlConfig = ksqlConfig;
this.metaStore = new MetaStoreImpl();
this.topicClient = topicClient;
this.ddlCommandExec = new DDLCommandExec(metaStore);
this.queryEngine = new QueryEngine(this, new CommandFactories(topicClient, this));
this.persistentQueries = new HashMap<>();
this.livePersistentQueries = new HashSet<>();
this.allLiveQueries = new HashSet<>();
this.functionRegistry = new FunctionRegistry();
this.schemaRegistryClient = schemaRegistryClient;
this.engineMetrics = new KsqlEngineMetrics("ksql-engine", this);
this.aggregateMetricsCollector = Executors.newSingleThreadScheduledExecutor();
aggregateMetricsCollector.scheduleAtFixedRate(engineMetrics::updateMetrics, 1000, 1000,
TimeUnit.MILLISECONDS);
}
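The constructor's last two lines start a single-threaded scheduler that refreshes engine metrics every second, which KsqlEngine presumably stops again in its close() method (not shown). A standalone sketch of that start/stop pattern, using only standard java.util.concurrent APIs:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class MetricsTicker implements AutoCloseable {

  private final ScheduledExecutorService collector =
      Executors.newSingleThreadScheduledExecutor();

  public MetricsTicker(Runnable updateMetrics) {
    // Mirror the constructor above: poll the metrics callback once per second.
    collector.scheduleAtFixedRate(updateMetrics, 1000, 1000, TimeUnit.MILLISECONDS);
  }

  @Override
  public void close() throws InterruptedException {
    // Without an explicit shutdown, the non-daemon worker thread keeps the JVM alive.
    collector.shutdown();
    if (!collector.awaitTermination(5, TimeUnit.SECONDS)) {
      collector.shutdownNow();
    }
  }
}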
Example 5: SchemaKStream
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; // import the dependent package/class
public SchemaKStream(final Schema schema,
final KStream<String, GenericRow> kstream,
final Field keyField,
final List<SchemaKStream> sourceSchemaKStreams,
final Type type,
final FunctionRegistry functionRegistry,
final SchemaRegistryClient schemaRegistryClient) {
this.schema = schema;
this.kstream = kstream;
this.keyField = keyField;
this.sourceSchemaKStreams = sourceSchemaKStreams;
this.genericRowValueTypeEnforcer = new GenericRowValueTypeEnforcer(schema);
this.type = type;
this.functionRegistry = functionRegistry;
this.schemaRegistryClient = schemaRegistryClient;
}
Example 6: create
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; // import the dependent package/class
public static KsqlContext create(KsqlConfig ksqlConfig,
    SchemaRegistryClient schemaRegistryClient) {
if (ksqlConfig == null) {
ksqlConfig = new KsqlConfig(Collections.emptyMap());
}
Map<String, Object> streamsProperties = ksqlConfig.getKsqlStreamConfigProps();
if (!streamsProperties.containsKey(StreamsConfig.APPLICATION_ID_CONFIG)) {
streamsProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, APPLICATION_ID_OPTION_DEFAULT);
}
if (!streamsProperties.containsKey(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG)) {
streamsProperties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, KAFKA_BOOTSTRAP_SERVER_OPTION_DEFAULT);
}
AdminClient adminClient = AdminClient.create(ksqlConfig.getKsqlAdminClientConfigProps());
KafkaTopicClient topicClient = new KafkaTopicClientImpl(adminClient);
if (schemaRegistryClient == null) {
return new KsqlContext(adminClient, topicClient, new KsqlEngine(ksqlConfig, topicClient));
} else {
return new KsqlContext(adminClient, topicClient, new KsqlEngine(ksqlConfig, topicClient, schemaRegistryClient));
}
}
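Both arguments are nullable, which makes this factory convenient in tests. A usage sketch, assuming KsqlContext lives in io.confluent.ksql and pairing it with the in-memory MockSchemaRegistryClient that ships with the schema-registry client library:

import io.confluent.kafka.schemaregistry.client.MockSchemaRegistryClient;
import io.confluent.ksql.KsqlContext;

public class KsqlContextExample {
  public static void main(String[] args) {
    // A null config falls back to an empty KsqlConfig, as shown in create() above;
    // the mock client avoids the need for a running Schema Registry.
    KsqlContext ksqlContext = KsqlContext.create(null, new MockSchemaRegistryClient());
    // SQL statements can now be issued against the embedded engine.
  }
}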
Example 7: PhysicalPlanBuilder
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; // import the dependent package/class
public PhysicalPlanBuilder(final StreamsBuilder builder,
final KsqlConfig ksqlConfig,
final KafkaTopicClient kafkaTopicClient,
final MetastoreUtil metastoreUtil,
final FunctionRegistry functionRegistry,
final Map<String, Object> overriddenStreamsProperties,
final boolean updateMetastore,
final MetaStore metaStore,
final SchemaRegistryClient schemaRegistryClient,
final KafkaStreamsBuilder kafkaStreamsBuilder) {
this.builder = builder;
this.ksqlConfig = ksqlConfig;
this.kafkaTopicClient = kafkaTopicClient;
this.metastoreUtil = metastoreUtil;
this.functionRegistry = functionRegistry;
this.overriddenStreamsProperties = overriddenStreamsProperties;
this.metaStore = metaStore;
this.updateMetastore = updateMetastore;
this.schemaRegistryClient = schemaRegistryClient;
this.kafkaStreamsBuilder = kafkaStreamsBuilder;
}
Example 8: fetchSchemaMetadata
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; // import the dependent package/class
private SchemaMetadata fetchSchemaMetadata(
AbstractStreamCreateStatement abstractStreamCreateStatement,
SchemaRegistryClient schemaRegistryClient,
String kafkaTopicName) throws IOException, RestClientException {
if (abstractStreamCreateStatement.getProperties().containsKey(KsqlConstants.AVRO_SCHEMA_ID)) {
int schemaId;
try {
schemaId = Integer.parseInt(StringUtil.cleanQuotes(
    abstractStreamCreateStatement.getProperties()
        .get(KsqlConstants.AVRO_SCHEMA_ID).toString()));
} catch (NumberFormatException e) {
throw new KsqlException(String.format("Invalid schema id property: %s.",
abstractStreamCreateStatement.getProperties().get(KsqlConstants.AVRO_SCHEMA_ID).toString()));
}
return schemaRegistryClient.getSchemaMetadata(
    kafkaTopicName + KsqlConstants.SCHEMA_REGISTRY_VALUE_SUFFIX, schemaId);
} else {
return schemaRegistryClient.getLatestSchemaMetadata(kafkaTopicName +
KsqlConstants.SCHEMA_REGISTRY_VALUE_SUFFIX);
}
}
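Value subjects follow the "<topic>-value" convention, which is exactly what KsqlConstants.SCHEMA_REGISTRY_VALUE_SUFFIX appends above. A self-contained sketch of the two lookup paths, using MockSchemaRegistryClient so it runs without a registry; note that getSchemaMetadata's second argument is a subject version, not a global schema id:

import io.confluent.kafka.schemaregistry.client.MockSchemaRegistryClient;
import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import org.apache.avro.Schema;

public class SubjectLookupSketch {
  public static void main(String[] args) throws Exception {
    SchemaRegistryClient client = new MockSchemaRegistryClient();
    Schema schema = new Schema.Parser().parse(
        "{\"type\": \"record\", \"name\": \"r\","
        + " \"fields\": [{\"name\": \"f1\", \"type\": \"string\"}]}");

    // Value schemas live under the "<topic>-value" subject.
    String subject = "orders-topic-value";
    client.register(subject, schema);

    SchemaMetadata byVersion = client.getSchemaMetadata(subject, 1); // subject version, not schema id
    SchemaMetadata latest = client.getLatestSchemaMetadata(subject);
    System.out.println(byVersion.getId() + " / " + latest.getVersion());
  }
}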
Example 9: validatePersistentQueryResults
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; // import the dependent package/class
public void validatePersistentQueryResults(
final PersistentQueryMetadata persistentQueryMetadata,
final SchemaRegistryClient schemaRegistryClient) {
if (persistentQueryMetadata.getResultTopicSerde() == DataSource.DataSourceSerDe.AVRO) {
String avroSchemaString = SchemaUtil.buildAvroSchema(persistentQueryMetadata
.getResultSchema(),
persistentQueryMetadata
.getResultTopic().getName());
boolean isValidSchema = isValidAvroSchemaForTopic(persistentQueryMetadata.getResultTopic()
.getTopicName(),
avroSchemaString, schemaRegistryClient);
if (!isValidSchema) {
throw new KsqlException(String.format("Cannot register avro schema for %s since it is "
+ "not valid for schema registry.", persistentQueryMetadata
.getResultTopic().getKafkaTopicName()));
}
}
}
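isValidAvroSchemaForTopic is not shown here; presumably it delegates to SchemaRegistryClient.testCompatibility, which checks a candidate schema against the subject's registered versions. A minimal sketch of that call, assuming the mock client applies the registry's default backward-compatibility rule:

import io.confluent.kafka.schemaregistry.client.MockSchemaRegistryClient;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import org.apache.avro.Schema;

public class CompatibilityCheckSketch {
  public static void main(String[] args) throws Exception {
    SchemaRegistryClient client = new MockSchemaRegistryClient();
    // Separate parsers: a single Schema.Parser cannot redefine the same record name.
    Schema v1 = new Schema.Parser().parse(
        "{\"type\": \"record\", \"name\": \"r\","
        + " \"fields\": [{\"name\": \"f1\", \"type\": \"string\"}]}");
    client.register("some-topic-value", v1);

    // Adding a field with a default is backward compatible.
    Schema v2 = new Schema.Parser().parse(
        "{\"type\": \"record\", \"name\": \"r\", \"fields\": ["
        + "{\"name\": \"f1\", \"type\": \"string\"},"
        + "{\"name\": \"f2\", \"type\": \"string\", \"default\": \"\"}]}");
    System.out.println(client.testCompatibility("some-topic-value", v2)); // true
  }
}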
Example 10: shouldPassAvroCheck
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; // import the dependent package/class
@Test
public void shouldPassAvroCheck() throws Exception {
SchemaRegistryClient schemaRegistryClient = mock(SchemaRegistryClient.class);
SchemaMetadata schemaMetadata = new SchemaMetadata(1, 1, ordersAveroSchemaStr);
expect(schemaRegistryClient.getLatestSchemaMetadata(anyString())).andReturn(schemaMetadata);
replay(schemaRegistryClient);
AbstractStreamCreateStatement abstractStreamCreateStatement = getAbstractStreamCreateStatement
("CREATE STREAM S1 WITH "
+ "(kafka_topic='s1_topic', "
+ "value_format='avro' );");
Pair<AbstractStreamCreateStatement, String> checkResult = avroUtil.checkAndSetAvroSchema(
    abstractStreamCreateStatement, new HashMap<>(), schemaRegistryClient);
AbstractStreamCreateStatement newAbstractStreamCreateStatement = checkResult.getLeft();
assertThat(newAbstractStreamCreateStatement.getElements(), equalTo(Arrays.asList(
new TableElement("ORDERTIME", "BIGINT"),
new TableElement("ORDERID", "BIGINT"),
new TableElement("ITEMID", "VARCHAR"),
new TableElement("ORDERUNITS", "DOUBLE"),
new TableElement("ARRAYCOL", "ARRAY<DOUBLE>"),
new TableElement("MAPCOL", "MAP<VARCHAR,DOUBLE>")
)));
}
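Examples 10 through 12 follow EasyMock's record/replay lifecycle: stub calls with expect(...).andReturn(...), switch to replay mode, then exercise the code under test. The optional final step, verify, is omitted above; a self-contained sketch of the full lifecycle (the stubbed return value is a placeholder):

import static org.easymock.EasyMock.anyString;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.mock;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;

import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;

public class EasyMockLifecycle {
  public static void main(String[] args) throws Exception {
    SchemaRegistryClient client = mock(SchemaRegistryClient.class);
    expect(client.getLatestSchemaMetadata(anyString()))
        .andReturn(new SchemaMetadata(1, 1, "{\"type\": \"string\"}"));
    replay(client);                     // stop recording, start replaying

    client.getLatestSchemaMetadata("any-subject");

    verify(client);                     // fails if an expected call never happened
  }
}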
Example 11: shouldNotPassAvroCheckIfSchemaDoesNotExist
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; // import the dependent package/class
@Test
public void shouldNotPassAvroCheckIfSchemaDoesNotExist() throws Exception {
SchemaRegistryClient schemaRegistryClient = mock(SchemaRegistryClient.class);
SchemaMetadata schemaMetadata = new SchemaMetadata(1, 1, null);
expect(schemaRegistryClient.getLatestSchemaMetadata(anyString())).andReturn(schemaMetadata);
replay(schemaRegistryClient);
AbstractStreamCreateStatement abstractStreamCreateStatement = getAbstractStreamCreateStatement
("CREATE STREAM S1 WITH "
+ "(kafka_topic='s1_topic', "
+ "value_format='avro' );");
try {
avroUtil.checkAndSetAvroSchema(abstractStreamCreateStatement, new HashMap<>(), schemaRegistryClient);
fail();
} catch (Exception e) {
assertThat("Expected different message message.", e.getMessage(), equalTo(" Could not "
+ "fetch the AVRO schema "
+ "from schema registry. null "));
}
}
Example 12: shouldValidatePersistentQueryResultCorrectly
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; // import the dependent package/class
@Test
public void shouldValidatePersistentQueryResultCorrectly()
throws IOException, RestClientException {
SchemaRegistryClient schemaRegistryClient = mock(SchemaRegistryClient.class);
KsqlTopic resultTopic = new KsqlTopic("testTopic", "testTopic", new KsqlAvroTopicSerDe());
Schema resultSchema = SerDeUtil.getSchemaFromAvro(ordersAveroSchemaStr);
PersistentQueryMetadata persistentQueryMetadata = new PersistentQueryMetadata("",
null,
null,
"",
null,
DataSource.DataSourceType.KSTREAM,
"",
mock(KafkaTopicClient.class),
resultSchema,
resultTopic,
null);
org.apache.avro.Schema.Parser parser = new org.apache.avro.Schema.Parser();
org.apache.avro.Schema avroSchema = parser.parse(ordersAveroSchemaStr);
expect(schemaRegistryClient.testCompatibility(anyString(), EasyMock.isA(avroSchema.getClass())))
.andReturn(true);
replay(schemaRegistryClient);
avroUtil.validatePersistentQueryResults(persistentQueryMetadata, schemaRegistryClient);
}
Example 13: registerSchema
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; // import the dependent package/class
private void registerSchema(SchemaRegistryClient schemaRegistryClient)
throws IOException, RestClientException {
String ordersAveroSchemaStr = "{"
+ "\"namespace\": \"kql\","
+ " \"name\": \"orders\","
+ " \"type\": \"record\","
+ " \"fields\": ["
+ " {\"name\": \"ordertime\", \"type\": \"long\"},"
+ " {\"name\": \"orderid\", \"type\": \"long\"},"
+ " {\"name\": \"itemid\", \"type\": \"string\"},"
+ " {\"name\": \"orderunits\", \"type\": \"double\"},"
+ " {\"name\": \"arraycol\", \"type\": {\"type\": \"array\", \"items\": \"double\"}},"
+ " {\"name\": \"mapcol\", \"type\": {\"type\": \"map\", \"values\": \"double\"}}"
+ " ]"
+ "}";
org.apache.avro.Schema.Parser parser = new org.apache.avro.Schema.Parser();
org.apache.avro.Schema avroSchema = parser.parse(ordersAveroSchemaStr);
schemaRegistryClient.register("orders-topic" + KsqlConstants.SCHEMA_REGISTRY_VALUE_SUFFIX,
avroSchema);
}
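register returns the schema's global id. Registering an identical schema under the same subject again is idempotent against the real registry, and (assuming the mock mirrors that behavior) against MockSchemaRegistryClient as well; a short sketch:

import io.confluent.kafka.schemaregistry.client.MockSchemaRegistryClient;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import org.apache.avro.Schema;

public class RegisterIdempotency {
  public static void main(String[] args) throws Exception {
    SchemaRegistryClient client = new MockSchemaRegistryClient();
    Schema schema = new Schema.Parser().parse(
        "{\"namespace\": \"kql\", \"name\": \"orders\", \"type\": \"record\","
        + " \"fields\": [{\"name\": \"ordertime\", \"type\": \"long\"}]}");

    int firstId = client.register("orders-topic-value", schema);
    int secondId = client.register("orders-topic-value", schema);
    System.out.println(firstId == secondId);  // true: identical schemas share an id
  }
}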
Example 14: KsqlGenericRowAvroSerializer
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; // import the dependent package/class
public KsqlGenericRowAvroSerializer(org.apache.kafka.connect.data.Schema schema,
    SchemaRegistryClient schemaRegistryClient,
    KsqlConfig ksqlConfig) {
String avroSchemaStr = SchemaUtil.buildAvroSchema(schema, "avro_schema");
Schema.Parser parser = new Schema.Parser();
avroSchema = parser.parse(avroSchemaStr);
fields = avroSchema.getFields();
Map<String, Object> map = new HashMap<>();
// Automatically register the schema in the Schema Registry if it has not been registered.
map.put(AbstractKafkaAvroSerDeConfig.AUTO_REGISTER_SCHEMAS, true);
map.put(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, ksqlConfig.getString(KsqlConfig.SCHEMA_REGISTRY_URL_PROPERTY));
kafkaAvroSerializer = new KafkaAvroSerializer(schemaRegistryClient, map);
}
Example 15: getSerializedRow
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; // import the dependent package/class
private byte[] getSerializedRow(String topicName,
    SchemaRegistryClient schemaRegistryClient,
    Schema rowAvroSchema,
    GenericRow genericRow) {
Map<String, Object> map = new HashMap<>();
// Automatically register the schema in the Schema Registry if it has not been registered.
map.put(AbstractKafkaAvroSerDeConfig.AUTO_REGISTER_SCHEMAS, true);
map.put(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, "");
KafkaAvroSerializer kafkaAvroSerializer = new KafkaAvroSerializer(schemaRegistryClient, map);
GenericRecord avroRecord = new GenericData.Record(rowAvroSchema);
List<Schema.Field> fields = rowAvroSchema.getFields();
for (int i = 0; i < genericRow.getColumns().size(); i++) {
if (fields.get(i).schema().getType() == Schema.Type.ARRAY) {
avroRecord.put(fields.get(i).name(), Arrays.asList((Object[]) genericRow.getColumns().get(i)));
} else {
avroRecord.put(fields.get(i).name(), genericRow.getColumns().get(i));
}
}
return kafkaAvroSerializer.serialize(topicName, avroRecord);
}