This article collects typical usage examples of the Java class org.apache.kafka.clients.admin.AdminClient. If you are wondering what the AdminClient class does and how to use it, the curated code samples below may help.
The AdminClient class belongs to the org.apache.kafka.clients.admin package. Fifteen code examples of the class are presented below, sorted by popularity by default.
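Before the individual examples, here is a minimal end-to-end sketch of the AdminClient lifecycle they all share: build a configuration, create the client, issue an asynchronous request whose result wraps KafkaFuture instances, and close the client. The broker address localhost:9092 is a placeholder assumption, not taken from any example below.

import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;

public class AdminClientLifecycle {
    public static void main(String[] args) throws ExecutionException, InterruptedException {
        Properties props = new Properties();
        // Placeholder broker address; point this at your own cluster.
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

        // try-with-resources closes the client and releases its threads and sockets.
        try (AdminClient adminClient = AdminClient.create(props)) {
            // Every AdminClient call is asynchronous and returns a *Result object
            // wrapping KafkaFuture instances; get() blocks until the broker responds.
            Set<String> topicNames = adminClient.listTopics().names().get();
            System.out.println("Topics: " + topicNames);
        }
    }
}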
Example 1: createTopic
import org.apache.kafka.clients.admin.AdminClient; // import the package/class this example depends on
/**
 * Creates a topic in Kafka. If the topic already exists this does nothing.
 * @param topicName - the topic name to create.
 * @param partitions - the number of partitions to create.
 */
public void createTopic(final String topicName, final int partitions) {
    final short replicationFactor = 1;

    // Create admin client
    try (final AdminClient adminClient = KafkaAdminClient.create(buildDefaultClientConfig())) {
        try {
            // Define the new topic
            final NewTopic newTopic = new NewTopic(topicName, partitions, replicationFactor);

            // Create the topic; this is an async call.
            final CreateTopicsResult createTopicsResult = adminClient.createTopics(Collections.singleton(newTopic));

            // Since the call is async, wait for it to complete.
            createTopicsResult.values().get(topicName).get();
        } catch (InterruptedException | ExecutionException e) {
            if (!(e.getCause() instanceof TopicExistsException)) {
                throw new RuntimeException(e.getMessage(), e);
            }
            // TopicExistsException - swallow it; it just means the topic already exists.
        }
    }
}
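The values() map above exposes one future per topic; when creating several topics in one request, CreateTopicsResult.all() gives a single future that completes once every creation has finished. A short sketch under the same assumptions (buildDefaultClientConfig() is the helper used in the example above; the topic names are placeholders):

import org.apache.kafka.clients.admin.AdminClient; // plus KafkaAdminClient, NewTopic, java.util.Arrays, java.util.List
public void createExampleTopics() throws Exception {
    try (final AdminClient adminClient = KafkaAdminClient.create(buildDefaultClientConfig())) {
        final List<NewTopic> newTopics = Arrays.asList(
                new NewTopic("topic-a", 3, (short) 1),
                new NewTopic("topic-b", 3, (short) 1));
        // all() returns one future that completes when every topic has been created.
        adminClient.createTopics(newTopics).all().get();
    }
}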
Example 2: main
import org.apache.kafka.clients.admin.AdminClient; // import the package/class this example depends on
public static void main(String[] args) throws ExecutionException, InterruptedException {
    Map<String, Object> props = new HashMap<>();
    // list of host:port pairs used for establishing the initial connections
    // to the Kafka cluster
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka-local:9092");
    // props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
    // props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
    // maximum time to block, after which a TimeoutException is thrown
    props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 50000);
    try (AdminClient adminClient = AdminClient.create(props)) {
        adminClient.describeCluster();
        Collection<TopicListing> topicListings = adminClient.listTopics().listings().get();
        System.out.println(topicListings);
    }
}
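describeCluster() in the example above is invoked but its result is discarded. A brief sketch of actually consuming the DescribeClusterResult, using the same adminClient as above (requires org.apache.kafka.clients.admin.DescribeClusterResult and org.apache.kafka.common.Node):

// Sketch: reading the DescribeClusterResult instead of discarding it.
DescribeClusterResult cluster = adminClient.describeCluster();
System.out.println("Cluster id: " + cluster.clusterId().get());
for (Node node : cluster.nodes().get()) {
    System.out.println("Broker " + node.idString() + " @ " + node.host() + ":" + node.port());
}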
Example 3: DefaultConsumer
import org.apache.kafka.clients.admin.AdminClient; // import the package/class this example depends on
public DefaultConsumer(String configPath, List<TopicPartition> topicPartitions, List<TopicFiber> topicFibers) throws ConfigFileNotFoundException
{
    ConsumerConfig config = ConsumerConfig.INSTANCE();
    config.init(configPath);
    config.validate();
    this.topicPartitions = topicPartitions;
    this.topicFibers = topicFibers;
    Properties props = new Properties();
    props.setProperty("bootstrap.servers", config.getKafkaBootstrapServers());
    props.setProperty("client.id", "consumerAdmin");
    props.setProperty("metadata.max.age.ms", "3000");
    props.setProperty("group.id", config.getGroupId());
    props.setProperty("enable.auto.commit", "true");
    props.setProperty("auto.commit.interval.ms", "1000");
    props.setProperty("session.timeout.ms", "30000");
    props.setProperty("key.deserializer", config.getKafkaKeyDeserializerClass());
    props.setProperty("value.deserializer", config.getKafkaValueDeserializerClass());
    kafkaAdminClient = AdminClient.create(props);
    this.dataThreadManager = DataThreadManager.INSTANCE();
    init();
}
Example 4: DefaultProducer
import org.apache.kafka.clients.admin.AdminClient; // import the package/class this example depends on
public DefaultProducer(String configPath) throws ConfigFileNotFoundException
{
    ProducerConfig config = ProducerConfig.INSTANCE();
    config.init(configPath);
    config.validate();
    this.offerTimeout = config.getBufferOfferTimeout();
    // init meta client
    metaClient = new MetaClient(config.getMetaServerHost(),
            config.getMetaServerPort());
    // init kafka admin client
    Properties properties = new Properties();
    properties.setProperty("bootstrap.servers", config.getKafkaBootstrapServers());
    properties.setProperty("client.id", "producerAdmin");
    properties.setProperty("metadata.max.age.ms", "3000");
    kafkaAdminClient = AdminClient.create(properties);
    init();
}
Example 5: create
import org.apache.kafka.clients.admin.AdminClient; // import the package/class this example depends on
/**
 * Create a new AdminClient instance.
 * @param clusterConfig Which cluster to connect to.
 * @param clientId Which clientId to associate the connection with.
 * @return a configured AdminClient.
 */
public AdminClient create(final ClusterConfig clusterConfig, final String clientId) {
    // Build the AdminClient configuration
    final Map<String, Object> config = new HashMap<>();
    config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, clusterConfig.getConnectString());
    config.put(AdminClientConfig.CLIENT_ID_CONFIG, clientId);
    config.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, requestTimeout);

    if (clusterConfig.isUseSsl()) {
        config.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL");
        config.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, keyStoreRootPath + "/" + clusterConfig.getKeyStoreFile());
        config.put(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, clusterConfig.getKeyStorePassword());
        config.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, keyStoreRootPath + "/" + clusterConfig.getTrustStoreFile());
        config.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, clusterConfig.getTrustStorePassword());
    }
    return KafkaAdminClient.create(config);
}
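A possible call site for this factory, following the builder pattern shown in Example 6 below; the broker hosts, constructor argument, and client id are illustrative placeholders:

// Hypothetical usage of the factory above; broker hosts and client id are placeholders.
final KafkaAdminFactory kafkaAdminFactory = new KafkaAdminFactory("NotUsed");
final ClusterConfig clusterConfig = ClusterConfig.newBuilder()
        .withBrokerHosts("broker1:9092,broker2:9092")
        .build();
try (final AdminClient adminClient = kafkaAdminFactory.create(clusterConfig, "my-client-id")) {
    // get() throws ExecutionException/InterruptedException; handle or declare as needed.
    System.out.println(adminClient.describeCluster().nodes().get());
}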
Example 6: testCreateNonSslAdminClient
import org.apache.kafka.clients.admin.AdminClient; // import the package/class this example depends on
/**
 * Test that KafkaAdminFactory can create a working AdminClient when connecting to a non-ssl cluster.
 */
@Test
public void testCreateNonSslAdminClient() throws ExecutionException, InterruptedException {
    // Create cluster config
    final ClusterConfig clusterConfig = ClusterConfig.newBuilder()
            .withBrokerHosts(sharedKafkaTestResource.getKafkaConnectString())
            .build();
    final KafkaAdminFactory kafkaAdminFactory = new KafkaAdminFactory("NotUsed");

    // Create instance
    try (final AdminClient adminClient = kafkaAdminFactory.create(clusterConfig, "MyClientId")) {
        // Call method to validate things work as expected
        final DescribeClusterResult results = adminClient.describeCluster();
        assertNotNull("Should have a non-null result", results);

        // Request future result
        final Collection<Node> nodes = results.nodes().get();
        assertNotNull("Should have a non-null node collection", nodes);
        assertFalse("Should have a non-empty node collection", nodes.isEmpty());
    }
}
Example 7: init
import org.apache.kafka.clients.admin.AdminClient; // import the package/class this example depends on
@Before
public void init() {
    String multiplier = System.getenv("KAFKA_TIMEOUT_MULTIPLIER");
    if (multiplier != null) {
        timeoutMultiplier = Double.parseDouble(multiplier);
    }
    BrokerAddress[] brokerAddresses = embeddedKafka.getBrokerAddresses();
    List<String> bAddresses = new ArrayList<>();
    for (BrokerAddress bAddress : brokerAddresses) {
        bAddresses.add(bAddress.toString());
    }
    Map<String, Object> adminConfigs = new HashMap<>();
    // Only the first broker address is needed to bootstrap the connection.
    adminConfigs.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bAddresses.get(0));
    adminClient = AdminClient.create(adminConfigs);
}
Example 8: create
import org.apache.kafka.clients.admin.AdminClient; // import the package/class this example depends on
public static KsqlContext create(KsqlConfig ksqlConfig, SchemaRegistryClient schemaRegistryClient) {
    if (ksqlConfig == null) {
        ksqlConfig = new KsqlConfig(Collections.emptyMap());
    }
    Map<String, Object> streamsProperties = ksqlConfig.getKsqlStreamConfigProps();
    if (!streamsProperties.containsKey(StreamsConfig.APPLICATION_ID_CONFIG)) {
        streamsProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, APPLICATION_ID_OPTION_DEFAULT);
    }
    if (!streamsProperties.containsKey(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG)) {
        streamsProperties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, KAFKA_BOOTSTRAP_SERVER_OPTION_DEFAULT);
    }
    AdminClient adminClient = AdminClient.create(ksqlConfig.getKsqlAdminClientConfigProps());
    KafkaTopicClient topicClient = new KafkaTopicClientImpl(adminClient);
    if (schemaRegistryClient == null) {
        return new KsqlContext(adminClient, topicClient, new KsqlEngine(ksqlConfig, topicClient));
    } else {
        return new KsqlContext(adminClient, topicClient, new KsqlEngine(ksqlConfig, topicClient, schemaRegistryClient));
    }
}
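A hedged usage sketch of the context returned by this factory method, mirroring the sql() calls in Example 11 below; the statement text is illustrative and the null SchemaRegistryClient takes the first constructor branch above:

// Hypothetical call site for the factory method above.
KsqlContext ksqlContext = KsqlContext.create(new KsqlConfig(Collections.emptyMap()), null);
// Illustrative statement; any valid KSQL statement can be passed to sql().
ksqlContext.sql("CREATE STREAM pageviews (viewtime BIGINT, userid VARCHAR) " +
        "WITH (kafka_topic='pageviews', value_format='JSON');");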
Example 9: before
import org.apache.kafka.clients.admin.AdminClient; // import the package/class this example depends on
@Before
public void before() throws Exception {
    Map<String, Object> configMap = new HashMap<>();
    configMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    configMap.put("application.id", "KSQL");
    configMap.put("commit.interval.ms", 0);
    configMap.put("cache.max.bytes.buffering", 0);
    configMap.put("auto.offset.reset", "earliest");

    KsqlConfig ksqlConfig = new KsqlConfig(configMap);
    adminClient = AdminClient.create(ksqlConfig.getKsqlAdminClientConfigProps());
    ksqlEngine = new KsqlEngine(ksqlConfig, new KafkaTopicClientImpl(adminClient));
    metaStore = ksqlEngine.getMetaStore();
    topicProducer = new TopicProducer(CLUSTER);
    topicConsumer = new TopicConsumer(CLUSTER);

    createInitTopics();
    produceInitData();
    execInitCreateStreamQueries();
}
Example 10: start
import org.apache.kafka.clients.admin.AdminClient; // import the package/class this example depends on
public void start() throws Exception {
    embeddedKafkaCluster = new EmbeddedSingleNodeKafkaCluster();
    embeddedKafkaCluster.start();
    Map<String, Object> configMap = new HashMap<>();
    configMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, embeddedKafkaCluster.bootstrapServers());
    configMap.put("application.id", "KSQL");
    configMap.put("commit.interval.ms", 0);
    configMap.put("cache.max.bytes.buffering", 0);
    configMap.put("auto.offset.reset", "earliest");
    configMap.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath());

    this.ksqlConfig = new KsqlConfig(configMap);
    this.adminClient = AdminClient.create(ksqlConfig.getKsqlAdminClientConfigProps());
    this.topicClient = new KafkaTopicClientImpl(adminClient);
}
Example 11: shouldRunSimpleStatements
import org.apache.kafka.clients.admin.AdminClient; // import the package/class this example depends on
@Test
public void shouldRunSimpleStatements() throws Exception {
    AdminClient adminClient = mock(AdminClient.class);
    KafkaTopicClient kafkaTopicClient = mock(KafkaTopicClientImpl.class);
    KsqlEngine ksqlEngine = mock(KsqlEngine.class);
    Map<QueryId, PersistentQueryMetadata> liveQueryMap = new HashMap<>();
    KsqlContext ksqlContext = new KsqlContext(adminClient, kafkaTopicClient, ksqlEngine);

    expect(ksqlEngine.buildMultipleQueries(statement1, Collections.emptyMap()))
            .andReturn(Collections.emptyList());
    expect(ksqlEngine.buildMultipleQueries(statement2, Collections.emptyMap()))
            .andReturn(getQueryMetadata(new QueryId("CSAS_BIGORDERS"), DataSource.DataSourceType.KSTREAM));
    expect(ksqlEngine.getPersistentQueries()).andReturn(liveQueryMap);
    replay(ksqlEngine);

    ksqlContext.sql(statement1);
    ksqlContext.sql(statement2);

    verify(ksqlEngine);
}
Example 12: create
import org.apache.kafka.clients.admin.AdminClient; // import the package/class this example depends on
/**
 * Factory method.
 * @param cluster Which cluster to connect to.
 * @param userId Which userId to associate the connection with.
 * @return KafkaOperations client.
 */
public KafkaOperations create(final Cluster cluster, final long userId) {
    final String clientId = consumerIdPrefix + userId;

    // Create new operational client
    final ClusterConfig clusterConfig = ClusterConfig.newBuilder(cluster, secretManager).build();
    final AdminClient adminClient = kafkaAdminFactory.create(clusterConfig, clientId);
    return new KafkaOperations(adminClient);
}
Example 13: KafkaTopicProvisioner
import org.apache.kafka.clients.admin.AdminClient; // import the package/class this example depends on
public KafkaTopicProvisioner(KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties,
                             KafkaProperties kafkaProperties) {
    Assert.isTrue(kafkaProperties != null, "KafkaProperties cannot be null");
    Map<String, Object> adminClientProperties = kafkaProperties.buildAdminProperties();
    String kafkaConnectionString = kafkaBinderConfigurationProperties.getKafkaConnectionString();
    if (ObjectUtils.isEmpty(adminClientProperties.get(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG))
            || !kafkaConnectionString.equals(kafkaBinderConfigurationProperties.getDefaultKafkaConnectionString())) {
        adminClientProperties.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaConnectionString);
    }
    this.configurationProperties = kafkaBinderConfigurationProperties;
    this.adminClient = AdminClient.create(adminClientProperties);
}
Example 14: before
import org.apache.kafka.clients.admin.AdminClient; // import the package/class this example depends on
@Before
public void before() throws Exception {
    testHarness = new IntegrationTestHarness(DataSource.DataSourceSerDe.JSON.name());
    testHarness.start();
    Map<String, Object> streamsConfig = testHarness.ksqlConfig.getKsqlStreamConfigProps();
    streamsConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    KsqlConfig ksqlconfig = new KsqlConfig(streamsConfig);
    AdminClient adminClient = AdminClient.create(ksqlconfig.getKsqlAdminClientConfigProps());
    KafkaTopicClient topicClient = new KafkaTopicClientImpl(adminClient);
    ksqlEngine = new KsqlEngine(ksqlconfig, topicClient);

    testHarness.createTopic(pageViewTopic);
    testHarness.createTopic(usersTopic);
    pageViewDataProvider = new PageViewDataProvider();
    userDataProvider = new UserDataProvider();
    testHarness.publishTestData(usersTopic, userDataProvider, System.currentTimeMillis() - 10000);
    testHarness.publishTestData(pageViewTopic, pageViewDataProvider, System.currentTimeMillis());

    ksqlEngine.buildMultipleQueries(String.format(
            "CREATE TABLE %s (registertime bigint, gender varchar, regionid varchar, userid varchar) " +
            "WITH (kafka_topic='%s', value_format='JSON', key = 'userid');",
            userTable, usersTopic), Collections.emptyMap());
    ksqlEngine.buildMultipleQueries(String.format(
            "CREATE STREAM %s (viewtime bigint, userid varchar, pageid varchar) " +
            "WITH (kafka_topic='%s', value_format='JSON');",
            pageViewStream, pageViewTopic), Collections.emptyMap());
}
Example 15: testCreateTopic
import org.apache.kafka.clients.admin.AdminClient; // import the package/class this example depends on
@Test
public void testCreateTopic() {
    AdminClient adminClient = mock(AdminClient.class);
    expect(adminClient.describeCluster()).andReturn(getDescribeClusterResult());
    expect(adminClient.createTopics(anyObject())).andReturn(getCreateTopicsResult());
    expect(adminClient.listTopics()).andReturn(getListTopicsResult());
    expect(adminClient.describeConfigs(anyObject())).andReturn(getDescribeConfigsResult());
    replay(adminClient);

    KafkaTopicClient kafkaTopicClient = new KafkaTopicClientImpl(adminClient);
    kafkaTopicClient.createTopic("test", 1, (short) 1);
    verify(adminClient);
}