Java AdminClient Class Code Examples

This article collects typical usage examples of the Java class org.apache.kafka.clients.admin.AdminClient. If you are unsure what the AdminClient class does, or how and where to use it, the curated examples below should help.


The AdminClient class belongs to the org.apache.kafka.clients.admin package. Fifteen code examples of the class are shown below, ordered by popularity.
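
Before the examples, here is a minimal sketch of the typical AdminClient lifecycle (the broker address localhost:9092 is a placeholder): build a config, create the client, issue an asynchronous request, block on the returned KafkaFuture, and let try-with-resources close the client.

import java.util.Properties;
import java.util.Set;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;

public class AdminClientQuickStart {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Placeholder address; point this at your own cluster.
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

        // AdminClient implements AutoCloseable, so try-with-resources shuts it down cleanly.
        try (AdminClient adminClient = AdminClient.create(props)) {
            // listTopics() returns immediately; names().get() blocks until the broker responds.
            Set<String> topicNames = adminClient.listTopics().names().get();
            System.out.println(topicNames);
        }
    }
}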

Example 1: createTopic

import org.apache.kafka.clients.admin.AdminClient; // import the required package/class
/**
 * Creates a topic in Kafka. If the topic already exists this does nothing.
 * @param topicName - the name of the topic to create.
 * @param partitions - the number of partitions to create.
 */
public void createTopic(final String topicName, final int partitions) {
    final short replicationFactor = 1;

    // Create admin client
    try (final AdminClient adminClient = KafkaAdminClient.create(buildDefaultClientConfig())) {
        try {
            // Define topic
            final NewTopic newTopic = new NewTopic(topicName, partitions, replicationFactor);

            // Create topic, which is async call.
            final CreateTopicsResult createTopicsResult = adminClient.createTopics(Collections.singleton(newTopic));

            // Since the call is Async, Lets wait for it to complete.
            createTopicsResult.values().get(topicName).get();
        } catch (InterruptedException | ExecutionException e) {
            if (!(e.getCause() instanceof TopicExistsException)) {
                throw new RuntimeException(e.getMessage(), e);
            }
            // TopicExistsException - Swallow this exception, just means the topic already exists.
        }
    }
}
 
Developer: salesforce, Project: kafka-junit, Lines: 28, Source: KafkaTestServer.java
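
Example 1 waits on the per-topic future from values(). When creating several topics in one call, CreateTopicsResult.all() yields a single future that completes only once every topic exists. A minimal sketch, reusing the buildDefaultClientConfig() helper from Example 1 (the topic names, partition counts, and replication factor are placeholders):

try (AdminClient adminClient = KafkaAdminClient.create(buildDefaultClientConfig())) {
    adminClient.createTopics(Arrays.asList(
            new NewTopic("topic-a", 3, (short) 1),   // placeholder topic
            new NewTopic("topic-b", 3, (short) 1)))  // placeholder topic
        .all()   // one future covering every requested topic
        .get();  // blocks; throws ExecutionException if any creation fails
}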

Example 2: main

import org.apache.kafka.clients.admin.AdminClient; // import the required package/class
public static void main(String[] args) throws ExecutionException, InterruptedException {
    Map<String, Object> props = new HashMap<>();
    // list of host:port pairs used for establishing the initial connections
    // to the Kafka cluster
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka-local:9092");
//    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
//    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
    // maximum time to block, after which a TimeoutException is thrown
    props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 50000);
    // try-with-resources closes the client and its background thread when done
    try (AdminClient adminClient = AdminClient.create(props)) {
        adminClient.describeCluster();
        Collection<TopicListing> topicListings = adminClient.listTopics().listings().get();
        System.out.println(topicListings);
    }
}
 
Developer: kloiasoft, Project: eventapis, Lines: 18, Source: Eventapis.java

Example 3: DefaultConsumer

import org.apache.kafka.clients.admin.AdminClient; // import the required package/class
public DefaultConsumer(String configPath, List<TopicPartition> topicPartitions, List<TopicFiber> topicFibers) throws ConfigFileNotFoundException
{
    ConsumerConfig config = ConsumerConfig.INSTANCE();
    config.init(configPath);
    config.validate();
    this.topicPartitions = topicPartitions;
    this.topicFibers = topicFibers;
    Properties props = new Properties();
    props.setProperty("bootstrap.servers", config.getKafkaBootstrapServers());
    props.setProperty("client.id", "consumerAdmin");
    props.setProperty("metadata.max.age.ms", "3000");
    props.setProperty("group.id", config.getGroupId());
    props.setProperty("enable.auto.commit", "true");
    props.setProperty("auto.commit.interval.ms", "1000");
    props.setProperty("session.timeout.ms", "30000");
    props.setProperty("key.deserializer", config.getKafkaKeyDeserializerClass());
    props.setProperty("value.deserializer", config.getKafkaValueDeserializerClass());
    kafkaAdminClient = AdminClient.create(props);
    this.dataThreadManager = DataThreadManager.INSTANCE();
    init();
}
 
Developer: dbiir, Project: paraflow, Lines: 22, Source: DefaultConsumer.java
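
A side note on this example: settings such as group.id, enable.auto.commit, and the deserializer classes are consumer configs that the AdminClient does not use (it logs a warning for each unknown key), so only bootstrap.servers, client.id, and metadata.max.age.ms actually affect the client built here.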

Example 4: DefaultProducer

import org.apache.kafka.clients.admin.AdminClient; // import the required package/class
public DefaultProducer(String configPath) throws ConfigFileNotFoundException
{
    ProducerConfig config = ProducerConfig.INSTANCE();
    config.init(configPath);
    config.validate();
    this.offerTimeout = config.getBufferOfferTimeout();
    // init meta client
    metaClient = new MetaClient(config.getMetaServerHost(),
            config.getMetaServerPort());
    // init kafka admin client
    Properties properties = new Properties();
    properties.setProperty("bootstrap.servers", config.getKafkaBootstrapServers());
    properties.setProperty("client.id", "producerAdmin");
    properties.setProperty("metadata.max.age.ms", "3000");
    kafkaAdminClient = AdminClient.create(properties);
    init();
}
 
Developer: dbiir, Project: paraflow, Lines: 18, Source: DefaultProducer.java

Example 5: create

import org.apache.kafka.clients.admin.AdminClient; // import the required package/class
/**
 * Create a new AdminClient instance.
 * @param clusterConfig What cluster to connect to.
 * @param clientId What clientId to associate the connection with.
 */
public AdminClient create(final ClusterConfig clusterConfig, final String clientId) {
    // Create a map
    final Map<String, Object> config = new HashMap<>();
    config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, clusterConfig.getConnectString());
    config.put(AdminClientConfig.CLIENT_ID_CONFIG, clientId);
    config.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, requestTimeout);

    if (clusterConfig.isUseSsl()) {
        config.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL");
        config.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, keyStoreRootPath + "/" + clusterConfig.getKeyStoreFile());
        config.put(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, clusterConfig.getKeyStorePassword());
        config.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, keyStoreRootPath + "/" + clusterConfig.getTrustStoreFile());
        config.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, clusterConfig.getTrustStorePassword());
    }

    return KafkaAdminClient.create(config);
}
 
Developer: SourceLabOrg, Project: kafka-webview, Lines: 23, Source: KafkaAdminFactory.java
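
For comparison, a minimal sketch of supplying the same SSL keys directly when building a standalone client outside this factory (the address, file paths, and password are placeholders; the keystore entries are only required when brokers demand mutual TLS):

final Map<String, Object> config = new HashMap<>();
config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "broker.example.com:9093"); // placeholder
config.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL");
config.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, "/path/to/client.truststore.jks"); // placeholder
config.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "truststore-password");            // placeholder

try (AdminClient adminClient = KafkaAdminClient.create(config)) {
    adminClient.describeCluster();
}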

Example 6: testCreateNonSslAdminClient

import org.apache.kafka.clients.admin.AdminClient; // import the required package/class
/**
 * Test that KafkaAdminFactory can create a working AdminClient when connecting to a non-ssl cluster.
 */
@Test
public void testCreateNonSslAdminClient() throws ExecutionException, InterruptedException {
    // Create Cluster config
    final ClusterConfig clusterConfig = ClusterConfig.newBuilder()
        .withBrokerHosts(sharedKafkaTestResource.getKafkaConnectString())
        .build();

    final KafkaAdminFactory kafkaAdminFactory = new KafkaAdminFactory("NotUsed");

    // Create instance
    try (final AdminClient adminClient = kafkaAdminFactory.create(clusterConfig, "MyClientId")) {

        // Call method to validate things work as expected
        final DescribeClusterResult results = adminClient.describeCluster();
        assertNotNull("Should have a non-null result", results);

        // Request future result
        final Collection<Node> nodes = results.nodes().get();
        assertNotNull("Should have non-null node result", nodes);
        assertFalse("Should have non-empty node", nodes.isEmpty());
    }
}
 
Developer: SourceLabOrg, Project: kafka-webview, Lines: 26, Source: KafkaAdminFactoryTest.java
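
Beyond nodes(), DescribeClusterResult exposes further futures from the same describeCluster() call; a minimal sketch of reading them:

final DescribeClusterResult results = adminClient.describeCluster();
System.out.println("cluster id: " + results.clusterId().get());   // unique id of the cluster
System.out.println("controller: " + results.controller().get());  // current controller node
System.out.println("node count: " + results.nodes().get().size());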

Example 7: init

import org.apache.kafka.clients.admin.AdminClient; // import the required package/class
@Before
public void init() {
	String multiplier = System.getenv("KAFKA_TIMEOUT_MULTIPLIER");
	if (multiplier != null) {
		timeoutMultiplier = Double.parseDouble(multiplier);
	}

	BrokerAddress[] brokerAddresses = embeddedKafka.getBrokerAddresses();
	List<String> bAddresses = new ArrayList<>();
	for (BrokerAddress bAddress : brokerAddresses) {
		bAddresses.add(bAddress.toString());
	}

	Map<String, Object> adminConfigs = new HashMap<>();
	// only the first broker address is needed to bootstrap the client
	adminConfigs.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bAddresses.get(0));
	adminClient = AdminClient.create(adminConfigs);
}
 
Developer: spring-cloud, Project: spring-cloud-stream-binder-kafka, Lines: 19, Source: KafkaBinderTests.java

Example 8: create

import org.apache.kafka.clients.admin.AdminClient; // import the required package/class
public static KsqlContext create(KsqlConfig ksqlConfig, SchemaRegistryClient schemaRegistryClient) {
  if (ksqlConfig == null) {
    ksqlConfig = new KsqlConfig(Collections.emptyMap());
  }
  Map<String, Object> streamsProperties = ksqlConfig.getKsqlStreamConfigProps();
  if (!streamsProperties.containsKey(StreamsConfig.APPLICATION_ID_CONFIG)) {
    streamsProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, APPLICATION_ID_OPTION_DEFAULT);
  }
  if (!streamsProperties.containsKey(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG)) {
    streamsProperties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, KAFKA_BOOTSTRAP_SERVER_OPTION_DEFAULT);
  }
  AdminClient adminClient = AdminClient.create(ksqlConfig.getKsqlAdminClientConfigProps());
  KafkaTopicClient topicClient = new KafkaTopicClientImpl(adminClient);
  if (schemaRegistryClient == null) {
    return new KsqlContext(adminClient, topicClient, new KsqlEngine(ksqlConfig, topicClient));
  } else {
    return new KsqlContext(adminClient, topicClient, new KsqlEngine(ksqlConfig, topicClient, schemaRegistryClient));
  }
}
 
Developer: confluentinc, Project: ksql, Lines: 21, Source: KsqlContext.java

Example 9: before

import org.apache.kafka.clients.admin.AdminClient; // import the required package/class
@Before
public void before() throws Exception {

  Map<String, Object> configMap = new HashMap<>();
  configMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
  configMap.put("application.id", "KSQL");
  configMap.put("commit.interval.ms", 0);
  configMap.put("cache.max.bytes.buffering", 0);
  configMap.put("auto.offset.reset", "earliest");

  KsqlConfig ksqlConfig = new KsqlConfig(configMap);
  adminClient = AdminClient.create(ksqlConfig.getKsqlAdminClientConfigProps());
  ksqlEngine = new KsqlEngine(ksqlConfig, new KafkaTopicClientImpl(adminClient));
  metaStore = ksqlEngine.getMetaStore();
  topicProducer = new TopicProducer(CLUSTER);
  topicConsumer = new TopicConsumer(CLUSTER);

  createInitTopics();
  produceInitData();
  execInitCreateStreamQueries();
}
 
Developer: confluentinc, Project: ksql, Lines: 23, Source: JsonFormatTest.java

Example 10: start

import org.apache.kafka.clients.admin.AdminClient; // import the required package/class
public void start() throws Exception {
  embeddedKafkaCluster = new EmbeddedSingleNodeKafkaCluster();
  embeddedKafkaCluster.start();
  Map<String, Object> configMap = new HashMap<>();

  configMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, embeddedKafkaCluster.bootstrapServers());
  configMap.put("application.id", "KSQL");
  configMap.put("commit.interval.ms", 0);
  configMap.put("cache.max.bytes.buffering", 0);
  configMap.put("auto.offset.reset", "earliest");
  configMap.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath());

  this.ksqlConfig = new KsqlConfig(configMap);
  this.adminClient = AdminClient.create(ksqlConfig.getKsqlAdminClientConfigProps());
  this.topicClient = new KafkaTopicClientImpl(adminClient);
}
 
Developer: confluentinc, Project: ksql, Lines: 18, Source: IntegrationTestHarness.java

Example 11: shouldRunSimpleStatements

import org.apache.kafka.clients.admin.AdminClient; // import the required package/class
@Test
public void shouldRunSimpleStatements() throws Exception {
  AdminClient adminClient = mock(AdminClient.class);
  KafkaTopicClient kafkaTopicClient = mock(KafkaTopicClientImpl.class);
  KsqlEngine ksqlEngine = mock(KsqlEngine.class);

  Map<QueryId, PersistentQueryMetadata> liveQueryMap = new HashMap<>();

  KsqlContext ksqlContext = new KsqlContext(adminClient, kafkaTopicClient, ksqlEngine);

  expect(ksqlEngine.buildMultipleQueries(statement1, Collections.emptyMap()))
      .andReturn(Collections.emptyList());
  expect(ksqlEngine.buildMultipleQueries(statement2, Collections.emptyMap()))
      .andReturn(getQueryMetadata(new QueryId("CSAS_BIGORDERS"), DataSource.DataSourceType.KSTREAM));
  expect(ksqlEngine.getPersistentQueries()).andReturn(liveQueryMap);
  replay(ksqlEngine);
  ksqlContext.sql(statement1);
  ksqlContext.sql(statement2);

  verify(ksqlEngine);
}
 
Developer: confluentinc, Project: ksql, Lines: 23, Source: KsqlContextTest.java

Example 12: create

import org.apache.kafka.clients.admin.AdminClient; // import the required package/class
/**
 * Factory method.
 * @param cluster What cluster to connect to.
 * @param userId What userId to associate the connection with.
 * @return KafkaOperations client.
 */
public KafkaOperations create(final Cluster cluster, final long userId) {
    final String clientId = consumerIdPrefix + userId;

    // Create new Operational Client
    final ClusterConfig clusterConfig = ClusterConfig.newBuilder(cluster, secretManager).build();
    final AdminClient adminClient = kafkaAdminFactory.create(clusterConfig, clientId);

    return new KafkaOperations(adminClient);
}
 
Developer: SourceLabOrg, Project: kafka-webview, Lines: 16, Source: KafkaOperationsFactory.java

Example 13: KafkaTopicProvisioner

import org.apache.kafka.clients.admin.AdminClient; // import the required package/class
public KafkaTopicProvisioner(KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties,
							KafkaProperties kafkaProperties) {
	Assert.isTrue(kafkaProperties != null, "KafkaProperties cannot be null");
	Map<String, Object> adminClientProperties = kafkaProperties.buildAdminProperties();
	String kafkaConnectionString = kafkaBinderConfigurationProperties.getKafkaConnectionString();

	if (ObjectUtils.isEmpty(adminClientProperties.get(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG))
			|| !kafkaConnectionString.equals(kafkaBinderConfigurationProperties.getDefaultKafkaConnectionString())) {
		adminClientProperties.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaConnectionString);
	}
	this.configurationProperties = kafkaBinderConfigurationProperties;
	this.adminClient = AdminClient.create(adminClientProperties);
}
 
Developer: spring-cloud, Project: spring-cloud-stream-binder-kafka, Lines: 14, Source: KafkaTopicProvisioner.java

Example 14: before

import org.apache.kafka.clients.admin.AdminClient; // import the required package/class
@Before
public void before() throws Exception {
  testHarness = new IntegrationTestHarness(DataSource.DataSourceSerDe.JSON.name());
  testHarness.start();
  Map<String, Object> streamsConfig = testHarness.ksqlConfig.getKsqlStreamConfigProps();
  streamsConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

  KsqlConfig ksqlconfig = new KsqlConfig(streamsConfig);
  AdminClient adminClient = AdminClient.create(ksqlconfig.getKsqlAdminClientConfigProps());
  KafkaTopicClient topicClient = new KafkaTopicClientImpl(adminClient);

  ksqlEngine = new KsqlEngine(ksqlconfig, topicClient);

  testHarness.createTopic(pageViewTopic);
  testHarness.createTopic(usersTopic);

  pageViewDataProvider = new PageViewDataProvider();
  userDataProvider = new UserDataProvider();

  testHarness.publishTestData(usersTopic, userDataProvider, System.currentTimeMillis() - 10000);
  testHarness.publishTestData(pageViewTopic, pageViewDataProvider, System.currentTimeMillis());

  ksqlEngine.buildMultipleQueries(String.format("CREATE TABLE %s (registertime bigint, gender varchar, regionid varchar, " +
          "userid varchar) WITH (kafka_topic='%s', value_format='JSON', key = 'userid');",
                                                       userTable,
                                                       usersTopic), Collections.emptyMap());
  ksqlEngine.buildMultipleQueries(String.format("CREATE STREAM %s (viewtime bigint, userid varchar, pageid varchar) " +
          "WITH (kafka_topic='%s', value_format='JSON');", pageViewStream, pageViewTopic), Collections.emptyMap());
}
 
Developer: confluentinc, Project: ksql, Lines: 30, Source: EndToEndIntegrationTest.java

Example 15: testCreateTopic

import org.apache.kafka.clients.admin.AdminClient; // import the required package/class
@Test
public void testCreateTopic() {
  AdminClient adminClient = mock(AdminClient.class);
  expect(adminClient.describeCluster()).andReturn(getDescribeClusterResult());
  expect(adminClient.createTopics(anyObject())).andReturn(getCreateTopicsResult());
  expect(adminClient.listTopics()).andReturn(getListTopicsResult());
  expect(adminClient.describeConfigs(anyObject())).andReturn(getDescribeConfigsResult());

  replay(adminClient);
  KafkaTopicClient kafkaTopicClient = new KafkaTopicClientImpl(adminClient);
  kafkaTopicClient.createTopic("test", 1, (short)1);
  verify(adminClient);
}
 
Developer: confluentinc, Project: ksql, Lines: 14, Source: KafkaTopicClientTest.java
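
The mocked client above only exercises topic creation; the real AdminClient offers a symmetric asynchronous delete. A minimal sketch (the client config and the topic name "test" are placeholders):

try (AdminClient adminClient = AdminClient.create(props)) { // props as in the quick-start sketch
    adminClient.deleteTopics(Collections.singleton("test"))
        .all()   // one future covering every requested deletion
        .get();  // blocks; throws ExecutionException if the topic does not exist
}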


Note: The org.apache.kafka.clients.admin.AdminClient class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright in the source code; consult each project's License before distributing or using it. Do not republish without permission.