

Java AdminClient.create Method Code Examples

This article collects typical usage examples of the Java method org.apache.kafka.clients.admin.AdminClient.create. If you have been wondering exactly what AdminClient.create does, how to call it, or how it is used in real code, the curated examples below should help. You can also browse further usage examples of its declaring class, org.apache.kafka.clients.admin.AdminClient.


The sections below show 15 code examples of the AdminClient.create method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
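Before the collected examples, here is a minimal, self-contained sketch of the pattern they all share: build a configuration map containing at least bootstrap.servers, call AdminClient.create, use the client, and close it. This is a hedged illustration rather than one of the collected examples; the broker address localhost:9092 is an assumed placeholder.

import java.util.Collections;
import java.util.Map;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;

public class AdminClientCreateSketch {
    public static void main(String[] args) throws Exception {
        // bootstrap.servers is the only required setting; localhost:9092 is a placeholder.
        Map<String, Object> config = Collections.singletonMap(
                AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        // AdminClient implements AutoCloseable, so try-with-resources shuts down
        // its network threads even if a call inside the block throws.
        try (AdminClient adminClient = AdminClient.create(config)) {
            // Admin operations are asynchronous and return *Result objects wrapping
            // KafkaFuture; get() blocks until the broker replies.
            System.out.println(adminClient.listTopics().names().get());
        }
    }
}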

Example 1: main

import org.apache.kafka.clients.admin.AdminClient; // import the dependent package/class
public static void main(String[] args) throws ExecutionException, InterruptedException {
        Map<String, Object> props = new HashMap<>();
        // list of host:port pairs used for establishing the initial connections
        // to the Kafka cluster; ProducerConfig.BOOTSTRAP_SERVERS_CONFIG resolves to
        // the same "bootstrap.servers" key that AdminClient expects
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
                "kafka-local:9092");
//        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
//                JsonSerializer.class);
//        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
//                JsonSerializer.class);
        // max.block.ms caps how long producer calls may block before a
        // TimeoutException; it is not an AdminClient setting and is ignored here
        props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 50000);
        AdminClient adminClient = AdminClient.create(props);
        adminClient.describeCluster();
        Collection<TopicListing> topicListings = adminClient.listTopics().listings().get();
        System.out.println(topicListings);
    }
 
Author: kloiasoft, Project: eventapis, Lines: 18, Source: Eventapis.java
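Example 1 calls describeCluster() but discards the returned DescribeClusterResult and never closes the client. A hedged follow-up sketch, continuing from the adminClient above (DescribeClusterResult is in org.apache.kafka.clients.admin; the printed fields are its standard accessors):

DescribeClusterResult cluster = adminClient.describeCluster();
// Each accessor returns a KafkaFuture; get() blocks until the broker responds.
System.out.println("cluster id: " + cluster.clusterId().get());
System.out.println("controller: " + cluster.controller().get());
System.out.println("nodes: " + cluster.nodes().get());
// Close the client when done so its network threads are released.
adminClient.close();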

Example 2: DefaultConsumer

import org.apache.kafka.clients.admin.AdminClient; // import the dependent package/class
public DefaultConsumer(String configPath, List<TopicPartition> topicPartitions, List<TopicFiber> topicFibers) throws ConfigFileNotFoundException
{
    ConsumerConfig config = ConsumerConfig.INSTANCE();
    config.init(configPath);
    config.validate();
    this.topicPartitions = topicPartitions;
    this.topicFibers = topicFibers;
    Properties props = new Properties();
    props.setProperty("bootstrap.servers", config.getKafkaBootstrapServers());
    props.setProperty("client.id", "consumerAdmin");
    props.setProperty("metadata.max.age.ms", "3000");
    props.setProperty("group.id", config.getGroupId());
    props.setProperty("enable.auto.commit", "true");
    props.setProperty("auto.commit.interval.ms", "1000");
    props.setProperty("session.timeout.ms", "30000");
    props.setProperty("key.deserializer", config.getKafkaKeyDeserializerClass());
    props.setProperty("value.deserializer", config.getKafkaValueDeserializerClass());
    kafkaAdminClient = AdminClient.create(props);
    this.dataThreadManager = DataThreadManager.INSTANCE();
    init();
}
 
Author: dbiir, Project: paraflow, Lines: 22, Source: DefaultConsumer.java

Example 3: DefaultProducer

import org.apache.kafka.clients.admin.AdminClient; // import the dependent package/class
public DefaultProducer(String configPath) throws ConfigFileNotFoundException
{
    ProducerConfig config = ProducerConfig.INSTANCE();
    config.init(configPath);
    config.validate();
    this.offerTimeout = config.getBufferOfferTimeout();
    // init meta client
    metaClient = new MetaClient(config.getMetaServerHost(),
            config.getMetaServerPort());
    // init kafka admin client
    Properties properties = new Properties();
    properties.setProperty("bootstrap.servers", config.getKafkaBootstrapServers());
    properties.setProperty("client.id", "producerAdmin");
    properties.setProperty("metadata.max.age.ms", "3000");
    kafkaAdminClient = AdminClient.create(properties);
    init();
}
 
Author: dbiir, Project: paraflow, Lines: 18, Source: DefaultProducer.java

Example 4: init

import org.apache.kafka.clients.admin.AdminClient; // import the dependent package/class
@Before
public void init() {
	String multiplier = System.getenv("KAFKA_TIMEOUT_MULTIPLIER");
	if (multiplier != null) {
		timeoutMultiplier = Double.parseDouble(multiplier);
	}

	BrokerAddress[] brokerAddresses = embeddedKafka.getBrokerAddresses();
	List<String> bAddresses = new ArrayList<>();
	for (BrokerAddress bAddress : brokerAddresses) {
		bAddresses.add(bAddress.toString());
	}
	Map<String, Object> adminConfigs = new HashMap<>();
	adminConfigs.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bAddresses.get(0));
	adminClient = AdminClient.create(adminConfigs);
}
 
Author: spring-cloud, Project: spring-cloud-stream-binder-kafka, Lines: 19, Source: KafkaBinderTests.java
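A test fixture like this usually goes on to provision the topics under test. A hedged sketch of topic creation with the adminClient built above (the topic name, partition count, and replication factor are assumptions; NewTopic is in org.apache.kafka.clients.admin and Collections in java.util):

// NewTopic(name, numPartitions, replicationFactor)
NewTopic topic = new NewTopic("binder-test-topic", 1, (short) 1);
// createTopics is asynchronous; all().get() blocks until every topic exists or the call fails.
adminClient.createTopics(Collections.singletonList(topic)).all().get();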

Example 5: create

import org.apache.kafka.clients.admin.AdminClient; // import the dependent package/class
public static KsqlContext create(KsqlConfig ksqlConfig, SchemaRegistryClient schemaRegistryClient) {
  if (ksqlConfig == null) {
    ksqlConfig = new KsqlConfig(Collections.emptyMap());
  }
  Map<String, Object> streamsProperties = ksqlConfig.getKsqlStreamConfigProps();
  if (!streamsProperties.containsKey(StreamsConfig.APPLICATION_ID_CONFIG)) {
    streamsProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, APPLICATION_ID_OPTION_DEFAULT);
  }
  if (!streamsProperties.containsKey(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG)) {
    streamsProperties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, KAFKA_BOOTSTRAP_SERVER_OPTION_DEFAULT);
  }
  AdminClient adminClient = AdminClient.create(ksqlConfig.getKsqlAdminClientConfigProps());
  KafkaTopicClient topicClient = new KafkaTopicClientImpl(adminClient);
  if (schemaRegistryClient == null) {
    return new KsqlContext(adminClient, topicClient, new KsqlEngine(ksqlConfig, topicClient));
  } else {
    return new KsqlContext(adminClient, topicClient, new KsqlEngine(ksqlConfig, topicClient, schemaRegistryClient));
  }

}
 
Author: confluentinc, Project: ksql, Lines: 21, Source: KsqlContext.java

Example 6: before

import org.apache.kafka.clients.admin.AdminClient; // import the dependent package/class
@Before
public void before() throws Exception {

  Map<String, Object> configMap = new HashMap<>();
  configMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
  configMap.put("application.id", "KSQL");
  configMap.put("commit.interval.ms", 0);
  configMap.put("cache.max.bytes.buffering", 0);
  configMap.put("auto.offset.reset", "earliest");

  KsqlConfig ksqlConfig = new KsqlConfig(configMap);
  adminClient = AdminClient.create(ksqlConfig.getKsqlAdminClientConfigProps());
  ksqlEngine = new KsqlEngine(ksqlConfig, new KafkaTopicClientImpl(adminClient));
  metaStore = ksqlEngine.getMetaStore();
  topicProducer = new TopicProducer(CLUSTER);
  topicConsumer = new TopicConsumer(CLUSTER);

  createInitTopics();
  produceInitData();
  execInitCreateStreamQueries();

}
 
Author: confluentinc, Project: ksql, Lines: 23, Source: JsonFormatTest.java

Example 7: start

import org.apache.kafka.clients.admin.AdminClient; // import the dependent package/class
public void start() throws Exception {
  embeddedKafkaCluster = new EmbeddedSingleNodeKafkaCluster();
  embeddedKafkaCluster.start();
  Map<String, Object> configMap = new HashMap<>();

  configMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, embeddedKafkaCluster.bootstrapServers());
  configMap.put("application.id", "KSQL");
  configMap.put("commit.interval.ms", 0);
  configMap.put("cache.max.bytes.buffering", 0);
  configMap.put("auto.offset.reset", "earliest");
  configMap.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath());

  this.ksqlConfig = new KsqlConfig(configMap);
  this.adminClient = AdminClient.create(ksqlConfig.getKsqlAdminClientConfigProps());
  this.topicClient = new KafkaTopicClientImpl(adminClient);

}
 
Author: confluentinc, Project: ksql, Lines: 18, Source: IntegrationTestHarness.java

Example 8: KafkaTopicProvisioner

import org.apache.kafka.clients.admin.AdminClient; // import the dependent package/class
public KafkaTopicProvisioner(KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties,
							KafkaProperties kafkaProperties) {
	Assert.isTrue(kafkaProperties != null, "KafkaProperties cannot be null");
	Map<String, Object> adminClientProperties = kafkaProperties.buildAdminProperties();
	String kafkaConnectionString = kafkaBinderConfigurationProperties.getKafkaConnectionString();

	if (ObjectUtils.isEmpty(adminClientProperties.get(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG))
			|| !kafkaConnectionString.equals(kafkaBinderConfigurationProperties.getDefaultKafkaConnectionString())) {
		adminClientProperties.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaConnectionString);
	}
	this.configurationProperties = kafkaBinderConfigurationProperties;
	this.adminClient = AdminClient.create(adminClientProperties);
}
 
Author: spring-cloud, Project: spring-cloud-stream-binder-kafka, Lines: 14, Source: KafkaTopicProvisioner.java

Example 9: before

import org.apache.kafka.clients.admin.AdminClient; // import the dependent package/class
@Before
public void before() throws Exception {
  testHarness = new IntegrationTestHarness(DataSource.DataSourceSerDe.JSON.name());
  testHarness.start();
  Map<String, Object> streamsConfig = testHarness.ksqlConfig.getKsqlStreamConfigProps();
  streamsConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

  KsqlConfig ksqlconfig = new KsqlConfig(streamsConfig);
  AdminClient adminClient = AdminClient.create(ksqlconfig.getKsqlAdminClientConfigProps());
  KafkaTopicClient topicClient = new KafkaTopicClientImpl(adminClient);

  ksqlEngine = new KsqlEngine(ksqlconfig, topicClient);

  testHarness.createTopic(pageViewTopic);
  testHarness.createTopic(usersTopic);

  pageViewDataProvider = new PageViewDataProvider();
  userDataProvider = new UserDataProvider();

  testHarness.publishTestData(usersTopic, userDataProvider, System.currentTimeMillis() - 10000);
  testHarness.publishTestData(pageViewTopic, pageViewDataProvider, System.currentTimeMillis());

  ksqlEngine.buildMultipleQueries(String.format("CREATE TABLE %s (registertime bigint, gender varchar, regionid varchar, " +
          "userid varchar) WITH (kafka_topic='%s', value_format='JSON', key = 'userid');",
                                                       userTable,
                                                       usersTopic), Collections.emptyMap());
  ksqlEngine.buildMultipleQueries(String.format("CREATE STREAM %s (viewtime bigint, userid varchar, pageid varchar) " +
          "WITH (kafka_topic='%s', value_format='JSON');", pageViewStream, pageViewTopic), Collections.emptyMap());
}
 
Author: confluentinc, Project: ksql, Lines: 30, Source: EndToEndIntegrationTest.java

Example 10: admin

import org.apache.kafka.clients.admin.AdminClient; // import the dependent package/class
public AdminClient admin() {
    if (uri == null) throw new Error("uri must not be null");

    Map<String, Object> config = Maps.newHashMap();
    config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, uri);
    return AdminClient.create(config);
}
 
Author: neowu, Project: core-ng-project, Lines: 8, Source: Kafka.java
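Note that this factory builds a fresh AdminClient on every call, so the caller owns the client's lifecycle. A hedged usage sketch (the kafka instance and topic name are assumptions):

// AdminClient implements AutoCloseable; close what admin() returns when finished.
try (AdminClient admin = kafka.admin()) {
    admin.deleteTopics(Collections.singletonList("obsolete-topic")).all().get();
}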

Example 11: testListTopics

import org.apache.kafka.clients.admin.AdminClient; // import the dependent package/class
@Test(enabled = false)
public void testListTopics() throws Exception {
    Map<String, Object> adminClientConfig = new HashMap<>();
    adminClientConfig.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "api.local.getbux.com:32400,api.local.getbux.com:32401,api.local.getbux.com:32402");
    adminClientConfig.put(AdminClientConfig.CLIENT_ID_CONFIG, "test-adminClient");
    AdminClient adminClient = AdminClient.create(adminClientConfig);

    // fetch the topic listings to verify all the topics have been created
    ListTopicsResult listTopicsResult = adminClient.listTopics();
    Map<String, TopicListing> availableTopics = listTopicsResult.namesToListings().get();

    assertNotNull(availableTopics);
}
 
Author: elasticsoftwarefoundation, Project: elasticactors, Lines: 14, Source: KafkaAdminClientTest.java
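By default listTopics() omits Kafka's internal topics such as __consumer_offsets. A hedged variant of the lookup above that includes them, using ListTopicsOptions from the standard admin API:

ListTopicsOptions options = new ListTopicsOptions().listInternal(true);
Map<String, TopicListing> allTopics = adminClient.listTopics(options).namesToListings().get();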

Example 12: shouldAggregateWithNoWindow

import org.apache.kafka.clients.admin.AdminClient; // import the dependent package/class
@Test
public void shouldAggregateWithNoWindow() throws Exception {

  testHarness.publishTestData(topicName, dataProvider, now);


  final String streamName = "NOWINDOW_AGGTEST";

  final String queryString = String.format(
      "CREATE TABLE %s AS SELECT %s FROM ORDERS WHERE ITEMID = 'ITEM_1' GROUP BY ITEMID;",
      streamName,
      "ITEMID, COUNT(ITEMID), SUM(ORDERUNITS)"
  );

  ksqlContext.sql(queryString);

  Schema resultSchema = ksqlContext.getMetaStore().getSource(streamName).getSchema();

  final GenericRow expected = new GenericRow(Arrays.asList(null, null, "ITEM_1", 2 /* 2 x items */, 20.0));

  TestUtils.waitForCondition(() -> {
    final Map<String, GenericRow> aggregateResults = testHarness.consumeData(
        streamName, resultSchema, 1, new StringDeserializer(), MAX_POLL_PER_ITERATION);
    final GenericRow actual = aggregateResults.get("ITEM_1");
    return expected.equals(actual);
  }, 60000, "didn't receive correct results within timeout");

  AdminClient adminClient = AdminClient.create(testHarness.ksqlConfig.getKsqlStreamConfigProps());
  KafkaTopicClient topicClient = new KafkaTopicClientImpl(adminClient);

  Set<String> topicBeforeCleanup = topicClient.listTopicNames();

  assertThat("Expected to have 5 topics instead have : " + topicBeforeCleanup.size(),
             topicBeforeCleanup.size(), equalTo(5));
  QueryMetadata queryMetadata = ksqlContext.getRunningQueries().iterator().next();

  queryMetadata.close();
  Set<String> topicsAfterCleanUp = topicClient.listTopicNames();

  assertThat("Expected to see 3 topics after clean up but seeing " + topicsAfterCleanUp.size
      (), topicsAfterCleanUp.size(), equalTo(3));
}
 
Author: confluentinc, Project: ksql, Lines: 45, Source: WindowingIntTest.java

Example 13: shouldAggregateTumblingWindow

import org.apache.kafka.clients.admin.AdminClient; // import the dependent package/class
@Test
public void shouldAggregateTumblingWindow() throws Exception {

  testHarness.publishTestData(topicName, dataProvider, now);


  final String streamName = "TUMBLING_AGGTEST";

  final String queryString = String.format(
          "CREATE TABLE %s AS SELECT %s FROM ORDERS WINDOW %s WHERE ITEMID = 'ITEM_1' GROUP BY ITEMID;",
          streamName,
          "ITEMID, COUNT(ITEMID), SUM(ORDERUNITS)",
          "TUMBLING ( SIZE 10 SECONDS)"
  );

  ksqlContext.sql(queryString);

  Schema resultSchema = ksqlContext.getMetaStore().getSource(streamName).getSchema();

  final GenericRow expected = new GenericRow(Arrays.asList(null, null, "ITEM_1", 2 /* 2 x items */, 20.0));

  final Map<String, GenericRow> results = new HashMap<>();
  TestUtils.waitForCondition(() -> {
    final Map<Windowed<String>, GenericRow> windowedResults = testHarness.consumeData(
        streamName, resultSchema, 1, new WindowedDeserializer<>(new StringDeserializer()), MAX_POLL_PER_ITERATION);
    updateResults(results, windowedResults);
    final GenericRow actual = results.get("ITEM_1");
    return expected.equals(actual);
  }, 60000, "didn't receive correct results within timeout");

  AdminClient adminClient = AdminClient.create(testHarness.ksqlConfig.getKsqlStreamConfigProps());
  KafkaTopicClient topicClient = new KafkaTopicClientImpl(adminClient);

  Set<String> topicBeforeCleanup = topicClient.listTopicNames();

  assertThat("Expected to have 5 topics instead have : " + topicBeforeCleanup.size(),
             topicBeforeCleanup.size(), equalTo(5));
  QueryMetadata queryMetadata = ksqlContext.getRunningQueries().iterator().next();

  queryMetadata.close();
  Set<String> topicsAfterCleanUp = topicClient.listTopicNames();

  assertThat("Expected to see 3 topics after clean up but seeing " + topicsAfterCleanUp.size
      (), topicsAfterCleanUp.size(), equalTo(3));
}
 
Author: confluentinc, Project: ksql, Lines: 45, Source: WindowingIntTest.java

Example 14: shouldAggregateHoppingWindow

import org.apache.kafka.clients.admin.AdminClient; // import the dependent package/class
@Test
public void shouldAggregateHoppingWindow() throws Exception {

  testHarness.publishTestData(topicName, dataProvider, now);


  final String streamName = "HOPPING_AGGTEST";

  final String queryString = String.format(
          "CREATE TABLE %s AS SELECT %s FROM ORDERS WINDOW %s WHERE ITEMID = 'ITEM_1' GROUP BY ITEMID;",
          streamName,
          "ITEMID, COUNT(ITEMID), SUM(ORDERUNITS)",
          "HOPPING ( SIZE 10 SECONDS, ADVANCE BY 5 SECONDS)"
  );

  ksqlContext.sql(queryString);

  Schema resultSchema = ksqlContext.getMetaStore().getSource(streamName).getSchema();


  final GenericRow expected = new GenericRow(Arrays.asList(null, null, "ITEM_1", 2 /* 2 x items */, 20.0));

  final Map<String, GenericRow> results = new HashMap<>();
  TestUtils.waitForCondition(() -> {
    final Map<Windowed<String>, GenericRow> windowedResults = testHarness.consumeData(
        streamName, resultSchema, 1, new WindowedDeserializer<>(new StringDeserializer()), 1000);
    updateResults(results, windowedResults);
    final GenericRow actual = results.get("ITEM_1");
    return expected.equals(actual);
  }, 60000, "didn't receive correct results within timeout");

  AdminClient adminClient = AdminClient.create(testHarness.ksqlConfig.getKsqlStreamConfigProps());
  KafkaTopicClient topicClient = new KafkaTopicClientImpl(adminClient);

  Set<String> topicBeforeCleanup = topicClient.listTopicNames();

  assertThat("Expected to have 5 topics instead have : " + topicBeforeCleanup.size(),
             topicBeforeCleanup.size(), equalTo(5));
  QueryMetadata queryMetadata = ksqlContext.getRunningQueries().iterator().next();

  queryMetadata.close();
  Set<String> topicsAfterCleanUp = topicClient.listTopicNames();

  assertThat("Expected to see 3 topics after clean up but seeing " + topicsAfterCleanUp.size
      (), topicsAfterCleanUp.size(), equalTo(3));
}
 
Author: confluentinc, Project: ksql, Lines: 46, Source: WindowingIntTest.java

Example 15: StandaloneExecutor

import org.apache.kafka.clients.admin.AdminClient; // import the dependent package/class
public StandaloneExecutor(Map<String, Object> streamProperties) throws ExecutionException, InterruptedException {
  KsqlConfig ksqlConfig = new KsqlConfig(streamProperties);
  AdminClient adminClient = AdminClient.create(ksqlConfig.getKsqlAdminClientConfigProps());
  ksqlEngine = new KsqlEngine(ksqlConfig, new KafkaTopicClientImpl(adminClient));
}
 
Author: confluentinc, Project: ksql, Lines: 5, Source: StandaloneExecutor.java


Note: The org.apache.kafka.clients.admin.AdminClient.create examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors, and distribution or use of the code is subject to each project's License. Do not reproduce this article without permission.