

Java AdminUtils.fetchTopicMetadataFromZk Method Code Examples

This article collects typical usage examples of the Java method kafka.admin.AdminUtils.fetchTopicMetadataFromZk. If you are unsure what AdminUtils.fetchTopicMetadataFromZk does, or how to call it in practice, the curated code examples below may help. You can also explore further usage examples of the enclosing class, kafka.admin.AdminUtils.


The following sections present 11 code examples of the AdminUtils.fetchTopicMetadataFromZk method, ordered by popularity by default.
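Before the examples, here is a minimal, self-contained sketch of the method's basic usage. It is a sketch under stated assumptions, not code from any of the projects below: it assumes a Kafka 0.10+ client on the classpath, where fetchTopicMetadataFromZk(String, ZkUtils) returns MetadataResponse.TopicMetadata (older 0.8.x clients return kafka.api.TopicMetadata and take a ZkClient instead, as Example 10 shows); the ZooKeeper address, timeouts, and topic name are placeholders.

import kafka.admin.AdminUtils;
import kafka.utils.ZkUtils;
import org.apache.kafka.common.requests.MetadataResponse;

public class FetchTopicMetadataSketch {
    public static void main(String[] args) {
        // Placeholder ZooKeeper address and timeouts; adjust for your environment.
        ZkUtils zkUtils = ZkUtils.apply("localhost:2181", 10000, 8000, false);
        try {
            // Reads the topic's metadata (partitions, leaders, replicas) from ZooKeeper.
            MetadataResponse.TopicMetadata metadata =
                    AdminUtils.fetchTopicMetadataFromZk("my-topic", zkUtils);
            System.out.println("Topic: " + metadata.topic()
                    + ", partitions: " + metadata.partitionMetadata().size());
        } finally {
            zkUtils.close();
        }
    }
}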

Example 1: initKafka

import kafka.admin.AdminUtils; // import the package/class the method depends on
@BeforeClass
public static void initKafka() throws Exception {
  synchronized (TestKafkaSuit.class) {
    if (initCount.get() == 0) {
      ZookeeperTestUtil.setZookeeperSaslTestConfigProps();
      System.setProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM, ClassLoader.getSystemResource(LOGIN_CONF_RESOURCE_PATHNAME).getFile());
      embeddedKafkaCluster = new EmbeddedKafkaCluster();
      Properties topicProps = new Properties();
      zkClient = new ZkClient(embeddedKafkaCluster.getZkServer().getConnectionString(), SESSION_TIMEOUT, CONN_TIMEOUT, ZKStringSerializer$.MODULE$);
      ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(embeddedKafkaCluster.getZkServer().getConnectionString()), false);
      AdminUtils.createTopic(zkUtils, QueryConstants.JSON_TOPIC, 1, 1, topicProps, RackAwareMode.Disabled$.MODULE$);

      org.apache.kafka.common.requests.MetadataResponse.TopicMetadata fetchTopicMetadataFromZk = AdminUtils
          .fetchTopicMetadataFromZk(QueryConstants.JSON_TOPIC, zkUtils);
      logger.info("Topic Metadata: " + fetchTopicMetadataFromZk);

      KafkaMessageGenerator generator = new KafkaMessageGenerator(embeddedKafkaCluster.getKafkaBrokerList(),
          StringSerializer.class);
      generator.populateJsonMsgIntoKafka(QueryConstants.JSON_TOPIC, NUM_JSON_MSG);
    }
    initCount.incrementAndGet();
    runningSuite = true;
  }
  logger.info("Initialized Embedded Zookeeper and Kafka");
}
 
Developer: axbaretto, Project: drill, Lines: 26, Source: TestKafkaSuit.java

Example 2: initializeTopicsMaps

import kafka.admin.AdminUtils; // import the package/class the method depends on
/**
 * Populates topicReplicationFactor map and topicMetadataMap.
 *
 * @param zookeeperPath
 *            zookeeper observer url
 * @param topics
 *            list of topics
 */
private void initializeTopicsMaps(String zookeeperPath, Set<String> topics) {
	String zkConnect = zookeeperPath;
	ZkUtils zkUtils = ZkUtils.apply(zkConnect, ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT, false);
	scala.collection.Set<TopicMetadata> topicsMetaDataSet = null;
	scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>> partitionAssignmentMap = null;
	try {
		topicsMetaDataSet = AdminUtils.fetchTopicMetadataFromZk(
				scala.collection.JavaConversions.asScalaSet(topics), zkUtils);
		partitionAssignmentMap = zkUtils.getPartitionAssignmentForTopics(javaCollectionToScalaSeq(topics));
	} finally {
		zkUtils.close();
	}
	Set<TopicMetadata> topicsMetaData = scala.collection.JavaConversions.asJavaSet(topicsMetaDataSet);
	for (TopicMetadata topicMetadata : topicsMetaData) {
		topicMetadataMap.put(topicMetadata.topic(), topicMetadata);
	}

	Map<String, scala.collection.Map<Object, Seq<Object>>> partitionAssignment = scalaMapToJavaMap(partitionAssignmentMap);
	for (Entry<String, scala.collection.Map<Object, Seq<Object>>> entry : partitionAssignment.entrySet()) {
		String topicName = entry.getKey();
		Map<Object, Seq<Object>> partitions = scalaMapToJavaMap(entry.getValue());
		Seq<Object> replicas = partitions.get(FIRST_PARTITION);
		if (replicas == null || replicas.isEmpty()) {
			throw new IllegalArgumentException("Replicas cannot be found for " + topicName + "-" + FIRST_PARTITION);
		}
		topicReplicationFactorMap.put(topicName, replicas.size());
	}
}
 
Developer: flipkart-incubator, Project: kafka-balancer, Lines: 37, Source: BrokersTopicsCache.java

Example 3: getTopic

import kafka.admin.AdminUtils; // import the package/class the method depends on
@Override
public Topic getTopic(final String topicName) {
    if (AdminUtils.topicExists(zkUtils, topicName)) {
        final MetadataResponse.TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(topicName, zkUtils);
        final Topic topic = new Topic();
        topic.setName(topicMetadata.topic());
        topic.setPartitions(topicMetadata.partitionMetadata().size());
        final int replicas = topicMetadata.partitionMetadata().stream().mapToInt(e -> e.replicas().size()).sum();
        topic.setReplications(replicas);
        topic.setProperties(getTopicProperties(topicName));
        return topic;
    }
    throw new UnknownTopicException(topicName);
}
 
Developer: craftsmenlabs, Project: kafka-admin-rest-api, Lines: 15, Source: TopicServiceImpl.java

Example 4: fetchTopicMeta

import kafka.admin.AdminUtils; // import the package/class the method depends on
public MetadataResponse.TopicMetadata fetchTopicMeta(String topic) {
    ZkClient zkClient = new ZkClient(zkConnection);
    ZkUtils zkUtils = new ZkUtils(zkClient, zkConnection, false);
    zkClient.setZkSerializer(new ZKStringSerializer());
    MetadataResponse.TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils);
    zkClient.close();
    return topicMetadata;
}
 
Developer: apache, Project: kylin, Lines: 9, Source: MockKafka.java

Example 5: getTopicMetadata

import kafka.admin.AdminUtils; // import the package/class the method depends on
public TopicMetadata getTopicMetadata(String topic) {
    TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils);
    return topicMetadata;
}
 
Developer: warlock-china, Project: azeroth, Lines: 5, Source: ZkConsumerCommand.java

Example 6: getTopicMetadata

import kafka.admin.AdminUtils; // import the package/class the method depends on
public TopicMetadata getTopicMetadata(String topic) {
	TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils);
	return topicMetadata;
}
 
Developer: vakinge, Project: jeesuite-libs, Lines: 5, Source: ZkConsumerCommand.java

Example 7: getZKTopicMetadata

import kafka.admin.AdminUtils; // import the package/class the method depends on
/**
 * Returns the ZK topic meta-data for the named topic
 * @param topicName The name of the topic
 * @return the topic meta-data
 */
public TopicMetadata getZKTopicMetadata(final String topicName) {
	if(!connected.get()) throw new IllegalStateException("The KafkaTestServer is not running");
	if(topicName==null || topicName.trim().isEmpty()) throw new IllegalArgumentException("The passed topic name was null or empty");
	return AdminUtils.fetchTopicMetadataFromZk(topicName, zkUtils);
}
 
Developer: nickman, Project: HeliosStreams, Lines: 11, Source: KafkaAdminClient.java

Example 8: testTopicPartitionCreationCount

import kafka.admin.AdminUtils; // import the package/class the method depends on
@Test
public void testTopicPartitionCreationCount()
    throws IOException, InterruptedException {
  String topic = "topicPartition4";
  int clusterCount = _kafkaTestHelper.getClusterCount();
  int partionCount = clusterCount/2;
  int zkPort = _kafkaTestHelper.getZookeeperPort();
  Properties props = new Properties();
  
  // Setting Topic Properties
  props.setProperty(KafkaWriterConfigurationKeys.KAFKA_TOPIC, topic);
  props.setProperty(KafkaWriterConfigurationKeys.REPLICATION_COUNT, String.valueOf(clusterCount));
  props.setProperty(KafkaWriterConfigurationKeys.PARTITION_COUNT,  String.valueOf(partionCount));
  props.setProperty(KafkaWriterConfigurationKeys.CLUSTER_ZOOKEEPER, "localhost:"+zkPort);
  System.out.println(_kafkaTestHelper.getBootServersList());
  
  // Setting Producer Properties
  props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX+"bootstrap.servers", _kafkaTestHelper.getBootServersList());    
  props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX+"value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
  
  Kafka09DataWriter<String> kafka09DataWriter = new Kafka09DataWriter<String>(props);
  String zookeeperConnect = "localhost:"+_kafkaTestHelper.getZookeeperPort();
  int sessionTimeoutMs = 10 * 1000;
  int connectionTimeoutMs = 8 * 1000;
  // Note: You must initialize the ZkClient with ZKStringSerializer.  If you don't, then
  // createTopic() will only seem to work (it will return without error).  The topic will exist in
  // only ZooKeeper and will be returned when listing topics, but Kafka itself does not create the
  // topic.
  ZkClient zkClient = new ZkClient(
      zookeeperConnect,
      sessionTimeoutMs,
      connectionTimeoutMs,
      ZKStringSerializer$.MODULE$);
  boolean isSecureKafkaCluster = false;
  ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(zookeeperConnect), isSecureKafkaCluster);
  
  TopicMetadata metaData =
      AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils);
  Assert.assertEquals(metaData.partitionsMetadata().size(), partionCount);
}
 
Developer: apache, Project: incubator-gobblin, Lines: 42, Source: Kafka09TopicProvisionTest.java

Example 9: testLiveTopicPartitionCreationCount

import kafka.admin.AdminUtils; // import the package/class the method depends on
@Test
public void testLiveTopicPartitionCreationCount()
    throws IOException, InterruptedException {
  String liveClusterCount = System.getProperty("live.cluster.count");
  String liveZookeeper = System.getProperty("live.zookeeper");
  String liveBroker = System.getProperty("live.broker");
  String topic = System.getProperty("live.newtopic");
  String topicReplicationCount = System.getProperty("live.newtopic.replicationCount");
  String topicPartitionCount = System.getProperty("live.newtopic.partitionCount");
  if (StringUtils.isEmpty(liveClusterCount)) {
    Assert.assertTrue(true);
    return;
  }
  if (StringUtils.isEmpty(topicPartitionCount)) {
    int clusterCount = Integer.parseInt(liveClusterCount);
    clusterCount--;
    int partionCount = clusterCount / 2;
    topicReplicationCount = String.valueOf(clusterCount);
    topicPartitionCount = String.valueOf(partionCount);
  }

  Properties props = new Properties();
  // Setting Topic Properties
  props.setProperty(KafkaWriterConfigurationKeys.KAFKA_TOPIC, topic);
  props.setProperty(KafkaWriterConfigurationKeys.REPLICATION_COUNT, topicReplicationCount);
  props.setProperty(KafkaWriterConfigurationKeys.PARTITION_COUNT, topicPartitionCount);
  props.setProperty(KafkaWriterConfigurationKeys.CLUSTER_ZOOKEEPER, liveZookeeper);
  // Setting Producer Properties
  props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "bootstrap.servers", liveBroker);
  props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

  Kafka09DataWriter<String> kafka09DataWriter = new Kafka09DataWriter<String>(props);
  int sessionTimeoutMs = 10 * 1000;
  int connectionTimeoutMs = 8 * 1000;
  // Note: You must initialize the ZkClient with ZKStringSerializer.  If you don't, then
  // createTopic() will only seem to work (it will return without error).  The topic will exist in
  // only ZooKeeper and will be returned when listing topics, but Kafka itself does not create the
  // topic.
  ZkClient zkClient = new ZkClient(
      liveZookeeper,
      sessionTimeoutMs,
      connectionTimeoutMs,
      ZKStringSerializer$.MODULE$);
  boolean isSecureKafkaCluster = false;
  ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(liveZookeeper), isSecureKafkaCluster);

  TopicMetadata metaData =
      AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils);
  Assert.assertEquals(metaData.partitionsMetadata().size(), Integer.parseInt(topicPartitionCount));
}
 
Developer: apache, Project: incubator-gobblin, Lines: 52, Source: Kafka09TopicProvisionTest.java

Example 10: KafkaSplitSource

import kafka.admin.AdminUtils; // import the package/class the method depends on
KafkaSplitSource(String connectorId, Table table,
        Iterable<Partition> hivePartitions,
        KafkaClientConfig kafkaConfig)
{
    this.connectorId = connectorId;
    this.fetchedIndex = 0;
    this.computedSplits = new ArrayList<Split>();
    String zookeeper = kafkaConfig.getZookeeper();
    int zkSessionTimeout = kafkaConfig.getZookeeperSessionTimeout();
    int zkConnectionTimeout = kafkaConfig.getZookeeperConnectTimeout();

    Map<String, String> tblProps = table.getParameters();
    String tableTopic = tblProps.get(KafkaTableProperties.kafkaTopicName);

    long splitRange = getDefault(tblProps, KafkaTableProperties.kafkaSplitRange, 60 * 60 * 1000);
    long scanRange = getDefault(tblProps, KafkaTableProperties.kafkaJobRange, 24 * 60 * 60 * 1000);
    int sampleRate = (int) getDefault(tblProps, KafkaTableProperties.kafkaTableSampleRate, 100);

    ZkClient zkclient = new ZkClient(zookeeper, zkSessionTimeout,
            zkConnectionTimeout, new ZKStringSerializer());

    TopicMetadata metadata = AdminUtils.fetchTopicMetadataFromZk(tableTopic, zkclient);
    List<PartitionMetadata> mds = scala.collection.JavaConversions.asJavaList(metadata.partitionsMetadata());

    List<long[]> offsetList = null;
    // if the table is partitioned, look at each partition and
    // determine the data to look at.
    List<FieldSchema> partCols = table.getPartitionKeys();
    if (partCols != null && partCols.size() > 0)
    {
        offsetList = generateTsOffsetsFromPartitions(hivePartitions, tblProps, splitRange, partCols);
    } else
    {
        // we will set the table property so that all the queries hit here.
        offsetList = generateTsOffsetsNoPartitions(scanRange, mds.size());
    }

    for (PartitionMetadata md : mds)
    {
        Broker broker = md.leader().get();
        for (long[] offsets : offsetList)
        {
            long startTs = offsets[0];
            long endTs = offsets[1];
            KafkaSplit split = new KafkaSplit(connectorId,
                    tableTopic, md.partitionId(),
                    broker.host(), broker.port(),
                    sampleRate,
                    startTs, endTs, zookeeper,
                    zkSessionTimeout, zkConnectionTimeout);
            this.computedSplits.add(split);
        }
    }
}
 
Developer: dropbox, Project: presto-kafka-connector, Lines: 55, Source: KafkaSplitSourceProvider.java

Example 11: getPartitions

import kafka.admin.AdminUtils; // import the package/class the method depends on
public int getPartitions(String topicName) {
    MetadataResponse.TopicMetadata metaData = AdminUtils.fetchTopicMetadataFromZk(topicName, zkUtils);
    return metaData.partitionMetadata().size();
}
 
Developer: Stratio, Project: bdt, Lines: 5, Source: KafkaUtils.java
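Going one step beyond Example 11, the sketch below shows how the same returned metadata can be used to inspect each partition's leader and replica set. As with the sketch at the top of this article, it assumes a Kafka 0.10+ client; the class name, connection string, and topic name are illustrative and not taken from any of the projects quoted here.

import kafka.admin.AdminUtils;
import kafka.utils.ZkUtils;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.requests.MetadataResponse;

public class PartitionLeaderInspector {
    public static void main(String[] args) {
        // Placeholder ZooKeeper address and timeouts; adjust for your environment.
        ZkUtils zkUtils = ZkUtils.apply("localhost:2181", 10000, 8000, false);
        try {
            MetadataResponse.TopicMetadata metadata =
                    AdminUtils.fetchTopicMetadataFromZk("my-topic", zkUtils);
            for (MetadataResponse.PartitionMetadata pm : metadata.partitionMetadata()) {
                // leader() may be a placeholder node while no leader is elected.
                Node leader = pm.leader();
                System.out.printf("partition %d: leader=%s, replicas=%d, in-sync=%d%n",
                        pm.partition(),
                        leader == null ? "none" : leader.host() + ":" + leader.port(),
                        pm.replicas().size(),
                        pm.isr().size());
            }
        } finally {
            zkUtils.close();
        }
    }
}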


Note: The kafka.admin.AdminUtils.fetchTopicMetadataFromZk examples in this article were curated from open-source projects hosted on GitHub and similar platforms. The copyright of each snippet remains with its original author; consult the corresponding project's license before reusing or redistributing the code.