本文整理汇总了Java中kafka.admin.AdminUtils.fetchTopicMetadataFromZk方法的典型用法代码示例。如果您正苦于以下问题:Java AdminUtils.fetchTopicMetadataFromZk方法的具体用法?Java AdminUtils.fetchTopicMetadataFromZk怎么用?Java AdminUtils.fetchTopicMetadataFromZk使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类kafka.admin.AdminUtils
的用法示例。
在下文中一共展示了AdminUtils.fetchTopicMetadataFromZk方法的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: initKafka
import kafka.admin.AdminUtils; //导入方法依赖的package包/类
/**
 * Boots the embedded ZooKeeper + Kafka cluster once per suite, creates the JSON
 * test topic, and seeds it with test messages. Subsequent calls only bump the
 * init counter. NOTE(review): the ZkClient/ZkUtils created here are intentionally
 * left open — zkClient is a suite-level field presumably closed in teardown.
 */
@BeforeClass
public static void initKafka() throws Exception {
// Serialize initialization across tests; only the first caller starts the cluster.
synchronized (TestKafkaSuit.class) {
if (initCount.get() == 0) {
// SASL/JAAS config must be in place before any ZK or Kafka client is constructed.
ZookeeperTestUtil.setZookeeperSaslTestConfigProps();
System.setProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM, ClassLoader.getSystemResource(LOGIN_CONF_RESOURCE_PATHNAME).getFile());
embeddedKafkaCluster = new EmbeddedKafkaCluster();
Properties topicProps = new Properties();
// ZKStringSerializer is required so topic znodes are written in the encoding Kafka expects.
zkClient = new ZkClient(embeddedKafkaCluster.getZkServer().getConnectionString(), SESSION_TIMEOUT, CONN_TIMEOUT, ZKStringSerializer$.MODULE$);
ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(embeddedKafkaCluster.getZkServer().getConnectionString()), false);
// Create the JSON topic with 1 partition and replication factor 1 (single-broker test cluster).
AdminUtils.createTopic(zkUtils, QueryConstants.JSON_TOPIC, 1, 1, topicProps, RackAwareMode.Disabled$.MODULE$);
// Fetch and log the freshly created topic's metadata as a sanity check.
org.apache.kafka.common.requests.MetadataResponse.TopicMetadata fetchTopicMetadataFromZk = AdminUtils
.fetchTopicMetadataFromZk(QueryConstants.JSON_TOPIC, zkUtils);
logger.info("Topic Metadata: " + fetchTopicMetadataFromZk);
// Pre-populate the topic with JSON messages for the suite's query tests.
KafkaMessageGenerator generator = new KafkaMessageGenerator(embeddedKafkaCluster.getKafkaBrokerList(),
StringSerializer.class);
generator.populateJsonMsgIntoKafka(QueryConstants.JSON_TOPIC, NUM_JSON_MSG);
}
initCount.incrementAndGet();
runningSuite = true;
}
logger.info("Initialized Embedded Zookeeper and Kafka");
}
示例2: initializeTopicsMaps
import kafka.admin.AdminUtils; //导入方法依赖的package包/类
/**
 * Populates {@code topicMetadataMap} (topic name -> metadata) and
 * {@code topicReplicationFactorMap} (topic name -> replica count of the first
 * partition) by querying ZooKeeper once for all given topics.
 *
 * @param zookeeperPath
 * zookeeper observer url (connect string)
 * @param topics
 * list of topics to look up
 * @throws IllegalArgumentException if a topic has no replicas recorded for
 * partition {@code FIRST_PARTITION}
 */
private void initializeTopicsMaps(String zookeeperPath, Set<String> topics) {
String zkConnect = zookeeperPath;
ZkUtils zkUtils = ZkUtils.apply(zkConnect, ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT, false);
scala.collection.Set<TopicMetadata> topicsMetaDataSet = null;
scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>> partitionAssignmentMap = null;
try {
// Both ZK reads happen inside try so the connection is always released below.
topicsMetaDataSet = AdminUtils.fetchTopicMetadataFromZk(
scala.collection.JavaConversions.asScalaSet(topics), zkUtils);
partitionAssignmentMap = zkUtils.getPartitionAssignmentForTopics(javaCollectionToScalaSeq(topics));
} finally {
// Always close the ZK session, even if a fetch above throws.
zkUtils.close();
}
// Convert the Scala set to Java and index metadata by topic name.
Set<TopicMetadata> topicsMetaData = scala.collection.JavaConversions.asJavaSet(topicsMetaDataSet);
for (TopicMetadata topicMetadata : topicsMetaData) {
topicMetadataMap.put(topicMetadata.topic(), topicMetadata);
}
Map<String, scala.collection.Map<Object, Seq<Object>>> partitionAssignment = scalaMapToJavaMap(partitionAssignmentMap);
for (Entry<String, scala.collection.Map<Object, Seq<Object>>> entry : partitionAssignment.entrySet()) {
String topicName = entry.getKey();
Map<Object, Seq<Object>> partitions = scalaMapToJavaMap(entry.getValue());
// Replication factor is taken from the first partition's replica list;
// assumes all partitions of a topic share the same replication factor.
Seq<Object> replicas = partitions.get(FIRST_PARTITION);
if (replicas == null || replicas.isEmpty()) {
throw new IllegalArgumentException("Replicas cannot be found for " + topicName + "-" + FIRST_PARTITION);
}
topicReplicationFactorMap.put(topicName, replicas.size());
}
}
示例3: getTopic
import kafka.admin.AdminUtils; //导入方法依赖的package包/类
/**
 * Looks up a topic in ZooKeeper and maps its metadata onto a {@link Topic} DTO.
 * The replication figure is the total replica count summed over all partitions.
 *
 * @param topicName name of the topic to describe
 * @return populated {@link Topic}
 * @throws UnknownTopicException if no such topic exists in ZooKeeper
 */
@Override
public Topic getTopic(final String topicName) {
    // Guard clause: bail out immediately for unknown topics.
    if (!AdminUtils.topicExists(zkUtils, topicName)) {
        throw new UnknownTopicException(topicName);
    }
    final MetadataResponse.TopicMetadata metadata = AdminUtils.fetchTopicMetadataFromZk(topicName, zkUtils);
    // Sum replicas across every partition of the topic.
    int totalReplicas = 0;
    for (final MetadataResponse.PartitionMetadata partition : metadata.partitionMetadata()) {
        totalReplicas += partition.replicas().size();
    }
    final Topic result = new Topic();
    result.setName(metadata.topic());
    result.setPartitions(metadata.partitionMetadata().size());
    result.setReplications(totalReplicas);
    result.setProperties(getTopicProperties(topicName));
    return result;
}
示例4: fetchTopicMeta
import kafka.admin.AdminUtils; //导入方法依赖的package包/类
/**
 * Fetches the metadata of {@code topic} from ZooKeeper.
 *
 * @param topic the topic name to look up
 * @return the topic's metadata as recorded in ZooKeeper
 */
public MetadataResponse.TopicMetadata fetchTopicMeta(String topic) {
    ZkClient zkClient = new ZkClient(zkConnection);
    // Kafka stores znode payloads as UTF-8 strings; the serializer must match.
    zkClient.setZkSerializer(new ZKStringSerializer());
    ZkUtils zkUtils = new ZkUtils(zkClient, zkConnection, false);
    try {
        return AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils);
    } finally {
        // BUG FIX: close() was previously skipped when the fetch threw,
        // leaking the ZooKeeper session. Always release the connection.
        zkClient.close();
    }
}
示例5: getTopicMetadata
import kafka.admin.AdminUtils; //导入方法依赖的package包/类
/**
 * Returns the ZooKeeper-recorded metadata for the given topic.
 *
 * @param topic topic name to look up
 * @return the topic's metadata
 */
public TopicMetadata getTopicMetadata(String topic) {
    // Pure delegation to Kafka's admin helper; no local state involved.
    return AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils);
}
示例6: getTopicMetadata
import kafka.admin.AdminUtils; //导入方法依赖的package包/类
/**
 * Looks up and returns the metadata stored in ZooKeeper for {@code topic}.
 *
 * @param topic topic name to look up
 * @return the topic's metadata
 */
public TopicMetadata getTopicMetadata(String topic) {
    // Thin wrapper around the Kafka admin API; nothing cached here.
    return AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils);
}
示例7: getZKTopicMetadata
import kafka.admin.AdminUtils; //导入方法依赖的package包/类
/**
 * Returns the ZK topic meta-data for the named topic.
 * @param topicName The name of the topic
 * @return the topic meta-data
 * @throws IllegalStateException if the KafkaTestServer is not running
 * @throws IllegalArgumentException if {@code topicName} is null or blank
 */
public TopicMetadata getZKTopicMetadata(final String topicName) {
// Precondition checks: server must be up and the topic name must be usable.
if(!connected.get()) throw new IllegalStateException("The KafkaTestServer is not running");
if(topicName==null || topicName.trim().isEmpty()) throw new IllegalArgumentException("The passed topic name was null or empty");
return AdminUtils.fetchTopicMetadataFromZk(topicName, zkUtils);
}
示例8: testTopicPartitionCreationCount
import kafka.admin.AdminUtils; //导入方法依赖的package包/类
/**
 * Verifies that Kafka09DataWriter creates its topic with the configured
 * partition count (half the embedded cluster size) by reading the topic
 * metadata back from ZooKeeper.
 */
@Test
public void testTopicPartitionCreationCount()
    throws IOException, InterruptedException {
  String topic = "topicPartition4";
  int clusterCount = _kafkaTestHelper.getClusterCount();
  int partionCount = clusterCount/2;
  int zkPort = _kafkaTestHelper.getZookeeperPort();
  Properties props = new Properties();
  // Setting Topic Properties
  props.setProperty(KafkaWriterConfigurationKeys.KAFKA_TOPIC, topic);
  props.setProperty(KafkaWriterConfigurationKeys.REPLICATION_COUNT, String.valueOf(clusterCount));
  props.setProperty(KafkaWriterConfigurationKeys.PARTITION_COUNT, String.valueOf(partionCount));
  props.setProperty(KafkaWriterConfigurationKeys.CLUSTER_ZOOKEEPER, "localhost:"+zkPort);
  System.out.println(_kafkaTestHelper.getBootServersList());
  // Setting Producer Properties
  props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX+"bootstrap.servers", _kafkaTestHelper.getBootServersList());
  props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX+"value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
  // Constructing the writer is what triggers topic creation under test.
  Kafka09DataWriter<String> kafka09DataWriter = new Kafka09DataWriter<String>(props);
  String zookeeperConnect = "localhost:"+_kafkaTestHelper.getZookeeperPort();
  int sessionTimeoutMs = 10 * 1000;
  int connectionTimeoutMs = 8 * 1000;
  // Note: You must initialize the ZkClient with ZKStringSerializer. If you don't, then
  // createTopic() will only seem to work (it will return without error). The topic will exist in
  // only ZooKeeper and will be returned when listing topics, but Kafka itself does not create the
  // topic.
  ZkClient zkClient = new ZkClient(
      zookeeperConnect,
      sessionTimeoutMs,
      connectionTimeoutMs,
      ZKStringSerializer$.MODULE$);
  try {
    boolean isSecureKafkaCluster = false;
    ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(zookeeperConnect), isSecureKafkaCluster);
    TopicMetadata metaData =
        AdminUtils.fetchTopicMetadataFromZk(topic,zkUtils);
    Assert.assertEquals(metaData.partitionsMetadata().size(), partionCount);
  } finally {
    // BUG FIX: the ZooKeeper client was never closed, leaking a ZK session
    // (and its threads) on every test run — including assertion failures.
    zkClient.close();
  }
}
示例9: testLiveTopicPartitionCreationCount
import kafka.admin.AdminUtils; //导入方法依赖的package包/类
/**
 * Live-cluster variant of the partition-count test: reads connection details
 * from system properties and skips (vacuous pass) when no live cluster is
 * configured. Verifies the created topic's partition count via ZK metadata.
 */
@Test
public void testLiveTopicPartitionCreationCount()
    throws IOException, InterruptedException {
  String liveClusterCount = System.getProperty("live.cluster.count");
  String liveZookeeper = System.getProperty("live.zookeeper");
  String liveBroker = System.getProperty("live.broker");
  String topic = System.getProperty("live.newtopic");
  String topicReplicationCount = System.getProperty("live.newtopic.replicationCount");
  String topicPartitionCount = System.getProperty("live.newtopic.partitionCount");
  // No live cluster configured: treat as a no-op pass rather than a failure.
  if(StringUtils.isEmpty(liveClusterCount)){
    Assert.assertTrue(true);
    return;
  }
  if(StringUtils.isEmpty(topicPartitionCount)){
    // Derive defaults from the cluster size: one broker reserved, half the rest as partitions.
    int clusterCount = Integer.parseInt(liveClusterCount);
    clusterCount--;
    int partionCount = clusterCount/2;
    topicReplicationCount = String.valueOf(clusterCount);
    topicPartitionCount = String.valueOf(partionCount);
  }
  Properties props = new Properties();
  // Setting Topic Properties
  props.setProperty(KafkaWriterConfigurationKeys.KAFKA_TOPIC, topic);
  props.setProperty(KafkaWriterConfigurationKeys.REPLICATION_COUNT, topicReplicationCount);
  props.setProperty(KafkaWriterConfigurationKeys.PARTITION_COUNT, topicPartitionCount );
  props.setProperty(KafkaWriterConfigurationKeys.CLUSTER_ZOOKEEPER, liveZookeeper);
  // Setting Producer Properties
  props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX+"bootstrap.servers", liveBroker);
  props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX+"value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
  // Constructing the writer is what triggers topic creation under test.
  Kafka09DataWriter<String> kafka09DataWriter = new Kafka09DataWriter<String>(props);
  int sessionTimeoutMs = 10 * 1000;
  int connectionTimeoutMs = 8 * 1000;
  // Note: You must initialize the ZkClient with ZKStringSerializer. If you don't, then
  // createTopic() will only seem to work (it will return without error). The topic will exist in
  // only ZooKeeper and will be returned when listing topics, but Kafka itself does not create the
  // topic.
  ZkClient zkClient = new ZkClient(
      liveZookeeper,
      sessionTimeoutMs,
      connectionTimeoutMs,
      ZKStringSerializer$.MODULE$);
  try {
    boolean isSecureKafkaCluster = false;
    ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(liveZookeeper), isSecureKafkaCluster);
    TopicMetadata metaData =
        AdminUtils.fetchTopicMetadataFromZk(topic,zkUtils);
    Assert.assertEquals(metaData.partitionsMetadata().size(), Integer.parseInt(topicPartitionCount));
  } finally {
    // BUG FIX: the ZooKeeper client was never closed, leaking a live ZK
    // session on every run — including assertion failures.
    zkClient.close();
  }
}
示例10: KafkaSplitSource
import kafka.admin.AdminUtils; //导入方法依赖的package包/类
/**
 * Builds the list of Kafka splits for a Hive-mapped Kafka table.
 * Reads topic/split configuration from the table properties, fetches the
 * topic's partition metadata from ZooKeeper, derives a set of timestamp
 * ranges (from Hive partitions when present, otherwise from the scan range),
 * and creates one KafkaSplit per (partition, timestamp-range) pair.
 *
 * NOTE(review): the ZkClient created here is not closed in this constructor —
 * presumably its lifetime is tied to the split source; verify against callers.
 */
KafkaSplitSource(String connectorId, Table table,
Iterable<Partition> hivePartitions,
KafkaClientConfig kafkaConfig)
{
this.connectorId = connectorId;
this.fetchedIndex = 0;
this.computedSplits = new ArrayList<Split>();
String zookeeper = kafkaConfig.getZookeeper();
int zkSessionTimeout = kafkaConfig.getZookeeperSessionTimeout();
int zkConnectionTimeout = kafkaConfig.getZookeeperConnectTimeout();
Map<String, String> tblProps = table.getParameters();
String tableTopic = tblProps.get(KafkaTableProperties.kafkaTopicName);
// Defaults: 1h per split, 24h total scan window, 100% sample rate.
long splitRange = getDefault(tblProps, KafkaTableProperties.kafkaSplitRange, 60 * 60 * 1000);
long scanRange = getDefault(tblProps, KafkaTableProperties.kafkaJobRange, 24 * 60 * 60 * 1000);
int sampleRate = (int) getDefault(tblProps, KafkaTableProperties.kafkaTableSampleRate, 100);
ZkClient zkclient = new ZkClient(zookeeper, zkSessionTimeout,
zkConnectionTimeout, new ZKStringSerializer());
TopicMetadata metadata = AdminUtils.fetchTopicMetadataFromZk(tableTopic, zkclient);
List<PartitionMetadata> mds = scala.collection.JavaConversions.asJavaList(metadata.partitionsMetadata());
List<long[]> offsetList = null;
// if the table is partitioned, look at each partition and
// determine the data to look at.
List<FieldSchema> partCols = table.getPartitionKeys();
if (partCols != null && partCols.size() > 0)
{
offsetList = generateTsOffsetsFromPartitions(hivePartitions, tblProps, splitRange, partCols);
} else
{
// we will set the table property so that all the the queries hit here.
offsetList = generateTsOffsetsNoPartitions(scanRange, mds.size());
}
// Cross product: one split per Kafka partition per timestamp range.
for (PartitionMetadata md : mds)
{
// NOTE(review): leader().get() throws if the partition currently has no
// leader — TODO confirm this is acceptable (e.g. during leader election).
Broker broker = md.leader().get();
for (long[] offsets : offsetList)
{
long startTs = offsets[0];
long endTs = offsets[1];
KafkaSplit split = new KafkaSplit(connectorId,
tableTopic, md.partitionId(),
broker.host(), broker.port(),
sampleRate,
startTs, endTs, zookeeper,
zkSessionTimeout, zkConnectionTimeout);
this.computedSplits.add(split);
}
}
}
示例11: getPartitions
import kafka.admin.AdminUtils; //导入方法依赖的package包/类
/**
 * Returns the number of partitions of {@code topicName} as recorded in ZooKeeper.
 *
 * @param topicName the topic whose partition count is wanted
 * @return the partition count
 */
public int getPartitions(String topicName) {
    // Single expression: fetch metadata and count its partition entries.
    return AdminUtils.fetchTopicMetadataFromZk(topicName, zkUtils).partitionMetadata().size();
}