

Java TopicMetadata.topic Method Code Examples

This article collects typical usage examples of the Java method kafka.javaapi.TopicMetadata.topic, drawn from open-source projects. If you are wondering what TopicMetadata.topic does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of the enclosing class, kafka.javaapi.TopicMetadata.


Five code examples of the TopicMetadata.topic method are shown below, ordered by popularity.
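Before the individual examples, here is a minimal self-contained sketch of the usual call pattern, assuming a broker reachable at localhost:9092 and a topic named "test" (both placeholders, not taken from the examples below): a SimpleConsumer sends a TopicMetadataRequest, and topic() reads the topic name back from each returned TopicMetadata entry.

import java.util.Collections;
import kafka.javaapi.TopicMetadata;
import kafka.javaapi.TopicMetadataRequest;
import kafka.javaapi.TopicMetadataResponse;
import kafka.javaapi.consumer.SimpleConsumer;

public class TopicMetadataExample {
  public static void main(String[] args) {
    // Placeholder broker address, client id, and topic name; adjust to your cluster
    SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 100000, 64 * 1024, "metadata-example");
    try {
      TopicMetadataRequest request = new TopicMetadataRequest(Collections.singletonList("test"));
      TopicMetadataResponse response = consumer.send(request);
      for (TopicMetadata tmd : response.topicsMetadata()) {
        // topic() returns the name of the topic this metadata entry describes
        System.out.println("topic=" + tmd.topic() + ", partitions=" + tmd.partitionsMetadata().size());
      }
    } finally {
      consumer.close();
    }
  }
}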

Example 1: updateLeaderMap

import kafka.javaapi.TopicMetadata; // import the class this method depends on
private void updateLeaderMap() {
  for (String broker : brokerList) {
    try {
      SimpleConsumer consumer = getSimpleConsumer(broker);
      TopicMetadataRequest req = new TopicMetadataRequest(auditTopics);
      kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);
      List<TopicMetadata> metaData = resp.topicsMetadata();

      // Map each (topic, partition) of the audited topics to its current leader broker
      for (TopicMetadata tmd : metaData) {
        for (PartitionMetadata pmd : tmd.partitionsMetadata()) {
          TopicAndPartition topicAndPartition = new TopicAndPartition(tmd.topic(), pmd.partitionId());
          partitionLeader.put(topicAndPartition, getHostPort(pmd.leader()));
        }
      }
    } catch (Exception e) {
      logger.warn("Got exception to get metadata from broker=" + broker, e);
    }
  }
}
 
Developer: uber, Project: chaperone, Lines: 20, Source: KafkaMonitor.java
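The snippet above relies on two helpers that the excerpt does not show. A plausible sketch, assuming broker strings of the form host:port (both helpers are hypothetical reconstructions, not chaperone's actual code):

// Hypothetical reconstructions of the helpers used in updateLeaderMap;
// the real chaperone implementations may differ.
private SimpleConsumer getSimpleConsumer(String broker) {
  String[] hostPort = broker.split(":");
  return new SimpleConsumer(hostPort[0], Integer.parseInt(hostPort[1]), 100000, 64 * 1024, "kafka-monitor");
}

private String getHostPort(kafka.cluster.Broker leader) {
  // leader can be null while a partition is between leaders
  return leader == null ? null : leader.host() + ":" + leader.port();
}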

Example 2: initializeLastProcessingOffset

import kafka.javaapi.TopicMetadata; // import the class this method depends on
private void initializeLastProcessingOffset()
{
  // read last received kafka message
  TopicMetadata tm = KafkaMetadataUtil.getTopicMetadata(Sets.newHashSet((String)getConfigProperties().get(KafkaMetadataUtil.PRODUCER_PROP_BROKERLIST)), this.getTopic());

  if (tm == null) {
    throw new RuntimeException("Failed to retrieve topic metadata");
  }

  partitionNum = tm.partitionsMetadata().size();

  lastMsgs = new HashMap<Integer, Pair<byte[],byte[]>>(partitionNum);

  for (PartitionMetadata pm : tm.partitionsMetadata()) {

    String leadBroker = pm.leader().host();
    int port = pm.leader().port();
    String clientName = this.getClass().getName().replace('$', '.') + "_Client_" + tm.topic() + "_" + pm.partitionId();
    SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);

    // LatestTime() resolves to the log-end offset (the offset of the next message
    // to be written), so readOffset - 1 addresses the last message in the partition
    long readOffset = KafkaMetadataUtil.getLastOffset(consumer, tm.topic(), pm.partitionId(), kafka.api.OffsetRequest.LatestTime(), clientName);

    FetchRequest req = new FetchRequestBuilder().clientId(clientName).addFetch(tm.topic(), pm.partitionId(), readOffset - 1, 100000).build();

    FetchResponse fetchResponse = consumer.fetch(req);
    for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(tm.topic(), pm.partitionId())) {

      Message m = messageAndOffset.message();

      ByteBuffer payload = m.payload();
      ByteBuffer key = m.key();
      byte[] valueBytes = new byte[payload.limit()];
      byte[] keyBytes = new byte[key.limit()];
      payload.get(valueBytes);
      key.get(keyBytes);
      lastMsgs.put(pm.partitionId(), new Pair<byte[], byte[]>(keyBytes, valueBytes));
    }
  }
}
 
Developer: apache, Project: apex-malhar, Lines: 40, Source: AbstractExactlyOnceKafkaOutputOperator.java
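One caveat: Message.key() returns null for messages written without a key, so the key-extraction lines above would throw a NullPointerException on keyless topics. A defensive variant of that section (a sketch, not the operator's actual code) could read:

// Tolerate keyless messages instead of failing on key.limit()
ByteBuffer payload = m.payload();
byte[] valueBytes = new byte[payload.limit()];
payload.get(valueBytes);

ByteBuffer key = m.key();
byte[] keyBytes = new byte[key == null ? 0 : key.limit()];
if (key != null) {
  key.get(keyBytes);
}
lastMsgs.put(pm.partitionId(), new Pair<byte[], byte[]>(keyBytes, valueBytes));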

Example 3: getAllTopicPartition

import kafka.javaapi.TopicMetadata; // import the class this method depends on
/**
 * Dedupe the partition metadata from all brokers.
 *
 * @return Deduped topic metadata
 */
public List<kafka.javaapi.TopicMetadata> getAllTopicPartition()
{
    List<kafka.javaapi.TopicMetadata> topicMetadataList = getMetaDataFromAllBrokers();
    HashSet<TopicPartition> exploredTopicPartition = new HashSet<TopicPartition>();
    List<kafka.javaapi.TopicMetadata> ret = new ArrayList<TopicMetadata>();

    // Filter any white list topics
    HashSet<String> whiteListTopics = new HashSet<String>(m_mDProps.whiteListTopics);
    if (!whiteListTopics.isEmpty()) {
        topicMetadataList = filterWhitelistTopics(topicMetadataList, whiteListTopics);
    }

    // Filter all blacklist topics
    HashSet<String> blackListTopics = new HashSet<String>(m_mDProps.blackListTopics);
    String regex = "";
    if (!blackListTopics.isEmpty()) {
        regex = createTopicRegEx(blackListTopics);
    }

    for (TopicMetadata item : topicMetadataList)
    {
        // An empty blacklist leaves regex == "", which matches no real topic name
        if (Pattern.matches(regex, item.topic())) {
            m_logger.debug("Discarding topic (blacklisted): " + item.topic());
            continue;
        }
        List<kafka.api.PartitionMetadata> pml = new ArrayList<kafka.api.PartitionMetadata>();
        for (PartitionMetadata part : item.partitionsMetadata())
        {
            if (!exploredTopicPartition.contains(new TopicPartition(item.topic(), part.partitionId())))
            {
                kafka.api.PartitionMetadata pm =
                        new kafka.api.PartitionMetadata(
                                part.partitionId(),
                                        Option.apply(part.leader()),
                                        JavaConversions.asScalaBuffer(part.replicas()).toList(),
                                        JavaConversions.asScalaBuffer(part.isr()).toList(),
                                        part.errorCode());
                pml.add(pm);
                exploredTopicPartition.add(new TopicPartition(item.topic(), part.partitionId()));
            }
        }
        if (pml.size() > 0)
        {
            kafka.api.TopicMetadata tm =
                    new kafka.api.TopicMetadata(
                            item.topic(),
                            JavaConversions.asScalaBuffer(pml).toList(),
                            item.errorCode());
            ret.add(new kafka.javaapi.TopicMetadata(tm));
        }
    }
    Collections.sort(ret, new TopicMetadataComparator());
    return ret;
}
 
Developer: Microsoft, Project: Availability-Monitor-for-Kafka, Lines: 60, Source: MetaDataManager.java
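createTopicRegEx is not shown in the excerpt. A plausible sketch, assuming it simply ORs the blacklisted names into one alternation (a hypothetical reconstruction, not the project's actual helper):

// Hypothetical reconstruction: builds "(topicA|topicB|...)" from the blacklist
private static String createTopicRegEx(HashSet<String> topics) {
  StringBuilder sb = new StringBuilder("(");
  String sep = "";
  for (String topic : topics) {
    sb.append(sep).append(Pattern.quote(topic));
    sep = "|";
  }
  return sb.append(")").toString();
}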

Example 4: getPartitionLeadersForTopics

import kafka.javaapi.TopicMetadata; // import the class this method depends on
/**
 * Send request to Kafka to get partitions for topics.
 *
 * @param topics The name of the topics.
 */
public List<KafkaTopicPartitionLeader> getPartitionLeadersForTopics(List<String> topics) {
	List<KafkaTopicPartitionLeader> partitions = new LinkedList<>();

	retryLoop: for (int retry = 0; retry < numRetries; retry++) {
		brokersLoop: for (int arrIdx = 0; arrIdx < seedBrokerAddresses.length; arrIdx++) {
			LOG.info("Trying to get topic metadata from broker {} in try {}/{}", seedBrokerAddresses[currentContactSeedBrokerIndex], retry, numRetries);

			try {
				// clear in case we have an incomplete list from previous tries
				partitions.clear();

				for (TopicMetadata item : consumer.send(new TopicMetadataRequest(topics)).topicsMetadata()) {
					if (item.errorCode() != ErrorMapping.NoError()) {
						// warn and try more brokers
						LOG.warn("Error while getting metadata from broker {} to find partitions for {}. Error: {}.",
							seedBrokerAddresses[currentContactSeedBrokerIndex], topics.toString(), ErrorMapping.exceptionFor(item.errorCode()).getMessage());

						useNextAddressAsNewContactSeedBroker();
						continue brokersLoop;
					}

					if (!topics.contains(item.topic())) {
						LOG.warn("Received metadata from topic " + item.topic() + " even though it was not requested. Skipping ...");

						useNextAddressAsNewContactSeedBroker();
						continue brokersLoop;
					}

					for (PartitionMetadata part : item.partitionsMetadata()) {
						Node leader = brokerToNode(part.leader());
						KafkaTopicPartition ktp = new KafkaTopicPartition(item.topic(), part.partitionId());
						KafkaTopicPartitionLeader pInfo = new KafkaTopicPartitionLeader(ktp, leader);
						partitions.add(pInfo);
					}
				}
				break retryLoop; // leave the loop through the brokers
			}
			catch (Exception e) {
				// validate the seed brokers in case of a ClosedChannelException
				validateSeedBrokers(seedBrokerAddresses, e);
				LOG.warn("Error communicating with broker {} to find partitions for {}. {} Message: {}",
					seedBrokerAddresses[currentContactSeedBrokerIndex], topics, e.getClass().getName(), e.getMessage());
				LOG.debug("Detailed trace", e);

				// we sleep a bit. Retrying immediately doesn't make sense in cases where Kafka is reorganizing the leader metadata
				try {
					Thread.sleep(500);
				} catch (InterruptedException e1) {
					// interrupted; the backoff simply ends early.
				}

				useNextAddressAsNewContactSeedBroker();
			}
		} // brokers loop
	} // retries loop

	return partitions;
}
 
Developer: axbaretto, Project: flink, Lines: 64, Source: Kafka08PartitionDiscoverer.java
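A design point worth noting: the labeled retryLoop/brokersLoop structure lets the method skip a misbehaving broker (continue brokersLoop) or finish early on success (break retryLoop) without any flag variables, and partitions.clear() at the top of each attempt guarantees that a failed pass never leaks a half-built partition list into the result.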

Example 5: getSplits

import kafka.javaapi.TopicMetadata; // import the class this method depends on
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableLayoutHandle layout)
{
    KafkaTableHandle kafkaTableHandle = convertLayout(layout).getTable();

    SimpleConsumer simpleConsumer = consumerManager.getConsumer(selectRandom(nodes));

    TopicMetadataRequest topicMetadataRequest = new TopicMetadataRequest(ImmutableList.of(kafkaTableHandle.getTopicName()));
    TopicMetadataResponse topicMetadataResponse = simpleConsumer.send(topicMetadataRequest);

    ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();

    for (TopicMetadata metadata : topicMetadataResponse.topicsMetadata()) {
        for (PartitionMetadata part : metadata.partitionsMetadata()) {
            log.debug("Adding Partition %s/%s", metadata.topic(), part.partitionId());

            Broker leader = part.leader();
            if (leader == null) { // Leader election going on...
                log.warn("No leader for partition %s/%s found!", metadata.topic(), part.partitionId());
                continue;
            }

            HostAddress partitionLeader = HostAddress.fromParts(leader.host(), leader.port());

            SimpleConsumer leaderConsumer = consumerManager.getConsumer(partitionLeader);
            // Kafka contains a reverse list of "end - start" pairs for the splits

            List<HostAddress> partitionNodes = ImmutableList.copyOf(Lists.transform(part.isr(), KafkaSplitManager::brokerToHostAddress));

            long[] offsets = findAllOffsets(leaderConsumer, metadata.topic(), part.partitionId());

            for (int i = offsets.length - 1; i > 0; i--) {
                KafkaSplit split = new KafkaSplit(
                        connectorId,
                        metadata.topic(),
                        kafkaTableHandle.getKeyDataFormat(),
                        kafkaTableHandle.getMessageDataFormat(),
                        part.partitionId(),
                        offsets[i],
                        offsets[i - 1],
                        partitionNodes);
                splits.add(split);
            }
        }
    }

    return new FixedSplitSource(connectorId, splits.build());
}
 
Developer: y-lan, Project: presto, Lines: 49, Source: KafkaSplitManager.java
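A note on how the splits are cut: findAllOffsets (not shown here) evidently returns the partition's offset boundaries newest-first, which is why the loop walks the array from the end, pairing offsets[i] as a split's start with offsets[i - 1] as its end, so that each KafkaSplit covers one contiguous offset range.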


Note: the kafka.javaapi.TopicMetadata.topic method examples in this article were curated by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects, and copyright remains with their original authors; consult each project's License before distributing or reusing the code. Please do not reproduce this article without permission.