

Java TopicMetadata.errorCode Method Code Examples

This article collects typical usage examples of the Java method kafka.javaapi.TopicMetadata.errorCode. If you are wondering what TopicMetadata.errorCode does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of its enclosing class, kafka.javaapi.TopicMetadata.


Six code examples of the TopicMetadata.errorCode method are shown below, sorted by popularity by default.
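Before the examples, a quick orientation: in the legacy Kafka 0.8 javaapi, a SimpleConsumer sends a TopicMetadataRequest and each TopicMetadata in the response carries a per-topic error code, where ErrorMapping.NoError() (0) means success. The following minimal sketch shows the basic pattern; the broker host, port, timeouts, and topic name are illustrative placeholders only.

import java.util.Collections;
import kafka.common.ErrorMapping;
import kafka.javaapi.TopicMetadata;
import kafka.javaapi.TopicMetadataRequest;
import kafka.javaapi.TopicMetadataResponse;
import kafka.javaapi.consumer.SimpleConsumer;

public class ErrorCodeSketch {
  public static void main(String[] args) {
    // Illustrative connection settings; replace with a real broker.
    SimpleConsumer consumer =
        new SimpleConsumer("localhost", 9092, 10000, 64 * 1024, "metadata-probe");
    try {
      TopicMetadataResponse response =
          consumer.send(new TopicMetadataRequest(Collections.singletonList("my-topic")));
      for (TopicMetadata metadata : response.topicsMetadata()) {
        short errorCode = metadata.errorCode();
        if (errorCode == ErrorMapping.NoError()) {
          System.out.println(metadata.topic() + " has "
              + metadata.partitionsMetadata().size() + " partitions");
        } else {
          // ErrorMapping translates the numeric code into the matching exception type.
          System.out.println(metadata.topic() + " returned error: "
              + ErrorMapping.exceptionFor(errorCode).getClass().getSimpleName());
        }
      }
    } finally {
      consumer.close();
    }
  }
}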

Example 1: getPartitionCount

import kafka.javaapi.TopicMetadata; // import the dependent package/class for this method
@Override
public int getPartitionCount(
    String metadataBrokerList,
    String topic,
    Map<String, Object> kafkaClientConfigs,
    int messageSendMaxRetries,
    long retryBackoffMs
) throws StageException {
  List<HostAndPort> kafkaBrokers = getKafkaBrokers(metadataBrokerList);
  TopicMetadata topicMetadata;
  try {
    topicMetadata = KafkaValidationUtil08.getTopicMetadata(
        kafkaBrokers,
        topic,
        messageSendMaxRetries,
        retryBackoffMs
    );
    if (topicMetadata == null) {
      // Could not get topic metadata from any of the supplied brokers
      throw new StageException(KafkaErrors.KAFKA_03, topic, metadataBrokerList);
    }
    if (topicMetadata.errorCode() == ErrorMapping.UnknownTopicOrPartitionCode()) {
      // Topic does not exist
      throw new StageException(KafkaErrors.KAFKA_04, topic);
    }
    if (topicMetadata.errorCode() != 0) {
      // Topic metadata returned error code other than ErrorMapping.UnknownTopicOrPartitionCode()
      throw new StageException(KafkaErrors.KAFKA_03, topic, metadataBrokerList);
    }
  } catch (IOException e) {
    LOG.error(KafkaErrors.KAFKA_11.getMessage(), topic, kafkaBrokers, e.toString(), e);
    throw new StageException(KafkaErrors.KAFKA_11, topic, kafkaBrokers, e.toString());
  }
  return topicMetadata.partitionsMetadata().size();
}
 
Developer: streamsets, Project: datacollector, Lines: 36, Source: KafkaValidationUtil08.java
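A hypothetical call site for the method above might look like the following; the broker list, topic name, and retry settings are placeholders, and the no-arg constructor is an assumption about the surrounding class:

// Hypothetical usage; all argument values are illustrative.
KafkaValidationUtil08 util = new KafkaValidationUtil08(); // assumed no-arg constructor
int partitions = util.getPartitionCount(
    "broker1:9092,broker2:9092",            // metadataBrokerList
    "my-topic",                             // topic
    Collections.<String, Object>emptyMap(), // kafkaClientConfigs (unused by this overload)
    3,                                      // messageSendMaxRetries
    1000L                                   // retryBackoffMs
);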

Example 2: getAllTopicPartition

import kafka.javaapi.TopicMetadata; // import the dependent package/class for this method
/**
 * Dedupe the partition metadata from all brokers
 *
 * @return Deduped topic metadata
 */
public List<kafka.javaapi.TopicMetadata> getAllTopicPartition()
{
    List<kafka.javaapi.TopicMetadata> topicMetadataList = getMetaDataFromAllBrokers();
    HashSet<TopicPartition> exploredTopicPartition = new HashSet<TopicPartition>();
    List<kafka.javaapi.TopicMetadata> ret = new ArrayList<TopicMetadata>();

    // Filter any white list topics
    HashSet<String> whiteListTopics = new HashSet<String>(m_mDProps.whiteListTopics);
    if (!whiteListTopics.isEmpty()) {
        topicMetadataList = filterWhitelistTopics(topicMetadataList, whiteListTopics);
    }

    // Filter all blacklist topics
    HashSet<String> blackListTopics = new HashSet<String>(m_mDProps.blackListTopics);
    String regex = "";
    if (!blackListTopics.isEmpty()) {
        regex = createTopicRegEx(blackListTopics);
    }

    for (TopicMetadata item : topicMetadataList)
    {
        if (Pattern.matches(regex, item.topic())) {
            m_logger.debug("Discarding topic (blacklisted): " + item.topic());
            continue;
        }
        List<kafka.api.PartitionMetadata> pml = new ArrayList<kafka.api.PartitionMetadata>();
        for (PartitionMetadata part : item.partitionsMetadata())
        {
            if (!exploredTopicPartition.contains(new TopicPartition(item.topic(), part.partitionId())))
            {
                kafka.api.PartitionMetadata pm =
                        new kafka.api.PartitionMetadata(
                                part.partitionId(),
                                        Option.apply(part.leader()),
                                        JavaConversions.asScalaBuffer(part.replicas()).toList(),
                                        JavaConversions.asScalaBuffer(part.isr()).toList(),
                                        part.errorCode());
                pml.add(pm);
                exploredTopicPartition.add(new TopicPartition(item.topic(), part.partitionId()));
            }
        }
        if (pml.size() > 0)
        {
            kafka.api.TopicMetadata tm =
                    new kafka.api.TopicMetadata(
                            item.topic(),
                            JavaConversions.asScalaBuffer(pml).toList(),
                            item.errorCode());
            ret.add(new kafka.javaapi.TopicMetadata(tm));
        }
    }
    Collections.sort(ret, new TopicMetadataComparator());
    return ret;
}
 
Developer: Microsoft, Project: Availability-Monitor-for-Kafka, Lines: 60, Source: MetaDataManager.java
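A note on the design above: the kafka.javaapi metadata classes appear to be read-only wrappers around their Scala kafka.api counterparts, which is why the deduplicated partition list has to be rebuilt as kafka.api.PartitionMetadata (via Option.apply and JavaConversions.asScalaBuffer) and then wrapped back into a kafka.javaapi.TopicMetadata before it can be returned.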

Example 3: getAllTopics

import kafka.javaapi.TopicMetadata; // import the dependent package/class for this method
@Override
protected List<String> getAllTopics() {
	List<String> topics = new LinkedList<>();

	retryLoop: for (int retry = 0; retry < numRetries; retry++) {
		brokersLoop: for (int arrIdx = 0; arrIdx < seedBrokerAddresses.length; arrIdx++) {
			LOG.info("Trying to get topic metadata from broker {} in try {}/{}", seedBrokerAddresses[currentContactSeedBrokerIndex], retry, numRetries);

			try {
				// clear in case we have an incomplete list from previous tries
				topics.clear();

				for (TopicMetadata item : consumer.send(new TopicMetadataRequest(Collections.<String>emptyList())).topicsMetadata()) {
					if (item.errorCode() != ErrorMapping.NoError()) {
						// warn and try more brokers
						LOG.warn("Error while getting metadata from broker {} to find partitions for {}. Error: {}.",
							seedBrokerAddresses[currentContactSeedBrokerIndex], topics.toString(), ErrorMapping.exceptionFor(item.errorCode()).getMessage());

						useNextAddressAsNewContactSeedBroker();
						continue brokersLoop;
					}

					topics.add(item.topic());
				}
				break retryLoop; // leave the loop through the brokers
			}
			catch (Exception e) {
				//validates seed brokers in case of a ClosedChannelException
				validateSeedBrokers(seedBrokerAddresses, e);
				LOG.warn("Error communicating with broker {} to find partitions for {}. {} Message: {}",
					seedBrokerAddresses[currentContactSeedBrokerIndex], topics, e.getClass().getName(), e.getMessage());
				LOG.debug("Detailed trace", e);

				// we sleep a bit. Retrying immediately doesn't make sense in cases where Kafka is reorganizing the leader metadata
				try {
					Thread.sleep(500);
				} catch (InterruptedException e1) {
					// sleep shorter.
				}

				useNextAddressAsNewContactSeedBroker();
			}
		} // brokers loop
	} // retries loop

	return topics;
}
 
Developer: axbaretto, Project: flink, Lines: 48, Source: Kafka08PartitionDiscoverer.java
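This example (and Example 4 below) uses the same retry skeleton: the labeled continue brokersLoop moves on to the next seed broker whenever a topic in the response reports a non-zero error code, break retryLoop exits both loops once a broker returns a clean metadata set, and the partial result list is cleared at the top of each attempt so the caller never sees a half-populated answer.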

Example 4: getPartitionLeadersForTopics

import kafka.javaapi.TopicMetadata; // import the dependent package/class for this method
/**
 * Send request to Kafka to get partitions for topics.
 *
 * @param topics The name of the topics.
 */
public List<KafkaTopicPartitionLeader> getPartitionLeadersForTopics(List<String> topics) {
	List<KafkaTopicPartitionLeader> partitions = new LinkedList<>();

	retryLoop: for (int retry = 0; retry < numRetries; retry++) {
		brokersLoop: for (int arrIdx = 0; arrIdx < seedBrokerAddresses.length; arrIdx++) {
			LOG.info("Trying to get topic metadata from broker {} in try {}/{}", seedBrokerAddresses[currentContactSeedBrokerIndex], retry, numRetries);

			try {
				// clear in case we have an incomplete list from previous tries
				partitions.clear();

				for (TopicMetadata item : consumer.send(new TopicMetadataRequest(topics)).topicsMetadata()) {
					if (item.errorCode() != ErrorMapping.NoError()) {
						// warn and try more brokers
						LOG.warn("Error while getting metadata from broker {} to find partitions for {}. Error: {}.",
							seedBrokerAddresses[currentContactSeedBrokerIndex], topics.toString(), ErrorMapping.exceptionFor(item.errorCode()).getMessage());

						useNextAddressAsNewContactSeedBroker();
						continue brokersLoop;
					}

					if (!topics.contains(item.topic())) {
						LOG.warn("Received metadata from topic " + item.topic() + " even though it was not requested. Skipping ...");

						useNextAddressAsNewContactSeedBroker();
						continue brokersLoop;
					}

					for (PartitionMetadata part : item.partitionsMetadata()) {
						Node leader = brokerToNode(part.leader());
						KafkaTopicPartition ktp = new KafkaTopicPartition(item.topic(), part.partitionId());
						KafkaTopicPartitionLeader pInfo = new KafkaTopicPartitionLeader(ktp, leader);
						partitions.add(pInfo);
					}
				}
				break retryLoop; // leave the loop through the brokers
			}
			catch (Exception e) {
				//validates seed brokers in case of a ClosedChannelException
				validateSeedBrokers(seedBrokerAddresses, e);
				LOG.warn("Error communicating with broker {} to find partitions for {}. {} Message: {}",
					seedBrokerAddresses[currentContactSeedBrokerIndex], topics, e.getClass().getName(), e.getMessage());
				LOG.debug("Detailed trace", e);

				// we sleep a bit. Retrying immediately doesn't make sense in cases where Kafka is reorganizing the leader metadata
				try {
					Thread.sleep(500);
				} catch (InterruptedException e1) {
					// sleep shorter.
				}

				useNextAddressAsNewContactSeedBroker();
			}
		} // brokers loop
	} // retries loop

	return partitions;
}
 
Developer: axbaretto, Project: flink, Lines: 64, Source: Kafka08PartitionDiscoverer.java

Example 5: validateTopicExistence

import kafka.javaapi.TopicMetadata; // import the dependent package/class for this method
@Override
public boolean validateTopicExistence(
  Stage.Context context,
  String groupName,
  String configName,
  List<HostAndPort> kafkaBrokers,
  String metadataBrokerList,
  String topic,
  Map<String, Object> kafkaClientConfigs,
  List<Stage.ConfigIssue> issues,
  boolean producer
) {
  boolean valid = true;
  if(topic == null || topic.isEmpty()) {
    issues.add(context.createConfigIssue(groupName, configName, KafkaErrors.KAFKA_05));
    valid = false;
  } else {
    TopicMetadata topicMetadata;
    try {
      topicMetadata = KafkaValidationUtil08.getTopicMetadata(kafkaBrokers, topic, 1, 0);
      if(topicMetadata == null) {
        //Could not get topic metadata from any of the supplied brokers
        issues.add(
            context.createConfigIssue(
                groupName,
                KAFKA_CONFIG_BEAN_PREFIX + "topic",
                KafkaErrors.KAFKA_03,
                topic,
                metadataBrokerList
            )
        );
        valid = false;
      } else if (topicMetadata.errorCode() == ErrorMapping.UnknownTopicOrPartitionCode()) {
        //Topic does not exist
        issues.add(
            context.createConfigIssue(
                groupName,
                KAFKA_CONFIG_BEAN_PREFIX + "topic",
                KafkaErrors.KAFKA_04,
                topic
            )
        );
        valid = false;
      } else if (topicMetadata.errorCode() != 0) {
        // Topic metadata returned error code other than ErrorMapping.UnknownTopicOrPartitionCode()
        issues.add(
            context.createConfigIssue(
                groupName,
                KAFKA_CONFIG_BEAN_PREFIX + "topic",
                KafkaErrors.KAFKA_03,
                topic,
                metadataBrokerList
            )
        );
        valid = false;
      }
    } catch (IOException e) {
      //Could not connect to kafka with the given metadata broker list
      issues.add(
          context.createConfigIssue(
              groupName,
              KAFKA_CONFIG_BEAN_PREFIX + "metadataBrokerList",
              KafkaErrors.KAFKA_67,
              metadataBrokerList
          )
      );
      valid = false;
    }
  }
  return valid;
}
 
Developer: streamsets, Project: datacollector, Lines: 72, Source: KafkaValidationUtil08.java

Example 6: getPartitionCount

import kafka.javaapi.TopicMetadata; // import the dependent package/class for this method
public synchronized int getPartitionCount(String topic, long timeoutMillis) {
  int unknownTopicReplyCount = 0;
  final int MAX_UNKNOWN_TOPIC_REPLY_COUNT = 10;
  int kafkaErrorCount = 0;
  final int MAX_KAFKA_ERROR_COUNT = 10;

  final long endTime = System.currentTimeMillis() + timeoutMillis;

  while(System.currentTimeMillis() < endTime) {
    // Try to get into a state where we're connected to Kafka
    while (!_currentState.isConnectedToKafkaBroker() && System.currentTimeMillis() < endTime) {
      _currentState.process();
    }

    if (endTime <= System.currentTimeMillis() && !_currentState.isConnectedToKafkaBroker()) {
      throw new TimeoutException("Failed to get the partition count for topic " + topic + " within " + timeoutMillis
          + " ms");
    }

    // Send the metadata request to Kafka
    TopicMetadataResponse topicMetadataResponse = null;
    try {
      topicMetadataResponse = _simpleConsumer.send(new TopicMetadataRequest(Collections.singletonList(topic)));
    } catch (Exception e) {
      _currentState.handleConsumerException(e);
      continue;
    }

    final TopicMetadata topicMetadata = topicMetadataResponse.topicsMetadata().get(0);
    final short errorCode = topicMetadata.errorCode();

    if (errorCode == Errors.NONE.code()) {
      return topicMetadata.partitionsMetadata().size();
    } else if (errorCode == Errors.LEADER_NOT_AVAILABLE.code()) {
      // If there is no leader, it'll take some time for a new leader to be elected, wait 100 ms before retrying
      Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
    } else if (errorCode == Errors.INVALID_TOPIC_EXCEPTION.code()) {
      throw new RuntimeException("Invalid topic name " + topic);
    } else if (errorCode == Errors.UNKNOWN_TOPIC_OR_PARTITION.code()) {
      if (MAX_UNKNOWN_TOPIC_REPLY_COUNT < unknownTopicReplyCount) {
        throw new RuntimeException("Topic " + topic + " does not exist");
      } else {
        // Kafka topic creation can sometimes take some time, so we'll retry after a little bit
        unknownTopicReplyCount++;
        Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
      }
    } else {
      // Retry after a short delay
      kafkaErrorCount++;

      if (MAX_KAFKA_ERROR_COUNT < kafkaErrorCount) {
        throw exceptionForKafkaErrorCode(errorCode);
      }

      Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
    }
  }

  throw new TimeoutException();
}
 
Developer: linkedin, Project: pinot, Lines: 61, Source: SimpleConsumerWrapper.java
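Note that Examples 1 through 5 interpret the returned short against the legacy kafka.common.ErrorMapping constants, while Example 6 compares it against the org.apache.kafka.common.protocol.Errors enum from the newer Kafka clients library; both map the same wire-protocol codes. A minimal sketch of translating a non-zero code both ways, assuming both libraries are on the classpath:

import kafka.common.ErrorMapping;
import org.apache.kafka.common.protocol.Errors;

public class ErrorCodeTranslation {
  // Prints both interpretations of an error code from TopicMetadata.errorCode().
  static void describe(short errorCode) {
    if (errorCode == ErrorMapping.NoError()) {
      System.out.println("no error");
      return;
    }
    Throwable legacy = ErrorMapping.exceptionFor(errorCode); // legacy exception type
    Errors error = Errors.forCode(errorCode);                // newer enum constant
    System.out.println(error.name() + " -> " + legacy.getClass().getName());
  }

  public static void main(String[] args) {
    describe((short) 3); // 3 = UnknownTopicOrPartition in both APIs
  }
}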


Note: The kafka.javaapi.TopicMetadata.errorCode examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.