

Java TopicPartition.partition Method Code Examples

This article collects typical usage examples of the Java method org.apache.kafka.common.TopicPartition.partition. A TopicPartition is an immutable (topic, partition) pair; partition() returns the partition number and topic() returns the topic name. If you are unsure how to call TopicPartition.partition, or want to see how it is used in real projects, the curated code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.kafka.common.TopicPartition.


The following presents 11 code examples of the TopicPartition.partition method, sorted by popularity by default.
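
Before diving into the examples, here is a minimal sketch of the basic contract of the class: a TopicPartition is an immutable (topic, partition) pair, partition() returns the numeric half, and topic() returns the name. The topic name and partition number below are made up for illustration.

import org.apache.kafka.common.TopicPartition;

public class TopicPartitionBasics {
    public static void main(String[] args) {
        // A TopicPartition is just an immutable (topic, partition) pair.
        TopicPartition tp = new TopicPartition("orders", 3); // hypothetical topic and partition

        String topic = tp.topic();      // "orders"
        int partition = tp.partition(); // 3

        System.out.println(topic + "-" + partition); // prints "orders-3"
    }
}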

Example 1: committedFileName

import org.apache.kafka.common.TopicPartition; // import the package/class this method depends on
public static String committedFileName(String url, String topicsDir, String directory,
                                       TopicPartition topicPart, long startOffset, long endOffset,
                                       String extension, String zeroPadFormat) {
  String topic = topicPart.topic();
  int partition = topicPart.partition();
  StringBuilder sb = new StringBuilder();
  sb.append(topic);
  sb.append(HdfsSinkConnectorConstants.COMMMITTED_FILENAME_SEPARATOR);
  sb.append(partition);
  sb.append(HdfsSinkConnectorConstants.COMMMITTED_FILENAME_SEPARATOR);
  sb.append(String.format(zeroPadFormat, startOffset));
  sb.append(HdfsSinkConnectorConstants.COMMMITTED_FILENAME_SEPARATOR);
  sb.append(String.format(zeroPadFormat, endOffset));
  sb.append(extension);
  String name = sb.toString();
  return fileName(url, topicsDir, directory, name);
}
 
Developer: jiangxiluning, Project: kafka-connect-hdfs, Lines: 18, Source file: FileUtils.java
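
For context on the arguments above: zeroPadFormat is an ordinary java.lang.String.format pattern, so the start and end offsets end up left-padded to a fixed width, and the committed file name takes the shape topic<separator>partition<separator>paddedStart<separator>paddedEnd<extension>. The pattern and offset in the sketch below are illustrative assumptions, not the connector's actual defaults.

public class ZeroPadDemo {
    public static void main(String[] args) {
        // Illustrative only: a "%010d" pattern left-pads an offset to 10 digits.
        String zeroPadFormat = "%010d"; // assumed pattern, not necessarily the connector default
        String padded = String.format(zeroPadFormat, 42L);
        System.out.println(padded);     // prints "0000000042"
    }
}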

Example 2: hasPartition

import org.apache.kafka.common.TopicPartition; // import the package/class this method depends on
private boolean hasPartition(final TopicPartition topicPartition) {
    final List<PartitionInfo> partitions = partitionInfo.get(topicPartition.topic());

    if (partitions == null) {
        return false;
    }

    for (final PartitionInfo partition : partitions) {
        if (partition.partition() == topicPartition.partition()) {
            return true;
        }
    }

    return false;

}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 17, Source file: StoreChangelogReader.java
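
In the snippet above, partitionInfo is a field of StoreChangelogReader that is populated elsewhere in that class. A similar existence check can be made directly against live broker metadata through the consumer API; the sketch below is an assumption-laden variant using Consumer#partitionsFor, not code from the original class.

import java.util.List;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

public final class PartitionChecks {
    // Checks against broker metadata whether the given topic-partition exists.
    public static boolean hasPartition(Consumer<?, ?> consumer, TopicPartition tp) {
        List<PartitionInfo> partitions = consumer.partitionsFor(tp.topic());
        if (partitions == null) {
            return false;
        }
        for (PartitionInfo info : partitions) {
            if (info.partition() == tp.partition()) {
                return true;
            }
        }
        return false;
    }
}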

Example 3: compare

import org.apache.kafka.common.TopicPartition; // import the package/class this method depends on
@Override
public int compare(TopicPartition p1, TopicPartition p2) {
    int result = p1.topic().compareTo(p2.topic());

    if (result != 0) {
        return result;
    } else {
        // UNKNOWN is a constant equal to -1 in the enclosing StreamPartitionAssignor class,
        // so this is an ordinary three-way comparison on the partition number.
        return p1.partition() < p2.partition() ? UNKNOWN : (p1.partition() > p2.partition() ? 1 : 0);
    }
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 11, Source file: StreamPartitionAssignor.java
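
The same ordering can be expressed more compactly with Java 8 comparator combinators; the sketch below is an equivalent reformulation for illustration, not the code Kafka ships.

import java.util.Comparator;
import org.apache.kafka.common.TopicPartition;

public class TopicPartitionOrder {
    // Same ordering as the handwritten comparator above: topic name first, then partition number.
    public static final Comparator<TopicPartition> BY_TOPIC_THEN_PARTITION =
        Comparator.comparing(TopicPartition::topic)
                  .thenComparingInt(TopicPartition::partition);
}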

Example 4: compare

import org.apache.kafka.common.TopicPartition; // import the package/class this method depends on
@Override
public int compare(TopicPartition o1, TopicPartition o2) {
    int ret = map.get(o1).size() - map.get(o2).size();
    if (ret == 0) {
        ret = o1.topic().compareTo(o2.topic());
        if (ret == 0)
            ret = o1.partition() - o2.partition();
    }
    return ret;
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 11, Source file: StickyAssignor.java

Example 5: onConsume

import org.apache.kafka.common.TopicPartition; // import the package/class this method depends on
@Override
public ConsumerRecords<K, V> onConsume(ConsumerRecords<K, V> records) {
    onConsumeCount++;
    if (throwExceptionOnConsume)
        throw new KafkaException("Injected exception in FilterConsumerInterceptor.onConsume.");

    // filters out topic/partitions with partition == FILTER_PARTITION
    Map<TopicPartition, List<ConsumerRecord<K, V>>> recordMap = new HashMap<>();
    for (TopicPartition tp : records.partitions()) {
        if (tp.partition() != filterPartition)
            recordMap.put(tp, records.records(tp));
    }
    return new ConsumerRecords<K, V>(recordMap);
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 15, Source file: ConsumerInterceptorsTest.java
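
This onConsume implementation comes from a test interceptor. To attach an interceptor like it to a real consumer, you list its class name under the interceptor.classes setting; in the sketch below the broker address, group id and interceptor class name are placeholders.

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;

public class InterceptorWiring {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker address
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "demo-group");              // placeholder group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                  "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                  "org.apache.kafka.common.serialization.StringDeserializer");
        // Register the interceptor chain; com.example.MyFilterInterceptor stands in for an
        // implementation like the FilterConsumerInterceptor whose onConsume is shown above.
        props.put(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, "com.example.MyFilterInterceptor");
        // new KafkaConsumer<>(props) would then instantiate and invoke the interceptor chain.
        System.out.println(props);
    }
}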

Example 6: compare

import org.apache.kafka.common.TopicPartition; // import the package/class this method depends on
@Override
public int compare(TopicPartition x, TopicPartition y) {
  int result = x.topic().compareTo(y.topic());
  if (result == 0) {
    result = x.partition() - y.partition();
  }
  return result;
}
 
Developer: pinterest, Project: doctorkafka, Lines: 9, Source file: KafkaUtils.java

Example 7: partitionHandleDotInTopicName

import org.apache.kafka.common.TopicPartition; // import the package/class this method depends on
private TopicPartition partitionHandleDotInTopicName(TopicPartition tp) {
  // In the reported metrics, the "." in the topic name will be replaced by "_".
  return !tp.topic().contains(".") ? tp :
    new TopicPartition(tp.topic().replace('.', '_'), tp.partition());
}
 
Developer: linkedin, Project: cruise-control, Lines: 6, Source file: CruiseControlMetricsProcessor.java
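
A quick illustration of the mapping (the topic name is invented, and since the helper above is private, the replacement is repeated inline here rather than called):

import org.apache.kafka.common.TopicPartition;

public class DotReplacementDemo {
    public static void main(String[] args) {
        TopicPartition original = new TopicPartition("page.views", 0); // made-up topic containing a dot
        // Same mapping as the private helper above, repeated inline for illustration.
        TopicPartition reported = !original.topic().contains(".") ? original :
            new TopicPartition(original.topic().replace('.', '_'), original.partition());
        System.out.println(reported); // prints "page_views-0"
    }
}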

Example 8: directoryName

import org.apache.kafka.common.TopicPartition; // import the package/class this method depends on
public static String directoryName(String url, String topicsDir, TopicPartition topicPart) {
  String topic = topicPart.topic();
  int partition = topicPart.partition();
  return url + "/" + topicsDir + "/" + topic + "/" + partition;
}
 
Developer: jiangxiluning, Project: kafka-connect-hdfs, Lines: 6, Source file: FileUtils.java

Example 9: fileName

import org.apache.kafka.common.TopicPartition; // import the package/class this method depends on
public static String fileName(String url, String topicsDir, TopicPartition topicPart,
                              String name) {
  String topic = topicPart.topic();
  int partition = topicPart.partition();
  return url + "/" + topicsDir + "/" + topic + "/" + partition + "/" + name;
}
 
Developer: jiangxiluning, Project: kafka-connect-hdfs, Lines: 7, Source file: FileUtils.java
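
Putting the two FileUtils helpers from examples 8 and 9 together, a call with made-up arguments resolves to a plain slash-joined path; the HDFS URL, topics directory and file name below are placeholders, and the snippet assumes the FileUtils class shown above is on the classpath.

// Assumes the FileUtils class from examples 1, 8 and 9 is imported; URL and names are made up.
TopicPartition tp = new TopicPartition("logs", 2);
String dir = FileUtils.directoryName("hdfs://namenode:8020", "topics", tp);
// -> "hdfs://namenode:8020/topics/logs/2"
String file = FileUtils.fileName("hdfs://namenode:8020", "topics", tp, "data.avro");
// -> "hdfs://namenode:8020/topics/logs/2/data.avro"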

Example 10: getOffsetQuarz

import org.apache.kafka.common.TopicPartition; // import the package/class this method depends on
public static List<OffsetInfo> getOffsetQuarz() {

    Map<String, Map<String, List<OffsetInfo>>> groupTopicPartitionListMap = new ConcurrentHashMap<>();

    for (Map.Entry<GroupTopicPartition, OffsetAndMetadata> entry : kafkaConsumerOffsets.entrySet()) {
        GroupTopicPartition groupTopicPartition = entry.getKey();
        OffsetAndMetadata offsetAndMetadata = entry.getValue();
        String group = groupTopicPartition.group();
        TopicPartition topicPartition = groupTopicPartition.topicPartition();
        String topic = topicPartition.topic();
        int partition = topicPartition.partition();
        Long committedOffset = offsetAndMetadata.offset();

        if (!logEndOffsetMap.containsKey(topicPartition)) {
            logger.error("The logEndOffsetMap does not contain " + topicPartition);
            return null;
        }
        long logSize = logEndOffsetMap.get(topicPartition);

        // The refresh thread may lag behind, so never report a log size smaller than the committed offset.
        logSize = logSize >= committedOffset ? logSize : committedOffset;
        long lag = committedOffset == -1 ? 0 : (logSize - committedOffset);

        OffsetInfo offsetInfo = new OffsetInfo();
        offsetInfo.setGroup(group);
        offsetInfo.setTopic(topic);
        offsetInfo.setCommittedOffset(committedOffset);
        offsetInfo.setLogSize(logSize);
        offsetInfo.setLag(lag);
        offsetInfo.setTimestamp(offsetAndMetadata.commitTimestamp());

        if (!groupTopicPartitionListMap.containsKey(group)) {
            Map<String, List<OffsetInfo>> topicPartitionMap = new ConcurrentHashMap<>();
            groupTopicPartitionListMap.put(group, topicPartitionMap);
        }
        if (!groupTopicPartitionListMap.get(group).containsKey(topic)) {
            List<OffsetInfo> offsetInfos = new ArrayList<>();
            groupTopicPartitionListMap.get(group).put(topic, offsetInfos);
        }
        groupTopicPartitionListMap.get(group).get(topic).add(offsetInfo);
    }
    return flattenNestedMap(groupTopicPartitionListMap);
}
 
Developer: dubin555, Project: Kafka-Insight, Lines: 45, Source file: KafkaOffsetGetter.java
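
The method above depends on kafkaConsumerOffsets and logEndOffsetMap being refreshed elsewhere in KafkaOffsetGetter. For a single partition, a comparable lag figure can be obtained directly through the public consumer API; the sketch below is an assumed variant, not code from that project.

import java.util.Collections;
import java.util.Map;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public final class LagCheck {
    // Lag for a single partition, computed the same way as above:
    // lag = log-end offset minus last committed offset, floored at zero.
    public static long lagFor(Consumer<?, ?> consumer, TopicPartition tp) {
        Map<TopicPartition, Long> end = consumer.endOffsets(Collections.singletonList(tp));
        long logEnd = end.getOrDefault(tp, 0L);
        OffsetAndMetadata committed = consumer.committed(tp); // null if the group never committed
        if (committed == null) {
            return 0L;
        }
        return Math.max(0L, logEnd - committed.offset());
    }
}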

Example 11: getPartition

import org.apache.kafka.common.TopicPartition; // import the package/class this method depends on
private int getPartition(final String topic) {
    final TopicPartition partition = partitionForTopic.get(topic);
    return partition == null ? taskId.partition : partition.partition();
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 5, Source file: ProcessorStateManager.java


Note: The org.apache.kafka.common.TopicPartition.partition examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their authors; copyright remains with the original authors, and any distribution or use should follow the license of the corresponding project. Do not republish without permission.