

Java TopicPartition Class Code Examples

This article collects typical usage examples of the Java class org.apache.kafka.common.TopicPartition. If you are wondering what TopicPartition is for and how to use it in practice, the curated class examples below should help.


The TopicPartition class belongs to the org.apache.kafka.common package. Fifteen code examples are shown below, ordered by popularity by default.
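Before diving into the examples, a quick orientation may help: TopicPartition is simply an immutable (topic, partition) pair with value-based equals() and hashCode(), which is why the examples below use it freely as a map key and set element. The following minimal sketch illustrates this; the topic name "orders" is illustrative and not taken from any of the cited projects.

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.TopicPartition;

public class TopicPartitionBasics {
    public static void main(String[] args) {
        // an immutable (topic, partition) pair
        TopicPartition tp = new TopicPartition("orders", 0);
        System.out.println(tp.topic());     // orders
        System.out.println(tp.partition()); // 0

        // equals()/hashCode() are value-based, so two instances with the
        // same topic and partition are interchangeable as map keys
        Map<TopicPartition, Long> offsets = new HashMap<>();
        offsets.put(new TopicPartition("orders", 0), 42L);
        System.out.println(offsets.get(tp)); // 42
    }
}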

Example 1: assignPartition

import org.apache.kafka.common.TopicPartition; // import the package/class we depend on
/**
 * @return the consumer to which the given partition is assigned. The assignment should improve the overall balance
 * of the partition assignments to consumers.
 */
private String assignPartition(TopicPartition partition,
                               TreeSet<String> sortedCurrentSubscriptions,
                               Map<String, List<TopicPartition>> currentAssignment,
                               Map<String, List<TopicPartition>> consumer2AllPotentialPartitions,
                               Map<TopicPartition, String> currentPartitionConsumer) {
    for (String consumer: sortedCurrentSubscriptions) {
        if (consumer2AllPotentialPartitions.get(consumer).contains(partition)) {
            // remove and re-add the consumer so the TreeSet re-sorts it
            // after its assignment grows by one partition
            sortedCurrentSubscriptions.remove(consumer);
            currentAssignment.get(consumer).add(partition);
            currentPartitionConsumer.put(partition, consumer);
            sortedCurrentSubscriptions.add(consumer);
            return consumer;
        }
    }
    return null;
}
 
Developer: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Source: StickyAssignor.java

Example 2: createConsumer

import org.apache.kafka.common.TopicPartition; // import the package/class we depend on
/**
 * createConsumer - create a new consumer assigned to partition 0 of each topic
 * @return a consumer positioned at the end of each partition, or rolled back by {@code rollBack} offsets
 * @throws Exception if the consumer properties cannot be loaded
 */
private Consumer<String, String> createConsumer() throws Exception {
    Properties props = ConfUtils.getProps(CONSUMER_PROPS);
    Consumer<String, String> consumer = new KafkaConsumer<>(props);
   
    // Assign partition 0 of every topic (this checker assumes single-partition topics),
    // then either seek to the end or roll back by `rollBack` offsets
    List<TopicPartition> pts = topics.stream().map(s -> new TopicPartition(s, 0)).collect(Collectors.toList());
    consumer.assign(pts);
    if (rollBack == 0) {
        consumer.seekToEnd(pts);
    } else {
        for (TopicPartition topicPartition : pts) {
            consumer.seek(topicPartition, consumer.position(topicPartition) - rollBack);
            logger.info("Consumer sought back " + rollBack + " to: " + consumer.position(topicPartition));
        }
    }
    return consumer;
}
 
Developer: BriData | Project: DBus | Source: FullPullerPerfChecker.java
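One caveat with the rollback seek above: if rollBack is larger than the current position, or retention has already removed the oldest records, the computed target can fall below the earliest available offset. A safer variant (a sketch, assuming the same imports as above plus java.util.Map and java.util.List) clamps the target against beginningOffsets():

private void seekBack(Consumer<String, String> consumer, List<TopicPartition> pts, long rollBack) {
    Map<TopicPartition, Long> beginnings = consumer.beginningOffsets(pts);
    for (TopicPartition tp : pts) {
        // never seek below the earliest offset still present in the partition
        long target = Math.max(beginnings.get(tp), consumer.position(tp) - rollBack);
        consumer.seek(tp, target);
    }
}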

Example 3: recordPartitionLag

import org.apache.kafka.common.TopicPartition; // import the package/class we depend on
private void recordPartitionLag(TopicPartition tp, long lag) {
    this.recordsFetchLag.record(lag);

    String name = partitionLagMetricName(tp);
    Sensor recordsLag = this.metrics.getSensor(name);
    // lazily register a per-partition sensor exposing current, max and avg lag metrics
    if (recordsLag == null) {
        recordsLag = this.metrics.sensor(name);
        recordsLag.add(this.metrics.metricName(name,
                metricsRegistry.partitionRecordsLag.group(),
                metricsRegistry.partitionRecordsLag.description()), new Value());
        recordsLag.add(this.metrics.metricName(name + "-max",
                metricsRegistry.partitionRecordsLagMax.group(),
                metricsRegistry.partitionRecordsLagMax.description()), new Max());
        recordsLag.add(this.metrics.metricName(name + "-avg",
                metricsRegistry.partitionRecordsLagAvg.group(),
                metricsRegistry.partitionRecordsLagAvg.description()), new Avg());
    }
    recordsLag.record(lag);
}
 
Developer: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Source: Fetcher.java
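The sensor above is internal to the Fetcher; from application code, the same lag numbers surface through KafkaConsumer.metrics(). A rough sketch of reading them follows — the exact metric names and where the partition appears in them vary between Kafka versions, so the string matching here is an assumption based on the 0.11.x naming used above:

import java.util.Map;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;

// assumes an existing KafkaConsumer<String, String> consumer
for (Map.Entry<MetricName, ? extends Metric> entry : consumer.metrics().entrySet()) {
    MetricName name = entry.getKey();
    // in 0.11.x the per-partition lag sensors embed the partition in the metric name
    if (name.name().endsWith("records-lag-max")) {
        System.out.println(name.name() + " = " + entry.getValue().value());
    }
}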

Example 4: assignNewPartitonsCreatesProcessors

import org.apache.kafka.common.TopicPartition; // import the package/class we depend on
@Test
public void assignNewPartitonsCreatesProcessors() {
    PartitionProcessorFactory ppf = processorFactory();
    AssignedPartitions ap = new AssignedPartitions(ppf);

    Collection<TopicPartition> newPartitions = new ArrayList<>();
    newPartitions.add(new TopicPartition(TOPIC, 3));
    newPartitions.add(new TopicPartition(TOPIC, 1));
    newPartitions.add(new TopicPartition(TOPIC, 99));

    ap.assignNewPartitions(newPartitions);

    assertFalse(ap.allPartitions().isEmpty());
    Set<TopicPartition> partitions = ap.allPartitions();
    assertEquals(3, partitions.size());
    assertTrue(partitions.containsAll(newPartitions));
}
 
Developer: Sixt | Project: ja-micro | Source: AssignedPartitionsTest.java

Example 5: prepareOffsetCommitResponse

import org.apache.kafka.common.TopicPartition; // import the package/class we depend on
private AtomicBoolean prepareOffsetCommitResponse(MockClient client, Node coordinator, final Map<TopicPartition, Long> partitionOffsets) {
    final AtomicBoolean commitReceived = new AtomicBoolean(true); // flipped to false if any committed offset differs from the expected one
    Map<TopicPartition, Errors> response = new HashMap<>();
    for (TopicPartition partition : partitionOffsets.keySet())
        response.put(partition, Errors.NONE);

    client.prepareResponseFrom(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            OffsetCommitRequest commitRequest = (OffsetCommitRequest) body;
            for (Map.Entry<TopicPartition, Long> partitionOffset : partitionOffsets.entrySet()) {
                OffsetCommitRequest.PartitionData partitionData = commitRequest.offsetData().get(partitionOffset.getKey());
                // verify that the expected offset has been committed
                if (partitionData.offset != partitionOffset.getValue()) {
                    commitReceived.set(false);
                    return false;
                }
            }
            return true;
        }
    }, offsetCommitResponse(response), coordinator);
    return commitReceived;
}
 
Developer: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Source: KafkaConsumerTest.java

Example 6: updateReplicaReassignmentTimestamp

import org.apache.kafka.common.TopicPartition; // import the package/class we depend on
public static void updateReplicaReassignmentTimestamp(String brokerZkUrl,
                                                       ReplicaStat replicaStat) {
  // computeIfAbsent closes the check-then-act race of containsKey()/put()
  ConcurrentHashMap<TopicPartition, Long> replicaTimestamps =
      replicaReassignmentTimestamps.computeIfAbsent(brokerZkUrl, url -> new ConcurrentHashMap<>());
  TopicPartition topicPartition = new TopicPartition(
      replicaStat.getTopic(), replicaStat.getPartition());

  // keep the most recent timestamp seen for this partition
  replicaTimestamps.merge(topicPartition, replicaStat.getTimestamp(), Math::max);
}
 
Developer: pinterest | Project: doctorkafka | Source: ReplicaStatsManager.java

Example 7: shouldDeleteCheckpointFileOnCreationIfEosEnabled

import org.apache.kafka.common.TopicPartition; // import the package/class we depend on
@Test
public void shouldDeleteCheckpointFileOnCreationIfEosEnabled() throws Exception {
    checkpoint.write(Collections.<TopicPartition, Long>emptyMap());
    assertTrue(checkpointFile.exists());

    ProcessorStateManager stateManager = null;
    try {
        stateManager = new ProcessorStateManager(
            taskId,
            noPartitions,
            false, // not a standby task
            stateDirectory,
            Collections.<String, String>emptyMap(),
            changelogReader,
            true); // EOS enabled, so the checkpoint file must be deleted on creation

        assertFalse(checkpointFile.exists());
    } finally {
        if (stateManager != null) {
            stateManager.close(null);
        }
    }
}
 
Developer: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Source: ProcessorStateManagerTest.java

Example 8: toString

import org.apache.kafka.common.TopicPartition; // import the package/class we depend on
/**
 * Produces a string representation containing useful information about a StreamTask starting with the given indent.
 * This is useful in debugging scenarios.
 * @return A string representation of the StreamTask instance.
 */
public String toString(final String indent) {
    final StringBuilder sb = new StringBuilder();
    sb.append(indent);
    sb.append("StreamsTask taskId: ");
    sb.append(id);
    sb.append("\n");

    // print topology
    if (topology != null) {
        sb.append(indent).append(topology.toString(indent + "\t"));
    }

    // print assigned partitions
    if (partitions != null && !partitions.isEmpty()) {
        sb.append(indent).append("Partitions [");
        for (final TopicPartition topicPartition : partitions) {
            sb.append(topicPartition.toString()).append(", ");
        }
        sb.setLength(sb.length() - 2);
        sb.append("]\n");
    }
    return sb.toString();
}
 
Developer: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Source: AbstractTask.java

Example 9: messagesRemaining

import org.apache.kafka.common.TopicPartition; // import the package/class we depend on
private static long messagesRemaining(KafkaConsumer<String, String> consumer, TopicPartition partition) {
    long currentPosition = consumer.position(partition);
    Map<TopicPartition, Long> endOffsets = consumer.endOffsets(singleton(partition));
    if (endOffsets.containsKey(partition)) {
        return endOffsets.get(partition) - currentPosition;
    }
    return 0;
}
 
Developer: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Source: TransactionalMessageCopier.java
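A typical use of messagesRemaining is a drain loop that keeps polling until the consumer has caught up with the end of the partition. A minimal sketch, assuming the helper above, an already-assigned consumer, and a hypothetical process() handler:

// assumes `consumer` is already assigned and positioned on `partition`
while (messagesRemaining(consumer, partition) > 0) {
    for (ConsumerRecord<String, String> record : consumer.poll(1000)) {
        process(record); // hypothetical per-record handler
    }
}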

Example 10: update

import org.apache.kafka.common.TopicPartition; // import the package/class we depend on
/**
 *  Record the stats, and update the topic partition list based on the stats
 *
 *  @param stats the broker stats
 */
public void update(BrokerStats stats) {
  if (stats == null
      || (latestStats != null && latestStats.getTimestamp() > stats.getTimestamp())
      || stats.getHasFailure()) {
    return;
  }

  brokerName = stats.getName();
  latestStats = stats;
  if (rackId == null) {
    rackId = stats.getRackId() != null ? stats.getRackId() : stats.getAvailabilityZone();
  }

  // TODO: handle null pointer exceptions properly
  leaderReplicas = stats.getLeaderReplicas().stream().map(tps ->
      new TopicPartition(tps.getTopic(), tps.getPartition())).collect(Collectors.toSet());

  followerReplicas = stats.getFollowerReplicas().stream().map(tps ->
      new TopicPartition(tps.getTopic(), tps.getPartition())).collect(Collectors.toSet());
}
 
Developer: pinterest | Project: doctorkafka | Source: KafkaBroker.java

Example 11: testFetchResponseV4

import org.apache.kafka.common.TopicPartition; // import the package/class we depend on
@Test
public void testFetchResponseV4() {
    LinkedHashMap<TopicPartition, FetchResponse.PartitionData> responseData = new LinkedHashMap<>();

    MemoryRecords records = MemoryRecords.readableRecords(ByteBuffer.allocate(10));

    List<FetchResponse.AbortedTransaction> abortedTransactions = asList(
            new FetchResponse.AbortedTransaction(10, 100),
            new FetchResponse.AbortedTransaction(15, 50)
    );
    responseData.put(new TopicPartition("bar", 0), new FetchResponse.PartitionData(Errors.NONE, 100000,
            FetchResponse.INVALID_LAST_STABLE_OFFSET, FetchResponse.INVALID_LOG_START_OFFSET, abortedTransactions, records));
    responseData.put(new TopicPartition("bar", 1), new FetchResponse.PartitionData(Errors.NONE, 900000,
            5, FetchResponse.INVALID_LOG_START_OFFSET, null, records));
    responseData.put(new TopicPartition("foo", 0), new FetchResponse.PartitionData(Errors.NONE, 70000,
            6, FetchResponse.INVALID_LOG_START_OFFSET, Collections.<FetchResponse.AbortedTransaction>emptyList(), records));

    FetchResponse response = new FetchResponse(responseData, 10);
    FetchResponse deserialized = FetchResponse.parse(toBuffer(response.toStruct((short) 4)), (short) 4);
    assertEquals(responseData, deserialized.responseData());
}
 
Developer: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Source: RequestResponseTest.java

Example 12: testOneConsumerMultipleTopics

import org.apache.kafka.common.TopicPartition; // import the package/class we depend on
@Test
public void testOneConsumerMultipleTopics() {
    String topic1 = "topic1";
    String topic2 = "topic2";
    String consumerId = "consumer";

    Map<String, Integer> partitionsPerTopic = new HashMap<>();
    partitionsPerTopic.put(topic1, 1);
    partitionsPerTopic.put(topic2, 2);

    // assignor plus the topics(), tp(), partitions() and assertAssignment() helpers are fixtures from the surrounding test class
    Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic,
            Collections.singletonMap(consumerId, new Subscription(topics(topic1, topic2))));

    assertEquals(Collections.singleton(consumerId), assignment.keySet());
    assertAssignment(partitions(tp(topic1, 0), tp(topic2, 0), tp(topic2, 1)), assignment.get(consumerId));
}
 
Developer: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Source: RangeAssignorTest.java

Example 13: partitionGroups

import org.apache.kafka.common.TopicPartition; // import the package/class we depend on
/**
 * Generate tasks with the assigned topic partitions.
 *
 * @param topicGroups   group of topics that need to be joined together
 * @param metadata      metadata of the consuming cluster
 * @return The map from generated task ids to the assigned partitions
 */
public Map<TaskId, Set<TopicPartition>> partitionGroups(Map<Integer, Set<String>> topicGroups, Cluster metadata) {
    Map<TaskId, Set<TopicPartition>> groups = new HashMap<>();

    for (Map.Entry<Integer, Set<String>> entry : topicGroups.entrySet()) {
        Integer topicGroupId = entry.getKey();
        Set<String> topicGroup = entry.getValue();

        int maxNumPartitions = maxNumPartitions(metadata, topicGroup);

        for (int partitionId = 0; partitionId < maxNumPartitions; partitionId++) {
            Set<TopicPartition> group = new HashSet<>(topicGroup.size());

            for (String topic : topicGroup) {
                List<PartitionInfo> partitions = metadata.partitionsForTopic(topic);
                if (partitionId < partitions.size()) {
                    group.add(new TopicPartition(topic, partitionId));
                }
            }
            groups.put(new TaskId(topicGroupId, partitionId), Collections.unmodifiableSet(group));
        }
    }

    return Collections.unmodifiableMap(groups);
}
 
Developer: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Source: DefaultPartitionGrouper.java
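To see what partitionGroups produces without constructing full Cluster metadata, here is a simplified re-implementation of the same grouping logic over a plain partition-count map. Task ids are rendered as plain strings; this sketches the behaviour only and is not the Streams API:

import java.util.*;
import org.apache.kafka.common.TopicPartition;

static Map<String, Set<TopicPartition>> partitionGroups(int topicGroupId,
                                                        Set<String> topicGroup,
                                                        Map<String, Integer> partitionsPerTopic) {
    Map<String, Set<TopicPartition>> groups = new HashMap<>();
    // a task exists for every partition id up to the widest topic in the group
    int maxNumPartitions = topicGroup.stream().mapToInt(partitionsPerTopic::get).max().orElse(0);
    for (int partitionId = 0; partitionId < maxNumPartitions; partitionId++) {
        Set<TopicPartition> group = new HashSet<>();
        for (String topic : topicGroup) {
            if (partitionId < partitionsPerTopic.get(topic)) {
                group.add(new TopicPartition(topic, partitionId));
            }
        }
        groups.put(topicGroupId + "_" + partitionId, group); // mirrors TaskId(topicGroupId, partitionId)
    }
    return groups;
}

// e.g. topics A (2 partitions) and B (3 partitions) in group 0 yield:
// 0_0 -> [A-0, B-0], 0_1 -> [A-1, B-1], 0_2 -> [B-2]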

Example 14: next

import org.apache.kafka.common.TopicPartition; // import the package/class we depend on
/**
 * Seek to the next 'page' of records.
 */
public void next() {
    // Get all available partitions
    final List<TopicPartition> topicPartitions = getAllPartitions();

    // Get the tail (end) offset for each partition
    final Map<TopicPartition, Long> tailOffsets = kafkaConsumer.endOffsets(topicPartitions);

    // Loop over each partition
    for (final TopicPartition topicPartition: topicPartitions) {
        // Advance one page past the current position
        final long tailOffset = tailOffsets.get(topicPartition);
        final long currentOffset = kafkaConsumer.position(topicPartition);
        long newOffset = currentOffset + clientConfig.getMaxResultsPerPartition();

        // Cap at the tail so we never seek past the end of the partition
        if (newOffset > tailOffset) {
            newOffset = tailOffset;
        }
        logger.info("Partition: {} Previous Offset: {} New Offset: {}", topicPartition.partition(), currentOffset, newOffset);

        // Seek forward to the new offset
        kafkaConsumer.seek(topicPartition, newOffset);
    }
    commit();
}
 
Developer: SourceLabOrg | Project: kafka-webview | Source: WebKafkaConsumer.java

Example 15: onMessageConsumed

import org.apache.kafka.common.TopicPartition; // import the package/class we depend on
private synchronized void onMessageConsumed(ConsumerRecord<String, String> record) {
    log.info(String.format("Consumed message: [%s]", record));

    TopicPartition topicPartition = new TopicPartition(record.topic(), record.partition());

    if (!messagesByTopicPartition.containsKey(topicPartition)) {
        messagesByTopicPartition.put(topicPartition, new VersionedMessages(Lists.newLinkedList()));
    }

    VersionedMessages versionedMessages = messagesByTopicPartition.get(topicPartition);
    LinkedList<ConsumerRecord<String, String>> messages = versionedMessages.messages;
    messages.addFirst(record);

    // bound the per-partition history to the newest maxTopicMessagesCount messages
    if (messages.size() > maxTopicMessagesCount) {
        messages.removeLast();
    }

    versionedMessages.version.incrementAndGet();
}
 
Developer: enthusiast94 | Project: kafka-visualizer | Source: KafkaTopicsDataTracker.java


Note: The org.apache.kafka.common.TopicPartition class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors, and any use or distribution should follow the corresponding project's license. Please do not republish without permission.