

Java ZkUtils.getLeaderForPartition Method Code Examples

This article collects typical usage examples of the Java method kafka.utils.ZkUtils.getLeaderForPartition. If you are wondering how exactly ZkUtils.getLeaderForPartition is used in Java, or looking for concrete examples of calling it, the hand-picked code examples below may help. You can also explore further usage examples of the enclosing class, kafka.utils.ZkUtils.


The sections below present 5 code examples of the ZkUtils.getLeaderForPartition method, ordered by popularity by default.
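
Across these examples, getLeaderForPartition appears in two forms: Example 1 calls it as an instance method on a ZkUtils object (newer Kafka clients, roughly 0.9/0.10), while Examples 2-5 call it as a static method that takes a ZkClient (older 0.8.x clients). The following is a minimal sketch of the instance-method variant, not taken from any of the projects below; the ZooKeeper connect string, timeouts, topic name, and class name are placeholders, and it assumes a client version in which ZkUtils.apply(...) and close() are available.

import kafka.utils.ZkUtils;
import scala.Option;

public class LeaderLookupSketch {
    public static void main(String[] args) {
        // Placeholder ZooKeeper connect string and timeouts; adjust for your environment.
        ZkUtils zkUtils = ZkUtils.apply("localhost:2181", 30000, 30000, false);
        try {
            // From Java the result is Option<Object>; the boxed value is the leader broker id.
            Option<Object> leaderOpt = zkUtils.getLeaderForPartition("my-topic", 0);
            if (leaderOpt.isDefined()) {
                int leaderBrokerId = (Integer) leaderOpt.get();
                System.out.println("Leader broker id: " + leaderBrokerId);
            } else {
                System.out.println("No leader currently assigned for my-topic partition 0");
            }
        } finally {
            zkUtils.close();
        }
    }
}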

Example 1: getPartitionInfo

import kafka.utils.ZkUtils; // import the package/class that this method depends on
private static List<PartitionInfo> getPartitionInfo(ZkUtils zkUtils, String topic) {
  scala.collection.mutable.ArrayBuffer<String> topicList = new scala.collection.mutable.ArrayBuffer<>();
  topicList.$plus$eq(topic);
  // Read the partition -> replica assignment for the topic from ZooKeeper.
  scala.collection.Map<Object, scala.collection.Seq<Object>> partitionAssignments =
      zkUtils.getPartitionAssignmentForTopics(topicList).apply(topic);
  List<PartitionInfo> partitionInfoList = new ArrayList<>();
  scala.collection.Iterator<scala.Tuple2<Object, scala.collection.Seq<Object>>> it = partitionAssignments.iterator();
  while (it.hasNext()) {
    scala.Tuple2<Object, scala.collection.Seq<Object>> scalaTuple = it.next();
    Integer partition = (Integer) scalaTuple._1();
    // The Option is empty when no leader is currently assigned to this partition.
    scala.Option<Object> leaderOption = zkUtils.getLeaderForPartition(topic, partition);
    Node leader = leaderOption.isEmpty() ? null : new Node((Integer) leaderOption.get(), "", -1);
    Node[] replicas = new Node[scalaTuple._2().size()];
    for (int i = 0; i < replicas.length; i++) {
      Integer brokerId = (Integer) scalaTuple._2().apply(i);
      replicas[i] = new Node(brokerId, "", -1);
    }
    partitionInfoList.add(new PartitionInfo(topic, partition, leader, replicas, null));
  }

  return partitionInfoList;
}
 
Developer ID: linkedin, Project: kafka-monitor, Lines of code: 23, Source: MultiClusterTopicManagementService.java

Example 2: getTopicLogSize

import kafka.utils.ZkUtils; // import the package/class that this method depends on
private long getTopicLogSize(String topic, int pid) {
  // Look up the leader broker id for the partition via ZooKeeper (static ZkUtils API of older Kafka clients).
  Option<Object> o = ZkUtils.getLeaderForPartition(zkClient, topic, pid);
  if (o.isEmpty() || o.get() == null) {
    log.error("No broker for partition %s - %s", topic, pid);
    return 0;
  }
  Integer leaderId = Int.unbox(o.get());
  SimpleConsumer consumer = consumerMap.get(leaderId);
  if (consumer == null) {
    consumer = createSimpleConsumer(leaderId);
  }
  // createSimpleConsumer may fail.
  if (consumer == null) {
    return 0;
  }
  consumerMap.put(leaderId, consumer);
  // Ask the leader broker for the latest offset (log end offset) of the partition.
  TopicAndPartition topicAndPartition = new TopicAndPartition(topic, pid);
  PartitionOffsetRequestInfo requestInfo = new PartitionOffsetRequestInfo(OffsetRequest.LatestTime(), 1);
  OffsetRequest request = new OffsetRequest(
    new Map1<TopicAndPartition, PartitionOffsetRequestInfo>(topicAndPartition, requestInfo),
    0,
    Request.OrdinaryConsumerId()
  );
  OffsetResponse response = consumer.getOffsetsBefore(request);
  PartitionOffsetsResponse offsetsResponse = response.partitionErrorAndOffsets().get(topicAndPartition).get();
  return scala.Long.unbox(offsetsResponse.offsets().head());
}
 
Developer ID: shunfei, Project: DCMonitor, Lines of code: 28, Source: KafkaInfos.java

Example 3: leadBroker

import kafka.utils.ZkUtils; // import the package/class that this method depends on
public Integer leadBroker(String topic, int partition) {
    Option<Object> leaderOpt = ZkUtils.getLeaderForPartition(zkClient, topic, partition);
    if (!leaderOpt.isDefined()) {
        logger.warn(String.format("Leader for topic %s partition %d does not exist", topic, partition));
    }
    return leaderOpt.isDefined() ? (Integer) leaderOpt.get() : null;
}
 
Developer ID: wired-mind, Project: usher, Lines of code: 8, Source: EmbeddedKafkaServer.java

Example 4: testDefaultParameters

import kafka.utils.ZkUtils; // import the package/class that this method depends on
@Test
public void testDefaultParameters() throws IOException {
    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME,
                    "--replication-factor", "2", "--partitions", "1"}));
    String description = "{\n" +
            "    \"type\": \"kafka\",\n" +
            "    \"client.id\": \"kafkasink\",\n" +
            "    \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            "    \"acks\": 1\n" +
            "}";


    KafkaSink sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
    sink.open();
    Iterator<Message> msgIterator = new MessageSetReader(createMessageSet(TOPIC_NAME, 2)).iterator();
    while (msgIterator.hasNext()) {
        sink.writeTo(new StringMessage(msgIterator.next()));
    }
    assertTrue(sink.getNumOfPendingMessages() > 0);
    sink.close();
    assertEquals(sink.getNumOfPendingMessages(), 0);
    System.out.println(sink.getStat());

    // get the leader
    Option<Object> leaderOpt = ZkUtils.getLeaderForPartition(zk.getZkClient(), TOPIC_NAME, 0);
    assertTrue("Leader for topic new-topic partition 0 should exist", leaderOpt.isDefined());
    int leader = (Integer) leaderOpt.get();

    KafkaConfig config;
    if (leader == kafkaServer.getServer(0).config().brokerId()) {
        config = kafkaServer.getServer(0).config();
    } else {
        config = kafkaServer.getServer(1).config();
    }
    SimpleConsumer consumer = new SimpleConsumer(config.hostName(), config.port(), 100000, 100000, "clientId");
    FetchResponse response = consumer.fetch(new FetchRequestBuilder().addFetch(TOPIC_NAME, 0, 0, 100000).build());

    List<MessageAndOffset> messageSet = Lists.newArrayList(response.messageSet(TOPIC_NAME, 0).iterator());
    assertEquals("Should have fetched 2 messages", 2, messageSet.size());

    assertEquals(new String(extractMessage(messageSet, 0)), "testMessage" + 0);
    assertEquals(new String(extractMessage(messageSet, 1)), "testMessage" + 1);
}
 
Developer ID: Netflix, Project: suro, Lines of code: 46, Source: TestKafkaSink.java

Example 5: testDefaultParameters

import kafka.utils.ZkUtils; // import the package/class that this method depends on
@Test
public void testDefaultParameters() throws IOException {
    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME,
                    "--replication-factor", "2", "--partitions", "1"}));
    String description = "{\n" +
            "    \"type\": \"kafka\",\n" +
            "    \"client.id\": \"kafkasink\",\n" +
            "    \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            "    \"request.required.acks\": 1\n" +
            "}";

    ObjectMapper jsonMapper = new DefaultObjectMapper();
    jsonMapper.registerSubtypes(new NamedType(KafkaSinkV2.class, "kafka"));
    KafkaSinkV2 sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
    sink.open();
    // create send test messages to Kafka
    Iterator<Message> msgIterator = new MessageSetReader(createMessageSet(TOPIC_NAME, 2)).iterator();
    HashSet<String> sentPayloads = new HashSet<String>(); // track sent messages for comparison later
    while (msgIterator.hasNext()) {
        StringMessage next = new StringMessage(msgIterator.next());
        sink.writeTo(next); // send
        sentPayloads.add( new String( next.getMessage().getPayload() ) ); // record
    }
    sink.close();
    assertEquals(sink.getNumOfPendingMessages(), 0);
    System.out.println(sink.getStat());

    // get the leader
    Option<Object> leaderOpt = ZkUtils.getLeaderForPartition(zk.getZkClient(), TOPIC_NAME, 0);
    assertTrue("Leader for topic new-topic partition 0 should exist", leaderOpt.isDefined());
    int leader = (Integer) leaderOpt.get();

    KafkaConfig config;
    if (leader == kafkaServer.getServer(0).config().brokerId()) {
        config = kafkaServer.getServer(0).config();
    } else {
        config = kafkaServer.getServer(1).config();
    }
    // get data back from Kafka
    SimpleConsumer consumer = new SimpleConsumer(config.hostName(), config.port(), 100000, 100000, "clientId");
    FetchResponse response = consumer.fetch(new FetchRequestBuilder().addFetch(TOPIC_NAME, 0, 0, 100000).build());

    List<MessageAndOffset> messageSet = Lists.newArrayList(response.messageSet(TOPIC_NAME, 0).iterator());
    assertEquals("Should have fetched 2 messages", 2, messageSet.size());

    for (int i = 0; i < messageSet.size(); i++) {
        // ensure that each received message was one that was sent
        String receivedPayload = new String(extractMessage(messageSet, i));
        System.out.println("Got message: " + receivedPayload);
        // use a JUnit assertion so the check runs even when -ea is not enabled
        assertTrue(sentPayloads.remove(receivedPayload));
    }
    assertEquals(sentPayloads.size(), 0); // all sent messages should have been received
}
 
Developer ID: Netflix, Project: suro, Lines of code: 56, Source: TestKafkaSinkV2.java


Note: the kafka.utils.ZkUtils.getLeaderForPartition method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Consult the corresponding project's license before redistributing or reusing the code; do not reproduce this page without permission.