

Java ZkUtils.getLeaderForPartition Method Code Examples

This article collects typical usage examples of the kafka.utils.ZkUtils.getLeaderForPartition method in Java. If you are wondering how to call ZkUtils.getLeaderForPartition, what it does, or where to find working examples, the hand-picked code samples below may help. You can also explore further usage examples of kafka.utils.ZkUtils, the class this method belongs to.


The sections below show 5 code examples of the ZkUtils.getLeaderForPartition method, sorted by popularity by default.
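Before the full examples, here is a minimal, self-contained sketch of looking up the leader broker for a single partition. It assumes a Kafka version (roughly the 0.9–1.x line) in which ZkUtils exposes the instance method used in Example 1; the ZooKeeper connect string, topic name, and timeout values are placeholders, not values taken from the examples below.

```java
import kafka.utils.ZkUtils;
import scala.Option;

public class LeaderLookupSketch {
    public static void main(String[] args) {
        // Placeholder connect string, topic, and partition; adjust for your cluster.
        String zkConnect = "localhost:2181";
        String topic = "my-topic";
        int partition = 0;

        // Session/connection timeouts in ms, ZooKeeper security disabled.
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000, false);
        try {
            // Returns Some(brokerId) if a leader is currently elected, None otherwise.
            Option<Object> leaderOpt = zkUtils.getLeaderForPartition(topic, partition);
            if (leaderOpt.isDefined()) {
                int leaderBrokerId = (Integer) leaderOpt.get();
                System.out.println("Leader broker id: " + leaderBrokerId);
            } else {
                System.out.println("No leader elected for " + topic + "-" + partition);
            }
        } finally {
            zkUtils.close();
        }
    }
}
```

Note that Examples 2–5 call an older, static form of the method that takes a ZkClient as its first argument; the instance form above mirrors Example 1.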

Example 1: getPartitionInfo

import kafka.utils.ZkUtils; // import the class that provides the method
private static List<PartitionInfo> getPartitionInfo(ZkUtils zkUtils, String topic) {
  scala.collection.mutable.ArrayBuffer<String> topicList = new scala.collection.mutable.ArrayBuffer<>();
  topicList.$plus$eq(topic);
  scala.collection.Map<Object, scala.collection.Seq<Object>> partitionAssignments =
      zkUtils.getPartitionAssignmentForTopics(topicList).apply(topic);
  List<PartitionInfo> partitionInfoList = new ArrayList<>();
  scala.collection.Iterator<scala.Tuple2<Object, scala.collection.Seq<Object>>> it = partitionAssignments.iterator();
  while (it.hasNext()) {
    scala.Tuple2<Object, scala.collection.Seq<Object>> scalaTuple = it.next();
    Integer partition = (Integer) scalaTuple._1();
    scala.Option<Object> leaderOption = zkUtils.getLeaderForPartition(topic, partition);
    Node leader = leaderOption.isEmpty() ?  null : new Node((Integer) leaderOption.get(), "", -1);
    Node[] replicas = new Node[scalaTuple._2().size()];
    for (int i = 0; i < replicas.length; i++) {
      Integer brokerId = (Integer) scalaTuple._2().apply(i);
      replicas[i] = new Node(brokerId, "", -1);
    }
    partitionInfoList.add(new PartitionInfo(topic, partition, leader, replicas, null));
  }

  return partitionInfoList;
}
 
Developer: linkedin; Project: kafka-monitor; Lines: 23; Source: MultiClusterTopicManagementService.java

Example 2: getTopicLogSize

import kafka.utils.ZkUtils; // import the class that provides the method
private long getTopicLogSize(String topic, int pid) {
  Option<Object> o = ZkUtils.getLeaderForPartition(zkClient, topic, pid);
  if (o.isEmpty() || o.get() == null) {
    log.error("No broker for partition %s - %s", topic, pid);
    return 0;
  }
  Integer leaderId = Int.unbox(o.get());
  SimpleConsumer consumer = consumerMap.get(leaderId);
  if (consumer == null) {
    consumer = createSimpleConsumer(leaderId);
  }
  // createSimpleConsumer may fail.
  if (consumer == null) {
    return 0;
  }
  consumerMap.put(leaderId, consumer);
  TopicAndPartition topicAndPartition = new TopicAndPartition(topic, pid);
  PartitionOffsetRequestInfo requestInfo = new PartitionOffsetRequestInfo(OffsetRequest.LatestTime(), 1);
  OffsetRequest request = new OffsetRequest(
    new Map1<TopicAndPartition, PartitionOffsetRequestInfo>(topicAndPartition, requestInfo),
    0,
    Request.OrdinaryConsumerId()
  );
  OffsetResponse response = consumer.getOffsetsBefore(request);
  PartitionOffsetsResponse offsetsResponse = response.partitionErrorAndOffsets().get(topicAndPartition).get();
  return scala.Long.unbox(offsetsResponse.offsets().head());
}
 
Developer: shunfei; Project: DCMonitor; Lines: 28; Source: KafkaInfos.java

Example 3: leadBroker

import kafka.utils.ZkUtils; // import the class that provides the method
public Integer leadBroker(String topic, int partition) {
    Option<Object> leaderOpt = ZkUtils.getLeaderForPartition(zkClient, topic, partition);
    if (!leaderOpt.isDefined()) {
        logger.warn(String.format("Leader for topic %s partition %d does not exist", topic, partition));
    }
    return leaderOpt.isDefined() ? (Integer) leaderOpt.get() : null;
}
 
Developer: wired-mind; Project: usher; Lines: 8; Source: EmbeddedKafkaServer.java

Example 4: testDefaultParameters

import kafka.utils.ZkUtils; // import the class that provides the method
@Test
public void testDefaultParameters() throws IOException {
    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME,
                    "--replication-factor", "2", "--partitions", "1"}));
    String description = "{\n" +
            "    \"type\": \"kafka\",\n" +
            "    \"client.id\": \"kafkasink\",\n" +
            "    \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            "    \"acks\": 1\n" +
            "}";


    KafkaSink sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
    sink.open();
    Iterator<Message> msgIterator = new MessageSetReader(createMessageSet(TOPIC_NAME, 2)).iterator();
    while (msgIterator.hasNext()) {
        sink.writeTo(new StringMessage(msgIterator.next()));
    }
    assertTrue(sink.getNumOfPendingMessages() > 0);
    sink.close();
    assertEquals(sink.getNumOfPendingMessages(), 0);
    System.out.println(sink.getStat());

    // get the leader
    Option<Object> leaderOpt = ZkUtils.getLeaderForPartition(zk.getZkClient(), TOPIC_NAME, 0);
    assertTrue("Leader for topic new-topic partition 0 should exist", leaderOpt.isDefined());
    int leader = (Integer) leaderOpt.get();

    KafkaConfig config;
    if (leader == kafkaServer.getServer(0).config().brokerId()) {
        config = kafkaServer.getServer(0).config();
    } else {
        config = kafkaServer.getServer(1).config();
    }
    SimpleConsumer consumer = new SimpleConsumer(config.hostName(), config.port(), 100000, 100000, "clientId");
    FetchResponse response = consumer.fetch(new FetchRequestBuilder().addFetch(TOPIC_NAME, 0, 0, 100000).build());

    List<MessageAndOffset> messageSet = Lists.newArrayList(response.messageSet(TOPIC_NAME, 0).iterator());
    assertEquals("Should have fetched 2 messages", 2, messageSet.size());

    assertEquals(new String(extractMessage(messageSet, 0)), "testMessage" + 0);
    assertEquals(new String(extractMessage(messageSet, 1)), "testMessage" + 1);
}
 
Developer: Netflix; Project: suro; Lines: 46; Source: TestKafkaSink.java

Example 5: testDefaultParameters

import kafka.utils.ZkUtils; // import the class that provides the method
@Test
public void testDefaultParameters() throws IOException {
    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME,
                    "--replication-factor", "2", "--partitions", "1"}));
    String description = "{\n" +
            "    \"type\": \"kafka\",\n" +
            "    \"client.id\": \"kafkasink\",\n" +
            "    \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            "    \"request.required.acks\": 1\n" +
            "}";

    ObjectMapper jsonMapper = new DefaultObjectMapper();
    jsonMapper.registerSubtypes(new NamedType(KafkaSinkV2.class, "kafka"));
    KafkaSinkV2 sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
    sink.open();
    // create and send test messages to Kafka
    Iterator<Message> msgIterator = new MessageSetReader(createMessageSet(TOPIC_NAME, 2)).iterator();
    HashSet<String> sentPayloads = new HashSet<String>(); // track sent messages for comparison later
    while (msgIterator.hasNext()) {
        StringMessage next = new StringMessage(msgIterator.next());
        sink.writeTo(next); // send
        sentPayloads.add( new String( next.getMessage().getPayload() ) ); // record
    }
    sink.close();
    assertEquals(sink.getNumOfPendingMessages(), 0);
    System.out.println(sink.getStat());

    // get the leader
    Option<Object> leaderOpt = ZkUtils.getLeaderForPartition(zk.getZkClient(), TOPIC_NAME, 0);
    assertTrue("Leader for topic new-topic partition 0 should exist", leaderOpt.isDefined());
    int leader = (Integer) leaderOpt.get();

    KafkaConfig config;
    if (leader == kafkaServer.getServer(0).config().brokerId()) {
        config = kafkaServer.getServer(0).config();
    } else {
        config = kafkaServer.getServer(1).config();
    }
    // get data back from Kafka
    SimpleConsumer consumer = new SimpleConsumer(config.hostName(), config.port(), 100000, 100000, "clientId");
    FetchResponse response = consumer.fetch(new FetchRequestBuilder().addFetch(TOPIC_NAME, 0, 0, 100000).build());

    List<MessageAndOffset> messageSet = Lists.newArrayList(response.messageSet(TOPIC_NAME, 0).iterator());
    assertEquals("Should have fetched 2 messages", 2, messageSet.size());

    for( int i=0; i<messageSet.size(); i++ ){
        // ensure that received message was one that was sent
        String receivedPayload = new String(extractMessage(messageSet, i));
        System.out.println( "Got message: " + new String( receivedPayload ) );
        assert( sentPayloads.remove( receivedPayload ) );
    }
    assertEquals(sentPayloads.size(), 0); // all sent messages should have been received
}
 
Developer: Netflix; Project: suro; Lines: 56; Source: TestKafkaSinkV2.java


Note: the kafka.utils.ZkUtils.getLeaderForPartition examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets are drawn from community open-source projects; copyright remains with the original authors, and any use or redistribution should follow the license of the corresponding project. Please do not reproduce without permission.