

Java Node.id Method Code Examples

This article collects typical usage examples of the Node.id method from the Java class org.apache.kafka.common.Node. If you have been wondering what Node.id does, how to use it, or what it looks like in practice, the hand-picked examples below should help. You can also explore further usage examples of org.apache.kafka.common.Node.


The following shows 15 code examples of the Node.id method, ordered by popularity by default.
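Before the curated examples, here is a minimal, self-contained sketch of what Node.id returns: the integer broker id of a cluster node. The broker id, host, and port below are made up for illustration; in real code a Node usually arrives via a Cluster or Metadata response rather than being constructed directly.

import org.apache.kafka.common.Node;

public class NodeIdDemo {
    public static void main(String[] args) {
        // Hypothetical broker; real Nodes typically come from Cluster/Metadata responses.
        Node broker = new Node(42, "broker1.example.com", 9092);
        System.out.println("Broker id: " + broker.id()); // prints: Broker id: 42
    }
}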

Example 1: populateSnapshots

import org.apache.kafka.common.Node; // import the class this method depends on
private void populateSnapshots(Cluster kafkaCluster,
                               ClusterModel clusterModel,
                               TopicPartition tp,
                               Snapshot[] leaderLoadSnapshots) throws ModelInputException {
  PartitionInfo partitionInfo = kafkaCluster.partition(tp);
  // If partition info does not exist, the topic may have been deleted.
  if (partitionInfo != null) {
    for (Node replica : partitionInfo.replicas()) {
      boolean isLeader = partitionInfo.leader() != null && replica.id() == partitionInfo.leader().id();
      String rack = getRackHandleNull(replica);
      // Note that we assume the capacity resolver can still return the broker capacity even if the broker
      // is dead. We need this to get the host resource capacity.
      Map<Resource, Double> brokerCapacity =
          _brokerCapacityConfigResolver.capacityForBroker(rack, replica.host(), replica.id());
      clusterModel.createReplicaHandleDeadBroker(rack, replica.id(), tp, isLeader, brokerCapacity);
      // Push the load snapshot to the replica one by one.
      for (int i = 0; i < leaderLoadSnapshots.length; i++) {
        clusterModel.pushLatestSnapshot(rack, replica.id(), tp,
                                        isLeader ? leaderLoadSnapshots[i].duplicate() : MonitorUtils.toFollowerSnapshot(leaderLoadSnapshots[i]));
      }
    }
  }
}
 
Developer: linkedin, Project: cruise-control, Lines: 24, Source: LoadMonitor.java

Example 2: isValidSample

import org.apache.kafka.common.Node; // import the class this method depends on
/**
 * A simple sanity check on the sample data. It verifies that
 * <p>
 * 1. the sampled data comes from the broker that holds the leader replica. If it does not, the data is
 * simply discarded, because a leader migration may have occurred and the metrics in the old data might
 * no longer be accurate.
 * <p>
 * 2. the sample contains metrics for all the resources.
 *
 * @param sample the sample to sanity-check.
 * @param leaderValidation whether to validate the leader.
 * @return <tt>true</tt> if the sample is valid.
 */
private boolean isValidSample(PartitionMetricSample sample, boolean leaderValidation) {
  boolean validLeader = true;
  if (leaderValidation) {
    Node leader = _metadata.fetch().leaderFor(sample.topicPartition());
    validLeader = (leader != null) && (sample.brokerId() == leader.id());
    if (!validLeader) {
      LOG.warn("The metric sample is discarded due to invalid leader. Current leader {}, Sample: {}", leader, sample);
    }
  }
  boolean completeMetrics = sample.numMetrics() == Resource.values().length;
  if (!completeMetrics) {
    LOG.warn("The metric sample is discarded due to missing metrics. Sample: {}", sample);
  }
  return validLeader && completeMetrics;
}
 
Developer: linkedin, Project: cruise-control, Lines: 29, Source: MetricSampleAggregator.java

Example 3: isReplicaMovementDone

import org.apache.kafka.common.Node; // import the class this method depends on
private boolean isReplicaMovementDone(Cluster cluster, TopicPartition tp, ExecutionTask task) {
  boolean destinationExists = false;
  boolean sourceExists = false;
  for (Node node : cluster.partition(tp).replicas()) {
    destinationExists = destinationExists || (node.id() == task.destinationBrokerId());
    sourceExists = sourceExists || (node.id() == task.sourceBrokerId());
  }
  switch (task.state()) {
    case IN_PROGRESS:
      return destinationExists && !sourceExists;
    case ABORTING:
      return !destinationExists && sourceExists;
    case DEAD:
      return !destinationExists && !sourceExists;
    default:
      throw new IllegalStateException("Should never be here. State " + task.state());
  }
}
 
Developer: linkedin, Project: cruise-control, Lines: 19, Source: Executor.java

Example 4: populateBrokerMetaData

import org.apache.kafka.common.Node; // import the class this method depends on
private void populateBrokerMetaData() {
	for (TopicMetadata topicMetadata : topicMetadataMap.values()) {
		for (PartitionMetadata partitionMetadata : topicMetadata.partitionMetadata()) {
			int leader = partitionMetadata.leader().id();
			BrokerMetaData leaderBroker = brokerMetaDataMap.get(leader);
			if (leaderBroker != null) {
				leaderBroker.incrementNumLeaders();
			}
			for (Node replica : partitionMetadata.replicas()) {
				int follower = replica.id();
				if (leader != follower) {
					BrokerMetaData followerBroker = brokerMetaDataMap.get(follower);
					if (followerBroker != null) {
						followerBroker.incrementNumFollowers();
					}
				}
			}
		}
	}
	log.debug("Broker meta cache" + BROKERS_TOPICS_CACHE_INSTANCE.getBrokerMetaDataMap());
}
 
Developer: flipkart-incubator, Project: kafka-balancer, Lines: 22, Source: BrokersTopicsCache.java

Example 5: getTargetBroker

import org.apache.kafka.common.Node; // import the class this method depends on
private int getTargetBroker(List<Node> replicas, int leader) {
	int index = 0;
	int mostLoaded = Integer.MIN_VALUE;
	int size = replicas.size();
	for (int i = size - 1; i >= 0; i--) {
		Node node = replicas.get(i);
		int id = node.id();
		if (id == leader) {
			continue;
		}
		Integer numFollowers = brokersInfo.get(id).getNumFollowers();
		if (mostLoaded < numFollowers) {
			mostLoaded = numFollowers;
			index = i;
		}
	}
	return replicas.remove(index).id();
}
 
Developer: flipkart-incubator, Project: kafka-balancer, Lines: 19, Source: ReplicationSetterBalancer.java

Example 6: fillInFollowerBytesInRate

import org.apache.kafka.common.Node; // import the class this method depends on
private void fillInFollowerBytesInRate(Cluster cluster, Map<Integer, Map<String, Integer>> leaderDistributionStats) {
  synchronized (this) {
    if (!_brokerFollowerLoad.isEmpty()) {
      return;
    }
    for (Node node : cluster.nodes()) {
      _brokerFollowerLoad.putIfAbsent(node.id(), 0.0);
      BrokerLoad brokerLoad = _brokerLoad.get(node.id());
      if (brokerLoad == null) {
        // new broker?
        continue;
      }
      for (PartitionInfo partitionInfo : cluster.partitionsForNode(node.id())) {
        IOLoad topicIOLoad = brokerLoad.ioLoad(partitionInfo.topic(), partitionInfo.partition());
        if (topicIOLoad == null) {
          // The topic did not report any IO metric and the partition does not exist on the broker.
          LOG.debug("No IO load reported from broker {} for partition {}-{}",
                    node.id(), partitionInfo.topic(), partitionInfo.partition());
          continue;
        }
        int numLeadersOnBroker = leaderDistributionStats.get(node.id()).get(partitionInfo.topic());
        double partitionBytesIn = topicIOLoad.bytesIn() / numLeadersOnBroker;
        for (Node replica : partitionInfo.replicas()) {
          if (replica.id() != node.id()) {
            _brokerFollowerLoad.merge(replica.id(), partitionBytesIn, (v0, v1) -> v0 + v1);
          }
        }
      }
    }
  }
}
 
Developer: linkedin, Project: cruise-control, Lines: 32, Source: CruiseControlMetricsProcessor.java

Example 7: isReplicaDeletionDone

import org.apache.kafka.common.Node; // import the class this method depends on
private boolean isReplicaDeletionDone(Cluster cluster, TopicPartition tp, ExecutionTask task) {
  boolean sourceExists = false;
  for (Node node : cluster.partition(tp).replicas()) {
    sourceExists = sourceExists || (node.id() == task.sourceBrokerId());
  }
  return !sourceExists;
}
 
Developer: linkedin, Project: cruise-control, Lines: 8, Source: Executor.java

Example 8: isReplicaAdditionDone

import org.apache.kafka.common.Node; // import the class this method depends on
private boolean isReplicaAdditionDone(Cluster cluster, TopicPartition tp, ExecutionTask task) {
  boolean destinationExists = false;
  for (Node node : cluster.partition(tp).replicas()) {
    destinationExists = destinationExists || (node.id() == task.destinationBrokerId());
  }
  switch (task.state()) {
    case IN_PROGRESS:
      return destinationExists;
    case ABORTING:
    case DEAD:
      return true;
    default:
      throw new IllegalStateException("Should never be here.");
  }
}
 
Developer: linkedin, Project: cruise-control, Lines: 16, Source: Executor.java

Example 9: isLeadershipMovementDone

import org.apache.kafka.common.Node; // import the class this method depends on
private boolean isLeadershipMovementDone(Cluster cluster, TopicPartition tp, ExecutionTask task) {
  Node leader = cluster.leaderFor(tp);
  switch (task.state()) {
    case IN_PROGRESS:
      return leader != null && leader.id() == task.destinationBrokerId();
    case ABORTING:
    case DEAD:
      return true;
    default:
      throw new IllegalStateException("Should never be here.");
  }

}
 
Developer: linkedin, Project: cruise-control, Lines: 14, Source: Executor.java

Example 10: populateBrokerData

import org.apache.kafka.common.Node; // import the class this method depends on
/**
 * Populates broker data for a given topic.
 *
 * @param topicMetadata
 *            topic metadata
 * @param brokersInfo
 *            broker id -> Broker map
 * @param brokerMetaDataMap
 *            broker id -> BrokerMetaData map
 */
public void populateBrokerData(TopicMetadata topicMetadata, Map<Integer, Broker> brokersInfo,
		Map<Integer, BrokerMetaData> brokerMetaDataMap) {
	// iterate through the topic metainfo
	log.debug("Topic: " + topicMetadata.topic());

	for (PartitionMetadata part : topicMetadata.partitionMetadata()) {
		String replicas = "";
		List<Integer> partitionReplicas = new ArrayList<>();

		String leaderHost = part.leader().host();
		int leaderId = part.leader().id();
		int partitionId = part.partition();

		addToBrokerList(partitionId, leaderId, brokersInfo, brokerMetaDataMap, true);
		partitionReplicas.add(leaderId);

		for (Node replica : part.replicas()) {
			int followerId = replica.id();
			replicas += " " + followerId;

			// don't add the leader as a replica again
			if (leaderId != followerId) {
				addToBrokerList(partitionId, followerId, brokersInfo, brokerMetaDataMap, false);
				partitionReplicas.add(followerId);
			}
		}

		log.debug("    Partition: " + partitionId + ": Leader: " + leaderHost + " Replicas:[" + replicas + "] ");
	}
}
 
Developer: flipkart-incubator, Project: kafka-balancer, Lines: 41, Source: KafkaBalancer.java

Example 11: getControllerNode

import org.apache.kafka.common.Node; // import the class this method depends on
private Node getControllerNode(int controllerId, Collection<Node> brokers) {
    for (Node broker : brokers) {
        if (broker.id() == controllerId)
            return broker;
    }
    return null;
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 8, Source: MetadataResponse.java

Example 12: leaderChanged

import org.apache.kafka.common.Node; // import the class this method depends on
private static boolean leaderChanged(PartitionInfo prevPartInfo, PartitionInfo currPartInfo) {
  Node prevLeader = prevPartInfo.leader();
  Node currLeader = currPartInfo.leader();
  return !(prevLeader == null && currLeader == null) && !(prevLeader != null && currLeader != null
      && prevLeader.id() == currLeader.id());
}
 
Developer: linkedin, Project: cruise-control, Lines: 7, Source: MonitorUtils.java
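In other words, leaderChanged returns false only when both partitions are leaderless or when the same broker id leads in both; any other combination, including a leader appearing or disappearing, counts as a change.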

Example 13: process

import org.apache.kafka.common.Node; // import the class this method depends on
MetricSampler.Samples process(Cluster cluster,
                              Collection<TopicPartition> partitions,
                              MetricSampler.SamplingMode samplingMode) {
  Map<Integer, Map<String, Integer>> leaderDistributionStats = leaderDistributionStats(cluster);
  fillInFollowerBytesInRate(cluster, leaderDistributionStats);
  //TODO: maybe we need to skip the entire processing logic if the broker load is not consistent.
  // Theoretically we should not move forward at all if a broker reported an all-topic bytes-in rate different
  // from the sum over all its resident replicas. However, it is not yet clear how often this happens, so at this
  // point we still continue processing the other brokers. Later on, if in practice the all-topic bytes-in and the
  // aggregated value are rarely inconsistent, we can simply stop the sample generation when this happens.
  _brokerLoad.values().forEach(BrokerLoad::validate);

  Set<PartitionMetricSample> partitionMetricSamples = new HashSet<>();
  Set<BrokerMetricSample> brokerMetricSamples = new HashSet<>();

  int skippedPartition = 0;
  if (samplingMode == MetricSampler.SamplingMode.ALL
      || samplingMode == MetricSampler.SamplingMode.PARTITION_METRICS_ONLY) {
    for (TopicPartition tp : partitions) {
      try {
        PartitionMetricSample sample = buildPartitionMetricSample(cluster, tp, leaderDistributionStats);
        if (sample != null) {
          LOG.debug("Added partition metrics sample for {}", tp);
          partitionMetricSamples.add(sample);
        } else {
          skippedPartition++;
        }
      } catch (Exception e) {
        LOG.error("Error building partition metric sample for " + tp, e);
        skippedPartition++;
      }
    }
  }

  int skippedBroker = 0;
  if (samplingMode == MetricSampler.SamplingMode.ALL
      || samplingMode == MetricSampler.SamplingMode.BROKER_METRICS_ONLY) {
    for (Node node : cluster.nodes()) {
      BrokerLoad brokerLoad = _brokerLoad.get(node.id());
      if (brokerLoad == null || !brokerLoad.isValid()) {
        // A new broker or broker metrics are not consistent.
        LOG.debug("Skip generating broker metric sample for broker {} because it does not have IO load metrics or "
                      + "the metrics are inconsistent.", node.id());
        skippedBroker++;
        continue;
      }
      double leaderCpuUtil = brokerLoad.cpuUtil();
      if (leaderCpuUtil > 0) {
        BrokerMetricSample brokerMetricSample =
            new BrokerMetricSample(node.id(),
                                   leaderCpuUtil,
                                   brokerLoad.bytesIn() / BYTES_IN_KB,
                                   brokerLoad.bytesOut() / BYTES_IN_KB,
                                   // The replication bytes in is only available from Kafka 0.11.0 and above.
                                   (brokerLoad.replicationBytesIn() > 0 ?
                                       brokerLoad.replicationBytesIn() : _brokerFollowerLoad.get(node.id())) / BYTES_IN_KB,
                                   brokerLoad.replicationBytesOut() / BYTES_IN_KB,
                                   brokerLoad.messagesInRate(),
                                   brokerLoad.produceRequestRate(),
                                   brokerLoad.consumerFetchRequestRate(),
                                   brokerLoad.followerFetchRequestRate(),
                                   brokerLoad.requestHandlerAvgIdlePercent(),
                                   -1.0,
                                   brokerLoad.allTopicsProduceRequestRate(),
                                   brokerLoad.allTopicsFetchRequestRate(),
                                   _maxMetricTimestamp);
        LOG.debug("Added broker metric sample for broker {}", node.id());
        brokerMetricSamples.add(brokerMetricSample);
      } else {
        skippedBroker++;
      }
    }
  }
  LOG.info("Generated {}{} partition metric samples and {}{} broker metric samples for timestamp {}",
           partitionMetricSamples.size(), skippedPartition > 0 ? "(" + skippedPartition + " skipped)" : "",
           brokerMetricSamples.size(), skippedBroker > 0 ? "(" + skippedBroker + " skipped)" : "",
           _maxMetricTimestamp);
  return new MetricSampler.Samples(partitionMetricSamples, brokerMetricSamples);
}
 
Developer: linkedin, Project: cruise-control, Lines: 80, Source: CruiseControlMetricsProcessor.java

Example 14: testCommitsFetchedDuringAssign

import org.apache.kafka.common.Node; // import the class this method depends on
@Test
public void testCommitsFetchedDuringAssign() {
    long offset1 = 10000;
    long offset2 = 20000;

    int rebalanceTimeoutMs = 6000;
    int sessionTimeoutMs = 3000;
    int heartbeatIntervalMs = 2000;
    int autoCommitIntervalMs = 1000;

    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 1);
    Node node = cluster.nodes().get(0);

    Metadata metadata = createMetadata();
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());

    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RoundRobinAssignor();

    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
            rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);
    consumer.assign(singletonList(tp0));

    // lookup coordinator
    client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());

    // fetch offset for one topic
    client.prepareResponseFrom(
            offsetResponse(Collections.singletonMap(tp0, offset1), Errors.NONE),
            coordinator);

    assertEquals(offset1, consumer.committed(tp0).offset());

    consumer.assign(Arrays.asList(tp0, tp1));

    // fetch offset for two topics
    Map<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(tp0, offset1);
    client.prepareResponseFrom(offsetResponse(offsets, Errors.NONE), coordinator);
    assertEquals(offset1, consumer.committed(tp0).offset());

    offsets.remove(tp0);
    offsets.put(tp1, offset2);
    client.prepareResponseFrom(offsetResponse(offsets, Errors.NONE), coordinator);
    assertEquals(offset2, consumer.committed(tp1).offset());
    consumer.close(0, TimeUnit.MILLISECONDS);
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 51, Source: KafkaConsumerTest.java
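A note on the coordinator construction above (also used in the next example): the consumer internally addresses its group coordinator as a synthetic Node whose id is Integer.MAX_VALUE minus the broker id, which keeps the coordinator connection separate from the ordinary broker connection; the test mirrors that convention when preparing mock responses.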

Example 15: testManualAssignmentChangeWithAutoCommitEnabled

import org.apache.kafka.common.Node; // import the class this method depends on
@Test
public void testManualAssignmentChangeWithAutoCommitEnabled() {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 3000;
    int autoCommitIntervalMs = 1000;

    Time time = new MockTime();
    Map<String, Integer> tpCounts = new HashMap<>();
    tpCounts.put(topic, 1);
    tpCounts.put(topic2, 1);
    Cluster cluster = TestUtils.singletonCluster(tpCounts);
    Node node = cluster.nodes().get(0);

    Metadata metadata = createMetadata();
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());

    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RangeAssignor();

    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
            rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);

    // lookup coordinator
    client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());

    // manual assignment
    consumer.assign(Arrays.asList(tp0));
    consumer.seekToBeginning(Arrays.asList(tp0));

    // fetch offset for one topic
    client.prepareResponseFrom(
            offsetResponse(Collections.singletonMap(tp0, 0L), Errors.NONE),
            coordinator);
    assertEquals(0, consumer.committed(tp0).offset());

    // verify that assignment immediately changes
    assertTrue(consumer.assignment().equals(Collections.singleton(tp0)));

    // there shouldn't be any need to lookup the coordinator or fetch committed offsets.
    // we just lookup the starting position and send the record fetch.
    client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 10L), Errors.NONE));
    client.prepareResponse(fetchResponse(tp0, 10L, 1));

    ConsumerRecords<String, String> records = consumer.poll(0);
    assertEquals(1, records.count());
    assertEquals(11L, consumer.position(tp0));

    // mock the offset commit response for the partitions that are about to be revoked
    AtomicBoolean commitReceived = prepareOffsetCommitResponse(client, coordinator, tp0, 11);

    // new manual assignment
    consumer.assign(Arrays.asList(t2p0));

    // verify that assignment immediately changes
    assertTrue(consumer.assignment().equals(Collections.singleton(t2p0)));
    // verify that the offset commits occurred as expected
    assertTrue(commitReceived.get());

    client.requests().clear();
    consumer.close();
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 65, Source: KafkaConsumerTest.java


Note: The org.apache.kafka.common.Node.id method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; the source code copyright belongs to the original authors, and distribution and use should follow the corresponding project's license. Do not reproduce without permission.