This article collects typical usage examples of the Node.port method from the Java class org.apache.kafka.common.Node. If you have been wondering what Node.port does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples for the enclosing class, org.apache.kafka.common.Node.
Four code examples of the Node.port method are shown below, ordered by popularity.
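Before the examples, here is a minimal, self-contained sketch (not taken from any of the examples below; the broker id, host name, and port are made up for illustration) showing what Node.port returns: the port of the broker endpoint that the Node object describes.

import org.apache.kafka.common.Node;

public class NodePortSketch {
    public static void main(String[] args) {
        // Node(id, host, port) describes a single Kafka broker endpoint
        Node broker = new Node(1, "broker-1.example.com", 9092);
        // host() returns the host name, port() the int port
        System.out.println(broker.host() + ":" + broker.port()); // broker-1.example.com:9092
    }
}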
Example 1: compare
import org.apache.kafka.common.Node; // import of the class that this method depends on
@Override
public int compare(Node a, Node b) {
    int result = a.host().compareTo(b.host());
    if (result == 0) {
        result = a.port() - b.port();
    }
    return result;
}
Example 2: testCommitsFetchedDuringAssign
import org.apache.kafka.common.Node; // import of the class that this method depends on
@Test
public void testCommitsFetchedDuringAssign() {
    long offset1 = 10000;
    long offset2 = 20000;
    int rebalanceTimeoutMs = 6000;
    int sessionTimeoutMs = 3000;
    int heartbeatIntervalMs = 2000;
    int autoCommitIntervalMs = 1000;
    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 1);
    Node node = cluster.nodes().get(0);
    Metadata metadata = createMetadata();
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RoundRobinAssignor();
    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
            rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);
    consumer.assign(singletonList(tp0));
    // lookup coordinator
    client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    // fetch offset for one topic
    client.prepareResponseFrom(
            offsetResponse(Collections.singletonMap(tp0, offset1), Errors.NONE),
            coordinator);
    assertEquals(offset1, consumer.committed(tp0).offset());
    consumer.assign(Arrays.asList(tp0, tp1));
    // fetch offset for two topics
    Map<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(tp0, offset1);
    client.prepareResponseFrom(offsetResponse(offsets, Errors.NONE), coordinator);
    assertEquals(offset1, consumer.committed(tp0).offset());
    offsets.remove(tp0);
    offsets.put(tp1, offset2);
    client.prepareResponseFrom(offsetResponse(offsets, Errors.NONE), coordinator);
    assertEquals(offset2, consumer.committed(tp1).offset());
    consumer.close(0, TimeUnit.MILLISECONDS);
}
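Examples 2 through 4 all call Node.port in the same place: when building the Node that stands in for the group coordinator. The test reuses the broker's node.host() and node.port() but assigns a synthetic id (Integer.MAX_VALUE - node.id()), so the coordinator is addressed at the same host and port while being kept distinct from the plain broker connection. A standalone sketch of that construction, with a made-up broker:

Node broker = new Node(0, "localhost", 9092);
Node coordinator = new Node(Integer.MAX_VALUE - broker.id(), broker.host(), broker.port());
// the coordinator shares the broker's address but not its id
System.out.println(coordinator.port() == broker.port()); // true
System.out.println(coordinator.id() == broker.id());     // false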
Example 3: testManualAssignmentChangeWithAutoCommitEnabled
import org.apache.kafka.common.Node; // import of the class that this method depends on
@Test
public void testManualAssignmentChangeWithAutoCommitEnabled() {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 3000;
    int autoCommitIntervalMs = 1000;
    Time time = new MockTime();
    Map<String, Integer> tpCounts = new HashMap<>();
    tpCounts.put(topic, 1);
    tpCounts.put(topic2, 1);
    Cluster cluster = TestUtils.singletonCluster(tpCounts);
    Node node = cluster.nodes().get(0);
    Metadata metadata = createMetadata();
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RangeAssignor();
    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
            rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);
    // lookup coordinator
    client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    // manual assignment
    consumer.assign(Arrays.asList(tp0));
    consumer.seekToBeginning(Arrays.asList(tp0));
    // fetch offset for one topic
    client.prepareResponseFrom(
            offsetResponse(Collections.singletonMap(tp0, 0L), Errors.NONE),
            coordinator);
    assertEquals(0, consumer.committed(tp0).offset());
    // verify that assignment immediately changes
    assertTrue(consumer.assignment().equals(Collections.singleton(tp0)));
    // there shouldn't be any need to lookup the coordinator or fetch committed offsets.
    // we just lookup the starting position and send the record fetch.
    client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 10L), Errors.NONE));
    client.prepareResponse(fetchResponse(tp0, 10L, 1));
    ConsumerRecords<String, String> records = consumer.poll(0);
    assertEquals(1, records.count());
    assertEquals(11L, consumer.position(tp0));
    // mock the offset commit response for the partitions about to be revoked
    AtomicBoolean commitReceived = prepareOffsetCommitResponse(client, coordinator, tp0, 11);
    // new manual assignment
    consumer.assign(Arrays.asList(t2p0));
    // verify that assignment immediately changes
    assertTrue(consumer.assignment().equals(Collections.singleton(t2p0)));
    // verify that the offset commits occurred as expected
    assertTrue(commitReceived.get());
    client.requests().clear();
    consumer.close();
}
Example 4: testManualAssignmentChangeWithAutoCommitDisabled
import org.apache.kafka.common.Node; // import of the class that this method depends on
@Test
public void testManualAssignmentChangeWithAutoCommitDisabled() {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 3000;
    int autoCommitIntervalMs = 1000;
    Time time = new MockTime();
    Map<String, Integer> tpCounts = new HashMap<>();
    tpCounts.put(topic, 1);
    tpCounts.put(topic2, 1);
    Cluster cluster = TestUtils.singletonCluster(tpCounts);
    Node node = cluster.nodes().get(0);
    Metadata metadata = createMetadata();
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RangeAssignor();
    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
            rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, false, autoCommitIntervalMs);
    // lookup coordinator
    client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    // manual assignment
    consumer.assign(Arrays.asList(tp0));
    consumer.seekToBeginning(Arrays.asList(tp0));
    // fetch offset for one topic
    client.prepareResponseFrom(
            offsetResponse(Collections.singletonMap(tp0, 0L), Errors.NONE),
            coordinator);
    assertEquals(0, consumer.committed(tp0).offset());
    // verify that assignment immediately changes
    assertTrue(consumer.assignment().equals(Collections.singleton(tp0)));
    // there shouldn't be any need to lookup the coordinator or fetch committed offsets.
    // we just lookup the starting position and send the record fetch.
    client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 10L), Errors.NONE));
    client.prepareResponse(fetchResponse(tp0, 10L, 1));
    ConsumerRecords<String, String> records = consumer.poll(0);
    assertEquals(1, records.count());
    assertEquals(11L, consumer.position(tp0));
    // new manual assignment
    consumer.assign(Arrays.asList(t2p0));
    // verify that assignment immediately changes
    assertTrue(consumer.assignment().equals(Collections.singleton(t2p0)));
    // the auto commit is disabled, so no offset commit request should be sent
    for (ClientRequest req : client.requests())
        assertTrue(req.requestBuilder().apiKey() != ApiKeys.OFFSET_COMMIT);
    client.requests().clear();
    consumer.close();
}