

Java TestUtils.singletonCluster Method Code Examples

This article collects typical usage examples of the Java method org.apache.kafka.test.TestUtils.singletonCluster. If you are wondering what TestUtils.singletonCluster does, how to call it, or where to find working examples, the curated snippets below should help. You can also explore further usage examples of the enclosing class, org.apache.kafka.test.TestUtils.


The sections below present 14 code examples of the TestUtils.singletonCluster method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
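Before the examples, here is a minimal sketch of what TestUtils.singletonCluster produces, based on the two overloads exercised below (a (topic, partitionCount) form and a Map form). The class name SingletonClusterSketch is made up for illustration, and the sketch assumes the kafka-clients test artifact is on the classpath; exact signatures may differ between Kafka versions.

import java.util.Collections;

import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Node;
import org.apache.kafka.test.TestUtils;

public class SingletonClusterSketch {
    public static void main(String[] args) {
        // A single-broker cluster hosting topic "topic" with one partition.
        Cluster cluster = TestUtils.singletonCluster("topic", 1);

        // The tests below grab the lone node to wire up mock clients and metadata.
        Node node = cluster.nodes().get(0);
        System.out.println("node id = " + node.id());

        // The Map overload places several topic -> partition-count entries on the same broker.
        Cluster multi = TestUtils.singletonCluster(Collections.singletonMap("topic2", 2));
        System.out.println("partitions of topic2 = " + multi.partitionsForTopic("topic2").size());
    }
}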

Example 1: setupCoordinator

import org.apache.kafka.test.TestUtils; // import the package/class the method depends on
@Before
public void setupCoordinator() {
    this.mockTime = new MockTime();
    this.mockClient = new MockClient(mockTime);

    Metadata metadata = new Metadata(100L, 60 * 60 * 1000L, true);
    this.consumerClient = new ConsumerNetworkClient(mockClient, metadata, mockTime,
            RETRY_BACKOFF_MS, REQUEST_TIMEOUT_MS);
    Metrics metrics = new Metrics();

    Cluster cluster = TestUtils.singletonCluster("topic", 1);
    metadata.update(cluster, Collections.<String>emptySet(), mockTime.milliseconds());
    this.node = cluster.nodes().get(0);
    mockClient.setNode(node);

    // the consumer client addresses the coordinator through a synthetic node id
    // (Integer.MAX_VALUE - brokerId), so the test builds the coordinator node the same way
    this.coordinatorNode = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    this.coordinator = new DummyCoordinator(consumerClient, metrics, mockTime);
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 19, Source: AbstractCoordinatorTest.java

Example 2: testQuotaMetrics

import org.apache.kafka.test.TestUtils; // import the package/class the method depends on
@Test
public void testQuotaMetrics() throws Exception {
    MockSelector selector = new MockSelector(time);
    Sensor throttleTimeSensor = Sender.throttleTimeSensor(metrics);
    Cluster cluster = TestUtils.singletonCluster("test", 1);
    Node node = cluster.nodes().get(0);
    NetworkClient client = new NetworkClient(selector, metadata, "mock", Integer.MAX_VALUE,
            1000, 1000, 64 * 1024, 64 * 1024, 1000,
            time, true, new ApiVersions(), throttleTimeSensor);

    short apiVersionsResponseVersion = ApiKeys.API_VERSIONS.latestVersion();
    ByteBuffer buffer = ApiVersionsResponse.createApiVersionsResponse(400, RecordBatch.CURRENT_MAGIC_VALUE).serialize(apiVersionsResponseVersion, new ResponseHeader(0));
    selector.delayedReceive(new DelayedReceive(node.idString(), new NetworkReceive(node.idString(), buffer)));
    while (!client.ready(node, time.milliseconds()))
        client.poll(1, time.milliseconds());
    selector.clear();

    for (int i = 1; i <= 3; i++) {
        int throttleTimeMs = 100 * i;
        ProduceRequest.Builder builder = new ProduceRequest.Builder(RecordBatch.CURRENT_MAGIC_VALUE, (short) 1, 1000,
                        Collections.<TopicPartition, MemoryRecords>emptyMap());
        ClientRequest request = client.newClientRequest(node.idString(), builder, time.milliseconds(), true, null);
        client.send(request, time.milliseconds());
        client.poll(1, time.milliseconds());
        ProduceResponse response = produceResponse(tp0, i, Errors.NONE, throttleTimeMs);
        buffer = response.serialize(ApiKeys.PRODUCE.latestVersion(), new ResponseHeader(request.correlationId()));
        selector.completeReceive(new NetworkReceive(node.idString(), buffer));
        client.poll(1, time.milliseconds());
        selector.clear();
    }
    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric avgMetric = allMetrics.get(metrics.metricName("produce-throttle-time-avg", METRIC_GROUP, ""));
    KafkaMetric maxMetric = allMetrics.get(metrics.metricName("produce-throttle-time-max", METRIC_GROUP, ""));
    // Throttle times are ApiVersions=400, Produce=(100, 200, 300)
    assertEquals(250, avgMetric.value(), EPS);
    assertEquals(400, maxMetric.value(), EPS);
    client.close();
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 39, Source: SenderTest.java

Example 3: testSendInOrder

import org.apache.kafka.test.TestUtils; // import the package/class the method depends on
@Test
public void testSendInOrder() throws Exception {
    int maxRetries = 1;
    Metrics m = new Metrics();
    try {
        Sender sender = new Sender(client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries,
                m, time, REQUEST_TIMEOUT, 50, null, apiVersions);
        // Create a two broker cluster, with partition 0 on broker 0 and partition 1 on broker 1
        Cluster cluster1 = TestUtils.clusterWith(2, "test", 2);
        metadata.update(cluster1, Collections.<String>emptySet(), time.milliseconds());

        // Send the first message.
        TopicPartition tp2 = new TopicPartition("test", 1);
        accumulator.append(tp2, 0L, "key1".getBytes(), "value1".getBytes(), null, null, MAX_BLOCK_TIMEOUT);
        sender.run(time.milliseconds()); // connect
        sender.run(time.milliseconds()); // send produce request
        String id = client.requests().peek().destination();
        assertEquals(ApiKeys.PRODUCE, client.requests().peek().requestBuilder().apiKey());
        Node node = new Node(Integer.parseInt(id), "localhost", 0);
        assertEquals(1, client.inFlightRequestCount());
        assertTrue(client.hasInFlightRequests());
        assertTrue("Client ready status should be true", client.isReady(node, 0L));

        time.sleep(900);
        // Now send another message to tp2
        accumulator.append(tp2, 0L, "key2".getBytes(), "value2".getBytes(), null, null, MAX_BLOCK_TIMEOUT);

        // Update metadata before the sender receives the response from broker 1; tp2 (partition 1) now moves to broker 0
        Cluster cluster2 = TestUtils.singletonCluster("test", 2);
        metadata.update(cluster2, Collections.<String>emptySet(), time.milliseconds());
        // Sender should not send the second message to node 0.
        sender.run(time.milliseconds());
        assertEquals(1, client.inFlightRequestCount());
        assertTrue(client.hasInFlightRequests());
    } finally {
        m.close();
    }
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 39, Source: SenderTest.java

Example 4: testQuotaMetrics

import org.apache.kafka.test.TestUtils; // import the package/class the method depends on
@Test
public void testQuotaMetrics() throws Exception {
    MockSelector selector = new MockSelector(time);
    Sensor throttleTimeSensor = Fetcher.throttleTimeSensor(metrics, metricsRegistry);
    Cluster cluster = TestUtils.singletonCluster("test", 1);
    Node node = cluster.nodes().get(0);
    NetworkClient client = new NetworkClient(selector, metadata, "mock", Integer.MAX_VALUE,
            1000, 1000, 64 * 1024, 64 * 1024, 1000,
            time, true, new ApiVersions(), throttleTimeSensor);

    short apiVersionsResponseVersion = ApiKeys.API_VERSIONS.latestVersion();
    ByteBuffer buffer = ApiVersionsResponse.createApiVersionsResponse(400, RecordBatch.CURRENT_MAGIC_VALUE).serialize(apiVersionsResponseVersion, new ResponseHeader(0));
    selector.delayedReceive(new DelayedReceive(node.idString(), new NetworkReceive(node.idString(), buffer)));
    while (!client.ready(node, time.milliseconds()))
        client.poll(1, time.milliseconds());
    selector.clear();

    for (int i = 1; i <= 3; i++) {
        int throttleTimeMs = 100 * i;
        FetchRequest.Builder builder = FetchRequest.Builder.forConsumer(100, 100, new LinkedHashMap<TopicPartition, PartitionData>());
        ClientRequest request = client.newClientRequest(node.idString(), builder, time.milliseconds(), true, null);
        client.send(request, time.milliseconds());
        client.poll(1, time.milliseconds());
        FetchResponse response = fetchResponse(tp1, nextRecords, Errors.NONE, i, throttleTimeMs);
        buffer = response.serialize(ApiKeys.FETCH.latestVersion(), new ResponseHeader(request.correlationId()));
        selector.completeReceive(new NetworkReceive(node.idString(), buffer));
        client.poll(1, time.milliseconds());
        selector.clear();
    }
    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric avgMetric = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchThrottleTimeAvg));
    KafkaMetric maxMetric = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchThrottleTimeMax));
    // Throttle times are ApiVersions=400, Fetch=(100, 200, 300)
    assertEquals(250, avgMetric.value(), EPSILON);
    assertEquals(400, maxMetric.value(), EPSILON);
    client.close();
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 38, Source: FetcherTest.java

Example 5: verifyNoCoordinatorLookupForManualAssignmentWithSeek

import org.apache.kafka.test.TestUtils; // import the package/class the method depends on
@Test
public void verifyNoCoordinatorLookupForManualAssignmentWithSeek() {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 3000;
    int heartbeatIntervalMs = 2000;
    int autoCommitIntervalMs = 1000;

    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 1);
    Node node = cluster.nodes().get(0);

    Metadata metadata = createMetadata();
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());

    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RoundRobinAssignor();

    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
            rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);
    consumer.assign(Arrays.asList(tp0));
    consumer.seekToBeginning(Arrays.asList(tp0));

    // there shouldn't be any need to lookup the coordinator or fetch committed offsets.
    // we just lookup the starting position and send the record fetch.
    client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 50L), Errors.NONE));
    client.prepareResponse(fetchResponse(tp0, 50L, 5));

    ConsumerRecords<String, String> records = consumer.poll(0);
    assertEquals(5, records.count());
    assertEquals(55L, consumer.position(tp0));
    consumer.close(0, TimeUnit.MILLISECONDS);
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 34, Source: KafkaConsumerTest.java

Example 6: testPollThrowsInterruptExceptionIfInterrupted

import org.apache.kafka.test.TestUtils; // import the package/class the method depends on
@Test
public void testPollThrowsInterruptExceptionIfInterrupted() throws Exception {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 3000;

    final Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 1);
    final Node node = cluster.nodes().get(0);

    Metadata metadata = createMetadata();
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());

    final MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    final PartitionAssignor assignor = new RoundRobinAssignor();

    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
            rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, false, 0);

    consumer.subscribe(Arrays.asList(topic), getConsumerRebalanceListener(consumer));
    prepareRebalance(client, node, assignor, Arrays.asList(tp0), null);

    consumer.poll(0);

    // interrupt the thread and call poll
    try {
        Thread.currentThread().interrupt();
        expectedException.expect(InterruptException.class);
        consumer.poll(0);
    } finally {
        // clear interrupted state again since this thread may be reused by JUnit
        Thread.interrupted();
    }
    consumer.close(0, TimeUnit.MILLISECONDS);
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 37, Source: KafkaConsumerTest.java

Example 7: fetchResponseWithUnexpectedPartitionIsIgnored

import org.apache.kafka.test.TestUtils; // import the package/class the method depends on
@Test
public void fetchResponseWithUnexpectedPartitionIsIgnored() {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 3000;

    // adjust auto commit interval lower than heartbeat so we don't need to deal with
    // a concurrent heartbeat request
    int autoCommitIntervalMs = 1000;

    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(singletonMap(topic, 1));
    Node node = cluster.nodes().get(0);

    Metadata metadata = createMetadata();
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());

    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RangeAssignor();

    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
            rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);

    consumer.subscribe(singletonList(topic), getConsumerRebalanceListener(consumer));

    prepareRebalance(client, node, assignor, singletonList(tp0), null);

    Map<TopicPartition, FetchInfo> fetches1 = new HashMap<>();
    fetches1.put(tp0, new FetchInfo(0, 1));
    fetches1.put(t2p0, new FetchInfo(0, 10)); // not assigned and not fetched
    client.prepareResponseFrom(fetchResponse(fetches1), node);

    ConsumerRecords<String, String> records = consumer.poll(0);
    assertEquals(0, records.count());
    consumer.close(0, TimeUnit.MILLISECONDS);
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 38, Source: KafkaConsumerTest.java

Example 8: verifyHeartbeatSent

import org.apache.kafka.test.TestUtils; // import the package/class the method depends on
@Test
public void verifyHeartbeatSent() throws Exception {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 1000;
    int autoCommitIntervalMs = 10000;

    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 1);
    Node node = cluster.nodes().get(0);

    Metadata metadata = createMetadata();
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());

    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RoundRobinAssignor();

    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
            rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);

    consumer.subscribe(Arrays.asList(topic), getConsumerRebalanceListener(consumer));
    Node coordinator = prepareRebalance(client, node, assignor, Arrays.asList(tp0), null);

    // initial fetch
    client.prepareResponseFrom(fetchResponse(tp0, 0, 0), node);

    consumer.poll(0);
    assertEquals(Collections.singleton(tp0), consumer.assignment());

    AtomicBoolean heartbeatReceived = prepareHeartbeatResponse(client, coordinator);

    // advance past the heartbeat interval (1 second) so a heartbeat becomes due
    time.sleep(heartbeatIntervalMs);
    Thread.sleep(heartbeatIntervalMs);

    consumer.poll(0);

    assertTrue(heartbeatReceived.get());
    consumer.close(0, TimeUnit.MILLISECONDS);
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 42, Source: KafkaConsumerTest.java

Example 9: verifyHeartbeatSentWhenFetchedDataReady

import org.apache.kafka.test.TestUtils; // import the package/class the method depends on
@Test
public void verifyHeartbeatSentWhenFetchedDataReady() throws Exception {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 1000;
    int autoCommitIntervalMs = 10000;

    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 1);
    Node node = cluster.nodes().get(0);

    Metadata metadata = createMetadata();
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());

    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RoundRobinAssignor();

    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
            rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);

    consumer.subscribe(Arrays.asList(topic), getConsumerRebalanceListener(consumer));
    Node coordinator = prepareRebalance(client, node, assignor, Arrays.asList(tp0), null);

    consumer.poll(0);

    // respond to the outstanding fetch so that we have data available on the next poll
    client.respondFrom(fetchResponse(tp0, 0, 5), node);
    client.poll(0, time.milliseconds());

    client.prepareResponseFrom(fetchResponse(tp0, 5, 0), node);
    AtomicBoolean heartbeatReceived = prepareHeartbeatResponse(client, coordinator);

    time.sleep(heartbeatIntervalMs);
    Thread.sleep(heartbeatIntervalMs);

    consumer.poll(0);

    assertTrue(heartbeatReceived.get());
    consumer.close(0, TimeUnit.MILLISECONDS);
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 42, Source: KafkaConsumerTest.java

Example 10: testCommitsFetchedDuringAssign

import org.apache.kafka.test.TestUtils; // import the package/class the method depends on
@Test
public void testCommitsFetchedDuringAssign() {
    long offset1 = 10000;
    long offset2 = 20000;

    int rebalanceTimeoutMs = 6000;
    int sessionTimeoutMs = 3000;
    int heartbeatIntervalMs = 2000;
    int autoCommitIntervalMs = 1000;

    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 1);
    Node node = cluster.nodes().get(0);

    Metadata metadata = createMetadata();
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());

    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RoundRobinAssignor();

    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
            rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);
    consumer.assign(singletonList(tp0));

    // lookup coordinator
    client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());

    // fetch the committed offset for the single assigned partition
    client.prepareResponseFrom(
            offsetResponse(Collections.singletonMap(tp0, offset1), Errors.NONE),
            coordinator);

    assertEquals(offset1, consumer.committed(tp0).offset());

    consumer.assign(Arrays.asList(tp0, tp1));

    // fetch committed offsets for both assigned partitions
    Map<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(tp0, offset1);
    client.prepareResponseFrom(offsetResponse(offsets, Errors.NONE), coordinator);
    assertEquals(offset1, consumer.committed(tp0).offset());

    offsets.remove(tp0);
    offsets.put(tp1, offset2);
    client.prepareResponseFrom(offsetResponse(offsets, Errors.NONE), coordinator);
    assertEquals(offset2, consumer.committed(tp1).offset());
    consumer.close(0, TimeUnit.MILLISECONDS);
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 51, Source: KafkaConsumerTest.java

Example 11: testAutoCommitSentBeforePositionUpdate

import org.apache.kafka.test.TestUtils; // import the package/class the method depends on
@Test
public void testAutoCommitSentBeforePositionUpdate() {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 3000;

    // adjust auto commit interval lower than heartbeat so we don't need to deal with
    // a concurrent heartbeat request
    int autoCommitIntervalMs = 1000;

    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 1);
    Node node = cluster.nodes().get(0);

    Metadata metadata = createMetadata();
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());

    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RoundRobinAssignor();

    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
            rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);

    consumer.subscribe(Arrays.asList(topic), getConsumerRebalanceListener(consumer));
    Node coordinator = prepareRebalance(client, node, assignor, Arrays.asList(tp0), null);

    consumer.poll(0);

    // respond to the outstanding fetch so that we have data available on the next poll
    client.respondFrom(fetchResponse(tp0, 0, 5), node);
    client.poll(0, time.milliseconds());

    time.sleep(autoCommitIntervalMs);

    client.prepareResponseFrom(fetchResponse(tp0, 5, 0), node);

    // no data has been returned to the user yet, so the committed offset should be 0
    AtomicBoolean commitReceived = prepareOffsetCommitResponse(client, coordinator, tp0, 0);

    consumer.poll(0);

    assertTrue(commitReceived.get());
    consumer.close(0, TimeUnit.MILLISECONDS);
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 46, Source: KafkaConsumerTest.java

Example 12: testWakeupWithFetchDataAvailable

import org.apache.kafka.test.TestUtils; // import the package/class the method depends on
@Test
public void testWakeupWithFetchDataAvailable() throws Exception {
    int rebalanceTimeoutMs = 60000;
    final int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 3000;

    // adjust auto commit interval lower than heartbeat so we don't need to deal with
    // a concurrent heartbeat request
    int autoCommitIntervalMs = 1000;

    final Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 1);
    Node node = cluster.nodes().get(0);

    Metadata metadata = createMetadata();
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());

    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RoundRobinAssignor();

    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
            rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);

    consumer.subscribe(Arrays.asList(topic), getConsumerRebalanceListener(consumer));
    prepareRebalance(client, node, assignor, Arrays.asList(tp0), null);

    consumer.poll(0);

    // respond to the outstanding fetch so that we have data available on the next poll
    client.respondFrom(fetchResponse(tp0, 0, 5), node);
    client.poll(0, time.milliseconds());

    consumer.wakeup();

    try {
        consumer.poll(0);
        fail();
    } catch (WakeupException e) {
        // expected: the pending wakeup aborts this poll()
    }

    // make sure the position hasn't been updated
    assertEquals(0, consumer.position(tp0));

    // the next poll should return the completed fetch
    ConsumerRecords<String, String> records = consumer.poll(0);
    assertEquals(5, records.count());
    // Increment time asynchronously to clear timeouts in closing the consumer
    final ScheduledExecutorService exec = Executors.newSingleThreadScheduledExecutor();
    exec.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            time.sleep(sessionTimeoutMs);
        }
    }, 0L, 10L, TimeUnit.MILLISECONDS);
    consumer.close();
    exec.shutdownNow();
    exec.awaitTermination(5L, TimeUnit.SECONDS);
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 60, Source: KafkaConsumerTest.java

Example 13: testManualAssignmentChangeWithAutoCommitEnabled

import org.apache.kafka.test.TestUtils; // import the package/class the method depends on
@Test
public void testManualAssignmentChangeWithAutoCommitEnabled() {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 3000;
    int autoCommitIntervalMs = 1000;

    Time time = new MockTime();
    Map<String, Integer> tpCounts = new HashMap<>();
    tpCounts.put(topic, 1);
    tpCounts.put(topic2, 1);
    Cluster cluster = TestUtils.singletonCluster(tpCounts);
    Node node = cluster.nodes().get(0);

    Metadata metadata = createMetadata();
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());

    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RangeAssignor();

    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
            rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);

    // lookup coordinator
    client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());

    // manual assignment
    consumer.assign(Arrays.asList(tp0));
    consumer.seekToBeginning(Arrays.asList(tp0));

    // fetch offset for one topic
    client.prepareResponseFrom(
            offsetResponse(Collections.singletonMap(tp0, 0L), Errors.NONE),
            coordinator);
    assertEquals(0, consumer.committed(tp0).offset());

    // verify that assignment immediately changes
    assertTrue(consumer.assignment().equals(Collections.singleton(tp0)));

    // there shouldn't be any need to lookup the coordinator or fetch committed offsets.
    // we just lookup the starting position and send the record fetch.
    client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 10L), Errors.NONE));
    client.prepareResponse(fetchResponse(tp0, 10L, 1));

    ConsumerRecords<String, String> records = consumer.poll(0);
    assertEquals(1, records.count());
    assertEquals(11L, consumer.position(tp0));

    // mock the offset commit response for the partitions about to be revoked
    AtomicBoolean commitReceived = prepareOffsetCommitResponse(client, coordinator, tp0, 11);

    // new manual assignment
    consumer.assign(Arrays.asList(t2p0));

    // verify that assignment immediately changes
    assertTrue(consumer.assignment().equals(Collections.singleton(t2p0)));
    // verify that the offset commits occurred as expected
    assertTrue(commitReceived.get());

    client.requests().clear();
    consumer.close();
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 65, Source: KafkaConsumerTest.java

Example 14: testManualAssignmentChangeWithAutoCommitDisabled

import org.apache.kafka.test.TestUtils; // import the package/class the method depends on
@Test
public void testManualAssignmentChangeWithAutoCommitDisabled() {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 3000;
    int autoCommitIntervalMs = 1000;

    Time time = new MockTime();
    Map<String, Integer> tpCounts = new HashMap<>();
    tpCounts.put(topic, 1);
    tpCounts.put(topic2, 1);
    Cluster cluster = TestUtils.singletonCluster(tpCounts);
    Node node = cluster.nodes().get(0);

    Metadata metadata = createMetadata();
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());

    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RangeAssignor();

    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
            rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, false, autoCommitIntervalMs);

    // lookup coordinator
    client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());

    // manual assignment
    consumer.assign(Arrays.asList(tp0));
    consumer.seekToBeginning(Arrays.asList(tp0));

    // fetch offset for one topic
    client.prepareResponseFrom(
            offsetResponse(Collections.singletonMap(tp0, 0L), Errors.NONE),
            coordinator);
    assertEquals(0, consumer.committed(tp0).offset());

    // verify that assignment immediately changes
    assertTrue(consumer.assignment().equals(Collections.singleton(tp0)));

    // there shouldn't be any need to lookup the coordinator or fetch committed offsets.
    // we just lookup the starting position and send the record fetch.
    client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 10L), Errors.NONE));
    client.prepareResponse(fetchResponse(tp0, 10L, 1));

    ConsumerRecords<String, String> records = consumer.poll(0);
    assertEquals(1, records.count());
    assertEquals(11L, consumer.position(tp0));

    // new manual assignment
    consumer.assign(Arrays.asList(t2p0));

    // verify that assignment immediately changes
    assertTrue(consumer.assignment().equals(Collections.singleton(t2p0)));

    // the auto commit is disabled, so no offset commit request should be sent
    for (ClientRequest req : client.requests())
        assertTrue(req.requestBuilder().apiKey() != ApiKeys.OFFSET_COMMIT);

    client.requests().clear();
    consumer.close();
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 64, Source: KafkaConsumerTest.java


Note: The org.apache.kafka.test.TestUtils.singletonCluster method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers, and copyright of the source code belongs to the original authors. For distribution and use, please refer to the corresponding project's License; do not reproduce without permission.